source: sasview/src/sas/sascalc/dataloader/data_info.py @ a9f579c

Last change on this file: a9f579c, checked in by jhbakker, 7 years ago

Manually added in all the SESANS modifications from Jurtest

1"""
2    Module that contains classes to hold information read from
3    reduced data files.
4
5    A good description of the data members can be found in
6    the CanSAS 1D XML data format:
7
8    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
9"""
10#####################################################################
11#This software was developed by the University of Tennessee as part of the
12#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
13#project funded by the US National Science Foundation.
14#See the license text in license.txt
15#copyright 2008, University of Tennessee
16######################################################################
17
18
19#TODO: Keep track of data manipulation in the 'process' data structure.
20#TODO: This module should be independent of plottables. We should write
21#        an adapter class for plottables when needed.
22
23#from sas.guitools.plottables import Data1D as plottable_1D
24from sas.sascalc.data_util.uncertainty import Uncertainty
25import numpy
26import math
27
28class plottable_sesans1D(object):
29    """
30    Place holder for 1D SESANS plottables.
31
32    #TODO: This was copied directly from plottable_1D and modified somewhat;
33    #the class has since been updated.
34    """
35    # The presence of these should be mutually
36    # exclusive with the presence of Qdev (dx)
37    x = None
38    y = None
39    lam = None
40    dx = None
41    dy = None
42    dlam = None
43    ## Slit smearing length
44    dxl = None
45    ## Slit smearing width
46    dxw = None
47
48    # Units
49    _xaxis = ''
50    _xunit = ''
51    _yaxis = ''
52    _yunit = ''
53
54    def __init__(self, x, y, lam, dx=None, dy=None, dlam=None):
56        self.x = numpy.asarray(x)
57        self.y = numpy.asarray(y)
58        self.lam = numpy.asarray(lam)
59        if dx is not None:
60            self.dx = numpy.asarray(dx)
61        if dy is not None:
62            self.dy = numpy.asarray(dy)
63        if dlam is not None:
64            self.dlam = numpy.asarray(dlam)
65
66    def xaxis(self, label, unit):
67        """
68        set the x axis label and unit
69        """
70        self._xaxis = label
71        self._xunit = unit
72
73    def yaxis(self, label, unit):
74        """
75        set the y axis label and unit
76        """
77        self._yaxis = label
78        self._yunit = unit
79
80
81class plottable_1D(object):
82    """
83    Data1D is a place holder for 1D plottables.
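
    Example (illustrative sketch; the values below are arbitrary and only
    show how the container is typically filled)::

        pl = plottable_1D(x=[0.01, 0.02, 0.03], y=[10.0, 5.0, 2.5],
                          dy=[0.5, 0.3, 0.2])
        pl.xaxis("Q", "1/A")
        pl.yaxis("I(Q)", "1/cm")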
84    """
85    # The presence of these should be mutually
86    # exclusive with the presence of Qdev (dx)
87    x = None
88    y = None
89    dx = None
90    dy = None
91    ## Slit smearing length
92    dxl = None
93    ## Slit smearing width
94    dxw = None
95    ## SESANS specific params (wavelengths for spin echo length calculation)
96    lam = None
97    dlam = None
98
99    # Units
100    _xaxis = ''
101    _xunit = ''
102    _yaxis = ''
103    _yunit = ''
104
105    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
106        self.x = numpy.asarray(x)
107        self.y = numpy.asarray(y)
108        if dx is not None:
109            self.dx = numpy.asarray(dx)
110        if dy is not None:
111            self.dy = numpy.asarray(dy)
112        if dxl is not None:
113            self.dxl = numpy.asarray(dxl)
114        if dxw is not None:
115            self.dxw = numpy.asarray(dxw)
116        if lam is not None:
117            self.lam = numpy.asarray(lam)
118        if dlam is not None:
119            self.dlam = numpy.asarray(dlam)
120
121    def xaxis(self, label, unit):
122        """
123        set the x axis label and unit
124        """
125        self._xaxis = label
126        self._xunit = unit
127
128    def yaxis(self, label, unit):
129        """
130        set the y axis label and unit
131        """
132        self._yaxis = label
133        self._yunit = unit
134
135
136class plottable_2D(object):
137    """
138    Data2D is a place holder for 2D plottables.
139    """
140    xmin = None
141    xmax = None
142    ymin = None
143    ymax = None
144    data = None
145    qx_data = None
146    qy_data = None
147    q_data = None
148    err_data = None
149    dqx_data = None
150    dqy_data = None
151    mask = None
152
153    # Units
154    _xaxis = ''
155    _xunit = ''
156    _yaxis = ''
157    _yunit = ''
158    _zaxis = ''
159    _zunit = ''
160
161    def __init__(self, data=None, err_data=None, qx_data=None,
162                 qy_data=None, q_data=None, mask=None,
163                 dqx_data=None, dqy_data=None):
164        self.data = numpy.asarray(data)
165        self.qx_data = numpy.asarray(qx_data)
166        self.qy_data = numpy.asarray(qy_data)
167        self.q_data = numpy.asarray(q_data)
168        self.mask = numpy.asarray(mask)
169        self.err_data = numpy.asarray(err_data)
170        if dqx_data is not None:
171            self.dqx_data = numpy.asarray(dqx_data)
172        if dqy_data is not None:
173            self.dqy_data = numpy.asarray(dqy_data)
174
175    def xaxis(self, label, unit):
176        """
177        set the x axis label and unit
178        """
179        self._xaxis = label
180        self._xunit = unit
181
182    def yaxis(self, label, unit):
183        """
184        set the y axis label and unit
185        """
186        self._yaxis = label
187        self._yunit = unit
188
189    def zaxis(self, label, unit):
190        """
191        set the z axis label and unit
192        """
193        self._zaxis = label
194        self._zunit = unit
195
196
197class Vector(object):
198    """
199    Vector class to hold multi-dimensional objects
200    """
201    ## x component
202    x = None
203    ## y component
204    y = None
205    ## z component
206    z = None
207
208    def __init__(self, x=None, y=None, z=None):
209        """
210        Initialization. Components that are not
211        set are set to None by default.
212
213        :param x: x component
214        :param y: y component
215        :param z: z component
216        """
217        self.x = x
218        self.y = y
219        self.z = z
220
221    def __str__(self):
222        msg = "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
223        return msg
224
225
226class Detector(object):
227    """
228    Class to hold detector information
229    """
230    ## Name of the instrument [string]
231    name = None
232    ## Sample to detector distance [float] [mm]
233    distance = None
234    distance_unit = 'mm'
235    ## Offset of this detector position in X, Y,
236    #(and Z if necessary) [Vector] [mm]
237    offset = None
238    offset_unit = 'm'
239    ## Orientation (rotation) of this detector in roll,
240    # pitch, and yaw [Vector] [degrees]
241    orientation = None
242    orientation_unit = 'degree'
243    ## Center of the beam on the detector in X and Y
244    #(and Z if necessary) [Vector] [mm]
245    beam_center = None
246    beam_center_unit = 'mm'
247    ## Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
248    pixel_size = None
249    pixel_size_unit = 'mm'
250    ## Slit length of the instrument for this detector.[float] [mm]
251    slit_length = None
252    slit_length_unit = 'mm'
253
254    def __init__(self):
255        """
256        Initialize class attributes that are objects...
257        """
258        self.offset = Vector()
259        self.orientation = Vector()
260        self.beam_center = Vector()
261        self.pixel_size = Vector()
262
263    def __str__(self):
264        _str = "Detector:\n"
265        _str += "   Name:         %s\n" % self.name
266        _str += "   Distance:     %s [%s]\n" % \
267            (str(self.distance), str(self.distance_unit))
268        _str += "   Offset:       %s [%s]\n" % \
269            (str(self.offset), str(self.offset_unit))
270        _str += "   Orientation:  %s [%s]\n" % \
271            (str(self.orientation), str(self.orientation_unit))
272        _str += "   Beam center:  %s [%s]\n" % \
273            (str(self.beam_center), str(self.beam_center_unit))
274        _str += "   Pixel size:   %s [%s]\n" % \
275            (str(self.pixel_size), str(self.pixel_size_unit))
276        _str += "   Slit length:  %s [%s]\n" % \
277            (str(self.slit_length), str(self.slit_length_unit))
278        return _str
279
280
281class Aperture(object):
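    """
    Class to hold aperture information
    """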
282    ## Name
283    name = None
284    ## Type
285    type = None
286    ## Size name
287    size_name = None
288    ## Aperture size [Vector]
289    size = None
290    size_unit = 'mm'
291    ## Aperture distance [float]
292    distance = None
293    distance_unit = 'mm'
294
295    def __init__(self):
296        self.size = Vector()
297
298
299class Collimation(object):
300    """
301    Class to hold collimation information
302    """
303    ## Name
304    name = None
305    ## Length [float] [mm]
306    length = None
307    length_unit = 'mm'
308    ## Aperture
309    aperture = None
310
311    def __init__(self):
312        self.aperture = []
313
314    def __str__(self):
315        _str = "Collimation:\n"
316        _str += "   Length:       %s [%s]\n" % \
317            (str(self.length), str(self.length_unit))
318        for item in self.aperture:
319            _str += "   Aperture size:%s [%s]\n" % \
320                (str(item.size), str(item.size_unit))
321            _str += "   Aperture_dist:%s [%s]\n" % \
322                (str(item.distance), str(item.distance_unit))
323        return _str
324
325
326class Source(object):
327    """
328    Class to hold source information
329    """
330    ## Name
331    name = None
332    ## Radiation type [string]
333    radiation = None
334    ## Beam size name
335    beam_size_name = None
336    ## Beam size [Vector] [mm]
337    beam_size = None
338    beam_size_unit = 'mm'
339    ## Beam shape [string]
340    beam_shape = None
341    ## Wavelength [float] [Angstrom]
342    wavelength = None
343    wavelength_unit = 'A'
344    ## Minimum wavelength [float] [Angstrom]
345    wavelength_min = None
346    wavelength_min_unit = 'nm'
347    ## Maximum wavelength [float] [Angstrom]
348    wavelength_max = None
349    wavelength_max_unit = 'nm'
350    ## Wavelength spread [float] [Angstrom]
351    wavelength_spread = None
352    wavelength_spread_unit = 'percent'
353
354    def __init__(self):
355        self.beam_size = Vector()
356
357    def __str__(self):
358        _str = "Source:\n"
359        _str += "   Radiation:    %s\n" % str(self.radiation)
360        _str += "   Shape:        %s\n" % str(self.beam_shape)
361        _str += "   Wavelength:   %s [%s]\n" % \
362            (str(self.wavelength), str(self.wavelength_unit))
363        _str += "   Waveln_min:   %s [%s]\n" % \
364            (str(self.wavelength_min), str(self.wavelength_min_unit))
365        _str += "   Waveln_max:   %s [%s]\n" % \
366            (str(self.wavelength_max), str(self.wavelength_max_unit))
367        _str += "   Waveln_spread:%s [%s]\n" % \
368            (str(self.wavelength_spread), str(self.wavelength_spread_unit))
369        _str += "   Beam_size:    %s [%s]\n" % \
370            (str(self.beam_size), str(self.beam_size_unit))
371        return _str
372
373
374"""
375Definitions of radiation types
376"""
377NEUTRON = 'neutron'
378XRAY = 'x-ray'
379MUON = 'muon'
380ELECTRON = 'electron'
381
382
383class Sample(object):
384    """
385    Class to hold the sample description
386    """
387    ## Short name for sample
388    name = ''
389    ## ID
390    ID = ''
391    ## Thickness [float] [mm]
392    thickness = None
393    thickness_unit = 'mm'
394    ## Transmission [float] [fraction]
395    transmission = None
396    ## Temperature [float] [No Default]
397    temperature = None
398    temperature_unit = None
399    ## Position [Vector] [mm]
400    position = None
401    position_unit = 'mm'
402    ## Orientation [Vector] [degrees]
403    orientation = None
404    orientation_unit = 'degree'
405    ## Details
406    details = None
407
408    def __init__(self):
409        self.position = Vector()
410        self.orientation = Vector()
411        self.details = []
412
413    def __str__(self):
414        _str = "Sample:\n"
415        _str += "   ID:           %s\n" % str(self.ID)
416        _str += "   Transmission: %s\n" % str(self.transmission)
417        _str += "   Thickness:    %s [%s]\n" % \
418            (str(self.thickness), str(self.thickness_unit))
419        _str += "   Temperature:  %s [%s]\n" % \
420            (str(self.temperature), str(self.temperature_unit))
421        _str += "   Position:     %s [%s]\n" % \
422            (str(self.position), str(self.position_unit))
423        _str += "   Orientation:  %s [%s]\n" % \
424            (str(self.orientation), str(self.orientation_unit))
425
426        _str += "   Details:\n"
427        for item in self.details:
428            _str += "      %s\n" % item
429
430        return _str
431
432
433class Process(object):
434    """
435    Class that holds information about the processes
436    performed on the data.
437    """
438    name = ''
439    date = ''
440    description = ''
441    term = None
442    notes = None
443
444    def __init__(self):
445        self.term = []
446        self.notes = []
447
448    def is_empty(self):
449        """
450            Return True if the object is empty
451        """
452        return len(self.name) == 0 and len(self.date) == 0 and len(self.description) == 0 \
453            and len(self.term) == 0 and len(self.notes) == 0
454
455    def single_line_desc(self):
456        """
457            Return a single line string representing the process
458        """
459        return "%s %s %s" % (self.name, self.date, self.description)
460
461    def __str__(self):
462        _str = "Process:\n"
463        _str += "   Name:         %s\n" % self.name
464        _str += "   Date:         %s\n" % self.date
465        _str += "   Description:  %s\n" % self.description
466        for item in self.term:
467            _str += "   Term:         %s\n" % item
468        for item in self.notes:
469            _str += "   Note:         %s\n" % item
470        return _str
471
472
473class TransmissionSpectrum(object):
474    """
475    Class that holds information about transmission spectrum
476    for white beams and spallation sources.
477    """
478    name = ''
479    timestamp = ''
480    ## Wavelength (float) [A]
481    wavelength = None
482    wavelength_unit = 'A'
483    ## Transmission (float) [unit less]
484    transmission = None
485    transmission_unit = ''
486    ## Transmission Deviation (float) [unit less]
487    transmission_deviation = None
488    transmission_deviation_unit = ''
489
490    def __init__(self):
491        self.wavelength = []
492        self.transmission = []
493        self.transmission_deviation = []
494
495    def __str__(self):
496        _str = "Transmission Spectrum:\n"
497        _str += "   Name:             \t{0}\n".format(self.name)
498        _str += "   Timestamp:        \t{0}\n".format(self.timestamp)
499        _str += "   Wavelength unit:  \t{0}\n".format(self.wavelength_unit)
500        _str += "   Transmission unit:\t{0}\n".format(self.transmission_unit)
501        _str += "   Trans. Dev. unit:  \t{0}\n".format(\
502                                            self.transmission_deviation_unit)
503        length_list = [len(self.wavelength), len(self.transmission), \
504                len(self.transmission_deviation)]
505        _str += "   Number of Pts:    \t{0}\n".format(max(length_list))
506        return _str
507
508
509class DataInfo(object):
510    """
511    Class to hold the data read from a file.
512    It includes four blocks of data for the
513    instrument description, the sample description,
514    the data itself and any other meta data.
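
    Example (illustrative sketch; attribute values are arbitrary)::

        info = DataInfo()
        info.title = "Example measurement"
        info.run.append(1234)
        info.add_notes("loaded for demonstration purposes only")
        print(info)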
515    """
516    ## Title
517    title = ''
518    ## Run number
519    run = None
520    ## Run name
521    run_name = None
522    ## File name
523    filename = ''
524    ## Notes
525    notes = None
526    ## Processes (Action on the data)
527    process = None
528    ## Instrument name
529    instrument = ''
530    ## Detector information
531    detector = None
532    ## Sample information
533    sample = None
534    ## Source information
535    source = None
536    ## Collimation information
537    collimation = None
538    ## Transmission Spectrum Info
539    trans_spectrum = None
540    ## Additional meta-data
541    meta_data = None
542    ## Loading errors
543    errors = None
544
545    def __init__(self):
546        """
547        Initialization
548        """
549        ## Title
550        self.title = ''
551        ## Run number
552        self.run = []
553        self.run_name = {}
554        ## File name
555        self.filename = ''
556        ## Notes
557        self.notes = []
558        ## Processes (Action on the data)
559        self.process = []
560        ## Instrument name
561        self.instrument = ''
562        ## Detector information
563        self.detector = []
564        ## Sample information
565        self.sample = Sample()
566        ## Source information
567        self.source = Source()
568        ## Collimation information
569        self.collimation = []
570        ## Transmission Spectrum
571        self.trans_spectrum = []
572        ## Additional meta-data
573        self.meta_data = {}
574        ## Loading errors
575        self.errors = []
576
577    def append_empty_process(self):
578        """
        Append an empty Process object to the list of processes.
579        """
580        self.process.append(Process())
581
582    def add_notes(self, message=""):
583        """
584        Add notes to datainfo
585        """
586        self.notes.append(message)
587
588    def __str__(self):
589        """
590        Nice printout
591        """
592        _str = "File:            %s\n" % self.filename
593        _str += "Title:           %s\n" % self.title
594        _str += "Run:             %s\n" % str(self.run)
595        _str += "Instrument:      %s\n" % str(self.instrument)
596        _str += "%s\n" % str(self.sample)
597        _str += "%s\n" % str(self.source)
598        for item in self.detector:
599            _str += "%s\n" % str(item)
600        for item in self.collimation:
601            _str += "%s\n" % str(item)
602        for item in self.process:
603            _str += "%s\n" % str(item)
604        for item in self.notes:
605            _str += "%s\n" % str(item)
606        for item in self.trans_spectrum:
607            _str += "%s\n" % str(item)
608        return _str
609
610    # Private method to perform operation. Not implemented for DataInfo,
611    # but should be implemented for each data class inherited from DataInfo
612    # that holds actual data (ex.: Data1D)
613    def _perform_operation(self, other, operation):
614        """
615        Private method to perform operation. Not implemented for DataInfo,
616        but should be implemented for each data class inherited from DataInfo
617        that holds actual data (ex.: Data1D)
618        """
619        return NotImplemented
620
621    def _perform_union(self, other):
622        """
623        Private method to perform union operation. Not implemented for DataInfo,
624        but should be implemented for each data class inherited from DataInfo
625        that holds actual data (ex.: Data1D)
626        """
627        return NotImplemented
628
629    def __add__(self, other):
630        """
631        Add two data sets
632
633        :param other: data set to add to the current one
634        :return: new data set
635        :raise ValueError: raised when two data sets are incompatible
636        """
637        def operation(a, b):
638            return a + b
639        return self._perform_operation(other, operation)
640
641    def __radd__(self, other):
642        """
643        Add two data sets
644
645        :param other: data set to add to the current one
646        :return: new data set
647        :raise ValueError: raised when two data sets are incompatible
648        """
649        def operation(a, b):
650            return b + a
651        return self._perform_operation(other, operation)
652
653    def __sub__(self, other):
654        """
655        Subtract two data sets
656
657        :param other: data set to subtract from the current one
658        :return: new data set
659        :raise ValueError: raised when two data sets are incompatible
660        """
661        def operation(a, b):
662            return a - b
663        return self._perform_operation(other, operation)
664
665    def __rsub__(self, other):
666        """
667        Subtract two data sets
668
669        :param other: data set from which the current one is subtracted
670        :return: new data set
671        :raise ValueError: raised when two data sets are incompatible
672        """
673        def operation(a, b):
674            return b - a
675        return self._perform_operation(other, operation)
676
677    def __mul__(self, other):
678        """
679        Multiply two data sets
680
681        :param other: data set to multiply with the current one
682        :return: new data set
683        :raise ValueError: raised when two data sets are incompatible
684        """
685        def operation(a, b):
686            return a * b
687        return self._perform_operation(other, operation)
688
689    def __rmul__(self, other):
690        """
691        Multiply two data sets
692
693        :param other: data set to multiply with the current one
694        :return: new data set
695        :raise ValueError: raised when two data sets are incompatible
696        """
697        def operation(a, b):
698            return b * a
699        return self._perform_operation(other, operation)
700
701    def __div__(self, other):
702        """
703        Divide this data set by another
704
705        :param other: data set that the current one is divided by
706        :return: new data set
707        :raise ValueError: raised when two data sets are incompatible
708        """
709        def operation(a, b):
710            return a/b
711        return self._perform_operation(other, operation)
712
713    def __rdiv__(self, other):
714        """
715        Divide another data set by this one
716
717        :param other: data set to be divided by the current one
718        :return: new data set
719        :raise ValueError: raised when two data sets are incompatible
720        """
721        def operation(a, b):
722            return b/a
723        return self._perform_operation(other, operation)
724
725    def __or__(self, other):
726        """
727        Union a data set with another
728
729        :param other: data set to be unified
730        :return: new data set
731        :raise ValueError: raised when two data sets are incompatible
732        """
733        return self._perform_union(other)
734
735    def __ror__(self, other):
736        """
737        Union a data set with another
738
739        :param other: data set to be unified
740        :return: new data set
741        :raise ValueError: raised when two data sets are incompatible
742        """
743        return self._perform_union(other)
744
745class Data1D(plottable_1D, DataInfo):
746    """
747    1D data class
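
    Example (illustrative sketch; values are arbitrary, and a SESANS set
    would be built the same way with isSesans=True)::

        data = Data1D(x=[0.001, 0.002, 0.003],
                      y=[100.0, 50.0, 25.0],
                      dy=[1.0, 0.7, 0.5])
        data.xaxis("Q", "1/A")
        data.yaxis("I(Q)", "1/cm")
        # Arithmetic propagates uncertainties point by point; assuming the
        # Uncertainty helper accepts scalar operands, a flat background can
        # be subtracted with:
        subtracted = data - 0.1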
748    """
749    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=False):
750        self.isSesans = isSesans
751        DataInfo.__init__(self)
752        plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
753        if self.isSesans:
754            x_unit = 'A'
755            y_unit = 'pol'
756        elif not self.isSesans:  # SANS data (an explicit branch is kept for exhaustiveness -JHB)
757            x_unit = '1/A'
758            y_unit = '1/cm'
759        else:  # unreachable for a boolean flag, kept as a defensive guard
760            raise TypeError('This is neither SANS nor SESANS data')
761
762    def __str__(self):
763        """
764        Nice printout
765        """
766        _str = "%s\n" % DataInfo.__str__(self)
767        _str += "Data:\n"
768        _str += "   Type:         %s\n" % self.__class__.__name__
769        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
770        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
771        _str += "   Length:       %g\n" % len(self.x)
772        return _str
773
774    def is_slit_smeared(self):
775        """
776        Check whether the data has slit smearing information
777        :return: True if slit smearing info is present, False otherwise
778        """
779        def _check(v):
780            if isinstance(v, (list, numpy.ndarray)) \
781                and len(v) > 0 and min(v) > 0:
782                return True
783            return False
784        return _check(self.dxl) or _check(self.dxw)
785
786    def clone_without_data(self, length=0, clone=None):
787        """
788        Clone the current object, without copying the data (which
789        will be filled out by a subsequent operation).
790        The data arrays will be initialized to zero.
791
792        :param length: length of the data array to be initialized
793        :param clone: if provided, the data will be copied to clone
794        """
795        from copy import deepcopy
796
797        if clone is None or not issubclass(clone.__class__, Data1D):
798            x = numpy.zeros(length)
799            dx = numpy.zeros(length)
800            y = numpy.zeros(length)
801            dy = numpy.zeros(length)
802            lam = numpy.zeros(length)
803            dlam = numpy.zeros(length)
804            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)
805
806        clone.title = self.title
807        clone.run = self.run
808        clone.filename = self.filename
809        clone.instrument = self.instrument
810        clone.notes = deepcopy(self.notes)
811        clone.process = deepcopy(self.process)
812        clone.detector = deepcopy(self.detector)
813        clone.sample = deepcopy(self.sample)
814        clone.source = deepcopy(self.source)
815        clone.collimation = deepcopy(self.collimation)
816        clone.trans_spectrum = deepcopy(self.trans_spectrum)
817        clone.meta_data = deepcopy(self.meta_data)
818        clone.errors = deepcopy(self.errors)
819
820        return clone
821
822    def _validity_check(self, other):
823        """
824        Checks that the data lengths are compatible.
825        Checks that the x vectors are compatible.
826        Returns error vectors equal to the original error vectors
827        if they were present, or vectors of zeros when none were found.
829
830        :param other: other data set for operation
831        :return: dy for self, dy for other [numpy arrays]
832        :raise ValueError: when lengths are not compatible
833        """
834        dy_other = None
835        if isinstance(other, Data1D):
836            # Check that data lengths are the same
837            if len(self.x) != len(other.x) or \
838                len(self.y) != len(other.y):
839                msg = "Unable to perform operation: data lengths are not equal"
840                raise ValueError, msg
841            # Here we could also extrapolate between data points
842            TOLERANCE = 0.01
843            for i in range(len(self.x)):
844                if math.fabs((self.x[i] - other.x[i])/self.x[i]) > TOLERANCE:
845                    msg = "Incompatible data sets: x-values do not match"
846                    raise ValueError, msg
847
848            # Check that the other data set has errors, otherwise
849            # create zero vector
850            dy_other = other.dy
851            if other.dy is None or (len(other.dy) != len(other.y)):
852                dy_other = numpy.zeros(len(other.y))
853
854        # Check that we have errors, otherwise create zero vector
855        dy = self.dy
856        if self.dy is None or (len(self.dy) != len(self.y)):
857            dy = numpy.zeros(len(self.y))
858
859        return dy, dy_other
860
861    def _perform_operation(self, other, operation):
862        """
        Perform an arithmetic operation between two data sets,
        propagating uncertainties point by point.
863        """
864        # First, check the data compatibility
865        dy, dy_other = self._validity_check(other)
866        result = self.clone_without_data(len(self.x))
867        if self.dxw is None:
868            result.dxw = None
869        else:
870            result.dxw = numpy.zeros(len(self.x))
871        if self.dxl is None:
872            result.dxl = None
873        else:
874            result.dxl = numpy.zeros(len(self.x))
875
876        for i in range(len(self.x)):
877            result.x[i] = self.x[i]
878            if self.dx is not None and len(self.x) == len(self.dx):
879                result.dx[i] = self.dx[i]
880            if self.dxw is not None and len(self.x) == len(self.dxw):
881                result.dxw[i] = self.dxw[i]
882            if self.dxl is not None and len(self.x) == len(self.dxl):
883                result.dxl[i] = self.dxl[i]
884
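            # Wrap y and its variance in an Uncertainty object so that the
            # requested operation propagates errors; when the other operand
            # is a Data1D, the two x-resolutions are combined below as the
            # root-mean-square of self.dx and other.dx (and likewise dxl).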
885            a = Uncertainty(self.y[i], dy[i]**2)
886            if isinstance(other, Data1D):
887                b = Uncertainty(other.y[i], dy_other[i]**2)
888                if other.dx is not None:
889                    result.dx[i] *= self.dx[i]
890                    result.dx[i] += (other.dx[i]**2)
891                    result.dx[i] /= 2
892                    result.dx[i] = math.sqrt(result.dx[i])
893                if result.dxl is not None and other.dxl is not None:
894                    result.dxl[i] *= self.dxl[i]
895                    result.dxl[i] += (other.dxl[i]**2)
896                    result.dxl[i] /= 2
897                    result.dxl[i] = math.sqrt(result.dxl[i])
898            else:
899                b = other
900
901            output = operation(a, b)
902            result.y[i] = output.x
903            result.dy[i] = math.sqrt(math.fabs(output.variance))
904        return result
905
906    def _validity_check_union(self, other):
907        """
908        Checks that the other data set is also a Data1D, so that
909        a union of the two sets can be formed.
913
914        :param other: other data set for operation
915        :return: bool
916        :raise ValueError: when data types are not compatible
917        """
918        if not isinstance(other, Data1D):
919            msg = "Unable to perform operation: different types of data sets"
920            raise ValueError, msg
921        return True
922
923    def _perform_union(self, other):
924        """
        Perform the union of two data sets, merging them and sorting by x.
925        """
926        # First, check the data compatibility
927        self._validity_check_union(other)
928        result = self.clone_without_data(len(self.x) + len(other.x))
929        if self.dy is None or other.dy is None:
930            result.dy = None
931        else:
932            result.dy = numpy.zeros(len(self.x) + len(other.x))
933        if self.dx is None or other.dx is None:
934            result.dx = None
935        else:
936            result.dx = numpy.zeros(len(self.x) + len(other.x))
937        if self.dxw is None or other.dxw is None:
938            result.dxw = None
939        else:
940            result.dxw = numpy.zeros(len(self.x) + len(other.x))
941        if self.dxl is None or other.dxl is None:
942            result.dxl = None
943        else:
944            result.dxl = numpy.zeros(len(self.x) + len(other.x))
945
946        result.x = numpy.append(self.x, other.x)
947        #argsorting
948        ind = numpy.argsort(result.x)
949        result.x = result.x[ind]
950        result.y = numpy.append(self.y, other.y)
951        result.y = result.y[ind]
952        if result.dy is not None:
953            result.dy = numpy.append(self.dy, other.dy)
954            result.dy = result.dy[ind]
955        if result.dx is not None:
956            result.dx = numpy.append(self.dx, other.dx)
957            result.dx = result.dx[ind]
958        if result.dxw is not None:
959            result.dxw = numpy.append(self.dxw, other.dxw)
960            result.dxw = result.dxw[ind]
961        if result.dxl is not None:
962            result.dxl = numpy.append(self.dxl, other.dxl)
963            result.dxl = result.dxl[ind]
964        return result
965
966
967class Data2D(plottable_2D, DataInfo):
968    """
969    2D data class
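
    Example (illustrative sketch; a two-pixel detector with arbitrary
    values)::

        import numpy
        qx = numpy.array([-0.01, 0.01])
        qy = numpy.array([0.01, 0.01])
        d2 = Data2D(data=numpy.array([1.0, 2.0]),
                    err_data=numpy.array([0.1, 0.2]),
                    qx_data=qx, qy_data=qy,
                    q_data=numpy.sqrt(qx ** 2 + qy ** 2),
                    mask=numpy.ones(2, dtype=bool))
        d2.xaxis("Qx", "1/A")
        d2.yaxis("Qy", "1/A")
        d2.zaxis("I(Qx,Qy)", "1/cm")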
970    """
971    ## Units for Q-values
972    Q_unit = '1/A'
973    ## Units for I(Q) values
974    I_unit = '1/cm'
975    ## Vector of Q-values at the center of each bin in x
976    x_bins = None
977    ## Vector of Q-values at the center of each bin in y
978    y_bins = None
979
980    def __init__(self, data=None, err_data=None, qx_data=None,
981                 qy_data=None, q_data=None, mask=None,
982                 dqx_data=None, dqy_data=None):
983        self.y_bins = []
984        self.x_bins = []
985        DataInfo.__init__(self)
986        plottable_2D.__init__(self, data, err_data, qx_data,
987                              qy_data, q_data, mask, dqx_data, dqy_data)
988        if len(self.detector) > 0:
989            raise RuntimeError, "Data2D: Detector bank already filled at init"
990
991    def __str__(self):
992        _str = "%s\n" % DataInfo.__str__(self)
993        _str += "Data:\n"
994        _str += "   Type:         %s\n" % self.__class__.__name__
995        _str += "   X- & Y-axis:  %s\t[%s]\n" % (self._yaxis, self._yunit)
996        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
997        _str += "   Length:       %g \n" % (len(self.data))
998        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
999        return _str
1000
1001    def clone_without_data(self, length=0, clone=None):
1002        """
1003        Clone the current object, without copying the data (which
1004        will be filled out by a subsequent operation).
1005        The data arrays will be initialized to zero.
1006
1007        :param length: length of the data array to be initialized
1008        :param clone: if provided, the data will be copied to clone
1009        """
1010        from copy import deepcopy
1011
1012        if clone is None or not issubclass(clone.__class__, Data2D):
1013            data = numpy.zeros(length)
1014            err_data = numpy.zeros(length)
1015            qx_data = numpy.zeros(length)
1016            qy_data = numpy.zeros(length)
1017            q_data = numpy.zeros(length)
1018            mask = numpy.zeros(length)
1019            dqx_data = None
1020            dqy_data = None
1021            clone = Data2D(data=data, err_data=err_data,
1022                           qx_data=qx_data, qy_data=qy_data,
1023                           q_data=q_data, mask=mask)
1024
1025        clone.title = self.title
1026        clone.run = self.run
1027        clone.filename = self.filename
1028        clone.instrument = self.instrument
1029        clone.notes = deepcopy(self.notes)
1030        clone.process = deepcopy(self.process)
1031        clone.detector = deepcopy(self.detector)
1032        clone.sample = deepcopy(self.sample)
1033        clone.source = deepcopy(self.source)
1034        clone.collimation = deepcopy(self.collimation)
1035        clone.trans_spectrum = deepcopy(self.trans_spectrum)
1036        clone.meta_data = deepcopy(self.meta_data)
1037        clone.errors = deepcopy(self.errors)
1038
1039        return clone
1040
1041    def _validity_check(self, other):
1042        """
1043        Checks that the data lengths are compatible.
1044        Checks that the x vectors are compatible.
1045        Returns error vectors equal to the original error vectors
1046        if they were present, or vectors of zeros when none were found.
1048
1049        :param other: other data set for operation
1050        :return: err_data for self, err_data for other [numpy arrays]
1051        :raise ValueError: when lengths are not compatible
1052        """
1053        err_other = None
1054        TOLERANCE = 0.01
1055        if isinstance(other, Data2D):
1056            # Check that data lengths are the same
1057            if len(self.data) != len(other.data) or \
1058                len(self.qx_data) != len(other.qx_data) or \
1059                len(self.qy_data) != len(other.qy_data):
1060                msg = "Unable to perform operation: data lengths are not equal"
1061                raise ValueError, msg
1062            for ind in range(len(self.data)):
1063                if math.fabs((self.qx_data[ind] - other.qx_data[ind])/self.qx_data[ind]) > TOLERANCE:
1064                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
1065                    raise ValueError, msg
1066                if math.fabs((self.qy_data[ind] - other.qy_data[ind])/self.qy_data[ind]) > TOLERANCE:
1067                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
1068                    raise ValueError, msg
1069
1070            # Check that the other data set has errors, otherwise create a zero vector
1071            err_other = other.err_data
1072            if other.err_data is None or \
1073                (len(other.err_data) != len(other.data)):
1074                err_other = numpy.zeros(len(other.data))
1075
1076        # Check that we have errors, otherwise create zero vector
1077        err = self.err_data
1078        if self.err_data is None or \
1079            (len(self.err_data) != len(self.data)):
1080            err = numpy.zeros(len(self.data))
1081        return err, err_other
1082
1083    def _perform_operation(self, other, operation):
1084        """
1085        Perform 2D operations between data sets
1086
1087        :param other: other data set
1088        :param operation: function defining the operation
1089        """
1090        # First, check the data compatibility
1091        dy, dy_other = self._validity_check(other)
1092        result = self.clone_without_data(numpy.size(self.data))
1093        if self.dqx_data is None or self.dqy_data is None:
1094            result.dqx_data = None
1095            result.dqy_data = None
1096        else:
1097            result.dqx_data = numpy.zeros(len(self.data))
1098            result.dqy_data = numpy.zeros(len(self.data))
1099        for i in range(numpy.size(self.data)):
1100            result.data[i] = self.data[i]
1101            if self.err_data is not None and \
1102                numpy.size(self.data) == numpy.size(self.err_data):
1103                result.err_data[i] = self.err_data[i]
1104            if self.dqx_data is not None:
1105                result.dqx_data[i] = self.dqx_data[i]
1106            if self.dqy_data is not None:
1107                result.dqy_data[i] = self.dqy_data[i]
1108            result.qx_data[i] = self.qx_data[i]
1109            result.qy_data[i] = self.qy_data[i]
1110            result.q_data[i] = self.q_data[i]
1111            result.mask[i] = self.mask[i]
1112
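            # Same scheme as Data1D: propagate the intensity error through
            # the Uncertainty arithmetic, and combine dqx/dqy resolutions
            # as a root-mean-square when both data sets provide them.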
1113            a = Uncertainty(self.data[i], dy[i]**2)
1114            if isinstance(other, Data2D):
1115                b = Uncertainty(other.data[i], dy_other[i]**2)
1116                if other.dqx_data is not None and \
1117                        result.dqx_data is not None:
1118                    result.dqx_data[i] *= self.dqx_data[i]
1119                    result.dqx_data[i] += (other.dqx_data[i]**2)
1120                    result.dqx_data[i] /= 2
1121                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
1122                if other.dqy_data is not None and \
1123                        result.dqy_data is not None:
1124                    result.dqy_data[i] *= self.dqy_data[i]
1125                    result.dqy_data[i] += (other.dqy_data[i]**2)
1126                    result.dqy_data[i] /= 2
1127                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
1128            else:
1129                b = other
1130            output = operation(a, b)
1131            result.data[i] = output.x
1132            result.err_data[i] = math.sqrt(math.fabs(output.variance))
1133        return result
1134
1135    def _validity_check_union(self, other):
1136        """
1137        Checks that the other data set is also a Data2D, so that
1138        a union of the two sets can be formed.
1142
1143        :param other: other data set for operation
1144        :return: bool
1145        :raise ValueError: when data types are not compatible
1146        """
1147        if not isinstance(other, Data2D):
1148            msg = "Unable to perform operation: different types of data sets"
1149            raise ValueError, msg
1150        return True
1151
1152    def _perform_union(self, other):
1153        """
1154        Perform the union of two 2D data sets
1155
1156        :param other: other data set to merge with the current one
1158        """
1159        # First, check the data compatibility
1160        self._validity_check_union(other)
1161        result = self.clone_without_data(numpy.size(self.data) + \
1162                                         numpy.size(other.data))
1163        result.xmin = self.xmin
1164        result.xmax = self.xmax
1165        result.ymin = self.ymin
1166        result.ymax = self.ymax
1167        if self.dqx_data is None or self.dqy_data is None or \
1168                other.dqx_data is None or other.dqy_data is None:
1169            result.dqx_data = None
1170            result.dqy_data = None
1171        else:
1172            result.dqx_data = numpy.zeros(len(self.data) + \
1173                                         numpy.size(other.data))
1174            result.dqy_data = numpy.zeros(len(self.data) + \
1175                                         numpy.size(other.data))
1176
1177        result.data = numpy.append(self.data, other.data)
1178        result.qx_data = numpy.append(self.qx_data, other.qx_data)
1179        result.qy_data = numpy.append(self.qy_data, other.qy_data)
1180        result.q_data = numpy.append(self.q_data, other.q_data)
1181        result.mask = numpy.append(self.mask, other.mask)
1182        if result.err_data is not None:
1183            result.err_data = numpy.append(self.err_data, other.err_data)
1184        if self.dqx_data is not None:
1185            result.dqx_data = numpy.append(self.dqx_data, other.dqx_data)
1186        if self.dqy_data is not None:
1187            result.dqy_data = numpy.append(self.dqy_data, other.dqy_data)
1188
1189        return result
1190
1191
1192def combine_data_info_with_plottable(data, datainfo):
1193    """
1194    A function that combines a DataInfo object (datainfo) with a plottable_1D or plottable_2D data object (data).
1195
1196    :param data: A plottable_1D or plottable_2D data object
1197    :return: A fully specified Data1D or Data2D object
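
    Example (illustrative sketch; in practice a reader fills both objects)::

        placeholder = plottable_1D(x=[0.01, 0.02], y=[3.0, 1.5])
        # Readers are expected to set the axis ranges on the plottable
        # before combining; they are copied onto the final data set.
        placeholder.xmin, placeholder.xmax = 0.01, 0.02
        placeholder.ymin, placeholder.ymax = 1.5, 3.0
        info = DataInfo()
        info.filename = "example.dat"
        data1d = combine_data_info_with_plottable(placeholder, info)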
1198    """
1199
1200    final_dataset = None
1201    if isinstance(data, plottable_1D):
1202        final_dataset = Data1D(data.x, data.y)
1203        final_dataset.dx = data.dx
1204        final_dataset.dy = data.dy
1205        final_dataset.dxl = data.dxl
1206        final_dataset.dxw = data.dxw
1207        final_dataset.xaxis(data._xaxis, data._xunit)
1208        final_dataset.yaxis(data._yaxis, data._yunit)
1209    elif isinstance(data, plottable_2D):
1210        final_dataset = Data2D(data.data, data.err_data, data.qx_data, data.qy_data, data.q_data,
1211                               data.mask, data.dqx_data, data.dqy_data)
1212        final_dataset.xaxis(data._xaxis, data._xunit)
1213        final_dataset.yaxis(data._yaxis, data._yunit)
1214        final_dataset.zaxis(data._zaxis, data._zunit)
1215        final_dataset.x_bins = data.x_bins
1216        final_dataset.y_bins = data.y_bins
1217    else:
1218        return_string = "Should Never Happen: _combine_data_info_with_plottable input is not a plottable1d or " + \
1219                        "plottable2d data object"
1220        return return_string
1221
1222    final_dataset.xmax = data.xmax
1223    final_dataset.ymax = data.ymax
1224    final_dataset.xmin = data.xmin
1225    final_dataset.ymin = data.ymin
1226    final_dataset.title = datainfo.title
1227    final_dataset.run = datainfo.run
1228    final_dataset.run_name = datainfo.run_name
1229    final_dataset.filename = datainfo.filename
1230    final_dataset.notes = datainfo.notes
1231    final_dataset.process = datainfo.process
1232    final_dataset.instrument = datainfo.instrument
1233    final_dataset.detector = datainfo.detector
1234    final_dataset.sample = datainfo.sample
1235    final_dataset.source = datainfo.source
1236    final_dataset.collimation = datainfo.collimation
1237    final_dataset.trans_spectrum = datainfo.trans_spectrum
1238    final_dataset.meta_data = datainfo.meta_data
1239    final_dataset.errors = datainfo.errors
1240    return final_dataset