source: sasview/src/sas/sascalc/dataloader/data_info.py @ 26c9b85

Last change on this file since 26c9b85 was 2ffe241, checked in by krzywon, 8 years ago

Fix issues with loading 2D data in the SESANS branch and always set isSesans to False when working with 2D.

1"""
2    Module that contains classes to hold information read from
3    reduced data files.
4
5    A good description of the data members can be found in
6    the CanSAS 1D XML data format:
7
8    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
9"""
10#####################################################################
11#This software was developed by the University of Tennessee as part of the
12#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
13#project funded by the US National Science Foundation.
14#See the license text in license.txt
15#copyright 2008, University of Tennessee
16######################################################################
17
18
19#TODO: Keep track of data manipulation in the 'process' data structure.
20#TODO: This module should be independent of plottables. We should write
21#        an adapter class for plottables when needed.
22
23#from sas.guitools.plottables import Data1D as plottable_1D
24from sas.sascalc.data_util.uncertainty import Uncertainty
25import numpy
26import math
27
class plottable_1D(object):
    """
    Data1D is a place holder for 1D plottables.
    """
    x = None
    y = None
    dx = None
    dy = None
    ## Slit smearing length; the presence of dxl/dxw should be
    ## mutually exclusive with the presence of Qdev (dx)
    dxl = None
    ## Slit smearing width
    dxw = None
    ## SESANS specific params (wavelengths for spin echo length calculation)
    lam = None
    dlam = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''

    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
        self.x = numpy.asarray(x)
        self.y = numpy.asarray(y)
        if dx is not None:
            self.dx = numpy.asarray(dx)
        if dy is not None:
            self.dy = numpy.asarray(dy)
        if dxl is not None:
            self.dxl = numpy.asarray(dxl)
        if dxw is not None:
            self.dxw = numpy.asarray(dxw)
        if lam is not None:
            self.lam = numpy.asarray(lam)
        if dlam is not None:
            self.dlam = numpy.asarray(dlam)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit


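# Illustrative sketch (editor's addition, values assumed): a plottable_1D is
# built from plain sequences, which are converted to numpy arrays; dy is
# optional, and the slit-smearing terms dxl/dxw normally replace dx.
#
#     curve = plottable_1D(x=[0.01, 0.02, 0.03],
#                          y=[120.0, 80.0, 55.0],
#                          dy=[2.0, 1.5, 1.1])
#     curve.xaxis("\\rm{Q}", "A^{-1}")
#     curve.yaxis("\\rm{Intensity}", "cm^{-1}")
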
class plottable_2D(object):
    """
    Data2D is a place holder for 2D plottables.
    """
    xmin = None
    xmax = None
    ymin = None
    ymax = None
    data = None
    qx_data = None
    qy_data = None
    q_data = None
    err_data = None
    dqx_data = None
    dqy_data = None
    mask = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''
    _zaxis = ''
    _zunit = ''

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        self.data = numpy.asarray(data)
        self.qx_data = numpy.asarray(qx_data)
        self.qy_data = numpy.asarray(qy_data)
        self.q_data = numpy.asarray(q_data)
        self.mask = numpy.asarray(mask)
        self.err_data = numpy.asarray(err_data)
        if dqx_data is not None:
            self.dqx_data = numpy.asarray(dqx_data)
        if dqy_data is not None:
            self.dqy_data = numpy.asarray(dqy_data)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit

    def zaxis(self, label, unit):
        """
        set the z axis label and unit
        """
        self._zaxis = label
        self._zunit = unit


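# Illustrative sketch (editor's addition, values assumed): plottable_2D holds
# the detector image as flattened 1D arrays, one entry per pixel, with
# matching qx/qy/q coordinates and a mask of the same length.
#
#     import numpy
#     qx = numpy.array([-0.01, 0.0, 0.01])
#     qy = numpy.array([0.0, 0.0, 0.0])
#     frame = plottable_2D(data=numpy.array([10.0, 12.0, 9.0]),
#                          err_data=numpy.sqrt([10.0, 12.0, 9.0]),
#                          qx_data=qx, qy_data=qy,
#                          q_data=numpy.sqrt(qx**2 + qy**2),
#                          mask=numpy.ones(3, dtype=bool))
#     frame.zaxis("\\rm{Intensity}", "cm^{-1}")
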
class Vector(object):
    """
    Vector class to hold multi-dimensional objects
    """
    ## x component
    x = None
    ## y component
    y = None
    ## z component
    z = None

    def __init__(self, x=None, y=None, z=None):
        """
        Initialization. Components that are not
        set are set to None by default.

        :param x: x component
        :param y: y component
        :param z: z component
        """
        self.x = x
        self.y = y
        self.z = z

    def __str__(self):
        msg = "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
        return msg


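# Illustrative sketch (editor's addition, values assumed): Vector is a simple
# three-component container used by the metadata classes below for offsets,
# beam centers, pixel sizes and orientations.
#
#     center = Vector(x=64.2, y=65.1)   # z stays None when not given
#     print(str(center))                # "x = 64.2 ... y = 65.1 ... z = None"
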
class Detector(object):
    """
    Class to hold detector information
    """
    ## Name of the instrument [string]
    name = None
    ## Sample to detector distance [float] [mm]
    distance = None
    distance_unit = 'mm'
    ## Offset of this detector position in X, Y,
    #(and Z if necessary) [Vector] [mm]
    offset = None
    offset_unit = 'm'
    ## Orientation (rotation) of this detector in roll,
    # pitch, and yaw [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Center of the beam on the detector in X and Y
    #(and Z if necessary) [Vector] [mm]
    beam_center = None
    beam_center_unit = 'mm'
    ## Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
    pixel_size = None
    pixel_size_unit = 'mm'
    ## Slit length of the instrument for this detector [float] [mm]
    slit_length = None
    slit_length_unit = 'mm'

    def __init__(self):
        """
        Initialize class attributes that are objects.
        """
        self.offset = Vector()
        self.orientation = Vector()
        self.beam_center = Vector()
        self.pixel_size = Vector()

    def __str__(self):
        _str = "Detector:\n"
        _str += "   Name:         %s\n" % self.name
        _str += "   Distance:     %s [%s]\n" % \
            (str(self.distance), str(self.distance_unit))
        _str += "   Offset:       %s [%s]\n" % \
            (str(self.offset), str(self.offset_unit))
        _str += "   Orientation:  %s [%s]\n" % \
            (str(self.orientation), str(self.orientation_unit))
        _str += "   Beam center:  %s [%s]\n" % \
            (str(self.beam_center), str(self.beam_center_unit))
        _str += "   Pixel size:   %s [%s]\n" % \
            (str(self.pixel_size), str(self.pixel_size_unit))
        _str += "   Slit length:  %s [%s]\n" % \
            (str(self.slit_length), str(self.slit_length_unit))
        return _str


class Aperture(object):
    ## Name
    name = None
    ## Type
    type = None
    ## Size name
    size_name = None
    ## Aperture size [Vector]
    size = None
    size_unit = 'mm'
    ## Aperture distance [float]
    distance = None
    distance_unit = 'mm'

    def __init__(self):
        self.size = Vector()


class Collimation(object):
    """
    Class to hold collimation information
    """
    ## Name
    name = None
    ## Length [float] [mm]
    length = None
    length_unit = 'mm'
    ## Aperture
    aperture = None

    def __init__(self):
        self.aperture = []

    def __str__(self):
        _str = "Collimation:\n"
        _str += "   Length:       %s [%s]\n" % \
            (str(self.length), str(self.length_unit))
        for item in self.aperture:
            _str += "   Aperture size:%s [%s]\n" % \
                (str(item.size), str(item.size_unit))
            _str += "   Aperture_dist:%s [%s]\n" % \
                (str(item.distance), str(item.distance_unit))
        return _str


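# Illustrative sketch (editor's addition, values assumed): a Collimation
# section is described by its length plus a list of Aperture objects, and
# its __str__ walks that list.
#
#     guide = Collimation()
#     guide.length = 3000.0
#     pinhole = Aperture()
#     pinhole.size.x = 20.0
#     pinhole.distance = 1500.0
#     guide.aperture.append(pinhole)
#     print(str(guide))
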
class Source(object):
    """
    Class to hold source information
    """
    ## Name
    name = None
    ## Radiation type [string]
    radiation = None
    ## Beam size name
    beam_size_name = None
    ## Beam size [Vector] [mm]
    beam_size = None
    beam_size_unit = 'mm'
    ## Beam shape [string]
    beam_shape = None
    ## Wavelength [float] [Angstrom]
    wavelength = None
    wavelength_unit = 'A'
    ## Minimum wavelength [float] [Angstrom]
    wavelength_min = None
    wavelength_min_unit = 'nm'
    ## Maximum wavelength [float] [Angstrom]
    wavelength_max = None
    wavelength_max_unit = 'nm'
    ## Wavelength spread [float] [Angstrom]
    wavelength_spread = None
    wavelength_spread_unit = 'percent'

    def __init__(self):
        self.beam_size = Vector()

    def __str__(self):
        _str = "Source:\n"
        _str += "   Radiation:    %s\n" % str(self.radiation)
        _str += "   Shape:        %s\n" % str(self.beam_shape)
        _str += "   Wavelength:   %s [%s]\n" % \
            (str(self.wavelength), str(self.wavelength_unit))
        _str += "   Waveln_min:   %s [%s]\n" % \
            (str(self.wavelength_min), str(self.wavelength_min_unit))
        _str += "   Waveln_max:   %s [%s]\n" % \
            (str(self.wavelength_max), str(self.wavelength_max_unit))
        _str += "   Waveln_spread:%s [%s]\n" % \
            (str(self.wavelength_spread), str(self.wavelength_spread_unit))
        _str += "   Beam_size:    %s [%s]\n" % \
            (str(self.beam_size), str(self.beam_size_unit))
        return _str


"""
Definitions of radiation types
"""
NEUTRON = 'neutron'
XRAY = 'x-ray'
MUON = 'muon'
ELECTRON = 'electron'


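# Illustrative sketch (editor's addition, values assumed): readers typically
# fill a Source with one of the radiation constants above plus beam geometry.
#
#     src = Source()
#     src.radiation = NEUTRON
#     src.wavelength = 6.0      # interpreted in src.wavelength_unit ('A')
#     src.beam_shape = 'circular'
#     src.beam_size.x = src.beam_size.y = 12.0
#     print(str(src))
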
class Sample(object):
    """
    Class to hold the sample description
    """
    ## Short name for sample
    name = ''
    ## ID
    ID = ''
    ## Thickness [float] [mm]
    thickness = None
    thickness_unit = 'mm'
    ## Transmission [float] [fraction]
    transmission = None
    ## Temperature [float] [No Default]
    temperature = None
    temperature_unit = None
    ## Position [Vector] [mm]
    position = None
    position_unit = 'mm'
    ## Orientation [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Details
    details = None
    ## SESANS zacceptance
    zacceptance = None

    def __init__(self):
        self.position = Vector()
        self.orientation = Vector()
        self.details = []

    def __str__(self):
        _str = "Sample:\n"
        _str += "   ID:           %s\n" % str(self.ID)
        _str += "   Transmission: %s\n" % str(self.transmission)
        _str += "   Thickness:    %s [%s]\n" % \
            (str(self.thickness), str(self.thickness_unit))
        _str += "   Temperature:  %s [%s]\n" % \
            (str(self.temperature), str(self.temperature_unit))
        _str += "   Position:     %s [%s]\n" % \
            (str(self.position), str(self.position_unit))
        _str += "   Orientation:  %s [%s]\n" % \
            (str(self.orientation), str(self.orientation_unit))

        _str += "   Details:\n"
        for item in self.details:
            _str += "      %s\n" % item

        return _str


class Process(object):
    """
    Class that holds information about the processes
    performed on the data.
    """
    name = ''
    date = ''
    description = ''
    term = None
    notes = None

    def __init__(self):
        self.term = []
        self.notes = []

    def is_empty(self):
        """
        Return True if the object is empty
        """
        return len(self.name) == 0 and len(self.date) == 0 and len(self.description) == 0 \
            and len(self.term) == 0 and len(self.notes) == 0

    def single_line_desc(self):
        """
        Return a single line string representing the process
        """
        return "%s %s %s" % (self.name, self.date, self.description)

    def __str__(self):
        _str = "Process:\n"
        _str += "   Name:         %s\n" % self.name
        _str += "   Date:         %s\n" % self.date
        _str += "   Description:  %s\n" % self.description
        for item in self.term:
            _str += "   Term:         %s\n" % item
        for item in self.notes:
            _str += "   Note:         %s\n" % item
        return _str


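# Illustrative sketch (editor's addition, structure of the term entries is
# assumed): a Process records one reduction step; term and notes are plain
# lists, and is_empty() lets loaders drop placeholder entries.
#
#     step = Process()
#     step.name = "absolute scaling"
#     step.date = "2017-01-12"
#     step.term.append("scale_factor = 0.032")
#     assert not step.is_empty()
#     print(step.single_line_desc())
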
class TransmissionSpectrum(object):
    """
    Class that holds information about transmission spectrum
    for white beams and spallation sources.
    """
    name = ''
    timestamp = ''
    ## Wavelength (float) [A]
    wavelength = None
    wavelength_unit = 'A'
    ## Transmission (float) [unit less]
    transmission = None
    transmission_unit = ''
    ## Transmission Deviation (float) [unit less]
    transmission_deviation = None
    transmission_deviation_unit = ''

    def __init__(self):
        self.wavelength = []
        self.transmission = []
        self.transmission_deviation = []

    def __str__(self):
        _str = "Transmission Spectrum:\n"
        _str += "   Name:             \t{0}\n".format(self.name)
        _str += "   Timestamp:        \t{0}\n".format(self.timestamp)
        _str += "   Wavelength unit:  \t{0}\n".format(self.wavelength_unit)
        _str += "   Transmission unit:\t{0}\n".format(self.transmission_unit)
        _str += "   Trans. Dev. unit:  \t{0}\n".format(\
                                            self.transmission_deviation_unit)
        length_list = [len(self.wavelength), len(self.transmission), \
                len(self.transmission_deviation)]
        _str += "   Number of Pts:    \t{0}\n".format(max(length_list))
        return _str


class DataInfo(object):
    """
    Class to hold the data read from a file.
    It includes four blocks of data for the
    instrument description, the sample description,
    the data itself and any other meta data.
    """
    ## Title
    title = ''
    ## Run number
    run = None
    ## Run name
    run_name = None
    ## File name
    filename = ''
    ## Notes
    notes = None
    ## Processes (Action on the data)
    process = None
    ## Instrument name
    instrument = ''
    ## Detector information
    detector = None
    ## Sample information
    sample = None
    ## Source information
    source = None
    ## Collimation information
    collimation = None
    ## Transmission Spectrum info
    trans_spectrum = None
    ## Additional meta-data
    meta_data = None
    ## Loading errors
    errors = None
    ## SESANS data check
    isSesans = None


    def __init__(self):
        """
        Initialization
        """
        ## Title
        self.title = ''
        ## Run number
        self.run = []
        self.run_name = {}
        ## File name
        self.filename = ''
        ## Notes
        self.notes = []
        ## Processes (Action on the data)
        self.process = []
        ## Instrument name
        self.instrument = ''
        ## Detector information
        self.detector = []
        ## Sample information
        self.sample = Sample()
        ## Source information
        self.source = Source()
        ## Collimation information
        self.collimation = []
        ## Transmission Spectrum
        self.trans_spectrum = []
        ## Additional meta-data
        self.meta_data = {}
        ## Loading errors
        self.errors = []
        ## SESANS data check
        self.isSesans = False

    def append_empty_process(self):
        """
        Append an empty Process object to the process list.
        """
        self.process.append(Process())

    def add_notes(self, message=""):
        """
        Add notes to datainfo
        """
        self.notes.append(message)

    def __str__(self):
        """
        Nice printout
        """
        _str = "File:            %s\n" % self.filename
        _str += "Title:           %s\n" % self.title
        _str += "Run:             %s\n" % str(self.run)
        _str += "SESANS:          %s\n" % str(self.isSesans)
        _str += "Instrument:      %s\n" % str(self.instrument)
        _str += "%s\n" % str(self.sample)
        _str += "%s\n" % str(self.source)
        for item in self.detector:
            _str += "%s\n" % str(item)
        for item in self.collimation:
            _str += "%s\n" % str(item)
        for item in self.process:
            _str += "%s\n" % str(item)
        for item in self.notes:
            _str += "%s\n" % str(item)
        for item in self.trans_spectrum:
            _str += "%s\n" % str(item)
        return _str

    # Private method to perform operation. Not implemented for DataInfo,
    # but should be implemented for each data class inherited from DataInfo
    # that holds actual data (ex.: Data1D)
    def _perform_operation(self, other, operation):
        """
        Private method to perform operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def _perform_union(self, other):
        """
        Private method to perform union operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def __add__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a + b
        return self._perform_operation(other, operation)

    def __radd__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b + a
        return self._perform_operation(other, operation)

    def __sub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a - b
        return self._perform_operation(other, operation)

    def __rsub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b - a
        return self._perform_operation(other, operation)

    def __mul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a * b
        return self._perform_operation(other, operation)

    def __rmul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b * a
        return self._perform_operation(other, operation)

    def __div__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a/b
        return self._perform_operation(other, operation)

    def __rdiv__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b/a
        return self._perform_operation(other, operation)

    def __or__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

    def __ror__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

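# Editor's note (illustrative, assumed usage): the arithmetic and union
# operators above only wrap the elementary operation in a closure and hand it
# to _perform_operation()/_perform_union(), which Data1D and Data2D below
# override to do the per-point work and error propagation. For example:
#
#     scaled = data1 * 10.0             # calls Data1D._perform_operation
#     background_subtracted = data1 - data2
#     merged = data1 | data2            # calls Data1D._perform_union
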
class Data1D(plottable_1D, DataInfo):
    """
    1D data class
    """
    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=None):
        DataInfo.__init__(self)
        plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
        self.isSesans = isSesans
        try:
            if self.isSesans: # the data is SESANS
                self.x_unit = 'A'
                self.y_unit = 'pol'
            elif not self.isSesans: # the data is SANS
                self.x_unit = '1/A'
                self.y_unit = '1/cm'
        except: # the data is not recognized/supported, and the user is notified
            raise TypeError('data not recognized, check documentation for supported 1D data formats')

    def __str__(self):
        """
        Nice printout
        """
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Length:       %g\n" % len(self.x)
        return _str

    def is_slit_smeared(self):
        """
        Check whether the data has slit smearing information
        :return: True if slit smearing info is present, False otherwise
        """
        def _check(v):
            if (v.__class__ == list or v.__class__ == numpy.ndarray) \
                and len(v) > 0 and min(v) > 0:
                return True
            return False
        return _check(self.dxl) or _check(self.dxw)

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data1D):
            x = numpy.zeros(length)
            dx = numpy.zeros(length)
            y = numpy.zeros(length)
            dy = numpy.zeros(length)
            lam = numpy.zeros(length)
            dlam = numpy.zeros(length)
            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)

        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns error vectors equal to the original
        error vectors if they were present, or vectors
        of zeros when none were found.

        :param other: other data set for operation
        :return: dy for self, dy for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        dy_other = None
        if isinstance(other, Data1D):
            # Check that data lengths are the same
            if len(self.x) != len(other.x) or \
                len(self.y) != len(other.y):
                msg = "Unable to perform operation: data lengths are not equal"
                raise ValueError(msg)
            # Here we could also extrapolate between data points
            TOLERANCE = 0.01
            for i in range(len(self.x)):
                if math.fabs((self.x[i] - other.x[i])/self.x[i]) > TOLERANCE:
                    msg = "Incompatible data sets: x-values do not match"
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector
            dy_other = other.dy
            if other.dy is None or (len(other.dy) != len(other.y)):
                dy_other = numpy.zeros(len(other.y))

        # Check that we have errors, otherwise create zero vector
        dy = self.dy
        if self.dy is None or (len(self.dy) != len(self.y)):
            dy = numpy.zeros(len(self.y))

        return dy, dy_other

    def _perform_operation(self, other, operation):
        """
        Perform the given operation point by point, propagating uncertainties.
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(len(self.x))
        if self.dxw is None:
            result.dxw = None
        else:
            result.dxw = numpy.zeros(len(self.x))
        if self.dxl is None:
            result.dxl = None
        else:
            result.dxl = numpy.zeros(len(self.x))

        for i in range(len(self.x)):
            result.x[i] = self.x[i]
            if self.dx is not None and len(self.x) == len(self.dx):
                result.dx[i] = self.dx[i]
            if self.dxw is not None and len(self.x) == len(self.dxw):
                result.dxw[i] = self.dxw[i]
            if self.dxl is not None and len(self.x) == len(self.dxl):
                result.dxl[i] = self.dxl[i]

            a = Uncertainty(self.y[i], dy[i]**2)
            if isinstance(other, Data1D):
                b = Uncertainty(other.y[i], dy_other[i]**2)
                if other.dx is not None:
                    result.dx[i] *= self.dx[i]
                    result.dx[i] += (other.dx[i]**2)
                    result.dx[i] /= 2
                    result.dx[i] = math.sqrt(result.dx[i])
                if result.dxl is not None and other.dxl is not None:
                    result.dxl[i] *= self.dxl[i]
                    result.dxl[i] += (other.dxl[i]**2)
                    result.dxl[i] /= 2
                    result.dxl[i] = math.sqrt(result.dxl[i])
            else:
                b = other

            output = operation(a, b)
            result.y[i] = output.x
            result.dy[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the two data sets are compatible.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data1D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Merge this data set with another, sorting the result by x.
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(len(self.x) + len(other.x))
        if self.dy is None or other.dy is None:
            result.dy = None
        else:
            result.dy = numpy.zeros(len(self.x) + len(other.x))
        if self.dx is None or other.dx is None:
            result.dx = None
        else:
            result.dx = numpy.zeros(len(self.x) + len(other.x))
        if self.dxw is None or other.dxw is None:
            result.dxw = None
        else:
            result.dxw = numpy.zeros(len(self.x) + len(other.x))
        if self.dxl is None or other.dxl is None:
            result.dxl = None
        else:
            result.dxl = numpy.zeros(len(self.x) + len(other.x))

        result.x = numpy.append(self.x, other.x)
        #argsorting
        ind = numpy.argsort(result.x)
        result.x = result.x[ind]
        result.y = numpy.append(self.y, other.y)
        result.y = result.y[ind]
        if result.dy is not None:
            result.dy = numpy.append(self.dy, other.dy)
            result.dy = result.dy[ind]
        if result.dx is not None:
            result.dx = numpy.append(self.dx, other.dx)
            result.dx = result.dx[ind]
        if result.dxw is not None:
            result.dxw = numpy.append(self.dxw, other.dxw)
            result.dxw = result.dxw[ind]
        if result.dxl is not None:
            result.dxl = numpy.append(self.dxl, other.dxl)
            result.dxl = result.dxl[ind]
        return result


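# Illustrative sketch (editor's addition, values assumed): arithmetic between
# two Data1D sets requires matching x vectors; y values are combined point by
# point and dy is propagated through the Uncertainty class.
#
#     import numpy
#     q = numpy.array([0.01, 0.02, 0.03])
#     sample = Data1D(q, numpy.array([105.0, 71.0, 48.0]),
#                     dy=numpy.array([2.0, 1.6, 1.2]))
#     empty_cell = Data1D(q, numpy.array([5.0, 4.0, 3.0]),
#                         dy=numpy.array([0.5, 0.4, 0.3]))
#     corrected = sample - empty_cell   # new Data1D with propagated dy
#     merged = sample | empty_cell      # union of the two sets, sorted by x
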
class Data2D(plottable_2D, DataInfo):
    """
    2D data class
    """
    ## Units for Q-values
    Q_unit = '1/A'
    ## Units for I(Q) values
    I_unit = '1/cm'
    ## Vector of Q-values at the center of each bin in x
    x_bins = None
    ## Vector of Q-values at the center of each bin in y
    y_bins = None
    ## No 2D SESANS data as of yet. Always set it to False
    isSesans = False

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        DataInfo.__init__(self)
        plottable_2D.__init__(self, data, err_data, qx_data,
                              qy_data, q_data, mask, dqx_data, dqy_data)
        self.y_bins = []
        self.x_bins = []

        if len(self.detector) > 0:
            raise RuntimeError("Data2D: Detector bank already filled at init")

    def __str__(self):
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X- & Y-axis:  %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
        _str += "   Length:       %g \n" % (len(self.data))
        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
        return _str

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data2D):
            data = numpy.zeros(length)
            err_data = numpy.zeros(length)
            qx_data = numpy.zeros(length)
            qy_data = numpy.zeros(length)
            q_data = numpy.zeros(length)
            mask = numpy.zeros(length)
            dqx_data = None
            dqy_data = None
            clone = Data2D(data=data, err_data=err_data,
                           qx_data=qx_data, qy_data=qy_data,
                           q_data=q_data, mask=mask)

        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the q vectors are compatible.
        Returns error vectors equal to the original
        error vectors if they were present, or vectors
        of zeros when none were found.

        :param other: other data set for operation
        :return: err for self, err for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        err_other = None
        TOLERANCE = 0.01
        if isinstance(other, Data2D):
            # Check that data lengths are the same
            if len(self.data) != len(other.data) or \
                len(self.qx_data) != len(other.qx_data) or \
                len(self.qy_data) != len(other.qy_data):
                msg = "Unable to perform operation: data lengths are not equal"
                raise ValueError(msg)
            for ind in range(len(self.data)):
                if math.fabs((self.qx_data[ind] - other.qx_data[ind])/self.qx_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
                    raise ValueError(msg)
                if math.fabs((self.qy_data[ind] - other.qy_data[ind])/self.qy_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector
            err_other = other.err_data
            if other.err_data is None or \
                (len(other.err_data) != len(other.data)):
                err_other = numpy.zeros(len(other.data))

        # Check that we have errors, otherwise create zero vector
        err = self.err_data
        if self.err_data is None or \
            (len(self.err_data) != len(self.data)):
            err = numpy.zeros(len(self.data))
        return err, err_other

    def _perform_operation(self, other, operation):
        """
        Perform 2D operations between data sets

        :param other: other data set
        :param operation: function defining the operation
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(numpy.size(self.data))
        if self.dqx_data is None or self.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = numpy.zeros(len(self.data))
            result.dqy_data = numpy.zeros(len(self.data))
        for i in range(numpy.size(self.data)):
            result.data[i] = self.data[i]
            if self.err_data is not None and \
                numpy.size(self.data) == numpy.size(self.err_data):
                result.err_data[i] = self.err_data[i]
            if self.dqx_data is not None:
                result.dqx_data[i] = self.dqx_data[i]
            if self.dqy_data is not None:
                result.dqy_data[i] = self.dqy_data[i]
            result.qx_data[i] = self.qx_data[i]
            result.qy_data[i] = self.qy_data[i]
            result.q_data[i] = self.q_data[i]
            result.mask[i] = self.mask[i]

            a = Uncertainty(self.data[i], dy[i]**2)
            if isinstance(other, Data2D):
                b = Uncertainty(other.data[i], dy_other[i]**2)
                if other.dqx_data is not None and \
                        result.dqx_data is not None:
                    result.dqx_data[i] *= self.dqx_data[i]
                    result.dqx_data[i] += (other.dqx_data[i]**2)
                    result.dqx_data[i] /= 2
                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
                if other.dqy_data is not None and \
                        result.dqy_data is not None:
                    result.dqy_data[i] *= self.dqy_data[i]
                    result.dqy_data[i] += (other.dqy_data[i]**2)
                    result.dqy_data[i] /= 2
                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
            else:
                b = other
            output = operation(a, b)
            result.data[i] = output.x
            result.err_data[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the two data sets are compatible.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data2D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Perform the union of two 2D data sets

        :param other: other data set
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(numpy.size(self.data) + \
                                         numpy.size(other.data))
        result.xmin = self.xmin
        result.xmax = self.xmax
        result.ymin = self.ymin
        result.ymax = self.ymax
        if self.dqx_data is None or self.dqy_data is None or \
                other.dqx_data is None or other.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = numpy.zeros(len(self.data) + \
                                         numpy.size(other.data))
            result.dqy_data = numpy.zeros(len(self.data) + \
                                         numpy.size(other.data))

        result.data = numpy.append(self.data, other.data)
        result.qx_data = numpy.append(self.qx_data, other.qx_data)
        result.qy_data = numpy.append(self.qy_data, other.qy_data)
        result.q_data = numpy.append(self.q_data, other.q_data)
        result.mask = numpy.append(self.mask, other.mask)
        if result.err_data is not None:
            result.err_data = numpy.append(self.err_data, other.err_data)
        if self.dqx_data is not None:
            result.dqx_data = numpy.append(self.dqx_data, other.dqx_data)
        if self.dqy_data is not None:
            result.dqy_data = numpy.append(self.dqy_data, other.dqy_data)

        return result


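# Illustrative sketch (editor's addition, values assumed): Data2D follows the
# same pattern as Data1D but operates on the flattened pixel arrays, so two
# frames can be subtracted or merged only when their qx/qy grids agree.
#
#     import numpy
#     qx = numpy.array([0.01, 0.02, 0.03])
#     qy = numpy.array([0.01, 0.01, 0.01])
#     kwargs = dict(qx_data=qx, qy_data=qy,
#                   q_data=numpy.sqrt(qx**2 + qy**2),
#                   mask=numpy.ones(3, dtype=bool))
#     frame_a = Data2D(data=numpy.array([9.0, 8.0, 7.0]),
#                      err_data=numpy.array([0.3, 0.3, 0.3]), **kwargs)
#     frame_b = Data2D(data=numpy.array([1.0, 1.0, 1.0]),
#                      err_data=numpy.array([0.1, 0.1, 0.1]), **kwargs)
#     difference = frame_a - frame_b   # per-pixel subtraction with errors
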
def combine_data_info_with_plottable(data, datainfo):
    """
    A function that combines a DataInfo object with a plottable_1D or
    plottable_2D data object.

    :param data: A plottable_1D or plottable_2D data object
    :param datainfo: The DataInfo object to be combined with the data
    :return: A fully specified Data1D or Data2D object
    """

    final_dataset = None
    if isinstance(data, plottable_1D):
        final_dataset = Data1D(data.x, data.y)
        final_dataset.dx = data.dx
        final_dataset.dy = data.dy
        final_dataset.dxl = data.dxl
        final_dataset.dxw = data.dxw
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
    elif isinstance(data, plottable_2D):
        final_dataset = Data2D(data.data, data.err_data, data.qx_data, data.qy_data, data.q_data,
                               data.mask, data.dqx_data, data.dqy_data)
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
        final_dataset.zaxis(data._zaxis, data._zunit)
        final_dataset.x_bins = data.x_bins
        final_dataset.y_bins = data.y_bins
    else:
        return_string = "Should Never Happen: _combine_data_info_with_plottable input is not a plottable1d or " + \
                        "plottable2d data object"
        return return_string

    final_dataset.xmax = data.xmax
    final_dataset.ymax = data.ymax
    final_dataset.xmin = data.xmin
    final_dataset.ymin = data.ymin
    final_dataset.isSesans = datainfo.isSesans
    final_dataset.title = datainfo.title
    final_dataset.run = datainfo.run
    final_dataset.run_name = datainfo.run_name
    final_dataset.filename = datainfo.filename
    final_dataset.notes = datainfo.notes
    final_dataset.process = datainfo.process
    final_dataset.instrument = datainfo.instrument
    final_dataset.detector = datainfo.detector
    final_dataset.sample = datainfo.sample
    final_dataset.source = datainfo.source
    final_dataset.collimation = datainfo.collimation
    final_dataset.trans_spectrum = datainfo.trans_spectrum
    final_dataset.meta_data = datainfo.meta_data
    final_dataset.errors = datainfo.errors
    return final_dataset
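
# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module; all values are invented). It builds a toy plottable_1D, attaches a
# DataInfo description and combines the two into a Data1D object.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_x = [0.01, 0.02, 0.03]
    demo_y = [120.0, 80.0, 55.0]
    curve = plottable_1D(demo_x, demo_y, dy=[2.0, 1.5, 1.1])
    curve.xaxis("\\rm{Q}", "A^{-1}")
    curve.yaxis("\\rm{Intensity}", "cm^{-1}")
    # plottable_1D does not carry axis limits, but the combine function above
    # copies them, so set them explicitly on the plottable first.
    curve.xmin, curve.xmax = min(demo_x), max(demo_x)
    curve.ymin, curve.ymax = min(demo_y), max(demo_y)

    info = DataInfo()
    info.title = "Toy data set"
    info.filename = "toy_data.xml"
    info.sample.thickness = 1.0
    info.source.radiation = NEUTRON

    combined = combine_data_info_with_plottable(curve, info)
    print(combined)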