source: sasview/src/sas/sascalc/dataloader/data_info.py @ 6082048

Last change on this file since 6082048: 7432acb, checked in by andyfaff (MAINT: search+replace '!= None' by 'is not None')

"""
    Module that contains classes to hold information read from
    reduced data files.

    A good description of the data members can be found in
    the CanSAS 1D XML data format:

    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
"""
#####################################################################
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#See the license text in license.txt
#copyright 2008, University of Tennessee
######################################################################


#TODO: Keep track of data manipulation in the 'process' data structure.
#TODO: This module should be independent of plottables. We should write
#        an adapter class for plottables when needed.

#from sas.guitools.plottables import Data1D as plottable_1D
from sas.sascalc.data_util.uncertainty import Uncertainty
import numpy as np
import math

class plottable_1D(object):
    """
    Data1D is a place holder for 1D plottables.
    """
    # The presence of these should be mutually
    # exclusive with the presence of Qdev (dx)
    x = None
    y = None
    dx = None
    dy = None
    ## Slit smearing length
    dxl = None
    ## Slit smearing width
    dxw = None
    ## SESANS specific params (wavelengths for spin echo length calculation)
    lam = None
    dlam = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''

    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
        self.x = np.asarray(x)
        self.y = np.asarray(y)
        if dx is not None:
            self.dx = np.asarray(dx)
        if dy is not None:
            self.dy = np.asarray(dy)
        if dxl is not None:
            self.dxl = np.asarray(dxl)
        if dxw is not None:
            self.dxw = np.asarray(dxw)
        if lam is not None:
            self.lam = np.asarray(lam)
        if dlam is not None:
            self.dlam = np.asarray(dlam)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit

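# Illustrative sketch, not part of the original module: a plottable_1D is just
# a container for the Q / I(Q) arrays plus optional resolution columns.  The
# numbers below are made up.
#
#     curve = plottable_1D(x=[0.01, 0.02, 0.03], y=[12.0, 8.5, 5.1],
#                          dy=[0.4, 0.3, 0.2])
#     curve.xaxis("Q", "1/A")
#     curve.yaxis("I(Q)", "1/cm")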

class plottable_2D(object):
    """
    Data2D is a place holder for 2D plottables.
    """
    xmin = None
    xmax = None
    ymin = None
    ymax = None
    data = None
    qx_data = None
    qy_data = None
    q_data = None
    err_data = None
    dqx_data = None
    dqy_data = None
    mask = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''
    _zaxis = ''
    _zunit = ''

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        self.data = np.asarray(data)
        self.qx_data = np.asarray(qx_data)
        self.qy_data = np.asarray(qy_data)
        self.q_data = np.asarray(q_data)
        self.mask = np.asarray(mask)
        self.err_data = np.asarray(err_data)
        if dqx_data is not None:
            self.dqx_data = np.asarray(dqx_data)
        if dqy_data is not None:
            self.dqy_data = np.asarray(dqy_data)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit

    def zaxis(self, label, unit):
        """
        set the z axis label and unit
        """
        self._zaxis = label
        self._zunit = unit


class Vector(object):
    """
    Vector class to hold multi-dimensional objects
    """
    ## x component
    x = None
    ## y component
    y = None
    ## z component
    z = None

    def __init__(self, x=None, y=None, z=None):
        """
        Initialization. Components that are not
        set are set to None by default.

        :param x: x component
        :param y: y component
        :param z: z component
        """
        self.x = x
        self.y = y
        self.z = z

    def __str__(self):
        msg = "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
        return msg


class Detector(object):
    """
    Class to hold detector information
    """
    ## Name of the instrument [string]
    name = None
    ## Sample to detector distance [float] [mm]
    distance = None
    distance_unit = 'mm'
    ## Offset of this detector position in X, Y,
    #(and Z if necessary) [Vector] [mm]
    offset = None
    offset_unit = 'm'
    ## Orientation (rotation) of this detector in roll,
    # pitch, and yaw [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Center of the beam on the detector in X and Y
    #(and Z if necessary) [Vector] [mm]
    beam_center = None
    beam_center_unit = 'mm'
    ## Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
    pixel_size = None
    pixel_size_unit = 'mm'
    ## Slit length of the instrument for this detector.[float] [mm]
    slit_length = None
    slit_length_unit = 'mm'

    def __init__(self):
        """
        Initialize class attributes that are objects...
        """
        self.offset = Vector()
        self.orientation = Vector()
        self.beam_center = Vector()
        self.pixel_size = Vector()

    def __str__(self):
        _str = "Detector:\n"
        _str += "   Name:         %s\n" % self.name
        _str += "   Distance:     %s [%s]\n" % \
            (str(self.distance), str(self.distance_unit))
        _str += "   Offset:       %s [%s]\n" % \
            (str(self.offset), str(self.offset_unit))
        _str += "   Orientation:  %s [%s]\n" % \
            (str(self.orientation), str(self.orientation_unit))
        _str += "   Beam center:  %s [%s]\n" % \
            (str(self.beam_center), str(self.beam_center_unit))
        _str += "   Pixel size:   %s [%s]\n" % \
            (str(self.pixel_size), str(self.pixel_size_unit))
        _str += "   Slit length:  %s [%s]\n" % \
            (str(self.slit_length), str(self.slit_length_unit))
        return _str

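# Illustrative sketch, not part of the original module: the metadata classes
# are plain attribute holders, with Vector used for multi-component fields.
# The numbers are made up.
#
#     detector = Detector()
#     detector.name = "example detector"
#     detector.distance = 4000.0        # interpreted in distance_unit ('mm')
#     detector.beam_center.x = 64.5     # Vector components are set one by one
#     detector.beam_center.y = 64.5
#     print(detector)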

class Aperture(object):
    """
    Class to hold aperture information
    """
    ## Name
    name = None
    ## Type
    type = None
    ## Size name
    size_name = None
    ## Aperture size [Vector]
    size = None
    size_unit = 'mm'
    ## Aperture distance [float]
    distance = None
    distance_unit = 'mm'

    def __init__(self):
        self.size = Vector()


class Collimation(object):
    """
    Class to hold collimation information
    """
    ## Name
    name = None
    ## Length [float] [mm]
    length = None
    length_unit = 'mm'
    ## Aperture
    aperture = None

    def __init__(self):
        self.aperture = []

    def __str__(self):
        _str = "Collimation:\n"
        _str += "   Length:       %s [%s]\n" % \
            (str(self.length), str(self.length_unit))
        for item in self.aperture:
            _str += "   Aperture size:%s [%s]\n" % \
                (str(item.size), str(item.size_unit))
            _str += "   Aperture_dist:%s [%s]\n" % \
                (str(item.distance), str(item.distance_unit))
        return _str


class Source(object):
    """
    Class to hold source information
    """
    ## Name
    name = None
    ## Radiation type [string]
    radiation = None
    ## Beam size name
    beam_size_name = None
    ## Beam size [Vector] [mm]
    beam_size = None
    beam_size_unit = 'mm'
    ## Beam shape [string]
    beam_shape = None
    ## Wavelength [float] [Angstrom]
    wavelength = None
    wavelength_unit = 'A'
    ## Minimum wavelength [float] [Angstrom]
    wavelength_min = None
    wavelength_min_unit = 'nm'
    ## Maximum wavelength [float] [Angstrom]
    wavelength_max = None
    wavelength_max_unit = 'nm'
    ## Wavelength spread [float] [Angstrom]
    wavelength_spread = None
    wavelength_spread_unit = 'percent'

    def __init__(self):
        self.beam_size = Vector()

    def __str__(self):
        _str = "Source:\n"
        _str += "   Radiation:    %s\n" % str(self.radiation)
        _str += "   Shape:        %s\n" % str(self.beam_shape)
        _str += "   Wavelength:   %s [%s]\n" % \
            (str(self.wavelength), str(self.wavelength_unit))
        _str += "   Waveln_min:   %s [%s]\n" % \
            (str(self.wavelength_min), str(self.wavelength_min_unit))
        _str += "   Waveln_max:   %s [%s]\n" % \
            (str(self.wavelength_max), str(self.wavelength_max_unit))
        _str += "   Waveln_spread:%s [%s]\n" % \
            (str(self.wavelength_spread), str(self.wavelength_spread_unit))
        _str += "   Beam_size:    %s [%s]\n" % \
            (str(self.beam_size), str(self.beam_size_unit))
        return _str


"""
Definitions of radiation types
"""
NEUTRON = 'neutron'
XRAY = 'x-ray'
MUON = 'muon'
ELECTRON = 'electron'


class Sample(object):
    """
    Class to hold the sample description
    """
    ## Short name for sample
    name = ''
    ## ID
    ID = ''
    ## Thickness [float] [mm]
    thickness = None
    thickness_unit = 'mm'
    ## Transmission [float] [fraction]
    transmission = None
    ## Temperature [float] [No Default]
    temperature = None
    temperature_unit = None
    ## Position [Vector] [mm]
    position = None
    position_unit = 'mm'
    ## Orientation [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Details
    details = None
    ## SESANS zacceptance
    zacceptance = (0, "")
    yacceptance = (0, "")

    def __init__(self):
        self.position = Vector()
        self.orientation = Vector()
        self.details = []

    def __str__(self):
        _str = "Sample:\n"
        _str += "   ID:           %s\n" % str(self.ID)
        _str += "   Transmission: %s\n" % str(self.transmission)
        _str += "   Thickness:    %s [%s]\n" % \
            (str(self.thickness), str(self.thickness_unit))
        _str += "   Temperature:  %s [%s]\n" % \
            (str(self.temperature), str(self.temperature_unit))
        _str += "   Position:     %s [%s]\n" % \
            (str(self.position), str(self.position_unit))
        _str += "   Orientation:  %s [%s]\n" % \
            (str(self.orientation), str(self.orientation_unit))

        _str += "   Details:\n"
        for item in self.details:
            _str += "      %s\n" % item

        return _str


class Process(object):
    """
    Class that holds information about the processes
    performed on the data.
    """
    name = ''
    date = ''
    description = ''
    term = None
    notes = None

    def __init__(self):
        self.term = []
        self.notes = []

    def is_empty(self):
        """
            Return True if the object is empty
        """
        return len(self.name) == 0 and len(self.date) == 0 and len(self.description) == 0 \
            and len(self.term) == 0 and len(self.notes) == 0

    def single_line_desc(self):
        """
            Return a single line string representing the process
        """
        return "%s %s %s" % (self.name, self.date, self.description)

    def __str__(self):
        _str = "Process:\n"
        _str += "   Name:         %s\n" % self.name
        _str += "   Date:         %s\n" % self.date
        _str += "   Description:  %s\n" % self.description
        for item in self.term:
            _str += "   Term:         %s\n" % item
        for item in self.notes:
            _str += "   Note:         %s\n" % item
        return _str


class TransmissionSpectrum(object):
    """
    Class that holds information about transmission spectrum
    for white beams and spallation sources.
    """
    name = ''
    timestamp = ''
    ## Wavelength (float) [A]
    wavelength = None
    wavelength_unit = 'A'
    ## Transmission (float) [unit less]
    transmission = None
    transmission_unit = ''
    ## Transmission Deviation (float) [unit less]
    transmission_deviation = None
    transmission_deviation_unit = ''

    def __init__(self):
        self.wavelength = []
        self.transmission = []
        self.transmission_deviation = []

    def __str__(self):
        _str = "Transmission Spectrum:\n"
        _str += "   Name:             \t{0}\n".format(self.name)
        _str += "   Timestamp:        \t{0}\n".format(self.timestamp)
        _str += "   Wavelength unit:  \t{0}\n".format(self.wavelength_unit)
        _str += "   Transmission unit:\t{0}\n".format(self.transmission_unit)
        _str += "   Trans. Dev. unit:  \t{0}\n".format(\
                                            self.transmission_deviation_unit)
        length_list = [len(self.wavelength), len(self.transmission), \
                len(self.transmission_deviation)]
        _str += "   Number of Pts:    \t{0}\n".format(max(length_list))
        return _str


class DataInfo(object):
    """
    Class to hold the data read from a file.
    It includes four blocks of data for the
    instrument description, the sample description,
    the data itself and any other meta data.
    """
    ## Title
    title = ''
    ## Run number
    run = None
    ## Run name
    run_name = None
    ## File name
    filename = ''
    ## Notes
    notes = None
    ## Processes (Action on the data)
    process = None
    ## Instrument name
    instrument = ''
    ## Detector information
    detector = None
    ## Sample information
    sample = None
    ## Source information
    source = None
    ## Collimation information
    collimation = None
    ## Transmission Spectrum Info
    trans_spectrum = None
    ## Additional meta-data
    meta_data = None
    ## Loading errors
    errors = None
    ## SESANS data check
    isSesans = None


    def __init__(self):
        """
        Initialization
        """
        ## Title
        self.title = ''
        ## Run number
        self.run = []
        self.run_name = {}
        ## File name
        self.filename = ''
        ## Notes
        self.notes = []
        ## Processes (Action on the data)
        self.process = []
        ## Instrument name
        self.instrument = ''
        ## Detector information
        self.detector = []
        ## Sample information
        self.sample = Sample()
        ## Source information
        self.source = Source()
        ## Collimation information
        self.collimation = []
        ## Transmission Spectrum
        self.trans_spectrum = []
        ## Additional meta-data
        self.meta_data = {}
        ## Loading errors
        self.errors = []
        ## SESANS data check
        self.isSesans = False

    def append_empty_process(self):
        """
        Append an empty Process to the list of processes
        """
        self.process.append(Process())

    def add_notes(self, message=""):
        """
        Add notes to datainfo
        """
        self.notes.append(message)

    def __str__(self):
        """
        Nice printout
        """
        _str = "File:            %s\n" % self.filename
        _str += "Title:           %s\n" % self.title
        _str += "Run:             %s\n" % str(self.run)
        _str += "SESANS:          %s\n" % str(self.isSesans)
        _str += "Instrument:      %s\n" % str(self.instrument)
        _str += "%s\n" % str(self.sample)
        _str += "%s\n" % str(self.source)
        for item in self.detector:
            _str += "%s\n" % str(item)
        for item in self.collimation:
            _str += "%s\n" % str(item)
        for item in self.process:
            _str += "%s\n" % str(item)
        for item in self.notes:
            _str += "%s\n" % str(item)
        for item in self.trans_spectrum:
            _str += "%s\n" % str(item)
        return _str

    # Private method to perform operation. Not implemented for DataInfo,
    # but should be implemented for each data class inherited from DataInfo
    # that holds actual data (ex.: Data1D)
    def _perform_operation(self, other, operation):
        """
        Private method to perform operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def _perform_union(self, other):
        """
        Private method to perform union operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def __add__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a + b
        return self._perform_operation(other, operation)

    def __radd__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b + a
        return self._perform_operation(other, operation)

    def __sub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a - b
        return self._perform_operation(other, operation)

    def __rsub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b - a
        return self._perform_operation(other, operation)

    def __mul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a * b
        return self._perform_operation(other, operation)

    def __rmul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b * a
        return self._perform_operation(other, operation)

    def __div__(self, other):
        """
        Divide one data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a/b
        return self._perform_operation(other, operation)

    def __rdiv__(self, other):
        """
        Divide one data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b/a
        return self._perform_operation(other, operation)

    def __or__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

    def __ror__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

class Data1D(plottable_1D, DataInfo):
    """
    1D data class
    """
    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=None):
        DataInfo.__init__(self)
        plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
        self.isSesans = isSesans
        try:
            if self.isSesans: # the data is SESANS
                self.x_unit = 'A'
                self.y_unit = 'pol'
            elif not self.isSesans: # the data is SANS
                self.x_unit = '1/A'
                self.y_unit = '1/cm'
        except: # the data is not recognized/supported, and the user is notified
            raise TypeError('data not recognized, check documentation for supported 1D data formats')

    def __str__(self):
        """
        Nice printout
        """
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Length:       %g\n" % len(self.x)
        return _str

    def is_slit_smeared(self):
        """
        Check whether the data has slit smearing information
        :return: True if slit smearing info is present, False otherwise
        """
        def _check(v):
            if (v.__class__ == list or v.__class__ == np.ndarray) \
                and len(v) > 0 and min(v) > 0:
                return True
            return False
        return _check(self.dxl) or _check(self.dxw)

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data1D):
            x = np.zeros(length)
            dx = np.zeros(length)
            y = np.zeros(length)
            dy = np.zeros(length)
            lam = np.zeros(length)
            dlam = np.zeros(length)
            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)

        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: dy for self, dy for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        dy_other = None
        if isinstance(other, Data1D):
            # Check that data lengths are the same
            if len(self.x) != len(other.x) or \
                len(self.y) != len(other.y):
                msg = "Unable to perform operation: data lengths are not equal"
                raise ValueError(msg)
            # Here we could also extrapolate between data points
            TOLERANCE = 0.01
            for i in range(len(self.x)):
                if math.fabs((self.x[i] - other.x[i])/self.x[i]) > TOLERANCE:
                    msg = "Incompatible data sets: x-values do not match"
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector
            dy_other = other.dy
            if other.dy is None or (len(other.dy) != len(other.y)):
                dy_other = np.zeros(len(other.y))

        # Check that we have errors, otherwise create zero vector
        dy = self.dy
        if self.dy is None or (len(self.dy) != len(self.y)):
            dy = np.zeros(len(self.y))

        return dy, dy_other

    def _perform_operation(self, other, operation):
        """
        Perform the given operation point by point, propagating
        uncertainties through the Uncertainty class.
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(len(self.x))
        if self.dxw is None:
            result.dxw = None
        else:
            result.dxw = np.zeros(len(self.x))
        if self.dxl is None:
            result.dxl = None
        else:
            result.dxl = np.zeros(len(self.x))

        for i in range(len(self.x)):
            result.x[i] = self.x[i]
            if self.dx is not None and len(self.x) == len(self.dx):
                result.dx[i] = self.dx[i]
            if self.dxw is not None and len(self.x) == len(self.dxw):
                result.dxw[i] = self.dxw[i]
            if self.dxl is not None and len(self.x) == len(self.dxl):
                result.dxl[i] = self.dxl[i]

            a = Uncertainty(self.y[i], dy[i]**2)
            if isinstance(other, Data1D):
                b = Uncertainty(other.y[i], dy_other[i]**2)
                if other.dx is not None:
                    result.dx[i] *= self.dx[i]
                    result.dx[i] += (other.dx[i]**2)
                    result.dx[i] /= 2
                    result.dx[i] = math.sqrt(result.dx[i])
                if result.dxl is not None and other.dxl is not None:
                    result.dxl[i] *= self.dxl[i]
                    result.dxl[i] += (other.dxl[i]**2)
                    result.dxl[i] /= 2
                    result.dxl[i] = math.sqrt(result.dxl[i])
            else:
                b = other

            output = operation(a, b)
            result.y[i] = output.x
            result.dy[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the two data sets are of the same type so that
        a union can be formed.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data1D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Concatenate the current data set with another one,
        sorting the result on x.
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(len(self.x) + len(other.x))
        if self.dy is None or other.dy is None:
            result.dy = None
        else:
            result.dy = np.zeros(len(self.x) + len(other.x))
        if self.dx is None or other.dx is None:
            result.dx = None
        else:
            result.dx = np.zeros(len(self.x) + len(other.x))
        if self.dxw is None or other.dxw is None:
            result.dxw = None
        else:
            result.dxw = np.zeros(len(self.x) + len(other.x))
        if self.dxl is None or other.dxl is None:
            result.dxl = None
        else:
            result.dxl = np.zeros(len(self.x) + len(other.x))

        result.x = np.append(self.x, other.x)
        #argsorting
        ind = np.argsort(result.x)
        result.x = result.x[ind]
        result.y = np.append(self.y, other.y)
        result.y = result.y[ind]
        if result.dy is not None:
            result.dy = np.append(self.dy, other.dy)
            result.dy = result.dy[ind]
        if result.dx is not None:
            result.dx = np.append(self.dx, other.dx)
            result.dx = result.dx[ind]
        if result.dxw is not None:
            result.dxw = np.append(self.dxw, other.dxw)
            result.dxw = result.dxw[ind]
        if result.dxl is not None:
            result.dxl = np.append(self.dxl, other.dxl)
            result.dxl = result.dxl[ind]
        return result

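# Illustrative sketch, not part of the original module: Data1D objects can be
# combined with the arithmetic operators inherited from DataInfo.  With two
# compatible, made-up data sets,
#
#     d1 = Data1D(x=[0.01, 0.02], y=[10.0, 8.0], dy=[1.0, 1.0])
#     d2 = Data1D(x=[0.01, 0.02], y=[4.0, 2.0], dy=[1.0, 1.0])
#     total = d1 + d2      # y = [14.0, 10.0]
#     merged = d1 | d2     # union: points appended, then sorted on x
#
# _perform_operation() wraps each point in an Uncertainty object, so the dy of
# 'total' is the quadrature sum of the input errors (sqrt(2) here), assuming
# Uncertainty propagates variances in the usual way.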

class Data2D(plottable_2D, DataInfo):
    """
    2D data class
    """
    ## Units for Q-values
    Q_unit = '1/A'
    ## Units for I(Q) values
    I_unit = '1/cm'
    ## Vector of Q-values at the center of each bin in x
    x_bins = None
    ## Vector of Q-values at the center of each bin in y
    y_bins = None
    ## No 2D SESANS data as of yet. Always set it to False
    isSesans = False

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        DataInfo.__init__(self)
        plottable_2D.__init__(self, data, err_data, qx_data,
                              qy_data, q_data, mask, dqx_data, dqy_data)
        self.y_bins = []
        self.x_bins = []

        if len(self.detector) > 0:
            raise RuntimeError("Data2D: Detector bank already filled at init")

    def __str__(self):
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X- & Y-axis:  %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
        _str += "   Length:       %g \n" % (len(self.data))
        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
        return _str

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data2D):
            data = np.zeros(length)
            err_data = np.zeros(length)
            qx_data = np.zeros(length)
            qy_data = np.zeros(length)
            q_data = np.zeros(length)
            mask = np.zeros(length)
            dqx_data = None
            dqy_data = None
            clone = Data2D(data=data, err_data=err_data,
                           qx_data=qx_data, qy_data=qy_data,
                           q_data=q_data, mask=mask)

        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: dy for self, dy for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        err_other = None
        TOLERANCE = 0.01
        if isinstance(other, Data2D):
            # Check that data lengths are the same
            if len(self.data) != len(other.data) or \
                len(self.qx_data) != len(other.qx_data) or \
                len(self.qy_data) != len(other.qy_data):
                msg = "Unable to perform operation: data lengths are not equal"
                raise ValueError(msg)
            for ind in range(len(self.data)):
                if math.fabs((self.qx_data[ind] - other.qx_data[ind])/self.qx_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
                    raise ValueError(msg)
                if math.fabs((self.qy_data[ind] - other.qy_data[ind])/self.qy_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
                    raise ValueError(msg)

            # Check that the scales match
            err_other = other.err_data
            if other.err_data is None or \
                (len(other.err_data) != len(other.data)):
                err_other = np.zeros(len(other.data))

        # Check that we have errors, otherwise create zero vector
        err = self.err_data
        if self.err_data is None or \
            (len(self.err_data) != len(self.data)):
            err = np.zeros(len(self.data))
        return err, err_other

    def _perform_operation(self, other, operation):
        """
        Perform 2D operations between data sets

        :param other: other data set
        :param operation: function defining the operation
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(np.size(self.data))
        if self.dqx_data is None or self.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = np.zeros(len(self.data))
            result.dqy_data = np.zeros(len(self.data))
        for i in range(np.size(self.data)):
            result.data[i] = self.data[i]
            if self.err_data is not None and \
                            np.size(self.data) == np.size(self.err_data):
                result.err_data[i] = self.err_data[i]
            if self.dqx_data is not None:
                result.dqx_data[i] = self.dqx_data[i]
            if self.dqy_data is not None:
                result.dqy_data[i] = self.dqy_data[i]
            result.qx_data[i] = self.qx_data[i]
            result.qy_data[i] = self.qy_data[i]
            result.q_data[i] = self.q_data[i]
            result.mask[i] = self.mask[i]

            a = Uncertainty(self.data[i], dy[i]**2)
            if isinstance(other, Data2D):
                b = Uncertainty(other.data[i], dy_other[i]**2)
                if other.dqx_data is not None and \
                        result.dqx_data is not None:
                    result.dqx_data[i] *= self.dqx_data[i]
                    result.dqx_data[i] += (other.dqx_data[i]**2)
                    result.dqx_data[i] /= 2
                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
                if other.dqy_data is not None and \
                        result.dqy_data is not None:
                    result.dqy_data[i] *= self.dqy_data[i]
                    result.dqy_data[i] += (other.dqy_data[i]**2)
                    result.dqy_data[i] /= 2
                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
            else:
                b = other
            output = operation(a, b)
            result.data[i] = output.x
            result.err_data[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the two data sets are of the same type so that
        a union can be formed.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data2D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Form the union of two 2D data sets by appending their
        per-pixel arrays.

        :param other: other data set
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(np.size(self.data) + \
                                         np.size(other.data))
        result.xmin = self.xmin
        result.xmax = self.xmax
        result.ymin = self.ymin
        result.ymax = self.ymax
        if self.dqx_data is None or self.dqy_data is None or \
                other.dqx_data is None or other.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = np.zeros(len(self.data) + \
                                       np.size(other.data))
            result.dqy_data = np.zeros(len(self.data) + \
                                       np.size(other.data))

        result.data = np.append(self.data, other.data)
        result.qx_data = np.append(self.qx_data, other.qx_data)
        result.qy_data = np.append(self.qy_data, other.qy_data)
        result.q_data = np.append(self.q_data, other.q_data)
        result.mask = np.append(self.mask, other.mask)
        if result.err_data is not None:
            result.err_data = np.append(self.err_data, other.err_data)
        if self.dqx_data is not None:
            result.dqx_data = np.append(self.dqx_data, other.dqx_data)
        if self.dqy_data is not None:
            result.dqy_data = np.append(self.dqy_data, other.dqy_data)

        return result

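# Illustrative sketch, not part of the original module: Data2D stores the
# detector image as flat, per-pixel arrays, so data, err_data, qx/qy/q and the
# mask all share one length.  The values below are made up.
#
#     qx = np.array([-0.01, 0.0, 0.01, -0.01, 0.0, 0.01])
#     qy = np.array([-0.01, -0.01, -0.01, 0.01, 0.01, 0.01])
#     frame = Data2D(data=np.ones(6), err_data=0.1 * np.ones(6),
#                    qx_data=qx, qy_data=qy,
#                    q_data=np.sqrt(qx**2 + qy**2),
#                    mask=np.ones(6, dtype=bool))
#     frame.zaxis("Intensity", "1/cm")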

def combine_data_info_with_plottable(data, datainfo):
    """
    A function that combines the DataInfo data in datainfo with a
    plottable_1D or 2D data object.

    :param data: A plottable_1D or plottable_2D data object
    :param datainfo: A DataInfo object to be combined with the plottable
    :return: A fully specified Data1D or Data2D object
    """

    final_dataset = None
    if isinstance(data, plottable_1D):
        final_dataset = Data1D(data.x, data.y)
        final_dataset.dx = data.dx
        final_dataset.dy = data.dy
        final_dataset.dxl = data.dxl
        final_dataset.dxw = data.dxw
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
    elif isinstance(data, plottable_2D):
        final_dataset = Data2D(data.data, data.err_data, data.qx_data, data.qy_data, data.q_data,
                               data.mask, data.dqx_data, data.dqy_data)
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
        final_dataset.zaxis(data._zaxis, data._zunit)
        final_dataset.x_bins = data.x_bins
        final_dataset.y_bins = data.y_bins
    else:
        return_string = "Should Never Happen: _combine_data_info_with_plottable input is not a plottable1d or " + \
                        "plottable2d data object"
        return return_string

    final_dataset.xmax = data.xmax
    final_dataset.ymax = data.ymax
    final_dataset.xmin = data.xmin
    final_dataset.ymin = data.ymin
    final_dataset.isSesans = datainfo.isSesans
    final_dataset.title = datainfo.title
    final_dataset.run = datainfo.run
    final_dataset.run_name = datainfo.run_name
    final_dataset.filename = datainfo.filename
    final_dataset.notes = datainfo.notes
    final_dataset.process = datainfo.process
    final_dataset.instrument = datainfo.instrument
    final_dataset.detector = datainfo.detector
    final_dataset.sample = datainfo.sample
    final_dataset.source = datainfo.source
    final_dataset.collimation = datainfo.collimation
    final_dataset.trans_spectrum = datainfo.trans_spectrum
    final_dataset.meta_data = datainfo.meta_data
    final_dataset.errors = datainfo.errors
    return final_dataset
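

# Minimal, illustrative driver, not part of the original module: it exercises
# combine_data_info_with_plottable() with made-up numbers.  The x/y limits are
# set by hand because the combine step copies them from the plottable.
if __name__ == "__main__":
    _x = np.asarray([0.01, 0.02, 0.03])
    _y = np.asarray([11.0, 7.5, 5.2])
    _curve = plottable_1D(_x, _y, dy=np.asarray([0.4, 0.3, 0.2]))
    _curve.xaxis("Q", "1/A")
    _curve.yaxis("I(Q)", "1/cm")
    _curve.xmin, _curve.xmax = _x.min(), _x.max()
    _curve.ymin, _curve.ymax = _y.min(), _y.max()

    _info = DataInfo()
    _info.title = "example run"
    _info.filename = "example.xml"
    _info.add_notes("illustrative data, not a real measurement")

    print(combine_data_info_with_plottable(_curve, _info))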