source: sasview/src/sas/sascalc/dataloader/data_info.py @ 2a2b43a

Last change on this file since 2a2b43a was 18501795, checked in by krzywon, 8 years ago

Set the x_unit and y_unit in Data1D to class variables instead of instance variables to fix failing unit tests.

"""
    Module that contains classes to hold information read from
    reduced data files.

    A good description of the data members can be found in
    the CanSAS 1D XML data format:

    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
"""
#####################################################################
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#See the license text in license.txt
#copyright 2008, University of Tennessee
######################################################################


#TODO: Keep track of data manipulation in the 'process' data structure.
#TODO: This module should be independent of plottables. We should write
#        an adapter class for plottables when needed.

#from sas.guitools.plottables import Data1D as plottable_1D
from sas.sascalc.data_util.uncertainty import Uncertainty
import numpy
import math

class plottable_1D(object):
    """
    Placeholder for 1D plottable data.
    """
    x = None
    y = None
    dx = None
    dy = None
    ## Slit smearing length. The presence of dxl/dxw should be
    # mutually exclusive with the presence of Qdev (dx).
    dxl = None
    ## Slit smearing width
    dxw = None
    ## SESANS specific params (wavelengths for spin echo length calculation)
    lam = None
    dlam = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''

    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
        self.x = numpy.asarray(x)
        self.y = numpy.asarray(y)
        if dx is not None:
            self.dx = numpy.asarray(dx)
        if dy is not None:
            self.dy = numpy.asarray(dy)
        if dxl is not None:
            self.dxl = numpy.asarray(dxl)
        if dxw is not None:
            self.dxw = numpy.asarray(dxw)
        if lam is not None:
            self.lam = numpy.asarray(lam)
        if dlam is not None:
            self.dlam = numpy.asarray(dlam)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit

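
# Illustrative usage sketch (documentation only): how a reader might fill a
# plottable_1D and label its axes. The helper name and the numbers below are
# made up for illustration.
def _example_plottable_1d():
    x = numpy.linspace(0.001, 0.4, 5)   # Q values [1/A]
    y = numpy.ones_like(x)              # I(Q) values [1/cm]
    dy = 0.1 * y                        # uncertainty on I(Q)
    data = plottable_1D(x, y, dy=dy)
    data.xaxis("Q", "1/A")
    data.yaxis("I(Q)", "1/cm")
    return data
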
class plottable_2D(object):
    """
    Placeholder for 2D plottable data.
    """
    xmin = None
    xmax = None
    ymin = None
    ymax = None
    data = None
    qx_data = None
    qy_data = None
    q_data = None
    err_data = None
    dqx_data = None
    dqy_data = None
    mask = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''
    _zaxis = ''
    _zunit = ''

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        self.data = numpy.asarray(data)
        self.qx_data = numpy.asarray(qx_data)
        self.qy_data = numpy.asarray(qy_data)
        self.q_data = numpy.asarray(q_data)
        self.mask = numpy.asarray(mask)
        self.err_data = numpy.asarray(err_data)
        if dqx_data is not None:
            self.dqx_data = numpy.asarray(dqx_data)
        if dqy_data is not None:
            self.dqy_data = numpy.asarray(dqy_data)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit

    def zaxis(self, label, unit):
        """
        set the z axis label and unit
        """
        self._zaxis = label
        self._zunit = unit

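
# Illustrative usage sketch (documentation only): plottable_2D stores the
# detector image as flattened 1D arrays, one entry per pixel. The helper name
# and the 4x4 grid below are made up; the mask convention (True = usable
# pixel) is an assumption.
def _example_plottable_2d():
    qx, qy = numpy.meshgrid(numpy.linspace(-0.1, 0.1, 4),
                            numpy.linspace(-0.1, 0.1, 4))
    qx_data = qx.flatten()
    qy_data = qy.flatten()
    q_data = numpy.sqrt(qx_data**2 + qy_data**2)
    data = numpy.ones_like(q_data)          # I(qx, qy) per pixel
    err_data = 0.1 * data
    mask = numpy.ones_like(q_data, dtype=bool)
    image = plottable_2D(data=data, err_data=err_data, qx_data=qx_data,
                         qy_data=qy_data, q_data=q_data, mask=mask)
    image.zaxis("I(Q)", "1/cm")
    return image
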
class Vector(object):
    """
    Vector class to hold multi-dimensional objects
    """
    ## x component
    x = None
    ## y component
    y = None
    ## z component
    z = None

    def __init__(self, x=None, y=None, z=None):
        """
        Initialization. Components that are not
        set default to None.

        :param x: x component
        :param y: y component
        :param z: z component
        """
        self.x = x
        self.y = y
        self.z = z

    def __str__(self):
        msg = "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
        return msg


class Detector(object):
    """
    Class to hold detector information
    """
    ## Name of the instrument [string]
    name = None
    ## Sample to detector distance [float] [mm]
    distance = None
    distance_unit = 'mm'
    ## Offset of this detector position in X, Y,
    #(and Z if necessary) [Vector] [mm]
    offset = None
    offset_unit = 'm'
    ## Orientation (rotation) of this detector in roll,
    # pitch, and yaw [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Center of the beam on the detector in X and Y
    #(and Z if necessary) [Vector] [mm]
    beam_center = None
    beam_center_unit = 'mm'
    ## Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
    pixel_size = None
    pixel_size_unit = 'mm'
    ## Slit length of the instrument for this detector. [float] [mm]
    slit_length = None
    slit_length_unit = 'mm'

    def __init__(self):
        """
        Initialize class attributes that are objects...
        """
        self.offset = Vector()
        self.orientation = Vector()
        self.beam_center = Vector()
        self.pixel_size = Vector()

    def __str__(self):
        _str = "Detector:\n"
        _str += "   Name:         %s\n" % self.name
        _str += "   Distance:     %s [%s]\n" % \
            (str(self.distance), str(self.distance_unit))
        _str += "   Offset:       %s [%s]\n" % \
            (str(self.offset), str(self.offset_unit))
        _str += "   Orientation:  %s [%s]\n" % \
            (str(self.orientation), str(self.orientation_unit))
        _str += "   Beam center:  %s [%s]\n" % \
            (str(self.beam_center), str(self.beam_center_unit))
        _str += "   Pixel size:   %s [%s]\n" % \
            (str(self.pixel_size), str(self.pixel_size_unit))
        _str += "   Slit length:  %s [%s]\n" % \
            (str(self.slit_length), str(self.slit_length_unit))
        return _str


class Aperture(object):
    """
    Class to hold aperture information
    """
    ## Name
    name = None
    ## Type
    type = None
    ## Size name
    size_name = None
    ## Aperture size [Vector]
    size = None
    size_unit = 'mm'
    ## Aperture distance [float]
    distance = None
    distance_unit = 'mm'

    def __init__(self):
        self.size = Vector()


class Collimation(object):
    """
    Class to hold collimation information
    """
    ## Name
    name = None
    ## Length [float] [mm]
    length = None
    length_unit = 'mm'
    ## Aperture
    aperture = None

    def __init__(self):
        self.aperture = []

    def __str__(self):
        _str = "Collimation:\n"
        _str += "   Length:       %s [%s]\n" % \
            (str(self.length), str(self.length_unit))
        for item in self.aperture:
            _str += "   Aperture size:%s [%s]\n" % \
                (str(item.size), str(item.size_unit))
            _str += "   Aperture_dist:%s [%s]\n" % \
                (str(item.distance), str(item.distance_unit))
        return _str


class Source(object):
    """
    Class to hold source information
    """
    ## Name
    name = None
    ## Radiation type [string]
    radiation = None
    ## Beam size name
    beam_size_name = None
    ## Beam size [Vector] [mm]
    beam_size = None
    beam_size_unit = 'mm'
    ## Beam shape [string]
    beam_shape = None
    ## Wavelength [float] [Angstrom]
    wavelength = None
    wavelength_unit = 'A'
    ## Minimum wavelength [float] [Angstrom]
    wavelength_min = None
    wavelength_min_unit = 'nm'
    ## Maximum wavelength [float] [Angstrom]
    wavelength_max = None
    wavelength_max_unit = 'nm'
    ## Wavelength spread [float] [Angstrom]
    wavelength_spread = None
    wavelength_spread_unit = 'percent'

    def __init__(self):
        self.beam_size = Vector()

    def __str__(self):
        _str = "Source:\n"
        _str += "   Radiation:    %s\n" % str(self.radiation)
        _str += "   Shape:        %s\n" % str(self.beam_shape)
        _str += "   Wavelength:   %s [%s]\n" % \
            (str(self.wavelength), str(self.wavelength_unit))
        _str += "   Waveln_min:   %s [%s]\n" % \
            (str(self.wavelength_min), str(self.wavelength_min_unit))
        _str += "   Waveln_max:   %s [%s]\n" % \
            (str(self.wavelength_max), str(self.wavelength_max_unit))
        _str += "   Waveln_spread:%s [%s]\n" % \
            (str(self.wavelength_spread), str(self.wavelength_spread_unit))
        _str += "   Beam_size:    %s [%s]\n" % \
            (str(self.beam_size), str(self.beam_size_unit))
        return _str


"""
Definitions of radiation types
"""
NEUTRON = 'neutron'
XRAY = 'x-ray'
MUON = 'muon'
ELECTRON = 'electron'

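
# Illustrative usage sketch (documentation only): filling in a Source
# description using one of the radiation type constants defined above.
# The helper name and all numbers are made up for illustration.
def _example_source():
    source = Source()
    source.radiation = NEUTRON
    source.wavelength = 6.0
    source.wavelength_unit = 'A'
    source.beam_shape = 'circular'
    source.beam_size.x = 10.0           # beam width  [mm]
    source.beam_size.y = 10.0           # beam height [mm]
    return source
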
class Sample(object):
    """
    Class to hold the sample description
    """
    ## Short name for sample
    name = ''
    ## ID
    ID = ''
    ## Thickness [float] [mm]
    thickness = None
    thickness_unit = 'mm'
    ## Transmission [float] [fraction]
    transmission = None
    ## Temperature [float] [No Default]
    temperature = None
    temperature_unit = None
    ## Position [Vector] [mm]
    position = None
    position_unit = 'mm'
    ## Orientation [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Details
    details = None

    def __init__(self):
        self.position = Vector()
        self.orientation = Vector()
        self.details = []

    def __str__(self):
        _str = "Sample:\n"
        _str += "   ID:           %s\n" % str(self.ID)
        _str += "   Transmission: %s\n" % str(self.transmission)
        _str += "   Thickness:    %s [%s]\n" % \
            (str(self.thickness), str(self.thickness_unit))
        _str += "   Temperature:  %s [%s]\n" % \
            (str(self.temperature), str(self.temperature_unit))
        _str += "   Position:     %s [%s]\n" % \
            (str(self.position), str(self.position_unit))
        _str += "   Orientation:  %s [%s]\n" % \
            (str(self.orientation), str(self.orientation_unit))

        _str += "   Details:\n"
        for item in self.details:
            _str += "      %s\n" % item

        return _str


class Process(object):
    """
    Class that holds information about the processes
    performed on the data.
    """
    name = ''
    date = ''
    description = ''
    term = None
    notes = None

    def __init__(self):
        self.term = []
        self.notes = []

    def is_empty(self):
        """
        Return True if the object is empty
        """
        return len(self.name) == 0 and len(self.date) == 0 and len(self.description) == 0 \
            and len(self.term) == 0 and len(self.notes) == 0

    def single_line_desc(self):
        """
        Return a single line string representing the process
        """
        return "%s %s %s" % (self.name, self.date, self.description)

    def __str__(self):
        _str = "Process:\n"
        _str += "   Name:         %s\n" % self.name
        _str += "   Date:         %s\n" % self.date
        _str += "   Description:  %s\n" % self.description
        for item in self.term:
            _str += "   Term:         %s\n" % item
        for item in self.notes:
            _str += "   Note:         %s\n" % item
        return _str

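
# Illustrative usage sketch (documentation only): a Process record describes
# one reduction step; term and notes are free-form lists. The helper name and
# the values below are made up for illustration.
def _example_process():
    proc = Process()
    proc.name = "background subtraction"
    proc.date = "2017-01-01"
    proc.description = "subtracted empty-cell measurement"
    proc.term.append("scale=1.0")
    proc.notes.append("illustrative entry only")
    return proc
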
class TransmissionSpectrum(object):
    """
    Class that holds information about transmission spectrum
    for white beams and spallation sources.
    """
    name = ''
    timestamp = ''
    ## Wavelength (float) [A]
    wavelength = None
    wavelength_unit = 'A'
    ## Transmission (float) [unitless]
    transmission = None
    transmission_unit = ''
    ## Transmission Deviation (float) [unitless]
    transmission_deviation = None
    transmission_deviation_unit = ''

    def __init__(self):
        self.wavelength = []
        self.transmission = []
        self.transmission_deviation = []

    def __str__(self):
        _str = "Transmission Spectrum:\n"
        _str += "   Name:             \t{0}\n".format(self.name)
        _str += "   Timestamp:        \t{0}\n".format(self.timestamp)
        _str += "   Wavelength unit:  \t{0}\n".format(self.wavelength_unit)
        _str += "   Transmission unit:\t{0}\n".format(self.transmission_unit)
        _str += "   Trans. Dev. unit:  \t{0}\n".format(
            self.transmission_deviation_unit)
        length_list = [len(self.wavelength), len(self.transmission),
                       len(self.transmission_deviation)]
        _str += "   Number of Pts:    \t{0}\n".format(max(length_list))
        return _str

class DataInfo(object):
    """
    Class to hold the data read from a file.
    It includes four blocks of data for the
    instrument description, the sample description,
    the data itself and any other meta data.
    """
    ## Title
    title = ''
    ## Run number
    run = None
    ## Run name
    run_name = None
    ## File name
    filename = ''
    ## Notes
    notes = None
    ## Processes (Action on the data)
    process = None
    ## Instrument name
    instrument = ''
    ## Detector information
    detector = None
    ## Sample information
    sample = None
    ## Source information
    source = None
    ## Collimation information
    collimation = None
    ## Transmission Spectrum Info
    trans_spectrum = None
    ## Additional meta-data
    meta_data = None
    ## Loading errors
    errors = None
    ## SESANS data check
    isSesans = None


    def __init__(self):
        """
        Initialization
        """
        ## Title
        self.title = ''
        ## Run number
        self.run = []
        self.run_name = {}
        ## File name
        self.filename = ''
        ## Notes
        self.notes = []
        ## Processes (Action on the data)
        self.process = []
        ## Instrument name
        self.instrument = ''
        ## Detector information
        self.detector = []
        ## Sample information
        self.sample = Sample()
        ## Source information
        self.source = Source()
        ## Collimation information
        self.collimation = []
        ## Transmission Spectrum
        self.trans_spectrum = []
        ## Additional meta-data
        self.meta_data = {}
        ## Loading errors
        self.errors = []
        ## SESANS data check
        self.isSesans = False

    def append_empty_process(self):
        """
        Append an empty Process object to the process list.
        """
        self.process.append(Process())

    def add_notes(self, message=""):
        """
        Add notes to datainfo
        """
        self.notes.append(message)

    def __str__(self):
        """
        Nice printout
        """
        _str = "File:            %s\n" % self.filename
        _str += "Title:           %s\n" % self.title
        _str += "Run:             %s\n" % str(self.run)
        _str += "Instrument:      %s\n" % str(self.instrument)
        _str += "%s\n" % str(self.sample)
        _str += "%s\n" % str(self.source)
        for item in self.detector:
            _str += "%s\n" % str(item)
        for item in self.collimation:
            _str += "%s\n" % str(item)
        for item in self.process:
            _str += "%s\n" % str(item)
        for item in self.notes:
            _str += "%s\n" % str(item)
        for item in self.trans_spectrum:
            _str += "%s\n" % str(item)
        return _str

    # Private method to perform operation. Not implemented for DataInfo,
    # but should be implemented for each data class inherited from DataInfo
    # that holds actual data (ex.: Data1D)
    def _perform_operation(self, other, operation):
        """
        Private method to perform operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def _perform_union(self, other):
        """
        Private method to perform union operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def __add__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a + b
        return self._perform_operation(other, operation)

    def __radd__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b + a
        return self._perform_operation(other, operation)

    def __sub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a - b
        return self._perform_operation(other, operation)

    def __rsub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b - a
        return self._perform_operation(other, operation)

    def __mul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a * b
        return self._perform_operation(other, operation)

    def __rmul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b * a
        return self._perform_operation(other, operation)

    def __div__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a/b
        return self._perform_operation(other, operation)

    def __rdiv__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b/a
        return self._perform_operation(other, operation)

    def __or__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

    def __ror__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

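
# Illustrative usage sketch (documentation only): DataInfo defines the
# operator plumbing, while the arithmetic itself lives in the subclasses.
# For two compatible Data1D objects, "sample - background" ends up in
# Data1D._perform_operation with operation(a, b) = a - b, propagating the
# uncertainties point by point. The helper name is made up, and the scalar
# multiplication assumes the Uncertainty class supports arithmetic with
# plain floats.
def _example_data1d_arithmetic(sample, background):
    # 'sample' and 'background' are assumed to be Data1D objects with
    # matching x vectors; otherwise _validity_check raises ValueError.
    corrected = sample - background
    # Scalar operands are routed through __rmul__ and the same machinery.
    rescaled = 0.5 * corrected
    return rescaled
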
class Data1D(plottable_1D, DataInfo):
    """
    1D data class
    """
    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=None):
        DataInfo.__init__(self)
        plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
        self.isSesans = isSesans
        try:
            if self.isSesans: # the data is SESANS
                self.x_unit = 'A'
                self.y_unit = 'pol'
            elif not self.isSesans: # the data is SANS
                self.x_unit = '1/A'
                self.y_unit = '1/cm'
        except: # the data is not recognized/supported, and the user is notified
            raise TypeError('data not recognized, check documentation for supported 1D data formats')

    def __str__(self):
        """
        Nice printout
        """
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Length:       %g\n" % len(self.x)
        return _str

    def is_slit_smeared(self):
        """
        Check whether the data has slit smearing information
        :return: True if slit smearing info is present, False otherwise
        """
        def _check(v):
            if (v.__class__ == list or v.__class__ == numpy.ndarray) \
                and len(v) > 0 and min(v) > 0:
                return True
            return False
        return _check(self.dxl) or _check(self.dxw)

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data1D):
            x = numpy.zeros(length)
            dx = numpy.zeros(length)
            y = numpy.zeros(length)
            dy = numpy.zeros(length)
            lam = numpy.zeros(length)
            dlam = numpy.zeros(length)
            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)

        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns error vectors equal to the original
        error vectors if they were present, or vectors
        of zeros when none were found.

        :param other: other data set for operation
        :return: dy for self, dy for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        dy_other = None
        if isinstance(other, Data1D):
            # Check that data lengths are the same
            if len(self.x) != len(other.x) or \
                len(self.y) != len(other.y):
                msg = "Unable to perform operation: data lengths are not equal"
                raise ValueError(msg)
            # Here we could also extrapolate between data points
            TOLERANCE = 0.01
            for i in range(len(self.x)):
                if math.fabs((self.x[i] - other.x[i])/self.x[i]) > TOLERANCE:
                    msg = "Incompatible data sets: x-values do not match"
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector
            dy_other = other.dy
            if other.dy is None or (len(other.dy) != len(other.y)):
                dy_other = numpy.zeros(len(other.y))

        # Check that we have errors, otherwise create zero vector
        dy = self.dy
        if self.dy is None or (len(self.dy) != len(self.y)):
            dy = numpy.zeros(len(self.y))

        return dy, dy_other

    def _perform_operation(self, other, operation):
        """
        Perform a binary operation on two compatible Data1D sets,
        propagating uncertainties point by point.
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(len(self.x))
        if self.dxw is None:
            result.dxw = None
        else:
            result.dxw = numpy.zeros(len(self.x))
        if self.dxl is None:
            result.dxl = None
        else:
            result.dxl = numpy.zeros(len(self.x))

        for i in range(len(self.x)):
            result.x[i] = self.x[i]
            if self.dx is not None and len(self.x) == len(self.dx):
                result.dx[i] = self.dx[i]
            if self.dxw is not None and len(self.x) == len(self.dxw):
                result.dxw[i] = self.dxw[i]
            if self.dxl is not None and len(self.x) == len(self.dxl):
                result.dxl[i] = self.dxl[i]

            a = Uncertainty(self.y[i], dy[i]**2)
            if isinstance(other, Data1D):
                b = Uncertainty(other.y[i], dy_other[i]**2)
                if other.dx is not None:
                    result.dx[i] *= self.dx[i]
                    result.dx[i] += (other.dx[i]**2)
                    result.dx[i] /= 2
                    result.dx[i] = math.sqrt(result.dx[i])
                if result.dxl is not None and other.dxl is not None:
                    result.dxl[i] *= self.dxl[i]
                    result.dxl[i] += (other.dxl[i]**2)
                    result.dxl[i] /= 2
                    result.dxl[i] = math.sqrt(result.dxl[i])
            else:
                b = other

            output = operation(a, b)
            result.y[i] = output.x
            result.dy[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the other data set is also a Data1D object,
        so that the two sets can be combined.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data1D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Concatenate two Data1D sets and sort the combined points by x.
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(len(self.x) + len(other.x))
        if self.dy is None or other.dy is None:
            result.dy = None
        else:
            result.dy = numpy.zeros(len(self.x) + len(other.x))
        if self.dx is None or other.dx is None:
            result.dx = None
        else:
            result.dx = numpy.zeros(len(self.x) + len(other.x))
        if self.dxw is None or other.dxw is None:
            result.dxw = None
        else:
            result.dxw = numpy.zeros(len(self.x) + len(other.x))
        if self.dxl is None or other.dxl is None:
            result.dxl = None
        else:
            result.dxl = numpy.zeros(len(self.x) + len(other.x))

        result.x = numpy.append(self.x, other.x)
        # sort the combined points by x
        ind = numpy.argsort(result.x)
        result.x = result.x[ind]
        result.y = numpy.append(self.y, other.y)
        result.y = result.y[ind]
        if result.dy is not None:
            result.dy = numpy.append(self.dy, other.dy)
            result.dy = result.dy[ind]
        if result.dx is not None:
            result.dx = numpy.append(self.dx, other.dx)
            result.dx = result.dx[ind]
        if result.dxw is not None:
            result.dxw = numpy.append(self.dxw, other.dxw)
            result.dxw = result.dxw[ind]
        if result.dxl is not None:
            result.dxl = numpy.append(self.dxl, other.dxl)
            result.dxl = result.dxl[ind]
        return result

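
# Illustrative usage sketch (documentation only): the union operator "|"
# concatenates two Data1D sets and re-sorts the combined points by x, which
# is how partially overlapping Q ranges can be merged into a single curve.
# The helper name is made up for illustration.
def _example_data1d_union(low_q_data, high_q_data):
    # Both arguments are assumed to be Data1D objects; anything else makes
    # _validity_check_union raise ValueError.
    return low_q_data | high_q_data
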
class Data2D(plottable_2D, DataInfo):
    """
    2D data class
    """
    ## Units for Q-values
    Q_unit = '1/A'
    ## Units for I(Q) values
    I_unit = '1/cm'
    ## Vector of Q-values at the center of each bin in x
    x_bins = None
    ## Vector of Q-values at the center of each bin in y
    y_bins = None

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None, isSesans=None):
        DataInfo.__init__(self)
        plottable_2D.__init__(self, data, err_data, qx_data,
                              qy_data, q_data, mask, dqx_data, dqy_data)
        self.y_bins = []
        self.x_bins = []
        self.isSesans = isSesans

        if len(self.detector) > 0:
            raise RuntimeError("Data2D: Detector bank already filled at init")

    def __str__(self):
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X- & Y-axis:  %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
        _str += "   Length:       %g \n" % (len(self.data))
        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
        return _str

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data2D):
            data = numpy.zeros(length)
            err_data = numpy.zeros(length)
            qx_data = numpy.zeros(length)
            qy_data = numpy.zeros(length)
            q_data = numpy.zeros(length)
            mask = numpy.zeros(length)
            dqx_data = None
            dqy_data = None
            clone = Data2D(data=data, err_data=err_data,
                           qx_data=qx_data, qy_data=qy_data,
                           q_data=q_data, mask=mask)

        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the q vectors are compatible.
        Returns error vectors equal to the original
        error vectors if they were present, or vectors
        of zeros when none were found.

        :param other: other data set for operation
        :return: err for self, err for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        err_other = None
        TOLERANCE = 0.01
        if isinstance(other, Data2D):
            # Check that data lengths are the same
            if len(self.data) != len(other.data) or \
                len(self.qx_data) != len(other.qx_data) or \
                len(self.qy_data) != len(other.qy_data):
                msg = "Unable to perform operation: data lengths are not equal"
                raise ValueError(msg)
            for ind in range(len(self.data)):
                if math.fabs((self.qx_data[ind] - other.qx_data[ind])/self.qx_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
                    raise ValueError(msg)
                if math.fabs((self.qy_data[ind] - other.qy_data[ind])/self.qy_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector
            err_other = other.err_data
            if other.err_data is None or \
                (len(other.err_data) != len(other.data)):
                err_other = numpy.zeros(len(other.data))

        # Check that we have errors, otherwise create zero vector
        err = self.err_data
        if self.err_data is None or \
            (len(self.err_data) != len(self.data)):
            err = numpy.zeros(len(self.data))
        return err, err_other

    def _perform_operation(self, other, operation):
        """
        Perform 2D operations between data sets

        :param other: other data set
        :param operation: function defining the operation
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(numpy.size(self.data))
        if self.dqx_data is None or self.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = numpy.zeros(len(self.data))
            result.dqy_data = numpy.zeros(len(self.data))
        for i in range(numpy.size(self.data)):
            result.data[i] = self.data[i]
            if self.err_data is not None and \
                numpy.size(self.data) == numpy.size(self.err_data):
                result.err_data[i] = self.err_data[i]
            if self.dqx_data is not None:
                result.dqx_data[i] = self.dqx_data[i]
            if self.dqy_data is not None:
                result.dqy_data[i] = self.dqy_data[i]
            result.qx_data[i] = self.qx_data[i]
            result.qy_data[i] = self.qy_data[i]
            result.q_data[i] = self.q_data[i]
            result.mask[i] = self.mask[i]

            a = Uncertainty(self.data[i], dy[i]**2)
            if isinstance(other, Data2D):
                b = Uncertainty(other.data[i], dy_other[i]**2)
                if other.dqx_data is not None and \
                        result.dqx_data is not None:
                    result.dqx_data[i] *= self.dqx_data[i]
                    result.dqx_data[i] += (other.dqx_data[i]**2)
                    result.dqx_data[i] /= 2
                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
                if other.dqy_data is not None and \
                        result.dqy_data is not None:
                    result.dqy_data[i] *= self.dqy_data[i]
                    result.dqy_data[i] += (other.dqy_data[i]**2)
                    result.dqy_data[i] /= 2
                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
            else:
                b = other
            output = operation(a, b)
            result.data[i] = output.x
            result.err_data[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the other data set is also a Data2D object,
        so that the two sets can be combined.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data2D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Concatenate two Data2D sets into a single data set.

        :param other: other data set
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(numpy.size(self.data) + \
                                         numpy.size(other.data))
        result.xmin = self.xmin
        result.xmax = self.xmax
        result.ymin = self.ymin
        result.ymax = self.ymax
        if self.dqx_data is None or self.dqy_data is None or \
                other.dqx_data is None or other.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = numpy.zeros(len(self.data) + \
                                          numpy.size(other.data))
            result.dqy_data = numpy.zeros(len(self.data) + \
                                          numpy.size(other.data))

        result.data = numpy.append(self.data, other.data)
        result.qx_data = numpy.append(self.qx_data, other.qx_data)
        result.qy_data = numpy.append(self.qy_data, other.qy_data)
        result.q_data = numpy.append(self.q_data, other.q_data)
        result.mask = numpy.append(self.mask, other.mask)
        if result.err_data is not None:
            result.err_data = numpy.append(self.err_data, other.err_data)
        if self.dqx_data is not None:
            result.dqx_data = numpy.append(self.dqx_data, other.dqx_data)
        if self.dqy_data is not None:
            result.dqy_data = numpy.append(self.dqy_data, other.dqy_data)

        return result

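
# Illustrative usage sketch (documentation only): Data2D supports the same
# arithmetic as Data1D, applied pixel by pixel to the flattened detector
# arrays. The helper name is made up for illustration.
def _example_data2d_subtraction(sample_2d, empty_cell_2d):
    # Both arguments are assumed to be Data2D objects on identical q grids;
    # otherwise _validity_check raises ValueError.
    return sample_2d - empty_cell_2d
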
def combine_data_info_with_plottable(data, datainfo):
    """
    A function that combines a DataInfo object with a plottable_1D or
    plottable_2D data object into a fully specified Data1D or Data2D object.

    :param data: A plottable_1D or plottable_2D data object
    :param datainfo: A DataInfo object holding the metadata for the data set
    :return: A fully specified Data1D or Data2D object
    """

    final_dataset = None
    if isinstance(data, plottable_1D):
        final_dataset = Data1D(data.x, data.y)
        final_dataset.dx = data.dx
        final_dataset.dy = data.dy
        final_dataset.dxl = data.dxl
        final_dataset.dxw = data.dxw
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
    elif isinstance(data, plottable_2D):
        final_dataset = Data2D(data.data, data.err_data, data.qx_data, data.qy_data, data.q_data,
                               data.mask, data.dqx_data, data.dqy_data)
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
        final_dataset.zaxis(data._zaxis, data._zunit)
        final_dataset.x_bins = data.x_bins
        final_dataset.y_bins = data.y_bins
    else:
        return_string = "Should Never Happen: _combine_data_info_with_plottable input is not a plottable1d or " + \
                        "plottable2d data object"
        return return_string

    final_dataset.xmax = data.xmax
    final_dataset.ymax = data.ymax
    final_dataset.xmin = data.xmin
    final_dataset.ymin = data.ymin
    final_dataset.title = datainfo.title
    final_dataset.run = datainfo.run
    final_dataset.run_name = datainfo.run_name
    final_dataset.filename = datainfo.filename
    final_dataset.notes = datainfo.notes
    final_dataset.process = datainfo.process
    final_dataset.instrument = datainfo.instrument
    final_dataset.detector = datainfo.detector
    final_dataset.sample = datainfo.sample
    final_dataset.source = datainfo.source
    final_dataset.collimation = datainfo.collimation
    final_dataset.trans_spectrum = datainfo.trans_spectrum
    final_dataset.meta_data = datainfo.meta_data
    final_dataset.errors = datainfo.errors
    return final_dataset
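

# Illustrative usage sketch (documentation only): readers typically build a
# bare plottable plus a separate DataInfo block and then merge them. The
# helper name, file name and arrays below are made up. Note that the function
# above copies xmin/xmax/ymin/ymax from the plottable, so they must be set
# before calling it.
def _example_combine():
    raw = plottable_1D(numpy.linspace(0.01, 0.3, 10), numpy.ones(10))
    raw.xmin, raw.xmax = raw.x.min(), raw.x.max()
    raw.ymin, raw.ymax = raw.y.min(), raw.y.max()
    info = DataInfo()
    info.filename = "example_data.xml"      # hypothetical file name
    info.title = "illustrative data set"
    return combine_data_info_with_plottable(raw, info)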