source: sasview/src/sas/sascalc/dataloader/data_info.py @ 51a4d78

ESS_GUIESS_GUI_DocsESS_GUI_batch_fittingESS_GUI_bumps_abstractionESS_GUI_iss1116ESS_GUI_iss879ESS_GUI_iss959ESS_GUI_openclESS_GUI_orderingESS_GUI_sync_sascalccostrafo411magnetic_scattrelease-4.1.1release-4.1.2release-4.2.2ticket-1009ticket-1094-headlessticket-1242-2d-resolutionticket-1243ticket-1249ticket885unittest-saveload
Last change on this file since 51a4d78 was 51a4d78, checked in by jhbakker, 8 years ago

Merge branch 'ajj_sesans' into Jurrian1D

  • Property mode set to 100644
File size: 40.7 KB
Line 
1"""
2    Module that contains classes to hold information read from
3    reduced data files.
4
5    A good description of the data members can be found in
6    the CanSAS 1D XML data format:
7
8    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
9"""
10#####################################################################
11#This software was developed by the University of Tennessee as part of the
12#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
13#project funded by the US National Science Foundation.
14#See the license text in license.txt
15#copyright 2008, University of Tennessee
16######################################################################
17
18
19#TODO: Keep track of data manipulation in the 'process' data structure.
20#TODO: This module should be independent of plottables. We should write
21#        an adapter class for plottables when needed.
22
23#from sas.guitools.plottables import Data1D as plottable_1D
24from sas.sascalc.data_util.uncertainty import Uncertainty
25import numpy
26import math
27
class plottable_1D(object):
    """
    Data1D is a place holder for 1D plottables.
    """
    # Independent/dependent variables and their uncertainties.
    # The presence of these should be mutually
    # exclusive with the presence of Qdev (dx)
    x = None
    y = None
    dx = None
    dy = None
    ## Slit smearing length
    dxl = None
    ## Slit smearing width
    dxw = None

    ## SESANS specific params (wavelengths for spin echo length calculation)
    lam = None
    dlam = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''

    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
        # x and y are mandatory and always converted; the optional arrays
        # are converted only when supplied, so the class-level None default
        # is preserved otherwise.
        self.x = numpy.asarray(x)
        self.y = numpy.asarray(y)
        optional = (('dx', dx), ('dy', dy), ('dxl', dxl),
                    ('dxw', dxw), ('lam', lam), ('dlam', dlam))
        for attr_name, value in optional:
            if value is not None:
                setattr(self, attr_name, numpy.asarray(value))

    def xaxis(self, label, unit):
        """
        Set the x axis label and unit.
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        Set the y axis label and unit.
        """
        self._yaxis = label
        self._yunit = unit
84
class plottable_2D(object):
    """
    Data2D is a place holder for 2D plottables.
    """
    xmin = None
    xmax = None
    ymin = None
    ymax = None
    data = None
    qx_data = None
    qy_data = None
    q_data = None
    err_data = None
    dqx_data = None
    dqy_data = None
    mask = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''
    _zaxis = ''
    _zunit = ''

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        """
        Store the 2D arrays, converting each supplied argument to a
        numpy array.

        Arguments left as None keep the class-level None default:
        numpy.asarray(None) would otherwise create a 0-d object array,
        which silently breaks later "is None" checks.  This also makes
        the behavior consistent with plottable_1D, which already guards
        its optional arguments.
        """
        if data is not None:
            self.data = numpy.asarray(data)
        if qx_data is not None:
            self.qx_data = numpy.asarray(qx_data)
        if qy_data is not None:
            self.qy_data = numpy.asarray(qy_data)
        if q_data is not None:
            self.q_data = numpy.asarray(q_data)
        if mask is not None:
            self.mask = numpy.asarray(mask)
        if err_data is not None:
            self.err_data = numpy.asarray(err_data)
        if dqx_data is not None:
            self.dqx_data = numpy.asarray(dqx_data)
        if dqy_data is not None:
            self.dqy_data = numpy.asarray(dqy_data)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit

    def zaxis(self, label, unit):
        """
        set the z axis label and unit
        """
        self._zaxis = label
        self._zunit = unit
145
class Vector(object):
    """
    Vector class to hold multi-dimensional objects
    """
    # x, y and z components; any component not supplied stays None.
    x = None
    y = None
    z = None

    def __init__(self, x=None, y=None, z=None):
        """
        Initialization. Components that are not given
        are set to None by default.

        :param x: x component
        :param y: y component
        :param z: z component
        """
        self.x, self.y, self.z = x, y, z

    def __str__(self):
        return "x = %s\ty = %s\tz = %s" % (self.x, self.y, self.z)
173
174
class Detector(object):
    """
    Class to hold detector information
    """
    ## Name of the instrument [string]
    name = None
    ## Sample to detector distance [float] [mm]
    distance = None
    distance_unit = 'mm'
    ## Offset of this detector position in X, Y,
    # (and Z if necessary) [Vector] [mm]
    offset = None
    offset_unit = 'm'
    ## Orientation (rotation) of this detector in roll,
    # pitch, and yaw [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Center of the beam on the detector in X and Y
    # (and Z if necessary) [Vector] [mm]
    beam_center = None
    beam_center_unit = 'mm'
    ## Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
    pixel_size = None
    pixel_size_unit = 'mm'
    ## Slit length of the instrument for this detector [float] [mm]
    slit_length = None
    slit_length_unit = 'mm'

    def __init__(self):
        """
        Create the Vector-valued attributes per instance so they
        are not shared through the class.
        """
        self.offset = Vector()
        self.orientation = Vector()
        self.beam_center = Vector()
        self.pixel_size = Vector()

    def __str__(self):
        rows = ["Detector:"]
        rows.append("   Name:         %s" % self.name)
        rows.append("   Distance:     %s [%s]" % (self.distance, self.distance_unit))
        rows.append("   Offset:       %s [%s]" % (self.offset, self.offset_unit))
        rows.append("   Orientation:  %s [%s]" % (self.orientation, self.orientation_unit))
        rows.append("   Beam center:  %s [%s]" % (self.beam_center, self.beam_center_unit))
        rows.append("   Pixel size:   %s [%s]" % (self.pixel_size, self.pixel_size_unit))
        rows.append("   Slit length:  %s [%s]" % (self.slit_length, self.slit_length_unit))
        return "\n".join(rows) + "\n"
228
229
class Aperture(object):
    """
    Class to hold aperture information
    """
    ## Name
    name = None
    ## Type
    type = None
    ## Size name
    size_name = None
    ## Aperture size [Vector]
    size = None
    size_unit = 'mm'
    ## Aperture distance [float]
    distance = None
    distance_unit = 'mm'

    def __init__(self):
        # Give each instance its own size Vector.
        self.size = Vector()
246
247
class Collimation(object):
    """
    Class to hold collimation information
    """
    ## Name
    name = None
    ## Length [float] [mm]
    length = None
    length_unit = 'mm'
    ## List of Aperture objects
    aperture = None

    def __init__(self):
        # Per-instance aperture list (a class-level list would be shared).
        self.aperture = []

    def __str__(self):
        pieces = ["Collimation:\n"]
        pieces.append("   Length:       %s [%s]\n" % (self.length, self.length_unit))
        for ap in self.aperture:
            pieces.append("   Aperture size:%s [%s]\n" % (ap.size, ap.size_unit))
            pieces.append("   Aperture_dist:%s [%s]\n" % (ap.distance, ap.distance_unit))
        return "".join(pieces)
273
274
class Source(object):
    """
    Class to hold source information
    """
    ## Name
    name = None
    ## Radiation type [string]
    radiation = None
    ## Beam size name
    beam_size_name = None
    ## Beam size [Vector] [mm]
    beam_size = None
    beam_size_unit = 'mm'
    ## Beam shape [string]
    beam_shape = None
    ## Wavelength [float] [Angstrom]
    wavelength = None
    wavelength_unit = 'A'
    ## Minimum wavelength [float] [Angstrom]
    wavelength_min = None
    wavelength_min_unit = 'nm'
    ## Maximum wavelength [float] [Angstrom]
    wavelength_max = None
    wavelength_max_unit = 'nm'
    ## Wavelength spread [float] [Angstrom]
    wavelength_spread = None
    wavelength_spread_unit = 'percent'

    def __init__(self):
        # Beam size is Vector-valued, so build one per instance.
        self.beam_size = Vector()

    def __str__(self):
        fragments = [
            "Source:\n",
            "   Radiation:    %s\n" % self.radiation,
            "   Shape:        %s\n" % self.beam_shape,
            "   Wavelength:   %s [%s]\n" % (self.wavelength, self.wavelength_unit),
            "   Waveln_min:   %s [%s]\n" % (self.wavelength_min, self.wavelength_min_unit),
            "   Waveln_max:   %s [%s]\n" % (self.wavelength_max, self.wavelength_max_unit),
            "   Waveln_spread:%s [%s]\n" % (self.wavelength_spread, self.wavelength_spread_unit),
            "   Beam_size:    %s [%s]\n" % (self.beam_size, self.beam_size_unit),
        ]
        return "".join(fragments)
321
322
# Definitions of radiation types.
# (Was a module-level bare triple-quoted string, which is a no-op
# expression statement rather than a comment.)
NEUTRON = 'neutron'
XRAY = 'x-ray'
MUON = 'muon'
ELECTRON = 'electron'
330
331
class Sample(object):
    """
    Class to hold the sample description
    """
    ## Short name for sample
    name = ''
    ## ID
    ID = ''
    ## Thickness [float] [mm]
    thickness = None
    thickness_unit = 'mm'
    ## Transmission [float] [fraction]
    transmission = None
    ## Temperature [float] [No Default]
    temperature = None
    temperature_unit = None
    ## Position [Vector] [mm]
    position = None
    position_unit = 'mm'
    ## Orientation [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Details
    details = None

    def __init__(self):
        # Vector- and list-valued attributes are created per instance.
        self.position = Vector()
        self.orientation = Vector()
        self.details = []

    def __str__(self):
        chunks = [
            "Sample:\n",
            "   ID:           %s\n" % self.ID,
            "   Transmission: %s\n" % self.transmission,
            "   Thickness:    %s [%s]\n" % (self.thickness, self.thickness_unit),
            "   Temperature:  %s [%s]\n" % (self.temperature, self.temperature_unit),
            "   Position:     %s [%s]\n" % (self.position, self.position_unit),
            "   Orientation:  %s [%s]\n" % (self.orientation, self.orientation_unit),
            "   Details:\n",
        ]
        chunks.extend("      %s\n" % item for item in self.details)
        return "".join(chunks)
380
381
class Process(object):
    """
    Class that holds information about the processes
    performed on the data.
    """
    name = ''
    date = ''
    description = ''
    term = None
    notes = None

    def __init__(self):
        # term and notes are lists; created here so instances don't share them.
        self.term = []
        self.notes = []

    def is_empty(self):
        """
        Return True if the object is empty
        """
        fields = (self.name, self.date, self.description,
                  self.term, self.notes)
        return all(len(field) == 0 for field in fields)

    def single_line_desc(self):
        """
        Return a single line string representing the process
        """
        return "%s %s %s" % (self.name, self.date, self.description)

    def __str__(self):
        out = "Process:\n"
        out += "   Name:         %s\n" % self.name
        out += "   Date:         %s\n" % self.date
        out += "   Description:  %s\n" % self.description
        for term_item in self.term:
            out += "   Term:         %s\n" % term_item
        for note in self.notes:
            out += "   Note:         %s\n" % note
        return out
420
421
class TransmissionSpectrum(object):
    """
    Class that holds information about transmission spectrum
    for white beams and spallation sources.
    """
    name = ''
    timestamp = ''
    ## Wavelength (float) [A]
    wavelength = None
    wavelength_unit = 'A'
    ## Transmission (float) [unit less]
    transmission = None
    transmission_unit = ''
    ## Transmission Deviation (float) [unit less]
    transmission_deviation = None
    transmission_deviation_unit = ''

    def __init__(self):
        # Per-instance spectrum arrays.
        self.wavelength = []
        self.transmission = []
        self.transmission_deviation = []

    def __str__(self):
        # Report the longest of the three arrays as the point count.
        count = max(len(self.wavelength), len(self.transmission),
                    len(self.transmission_deviation))
        out = "Transmission Spectrum:\n"
        out += "   Name:             \t%s\n" % self.name
        out += "   Timestamp:        \t%s\n" % self.timestamp
        out += "   Wavelength unit:  \t%s\n" % self.wavelength_unit
        out += "   Transmission unit:\t%s\n" % self.transmission_unit
        out += "   Trans. Dev. unit:  \t%s\n" % self.transmission_deviation_unit
        out += "   Number of Pts:    \t%s\n" % count
        return out
456
457
class DataInfo(object):
    """
    Class to hold the data read from a file.
    It includes four blocks of data for the
    instrument description, the sample description,
    the data itself and any other meta data.
    """
    ## Title
    title = ''
    ## Run number
    run = None
    ## Run name
    run_name = None
    ## File name
    filename = ''
    ## Notes
    notes = None
    ## Processes (Action on the data)
    process = None
    ## Instrument name
    instrument = ''
    ## Detector information
    detector = None
    ## Sample information
    sample = None
    ## Source information
    source = None
    ## Collimation information
    collimation = None
    ## Transmission Spectrum Info
    trans_spectrum = None
    ## Additional meta-data
    meta_data = None
    ## Loading errors
    errors = None

    def __init__(self):
        """
        Initialization
        """
        ## Title
        self.title = ''
        ## Run number
        self.run = []
        self.run_name = {}
        ## File name
        self.filename = ''
        ## Notes
        self.notes = []
        ## Processes (Action on the data)
        self.process = []
        ## Instrument name
        self.instrument = ''
        ## Detector information
        self.detector = []
        ## Sample information
        self.sample = Sample()
        ## Source information
        self.source = Source()
        ## Collimation information
        self.collimation = []
        ## Transmission Spectrum
        self.trans_spectrum = []
        ## Additional meta-data
        self.meta_data = {}
        ## Loading errors
        self.errors = []

    def append_empty_process(self):
        """
        Append a new, empty Process to the process list.
        """
        self.process.append(Process())

    def add_notes(self, message=""):
        """
        Add notes to datainfo
        """
        self.notes.append(message)

    def __str__(self):
        """
        Nice printout
        """
        _str = "File:            %s\n" % self.filename
        _str += "Title:           %s\n" % self.title
        _str += "Run:             %s\n" % str(self.run)
        _str += "Instrument:      %s\n" % str(self.instrument)
        _str += "%s\n" % str(self.sample)
        _str += "%s\n" % str(self.source)
        for item in self.detector:
            _str += "%s\n" % str(item)
        for item in self.collimation:
            _str += "%s\n" % str(item)
        for item in self.process:
            _str += "%s\n" % str(item)
        for item in self.notes:
            _str += "%s\n" % str(item)
        for item in self.trans_spectrum:
            _str += "%s\n" % str(item)
        return _str

    # Private method to perform operation. Not implemented for DataInfo,
    # but should be implemented for each data class inherited from DataInfo
    # that holds actual data (ex.: Data1D)
    def _perform_operation(self, other, operation):
        """
        Private method to perform operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def _perform_union(self, other):
        """
        Private method to perform union operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def __add__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a + b
        return self._perform_operation(other, operation)

    def __radd__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b + a
        return self._perform_operation(other, operation)

    def __sub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a - b
        return self._perform_operation(other, operation)

    def __rsub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b - a
        return self._perform_operation(other, operation)

    def __mul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a * b
        return self._perform_operation(other, operation)

    def __rmul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b * a
        return self._perform_operation(other, operation)

    def __div__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a/b
        return self._perform_operation(other, operation)

    # Python 3 dispatches the / operator to __truediv__; alias it so the
    # class behaves the same under both interpreters.
    __truediv__ = __div__

    def __rdiv__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b/a
        return self._perform_operation(other, operation)

    __rtruediv__ = __rdiv__

    def __or__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

    def __ror__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)
693
class Data1D(plottable_1D, DataInfo):
    """
    1D data class

    Holds either SANS data (Q [1/A] vs I(Q) [1/cm]) or, when a wavelength
    array ``lam`` is supplied, SESANS data (spin-echo length [A] vs
    polarization).
    """
    # Class-level defaults are the SANS units.  plottable_1D.lam is None at
    # class-definition time, so the original class-level if/elif/else always
    # took this branch; the unreachable branches (including a malformed
    # ``raise(TypeError, ...)``) have been removed.
    x_unit = '1/A'
    y_unit = '1/cm'

    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None):
        DataInfo.__init__(self)
        plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
        # Choose per-instance units from the presence of the wavelength
        # array: lam absent means SANS data, lam present means SESANS data.
        # BUGFIX: the original assigned to *local* variables x_unit/y_unit
        # here, which had no effect; assign to the instance instead.
        if self.lam is None:
            self.x_unit = '1/A'
            self.y_unit = '1/cm'
        else:
            self.x_unit = 'A'
            self.y_unit = 'pol'

    def __str__(self):
        """
        Nice printout
        """
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Length:       %g\n" % len(self.x)
        return _str

    def is_slit_smeared(self):
        """
        Check whether the data has slit smearing information
        :return: True is slit smearing info is present, False otherwise
        """
        def _check(v):
            # A valid slit-smearing vector is a non-empty list/array of
            # strictly positive values.
            if (v.__class__ == list or v.__class__ == numpy.ndarray) \
                and len(v) > 0 and min(v) > 0:
                return True
            return False
        return _check(self.dxl) or _check(self.dxw)

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data1D):
            x = numpy.zeros(length)
            dx = numpy.zeros(length)
            y = numpy.zeros(length)
            dy = numpy.zeros(length)
            lam = numpy.zeros(length)
            dlam = numpy.zeros(length)
            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)

        # Copy the metadata (deep-copied where mutable) but not the data.
        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: dy for self, dy for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        dy_other = None
        if isinstance(other, Data1D):
            # Check that data lengths are the same
            if len(self.x) != len(other.x) or \
                len(self.y) != len(other.y):
                msg = "Unable to perform operation: data length are not equal"
                raise ValueError(msg)
            # Here we could also extrapolate between data points
            TOLERANCE = 0.01
            for i in range(len(self.x)):
                if math.fabs((self.x[i] - other.x[i])/self.x[i]) > TOLERANCE:
                    msg = "Incompatible data sets: x-values do not match"
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector.  NOTE: "== None" on a numpy array is an
            # elementwise comparison; "is None" is the correct check.
            dy_other = other.dy
            if other.dy is None or (len(other.dy) != len(other.y)):
                dy_other = numpy.zeros(len(other.y))

        # Check that we have errors, otherwise create zero vector
        dy = self.dy
        if self.dy is None or (len(self.dy) != len(self.y)):
            dy = numpy.zeros(len(self.y))

        return dy, dy_other

    def _perform_operation(self, other, operation):
        """
        Apply *operation* point-by-point, propagating uncertainties via
        the Uncertainty class.

        :param other: Data1D operand or scalar
        :param operation: callable taking the two operands (a, b)
        :return: new Data1D with combined values and errors
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(len(self.x))
        # "is None" instead of "== None": the latter is elementwise on arrays.
        if self.dxw is None:
            result.dxw = None
        else:
            result.dxw = numpy.zeros(len(self.x))
        if self.dxl is None:
            result.dxl = None
        else:
            result.dxl = numpy.zeros(len(self.x))

        for i in range(len(self.x)):
            result.x[i] = self.x[i]
            if self.dx is not None and len(self.x) == len(self.dx):
                result.dx[i] = self.dx[i]
            if self.dxw is not None and len(self.x) == len(self.dxw):
                result.dxw[i] = self.dxw[i]
            if self.dxl is not None and len(self.x) == len(self.dxl):
                result.dxl[i] = self.dxl[i]

            a = Uncertainty(self.y[i], dy[i]**2)
            if isinstance(other, Data1D):
                b = Uncertainty(other.y[i], dy_other[i]**2)
                if other.dx is not None:
                    # Combine resolutions as the RMS of the two values.
                    result.dx[i] *= self.dx[i]
                    result.dx[i] += (other.dx[i]**2)
                    result.dx[i] /= 2
                    result.dx[i] = math.sqrt(result.dx[i])
                if result.dxl is not None and other.dxl is not None:
                    result.dxl[i] *= self.dxl[i]
                    result.dxl[i] += (other.dxl[i]**2)
                    result.dxl[i] /= 2
                    result.dxl[i] = math.sqrt(result.dxl[i])
            else:
                b = other

            output = operation(a, b)
            result.y[i] = output.x
            result.dy[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data1D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Concatenate *other* onto this data set and sort the result by x.

        :param other: Data1D to union with the current one
        :return: new Data1D containing both data sets
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(len(self.x) + len(other.x))
        # An uncertainty column survives only if BOTH operands carry it.
        if self.dy is None or other.dy is None:
            result.dy = None
        else:
            result.dy = numpy.zeros(len(self.x) + len(other.x))
        if self.dx is None or other.dx is None:
            result.dx = None
        else:
            result.dx = numpy.zeros(len(self.x) + len(other.x))
        if self.dxw is None or other.dxw is None:
            result.dxw = None
        else:
            result.dxw = numpy.zeros(len(self.x) + len(other.x))
        if self.dxl is None or other.dxl is None:
            result.dxl = None
        else:
            result.dxl = numpy.zeros(len(self.x) + len(other.x))

        result.x = numpy.append(self.x, other.x)
        # Sort the concatenated arrays by x.
        ind = numpy.argsort(result.x)
        result.x = result.x[ind]
        result.y = numpy.append(self.y, other.y)
        result.y = result.y[ind]
        if result.dy is not None:
            result.dy = numpy.append(self.dy, other.dy)
            result.dy = result.dy[ind]
        if result.dx is not None:
            result.dx = numpy.append(self.dx, other.dx)
            result.dx = result.dx[ind]
        if result.dxw is not None:
            result.dxw = numpy.append(self.dxw, other.dxw)
            result.dxw = result.dxw[ind]
        if result.dxl is not None:
            result.dxl = numpy.append(self.dxl, other.dxl)
            result.dxl = result.dxl[ind]
        return result
922
923
class Data2D(plottable_2D, DataInfo):
    """
    2D data class
    """
    ## Units for Q-values
    Q_unit = '1/A'
    ## Units for I(Q) values
    I_unit = '1/cm'
    ## Vector of Q-values at the center of each bin in x
    x_bins = None
    ## Vector of Q-values at the center of each bin in y
    y_bins = None

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        self.y_bins = []
        self.x_bins = []
        DataInfo.__init__(self)
        plottable_2D.__init__(self, data, err_data, qx_data,
                              qy_data, q_data, mask, dqx_data, dqy_data)
        if len(self.detector) > 0:
            raise RuntimeError("Data2D: Detector bank already filled at init")

    def __str__(self):
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X- & Y-axis:  %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
        _str += "   Length:       %g \n" % (len(self.data))
        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
        return _str

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data2D):
            data = numpy.zeros(length)
            err_data = numpy.zeros(length)
            qx_data = numpy.zeros(length)
            qy_data = numpy.zeros(length)
            q_data = numpy.zeros(length)
            mask = numpy.zeros(length)
            dqx_data = None
            dqy_data = None
            clone = Data2D(data=data, err_data=err_data,
                           qx_data=qx_data, qy_data=qy_data,
                           q_data=q_data, mask=mask)

        # Copy all metadata; deep-copy mutable containers so the clone
        # does not share state with the original.
        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the q vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: err for self, err for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        err_other = None
        TOLERANCE = 0.01
        if isinstance(other, Data2D):
            # Check that data lengths are the same
            if len(self.data) != len(other.data) or \
                len(self.qx_data) != len(other.qx_data) or \
                len(self.qy_data) != len(other.qy_data):
                msg = "Unable to perform operation: data length are not equal"
                raise ValueError(msg)
            # Check that the q values match point by point (within tolerance)
            for ind in range(len(self.data)):
                if math.fabs((self.qx_data[ind] - other.qx_data[ind])/self.qx_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
                    raise ValueError(msg)
                if math.fabs((self.qy_data[ind] - other.qy_data[ind])/self.qy_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
                    raise ValueError(msg)

            # Check that the scales match
            err_other = other.err_data
            if other.err_data is None or \
                (len(other.err_data) != len(other.data)):
                err_other = numpy.zeros(len(other.data))

        # Check that we have errors, otherwise create zero vector.
        # BUGFIX: the zero vector for *self* must be sized from self.data,
        # not other.data (other may be a scalar with no .data attribute).
        err = self.err_data
        if self.err_data is None or \
            (len(self.err_data) != len(self.data)):
            err = numpy.zeros(len(self.data))
        return err, err_other

    def _perform_operation(self, other, operation):
        """
        Perform 2D operations between data sets

        :param other: other data set (Data2D or scalar)
        :param operation: function defining the operation
        :return: new Data2D holding the result
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(numpy.size(self.data))
        # Identity checks: numpy arrays must not be compared to None
        # with == (elementwise comparison).
        if self.dqx_data is None or self.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = numpy.zeros(len(self.data))
            result.dqy_data = numpy.zeros(len(self.data))
        for i in range(numpy.size(self.data)):
            result.data[i] = self.data[i]
            if self.err_data is not None and \
                numpy.size(self.data) == numpy.size(self.err_data):
                result.err_data[i] = self.err_data[i]
            if self.dqx_data is not None:
                result.dqx_data[i] = self.dqx_data[i]
            if self.dqy_data is not None:
                result.dqy_data[i] = self.dqy_data[i]
            result.qx_data[i] = self.qx_data[i]
            result.qy_data[i] = self.qy_data[i]
            result.q_data[i] = self.q_data[i]
            result.mask[i] = self.mask[i]

            # Propagate uncertainties through the operation
            a = Uncertainty(self.data[i], dy[i]**2)
            if isinstance(other, Data2D):
                b = Uncertainty(other.data[i], dy_other[i]**2)
                # Combine the q resolutions as the RMS of the two values
                if other.dqx_data is not None and \
                        result.dqx_data is not None:
                    result.dqx_data[i] *= self.dqx_data[i]
                    result.dqx_data[i] += (other.dqx_data[i]**2)
                    result.dqx_data[i] /= 2
                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
                if other.dqy_data is not None and \
                        result.dqy_data is not None:
                    result.dqy_data[i] *= self.dqy_data[i]
                    result.dqy_data[i] += (other.dqy_data[i]**2)
                    result.dqy_data[i] /= 2
                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
            else:
                b = other
            output = operation(a, b)
            result.data[i] = output.x
            result.err_data[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that *other* is a Data2D so a union can be performed.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data2D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Combine this data set with another Data2D into a single set.

        :param other: other data set to merge with this one
        :return: new Data2D containing the union of both data sets
        :raise ValueError: when data types are not compatible
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(numpy.size(self.data) + \
                                         numpy.size(other.data))
        result.xmin = self.xmin
        result.xmax = self.xmax
        result.ymin = self.ymin
        result.ymax = self.ymax
        # Resolution columns survive the union only when both operands
        # carry them (identity checks -- numpy arrays vs None).
        if self.dqx_data is None or self.dqy_data is None or \
                other.dqx_data is None or other.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = numpy.zeros(len(self.data) + \
                                         numpy.size(other.data))
            result.dqy_data = numpy.zeros(len(self.data) + \
                                         numpy.size(other.data))

        result.data = numpy.append(self.data, other.data)
        result.qx_data = numpy.append(self.qx_data, other.qx_data)
        result.qy_data = numpy.append(self.qy_data, other.qy_data)
        result.q_data = numpy.append(self.q_data, other.q_data)
        result.mask = numpy.append(self.mask, other.mask)
        if result.err_data is not None:
            result.err_data = numpy.append(self.err_data, other.err_data)
        # BUGFIX: guard on result.dqx/dqy (set above only when BOTH sides
        # have resolution); guarding on self alone could append a None
        # from other and silently produce an object array.
        if result.dqx_data is not None:
            result.dqx_data = numpy.append(self.dqx_data, other.dqx_data)
        if result.dqy_data is not None:
            result.dqy_data = numpy.append(self.dqy_data, other.dqy_data)

        return result
1147
1148
def combine_data_info_with_plottable(data, datainfo):
    """
    Merge the metadata held in a DataInfo object with a plottable_1D or
    plottable_2D data object.

    :param data: A plottable_1D or plottable_2D data object
    :param datainfo: A DataInfo object whose metadata is copied onto the result
    :return: A fully specified Data1D or Data2D object
    """
    if isinstance(data, plottable_1D):
        final_dataset = Data1D(data.x, data.y)
        # Carry over the 1D uncertainty and slit-smearing columns
        for name in ("dx", "dy", "dxl", "dxw"):
            setattr(final_dataset, name, getattr(data, name))
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
    elif isinstance(data, plottable_2D):
        final_dataset = Data2D(data.data, data.err_data, data.qx_data,
                               data.qy_data, data.q_data, data.mask,
                               data.dqx_data, data.dqy_data)
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
        final_dataset.zaxis(data._zaxis, data._zunit)
        final_dataset.x_bins = data.x_bins
        final_dataset.y_bins = data.y_bins
    else:
        return_string = "Should Never Happen: _combine_data_info_with_plottable input is not a plottable1d or " + \
                        "plottable2d data object"
        return return_string

    # Plot limits come from the plottable ...
    for name in ("xmax", "ymax", "xmin", "ymin"):
        setattr(final_dataset, name, getattr(data, name))
    # ... everything else comes from the DataInfo metadata.
    for name in ("title", "run", "run_name", "filename", "notes", "process",
                 "instrument", "detector", "sample", "source", "collimation",
                 "trans_spectrum", "meta_data", "errors"):
        setattr(final_dataset, name, getattr(datainfo, name))
    return final_dataset
Note: See TracBrowser for help on using the repository browser.