source: sasview/src/sas/sascalc/dataloader/data_info.py @ 9a1204d

Branches: magnetic_scatt, release-4.2.2, ticket-1009, ticket-1242-2d-resolution, ticket-1249
Last change on this file since 9a1204d was 8c9e65c, checked in by GitHub <noreply@…>, 6 years ago

py37 support for sascalc. Refs #888 and #1233.

  • Property mode set to 100644
File size: 40.1 KB
Line 
1"""
2    Module that contains classes to hold information read from
3    reduced data files.
4
5    A good description of the data members can be found in
6    the CanSAS 1D XML data format:
7
8    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
9"""
10#####################################################################
11#This software was developed by the University of Tennessee as part of the
12#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
13#project funded by the US National Science Foundation.
14#See the license text in license.txt
15#copyright 2008, University of Tennessee
16######################################################################
17
18from __future__ import print_function
19
20#TODO: Keep track of data manipulation in the 'process' data structure.
21#TODO: This module should be independent of plottables. We should write
22#        an adapter class for plottables when needed.
23
24#from sas.guitools.plottables import Data1D as plottable_1D
25from sas.sascalc.data_util.uncertainty import Uncertainty
26import numpy as np
27import math
28from math import fabs
29
class plottable_1D(object):
    """
    Placeholder for 1D plottable data: x/y arrays plus optional
    uncertainty and smearing information.
    """
    # Independent/dependent variables and their uncertainties.
    # (Original note: presence of dxl/dxw should be mutually exclusive
    # with the presence of Qdev (dx).)
    x = None
    y = None
    dx = None
    dy = None
    # Slit smearing length
    dxl = None
    # Slit smearing width
    dxw = None
    # SESANS specific params (wavelengths for spin echo length calculation)
    lam = None
    dlam = None

    # Axis labels and units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''

    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None,
                 lam=None, dlam=None):
        self.x = np.asarray(x)
        self.y = np.asarray(y)
        # Convert only the optional arrays that were supplied, so the
        # class-level None defaults survive otherwise.
        for attr, value in (("dx", dx), ("dy", dy), ("dxl", dxl),
                            ("dxw", dxw), ("lam", lam), ("dlam", dlam)):
            if value is not None:
                setattr(self, attr, np.asarray(value))

    def xaxis(self, label, unit):
        """
        Set the x axis label and unit.
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        Set the y axis label and unit.
        """
        self._yaxis = label
        self._yunit = unit
84
class plottable_2D(object):
    """
    Placeholder for 2D plottable data: intensity, Q maps, resolution
    and mask arrays.
    """
    xmin = None
    xmax = None
    ymin = None
    ymax = None
    data = None
    qx_data = None
    qy_data = None
    q_data = None
    err_data = None
    dqx_data = None
    dqy_data = None
    mask = None

    # Axis labels and units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''
    _zaxis = ''
    _zunit = ''

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        """
        Convert any supplied arrays with np.asarray; arguments left as
        None keep the class-level None defaults.

        Fix: previously data/err_data/qx_data/qy_data/q_data/mask were
        passed through np.asarray() unconditionally, so an unsupplied
        argument became a 0-d object array (np.asarray(None)) instead of
        staying None — inconsistent with the already-guarded
        dqx_data/dqy_data.  All inputs are now guarded the same way.
        """
        if data is not None:
            self.data = np.asarray(data)
        if qx_data is not None:
            self.qx_data = np.asarray(qx_data)
        if qy_data is not None:
            self.qy_data = np.asarray(qy_data)
        if q_data is not None:
            self.q_data = np.asarray(q_data)
        if mask is not None:
            self.mask = np.asarray(mask)
        if err_data is not None:
            self.err_data = np.asarray(err_data)
        if dqx_data is not None:
            self.dqx_data = np.asarray(dqx_data)
        if dqy_data is not None:
            self.dqy_data = np.asarray(dqy_data)

    def xaxis(self, label, unit):
        """
        Set the x axis label and unit.
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        Set the y axis label and unit.
        """
        self._yaxis = label
        self._yunit = unit

    def zaxis(self, label, unit):
        """
        Set the z axis label and unit.
        """
        self._zaxis = label
        self._zunit = unit
145
class Vector(object):
    """
    Simple three-component vector used for multi-dimensional quantities
    (positions, sizes, orientations, ...).
    """
    x = None  # x component
    y = None  # y component
    z = None  # z component

    def __init__(self, x=None, y=None, z=None):
        """
        Store the components; any component not given stays None.

        :param x: x component
        :param y: y component
        :param z: z component
        """
        self.x = x
        self.y = y
        self.z = z

    def __str__(self):
        return "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
174
class Detector(object):
    """
    Class to hold detector information
    """
    # Name of the instrument [string]
    name = None
    # Sample to detector distance [float] [mm]
    distance = None
    distance_unit = 'mm'
    # Offset of this detector position in X, Y (and Z if necessary) [Vector]
    # NOTE(review): default unit is 'm' while the other lengths default to
    # 'mm' -- confirm this asymmetry is intentional.
    offset = None
    offset_unit = 'm'
    # Orientation (rotation) of this detector in roll, pitch and yaw
    # [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    # Center of the beam on the detector in X and Y (and Z if necessary)
    # [Vector] [mm]
    beam_center = None
    beam_center_unit = 'mm'
    # Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
    pixel_size = None
    pixel_size_unit = 'mm'
    # Slit length of the instrument for this detector [float] [mm]
    slit_length = None
    slit_length_unit = 'mm'

    def __init__(self):
        """
        Give each instance its own Vector sub-objects.
        """
        self.offset = Vector()
        self.orientation = Vector()
        self.beam_center = Vector()
        self.pixel_size = Vector()

    def __str__(self):
        # Table-driven rendering; the label padding reproduces the
        # historical column layout exactly.
        lines = ["Detector:\n", "   Name:         %s\n" % self.name]
        for label, value, unit in (
                ("Distance:    ", self.distance, self.distance_unit),
                ("Offset:      ", self.offset, self.offset_unit),
                ("Orientation: ", self.orientation, self.orientation_unit),
                ("Beam center: ", self.beam_center, self.beam_center_unit),
                ("Pixel size:  ", self.pixel_size, self.pixel_size_unit),
                ("Slit length: ", self.slit_length, self.slit_length_unit)):
            lines.append("   %s %s [%s]\n" % (label, value, unit))
        return "".join(lines)
228
229
class Aperture(object):
    """
    Class to hold aperture information.
    """
    # Name of the aperture
    name = None
    # Aperture type
    type = None
    # Size name
    size_name = None
    # Aperture size [Vector]
    size = None
    size_unit = 'mm'
    # Aperture distance [float]
    distance = None
    distance_unit = 'mm'

    def __init__(self):
        # Size is a vector quantity; give each instance its own object.
        self.size = Vector()
246
247
class Collimation(object):
    """
    Class to hold collimation information
    """
    # Name of the collimation element
    name = None
    # Length [float] [mm]
    length = None
    length_unit = 'mm'
    # List of Aperture instances
    aperture = None

    def __init__(self):
        self.aperture = []

    def __str__(self):
        lines = ["Collimation:\n",
                 "   Length:       %s [%s]\n" % (self.length, self.length_unit)]
        for ap in self.aperture:
            lines.append("   Aperture size:%s [%s]\n" % (ap.size, ap.size_unit))
            lines.append("   Aperture_dist:%s [%s]\n" % (ap.distance, ap.distance_unit))
        return "".join(lines)
273
274
class Source(object):
    """
    Class to hold source information
    """
    # Name of the source
    name = None
    # Radiation type [string]
    radiation = None
    # Beam size name
    beam_size_name = None
    # Beam size [Vector] [mm]
    beam_size = None
    beam_size_unit = 'mm'
    # Beam shape [string]
    beam_shape = None
    # Wavelength [float] [Angstrom]
    wavelength = None
    wavelength_unit = 'A'
    # Minimum wavelength [float]
    # NOTE(review): the default unit here is 'nm' although the original
    # comment said Angstrom -- confirm against the loaders.
    wavelength_min = None
    wavelength_min_unit = 'nm'
    # Maximum wavelength [float] -- same unit caveat as wavelength_min.
    wavelength_max = None
    wavelength_max_unit = 'nm'
    # Wavelength spread [float]
    wavelength_spread = None
    wavelength_spread_unit = 'percent'

    def __init__(self):
        # Beam size is a vector quantity; each instance gets its own.
        self.beam_size = Vector()

    def __str__(self):
        lines = ["Source:\n",
                 "   Radiation:    %s\n" % str(self.radiation),
                 "   Shape:        %s\n" % str(self.beam_shape)]
        for label, value, unit in (
                ("Wavelength:   ", self.wavelength, self.wavelength_unit),
                ("Waveln_min:   ", self.wavelength_min, self.wavelength_min_unit),
                ("Waveln_max:   ", self.wavelength_max, self.wavelength_max_unit),
                ("Waveln_spread:", self.wavelength_spread, self.wavelength_spread_unit)):
            lines.append("   %s%s [%s]\n" % (label, value, unit))
        lines.append("   Beam_size:    %s [%s]\n" % (self.beam_size, self.beam_size_unit))
        return "".join(lines)
321
322
# Definitions of radiation types
NEUTRON = 'neutron'
XRAY = 'x-ray'
MUON = 'muon'
ELECTRON = 'electron'
330
331
class Sample(object):
    """
    Class to hold the sample description
    """
    # Short name for sample
    name = ''
    # ID
    ID = ''
    # Thickness [float] [mm]
    thickness = None
    thickness_unit = 'mm'
    # Transmission [float] [fraction]
    transmission = None
    # Temperature [float] [no default unit]
    temperature = None
    temperature_unit = None
    # Position [Vector] [mm]
    position = None
    position_unit = 'mm'
    # Orientation [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    # Free-form details (list of strings)
    details = None
    # SESANS acceptance: (value, unit) pairs
    zacceptance = (0, "")
    yacceptance = (0, "")

    def __init__(self):
        # Vector and list attributes must be per-instance objects.
        self.position = Vector()
        self.orientation = Vector()
        self.details = []

    def __str__(self):
        lines = ["Sample:\n",
                 "   ID:           %s\n" % str(self.ID),
                 "   Transmission: %s\n" % str(self.transmission),
                 "   Thickness:    %s [%s]\n" % (self.thickness, self.thickness_unit),
                 "   Temperature:  %s [%s]\n" % (self.temperature, self.temperature_unit),
                 "   Position:     %s [%s]\n" % (self.position, self.position_unit),
                 "   Orientation:  %s [%s]\n" % (self.orientation, self.orientation_unit),
                 "   Details:\n"]
        lines.extend("      %s\n" % item for item in self.details)
        return "".join(lines)
383
384
class Process(object):
    """
    Information about a single process performed on the data.
    """
    name = ''
    date = ''
    description = ''
    # List of term entries
    term = None
    # List of note strings
    notes = None

    def __init__(self):
        self.term = []
        self.notes = []

    def is_empty(self):
        """
        Return True if no field of the process has been filled in.
        """
        fields = (self.name, self.date, self.description, self.term, self.notes)
        return all(len(field) == 0 for field in fields)

    def single_line_desc(self):
        """
        Return a single line string representing the process.
        """
        return "%s %s %s" % (self.name, self.date, self.description)

    def __str__(self):
        lines = ["Process:\n",
                 "   Name:         %s\n" % self.name,
                 "   Date:         %s\n" % self.date,
                 "   Description:  %s\n" % self.description]
        lines.extend("   Term:         %s\n" % term for term in self.term)
        lines.extend("   Note:         %s\n" % note for note in self.notes)
        return "".join(lines)
423
424
class TransmissionSpectrum(object):
    """
    Transmission spectrum information for white beams and spallation
    sources.
    """
    name = ''
    timestamp = ''
    # Wavelength values (list of float) [A]
    wavelength = None
    wavelength_unit = 'A'
    # Transmission values (list of float) [unitless]
    transmission = None
    transmission_unit = ''
    # Transmission deviation values (list of float) [unitless]
    transmission_deviation = None
    transmission_deviation_unit = ''

    def __init__(self):
        self.wavelength = []
        self.transmission = []
        self.transmission_deviation = []

    def __str__(self):
        # The number of points is the longest of the three arrays.
        n_pts = max(len(self.wavelength), len(self.transmission),
                    len(self.transmission_deviation))
        lines = [
            "Transmission Spectrum:\n",
            "   Name:             \t{0}\n".format(self.name),
            "   Timestamp:        \t{0}\n".format(self.timestamp),
            "   Wavelength unit:  \t{0}\n".format(self.wavelength_unit),
            "   Transmission unit:\t{0}\n".format(self.transmission_unit),
            # The extra space before the tab is preserved from the
            # historical format.
            "   Trans. Dev. unit:  \t{0}\n".format(self.transmission_deviation_unit),
            "   Number of Pts:    \t{0}\n".format(n_pts),
        ]
        return "".join(lines)
459
460
class DataInfo(object):
    """
    Class to hold the data read from a file.
    It includes four blocks of data for the
    instrument description, the sample description,
    the data itself and any other meta data.
    """
    # Title of the data set
    title = ''
    # Run number(s)
    run = None
    # Run name(s), keyed by run number
    run_name = None
    # Name of the file the data was read from
    filename = ''
    # Free-form notes
    notes = None
    # Processes (actions performed on the data)
    process = None
    # Instrument name
    instrument = ''
    # Detector information (list of Detector)
    detector = None
    # Sample information
    sample = None
    # Source information
    source = None
    # Collimation information (list of Collimation)
    collimation = None
    # Transmission spectrum info (list of TransmissionSpectrum)
    trans_spectrum = None
    # Additional meta-data (dict)
    meta_data = None
    # Errors encountered while loading
    errors = None
    # SESANS data flag
    isSesans = None

    def __init__(self):
        """
        Give every instance its own mutable containers and sub-objects.
        """
        self.title = ''
        self.run = []
        self.run_name = {}
        self.filename = ''
        self.notes = []
        self.process = []
        self.instrument = ''
        self.detector = []
        self.sample = Sample()
        self.source = Source()
        self.collimation = []
        self.trans_spectrum = []
        self.meta_data = {}
        self.errors = []
        self.isSesans = False

    def append_empty_process(self):
        """
        Append a new, empty Process record to the process list.
        """
        self.process.append(Process())

    def add_notes(self, message=""):
        """
        Add a note (free-form string) to the data info.
        """
        self.notes.append(message)

    def __str__(self):
        """
        Nice printout of the file metadata.
        """
        lines = ["File:            %s\n" % self.filename,
                 "Title:           %s\n" % self.title,
                 "Run:             %s\n" % str(self.run),
                 "SESANS:          %s\n" % str(self.isSesans),
                 "Instrument:      %s\n" % str(self.instrument),
                 "%s\n" % str(self.sample),
                 "%s\n" % str(self.source)]
        # Each of these lists renders one block per item.
        for group in (self.detector, self.collimation, self.process,
                      self.notes, self.trans_spectrum):
            lines.extend("%s\n" % str(item) for item in group)
        return "".join(lines)

    def _perform_operation(self, other, operation):
        """
        Private hook for the arithmetic operators. Not implemented for
        DataInfo, but should be implemented for each data class inherited
        from DataInfo that holds actual data (ex.: Data1D).
        """
        return NotImplemented

    def _perform_union(self, other):
        """
        Private hook for the union operators. Not implemented for
        DataInfo, but should be implemented for each data class inherited
        from DataInfo that holds actual data (ex.: Data1D).
        """
        return NotImplemented

    def __add__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_operation(other, lambda a, b: a + b)

    def __radd__(self, other):
        """
        Reflected addition of two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_operation(other, lambda a, b: b + a)

    def __sub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_operation(other, lambda a, b: a - b)

    def __rsub__(self, other):
        """
        Reflected subtraction of two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_operation(other, lambda a, b: b - a)

    def __mul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_operation(other, lambda a, b: a * b)

    def __rmul__(self, other):
        """
        Reflected multiplication of two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_operation(other, lambda a, b: b * a)

    def __truediv__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_operation(other, lambda a, b: a / b)
    __div__ = __truediv__  # Python 2 alias

    def __rtruediv__(self, other):
        """
        Reflected division of a data set by another

        :param other: data set that divides the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_operation(other, lambda a, b: b / a)
    __rdiv__ = __rtruediv__  # Python 2 alias

    def __or__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

    def __ror__(self, other):
        """
        Reflected union of a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)
class Data1D(plottable_1D, DataInfo):
    """
    1D data class: combines the plottable arrays (x, y, dx, dy, lam, dlam)
    from plottable_1D with the file metadata held by DataInfo.
    """
    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=None):
        DataInfo.__init__(self)
        # The two explicit None arguments fill the dxl/dxw (slit smearing)
        # slots of plottable_1D; they are never set at construction time.
        plottable_1D.__init__(self, x, y, dx, dy,None, None, lam, dlam)
        self.isSesans = isSesans
        try:
            if self.isSesans: # the data is SESANS
                self.x_unit = 'A'
                self.y_unit = 'pol'
            elif not self.isSesans: # the data is SANS
                self.x_unit = '1/A'
                self.y_unit = '1/cm'
        except: # the data is not recognized/supported, and the user is notified
            # NOTE(review): the if/elif above covers both truth values of
            # isSesans, so this except looks unreachable in practice.
            raise TypeError('data not recognized, check documentation for supported 1D data formats')

    def __str__(self):
        """
        Nice printout: metadata summary followed by data type, axes and length.
        """
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Length:       %g\n" % len(self.x)
        return _str

    def is_slit_smeared(self):
        """
        Check whether the data has slit smearing information.
        :return: True if slit smearing info is present, False otherwise
        """
        # Smearing info counts only if it is a non-empty list/ndarray
        # whose values are all strictly positive.
        def _check(v):
            if (v.__class__ == list or v.__class__ == np.ndarray) \
                and len(v) > 0 and min(v) > 0:
                return True
            return False
        return _check(self.dxl) or _check(self.dxw)

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data1D):
            x = np.zeros(length)
            dx = np.zeros(length)
            y = np.zeros(length)
            dy = np.zeros(length)
            lam = np.zeros(length)
            dlam = np.zeros(length)
            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)

        # NOTE(review): title/run/filename/instrument are shared by
        # reference while the remaining metadata is deep-copied.
        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: dy for self, dy for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        dy_other = None
        if isinstance(other, Data1D):
            # Check that data lengths are the same
            if len(self.x) != len(other.x) or \
                len(self.y) != len(other.y):
                msg = "Unable to perform operation: data length are not equal"
                raise ValueError(msg)
            # Here we could also extrapolate between data points
            # Relative tolerance on matching x-values (1% of self.x[i]).
            TOLERANCE = 0.01
            for i in range(len(self.x)):
                if fabs(self.x[i] - other.x[i]) > self.x[i]*TOLERANCE:
                    msg = "Incompatible data sets: x-values do not match"
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector
            dy_other = other.dy
            if other.dy is None or (len(other.dy) != len(other.y)):
                dy_other = np.zeros(len(other.y))

        # Check that we have errors, otherwise create zero vector
        dy = self.dy
        if self.dy is None or (len(self.dy) != len(self.y)):
            dy = np.zeros(len(self.y))

        return dy, dy_other

    def _perform_operation(self, other, operation):
        """
        Apply a binary operation point-by-point, propagating y
        uncertainties via the Uncertainty class and combining x
        resolutions in quadrature.
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(len(self.x))
        # Resolution slots mirror this data set's: present iff present here.
        if self.dxw is None:
            result.dxw = None
        else:
            result.dxw = np.zeros(len(self.x))
        if self.dxl is None:
            result.dxl = None
        else:
            result.dxl = np.zeros(len(self.x))

        for i in range(len(self.x)):
            result.x[i] = self.x[i]
            if self.dx is not None and len(self.x) == len(self.dx):
                result.dx[i] = self.dx[i]
            if self.dxw is not None and len(self.x) == len(self.dxw):
                result.dxw[i] = self.dxw[i]
            if self.dxl is not None and len(self.x) == len(self.dxl):
                result.dxl[i] = self.dxl[i]

            a = Uncertainty(self.y[i], dy[i]**2)
            if isinstance(other, Data1D):
                b = Uncertainty(other.y[i], dy_other[i]**2)
                if other.dx is not None:
                    # NOTE(review): this computes
                    # sqrt((self.dx^2 + other.dx^2) / 2); if self.dx is
                    # None while other.dx is not, the in-place multiply
                    # below would fail -- confirm callers guarantee
                    # matching resolution info.
                    result.dx[i] *= self.dx[i]
                    result.dx[i] += (other.dx[i]**2)
                    result.dx[i] /= 2
                    result.dx[i] = math.sqrt(result.dx[i])
                if result.dxl is not None and other.dxl is not None:
                    # Same quadrature-average combination for slit length.
                    result.dxl[i] *= self.dxl[i]
                    result.dxl[i] += (other.dxl[i]**2)
                    result.dxl[i] /= 2
                    result.dxl[i] = math.sqrt(result.dxl[i])
            else:
                # Scalar (or other non-Data1D) operand: no uncertainty.
                b = other

            output = operation(a, b)
            result.y[i] = output.x
            result.dy[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data1D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Concatenate two data sets and sort the result by x. Optional
        arrays (dy, dx, dxw, dxl) are kept only when present in BOTH
        data sets.
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(len(self.x) + len(other.x))
        # Each optional array survives only if both operands provide it;
        # the zero arrays allocated here are overwritten by np.append below.
        if self.dy is None or other.dy is None:
            result.dy = None
        else:
            result.dy = np.zeros(len(self.x) + len(other.x))
        if self.dx is None or other.dx is None:
            result.dx = None
        else:
            result.dx = np.zeros(len(self.x) + len(other.x))
        if self.dxw is None or other.dxw is None:
            result.dxw = None
        else:
            result.dxw = np.zeros(len(self.x) + len(other.x))
        if self.dxl is None or other.dxl is None:
            result.dxl = None
        else:
            result.dxl = np.zeros(len(self.x) + len(other.x))

        result.x = np.append(self.x, other.x)
        # Sort all arrays by ascending x using a single argsort index.
        ind = np.argsort(result.x)
        result.x = result.x[ind]
        result.y = np.append(self.y, other.y)
        result.y = result.y[ind]
        if result.dy is not None:
            result.dy = np.append(self.dy, other.dy)
            result.dy = result.dy[ind]
        if result.dx is not None:
            result.dx = np.append(self.dx, other.dx)
            result.dx = result.dx[ind]
        if result.dxw is not None:
            result.dxw = np.append(self.dxw, other.dxw)
            result.dxw = result.dxw[ind]
        if result.dxl is not None:
            result.dxl = np.append(self.dxl, other.dxl)
            result.dxl = result.dxl[ind]
        return result
926
927
class Data2D(plottable_2D, DataInfo):
    """
    2D data class: combines the plottable_2D data arrays with the full
    DataInfo metadata (sample, source, detector, process history, ...).
    """
    ## Units for Q-values
    Q_unit = '1/A'
    ## Units for I(Q) values
    I_unit = '1/cm'
    ## Vector of Q-values at the center of each bin in x
    x_bins = None
    ## Vector of Q-values at the center of each bin in y
    y_bins = None
    ## No 2D SESANS data as of yet. Always set it to False
    isSesans = False

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        """
        :param data: intensity values, one per (qx, qy) point
        :param err_data: uncertainty on the intensity values
        :param qx_data: Qx value of each point
        :param qy_data: Qy value of each point
        :param q_data: |Q| value of each point
        :param mask: mask flags, one per point
        :param dqx_data: Qx resolution (optional)
        :param dqy_data: Qy resolution (optional)
        """
        DataInfo.__init__(self)
        plottable_2D.__init__(self, data, err_data, qx_data,
                              qy_data, q_data, mask, dqx_data, dqy_data)
        self.y_bins = []
        self.x_bins = []

        # DataInfo.__init__ is expected to leave the detector bank empty;
        # anything else means shared class-level state leaked in.
        if len(self.detector) > 0:
            raise RuntimeError("Data2D: Detector bank already filled at init")

    def __str__(self):
        """Return a printable summary: metadata block plus data shape."""
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
        _str += "   Length:       %g \n" % (len(self.data))
        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
        return _str

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        :return: the clone, with axes, bins and metadata copied from self
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data2D):
            # Resolution arrays (dqx/dqy) are deliberately left unset here;
            # callers fill them in when both operands carry resolution.
            data = np.zeros(length)
            err_data = np.zeros(length)
            qx_data = np.zeros(length)
            qy_data = np.zeros(length)
            q_data = np.zeros(length)
            mask = np.zeros(length)
            clone = Data2D(data=data, err_data=err_data,
                           qx_data=qx_data, qy_data=qy_data,
                           q_data=q_data, mask=mask)

        # Axis labels/units.
        clone._xaxis = self._xaxis
        clone._yaxis = self._yaxis
        clone._zaxis = self._zaxis
        clone._xunit = self._xunit
        clone._yunit = self._yunit
        clone._zunit = self._zunit
        # NOTE(review): the bin vectors are shared by reference, not
        # copied — mutating clone.x_bins mutates self.x_bins too.
        clone.x_bins = self.x_bins
        clone.y_bins = self.y_bins

        # Metadata: simple attributes are shared, containers deep-copied.
        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the q vectors are compatible (within 1% tolerance).
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation (Data2D or scalar)
        :return: err for self, err for other [numpy arrays, or None for
                 other when it is a scalar]
        :raise ValueError: when lengths or q values are not compatible
        """
        err_other = None
        TOLERANCE = 0.01
        if isinstance(other, Data2D):
            # Check that data lengths are the same
            if len(self.data) != len(other.data) or \
                len(self.qx_data) != len(other.qx_data) or \
                len(self.qy_data) != len(other.qy_data):
                msg = "Unable to perform operation: data length are not equal"
                raise ValueError(msg)
            for ind in range(len(self.data)):
                if fabs(self.qx_data[ind] - other.qx_data[ind]) > fabs(self.qx_data[ind])*TOLERANCE:
                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
                    raise ValueError(msg)
                if fabs(self.qy_data[ind] - other.qy_data[ind]) > fabs(self.qy_data[ind])*TOLERANCE:
                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
                    raise ValueError(msg)

            # Check that the scales match
            err_other = other.err_data
            if other.err_data is None or \
                (len(other.err_data) != len(other.data)):
                err_other = np.zeros(len(other.data))

        # Check that we have errors, otherwise create zero vector.
        # Bug fix: size the fallback from self.data, not other.data —
        # other may be a plain scalar (e.g. data + 2), which has no
        # .data attribute, and its length is irrelevant to self anyway.
        err = self.err_data
        if self.err_data is None or \
            (len(self.err_data) != len(self.data)):
            err = np.zeros(len(self.data))
        return err, err_other

    def _perform_operation(self, other, operation):
        """
        Perform 2D operations between data sets

        :param other: other data set (Data2D or scalar)
        :param operation: function defining the operation; it receives
            two Uncertainty values (or an Uncertainty and a scalar) and
            returns an Uncertainty
        :return: new Data2D holding the point-by-point result
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(np.size(self.data))
        if self.dqx_data is None or self.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = np.zeros(len(self.data))
            result.dqy_data = np.zeros(len(self.data))
        for i in range(np.size(self.data)):
            result.data[i] = self.data[i]
            if self.err_data is not None and \
                            np.size(self.data) == np.size(self.err_data):
                result.err_data[i] = self.err_data[i]
            if self.dqx_data is not None:
                result.dqx_data[i] = self.dqx_data[i]
            if self.dqy_data is not None:
                result.dqy_data[i] = self.dqy_data[i]
            result.qx_data[i] = self.qx_data[i]
            result.qy_data[i] = self.qy_data[i]
            result.q_data[i] = self.q_data[i]
            result.mask[i] = self.mask[i]

            a = Uncertainty(self.data[i], dy[i]**2)
            if isinstance(other, Data2D):
                b = Uncertainty(other.data[i], dy_other[i]**2)
                # Combine resolutions as the RMS of the two operands:
                # sqrt((dq_self^2 + dq_other^2) / 2).
                if other.dqx_data is not None and \
                        result.dqx_data is not None:
                    result.dqx_data[i] *= self.dqx_data[i]
                    result.dqx_data[i] += (other.dqx_data[i]**2)
                    result.dqx_data[i] /= 2
                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
                if other.dqy_data is not None and \
                        result.dqy_data is not None:
                    result.dqy_data[i] *= self.dqy_data[i]
                    result.dqy_data[i] += (other.dqy_data[i]**2)
                    result.dqy_data[i] /= 2
                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
            else:
                # Scalar operand: Uncertainty arithmetic handles it.
                b = other
            output = operation(a, b)
            result.data[i] = output.x
            result.err_data[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the two data sets can be combined into one.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data2D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Concatenate two 2D data sets into one.

        :param other: other data set
        :return: new Data2D containing the points of both operands
        :raise ValueError: when data types are not compatible
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        total = np.size(self.data) + np.size(other.data)
        result = self.clone_without_data(total)
        result.xmin = self.xmin
        result.xmax = self.xmax
        result.ymin = self.ymin
        result.ymax = self.ymax
        # Q-resolution survives the union only when both operands have it.
        if self.dqx_data is None or self.dqy_data is None or \
                other.dqx_data is None or other.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = np.zeros(total)
            result.dqy_data = np.zeros(total)

        result.data = np.append(self.data, other.data)
        result.qx_data = np.append(self.qx_data, other.qx_data)
        result.qy_data = np.append(self.qy_data, other.qy_data)
        result.q_data = np.append(self.q_data, other.q_data)
        result.mask = np.append(self.mask, other.mask)
        # Bug fix: the old code tested result.err_data (always a zero
        # vector fresh from clone_without_data, hence never None) and
        # appended unconditionally, yielding an object array whenever
        # either operand lacked errors. Keep errors only if both have them.
        if self.err_data is None or other.err_data is None:
            result.err_data = None
        else:
            result.err_data = np.append(self.err_data, other.err_data)
        # Bug fix: test the result's resolution (i.e. both operands carry
        # it) rather than self's alone, so a one-sided resolution is not
        # appended together with None.
        if result.dqx_data is not None:
            result.dqx_data = np.append(self.dqx_data, other.dqx_data)
        if result.dqy_data is not None:
            result.dqy_data = np.append(self.dqy_data, other.dqy_data)

        return result
1164
1165
def combine_data_info_with_plottable(data, datainfo):
    """
    A function that combines the DataInfo data in self.current_datainto with a
    plottable_1D or 2D data object.

    :param data: A plottable_1D or plottable_2D data object
    :param datainfo: A DataInfo object whose metadata is copied onto the result
    :return: A fully specified Data1D or Data2D object, or an error string
        when *data* is neither plottable type
    """
    # Build the typed data set first; metadata is layered on afterwards.
    if isinstance(data, plottable_1D):
        final_dataset = Data1D(data.x, data.y, isSesans=datainfo.isSesans)
        final_dataset.dx = data.dx
        final_dataset.dy = data.dy
        final_dataset.dxl = data.dxl
        final_dataset.dxw = data.dxw
        final_dataset.x_unit = data._xunit
        final_dataset.y_unit = data._yunit
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
    elif isinstance(data, plottable_2D):
        final_dataset = Data2D(data.data, data.err_data, data.qx_data,
                               data.qy_data, data.q_data, data.mask,
                               data.dqx_data, data.dqy_data)
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
        final_dataset.zaxis(data._zaxis, data._zunit)
        final_dataset.y_bins = data.y_bins
        final_dataset.x_bins = data.x_bins
    else:
        # Historical behavior: report the misuse as a string, not a raise.
        return ("Should Never Happen: _combine_data_info_with_plottabl"
                "e input is not a plottable1d or plottable2d data "
                "object")

    # Plot bounds are optional on the plottable; copy whichever exist.
    for bound in ("xmax", "ymax", "xmin", "ymin"):
        if hasattr(data, bound):
            setattr(final_dataset, bound, getattr(data, bound))

    # Transfer the metadata wholesale from the DataInfo object.
    for attr in ("isSesans", "title", "run", "run_name", "filename",
                 "notes", "process", "instrument", "detector", "sample",
                 "source", "collimation", "trans_spectrum", "meta_data",
                 "errors"):
        setattr(final_dataset, attr, getattr(datainfo, attr))
    return final_dataset
# Note: See TracBrowser for help on using the repository browser.