source: sasview/src/sas/sascalc/dataloader/data_info.py @ 5218180

[959eb01]1"""
2    Module that contains classes to hold information read from
3    reduced data files.
4
5    A good description of the data members can be found in
6    the CanSAS 1D XML data format:
7
8    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
9"""
10#####################################################################
11#This software was developed by the University of Tennessee as part of the
12#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
13#project funded by the US National Science Foundation.
14#See the license text in license.txt
15#copyright 2008, University of Tennessee
16######################################################################
17
[a1b8fee]18from __future__ import print_function
[959eb01]19
20#TODO: Keep track of data manipulation in the 'process' data structure.
21#TODO: This module should be independent of plottables. We should write
22#        an adapter class for plottables when needed.
23
24#from sas.guitools.plottables import Data1D as plottable_1D
25from sas.sascalc.data_util.uncertainty import Uncertainty
26import numpy as np
27import math
28
29class plottable_1D(object):
30    """
31    A placeholder for 1D plottable data (stands in for the plottables Data1D).
32    """
33    # The slit smearing terms below (dxl, dxw) should be mutually
34    # exclusive with the presence of Qdev (dx)
35    x = None
36    y = None
37    dx = None
38    dy = None
39    ## Slit smearing length
40    dxl = None
41    ## Slit smearing width
42    dxw = None
43    ## SESANS specific params (wavelengths for spin echo length calculation)
44    lam = None
45    dlam = None
46
47    # Units
48    _xaxis = ''
49    _xunit = ''
50    _yaxis = ''
51    _yunit = ''
52
53    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
54        self.x = np.asarray(x)
55        self.y = np.asarray(y)
56        if dx is not None:
57            self.dx = np.asarray(dx)
58        if dy is not None:
59            self.dy = np.asarray(dy)
60        if dxl is not None:
61            self.dxl = np.asarray(dxl)
62        if dxw is not None:
63            self.dxw = np.asarray(dxw)
64        if lam is not None:
65            self.lam = np.asarray(lam)
66        if dlam is not None:
67            self.dlam = np.asarray(dlam)
68
69    def xaxis(self, label, unit):
70        """
71        set the x axis label and unit
72        """
73        self._xaxis = label
74        self._xunit = unit
75
76    def yaxis(self, label, unit):
77        """
78        set the y axis label and unit
79        """
80        self._yaxis = label
81        self._yunit = unit
82
83
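# Usage sketch with invented values: x and y are required, the remaining
# arrays are optional and stay None when not given.
#
#     q = np.linspace(0.001, 0.5, 100)
#     intensity = 1.0 / (1.0 + q ** 2)
#     curve = plottable_1D(x=q, y=intensity, dy=0.05 * intensity)
#     curve.xaxis("Q", "1/A")
#     curve.yaxis("Intensity", "1/cm")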
84class plottable_2D(object):
85    """
86    A placeholder for 2D plottable data (stands in for the plottables Data2D).
87    """
88    xmin = None
89    xmax = None
90    ymin = None
91    ymax = None
92    data = None
93    qx_data = None
94    qy_data = None
95    q_data = None
96    err_data = None
97    dqx_data = None
98    dqy_data = None
99    mask = None
100
101    # Units
102    _xaxis = ''
103    _xunit = ''
104    _yaxis = ''
105    _yunit = ''
106    _zaxis = ''
107    _zunit = ''
108
109    def __init__(self, data=None, err_data=None, qx_data=None,
110                 qy_data=None, q_data=None, mask=None,
111                 dqx_data=None, dqy_data=None):
112        self.data = np.asarray(data)
113        self.qx_data = np.asarray(qx_data)
114        self.qy_data = np.asarray(qy_data)
115        self.q_data = np.asarray(q_data)
116        self.mask = np.asarray(mask)
117        self.err_data = np.asarray(err_data)
118        if dqx_data is not None:
119            self.dqx_data = np.asarray(dqx_data)
120        if dqy_data is not None:
121            self.dqy_data = np.asarray(dqy_data)
122
123    def xaxis(self, label, unit):
124        """
125        set the x axis label and unit
126        """
127        self._xaxis = label
128        self._xunit = unit
129
130    def yaxis(self, label, unit):
131        """
132        set the y axis label and unit
133        """
134        self._yaxis = label
135        self._yunit = unit
136
137    def zaxis(self, label, unit):
138        """
139        set the z axis label and unit
140        """
141        self._zaxis = label
142        self._zunit = unit
143
144
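# Usage sketch with invented values: all arrays are flattened and of equal
# length, one entry per detector pixel.
#
#     qx = np.array([-0.01, 0.01, -0.01, 0.01])
#     qy = np.array([-0.01, -0.01, 0.01, 0.01])
#     image = plottable_2D(data=np.ones(4), err_data=0.1 * np.ones(4),
#                          qx_data=qx, qy_data=qy,
#                          q_data=np.sqrt(qx**2 + qy**2),
#                          mask=np.ones(4, dtype=bool))
#     image.zaxis("Intensity", "1/cm")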
145class Vector(object):
146    """
147    Vector class to hold the x, y and z components of a multi-dimensional quantity
148    """
149    ## x component
150    x = None
151    ## y component
152    y = None
153    ## z component
154    z = None
155
156    def __init__(self, x=None, y=None, z=None):
157        """
158        Initialization. Components that are not
159        set are set to None by default.
160
161        :param x: x component
162        :param y: y component
163        :param z: z component
164        """
165        self.x = x
166        self.y = y
167        self.z = z
168
169    def __str__(self):
170        msg = "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
171        return msg
172
173
174class Detector(object):
175    """
176    Class to hold detector information
177    """
178    ## Name of the instrument [string]
179    name = None
180    ## Sample to detector distance [float] [mm]
181    distance = None
182    distance_unit = 'mm'
183    ## Offset of this detector position in X, Y,
184    #(and Z if necessary) [Vector] [mm]
185    offset = None
186    offset_unit = 'm'
187    ## Orientation (rotation) of this detector in roll,
188    # pitch, and yaw [Vector] [degrees]
189    orientation = None
190    orientation_unit = 'degree'
191    ## Center of the beam on the detector in X and Y
192    #(and Z if necessary) [Vector] [mm]
193    beam_center = None
194    beam_center_unit = 'mm'
195    ## Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
196    pixel_size = None
197    pixel_size_unit = 'mm'
198    ## Slit length of the instrument for this detector.[float] [mm]
199    slit_length = None
200    slit_length_unit = 'mm'
201
202    def __init__(self):
203        """
204        Initialize class attributes that are objects...
205        """
206        self.offset = Vector()
207        self.orientation = Vector()
208        self.beam_center = Vector()
209        self.pixel_size = Vector()
210
211    def __str__(self):
212        _str = "Detector:\n"
213        _str += "   Name:         %s\n" % self.name
214        _str += "   Distance:     %s [%s]\n" % \
215            (str(self.distance), str(self.distance_unit))
216        _str += "   Offset:       %s [%s]\n" % \
217            (str(self.offset), str(self.offset_unit))
218        _str += "   Orientation:  %s [%s]\n" % \
219            (str(self.orientation), str(self.orientation_unit))
220        _str += "   Beam center:  %s [%s]\n" % \
221            (str(self.beam_center), str(self.beam_center_unit))
222        _str += "   Pixel size:   %s [%s]\n" % \
223            (str(self.pixel_size), str(self.pixel_size_unit))
224        _str += "   Slit length:  %s [%s]\n" % \
225            (str(self.slit_length), str(self.slit_length_unit))
226        return _str
227
228
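# Usage sketch with invented values (not taken from any real instrument):
#
#     det = Detector()
#     det.name = "example detector"
#     det.distance = 4000.0        # [mm]
#     det.beam_center.x = 64.5     # [mm]
#     det.beam_center.y = 64.5     # [mm]
#     det.pixel_size.x = 5.0       # [mm]
#     det.pixel_size.y = 5.0       # [mm]
#     print(det)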
229class Aperture(object):
230    ## Name
231    name = None
232    ## Type
233    type = None
234    ## Size name
235    size_name = None
236    ## Aperture size [Vector]
237    size = None
238    size_unit = 'mm'
239    ## Aperture distance [float]
240    distance = None
241    distance_unit = 'mm'
242
243    def __init__(self):
244        self.size = Vector()
245
246
247class Collimation(object):
248    """
249    Class to hold collimation information
250    """
251    ## Name
252    name = None
253    ## Length [float] [mm]
254    length = None
255    length_unit = 'mm'
256    ## Aperture
257    aperture = None
258
259    def __init__(self):
260        self.aperture = []
261
262    def __str__(self):
263        _str = "Collimation:\n"
264        _str += "   Length:       %s [%s]\n" % \
265            (str(self.length), str(self.length_unit))
266        for item in self.aperture:
267            _str += "   Aperture size:%s [%s]\n" % \
268                (str(item.size), str(item.size_unit))
269            _str += "   Aperture_dist:%s [%s]\n" % \
270                (str(item.distance), str(item.distance_unit))
271        return _str
272
273
274class Source(object):
275    """
276    Class to hold source information
277    """
278    ## Name
279    name = None
280    ## Radiation type [string]
281    radiation = None
282    ## Beam size name
283    beam_size_name = None
284    ## Beam size [Vector] [mm]
285    beam_size = None
286    beam_size_unit = 'mm'
287    ## Beam shape [string]
288    beam_shape = None
289    ## Wavelength [float] [Angstrom]
290    wavelength = None
291    wavelength_unit = 'A'
292    ## Minimum wavelength [float] [Angstrom]
293    wavelength_min = None
294    wavelength_min_unit = 'nm'
295    ## Maximum wavelength [float] [Angstrom]
296    wavelength_max = None
297    wavelength_max_unit = 'nm'
298    ## Wavelength spread [float] [Angstrom]
299    wavelength_spread = None
300    wavelength_spread_unit = 'percent'
301
302    def __init__(self):
303        self.beam_size = Vector()
304
305    def __str__(self):
306        _str = "Source:\n"
307        _str += "   Radiation:    %s\n" % str(self.radiation)
308        _str += "   Shape:        %s\n" % str(self.beam_shape)
309        _str += "   Wavelength:   %s [%s]\n" % \
310            (str(self.wavelength), str(self.wavelength_unit))
311        _str += "   Waveln_min:   %s [%s]\n" % \
312            (str(self.wavelength_min), str(self.wavelength_min_unit))
313        _str += "   Waveln_max:   %s [%s]\n" % \
314            (str(self.wavelength_max), str(self.wavelength_max_unit))
315        _str += "   Waveln_spread:%s [%s]\n" % \
316            (str(self.wavelength_spread), str(self.wavelength_spread_unit))
317        _str += "   Beam_size:    %s [%s]\n" % \
318            (str(self.beam_size), str(self.beam_size_unit))
319        return _str
320
321
322"""
323Definitions of radiation types
324"""
325NEUTRON = 'neutron'
326XRAY = 'x-ray'
327MUON = 'muon'
328ELECTRON = 'electron'
329
330
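# Usage sketch with invented values, using the radiation constants defined
# above:
#
#     src = Source()
#     src.radiation = NEUTRON
#     src.wavelength = 6.0            # [A]
#     src.wavelength_spread = 10.0    # [percent]
#     src.beam_size.x = 10.0          # [mm]
#     src.beam_size.y = 10.0          # [mm]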
331class Sample(object):
332    """
333    Class to hold the sample description
334    """
335    ## Short name for sample
336    name = ''
337    ## ID
338    ID = ''
339    ## Thickness [float] [mm]
340    thickness = None
341    thickness_unit = 'mm'
342    ## Transmission [float] [fraction]
343    transmission = None
344    ## Temperature [float] [No Default]
345    temperature = None
346    temperature_unit = None
347    ## Position [Vector] [mm]
348    position = None
349    position_unit = 'mm'
350    ## Orientation [Vector] [degrees]
351    orientation = None
352    orientation_unit = 'degree'
353    ## Details
354    details = None
355    ## SESANS z and y acceptance
356    zacceptance = (0,"")
357    yacceptance = (0,"")
358
359    def __init__(self):
360        self.position = Vector()
361        self.orientation = Vector()
362        self.details = []
363
364    def __str__(self):
365        _str = "Sample:\n"
366        _str += "   ID:           %s\n" % str(self.ID)
367        _str += "   Transmission: %s\n" % str(self.transmission)
368        _str += "   Thickness:    %s [%s]\n" % \
369            (str(self.thickness), str(self.thickness_unit))
370        _str += "   Temperature:  %s [%s]\n" % \
371            (str(self.temperature), str(self.temperature_unit))
372        _str += "   Position:     %s [%s]\n" % \
373            (str(self.position), str(self.position_unit))
374        _str += "   Orientation:  %s [%s]\n" % \
375            (str(self.orientation), str(self.orientation_unit))
376
377        _str += "   Details:\n"
378        for item in self.details:
379            _str += "      %s\n" % item
380
381        return _str
382
383
384class Process(object):
385    """
386    Class that holds information about the processes
387    performed on the data.
388    """
389    name = ''
390    date = ''
391    description = ''
392    term = None
393    notes = None
394
395    def __init__(self):
396        self.term = []
397        self.notes = []
398
399    def is_empty(self):
400        """
401            Return True if the object is empty
402        """
403        return len(self.name) == 0 and len(self.date) == 0 and len(self.description) == 0 \
404            and len(self.term) == 0 and len(self.notes) == 0
405
406    def single_line_desc(self):
407        """
408            Return a single line string representing the process
409        """
410        return "%s %s %s" % (self.name, self.date, self.description)
411
412    def __str__(self):
413        _str = "Process:\n"
414        _str += "   Name:         %s\n" % self.name
415        _str += "   Date:         %s\n" % self.date
416        _str += "   Description:  %s\n" % self.description
417        for item in self.term:
418            _str += "   Term:         %s\n" % item
419        for item in self.notes:
420            _str += "   Note:         %s\n" % item
421        return _str
422
423
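# Usage sketch with invented values: a Process records one reduction step;
# 'term' and 'notes' are free-form lists.
#
#     proc = Process()
#     proc.name = "radial average"
#     proc.date = "2018-01-01"
#     proc.term.append({"name": "binning", "value": "linear"})
#     proc.notes.append("example note")
#     assert not proc.is_empty()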
424class TransmissionSpectrum(object):
425    """
426    Class that holds information about transmission spectrum
427    for white beams and spallation sources.
428    """
429    name = ''
430    timestamp = ''
431    ## Wavelength (float) [A]
432    wavelength = None
433    wavelength_unit = 'A'
434    ## Transmission (float) [unit less]
435    transmission = None
436    transmission_unit = ''
437    ## Transmission Deviation (float) [unit less]
438    transmission_deviation = None
439    transmission_deviation_unit = ''
440
441    def __init__(self):
442        self.wavelength = []
443        self.transmission = []
444        self.transmission_deviation = []
445
446    def __str__(self):
447        _str = "Transmission Spectrum:\n"
448        _str += "   Name:             \t{0}\n".format(self.name)
449        _str += "   Timestamp:        \t{0}\n".format(self.timestamp)
450        _str += "   Wavelength unit:  \t{0}\n".format(self.wavelength_unit)
451        _str += "   Transmission unit:\t{0}\n".format(self.transmission_unit)
452        _str += "   Trans. Dev. unit:  \t{0}\n".format(\
453                                            self.transmission_deviation_unit)
454        length_list = [len(self.wavelength), len(self.transmission), \
455                len(self.transmission_deviation)]
456        _str += "   Number of Pts:    \t{0}\n".format(max(length_list))
457        return _str
458
459
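# Usage sketch with invented values: the three lists are expected to be of
# equal length, one entry per wavelength bin.
#
#     spectrum = TransmissionSpectrum()
#     spectrum.wavelength = [1.0, 2.0, 3.0]                 # [A]
#     spectrum.transmission = [0.90, 0.85, 0.80]
#     spectrum.transmission_deviation = [0.01, 0.01, 0.01]
#     print(spectrum)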
460class DataInfo(object):
461    """
462    Class to hold the data read from a file.
463    It includes four blocks of data for the
464    instrument description, the sample description,
465    the data itself and any other meta data.
466    """
467    ## Title
468    title = ''
469    ## Run number
470    run = None
471    ## Run name
472    run_name = None
473    ## File name
474    filename = ''
475    ## Notes
476    notes = None
477    ## Processes (Action on the data)
478    process = None
479    ## Instrument name
480    instrument = ''
481    ## Detector information
482    detector = None
483    ## Sample information
484    sample = None
485    ## Source information
486    source = None
487    ## Collimation information
488    collimation = None
489    ## Transmission Spectrum Info
490    trans_spectrum = None
491    ## Additional meta-data
492    meta_data = None
493    ## Loading errors
494    errors = None
495    ## SESANS data check
496    isSesans = None
497
498
499    def __init__(self):
500        """
501        Initialization
502        """
503        ## Title
504        self.title = ''
505        ## Run number
506        self.run = []
507        self.run_name = {}
508        ## File name
509        self.filename = ''
510        ## Notes
511        self.notes = []
512        ## Processes (Action on the data)
513        self.process = []
514        ## Instrument name
515        self.instrument = ''
516        ## Detector information
517        self.detector = []
518        ## Sample information
519        self.sample = Sample()
520        ## Source information
521        self.source = Source()
522        ## Collimation information
523        self.collimation = []
524        ## Transmission Spectrum
525        self.trans_spectrum = []
526        ## Additional meta-data
527        self.meta_data = {}
528        ## Loading errors
529        self.errors = []
530        ## SESANS data check
531        self.isSesans = False
532
533    def append_empty_process(self):
534        """
535        """
536        self.process.append(Process())
537
538    def add_notes(self, message=""):
539        """
540        Add notes to datainfo
541        """
542        self.notes.append(message)
543
544    def __str__(self):
545        """
546        Nice printout
547        """
548        _str = "File:            %s\n" % self.filename
549        _str += "Title:           %s\n" % self.title
550        _str += "Run:             %s\n" % str(self.run)
551        _str += "SESANS:          %s\n" % str(self.isSesans)
552        _str += "Instrument:      %s\n" % str(self.instrument)
553        _str += "%s\n" % str(self.sample)
554        _str += "%s\n" % str(self.source)
555        for item in self.detector:
556            _str += "%s\n" % str(item)
557        for item in self.collimation:
558            _str += "%s\n" % str(item)
559        for item in self.process:
560            _str += "%s\n" % str(item)
561        for item in self.notes:
562            _str += "%s\n" % str(item)
563        for item in self.trans_spectrum:
564            _str += "%s\n" % str(item)
565        return _str
566
567    # Private method to perform operation. Not implemented for DataInfo,
568    # but should be implemented for each data class inherited from DataInfo
569    # that holds actual data (ex.: Data1D)
570    def _perform_operation(self, other, operation):
571        """
572        Private method to perform operation. Not implemented for DataInfo,
573        but should be implemented for each data class inherited from DataInfo
574        that holds actual data (ex.: Data1D)
575        """
576        return NotImplemented
577
578    def _perform_union(self, other):
579        """
580        Private method to perform union operation. Not implemented for DataInfo,
581        but should be implemented for each data class inherited from DataInfo
582        that holds actual data (ex.: Data1D)
583        """
584        return NotImplemented
585
586    def __add__(self, other):
587        """
588        Add two data sets
589
590        :param other: data set to add to the current one
591        :return: new data set
592        :raise ValueError: raised when two data sets are incompatible
593        """
594        def operation(a, b):
595            return a + b
596        return self._perform_operation(other, operation)
597
598    def __radd__(self, other):
599        """
600        Add two data sets
601
602        :param other: data set to add to the current one
603        :return: new data set
604        :raise ValueError: raised when two data sets are incompatible
605        """
606        def operation(a, b):
607            return b + a
608        return self._perform_operation(other, operation)
609
610    def __sub__(self, other):
611        """
612        Subtract two data sets
613
614        :param other: data set to subtract from the current one
615        :return: new data set
616        :raise ValueError: raised when two data sets are incompatible
617        """
618        def operation(a, b):
619            return a - b
620        return self._perform_operation(other, operation)
621
622    def __rsub__(self, other):
623        """
624        Subtract two data sets
625
626        :param other: data set to subtract from the current one
627        :return: new data set
628        :raise ValueError: raised when two data sets are incompatible
629        """
630        def operation(a, b):
631            return b - a
632        return self._perform_operation(other, operation)
633
634    def __mul__(self, other):
635        """
636        Multiply two data sets
637
638        :param other: data set to multiply with the current one
639        :return: new data set
640        :raise ValueError: raised when two data sets are incompatible
641        """
642        def operation(a, b):
643            return a * b
644        return self._perform_operation(other, operation)
645
646    def __rmul__(self, other):
647        """
648        Multiply two data sets
649
650        :param other: data set to multiply with the current one
651        :return: new data set
652        :raise ValueError: raised when two data sets are incompatible
653        """
654        def operation(a, b):
655            return b * a
656        return self._perform_operation(other, operation)
657
658    def __div__(self, other):
659        """
660        Divide this data set by another
661
662        :param other: data set that the current one is divided by
663        :return: new data set
664        :raise ValueError: raised when two data sets are incompatible
665        """
666        def operation(a, b):
667            return a/b
668        return self._perform_operation(other, operation)
669
670    def __rdiv__(self, other):
671        """
672        Divide another data set by this one
673
674        :param other: data set that the current one is divided by
675        :return: new data set
676        :raise ValueError: raised when two data sets are incompatible
677        """
678        def operation(a, b):
679            return b/a
680        return self._perform_operation(other, operation)
681
682    def __or__(self, other):
683        """
684        Union a data set with another
685
686        :param other: data set to be unified
687        :return: new data set
688        :raise ValueError: raised when two data sets are incompatible
689        """
690        return self._perform_union(other)
691
692    def __ror__(self, other):
693        """
694        Union a data set with another
695
696        :param other: data set to be unified
697        :return: new data set
698        :raise ValueError: raised when two data sets are incompatible
699        """
700        return self._perform_union(other)
701
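# The arithmetic operators above only delegate to _perform_operation /
# _perform_union; the data-bearing subclasses (Data1D, Data2D) supply the
# actual implementations.  Usage sketch for the metadata container itself
# (all values invented):
#
#     info = DataInfo()
#     info.title = "example measurement"
#     info.run.append("12345")
#     info.add_notes("loaded for demonstration purposes")
#     info.append_empty_process()
#     info.process[-1].name = "example reduction step"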
702class Data1D(plottable_1D, DataInfo):
703    """
704    1D data class
705    """
706    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=None):
707        DataInfo.__init__(self)
708        plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
709        self.isSesans = isSesans
710        try:
711            if self.isSesans: # the data is SESANS
712                self.x_unit = 'A'
713                self.y_unit = 'pol'
714            elif not self.isSesans: # the data is SANS
715                self.x_unit = '1/A'
716                self.y_unit = '1/cm'
717        except: # the data is not recognized/supported, and the user is notified
[574adc7]718            raise TypeError('data not recognized, check documentation for supported 1D data formats')
[959eb01]719
720    def __str__(self):
721        """
722        Nice printout
723        """
724        _str = "%s\n" % DataInfo.__str__(self)
725        _str += "Data:\n"
726        _str += "   Type:         %s\n" % self.__class__.__name__
727        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
728        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
729        _str += "   Length:       %g\n" % len(self.x)
730        return _str
731
732    def is_slit_smeared(self):
733        """
734        Check whether the data has slit smearing information
735        :return: True if slit smearing info is present, False otherwise
736        """
737        def _check(v):
738            if (v.__class__ == list or v.__class__ == np.ndarray) \
739                and len(v) > 0 and min(v) > 0:
740                return True
741            return False
742        return _check(self.dxl) or _check(self.dxw)
743
744    def clone_without_data(self, length=0, clone=None):
745        """
746        Clone the current object, without copying the data (which
747        will be filled out by a subsequent operation).
748        The data arrays will be initialized to zero.
749
750        :param length: length of the data array to be initialized
751        :param clone: if provided, the data will be copied to clone
752        """
753        from copy import deepcopy
754
755        if clone is None or not issubclass(clone.__class__, Data1D):
756            x = np.zeros(length)
757            dx = np.zeros(length)
758            y = np.zeros(length)
759            dy = np.zeros(length)
760            lam = np.zeros(length)
761            dlam = np.zeros(length)
762            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)
763
764        clone.title = self.title
765        clone.run = self.run
766        clone.filename = self.filename
767        clone.instrument = self.instrument
768        clone.notes = deepcopy(self.notes)
769        clone.process = deepcopy(self.process)
770        clone.detector = deepcopy(self.detector)
771        clone.sample = deepcopy(self.sample)
772        clone.source = deepcopy(self.source)
773        clone.collimation = deepcopy(self.collimation)
774        clone.trans_spectrum = deepcopy(self.trans_spectrum)
775        clone.meta_data = deepcopy(self.meta_data)
776        clone.errors = deepcopy(self.errors)
777
778        return clone
779
780    def _validity_check(self, other):
781        """
782        Checks that the data lengths are compatible.
783        Checks that the x vectors are compatible.
784        Returns error vectors equal to the original
785        error vectors if they were present, or vectors
786        of zeros when none were found.
787
788        :param other: other data set for operation
789        :return: dy for self, dy for other [numpy arrays]
790        :raise ValueError: when lengths are not compatible
791        """
792        dy_other = None
793        if isinstance(other, Data1D):
794            # Check that data lengths are the same
795            if len(self.x) != len(other.x) or \
796                len(self.y) != len(other.y):
797                msg = "Unable to perform operation: data lengths are not equal"
[574adc7]798                raise ValueError(msg)
[959eb01]799            # Here we could also extrapolate between data points
800            TOLERANCE = 0.01
801            for i in range(len(self.x)):
802                if math.fabs((self.x[i] - other.x[i])/self.x[i]) > TOLERANCE:
803                    msg = "Incompatible data sets: x-values do not match"
[574adc7]804                    raise ValueError(msg)
[959eb01]805
806            # Check that the other data set has errors, otherwise
807            # create zero vector
808            dy_other = other.dy
[235f514]809            if other.dy is None or (len(other.dy) != len(other.y)):
[959eb01]810                dy_other = np.zeros(len(other.y))
811
812        # Check that we have errors, otherwise create zero vector
813        dy = self.dy
[235f514]814        if self.dy is None or (len(self.dy) != len(self.y)):
[959eb01]815            dy = np.zeros(len(self.y))
816
817        return dy, dy_other
818
819    def _perform_operation(self, other, operation):
820        """
821        """
822        # First, check the data compatibility
823        dy, dy_other = self._validity_check(other)
824        result = self.clone_without_data(len(self.x))
[235f514]825        if self.dxw is None:
[959eb01]826            result.dxw = None
827        else:
828            result.dxw = np.zeros(len(self.x))
[235f514]829        if self.dxl is None:
[959eb01]830            result.dxl = None
831        else:
832            result.dxl = np.zeros(len(self.x))
833
834        for i in range(len(self.x)):
835            result.x[i] = self.x[i]
836            if self.dx is not None and len(self.x) == len(self.dx):
837                result.dx[i] = self.dx[i]
838            if self.dxw is not None and len(self.x) == len(self.dxw):
839                result.dxw[i] = self.dxw[i]
840            if self.dxl is not None and len(self.x) == len(self.dxl):
841                result.dxl[i] = self.dxl[i]
842
843            a = Uncertainty(self.y[i], dy[i]**2)
844            if isinstance(other, Data1D):
845                b = Uncertainty(other.y[i], dy_other[i]**2)
846                if other.dx is not None:
847                    result.dx[i] *= self.dx[i]
848                    result.dx[i] += (other.dx[i]**2)
849                    result.dx[i] /= 2
850                    result.dx[i] = math.sqrt(result.dx[i])
851                if result.dxl is not None and other.dxl is not None:
852                    result.dxl[i] *= self.dxl[i]
853                    result.dxl[i] += (other.dxl[i]**2)
854                    result.dxl[i] /= 2
855                    result.dxl[i] = math.sqrt(result.dxl[i])
856            else:
857                b = other
858
859            output = operation(a, b)
860            result.y[i] = output.x
861            result.dy[i] = math.sqrt(math.fabs(output.variance))
862        return result
863
864    def _validity_check_union(self, other):
865        """
866        Checks that the two data sets are of the same type
867        (Data1D) so that a union can be performed.
871
872        :param other: other data set for operation
873        :return: bool
874        :raise ValueError: when data types are not compatible
875        """
876        if not isinstance(other, Data1D):
877            msg = "Unable to perform operation: different types of data set"
[574adc7]878            raise ValueError(msg)
[959eb01]879        return True
880
881    def _perform_union(self, other):
882        """
883        """
884        # First, check the data compatibility
885        self._validity_check_union(other)
886        result = self.clone_without_data(len(self.x) + len(other.x))
[235f514]887        if self.dy is None or other.dy is None:
[959eb01]888            result.dy = None
889        else:
890            result.dy = np.zeros(len(self.x) + len(other.x))
[235f514]891        if self.dx is None or other.dx is None:
[959eb01]892            result.dx = None
893        else:
894            result.dx = np.zeros(len(self.x) + len(other.x))
[235f514]895        if self.dxw is None or other.dxw is None:
[959eb01]896            result.dxw = None
897        else:
898            result.dxw = np.zeros(len(self.x) + len(other.x))
[235f514]899        if self.dxl is None or other.dxl is None:
[959eb01]900            result.dxl = None
901        else:
902            result.dxl = np.zeros(len(self.x) + len(other.x))
903
904        result.x = np.append(self.x, other.x)
905        #argsorting
906        ind = np.argsort(result.x)
907        result.x = result.x[ind]
908        result.y = np.append(self.y, other.y)
909        result.y = result.y[ind]
[7432acb]910        if result.dy is not None:
[959eb01]911            result.dy = np.append(self.dy, other.dy)
912            result.dy = result.dy[ind]
913        if result.dx is not None:
914            result.dx = np.append(self.dx, other.dx)
915            result.dx = result.dx[ind]
916        if result.dxw is not None:
917            result.dxw = np.append(self.dxw, other.dxw)
918            result.dxw = result.dxw[ind]
919        if result.dxl is not None:
920            result.dxl = np.append(self.dxl, other.dxl)
921            result.dxl = result.dxl[ind]
922        return result
923
924
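# Usage sketch: combining two Data1D sets with the operators inherited from
# DataInfo.  Point-by-point arithmetic propagates uncertainties through the
# Uncertainty class in _perform_operation; '|' merges and re-sorts in x.
# All arrays below are invented.
#
#     x = np.linspace(0.01, 0.4, 50)
#     d1 = Data1D(x=x, y=np.ones(50), dy=0.1 * np.ones(50))
#     d2 = Data1D(x=x, y=2.0 * np.ones(50), dy=0.1 * np.ones(50))
#     total = d1 + d2      # point-by-point sum with error propagation
#     merged = d1 | d2     # union: points concatenated and sorted in x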
925class Data2D(plottable_2D, DataInfo):
926    """
927    2D data class
928    """
929    ## Units for Q-values
930    Q_unit = '1/A'
931    ## Units for I(Q) values
932    I_unit = '1/cm'
933    ## Vector of Q-values at the center of each bin in x
934    x_bins = None
935    ## Vector of Q-values at the center of each bin in y
936    y_bins = None
937    ## No 2D SESANS data as of yet. Always set it to False
938    isSesans = False
939
940    def __init__(self, data=None, err_data=None, qx_data=None,
941                 qy_data=None, q_data=None, mask=None,
942                 dqx_data=None, dqy_data=None):
943        DataInfo.__init__(self)
944        plottable_2D.__init__(self, data, err_data, qx_data,
945                              qy_data, q_data, mask, dqx_data, dqy_data)
946        self.y_bins = []
947        self.x_bins = []
948
949        if len(self.detector) > 0:
[574adc7]950            raise RuntimeError("Data2D: Detector bank already filled at init")
[959eb01]951
952    def __str__(self):
953        _str = "%s\n" % DataInfo.__str__(self)
954        _str += "Data:\n"
955        _str += "   Type:         %s\n" % self.__class__.__name__
[8d5e11c]956        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
957        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
[959eb01]958        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
959        _str += "   Length:       %g \n" % (len(self.data))
960        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
961        return _str
962
963    def clone_without_data(self, length=0, clone=None):
964        """
965        Clone the current object, without copying the data (which
966        will be filled out by a subsequent operation).
967        The data arrays will be initialized to zero.
968
969        :param length: length of the data array to be initialized
970        :param clone: if provided, the data will be copied to clone
971        """
972        from copy import deepcopy
973
974        if clone is None or not issubclass(clone.__class__, Data2D):
975            data = np.zeros(length)
976            err_data = np.zeros(length)
977            qx_data = np.zeros(length)
978            qy_data = np.zeros(length)
979            q_data = np.zeros(length)
980            mask = np.zeros(length)
981            dqx_data = None
982            dqy_data = None
983            clone = Data2D(data=data, err_data=err_data,
984                           qx_data=qx_data, qy_data=qy_data,
985                           q_data=q_data, mask=mask)
986
[8d5e11c]987        clone._xaxis = self._xaxis
988        clone._yaxis = self._yaxis
989        clone._zaxis = self._zaxis
990        clone._xunit = self._xunit
991        clone._yunit = self._yunit
992        clone._zunit = self._zunit
[4fdcc65]993        clone.x_bins = self.x_bins
994        clone.y_bins = self.y_bins
[8d5e11c]995
[959eb01]996        clone.title = self.title
997        clone.run = self.run
998        clone.filename = self.filename
999        clone.instrument = self.instrument
1000        clone.notes = deepcopy(self.notes)
1001        clone.process = deepcopy(self.process)
1002        clone.detector = deepcopy(self.detector)
1003        clone.sample = deepcopy(self.sample)
1004        clone.source = deepcopy(self.source)
1005        clone.collimation = deepcopy(self.collimation)
1006        clone.trans_spectrum = deepcopy(self.trans_spectrum)
1007        clone.meta_data = deepcopy(self.meta_data)
1008        clone.errors = deepcopy(self.errors)
1009
1010        return clone
1011
1012    def _validity_check(self, other):
1013        """
1014        Checks that the data lengths are compatible.
1015        Checks that the qx and qy vectors are compatible.
1016        Returns error vectors equal to the original
1017        error vectors if they were present, or vectors
1018        of zeros when none were found.
1019
1020        :param other: other data set for operation
1021        :return: err_data for self, err_data for other [numpy arrays]
1022        :raise ValueError: when lengths are not compatible
1023        """
1024        err_other = None
1025        TOLERANCE = 0.01
1026        if isinstance(other, Data2D):
1027            # Check that data lengths are the same
1028            if len(self.data) != len(other.data) or \
1029                len(self.qx_data) != len(other.qx_data) or \
1030                len(self.qy_data) != len(other.qy_data):
1031                msg = "Unable to perform operation: data lengths are not equal"
[574adc7]1032                raise ValueError(msg)
[959eb01]1033            for ind in range(len(self.data)):
1034                if math.fabs((self.qx_data[ind] - other.qx_data[ind])/self.qx_data[ind]) > TOLERANCE:
1035                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
[574adc7]1036                    raise ValueError(msg)
[959eb01]1037                if math.fabs((self.qy_data[ind] - other.qy_data[ind])/self.qy_data[ind]) > TOLERANCE:
1038                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
[574adc7]1039                    raise ValueError(msg)
[959eb01]1040
1041            # Check that the other data set has errors, otherwise
1042            # create zero vector
1042            err_other = other.err_data
[235f514]1043            if other.err_data is None or \
[959eb01]1044                (len(other.err_data) != len(other.data)):
1045                err_other = np.zeros(len(other.data))
1046
1047        # Check that we have errors, otherwise create zero vector
1048        err = self.err_data
[235f514]1049        if self.err_data is None or \
[959eb01]1050            (len(self.err_data) != len(self.data)):
1051            err = np.zeros(len(self.data))
1052        return err, err_other
1053
1054    def _perform_operation(self, other, operation):
1055        """
1056        Perform 2D operations between data sets
1057
1058        :param other: other data set
1059        :param operation: function defining the operation
1060        """
1061        # First, check the data compatibility
1062        dy, dy_other = self._validity_check(other)
1063        result = self.clone_without_data(np.size(self.data))
[235f514]1064        if self.dqx_data is None or self.dqy_data is None:
[959eb01]1065            result.dqx_data = None
1066            result.dqy_data = None
1067        else:
1068            result.dqx_data = np.zeros(len(self.data))
1069            result.dqy_data = np.zeros(len(self.data))
1070        for i in range(np.size(self.data)):
1071            result.data[i] = self.data[i]
1072            if self.err_data is not None and \
1073                            np.size(self.data) == np.size(self.err_data):
1074                result.err_data[i] = self.err_data[i]
1075            if self.dqx_data is not None:
1076                result.dqx_data[i] = self.dqx_data[i]
1077            if self.dqy_data is not None:
1078                result.dqy_data[i] = self.dqy_data[i]
1079            result.qx_data[i] = self.qx_data[i]
1080            result.qy_data[i] = self.qy_data[i]
1081            result.q_data[i] = self.q_data[i]
1082            result.mask[i] = self.mask[i]
1083
1084            a = Uncertainty(self.data[i], dy[i]**2)
1085            if isinstance(other, Data2D):
1086                b = Uncertainty(other.data[i], dy_other[i]**2)
1087                if other.dqx_data is not None and \
1088                        result.dqx_data is not None:
1089                    result.dqx_data[i] *= self.dqx_data[i]
1090                    result.dqx_data[i] += (other.dqx_data[i]**2)
1091                    result.dqx_data[i] /= 2
1092                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
1093                if other.dqy_data is not None and \
1094                        result.dqy_data is not None:
1095                    result.dqy_data[i] *= self.dqy_data[i]
1096                    result.dqy_data[i] += (other.dqy_data[i]**2)
1097                    result.dqy_data[i] /= 2
1098                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
1099            else:
1100                b = other
1101            output = operation(a, b)
1102            result.data[i] = output.x
1103            result.err_data[i] = math.sqrt(math.fabs(output.variance))
1104        return result
1105
1106    def _validity_check_union(self, other):
1107        """
1108        Checks that the two data sets are of the same type
1109        (Data2D) so that a union can be performed.
1113
1114        :param other: other data set for operation
1115        :return: bool
1116        :raise ValueError: when data types are not compatible
1117        """
1118        if not isinstance(other, Data2D):
1119            msg = "Unable to perform operation: different types of data set"
[574adc7]1120            raise ValueError(msg)
[959eb01]1121        return True
1122
1123    def _perform_union(self, other):
1124        """
1125        Perform the union of two 2D data sets
1126
1127        :param other: other data set
1129        """
1130        # First, check the data compatibility
1131        self._validity_check_union(other)
1132        result = self.clone_without_data(np.size(self.data) + \
1133                                         np.size(other.data))
1134        result.xmin = self.xmin
1135        result.xmax = self.xmax
1136        result.ymin = self.ymin
1137        result.ymax = self.ymax
[235f514]1138        if self.dqx_data is None or self.dqy_data is None or \
1139                other.dqx_data is None or other.dqy_data is None:
[959eb01]1140            result.dqx_data = None
1141            result.dqy_data = None
1142        else:
1143            result.dqx_data = np.zeros(len(self.data) + \
1144                                       np.size(other.data))
1145            result.dqy_data = np.zeros(len(self.data) + \
1146                                       np.size(other.data))
1147
1148        result.data = np.append(self.data, other.data)
1149        result.qx_data = np.append(self.qx_data, other.qx_data)
1150        result.qy_data = np.append(self.qy_data, other.qy_data)
1151        result.q_data = np.append(self.q_data, other.q_data)
1152        result.mask = np.append(self.mask, other.mask)
1153        if result.err_data is not None:
1154            result.err_data = np.append(self.err_data, other.err_data)
1155        if self.dqx_data is not None:
1156            result.dqx_data = np.append(self.dqx_data, other.dqx_data)
1157        if self.dqy_data is not None:
1158            result.dqy_data = np.append(self.dqy_data, other.dqy_data)
1159
1160        return result
1161
1162
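# Usage sketch: arithmetic between two Data2D sets defined on the same
# (invented) q-grid; errors are propagated point by point as in Data1D.
#
#     qx = np.array([-0.01, 0.01, -0.01, 0.01])
#     qy = np.array([-0.01, -0.01, 0.01, 0.01])
#     grid = dict(qx_data=qx, qy_data=qy, q_data=np.sqrt(qx**2 + qy**2),
#                 mask=np.ones(4, dtype=bool))
#     sample = Data2D(data=2.0 * np.ones(4), err_data=0.1 * np.ones(4), **grid)
#     empty = Data2D(data=np.ones(4), err_data=0.1 * np.ones(4), **grid)
#     subtracted = sample - empty   # point-by-point with error propagation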
1163def combine_data_info_with_plottable(data, datainfo):
1164    """
[8d5e11c]1165    A function that combines the metadata held in a DataInfo object with a
1166    plottable_1D or plottable_2D data object.
[959eb01]1167
1168    :param data: A plottable_1D or plottable_2D data object
    :param datainfo: A DataInfo object holding the metadata to attach
1169    :return: A fully specified Data1D or Data2D object
1170    """
1171
1172    final_dataset = None
1173    if isinstance(data, plottable_1D):
[17e257b5]1174        final_dataset = Data1D(data.x, data.y, isSesans=datainfo.isSesans)
[959eb01]1175        final_dataset.dx = data.dx
1176        final_dataset.dy = data.dy
1177        final_dataset.dxl = data.dxl
1178        final_dataset.dxw = data.dxw
[17e257b5]1179        final_dataset.x_unit = data._xunit
1180        final_dataset.y_unit = data._yunit
[959eb01]1181        final_dataset.xaxis(data._xaxis, data._xunit)
1182        final_dataset.yaxis(data._yaxis, data._yunit)
1183    elif isinstance(data, plottable_2D):
[8d5e11c]1184        final_dataset = Data2D(data.data, data.err_data, data.qx_data,
1185                               data.qy_data, data.q_data, data.mask,
1186                               data.dqx_data, data.dqy_data)
[959eb01]1187        final_dataset.xaxis(data._xaxis, data._xunit)
1188        final_dataset.yaxis(data._yaxis, data._yunit)
1189        final_dataset.zaxis(data._zaxis, data._zunit)
[4fdcc65]1190        final_dataset.y_bins = data.y_bins
1191        final_dataset.x_bins = data.x_bins
[959eb01]1192    else:
[8d5e11c]1193        return_string = ("Should Never Happen: _combine_data_info_with_plottabl"
1194                         "e input is not a plottable1d or plottable2d data "
1195                         "object")
[959eb01]1196        return return_string
1197
[beba407]1198    if hasattr(data, "xmax"):
1199        final_dataset.xmax = data.xmax
1200    if hasattr(data, "ymax"):
1201        final_dataset.ymax = data.ymax
1202    if hasattr(data, "xmin"):
1203        final_dataset.xmin = data.xmin
1204    if hasattr(data, "ymin"):
1205        final_dataset.ymin = data.ymin
[959eb01]1206    final_dataset.isSesans = datainfo.isSesans
1207    final_dataset.title = datainfo.title
1208    final_dataset.run = datainfo.run
1209    final_dataset.run_name = datainfo.run_name
1210    final_dataset.filename = datainfo.filename
1211    final_dataset.notes = datainfo.notes
1212    final_dataset.process = datainfo.process
1213    final_dataset.instrument = datainfo.instrument
1214    final_dataset.detector = datainfo.detector
1215    final_dataset.sample = datainfo.sample
1216    final_dataset.source = datainfo.source
1217    final_dataset.collimation = datainfo.collimation
1218    final_dataset.trans_spectrum = datainfo.trans_spectrum
1219    final_dataset.meta_data = datainfo.meta_data
1220    final_dataset.errors = datainfo.errors
1221    return final_dataset
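# Usage sketch: how a reader typically uses the function above.  The raw
# arrays and the metadata are filled separately and then merged (file name
# and values invented).
#
#     raw = plottable_1D(x=np.linspace(0.01, 0.4, 50), y=np.ones(50))
#     info = DataInfo()
#     info.filename = "example.dat"
#     data_obj = combine_data_info_with_plottable(raw, info)
#     # data_obj is now a Data1D carrying both the curve and the metadata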