source: sasview/src/sas/sascalc/dataloader/data_info.py @ b5db35d

1"""
2    Module that contains classes to hold information read from
3    reduced data files.
4
5    A good description of the data members can be found in
6    the CanSAS 1D XML data format:
7
8    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
9"""
10#####################################################################
11#This software was developed by the University of Tennessee as part of the
12#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
13#project funded by the US National Science Foundation.
14#See the license text in license.txt
15#copyright 2008, University of Tennessee
16######################################################################
17
18
19#TODO: Keep track of data manipulation in the 'process' data structure.
20#TODO: This module should be independent of plottables. We should write
21#        an adapter class for plottables when needed.
22
23#from sas.guitools.plottables import Data1D as plottable_1D
24from sas.sascalc.data_util.uncertainty import Uncertainty
25import numpy
26import math
27
28class plottable_1D(object):
29    """
30    Data1D is a place holder for 1D plottables.
31    """
32    # The slit smearing lengths (dxl and dxw) below should be mutually
33    # exclusive with the presence of Qdev (dx)
34    x = None
35    y = None
36    dx = None
37    dy = None
38    ## Slit smearing length
39    dxl = None
40    ## Slit smearing width
41    dxw = None
42    ## SESANS specific params (wavelengths for spin echo length calculation)
43    lam = None
44    dlam = None
45
46    # Units
47    _xaxis = ''
48    _xunit = ''
49    _yaxis = ''
50    _yunit = ''
51
52    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
53        self.x = numpy.asarray(x)
54        self.y = numpy.asarray(y)
55        if dx is not None:
56            self.dx = numpy.asarray(dx)
57        if dy is not None:
58            self.dy = numpy.asarray(dy)
59        if dxl is not None:
60            self.dxl = numpy.asarray(dxl)
61        if dxw is not None:
62            self.dxw = numpy.asarray(dxw)
63        if lam is not None:
64            self.lam = numpy.asarray(lam)
65        if dlam is not None:
66            self.dlam = numpy.asarray(dlam)
67
68    def xaxis(self, label, unit):
69        """
70        set the x axis label and unit
71        """
72        self._xaxis = label
73        self._xunit = unit
74
75    def yaxis(self, label, unit):
76        """
77        set the y axis label and unit
78        """
79        self._yaxis = label
80        self._yunit = unit
81
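# Illustrative usage sketch (the numeric values are arbitrary): a reduced 1D
# curve can be wrapped directly in plottable_1D. Lists are converted to numpy
# arrays by the constructor, and axis labels/units are attached afterwards
# through xaxis() and yaxis().
def _example_plottable_1d():  # documentation-only helper, not part of the module API
    curve = plottable_1D(x=[0.001, 0.002, 0.003],
                         y=[101.0, 95.2, 90.7],
                         dy=[1.5, 1.4, 1.3])
    curve.xaxis("Q", "1/A")
    curve.yaxis("Intensity", "1/cm")
    return curve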
82
83class plottable_2D(object):
84    """
85    Data2D is a place holder for 2D plottables.
86    """
87    xmin = None
88    xmax = None
89    ymin = None
90    ymax = None
91    data = None
92    qx_data = None
93    qy_data = None
94    q_data = None
95    err_data = None
96    dqx_data = None
97    dqy_data = None
98    mask = None
99
100    # Units
101    _xaxis = ''
102    _xunit = ''
103    _yaxis = ''
104    _yunit = ''
105    _zaxis = ''
106    _zunit = ''
107
108    def __init__(self, data=None, err_data=None, qx_data=None,
109                 qy_data=None, q_data=None, mask=None,
110                 dqx_data=None, dqy_data=None):
111        self.data = numpy.asarray(data)
112        self.qx_data = numpy.asarray(qx_data)
113        self.qy_data = numpy.asarray(qy_data)
114        self.q_data = numpy.asarray(q_data)
115        self.mask = numpy.asarray(mask)
116        self.err_data = numpy.asarray(err_data)
117        if dqx_data is not None:
118            self.dqx_data = numpy.asarray(dqx_data)
119        if dqy_data is not None:
120            self.dqy_data = numpy.asarray(dqy_data)
121
122    def xaxis(self, label, unit):
123        """
124        set the x axis label and unit
125        """
126        self._xaxis = label
127        self._xunit = unit
128
129    def yaxis(self, label, unit):
130        """
131        set the y axis label and unit
132        """
133        self._yaxis = label
134        self._yunit = unit
135
136    def zaxis(self, label, unit):
137        """
138        set the z axis label and unit
139        """
140        self._zaxis = label
141        self._zunit = unit
142
143
144class Vector(object):
145    """
146    Vector class to hold multi-dimensional objects
147    """
148    ## x component
149    x = None
150    ## y component
151    y = None
152    ## z component
153    z = None
154
155    def __init__(self, x=None, y=None, z=None):
156        """
157        Initialization. Components that are not
158        set are set to None by default.
159
160        :param x: x component
161        :param y: y component
162        :param z: z component
163        """
164        self.x = x
165        self.y = y
166        self.z = z
167
168    def __str__(self):
169        msg = "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
170        return msg
171
172
173class Detector(object):
174    """
175    Class to hold detector information
176    """
177    ## Name of the instrument [string]
178    name = None
179    ## Sample to detector distance [float] [mm]
180    distance = None
181    distance_unit = 'mm'
182    ## Offset of this detector position in X, Y,
183    #(and Z if necessary) [Vector] [mm]
184    offset = None
185    offset_unit = 'm'
186    ## Orientation (rotation) of this detector in roll,
187    # pitch, and yaw [Vector] [degrees]
188    orientation = None
189    orientation_unit = 'degree'
190    ## Center of the beam on the detector in X and Y
191    #(and Z if necessary) [Vector] [mm]
192    beam_center = None
193    beam_center_unit = 'mm'
194    ## Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
195    pixel_size = None
196    pixel_size_unit = 'mm'
197    ## Slit length of the instrument for this detector.[float] [mm]
198    slit_length = None
199    slit_length_unit = 'mm'
200
201    def __init__(self):
202        """
203        Initialize class attributes that are objects...
204        """
205        self.offset = Vector()
206        self.orientation = Vector()
207        self.beam_center = Vector()
208        self.pixel_size = Vector()
209
210    def __str__(self):
211        _str = "Detector:\n"
212        _str += "   Name:         %s\n" % self.name
213        _str += "   Distance:     %s [%s]\n" % \
214            (str(self.distance), str(self.distance_unit))
215        _str += "   Offset:       %s [%s]\n" % \
216            (str(self.offset), str(self.offset_unit))
217        _str += "   Orientation:  %s [%s]\n" % \
218            (str(self.orientation), str(self.orientation_unit))
219        _str += "   Beam center:  %s [%s]\n" % \
220            (str(self.beam_center), str(self.beam_center_unit))
221        _str += "   Pixel size:   %s [%s]\n" % \
222            (str(self.pixel_size), str(self.pixel_size_unit))
223        _str += "   Slit length:  %s [%s]\n" % \
224            (str(self.slit_length), str(self.slit_length_unit))
225        return _str
226
227
228class Aperture(object):
229    ## Name
230    name = None
231    ## Type
232    type = None
233    ## Size name
234    size_name = None
235    ## Aperture size [Vector]
236    size = None
237    size_unit = 'mm'
238    ## Aperture distance [float]
239    distance = None
240    distance_unit = 'mm'
241
242    def __init__(self):
243        self.size = Vector()
244
245
246class Collimation(object):
247    """
248    Class to hold collimation information
249    """
250    ## Name
251    name = None
252    ## Length [float] [mm]
253    length = None
254    length_unit = 'mm'
255    ## Aperture
256    aperture = None
257
258    def __init__(self):
259        self.aperture = []
260
261    def __str__(self):
262        _str = "Collimation:\n"
263        _str += "   Length:       %s [%s]\n" % \
264            (str(self.length), str(self.length_unit))
265        for item in self.aperture:
266            _str += "   Aperture size:%s [%s]\n" % \
267                (str(item.size), str(item.size_unit))
268            _str += "   Aperture_dist:%s [%s]\n" % \
269                (str(item.distance), str(item.distance_unit))
270        return _str
271
272
273class Source(object):
274    """
275    Class to hold source information
276    """
277    ## Name
278    name = None
279    ## Radiation type [string]
280    radiation = None
281    ## Beam size name
282    beam_size_name = None
283    ## Beam size [Vector] [mm]
284    beam_size = None
285    beam_size_unit = 'mm'
286    ## Beam shape [string]
287    beam_shape = None
288    ## Wavelength [float] [Angstrom]
289    wavelength = None
290    wavelength_unit = 'A'
291    ## Minimum wavelength [float] [Angstrom]
292    wavelength_min = None
293    wavelength_min_unit = 'nm'
294    ## Maximum wavelength [float] [Angstrom]
295    wavelength_max = None
296    wavelength_max_unit = 'nm'
297    ## Wavelength spread [float] [Angstrom]
298    wavelength_spread = None
299    wavelength_spread_unit = 'percent'
300
301    def __init__(self):
302        self.beam_size = Vector()
303
304    def __str__(self):
305        _str = "Source:\n"
306        _str += "   Radiation:    %s\n" % str(self.radiation)
307        _str += "   Shape:        %s\n" % str(self.beam_shape)
308        _str += "   Wavelength:   %s [%s]\n" % \
309            (str(self.wavelength), str(self.wavelength_unit))
310        _str += "   Waveln_min:   %s [%s]\n" % \
311            (str(self.wavelength_min), str(self.wavelength_min_unit))
312        _str += "   Waveln_max:   %s [%s]\n" % \
313            (str(self.wavelength_max), str(self.wavelength_max_unit))
314        _str += "   Waveln_spread:%s [%s]\n" % \
315            (str(self.wavelength_spread), str(self.wavelength_spread_unit))
316        _str += "   Beam_size:    %s [%s]\n" % \
317            (str(self.beam_size), str(self.beam_size_unit))
318        return _str
319
320
321"""
322Definitions of radiation types
323"""
324NEUTRON = 'neutron'
325XRAY = 'x-ray'
326MUON = 'muon'
327ELECTRON = 'electron'
328
329
330class Sample(object):
331    """
332    Class to hold the sample description
333    """
334    ## Short name for sample
335    name = ''
336    ## ID
337    ID = ''
338    ## Thickness [float] [mm]
339    thickness = None
340    thickness_unit = 'mm'
341    ## Transmission [float] [fraction]
342    transmission = None
343    ## Temperature [float] [No Default]
344    temperature = None
345    temperature_unit = None
346    ## Position [Vector] [mm]
347    position = None
348    position_unit = 'mm'
349    ## Orientation [Vector] [degrees]
350    orientation = None
351    orientation_unit = 'degree'
352    ## Details
353    details = None
354
355    def __init__(self):
356        self.position = Vector()
357        self.orientation = Vector()
358        self.details = []
359
360    def __str__(self):
361        _str = "Sample:\n"
362        _str += "   ID:           %s\n" % str(self.ID)
363        _str += "   Transmission: %s\n" % str(self.transmission)
364        _str += "   Thickness:    %s [%s]\n" % \
365            (str(self.thickness), str(self.thickness_unit))
366        _str += "   Temperature:  %s [%s]\n" % \
367            (str(self.temperature), str(self.temperature_unit))
368        _str += "   Position:     %s [%s]\n" % \
369            (str(self.position), str(self.position_unit))
370        _str += "   Orientation:  %s [%s]\n" % \
371            (str(self.orientation), str(self.orientation_unit))
372
373        _str += "   Details:\n"
374        for item in self.details:
375            _str += "      %s\n" % item
376
377        return _str
378
379
380class Process(object):
381    """
382    Class that holds information about the processes
383    performed on the data.
384    """
385    name = ''
386    date = ''
387    description = ''
388    term = None
389    notes = None
390
391    def __init__(self):
392        self.term = []
393        self.notes = []
394
395    def is_empty(self):
396        """
397            Return True if the object is empty
398        """
399        return len(self.name) == 0 and len(self.date) == 0 and len(self.description) == 0 \
400            and len(self.term) == 0 and len(self.notes) == 0
401
402    def single_line_desc(self):
403        """
404            Return a single line string representing the process
405        """
406        return "%s %s %s" % (self.name, self.date, self.description)
407
408    def __str__(self):
409        _str = "Process:\n"
410        _str += "   Name:         %s\n" % self.name
411        _str += "   Date:         %s\n" % self.date
412        _str += "   Description:  %s\n" % self.description
413        for item in self.term:
414            _str += "   Term:         %s\n" % item
415        for item in self.notes:
416            _str += "   Note:         %s\n" % item
417        return _str
418
419
420class TransmissionSpectrum(object):
421    """
422    Class that holds information about transmission spectrum
423    for white beams and spallation sources.
424    """
425    name = ''
426    timestamp = ''
427    ## Wavelength (float) [A]
428    wavelength = None
429    wavelength_unit = 'A'
430    ## Transmission (float) [unit less]
431    transmission = None
432    transmission_unit = ''
433    ## Transmission Deviation (float) [unit less]
434    transmission_deviation = None
435    transmission_deviation_unit = ''
436
437    def __init__(self):
438        self.wavelength = []
439        self.transmission = []
440        self.transmission_deviation = []
441
442    def __str__(self):
443        _str = "Transmission Spectrum:\n"
444        _str += "   Name:             \t{0}\n".format(self.name)
445        _str += "   Timestamp:        \t{0}\n".format(self.timestamp)
446        _str += "   Wavelength unit:  \t{0}\n".format(self.wavelength_unit)
447        _str += "   Transmission unit:\t{0}\n".format(self.transmission_unit)
448        _str += "   Trans. Dev. unit:  \t{0}\n".format(\
449                                            self.transmission_deviation_unit)
450        length_list = [len(self.wavelength), len(self.transmission), \
451                len(self.transmission_deviation)]
452        _str += "   Number of Pts:    \t{0}\n".format(max(length_list))
453        return _str
454
455
456class DataInfo(object):
457    """
458    Class to hold the data read from a file.
459    It includes four blocks of data for the
460    instrument description, the sample description,
461    the data itself and any other meta data.
462    """
463    ## Title
464    title = ''
465    ## Run number
466    run = None
467    ## Run name
468    run_name = None
469    ## File name
470    filename = ''
471    ## Notes
472    notes = None
473    ## Processes (Action on the data)
474    process = None
475    ## Instrument name
476    instrument = ''
477    ## Detector information
478    detector = None
479    ## Sample information
480    sample = None
481    ## Source information
482    source = None
483    ## Collimation information
484    collimation = None
486    ## Transmission Spectrum info
486    trans_spectrum = None
487    ## Additional meta-data
488    meta_data = None
489    ## Loading errors
490    errors = None
491
492    def __init__(self):
493        """
494        Initialization
495        """
496        ## Title
497        self.title = ''
498        ## Run number
499        self.run = []
500        self.run_name = {}
501        ## File name
502        self.filename = ''
503        ## Notes
504        self.notes = []
505        ## Processes (Action on the data)
506        self.process = []
507        ## Instrument name
508        self.instrument = ''
509        ## Detector information
510        self.detector = []
511        ## Sample information
512        self.sample = Sample()
513        ## Source information
514        self.source = Source()
515        ## Collimation information
516        self.collimation = []
517        ## Transmission Spectrum
518        self.trans_spectrum = []
519        ## Additional meta-data
520        self.meta_data = {}
521        ## Loading errors
522        self.errors = []
523
524    def append_empty_process(self):
525        """
526        """
527        self.process.append(Process())
528
529    def add_notes(self, message=""):
530        """
531        Add notes to datainfo
532        """
533        self.notes.append(message)
534
535    def __str__(self):
536        """
537        Nice printout
538        """
539        _str = "File:            %s\n" % self.filename
540        _str += "Title:           %s\n" % self.title
541        _str += "Run:             %s\n" % str(self.run)
542        _str += "Instrument:      %s\n" % str(self.instrument)
543        _str += "%s\n" % str(self.sample)
544        _str += "%s\n" % str(self.source)
545        for item in self.detector:
546            _str += "%s\n" % str(item)
547        for item in self.collimation:
548            _str += "%s\n" % str(item)
549        for item in self.process:
550            _str += "%s\n" % str(item)
551        for item in self.notes:
552            _str += "%s\n" % str(item)
553        for item in self.trans_spectrum:
554            _str += "%s\n" % str(item)
555        return _str
556
557    # Private method to perform operation. Not implemented for DataInfo,
558    # but should be implemented for each data class inherited from DataInfo
559    # that holds actual data (ex.: Data1D)
560    def _perform_operation(self, other, operation):
561        """
562        Private method to perform operation. Not implemented for DataInfo,
563        but should be implemented for each data class inherited from DataInfo
564        that holds actual data (ex.: Data1D)
565        """
566        return NotImplemented
567
568    def _perform_union(self, other):
569        """
570        Private method to perform union operation. Not implemented for DataInfo,
571        but should be implemented for each data class inherited from DataInfo
572        that holds actual data (ex.: Data1D)
573        """
574        return NotImplemented
575
576    def __add__(self, other):
577        """
578        Add two data sets
579
580        :param other: data set to add to the current one
581        :return: new data set
582        :raise ValueError: raised when two data sets are incompatible
583        """
584        def operation(a, b):
585            return a + b
586        return self._perform_operation(other, operation)
587
588    def __radd__(self, other):
589        """
590        Add two data sets
591
592        :param other: data set to add to the current one
593        :return: new data set
594        :raise ValueError: raised when two data sets are incompatible
595        """
596        def operation(a, b):
597            return b + a
598        return self._perform_operation(other, operation)
599
600    def __sub__(self, other):
601        """
602        Subtract two data sets
603
604        :param other: data set to subtract from the current one
605        :return: new data set
606        :raise ValueError: raised when two data sets are incompatible
607        """
608        def operation(a, b):
609            return a - b
610        return self._perform_operation(other, operation)
611
612    def __rsub__(self, other):
613        """
614        Subtract two data sets
615
616        :param other: data set to subtract from the current one
617        :return: new data set
618        :raise ValueError: raised when two data sets are incompatible
619        """
620        def operation(a, b):
621            return b - a
622        return self._perform_operation(other, operation)
623
624    def __mul__(self, other):
625        """
626        Multiply two data sets
627
628        :param other: data set to multiply with the current one
629        :return: new data set
630        :raise ValueError: raised when two data sets are incompatible
631        """
632        def operation(a, b):
633            return a * b
634        return self._perform_operation(other, operation)
635
636    def __rmul__(self, other):
637        """
638        Multiply two data sets
639
640        :param other: data set to multiply with the current one
641        :return: new data set
642        :raise ValueError: raised when two data sets are incompatible
643        """
644        def operation(a, b):
645            return b * a
646        return self._perform_operation(other, operation)
647
648    def __div__(self, other):
649        """
650        Divide a data set by another
651
652        :param other: data set that the current one is divided by
653        :return: new data set
654        :raise ValueError: raised when two data sets are incompatible
655        """
656        def operation(a, b):
657            return a/b
658        return self._perform_operation(other, operation)
659
660    def __rdiv__(self, other):
661        """
662        Divide a data set by another
663
664        :param other: data set that the current one is divided by
665        :return: new data set
666        :raise ValueError: raised when two data sets are incompatible
667        """
668        def operation(a, b):
669            return b/a
670        return self._perform_operation(other, operation)
671
672    def __or__(self, other):
673        """
674        Union a data set with another
675
676        :param other: data set to be unified
677        :return: new data set
678        :raise ValueError: raised when two data sets are incompatible
679        """
680        return self._perform_union(other)
681
682    def __ror__(self, other):
683        """
684        Union a data set with another
685
686        :param other: data set to be unified
687        :return: new data set
688        :raise ValueError: raised when two data sets are incompatible
689        """
690        return self._perform_union(other)
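
# Illustrative sketch (arbitrary placeholder values) of how the metadata blocks
# held by DataInfo are typically filled in by a reader: scalar fields are set
# directly, while detectors, processes and notes are appended to their lists.
def _example_datainfo_metadata():  # documentation-only helper, not part of the module API
    info = DataInfo()
    info.title = "Example run"
    info.filename = "example_run.xml"
    info.sample.ID = "sample-1"
    info.sample.thickness = 1.0              # [mm], see thickness_unit
    info.source.radiation = NEUTRON
    info.source.wavelength = 6.0             # [A], see wavelength_unit
    info.detector.append(Detector())
    info.detector[0].name = "example detector"
    info.append_empty_process()
    info.process[0].name = "data reduction"
    info.add_notes("illustrative note")
    return info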
691
692class Data1D(plottable_1D, DataInfo):
693    """
694    1D data class
695    """
696    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=False):
697        self.isSesans = isSesans
698        DataInfo.__init__(self)
699        plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
700        if self.isSesans:
701            # SESANS data: spin-echo length on x, polarisation on y
702            self.x_unit = 'A'
703            self.y_unit = 'pol'
704        else:
705            # SANS data: Q on x, scattered intensity on y
706            self.x_unit = '1/A'
707            self.y_unit = '1/cm'
708
709    def __str__(self):
710        """
711        Nice printout
712        """
713        _str = "%s\n" % DataInfo.__str__(self)
714        _str += "Data:\n"
715        _str += "   Type:         %s\n" % self.__class__.__name__
716        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
717        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
718        _str += "   Length:       %g\n" % len(self.x)
719        return _str
720
721    def is_slit_smeared(self):
722        """
723        Check whether the data has slit smearing information
724        :return: True if slit smearing info is present, False otherwise
725        """
726        def _check(v):
727            if (v.__class__ == list or v.__class__ == numpy.ndarray) \
728                and len(v) > 0 and min(v) > 0:
729                return True
730            return False
731        return _check(self.dxl) or _check(self.dxw)
732
733    def clone_without_data(self, length=0, clone=None):
734        """
735        Clone the current object, without copying the data (which
736        will be filled out by a subsequent operation).
737        The data arrays will be initialized to zero.
738
739        :param length: length of the data array to be initialized
740        :param clone: if provided, the data will be copied to clone
741        """
742        from copy import deepcopy
743
744        if clone is None or not issubclass(clone.__class__, Data1D):
745            x = numpy.zeros(length)
746            dx = numpy.zeros(length)
747            y = numpy.zeros(length)
748            dy = numpy.zeros(length)
749            lam = numpy.zeros(length)
750            dlam = numpy.zeros(length)
751            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)
752
753        clone.title = self.title
754        clone.run = self.run
755        clone.filename = self.filename
756        clone.instrument = self.instrument
757        clone.notes = deepcopy(self.notes)
758        clone.process = deepcopy(self.process)
759        clone.detector = deepcopy(self.detector)
760        clone.sample = deepcopy(self.sample)
761        clone.source = deepcopy(self.source)
762        clone.collimation = deepcopy(self.collimation)
763        clone.trans_spectrum = deepcopy(self.trans_spectrum)
764        clone.meta_data = deepcopy(self.meta_data)
765        clone.errors = deepcopy(self.errors)
766
767        return clone
768
769    def _validity_check(self, other):
770        """
771        Checks that the data lengths are compatible.
772        Checks that the x vectors are compatible.
773        Returns errors vectors equal to original
774        errors vectors if they were present or vectors
775        of zeros when none was found.
776
777        :param other: other data set for operation
778        :return: dy for self, dy for other [numpy arrays]
779        :raise ValueError: when lengths are not compatible
780        """
781        dy_other = None
782        if isinstance(other, Data1D):
783            # Check that data lengths are the same
784            if len(self.x) != len(other.x) or \
785                len(self.y) != len(other.y):
786                msg = "Unable to perform operation: data length are not equal"
787                raise ValueError, msg
788            # Here we could also extrapolate between data points
789            TOLERANCE = 0.01
790            for i in range(len(self.x)):
791                if math.fabs((self.x[i] - other.x[i])/self.x[i]) > TOLERANCE:
792                    msg = "Incompatible data sets: x-values do not match"
793                    raise ValueError(msg)
794
795            # Check that the other data set has errors, otherwise
796            # create zero vector
797            dy_other = other.dy
798            if other.dy is None or (len(other.dy) != len(other.y)):
799                dy_other = numpy.zeros(len(other.y))
800
801        # Check that we have errors, otherwise create zero vector
802        dy = self.dy
803        if self.dy is None or (len(self.dy) != len(self.y)):
804            dy = numpy.zeros(len(self.y))
805
806        return dy, dy_other
807
808    def _perform_operation(self, other, operation):
809        """
810        """
811        # First, check the data compatibility
812        dy, dy_other = self._validity_check(other)
813        result = self.clone_without_data(len(self.x))
814        if self.dxw is None:
815            result.dxw = None
816        else:
817            result.dxw = numpy.zeros(len(self.x))
818        if self.dxl is None:
819            result.dxl = None
820        else:
821            result.dxl = numpy.zeros(len(self.x))
822
823        for i in range(len(self.x)):
824            result.x[i] = self.x[i]
825            if self.dx is not None and len(self.x) == len(self.dx):
826                result.dx[i] = self.dx[i]
827            if self.dxw is not None and len(self.x) == len(self.dxw):
828                result.dxw[i] = self.dxw[i]
829            if self.dxl is not None and len(self.x) == len(self.dxl):
830                result.dxl[i] = self.dxl[i]
831
832            a = Uncertainty(self.y[i], dy[i]**2)
833            if isinstance(other, Data1D):
834                b = Uncertainty(other.y[i], dy_other[i]**2)
835                if self.dx is not None and other.dx is not None:
836                    result.dx[i] *= self.dx[i]
837                    result.dx[i] += (other.dx[i]**2)
838                    result.dx[i] /= 2
839                    result.dx[i] = math.sqrt(result.dx[i])
840                if result.dxl is not None and other.dxl is not None:
841                    result.dxl[i] *= self.dxl[i]
842                    result.dxl[i] += (other.dxl[i]**2)
843                    result.dxl[i] /= 2
844                    result.dxl[i] = math.sqrt(result.dxl[i])
845            else:
846                b = other
847
848            output = operation(a, b)
849            result.y[i] = output.x
850            result.dy[i] = math.sqrt(math.fabs(output.variance))
851        return result
852
853    def _validity_check_union(self, other):
854        """
855        Checks that the other data set is a compatible type so that
856        a union of the two data sets can be performed.
857
858        The union does not compare or align individual points;
859        _perform_union simply concatenates the two sets and re-sorts by x.
860
861        :param other: other data set for operation
862        :return: bool
863        :raise ValueError: when data types are not compatible
864        """
865        if not isinstance(other, Data1D):
866            msg = "Unable to perform operation: different types of data set"
867            raise ValueError(msg)
868        return True
869
870    def _perform_union(self, other):
871        """
872        """
873        # First, check the data compatibility
874        self._validity_check_union(other)
875        result = self.clone_without_data(len(self.x) + len(other.x))
876        if self.dy is None or other.dy is None:
877            result.dy = None
878        else:
879            result.dy = numpy.zeros(len(self.x) + len(other.x))
880        if self.dx is None or other.dx is None:
881            result.dx = None
882        else:
883            result.dx = numpy.zeros(len(self.x) + len(other.x))
884        if self.dxw is None or other.dxw is None:
885            result.dxw = None
886        else:
887            result.dxw = numpy.zeros(len(self.x) + len(other.x))
888        if self.dxl is None or other.dxl is None:
889            result.dxl = None
890        else:
891            result.dxl = numpy.zeros(len(self.x) + len(other.x))
892
893        result.x = numpy.append(self.x, other.x)
894        #argsorting
895        ind = numpy.argsort(result.x)
896        result.x = result.x[ind]
897        result.y = numpy.append(self.y, other.y)
898        result.y = result.y[ind]
899        if result.dy is not None:
900            result.dy = numpy.append(self.dy, other.dy)
901            result.dy = result.dy[ind]
902        if result.dx is not None:
903            result.dx = numpy.append(self.dx, other.dx)
904            result.dx = result.dx[ind]
905        if result.dxw is not None:
906            result.dxw = numpy.append(self.dxw, other.dxw)
907            result.dxw = result.dxw[ind]
908        if result.dxl is not None:
909            result.dxl = numpy.append(self.dxl, other.dxl)
910            result.dxl = result.dxl[ind]
911        return result
912
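# Illustrative sketch (arbitrary values) of the arithmetic defined above: two
# Data1D sets with matching x vectors can be subtracted point by point, with
# the dy uncertainties combined through the Uncertainty class, while the "|"
# operator concatenates two sets and re-sorts the result by x.
def _example_data1d_arithmetic():  # documentation-only helper, not part of the module API
    q = numpy.array([0.01, 0.02, 0.03])
    sample = Data1D(x=q, y=numpy.array([10.0, 8.0, 6.0]),
                    dy=numpy.array([0.5, 0.4, 0.3]))
    empty_cell = Data1D(x=q, y=numpy.array([1.0, 1.1, 0.9]),
                        dy=numpy.array([0.1, 0.1, 0.1]))
    corrected = sample - empty_cell     # goes through _perform_operation
    merged = sample | empty_cell        # goes through _perform_union
    return corrected, merged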
913
914class Data2D(plottable_2D, DataInfo):
915    """
916    2D data class
917    """
918    ## Units for Q-values
919    Q_unit = '1/A'
920    ## Units for I(Q) values
921    I_unit = '1/cm'
922    ## Vector of Q-values at the center of each bin in x
923    x_bins = None
924    ## Vector of Q-values at the center of each bin in y
925    y_bins = None
926
927    def __init__(self, data=None, err_data=None, qx_data=None,
928                 qy_data=None, q_data=None, mask=None,
929                 dqx_data=None, dqy_data=None):
930        self.y_bins = []
931        self.x_bins = []
932        DataInfo.__init__(self)
933        plottable_2D.__init__(self, data, err_data, qx_data,
934                              qy_data, q_data, mask, dqx_data, dqy_data)
935        if len(self.detector) > 0:
936            raise RuntimeError, "Data2D: Detector bank already filled at init"
937
938    def __str__(self):
939        _str = "%s\n" % DataInfo.__str__(self)
940        _str += "Data:\n"
941        _str += "   Type:         %s\n" % self.__class__.__name__
942        _str += "   X- & Y-axis:  %s\t[%s]\n" % (self._yaxis, self._yunit)
943        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
944        _str += "   Length:       %g \n" % (len(self.data))
945        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
946        return _str
947
948    def clone_without_data(self, length=0, clone=None):
949        """
950        Clone the current object, without copying the data (which
951        will be filled out by a subsequent operation).
952        The data arrays will be initialized to zero.
953
954        :param length: length of the data array to be initialized
955        :param clone: if provided, the data will be copied to clone
956        """
957        from copy import deepcopy
958
959        if clone is None or not issubclass(clone.__class__, Data2D):
960            data = numpy.zeros(length)
961            err_data = numpy.zeros(length)
962            qx_data = numpy.zeros(length)
963            qy_data = numpy.zeros(length)
964            q_data = numpy.zeros(length)
965            mask = numpy.zeros(length)
966            dqx_data = None
967            dqy_data = None
968            clone = Data2D(data=data, err_data=err_data,
969                           qx_data=qx_data, qy_data=qy_data,
970                           q_data=q_data, mask=mask)
971
972        clone.title = self.title
973        clone.run = self.run
974        clone.filename = self.filename
975        clone.instrument = self.instrument
976        clone.notes = deepcopy(self.notes)
977        clone.process = deepcopy(self.process)
978        clone.detector = deepcopy(self.detector)
979        clone.sample = deepcopy(self.sample)
980        clone.source = deepcopy(self.source)
981        clone.collimation = deepcopy(self.collimation)
982        clone.trans_spectrum = deepcopy(self.trans_spectrum)
983        clone.meta_data = deepcopy(self.meta_data)
984        clone.errors = deepcopy(self.errors)
985
986        return clone
987
988    def _validity_check(self, other):
989        """
990        Checks that the data lengths are compatible.
991        Checks that the qx and qy vectors are compatible.
992        Returns errors vectors equal to original
993        errors vectors if they were present or vectors
994        of zeros when none was found.
995
996        :param other: other data set for operation
997        :return: err_data for self, err_data for other [numpy arrays]
998        :raise ValueError: when lengths are not compatible
999        """
1000        err_other = None
1001        TOLERANCE = 0.01
1002        if isinstance(other, Data2D):
1003            # Check that data lengths are the same
1004            if len(self.data) != len(other.data) or \
1005                len(self.qx_data) != len(other.qx_data) or \
1006                len(self.qy_data) != len(other.qy_data):
1007                msg = "Unable to perform operation: data length are not equal"
1008                raise ValueError, msg
1009            for ind in range(len(self.data)):
1010                if math.fabs((self.qx_data[ind] - other.qx_data[ind])/self.qx_data[ind]) > TOLERANCE:
1011                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
1012                    raise ValueError(msg)
1013                if math.fabs((self.qy_data[ind] - other.qy_data[ind])/self.qy_data[ind]) > TOLERANCE:
1014                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
1015                    raise ValueError(msg)
1016
1017            # Check that the other data set has errors, otherwise create zero vector
1018            err_other = other.err_data
1019            if other.err_data is None or \
1020                (len(other.err_data) != len(other.data)):
1021                err_other = numpy.zeros(len(other.data))
1022
1023        # Check that we have errors, otherwise create zero vector
1024        err = self.err_data
1025        if self.err_data is None or \
1026            (len(self.err_data) != len(self.data)):
1027            err = numpy.zeros(len(self.data))
1028        return err, err_other
1029
1030    def _perform_operation(self, other, operation):
1031        """
1032        Perform 2D operations between data sets
1033
1034        :param other: other data set
1035        :param operation: function defining the operation
1036        """
1037        # First, check the data compatibility
1038        dy, dy_other = self._validity_check(other)
1039        result = self.clone_without_data(numpy.size(self.data))
1040        if self.dqx_data is None or self.dqy_data is None:
1041            result.dqx_data = None
1042            result.dqy_data = None
1043        else:
1044            result.dqx_data = numpy.zeros(len(self.data))
1045            result.dqy_data = numpy.zeros(len(self.data))
1046        for i in range(numpy.size(self.data)):
1047            result.data[i] = self.data[i]
1048            if self.err_data is not None and \
1049                numpy.size(self.data) == numpy.size(self.err_data):
1050                result.err_data[i] = self.err_data[i]
1051            if self.dqx_data is not None:
1052                result.dqx_data[i] = self.dqx_data[i]
1053            if self.dqy_data is not None:
1054                result.dqy_data[i] = self.dqy_data[i]
1055            result.qx_data[i] = self.qx_data[i]
1056            result.qy_data[i] = self.qy_data[i]
1057            result.q_data[i] = self.q_data[i]
1058            result.mask[i] = self.mask[i]
1059
1060            a = Uncertainty(self.data[i], dy[i]**2)
1061            if isinstance(other, Data2D):
1062                b = Uncertainty(other.data[i], dy_other[i]**2)
1063                if other.dqx_data is not None and \
1064                        result.dqx_data is not None:
1065                    result.dqx_data[i] *= self.dqx_data[i]
1066                    result.dqx_data[i] += (other.dqx_data[i]**2)
1067                    result.dqx_data[i] /= 2
1068                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
1069                if other.dqy_data is not None and \
1070                        result.dqy_data is not None:
1071                    result.dqy_data[i] *= self.dqy_data[i]
1072                    result.dqy_data[i] += (other.dqy_data[i]**2)
1073                    result.dqy_data[i] /= 2
1074                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
1075            else:
1076                b = other
1077            output = operation(a, b)
1078            result.data[i] = output.x
1079            result.err_data[i] = math.sqrt(math.fabs(output.variance))
1080        return result
1081
1082    def _validity_check_union(self, other):
1083        """
1084        Checks that the other data set is a compatible type so that
1085        a union of the two data sets can be performed.
1086
1087        The union does not compare or align individual points;
1088        _perform_union simply concatenates the two data sets.
1089
1090        :param other: other data set for operation
1091        :return: bool
1092        :raise ValueError: when data types are not compatible
1093        """
1094        if not isinstance(other, Data2D):
1095            msg = "Unable to perform operation: different types of data set"
1096            raise ValueError(msg)
1097        return True
1098
1099    def _perform_union(self, other):
1100        """
1101        Perform 2D operations between data sets
1102
1103        :param other: other data set
1104        :param operation: function defining the operation
1105        """
1106        # First, check the data compatibility
1107        self._validity_check_union(other)
1108        result = self.clone_without_data(numpy.size(self.data) + \
1109                                         numpy.size(other.data))
1110        result.xmin = self.xmin
1111        result.xmax = self.xmax
1112        result.ymin = self.ymin
1113        result.ymax = self.ymax
1114        if self.dqx_data is None or self.dqy_data is None or \
1115                other.dqx_data is None or other.dqy_data is None:
1116            result.dqx_data = None
1117            result.dqy_data = None
1118        else:
1119            result.dqx_data = numpy.zeros(len(self.data) + \
1120                                         numpy.size(other.data))
1121            result.dqy_data = numpy.zeros(len(self.data) + \
1122                                         numpy.size(other.data))
1123
1124        result.data = numpy.append(self.data, other.data)
1125        result.qx_data = numpy.append(self.qx_data, other.qx_data)
1126        result.qy_data = numpy.append(self.qy_data, other.qy_data)
1127        result.q_data = numpy.append(self.q_data, other.q_data)
1128        result.mask = numpy.append(self.mask, other.mask)
1129        if result.err_data is not None:
1130            result.err_data = numpy.append(self.err_data, other.err_data)
1131        if self.dqx_data is not None:
1132            result.dqx_data = numpy.append(self.dqx_data, other.dqx_data)
1133        if self.dqy_data is not None:
1134            result.dqy_data = numpy.append(self.dqy_data, other.dqy_data)
1135
1136        return result
1137
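# Illustrative sketch (arbitrary values) of how a Data2D object is populated:
# the intensity, errors and Q components are stored as flat, parallel 1D arrays
# with one entry per detector pixel or bin, plus a mask of the same length.
def _example_data2d():  # documentation-only helper, not part of the module API
    qx = numpy.array([-0.01, 0.0, 0.01, -0.01, 0.0, 0.01])
    qy = numpy.array([-0.01, -0.01, -0.01, 0.01, 0.01, 0.01])
    intensity = numpy.array([2.0, 3.5, 2.1, 1.9, 3.4, 2.2])
    err = 0.1 * numpy.ones(len(intensity))
    q = numpy.sqrt(qx * qx + qy * qy)
    mask = numpy.ones(len(intensity), dtype=bool)   # True marks usable points
    return Data2D(data=intensity, err_data=err, qx_data=qx, qy_data=qy,
                  q_data=q, mask=mask)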
1138
1139def combine_data_info_with_plottable(data, datainfo):
1140    """
1141    A function that combines a DataInfo object with a plottable_1D or plottable_2D data object.
1142
1143    :param data: A plottable_1D or plottable_2D data object
1144    :return: A fully specified Data1D or Data2D object
1145    """
1146
1147    final_dataset = None
1148    if isinstance(data, plottable_1D):
1149        final_dataset = Data1D(data.x, data.y)
1150        final_dataset.dx = data.dx
1151        final_dataset.dy = data.dy
1152        final_dataset.dxl = data.dxl
1153        final_dataset.dxw = data.dxw
1154        final_dataset.xaxis(data._xaxis, data._xunit)
1155        final_dataset.yaxis(data._yaxis, data._yunit)
1156    elif isinstance(data, plottable_2D):
1157        final_dataset = Data2D(data.data, data.err_data, data.qx_data, data.qy_data, data.q_data,
1158                               data.mask, data.dqx_data, data.dqy_data)
1159        final_dataset.xaxis(data._xaxis, data._xunit)
1160        final_dataset.yaxis(data._yaxis, data._yunit)
1161        final_dataset.zaxis(data._zaxis, data._zunit)
1162        final_dataset.x_bins = data.x_bins
1163        final_dataset.y_bins = data.y_bins
1164    else:
1165        return_string = "Should Never Happen: _combine_data_info_with_plottable input is not a plottable1d or " + \
1166                        "plottable2d data object"
1167        return return_string
1168
1169    final_dataset.xmax = data.xmax
1170    final_dataset.ymax = data.ymax
1171    final_dataset.xmin = data.xmin
1172    final_dataset.ymin = data.ymin
1173    final_dataset.title = datainfo.title
1174    final_dataset.run = datainfo.run
1175    final_dataset.run_name = datainfo.run_name
1176    final_dataset.filename = datainfo.filename
1177    final_dataset.notes = datainfo.notes
1178    final_dataset.process = datainfo.process
1179    final_dataset.instrument = datainfo.instrument
1180    final_dataset.detector = datainfo.detector
1181    final_dataset.sample = datainfo.sample
1182    final_dataset.source = datainfo.source
1183    final_dataset.collimation = datainfo.collimation
1184    final_dataset.trans_spectrum = datainfo.trans_spectrum
1185    final_dataset.meta_data = datainfo.meta_data
1186    final_dataset.errors = datainfo.errors
1187    return final_dataset
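
# Illustrative sketch (arbitrary values) of combine_data_info_with_plottable:
# a reader can accumulate the curve and the metadata separately and merge them
# at the end. The x/y limits are copied from the plottable, so they are set on
# the instance before combining.
def _example_combine():  # documentation-only helper, not part of the module API
    curve = plottable_1D(x=[0.01, 0.02, 0.03], y=[5.0, 4.0, 3.0])
    curve.xmin, curve.xmax = min(curve.x), max(curve.x)
    curve.ymin, curve.ymax = min(curve.y), max(curve.y)
    info = DataInfo()
    info.filename = "example_run.dat"
    info.title = "Example run"
    return combine_data_info_with_plottable(curve, info)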