source: sasview/src/sas/sascalc/dataloader/data_info.py @ e1e41de

Last change on this file since e1e41de was 1fac6c0, checked in by jhbakker, 8 years ago

SESANS is almost working, but this is NOT a stable version!

1"""
2    Module that contains classes to hold information read from
3    reduced data files.
4
5    A good description of the data members can be found in
6    the CanSAS 1D XML data format:
7
8    http://www.smallangles.net/wgwiki/index.php/cansas1d_documentation
9"""
10#####################################################################
11#This software was developed by the University of Tennessee as part of the
12#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
13#project funded by the US National Science Foundation.
14#See the license text in license.txt
15#copyright 2008, University of Tennessee
16######################################################################
17
18
19#TODO: Keep track of data manipulation in the 'process' data structure.
20#TODO: This module should be independent of plottables. We should write
21#        an adapter class for plottables when needed.
22
23#from sas.guitools.plottables import Data1D as plottable_1D
24from sas.sascalc.data_util.uncertainty import Uncertainty
25import numpy
26import math
class plottable_1D(object):
    """
    plottable_1D is a placeholder for 1D plottables.
    """
    x = None
    y = None
    dx = None
    dy = None
    # The presence of the slit-smearing terms below (dxl, dxw) should be
    # mutually exclusive with the presence of Qdev (dx)
    ## Slit smearing length
    dxl = None
    ## Slit smearing width
    dxw = None

    ## SESANS specific params (wavelengths for spin echo length calculation)
    lam = None
    dlam = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''

    def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None):
        self.x = numpy.asarray(x)
        self.y = numpy.asarray(y)
        if dx is not None:
            self.dx = numpy.asarray(dx)
        if dy is not None:
            self.dy = numpy.asarray(dy)
        if dxl is not None:
            self.dxl = numpy.asarray(dxl)
        if dxw is not None:
            self.dxw = numpy.asarray(dxw)
        if lam is not None:
            self.lam = numpy.asarray(lam)
        if dlam is not None:
            self.dlam = numpy.asarray(dlam)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit


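# Informal note on plottable_1D fields (summary, not part of the class API):
# for SANS data, x holds Q [1/A] and y holds I(Q) [1/cm]; for SESANS data,
# x holds the spin echo length [A], y the polarisation, and lam/dlam the
# wavelength and wavelength spread used in the spin echo length calculation.
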
class plottable_2D(object):
    """
    plottable_2D is a placeholder for 2D plottables.
    """
    xmin = None
    xmax = None
    ymin = None
    ymax = None
    data = None
    qx_data = None
    qy_data = None
    q_data = None
    err_data = None
    dqx_data = None
    dqy_data = None
    mask = None

    # Units
    _xaxis = ''
    _xunit = ''
    _yaxis = ''
    _yunit = ''
    _zaxis = ''
    _zunit = ''

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        self.data = numpy.asarray(data)
        self.qx_data = numpy.asarray(qx_data)
        self.qy_data = numpy.asarray(qy_data)
        self.q_data = numpy.asarray(q_data)
        self.mask = numpy.asarray(mask)
        self.err_data = numpy.asarray(err_data)
        if dqx_data is not None:
            self.dqx_data = numpy.asarray(dqx_data)
        if dqy_data is not None:
            self.dqy_data = numpy.asarray(dqy_data)

    def xaxis(self, label, unit):
        """
        set the x axis label and unit
        """
        self._xaxis = label
        self._xunit = unit

    def yaxis(self, label, unit):
        """
        set the y axis label and unit
        """
        self._yaxis = label
        self._yunit = unit

    def zaxis(self, label, unit):
        """
        set the z axis label and unit
        """
        self._zaxis = label
        self._zunit = unit


class Vector(object):
    """
    Vector class to hold multi-dimensional objects
    """
    ## x component
    x = None
    ## y component
    y = None
    ## z component
    z = None

    def __init__(self, x=None, y=None, z=None):
        """
        Initialization. Components that are not
        set are set to None by default.

        :param x: x component
        :param y: y component
        :param z: z component
        """
        self.x = x
        self.y = y
        self.z = z

    def __str__(self):
        msg = "x = %s\ty = %s\tz = %s" % (str(self.x), str(self.y), str(self.z))
        return msg


class Detector(object):
    """
    Class to hold detector information
    """
    ## Name of the instrument [string]
    name = None
    ## Sample to detector distance [float] [mm]
    distance = None
    distance_unit = 'mm'
    ## Offset of this detector position in X, Y,
    #(and Z if necessary) [Vector] [mm]
    offset = None
    offset_unit = 'm'
    ## Orientation (rotation) of this detector in roll,
    # pitch, and yaw [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Center of the beam on the detector in X and Y
    #(and Z if necessary) [Vector] [mm]
    beam_center = None
    beam_center_unit = 'mm'
    ## Pixel size in X, Y, (and Z if necessary) [Vector] [mm]
    pixel_size = None
    pixel_size_unit = 'mm'
    ## Slit length of the instrument for this detector. [float] [mm]
    slit_length = None
    slit_length_unit = 'mm'

    def __init__(self):
        """
        Initialize class attributes that are objects...
        """
        self.offset = Vector()
        self.orientation = Vector()
        self.beam_center = Vector()
        self.pixel_size = Vector()

    def __str__(self):
        _str = "Detector:\n"
        _str += "   Name:         %s\n" % self.name
        _str += "   Distance:     %s [%s]\n" % \
            (str(self.distance), str(self.distance_unit))
        _str += "   Offset:       %s [%s]\n" % \
            (str(self.offset), str(self.offset_unit))
        _str += "   Orientation:  %s [%s]\n" % \
            (str(self.orientation), str(self.orientation_unit))
        _str += "   Beam center:  %s [%s]\n" % \
            (str(self.beam_center), str(self.beam_center_unit))
        _str += "   Pixel size:   %s [%s]\n" % \
            (str(self.pixel_size), str(self.pixel_size_unit))
        _str += "   Slit length:  %s [%s]\n" % \
            (str(self.slit_length), str(self.slit_length_unit))
        return _str


class Aperture(object):
    """
    Class to hold aperture information
    """
    ## Name
    name = None
    ## Type
    type = None
    ## Size name
    size_name = None
    ## Aperture size [Vector]
    size = None
    size_unit = 'mm'
    ## Aperture distance [float]
    distance = None
    distance_unit = 'mm'

    def __init__(self):
        self.size = Vector()


class Collimation(object):
    """
    Class to hold collimation information
    """
    ## Name
    name = None
    ## Length [float] [mm]
    length = None
    length_unit = 'mm'
    ## Aperture
    aperture = None

    def __init__(self):
        self.aperture = []

    def __str__(self):
        _str = "Collimation:\n"
        _str += "   Length:       %s [%s]\n" % \
            (str(self.length), str(self.length_unit))
        for item in self.aperture:
            _str += "   Aperture size:%s [%s]\n" % \
                (str(item.size), str(item.size_unit))
            _str += "   Aperture_dist:%s [%s]\n" % \
                (str(item.distance), str(item.distance_unit))
        return _str


class Source(object):
    """
    Class to hold source information
    """
    ## Name
    name = None
    ## Radiation type [string]
    radiation = None
    ## Beam size name
    beam_size_name = None
    ## Beam size [Vector] [mm]
    beam_size = None
    beam_size_unit = 'mm'
    ## Beam shape [string]
    beam_shape = None
    ## Wavelength [float] [Angstrom]
    wavelength = None
    wavelength_unit = 'A'
    ## Minimum wavelength [float] [Angstrom]
    wavelength_min = None
    wavelength_min_unit = 'nm'
    ## Maximum wavelength [float] [Angstrom]
    wavelength_max = None
    wavelength_max_unit = 'nm'
    ## Wavelength spread [float] [Angstrom]
    wavelength_spread = None
    wavelength_spread_unit = 'percent'

    def __init__(self):
        self.beam_size = Vector()

    def __str__(self):
        _str = "Source:\n"
        _str += "   Radiation:    %s\n" % str(self.radiation)
        _str += "   Shape:        %s\n" % str(self.beam_shape)
        _str += "   Wavelength:   %s [%s]\n" % \
            (str(self.wavelength), str(self.wavelength_unit))
        _str += "   Waveln_min:   %s [%s]\n" % \
            (str(self.wavelength_min), str(self.wavelength_min_unit))
        _str += "   Waveln_max:   %s [%s]\n" % \
            (str(self.wavelength_max), str(self.wavelength_max_unit))
        _str += "   Waveln_spread:%s [%s]\n" % \
            (str(self.wavelength_spread), str(self.wavelength_spread_unit))
        _str += "   Beam_size:    %s [%s]\n" % \
            (str(self.beam_size), str(self.beam_size_unit))
        return _str


# Definitions of radiation types
NEUTRON = 'neutron'
XRAY = 'x-ray'
MUON = 'muon'
ELECTRON = 'electron'


class Sample(object):
    """
    Class to hold the sample description
    """
    ## Short name for sample
    name = ''
    ## ID
    ID = ''
    ## Thickness [float] [mm]
    thickness = None
    thickness_unit = 'mm'
    ## Transmission [float] [fraction]
    transmission = None
    ## Temperature [float] [No Default]
    temperature = None
    temperature_unit = None
    ## Position [Vector] [mm]
    position = None
    position_unit = 'mm'
    ## Orientation [Vector] [degrees]
    orientation = None
    orientation_unit = 'degree'
    ## Details
    details = None

    def __init__(self):
        self.position = Vector()
        self.orientation = Vector()
        self.details = []

    def __str__(self):
        _str = "Sample:\n"
        _str += "   ID:           %s\n" % str(self.ID)
        _str += "   Transmission: %s\n" % str(self.transmission)
        _str += "   Thickness:    %s [%s]\n" % \
            (str(self.thickness), str(self.thickness_unit))
        _str += "   Temperature:  %s [%s]\n" % \
            (str(self.temperature), str(self.temperature_unit))
        _str += "   Position:     %s [%s]\n" % \
            (str(self.position), str(self.position_unit))
        _str += "   Orientation:  %s [%s]\n" % \
            (str(self.orientation), str(self.orientation_unit))

        _str += "   Details:\n"
        for item in self.details:
            _str += "      %s\n" % item

        return _str


class Process(object):
    """
    Class that holds information about the processes
    performed on the data.
    """
    name = ''
    date = ''
    description = ''
    term = None
    notes = None

    def __init__(self):
        self.term = []
        self.notes = []

    def is_empty(self):
        """
        Return True if the object is empty
        """
        return len(self.name) == 0 and len(self.date) == 0 and len(self.description) == 0 \
            and len(self.term) == 0 and len(self.notes) == 0

    def single_line_desc(self):
        """
        Return a single line string representing the process
        """
        return "%s %s %s" % (self.name, self.date, self.description)

    def __str__(self):
        _str = "Process:\n"
        _str += "   Name:         %s\n" % self.name
        _str += "   Date:         %s\n" % self.date
        _str += "   Description:  %s\n" % self.description
        for item in self.term:
            _str += "   Term:         %s\n" % item
        for item in self.notes:
            _str += "   Note:         %s\n" % item
        return _str


class TransmissionSpectrum(object):
    """
    Class that holds information about transmission spectrum
    for white beams and spallation sources.
    """
    name = ''
    timestamp = ''
    ## Wavelength (float) [A]
    wavelength = None
    wavelength_unit = 'A'
    ## Transmission (float) [unit less]
    transmission = None
    transmission_unit = ''
    ## Transmission Deviation (float) [unit less]
    transmission_deviation = None
    transmission_deviation_unit = ''

    def __init__(self):
        self.wavelength = []
        self.transmission = []
        self.transmission_deviation = []

    def __str__(self):
        _str = "Transmission Spectrum:\n"
        _str += "   Name:             \t{0}\n".format(self.name)
        _str += "   Timestamp:        \t{0}\n".format(self.timestamp)
        _str += "   Wavelength unit:  \t{0}\n".format(self.wavelength_unit)
        _str += "   Transmission unit:\t{0}\n".format(self.transmission_unit)
        _str += "   Trans. Dev. unit:  \t{0}\n".format(
            self.transmission_deviation_unit)
        length_list = [len(self.wavelength), len(self.transmission),
                       len(self.transmission_deviation)]
        _str += "   Number of Pts:    \t{0}\n".format(max(length_list))
        return _str


class DataInfo(object):
    """
    Class to hold the data read from a file.
    It includes four blocks of data for the
    instrument description, the sample description,
    the data itself and any other meta data.
    """
    ## Title
    title = ''
    ## Run number
    run = None
    ## Run name
    run_name = None
    ## File name
    filename = ''
    ## Notes
    notes = None
    ## Processes (Action on the data)
    process = None
    ## Instrument name
    instrument = ''
    ## Detector information
    detector = None
    ## Sample information
    sample = None
    ## Source information
    source = None
    ## Collimation information
    collimation = None
    ## Transmission Spectrum Info
    trans_spectrum = None
    ## Additional meta-data
    meta_data = None
    ## Loading errors
    errors = None

    def __init__(self):
        """
        Initialization
        """
        ## Title
        self.title = ''
        ## Run number
        self.run = []
        self.run_name = {}
        ## File name
        self.filename = ''
        ## Notes
        self.notes = []
        ## Processes (Action on the data)
        self.process = []
        ## Instrument name
        self.instrument = ''
        ## Detector information
        self.detector = []
        ## Sample information
        self.sample = Sample()
        ## Source information
        self.source = Source()
        ## Collimation information
        self.collimation = []
        ## Transmission Spectrum
        self.trans_spectrum = []
        ## Additional meta-data
        self.meta_data = {}
        ## Loading errors
        self.errors = []

    def append_empty_process(self):
        """
        Append an empty Process to the list of processes.
        """
        self.process.append(Process())

    def add_notes(self, message=""):
        """
        Add notes to datainfo
        """
        self.notes.append(message)

    def __str__(self):
        """
        Nice printout
        """
        _str = "File:            %s\n" % self.filename
        _str += "Title:           %s\n" % self.title
        _str += "Run:             %s\n" % str(self.run)
        _str += "Instrument:      %s\n" % str(self.instrument)
        _str += "%s\n" % str(self.sample)
        _str += "%s\n" % str(self.source)
        for item in self.detector:
            _str += "%s\n" % str(item)
        for item in self.collimation:
            _str += "%s\n" % str(item)
        for item in self.process:
            _str += "%s\n" % str(item)
        for item in self.notes:
            _str += "%s\n" % str(item)
        for item in self.trans_spectrum:
            _str += "%s\n" % str(item)
        return _str

    # Private method to perform operation. Not implemented for DataInfo,
    # but should be implemented for each data class inherited from DataInfo
    # that holds actual data (ex.: Data1D)
    def _perform_operation(self, other, operation):
        """
        Private method to perform operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def _perform_union(self, other):
        """
        Private method to perform union operation. Not implemented for DataInfo,
        but should be implemented for each data class inherited from DataInfo
        that holds actual data (ex.: Data1D)
        """
        return NotImplemented

    def __add__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a + b
        return self._perform_operation(other, operation)

    def __radd__(self, other):
        """
        Add two data sets

        :param other: data set to add to the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b + a
        return self._perform_operation(other, operation)

    def __sub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a - b
        return self._perform_operation(other, operation)

    def __rsub__(self, other):
        """
        Subtract two data sets

        :param other: data set to subtract from the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b - a
        return self._perform_operation(other, operation)

    def __mul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a * b
        return self._perform_operation(other, operation)

    def __rmul__(self, other):
        """
        Multiply two data sets

        :param other: data set to multiply with the current one
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b * a
        return self._perform_operation(other, operation)

    def __div__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return a / b
        return self._perform_operation(other, operation)

    def __rdiv__(self, other):
        """
        Divide a data set by another

        :param other: data set that the current one is divided by
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        def operation(a, b):
            return b / a
        return self._perform_operation(other, operation)

    def __or__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

    def __ror__(self, other):
        """
        Union a data set with another

        :param other: data set to be unified
        :return: new data set
        :raise ValueError: raised when two data sets are incompatible
        """
        return self._perform_union(other)

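# Usage sketch (illustrative only; the values below are hypothetical): the
# arithmetic operators above delegate to _perform_operation, which the Data1D
# and Data2D subclasses implement with point-by-point uncertainty propagation
# via the Uncertainty class.
#
#     d1 = Data1D(x=[0.01, 0.02], y=[10.0, 8.0], dy=[0.5, 0.4])
#     d2 = Data1D(x=[0.01, 0.02], y=[2.0, 1.5], dy=[0.1, 0.1])
#     background_subtracted = d1 - d2   # dy combined through Uncertainty
#     scaled = 2.0 * d1                 # plain numbers are accepted as well
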
class Data1D(plottable_1D, DataInfo):
    """
    1D data class
    """
    def __init__(self, x=None, y=None, dx=None, dy=None, lam=None, dlam=None, isSesans=False):
        self.isSesans = isSesans
        DataInfo.__init__(self)
        plottable_1D.__init__(self, x, y, dx, dy, None, None, lam, dlam)
        if self.isSesans:
            # SESANS data: x is the spin echo length, y is the polarisation
            self.x_unit = 'A'
            self.y_unit = 'pol'
        else:
            # SANS data: x is Q, y is I(Q)
            self.x_unit = '1/A'
            self.y_unit = '1/cm'

    def __str__(self):
        """
        Nice printout
        """
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X-axis:       %s\t[%s]\n" % (self._xaxis, self._xunit)
        _str += "   Y-axis:       %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Length:       %g\n" % len(self.x)
        return _str

    def is_slit_smeared(self):
        """
        Check whether the data has slit smearing information

        :return: True if slit smearing info is present, False otherwise
        """
        def _check(v):
            if isinstance(v, (list, numpy.ndarray)) \
                    and len(v) > 0 and min(v) > 0:
                return True
            return False
        return _check(self.dxl) or _check(self.dxw)

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data1D):
            x = numpy.zeros(length)
            dx = numpy.zeros(length)
            y = numpy.zeros(length)
            dy = numpy.zeros(length)
            lam = numpy.zeros(length)
            dlam = numpy.zeros(length)
            clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam)

        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the x vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: dy for self, dy for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        dy_other = None
        if isinstance(other, Data1D):
            # Check that data lengths are the same
            if len(self.x) != len(other.x) or \
                len(self.y) != len(other.y):
                msg = "Unable to perform operation: data lengths are not equal"
                raise ValueError(msg)
            # Here we could also extrapolate between data points
            TOLERANCE = 0.01
            for i in range(len(self.x)):
                if math.fabs((self.x[i] - other.x[i]) / self.x[i]) > TOLERANCE:
                    msg = "Incompatible data sets: x-values do not match"
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector
            dy_other = other.dy
            if other.dy is None or (len(other.dy) != len(other.y)):
                dy_other = numpy.zeros(len(other.y))

        # Check that we have errors, otherwise create zero vector
        dy = self.dy
        if self.dy is None or (len(self.dy) != len(self.y)):
            dy = numpy.zeros(len(self.y))

        return dy, dy_other

    def _perform_operation(self, other, operation):
        """
        Perform the given operation point by point, propagating uncertainties.
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(len(self.x))
        if self.dxw is None:
            result.dxw = None
        else:
            result.dxw = numpy.zeros(len(self.x))
        if self.dxl is None:
            result.dxl = None
        else:
            result.dxl = numpy.zeros(len(self.x))

        for i in range(len(self.x)):
            result.x[i] = self.x[i]
            if self.dx is not None and len(self.x) == len(self.dx):
                result.dx[i] = self.dx[i]
            if self.dxw is not None and len(self.x) == len(self.dxw):
                result.dxw[i] = self.dxw[i]
            if self.dxl is not None and len(self.x) == len(self.dxl):
                result.dxl[i] = self.dxl[i]

            a = Uncertainty(self.y[i], dy[i]**2)
            if isinstance(other, Data1D):
                b = Uncertainty(other.y[i], dy_other[i]**2)
                if other.dx is not None:
                    result.dx[i] *= self.dx[i]
                    result.dx[i] += (other.dx[i]**2)
                    result.dx[i] /= 2
                    result.dx[i] = math.sqrt(result.dx[i])
                if result.dxl is not None and other.dxl is not None:
                    result.dxl[i] *= self.dxl[i]
                    result.dxl[i] += (other.dxl[i]**2)
                    result.dxl[i] /= 2
                    result.dxl[i] = math.sqrt(result.dxl[i])
            else:
                b = other

            output = operation(a, b)
            result.y[i] = output.x
            result.dy[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the two data sets are of the same type,
        so that they can be combined.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data1D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Concatenate this data set with another and sort the result in x.
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(len(self.x) + len(other.x))
        if self.dy is None or other.dy is None:
            result.dy = None
        else:
            result.dy = numpy.zeros(len(self.x) + len(other.x))
        if self.dx is None or other.dx is None:
            result.dx = None
        else:
            result.dx = numpy.zeros(len(self.x) + len(other.x))
        if self.dxw is None or other.dxw is None:
            result.dxw = None
        else:
            result.dxw = numpy.zeros(len(self.x) + len(other.x))
        if self.dxl is None or other.dxl is None:
            result.dxl = None
        else:
            result.dxl = numpy.zeros(len(self.x) + len(other.x))

        result.x = numpy.append(self.x, other.x)
        # argsort to keep the combined data set ordered in x
        ind = numpy.argsort(result.x)
        result.x = result.x[ind]
        result.y = numpy.append(self.y, other.y)
        result.y = result.y[ind]
        if result.dy is not None:
            result.dy = numpy.append(self.dy, other.dy)
            result.dy = result.dy[ind]
        if result.dx is not None:
            result.dx = numpy.append(self.dx, other.dx)
            result.dx = result.dx[ind]
        if result.dxw is not None:
            result.dxw = numpy.append(self.dxw, other.dxw)
            result.dxw = result.dxw[ind]
        if result.dxl is not None:
            result.dxl = numpy.append(self.dxl, other.dxl)
            result.dxl = result.dxl[ind]
        return result

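# Usage sketch (illustrative only; the names below are hypothetical): the `|`
# operator defined on DataInfo calls Data1D._perform_union above, so two 1D
# data sets covering different x ranges can be merged into one set sorted in x:
#
#     combined = low_q_data | high_q_data
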

class Data2D(plottable_2D, DataInfo):
    """
    2D data class
    """
    ## Units for Q-values
    Q_unit = '1/A'
    ## Units for I(Q) values
    I_unit = '1/cm'
    ## Vector of Q-values at the center of each bin in x
    x_bins = None
    ## Vector of Q-values at the center of each bin in y
    y_bins = None

    def __init__(self, data=None, err_data=None, qx_data=None,
                 qy_data=None, q_data=None, mask=None,
                 dqx_data=None, dqy_data=None):
        self.y_bins = []
        self.x_bins = []
        DataInfo.__init__(self)
        plottable_2D.__init__(self, data, err_data, qx_data,
                              qy_data, q_data, mask, dqx_data, dqy_data)
        if len(self.detector) > 0:
            raise RuntimeError("Data2D: Detector bank already filled at init")

    def __str__(self):
        _str = "%s\n" % DataInfo.__str__(self)
        _str += "Data:\n"
        _str += "   Type:         %s\n" % self.__class__.__name__
        _str += "   X- & Y-axis:  %s\t[%s]\n" % (self._yaxis, self._yunit)
        _str += "   Z-axis:       %s\t[%s]\n" % (self._zaxis, self._zunit)
        _str += "   Length:       %g \n" % (len(self.data))
        _str += "   Shape:        (%d, %d)\n" % (len(self.y_bins), len(self.x_bins))
        return _str

    def clone_without_data(self, length=0, clone=None):
        """
        Clone the current object, without copying the data (which
        will be filled out by a subsequent operation).
        The data arrays will be initialized to zero.

        :param length: length of the data array to be initialized
        :param clone: if provided, the data will be copied to clone
        """
        from copy import deepcopy

        if clone is None or not issubclass(clone.__class__, Data2D):
            data = numpy.zeros(length)
            err_data = numpy.zeros(length)
            qx_data = numpy.zeros(length)
            qy_data = numpy.zeros(length)
            q_data = numpy.zeros(length)
            mask = numpy.zeros(length)
            dqx_data = None
            dqy_data = None
            clone = Data2D(data=data, err_data=err_data,
                           qx_data=qx_data, qy_data=qy_data,
                           q_data=q_data, mask=mask)

        clone.title = self.title
        clone.run = self.run
        clone.filename = self.filename
        clone.instrument = self.instrument
        clone.notes = deepcopy(self.notes)
        clone.process = deepcopy(self.process)
        clone.detector = deepcopy(self.detector)
        clone.sample = deepcopy(self.sample)
        clone.source = deepcopy(self.source)
        clone.collimation = deepcopy(self.collimation)
        clone.trans_spectrum = deepcopy(self.trans_spectrum)
        clone.meta_data = deepcopy(self.meta_data)
        clone.errors = deepcopy(self.errors)

        return clone

    def _validity_check(self, other):
        """
        Checks that the data lengths are compatible.
        Checks that the qx and qy vectors are compatible.
        Returns errors vectors equal to original
        errors vectors if they were present or vectors
        of zeros when none was found.

        :param other: other data set for operation
        :return: err for self, err for other [numpy arrays]
        :raise ValueError: when lengths are not compatible
        """
        err_other = None
        TOLERANCE = 0.01
        if isinstance(other, Data2D):
            # Check that data lengths are the same
            if len(self.data) != len(other.data) or \
                len(self.qx_data) != len(other.qx_data) or \
                len(self.qy_data) != len(other.qy_data):
                msg = "Unable to perform operation: data lengths are not equal"
                raise ValueError(msg)
            for ind in range(len(self.data)):
                if math.fabs((self.qx_data[ind] - other.qx_data[ind]) / self.qx_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qx-values do not match: %s %s" % (self.qx_data[ind], other.qx_data[ind])
                    raise ValueError(msg)
                if math.fabs((self.qy_data[ind] - other.qy_data[ind]) / self.qy_data[ind]) > TOLERANCE:
                    msg = "Incompatible data sets: qy-values do not match: %s %s" % (self.qy_data[ind], other.qy_data[ind])
                    raise ValueError(msg)

            # Check that the other data set has errors, otherwise
            # create zero vector
            err_other = other.err_data
            if other.err_data is None or \
                (len(other.err_data) != len(other.data)):
                err_other = numpy.zeros(len(other.data))

        # Check that we have errors, otherwise create zero vector
        err = self.err_data
        if self.err_data is None or \
            (len(self.err_data) != len(self.data)):
            err = numpy.zeros(len(self.data))
        return err, err_other

    def _perform_operation(self, other, operation):
        """
        Perform 2D operations between data sets

        :param other: other data set
        :param operation: function defining the operation
        """
        # First, check the data compatibility
        dy, dy_other = self._validity_check(other)
        result = self.clone_without_data(numpy.size(self.data))
        if self.dqx_data is None or self.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = numpy.zeros(len(self.data))
            result.dqy_data = numpy.zeros(len(self.data))
        for i in range(numpy.size(self.data)):
            result.data[i] = self.data[i]
            if self.err_data is not None and \
                numpy.size(self.data) == numpy.size(self.err_data):
                result.err_data[i] = self.err_data[i]
            if self.dqx_data is not None:
                result.dqx_data[i] = self.dqx_data[i]
            if self.dqy_data is not None:
                result.dqy_data[i] = self.dqy_data[i]
            result.qx_data[i] = self.qx_data[i]
            result.qy_data[i] = self.qy_data[i]
            result.q_data[i] = self.q_data[i]
            result.mask[i] = self.mask[i]

            a = Uncertainty(self.data[i], dy[i]**2)
            if isinstance(other, Data2D):
                b = Uncertainty(other.data[i], dy_other[i]**2)
                if other.dqx_data is not None and \
                        result.dqx_data is not None:
                    result.dqx_data[i] *= self.dqx_data[i]
                    result.dqx_data[i] += (other.dqx_data[i]**2)
                    result.dqx_data[i] /= 2
                    result.dqx_data[i] = math.sqrt(result.dqx_data[i])
                if other.dqy_data is not None and \
                        result.dqy_data is not None:
                    result.dqy_data[i] *= self.dqy_data[i]
                    result.dqy_data[i] += (other.dqy_data[i]**2)
                    result.dqy_data[i] /= 2
                    result.dqy_data[i] = math.sqrt(result.dqy_data[i])
            else:
                b = other
            output = operation(a, b)
            result.data[i] = output.x
            result.err_data[i] = math.sqrt(math.fabs(output.variance))
        return result

    def _validity_check_union(self, other):
        """
        Checks that the two data sets are of the same type,
        so that they can be combined.

        :param other: other data set for operation
        :return: bool
        :raise ValueError: when data types are not compatible
        """
        if not isinstance(other, Data2D):
            msg = "Unable to perform operation: different types of data set"
            raise ValueError(msg)
        return True

    def _perform_union(self, other):
        """
        Combine this data set with another Data2D set.

        :param other: other data set
        """
        # First, check the data compatibility
        self._validity_check_union(other)
        result = self.clone_without_data(numpy.size(self.data) +
                                         numpy.size(other.data))
        result.xmin = self.xmin
        result.xmax = self.xmax
        result.ymin = self.ymin
        result.ymax = self.ymax
        if self.dqx_data is None or self.dqy_data is None or \
                other.dqx_data is None or other.dqy_data is None:
            result.dqx_data = None
            result.dqy_data = None
        else:
            result.dqx_data = numpy.zeros(len(self.data) +
                                          numpy.size(other.data))
            result.dqy_data = numpy.zeros(len(self.data) +
                                          numpy.size(other.data))

        result.data = numpy.append(self.data, other.data)
        result.qx_data = numpy.append(self.qx_data, other.qx_data)
        result.qy_data = numpy.append(self.qy_data, other.qy_data)
        result.q_data = numpy.append(self.q_data, other.q_data)
        result.mask = numpy.append(self.mask, other.mask)
        if result.err_data is not None:
            result.err_data = numpy.append(self.err_data, other.err_data)
        if self.dqx_data is not None:
            result.dqx_data = numpy.append(self.dqx_data, other.dqx_data)
        if self.dqy_data is not None:
            result.dqy_data = numpy.append(self.dqy_data, other.dqy_data)

        return result


def combine_data_info_with_plottable(data, datainfo):
    """
    A function that combines the metadata held in a DataInfo object (datainfo)
    with a plottable_1D or plottable_2D data object.

    :param data: A plottable_1D or plottable_2D data object
    :param datainfo: A DataInfo object holding the metadata
    :return: A fully specified Data1D or Data2D object
    """

    final_dataset = None
    if isinstance(data, plottable_1D):
        final_dataset = Data1D(data.x, data.y)
        final_dataset.dx = data.dx
        final_dataset.dy = data.dy
        final_dataset.dxl = data.dxl
        final_dataset.dxw = data.dxw
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
    elif isinstance(data, plottable_2D):
        final_dataset = Data2D(data.data, data.err_data, data.qx_data, data.qy_data, data.q_data,
                               data.mask, data.dqx_data, data.dqy_data)
        final_dataset.xaxis(data._xaxis, data._xunit)
        final_dataset.yaxis(data._yaxis, data._yunit)
        final_dataset.zaxis(data._zaxis, data._zunit)
        final_dataset.x_bins = data.x_bins
        final_dataset.y_bins = data.y_bins
    else:
        return_string = "Should Never Happen: combine_data_info_with_plottable input is not a " + \
                        "plottable_1D or plottable_2D data object"
        return return_string

    final_dataset.xmax = data.xmax
    final_dataset.ymax = data.ymax
    final_dataset.xmin = data.xmin
    final_dataset.ymin = data.ymin
    final_dataset.title = datainfo.title
    final_dataset.run = datainfo.run
    final_dataset.run_name = datainfo.run_name
    final_dataset.filename = datainfo.filename
    final_dataset.notes = datainfo.notes
    final_dataset.process = datainfo.process
    final_dataset.instrument = datainfo.instrument
    final_dataset.detector = datainfo.detector
    final_dataset.sample = datainfo.sample
    final_dataset.source = datainfo.source
    final_dataset.collimation = datainfo.collimation
    final_dataset.trans_spectrum = datainfo.trans_spectrum
    final_dataset.meta_data = datainfo.meta_data
    final_dataset.errors = datainfo.errors
    return final_dataset
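

# A minimal, self-contained sketch of how a reader might use this module:
# build a bare plottable_1D, fill a DataInfo with metadata, and merge them
# into a Data1D via combine_data_info_with_plottable. The values and the
# "demo" names below are purely illustrative and not part of the loader API.
if __name__ == "__main__":
    demo_plottable = plottable_1D(x=[0.001, 0.002, 0.003],
                                  y=[120.0, 95.0, 70.0],
                                  dy=[2.0, 1.8, 1.5])
    demo_plottable.xaxis("Q", "1/A")
    demo_plottable.yaxis("Intensity", "1/cm")
    # combine_data_info_with_plottable reads xmin/xmax/ymin/ymax from the
    # plottable, so set them explicitly for this bare object.
    demo_plottable.xmin, demo_plottable.xmax = 0.001, 0.003
    demo_plottable.ymin, demo_plottable.ymax = 70.0, 120.0

    demo_info = DataInfo()
    demo_info.title = "demo run"
    demo_info.filename = "demo.xml"
    demo_info.sample.thickness = 1.0
    demo_info.append_empty_process()
    demo_info.process[-1].name = "demo reduction"

    demo_data = combine_data_info_with_plottable(demo_plottable, demo_info)
    print(demo_data)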