source: sasview/src/sas/sascalc/dataloader/readers/ascii_reader.py @ ad92c5a

Last change on this file was ad92c5a, checked in by krzywon, 7 years ago

ABS reader converted to new system.

  • Property mode set to 100644
File size: 6.3 KB
1"""
2    Generic multi-column ASCII data reader
3"""
4############################################################################
5# This software was developed by the University of Tennessee as part of the
6# Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
7# project funded by the US National Science Foundation.
8# If you use DANSE applications to do scientific research that leads to
9# publication, we ask that you acknowledge the use of the software with the
10# following sentence:
11# This work benefited from DANSE software developed under NSF award DMR-0520547.
12# copyright 2008, University of Tennessee
13#############################################################################
14
15import logging
16import numpy as np
17from sas.sascalc.dataloader.file_reader_base_class import FileReader
18from sas.sascalc.dataloader.data_info import DataInfo, plottable_1D
19from sas.sascalc.dataloader.loader_exceptions import FileContentsException,\
20    DefaultReaderException
21
22logger = logging.getLogger(__name__)
23
24
class Reader(FileReader):
    """
    Class to load ascii files (2, 3 or 4 columns).
    """
    # File type
    type_name = "ASCII"
    # Wildcards
    type = ["ASCII files (*.txt)|*.txt",
            "ASCII files (*.dat)|*.dat",
            "ASCII files (*.abs)|*.abs",
            "CSV files (*.csv)|*.csv"]
    # List of allowed extensions
    ext = ['.txt', '.dat', '.abs', '.csv']
    # Flag to bypass extension check
    allow_all = True
    # Minimum number of numeric lines required before a block is treated
    # as data rather than header text
    min_data_pts = 5
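    # Expected column order in the input file: Q, I(Q), then optionally dI
    # (column 3) and dQ (column 4); columns are split by self.splitline().
    # A block only counts as data once min_data_pts numeric lines with a
    # consistent column count have been read (blank lines are skipped).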
42
    def get_file_contents(self):
        """
        Get the contents of the file
        """

        buff = self.f_open.read()
        filepath = self.f_open.name
        lines = buff.splitlines()
        self.output = []
        self.current_datainfo = DataInfo()
        self.current_datainfo.filename = filepath
        self.reset_data_list(len(lines))

        # The first good line of data will define whether
        # we have 2-, 3- or 4-column ascii
        has_error_dx = None
        has_error_dy = None

        # Flag set once enough numeric lines have been found to call it data
        is_data = False
        # Number of numeric lines in the current data candidate block
        candidate_lines = 0
        # Number of numeric lines in the previous data candidate block
        candidate_lines_previous = 0
        # Current line number
        line_no = 0
        # Minimum required number of columns of data
        lentoks = 2
        for line in lines:
            toks = self.splitline(line.strip())
            # Number of columns in the current line of data
            new_lentoks = len(toks)
            try:
                if new_lentoks == 0:
                    # If the line is blank, skip and continue on
                    # in case of breaks within data sets.
                    continue
                elif new_lentoks != lentoks and is_data:
                    # If a footer is found, break the loop and save the data
                    break
                elif new_lentoks != lentoks and not is_data:
                    # The column count changed before real data was found,
                    # e.g. numerical header lines; restart the candidate block.
                    candidate_lines = 0
                    self.reset_data_list(len(lines) - line_no)

                self.current_dataset.x[candidate_lines] = float(toks[0])
                self.current_dataset.y[candidate_lines] = float(toks[1])

                # If a 3rd column is present, consider it dy
                if new_lentoks > 2:
                    self.current_dataset.dy[candidate_lines] = \
                        float(toks[2])
                    has_error_dy = True

                # If a 4th column is present, consider it dx
                if new_lentoks > 3:
                    self.current_dataset.dx[candidate_lines] = \
                        float(toks[3])
                    has_error_dx = True

                candidate_lines += 1
                # Once min_data_pts numeric lines have been read,
                # treat the block as real data
                if candidate_lines >= self.min_data_pts:
                    is_data = True

                # Remember the number of columns on the current line
                # for the next line of data
                lentoks = new_lentoks
                line_no += 1
            except ValueError:
                # ValueError is raised when a non-numeric string is converted
                # to float. If real data has already started, a non-numeric
                # line marks the end of the data, so stop reading.
                if is_data:
                    break
                # Otherwise discard the previously stored candidate lines,
                # since the block was not data
                self.reset_data_list(len(lines) - line_no)
                lentoks = 2
                has_error_dx = None
                has_error_dy = None
                # Reset the count of candidate data lines
                candidate_lines = 0

        if not is_data:
            self.set_all_to_none()
            if self.extension in self.ext:
                msg = "ASCII Reader error: Fewer than five Q data points found "
                msg += "in {}.".format(filepath)
                raise FileContentsException(msg)
            else:
                msg = "ASCII Reader could not load the file {}".format(filepath)
                raise DefaultReaderException(msg)
        # Sanity check
        if has_error_dy and not len(self.current_dataset.y) == \
                len(self.current_dataset.dy):
            msg = "ASCII Reader error: Number of I and dI data points are"
            msg += " different in {}.".format(filepath)
            # TODO: Add error to self.current_datainfo.errors instead?
            self.set_all_to_none()
            raise FileContentsException(msg)
        if has_error_dx and not len(self.current_dataset.x) == \
                len(self.current_dataset.dx):
            msg = "ASCII Reader error: Number of Q and dQ data points are"
            msg += " different in {}.".format(filepath)
            # TODO: Add error to self.current_datainfo.errors instead?
            self.set_all_to_none()
            raise FileContentsException(msg)

        self.remove_empty_q_values(has_error_dx, has_error_dy)
        self.current_dataset.xaxis("\\rm{Q}", 'A^{-1}')
        self.current_dataset.yaxis("\\rm{Intensity}", "cm^{-1}")

        # Store loading process information
        self.current_datainfo.meta_data['loader'] = self.type_name
        self.send_to_output()
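A minimal usage sketch for this reader follows (not part of the file above). It assumes the FileReader base class exposes a read(filepath) method that opens the file, dispatches to get_file_contents(), and returns the list of loaded datasets; the temporary file and its numeric values are purely illustrative.

import tempfile

from sas.sascalc.dataloader.readers.ascii_reader import Reader

# Illustrative 3-column data: Q, I(Q), dI -- at least min_data_pts (5) rows
sample = "\n".join("{0:.3f} {1:.3f} {2:.3f}".format(0.01 * i, 100.0 / i, 1.0)
                   for i in range(1, 11))

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as handle:
    handle.write(sample)
    path = handle.name

reader = Reader()
# Assumption: FileReader.read() opens `path`, calls get_file_contents(), and
# returns the accumulated output list of Data1D objects.
datasets = reader.read(path)
print(datasets[0].x, datasets[0].y, datasets[0].dy)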