1 | """ |
---|
2 | SESANS reader (based on ASCII reader) |
---|
3 | |
---|
4 | Reader for .ses or .sesans file format |
---|
5 | |
---|
6 | Jurrian Bakker |
---|
7 | """ |
---|
8 | import numpy |
---|
9 | import os |
---|
10 | from sas.sascalc.dataloader.data_info import SESANSData1D |
---|
11 | |
---|
12 | # Check whether we have a converter available |
---|
13 | has_converter = True |
---|
14 | try: |
---|
15 | from sas.sascalc.data_util.nxsunit import Converter |
---|
16 | except: |
---|
17 | has_converter = False |
---|
18 | _ZERO = 1e-16 |
---|
19 | |
---|
class Reader:
    """
    Class to load sesans files (6 columns: z, dz, lambda, dlambda, P, dP).
    """
    ## File type
    type_name = "SESANS"

    ## Wildcards
    type = ["SESANS files (*.ses)|*.ses",
            "SESANS files (*..sesans)|*.sesans"]
    ## List of allowed extensions
    ext = ['.ses', '.SES', '.sesans', '.SESANS']

    ## Flag to bypass extension check
    allow_all = True

    def read(self, path):
        """
        Load a SESANS data file.

        The file is expected to contain tab-separated "name<TAB>value"
        header lines, followed by rows with at least six tab-separated
        columns (z, dz, lambda, dlambda, P, dP); the first six-column
        row carries the column titles, e.g. "spin echo length [A]".

        :param path: file path

        :return: SESANSData1D object, or None if the extension check fails

        :raise RuntimeError: when the file can't be opened, is not a file,
            or contains no data rows
        """
        if not os.path.isfile(path):
            raise RuntimeError("%s is not a file" % path)
        basename = os.path.basename(path)
        _, extension = os.path.splitext(basename)
        if not (self.allow_all or extension.lower() in self.ext):
            return None

        try:
            # Read in binary mode since GRASP frequently has non-ascii
            # characters that break the open operation
            input_f = open(path, 'rb')
        except IOError:
            raise RuntimeError("sesans_reader: cannot open %s" % path)
        try:
            buff = input_f.read()
        finally:
            # Always release the handle; it used to leak when parsing raised.
            input_f.close()
        lines = buff.splitlines()

        paramnames = []
        paramvals = []
        zvals = []
        dzvals = []
        lamvals = []
        dlamvals = []
        Pvals = []
        dPvals = []
        for line in lines:
            # Fields are tab-separated (the old "split on ," comment was wrong)
            line = line.strip()
            toks = line.split('\t')
            if len(toks) == 2:
                # "name<TAB>value" header line
                paramnames.append(toks[0])
                paramvals.append(toks[1])
            elif len(toks) > 5:
                # data (or column-title) line
                zvals.append(toks[0])
                dzvals.append(toks[1])
                lamvals.append(toks[2])
                dlamvals.append(toks[3])
                Pvals.append(toks[4])
                dPvals.append(toks[5])

        # Need the title row plus at least one data row; without this guard
        # an empty file crashed with IndexError instead of the intended error.
        if len(zvals) < 2:
            raise RuntimeError("%s is empty" % path)

        # Pull the unit out of the brackets of the lambda column title,
        # e.g. "wavelength [A]" -> "A".
        lam_header = lamvals[0].split()
        lam_unit = lam_header[1].replace("[", "").replace("]", "")
        default_z_unit = "A"
        varheader = [zvals[0], dzvals[0], lamvals[0], dlamvals[0],
                     Pvals[0], dPvals[0]]

        # Convert the data rows (everything after the title row) to
        # double-precision arrays.
        x = numpy.asarray([float(v) for v in zvals[1:]], 'double')
        dx = numpy.asarray([float(v) for v in dzvals[1:]], 'double')
        lam = numpy.asarray([float(v) for v in lamvals[1:]], 'double')
        dlam = numpy.asarray([float(v) for v in dlamvals[1:]], 'double')
        y = numpy.asarray([float(v) for v in Pvals[1:]], 'double')
        dy = numpy.asarray([float(v) for v in dPvals[1:]], 'double')

        output = SESANSData1D(x=x, y=y, lam=lam, dy=dy, dx=dx, dlam=dlam)
        self.filename = output.filename = basename

        output.x, output.x_unit = self._unit_conversion(x, lam_unit, default_z_unit)
        output.y = y
        output.dx, output.dx_unit = self._unit_conversion(dx, lam_unit, default_z_unit)
        output.dy = dy
        output.lam, output.lam_unit = self._unit_conversion(lam, lam_unit, default_z_unit)
        output.dlam, output.dlam_unit = self._unit_conversion(dlam, lam_unit, default_z_unit)

        # NOTE: was "\rm{z}", which embeds a literal carriage return (\r);
        # escape the backslash to match the y-axis label below.
        output.xaxis("\\rm{z}", output.x_unit)
        output.yaxis("\\rm{P/P0}", output.y_unit)
        # Store loading process information
        output.meta_data['loader'] = self.type_name
        # Header parameters are positional: 0 = sample ID, 1 = sample name,
        # 6 = thickness, 7 = z-acceptance -- TODO confirm against the
        # SESANS file-format specification.
        output.sample.thickness = float(paramvals[6])
        output.sample.name = paramvals[1]
        output.sample.ID = paramvals[0]
        zaccept_unit_split = paramnames[7].split("[")
        zaccept_unit = zaccept_unit_split[1].replace("]", "")
        if zaccept_unit.strip() == '\\AA^-1':
            zaccept_unit = "1/A"
        output.sample.zacceptance = (float(paramvals[7]), zaccept_unit)
        output.vars = varheader
        return output

    def _unit_conversion(self, value, value_unit, default_unit):
        """
        Convert *value* from *value_unit* to *default_unit* when a unit
        converter is available; otherwise return the value unchanged.

        :param value: array of values to convert
        :param value_unit: unit the values are currently in
        :param default_unit: unit to convert to

        :return: tuple of (possibly converted value, unit actually in effect)
        """
        if has_converter and value_unit != default_unit:
            value = Converter(value_unit)(value, units=default_unit)
            new_unit = default_unit
        else:
            new_unit = value_unit
        return value, new_unit
---|