Changes in [2fd2d99:ce94504] in sasview

Files:
- 26 added
- 3 deleted
- 5 edited
Legend: lines beginning with "+" were added, lines beginning with "-" were removed; other lines are unchanged context.
docs/sphinx-docs/source/user/user.rst (r20a3c55 → r5a71761)

     Working with SasView <working>
+    Computations with GPU <gpu_computations>
src/sas/sascalc/dataloader/readers/ascii_reader.py (r7d94915 → rb699768)

    ## File type
    type_name = "ASCII"

    ## Wildcards
    type = ["ASCII files (*.txt)|*.txt",
…
    ## List of allowed extensions
    ext = ['.txt', '.TXT', '.dat', '.DAT', '.abs', '.ABS', 'csv', 'CSV']

    ## Flag to bypass extension check
    allow_all = True

    def read(self, path):
        """
        Load data file

        :param path: file path

+       :return: Data1D object, or None

        :raise RuntimeError: when the file can't be opened
        :raise ValueError: when the length of the data vectors are inconsistent
…
        try:
            # Read in binary mode since GRASP frequently has no-ascii
-           # characters that breaks the open operation
+           # characters that brakes the open operation
            input_f = open(path,'rb')
        except:
…
        buff = input_f.read()
        lines = buff.splitlines()
-       # Arrays for data storage
-       tx = numpy.zeros(0)
-       ty = numpy.zeros(0)
+       x = numpy.zeros(0)
+       y = numpy.zeros(0)
+       dy = numpy.zeros(0)
+       dx = numpy.zeros(0)
+
+       #temp. space to sort data
+       tx = numpy.zeros(0)
+       ty = numpy.zeros(0)
        tdy = numpy.zeros(0)
        tdx = numpy.zeros(0)

+       output = Data1D(x, y, dy=dy, dx=dx)
+       self.filename = output.filename = basename
+
+       data_conv_q = None
+       data_conv_i = None
+
+       if has_converter == True and output.x_unit != '1/A':
+           data_conv_q = Converter('1/A')
+           # Test it
+           data_conv_q(1.0, output.x_unit)
+
+       if has_converter == True and output.y_unit != '1/cm':
+           data_conv_i = Converter('1/cm')
+           # Test it
+           data_conv_i(1.0, output.y_unit)
+
        # The first good line of data will define whether
        # we have 2-column or 3-column ascii
        has_error_dx = None
        has_error_dy = None

        #Initialize counters for data lines and header lines.
-       is_data = False
+       is_data = False  # Has more than 5 lines
        # More than "5" lines of data is considered as actual
        # data unless that is the only data
-       min_data_pts = 5
+       mum_data_lines = 5
        # To count # of current data candidate lines
-       candidate_lines = 0
+       i = -1
        # To count total # of previous data candidate lines
-       candidate_lines_previous = 0
-       #minimum required number of columns of data
+       i1 = -1
+       # To count # of header lines
+       j = -1
+       # Helps to count # of header lines
+       j1 = -1
+       #minimum required number of columns of data; ( <= 4).
        lentoks = 2
        for line in lines:
-           toks = self.splitline(line)
-           # To remember the # of columns in the current line of data
-           new_lentoks = len(toks)
+           # Initial try for CSV (split on ,)
+           toks = line.split(',')
+           # Now try SCSV (split on ;)
+           if len(toks) < 2:
+               toks = line.split(';')
+           # Now go for whitespace
+           if len(toks) < 2:
+               toks = line.split()
            try:
-               if new_lentoks == 1 and not is_data:
-                   ## If only one item in list, no longer data
-                   raise ValueError
-               elif new_lentoks == 0:
-                   ## If the line is blank, skip and continue on
-                   ## In case of breaks within data sets.
-                   continue
-               elif new_lentoks != lentoks and is_data:
-                   ## If a footer is found, break the loop and save the data
-                   break
-               elif new_lentoks != lentoks and not is_data:
-                   ## If header lines are numerical
-                   candidate_lines = 0
-                   candidate_lines_previous = 0
-
                #Make sure that all columns are numbers.
                for colnum in range(len(toks)):
-                   # Any non-floating point values throw ValueError
                    float(toks[colnum])
-
-               candidate_lines += 1
+
                _x = float(toks[0])
                _y = float(toks[1])
+
+               #Reset the header line counters
+               if j == j1:
+                   j = 0
+                   j1 = 0
+
+               if i > 1:
+                   is_data = True
+
+               if data_conv_q is not None:
+                   _x = data_conv_q(_x, units=output.x_unit)
+
+               if data_conv_i is not None:
+                   _y = data_conv_i(_y, units=output.y_unit)
+
+               # If we have an extra token, check
+               # whether it can be interpreted as a
+               # third column.
+               _dy = None
+               if len(toks) > 2:
+                   try:
+                       _dy = float(toks[2])
+
+                       if data_conv_i is not None:
+                           _dy = data_conv_i(_dy, units=output.y_unit)
+
+                   except:
+                       # The third column is not a float, skip it.
+                       pass
+
+               # If we haven't set the 3rd column
+               # flag, set it now.
+               if has_error_dy == None:
+                   has_error_dy = False if _dy == None else True
+
+               #Check for dx
                _dx = None
-               _dy = None
-
-               #If 5 or more lines, this is considering the set data
-               if candidate_lines >= min_data_pts:
-                   is_data = True
-
-               # If a 3rd row is present, consider it dy
-               if new_lentoks > 2:
-                   _dy = float(toks[2])
-                   has_error_dy = False if _dy == None else True
-
-               # If a 4th row is present, consider it dx
-               if new_lentoks > 3:
-                   _dx = float(toks[3])
-                   has_error_dx = False if _dx == None else True
-
-               # Delete the previously stored lines of data candidates if
-               # the list is not data
-               if candidate_lines == 1 and -1 < candidate_lines_previous < min_data_pts and \
+               if len(toks) > 3:
+                   try:
+                       _dx = float(toks[3])
+
+                       if data_conv_i is not None:
+                           _dx = data_conv_i(_dx, units=output.x_unit)
+
+                   except:
+                       # The 4th column is not a float, skip it.
+                       pass
+
+               # If we haven't set the 3rd column
+               # flag, set it now.
+               if has_error_dx == None:
+                   has_error_dx = False if _dx == None else True
+
+               #After talked with PB, we decided to take care of only
+               # 4 columns of data for now.
+               #number of columns in the current line
+               #To remember the # of columns in the current
+               #line of data
+               new_lentoks = len(toks)
+
+               #If the previous columns not equal to the current,
+               #mark the previous as non-data and reset the dependents.
+               if lentoks != new_lentoks:
+                   if is_data == True:
+                       break
+                   else:
+                       i = -1
+                       i1 = 0
+                       j = -1
+                       j1 = -1
+
+               #Delete the previously stored lines of data candidates
+               # if is not data.
+               if i < 0 and -1 < i1 < mum_data_lines and \
+                   is_data == False:
+                   try:
+                       x = numpy.zeros(0)
+                       y = numpy.zeros(0)
+                   except:
+                       pass
+
+               x = numpy.append(x, _x)
+               y = numpy.append(y, _y)
+
+               if has_error_dy == True:
+                   #Delete the previously stored lines of
+                   # data candidates if is not data.
+                   if i < 0 and -1 < i1 < mum_data_lines and \
+                       is_data == False:
+                       try:
+                           dy = numpy.zeros(0)
+                       except:
+                           pass
+                   dy = numpy.append(dy, _dy)
+
+               if has_error_dx == True:
+                   #Delete the previously stored lines of
+                   # data candidates if is not data.
+                   if i < 0 and -1 < i1 < mum_data_lines and \
+                       is_data == False:
+                       try:
+                           dx = numpy.zeros(0)
+                       except:
+                           pass
+                   dx = numpy.append(dx, _dx)
+
+               #Same for temp.
+               #Delete the previously stored lines of data candidates
+               # if is not data.
+               if i < 0 and -1 < i1 < mum_data_lines and\
                    is_data == False:
                    try:
                        tx = numpy.zeros(0)
                        ty = numpy.zeros(0)
-                       tdy = numpy.zeros(0)
-                       tdx = numpy.zeros(0)
                    except:
                        pass

+               tx = numpy.append(tx, _x)
+               ty = numpy.append(ty, _y)
+
                if has_error_dy == True:
+                   #Delete the previously stored lines of
+                   # data candidates if is not data.
+                   if i < 0 and -1 < i1 < mum_data_lines and \
+                       is_data == False:
+                       try:
+                           tdy = numpy.zeros(0)
+                       except:
+                           pass
                    tdy = numpy.append(tdy, _dy)
                if has_error_dx == True:
+                   #Delete the previously stored lines of
+                   # data candidates if is not data.
+                   if i < 0 and -1 < i1 < mum_data_lines and \
+                       is_data == False:
+                       try:
+                           tdx = numpy.zeros(0)
+                       except:
+                           pass
                    tdx = numpy.append(tdx, _dx)
-               tx = numpy.append(tx, _x)
-               ty = numpy.append(ty, _y)

+               #reset i1 and flag lentoks for the next
+               if lentoks < new_lentoks:
+                   if is_data == False:
+                       i1 = -1
                #To remember the # of columns on the current line
                # for the next line of data
-               lentoks = new_lentoks
-               candidate_lines_previous = candidate_lines
-           except ValueError:
+               lentoks = len(toks)
+
+               #Reset # of header lines and counts #
+               # of data candidate lines
+               if j == 0 and j1 == 0:
+                   i1 = i + 1
+                   i += 1
+           except:
                # It is data and meet non - number, then stop reading
                if is_data == True:
                    break
                lentoks = 2
-               has_error_dx = None
-               has_error_dy = None
+               #Counting # of header lines
+               j += 1
+               if j == j1 + 1:
+                   j1 = j
+               else:
+                   j = -1
                #Reset # of lines of data candidates
-               candidate_lines = 0
-           except:
+               i = -1
+
+               # Couldn't parse this line, skip it
                pass

        input_f.close()
-       if not is_data:
-           return None
        # Sanity check
-       if has_error_dy == True and not len(ty) == len(tdy):
+       if has_error_dy == True and not len(y) == len(dy):
            msg = "ascii_reader: y and dy have different length"
            raise RuntimeError, msg
-       if has_error_dx == True and not len(tx) == len(tdx):
+       if has_error_dx == True and not len(x) == len(dx):
            msg = "ascii_reader: y and dy have different length"
            raise RuntimeError, msg
        # If the data length is zero, consider this as
        # though we were not able to read the file.
-       if len(tx) == 0:
+       if len(x) == 0:
            raise RuntimeError, "ascii_reader: could not load file"

        #Let's re-order the data to make cal.
        # curve look better some cases
        ind = numpy.lexsort((ty, tx))
-       x = numpy.zeros(len(tx))
-       y = numpy.zeros(len(ty))
-       dy = numpy.zeros(len(tdy))
-       dx = numpy.zeros(len(tdx))
-       output = Data1D(x, y, dy=dy, dx=dx)
-       self.filename = output.filename = basename
-
        for i in ind:
            x[i] = tx[ind[i]]
…
        output.dx = dx[x != 0] if has_error_dx == True\
            else numpy.zeros(len(output.x))

-       output.xaxis("\\rm{Q}", 'A^{-1}')
-       output.yaxis("\\rm{Intensity}", "cm^{-1}")
+       if data_conv_q is not None:
+           output.xaxis("\\rm{Q}", output.x_unit)
+       else:
+           output.xaxis("\\rm{Q}", 'A^{-1}')
+       if data_conv_i is not None:
+           output.yaxis("\\rm{Intensity}", output.y_unit)
+       else:
+           output.yaxis("\\rm{Intensity}", "cm^{-1}")

        # Store loading process information
        output.meta_data['loader'] = self.type_name
…
            raise RuntimeError, "%s is empty" % path
        return output

    else:
        raise RuntimeError, "%s is not a file" % path
    return None
-
-   def splitline(self, line):
-       """
-       Splits a line into pieces based on common delimeters
-       :param line: A single line of text
-       :return: list of values
-       """
-       # Initial try for CSV (split on ,)
-       toks = line.split(',')
-       # Now try SCSV (split on ;)
-       if len(toks) < 2:
-           toks = line.split(';')
-       # Now go for whitespace
-       if len(toks) < 2:
-           toks = line.split()
-       return toks
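Aside: both revisions of this reader split data lines the same way, trying comma (CSV) first, then semicolon, then whitespace; the removed revision merely factored the logic into the splitline() helper deleted at the end of the diff. A minimal standalone sketch of that delimiter-fallback parsing, in plain Python with illustrative names that are not part of the sasview API:

    def split_data_line(line):
        # Initial try for CSV (split on ,), then SCSV (;), then whitespace
        toks = line.split(',')
        if len(toks) < 2:
            toks = line.split(';')
        if len(toks) < 2:
            toks = line.split()
        return toks

    def parse_xy(text):
        # Keep only rows whose first two tokens parse as floats;
        # anything else is treated as a header/footer line.
        rows = []
        for line in text.splitlines():
            toks = split_data_line(line)
            try:
                rows.append((float(toks[0]), float(toks[1])))
            except (IndexError, ValueError):
                continue
        return rows

    print(parse_xy("Q I\n0.01, 1.5\n0.02; 1.3\n0.03 1.1\n"))
    # [(0.01, 1.5), (0.02, 1.3), (0.03, 1.1)]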
src/sas/sasgui/perspectives/fitting/media/fitting.rst (r05829fb → rd85c194)

     Information on the SasView Optimisers <optimizer.rst>
-    Writing a Plugin <plugin.rst>
src/sas/sasgui/perspectives/fitting/media/fitting_help.rst (r05829fb → rb64b87c)

     * By :ref:`Writing_a_Plugin`

+    *NB: Because of the way these options are implemented, it is not possible for them*
+    *to use the polydispersity algorithms in SasView. Only models in the model library*
+    *can do this. At the time of writing (Release 3.1.0) work is in hand to make it*
+    *easier to add new models to the model library.*
+
     .. ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
…
     the :ref:`Advanced` option.

-    *NB: "Fit Parameters" has been split into two sections, those which can be
-    polydisperse (shape and orientation parameters) and those which are not
-    (scattering length densities, for example).*
-
     Sum|Multi(p1,p2)
     ^^^^^^^^^^^^^^^^
…
     *Advanced Custom Model Editor*.

-    See :ref:`Writing_a_Plugin` for details on the plugin format.
-
-    *NB: Sum/Product models are still using the SasView 3.x model format. Unless
-    you are confident about what you are doing, it is recommended that you
-    only modify lines denoted with the ## <----- comments!*
+    *NB: Unless you are confident about what you are doing, it is recommended that you*
+    *only modify lines denoted with the ## <----- comments!*

     When editing is complete, select *Run -> Compile* from the *Model Editor* menu bar. An
…
     *NB: Custom models shipped with SasView cannot be removed in this way.*
+
+    .. ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
+
+    .. _Writing_a_Plugin:
+
+    Writing a Plugin
+    ----------------
+
+    Advanced users can write their own model in Python and save it to the the SasView
+    *plugin_models* folder
+
+    *C:\\Users\\[username]\\.sasview\\plugin_models* - (on Windows)
+
+    in .py format. The next time SasView is started it will compile the plugin and add
+    it to the list of *Customized Models*.
+
+    It is recommended that existing plugin models be used as templates.

     .. ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
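The "Writing a Plugin" section restored above says only where plugin files live and that existing plugins make the best templates. Purely for orientation, here is a hedged sketch of the rough shape a 3.x-era plugin file might take; the sas.models.pluginmodel import path, the Model1DPlugin base class, and all parameter names below are assumptions about that era's plugin format, so start from a shipped plugin model rather than from this sketch:

    import math
    # Assumed 3.x-era import path; check a shipped plugin model for the real one.
    from sas.models.pluginmodel import Model1DPlugin

    class Model(Model1DPlugin):
        name = "sine_decay_test"  # hypothetical model name

        def __init__(self):
            Model1DPlugin.__init__(self, name=self.name)
            # Fittable parameters: name -> default value
            self.params = {'scale': 1.0, 'length': 10.0}
            # Parameter details: name -> [units, lower bound, upper bound]
            self.details = {'scale': ['', None, None],
                            'length': ['A', 0.0, None]}
            self.description = "I(q) = scale * sin(q*length)/(q*length)"

        def function(self, x=0.0):
            # Evaluate the model at a single q value
            qa = x * self.params['length']
            if qa == 0.0:
                return self.params['scale']
            return self.params['scale'] * math.sin(qa) / qa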
test/sasdataloader/test/utest_ascii.py (r7d94915 → rb699768)

         f = self.loader.load("ascii_test_6.txt")
         # The length of the data is 5
-        self.assertEqual(f, None)
+        self.assertEqual(len(f.x), 4)
+        self.assertEqual(f.x[0],0.013534)
+        self.assertEqual(f.x[3],0.022254)

 if __name__ == '__main__':
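For context, the updated test expects ascii_test_6.txt to load as real data rather than return None. A rough sketch of the equivalent standalone call, with the Loader import path assumed from the src/sas/sascalc layout shown in this changeset:

    # Assumed import path; the test reaches the same loader via its own setUp().
    from sas.sascalc.dataloader.loader import Loader

    loader = Loader()
    data = loader.load("ascii_test_6.txt")  # a Data1D object, per the test's usage
    print(len(data.x), data.x[0], data.x[3])
    # Expected from the updated assertions: 4 0.013534 0.022254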