Changes in [ce94504:2fd2d99] in sasview

Files:
- 3 added
- 26 deleted
- 5 edited

Legend (diff markers used below):
- ' ' Unmodified
- '+' Added
- '-' Removed
docs/sphinx-docs/source/user/user.rst (r5a71761 → r20a3c55)

     Working with SasView <working>
+    Computations with GPU <gpu_computations>
src/sas/sascalc/dataloader/readers/ascii_reader.py (rb699768 → r7d94915)

     ## File type
     type_name = "ASCII"

     ## Wildcards
     type = ["ASCII files (*.txt)|*.txt", …
…
     ## List of allowed extensions
     ext = ['.txt', '.TXT', '.dat', '.DAT', '.abs', '.ABS', 'csv', 'CSV']

     ## Flag to bypass extension check
     allow_all = True

     def read(self, path):
         """
         Load data file

         :param path: file path
-
         :return: Data1D object, or None

         :raise RuntimeError: when the file can't be opened
         :raise ValueError: when the length of the data vectors are inconsistent
…
         try:
             # Read in binary mode since GRASP frequently has non-ascii
-            # characters that brakes the open operation
+            # characters that breaks the open operation
             input_f = open(path, 'rb')
         except:
…
         buff = input_f.read()
         lines = buff.splitlines()

-        x = numpy.zeros(0)
-        y = numpy.zeros(0)
-        dy = numpy.zeros(0)
-        dx = numpy.zeros(0)
-
-        #temp. space to sort data
-        tx = numpy.zeros(0)
-        ty = numpy.zeros(0)
+        # Arrays for data storage
+        tx = numpy.zeros(0)
+        ty = numpy.zeros(0)
         tdy = numpy.zeros(0)
         tdx = numpy.zeros(0)
-
-        output = Data1D(x, y, dy=dy, dx=dx)
-        self.filename = output.filename = basename
-
-        data_conv_q = None
-        data_conv_i = None
-
-        if has_converter == True and output.x_unit != '1/A':
-            data_conv_q = Converter('1/A')
-            # Test it
-            data_conv_q(1.0, output.x_unit)
-
-        if has_converter == True and output.y_unit != '1/cm':
-            data_conv_i = Converter('1/cm')
-            # Test it
-            data_conv_i(1.0, output.y_unit)

         # The first good line of data will define whether
         # we have 2-column or 3-column ascii
         has_error_dx = None
         has_error_dy = None

         #Initialize counters for data lines and header lines.
-        is_data = False  # Has more than 5 lines
+        is_data = False
         # More than "5" lines of data is considered as actual
         # data unless that is the only data
-        mum_data_lines = 5
+        min_data_pts = 5
         # To count # of current data candidate lines
-        i = -1
+        candidate_lines = 0
         # To count total # of previous data candidate lines
-        i1 = -1
-        # To count # of header lines
-        j = -1
-        # Helps to count # of header lines
-        j1 = -1
-        #minimum required number of columns of data; ( <= 4).
+        candidate_lines_previous = 0
+        #minimum required number of columns of data
         lentoks = 2
         for line in lines:
-            # Initial try for CSV (split on ,)
-            toks = line.split(',')
-            # Now try SCSV (split on ;)
-            if len(toks) < 2:
-                toks = line.split(';')
-            # Now go for whitespace
-            if len(toks) < 2:
-                toks = line.split()
+            toks = self.splitline(line)
+            # To remember the # of columns in the current line of data
+            new_lentoks = len(toks)
             try:
+                if new_lentoks == 1 and not is_data:
+                    ## If only one item in list, no longer data
+                    raise ValueError
+                elif new_lentoks == 0:
+                    ## If the line is blank, skip and continue on
+                    ## In case of breaks within data sets.
+                    continue
+                elif new_lentoks != lentoks and is_data:
+                    ## If a footer is found, break the loop and save the data
+                    break
+                elif new_lentoks != lentoks and not is_data:
+                    ## If header lines are numerical
+                    candidate_lines = 0
+                    candidate_lines_previous = 0
+
                 #Make sure that all columns are numbers.
                 for colnum in range(len(toks)):
+                    # Any non-floating point values throw ValueError
                     float(toks[colnum])

+                candidate_lines += 1
                 _x = float(toks[0])
                 _y = float(toks[1])
-
-                #Reset the header line counters
-                if j == j1:
-                    j = 0
-                    j1 = 0
-
-                if i > 1:
+                _dx = None
+                _dy = None
+
+                #If 5 or more lines, this is considered the data set
+                if candidate_lines >= min_data_pts:
                     is_data = True
-
-                if data_conv_q is not None:
-                    _x = data_conv_q(_x, units=output.x_unit)
-
-                if data_conv_i is not None:
-                    _y = data_conv_i(_y, units=output.y_unit)
-
-                # If we have an extra token, check
-                # whether it can be interpreted as a
-                # third column.
-                _dy = None
-                if len(toks) > 2:
-                    try:
-                        _dy = float(toks[2])
-
-                        if data_conv_i is not None:
-                            _dy = data_conv_i(_dy, units=output.y_unit)
-
-                    except:
-                        # The third column is not a float, skip it.
-                        pass
-
-                # If we haven't set the 3rd column
-                # flag, set it now.
-                if has_error_dy == None:
-                    has_error_dy = False if _dy == None else True
-
-                #Check for dx
-                _dx = None
-                if len(toks) > 3:
-                    try:
-                        _dx = float(toks[3])
-
-                        if data_conv_i is not None:
-                            _dx = data_conv_i(_dx, units=output.x_unit)
-
-                    except:
-                        # The 4th column is not a float, skip it.
-                        pass
-
-                # If we haven't set the 3rd column
-                # flag, set it now.
-                if has_error_dx == None:
-                    has_error_dx = False if _dx == None else True
-
-                #After talked with PB, we decided to take care of only
-                # 4 columns of data for now.
-                #To remember the # of columns in the current
-                #line of data
-                new_lentoks = len(toks)
-
-                #If the previous columns not equal to the current,
-                #mark the previous as non-data and reset the dependents.
-                if lentoks != new_lentoks:
-                    if is_data == True:
-                        break
-                    else:
-                        i = -1
-                        i1 = 0
-                        j = -1
-                        j1 = -1
-
-                #Delete the previously stored lines of data candidates
-                # if is not data.
-                if i < 0 and -1 < i1 < mum_data_lines and \
-                    is_data == False:
-                    try:
-                        x = numpy.zeros(0)
-                        y = numpy.zeros(0)
-                    except:
-                        pass
-
-                x = numpy.append(x, _x)
-                y = numpy.append(y, _y)
-
-                if has_error_dy == True:
-                    #Delete the previously stored lines of
-                    # data candidates if is not data.
-                    if i < 0 and -1 < i1 < mum_data_lines and \
-                        is_data == False:
-                        try:
-                            dy = numpy.zeros(0)
-                        except:
-                            pass
-                    dy = numpy.append(dy, _dy)
-
-                if has_error_dx == True:
-                    #Delete the previously stored lines of
-                    # data candidates if is not data.
-                    if i < 0 and -1 < i1 < mum_data_lines and \
-                        is_data == False:
-                        try:
-                            dx = numpy.zeros(0)
-                        except:
-                            pass
-                    dx = numpy.append(dx, _dx)
-
-                #Same for temp.
-                #Delete the previously stored lines of data candidates
-                # if is not data.
-                if i < 0 and -1 < i1 < mum_data_lines and \
+
+                # If a 3rd column is present, consider it dy
+                if new_lentoks > 2:
+                    _dy = float(toks[2])
+                    has_error_dy = False if _dy == None else True
+
+                # If a 4th column is present, consider it dx
+                if new_lentoks > 3:
+                    _dx = float(toks[3])
+                    has_error_dx = False if _dx == None else True
+
+                # Delete the previously stored lines of data candidates if
+                # the list is not data
+                if candidate_lines == 1 and -1 < candidate_lines_previous < min_data_pts and \
                     is_data == False:
                     try:
                         tx = numpy.zeros(0)
                         ty = numpy.zeros(0)
+                        tdy = numpy.zeros(0)
+                        tdx = numpy.zeros(0)
                     except:
                         pass

+                if has_error_dy == True:
+                    tdy = numpy.append(tdy, _dy)
+                if has_error_dx == True:
+                    tdx = numpy.append(tdx, _dx)
                 tx = numpy.append(tx, _x)
                 ty = numpy.append(ty, _y)
-
-                if has_error_dy == True:
-                    #Delete the previously stored lines of
-                    # data candidates if is not data.
-                    if i < 0 and -1 < i1 < mum_data_lines and \
-                        is_data == False:
-                        try:
-                            tdy = numpy.zeros(0)
-                        except:
-                            pass
-                    tdy = numpy.append(tdy, _dy)
-                if has_error_dx == True:
-                    #Delete the previously stored lines of
-                    # data candidates if is not data.
-                    if i < 0 and -1 < i1 < mum_data_lines and \
-                        is_data == False:
-                        try:
-                            tdx = numpy.zeros(0)
-                        except:
-                            pass
-                    tdx = numpy.append(tdx, _dx)
-
-                #reset i1 and flag lentoks for the next
-                if lentoks < new_lentoks:
-                    if is_data == False:
-                        i1 = -1
+
                 #To remember the # of columns on the current line
                 # for the next line of data
-                lentoks = len(toks)
-
-                #Reset # of header lines and counts #
-                # of data candidate lines
-                if j == 0 and j1 == 0:
-                    i1 = i + 1
-                i += 1
-            except:
+                lentoks = new_lentoks
+                candidate_lines_previous = candidate_lines
+            except ValueError:
                 # It is data and met a non-number, so stop reading
                 if is_data == True:
                     break
                 lentoks = 2
-                #Counting # of header lines
-                j += 1
-                if j == j1 + 1:
-                    j1 = j
-                else:
-                    j = -1
+                has_error_dx = None
+                has_error_dy = None
                 #Reset # of lines of data candidates
-                i = -1
-
-                # Couldn't parse this line, skip it
+                candidate_lines = 0
+            except:
                 pass

         input_f.close()
+        if not is_data:
+            return None
         # Sanity check
-        if has_error_dy == True and not len(y) == len(dy):
+        if has_error_dy == True and not len(ty) == len(tdy):
             msg = "ascii_reader: y and dy have different length"
             raise RuntimeError, msg
-        if has_error_dx == True and not len(x) == len(dx):
+        if has_error_dx == True and not len(tx) == len(tdx):
             msg = "ascii_reader: y and dy have different length"
             raise RuntimeError, msg
         # If the data length is zero, consider this as
         # though we were not able to read the file.
-        if len(x) == 0:
+        if len(tx) == 0:
             raise RuntimeError, "ascii_reader: could not load file"

         #Let's re-order the data to make the calculated
         # curve look better in some cases
         ind = numpy.lexsort((ty, tx))
+        x = numpy.zeros(len(tx))
+        y = numpy.zeros(len(ty))
+        dy = numpy.zeros(len(tdy))
+        dx = numpy.zeros(len(tdx))
+        output = Data1D(x, y, dy=dy, dx=dx)
+        self.filename = output.filename = basename
+
         for i in ind:
             x[i] = tx[ind[i]]
…
         output.dx = dx[x != 0] if has_error_dx == True \
             else numpy.zeros(len(output.x))
-
-        if data_conv_q is not None:
-            output.xaxis("\\rm{Q}", output.x_unit)
-        else:
-            output.xaxis("\\rm{Q}", 'A^{-1}')
-        if data_conv_i is not None:
-            output.yaxis("\\rm{Intensity}", output.y_unit)
-        else:
-            output.yaxis("\\rm{Intensity}", "cm^{-1}")
+
+        output.xaxis("\\rm{Q}", 'A^{-1}')
+        output.yaxis("\\rm{Intensity}", "cm^{-1}")

         # Store loading process information
         output.meta_data['loader'] = self.type_name
…
             raise RuntimeError, "%s is empty" % path
         return output

         else:
             raise RuntimeError, "%s is not a file" % path
         return None
+
+    def splitline(self, line):
+        """
+        Splits a line into pieces based on common delimiters
+        :param line: A single line of text
+        :return: list of values
+        """
+        # Initial try for CSV (split on ,)
+        toks = line.split(',')
+        # Now try SCSV (split on ;)
+        if len(toks) < 2:
+            toks = line.split(';')
+        # Now go for whitespace
+        if len(toks) < 2:
+            toks = line.split()
+        return toks
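The heart of this revision is the replacement of the old header-counter bookkeeping (i, i1, j, j1) with explicit candidate-line counting. Distilled from the hunks above, here is a minimal self-contained sketch of the new heuristic; the function name parse_ascii, its return shape, and the x/y-only handling (no dx/dy) are illustrative simplifications, not the reader's actual API:

    import numpy

    def splitline(line):
        # Mirrors the new splitline() helper: try CSV, then semicolons,
        # then whitespace.
        toks = line.split(',')
        if len(toks) < 2:
            toks = line.split(';')
        if len(toks) < 2:
            toks = line.split()
        return toks

    def parse_ascii(lines, min_data_pts=5):
        # Returns (x, y) arrays, or None when no run of at least
        # min_data_pts consecutive numeric rows is found.
        tx, ty = [], []
        candidate_lines = 0
        lentoks = 2               # expected column count, seeded at 2
        is_data = False
        for line in lines:
            toks = splitline(line)
            new_lentoks = len(toks)
            try:
                if new_lentoks == 0:
                    continue                     # blank line within a data set
                if is_data and new_lentoks != lentoks:
                    break                        # column count changed: footer
                if not is_data and new_lentoks < 2:
                    raise ValueError             # lone token: header text
                vals = [float(t) for t in toks]  # non-numeric raises ValueError
                if not is_data and new_lentoks != lentoks:
                    candidate_lines, tx, ty = 0, [], []  # shape changed: restart
                candidate_lines += 1
                if candidate_lines >= min_data_pts:
                    is_data = True               # enough rows: promote to data
                tx.append(vals[0])
                ty.append(vals[1])
                lentoks = new_lentoks
            except ValueError:
                if is_data:
                    break                        # non-numeric footer after data
                candidate_lines, tx, ty = 0, [], []      # header line: reset
                lentoks = 2
        if not is_data:
            return None
        return numpy.array(tx), numpy.array(ty)

Five or more consecutive well-formed rows promote the candidates to real data; everything before that run is treated as header, and the first shape change after it as footer. This is exactly why the four-point test file in utest_ascii.py below now loads as None.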
src/sas/sasgui/perspectives/fitting/media/fitting.rst (rd85c194 → r05829fb)

     Information on the SasView Optimisers <optimizer.rst>

+    Writing a Plugin <plugin.rst>
src/sas/sasgui/perspectives/fitting/media/fitting_help.rst (rb64b87c → r05829fb)

 * By :ref:`Writing_a_Plugin`

-*NB: Because of the way these options are implemented, it is not possible for them*
-*to use the polydispersity algorithms in SasView. Only models in the model library*
-*can do this. At the time of writing (Release 3.1.0) work is in hand to make it*
-*easier to add new models to the model library.*
-
 .. ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
…
 the :ref:`Advanced` option.

+*NB: "Fit Parameters" has been split into two sections, those which can be
+polydisperse (shape and orientation parameters) and those which are not
+(scattering length densities, for example).*
+
 Sum|Multi(p1,p2)
 ^^^^^^^^^^^^^^^^
…
 *Advanced Custom Model Editor*.

-*NB: Unless you are confident about what you are doing, it is recommended that you*
-*only modify lines denoted with the ## <----- comments!*
+See :ref:`Writing_a_Plugin` for details on the plugin format.
+
+*NB: Sum/Product models are still using the SasView 3.x model format. Unless
+you are confident about what you are doing, it is recommended that you
+only modify lines denoted with the ## <----- comments!*

 When editing is complete, select *Run -> Compile* from the *Model Editor* menu bar. An
…

 *NB: Custom models shipped with SasView cannot be removed in this way.*
-
-.. ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
-
-.. _Writing_a_Plugin:
-
-Writing a Plugin
-----------------
-
-Advanced users can write their own model in Python and save it to the SasView
-*plugin_models* folder
-
-*C:\\Users\\[username]\\.sasview\\plugin_models* - (on Windows)
-
-in .py format. The next time SasView is started it will compile the plugin and add
-it to the list of *Customized Models*.
-
-It is recommended that existing plugin models be used as templates.

 .. ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
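The removed "Writing a Plugin" walkthrough moves into its own plugin.rst document (linked from fitting.rst above via the :ref:`Writing_a_Plugin` target). For orientation only, here is a minimal sketch of what a model file dropped into the plugin_models folder can look like, assuming the sasmodels-style plugin format that the new document describes; the model name, parameter table, and functional form below are illustrative assumptions, not content of this changeset:

    # my_power_law.py -- hypothetical plugin model sketch
    from numpy import inf

    name = "my_power_law"
    title = "Illustrative power-law plugin"
    description = "I(q) = scale * q^(-power) + background"

    # columns: [name, units, default, [min, max], type, description]
    parameters = [
        ["power", "", 4.0, [0, inf], "", "Power-law exponent"],
    ]

    def Iq(q, power):
        # scale and background are applied by the fitting framework, not here
        return q ** -power

    Iq.vectorized = True  # Iq accepts numpy arrays of q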
test/sasdataloader/test/utest_ascii.py (rb699768 → r7d94915)

         f = self.loader.load("ascii_test_6.txt")
         # The length of the data is 5
-        self.assertEqual(len(f.x), 4)
-        self.assertEqual(f.x[0], 0.013534)
-        self.assertEqual(f.x[3], 0.022254)
+        self.assertEqual(f, None)

 if __name__ == '__main__':
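The new expectation encodes the reader's changed contract: a file without at least five consecutive numeric rows no longer comes back as a truncated Data1D object. A short usage sketch, assuming the same Loader entry point this test module uses:

    from sas.sascalc.dataloader.loader import Loader

    loader = Loader()
    data = loader.load("ascii_test_6.txt")  # file holds only 4 data rows
    assert data is None  # short candidate runs are rejected, not truncated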