Changeset 5a8cdbb in sasview for src/sas/sascalc/dataloader
- Timestamp: Aug 1, 2017 12:02:35 PM
- Branches: master, ESS_GUI, ESS_GUI_Docs, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_iss959, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc, costrafo411, magnetic_scatt, release-4.2.2, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, ticket885, unittest-saveload
- Children: 511ccb2d
- Parents: 248ff73 (diff), bc04647 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links on the parents above to see all the changes relative to each parent.

- Location: src/sas/sascalc/dataloader
- Files:
  - 2 added
  - 1 deleted
  - 15 edited
src/sas/sascalc/dataloader/data_info.py
rbeba407 → r5a8cdbb: add `from __future__ import print_function` immediately after the license banner:

```python
######################################################################

from __future__ import print_function

#TODO: Keep track of data manipulation in the 'process' data structure.
```
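For context (not part of the commit's code, just the standard semantics of the future import): it makes Python 2 parse `print` as the Python 3 builtin function, so function-call forms behave identically under both interpreters.

```python
from __future__ import print_function

# Without the import, Python 2 would treat this as printing a tuple;
# with it, print accepts keyword arguments such as sep.
print("loading", "data_info.py", sep=" -> ")
```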
src/sas/sascalc/dataloader/manipulations.py
r7432acb → r324e0bf. The module header gains `from __future__ import division`, a how-to-test note in the docstring, normalized license-banner comments, and updated imports (`numpy` is now imported as `np`, and `sys` is added):

````python
from __future__ import division
"""
Data manipulations for 2D data sets.
Using the meta data information, various types of averaging
are performed in Q-space

To test this module use:
```
cd test
PYTHONPATH=../src/ python2 -m sasdataloader.test.utest_averaging DataInfoTests.test_sectorphi_quarter
```
"""
######################################################################
# This software was developed by the University of Tennessee as part of the
# Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
# project funded by the US National Science Foundation.
# See the license text in license.txt
# copyright 2008, University of Tennessee
######################################################################

# TODO: copy the meta data from the 2D object to the resulting 1D object
import math
import numpy as np
import sys
````

The rest of the file is reorganized rather than rewritten: `reader2D_converter`, `_Slab`, `SlabY`, `SlabX`, `Boxsum`, `Boxavg`, `CircularAverage`, and `Ring` are lifted out of their old positions (scattered before and after `get_pixel_fraction_square`) and reinserted, updated, after the helper functions; the updated versions appear in the hunks below. `get_intercept` moves up to sit between `get_pixel_fraction_square` and `get_pixel_fraction`, with only grammar fixes to its docstring:

```python
def get_intercept(q, q_0, q_1):
    """
    Returns the fraction of the side at which the
    q-value intercepts the pixel, None otherwise.
    The value returned is the fraction ON THE SIDE
    OF THE LOWEST Q. ::

            A           B
        +-----------+--------+    <--- pixel size
        0                    1
        Q_0 -------- Q ----- Q_1   <--- equivalent Q range
        if Q_1 > Q_0, A is returned
        if Q_1 < Q_0, B is returned
        if Q is outside the range of [Q_0, Q_1], None is returned

    """
    if q_1 > q_0:
        if q > q_0 and q <= q_1:
            return (q - q_0) / (q_1 - q_0)
    else:
        if q > q_1 and q <= q_0:
            return (q - q_1) / (q_0 - q_1)
    return None
```
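A quick worked check of the `get_intercept` semantics (illustrative numbers of mine, not from the changeset): the fraction is always measured from the low-Q side, whichever way the pixel is traversed.

```python
# Pixel edges at Q_0 = 0.0 and Q_1 = 1.0: a quarter of the way across.
assert get_intercept(0.25, 0.0, 1.0) == 0.25
# Same pixel traversed the other way (Q_0 = 1.0, Q_1 = 0.0): the fraction
# is still measured from the lowest-Q side, so the same value comes back.
assert get_intercept(0.25, 1.0, 0.0) == 0.25
# A q-value outside [Q_0, Q_1] yields None.
assert get_intercept(2.0, 0.0, 1.0) is None
```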
The resolution-smearing block that `CircularAverage` and `_Sector._agv` used to duplicate inline is factored into a new helper, `get_dq_data`, which also fixes the indexing of the extrapolation endpoints (the inline version looked up `data2D.dqx_data[data2D.q_data[z_max]]`, indexing with a q *value* rather than the position of the extreme q point):

```python
def get_dq_data(data2D):
    '''
    Get the dq for resolution averaging.
    The pinhole and detector-pixel contributions are present in both
    directions of the 2D data and must be subtracted when converting to
    1D: dq_overlap should ideally be calculated at q = 0. Note: this
    method works only for pinhole geometry.
    Extrapolate dqx(r) and dqy(phi) at q = 0, and take an average.
    '''
    z_max = max(data2D.q_data)
    z_min = min(data2D.q_data)
    dqx_at_z_max = data2D.dqx_data[np.argmax(data2D.q_data)]
    dqx_at_z_min = data2D.dqx_data[np.argmin(data2D.q_data)]
    dqy_at_z_max = data2D.dqy_data[np.argmax(data2D.q_data)]
    dqy_at_z_min = data2D.dqy_data[np.argmin(data2D.q_data)]
    # Find dqx at q = 0 by linear extrapolation
    dq_overlap_x = (dqx_at_z_min * z_max - dqx_at_z_max * z_min) / (z_max - z_min)
    # when extrapolation goes wrong
    if dq_overlap_x > min(data2D.dqx_data):
        dq_overlap_x = min(data2D.dqx_data)
    dq_overlap_x *= dq_overlap_x
    # Find dqy at q = 0 by linear extrapolation
    dq_overlap_y = (dqy_at_z_min * z_max - dqy_at_z_max * z_min) / (z_max - z_min)
    # when extrapolation goes wrong
    if dq_overlap_y > min(data2D.dqy_data):
        dq_overlap_y = min(data2D.dqy_data)
    # get dq at q = 0
    dq_overlap_y *= dq_overlap_y

    dq_overlap = np.sqrt((dq_overlap_x + dq_overlap_y) / 2.0)
    # Final protection of dq
    if dq_overlap < 0:
        dq_overlap = dqy_at_z_min
    dqx_data = data2D.dqx_data[np.isfinite(data2D.data)]
    dqy_data = data2D.dqy_data[np.isfinite(data2D.data)] - dq_overlap
    # dqx_data = dq_r, dqy_data = dq_phi
    # Convert the 2D dq to 1D here
    dq_data = np.sqrt(dqx_data**2 + dqy_data**2)
    return dq_data
```
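The extrapolation here is just the straight line through (z_min, dq_min) and (z_max, dq_max) evaluated at q = 0. With made-up numbers:

```python
z_min, z_max = 0.01, 0.1                   # extreme |q| values in the data
dqx_at_z_min, dqx_at_z_max = 0.002, 0.011  # dqx at those two points

# Two-point line evaluated at q = 0:
dq0 = (dqx_at_z_min * z_max - dqx_at_z_max * z_min) / (z_max - z_min)
assert abs(dq0 - 0.001) < 1e-9
```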
`reader2D_converter` moves below the helpers with its behavior intact; `numpy` becomes `np`, the Python 2 statement form `raise ValueError, "..."` becomes `raise ValueError("Can't convert this data: data=None...")`, and the docstring notes that it is mainly used by the readers. A new `Binning` class then encapsulates the bin-index arithmetic that `_Sector._agv` previously wrote out by hand:

```python
class Binning(object):
    '''
    This class just creates a binning object,
    either linear or logarithmic.
    '''

    def __init__(self, min_value, max_value, n_bins, base=None):
        '''
        if base is None: linear binning
        '''
        self.min = min_value if min_value > 0 else 0.0001
        self.max = max_value
        self.n_bins = n_bins
        self.base = base

    def get_bin_index(self, value):
        '''
        The general formula for logarithmic binning is:
        bin = floor(N * (log(x) - log(min)) / (log(max) - log(min)))
        '''
        if self.base:
            temp_x = self.n_bins * (math.log(value, self.base) -
                                    math.log(self.min, self.base))
            temp_y = math.log(self.max, self.base) - math.log(self.min, self.base)
        else:
            temp_x = self.n_bins * (value - self.min)
            temp_y = self.max - self.min
        # Bin index calculation
        return int(math.floor(temp_x / temp_y))
```

`_Slab` (with `SlabX` and `SlabY`), `Boxsum`, and `Boxavg` are reinserted essentially unchanged apart from the mechanical pass applied throughout the file: `numpy` → `np`, `raise RuntimeError, msg` → `raise RuntimeError(msg)`, comment and whitespace cleanups, and `####...` separator rules between top-level sections. `CircularAverage` additionally delegates to the new helper and fixes a broken emptiness test (`if len(data2D.q_data) is None:`, which can never be true, becomes a check on the filtered array):

```python
        dq_data = None
        if data2D.dqx_data is not None and data2D.dqy_data is not None:
            dq_data = get_dq_data(data2D)

        if len(q_data) == 0:
            msg = "Circular averaging: invalid q_data: %g" % data2D.q_data
            raise RuntimeError(msg)
```

`Ring.__call__` gets the same treatment (`raise RuntimeError("Ring averaging only takes plottable_2D objects")`, `np.zeros`, `np.isfinite`), with its commented-out tight-binning diagnostic left in place.
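Before moving on to the sector averagers that consume it, a quick sanity check of `Binning.get_bin_index` (values mine): linear binning spaces bin edges evenly in x, while passing `base` spaces them evenly in log(x).

```python
lin = Binning(0.1, 1.1, n_bins=10)
assert lin.get_bin_index(0.35) == 2        # floor(10 * (0.35 - 0.1) / 1.0)

log10 = Binning(0.001, 1.0, n_bins=3, base=10)
# Decade bins [0.001, 0.01), [0.01, 0.1), [0.1, 1.0) -> indices 0, 1, 2
assert log10.get_bin_index(0.005) == 0
assert log10.get_bin_index(0.05) == 1
assert log10.get_bin_index(0.5) == 2
```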
The `_Sector` base class is where the behavioral change lands. Its constructor gains the binning base:

```python
    def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, nbins=20,
                 base=None):
        '''
        :param base: must be a valid base for a logarithm, i.e.,
            a positive number
        '''
        self.r_min = r_min
        self.r_max = r_max
        self.phi_min = phi_min
        self.phi_max = phi_max
        self.nbins = nbins
        self.base = base
```

Inside `_agv`, the second copy of the dq extrapolation block is replaced by a call to `get_dq_data(data2D)`, the local `Pi` alias gives way to `math.pi`, and the `frac` bookkeeping disappears: a pixel is now simply in or out of the region, points outside the radius are skipped up front (`if self.r_min > q_value or q_value > self.r_max: continue`), accumulation uses plain sums (`y[i_bin] += data_n`, `y_err[i_bin] += err_data[n]**2`, `y_counts[i_bin] += 1`), and the bin index comes from a `Binning` object chosen by the averaging mode:

```python
        # binning object
        if run.lower() == 'phi':
            binning = Binning(self.phi_min, self.phi_max, self.nbins, self.base)
        else:
            binning = Binning(self.r_min, self.r_max, self.nbins, self.base)

        # ... later, inside the per-pixel loop ...

            # Get the binning index
            if run.lower() == 'phi':
                i_bin = binning.get_bin_index(phi_value)
            else:
                i_bin = binning.get_bin_index(q_value)
```

The minor-wing logic for `'q2'` runs, the clamp of the edge case at phi = 2pi, and the final averaging of the sums are unchanged apart from `np` and `raise ValueError(msg)` cleanups. `SectorPhi` and `SectorQ` pick up only whitespace changes. The cut classes receive the same mechanical pass: `Ringcut`, `Boxcut`, and `Sectorcut` switch to `np.sqrt`/`np.arctan2` and to call-style exceptions (for example `raise RuntimeError("Ring cut only takes plottable_2D objects")`), with long boolean expressions re-wrapped for readability and `####...` separators added between the classes.
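Putting the pieces together, a sector average with logarithmic Q bins now reads something like this (a sketch: `data2d` stands for any Data2D object loaded elsewhere):

```python
import math
from sas.sascalc.dataloader.manipulations import SectorQ

# I(|Q|) averaged over a 60-degree wedge, with 20 log-spaced Q bins
sector = SectorQ(r_min=0.005, r_max=0.1,
                 phi_min=0.0, phi_max=math.pi / 3,
                 nbins=20, base=10)
profile = sector(data2d)   # -> Data1D with x, y, dy (and dx when dq data exist)
```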
src/sas/sascalc/dataloader/readers/IgorReader.py
r959eb01 → ra1b8fee: add `from __future__ import print_function` after the license banner, before `import os`:

```python
#copyright 2008, University of Tennessee
#############################################################################
from __future__ import print_function

import os
```
src/sas/sascalc/dataloader/readers/sesans_reader.py
rb2c28a5 r5a8cdbb 24 24 Class to load sesans files (6 columns). 25 25 """ 26 # #File type26 # File type 27 27 type_name = "SESANS" 28 28 … … 30 30 type = ["SESANS files (*.ses)|*.ses", 31 31 "SESANS files (*..sesans)|*.sesans"] 32 # #List of allowed extensions32 # List of allowed extensions 33 33 ext = ['.ses', '.SES', '.sesans', '.SESANS'] 34 34 35 # #Flag to bypass extension check36 allow_all = False35 # Flag to bypass extension check 36 allow_all = True 37 37 38 38 def get_file_contents(self): … … 42 42 self.output = [] 43 43 44 error_message = "" 45 loaded_correctly = True 44 line = self.f_open.readline() 45 params = {} 46 while not line.startswith("BEGIN_DATA"): 47 terms = line.split() 48 if len(terms) >= 2: 49 params[terms[0]] = " ".join(terms[1:]) 50 line = self.f_open.readline() 51 self.params = params 46 52 47 import pdb; pdb.set_trace() 53 if "FileFormatVersion" not in self.params: 54 raise FileContentsException("SES file missing FileFormatVersion") 55 if float(self.params["FileFormatVersion"]) >= 2.0: 56 raise FileContentsException("SASView only supports SES version 1") 48 57 49 buff = self.f_open.read() 50 lines = buff.splitlines() 58 if "SpinEchoLength_unit" not in self.params: 59 raise FileContentsException("SpinEchoLength has no units") 60 if "Wavelength_unit" not in self.params: 61 raise FileContentsException("Wavelength has no units") 62 if params["SpinEchoLength_unit"] != params["Wavelength_unit"]: 63 raise FileContentsException("The spin echo data has rudely used " 64 "different units for the spin echo length " 65 "and the wavelength. While sasview could " 66 "handle this instance, it is a violation " 67 "of the file format and will not be " 68 "handled by other software.") 51 69 52 self.current_datainfo.filename = os.path.basename(self.f_open.name)70 headers = self.f_open.readline().split() 53 71 54 paramnames=[] 55 paramvals=[] 56 zvals=[] 57 dzvals=[] 58 lamvals=[] 59 dlamvals=[] 60 Pvals=[] 61 dPvals=[] 72 self._insist_header(headers, "SpinEchoLength") 73 self._insist_header(headers, "Depolarisation") 74 self._insist_header(headers, "Depolarisation_error") 75 self._insist_header(headers, "Wavelength") 62 76 63 for line in lines: 64 # Initial try for CSV (split on ,) 65 line=line.strip() 66 toks = line.split('\t') 67 if len(toks)==2: 68 paramnames.append(toks[0]) 69 paramvals.append(toks[1]) 70 elif len(toks)>5: 71 zvals.append(toks[0]) 72 dzvals.append(toks[3]) 73 lamvals.append(toks[4]) 74 dlamvals.append(toks[5]) 75 Pvals.append(toks[1]) 76 dPvals.append(toks[2]) 77 data = np.loadtxt(self.f_open) 78 79 if data.shape[1] != len(headers): 80 raise FileContentsException( 81 "File has {} headers, but {} columns".format( 82 len(headers), 83 data.shape[1])) 84 85 if not data.size: 86 raise FileContentsException("{} is empty".format(path)) 87 x = data[:, headers.index("SpinEchoLength")] 88 if "SpinEchoLength_error" in headers: 89 dx = data[:, headers.index("SpinEchoLength_error")] 77 90 else: 78 continue 91 dx = x * 0.05 92 lam = data[:, headers.index("Wavelength")] 93 if "Wavelength_error" in headers: 94 dlam = data[:, headers.index("Wavelength_error")] 95 else: 96 dlam = lam * 0.05 97 y = data[:, headers.index("Depolarisation")] 98 dy = data[:, headers.index("Depolarisation_error")] 79 99 80 x=[] 81 y=[] 82 lam=[] 83 dx=[] 84 dy=[] 85 dlam=[] 86 lam_header = lamvals[0].split() 87 data_conv_z = None 88 default_z_unit = "A" 89 data_conv_P = None 90 default_p_unit = " " # Adjust unit for axis (L^-3) 91 lam_unit = lam_header[1].replace("[","").replace("]","") 92 if lam_unit 
== 'AA': 93 lam_unit = 'A' 94 varheader=[zvals[0],dzvals[0],lamvals[0],dlamvals[0],Pvals[0],dPvals[0]] 95 valrange=range(1, len(zvals)) 96 try: 97 for i in valrange: 98 x.append(float(zvals[i])) 99 y.append(float(Pvals[i])) 100 lam.append(float(lamvals[i])) 101 dy.append(float(dPvals[i])) 102 dx.append(float(dzvals[i])) 103 dlam.append(float(dlamvals[i])) 104 except ValueError as val_err: 105 err_msg = "Invalid float" 106 err_msg += ":".join(val_err.message.split(":")[1:]) 107 raise FileContentsException(err_msg) 100 lam_unit = self._unit_fetch("Wavelength") 101 x, x_unit = self._unit_conversion(x, "A", 102 self._unit_fetch( 103 "SpinEchoLength")) 104 dx, dx_unit = self._unit_conversion( 105 dx, lam_unit, 106 self._unit_fetch("SpinEchoLength")) 107 dlam, dlam_unit = self._unit_conversion( 108 dlam, lam_unit, 109 self._unit_fetch("Wavelength")) 110 y_unit = self._unit_fetch("Depolarisation") 108 111 109 x, y, lam, dy, dx, dlam = [ 110 np.asarray(v, 'double') 111 for v in (x, y, lam, dy, dx, dlam) 112 ] 112 self.current_dataset.x = x 113 self.current_dataset.y = y 114 self.current_dataset.lam = lam 115 self.current_dataset.dy = dy 116 self.current_dataset.dx = dx 117 self.current_dataset.dlam = dlam 118 self.current_datainfo.isSesans = True 113 119 114 self.f_open.close() 120 self.current_datainfo._yunit = y_unit 121 self.current_datainfo._xunit = x_unit 122 self.current_datainfo.source.wavelength_unit = lam_unit 123 self.current_datainfo.source.wavelength = lam 124 self.current_datainfo.filename = os.basename(self.f_open.name) 125 self.current_dataset.xaxis(r"\rm{z}", x_unit) 126 # Adjust label to ln P/(lam^2 t), remove lam column refs 127 self.current_dataset.yaxis(r"\rm{ln(P)/(t \lambda^2)}", y_unit) 128 # Store loading process information 129 self.current_datainfo.meta_data['loader'] = self.type_name 130 self.current_datainfo.sample.name = params["Sample"] 131 self.current_datainfo.sample.ID = params["DataFileTitle"] 132 self.current_datainfo.sample.thickness = self._unit_conversion( 133 float(params["Thickness"]), "cm", 134 self._unit_fetch("Thickness"))[0] 115 135 116 self.current_dataset.x, self.current_dataset._xunit = self._unit_conversion(x, lam_unit, default_z_unit) 117 self.current_dataset.y = y 118 self.current_dataset._yunit = r'\AA^{-2} cm^{-1}' # output y_unit added 119 self.current_dataset.dx, _ = self._unit_conversion(dx, lam_unit, default_z_unit) 120 self.current_dataset.dy = dy 121 self.current_dataset.lam, _ = self._unit_conversion(lam, lam_unit, default_z_unit) 122 self.current_dataset.dlam, _ = self._unit_conversion(dlam, lam_unit, default_z_unit) 136 self.current_datainfo.sample.zacceptance = ( 137 float(params["Theta_zmax"]), 138 self._unit_fetch("Theta_zmax")) 123 139 124 self.current_dataset.xaxis(r"\rm{z}", self.current_dataset._xunit) 125 self.current_dataset.yaxis(r"\rm{ln(P)/(t \lambda^2)}", self.current_dataset._yunit) # Adjust label to ln P/(lam^2 t), remove lam column refs 140 self.current_datainfo.sample.yacceptance = ( 141 float(params["Theta_ymax"]), 142 self._unit_fetch("Theta_ymax")) 126 143 127 # Store loading process information 128 self.current_datainfo.meta_data['loader'] = self.type_name 129 try: 130 self.current_datainfo.sample.thickness = float(paramvals[6]) 131 except ValueError as val_err: 132 loaded_correctly = False 133 error_message += "\nInvalid sample thickness '{}'".format(paramvals[6]) 144 self.send_to_output() 134 145 135 self.current_datainfo.sample.name = paramvals[1] 136 self.current_datainfo.sample.ID = paramvals[0] 137 
zaccept_unit_split = paramnames[7].split("[") 138 zaccept_unit = zaccept_unit_split[1].replace("]","") 139 if zaccept_unit.strip() == r'\AA^-1' or zaccept_unit.strip() == r'\A^-1': 140 zaccept_unit = "1/A" 141 self.current_datainfo.sample.zacceptance=(float(paramvals[7]),zaccept_unit) 146 @staticmethod 147 def _insist_header(headers, name): 148 if name not in headers: 149 raise FileContentsException( 150 "Missing {} column in spin echo data".format(name)) 142 151 143 self.current_datainfo.vars = varheader 152 @staticmethod 153 def _unit_conversion(value, value_unit, default_unit): 154 """ 155 Performs unit conversion on a measurement. 144 156 145 if len(self.current_dataset.x) < 1: 146 raise FileContentsException("No data points in file.") 147 148 self.send_to_output() 149 150 if not loaded_correctly: 151 raise DataReaderException(error_message) 152 153 def _unit_conversion(self, value, value_unit, default_unit): 154 if has_converter == True and value_unit != default_unit: 155 data_conv_q = Converter(value_unit) 156 value = data_conv_q(value, units=default_unit) 157 :param value: The magnitude of the measurement 158 :param value_unit: a string containing the final desired unit 159 :param default_unit: string with the units of the original measurement 160 :return: The magnitude of the measurement in the new units 161 """ 162 # (float, string, string) -> float 163 if has_converter and value_unit != default_unit: 164 data_conv_q = Converter(default_unit) 165 value = data_conv_q(value, units=value_unit) 157 166 new_unit = default_unit 158 167 else: 159 168 new_unit = value_unit 160 169 return value, new_unit 170 171 def _unit_fetch(self, unit): 172 return self.params[unit+"_unit"] -
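The new SES loader treats the file as a free-form key/value header terminated by a BEGIN_DATA marker, followed by a whitespace-delimited column block that numpy.loadtxt can consume directly, with columns located by header name. A minimal sketch of that convention, assuming a hypothetical parse_ses_header helper that is not part of SasView:

```python
import numpy as np
from io import StringIO

def parse_ses_header(handle):
    """Collect 'key value...' pairs until the BEGIN_DATA marker."""
    params = {}
    line = handle.readline()
    while line and not line.startswith("BEGIN_DATA"):
        terms = line.split()
        if len(terms) >= 2:
            params[terms[0]] = " ".join(terms[1:])
        line = handle.readline()
    return params

handle = StringIO(u"FileFormatVersion 1.0\n"
                  u"SpinEchoLength_unit A\n"
                  u"Wavelength_unit A\n"
                  u"BEGIN_DATA\n"
                  u"SpinEchoLength Depolarisation Depolarisation_error Wavelength\n"
                  u"100.0 -0.01 0.001 2.0\n"
                  u"200.0 -0.02 0.001 2.0\n")
params = parse_ses_header(handle)
# params == {'FileFormatVersion': '1.0', 'SpinEchoLength_unit': 'A',
#            'Wavelength_unit': 'A'}
headers = handle.readline().split()
data = np.loadtxt(handle)            # shape: (n_rows, len(headers))
sel = data[:, headers.index("SpinEchoLength")]
```

Looking columns up by name is what lets the optional error columns fall back to a 5% estimate (dx = x * 0.05) when they are absent from the file.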
src/sas/sascalc/dataloader/loader.py
r463e7ffc r8dec7e7 1 1 """ 2 2 File handler to support different file extensions. 3 Uses reflectomet ry'sregistry utility.3 Uses reflectometer registry utility. 4 4 5 5 The default readers are found in the 'readers' sub-module … … 14 14 """ 15 15 ##################################################################### 16 # This software was developed by the University of Tennessee as part of the17 # Distributed Data Analysis of Neutron Scattering Experiments (DANSE)18 # project funded by the US National Science Foundation.19 # See the license text in license.txt20 # copyright 2008, University of Tennessee16 # This software was developed by the University of Tennessee as part of the 17 # Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 18 # project funded by the US National Science Foundation. 19 # See the license text in license.txt 20 # copyright 2008, University of Tennessee 21 21 ###################################################################### 22 22 … … 29 29 # Default readers are defined in the readers sub-module 30 30 import readers 31 from loader_exceptions import NoKnownLoaderException, FileContentsException,\ 32 DefaultReaderException 31 33 from readers import ascii_reader 32 34 from readers import cansas_reader 35 from readers import cansas_reader_HDF5 33 36 34 37 logger = logging.getLogger(__name__) 38 35 39 36 40 class Registry(ExtensionRegistry): … … 39 43 Readers and writers are supported. 40 44 """ 41 42 45 def __init__(self): 43 46 super(Registry, self).__init__() 44 47 45 # #Writers48 # Writers 46 49 self.writers = {} 47 50 48 # #List of wildcards51 # List of wildcards 49 52 self.wildcards = ['All (*.*)|*.*'] 50 53 51 # #Creation time, for testing54 # Creation time, for testing 52 55 self._created = time.time() 53 56 … … 63 66 of a particular reader 64 67 65 Defaults to the ascii (multi-column) reader 66 if no reader was registered for the file's 67 extension. 68 Defaults to the ascii (multi-column), cansas XML, and cansas NeXuS 69 readers if no reader was registered for the file's extension. 68 70 """ 69 71 try: 70 72 return super(Registry, self).load(path, format=format) 71 except: 72 try: 73 # No reader was found. Default to the ascii reader. 74 ascii_loader = ascii_reader.Reader() 75 return ascii_loader.read(path) 76 except: 77 cansas_loader = cansas_reader.Reader() 78 return cansas_loader.read(path) 73 except NoKnownLoaderException as nkl_e: 74 pass # try the ASCII reader 75 except FileContentsException as fc_exc: 76 raise RuntimeError(fc_exc.message) 77 except Exception: 78 pass 79 try: 80 ascii_loader = ascii_reader.Reader() 81 return ascii_loader.read(path) 82 except DefaultReaderException: 83 pass # Loader specific error to try the cansas XML reader 84 except FileContentsException as e: 85 # TODO: handle error 86 raise RuntimeError(e) 87 try: 88 cansas_loader = cansas_reader.Reader() 89 return cansas_loader.read(path) 90 except DefaultReaderException: 91 pass # Loader specific error to try the cansas NeXuS reader 92 except FileContentsException as e: 93 # TODO: Handle errors properly 94 raise RuntimeError(e) 95 except Exception as csr: 96 # TODO: Modify cansas reader to throw DefaultReaderException 97 pass 98 try: 99 cansas_nexus_loader = cansas_reader_HDF5.Reader() 100 return cansas_nexus_loader.read(path) 101 except DefaultReaderException as e: 102 logging.error("No default loader can load the data") 103 # No known reader available. 
Give up and throw an error 104 msg = "\n\tUnknown data format: %s.\n\tThe file is not a " % path 105 msg += "known format that can be loaded by SasView.\n" 106 raise NoKnownLoaderException, msg 107 except FileContentsException as e: 108 # TODO: Handle error(s) properly 109 raise RuntimeError(e) 79 110 80 111 def find_plugins(self, dir): -
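The load path now distinguishes two failure modes per reader: DefaultReaderException means "not my format, try the next reader", while FileContentsException means the format matched but the contents are broken, which aborts the chain. A condensed sketch of that cascade with stand-in reader callables (the real chain is the registry, then ascii, then canSAS XML, then canSAS NeXuS):

```python
class NoKnownLoaderException(Exception): pass   # nothing in the chain recognised the file
class DefaultReaderException(Exception): pass   # reader-specific "not my format"
class FileContentsException(Exception): pass    # right format, but broken contents

def load_with_fallbacks(path, readers):
    """Try each reader in turn; only format mismatches continue the chain."""
    for read in readers:
        try:
            return read(path)
        except DefaultReaderException:
            continue                      # this reader cannot claim the file
        except FileContentsException as exc:
            raise RuntimeError(exc)       # genuine content error: stop immediately
    raise NoKnownLoaderException("Unknown data format: %s" % path)
```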
src/sas/sascalc/dataloader/readers/__init__.py
r959eb01 r7a5d066 1 # Backward compatibility with the previous implementation of the default readers 2 from associations import register_readers 1 # Method to associate extensions to default readers 2 from associations import read_associations 3 3 4 # Method to associate extensions to default readers 5 from associations import read_associations 6 4 7 5 # Method to return the location of the XML settings file -
src/sas/sascalc/dataloader/readers/abs_reader.py
r959eb01 rad92c5a 1 1 """ 2 IGOR 1D data reader 2 3 """ 3 4 ##################################################################### 4 # This software was developed by the University of Tennessee as part of the5 # Distributed Data Analysis of Neutron Scattering Experiments (DANSE)6 # project funded by the US National Science Foundation.7 # See the license text in license.txt8 # copyright 2008, University of Tennessee5 # This software was developed by the University of Tennessee as part of the 6 # Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 # project funded by the US National Science Foundation. 8 # See the license text in license.txt 9 # copyright 2008, University of Tennessee 9 10 ###################################################################### 10 11 12 import logging 11 13 import numpy as np 12 import os 13 from sas.sascalc.dataloader.data_info import Data1D 14 from sas.sascalc.dataloader.data_info import Detector 15 16 has_converter = True 17 try: 18 from sas.sascalc.data_util.nxsunit import Converter 19 except: 20 has_converter = False 21 22 23 class Reader: 14 from sas.sascalc.dataloader.file_reader_base_class import FileReader 15 from sas.sascalc.dataloader.data_info import DataInfo, plottable_1D, Data1D,\ 16 Detector 17 from sas.sascalc.dataloader.loader_exceptions import FileContentsException,\ 18 DefaultReaderException 19 20 logger = logging.getLogger(__name__) 21 22 23 class Reader(FileReader): 24 24 """ 25 25 Class to load IGOR reduced .ABS files 26 26 """ 27 # #File type27 # File type 28 28 type_name = "IGOR 1D" 29 # #Wildcards29 # Wildcards 30 30 type = ["IGOR 1D files (*.abs)|*.abs"] 31 # #List of allowed extensions32 ext = ['.abs' , '.ABS']31 # List of allowed extensions 32 ext = ['.abs'] 33 33 34 def read(self, path):34 def get_file_contents(self): 35 35 """ 36 Load data file. 
37 38 :param path: file path 39 40 :return: Data1D object, or None 36 Get the contents of the file 41 37 42 38 :raise RuntimeError: when the file can't be opened 43 39 :raise ValueError: when the length of the data vectors are inconsistent 44 40 """ 45 if os.path.isfile(path): 46 basename = os.path.basename(path) 47 root, extension = os.path.splitext(basename) 48 if extension.lower() in self.ext: 49 try: 50 input_f = open(path,'r') 41 buff = self.f_open.read() 42 filepath = self.f_open.name 43 lines = buff.splitlines() 44 self.has_converter = True 45 try: 46 from sas.sascalc.data_util.nxsunit import Converter 47 except: 48 self.has_converter = False 49 self.output = [] 50 self.current_datainfo = DataInfo() 51 self.current_datainfo.filename = filepath 52 self.reset_data_list(len(lines)) 53 detector = Detector() 54 data_line = 0 55 self.reset_data_list(len(lines)) 56 self.current_datainfo.detector.append(detector) 57 self.current_datainfo.filename = filepath 58 59 is_info = False 60 is_center = False 61 is_data_started = False 62 63 base_q_unit = '1/A' 64 base_i_unit = '1/cm' 65 data_conv_q = Converter(base_q_unit) 66 data_conv_i = Converter(base_i_unit) 67 68 for line in lines: 69 # Information line 1 70 if is_info: 71 is_info = False 72 line_toks = line.split() 73 74 # Wavelength in Angstrom 75 try: 76 value = float(line_toks[1]) 77 if self.has_converter and \ 78 self.current_datainfo.source.wavelength_unit != 'A': 79 conv = Converter('A') 80 self.current_datainfo.source.wavelength = conv(value, 81 units=self.current_datainfo.source.wavelength_unit) 82 else: 83 self.current_datainfo.source.wavelength = value 84 except KeyError: 85 msg = "ABSReader cannot read wavelength from %s" % filepath 86 self.current_datainfo.errors.append(msg) 87 88 # Detector distance in meters 89 try: 90 value = float(line_toks[3]) 91 if self.has_converter and detector.distance_unit != 'm': 92 conv = Converter('m') 93 detector.distance = conv(value, 94 units=detector.distance_unit) 95 else: 96 detector.distance = value 51 97 except: 52 raise RuntimeError, "abs_reader: cannot open %s" % path 53 buff = input_f.read() 54 lines = buff.split('\n') 55 x = np.zeros(0) 56 y = np.zeros(0) 57 dy = np.zeros(0) 58 dx = np.zeros(0) 59 output = Data1D(x, y, dy=dy, dx=dx) 60 detector = Detector() 61 output.detector.append(detector) 62 output.filename = basename 63 64 is_info = False 98 msg = "ABSReader cannot read SDD from %s" % filepath 99 self.current_datainfo.errors.append(msg) 100 101 # Transmission 102 try: 103 self.current_datainfo.sample.transmission = \ 104 float(line_toks[4]) 105 except ValueError: 106 # Transmission isn't always in the header 107 pass 108 109 # Sample thickness in mm 110 try: 111 value = float(line_toks[5]) 112 if self.has_converter and \ 113 self.current_datainfo.sample.thickness_unit != 'cm': 114 conv = Converter('cm') 115 self.current_datainfo.sample.thickness = conv(value, 116 units=self.current_datainfo.sample.thickness_unit) 117 else: 118 self.current_datainfo.sample.thickness = value 119 except ValueError: 120 # Thickness is not a mandatory entry 121 pass 122 123 # MON CNT LAMBDA DET ANG DET DIST TRANS THICK AVE STEP 124 if line.count("LAMBDA") > 0: 125 is_info = True 126 127 # Find center info line 128 if is_center: 65 129 is_center = False 66 is_data_started = False 67 68 data_conv_q = None 69 data_conv_i = None 70 71 if has_converter == True and output.x_unit != '1/A': 72 data_conv_q = Converter('1/A') 73 # Test it 74 data_conv_q(1.0, output.x_unit) 75 76 if has_converter == True and 
output.y_unit != '1/cm': 77 data_conv_i = Converter('1/cm') 78 # Test it 79 data_conv_i(1.0, output.y_unit) 80 81 for line in lines: 82 83 # Information line 1 84 if is_info == True: 85 is_info = False 86 line_toks = line.split() 87 88 # Wavelength in Angstrom 89 try: 90 value = float(line_toks[1]) 91 if has_converter == True and \ 92 output.source.wavelength_unit != 'A': 93 conv = Converter('A') 94 output.source.wavelength = conv(value, 95 units=output.source.wavelength_unit) 96 else: 97 output.source.wavelength = value 98 except: 99 #goes to ASC reader 100 msg = "abs_reader: cannot open %s" % path 101 raise RuntimeError, msg 102 103 # Distance in meters 104 try: 105 value = float(line_toks[3]) 106 if has_converter == True and \ 107 detector.distance_unit != 'm': 108 conv = Converter('m') 109 detector.distance = conv(value, 110 units=detector.distance_unit) 111 else: 112 detector.distance = value 113 except: 114 #goes to ASC reader 115 msg = "abs_reader: cannot open %s" % path 116 raise RuntimeError, msg 117 # Transmission 118 try: 119 output.sample.transmission = float(line_toks[4]) 120 except: 121 # Transmission is not a mandatory entry 122 pass 123 124 # Thickness in mm 125 try: 126 value = float(line_toks[5]) 127 if has_converter == True and \ 128 output.sample.thickness_unit != 'cm': 129 conv = Converter('cm') 130 output.sample.thickness = conv(value, 131 units=output.sample.thickness_unit) 132 else: 133 output.sample.thickness = value 134 except: 135 # Thickness is not a mandatory entry 136 pass 137 138 #MON CNT LAMBDA DET ANG DET DIST TRANS THICK 139 # AVE STEP 140 if line.count("LAMBDA") > 0: 141 is_info = True 142 143 # Find center info line 144 if is_center == True: 145 is_center = False 146 line_toks = line.split() 147 # Center in bin number 148 center_x = float(line_toks[0]) 149 center_y = float(line_toks[1]) 150 151 # Bin size 152 if has_converter == True and \ 153 detector.pixel_size_unit != 'mm': 154 conv = Converter('mm') 155 detector.pixel_size.x = conv(5.0, 156 units=detector.pixel_size_unit) 157 detector.pixel_size.y = conv(5.0, 158 units=detector.pixel_size_unit) 159 else: 160 detector.pixel_size.x = 5.0 161 detector.pixel_size.y = 5.0 162 163 # Store beam center in distance units 164 # Det 640 x 640 mm 165 if has_converter == True and \ 166 detector.beam_center_unit != 'mm': 167 conv = Converter('mm') 168 detector.beam_center.x = conv(center_x * 5.0, 169 units=detector.beam_center_unit) 170 detector.beam_center.y = conv(center_y * 5.0, 171 units=detector.beam_center_unit) 172 else: 173 detector.beam_center.x = center_x * 5.0 174 detector.beam_center.y = center_y * 5.0 175 176 # Detector type 177 try: 178 detector.name = line_toks[7] 179 except: 180 # Detector name is not a mandatory entry 181 pass 182 183 #BCENT(X,Y) A1(mm) A2(mm) A1A2DIST(m) DL/L 184 # BSTOP(mm) DET_TYP 185 if line.count("BCENT") > 0: 186 is_center = True 187 188 # Parse the data 189 if is_data_started == True: 190 toks = line.split() 191 192 try: 193 _x = float(toks[0]) 194 _y = float(toks[1]) 195 _dy = float(toks[2]) 196 _dx = float(toks[3]) 197 198 if data_conv_q is not None: 199 _x = data_conv_q(_x, units=output.x_unit) 200 _dx = data_conv_i(_dx, units=output.x_unit) 201 202 if data_conv_i is not None: 203 _y = data_conv_i(_y, units=output.y_unit) 204 _dy = data_conv_i(_dy, units=output.y_unit) 205 206 x = np.append(x, _x) 207 y = np.append(y, _y) 208 dy = np.append(dy, _dy) 209 dx = np.append(dx, _dx) 210 211 except: 212 # Could not read this data line. 
If we are here 213 # it is because we are in the data section. Just 214 # skip it. 215 pass 216 217 #The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. dev. 218 # I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor| 219 if line.count("The 6 columns") > 0: 220 is_data_started = True 221 222 # Sanity check 223 if not len(y) == len(dy): 224 msg = "abs_reader: y and dy have different length" 225 raise ValueError, msg 226 # If the data length is zero, consider this as 227 # though we were not able to read the file. 228 if len(x) == 0: 229 raise ValueError, "ascii_reader: could not load file" 230 231 output.x = x[x != 0] 232 output.y = y[x != 0] 233 output.dy = dy[x != 0] 234 output.dx = dx[x != 0] 235 if data_conv_q is not None: 236 output.xaxis("\\rm{Q}", output.x_unit) 130 line_toks = line.split() 131 # Center in bin number 132 center_x = float(line_toks[0]) 133 center_y = float(line_toks[1]) 134 135 # Bin size 136 if self.has_converter and detector.pixel_size_unit != 'mm': 137 conv = Converter('mm') 138 detector.pixel_size.x = conv(5.08, 139 units=detector.pixel_size_unit) 140 detector.pixel_size.y = conv(5.08, 141 units=detector.pixel_size_unit) 237 142 else: 238 output.xaxis("\\rm{Q}", 'A^{-1}') 239 if data_conv_i is not None: 240 output.yaxis("\\rm{Intensity}", output.y_unit) 143 detector.pixel_size.x = 5.08 144 detector.pixel_size.y = 5.08 145 146 # Store beam center in distance units 147 # Det 640 x 640 mm 148 if self.has_converter and detector.beam_center_unit != 'mm': 149 conv = Converter('mm') 150 detector.beam_center.x = conv(center_x * 5.08, 151 units=detector.beam_center_unit) 152 detector.beam_center.y = conv(center_y * 5.08, 153 units=detector.beam_center_unit) 241 154 else: 242 output.yaxis("\\rm{Intensity}", "cm^{-1}") 243 244 # Store loading process information 245 output.meta_data['loader'] = self.type_name 246 return output 155 detector.beam_center.x = center_x * 5.08 156 detector.beam_center.y = center_y * 5.08 157 158 # Detector type 159 try: 160 detector.name = line_toks[7] 161 except: 162 # Detector name is not a mandatory entry 163 pass 164 165 # BCENT(X,Y) A1(mm) A2(mm) A1A2DIST(m) DL/L BSTOP(mm) DET_TYP 166 if line.count("BCENT") > 0: 167 is_center = True 168 169 # Parse the data 170 if is_data_started: 171 toks = line.split() 172 173 try: 174 _x = float(toks[0]) 175 _y = float(toks[1]) 176 _dy = float(toks[2]) 177 _dx = float(toks[3]) 178 179 if data_conv_q is not None: 180 _x = data_conv_q(_x, units=base_q_unit) 181 _dx = data_conv_q(_dx, units=base_q_unit) 182 183 if data_conv_i is not None: 184 _y = data_conv_i(_y, units=base_i_unit) 185 _dy = data_conv_i(_dy, units=base_i_unit) 186 187 self.current_dataset.x[data_line] = _x 188 self.current_dataset.y[data_line] = _y 189 self.current_dataset.dy[data_line] = _dy 190 self.current_dataset.dx[data_line] = _dx 191 data_line += 1 192 193 except ValueError: 194 # Could not read this data line. If we are here 195 # it is because we are in the data section. Just 196 # skip it. 197 pass 198 199 # The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. dev. 200 # I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor| 201 if line.count("The 6 columns") > 0: 202 is_data_started = True 203 204 self.remove_empty_q_values(True, True) 205 206 # Sanity check 207 if not len(self.current_dataset.y) == len(self.current_dataset.dy): 208 self.set_all_to_none() 209 msg = "abs_reader: y and dy have different length" 210 raise ValueError(msg) 211 # If the data length is zero, consider this as 212 # though we were not able to read the file. 
213 if len(self.current_dataset.x) == 0: 214 self.set_all_to_none() 215 raise ValueError("ascii_reader: could not load file") 216 217 if data_conv_q is not None: 218 self.current_dataset.xaxis("\\rm{Q}", base_q_unit) 247 219 else: 248 raise RuntimeError, "%s is not a file" % path 249 return None 220 self.current_dataset.xaxis("\\rm{Q}", 'A^{-1}') 221 if data_conv_i is not None: 222 self.current_dataset.yaxis("\\rm{Intensity}", base_i_unit) 223 else: 224 self.current_dataset.yaxis("\\rm{Intensity}", "cm^{-1}") 225 226 # Store loading process information 227 self.current_datainfo.meta_data['loader'] = self.type_name 228 self.send_to_output() -
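The rewritten ABS parser is a small state machine over the IGOR header: a marker line ("LAMBDA", "BCENT", "The 6 columns") arms a flag, and the following line, or every following line in the case of the data block, is interpreted accordingly. A stripped-down sketch of that control flow; scan_abs is an illustrative stand-in for get_file_contents and skips the unit handling:

```python
def scan_abs(lines):
    """Collect (Q, I, dI, dQ) rows from an IGOR .ABS header/data layout."""
    is_info = is_center = is_data = False
    rows = []
    for line in lines:
        if is_info:
            is_info = False    # wavelength, SDD, transmission, thickness live here
        elif is_center:
            is_center = False  # beam-centre bin numbers live here
        elif is_data:
            toks = line.split()
            if len(toks) >= 4:
                try:
                    rows.append(tuple(float(t) for t in toks[:4]))
                except ValueError:
                    pass       # non-numeric lines in the data section are skipped
        if "LAMBDA" in line:
            is_info = True          # the next line is the info line
        elif "BCENT" in line:
            is_center = True        # the next line is the centre line
        elif "The 6 columns" in line:
            is_data = True          # everything after this is column data
    return rows
```

Geometry derived from the centre line (beam centre in bin numbers times the 5.08 mm pixel pitch) is only routed through nxsunit's Converter when the stored unit differs from millimetres.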
src/sas/sascalc/dataloader/readers/anton_paar_saxs_reader.py
ra235f715 rfafe52a 9 9 10 10 from sas.sascalc.dataloader.readers.xml_reader import XMLreader 11 from sas.sascalc.dataloader.data_info import plottable_1D, Data1D, Sample, Source11 from sas.sascalc.dataloader.data_info import plottable_1D, Data1D, DataInfo, Sample, Source 12 12 from sas.sascalc.dataloader.data_info import Process, Aperture, Collimation, TransmissionSpectrum, Detector 13 13 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DataReaderException 14 14 15 15 class Reader(XMLreader): 16 16 """ 17 A class for reading in CanSAS v2.0 data files. The existing iteration opens Mantid generated HDF5 formatted files 18 with file extension .h5/.H5. Any number of data sets may be present within the file and any dimensionality of data 19 may be used. Currently 1D and 2D SAS data sets are supported, but future implementations will include 1D and 2D 20 SESANS data. This class assumes a single data set for each sasentry. 21 22 :Dependencies: 23 The CanSAS HDF5 reader requires h5py v2.5.0 or later. 17 A class for reading in Anton Paar .pdh files 24 18 """ 25 19 … … 30 24 ## Raw file contents to be processed 31 25 raw_data = None 32 ## Data set being modified33 current_dataset = None34 26 ## For recursion and saving purposes, remember parent objects 35 27 parent_list = None … … 42 34 ## Flag to bypass extension check 43 35 allow_all = False 44 ## List of files to return45 output = None46 36 47 37 def reset_state(self): 48 self.current_dataset = Data1D(np.empty(0), np.empty(0),49 np.empty(0), np.empty(0))38 self.current_dataset = plottable_1D(np.empty(0), np.empty(0), np.empty(0), np.empty(0)) 39 self.current_datainfo = DataInfo() 50 40 self.datasets = [] 51 41 self.raw_data = None … … 63 53 self.lower = 5 64 54 65 def read(self, filename):55 def get_file_contents(self): 66 56 """ 67 57 This is the general read method that all SasView data_loaders must have. 
… … 73 63 ## Reinitialize the class when loading a new data file to reset all class variables 74 64 self.reset_state() 75 ## Check that the file exists 76 if os.path.isfile(filename): 77 basename = os.path.basename(filename) 78 _, extension = os.path.splitext(basename) 79 # If the file type is not allowed, return empty list 80 if extension in self.ext or self.allow_all: 81 ## Load the data file 82 input_f = open(filename, 'r') 83 buff = input_f.read() 84 self.raw_data = buff.splitlines() 85 self.read_data() 86 return self.output 65 buff = self.f_open.read() 66 self.raw_data = buff.splitlines() 67 self.read_data() 87 68 88 69 def read_data(self): 70 correctly_loaded = True 71 error_message = "" 72 89 73 q_unit = "1/nm" 90 74 i_unit = "1/um^2" 91 self.current_dataset.title = self.raw_data[0] 92 self.current_dataset.meta_data["Keywords"] = self.raw_data[1] 93 line3 = self.raw_data[2].split() 94 line4 = self.raw_data[3].split() 95 line5 = self.raw_data[4].split() 96 self.data_points = int(line3[0]) 97 self.lower = 5 98 self.upper = self.lower + self.data_points 99 self.source.radiation = 'x-ray' 100 normal = float(line4[3]) 101 self.current_dataset.source.radiation = "x-ray" 102 self.current_dataset.source.name = "Anton Paar SAXSess Instrument" 103 self.current_dataset.source.wavelength = float(line4[4]) 104 xvals = [] 105 yvals = [] 106 dyvals = [] 107 for i in range(self.lower, self.upper): 108 index = i - self.lower 109 data = self.raw_data[i].split() 110 xvals.insert(index, normal * float(data[0])) 111 yvals.insert(index, normal * float(data[1])) 112 dyvals.insert(index, normal * float(data[2])) 75 try: 76 self.current_datainfo.title = self.raw_data[0] 77 self.current_datainfo.meta_data["Keywords"] = self.raw_data[1] 78 line3 = self.raw_data[2].split() 79 line4 = self.raw_data[3].split() 80 line5 = self.raw_data[4].split() 81 self.data_points = int(line3[0]) 82 self.lower = 5 83 self.upper = self.lower + self.data_points 84 self.source.radiation = 'x-ray' 85 normal = float(line4[3]) 86 self.current_datainfo.source.radiation = "x-ray" 87 self.current_datainfo.source.name = "Anton Paar SAXSess Instrument" 88 self.current_datainfo.source.wavelength = float(line4[4]) 89 xvals = [] 90 yvals = [] 91 dyvals = [] 92 for i in range(self.lower, self.upper): 93 index = i - self.lower 94 data = self.raw_data[i].split() 95 xvals.insert(index, normal * float(data[0])) 96 yvals.insert(index, normal * float(data[1])) 97 dyvals.insert(index, normal * float(data[2])) 98 except Exception as e: 99 error_message = "Couldn't load {}.\n".format(self.f_open.name) 100 error_message += e.message 101 raise FileContentsException(error_message) 113 102 self.current_dataset.x = np.append(self.current_dataset.x, xvals) 114 103 self.current_dataset.y = np.append(self.current_dataset.y, yvals) 115 104 self.current_dataset.dy = np.append(self.current_dataset.dy, dyvals) 116 105 if self.data_points != self.current_dataset.x.size: 117 self.errors.add("Not all data was loaded properly.") 118 if self.current_dataset.dx.size != self.current_dataset.x.size: 119 dxvals = np.zeros(self.current_dataset.x.size) 120 self.current_dataset.dx = dxvals 106 error_message += "Not all data points could be loaded.\n" 107 correctly_loaded = False 121 108 if self.current_dataset.x.size != self.current_dataset.y.size: 122 self.errors.add("The x and y data sets are not the same size.") 109 error_message += "The x and y data sets are not the same size.\n" 110 correctly_loaded = False 123 111 if self.current_dataset.y.size != 
self.current_dataset.dy.size: 124 self.errors.add("The y and dy datasets are not the same size.") 125 self.current_dataset.errors = self.errors 112 error_message += "The y and dy datasets are not the same size.\n" 113 correctly_loaded = False 114 126 115 self.current_dataset.xaxis("Q", q_unit) 127 116 self.current_dataset.yaxis("Intensity", i_unit) 128 117 xml_intermediate = self.raw_data[self.upper:] 129 118 xml = ''.join(xml_intermediate) 130 self.set_xml_string(xml) 131 dom = self.xmlroot.xpath('/fileinfo') 132 self._parse_child(dom) 133 self.output.append(self.current_dataset) 119 try: 120 self.set_xml_string(xml) 121 dom = self.xmlroot.xpath('/fileinfo') 122 self._parse_child(dom) 123 except Exception as e: 124 # Data loaded but XML metadata has an error 125 error_message += "Data points have been loaded but there was an " 126 error_message += "error reading XML metadata: " + e.message 127 correctly_loaded = False 128 self.send_to_output() 129 if not correctly_loaded: 130 raise DataReaderException(error_message) 134 131 135 132 def _parse_child(self, dom, parent=''): … … 146 143 self._parse_child(node, key) 147 144 if key == "SampleDetector": 148 self.current_data set.detector.append(self.detector)145 self.current_datainfo.detector.append(self.detector) 149 146 self.detector = Detector() 150 147 else: 151 148 if key == "value": 152 149 if parent == "Wavelength": 153 self.current_data set.source.wavelength = value150 self.current_datainfo.source.wavelength = value 154 151 elif parent == "SampleDetector": 155 152 self.detector.distance = value 156 153 elif parent == "Temperature": 157 self.current_data set.sample.temperature = value154 self.current_datainfo.sample.temperature = value 158 155 elif parent == "CounterSlitLength": 159 156 self.detector.slit_length = value … … 161 158 value = value.replace("_", "") 162 159 if parent == "Wavelength": 163 self.current_data set.source.wavelength_unit = value160 self.current_datainfo.source.wavelength_unit = value 164 161 elif parent == "SampleDetector": 165 162 self.detector.distance_unit = value … … 169 166 self.current_dataset.yaxis(self.current_dataset._yaxis, value) 170 167 elif parent == "Temperature": 171 self.current_data set.sample.temperature_unit = value168 self.current_datainfo.sample.temperature_unit = value 172 169 elif parent == "CounterSlitLength": 173 170 self.detector.slit_length_unit = value -
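Error handling here follows a collect-then-raise pattern: problems are accumulated in an error string while parsing continues, the data that did load is still pushed out via send_to_output(), and a DataReaderException is raised only at the end. A generic sketch of the pattern; the helper and the step callables are illustrative, not SasView API:

```python
class DataReaderException(Exception):
    """Raised after loading when data arrived, but with problems."""

def run_with_deferred_errors(steps):
    """Run every step, remember what failed, and complain once at the end."""
    error_message = ""
    correctly_loaded = True
    for step in steps:               # each step loads or validates one piece
        try:
            step()
        except Exception as exc:
            error_message += "%s\n" % exc
            correctly_loaded = False # keep going: partial data is still useful
    if not correctly_loaded:
        raise DataReaderException(error_message)
```

The caller can then present the partially loaded data alongside the accumulated message instead of discarding the whole file.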
src/sas/sascalc/dataloader/readers/ascii_reader.py
r235f514 r080d88e 1 1 """ 2 ASCIIreader2 Generic multi-column ASCII data reader 3 3 """ 4 4 ############################################################################ 5 # This software was developed by the University of Tennessee as part of the6 # Distributed Data Analysis of Neutron Scattering Experiments (DANSE)7 # project funded by the US National Science Foundation.8 # If you use DANSE applications to do scientific research that leads to9 # publication, we ask that you acknowledge the use of the software with the10 # following sentence:11 # This work benefited from DANSE software developed under NSF award DMR-0520547.12 # copyright 2008, University of Tennessee5 # This software was developed by the University of Tennessee as part of the 6 # Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 # project funded by the US National Science Foundation. 8 # If you use DANSE applications to do scientific research that leads to 9 # publication, we ask that you acknowledge the use of the software with the 10 # following sentence: 11 # This work benefited from DANSE software developed under NSF award DMR-0520547. 12 # copyright 2008, University of Tennessee 13 13 ############################################################################# 14 14 15 import logging 16 from sas.sascalc.dataloader.file_reader_base_class import FileReader 17 from sas.sascalc.dataloader.data_info import DataInfo, plottable_1D 18 from sas.sascalc.dataloader.loader_exceptions import FileContentsException,\ 19 DefaultReaderException 15 20 16 import numpy as np 17 import os 18 from sas.sascalc.dataloader.data_info import Data1D 19 20 # Check whether we have a converter available 21 has_converter = True 22 try: 23 from sas.sascalc.data_util.nxsunit import Converter 24 except: 25 has_converter = False 26 _ZERO = 1e-16 21 logger = logging.getLogger(__name__) 27 22 28 23 29 class Reader :24 class Reader(FileReader): 30 25 """ 31 26 Class to load ascii files (2, 3 or 4 columns). 
32 27 """ 33 # #File type28 # File type 34 29 type_name = "ASCII" 35 36 ## Wildcards 30 # Wildcards 37 31 type = ["ASCII files (*.txt)|*.txt", 38 32 "ASCII files (*.dat)|*.dat", 39 33 "ASCII files (*.abs)|*.abs", 40 34 "CSV files (*.csv)|*.csv"] 41 ## List of allowed extensions 42 ext = ['.txt', '.TXT', '.dat', '.DAT', '.abs', '.ABS', 'csv', 'CSV'] 35 # List of allowed extensions 36 ext = ['.txt', '.dat', '.abs', '.csv'] 37 # Flag to bypass extension check 38 allow_all = True 39 # data unless that is the only data 40 min_data_pts = 5 43 41 44 ## Flag to bypass extension check 45 allow_all = True 42 def get_file_contents(self): 43 """ 44 Get the contents of the file 45 """ 46 46 47 def read(self, path): 48 """ 49 Load data file 47 buff = self.f_open.read() 48 filepath = self.f_open.name 49 lines = buff.splitlines() 50 self.output = [] 51 self.current_datainfo = DataInfo() 52 self.current_datainfo.filename = filepath 53 self.reset_data_list(len(lines)) 50 54 51 :param path: file path 52 :return: Data1D object, or None 55 # The first good line of data will define whether 56 # we have 2-column or 3-column ascii 57 has_error_dx = None 58 has_error_dy = None 53 59 54 :raise RuntimeError: when the file can't be opened 55 :raise ValueError: when the length of the data vectors are inconsistent 56 """ 57 if os.path.isfile(path): 58 basename = os.path.basename(path) 59 _, extension = os.path.splitext(basename) 60 if self.allow_all or extension.lower() in self.ext: 61 try: 62 # Read in binary mode since GRASP frequently has no-ascii 63 # characters that breaks the open operation 64 input_f = open(path,'rb') 65 except: 66 raise RuntimeError, "ascii_reader: cannot open %s" % path 67 buff = input_f.read() 68 lines = buff.splitlines() 60 # Initialize counters for data lines and header lines. 61 is_data = False 62 # More than "5" lines of data is considered as actual 63 # To count # of current data candidate lines 64 candidate_lines = 0 65 # To count total # of previous data candidate lines 66 candidate_lines_previous = 0 67 # Current line number 68 line_no = 0 69 # minimum required number of columns of data 70 lentoks = 2 71 for line in lines: 72 toks = self.splitline(line.strip()) 73 # To remember the number of columns in the current line of data 74 new_lentoks = len(toks) 75 try: 76 if new_lentoks == 0: 77 # If the line is blank, skip and continue on 78 # In case of breaks within data sets. 
79 continue 80 elif new_lentoks != lentoks and is_data: 81 # If a footer is found, break the loop and save the data 82 break 83 elif new_lentoks != lentoks and not is_data: 84 # If header lines are numerical 85 candidate_lines = 0 86 self.reset_data_list(len(lines) - line_no) 69 87 70 # Arrays for data storage 71 tx = np.zeros(0) 72 ty = np.zeros(0) 73 tdy = np.zeros(0) 74 tdx = np.zeros(0) 88 self.current_dataset.x[candidate_lines] = float(toks[0]) 75 89 76 # The first good line of data will define whether 77 # we have 2-column or 3-column ascii 90 if new_lentoks > 1: 91 self.current_dataset.y[candidate_lines] = float(toks[1]) 92 93 # If a 3rd row is present, consider it dy 94 if new_lentoks > 2: 95 self.current_dataset.dy[candidate_lines] = \ 96 float(toks[2]) 97 has_error_dy = True 98 99 # If a 4th row is present, consider it dx 100 if new_lentoks > 3: 101 self.current_dataset.dx[candidate_lines] = \ 102 float(toks[3]) 103 has_error_dx = True 104 105 candidate_lines += 1 106 # If 5 or more lines, this is considering the set data 107 if candidate_lines >= self.min_data_pts: 108 is_data = True 109 110 # To remember the # of columns on the current line 111 # for the next line of data 112 lentoks = new_lentoks 113 line_no += 1 114 except ValueError: 115 # ValueError is raised when non numeric strings conv. to float 116 # It is data and meet non - number, then stop reading 117 if is_data: 118 break 119 # Delete the previously stored lines of data candidates if 120 # the list is not data 121 self.reset_data_list(len(lines) - line_no) 122 lentoks = 2 78 123 has_error_dx = None 79 124 has_error_dy = None 125 # Reset # of lines of data candidates 126 candidate_lines = 0 80 127 81 #Initialize counters for data lines and header lines. 82 is_data = False 83 # More than "5" lines of data is considered as actual 84 # data unless that is the only data 85 min_data_pts = 5 86 # To count # of current data candidate lines 87 candidate_lines = 0 88 # To count total # of previous data candidate lines 89 candidate_lines_previous = 0 90 #minimum required number of columns of data 91 lentoks = 2 92 for line in lines: 93 toks = self.splitline(line) 94 # To remember the # of columns in the current line of data 95 new_lentoks = len(toks) 96 try: 97 if new_lentoks == 1 and not is_data: 98 ## If only one item in list, no longer data 99 raise ValueError 100 elif new_lentoks == 0: 101 ## If the line is blank, skip and continue on 102 ## In case of breaks within data sets. 103 continue 104 elif new_lentoks != lentoks and is_data: 105 ## If a footer is found, break the loop and save the data 106 break 107 elif new_lentoks != lentoks and not is_data: 108 ## If header lines are numerical 109 candidate_lines = 0 110 candidate_lines_previous = 0 128 if not is_data: 129 self.set_all_to_none() 130 if self.extension in self.ext: 131 msg = "ASCII Reader error: Fewer than five Q data points found " 132 msg += "in {}.".format(filepath) 133 raise FileContentsException(msg) 134 else: 135 msg = "ASCII Reader could not load the file {}".format(filepath) 136 raise DefaultReaderException(msg) 137 # Sanity check 138 if has_error_dy and not len(self.current_dataset.y) == \ 139 len(self.current_dataset.dy): 140 msg = "ASCII Reader error: Number of I and dI data points are" 141 msg += " different in {}.".format(filepath) 142 # TODO: Add error to self.current_datainfo.errors instead? 
143 self.set_all_to_none() 144 raise FileContentsException(msg) 145 if has_error_dx and not len(self.current_dataset.x) == \ 146 len(self.current_dataset.dx): 147 msg = "ASCII Reader error: Number of Q and dQ data points are" 148 msg += " different in {}.".format(filepath) 149 # TODO: Add error to self.current_datainfo.errors instead? 150 self.set_all_to_none() 151 raise FileContentsException(msg) 111 152 112 #Make sure that all columns are numbers. 113 for colnum in range(len(toks)): 114 # Any non-floating point values throw ValueError 115 float(toks[colnum]) 153 self.remove_empty_q_values(has_error_dx, has_error_dy) 154 self.current_dataset.xaxis("\\rm{Q}", 'A^{-1}') 155 self.current_dataset.yaxis("\\rm{Intensity}", "cm^{-1}") 116 156 117 candidate_lines += 1 118 _x = float(toks[0]) 119 _y = float(toks[1]) 120 _dx = None 121 _dy = None 122 123 #If 5 or more lines, this is considering the set data 124 if candidate_lines >= min_data_pts: 125 is_data = True 126 127 # If a 3rd row is present, consider it dy 128 if new_lentoks > 2: 129 _dy = float(toks[2]) 130 has_error_dy = False if _dy is None else True 131 132 # If a 4th row is present, consider it dx 133 if new_lentoks > 3: 134 _dx = float(toks[3]) 135 has_error_dx = False if _dx is None else True 136 137 # Delete the previously stored lines of data candidates if 138 # the list is not data 139 if candidate_lines == 1 and -1 < candidate_lines_previous < min_data_pts and \ 140 is_data == False: 141 try: 142 tx = np.zeros(0) 143 ty = np.zeros(0) 144 tdy = np.zeros(0) 145 tdx = np.zeros(0) 146 except: 147 pass 148 149 if has_error_dy == True: 150 tdy = np.append(tdy, _dy) 151 if has_error_dx == True: 152 tdx = np.append(tdx, _dx) 153 tx = np.append(tx, _x) 154 ty = np.append(ty, _y) 155 156 #To remember the # of columns on the current line 157 # for the next line of data 158 lentoks = new_lentoks 159 candidate_lines_previous = candidate_lines 160 except ValueError: 161 # It is data and meet non - number, then stop reading 162 if is_data == True: 163 break 164 lentoks = 2 165 has_error_dx = None 166 has_error_dy = None 167 #Reset # of lines of data candidates 168 candidate_lines = 0 169 except: 170 pass 171 172 input_f.close() 173 if not is_data: 174 msg = "ascii_reader: x has no data" 175 raise RuntimeError, msg 176 # Sanity check 177 if has_error_dy == True and not len(ty) == len(tdy): 178 msg = "ascii_reader: y and dy have different length" 179 raise RuntimeError, msg 180 if has_error_dx == True and not len(tx) == len(tdx): 181 msg = "ascii_reader: y and dy have different length" 182 raise RuntimeError, msg 183 # If the data length is zero, consider this as 184 # though we were not able to read the file. 185 if len(tx) == 0: 186 raise RuntimeError, "ascii_reader: could not load file" 187 188 #Let's re-order the data to make cal. 
189 # curve look better some cases 190 ind = np.lexsort((ty, tx)) 191 x = np.zeros(len(tx)) 192 y = np.zeros(len(ty)) 193 dy = np.zeros(len(tdy)) 194 dx = np.zeros(len(tdx)) 195 output = Data1D(x, y, dy=dy, dx=dx) 196 self.filename = output.filename = basename 197 198 for i in ind: 199 x[i] = tx[ind[i]] 200 y[i] = ty[ind[i]] 201 if has_error_dy == True: 202 dy[i] = tdy[ind[i]] 203 if has_error_dx == True: 204 dx[i] = tdx[ind[i]] 205 # Zeros in dx, dy 206 if has_error_dx: 207 dx[dx == 0] = _ZERO 208 if has_error_dy: 209 dy[dy == 0] = _ZERO 210 #Data 211 output.x = x[x != 0] 212 output.y = y[x != 0] 213 output.dy = dy[x != 0] if has_error_dy == True\ 214 else np.zeros(len(output.y)) 215 output.dx = dx[x != 0] if has_error_dx == True\ 216 else np.zeros(len(output.x)) 217 218 output.xaxis("\\rm{Q}", 'A^{-1}') 219 output.yaxis("\\rm{Intensity}", "cm^{-1}") 220 221 # Store loading process information 222 output.meta_data['loader'] = self.type_name 223 if len(output.x) < 1: 224 raise RuntimeError, "%s is empty" % path 225 return output 226 227 else: 228 raise RuntimeError, "%s is not a file" % path 229 return None 230 231 def splitline(self, line): 232 """ 233 Splits a line into pieces based on common delimeters 234 :param line: A single line of text 235 :return: list of values 236 """ 237 # Initial try for CSV (split on ,) 238 toks = line.split(',') 239 # Now try SCSV (split on ;) 240 if len(toks) < 2: 241 toks = line.split(';') 242 # Now go for whitespace 243 if len(toks) < 2: 244 toks = line.split() 245 return toks 157 # Store loading process information 158 self.current_datainfo.meta_data['loader'] = self.type_name 159 self.send_to_output() -
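Data detection in the ASCII reader is a candidate-counting heuristic: a run of lines only becomes data once at least min_data_pts (five) consecutive lines parse as numbers with a stable column count; a parse failure before that resets the run, and a column-count change after it is treated as a footer. A self-contained sketch of the heuristic; find_data_block is illustrative, not the SasView method:

```python
def find_data_block(lines, min_pts=5):
    """Return numeric rows once >= min_pts consecutive lines look like data."""
    block, ncols = [], None
    for line in lines:
        toks = line.replace(",", " ").replace(";", " ").split()
        if not toks:
            continue                     # blank lines are allowed inside a data set
        try:
            row = [float(t) for t in toks]
        except ValueError:
            if len(block) >= min_pts:
                break                    # the numeric block ended at a text footer
            block, ncols = [], None      # still in the header: discard candidates
            continue
        if ncols is not None and len(row) != ncols:
            if len(block) >= min_pts:
                break                    # column count changed: treat as a footer
            block, ncols = [], None      # numeric header line: restart the run
        ncols = len(row)
        block.append(row)
    return block if len(block) >= min_pts else []
```

For example, a header containing a stray numeric line followed by a five-plus-row two-column body still resolves to the body, because the run resets whenever parseability or the column count breaks before five good rows.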
src/sas/sascalc/dataloader/readers/associations.py
ra1b8fee r248ff73 14 14 #copyright 2009, University of Tennessee 15 15 ############################################################################# 16 from __future__ import print_function17 18 import os19 16 import sys 20 17 import logging 21 import json22 18 23 19 logger = logging.getLogger(__name__) 24 20 25 FILE_NAME = 'defaults.json' 21 FILE_ASSOCIATIONS = { 22 ".xml": "cansas_reader", 23 ".ses": "sesans_reader", 24 ".h5": "cansas_reader_HDF5", 25 ".txt": "ascii_reader", 26 ".dat": "red2d_reader", 27 ".abs": "abs_reader", 28 ".d1d": "hfir1d_reader", 29 ".sans": "danse_reader", 30 ".nxs": "nexus_reader", 31 ".pdh": "anton_paar_saxs_reader" 32 } 26 33 27 def read_associations(loader, settings=FILE_NAME): 34 35 def read_associations(loader, settings=FILE_ASSOCIATIONS): 28 36 """ 29 37 Read the specified settings file to associate 30 38 default readers to file extension. 31 39 32 40 :param loader: Loader object 33 41 :param settings: path to the json settings file [string] 34 42 """ 35 reader_dir = os.path.dirname(__file__) 36 path = os.path.join(reader_dir, settings) 37 38 # If we can't find the file in the installation 39 # directory, look into the execution directory. 40 if not os.path.isfile(path): 41 path = os.path.join(os.getcwd(), settings) 42 if not os.path.isfile(path): 43 path = os.path.join(sys.path[0], settings) 44 if not os.path.isfile(path): 45 path = settings 46 if not os.path.isfile(path): 47 path = "./%s" % settings 48 if os.path.isfile(path): 49 with open(path) as fh: 50 json_tree = json.load(fh) 51 52 # Read in the file extension associations 53 entry_list = json_tree['SasLoader']['FileType'] 54 55 # For each FileType entry, get the associated reader and extension 56 for entry in entry_list: 57 reader = entry['-reader'] 58 ext = entry['-extension'] 59 60 if reader is not None and ext is not None: 61 # Associate the extension with a particular reader 62 # TODO: Modify the Register code to be case-insensitive 63 # and remove the extra line below. 64 try: 65 exec "import %s" % reader 66 exec "loader.associate_file_type('%s', %s)" % (ext.lower(), 67 reader) 68 exec "loader.associate_file_type('%s', %s)" % (ext.upper(), 69 reader) 70 except: 71 msg = "read_associations: skipping association" 72 msg += " for %s\n %s" % (ext.lower(), sys.exc_value) 73 logger.error(msg) 74 else: 75 print("Could not find reader association settings\n %s [%s]" % (__file__, os.getcwd())) 76 77 78 def register_readers(registry_function): 79 """ 80 Function called by the registry/loader object to register 81 all default readers using a call back function. 
82 83 :WARNING: this method is now obsolete 84 85 :param registry_function: function to be called to register each reader 86 """ 87 logger.info("register_readers is now obsolete: use read_associations()") 88 import abs_reader 89 import ascii_reader 90 import cansas_reader 91 import danse_reader 92 import hfir1d_reader 93 import IgorReader 94 import red2d_reader 95 #import tiff_reader 96 import nexus_reader 97 import sesans_reader 98 import cansas_reader_HDF5 99 import anton_paar_saxs_reader 100 registry_function(sesans_reader) 101 registry_function(abs_reader) 102 registry_function(ascii_reader) 103 registry_function(cansas_reader) 104 registry_function(danse_reader) 105 registry_function(hfir1d_reader) 106 registry_function(IgorReader) 107 registry_function(red2d_reader) 108 #registry_function(tiff_reader) 109 registry_function(nexus_reader) 110 registry_function(cansas_reader_HDF5) 111 registry_function(anton_paar_saxs_reader) 112 return True 43 # For each FileType entry, get the associated reader and extension 44 for ext, reader in settings.iteritems(): 45 if reader is not None and ext is not None: 46 # Associate the extension with a particular reader 47 # TODO: Modify the Register code to be case-insensitive 48 # FIXME: Remove exec statements 49 # and remove the extra line below. 50 try: 51 exec "import %s" % reader 52 exec "loader.associate_file_type('%s', %s)" % (ext.lower(), 53 reader) 54 exec "loader.associate_file_type('%s', %s)" % (ext.upper(), 55 reader) 56 except: 57 msg = "read_associations: skipping association" 58 msg += " for %s\n %s" % (ext.lower(), sys.exc_value) 59 logger.error(msg) -
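The extension-to-reader table is now a plain dict in the module instead of a defaults.json lookup, and registration still goes through exec (the diff's own FIXME flags those statements). An exec-free sketch of the same loop using importlib, which is available in the Python 2.7 this module targets; the absolute module prefix is an assumption, since the real code imports bare names:

```python
import importlib
import logging

logger = logging.getLogger(__name__)

def read_associations(loader, settings):
    """Associate each extension with its reader module, case-insensitively."""
    for ext, reader_name in settings.items():
        if ext is None or reader_name is None:
            continue
        try:
            # Hypothetical absolute path; the real module imports bare names.
            reader = importlib.import_module(
                "sas.sascalc.dataloader.readers." + reader_name)
            loader.associate_file_type(ext.lower(), reader)
            loader.associate_file_type(ext.upper(), reader)
        except ImportError:
            logger.error("read_associations: skipping association for %s",
                         ext.lower())
```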
src/sas/sascalc/dataloader/readers/cansas_reader.py
r7432acb r248ff73 1 """2 CanSAS data reader - new recursive cansas_version.3 """4 ############################################################################5 #This software was developed by the University of Tennessee as part of the6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE)7 #project funded by the US National Science Foundation.8 #If you use DANSE applications to do scientific research that leads to9 #publication, we ask that you acknowledge the use of the software with the10 #following sentence:11 #This work benefited from DANSE software developed under NSF award DMR-0520547.12 #copyright 2008,2009 University of Tennessee13 #############################################################################14 15 1 import logging 16 2 import numpy as np … … 29 15 from sas.sascalc.dataloader.readers.xml_reader import XMLreader 30 16 from sas.sascalc.dataloader.readers.cansas_constants import CansasConstants, CurrentLevel 17 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, \ 18 DefaultReaderException, DataReaderException 31 19 32 20 # The following 2 imports *ARE* used. Do not remove either. 33 21 import xml.dom.minidom 34 22 from xml.dom.minidom import parseString 23 24 from lxml import etree 35 25 36 26 logger = logging.getLogger(__name__) … … 55 45 56 46 class Reader(XMLreader): 57 """58 Class to load cansas 1D XML files59 60 :Dependencies:61 The CanSAS reader requires PyXML 0.8.4 or later.62 """63 # CanSAS version - defaults to version 1.064 47 cansas_version = "1.0" 65 48 base_ns = "{cansas1d/1.0}" … … 75 58 ns_list = None 76 59 # Temporary storage location for loading multiple data sets in a single file 77 current_datainfo = None78 current_dataset = None79 60 current_data1d = None 80 61 data = None 81 # List of data1D objects to be sent back to SasView82 output = None83 62 # Wildcards 84 63 type = ["XML files (*.xml)|*.xml", "SasView Save Files (*.svs)|*.svs"] … … 110 89 111 90 def read(self, xml_file, schema_path="", invalid=True): 112 """ 113 Validate and read in an xml_file file in the canSAS format. 114 115 :param xml_file: A canSAS file path in proper XML format 116 :param schema_path: A file path to an XML schema to validate the xml_file against 117 """ 118 # For every file loaded, reset everything to a base state 91 if schema_path != "" or invalid != True: 92 # read has been called from self.get_file_contents because xml file doens't conform to schema 93 _, self.extension = os.path.splitext(os.path.basename(xml_file)) 94 return self.get_file_contents(xml_file=xml_file, schema_path=schema_path, invalid=invalid) 95 96 # Otherwise, read has been called by the data loader - file_reader_base_class handles this 97 return super(XMLreader, self).read(xml_file) 98 99 def get_file_contents(self, xml_file=None, schema_path="", invalid=True): 100 # Reset everything since we're loading a new file 119 101 self.reset_state() 120 102 self.invalid = invalid 121 # Check that the file exists 122 if os.path.isfile(xml_file): 123 basename, extension = os.path.splitext(os.path.basename(xml_file)) 124 # If the file type is not allowed, return nothing 125 if extension in self.ext or self.allow_all: 126 # Get the file location of 127 self.load_file_and_schema(xml_file, schema_path) 128 self.add_data_set() 129 # Try to load the file, but raise an error if unable to. 
130 # Check the file matches the XML schema 103 if xml_file is None: 104 xml_file = self.f_open.name 105 # We don't sure f_open since lxml handles opnening/closing files 106 if not self.f_open.closed: 107 self.f_open.close() 108 109 basename, _ = os.path.splitext(os.path.basename(xml_file)) 110 111 try: 112 # Raises FileContentsException 113 self.load_file_and_schema(xml_file, schema_path) 114 self.current_datainfo = DataInfo() 115 # Raises FileContentsException if file doesn't meet CanSAS schema 116 self.is_cansas(self.extension) 117 self.invalid = False # If we reach this point then file must be valid CanSAS 118 119 # Parse each SASentry 120 entry_list = self.xmlroot.xpath('/ns:SASroot/ns:SASentry', namespaces={ 121 'ns': self.cansas_defaults.get("ns") 122 }) 123 # Look for a SASentry 124 self.names.append("SASentry") 125 self.set_processing_instructions() 126 127 for entry in entry_list: 128 self.current_datainfo.filename = basename + self.extension 129 self.current_datainfo.meta_data["loader"] = "CanSAS XML 1D" 130 self.current_datainfo.meta_data[PREPROCESS] = self.processing_instructions 131 self._parse_entry(entry) 132 has_error_dx = self.current_dataset.dx is not None 133 has_error_dy = self.current_dataset.dy is not None 134 self.remove_empty_q_values(has_error_dx=has_error_dx, 135 has_error_dy=has_error_dy) 136 self.send_to_output() # Combine datasets with DataInfo 137 self.current_datainfo = DataInfo() # Reset DataInfo 138 except FileContentsException as fc_exc: 139 # File doesn't meet schema - try loading with a less strict schema 140 base_name = xml_reader.__file__ 141 base_name = base_name.replace("\\", "/") 142 base = base_name.split("/sas/")[0] 143 if self.cansas_version == "1.1": 144 invalid_schema = INVALID_SCHEMA_PATH_1_1.format(base, self.cansas_defaults.get("schema")) 145 else: 146 invalid_schema = INVALID_SCHEMA_PATH_1_0.format(base, self.cansas_defaults.get("schema")) 147 self.set_schema(invalid_schema) 148 if self.invalid: 131 149 try: 132 self.is_cansas(extension) 133 self.invalid = False 134 # Get each SASentry from XML file and add it to a list. 135 entry_list = self.xmlroot.xpath( 136 '/ns:SASroot/ns:SASentry', 137 namespaces={'ns': self.cansas_defaults.get("ns")}) 138 self.names.append("SASentry") 139 140 # Get all preprocessing events and encoding 141 self.set_processing_instructions() 142 143 # Parse each <SASentry> item 144 for entry in entry_list: 145 # Create a new DataInfo object for every <SASentry> 146 147 # Set the file name and then parse the entry. 
148 self.current_datainfo.filename = basename + extension 149 self.current_datainfo.meta_data["loader"] = "CanSAS XML 1D" 150 self.current_datainfo.meta_data[PREPROCESS] = \ 151 self.processing_instructions 152 153 # Parse the XML SASentry 154 self._parse_entry(entry) 155 # Combine datasets with datainfo 156 self.add_data_set() 157 except RuntimeError: 158 # If the file does not match the schema, raise this error 150 # Load data with less strict schema 151 self.read(xml_file, invalid_schema, False) 152 153 # File can still be read but doesn't match schema, so raise exception 154 self.load_file_and_schema(xml_file) # Reload strict schema so we can find where error are in file 159 155 invalid_xml = self.find_invalid_xml() 160 invalid_xml = INVALID_XML.format(basename + extension) + invalid_xml 161 self.errors.add(invalid_xml) 162 # Try again with an invalid CanSAS schema, that requires only a data set in each 163 base_name = xml_reader.__file__ 164 base_name = base_name.replace("\\", "/") 165 base = base_name.split("/sas/")[0] 166 if self.cansas_version == "1.1": 167 invalid_schema = INVALID_SCHEMA_PATH_1_1.format(base, self.cansas_defaults.get("schema")) 168 else: 169 invalid_schema = INVALID_SCHEMA_PATH_1_0.format(base, self.cansas_defaults.get("schema")) 170 self.set_schema(invalid_schema) 171 try: 172 if self.invalid: 173 if self.is_cansas(): 174 self.output = self.read(xml_file, invalid_schema, False) 175 else: 176 raise RuntimeError 177 else: 178 raise RuntimeError 179 except RuntimeError: 180 x = np.zeros(1) 181 y = np.zeros(1) 182 self.current_data1d = Data1D(x,y) 183 self.current_data1d.errors = self.errors 184 return [self.current_data1d] 185 else: 186 self.output.append("Not a valid file path.") 187 # Return a list of parsed entries that dataloader can manage 188 return self.output 156 invalid_xml = INVALID_XML.format(basename + self.extension) + invalid_xml 157 raise DataReaderException(invalid_xml) # Handled by base class 158 except FileContentsException as fc_exc: 159 if not self.extension in self.ext: # If the file has no associated loader 160 raise DefaultReaderException(msg) 161 msg = "CanSAS Reader could not load the file {}".format(xml_file) 162 if fc_exc.message is not None: # Propagate error messages from earlier 163 msg = fc_exc.message 164 raise FileContentsException(msg) 165 pass 166 else: 167 raise fc_exc 168 except Exception as e: # Convert all other exceptions to FileContentsExceptions 169 raise FileContentsException(e.message) 170 171 172 def load_file_and_schema(self, xml_file, schema_path=""): 173 base_name = xml_reader.__file__ 174 base_name = base_name.replace("\\", "/") 175 base = base_name.split("/sas/")[0] 176 177 # Try and parse the XML file 178 try: 179 self.set_xml_file(xml_file) 180 except etree.XMLSyntaxError: # File isn't valid XML so can't be loaded 181 msg = "Cansas cannot load {}.\n Invalid XML syntax".format(xml_file) 182 raise FileContentsException(msg) 183 184 self.cansas_version = self.xmlroot.get("version", "1.0") 185 self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0") 186 187 if schema_path == "": 188 schema_path = "{}/sas/sascalc/dataloader/readers/schema/{}".format( 189 base, self.cansas_defaults.get("schema").replace("\\", "/") 190 ) 191 self.set_schema(schema_path) 192 193 def is_cansas(self, ext="xml"): 194 """ 195 Checks to see if the XML file is a CanSAS file 196 197 :param ext: The file extension of the data file 198 :raises FileContentsException: Raised if XML file isn't valid CanSAS 199 """ 200 if self.validate_xml(): # 
Check file is valid XML 201 name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation" 202 value = self.xmlroot.get(name) 203 # Check schema CanSAS version matches file CanSAS version 204 if CANSAS_NS.get(self.cansas_version).get("ns") == value.rsplit(" ")[0]: 205 return True 206 if ext == "svs": 207 return True # Why is this required? 208 # If we get to this point then file isn't valid CanSAS 209 raise FileContentsException("The file is not valid CanSAS") 189 210 190 211 def _parse_entry(self, dom, recurse=False): 191 """192 Parse a SASEntry - new recursive method for parsing the dom of193 the CanSAS data format. This will allow multiple data files194 and extra nodes to be read in simultaneously.195 196 :param dom: dom object with a namespace base of names197 """198 199 212 if not self._is_call_local() and not recurse: 200 213 self.reset_state() 201 self.add_data_set() 214 self.data = [] 215 self.current_datainfo = DataInfo() 202 216 self.names.append("SASentry") 203 217 self.parent_class = "SASentry" 204 self._check_for_empty_data() 205 self.base_ns = "{0}{1}{2}".format("{", \ 206 CANSAS_NS.get(self.cansas_version).get("ns"), "}") 207 208 # Go through each child in the parent element 218 # Create an empty dataset if no data has been passed to the reader 219 if self.current_dataset is None: 220 self.current_dataset = plottable_1D(np.empty(0), np.empty(0), 221 np.empty(0), np.empty(0)) 222 self.base_ns = "{" + CANSAS_NS.get(self.cansas_version).get("ns") + "}" 223 224 # Loop through each child in the parent element 209 225 for node in dom: 210 226 attr = node.attrib … … 217 233 if tagname == "fitting_plug_in" or tagname == "pr_inversion" or tagname == "invariant": 218 234 continue 219 220 235 # Get where to store content 221 236 self.names.append(tagname_original) … … 233 248 else: 234 249 self.current_dataset.shape = () 235 # Recurs ion stepto access data within the group236 self._parse_entry(node, True)250 # Recurse to access data within the group 251 self._parse_entry(node, recurse=True) 237 252 if tagname == "SASsample": 238 253 self.current_datainfo.sample.name = name … … 244 259 self.aperture.name = name 245 260 self.aperture.type = type 246 self. 
add_intermediate()261 self._add_intermediate() 247 262 else: 248 263 if isinstance(self.current_dataset, plottable_2D): … … 261 276 self.current_datainfo.notes.append(data_point) 262 277 263 # I and Q - 1D data278 # I and Q points 264 279 elif tagname == 'I' and isinstance(self.current_dataset, plottable_1D): 265 280 unit_list = unit.split("|") … … 283 298 self.current_dataset.dx = np.append(self.current_dataset.dx, data_point) 284 299 elif tagname == 'dQw': 300 if self.current_dataset.dqw is None: self.current_dataset.dqw = np.empty(0) 285 301 self.current_dataset.dxw = np.append(self.current_dataset.dxw, data_point) 286 302 elif tagname == 'dQl': 303 if self.current_dataset.dxl is None: self.current_dataset.dxl = np.empty(0) 287 304 self.current_dataset.dxl = np.append(self.current_dataset.dxl, data_point) 288 305 elif tagname == 'Qmean': … … 356 373 elif tagname == 'name' and self.parent_class == 'SASinstrument': 357 374 self.current_datainfo.instrument = data_point 375 358 376 # Detector Information 359 377 elif tagname == 'name' and self.parent_class == 'SASdetector': … … 401 419 self.detector.orientation.z = data_point 402 420 self.detector.orientation_unit = unit 421 403 422 # Collimation and Aperture 404 423 elif tagname == 'length' and self.parent_class == 'SAScollimation': … … 434 453 elif tagname == 'term' and self.parent_class == 'SASprocess': 435 454 unit = attr.get("unit", "") 436 dic = {} 437 dic["name"] = name 438 dic["value"] = data_point 439 dic["unit"] = unit 455 dic = { "name": name, "value": data_point, "unit": unit } 440 456 self.process.term.append(dic) 441 457 … … 490 506 if not self._is_call_local() and not recurse: 491 507 self.frm = "" 492 self.add_data_set() 508 self.current_datainfo.errors = set() 509 for error in self.errors: 510 self.current_datainfo.errors.add(error) 511 self.errors.clear() 512 self.send_to_output() 493 513 empty = None 494 514 return self.output[0], empty 495 515 496 497 516 def _is_call_local(self): 498 """499 500 """501 517 if self.frm == "": 502 518 inter = inspect.stack() … … 510 526 return True 511 527 512 def is_cansas(self, ext="xml"): 513 """ 514 Checks to see if the xml file is a CanSAS file 515 516 :param ext: The file extension of the data file 517 """ 518 if self.validate_xml(): 519 name = "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation" 520 value = self.xmlroot.get(name) 521 if CANSAS_NS.get(self.cansas_version).get("ns") == \ 522 value.rsplit(" ")[0]: 523 return True 524 if ext == "svs": 525 return True 526 raise RuntimeError 527 528 def load_file_and_schema(self, xml_file, schema_path=""): 529 """ 530 Loads the file and associates a schema, if a schema is passed in or if one already exists 531 532 :param xml_file: The xml file path sent to Reader.read 533 :param schema_path: The path to a schema associated with the xml_file, or find one based on the file 534 """ 535 base_name = xml_reader.__file__ 536 base_name = base_name.replace("\\", "/") 537 base = base_name.split("/sas/")[0] 538 539 # Load in xml file and get the cansas version from the header 540 self.set_xml_file(xml_file) 541 self.cansas_version = self.xmlroot.get("version", "1.0") 542 543 # Generic values for the cansas file based on the version 544 self.cansas_defaults = CANSAS_NS.get(self.cansas_version, "1.0") 545 if schema_path == "": 546 schema_path = "{0}/sas/sascalc/dataloader/readers/schema/{1}".format \ 547 (base, self.cansas_defaults.get("schema")).replace("\\", "/") 548 549 # Link a schema to the XML file. 
550 self.set_schema(schema_path) 551 552 def add_data_set(self): 553 """ 554 Adds the current_dataset to the list of outputs after preforming final processing on the data and then calls a 555 private method to generate a new data set. 556 557 :param key: NeXus group name for current tree level 558 """ 559 560 if self.current_datainfo and self.current_dataset: 561 self._final_cleanup() 562 self.data = [] 563 self.current_datainfo = DataInfo() 564 565 def _initialize_new_data_set(self, node=None): 566 """ 567 A private class method to generate a new 1D data object. 568 Outside methods should call add_data_set() to be sure any existing data is stored properly. 569 570 :param node: XML node to determine if 1D or 2D data 571 """ 572 x = np.array(0) 573 y = np.array(0) 574 for child in node: 575 if child.tag.replace(self.base_ns, "") == "Idata": 576 for i_child in child: 577 if i_child.tag.replace(self.base_ns, "") == "Qx": 578 self.current_dataset = plottable_2D() 579 return 580 self.current_dataset = plottable_1D(x, y) 581 582 def add_intermediate(self): 528 def _add_intermediate(self): 583 529 """ 584 530 This method stores any intermediate objects within the final data set after fully reading the set. 585 586 :param parent: The NXclass name for the h5py Group object that just finished being processed 587 """ 588 531 """ 589 532 if self.parent_class == 'SASprocess': 590 533 self.current_datainfo.process.append(self.process) … … 605 548 self._check_for_empty_resolution() 606 549 self.data.append(self.current_dataset) 607 608 def _final_cleanup(self):609 """610 Final cleanup of the Data1D object to be sure it has all the611 appropriate information needed for perspectives612 """613 614 # Append errors to dataset and reset class errors615 self.current_datainfo.errors = set()616 for error in self.errors:617 self.current_datainfo.errors.add(error)618 self.errors.clear()619 620 # Combine all plottables with datainfo and append each to output621 # Type cast data arrays to float64 and find min/max as appropriate622 for dataset in self.data:623 if isinstance(dataset, plottable_1D):624 if dataset.x is not None:625 dataset.x = np.delete(dataset.x, [0])626 dataset.x = dataset.x.astype(np.float64)627 dataset.xmin = np.min(dataset.x)628 dataset.xmax = np.max(dataset.x)629 if dataset.y is not None:630 dataset.y = np.delete(dataset.y, [0])631 dataset.y = dataset.y.astype(np.float64)632 dataset.ymin = np.min(dataset.y)633 dataset.ymax = np.max(dataset.y)634 if dataset.dx is not None:635 dataset.dx = np.delete(dataset.dx, [0])636 dataset.dx = dataset.dx.astype(np.float64)637 if dataset.dxl is not None:638 dataset.dxl = np.delete(dataset.dxl, [0])639 dataset.dxl = dataset.dxl.astype(np.float64)640 if dataset.dxw is not None:641 dataset.dxw = np.delete(dataset.dxw, [0])642 dataset.dxw = dataset.dxw.astype(np.float64)643 if dataset.dy is not None:644 dataset.dy = np.delete(dataset.dy, [0])645 dataset.dy = dataset.dy.astype(np.float64)646 np.trim_zeros(dataset.x)647 np.trim_zeros(dataset.y)648 np.trim_zeros(dataset.dy)649 elif isinstance(dataset, plottable_2D):650 dataset.data = dataset.data.astype(np.float64)651 dataset.qx_data = dataset.qx_data.astype(np.float64)652 dataset.xmin = np.min(dataset.qx_data)653 dataset.xmax = np.max(dataset.qx_data)654 dataset.qy_data = dataset.qy_data.astype(np.float64)655 dataset.ymin = np.min(dataset.qy_data)656 dataset.ymax = np.max(dataset.qy_data)657 dataset.q_data = np.sqrt(dataset.qx_data * dataset.qx_data658 + dataset.qy_data * dataset.qy_data)659 if dataset.err_data is not 
None:660 dataset.err_data = dataset.err_data.astype(np.float64)661 if dataset.dqx_data is not None:662 dataset.dqx_data = dataset.dqx_data.astype(np.float64)663 if dataset.dqy_data is not None:664 dataset.dqy_data = dataset.dqy_data.astype(np.float64)665 if dataset.mask is not None:666 dataset.mask = dataset.mask.astype(dtype=bool)667 668 if len(dataset.shape) == 2:669 n_rows, n_cols = dataset.shape670 dataset.y_bins = dataset.qy_data[0::int(n_cols)]671 dataset.x_bins = dataset.qx_data[:int(n_cols)]672 dataset.data = dataset.data.flatten()673 else:674 dataset.y_bins = []675 dataset.x_bins = []676 dataset.data = dataset.data.flatten()677 678 final_dataset = combine_data(dataset, self.current_datainfo)679 self.output.append(final_dataset)680 681 def _create_unique_key(self, dictionary, name, numb=0):682 """683 Create a unique key value for any dictionary to prevent overwriting684 Recurse until a unique key value is found.685 686 :param dictionary: A dictionary with any number of entries687 :param name: The index of the item to be added to dictionary688 :param numb: The number to be appended to the name, starts at 0689 """690 if dictionary.get(name) is not None:691 numb += 1692 name = name.split("_")[0]693 name += "_{0}".format(numb)694 name = self._create_unique_key(dictionary, name, numb)695 return name696 550 697 551 def _get_node_value(self, node, tagname): … … 801 655 return node_value, value_unit 802 656 803 def _check_for_empty_data(self):804 """805 Creates an empty data set if no data is passed to the reader806 807 :param data1d: presumably a Data1D object808 """809 if self.current_dataset is None:810 x_vals = np.empty(0)811 y_vals = np.empty(0)812 dx_vals = np.empty(0)813 dy_vals = np.empty(0)814 dxl = np.empty(0)815 dxw = np.empty(0)816 self.current_dataset = plottable_1D(x_vals, y_vals, dx_vals, dy_vals)817 self.current_dataset.dxl = dxl818 self.current_dataset.dxw = dxw819 820 657 def _check_for_empty_resolution(self): 821 658 """ 822 A method to check all resolution data sets are the same size as I and Q 823 """ 824 if isinstance(self.current_dataset, plottable_1D): 825 dql_exists = False 826 dqw_exists = False 827 dq_exists = False 828 di_exists = False 829 if self.current_dataset.dxl is not None: 830 dql_exists = True 831 if self.current_dataset.dxw is not None: 832 dqw_exists = True 833 if self.current_dataset.dx is not None: 834 dq_exists = True 835 if self.current_dataset.dy is not None: 836 di_exists = True 837 if dqw_exists and not dql_exists: 838 array_size = self.current_dataset.dxw.size - 1 839 self.current_dataset.dxl = np.append(self.current_dataset.dxl, 840 np.zeros([array_size])) 841 elif dql_exists and not dqw_exists: 842 array_size = self.current_dataset.dxl.size - 1 843 self.current_dataset.dxw = np.append(self.current_dataset.dxw, 844 np.zeros([array_size])) 845 elif not dql_exists and not dqw_exists and not dq_exists: 846 array_size = self.current_dataset.x.size - 1 847 self.current_dataset.dx = np.append(self.current_dataset.dx, 848 np.zeros([array_size])) 849 if not di_exists: 850 array_size = self.current_dataset.y.size - 1 851 self.current_dataset.dy = np.append(self.current_dataset.dy, 852 np.zeros([array_size])) 853 elif isinstance(self.current_dataset, plottable_2D): 854 dqx_exists = False 855 dqy_exists = False 856 di_exists = False 857 mask_exists = False 858 if self.current_dataset.dqx_data is not None: 859 dqx_exists = True 860 if self.current_dataset.dqy_data is not None: 861 dqy_exists = True 862 if self.current_dataset.err_data is not None: 863 
di_exists = True 864 if self.current_dataset.mask is not None: 865 mask_exists = True 866 if not dqy_exists: 867 array_size = self.current_dataset.qy_data.size - 1 868 self.current_dataset.dqy_data = np.append( 869 self.current_dataset.dqy_data, np.zeros([array_size])) 870 if not dqx_exists: 871 array_size = self.current_dataset.qx_data.size - 1 872 self.current_dataset.dqx_data = np.append( 873 self.current_dataset.dqx_data, np.zeros([array_size])) 874 if not di_exists: 875 array_size = self.current_dataset.data.size - 1 876 self.current_dataset.err_data = np.append( 877 self.current_dataset.err_data, np.zeros([array_size])) 878 if not mask_exists: 879 array_size = self.current_dataset.data.size - 1 880 self.current_dataset.mask = np.append( 881 self.current_dataset.mask, 882 np.ones([array_size] ,dtype=bool)) 883 884 ####### All methods below are for writing CanSAS XML files ####### 885 659 a method to check all resolution data sets are the same size as I and q 660 """ 661 dql_exists = False 662 dqw_exists = False 663 dq_exists = False 664 di_exists = False 665 if self.current_dataset.dxl is not None: 666 dql_exists = True 667 if self.current_dataset.dxw is not None: 668 dqw_exists = True 669 if self.current_dataset.dx is not None: 670 dq_exists = True 671 if self.current_dataset.dy is not None: 672 di_exists = True 673 if dqw_exists and not dql_exists: 674 array_size = self.current_dataset.dxw.size - 1 675 self.current_dataset.dxl = np.append(self.current_dataset.dxl, 676 np.zeros([array_size])) 677 elif dql_exists and not dqw_exists: 678 array_size = self.current_dataset.dxl.size - 1 679 self.current_dataset.dxw = np.append(self.current_dataset.dxw, 680 np.zeros([array_size])) 681 elif not dql_exists and not dqw_exists and not dq_exists: 682 array_size = self.current_dataset.x.size - 1 683 self.current_dataset.dx = np.append(self.current_dataset.dx, 684 np.zeros([array_size])) 685 if not di_exists: 686 array_size = self.current_dataset.y.size - 1 687 self.current_dataset.dy = np.append(self.current_dataset.dy, 688 np.zeros([array_size])) 689 690 def _initialize_new_data_set(self, node=None): 691 if node is not None: 692 for child in node: 693 if child.tag.replace(self.base_ns, "") == "Idata": 694 for i_child in child: 695 if i_child.tag.replace(self.base_ns, "") == "Qx": 696 self.current_dataset = plottable_2D() 697 return 698 self.current_dataset = plottable_1D(np.array(0), np.array(0)) 699 700 ## Writing Methods 886 701 def write(self, filename, datainfo): 887 702 """ … … 1514 1329 exec "storage.%s = entry.text.strip()" % variable 1515 1330 1516 1517 1331 # DO NOT REMOVE Called by outside packages: 1518 1332 # sas.sasgui.perspectives.invariant.invariant_state -
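The consolidated `_check_for_empty_resolution` above enforces one invariant: every resolution column that exists must be the same length as I and Q, and any missing column is padded with zeros. Below is a minimal standalone sketch of that padding rule; the function name and signature are illustrative, not the reader's API, and the reader itself appends `size - 1` zeros because the first element of each array is a placeholder trimmed during final cleanup, a detail this sketch skips.

```python
import numpy as np

def pad_resolution(x, y, dx=None, dxl=None, dxw=None, dy=None):
    """Pad missing resolution columns with zeros, mirroring the rule in
    _check_for_empty_resolution (illustrative, simplified sketch)."""
    n = len(x)
    if dxw is not None and dxl is None:
        dxl = np.zeros(n)          # slit widths without slit lengths
    elif dxl is not None and dxw is None:
        dxw = np.zeros(n)          # slit lengths without slit widths
    elif dxl is None and dxw is None and dx is None:
        dx = np.zeros(n)           # no q resolution at all
    if dy is None:
        dy = np.zeros(len(y))      # no intensity uncertainty
    return dx, dxl, dxw, dy

# Example: slit-smeared data supplying only dQw
x = np.linspace(0.01, 0.1, 5)
y = np.ones(5)
dx, dxl, dxw, dy = pad_resolution(x, y, dxw=np.full(5, 1e-3))
assert dxl.shape == x.shape and dy.shape == y.shape
```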
src/sas/sascalc/dataloader/readers/cansas_reader_HDF5.py
rc94280c r8dec7e7 13 13 TransmissionSpectrum, Detector 14 14 from sas.sascalc.dataloader.data_info import combine_data_info_with_plottable 15 16 17 class Reader(): 15 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DefaultReaderException 16 from sas.sascalc.dataloader.file_reader_base_class import FileReader 17 18 19 class Reader(FileReader): 18 20 """ 19 21 A class for reading in CanSAS v2.0 data files. The existing iteration opens … … 40 42 # Raw file contents to be processed 41 43 raw_data = None 42 # Data info currently being read in43 current_datainfo = None44 # SASdata set currently being read in45 current_dataset = None46 44 # List of plottable1D objects that should be linked to the current_datainfo 47 45 data1d = None … … 56 54 # Flag to bypass extension check 57 55 allow_all = True 58 # List of files to return 59 output = None 60 61 def read(self, filename): 56 57 def get_file_contents(self): 62 58 """ 63 59 This is the general read method that all SasView data_loaders must have. … … 68 64 # Reinitialize when loading a new data file to reset all class variables 69 65 self.reset_class_variables() 66 67 filename = self.f_open.name 68 self.f_open.close() # IO handled by h5py 69 70 70 # Check that the file exists 71 71 if os.path.isfile(filename): … … 75 75 if extension in self.ext or self.allow_all: 76 76 # Load the data file 77 self.raw_data = h5py.File(filename, 'r') 77 try: 78 self.raw_data = h5py.File(filename, 'r') 79 except Exception as e: 80 if extension not in self.ext: 81 msg = "CanSAS2.0 HDF5 Reader could not load file {}".format(basename + extension) 82 raise DefaultReaderException(msg) 83 raise FileContentsException(e.message) 78 84 # Read in all child elements of top level SASroot 79 85 self.read_children(self.raw_data, []) … … 427 433 Data1D and Data2D objects 428 434 """ 429 430 435 # Type cast data arrays to float64 431 436 if len(self.current_datainfo.trans_spectrum) > 0: … … 451 456 # Type cast data arrays to float64 and find min/max as appropriate 452 457 for dataset in self.data2d: 453 dataset.data = dataset.data.astype(np.float64)454 dataset.err_data = dataset.err_data.astype(np.float64)455 if dataset.qx_data is not None:456 dataset.xmin = np.min(dataset.qx_data)457 dataset.xmax = np.max(dataset.qx_data)458 dataset.qx_data = dataset.qx_data.astype(np.float64)459 if dataset.dqx_data is not None:460 dataset.dqx_data = dataset.dqx_data.astype(np.float64)461 if dataset.qy_data is not None:462 dataset.ymin = np.min(dataset.qy_data)463 dataset.ymax = np.max(dataset.qy_data)464 dataset.qy_data = dataset.qy_data.astype(np.float64)465 if dataset.dqy_data is not None:466 dataset.dqy_data = dataset.dqy_data.astype(np.float64)467 if dataset.q_data is not None:468 dataset.q_data = dataset.q_data.astype(np.float64)469 458 zeros = np.ones(dataset.data.size, dtype=bool) 470 459 try: … … 489 478 dataset.x_bins = dataset.qx_data[:n_cols] 490 479 dataset.data = dataset.data.flatten() 491 492 final_dataset = combine_data_info_with_plottable( 493 dataset, self.current_datainfo) 494 self.output.append(final_dataset) 480 self.current_dataset = dataset 481 self.send_to_output() 495 482 496 483 for dataset in self.data1d: 497 if dataset.x is not None: 498 dataset.x = dataset.x.astype(np.float64) 499 dataset.xmin = np.min(dataset.x) 500 dataset.xmax = np.max(dataset.x) 501 if dataset.y is not None: 502 dataset.y = dataset.y.astype(np.float64) 503 dataset.ymin = np.min(dataset.y) 504 dataset.ymax = np.max(dataset.y) 505 if dataset.dx is not None: 506 dataset.dx 
= dataset.dx.astype(np.float64) 507 if dataset.dxl is not None: 508 dataset.dxl = dataset.dxl.astype(np.float64) 509 if dataset.dxw is not None: 510 dataset.dxw = dataset.dxw.astype(np.float64) 511 if dataset.dy is not None: 512 dataset.dy = dataset.dy.astype(np.float64) 513 final_dataset = combine_data_info_with_plottable( 514 dataset, self.current_datainfo) 515 self.output.append(final_dataset) 484 self.current_dataset = dataset 485 self.send_to_output() 516 486 517 487 def add_data_set(self, key=""): -
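The HDF5 reader now inherits `FileReader` and hands file IO to h5py, translating open failures into the loader's exception hierarchy: `DefaultReaderException` when the file never claimed the reader's extension (so other readers get a chance), `FileContentsException` when it did. A sketch of that error-translation pattern, with stand-in exception classes in place of `sas.sascalc.dataloader.loader_exceptions` and an illustrative helper name:

```python
import os
import h5py

class FileContentsException(Exception):
    """Stand-in for the class in sas.sascalc.dataloader.loader_exceptions."""

class DefaultReaderException(Exception):
    """Stand-in: the reader was only tried because allow_all is set."""

def open_hdf5(filename, known_extensions=('.h5', '.nxs')):
    """Open an HDF5 file, translating failures the way get_file_contents
    now does (illustrative helper, not the reader itself)."""
    extension = os.path.splitext(filename)[1].lower()
    try:
        # h5py handles its own IO, so the framework's open file handle
        # is closed first and only the path is passed along.
        return h5py.File(filename, 'r')
    except Exception as e:
        if extension not in known_extensions:
            raise DefaultReaderException(
                "CanSAS2.0 HDF5 Reader could not load file %s" % filename)
        raise FileContentsException(str(e))
```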
src/sas/sascalc/dataloader/readers/danse_reader.py
r235f514 r713a047 5 5 #This software was developed by the University of Tennessee as part of the 6 6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 #project funded by the US National Science Foundation. 7 #project funded by the US National Science Foundation. 8 8 #If you use DANSE applications to do scientific research that leads to 9 9 #publication, we ask that you acknowledge the use of the software with the … … 14 14 import math 15 15 import os 16 import sys17 16 import numpy as np 18 17 import logging 19 from sas.sascalc.dataloader.data_info import Data2D, Detector18 from sas.sascalc.dataloader.data_info import plottable_2D, DataInfo, Detector 20 19 from sas.sascalc.dataloader.manipulations import reader2D_converter 20 from sas.sascalc.dataloader.file_reader_base_class import FileReader 21 from sas.sascalc.dataloader.loader_exceptions import FileContentsException, DataReaderException 21 22 22 23 logger = logging.getLogger(__name__) … … 30 31 31 32 32 class Reader :33 class Reader(FileReader): 33 34 """ 34 35 Example data manipulation … … 40 41 ## Extension 41 42 ext = ['.sans', '.SANS'] 42 43 def read(self, filename=None): 44 """ 45 Open and read the data in a file 46 @param file: path of the file 47 """ 48 49 read_it = False 50 for item in self.ext: 51 if filename.lower().find(item) >= 0: 52 read_it = True 53 54 if read_it: 43 44 def get_file_contents(self): 45 self.current_datainfo = DataInfo() 46 self.current_dataset = plottable_2D() 47 self.output = [] 48 49 loaded_correctly = True 50 error_message = "" 51 52 # defaults 53 # wavelength in Angstrom 54 wavelength = 10.0 55 # Distance in meter 56 distance = 11.0 57 # Pixel number of center in x 58 center_x = 65 59 # Pixel number of center in y 60 center_y = 65 61 # Pixel size [mm] 62 pixel = 5.0 63 # Size in x, in pixels 64 size_x = 128 65 # Size in y, in pixels 66 size_y = 128 67 # Format version 68 fversion = 1.0 69 70 self.current_datainfo.filename = os.path.basename(self.f_open.name) 71 detector = Detector() 72 self.current_datainfo.detector.append(detector) 73 74 self.current_dataset.data = np.zeros([size_x, size_y]) 75 self.current_dataset.err_data = np.zeros([size_x, size_y]) 76 77 read_on = True 78 data_start_line = 1 79 while read_on: 80 line = self.f_open.readline() 81 data_start_line += 1 82 if line.find("DATA:") >= 0: 83 read_on = False 84 break 85 toks = line.split(':') 55 86 try: 56 datafile = open(filename, 'r')57 except:58 raise RuntimeError,"danse_reader cannot open %s" % (filename)59 60 # defaults61 # wavelength in Angstrom62 wavelength = 10.063 # Distance in meter64 distance = 11.065 # Pixel number of center in x66 center_x = 6567 # Pixel number of center in y68 center_y = 6569 # Pixel size [mm]70 pixel = 5.071 # Size in x, in pixels72 size_x = 12873 # Size in y, in pixels74 size_y = 12875 # Format version76 fversion = 1.077 78 output = Data2D()79 output.filename = os.path.basename(filename)80 detector = Detector()81 output.detector.append(detector)82 83 output.data = np.zeros([size_x,size_y])84 output.err_data = np.zeros([size_x, size_y])85 86 data_conv_q = None87 data_conv_i = None88 89 if has_converter == True and output.Q_unit != '1/A':90 data_conv_q = Converter('1/A')91 # Test it92 data_conv_q(1.0, output.Q_unit)93 94 if has_converter == True and output.I_unit != '1/cm':95 data_conv_i = Converter('1/cm')96 # Test it97 data_conv_i(1.0, output.I_unit)98 99 read_on = True100 while read_on:101 line = datafile.readline()102 if line.find("DATA:") >= 0:103 read_on = False104 break105 toks = 
line.split(':')106 87 if toks[0] == "FORMATVERSION": 107 88 fversion = float(toks[1]) 108 if toks[0] == "WAVELENGTH":89 elif toks[0] == "WAVELENGTH": 109 90 wavelength = float(toks[1]) 110 91 elif toks[0] == "DISTANCE": … … 120 101 elif toks[0] == "SIZE_Y": 121 102 size_y = int(toks[1]) 122 123 # Read the data 124 data = [] 125 error = [] 126 if fversion == 1.0: 127 data_str = datafile.readline() 128 data = data_str.split(' ') 129 else: 130 read_on = True 131 while read_on: 132 data_str = datafile.readline() 133 if len(data_str) == 0: 134 read_on = False 135 else: 136 toks = data_str.split() 137 try: 138 val = float(toks[0]) 139 err = float(toks[1]) 140 if data_conv_i is not None: 141 val = data_conv_i(val, units=output._yunit) 142 err = data_conv_i(err, units=output._yunit) 143 data.append(val) 144 error.append(err) 145 except: 146 logger.info("Skipping line:%s,%s" %(data_str, 147 sys.exc_value)) 148 149 # Initialize 150 x_vals = [] 151 y_vals = [] 152 ymin = None 153 ymax = None 154 xmin = None 155 xmax = None 156 157 # Qx and Qy vectors 158 theta = pixel / distance / 100.0 159 stepq = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 160 for i_x in range(size_x): 161 theta = (i_x - center_x + 1) * pixel / distance / 100.0 162 qx = 4.0 * math.pi / wavelength * math.sin(theta / 2.0) 163 164 if has_converter == True and output.Q_unit != '1/A': 165 qx = data_conv_q(qx, units=output.Q_unit) 166 167 x_vals.append(qx) 168 if xmin is None or qx < xmin: 169 xmin = qx 170 if xmax is None or qx > xmax: 171 xmax = qx 172 173 ymin = None 174 ymax = None 175 for i_y in range(size_y): 176 theta = (i_y - center_y + 1) * pixel / distance / 100.0 177 qy = 4.0 * math.pi / wavelength * math.sin(theta/2.0) 178 179 if has_converter == True and output.Q_unit != '1/A': 180 qy = data_conv_q(qy, units=output.Q_unit) 181 182 y_vals.append(qy) 183 if ymin is None or qy < ymin: 184 ymin = qy 185 if ymax is None or qy > ymax: 186 ymax = qy 187 188 # Store the data in the 2D array 189 i_x = 0 190 i_y = -1 191 192 for i_pt in range(len(data)): 193 try: 194 value = float(data[i_pt]) 195 except: 196 # For version 1.0, the data were still 197 # stored as strings at this point. 
198 msg = "Skipping entry (v1.0):%s,%s" % (str(data[i_pt]), 199 sys.exc_value) 200 logger.info(msg) 201 202 # Get bin number 203 if math.fmod(i_pt, size_x) == 0: 204 i_x = 0 205 i_y += 1 206 else: 207 i_x += 1 208 209 output.data[i_y][i_x] = value 210 if fversion>1.0: 211 output.err_data[i_y][i_x] = error[i_pt] 212 213 # Store all data 214 # Store wavelength 215 if has_converter == True and output.source.wavelength_unit != 'A': 216 conv = Converter('A') 217 wavelength = conv(wavelength, 218 units=output.source.wavelength_unit) 219 output.source.wavelength = wavelength 220 221 # Store distance 222 if has_converter == True and detector.distance_unit != 'm': 223 conv = Converter('m') 224 distance = conv(distance, units=detector.distance_unit) 225 detector.distance = distance 226 227 # Store pixel size 228 if has_converter == True and detector.pixel_size_unit != 'mm': 229 conv = Converter('mm') 230 pixel = conv(pixel, units=detector.pixel_size_unit) 231 detector.pixel_size.x = pixel 232 detector.pixel_size.y = pixel 233 234 # Store beam center in distance units 235 detector.beam_center.x = center_x * pixel 236 detector.beam_center.y = center_y * pixel 237 238 # Store limits of the image (2D array) 239 xmin = xmin - stepq / 2.0 240 xmax = xmax + stepq / 2.0 241 ymin = ymin - stepq /2.0 242 ymax = ymax + stepq / 2.0 243 244 if has_converter == True and output.Q_unit != '1/A': 245 xmin = data_conv_q(xmin, units=output.Q_unit) 246 xmax = data_conv_q(xmax, units=output.Q_unit) 247 ymin = data_conv_q(ymin, units=output.Q_unit) 248 ymax = data_conv_q(ymax, units=output.Q_unit) 249 output.xmin = xmin 250 output.xmax = xmax 251 output.ymin = ymin 252 output.ymax = ymax 253 254 # Store x and y axis bin centers 255 output.x_bins = x_vals 256 output.y_bins = y_vals 257 258 # Units 259 if data_conv_q is not None: 260 output.xaxis("\\rm{Q_{x}}", output.Q_unit) 261 output.yaxis("\\rm{Q_{y}}", output.Q_unit) 262 else: 263 output.xaxis("\\rm{Q_{x}}", 'A^{-1}') 264 output.yaxis("\\rm{Q_{y}}", 'A^{-1}') 265 266 if data_conv_i is not None: 267 output.zaxis("\\rm{Intensity}", output.I_unit) 268 else: 269 output.zaxis("\\rm{Intensity}", "cm^{-1}") 270 271 if not fversion >= 1.0: 272 msg = "Danse_reader can't read this file %s" % filename 273 raise ValueError, msg 274 else: 275 logger.info("Danse_reader Reading %s \n" % filename) 276 277 # Store loading process information 278 output.meta_data['loader'] = self.type_name 279 output = reader2D_converter(output) 280 return output 281 282 return None 103 except ValueError as e: 104 error_message += "Unable to parse {}. Default value used.\n".format(toks[0]) 105 loaded_correctly = False 106 107 # Read the data 108 data = [] 109 error = [] 110 if not fversion >= 1.0: 111 msg = "danse_reader can't read this file {}".format(self.f_open.name) 112 raise FileContentsException(msg) 113 114 for line_num, data_str in enumerate(self.f_open.readlines()): 115 toks = data_str.split() 116 try: 117 val = float(toks[0]) 118 err = float(toks[1]) 119 data.append(val) 120 error.append(err) 121 except ValueError as exc: 122 msg = "Unable to parse line {}: {}".format(line_num + data_start_line, data_str.strip()) 123 raise FileContentsException(msg) 124 125 num_pts = size_x * size_y 126 if len(data) < num_pts: 127 msg = "Not enough data points provided. Expected {} but got {}".format( 128 size_x * size_y, len(data)) 129 raise FileContentsException(msg) 130 elif len(data) > num_pts: 131 error_message += ("Too many data points provided. Expected {0} but" 132 " got {1}. 
Only the first {0} will be used.\n").format(num_pts, len(data)) 133 loaded_correctly = False 134 data = data[:num_pts] 135 error = error[:num_pts] 136 137 # Qx and Qy vectors 138 theta = pixel / distance / 100.0 139 i_x = np.arange(size_x) 140 theta = (i_x - center_x + 1) * pixel / distance / 100.0 141 x_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 142 xmin = x_vals.min() 143 xmax = x_vals.max() 144 145 i_y = np.arange(size_y) 146 theta = (i_y - center_y + 1) * pixel / distance / 100.0 147 y_vals = 4.0 * np.pi / wavelength * np.sin(theta / 2.0) 148 ymin = y_vals.min() 149 ymax = y_vals.max() 150 151 self.current_dataset.data = np.array(data, dtype=np.float64).reshape((size_y, size_x)) 152 if fversion > 1.0: 153 self.current_dataset.err_data = np.array(error, dtype=np.float64).reshape((size_y, size_x)) 154 155 # Store all data 156 # Store wavelength 157 if has_converter == True and self.current_datainfo.source.wavelength_unit != 'A': 158 conv = Converter('A') 159 wavelength = conv(wavelength, 160 units=self.current_datainfo.source.wavelength_unit) 161 self.current_datainfo.source.wavelength = wavelength 162 163 # Store distance 164 if has_converter == True and detector.distance_unit != 'm': 165 conv = Converter('m') 166 distance = conv(distance, units=detector.distance_unit) 167 detector.distance = distance 168 169 # Store pixel size 170 if has_converter == True and detector.pixel_size_unit != 'mm': 171 conv = Converter('mm') 172 pixel = conv(pixel, units=detector.pixel_size_unit) 173 detector.pixel_size.x = pixel 174 detector.pixel_size.y = pixel 175 176 # Store beam center in distance units 177 detector.beam_center.x = center_x * pixel 178 detector.beam_center.y = center_y * pixel 179 180 181 self.current_dataset.xaxis("\\rm{Q_{x}}", 'A^{-1}') 182 self.current_dataset.yaxis("\\rm{Q_{y}}", 'A^{-1}') 183 self.current_dataset.zaxis("\\rm{Intensity}", "cm^{-1}") 184 185 self.current_dataset.x_bins = x_vals 186 self.current_dataset.y_bins = y_vals 187 188 # Reshape data 189 x_vals = np.tile(x_vals, (size_y, 1)).flatten() 190 y_vals = np.tile(y_vals, (size_x, 1)).T.flatten() 191 if self.current_dataset.err_data == np.all(np.array(None)) or np.any(self.current_dataset.err_data <= 0): 192 new_err_data = np.sqrt(np.abs(self.current_dataset.data)) 193 else: 194 new_err_data = self.current_dataset.err_data.flatten() 195 196 self.current_dataset.err_data = new_err_data 197 self.current_dataset.qx_data = x_vals 198 self.current_dataset.qy_data = y_vals 199 self.current_dataset.q_data = np.sqrt(x_vals**2 + y_vals**2) 200 self.current_dataset.mask = np.ones(len(x_vals), dtype=bool) 201 202 # Store loading process information 203 self.current_datainfo.meta_data['loader'] = self.type_name 204 205 self.send_to_output() 206 207 if not loaded_correctly: 208 raise DataReaderException(error_message) -
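The rewritten danse_reader replaces the per-pixel Python loops with numpy arithmetic: each pixel index maps to a q value through the same formula the old loop used. The sketch below mirrors that mapping with the reader's default geometry; the helper name is illustrative.

```python
import numpy as np

def detector_q_values(size, center, pixel, distance, wavelength):
    """q for each pixel index, mirroring the reader's formula:
    theta = (i - center + 1) * pixel / distance / 100.0
    q     = 4 pi / wavelength * sin(theta / 2)
    """
    i = np.arange(size)
    theta = (i - center + 1) * pixel / distance / 100.0
    return 4.0 * np.pi / wavelength * np.sin(theta / 2.0)

# Defaults from the reader: 128x128 pixels, center 65, 5 mm pixels,
# 11 m distance, 10 A wavelength
qx = detector_q_values(size=128, center=65, pixel=5.0, distance=11.0,
                       wavelength=10.0)
qy = detector_q_values(size=128, center=65, pixel=5.0, distance=11.0,
                       wavelength=10.0)

# Per-pixel flattened vectors, built the same way as in get_file_contents
qx_flat = np.tile(qx, (128, 1)).flatten()
qy_flat = np.tile(qy, (128, 1)).T.flatten()
q = np.sqrt(qx_flat ** 2 + qy_flat ** 2)
```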
src/sas/sascalc/dataloader/readers/red2d_reader.py
ra1b8fee r2f85af7 5 5 #This software was developed by the University of Tennessee as part of the 6 6 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 7 #project funded by the US National Science Foundation. 7 #project funded by the US National Science Foundation. 8 8 #See the license text in license.txt 9 9 #copyright 2008, University of Tennessee 10 10 ###################################################################### 11 from __future__ import print_function12 13 11 import os 14 12 import numpy as np 15 13 import math 16 from sas.sascalc.dataloader.data_info import Data2D, Detector 14 from sas.sascalc.dataloader.data_info import plottable_2D, DataInfo, Detector 15 from sas.sascalc.dataloader.file_reader_base_class import FileReader 16 from sas.sascalc.dataloader.loader_exceptions import FileContentsException 17 17 18 18 # Look for unit converter … … 22 22 except: 23 23 has_converter = False 24 25 24 25 26 26 def check_point(x_point): 27 27 """ … … 33 33 except: 34 34 return 0 35 36 37 class Reader :35 36 37 class Reader(FileReader): 38 38 """ Simple data reader for Igor data files """ 39 39 ## File type … … 43 43 ## Extension 44 44 ext = ['.DAT', '.dat'] 45 45 46 46 def write(self, filename, data): 47 47 """ 48 48 Write to .dat 49 49 50 50 :param filename: file name to write 51 51 :param data: data2D … … 53 53 import time 54 54 # Write the file 55 fd = open(filename, 'w') 56 t = time.localtime() 57 time_str = time.strftime("%H:%M on %b %d %y", t) 58 59 header_str = "Data columns are Qx - Qy - I(Qx,Qy)\n\nASCII data" 60 header_str += " created at %s \n\n" % time_str 61 # simple 2D header 62 fd.write(header_str) 63 # write qx qy I values 64 for i in range(len(data.data)): 65 fd.write("%g %g %g\n" % (data.qx_data[i], 66 data.qy_data[i], 67 data.data[i])) 68 # close 69 fd.close() 70 71 def read(self, filename=None): 72 """ Read file """ 73 if not os.path.isfile(filename): 74 raise ValueError, \ 75 "Specified file %s is not a regular file" % filename 76 55 try: 56 fd = open(filename, 'w') 57 t = time.localtime() 58 time_str = time.strftime("%H:%M on %b %d %y", t) 59 60 header_str = "Data columns are Qx - Qy - I(Qx,Qy)\n\nASCII data" 61 header_str += " created at %s \n\n" % time_str 62 # simple 2D header 63 fd.write(header_str) 64 # write qx qy I values 65 for i in range(len(data.data)): 66 fd.write("%g %g %g\n" % (data.qx_data[i], 67 data.qy_data[i], 68 data.data[i])) 69 finally: 70 fd.close() 71 72 def get_file_contents(self): 77 73 # Read file 78 f = open(filename, 'r') 79 buf = f.read() 80 f.close() 74 buf = self.f_open.read() 75 self.f_open.close() 81 76 # Instantiate data object 82 output = Data2D() 83 output.filename = os.path.basename(filename) 84 detector = Detector() 85 if len(output.detector) > 0: 86 print(str(output.detector[0])) 87 output.detector.append(detector) 88 77 self.current_dataset = plottable_2D() 78 self.current_datainfo = DataInfo() 79 self.current_datainfo.filename = os.path.basename(self.f_open.name) 80 self.current_datainfo.detector.append(Detector()) 81 89 82 # Get content 90 data Started = False91 83 data_started = False 84 92 85 ## Defaults 93 86 lines = buf.split('\n') 94 87 x = [] 95 88 y = [] 96 89 97 90 wavelength = None 98 91 distance = None 99 92 transmission = None 100 93 101 94 pixel_x = None 102 95 pixel_y = None 103 104 isInfo = False 105 isCenter = False 106 107 data_conv_q = None 108 data_conv_i = None 109 110 # Set units: This is the unit assumed for Q and I in the data file. 
111 if has_converter == True and output.Q_unit != '1/A': 112 data_conv_q = Converter('1/A') 113 # Test it 114 data_conv_q(1.0, output.Q_unit) 115 116 if has_converter == True and output.I_unit != '1/cm': 117 data_conv_i = Converter('1/cm') 118 # Test it 119 data_conv_i(1.0, output.I_unit) 120 121 96 97 is_info = False 98 is_center = False 99 122 100 # Remove the last lines before the for loop if the lines are empty 123 101 # to calculate the exact number of data points … … 135 113 ## Reading the header applies only to IGOR/NIST 2D q_map data files 136 114 # Find setup info line 137 if is Info:138 is Info = False115 if is_info: 116 is_info = False 139 117 line_toks = line.split() 140 118 # Wavelength in Angstrom … … 143 121 # Units 144 122 if has_converter == True and \ 145 output.source.wavelength_unit != 'A':123 self.current_datainfo.source.wavelength_unit != 'A': 146 124 conv = Converter('A') 147 125 wavelength = conv(wavelength, 148 units= output.source.wavelength_unit)126 units=self.current_datainfo.source.wavelength_unit) 149 127 except: 150 128 #Not required … … 154 132 distance = float(line_toks[3]) 155 133 # Units 156 if has_converter == True and detector.distance_unit != 'm':134 if has_converter == True and self.current_datainfo.detector[0].distance_unit != 'm': 157 135 conv = Converter('m') 158 distance = conv(distance, units=detector.distance_unit) 136 distance = conv(distance, 137 units=self.current_datainfo.detector[0].distance_unit) 159 138 except: 160 139 #Not required 161 140 pass 162 141 163 142 # Distance in meters 164 143 try: … … 167 146 #Not required 168 147 pass 169 148 170 149 if line.count("LAMBDA") > 0: 171 is Info = True172 150 is_info = True 151 173 152 # Find center info line 174 if is Center:175 is Center = False153 if is_center: 154 is_center = False 176 155 line_toks = line.split() 177 156 # Center in bin number … … 180 159 181 160 if line.count("BCENT") > 0: 182 is Center = True161 is_center = True 183 162 # Check version 184 163 if line.count("Data columns") > 0: … … 187 166 # Find data start 188 167 if line.count("ASCII data") > 0: 189 data Started = True168 data_started = True 190 169 continue 191 170 192 171 ## Read and get data. 193 if data Started == True:172 if data_started == True: 194 173 line_toks = line.split() 195 174 if len(line_toks) == 0: 196 175 #empty line 197 176 continue 198 # the number of columns must be stayed same 177 # the number of columns must be stayed same 199 178 col_num = len(line_toks) 200 179 break … … 204 183 # index for lines_array 205 184 lines_index = np.arange(len(lines)) 206 185 207 186 # get the data lines 208 187 data_lines = lines_array[lines_index >= (line_num - 1)] … … 213 192 # split all data to one big list w/" "separator 214 193 data_list = data_list.split() 215 194 216 195 # Check if the size is consistent with data, otherwise 217 196 #try the tab(\t) separator … … 233 212 data_point = data_array.reshape(row_num, col_num).transpose() 234 213 except: 235 msg = "red2d_reader : Can't read this file: Not a proper file format"236 raise ValueError, msg214 msg = "red2d_reader can't read this file: Incorrect number of data points provided." 
215 raise FileContentsException(msg) 237 216 ## Get the all data: Let's HARDcoding; Todo find better way 238 217 # Defaults … … 257 236 #if col_num > (6 + ver): mask[data_point[(6 + ver)] < 1] = False 258 237 q_data = np.sqrt(qx_data*qx_data+qy_data*qy_data+qz_data*qz_data) 259 260 # Extra protection(it is needed for some data files): 238 239 # Extra protection(it is needed for some data files): 261 240 # If all mask elements are False, put all True 262 241 if not mask.any(): 263 242 mask[mask == False] = True 264 243 265 244 # Store limits of the image in q space 266 245 xmin = np.min(qx_data) … … 269 248 ymax = np.max(qy_data) 270 249 271 # units272 if has_converter == True and output.Q_unit != '1/A':273 xmin = data_conv_q(xmin, units=output.Q_unit)274 xmax = data_conv_q(xmax, units=output.Q_unit)275 ymin = data_conv_q(ymin, units=output.Q_unit)276 ymax = data_conv_q(ymax, units=output.Q_unit)277 278 250 ## calculate the range of the qx and qy_data 279 251 x_size = math.fabs(xmax - xmin) 280 252 y_size = math.fabs(ymax - ymin) 281 253 282 254 # calculate the number of pixels in the each axes 283 255 npix_y = math.floor(math.sqrt(len(data))) 284 256 npix_x = math.floor(len(data) / npix_y) 285 257 286 258 # calculate the size of bins 287 259 xstep = x_size / (npix_x - 1) 288 260 ystep = y_size / (npix_y - 1) 289 261 290 262 # store x and y axis bin centers in q space 291 263 x_bins = np.arange(xmin, xmax + xstep, xstep) 292 264 y_bins = np.arange(ymin, ymax + ystep, ystep) 293 265 294 266 # get the limits of q values 295 267 xmin = xmin - xstep / 2 … … 297 269 ymin = ymin - ystep / 2 298 270 ymax = ymax + ystep / 2 299 271 300 272 #Store data in outputs 301 273 #TODO: Check the lengths 302 output.data = data274 self.current_dataset.data = data 303 275 if (err_data == 1).all(): 304 output.err_data = np.sqrt(np.abs(data))305 output.err_data[output.err_data == 0.0] = 1.0276 self.current_dataset.err_data = np.sqrt(np.abs(data)) 277 self.current_dataset.err_data[self.current_dataset.err_data == 0.0] = 1.0 306 278 else: 307 output.err_data = err_data308 309 output.qx_data = qx_data310 output.qy_data = qy_data311 output.q_data = q_data312 output.mask = mask313 314 output.x_bins = x_bins315 output.y_bins = y_bins316 317 output.xmin = xmin318 output.xmax = xmax319 output.ymin = ymin320 output.ymax = ymax321 322 output.source.wavelength = wavelength323 279 self.current_dataset.err_data = err_data 280 281 self.current_dataset.qx_data = qx_data 282 self.current_dataset.qy_data = qy_data 283 self.current_dataset.q_data = q_data 284 self.current_dataset.mask = mask 285 286 self.current_dataset.x_bins = x_bins 287 self.current_dataset.y_bins = y_bins 288 289 self.current_dataset.xmin = xmin 290 self.current_dataset.xmax = xmax 291 self.current_dataset.ymin = ymin 292 self.current_dataset.ymax = ymax 293 294 self.current_datainfo.source.wavelength = wavelength 295 324 296 # Store pixel size in mm 325 detector.pixel_size.x = pixel_x326 detector.pixel_size.y = pixel_y327 297 self.current_datainfo.detector[0].pixel_size.x = pixel_x 298 self.current_datainfo.detector[0].pixel_size.y = pixel_y 299 328 300 # Store the sample to detector distance 329 detector.distance = distance330 301 self.current_datainfo.detector[0].distance = distance 302 331 303 # optional data: if all of dq data == 0, do not pass to output 332 304 if len(dqx_data) == len(qx_data) and dqx_data.any() != 0: … … 340 312 cos_th = qx_data / diag 341 313 sin_th = qy_data / diag 342 output.dqx_data = np.sqrt((dqx_data * cos_th) * \314 
self.current_dataset.dqx_data = np.sqrt((dqx_data * cos_th) * \ 343 315 (dqx_data * cos_th) \ 344 316 + (dqy_data * sin_th) * \ 345 317 (dqy_data * sin_th)) 346 output.dqy_data = np.sqrt((dqx_data * sin_th) * \318 self.current_dataset.dqy_data = np.sqrt((dqx_data * sin_th) * \ 347 319 (dqx_data * sin_th) \ 348 320 + (dqy_data * cos_th) * \ 349 321 (dqy_data * cos_th)) 350 322 else: 351 output.dqx_data = dqx_data352 output.dqy_data = dqy_data323 self.current_dataset.dqx_data = dqx_data 324 self.current_dataset.dqy_data = dqy_data 353 325 354 326 # Units of axes 355 if data_conv_q is not None: 356 output.xaxis("\\rm{Q_{x}}", output.Q_unit) 357 output.yaxis("\\rm{Q_{y}}", output.Q_unit) 358 else: 359 output.xaxis("\\rm{Q_{x}}", 'A^{-1}') 360 output.yaxis("\\rm{Q_{y}}", 'A^{-1}') 361 if data_conv_i is not None: 362 output.zaxis("\\rm{Intensity}", output.I_unit) 363 else: 364 output.zaxis("\\rm{Intensity}", "cm^{-1}") 365 327 self.current_dataset.xaxis("\\rm{Q_{x}}", 'A^{-1}') 328 self.current_dataset.yaxis("\\rm{Q_{y}}", 'A^{-1}') 329 self.current_dataset.zaxis("\\rm{Intensity}", "cm^{-1}") 330 366 331 # Store loading process information 367 output.meta_data['loader'] = self.type_name368 369 return output332 self.current_datainfo.meta_data['loader'] = self.type_name 333 334 self.send_to_output() -
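The dq block kept at the end of red2d_reader rotates the pixel-frame resolutions onto the q direction: with cos θ = qx/|q| and sin θ = qy/|q|, it stores the quadrature sums dqx' = √((dqx cos θ)² + (dqy sin θ)²) and dqy' = √((dqx sin θ)² + (dqy cos θ)²). A self-contained sketch follows; the `eps` guard against |q| = 0 is an addition for the example, since the reader assumes nonzero q.

```python
import numpy as np

def rotate_dq(qx, qy, dqx, dqy, eps=1e-32):
    """Quadrature-sum rotation of pixel-frame resolutions onto the q
    direction, as in red2d_reader's dq block (illustrative helper)."""
    diag = np.sqrt(qx ** 2 + qy ** 2 + eps)  # eps guard added for the example
    cos_th = qx / diag
    sin_th = qy / diag
    dqx_q = np.sqrt((dqx * cos_th) ** 2 + (dqy * sin_th) ** 2)
    dqy_q = np.sqrt((dqx * sin_th) ** 2 + (dqy * cos_th) ** 2)
    return dqx_q, dqy_q

qx = np.array([0.02, 0.0])
qy = np.array([0.0, 0.02])
dqx = np.full(2, 1e-4)
dqy = np.full(2, 2e-4)
# On the qx axis the full dqx survives; on the qy axis dqy maps to dqx_q
print(rotate_dq(qx, qy, dqx, dqy))
```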
src/sas/sascalc/dataloader/readers/xml_reader.py
r235f514 rfafe52a 18 18 from lxml import etree 19 19 from lxml.builder import E 20 from sas.sascalc.dataloader.file_reader_base_class import FileReader 20 21 21 22 logger = logging.getLogger(__name__) … … 23 24 PARSER = etree.ETCompatXMLParser(remove_comments=True, remove_pis=False) 24 25 25 class XMLreader( ):26 class XMLreader(FileReader): 26 27 """ 27 28 Generic XML read and write class. Mostly helper functions. … … 74 75 except etree.XMLSyntaxError as xml_error: 75 76 logger.info(xml_error) 77 raise xml_error 76 78 except Exception: 77 79 self.xml = None … … 91 93 except etree.XMLSyntaxError as xml_error: 92 94 logger.info(xml_error) 93 except Exception: 95 raise xml_error 96 except Exception as exc: 94 97 self.xml = None 95 98 self.xmldoc = None 96 99 self.xmlroot = None 100 raise exc 97 101 98 102 def set_schema(self, schema): … … 206 210 Create a unique key value for any dictionary to prevent overwriting 207 211 Recurses until a unique key value is found. 208 212 209 213 :param dictionary: A dictionary with any number of entries 210 214 :param name: The index of the item to be added to dictionary … … 222 226 Create an element tree for processing from an etree element 223 227 224 :param root: etree Element(s) 228 :param root: etree Element(s) 225 229 """ 226 230 return etree.ElementTree(root)
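xml_reader's parse methods now log and re-raise `etree.XMLSyntaxError` (and other exceptions) instead of quietly leaving `xmlroot` as None, so callers such as the CanSAS reader can convert them into a `FileContentsException` with a useful message. A minimal sketch of the new behavior, with an illustrative helper name:

```python
import logging
from lxml import etree

logger = logging.getLogger(__name__)
PARSER = etree.ETCompatXMLParser(remove_comments=True, remove_pis=False)

def load_xml_root(path):
    """Parse an XML file; syntax errors are logged and re-raised instead
    of being swallowed (matching the updated set_xml_file)."""
    try:
        doc = etree.parse(path, parser=PARSER)
    except etree.XMLSyntaxError as xml_error:
        logger.info(xml_error)
        raise  # propagate so the caller can raise FileContentsException
    return doc.getroot()
```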