Changes in / [de99a5f0:ec65dc81] in sasview
Files:
- 19 deleted
- 4 edited
run.py
r168d359 r64ca561

      def import_package(modname, path):
          """Import a package into a particular point in the python namespace"""
  -       logger.debug("Dynamicly importing: %s", path)
          mod = imp.load_source(modname, abspath(joinpath(path,'__init__.py')))
          sys.modules[modname] = mod
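For context, `import_package` loads a package from an explicit path and registers it in `sys.modules`; the change above only drops the debug logging call. A minimal sketch of the same pattern follows, where the example call, the `return` statement, and the sample path are illustrative assumptions rather than part of the changeset:

```python
# Sketch only: mirrors the import_package pattern shown in run.py above.
import imp  # note: "imp" is deprecated in Python 3 and removed in 3.12
import sys
from os.path import abspath, join as joinpath

def import_package(modname, path):
    """Import a package into a particular point in the python namespace."""
    mod = imp.load_source(modname, abspath(joinpath(path, '__init__.py')))
    sys.modules[modname] = mod
    return mod  # returning the module is an assumption made for this example

# e.g. make ./src/sas importable as the top-level package "sas" (hypothetical path):
# sas = import_package('sas', 'src/sas')
```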
src/sas/sascalc/dataloader/manipulations.py
r168d359 r959eb01 1 from __future__ import division2 1 """ 3 2 Data manipulations for 2D data sets. 4 3 Using the meta data information, various types of averaging 5 4 are performed in Q-space 6 7 To test this module use:8 ```9 cd test10 PYTHONPATH=../src/ python2 -m sasdataloader.test.utest_averaging DataInfoTests.test_sectorphi_quarter11 ```12 5 """ 13 6 ##################################################################### 14 # 15 # 16 # 17 # 18 # 7 #This software was developed by the University of Tennessee as part of the 8 #Distributed Data Analysis of Neutron Scattering Experiments (DANSE) 9 #project funded by the US National Science Foundation. 10 #See the license text in license.txt 11 #copyright 2008, University of Tennessee 19 12 ###################################################################### 20 13 21 22 # TODO: copy the meta data from the 2D object to the resulting 1D object 14 #TODO: copy the meta data from the 2D object to the resulting 1D object 23 15 import math 24 import numpy as np 25 import sys 26 16 import numpy 17 27 18 #from data_info import plottable_2D 28 19 from data_info import Data1D … … 79 70 return phi_out 80 71 72 73 def reader2D_converter(data2d=None): 74 """ 75 convert old 2d format opened by IhorReader or danse_reader 76 to new Data2D format 77 78 :param data2d: 2d array of Data2D object 79 :return: 1d arrays of Data2D object 80 81 """ 82 if data2d.data is None or data2d.x_bins is None or data2d.y_bins is None: 83 raise ValueError, "Can't convert this data: data=None..." 84 new_x = numpy.tile(data2d.x_bins, (len(data2d.y_bins), 1)) 85 new_y = numpy.tile(data2d.y_bins, (len(data2d.x_bins), 1)) 86 new_y = new_y.swapaxes(0, 1) 87 88 new_data = data2d.data.flatten() 89 qx_data = new_x.flatten() 90 qy_data = new_y.flatten() 91 q_data = numpy.sqrt(qx_data * qx_data + qy_data * qy_data) 92 if data2d.err_data is None or numpy.any(data2d.err_data <= 0): 93 new_err_data = numpy.sqrt(numpy.abs(new_data)) 94 else: 95 new_err_data = data2d.err_data.flatten() 96 mask = numpy.ones(len(new_data), dtype=bool) 97 98 #TODO: make sense of the following two lines... 99 #from sas.sascalc.dataloader.data_info import Data2D 100 #output = Data2D() 101 output = data2d 102 output.data = new_data 103 output.err_data = new_err_data 104 output.qx_data = qx_data 105 output.qy_data = qy_data 106 output.q_data = q_data 107 output.mask = mask 108 109 return output 110 111 112 class _Slab(object): 113 """ 114 Compute average I(Q) for a region of interest 115 """ 116 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, 117 y_max=0.0, bin_width=0.001): 118 # Minimum Qx value [A-1] 119 self.x_min = x_min 120 # Maximum Qx value [A-1] 121 self.x_max = x_max 122 # Minimum Qy value [A-1] 123 self.y_min = y_min 124 # Maximum Qy value [A-1] 125 self.y_max = y_max 126 # Bin width (step size) [A-1] 127 self.bin_width = bin_width 128 # If True, I(|Q|) will be return, otherwise, 129 # negative q-values are allowed 130 self.fold = False 131 132 def __call__(self, data2D): 133 return NotImplemented 134 135 def _avg(self, data2D, maj): 136 """ 137 Compute average I(Q_maj) for a region of interest. 138 The major axis is defined as the axis of Q_maj. 139 The minor axis is the axis that we average over. 
140 141 :param data2D: Data2D object 142 :param maj_min: min value on the major axis 143 :return: Data1D object 144 """ 145 if len(data2D.detector) > 1: 146 msg = "_Slab._avg: invalid number of " 147 msg += " detectors: %g" % len(data2D.detector) 148 raise RuntimeError, msg 149 150 # Get data 151 data = data2D.data[numpy.isfinite(data2D.data)] 152 err_data = data2D.err_data[numpy.isfinite(data2D.data)] 153 qx_data = data2D.qx_data[numpy.isfinite(data2D.data)] 154 qy_data = data2D.qy_data[numpy.isfinite(data2D.data)] 155 156 # Build array of Q intervals 157 if maj == 'x': 158 if self.fold: 159 x_min = 0 160 else: 161 x_min = self.x_min 162 nbins = int(math.ceil((self.x_max - x_min) / self.bin_width)) 163 elif maj == 'y': 164 if self.fold: 165 y_min = 0 166 else: 167 y_min = self.y_min 168 nbins = int(math.ceil((self.y_max - y_min) / self.bin_width)) 169 else: 170 raise RuntimeError, "_Slab._avg: unrecognized axis %s" % str(maj) 171 172 x = numpy.zeros(nbins) 173 y = numpy.zeros(nbins) 174 err_y = numpy.zeros(nbins) 175 y_counts = numpy.zeros(nbins) 176 177 # Average pixelsize in q space 178 for npts in range(len(data)): 179 # default frac 180 frac_x = 0 181 frac_y = 0 182 # get ROI 183 if self.x_min <= qx_data[npts] and self.x_max > qx_data[npts]: 184 frac_x = 1 185 if self.y_min <= qy_data[npts] and self.y_max > qy_data[npts]: 186 frac_y = 1 187 frac = frac_x * frac_y 188 189 if frac == 0: 190 continue 191 # binning: find axis of q 192 if maj == 'x': 193 q_value = qx_data[npts] 194 min_value = x_min 195 if maj == 'y': 196 q_value = qy_data[npts] 197 min_value = y_min 198 if self.fold and q_value < 0: 199 q_value = -q_value 200 # bin 201 i_q = int(math.ceil((q_value - min_value) / self.bin_width)) - 1 202 203 # skip outside of max bins 204 if i_q < 0 or i_q >= nbins: 205 continue 206 207 #TODO: find better definition of x[i_q] based on q_data 208 # min_value + (i_q + 1) * self.bin_width / 2.0 209 x[i_q] += frac * q_value 210 y[i_q] += frac * data[npts] 211 212 if err_data == None or err_data[npts] == 0.0: 213 if data[npts] < 0: 214 data[npts] = -data[npts] 215 err_y[i_q] += frac * frac * data[npts] 216 else: 217 err_y[i_q] += frac * frac * err_data[npts] * err_data[npts] 218 y_counts[i_q] += frac 219 220 # Average the sums 221 for n in range(nbins): 222 err_y[n] = math.sqrt(err_y[n]) 223 224 err_y = err_y / y_counts 225 y = y / y_counts 226 x = x / y_counts 227 idx = (numpy.isfinite(y) & numpy.isfinite(x)) 228 229 if not idx.any(): 230 msg = "Average Error: No points inside ROI to average..." 231 raise ValueError, msg 232 return Data1D(x=x[idx], y=y[idx], dy=err_y[idx]) 233 234 235 class SlabY(_Slab): 236 """ 237 Compute average I(Qy) for a region of interest 238 """ 239 def __call__(self, data2D): 240 """ 241 Compute average I(Qy) for a region of interest 242 243 :param data2D: Data2D object 244 :return: Data1D object 245 """ 246 return self._avg(data2D, 'y') 247 248 249 class SlabX(_Slab): 250 """ 251 Compute average I(Qx) for a region of interest 252 """ 253 def __call__(self, data2D): 254 """ 255 Compute average I(Qx) for a region of interest 256 :param data2D: Data2D object 257 :return: Data1D object 258 """ 259 return self._avg(data2D, 'x') 260 261 262 class Boxsum(object): 263 """ 264 Perform the sum of counts in a 2D region of interest. 
265 """ 266 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): 267 # Minimum Qx value [A-1] 268 self.x_min = x_min 269 # Maximum Qx value [A-1] 270 self.x_max = x_max 271 # Minimum Qy value [A-1] 272 self.y_min = y_min 273 # Maximum Qy value [A-1] 274 self.y_max = y_max 275 276 def __call__(self, data2D): 277 """ 278 Perform the sum in the region of interest 279 280 :param data2D: Data2D object 281 :return: number of counts, error on number of counts, 282 number of points summed 283 """ 284 y, err_y, y_counts = self._sum(data2D) 285 286 # Average the sums 287 counts = 0 if y_counts == 0 else y 288 error = 0 if y_counts == 0 else math.sqrt(err_y) 289 290 # Added y_counts to return, SMK & PDB, 04/03/2013 291 return counts, error, y_counts 292 293 def _sum(self, data2D): 294 """ 295 Perform the sum in the region of interest 296 297 :param data2D: Data2D object 298 :return: number of counts, 299 error on number of counts, number of entries summed 300 """ 301 if len(data2D.detector) > 1: 302 msg = "Circular averaging: invalid number " 303 msg += "of detectors: %g" % len(data2D.detector) 304 raise RuntimeError, msg 305 # Get data 306 data = data2D.data[numpy.isfinite(data2D.data)] 307 err_data = data2D.err_data[numpy.isfinite(data2D.data)] 308 qx_data = data2D.qx_data[numpy.isfinite(data2D.data)] 309 qy_data = data2D.qy_data[numpy.isfinite(data2D.data)] 310 311 y = 0.0 312 err_y = 0.0 313 y_counts = 0.0 314 315 # Average pixelsize in q space 316 for npts in range(len(data)): 317 # default frac 318 frac_x = 0 319 frac_y = 0 320 321 # get min and max at each points 322 qx = qx_data[npts] 323 qy = qy_data[npts] 324 325 # get the ROI 326 if self.x_min <= qx and self.x_max > qx: 327 frac_x = 1 328 if self.y_min <= qy and self.y_max > qy: 329 frac_y = 1 330 #Find the fraction along each directions 331 frac = frac_x * frac_y 332 if frac == 0: 333 continue 334 y += frac * data[npts] 335 if err_data == None or err_data[npts] == 0.0: 336 if data[npts] < 0: 337 data[npts] = -data[npts] 338 err_y += frac * frac * data[npts] 339 else: 340 err_y += frac * frac * err_data[npts] * err_data[npts] 341 y_counts += frac 342 return y, err_y, y_counts 343 344 345 class Boxavg(Boxsum): 346 """ 347 Perform the average of counts in a 2D region of interest. 348 """ 349 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): 350 super(Boxavg, self).__init__(x_min=x_min, x_max=x_max, 351 y_min=y_min, y_max=y_max) 352 353 def __call__(self, data2D): 354 """ 355 Perform the sum in the region of interest 356 357 :param data2D: Data2D object 358 :return: average counts, error on average counts 359 360 """ 361 y, err_y, y_counts = self._sum(data2D) 362 363 # Average the sums 364 counts = 0 if y_counts == 0 else y / y_counts 365 error = 0 if y_counts == 0 else math.sqrt(err_y) / y_counts 366 367 return counts, error 368 369 81 370 def get_pixel_fraction_square(x, xmin, xmax): 82 371 """ … … 101 390 return 1.0 102 391 103 def get_intercept(q, q_0, q_1): 104 """ 105 Returns the fraction of the side at which the 106 q-value intercept the pixel, None otherwise. 107 The values returned is the fraction ON THE SIDE 108 OF THE LOWEST Q. 
:: 109 110 A B 111 +-----------+--------+ <--- pixel size 112 0 1 113 Q_0 -------- Q ----- Q_1 <--- equivalent Q range 114 if Q_1 > Q_0, A is returned 115 if Q_1 < Q_0, B is returned 116 if Q is outside the range of [Q_0, Q_1], None is returned 117 118 """ 119 if q_1 > q_0: 120 if q > q_0 and q <= q_1: 121 return (q - q_0) / (q_1 - q_0) 122 else: 123 if q > q_1 and q <= q_0: 124 return (q - q_1) / (q_0 - q_1) 125 return None 392 393 class CircularAverage(object): 394 """ 395 Perform circular averaging on 2D data 396 397 The data returned is the distribution of counts 398 as a function of Q 399 """ 400 def __init__(self, r_min=0.0, r_max=0.0, bin_width=0.0005): 401 # Minimum radius included in the average [A-1] 402 self.r_min = r_min 403 # Maximum radius included in the average [A-1] 404 self.r_max = r_max 405 # Bin width (step size) [A-1] 406 self.bin_width = bin_width 407 408 def __call__(self, data2D, ismask=False): 409 """ 410 Perform circular averaging on the data 411 412 :param data2D: Data2D object 413 :return: Data1D object 414 """ 415 # Get data W/ finite values 416 data = data2D.data[numpy.isfinite(data2D.data)] 417 q_data = data2D.q_data[numpy.isfinite(data2D.data)] 418 err_data = data2D.err_data[numpy.isfinite(data2D.data)] 419 mask_data = data2D.mask[numpy.isfinite(data2D.data)] 420 421 dq_data = None 422 423 # Get the dq for resolution averaging 424 if data2D.dqx_data != None and data2D.dqy_data != None: 425 # The pinholes and det. pix contribution present 426 # in both direction of the 2D which must be subtracted when 427 # converting to 1D: dq_overlap should calculated ideally at 428 # q = 0. Note This method works on only pinhole geometry. 429 # Extrapolate dqx(r) and dqy(phi) at q = 0, and take an average. 430 z_max = max(data2D.q_data) 431 z_min = min(data2D.q_data) 432 x_max = data2D.dqx_data[data2D.q_data[z_max]] 433 x_min = data2D.dqx_data[data2D.q_data[z_min]] 434 y_max = data2D.dqy_data[data2D.q_data[z_max]] 435 y_min = data2D.dqy_data[data2D.q_data[z_min]] 436 # Find qdx at q = 0 437 dq_overlap_x = (x_min * z_max - x_max * z_min) / (z_max - z_min) 438 # when extrapolation goes wrong 439 if dq_overlap_x > min(data2D.dqx_data): 440 dq_overlap_x = min(data2D.dqx_data) 441 dq_overlap_x *= dq_overlap_x 442 # Find qdx at q = 0 443 dq_overlap_y = (y_min * z_max - y_max * z_min) / (z_max - z_min) 444 # when extrapolation goes wrong 445 if dq_overlap_y > min(data2D.dqy_data): 446 dq_overlap_y = min(data2D.dqy_data) 447 # get dq at q=0. 
448 dq_overlap_y *= dq_overlap_y 449 450 dq_overlap = numpy.sqrt((dq_overlap_x + dq_overlap_y) / 2.0) 451 # Final protection of dq 452 if dq_overlap < 0: 453 dq_overlap = y_min 454 dqx_data = data2D.dqx_data[numpy.isfinite(data2D.data)] 455 dqy_data = data2D.dqy_data[numpy.isfinite(data2D.data)] - dq_overlap 456 # def; dqx_data = dq_r dqy_data = dq_phi 457 # Convert dq 2D to 1D here 458 dqx = dqx_data * dqx_data 459 dqy = dqy_data * dqy_data 460 dq_data = numpy.add(dqx, dqy) 461 dq_data = numpy.sqrt(dq_data) 462 463 #q_data_max = numpy.max(q_data) 464 if len(data2D.q_data) == None: 465 msg = "Circular averaging: invalid q_data: %g" % data2D.q_data 466 raise RuntimeError, msg 467 468 # Build array of Q intervals 469 nbins = int(math.ceil((self.r_max - self.r_min) / self.bin_width)) 470 471 x = numpy.zeros(nbins) 472 y = numpy.zeros(nbins) 473 err_y = numpy.zeros(nbins) 474 err_x = numpy.zeros(nbins) 475 y_counts = numpy.zeros(nbins) 476 477 for npt in range(len(data)): 478 479 if ismask and not mask_data[npt]: 480 continue 481 482 frac = 0 483 484 # q-value at the pixel (j,i) 485 q_value = q_data[npt] 486 data_n = data[npt] 487 488 ## No need to calculate the frac when all data are within range 489 if self.r_min >= self.r_max: 490 raise ValueError, "Limit Error: min > max" 491 492 if self.r_min <= q_value and q_value <= self.r_max: 493 frac = 1 494 if frac == 0: 495 continue 496 i_q = int(math.floor((q_value - self.r_min) / self.bin_width)) 497 498 # Take care of the edge case at phi = 2pi. 499 if i_q == nbins: 500 i_q = nbins - 1 501 y[i_q] += frac * data_n 502 # Take dqs from data to get the q_average 503 x[i_q] += frac * q_value 504 if err_data == None or err_data[npt] == 0.0: 505 if data_n < 0: 506 data_n = -data_n 507 err_y[i_q] += frac * frac * data_n 508 else: 509 err_y[i_q] += frac * frac * err_data[npt] * err_data[npt] 510 if dq_data != None: 511 # To be consistent with dq calculation in 1d reduction, 512 # we need just the averages (not quadratures) because 513 # it should not depend on the number of the q points 514 # in the qr bins. 515 err_x[i_q] += frac * dq_data[npt] 516 else: 517 err_x = None 518 y_counts[i_q] += frac 519 520 # Average the sums 521 for n in range(nbins): 522 if err_y[n] < 0: 523 err_y[n] = -err_y[n] 524 err_y[n] = math.sqrt(err_y[n]) 525 #if err_x != None: 526 # err_x[n] = math.sqrt(err_x[n]) 527 528 err_y = err_y / y_counts 529 err_y[err_y == 0] = numpy.average(err_y) 530 y = y / y_counts 531 x = x / y_counts 532 idx = (numpy.isfinite(y)) & (numpy.isfinite(x)) 533 534 if err_x != None: 535 d_x = err_x[idx] / y_counts[idx] 536 else: 537 d_x = None 538 539 if not idx.any(): 540 msg = "Average Error: No points inside ROI to average..." 541 raise ValueError, msg 542 543 return Data1D(x=x[idx], y=y[idx], dy=err_y[idx], dx=d_x) 544 545 546 class Ring(object): 547 """ 548 Defines a ring on a 2D data set. 549 The ring is defined by r_min, r_max, and 550 the position of the center of the ring. 551 552 The data returned is the distribution of counts 553 around the ring as a function of phi. 554 555 Phi_min and phi_max should be defined between 0 and 2*pi 556 in anti-clockwise starting from the x- axis on the left-hand side 557 """ 558 #Todo: remove center. 
559 def __init__(self, r_min=0, r_max=0, center_x=0, center_y=0, nbins=36): 560 # Minimum radius 561 self.r_min = r_min 562 # Maximum radius 563 self.r_max = r_max 564 # Center of the ring in x 565 self.center_x = center_x 566 # Center of the ring in y 567 self.center_y = center_y 568 # Number of angular bins 569 self.nbins_phi = nbins 570 571 572 def __call__(self, data2D): 573 """ 574 Apply the ring to the data set. 575 Returns the angular distribution for a given q range 576 577 :param data2D: Data2D object 578 579 :return: Data1D object 580 """ 581 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 582 raise RuntimeError, "Ring averaging only take plottable_2D objects" 583 584 Pi = math.pi 585 586 # Get data 587 data = data2D.data[numpy.isfinite(data2D.data)] 588 q_data = data2D.q_data[numpy.isfinite(data2D.data)] 589 err_data = data2D.err_data[numpy.isfinite(data2D.data)] 590 qx_data = data2D.qx_data[numpy.isfinite(data2D.data)] 591 qy_data = data2D.qy_data[numpy.isfinite(data2D.data)] 592 593 # Set space for 1d outputs 594 phi_bins = numpy.zeros(self.nbins_phi) 595 phi_counts = numpy.zeros(self.nbins_phi) 596 phi_values = numpy.zeros(self.nbins_phi) 597 phi_err = numpy.zeros(self.nbins_phi) 598 599 # Shift to apply to calculated phi values in order 600 # to center first bin at zero 601 phi_shift = Pi / self.nbins_phi 602 603 for npt in range(len(data)): 604 frac = 0 605 # q-value at the point (npt) 606 q_value = q_data[npt] 607 data_n = data[npt] 608 609 # phi-value at the point (npt) 610 phi_value = math.atan2(qy_data[npt], qx_data[npt]) + Pi 611 612 if self.r_min <= q_value and q_value <= self.r_max: 613 frac = 1 614 if frac == 0: 615 continue 616 # binning 617 i_phi = int(math.floor((self.nbins_phi) * \ 618 (phi_value + phi_shift) / (2 * Pi))) 619 620 # Take care of the edge case at phi = 2pi. 621 if i_phi >= self.nbins_phi: 622 i_phi = 0 623 phi_bins[i_phi] += frac * data[npt] 624 625 if err_data == None or err_data[npt] == 0.0: 626 if data_n < 0: 627 data_n = -data_n 628 phi_err[i_phi] += frac * frac * math.fabs(data_n) 629 else: 630 phi_err[i_phi] += frac * frac * err_data[npt] * err_data[npt] 631 phi_counts[i_phi] += frac 632 633 for i in range(self.nbins_phi): 634 phi_bins[i] = phi_bins[i] / phi_counts[i] 635 phi_err[i] = math.sqrt(phi_err[i]) / phi_counts[i] 636 phi_values[i] = 2.0 * math.pi / self.nbins_phi * (1.0 * i) 637 638 idx = (numpy.isfinite(phi_bins)) 639 640 if not idx.any(): 641 msg = "Average Error: No points inside ROI to average..." 642 raise ValueError, msg 643 #elif len(phi_bins[idx])!= self.nbins_phi: 644 # print "resulted",self.nbins_phi- len(phi_bins[idx]) 645 #,"empty bin(s) due to tight binning..." 646 return Data1D(x=phi_values[idx], y=phi_bins[idx], dy=phi_err[idx]) 647 126 648 127 649 def get_pixel_fraction(qmax, q_00, q_01, q_10, q_11): … … 188 710 return frac_max 189 711 190 def get_dq_data(data2D): 191 ''' 192 Get the dq for resolution averaging 193 The pinholes and det. pix contribution present 194 in both direction of the 2D which must be subtracted when 195 converting to 1D: dq_overlap should calculated ideally at 196 q = 0. Note This method works on only pinhole geometry. 197 Extrapolate dqx(r) and dqy(phi) at q = 0, and take an average. 
198 ''' 199 z_max = max(data2D.q_data) 200 z_min = min(data2D.q_data) 201 x_max = data2D.dqx_data[data2D.q_data[z_max]] 202 x_min = data2D.dqx_data[data2D.q_data[z_min]] 203 y_max = data2D.dqy_data[data2D.q_data[z_max]] 204 y_min = data2D.dqy_data[data2D.q_data[z_min]] 205 # Find qdx at q = 0 206 dq_overlap_x = (x_min * z_max - x_max * z_min) / (z_max - z_min) 207 # when extrapolation goes wrong 208 if dq_overlap_x > min(data2D.dqx_data): 209 dq_overlap_x = min(data2D.dqx_data) 210 dq_overlap_x *= dq_overlap_x 211 # Find qdx at q = 0 212 dq_overlap_y = (y_min * z_max - y_max * z_min) / (z_max - z_min) 213 # when extrapolation goes wrong 214 if dq_overlap_y > min(data2D.dqy_data): 215 dq_overlap_y = min(data2D.dqy_data) 216 # get dq at q=0. 217 dq_overlap_y *= dq_overlap_y 218 219 dq_overlap = np.sqrt((dq_overlap_x + dq_overlap_y) / 2.0) 220 # Final protection of dq 221 if dq_overlap < 0: 222 dq_overlap = y_min 223 dqx_data = data2D.dqx_data[np.isfinite(data2D.data)] 224 dqy_data = data2D.dqy_data[np.isfinite( 225 data2D.data)] - dq_overlap 226 # def; dqx_data = dq_r dqy_data = dq_phi 227 # Convert dq 2D to 1D here 228 dq_data = np.sqrt(dqx_data**2 + dqx_data**2) 229 return dq_data 230 231 ################################################################################ 232 233 def reader2D_converter(data2d=None): 234 """ 235 convert old 2d format opened by IhorReader or danse_reader 236 to new Data2D format 237 This is mainly used by the Readers 238 239 :param data2d: 2d array of Data2D object 240 :return: 1d arrays of Data2D object 241 242 """ 243 if data2d.data is None or data2d.x_bins is None or data2d.y_bins is None: 244 raise ValueError("Can't convert this data: data=None...") 245 new_x = np.tile(data2d.x_bins, (len(data2d.y_bins), 1)) 246 new_y = np.tile(data2d.y_bins, (len(data2d.x_bins), 1)) 247 new_y = new_y.swapaxes(0, 1) 248 249 new_data = data2d.data.flatten() 250 qx_data = new_x.flatten() 251 qy_data = new_y.flatten() 252 q_data = np.sqrt(qx_data * qx_data + qy_data * qy_data) 253 if data2d.err_data is None or np.any(data2d.err_data <= 0): 254 new_err_data = np.sqrt(np.abs(new_data)) 712 713 def get_intercept(q, q_0, q_1): 714 """ 715 Returns the fraction of the side at which the 716 q-value intercept the pixel, None otherwise. 717 The values returned is the fraction ON THE SIDE 718 OF THE LOWEST Q. :: 719 720 A B 721 +-----------+--------+ <--- pixel size 722 0 1 723 Q_0 -------- Q ----- Q_1 <--- equivalent Q range 724 if Q_1 > Q_0, A is returned 725 if Q_1 < Q_0, B is returned 726 if Q is outside the range of [Q_0, Q_1], None is returned 727 728 """ 729 if q_1 > q_0: 730 if q > q_0 and q <= q_1: 731 return (q - q_0) / (q_1 - q_0) 255 732 else: 256 new_err_data = data2d.err_data.flatten() 257 mask = np.ones(len(new_data), dtype=bool) 258 259 # TODO: make sense of the following two lines... 
260 #from sas.sascalc.dataloader.data_info import Data2D 261 #output = Data2D() 262 output = data2d 263 output.data = new_data 264 output.err_data = new_err_data 265 output.qx_data = qx_data 266 output.qy_data = qy_data 267 output.q_data = q_data 268 output.mask = mask 269 270 return output 271 272 ################################################################################ 273 274 class Binning(object): 275 ''' 276 This class just creates a binning object 277 either linear or log 278 ''' 279 280 def __init__(self, min_value, max_value, n_bins, base=None): 281 ''' 282 if base is None: Linear binning 283 ''' 284 self.min = min_value if min_value > 0 else 0.0001 285 self.max = max_value 286 self.n_bins = n_bins 287 self.base = base 288 289 def get_bin_index(self, value): 290 ''' 291 The general formula logarithm binning is: 292 bin = floor(N * (log(x) - log(min)) / (log(max) - log(min))) 293 ''' 294 if self.base: 295 temp_x = self.n_bins * (math.log(value, self.base) - math.log(self.min, self.base)) 296 temp_y = math.log(self.max, self.base) - math.log(self.min, self.base) 297 else: 298 temp_x = self.n_bins * (value - self.min) 299 temp_y = self.max - self.min 300 # Bin index calulation 301 return int(math.floor(temp_x / temp_y)) 302 303 304 ################################################################################ 305 306 class _Slab(object): 307 """ 308 Compute average I(Q) for a region of interest 309 """ 310 311 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, 312 y_max=0.0, bin_width=0.001): 313 # Minimum Qx value [A-1] 314 self.x_min = x_min 315 # Maximum Qx value [A-1] 316 self.x_max = x_max 317 # Minimum Qy value [A-1] 318 self.y_min = y_min 319 # Maximum Qy value [A-1] 320 self.y_max = y_max 321 # Bin width (step size) [A-1] 322 self.bin_width = bin_width 323 # If True, I(|Q|) will be return, otherwise, 324 # negative q-values are allowed 325 self.fold = False 326 327 def __call__(self, data2D): 328 return NotImplemented 329 330 def _avg(self, data2D, maj): 331 """ 332 Compute average I(Q_maj) for a region of interest. 333 The major axis is defined as the axis of Q_maj. 334 The minor axis is the axis that we average over. 
335 336 :param data2D: Data2D object 337 :param maj_min: min value on the major axis 338 :return: Data1D object 339 """ 340 if len(data2D.detector) > 1: 341 msg = "_Slab._avg: invalid number of " 342 msg += " detectors: %g" % len(data2D.detector) 343 raise RuntimeError(msg) 344 345 # Get data 346 data = data2D.data[np.isfinite(data2D.data)] 347 err_data = data2D.err_data[np.isfinite(data2D.data)] 348 qx_data = data2D.qx_data[np.isfinite(data2D.data)] 349 qy_data = data2D.qy_data[np.isfinite(data2D.data)] 350 351 # Build array of Q intervals 352 if maj == 'x': 353 if self.fold: 354 x_min = 0 355 else: 356 x_min = self.x_min 357 nbins = int(math.ceil((self.x_max - x_min) / self.bin_width)) 358 elif maj == 'y': 359 if self.fold: 360 y_min = 0 361 else: 362 y_min = self.y_min 363 nbins = int(math.ceil((self.y_max - y_min) / self.bin_width)) 364 else: 365 raise RuntimeError("_Slab._avg: unrecognized axis %s" % str(maj)) 366 367 x = np.zeros(nbins) 368 y = np.zeros(nbins) 369 err_y = np.zeros(nbins) 370 y_counts = np.zeros(nbins) 371 372 # Average pixelsize in q space 373 for npts in range(len(data)): 374 # default frac 375 frac_x = 0 376 frac_y = 0 377 # get ROI 378 if self.x_min <= qx_data[npts] and self.x_max > qx_data[npts]: 379 frac_x = 1 380 if self.y_min <= qy_data[npts] and self.y_max > qy_data[npts]: 381 frac_y = 1 382 frac = frac_x * frac_y 383 384 if frac == 0: 385 continue 386 # binning: find axis of q 387 if maj == 'x': 388 q_value = qx_data[npts] 389 min_value = x_min 390 if maj == 'y': 391 q_value = qy_data[npts] 392 min_value = y_min 393 if self.fold and q_value < 0: 394 q_value = -q_value 395 # bin 396 i_q = int(math.ceil((q_value - min_value) / self.bin_width)) - 1 397 398 # skip outside of max bins 399 if i_q < 0 or i_q >= nbins: 400 continue 401 402 # TODO: find better definition of x[i_q] based on q_data 403 # min_value + (i_q + 1) * self.bin_width / 2.0 404 x[i_q] += frac * q_value 405 y[i_q] += frac * data[npts] 406 407 if err_data is None or err_data[npts] == 0.0: 408 if data[npts] < 0: 409 data[npts] = -data[npts] 410 err_y[i_q] += frac * frac * data[npts] 411 else: 412 err_y[i_q] += frac * frac * err_data[npts] * err_data[npts] 413 y_counts[i_q] += frac 414 415 # Average the sums 416 for n in range(nbins): 417 err_y[n] = math.sqrt(err_y[n]) 418 419 err_y = err_y / y_counts 420 y = y / y_counts 421 x = x / y_counts 422 idx = (np.isfinite(y) & np.isfinite(x)) 423 424 if not idx.any(): 425 msg = "Average Error: No points inside ROI to average..." 426 raise ValueError(msg) 427 return Data1D(x=x[idx], y=y[idx], dy=err_y[idx]) 428 429 430 class SlabY(_Slab): 431 """ 432 Compute average I(Qy) for a region of interest 433 """ 434 435 def __call__(self, data2D): 436 """ 437 Compute average I(Qy) for a region of interest 438 439 :param data2D: Data2D object 440 :return: Data1D object 441 """ 442 return self._avg(data2D, 'y') 443 444 445 class SlabX(_Slab): 446 """ 447 Compute average I(Qx) for a region of interest 448 """ 449 450 def __call__(self, data2D): 451 """ 452 Compute average I(Qx) for a region of interest 453 :param data2D: Data2D object 454 :return: Data1D object 455 """ 456 return self._avg(data2D, 'x') 457 458 ################################################################################ 459 460 class Boxsum(object): 461 """ 462 Perform the sum of counts in a 2D region of interest. 
463 """ 464 465 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): 466 # Minimum Qx value [A-1] 467 self.x_min = x_min 468 # Maximum Qx value [A-1] 469 self.x_max = x_max 470 # Minimum Qy value [A-1] 471 self.y_min = y_min 472 # Maximum Qy value [A-1] 473 self.y_max = y_max 474 475 def __call__(self, data2D): 476 """ 477 Perform the sum in the region of interest 478 479 :param data2D: Data2D object 480 :return: number of counts, error on number of counts, 481 number of points summed 482 """ 483 y, err_y, y_counts = self._sum(data2D) 484 485 # Average the sums 486 counts = 0 if y_counts == 0 else y 487 error = 0 if y_counts == 0 else math.sqrt(err_y) 488 489 # Added y_counts to return, SMK & PDB, 04/03/2013 490 return counts, error, y_counts 491 492 def _sum(self, data2D): 493 """ 494 Perform the sum in the region of interest 495 496 :param data2D: Data2D object 497 :return: number of counts, 498 error on number of counts, number of entries summed 499 """ 500 if len(data2D.detector) > 1: 501 msg = "Circular averaging: invalid number " 502 msg += "of detectors: %g" % len(data2D.detector) 503 raise RuntimeError(msg) 504 # Get data 505 data = data2D.data[np.isfinite(data2D.data)] 506 err_data = data2D.err_data[np.isfinite(data2D.data)] 507 qx_data = data2D.qx_data[np.isfinite(data2D.data)] 508 qy_data = data2D.qy_data[np.isfinite(data2D.data)] 509 510 y = 0.0 511 err_y = 0.0 512 y_counts = 0.0 513 514 # Average pixelsize in q space 515 for npts in range(len(data)): 516 # default frac 517 frac_x = 0 518 frac_y = 0 519 520 # get min and max at each points 521 qx = qx_data[npts] 522 qy = qy_data[npts] 523 524 # get the ROI 525 if self.x_min <= qx and self.x_max > qx: 526 frac_x = 1 527 if self.y_min <= qy and self.y_max > qy: 528 frac_y = 1 529 # Find the fraction along each directions 530 frac = frac_x * frac_y 531 if frac == 0: 532 continue 533 y += frac * data[npts] 534 if err_data is None or err_data[npts] == 0.0: 535 if data[npts] < 0: 536 data[npts] = -data[npts] 537 err_y += frac * frac * data[npts] 538 else: 539 err_y += frac * frac * err_data[npts] * err_data[npts] 540 y_counts += frac 541 return y, err_y, y_counts 542 543 544 class Boxavg(Boxsum): 545 """ 546 Perform the average of counts in a 2D region of interest. 
547 """ 548 549 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): 550 super(Boxavg, self).__init__(x_min=x_min, x_max=x_max, 551 y_min=y_min, y_max=y_max) 552 553 def __call__(self, data2D): 554 """ 555 Perform the sum in the region of interest 556 557 :param data2D: Data2D object 558 :return: average counts, error on average counts 559 560 """ 561 y, err_y, y_counts = self._sum(data2D) 562 563 # Average the sums 564 counts = 0 if y_counts == 0 else y / y_counts 565 error = 0 if y_counts == 0 else math.sqrt(err_y) / y_counts 566 567 return counts, error 568 569 ################################################################################ 570 571 class CircularAverage(object): 572 """ 573 Perform circular averaging on 2D data 574 575 The data returned is the distribution of counts 576 as a function of Q 577 """ 578 579 def __init__(self, r_min=0.0, r_max=0.0, bin_width=0.0005): 580 # Minimum radius included in the average [A-1] 581 self.r_min = r_min 582 # Maximum radius included in the average [A-1] 583 self.r_max = r_max 584 # Bin width (step size) [A-1] 585 self.bin_width = bin_width 586 587 def __call__(self, data2D, ismask=False): 588 """ 589 Perform circular averaging on the data 590 591 :param data2D: Data2D object 592 :return: Data1D object 593 """ 594 # Get data W/ finite values 595 data = data2D.data[np.isfinite(data2D.data)] 596 q_data = data2D.q_data[np.isfinite(data2D.data)] 597 err_data = data2D.err_data[np.isfinite(data2D.data)] 598 mask_data = data2D.mask[np.isfinite(data2D.data)] 599 600 dq_data = None 601 if data2D.dqx_data is not None and data2D.dqy_data is not None: 602 dq_data = get_dq_data(data2D) 603 604 #q_data_max = np.max(q_data) 605 if len(data2D.q_data) is None: 606 msg = "Circular averaging: invalid q_data: %g" % data2D.q_data 607 raise RuntimeError(msg) 608 609 # Build array of Q intervals 610 nbins = int(math.ceil((self.r_max - self.r_min) / self.bin_width)) 611 612 x = np.zeros(nbins) 613 y = np.zeros(nbins) 614 err_y = np.zeros(nbins) 615 err_x = np.zeros(nbins) 616 y_counts = np.zeros(nbins) 617 618 for npt in range(len(data)): 619 620 if ismask and not mask_data[npt]: 621 continue 622 623 frac = 0 624 625 # q-value at the pixel (j,i) 626 q_value = q_data[npt] 627 data_n = data[npt] 628 629 # No need to calculate the frac when all data are within range 630 if self.r_min >= self.r_max: 631 raise ValueError("Limit Error: min > max") 632 633 if self.r_min <= q_value and q_value <= self.r_max: 634 frac = 1 635 if frac == 0: 636 continue 637 i_q = int(math.floor((q_value - self.r_min) / self.bin_width)) 638 639 # Take care of the edge case at phi = 2pi. 640 if i_q == nbins: 641 i_q = nbins - 1 642 y[i_q] += frac * data_n 643 # Take dqs from data to get the q_average 644 x[i_q] += frac * q_value 645 if err_data is None or err_data[npt] == 0.0: 646 if data_n < 0: 647 data_n = -data_n 648 err_y[i_q] += frac * frac * data_n 649 else: 650 err_y[i_q] += frac * frac * err_data[npt] * err_data[npt] 651 if dq_data is not None: 652 # To be consistent with dq calculation in 1d reduction, 653 # we need just the averages (not quadratures) because 654 # it should not depend on the number of the q points 655 # in the qr bins. 
656 err_x[i_q] += frac * dq_data[npt] 657 else: 658 err_x = None 659 y_counts[i_q] += frac 660 661 # Average the sums 662 for n in range(nbins): 663 if err_y[n] < 0: 664 err_y[n] = -err_y[n] 665 err_y[n] = math.sqrt(err_y[n]) 666 # if err_x is not None: 667 # err_x[n] = math.sqrt(err_x[n]) 668 669 err_y = err_y / y_counts 670 err_y[err_y == 0] = np.average(err_y) 671 y = y / y_counts 672 x = x / y_counts 673 idx = (np.isfinite(y)) & (np.isfinite(x)) 674 675 if err_x is not None: 676 d_x = err_x[idx] / y_counts[idx] 677 else: 678 d_x = None 679 680 if not idx.any(): 681 msg = "Average Error: No points inside ROI to average..." 682 raise ValueError(msg) 683 684 return Data1D(x=x[idx], y=y[idx], dy=err_y[idx], dx=d_x) 685 686 ################################################################################ 687 688 class Ring(object): 689 """ 690 Defines a ring on a 2D data set. 691 The ring is defined by r_min, r_max, and 692 the position of the center of the ring. 693 694 The data returned is the distribution of counts 695 around the ring as a function of phi. 696 697 Phi_min and phi_max should be defined between 0 and 2*pi 698 in anti-clockwise starting from the x- axis on the left-hand side 699 """ 700 # Todo: remove center. 701 702 def __init__(self, r_min=0, r_max=0, center_x=0, center_y=0, nbins=36): 703 # Minimum radius 704 self.r_min = r_min 705 # Maximum radius 706 self.r_max = r_max 707 # Center of the ring in x 708 self.center_x = center_x 709 # Center of the ring in y 710 self.center_y = center_y 711 # Number of angular bins 712 self.nbins_phi = nbins 713 714 def __call__(self, data2D): 715 """ 716 Apply the ring to the data set. 717 Returns the angular distribution for a given q range 718 719 :param data2D: Data2D object 720 721 :return: Data1D object 722 """ 723 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 724 raise RuntimeError("Ring averaging only take plottable_2D objects") 725 726 Pi = math.pi 727 728 # Get data 729 data = data2D.data[np.isfinite(data2D.data)] 730 q_data = data2D.q_data[np.isfinite(data2D.data)] 731 err_data = data2D.err_data[np.isfinite(data2D.data)] 732 qx_data = data2D.qx_data[np.isfinite(data2D.data)] 733 qy_data = data2D.qy_data[np.isfinite(data2D.data)] 734 735 # Set space for 1d outputs 736 phi_bins = np.zeros(self.nbins_phi) 737 phi_counts = np.zeros(self.nbins_phi) 738 phi_values = np.zeros(self.nbins_phi) 739 phi_err = np.zeros(self.nbins_phi) 740 741 # Shift to apply to calculated phi values in order 742 # to center first bin at zero 743 phi_shift = Pi / self.nbins_phi 744 745 for npt in range(len(data)): 746 frac = 0 747 # q-value at the point (npt) 748 q_value = q_data[npt] 749 data_n = data[npt] 750 751 # phi-value at the point (npt) 752 phi_value = math.atan2(qy_data[npt], qx_data[npt]) + Pi 753 754 if self.r_min <= q_value and q_value <= self.r_max: 755 frac = 1 756 if frac == 0: 757 continue 758 # binning 759 i_phi = int(math.floor((self.nbins_phi) * 760 (phi_value + phi_shift) / (2 * Pi))) 761 762 # Take care of the edge case at phi = 2pi. 
763 if i_phi >= self.nbins_phi: 764 i_phi = 0 765 phi_bins[i_phi] += frac * data[npt] 766 767 if err_data is None or err_data[npt] == 0.0: 768 if data_n < 0: 769 data_n = -data_n 770 phi_err[i_phi] += frac * frac * math.fabs(data_n) 771 else: 772 phi_err[i_phi] += frac * frac * err_data[npt] * err_data[npt] 773 phi_counts[i_phi] += frac 774 775 for i in range(self.nbins_phi): 776 phi_bins[i] = phi_bins[i] / phi_counts[i] 777 phi_err[i] = math.sqrt(phi_err[i]) / phi_counts[i] 778 phi_values[i] = 2.0 * math.pi / self.nbins_phi * (1.0 * i) 779 780 idx = (np.isfinite(phi_bins)) 781 782 if not idx.any(): 783 msg = "Average Error: No points inside ROI to average..." 784 raise ValueError(msg) 785 # elif len(phi_bins[idx])!= self.nbins_phi: 786 # print "resulted",self.nbins_phi- len(phi_bins[idx]) 787 #,"empty bin(s) due to tight binning..." 788 return Data1D(x=phi_values[idx], y=phi_bins[idx], dy=phi_err[idx]) 789 790 ################################################################################ 733 if q > q_1 and q <= q_0: 734 return (q - q_1) / (q_0 - q_1) 735 return None 736 791 737 792 738 class _Sector(object): … … 802 748 starting from the x- axis on the left-hand side 803 749 """ 804 805 def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, nbins=20, base = None): 750 def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, nbins=20): 806 751 self.r_min = r_min 807 752 self.r_max = r_max … … 809 754 self.phi_max = phi_max 810 755 self.nbins = nbins 811 self.base = base812 756 813 757 def _agv(self, data2D, run='phi'): … … 821 765 """ 822 766 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 823 raise RuntimeError("Ring averaging only take plottable_2D objects") 767 raise RuntimeError, "Ring averaging only take plottable_2D objects" 768 Pi = math.pi 824 769 825 770 # Get the all data & info 826 data = data2D.data[np.isfinite(data2D.data)] 827 q_data = data2D.q_data[np.isfinite(data2D.data)] 828 err_data = data2D.err_data[np.isfinite(data2D.data)] 829 qx_data = data2D.qx_data[np.isfinite(data2D.data)] 830 qy_data = data2D.qy_data[np.isfinite(data2D.data)] 831 771 data = data2D.data[numpy.isfinite(data2D.data)] 772 q_data = data2D.q_data[numpy.isfinite(data2D.data)] 773 err_data = data2D.err_data[numpy.isfinite(data2D.data)] 774 qx_data = data2D.qx_data[numpy.isfinite(data2D.data)] 775 qy_data = data2D.qy_data[numpy.isfinite(data2D.data)] 832 776 dq_data = None 833 if data2D.dqx_data is not None and data2D.dqy_data is not None: 834 dq_data = get_dq_data(data2D) 835 836 # set space for 1d outputs 837 x = np.zeros(self.nbins) 838 y = np.zeros(self.nbins) 839 y_err = np.zeros(self.nbins) 840 x_err = np.zeros(self.nbins) 841 y_counts = np.zeros(self.nbins) # Cycle counts (for the mean) 777 778 # Get the dq for resolution averaging 779 if data2D.dqx_data != None and data2D.dqy_data != None: 780 # The pinholes and det. pix contribution present 781 # in both direction of the 2D which must be subtracted when 782 # converting to 1D: dq_overlap should calculated ideally at 783 # q = 0. 
784 # Extrapolate dqy(perp) at q = 0 785 z_max = max(data2D.q_data) 786 z_min = min(data2D.q_data) 787 x_max = data2D.dqx_data[data2D.q_data[z_max]] 788 x_min = data2D.dqx_data[data2D.q_data[z_min]] 789 y_max = data2D.dqy_data[data2D.q_data[z_max]] 790 y_min = data2D.dqy_data[data2D.q_data[z_min]] 791 # Find qdx at q = 0 792 dq_overlap_x = (x_min * z_max - x_max * z_min) / (z_max - z_min) 793 # when extrapolation goes wrong 794 if dq_overlap_x > min(data2D.dqx_data): 795 dq_overlap_x = min(data2D.dqx_data) 796 dq_overlap_x *= dq_overlap_x 797 # Find qdx at q = 0 798 dq_overlap_y = (y_min * z_max - y_max * z_min) / (z_max - z_min) 799 # when extrapolation goes wrong 800 if dq_overlap_y > min(data2D.dqy_data): 801 dq_overlap_y = min(data2D.dqy_data) 802 # get dq at q=0. 803 dq_overlap_y *= dq_overlap_y 804 805 dq_overlap = numpy.sqrt((dq_overlap_x + dq_overlap_y) / 2.0) 806 if dq_overlap < 0: 807 dq_overlap = y_min 808 dqx_data = data2D.dqx_data[numpy.isfinite(data2D.data)] 809 dqy_data = data2D.dqy_data[numpy.isfinite(data2D.data)] - dq_overlap 810 # def; dqx_data = dq_r dqy_data = dq_phi 811 # Convert dq 2D to 1D here 812 dqx = dqx_data * dqx_data 813 dqy = dqy_data * dqy_data 814 dq_data = numpy.add(dqx, dqy) 815 dq_data = numpy.sqrt(dq_data) 816 817 #set space for 1d outputs 818 x = numpy.zeros(self.nbins) 819 y = numpy.zeros(self.nbins) 820 y_err = numpy.zeros(self.nbins) 821 x_err = numpy.zeros(self.nbins) 822 y_counts = numpy.zeros(self.nbins) 842 823 843 824 # Get the min and max into the region: 0 <= phi < 2Pi 844 825 phi_min = flip_phi(self.phi_min) 845 826 phi_max = flip_phi(self.phi_max) 846 847 # binning object 848 if run.lower() == 'phi': 849 binning = Binning(self.phi_min, self.phi_max, self.nbins, self.base) 850 else: 851 binning = Binning(self.r_min, self.r_max, self.nbins, self.base) 852 827 853 828 for n in range(len(data)): 829 frac = 0 854 830 855 831 # q-value at the pixel (j,i) … … 861 837 862 838 # phi-value of the pixel (j,i) 863 phi_value = math.atan2(qy_data[n], qx_data[n]) + math.pi 864 865 # No need to calculate: data outside of the radius 866 if self.r_min > q_value or q_value > self.r_max: 839 phi_value = math.atan2(qy_data[n], qx_data[n]) + Pi 840 841 ## No need to calculate the frac when all data are within range 842 if self.r_min <= q_value and q_value <= self.r_max: 843 frac = 1 844 if frac == 0: 867 845 continue 868 869 # In case of two ROIs (symmetric major and minor regions)(for 'q2') 846 #In case of two ROIs (symmetric major and minor regions)(for 'q2') 870 847 if run.lower() == 'q2': 871 # For minor sector wing848 ## For minor sector wing 872 849 # Calculate the minor wing phis 873 phi_min_minor = flip_phi(phi_min - math.pi)874 phi_max_minor = flip_phi(phi_max - math.pi)850 phi_min_minor = flip_phi(phi_min - Pi) 851 phi_max_minor = flip_phi(phi_max - Pi) 875 852 # Check if phis of the minor ring is within 0 to 2pi 876 853 if phi_min_minor > phi_max_minor: 877 is_in = (phi_value > phi_min_minor or 878 phi_value < phi_max_minor)854 is_in = (phi_value > phi_min_minor or \ 855 phi_value < phi_max_minor) 879 856 else: 880 is_in = (phi_value > phi_min_minor and 881 phi_value < phi_max_minor)882 883 # 884 # 857 is_in = (phi_value > phi_min_minor and \ 858 phi_value < phi_max_minor) 859 860 #For all cases(i.e.,for 'q', 'q2', and 'phi') 861 #Find pixels within ROI 885 862 if phi_min > phi_max: 886 is_in = is_in or (phi_value > phi_min or 887 phi_value < phi_max)863 is_in = is_in or (phi_value > phi_min or \ 864 phi_value < phi_max) 888 865 else: 889 is_in = 
is_in or (phi_value >= phi_min and 890 phi_value < phi_max) 891 892 # data oustide of the phi range 866 is_in = is_in or (phi_value >= phi_min and \ 867 phi_value < phi_max) 868 893 869 if not is_in: 870 frac = 0 871 if frac == 0: 894 872 continue 895 896 # Get the binning index 873 # Check which type of averaging we need 897 874 if run.lower() == 'phi': 898 i_bin = binning.get_bin_index(phi_value) 875 temp_x = (self.nbins) * (phi_value - self.phi_min) 876 temp_y = (self.phi_max - self.phi_min) 877 i_bin = int(math.floor(temp_x / temp_y)) 899 878 else: 900 i_bin = binning.get_bin_index(q_value) 879 temp_x = (self.nbins) * (q_value - self.r_min) 880 temp_y = (self.r_max - self.r_min) 881 i_bin = int(math.floor(temp_x / temp_y)) 901 882 902 883 # Take care of the edge case at phi = 2pi. … … 904 885 i_bin = self.nbins - 1 905 886 906 # Get the total y907 y[i_bin] += data_n908 x[i_bin] += q_value909 if err_data[n] isNone or err_data[n] == 0.0:887 ## Get the total y 888 y[i_bin] += frac * data_n 889 x[i_bin] += frac * q_value 890 if err_data[n] == None or err_data[n] == 0.0: 910 891 if data_n < 0: 911 892 data_n = -data_n 912 y_err[i_bin] += data_n893 y_err[i_bin] += frac * frac * data_n 913 894 else: 914 y_err[i_bin] += err_data[n]**2915 916 if dq_data is notNone:895 y_err[i_bin] += frac * frac * err_data[n] * err_data[n] 896 897 if dq_data != None: 917 898 # To be consistent with dq calculation in 1d reduction, 918 899 # we need just the averages (not quadratures) because 919 900 # it should not depend on the number of the q points 920 901 # in the qr bins. 921 x_err[i_bin] += dq_data[n]902 x_err[i_bin] += frac * dq_data[n] 922 903 else: 923 904 x_err = None 924 y_counts[i_bin] += 1905 y_counts[i_bin] += frac 925 906 926 907 # Organize the results … … 942 923 #x[i] = math.sqrt((r_inner * r_inner + r_outer * r_outer) / 2) 943 924 x[i] = x[i] / y_counts[i] 944 y_err[y_err == 0] = n p.average(y_err)945 idx = (n p.isfinite(y) & np.isfinite(y_err))946 if x_err is notNone:925 y_err[y_err == 0] = numpy.average(y_err) 926 idx = (numpy.isfinite(y) & numpy.isfinite(y_err)) 927 if x_err != None: 947 928 d_x = x_err[idx] / y_counts[idx] 948 929 else: … … 950 931 if not idx.any(): 951 932 msg = "Average Error: No points inside sector of ROI to average..." 952 raise ValueError (msg)953 # 933 raise ValueError, msg 934 #elif len(y[idx])!= self.nbins: 954 935 # print "resulted",self.nbins- len(y[idx]), 955 936 #"empty bin(s) due to tight binning..." … … 965 946 The number of bin in phi also has to be defined. 966 947 """ 967 968 948 def __call__(self, data2D): 969 949 """ … … 985 965 The number of bin in Q also has to be defined. 
986 966 """ 987 988 967 def __call__(self, data2D): 989 968 """ … … 996 975 return self._agv(data2D, 'q2') 997 976 998 ################################################################################999 977 1000 978 class Ringcut(object): … … 1009 987 in anti-clockwise starting from the x- axis on the left-hand side 1010 988 """ 1011 1012 989 def __init__(self, r_min=0, r_max=0, center_x=0, center_y=0): 1013 990 # Minimum radius … … 1030 1007 """ 1031 1008 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 1032 raise RuntimeError ("Ring cut only take plottable_2D objects")1009 raise RuntimeError, "Ring cut only take plottable_2D objects" 1033 1010 1034 1011 # Get data 1035 1012 qx_data = data2D.qx_data 1036 1013 qy_data = data2D.qy_data 1037 q_data = n p.sqrt(qx_data * qx_data + qy_data * qy_data)1014 q_data = numpy.sqrt(qx_data * qx_data + qy_data * qy_data) 1038 1015 1039 1016 # check whether or not the data point is inside ROI … … 1041 1018 return out 1042 1019 1043 ################################################################################1044 1020 1045 1021 class Boxcut(object): … … 1047 1023 Find a rectangular 2D region of interest. 1048 1024 """ 1049 1050 1025 def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): 1051 1026 # Minimum Qx value [A-1] … … 1080 1055 """ 1081 1056 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 1082 raise RuntimeError ("Boxcut take only plottable_2D objects")1057 raise RuntimeError, "Boxcut take only plottable_2D objects" 1083 1058 # Get qx_ and qy_data 1084 1059 qx_data = data2D.qx_data … … 1091 1066 return outx & outy 1092 1067 1093 ################################################################################1094 1068 1095 1069 class Sectorcut(object): … … 1103 1077 and (phi_max-phi_min) should not be larger than pi 1104 1078 """ 1105 1106 1079 def __init__(self, phi_min=0, phi_max=math.pi): 1107 1080 self.phi_min = phi_min … … 1133 1106 """ 1134 1107 if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: 1135 raise RuntimeError ("Sectorcut take only plottable_2D objects")1108 raise RuntimeError, "Sectorcut take only plottable_2D objects" 1136 1109 Pi = math.pi 1137 1110 # Get data … … 1140 1113 1141 1114 # get phi from data 1142 phi_data = n p.arctan2(qy_data, qx_data)1115 phi_data = numpy.arctan2(qy_data, qx_data) 1143 1116 1144 1117 # Get the min and max into the region: -pi <= phi < Pi … … 1147 1120 # check for major sector 1148 1121 if phi_min_major > phi_max_major: 1149 out_major = (phi_min_major <= phi_data) + \ 1150 (phi_max_major > phi_data) 1122 out_major = (phi_min_major <= phi_data) + (phi_max_major > phi_data) 1151 1123 else: 1152 out_major = (phi_min_major <= phi_data) & ( 1153 phi_max_major > phi_data) 1124 out_major = (phi_min_major <= phi_data) & (phi_max_major > phi_data) 1154 1125 1155 1126 # minor sector … … 1161 1132 if phi_min_minor > phi_max_minor: 1162 1133 out_minor = (phi_min_minor <= phi_data) + \ 1163 (phi_max_minor >= phi_data)1134 (phi_max_minor >= phi_data) 1164 1135 else: 1165 1136 out_minor = (phi_min_minor <= phi_data) & \ 1166 (phi_max_minor >= phi_data)1137 (phi_max_minor >= phi_data) 1167 1138 out = out_major + out_minor 1168 1139 -
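The manipulations.py rewrite factors the sector-averaging bin lookup into the new `Binning` class, which handles both linear and logarithmic spacing using the formula quoted in its docstring, `bin = floor(N * (log(x) - log(min)) / (log(max) - log(min)))`. The following self-contained sketch illustrates that formula; the function name and sample values are illustrative only and not taken from the changeset:

```python
import math

def bin_index(value, min_value, max_value, n_bins, base=None):
    """Return the bin index for value: logarithmic if base is set, linear otherwise."""
    if base:
        num = n_bins * (math.log(value, base) - math.log(min_value, base))
        den = math.log(max_value, base) - math.log(min_value, base)
    else:
        num = n_bins * (value - min_value)
        den = max_value - min_value
    return int(math.floor(num / den))

# Linear: 10 bins over [0, 1]; the value 0.25 falls in bin 2
print(bin_index(0.25, 0.0, 1.0, 10))            # 2
# Logarithmic (base 10): 30 bins over three decades [1e-3, 1]; 0.01 falls in bin 10
print(bin_index(0.01, 1e-3, 1.0, 30, base=10))  # 10
```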
test/sasdataloader/test/utest_averaging.py
rfa4af76 r9a5097c 1 1 2 import unittest 2 3 import math 3 import os 4 import unittest 4 5 from sas.sascalc.dataloader.loader import Loader 6 from sas.sascalc.dataloader.manipulations import Ring, CircularAverage, SectorPhi, get_q,reader2D_converter 5 7 6 8 import numpy as np 7 8 9 import sas.sascalc.dataloader.data_info as data_info 9 from sas.sascalc.dataloader.loader import Loader10 from sas.sascalc.dataloader.manipulations import (Boxavg, Boxsum,11 CircularAverage, Ring,12 SectorPhi, SectorQ, SlabX,13 SlabY, get_q,14 reader2D_converter)15 16 10 17 11 class Averaging(unittest.TestCase): … … 19 13 Test averaging manipulations on a flat distribution 20 14 """ 21 22 15 def setUp(self): 23 16 """ … … 25 18 should return the predefined height of the distribution (1.0). 26 19 """ 27 x_0 = np.ones([100,100])28 dx_0 = np.ones([100, 29 20 x_0 = np.ones([100,100]) 21 dx_0 = np.ones([100,100]) 22 30 23 self.data = data_info.Data2D(data=x_0, err_data=dx_0) 31 24 detector = data_info.Detector() 32 detector.distance = 1000.0 # 33 detector.pixel_size.x = 1.0 #mm34 detector.pixel_size.y = 1.0 #mm35 25 detector.distance = 1000.0 #mm 26 detector.pixel_size.x = 1.0 #mm 27 detector.pixel_size.y = 1.0 #mm 28 36 29 # center in pixel position = (len(x_0)-1)/2 37 detector.beam_center.x = (len(x_0) - 1) / 2 #pixel number38 detector.beam_center.y = (len(x_0) - 1) / 2 #pixel number30 detector.beam_center.x = (len(x_0)-1)/2 #pixel number 31 detector.beam_center.y = (len(x_0)-1)/2 #pixel number 39 32 self.data.detector.append(detector) 40 33 41 34 source = data_info.Source() 42 source.wavelength = 10.0 #A35 source.wavelength = 10.0 #A 43 36 self.data.source = source 44 45 # get_q(dx, dy, det_dist, wavelength) where units are mm,mm,mm,and A 46 # respectively. 37 38 # get_q(dx, dy, det_dist, wavelength) where units are mm,mm,mm,and A respectively. 
47 39 self.qmin = get_q(1.0, 1.0, detector.distance, source.wavelength) 48 40 49 41 self.qmax = get_q(49.5, 49.5, detector.distance, source.wavelength) 50 42 51 43 self.qstep = len(x_0) 52 x = np.linspace(start=-1 *self.qmax,53 stop=self.qmax,54 num=self.qstep,55 endpoint=True)56 y = np.linspace(start= -1 *self.qmax,57 stop=self.qmax,58 num=self.qstep,59 endpoint=True)60 self.data.x_bins =x61 self.data.y_bins =y44 x= np.linspace(start= -1*self.qmax, 45 stop= self.qmax, 46 num= self.qstep, 47 endpoint=True ) 48 y = np.linspace(start= -1*self.qmax, 49 stop= self.qmax, 50 num= self.qstep, 51 endpoint=True ) 52 self.data.x_bins=x 53 self.data.y_bins=y 62 54 self.data = reader2D_converter(self.data) 63 55 64 56 def test_ring_flat_distribution(self): 65 57 """ 66 58 Test ring averaging 67 59 """ 68 r = Ring(r_min=2 * self.qmin, r_max=5 * self.qmin,69 center_x=self.data.detector[0].beam_center.x, 60 r = Ring(r_min=2*self.qmin, r_max=5*self.qmin, 61 center_x=self.data.detector[0].beam_center.x, 70 62 center_y=self.data.detector[0].beam_center.y) 71 63 r.nbins_phi = 20 72 64 73 65 o = r(self.data) 74 66 for i in range(20): 75 67 self.assertEqual(o.y[i], 1.0) 76 68 77 69 def test_sectorphi_full(self): 78 70 """ 79 71 Test sector averaging 80 72 """ 81 r = SectorPhi(r_min=self.qmin, r_max=3 * self.qmin,82 phi_min=0, phi_max=math.pi *2.0)73 r = SectorPhi(r_min=self.qmin, r_max=3*self.qmin, 74 phi_min=0, phi_max=math.pi*2.0) 83 75 r.nbins_phi = 20 84 76 o = r(self.data) 85 77 for i in range(7): 86 78 self.assertEqual(o.y[i], 1.0) 87 79 80 88 81 def test_sectorphi_partial(self): 89 82 """ 90 83 """ 91 84 phi_max = math.pi * 1.5 92 r = SectorPhi(r_min=self.qmin, r_max=3 * self.qmin,85 r = SectorPhi(r_min=self.qmin, r_max=3*self.qmin, 93 86 phi_min=0, phi_max=phi_max) 94 87 self.assertEqual(r.phi_max, phi_max) … … 98 91 for i in range(17): 99 92 self.assertEqual(o.y[i], 1.0) 100 101 102 class DataInfoTests(unittest.TestCase): 103 93 94 95 96 class data_info_tests(unittest.TestCase): 97 104 98 def setUp(self): 105 filepath = os.path.join(os.path.dirname( 106 os.path.realpath(__file__)), 'MAR07232_rest.ASC') 107 self.data = Loader().load(filepath) 108 99 self.data = Loader().load('MAR07232_rest.ASC') 100 109 101 def test_ring(self): 110 102 """ 111 103 Test ring averaging 112 104 """ 113 r = Ring(r_min=.005, r_max=.01, 114 center_x=self.data.detector[0].beam_center.x, 105 r = Ring(r_min=.005, r_max=.01, 106 center_x=self.data.detector[0].beam_center.x, 115 107 center_y=self.data.detector[0].beam_center.y, 116 nbins =20)108 nbins = 20) 117 109 ##r.nbins_phi = 20 118 119 o = r(self.data) 120 filepath = os.path.join(os.path.dirname( 121 os.path.realpath(__file__)), 'ring_testdata.txt') 122 answer = Loader().load(filepath) 123 110 111 o = r(self.data) 112 answer = Loader().load('ring_testdata.txt') 113 124 114 for i in range(r.nbins_phi - 1): 125 115 self.assertAlmostEqual(o.x[i + 1], answer.x[i], 4) 126 116 self.assertAlmostEqual(o.y[i + 1], answer.y[i], 4) 127 117 self.assertAlmostEqual(o.dy[i + 1], answer.dy[i], 4) 128 118 129 119 def test_circularavg(self): 130 120 """ 131 Test circular averaging 132 The test data was not generated by IGOR. 133 """ 134 r = CircularAverage(r_min=.00, r_max=.025, 135 bin_width=0.0003) 136 r.nbins_phi = 20 137 138 o = r(self.data) 139 140 filepath = os.path.join(os.path.dirname( 141 os.path.realpath(__file__)), 'avg_testdata.txt') 142 answer = Loader().load(filepath) 121 Test circular averaging 122 The test data was not generated by IGOR. 
123 """ 124 r = CircularAverage(r_min=.00, r_max=.025, 125 bin_width=0.0003) 126 r.nbins_phi = 20 127 128 o = r(self.data) 129 130 answer = Loader().load('avg_testdata.txt') 143 131 for i in range(r.nbins_phi): 144 132 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 145 133 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 146 134 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 147 135 148 136 def test_box(self): 149 137 """ … … 151 139 The test data was not generated by IGOR. 152 140 """ 153 141 from sas.sascalc.dataloader.manipulations import Boxsum, Boxavg 142 154 143 r = Boxsum(x_min=.01, x_max=.015, y_min=0.01, y_max=0.015) 155 144 s, ds, npoints = r(self.data) 156 145 self.assertAlmostEqual(s, 34.278990899999997, 4) 157 146 self.assertAlmostEqual(ds, 7.8007981835194293, 4) 158 self.assertAlmostEqual(npoints, 324.0000, 4) 159 147 self.assertAlmostEqual(npoints, 324.0000, 4) 148 160 149 r = Boxavg(x_min=.01, x_max=.015, y_min=0.01, y_max=0.015) 161 150 s, ds = r(self.data) 162 151 self.assertAlmostEqual(s, 0.10579935462962962, 4) 163 152 self.assertAlmostEqual(ds, 0.024076537603455028, 4) 164 153 165 154 def test_slabX(self): 166 155 """ … … 168 157 The test data was not generated by IGOR. 169 158 """ 170 171 r = SlabX(x_min=-.01, x_max=.01, y_min=-0.0002,172 159 from sas.sascalc.dataloader.manipulations import SlabX 160 161 r = SlabX(x_min=-.01, x_max=.01, y_min=-0.0002, y_max=0.0002, bin_width=0.0004) 173 162 r.fold = False 174 163 o = r(self.data) 175 164 176 filepath = os.path.join(os.path.dirname( 177 os.path.realpath(__file__)), 'slabx_testdata.txt') 178 answer = Loader().load(filepath) 179 for i in range(len(o.x)): 180 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 181 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 182 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 183 165 answer = Loader().load('slabx_testdata.txt') 166 for i in range(len(o.x)): 167 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 168 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 169 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 170 184 171 def test_slabY(self): 185 172 """ … … 187 174 The test data was not generated by IGOR. 188 175 """ 189 190 r = SlabY(x_min=.005, x_max=.01, y_min=-191 176 from sas.sascalc.dataloader.manipulations import SlabY 177 178 r = SlabY(x_min=.005, x_max=.01, y_min=-0.01, y_max=0.01, bin_width=0.0004) 192 179 r.fold = False 193 180 o = r(self.data) 194 181 195 filepath = os.path.join(os.path.dirname( 196 os.path.realpath(__file__)), 'slaby_testdata.txt') 197 answer = Loader().load(filepath) 198 for i in range(len(o.x)): 199 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 200 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 201 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 202 182 answer = Loader().load('slaby_testdata.txt') 183 for i in range(len(o.x)): 184 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 185 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 186 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 187 203 188 def test_sectorphi_full(self): 204 189 """ … … 208 193 The test data was not generated by IGOR. 
209 194 """ 210 195 from sas.sascalc.dataloader.manipulations import SectorPhi 196 import math 197 211 198 nbins = 19 212 199 phi_min = math.pi / (nbins + 1) 213 200 phi_max = math.pi * 2 - phi_min 214 201 215 202 r = SectorPhi(r_min=.005, 216 203 r_max=.01, … … 220 207 o = r(self.data) 221 208 222 filepath = os.path.join(os.path.dirname( 223 os.path.realpath(__file__)), 'ring_testdata.txt') 224 answer = Loader().load(filepath) 225 for i in range(len(o.x)): 226 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 227 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 228 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 229 209 answer = Loader().load('ring_testdata.txt') 210 for i in range(len(o.x)): 211 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 212 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 213 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 214 230 215 def test_sectorphi_quarter(self): 231 216 """ … … 233 218 The test data was not generated by IGOR. 234 219 """ 235 236 r = SectorPhi(r_min=.005, r_max=.01, phi_min=0, phi_max=math.pi / 2.0)237 r.nbins_phi = 20238 o = r(self.data)239 240 filepath = os.path.join(os.path.dirname(241 os.path.realpath(__file__)), 'sectorphi_testdata.txt') 242 answer = Loader().load( filepath)243 for i in range(len(o.x)): 244 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 245 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 246 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 247 220 from sas.sascalc.dataloader.manipulations import SectorPhi 221 import math 222 223 r = SectorPhi(r_min=.005, r_max=.01, phi_min=0, phi_max=math.pi/2.0) 224 r.nbins_phi = 20 225 o = r(self.data) 226 227 answer = Loader().load('sectorphi_testdata.txt') 228 for i in range(len(o.x)): 229 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 230 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 231 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 232 248 233 def test_sectorq_full(self): 249 234 """ … … 251 236 The test data was not generated by IGOR. 252 237 """ 253 254 r = SectorQ(r_min=.005, r_max=.01, phi_min=0, phi_max=math.pi / 2.0)255 r.nbins_phi = 20256 o = r(self.data)257 258 filepath = os.path.join(os.path.dirname(259 os.path.realpath(__file__)), 'sectorq_testdata.txt') 260 answer = Loader().load( filepath)261 for i in range(len(o.x)): 262 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 263 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 264 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 265 238 from sas.sascalc.dataloader.manipulations import SectorQ 239 import math 240 241 r = SectorQ(r_min=.005, r_max=.01, phi_min=0, phi_max=math.pi/2.0) 242 r.nbins_phi = 20 243 o = r(self.data) 244 245 answer = Loader().load('sectorq_testdata.txt') 246 for i in range(len(o.x)): 247 self.assertAlmostEqual(o.x[i], answer.x[i], 4) 248 self.assertAlmostEqual(o.y[i], answer.y[i], 4) 249 self.assertAlmostEqual(o.dy[i], answer.dy[i], 4) 250 266 251 267 252 if __name__ == '__main__': -
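The averaging tests above all follow the same pattern: load a 2D data set, construct a manipulation object with its region-of-interest parameters, then call it on the data to obtain a 1D curve. The sketch below is a minimal illustration of that workflow, assuming only the public API already exercised by the tests (Loader().load, CircularAverage, Ring, SectorPhi) and the same test data file; it is not part of the changeset itself.

```python
import math

from sas.sascalc.dataloader.loader import Loader
from sas.sascalc.dataloader.manipulations import CircularAverage, Ring, SectorPhi

# Test data file used by the suite above; any 2D data set the loader
# understands should work the same way.
data2d = Loader().load('MAR07232_rest.ASC')

# Circular average: I(|Q|) between r_min and r_max [A-1] with a fixed bin width.
circ = CircularAverage(r_min=0.0, r_max=0.025, bin_width=0.0003)
iq = circ(data2d)  # 1D result exposing .x (Q), .y (I) and .dy (error) arrays

# Annular (ring) average: I(phi) on a ring centred on the beam centre.
ring = Ring(r_min=0.005, r_max=0.01,
            center_x=data2d.detector[0].beam_center.x,
            center_y=data2d.detector[0].beam_center.y,
            nbins=20)
iphi = ring(data2d)

# Sector average over a quarter of the azimuthal range, as in
# test_sectorphi_quarter above.
sector = SectorPhi(r_min=0.005, r_max=0.01, phi_min=0, phi_max=math.pi / 2.0)
sector.nbins_phi = 20
isec = sector(data2d)
```

Each call returns a 1D data object whose x, y and dy arrays are what the tests compare against the stored reference files (ring_testdata.txt, avg_testdata.txt, sectorphi_testdata.txt, and so on).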
test/sasguiframe/test/utest_manipulations.py
r959eb01 r959eb01 2 2 Unit tests for data manipulations 3 3 """ 4 # TODO: what happens if you add a Data1D to a Data2D? 5 4 #TODO: what happens if you add a Data1D to a Data2D? 5 6 import unittest 6 7 import math 8 import numpy as np 9 from sas.sascalc.dataloader.loader import Loader 10 from sas.sasgui.guiframe.dataFitting import Data1D, Data2D 11 from sas.sasgui.guiframe.dataFitting import Data1D as Theory1D 12 7 13 import os.path 8 import unittest 9 10 import numpy as np 11 12 from sas.sascalc.dataloader.loader import Loader 13 from sas.sasgui.guiframe.dataFitting import Data1D 14 from sas.sasgui.guiframe.dataFitting import Data2D 15 16 17 class DataInfoTests(unittest.TestCase): 18 14 15 class data_info_tests(unittest.TestCase): 16 19 17 def setUp(self): 20 18 data = Loader().load("cansas1d.xml") 21 19 self.data = data[0] 22 20 23 21 def test_clone1D(self): 24 22 """ … … 26 24 """ 27 25 clone = self.data.clone_without_data() 28 26 29 27 for i in range(len(self.data.detector)): 30 self.assertEqual( 31 self.data.detector[i].distance, clone.detector[i].distance) 32 33 34 class Theory1DTests(unittest.TestCase): 35 28 self.assertEqual(self.data.detector[i].distance, clone.detector[i].distance) 29 30 class theory1d_tests(unittest.TestCase): 31 36 32 def setUp(self): 37 33 data = Loader().load("cansas1d.xml") 38 34 self.data = data[0] 39 35 40 36 def test_clone_theory1D(self): 41 37 """ 42 38 Test basic cloning 43 39 """ 44 theory = Data1D(x=[], y=[], dy=None)40 theory = Theory1D(x=[], y=[], dy=None) 45 41 theory.clone_without_data(clone=self.data) 46 42 theory.copy_from_datainfo(data1d=self.data) 47 43 for i in range(len(self.data.detector)): 48 self.assertEqual( 49 self.data.detector[i].distance, theory.detector[i].distance) 50 44 self.assertEqual(self.data.detector[i].distance, theory.detector[i].distance) 45 51 46 for i in range(len(self.data.x)): 52 47 self.assertEqual(self.data.x[i], theory.x[i]) … … 54 49 self.assertEqual(self.data.dy[i], theory.dy[i]) 55 50 56 57 class ManipTests(unittest.TestCase): 58 51 class manip_tests(unittest.TestCase): 52 59 53 def setUp(self): 60 54 # Create two data sets to play with 61 55 x_0 = np.ones(5) 62 56 for i in range(5): 63 x_0[i] = x_0[i] * (i +1.0)64 65 y_0 = 2.0 *np.ones(5)66 dy_0 = 0.5 *np.ones(5)57 x_0[i] = x_0[i]*(i+1.0) 58 59 y_0 = 2.0*np.ones(5) 60 dy_0 = 0.5*np.ones(5) 67 61 self.data = Data1D(x_0, y_0, dy=dy_0) 68 62 69 63 x = self.data.x 70 64 y = np.ones(5) 71 65 dy = np.ones(5) 72 66 self.data2 = Data1D(x, y, dy=dy) 73 67 68 74 69 def test_load(self): 75 70 """ … … 78 73 # There should be 5 entries in the file 79 74 self.assertEqual(len(self.data.x), 5) 80 75 81 76 for i in range(5): 82 77 # The x values should be from 1 to 5 83 self.assertEqual(self.data.x[i], float(i +1))84 78 self.assertEqual(self.data.x[i], float(i+1)) 79 85 80 # All y-error values should be 0.5 86 self.assertEqual(self.data.dy[i], 0.5) 87 81 self.assertEqual(self.data.dy[i], 0.5) 82 88 83 # All y values should be 2.0 89 self.assertEqual(self.data.y[i], 2.0) 90 84 self.assertEqual(self.data.y[i], 2.0) 85 91 86 def test_add(self): 92 result = self.data2 +self.data87 result = self.data2+self.data 93 88 for i in range(5): 94 89 self.assertEqual(result.y[i], 3.0) 95 self.assertEqual(result.dy[i], math.sqrt(0.5**2 +1.0))96 90 self.assertEqual(result.dy[i], math.sqrt(0.5**2+1.0)) 91 97 92 def test_sub(self): 98 result = self.data2 -self.data93 result = self.data2-self.data 99 94 for i in range(5): 100 95 self.assertEqual(result.y[i], -1.0) 101 self.assertEqual(result.dy[i], 
math.sqrt(0.5**2 +1.0))102 96 self.assertEqual(result.dy[i], math.sqrt(0.5**2+1.0)) 97 103 98 def test_mul(self): 104 result = self.data2 *self.data99 result = self.data2*self.data 105 100 for i in range(5): 106 101 self.assertEqual(result.y[i], 2.0) 107 self.assertEqual(result.dy[i], math.sqrt( 108 (0.5 * 1.0)**2 + (1.0 * 2.0)**2)) 109 102 self.assertEqual(result.dy[i], math.sqrt((0.5*1.0)**2+(1.0*2.0)**2)) 103 110 104 def test_div(self): 111 result = self.data2 /self.data105 result = self.data2/self.data 112 106 for i in range(5): 113 107 self.assertEqual(result.y[i], 0.5) 114 self.assertEqual(result.dy[i], math.sqrt( 115 (1.0 / 2.0)**2 + (0.5 * 1.0 / 4.0)**2)) 116 108 self.assertEqual(result.dy[i], math.sqrt((1.0/2.0)**2+(0.5*1.0/4.0)**2)) 109 117 110 def test_radd(self): 118 result = self.data +3.0111 result = self.data+3.0 119 112 for i in range(5): 120 113 self.assertEqual(result.y[i], 5.0) 121 114 self.assertEqual(result.dy[i], 0.5) 122 123 result = 3.0 +self.data115 116 result = 3.0+self.data 124 117 for i in range(5): 125 118 self.assertEqual(result.y[i], 5.0) 126 119 self.assertEqual(result.dy[i], 0.5) 127 120 128 121 def test_rsub(self): 129 result = self.data -3.0122 result = self.data-3.0 130 123 for i in range(5): 131 124 self.assertEqual(result.y[i], -1.0) 132 125 self.assertEqual(result.dy[i], 0.5) 133 134 result = 3.0 -self.data126 127 result = 3.0-self.data 135 128 for i in range(5): 136 129 self.assertEqual(result.y[i], 1.0) 137 130 self.assertEqual(result.dy[i], 0.5) 138 131 139 132 def test_rmul(self): 140 result = self.data *3.0133 result = self.data*3.0 141 134 for i in range(5): 142 135 self.assertEqual(result.y[i], 6.0) 143 136 self.assertEqual(result.dy[i], 1.5) 144 145 result = 3.0 *self.data137 138 result = 3.0*self.data 146 139 for i in range(5): 147 140 self.assertEqual(result.y[i], 6.0) 148 141 self.assertEqual(result.dy[i], 1.5) 149 142 150 143 def test_rdiv(self): 151 result = self.data /4.0144 result = self.data/4.0 152 145 for i in range(5): 153 146 self.assertEqual(result.y[i], 0.5) 154 147 self.assertEqual(result.dy[i], 0.125) 155 156 result = 6.0 /self.data148 149 result = 6.0/self.data 157 150 for i in range(5): 158 151 self.assertEqual(result.y[i], 3.0) 159 self.assertEqual(result.dy[i], 6.0 * 0.5 / 4.0) 160 161 162 class Manin2DTests(unittest.TestCase): 163 152 self.assertEqual(result.dy[i], 6.0*0.5/4.0) 153 154 class manip_2D(unittest.TestCase): 155 164 156 def setUp(self): 165 157 # Create two data sets to play with 166 x_0 = 2.0 *np.ones(25)167 dx_0 = 0.5 *np.ones(25)158 x_0 = 2.0*np.ones(25) 159 dx_0 = 0.5*np.ones(25) 168 160 qx_0 = np.arange(25) 169 161 qy_0 = np.arange(25) 170 162 mask_0 = np.zeros(25) 171 dqx_0 = np.arange(25) /100172 dqy_0 = np.arange(25) /100163 dqx_0 = np.arange(25)/100 164 dqy_0 = np.arange(25)/100 173 165 q_0 = np.sqrt(qx_0 * qx_0 + qy_0 * qy_0) 174 self.data = Data2D(image=x_0, err_image=dx_0, qx_data=qx_0, 175 qy_data=qy_0, q_data=q_0, mask=mask_0, 166 self.data = Data2D(image=x_0, err_image=dx_0, qx_data=qx_0, 167 qy_data=qy_0, q_data=q_0, mask=mask_0, 176 168 dqx_data=dqx_0, dqy_data=dqy_0) 177 169 178 170 y = np.ones(25) 179 171 dy = np.ones(25) … … 182 174 mask = np.zeros(25) 183 175 q = np.sqrt(qx * qx + qy * qy) 184 self.data2 = Data2D(image=y, err_image=dy, qx_data=qx, qy_data=qy, 176 self.data2 = Data2D(image=y, err_image=dy, qx_data=qx, qy_data=qy, 185 177 q_data=q, mask=mask) 186 178 179 187 180 def test_load(self): 188 181 """ … … 191 184 # There should be 5 entries in the file 192 185 
self.assertEqual(np.size(self.data.data), 25) 193 186 194 187 for i in range(25): 195 188 # All y-error values should be 0.5 196 self.assertEqual(self.data.err_data[i], 0.5) 197 189 self.assertEqual(self.data.err_data[i], 0.5) 190 198 191 # All y values should be 2.0 199 self.assertEqual(self.data.data[i], 2.0) 200 192 self.assertEqual(self.data.data[i], 2.0) 193 201 194 def test_add(self): 202 result = self.data2 +self.data195 result = self.data2+self.data 203 196 for i in range(25): 204 197 self.assertEqual(result.data[i], 3.0) 205 self.assertEqual(result.err_data[i], math.sqrt(0.5**2 +1.0))206 198 self.assertEqual(result.err_data[i], math.sqrt(0.5**2+1.0)) 199 207 200 def test_sub(self): 208 result = self.data2 - self.data 201 result = self.data2-self.data 202 for i in range(25): 203 self.assertEqual(result.data[i], -1.0) 204 self.assertEqual(result.err_data[i], math.sqrt(0.5**2+1.0)) 205 206 def test_mul(self): 207 result = self.data2*self.data 208 for i in range(25): 209 self.assertEqual(result.data[i], 2.0) 210 self.assertEqual(result.err_data[i], math.sqrt((0.5*1.0)**2+(1.0*2.0)**2)) 211 212 def test_div(self): 213 result = self.data2/self.data 214 for i in range(25): 215 self.assertEqual(result.data[i], 0.5) 216 self.assertEqual(result.err_data[i], math.sqrt((1.0/2.0)**2+(0.5*1.0/4.0)**2)) 217 218 def test_radd(self): 219 result = self.data+3.0 220 for i in range(25): 221 self.assertEqual(result.data[i], 5.0) 222 self.assertEqual(result.err_data[i], 0.5) 223 224 result = 3.0+self.data 225 for i in range(25): 226 self.assertEqual(result.data[i], 5.0) 227 self.assertEqual(result.err_data[i], 0.5) 228 229 def test_rsub(self): 230 result = self.data-3.0 209 231 for i in range(25): 210 232 self.assertEqual(result.data[i], -1.0) 211 self.assertEqual(result.err_data[i], math.sqrt(0.5**2 + 1.0)) 212 213 def test_mul(self): 214 result = self.data2 * self.data 215 for i in range(25): 216 self.assertEqual(result.data[i], 2.0) 217 self.assertEqual(result.err_data[i], math.sqrt( 218 (0.5 * 1.0)**2 + (1.0 * 2.0)**2)) 219 220 def test_div(self): 221 result = self.data2 / self.data 222 for i in range(25): 223 self.assertEqual(result.data[i], 0.5) 224 self.assertEqual(result.err_data[i], math.sqrt( 225 (1.0 / 2.0)**2 + (0.5 * 1.0 / 4.0)**2)) 226 227 def test_radd(self): 228 result = self.data + 3.0 229 for i in range(25): 230 self.assertEqual(result.data[i], 5.0) 231 self.assertEqual(result.err_data[i], 0.5) 232 233 result = 3.0 + self.data 234 for i in range(25): 235 self.assertEqual(result.data[i], 5.0) 236 self.assertEqual(result.err_data[i], 0.5) 237 238 def test_rsub(self): 239 result = self.data - 3.0 240 for i in range(25): 241 self.assertEqual(result.data[i], -1.0) 242 self.assertEqual(result.err_data[i], 0.5) 243 244 result = 3.0 - self.data 233 self.assertEqual(result.err_data[i], 0.5) 234 235 result = 3.0-self.data 245 236 for i in range(25): 246 237 self.assertEqual(result.data[i], 1.0) 247 238 self.assertEqual(result.err_data[i], 0.5) 248 239 249 240 def test_rmul(self): 250 result = self.data *3.0241 result = self.data*3.0 251 242 for i in range(25): 252 243 self.assertEqual(result.data[i], 6.0) 253 244 self.assertEqual(result.err_data[i], 1.5) 254 255 result = 3.0 *self.data245 246 result = 3.0*self.data 256 247 for i in range(25): 257 248 self.assertEqual(result.data[i], 6.0) 258 249 self.assertEqual(result.err_data[i], 1.5) 259 250 260 251 def test_rdiv(self): 261 result = self.data /4.0252 result = self.data/4.0 262 253 for i in range(25): 263 254 self.assertEqual(result.data[i], 
0.5) 264 255 self.assertEqual(result.err_data[i], 0.125) 265 256 266 result = 6.0 /self.data257 result = 6.0/self.data 267 258 for i in range(25): 268 259 self.assertEqual(result.data[i], 3.0) 269 self.assertEqual(result.err_data[i], 6.0 * 0.5 / 4.0) 270 271 272 class ExtraManip2DTests(unittest.TestCase): 273 260 self.assertEqual(result.err_data[i], 6.0*0.5/4.0) 261 262 class extra_manip_2D(unittest.TestCase): 263 274 264 def setUp(self): 275 265 # Create two data sets to play with 276 x_0 = 2.0 *np.ones(25)277 dx_0 = 0.5 *np.ones(25)266 x_0 = 2.0*np.ones(25) 267 dx_0 = 0.5*np.ones(25) 278 268 qx_0 = np.arange(25) 279 269 qy_0 = np.arange(25) 280 270 mask_0 = np.zeros(25) 281 dqx_0 = np.arange(25) /100282 dqy_0 = np.arange(25) /100271 dqx_0 = np.arange(25)/100 272 dqy_0 = np.arange(25)/100 283 273 q_0 = np.sqrt(qx_0 * qx_0 + qy_0 * qy_0) 284 self.data = Data2D(image=x_0, err_image=dx_0, qx_data=qx_0, 285 qy_data=qy_0, q_data=q_0, mask=mask_0, 274 self.data = Data2D(image=x_0, err_image=dx_0, qx_data=qx_0, 275 qy_data=qy_0, q_data=q_0, mask=mask_0, 286 276 dqx_data=dqx_0, dqy_data=dqy_0) 287 277 288 278 y = np.ones(25) 289 279 dy = np.ones(25) … … 292 282 mask = np.zeros(25) 293 283 q = np.sqrt(qx * qx + qy * qy) 294 self.data2 = Data2D(image=y, err_image=dy, qx_data=qx, qy_data=qy, 284 self.data2 = Data2D(image=y, err_image=dy, qx_data=qx, qy_data=qy, 295 285 q_data=q, mask=mask) 296 286 287 297 288 def test_load(self): 298 289 """ … … 301 292 # There should be 5 entries in the file 302 293 self.assertEqual(np.size(self.data.data), 25) 303 294 304 295 for i in range(25): 305 296 # All y-error values should be 0.5 306 self.assertEqual(self.data.err_data[i], 0.5) 307 297 self.assertEqual(self.data.err_data[i], 0.5) 298 308 299 # All y values should be 2.0 309 self.assertEqual(self.data.data[i], 2.0) 310 300 self.assertEqual(self.data.data[i], 2.0) 301 311 302 def test_add(self): 312 result = self.data2 +self.data303 result = self.data2+self.data 313 304 for i in range(25): 314 305 self.assertEqual(result.data[i], 3.0) 315 self.assertEqual(result.err_data[i], math.sqrt(0.5**2 +1.0))316 306 self.assertEqual(result.err_data[i], math.sqrt(0.5**2+1.0)) 307 317 308 def test_sub(self): 318 result = self.data2 - self.data 309 result = self.data2-self.data 310 for i in range(25): 311 self.assertEqual(result.data[i], -1.0) 312 self.assertEqual(result.err_data[i], math.sqrt(0.5**2+1.0)) 313 314 def test_mul(self): 315 result = self.data2*self.data 316 for i in range(25): 317 self.assertEqual(result.data[i], 2.0) 318 self.assertEqual(result.err_data[i], math.sqrt((0.5*1.0)**2+(1.0*2.0)**2)) 319 320 def test_div(self): 321 result = self.data2/self.data 322 for i in range(25): 323 self.assertEqual(result.data[i], 0.5) 324 self.assertEqual(result.err_data[i], math.sqrt((1.0/2.0)**2+(0.5*1.0/4.0)**2)) 325 326 def test_radd(self): 327 result = self.data+3.0 328 for i in range(25): 329 self.assertEqual(result.data[i], 5.0) 330 self.assertEqual(result.err_data[i], 0.5) 331 332 result = 3.0+self.data 333 for i in range(25): 334 self.assertEqual(result.data[i], 5.0) 335 self.assertEqual(result.err_data[i], 0.5) 336 337 def test_rsub(self): 338 result = self.data-3.0 319 339 for i in range(25): 320 340 self.assertEqual(result.data[i], -1.0) 321 self.assertEqual(result.err_data[i], math.sqrt(0.5**2 + 1.0)) 322 323 def test_mul(self): 324 result = self.data2 * self.data 325 for i in range(25): 326 self.assertEqual(result.data[i], 2.0) 327 self.assertEqual(result.err_data[i], math.sqrt( 328 (0.5 * 1.0)**2 + (1.0 * 
2.0)**2)) 329 330 def test_div(self): 331 result = self.data2 / self.data 332 for i in range(25): 333 self.assertEqual(result.data[i], 0.5) 334 self.assertEqual(result.err_data[i], math.sqrt( 335 (1.0 / 2.0)**2 + (0.5 * 1.0 / 4.0)**2)) 336 337 def test_radd(self): 338 result = self.data + 3.0 339 for i in range(25): 340 self.assertEqual(result.data[i], 5.0) 341 self.assertEqual(result.err_data[i], 0.5) 342 343 result = 3.0 + self.data 344 for i in range(25): 345 self.assertEqual(result.data[i], 5.0) 346 self.assertEqual(result.err_data[i], 0.5) 347 348 def test_rsub(self): 349 result = self.data - 3.0 350 for i in range(25): 351 self.assertEqual(result.data[i], -1.0) 352 self.assertEqual(result.err_data[i], 0.5) 353 354 result = 3.0 - self.data 341 self.assertEqual(result.err_data[i], 0.5) 342 343 result = 3.0-self.data 355 344 for i in range(25): 356 345 self.assertEqual(result.data[i], 1.0) 357 346 self.assertEqual(result.err_data[i], 0.5) 358 347 359 348 def test_rmul(self): 360 result = self.data *3.0349 result = self.data*3.0 361 350 for i in range(25): 362 351 self.assertEqual(result.data[i], 6.0) 363 352 self.assertEqual(result.err_data[i], 1.5) 364 365 result = 3.0 *self.data353 354 result = 3.0*self.data 366 355 for i in range(25): 367 356 self.assertEqual(result.data[i], 6.0) 368 357 self.assertEqual(result.err_data[i], 1.5) 369 358 370 359 def test_rdiv(self): 371 result = self.data /4.0360 result = self.data/4.0 372 361 for i in range(25): 373 362 self.assertEqual(result.data[i], 0.5) 374 363 self.assertEqual(result.err_data[i], 0.125) 375 364 376 result = 6.0 /self.data365 result = 6.0/self.data 377 366 for i in range(25): 378 367 self.assertEqual(result.data[i], 3.0) 379 self.assertEqual(result.err_data[i], 6.0 * 0.5 / 4.0) 380 368 self.assertEqual(result.err_data[i], 6.0*0.5/4.0) 381 369 382 370 if __name__ == '__main__': 383 371 unittest.main() 372
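The arithmetic tests for both Data1D and Data2D assert the usual first-order (Gaussian) error propagation for uncorrelated uncertainties. The following self-contained sketch restates the formulas those tests check, using the same numbers as the fixtures (data2 has y=1, dy=1; data has y=2, dy=0.5); the helper name `propagate` is purely illustrative and is not part of sasview.

```python
import math

def propagate(op, y1, dy1, y2, dy2):
    """Combine two values with uncorrelated uncertainties.

    Returns (value, uncertainty) for y1 <op> y2 using standard first-order
    Gaussian propagation, i.e. the formulas asserted by the tests above.
    """
    if op == '+':
        return y1 + y2, math.sqrt(dy1 ** 2 + dy2 ** 2)
    if op == '-':
        return y1 - y2, math.sqrt(dy1 ** 2 + dy2 ** 2)
    if op == '*':
        return y1 * y2, math.sqrt((dy1 * y2) ** 2 + (dy2 * y1) ** 2)
    if op == '/':
        return y1 / y2, math.sqrt((dy1 / y2) ** 2 + (y1 * dy2 / y2 ** 2) ** 2)
    raise ValueError("unknown operator: %s" % op)

# Same expectations as test_add, test_mul and test_div above.
assert propagate('+', 1.0, 1.0, 2.0, 0.5) == (3.0, math.sqrt(0.5 ** 2 + 1.0))
assert propagate('*', 1.0, 1.0, 2.0, 0.5)[1] == math.sqrt((0.5 * 1.0) ** 2 + (1.0 * 2.0) ** 2)
assert propagate('/', 1.0, 1.0, 2.0, 0.5)[1] == math.sqrt((1.0 / 2.0) ** 2 + (0.5 * 1.0 / 4.0) ** 2)
```

When one operand is a plain scalar (e.g. `self.data + 3.0` or `self.data / 4.0`), the tests treat the scalar as exact: the uncertainty is unchanged by addition or subtraction and simply scaled by multiplication or division.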