- Timestamp: Nov 17, 2010 5:40:17 AM
- Branches: master, ESS_GUI, ESS_GUI_Docs, ESS_GUI_batch_fitting, ESS_GUI_bumps_abstraction, ESS_GUI_iss1116, ESS_GUI_iss879, ESS_GUI_iss959, ESS_GUI_opencl, ESS_GUI_ordering, ESS_GUI_sync_sascalc, costrafo411, magnetic_scatt, release-4.1.1, release-4.1.2, release-4.2.2, release_4.0.1, ticket-1009, ticket-1094-headless, ticket-1242-2d-resolution, ticket-1243, ticket-1249, ticket885, unittest-saveload
- Children: 4a2b054
- Parents: effce1d
- File: 1 edited
Invariant/invariant.py
--- Invariant/invariant.py (r16f60cb)
+++ Invariant/invariant.py (rcbaa2f4)
@@ -56,5 +56,6 @@
         output_points = [(self.linearize_q_value(p[0]),
                           math.log(p[1]),
-                          p[2]/p[1]) for p in data_points if p[0]>0 and p[1]>0 and p[2]>0]
+                          p[2]/p[1]) for p in data_points if p[0]>0 and \
+                          p[1]>0 and p[2]>0]
 
         x_out, y_out, dy_out = zip(*output_points)
@@ -75,5 +76,6 @@
         :param data: Data1D object
         """
-        return [p[0]>0 and p[1]>0 and p[2]>0 for p in zip(data.x, data.y, data.dy)]
+        return [p[0]>0 and p[1]>0 and p[2]>0 for p in zip(data.x, data.y,
+                                                          data.dy)]
 
     def linearize_q_value(self, value):
@@ -150,6 +152,8 @@
         :param x: array of q-values
         """
-        p1 = numpy.array([self.dscale * math.exp(-((self.radius * q)**2/3)) for q in x])
-        p2 = numpy.array([self.scale * math.exp(-((self.radius * q)**2/3)) * (-(q**2/3)) * 2 * self.radius * self.dradius for q in x])
+        p1 = numpy.array([self.dscale * math.exp(-((self.radius * q)**2/3)) \
+                          for q in x])
+        p2 = numpy.array([self.scale * math.exp(-((self.radius * q)**2/3))\
+                    * (-(q**2/3)) * 2 * self.radius * self.dradius for q in x])
         diq2 = p1*p1 + p2*p2
         return numpy.array( [math.sqrt(err) for err in diq2] )
@@ -170,5 +174,6 @@
         # a radius of a guinier function
         if self.radius <= 0:
-            raise ValueError("Rg expected positive value, but got %s"%self.radius)
+            msg = "Rg expected positive value, but got %s"%self.radius
+            raise ValueError(msg)
         value = numpy.array([math.exp(-((self.radius * i)**2/3)) for i in x ])
         return self.scale * value
@@ -220,5 +225,6 @@
         """
         p1 = numpy.array([self.dscale * math.pow(q, -self.power) for q in x])
-        p2 = numpy.array([self.scale * self.power * math.pow(q, -self.power-1) * self.dpower for q in x])
+        p2 = numpy.array([self.scale * self.power * math.pow(q, -self.power-1)\
+                          * self.dpower for q in x])
         diq2 = p1*p1 + p2*p2
         return numpy.array( [math.sqrt(err) for err in diq2] )
@@ -238,7 +244,10 @@
         """
         if self.power <= 0:
-            raise ValueError("Power_law function expected positive power, but got %s"%self.power)
+            msg = "Power_law function expected positive power,"
+            msg += " but got %s"%self.power
+            raise ValueError(msg)
         if self.scale <= 0:
-            raise ValueError("scale expected positive value, but got %s"%self.scale)
+            msg = "scale expected positive value, but got %s"%self.scale
+            raise ValueError(msg)
 
         value = numpy.array([ math.pow(i, -self.power) for i in x ])
@@ -254,5 +263,6 @@
 
         If a model is given, it will be used to linearize the data before
-        the extrapolation is performed. If None, a simple linear fit will be done.
+        the extrapolation is performed. If None,
+        a simple linear fit will be done.
 
         :param data: data containing x and y such as y = ax + b
@@ -289,5 +299,6 @@
 
         # Uncertainty
-        if type(self.data.dy)==numpy.ndarray and len(self.data.dy)==len(self.data.x):
+        if type(self.data.dy)==numpy.ndarray and \
+            len(self.data.dy)==len(self.data.x):
             sigma = self.data.dy
         else:
@@ -299,7 +310,8 @@
         # Linearize the data
         if self.model is not None:
-            linearized_data = self.model.linearize_data(LoaderData1D(self.data.x[idx],
+            linearized_data = self.model.linearize_data(\
+                                            LoaderData1D(self.data.x[idx],
                                                          fx[idx],
                                                          dy = sigma[idx]))
         else:
             linearized_data = LoaderData1D(self.data.x[idx],
@@ -315,5 +327,6 @@
 
 
-        deltas = linearized_data.x*a+numpy.ones(len(linearized_data.x))*b-linearized_data.y
+        deltas = linearized_data.x*a + \
+                 numpy.ones(len(linearized_data.x))*b-linearized_data.y
         residuals = numpy.sum(deltas*deltas/sigma2)
 
@@ -323,5 +336,6 @@
         A = numpy.vstack([ linearized_data.x/linearized_data.dy,
                           1.0/linearized_data.dy]).T
-        (p, residuals, rank, s) = numpy.linalg.lstsq(A, linearized_data.y/linearized_data.dy)
+        (p, residuals, rank, s) = numpy.linalg.lstsq(A,
+                                        linearized_data.y/linearized_data.dy)
 
         # Get the covariance matrix, defined as inv_cov = a_transposed * a
@@ -354,6 +368,8 @@
 
        :param data: data must be of type DataLoader.Data1D
-        :param background: Background value. The data will be corrected before processing
-        :param scale: Scaling factor for I(q). The data will be corrected before processing
+        :param background: Background value. The data will be corrected
+            before processing
+        :param scale: Scaling factor for I(q). The data will be corrected
+            before processing
         """
         # Background and scale should be private data member if the only way to
@@ -434,5 +450,6 @@
         p, dp = extrapolator.fit(power=power, qmin=qmin, qmax=qmax)
 
-        return model.extract_model_parameters(constant=p[1], slope=p[0], dconstant=dp[1], dslope=dp[0])
+        return model.extract_model_parameters(constant=p[1], slope=p[0],
+                                              dconstant=dp[1], dslope=dp[0])
 
     def _get_qstar(self, data):
@@ -459,5 +476,6 @@
         if len(data.x) <= 1 or len(data.y) <= 1 or len(data.x)!= len(data.y):
             msg = "Length x and y must be equal"
-            msg += " and greater than 1; got x=%s, y=%s"%(len(data.x), len(data.y))
+            msg += " and greater than 1; got x=%s, y=%s"%(len(data.x),
+                                                          len(data.y))
             raise ValueError, msg
         else:
@@ -481,5 +499,6 @@
             return sum
         else:
-            #iterate between for element different from the first and the last
+            #iterate between for element different
+            #from the first and the last
             for i in xrange(1, n-1):
                 dxi = (data.x[i+1] - data.x[i-1])/2
@@ -534,5 +553,6 @@
             return math.sqrt(sum)
         else:
-            #iterate between for element different from the first and the last
+            #iterate between for element different
+            #from the first and the last
             for i in xrange(1, n-1):
                 dxi = (data.x[i+1] - data.x[i-1])/2
@@ -566,5 +586,6 @@
     def get_extrapolation_power(self, range='high'):
         """
-        :return: the fitted power for power law function for a given extrapolation range
+        :return: the fitted power for power law function for a given
+            extrapolation range
         """
         if range == 'low':
@@ -598,7 +619,8 @@
             self._low_q_limit = qmin/10
 
-        data = self._get_extrapolated_data(model=self._low_extrapolation_function,
-                                           npts=INTEGRATION_NSTEPS,
-                                           q_start=self._low_q_limit, q_end=qmin)
+        data = self._get_extrapolated_data(\
+                                    model=self._low_extrapolation_function,
+                                    npts=INTEGRATION_NSTEPS,
+                                    q_start=self._low_q_limit, q_end=qmin)
 
         # Systematic error
@@ -606,5 +628,6 @@
         # may not be a Guinier or simple power law. The following is a conservative
         # estimation for the systematic error.
-        err = qmin*qmin*math.fabs((qmin-self._low_q_limit)*(data.y[0] - data.y[INTEGRATION_NSTEPS-1]))
+        err = qmin*qmin*math.fabs((qmin-self._low_q_limit)*\
+                            (data.y[0] - data.y[INTEGRATION_NSTEPS-1]))
         return self._get_qstar(data), self._get_qstar_uncertainty(data)+err
 
@@ -632,5 +655,6 @@
 
         #create new Data1D to compute the invariant
-        data = self._get_extrapolated_data(model=self._high_extrapolation_function,
+        data = self._get_extrapolated_data(\
+                                    model=self._high_extrapolation_function,
                                            npts=INTEGRATION_NSTEPS,
                                            q_start=qmax, q_end=Q_MAXIMUM)
@@ -647,5 +671,6 @@
         invariant calculation.
 
-        :param npts_in: number of data points for which the extrapolated data overlap
+        :param npts_in: number of data points for which
+            the extrapolated data overlap
         :param q_start: is the minimum value to uses for extrapolated data
         :param npts: the number of points in the extrapolated distribution
@@ -663,5 +688,6 @@
             return numpy.zeros(0), numpy.zeros(0)
 
-        return self._get_extrapolated_data(model=self._low_extrapolation_function,
+        return self._get_extrapolated_data(\
+                                    model=self._low_extrapolation_function,
                                            npts=npts,
                                            q_start=q_start, q_end=q_end)
@@ -676,5 +702,6 @@
         invariant calculation.
 
-        :param npts_in: number of data points for which the extrapolated data overlap
+        :param npts_in: number of data points for which the
+            extrapolated data overlap
         :param q_end: is the maximum value to uses for extrapolated data
         :param npts: the number of points in the extrapolated distribution
@@ -689,5 +716,6 @@
             return numpy.zeros(0), numpy.zeros(0)
 
-        return self._get_extrapolated_data(model=self._high_extrapolation_function,
+        return self._get_extrapolated_data(\
+                                    model=self._high_extrapolation_function,
                                            npts=npts,
                                            q_start=q_start, q_end=q_end)
@@ -699,6 +727,8 @@
 
         :param range: a keyword set the type of extrapolation . type string
-        :param npts: the numbers of q points of data to consider for extrapolation
-        :param function: a keyword to select the function to use for extrapolation.
+        :param npts: the numbers of q points of data to consider
+            for extrapolation
+        :param function: a keyword to select the function to use
+            for extrapolation.
             of type string.
         :param power: an power to apply power_low function
@@ -710,9 +740,11 @@
         function = function.lower()
         if function not in ['power_law', 'guinier']:
-            raise ValueError, "Extrapolation function should be 'guinier' or 'power_law'"
+            msg = "Extrapolation function should be 'guinier' or 'power_law'"
+            raise ValueError, msg
 
         if range == 'high':
             if function != 'power_law':
-                raise ValueError, "Extrapolation only allows a power law at high Q"
+                msg = "Extrapolation only allows a power law at high Q"
+                raise ValueError, msg
             self._high_extrapolation_npts = npts
             self._high_extrapolation_power = power
@@ -735,6 +767,8 @@
         :return q_star: invariant of the data within data's q range
 
-        :warning: When using setting data to Data1D , the user is responsible of
-            checking that the scale and the background are properly apply to the data
+        :warning: When using setting data to Data1D ,
+            the user is responsible of
+            checking that the scale and the background are
+            properly apply to the data
 
         """
@@ -784,5 +818,6 @@
         # Compute the volume
         volume = self.get_volume_fraction(contrast, extrapolation)
-        return 2 * math.pi * volume *(1 - volume) * float(porod_const)/self._qstar
+        return 2 * math.pi * volume *(1 - volume) * \
+            float(porod_const)/self._qstar
 
     def get_volume_fraction(self, contrast, extrapolation=None):
@@ -818,5 +853,6 @@
 
         if self._qstar <= 0:
-            raise RuntimeError, "Invalid invariant: Invariant Q* must be greater than zero"
+            msg = "Invalid invariant: Invariant Q* must be greater than zero"
+            raise RuntimeError, msg
 
         # Compute intermediate constant
@@ -827,5 +863,6 @@
         # Compute volume fraction
         if discrim < 0:
-            raise RuntimeError, "Could not compute the volume fraction: negative discriminant"
+            msg = "Could not compute the volume fraction: negative discriminant"
+            raise RuntimeError, msg
         elif discrim == 0:
             return 1/2
@@ -838,5 +875,6 @@
         elif 0 <= volume2 and volume2 <= 1:
             return volume2
-        raise RuntimeError, "Could not compute the volume fraction: inconsistent results"
+        msg = "Could not compute the volume fraction: inconsistent results"
+        raise RuntimeError, msg
 
     def get_qstar_with_error(self, extrapolation=None):
@@ -882,5 +920,6 @@
             uncertainty = -1
         # Compute uncertainty
-        uncertainty = math.fabs((0.5 * 4 * k * self._qstar_err)/(2 * math.sqrt(1 - k * self._qstar)))
+        uncertainty = math.fabs((0.5 * 4 * k * \
+                        self._qstar_err)/(2 * math.sqrt(1 - k * self._qstar)))
 
         return volume, uncertainty
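A note on the Guinier hunk around new lines 152-159: the wrapped p1/p2 terms propagate the uncertainties of the fitted scale and radius of gyration through I(q) = scale * exp(-(Rg*q)**2/3) and combine them in quadrature. A minimal standalone sketch of that propagation (function and argument names are illustrative, not part of the module):

    import numpy

    def guinier_iq_error(q, scale, dscale, radius, dradius):
        # I(q) = scale * exp(-(Rg*q)**2/3)
        q = numpy.asarray(q, dtype=float)
        expo = numpy.exp(-(radius * q) ** 2 / 3.0)
        p1 = dscale * expo                           # (dI/dscale) * dscale
        p2 = scale * expo * (-(q ** 2) / 3.0) * 2.0 * radius * dradius  # (dI/dRg) * dRg
        return numpy.sqrt(p1 * p1 + p2 * p2)         # independent errors combined in quadrature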
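The hunks in Extrapolator.fit (around new lines 299-341) rewrap a weighted linear least-squares fit of the linearized data: both the design matrix and the right-hand side are divided by dy so that numpy.linalg.lstsq solves the 1/dy-weighted problem for the slope and intercept. A self-contained sketch of the same idea (illustrative names; the rcond handling differs from the 2010-era numpy call shown in the diff):

    import numpy

    def weighted_linear_fit(x, y, dy):
        # Fit y = a*x + b with per-point weights 1/dy by scaling both sides.
        x, y, dy = (numpy.asarray(v, dtype=float) for v in (x, y, dy))
        A = numpy.vstack([x / dy, numpy.ones(len(x)) / dy]).T
        p, _, _, _ = numpy.linalg.lstsq(A, y / dy, rcond=None)
        a, b = p
        return a, b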
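The two comment-splitting hunks (around new lines 499-504 and 553-558) sit inside the discrete invariant sums, where Q* = integral of q**2 * I(q) dq is accumulated point by point with a centred width dx_i = (x[i+1] - x[i-1])/2 for interior points, as the loop shows. A sketch of that quadrature; the treatment of the two end points below is an assumption about surrounding code the diff does not show:

    def qstar_from_points(x, y):
        # Q* ~ sum_i x[i]**2 * y[i] * dx_i with centred widths for interior points
        n = len(x)
        total = x[0] * x[0] * y[0] * (x[1] - x[0]) / 2.0                        # assumed half-width at the low end
        total += x[n - 1] * x[n - 1] * y[n - 1] * (x[n - 1] - x[n - 2]) / 2.0   # assumed half-width at the high end
        for i in range(1, n - 1):
            dxi = (x[i + 1] - x[i - 1]) / 2.0                                   # centred width, as in the diff
            total += x[i] * x[i] * y[i] * dxi
        return total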
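The get_volume_fraction and get_surface hunks (around new lines 818-880) wrap the algebra that inverts Q* = 2*pi**2*(contrast)**2 * phi*(1 - phi) for the volume fraction phi, which is why a discriminant is checked and 1/2 is returned when it vanishes, and then form the specific surface 2*pi*phi*(1 - phi)*Cp/Q*. A minimal sketch of that inversion; the constant k below omits the unit-conversion factor the module applies, and the names are illustrative:

    import math

    def volume_fraction(qstar, contrast):
        # Q* = 2*pi**2 * contrast**2 * phi*(1 - phi)  =>  phi**2 - phi + k = 0
        k = qstar / (2.0 * (math.pi * abs(float(contrast))) ** 2)
        discrim = 1.0 - 4.0 * k
        if discrim < 0:
            raise RuntimeError("negative discriminant: no physical volume fraction")
        if discrim == 0:
            return 0.5
        root = math.sqrt(discrim)
        phi1 = (1.0 - root) / 2.0
        phi2 = (1.0 + root) / 2.0
        return phi1 if 0.0 <= phi1 <= 1.0 else phi2   # keep the physical root

    def specific_surface(qstar, phi, porod_const):
        # S/V = 2*pi*phi*(1 - phi)*Cp / Q*, matching the wrapped return statement
        return 2.0 * math.pi * phi * (1.0 - phi) * float(porod_const) / qstar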