Package helpers :: Module FilterTableCalculator
[hide private]

Source Code for Module helpers.FilterTableCalculator

  1  #! /usr/bin/env python 
  2  #------------------------------------------------------------------------------ 
  3  # $Id: FilterTableCalculator.py 9753 2013-03-14 10:47:27Z NicholasCross $ 
  4  """ 
  5     Calculates constants for the Filter table. Based on the filter curve 
  6     properties and already calibrated catalogues. These constants are the 50% 
  7     cut-on and cut-offs, the A/E(B-V) extinction values, the VegaToAB and 1s 
  8     magnitude limit in Vega. The output is written to a file, not the database, 
  9     because some of the numbers in the DB come from external sources and should 
 10     not be overwritten. 
 11   
 12     @author: N.J.G. Cross 
 13     @org:    WFAU, IfA, University of Edinburgh 
 14   
 15     @newfield contributors: Contributors, Contributors (Alphabetical Order) 
 16     @contributors: R.S. Collins 
 17  """ 
 18  #------------------------------------------------------------------------------ 
 19  import math 
 20  import numpy 
 21  import os 
 22   
 23  from   scipy                                import interpolate 
 24  from   Scientific.Functions import LeastSquares as LSQ 
 25   
 26  from   wsatools.CLI                 import CLI 
 27  import wsatools.DataFactory             as df 
 28  from   wsatools.DbConnect.DbSession import DbSession 
 29  import wsatools.DbConnect.DbConstants   as dbc 
 30  from   wsatools.Logger              import Logger 
 31  import wsatools.Statistics              as stats 
 32  #------------------------------------------------------------------------------ 
 33  try: 
 34      import biggles as big 
 35  except ImportError: 
 36      haveBig = False 
 37  else: 
 38      haveBig = True 
 39  #------------------------------------------------------------------------------ 
 40   
class FilterCalculator(DbSession):
    """ Calculate filter parameters.

        @todo: make more sophisticated. Transmission curves read from FITS
               files with instructions on how to use encapsulated in header
               values.
    """
    # Wavelength step of the common rebinning grid. Resolution=4A.
    wlRes = 4.
    # Wavelength span of the common grid in Angstroms. UV-NIR.
    initLamb, finalLamb = 1500, 25000

    # Optional list of lower-cased filter short names to restrict the run to;
    # None means every non-default filter (set from the -b command-line option).
    filterList = None
    maxNoDets = 50000 # Maximum number of detections in a single detector
    # Polynomial giving 1/(A/E(B-V)) as a function of effective wavelength in
    # Angstroms; used in calculatePars() when no transmission file exists.
    leffAebvFit = numpy.poly1d(numpy.array([3.70500678e-18, -3.17077496e-13,
                                            1.20646461e-08, -3.75441111e-05,
                                            1.89276578e-01]))
    def calculatePars(self):
        """ Calculate the Filter table constants (50% cut-on/cut-off, A/E(B-V),
            VegaToAB, one-second magnitude limit) for each requested filter and
            write them to self.outputFile.
        """
        sysc = self.sysc
        self.directory = sysc.filterDataDirPath()

        # Reference data files: mean elliptical-galaxy spectrum, a broad V-band
        # (ACS F555W) curve, and a Vega reference SED.
        self.refEllGalSpec = os.path.join(sysc.filterRefDataDirPath(),
                                          'mean_ell_spec.dat')
        self.acsBroadVfilt = os.path.join(sysc.filterRefDataDirPath(),
                                          'pcf555wsys.txt')
        self.vegaReference = os.path.join(sysc.filterRefDataDirPath(),
                                          'Vega_reference.sed')
        # Read in reference data and rebin onto the common wavelength grid.
        self.ellGalData = self.rebinFilterData(self.readInFilterFile(
            self.refEllGalSpec, [0, 2], lambFact=1., transFact=1.))
        self.fillMissingEllData()
        self.acsVData = self.rebinFilterData(self.readInFilterFile(
            self.acsBroadVfilt, [0, 1], lambFact=1., transFact=1.))
        self.vegaRef = self.rebinFilterData(self.readInFilterFile(
            self.vegaReference, [0, 1], lambFact=1., transFact=1.))
        # Diagnostic plots of the reference curves (trial runs only, and only
        # if the optional biggles plotting package imported successfully).
        if self.isTrialRun and haveBig:
            p = big.FramedPlot()
            p.title = "Ell gal curve"
            p.xlabel = r"$\lambda$"
            p.ylabel = r"$trans$"
            p.xrange = self.initLamb, self.finalLamb
            p.add(big.Curve(self.ellGalData[:, 0], self.ellGalData[:, 1], color="blue"))
            p.show()

            p = big.FramedPlot()
            p.title = "Broad V curve"
            p.xlabel = r"$\lambda$"
            p.ylabel = r"$trans$"

            p.add(big.Curve(self.acsVData[:, 0], self.acsVData[:, 1], color="blue"))
            p.show()

        filterTable = df.Table("Filter", self)

        # read in files

        # Transmission file names are <telescope><Instrument>_<id>_<name>.txt
        instrument = "%s%s" % (self.sysc.telescopeName.lower(),
                               self.sysc.instrumentName.capitalize())

        # Restrict to the requested short names, else all non-default filters.
        if self.filterList:
            nonDefFilters = []
            for filterID in filterTable.getAll("filterID"):
                filterTable.setCurRow(filterID=filterID)
                if filterTable.getAttr("shortName").lower() in self.filterList:
                    nonDefFilters.append(filterID)
        else:
            nonDefFilters = [filterID for filterID in
                             filterTable.getAll("filterID") if filterID > 0]
        outputRows = ["# filterID, cutOn, cutOff, aebv, vegaToAB, oneSecMlVg\n"]
        for filterID in nonDefFilters:
            # Start each filter from database default (null) values.
            self.waveCuts = (dbc.realDefault(), dbc.realDefault())
            aebv = dbc.realDefault()
            vegaToAB = dbc.realDefault()
            filterTable.setCurRow(filterID=filterID)
            if filterTable.getAttr("shortName") == 'blnk': continue
            filePath = os.path.join(self.directory, '%s_%d_%s.txt' %
                                    (instrument, filterID,
                                     filterTable.getAttr("shortName").lower()))
            if os.path.exists(filePath):
                Logger.addMessage("Reading filter transmission data from %s"
                                  % filePath)
                filterData = self.readInFilterFile(filePath)

                # Calc from filter transmissity tables
                # But actual cutoffs different due to operating temperature and angle.
                self.waveCuts = getCuts(filterData)
                # Use these to adjust filter function

                filterData = self.recalWavelength(filterTable, filterData)
                #@@TODO convert transmission to full QE
                #filterData=self.getQE(filterData)
                aebv = self.getAebv(filterData)
                vegaToAB = self.getVegaToAB(filterData)
            else:
                # No measured curve: estimate A/E(B-V) from the database
                # cut-on/cut-off via the effective-wavelength polynomial.
                Logger.addMessage("No filter transmission data for filter %s"
                                  % filterTable.getAttr("shortName"))
                Logger.addMessage("Will estimate A/E(B-V) from wavelength "
                                  "cuts")
                cutOn = filterTable.getAttr("cutOn")
                cutOff = filterTable.getAttr("cutOff")
                self.waveCuts = (cutOn, cutOff)
                # Convert mid-point of the cuts from microns to Angstroms.
                leff = 10000.*(0.5 * (cutOn + cutOff))
                aebv = 1. / self.leffAebvFit(leff)
            Logger.addMessage("Calculating one-second magnitude limit for filter %s..." % filterID)
            oneSecMlVg = self.getOneSecMlVg(filterID)
            Logger.addMessage("Calculated one-second magnitude limit for filter %s" % filterID)

            outputRows.append("%2d\t%5.4f\t%5.4f\t%5.4f\t%4.3f\t%6.3f\n" %
                              (filterID, self.waveCuts[0], self.waveCuts[1],
                               aebv, vegaToAB, oneSecMlVg))
        # NOTE(review): relies on CPython ref-counting to close the handle.
        file(self.outputFile, "w").writelines(outputRows)
        Logger.addMessage("Written outputs to %s" % self.outputFile)
154
155 - def readInFilterFile(self, filePath, columns=[0, 1], lambFact=None, 156 transFact=None):
157 """Read in data from file. Convert to correct units Angs and trans 158 rebin to correct 159 """ 160 data = [] 161 lines = file(filePath).readlines() 162 if (not lambFact or not transFact): 163 lambFact = 1. 164 transFact = 1. 165 for line in lines: 166 if line[0] == '#': 167 if line[1:5] == "Wlen": 168 lambdaUnit = line.split('(')[columns[0] + 1].split(')')[0] 169 transUnit = line.split('(')[columns[1] + 1].split(')')[0] 170 if lambdaUnit == 'nm': 171 lambFact = 10 172 if transUnit == '%': 173 transFact = 0.01 174 for line in lines: 175 if line[0] != '#': 176 wrds = line.split() 177 if len(wrds) >= 2: 178 data.append((lambFact * float(wrds[columns[0]]), 179 transFact * float(wrds[columns[1]]))) 180 return numpy.array(sorted(data))
181
    def rebinFilterData(self, filterData):
        """ Rebin a data set with constant or varying step size onto the
            class's constant-resolution wavelength grid (initLamb to
            finalLamb in steps of wlRes Angstroms).

            @param filterData: Array of (wavelength, value) rows.

            @return: Array on the regular grid; grid points outside the
                     input wavelength coverage get value 0.

            @todo: smoothing, lsq
        """
        # Centres of the output wavelength bins.
        newLamb = numpy.arange((self.initLamb + 0.5 * self.wlRes),
                               (self.finalLamb + 0.5 * self.wlRes), self.wlRes)
        newData = numpy.zeros([newLamb.size, 2])
        newData[:, 0] = newLamb
        # missing data

        # typical resolution
        wlData = filterData[:, 0]
        for ii, lamb in enumerate(newLamb):
            if lamb > max(wlData) or lamb < min(wlData):
                # Outside input coverage: leave this bin at zero.
                indices = []
            else:
                # Index of the input sample closest in wavelength to this bin.
                minOff = numpy.min(numpy.abs(numpy.subtract(wlData, lamb)))
                index = numpy.nonzero(numpy.equal(
                    numpy.abs(numpy.subtract(wlData, lamb)), minOff))[0][0]

                # Now interpolate between 3 closest values
                indices = [index - 1, index, index + 1]
                # Shift the 3-point window by one to stay inside the array at
                # either boundary (it can only overrun by one sample).
                if min(indices) < 0: indices = [(idx + 1) for idx in indices]
                if max(indices) >= filterData.shape[0]:
                    indices = [(idx - 1) for idx in indices]

            if (len(indices) >= 2):
                xx = filterData[numpy.array(indices), 0]
                yy = filterData[numpy.array(indices), 1]
                # @@TODO Use linalg to do lstsq fit??
                fit = interpolate.interp1d(numpy.reshape(xx, [1, xx.size])[0],
                                           numpy.reshape(yy, [1, yy.size])[0])
                newData[ii][1] = fit(lamb)
            else:
                newData[ii][1] = 0.
        return(newData)
222
223 - def recalWavelength(self, filterTable, filterData):
224 """ 225 """ 226 cutOn = filterTable.getAttr("cutOn") 227 cutOff = filterTable.getAttr("cutOff") 228 # Find scaling relation 229 scale = (cutOff - cutOn) / (self.waveCuts[1] - self.waveCuts[0]) 230 offset = 10000.*(cutOn - scale * self.waveCuts[0]) 231 filterData[:, 0] = numpy.add(numpy.multiply(filterData[:, 0], scale), offset) 232 return self.rebinFilterData(filterData)
233
234 - def getAebv(self, filterData):
235 """ 236 """ 237 deltaMv = 0.001 # check this 238 cAAv = calcA_AV( 239 numpy.multiply(0.0001, filterData[:, 0])) 240 dustFac = numpy.power(10, numpy.multiply(cAAv, -0.4 * deltaMv)) 241 Afilter = self.calcDeltaM(filterData, dustFac) 242 Av = self.calcDeltaM(self.acsVData, dustFac) 243 return 3.252 * float(Afilter) / float(Av)
244
245 - def calcDeltaM(self, filterData, dustFac):
246 """ 247 """ 248 denom = numpy.multiply(numpy.multiply( 249 self.wlRes, self.ellGalData[:, 1]), filterData[:, 1]) 250 numer = numpy.multiply(denom, dustFac) 251 if self.isTrialRun and haveBig: 252 p = big.FramedPlot() 253 p.title = "filter trans curve" 254 p.xlabel = r"$\lambda$" 255 p.ylabel = r"$trans$" 256 p.xrange = 0.9 * 10000 * self.waveCuts[0], 1.1 * 10000 * self.waveCuts[1] 257 maxVal = max(numpy.max(denom), numpy.max(self.ellGalData[:, 1]), numpy.max(numer)) 258 maxVal2 = numpy.max(filterData[:, 1]) 259 p.add(big.Curve(filterData[:, 0], numpy.divide(numpy.multiply(filterData[:, 1], maxVal), maxVal2), color="red")) 260 p.add(big.Curve(filterData[:, 0], self.ellGalData[:, 1], color="blue")) 261 p.add(big.Curve(filterData[:, 0], numpy.multiply(denom, 0.9), color="green")) 262 p.add(big.Curve(filterData[:, 0], numpy.multiply(numer, 0.8), color="black")) 263 p.add(big.Curve(filterData[:, 0], numpy.multiply(numpy.multiply(dustFac, 0.7), maxVal), color="cyan")) 264 p.show() 265 266 return numpy.log10(numpy.divide(numpy.sum(numer), numpy.sum(denom)))
267
268 - def fillMissingEllData(self):
269 """ Fill in any zeros in ellip galaxy data 270 """ 271 goodData = numpy.nonzero(self.ellGalData[:, 1]) 272 lhsIndex = goodData[0][0] 273 rhsIndex = goodData[0][goodData[0].size - 1] 274 ratioLHS = float(self.ellGalData[lhsIndex, 1]) / float( 275 self.ellGalData[lhsIndex, 0]) 276 ratioRHS = float(self.ellGalData[rhsIndex, 1]) / float( 277 self.ellGalData[rhsIndex, 0]) 278 279 mask1 = numpy.less(self.ellGalData[:, 0], 280 self.ellGalData[lhsIndex, 0]) 281 mask2 = numpy.greater(self.ellGalData[:, 0], 282 self.ellGalData[rhsIndex, 0]) 283 self.ellGalData[:, 1] = numpy.add(self.ellGalData[:, 1], numpy.multiply( 284 mask1, numpy.multiply(self.ellGalData[:, 0], ratioLHS))) 285 self.ellGalData[:, 1] = numpy.add(self.ellGalData[:, 1], numpy.multiply( 286 mask2, numpy.multiply(self.ellGalData[:, 0], ratioRHS)))
287
288 - def getVegaToAB(self, filterData):
289 """Calculate vegaToAB correction, Based on method from Benitez 290 """ 291 clightAHz = 2.99792458e18 292 # Operations necessary for normalization and detector effects 293 filterData[:, 1] = numpy.multiply(filterData[:, 1], filterData[:, 0]) 294 #normalTrans=numpy.trapz(filterData[:,1],filterData[:,0]) 295 296 const = 1. / (numpy.trapz(numpy.divide(filterData[:, 1], numpy.multiply( 297 filterData[:, 0], filterData[:, 0])), filterData[:, 0]) * clightAHz) 298 #print (numpy.trapz(numpy.divide(filterData[:,1],numpy.multiply( 299 # filterData[:,0],filterData[:,0])),filterData[:,0]),const) 300 301 return -2.5 * math.log10(numpy.trapz( 302 numpy.multiply(self.vegaRef[:, 1], filterData[:, 1]), 303 filterData[:, 0]) * const) - 48.60
304 305 #-------------------------------------------------------------------------- 306
    def getOneSecMlVg(self, filterID):
        """Select short exposure stacks in DB and measure mag limit
        for low airmass, low seeing, low back stacks.

        @param filterID: ID of the filter to measure.

        @return: Clipped-median one-second magnitude limit in Vega.
        """
        # Find typical values of seeing, sky brightness for low airmass,
        # low totalExpTime.
        nObsLimit = 50  # minimum number of observations per exposure group
        progTable = df.ProgrammeTable(self)
        # @TODO: Set up for non-UKIDSS filters...
        # Per-totalExpTime seeing statistics over good-quality stacks in the
        # main UKIDSS programmes (IDs 101-105).
        limitValues = self.query(
            selectStr="totalExpTime,min(xPixSize*seeing),avg(xPixSize*seeing),"
            "max(xPixSize*seeing),COUNT(*)",
            fromStr="CurrentAstrometry as ca,MultiframeDetector as md,"
            "Multiframe as m,ProgrammeFrame as p",
            whereStr="md.multiframeID=ca.multiframeID and md.extNum=ca.extNum "
            "and md.multiframeID=m.multiframeID and md.filterID=%d and "
            "m.deprecated=0 and md.deprecated=0 and m.frameType like "
            "'%%stack' and amStart<1.3 and amEnd<1.3 and abs(b)>30 "
            "and tableRows<%d and seeing>0. and p.multiframeID=m.multiframeID "
            " and p.programmeID in (101,102,103,104,105) "
            "group by totalExpTime "
            "order by totalExpTime" % (filterID, self.maxNoDets))


        if not limitValues:
            # Use programmes which have been QCed - ProgrammeBuilder has run
            limitValues = self.query(
                selectStr="totalExpTime,min(xPixSize*seeing),"
                "avg(xPixSize*seeing),max(xPixSize*seeing),COUNT(*)",
                fromStr="CurrentAstrometry as ca,MultiframeDetector as md,"
                "Multiframe as m,ProgrammeFrame as p,Programme as pg",
                whereStr="md.multiframeID=ca.multiframeID and md.extNum=ca.extNum "
                "and md.multiframeID=m.multiframeID and md.filterID=%d and "
                "m.deprecated=0 and md.deprecated=0 and m.frameType like "
                "'%%stack' and amStart<1.3 and amEnd<1.3 "
                "and tableRows<%d and seeing>0. and p.multiframeID=m.multiframeID "
                " and p.programmeID=pg.programmeID and pg.sourceTable!='NONE' "
                "group by totalExpTime "
                "order by totalExpTime" % (filterID, self.maxNoDets))

        # Select lowest limitValues>20 observations. If none, then combine
        values = None
        total = 0
        tempValues = None
        for tt, mnS, see, mxS, numb in limitValues:
            if numb >= nObsLimit:
                # Enough stacks at this exposure time alone: take a seeing
                # range 30% of the way from the mean towards min and max.
                mnSee = see - 0.3 * (see - mnS)
                mxSee = see + 0.3 * (mxS - see)
                values = [tt, mnSee, mxSee]
                break
            else:
                # Otherwise accumulate groups until enough stacks combined.
                total += numb
                if total >= nObsLimit:
                    mnSee = see - 0.3 * (see - mnS)
                    mxSee = see + 0.3 * (mxS - see)
                    tempValues = [tt, mnSee, mxSee]

        # '=' selects the single exposure time; '<=' selects the combination
        # of all groups up to the accumulated one.
        sel = '='
        if not values:
            values = tempValues
            sel = '<='
        magLimList = []
        if values:
            # Pick up to nObsLimit stacks within the chosen seeing range.
            testObs = self.query(
                selectStr="top %d p.programmeID,md.multiframeID,md.extNum,"
                "md.totalExpTime" % nObsLimit,
                fromStr="CurrentAstrometry as ca,MultiframeDetector as md,"
                "ProgrammeFrame as p,Multiframe as m",
                whereStr="md.multiframeID=ca.multiframeID and md.extNum="
                "ca.extNum and md.multiframeID=p.multiframeID and "
                "m.filterID=%d and md.deprecated=0 and (ca.xPixSize*seeing)>="
                "%s and (ca.xPixSize*seeing)<=%s and totalExpTime%s%s and "
                "m.multiframeID=md.multiframeID and m.deprecated=0 and "
                "m.frameType like '%%stack' and amStart<1.3 and amEnd<1.3 "
                "and tableRows<%d order by seeing desc" %
                (filterID, values[1], values[2], sel, values[0], self.maxNoDets))
            for progID, mfID, eNum, totExpTime in testObs:
                progTable.setCurRow(programmeID=progID)
                # Clean stellar detections (class=-1, ppErrBits=0) sorted by
                # magnitude for this detector.
                mags = numpy.array(self.query(
                    selectStr="aperMag3,aperMag3Err",
                    fromStr="%s as p,%s as d" %
                    (progTable.getPhotometryTable(),
                     progTable.getDetectionTable()),
                    whereStr="p.multiframeID=d.multiframeID and "
                    "p.extNum=d.extNum and p.seqNum=d.seqNum and "
                    "p.multiframeID=%d and p.extNum=%d "
                    "and d.ppErrBits=0 and aperMag3>0 and aperMag3Err>0 "
                    "and class=-1 order by aperMag3" % (mfID, eNum)))
                if mags.size > 0:
                    magLim = getRobustLimit(mags)
                    if magLim:
                        # Scale each limit back to a one-second exposure.
                        magLimList.append(magLim - 1.25 * math.log10(totExpTime))

        return stats.clippedMADHistogram(magLimList)[0]
403 404
def getCuts(filterData, trVal=0.5):
    """ Locate the wavelengths where the filter transmission crosses trVal
        (by default the 50% cut-on and cut-off points).

        @param filterData: Array of (wavelength in Angstroms, transmission)
                           rows, sorted by wavelength.
        @param trVal:      Transmission level defining the cuts.

        @return: Tuple (cutOn, cutOff) in microns; an entry keeps the
                 database default value if no crossing is found.
    """
    angsToMicrons = 0.0001 # Convert from angstroms to microns
    nRows = filterData.shape[0]
    # Find two regions where tr~trVal
    cutOn = dbc.realDefault()
    cutOff = dbc.realDefault()
    for idx in range(nRows - 1):
        rising = (filterData[idx][1] < trVal and
                  filterData[idx + 1][1] > trVal)
        falling = (filterData[idx][1] > trVal and
                   filterData[idx + 1][1] < trVal)
        if rising or falling:
            # Fit a local quadratic around the crossing and solve for the
            # wavelength at which it equals trVal.
            polyFit = getFit(idx, filterData)
            polyFit[len(polyFit) - 1] -= trVal
            # Fix: clamp the acceptance window to the array. The old code
            # used filterData[idx - 1] (which wrapped to the *last* row for
            # idx == 0, discarding a crossing at the start) and
            # filterData[idx + 2] (an IndexError for a crossing at the
            # penultimate row).
            loWavelength = filterData[max(idx - 1, 0)][0]
            hiWavelength = filterData[min(idx + 2, nRows - 1)][0]
            for root in numpy.roots(polyFit):
                if loWavelength < root < hiWavelength:
                    if rising:
                        # Transmission rising with increasing wavelength.
                        cutOn = angsToMicrons * root
                    else:
                        # Transmission falling with increasing wavelength
                        # (the original comment wrongly said "rising" here).
                        cutOff = angsToMicrons * root
    return (cutOn, cutOff)
431
def getFit(idx, filterData):
    """ Fit a quadratic to the five data points centred on row idx of
        filterData, shifting the window to stay inside the array at either
        boundary.

        @param idx:        Central row index of the five-point window.
        @param filterData: Array of (x, y) rows.

        @return: numpy.polyfit coefficients of the degree-2 fit, highest
                 power first.
    """
    nRows = len(filterData)
    indices = [idx - 2, idx - 1, idx, idx + 1, idx + 2]
    if min(indices) < 0:
        # Shift the window up so it starts at row 0.
        indices = [ii - indices[0] for ii in indices]
    if max(indices) >= nRows:
        # Fix: shift the window down so it *ends* at the last row. The old
        # code subtracted indices[-1] itself, yielding negative indices that
        # silently wrapped around and fitted the wrong points.
        shift = indices[-1] - (nRows - 1)
        indices = [ii - shift for ii in indices]
    idxArray = numpy.array(indices)
    xx = filterData[idxArray][:, 0]
    yy = filterData[idxArray][:, 1]
    return numpy.polyfit(xx, yy, 2)
441
def calcA_AV(lambArray):
    """ Extinction curve A(lambda)/A(V) for wavelengths given in microns,
        using a CCM power law in the NIR, O'Donnell polynomials in the
        optical, and CCM in the UV.

        @param lambArray: Numpy array of wavelengths in microns.

        @return: Array of A(lambda)/A(V) values (zero outside the
                 0.3-8.0 inverse-micron range).
    """
    Rv = 3.1
    # Optical polynomial coefficients, listed lowest power first.
    param1 = [1., 0.17699, -0.50447, -0.02427, 0.72085, 0.01979, -0.77530, 0.32999]
    param2 = [0., 1.41338, 2.28305, 1.07233, -5.38434, -0.62551, 5.30260, -2.09002]
    # numpy.poly1d expects highest power first, so reverse them.
    p1 = numpy.poly1d(param1[::-1])
    p2 = numpy.poly1d(param2[::-1])
    p3 = numpy.poly1d([-0.009779, -0.04473, 0., 0.])
    p4 = numpy.poly1d([0.1207, 0.2130, 0., 0.])

    invLamb = 1. / lambArray
    # Wavelength-regime masks in inverse microns.
    nirMask = (invLamb >= 0.3) & (invLamb < 1.1)
    optMask = (invLamb >= 1.1) & (invLamb < 3.3)
    uvMask = (invLamb >= 3.3) & (invLamb < 5.9)
    farUvMask = (invLamb >= 5.9) & (invLamb <= 8.)

    aArray = numpy.zeros(lambArray.shape)
    bArray = numpy.zeros(lambArray.shape)
    # NIR - CCM power law.
    aArray += nirMask * (0.574 * invLamb ** 1.61)
    bArray += nirMask * (-0.527 * invLamb ** 1.61)
    # Optical - use O'Donnell polynomials in (1/lambda - 1.82).
    aArray += optMask * p1(invLamb - 1.82)
    bArray += optMask * p2(invLamb - 1.82)
    # UV - CCM rational terms (computed everywhere, applied via the masks).
    cArray = 1.752 - 0.316 * invLamb - 0.104 / (0.341 + (invLamb - 4.67) ** 2.)
    dArray = -3.090 + 1.852 * invLamb + 1.206 / (0.263 + (invLamb - 4.62) ** 2.)
    aArray += uvMask * (cArray + p3(invLamb - 5.9))
    bArray += uvMask * (dArray + p4(invLamb - 5.9))
    aArray += farUvMask * cArray
    bArray += farUvMask * dArray
    return aArray + bArray / Rv
491
def getRobustLimit(magnitudeData):
    """Find best fit function.

    Bins the (mag, magErr) data with a fixed number of objects per bin,
    fits mean error as a polynomial of mean magnitude, and solves for the
    magnitude at which the error reaches rmsLimit.

    @param magnitudeData: Array of (aperMag3, aperMag3Err) rows sorted by
                          magnitude.

    @return: The limiting magnitude, or None if no sensible root is found.
    """
    # Use bins with fixed number of objects - not fixed width
    rmsLimit = 0.217  # magnitude error defining the limit
    noObjsBin = 10
    # NOTE: Python 2 integer division here; at least 10 bins.
    nBins = max(10, (magnitudeData.shape[0] / noObjsBin))
    nObjsBin = int(math.floor(magnitudeData.shape[0] / float(nBins)))
    data = []
    magList = []
    mgErrList = []

    for bb in range(nBins):
        mnInd = bb * nObjsBin
        mxInd = min(((bb + 1) * nObjsBin), (magnitudeData.shape[0] - 1))
        mnMag = magnitudeData[mnInd, 0]
        mxMag = magnitudeData[mxInd, 0]
        avMag = numpy.mean(magnitudeData[mnInd:mxInd, 0])

        # Errors of the objects in this bin's magnitude range.
        mgErrs = magnitudeData[numpy.nonzero(numpy.logical_and(
            numpy.greater(magnitudeData[:, 0], mnMag),
            numpy.less_equal(magnitudeData[:, 0], mxMag)))[0], 1]

        # Robust mean and spread of the bin's errors; defaults if too few.
        if mgErrs.size > 5:
            meanErr, sdErr = stats.clippedMADHistogram(mgErrs)
        else:
            meanErr, sdErr = dbc.realDefault(), dbc.realDefault()
        # Keep bins with a valid spread and an error within a factor ~10 of
        # the limit (rejects defaults and wild outliers).
        if sdErr > 0. and meanErr > 0.1 * rmsLimit and meanErr < 10.*rmsLimit:
            data.append((float(avMag), float(meanErr), float(sdErr)))
            #errs.append(sdErr)
            magList.append(avMag)
            mgErrList.append(meanErr)
    # Initial guess for the quadratic err(mag) fit parameters.
    params = [50., -5., 0.1]
    mLimit = []
    if len(data) > 3:
        fit = LSQ.polynomialLeastSquaresFit(params, data)

        #polyFit=numpy.polyfit(numpy.array(magList),numpy.array(mgErrList),2)
        #polyFit[len(polyFit)-1]-=rmsLimit

        # Shift the constant term so roots are where err == rmsLimit;
        # presumably LSQ returns coefficients lowest power first, hence the
        # reversal for numpy.roots - confirm against Scientific docs.
        fit[0][0] -= rmsLimit
        polyFit = [fit[0][len(fit[0]) - 1 - pp] for pp in range(len(fit[0]))]
        roots = numpy.roots(polyFit)
        magListRange = max(magList) - min(magList)
        centre = 0.5 * (max(magList) + min(magList))
        for root in roots:
            # Find one which is sensible
            if root > centre - 0.75 * magListRange and root < centre + 0.75 * magListRange:
                mLimit.append(root)

    # Implicitly returns None when no sensible root was found.
    if len(mLimit) >= 1:
        return mLimit[0]
#------------------------------------------------------------------------------
# Entry point for script.

# Allow module to be imported as well as executed from the command line
if __name__ == "__main__":
    # Define the command-line interface: mandatory output file argument plus
    # optional filter-data directory and filter short-name list.
    CLI.progArgs.append(CLI.Argument("outputFile",
                                     "OutputFile.dat"))
    CLI.progOpts += [
        CLI.Option('f', "filterFileDir",
                   "Directory of filter files", "FILE",),
        CLI.Option('b', "filters", "Comma separated list of filters to be used",
                   "LIST")]

    cli = CLI("FilterTableCalculator", "$Revision: 9753 $", __doc__)
    Logger.isVerbose = False
    Logger.addMessage(cli.getProgDetails())
    cu = FilterCalculator(cli=cli)
    cu.outputFile = cli.getArg("outputFile")
    # Command-line overrides of the default data directory and filter set.
    if cli.getOpt("filterFileDir"):
        cu.directory = cli.getOpt("filterFileDir")
    if cli.getOpt("filters"):
        cu.filterList = cli.getOpt("filters").lower().split(",")

    cu.calculatePars()

#------------------------------------------------------------------------------
# Change log:
#
# 28-Oct-2009, NJC: Original version.