
Source Code for Module invocations.cu9.cu9

#! /usr/bin/env python
#------------------------------------------------------------------------------
#$Id: cu9.py 9470 2012-10-29 12:07:58Z NicholasCross $
"""
   Invoke CU9. Produce list-driven intra-WFCAM remeasurements for a given
   programme.

   @TODO: Update to newer CU7-style way of working (e.g. enable ingest,
          logging, dropping indices, updating tables, tidying up files etc.)

   @author: N.J.G. Cross
   @org:    WFAU, IfA, University of Edinburgh

   @newfield contributors: Contributors, Contributors (Alphabetical Order)
   @contributors: R.S. Collins
"""
#------------------------------------------------------------------------------
from __future__      import division, print_function

import os
import time
from   collections import defaultdict

import dircache
import numpy
import pyfits
from   pyfits import Column

from   wsatools.CLI                      import CLI
import wsatools.DataFactory              as df
import wsatools.DbConnect.CommonQueries  as queries
from   wsatools.DbConnect.CuSession      import CuSession
from   wsatools.DbConnect.DbSession      import Ingester
import wsatools.DbConnect.Schema         as schema
import wsatools.ExternalProcess          as extp
from   wsatools.File                     import File
import wsatools.FitsUtils                as fits
from   wsatools.Logger                   import Logger
import wsatools.Utilities                as utils
import wsatools.WfcamsrcInterface        as wfcamsrc

#------------------------------------------------------------------------------

class Cu9(CuSession):
    """ Produce list-driven remeasurements (data within a survey)
        for a given programme.
    """
    # Define class constants (access as Cu9.varName)
    cuNum = 9

    #: Maximum amount of memory that CU9 should consume in kilobytes.
    memoryLimit = 0.5 * utils.getSystemMemoryKb()

    #: CASU software tool executable to produce lists.
    casuToolName = "cir_imcore_list"

    #--------------------------------------------------------------------------
    # Define public member variable default values (access as obj.varName)
    # - these need to be set from command-line options.

    dateRange = CuSession.sysc.obsCal.dateRange()
    """ Range of observation dates to include.
    """
    dateString = time.strftime("%Y%m%d", time.gmtime())
    """ Date string for the output directory name.
    """
    internalOnly = False
    """ Only run internal list-driven photometry programmes.
    """
    runSynop = False
    """ Run list-driven photometry on each individual stack? Just those that
        contribute to the MergeLog table.
    """
    oneOnly = False
    """ Use one FrameGroup only, for testing.
    """
    skipCats = False
    """ Skip catalogue creation and move directly on to ingest (testing only).
    """
    isTrialRun = False

    #--------------------------------------------------------------------------
    def _onRun(self):
        """
        Invoke list-driven photometry for a specified programme in the WFCAM
        Science Archive. The technique produces consistent photometry across
        all WFCAM filters for the image data within a given field.

        Method: for the given programme ID, the procedure is: i) establish the
        list of fields to be analysed; ii) remove any previous remeasurements
        based on deprecated or updated frame sets; iii)

        @TODO: Break up into functions.
        @TODO: First just work on MergeLog - forget multi-epoch.
        @TODO: A lot of work is needed on the schema - the tags etc.
        @TODO: Run imcore_list detector by detector for increased efficiency?
        @TODO: TILES -- do not have filtered tiles for OB-level data.
               Have to reproduce!! Or run list-driven on pawprints and
               calculate the weighted mean (may also work on overlaps,
               but seeing issues).
        @TODO: Run list-driven on pawprints for all tiles.
        @TODO: Luptitudes instead of magnitudes.
        @TODO: Run from J-band list, not the main Source table - option in
               survey requirements.
        @TODO: Update RequiredListDrivenProduct - like RequiredNeighbours?
        @TODO: Add column for filtered tiles into Multiframe (for VISTA).
        @TODO: Add in option for input list...
        """
        # What type of processing is necessary, if any? Check requirements;
        # select internal ones to start.
        # @TODO: VIKING - still select from Source table!
        listDrivReq = self.archive.query(
            "tableID,filterID,transFile,extractor",
            "RequiredListDrivenProduct",
            "programmeID=%s and surveyID<0" % self.programmeID)

        if len(listDrivReq) > 1:
            raise Exception("Expected a single RequiredListDrivenProduct "
                            "entry for programmeID=%s, found %s"
                            % (self.programmeID, len(listDrivReq)))

        tableInfo = self.archive.query(
            "tableName,sourceIDName",
            "ProgrammeTable",
            "programmeID=%s AND tableID=%s" % (self.programmeID,
                                               listDrivReq[0].tableID),
            firstOnly=True)

        # Select frameSets that haven't already been fully processed
        self.passbands = df.PassbandList(self.programme).passbands
        spaceRequired = (len(self.passbands) * self.archive.queryNumRows(
            self.programme.getSourceTable()) * self.archive.queryRowSize(
            self.programme.getListRemeasTable()))

        self.setUpWorkingDirs(spaceRequired)
        self.prodType = self.programme.getAttr('sourceProdType')
        self.procDateSet = set()
        self._prefix = ''.join(["cu", "%02d" % (self.cuNum), "id",
                                str(self.cuEventID),
                                '_', os.getenv('HOST'), '_',
                                self.archive.database])

        filePidPathName = \
            fits.createFileListDict(self.programme)[self.prodType]

        Logger.addMessage("Will write list-driven catalogues to %s"
                          % self.outputDirPath)

        whereString = ''.join("listproc%s=0 or " % pband
                              for pband in self.passbands)

        frameSets = self.archive.query(
            selectStr="frameSetID",
            fromStr=self.programme.getMergeLogTable(),
            whereStr=whereString[:-4])

        # Now find frameGroups - frameSets that include the same files.
        # If pawprints, multiple frameSets access the same multiframes.
        frameGroups = queries.getFrameSetGroups(self.programme, frameSets)
        frameGroups = [frameGroups[0]] if self.oneOnly else frameGroups
        Logger.addMessage("Produced list of %d frameSet groups to process"
                          % len(frameGroups))

        procLDCatList = []   # Processed list-driven catalogues
        progSchema = schema.parseTables(self.programme.getSchemaScript())
        ingester = Ingester(self.archive, progSchema, self.shareFileID)
        for ii, frmGroup in enumerate(frameGroups):
            Logger.addMessage("Getting multiframes for frameGroup %d"
                              % (ii + 1))

            # Select multiframes to work on
            mfList = self.getMultiframeList(frmGroup)

            # Get size of frames - decide what type
            isPwprint = getIsPPT(mfList)
            mxImgSizeDeg = max((max(mfl.imgSizeX, mfl.imgSizeY) *
                                self.sysc.instrumentPixScale /
                                (3600. * mfl.micro)) for mfl in mfList)

            mxDithSizeDeg = max((max(mfl.imgSizeX - mfl.micro * mfl.detX,
                                     mfl.imgSizeY - mfl.micro * mfl.detY) *
                                 self.sysc.instrumentPixScale /
                                 (3600. * mfl.micro)) for mfl in mfList)

            if isPwprint:
                # If pawprint - use field of view plus the dithering offset
                radiusDeg = 0.5 * max(self.sysc.fieldOfView) + mxDithSizeDeg
            else:
                # If tile or mosaic - just use the image size
                radiusDeg = 0.5 * mxImgSizeDeg

            # Add in the matching tolerance and an extra 10%. Maybe smaller?
            tolerance = self.getTolerance(frmGroup)
            radiusDeg = 1.1 * (radiusDeg + tolerance)
            centre = (mfList[0].ra, mfList[0].dec)

            # Get sources
            sourceList = self.getSources(centre, radiusDeg)
            Logger.addMessage("Number of sources is %d" % len(sourceList))
            open(self.tempSourceFileName, 'w').writelines(
                "%s %s\n" % (src.ra, src.dec) for src in sourceList)

            Logger.addMessage("Selected source list for this frameSet group")

            # Run CASU list-driven code on frames
            for mfl in mfList:
                self.procDateSet.add(fits.getDate(mfl.fileName))
                # @TODO: check if this is what I want.
                listDrivCatName = self.runLDExtractor(mfl, sourceList)

                # Add to the list of processed files
                procLDCatList.append("%s %s\n"
                                     % (fits.stripServer(mfl.fileName),
                                        listDrivCatName))
                # @TODO: sourceIDs?

            # Remove the temporary source list
            os.remove(self.tempSourceFileName)

        open(filePidPathName, "w").writelines(procLDCatList)

        # Now ingest the list-driven FITS files. Don't have to worry about
        # clashing with daily ingests. A CU4-type process, but slightly
        # different - uses the same C++ code.
        # @TODO: Do we want an objectID?
        #nextObjID = self.archive.queryAttrMax(
        #    "objID", self.programme.getListRemeasTable()) or 0

        # Only one table needed... Create the schema list
        self.createSchemaList()

        # Set table variables that will be passed on to the ingester
        self.setTableVariables()

        # Make sure there are no old .dat files in the dbSharePath
        self.cleanUpFileShare()

        # Create the illumination table list
        # @TODO: Is this working -- take this out of CU4?
        self.createIlluminationTableList()

        if not self.isTrialRun and os.path.getsize(filePidPathName) > 0:
            # @TODO: objID?
            wfcamsrc.extractDetections(self._schemaFileList[0],
                filePidPathName, self.cuEventID, self.archive.sharePath(),
                self._prefix, self.illumFile.name, self._schemaFileList[1],
                0, self.sysc)

            ingestFilePathList = [(tableName,
                os.path.join(self.archive.sharePath(), "%s_%s.dat"
                             % (self._prefix, tableName)))
                for tableName in self._schemaFileList[1].split(',')]

            for tableName, ingestPathFile in ingestFilePathList:
                ingester.ingestTable(tableName, ingestPathFile, isCsv=False)

        # Don't use SourceMerging -- much easier: already know the sourceID.
        # Then run the merged catalogue - SourceMerger, a new class, like
        # SynopticMerger? The merge log is already fine; just need to run the
        # C++ code:
        #SynopticMerger.minProdID = minProdID
        #SynopticMerger(str(programmeID), cli=cli).run()

    #--------------------------------------------------------------------------
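    # Worked example of the search-radius logic in _onRun above (illustrative
    # numbers only, not archive constants): for a pawprint with a field of
    # view 0.9 deg across and a maximum dither extent of 0.01 deg,
    #     radiusDeg = 0.5 * 0.9 + 0.01 = 0.46 deg
    # before tolerance; with a 0.02 deg matching tolerance the final cone
    # search radius becomes 1.1 * (0.46 + 0.02) ~= 0.53 deg.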
    def cleanUpFileShare(self):
        """ Make sure there are no old .dat files in the dbSharePath.
        """
        for table in self._tableDict:
            csvFileName = os.path.join(
                self.archive.sharePath(), self._tableDict[table]["ingfile"])

            if os.path.exists(csvFileName):
                os.remove(csvFileName)
    #--------------------------------------------------------------------------

    def createIlluminationTableList(self):
        """ Create the list of used illumination tables and write it to the
            working directory for use with exnumeric.

            @TODO: Move CU4 version to Astrometry?? and link to that.
        """
        illumDict = defaultdict(list)
        illumFiles = [illumFileName
            for illumFileName in dircache.listdir(self.sysc.illuminationDir())
            if "_200" not in illumFileName]

        for dateStr in self.procDateSet:
            # Find existing illumination tables
            for illumFileName in illumFiles:
                if illumFileName.endswith(".table"):
                    _prefix, semester, month, _band = \
                        illumFileName.partition(".table")[0].split('_')

                    semester = "20%s" % semester.upper()
                    if semester == self.sysc.obsCal.checkDate(dateStr) \
                       and month.lower() == self.sysc.obsCal.getMonth(dateStr):
                        illumDict[dateStr].append(os.path.join(
                            self.sysc.illuminationDir(), illumFileName))

            # Fill missing entries with the default table
            if dateStr not in illumDict:
                illumDict[dateStr] = [
                    os.path.join(self.sysc.illuminationDir(), fn)
                    for fn in illumFiles
                    if '_00x_def_' in fn and fn.endswith(".table")]

                Logger.addMessage("<Warning> No illumination correction table"
                                  " available, using default table!")

        # Create the table list file and csv files for exnumeric
        self.illumFile = File(os.path.join(self.tempWorkingDirPath,
                                           "illumTables.dat"))
        self.illumFile.wopen()
        for dateStr in self.procDateSet:
            for illumTableName in illumDict[dateStr]:
                if not os.path.exists(illumTableName + ".csv"):
                    self.parseIllumTable(illumTableName)

                self.illumFile.writetheline(illumTableName + ".csv")

        self.illumFile.close()
    #--------------------------------------------------------------------------

    def createSchemaList(self):
        """ Create the schema list file that maps each output table to its
            schema script.
        """
        rawTable = self.programme.getListRemeasTable() + 'Raw'
        astroTable = self.programme.getListRemeasTable() + 'Astrometry'
        photoTable = self.programme.getListRemeasTable() + 'Photometry'
        tables = [rawTable, astroTable, photoTable]

        self._schemaFileList = [
            os.path.join(self.tempWorkingDirPath, "schema.dat"),
            ','.join(tables)]

        # NB: a local name is used here to avoid shadowing the imported
        # schema module.
        schemaScript = self.programme.getAttr('catalogueSchema')
        open(self._schemaFileList[0], 'w').writelines(
            "%s %s\n" % (schemaScript, table) for table in tables)
    #--------------------------------------------------------------------------

    def filterCat(self, listDrivCatName, origCatName, sourceList):
        """ Remove sources outside the frame edge and add in sourceIDs.

            @TODO: Do this in the C++ code -- more efficient.
        """
        sourceIDs = numpy.array([sourceID
                                 for sourceID, _ra, _dec in sourceList])

        newCol = Column(name='Source_ID', format='K', unit='Number',
                        array=sourceIDs)

        tmpCatName = os.path.join(os.path.dirname(listDrivCatName),
                                  "temp.fits")

        # First pass: append the Source_ID column to every table extension
        # and merge in header keywords from the original catalogue.
        hdulist = pyfits.open(listDrivCatName)
        hdulist2 = pyfits.HDUList()
        hdulistC = pyfits.open(origCatName)
        for ii, hdu in enumerate(hdulist):
            if ii > 0:
                newhdu = pyfits.new_table(hdu.columns + newCol)
                newhdu.header = self.updateHeader(newhdu.header, hdu.header)
                newhdu.header = self.updateHeader(newhdu.header,
                                                  hdulistC[ii].header)
                hdulist2.append(newhdu)
            else:
                hdulist2.append(hdu)

        hdulist2.writeto(tmpCatName)
        hdulist.close()
        hdulist2.close()
        hdulistC.close()
        os.remove(listDrivCatName)

        # Second pass: keep only rows whose pixel coordinates fall on the
        # frame.
        hdulist = pyfits.open(tmpCatName)
        hdulist2 = pyfits.HDUList()
        for ii, hdu in enumerate(hdulist):
            if ii > 0:
                nxout = hdu.header['NXOUT']
                nyout = hdu.header['NYOUT']
                tbdata = hdu.data
                xCrd = tbdata.field("X_coordinate")
                yCrd = tbdata.field("Y_coordinate")
                mask = numpy.logical_and(
                    numpy.logical_and(xCrd >= 1, xCrd <= nxout),
                    numpy.logical_and(yCrd >= 1, yCrd <= nyout))

                newhdu = pyfits.new_table(hdu.columns)
                newhdu.data = hdu.data[mask]
                newhdu.header = self.updateHeader(newhdu.header, hdu.header)
                hdulist2.append(newhdu)
            else:
                hdulist2.append(hdu)

        hdulist2.writeto(listDrivCatName)
        hdulist.close()
        hdulist2.close()
        os.remove(tmpCatName)
    #--------------------------------------------------------------------------

    def getMultiframeList(self, frmGroup):
        """ Gets the list of multiframes to process for a frameSet group.
        """
        multiframeList = list()
        for passband in self.passbands:
            multiframeList += self.archive.query(
                selectStr="m.multiframeID as mfID, (15.*rabase) as ra,"
                          "decbase as dec, frameType, "
                          "cast(sqrt(nuStep) as int) as micro, fileName, "
                          "max(axis1length) as imgSizeX, "
                          "max(axis2length) as imgSizeY, "
                          "max(detRows) as detX, max(detCols) as detY",
                fromStr="Multiframe as m,MultiframeDetector as md,%s as l"
                        % self.programme.getMergeLogTable(),
                whereStr="frameSetID in (%s) and m.multiframeID=l.%smfID and "
                         "m.multiframeID=md.multiframeID and l.listProc%s=0 "
                         "and m.multiframeID>0 "
                         "group by m.multiframeID,frameType,(15.*rabase),"
                         "decbase,nuStep,fileName"
                         % (','.join(str(fsID) for fsID in frmGroup),
                            passband, passband))

        return multiframeList
    #--------------------------------------------------------------------------

    def getSources(self, centre, radiusDeg):
        """ Returns all primary sources within a circle of the given radius
            in degrees about the given (ra, dec) centre.
        """
        return self.archive.query(
            selectStr="sourceID,ra,dec",
            fromStr=self.programme.getSourceTable(),
            whereStr="(priOrSec=0 or priOrSec=frameSetID) and "
                     "dbo.fGreatCircleDist(ra,dec,%f,%f)<%f"
                     % (centre[0], centre[1], 60. * radiusDeg))
    #--------------------------------------------------------------------------
    def getTolerance(self, frmGroup):
        """ Returns the maximum stack radius of the required products for the
            fields in this frameSet group.
        """
        fieldIDList = [queries.getFieldID(self.archive, fSetID,
                                          self.programme)
                       for fSetID in frmGroup]

        return self.archive.queryAttrMax(
            "stackRadius", "Required%s" % self.prodType,
            where="fieldID in (%s)" % ','.join(map(str, fieldIDList)))
    #--------------------------------------------------------------------------

    def runLDExtractor(self, mfl, sourceList):
        """ Run the CASU list-driven extractor on a single multiframe and
            return the path to the resulting catalogue.

            @TODO: Parallelise - like CU13.
            @TODO: Describe inputs, e.g. args.
            @TODO: Add in ZP etc...
        """
        thresh = self.sysc.casuDetThresh[mfl.micro * mfl.micro]
        rCore = mfl.micro * self.sysc.rCoreBasic
        imgName = fits.stripServer(mfl.fileName)
        cnfName = fits.getConfMap(imgName)
        catName = fits.getCatalogue(imgName)
        listCatName = fits.getListDrivenCat(self.outputDirPath, imgName)

        argstr = ' '.join([imgName, cnfName, self.tempSourceFileName,
                           listCatName, str(thresh)])
        argstr += " --rcore %s" % rCore
        argstr += " --cattype %s" % self.sysc.casuCatType

        Logger.addMessage("Extracting for camera %s"
                          % self.sysc.instrumentName, alwaysLog=False)

        if not self.skipCats:
            extp.run("imcore_list " + argstr)
            fits.removeDuplicateKeywords(listCatName)
            self.filterCat(listCatName, catName, sourceList)

        return listCatName
    #--------------------------------------------------------------------------

    def setTableVariables(self):
        """ Set the variables dependent on the tables used in this CU.
        """
        tableList = zip(
            utils.extractColumn(self._schemaFileList[0], 1),
            utils.extractColumn(self._schemaFileList[0], 0))

        processedTables = self._schemaFileList[1].split(',')

        self._ingestOrder = []
        self._tableDict = {}
        for table, schemaFile in tableList:
            if table in processedTables:
                self._ingestOrder.append(table)
                self._tableDict[table] = dict(schema=schemaFile,
                    ingfile="%s_%s.dat" % (self._prefix, table))

        self._ingestOrder = utils.orderedSet(self._ingestOrder)
    #--------------------------------------------------------------------------
    def setUpWorkingDirs(self, spaceRequired):
        """ Creates the directories in which the list-driven products and
            temporary working files are stored, and sets the corresponding
            member path attributes.

            @param spaceRequired: Amount of disk space required.
            @type  spaceRequired: float
        """
        baseDir = (self.sysc.testOutputPath() if not self.archive.isRealRun
                   else utils.getNextDisk(self.sysc,
                                          preAllocMem=spaceRequired))

        twDir = os.path.join(baseDir, self.sysc.tempProductsDir)
        opDir = os.path.join(baseDir, self.sysc.listDrivDir)
        utils.ensureDirExist(twDir)
        utils.ensureDirExist(opDir)

        self.outputDirPath = \
            os.path.join(opDir, '%s_v%s' % (self.dateString,
                                            self.releaseNum))

        self.tempWorkingDirPath = os.path.join(twDir, self.dateString)
        utils.ensureDirExist(self.outputDirPath)
        utils.ensureDirExist(self.tempWorkingDirPath)
        self.tempSourceFileName = os.path.join(
            self.tempWorkingDirPath, 'sourceList%s%s.ascii'
            % (self.programme.getAcronym(), self.dateString))
    #--------------------------------------------------------------------------

    def updateHeader(self, header, oldheader):
        """ Add keywords from the old header that are not already present in
            the new header.
        """
        origList = [card.key for card in header.ascardlist()]
        for card in oldheader.ascardlist():
            if card.key not in origList:
                header.update(card.key, card.value, card.comment)

        return header
#------------------------------------------------------------------------------

def getIsPPT(mfList):
    """ Returns True if the multiframes in the given list are pawprints,
        False if they are tiles or mosaics.
    """
    fTypes = set(not ('tile' in mfInfo.frameType.lower() or
                      'mosaic' in mfInfo.frameType.lower())
                 for mfInfo in mfList)

    if len(fTypes) > 1:
        raise Exception("Frame group mixes pawprints with tiles/mosaics")

    return fTypes.pop()
#------------------------------------------------------------------------------
# Entry point for script.

# Allow module to be imported as well as executed from the command line
if __name__ == '__main__':
    CLI.progArgs.append(CLI.Argument('programmeID', 'DXS'))
    CLI.progArgs.append(CLI.Argument('releaseNum', '3',
                                     isValOK=lambda x: x.isdigit()))

    # Define additional command-line interface options for Cu9
    CLI.progOpts += [
        CLI.Option('i', 'internalOnly',
                   'Only do internal list-driven photometry'),
        CLI.Option('o', 'oneFrameGroupOnly', 'Only test one frameGroup'),
        CLI.Option('v', 'verbose', 'more verbose logging'),
        CLI.Option('s', 'synoptic', 'Run synoptic list-driven photometry'),
        CLI.Option('S', 'skipCats', 'Skip catalogue creation')]

    cli = CLI(Cu9, "$Revision: 9470 $")
    Logger.isVerbose = cli.getOpt('verbose')
    Logger.addMessage(cli.getProgDetails())

    cu = Cu9(cli.getArg('programmeID'), cli=cli)
    cu.internalOnly = cli.getOpt('internalOnly')
    cu.releaseNum = int(cli.getArg('releaseNum'))
    cu.runSynop = cli.getOpt('synoptic')
    cu.skipCats = cli.getOpt('skipCats')
    cu.isTrialRun = cli.getOpt('test')
    cu.oneOnly = cli.getOpt('oneFrameGroupOnly')
    cu.run()

#------------------------------------------------------------------------------
# Change log:
#
# 18-Jan-2005, NCH: Original version (very incomplete)
# 24-Jan-2005, NCH: Substantial additions
# 26-Jan-2005, NCH: More development
# 31-Jan-2005, NCH: Yet more development
# 24-Feb-2005, NCH: Finishing off as far as possible at this time:
#                   enhanced removeAnalysedSets to make it robust against
#                   schema evolution.
#  2-Mar-2005, NCH: Still finishing off...
# 24-Nov-2005, NJC: Minor correction
# 20-Jan-2006, JB:  Added code so that the name of the database being used
#                   is prepended to the logfile filename, added the variable
#                   "mainDb" for the above and the DB connection, changed
#                   references to "name" to "curator" for consistency, and
#                   added sys to the list of imported Python modules.
# 24-Apr-2006, RSC: Fixed bug in the design of Utilities.cleanFileShare()
#                   whereby random files would be deleted. Now file share
#                   files are named *CuIDxxx* where xxx is the cuEventID
#                   instead of *xxx* where xxx is the process ID.
# 27-Apr-2006, RSC: Now file share files are named *???CuIDxxx* where ???
#                   is the database name, to remove possible conflict.
#  9-May-2006, RSC: Deprecated utils.getNameFromPath()
#  2-Aug-2006, RSC: Updated to new ExternalProcess.py design.
# 29-Aug-2006, RSC: Upgraded Cu9 to SourceMerger OO style.
# 29-Jan-2007, RSC: Additional -v/--verbose option.
# 25-Jul-2007, NJC: Changed CASU tool name.