Package wsatools :: Module ProgrammeBuilder
[hide private]

Source Code for Module wsatools.ProgrammeBuilder

   1  #------------------------------------------------------------------------------ 
   2  # $Id: ProgrammeBuilder.py 10248 2014-03-17 15:55:01Z RossCollins $ 
   3  """ 
   4     Automatic programme setup tools to determine the correct curation settings. 
   5     Updates the curation tables in the database and creates the necessary 
   6     schema files. 
   7   
   8     @author: N.J.G. Cross 
   9     @org:    WFAU, IfA, University of Edinburgh 
  10   
  11     @newfield contributors: Contributors, Contributors (Alphabetical Order) 
  12     @contributors: R.S. Collins 
  13   
  14     @todo: Make sure new pointingString and old pointingString refer to same 
  15            positions 
  16     @todo: Need to replace some hard wired constants with database constants 
  17            parsed from WSA_InitiateArchive.sql. Therefore, may need to add more 
  18            constants to that SQL script. 
  19  """ 
  20  #------------------------------------------------------------------------------ 
  21  from __future__      import division, print_function 
  22  from future_builtins import zip 
  23   
  24  from   bisect      import bisect 
  25  from   collections import defaultdict, namedtuple 
  26  import math 
  27  import numpy 
  28  from   operator    import itemgetter 
  29  import os 
  30   
  31  import wsatools.Astrometry                   as astro 
  32  import wsatools.Banner                       as banner 
  33  import wsatools.DataFactory                  as df 
  34  import wsatools.DbConnect.CommonQueries      as queries 
  35  from   wsatools.DbConnect.CuSession      import CuSession 
  36  import wsatools.DbConnect.DbConstants        as dbc 
  37  from   wsatools.DbConnect.DbSession      import Join, SelectSQL 
  38  import wsatools.DbConnect.Schema             as schema 
  39  import wsatools.ExternalProcess              as extp 
  40  from   wsatools.ExternalProductConverter import ExternalProduct 
  41  from   wsatools.Logger                   import Logger 
  42  from   wsatools.ObsCalendar              import DateRange 
  43  import wsatools.Statistics                   as stats 
  44  from   wsatools.SystemConstants          import DepCodes, SystemConstants 
  45  import wsatools.Utilities                    as utils 
  46  #------------------------------------------------------------------------------ 
  47   
class ProgrammeBuilder(CuSession):
    """
    Automatically determines the correct curation settings for a given
    programme. Updates the curation tables in the database and creates the
    necessary schema files.

    """
    #--------------------------------------------------------------------------
    # Private class parameters for this procedure - should not be altered

    _autoCommit = True  # Overrides CuSession default
    cuNum = 21          # Overrides CuSession default (curation task number)

    #--------------------------------------------------------------------------
    # Define public member variable default values (access as obj.varName)
    # these need to be set from command-line options

    bandMergeCrit = None      #: Band merging criterion.
    createSchemaOnly = False  #: Just update schema with latest template?
    dateRange = CuSession.sysc.obsCal.dateRange()  #: Range of nights to include.
    maxEllipticity = 1.0  #: Maximum stellar ellipticity for deep stacks.
    maxSeeing = 2.0       #: Maximum seeing for deep stacks.
    newPoints = False     #: Only modify RequiredStack with new pointings?
    numberStks = dbc.intDefault()  #: Max number of components of a deep stack.
    redoSchema = False    #: Recreate existing database schema?
    redoSetup = True      #: Recreate all RequiredStack pointings?
    displayDeeps = False  #: Output data for deep fields

    #--------------------------------------------------------------------------
78 - def _onRun(self):
79 """ Set up programme database settings and/or schema. 80 """ 81 if self.createSchemaOnly or self.redoSchema: 82 # No programme set up required, just re-parse schema, optionally 83 # updating existing schema in the database. 84 self.createSchema(syncToDb=self.redoSchema) 85 if self.redoSchema: 86 commitSchema(self.archive, "updated", 87 isNonSurvey=self.onlyNonSurveys) 88 if not self.onlyNonSurveys and not self.onlySurveys: 89 # Commit any left over non-survey schema files 90 commitSchema(self.archive, "updated") 91 92 elif not self.archive.isTrialRun: 93 raise ProgrammeBuilder.CuError("No database update requested.") 94 95 elif not self.programmeID: 96 # Find and set up new progs 97 self.setUpNewProgs() 98 if self.programmeID: # i.e. there are new progs. 99 self.createSchema() 100 self.createTables() 101 commitSchema(self.archive, "new") 102 103 elif self.setUpProgramme(): 104 # Recreate schema 105 self.createSchema(syncToDb=True) 106 if not self.programme.isNonSurvey(): 107 # Non-survey schema is committed by NonSurveyRelease 108 commitSchema(self.archive, "updated", isNonSurvey=False) 109 self.initialiseNewRelease()
110 111 #-------------------------------------------------------------------------- 112
    def createSchema(self, syncToDb=False):
        """
        Update schema files for current non-survey programme using the latest
        version of sql/Templates/*_autoTemplateSchema.sql and the latest
        curation settings in the database for this programme.

        @param syncToDb: If True, update existing schema in database with new
                         schema, for all post-Detection tables.
        @type  syncToDb: bool

        @todo: Combine surveyparser.py functionality into this function, which
               will allow surveyparser.py to be just an interface to this class
               and improve consistency between survey and non-survey schema.
        """
        # self.programmeID may be a set (iterable) or a single ID; a single
        # int raises TypeError from list() and falls through to the one-item
        # list below.
        try:
            progIDs = list(self.programmeID if self.programmeID else
                self.programme.getProgIDList(onlyNonSurvey=self.onlyNonSurveys,
                                             onlySurvey=self.onlySurveys))
        except TypeError:
            progIDs = [self.programmeID]

        # Manually maintained programmes must never have schema auto-generated
        if any(progID in self.sysc.manualProgs for progID in progIDs):
            raise ProgrammeBuilder.CuError("Can only create schema for "
                                           "automatically set up programmes")

        for progID in sorted(progIDs):
            self.programme.setCurRow(programmeID=progID)
            Logger.addMessage("Creating schema for programme " +
                              self.programme.getAcronym().upper())

            if syncToDb and self.programme.isNonSurvey():
                # Drop existing post-Detection tables before changing schema.
                # NOTE: Cannot just overwrite when creating if schema has
                # changed such that e.g. a BestMatch table is replaced by a
                # SynopticBestMatch table.
                progSchema = \
                    schema.parseTables(self.programme.getSchemaScript())

                # Reverse order, presumably to respect dependencies between
                # tables when dropping - TODO confirm
                for table in reversed(progSchema):
                    if not table.name.endswith(("Raw", "Astrometry",
                                                "Photometry", "Detection",
                                                "TileSet", "TilePawPrints")):

                        Logger.addMessage("Dropping table %s" % table)
                        self.archive.dropTable(table, progSchema)

            # Satellite programmes use a dedicated template
            schemaPath = self.sysc.autoTemplateSchema
            if self.programme.isSatelliteProg():
                schemaPath = \
                    self.sysc.sqlTemplatePath("WSA_SatelliteTemplateSchema.sql")

            schemaFiles = [(schemaPath, os.path.join(self.sysc.sqlScriptPath,
                                        self.programme.getSchemaScript()))]

            # Add neighbour-table schema file unless it is the default or the
            # shared survey neighbours script
            if self.programme.getAttr("neighboursSchema") \
                    not in (dbc.charDefault(), "WSA_NeighboursSchema.sql"):
                schemaFiles.append(
                    (self.sysc.autoTemplateNeighboursSchema,
                     os.path.join(self.sysc.sqlScriptPath,
                                  self.programme.getAttr("neighboursSchema"))))

            # Update the schema definition files
            isUpdatedForTable = self.createProgSchema(schemaFiles, progIDs)

            # Recreate all post-Detection tables from updated schema file
            if syncToDb:
                progSchema = \
                    schema.parseTables(self.programme.getSchemaScript())

                Logger.addMessage("Synchronising database to latest schema...")
                for table in progSchema:
                    if self.archive.createTable(table, progSchema,
                            overWrite=isUpdatedForTable.get(table.name)):
                        Logger.addMessage("Created table %s" % table)

        # @TODO: ViewSchema like IndexSchema
        # Collect (script, programme-ID list) pairs for index and view files
        indexFiles = []
        viewFiles = []
        if self.onlyNonSurveys or not self.onlySurveys:
            indexFiles.append((self.sysc.nsIndexScript,
                               self.programme.getProgIDList(onlyNonSurvey=True)))

        if not self.sysc.isWSA() \
                and (self.onlySurveys or not self.onlyNonSurveys):

            indexFiles.append((self.sysc.indexScript,
                               self.programme.getProgIDList(onlySurvey=True)))

            viewFiles.append((self.sysc.surveyViewScript,
                              self.programme.getProgIDList(onlySurvey=True)))

        try:
            for schemaFileName, progIDs in indexFiles:
                self.createIndexSchema(schemaFileName, progIDs)

            for schemaFileName, progIDs in viewFiles:
                self.createIndexSchema(schemaFileName, progIDs, isView=True)

        finally:
            # Reset programme to the current programme
            if not isinstance(self.programmeID, set):
                self.programme.setCurRow(programmeID=self.programmeID)
215 216 #-------------------------------------------------------------------------- 217
218 - def createIndexSchema(self, schemaFileName, progIDs, isView=False):
219 """ 220 The index schema script is recreated from scratch each time for all 221 programmes that have detection tables. 222 223 @param schemaFileName: Schema file to update. 224 @type schemaFileName: str 225 @param progIDs: List of programme IDs for this file. 226 @type progIDs: list(int) 227 @param isView: If True, template is to be used to create a view. 228 @type isView: bool 229 230 """ 231 Logger.addMessage("Updating %s..." % schemaFileName) 232 schemaPath = os.path.join(self.sysc.sqlScriptPath, schemaFileName) 233 schemaFiles = [(self.sysc.autoTemplateViewSchema if isView else 234 self.sysc.autoTemplateIndexSchema, schemaPath)] 235 236 file(schemaPath, 'w').write('') # initialise 237 238 for progID in progIDs: 239 self.programme.setCurRow(programmeID=progID) 240 self.createProgSchema(schemaFiles, progIDs)
241 242 #-------------------------------------------------------------------------- 243
    def createProgSchema(self, schemaFiles, progIDs):
        """
        Parses *autoTemplate.sql to produce the custom schema for the current
        programme.

        Template mini-language, as handled below:
          - Lines starting with '+' or '=' are directives: they update the
            Status object (split tables, drop status, filter / colour /
            border / neighbour repetition modes) and are not copied to output.
          - Lines starting with '*' are routed to the split table selected by
            the option character at line[2]; all other lines go to 'main'.
          - '&...&' tokens are substituted per programme, per filter, per
            colour, per border or per neighbour table as appropriate.

        @param schemaFiles: List of schema template and programme files.
        @type  schemaFiles: list(tuple(str, str))
        @param progIDs:     List of programme IDs currently being processed.
        @type  progIDs:     list(int)

        @returns: Dictionary indicating the updated status of each table.
        @rtype: dict(str: bool)

        """
        # Use programmeInfo to drive curation
        # isDeep, isLowLat, useSEx, isCorr, isSurvey, isNs, isEarly
        # Detection tables - use **flags to create all 4 schemas
        # Source tables - use **flags to create mergeSource and Source
        # @TODO: Get wavelength info - if NB filter, do colours compared to
        #        nearest BB filter
        fullName = self.programme.getName()
        if self.programme.isNonSurvey():
            # Non-survey long names are lower-cased on the first character
            fullName = fullName[0].lower() + fullName[1:]

        # Constant, programme-wide token substitutions
        stdSubs = [("&template&", self.programme.getAcronym()),
                   ("&TEMPLATE&", self.programme.getAcronym().upper()),
                   ("&templatelong&", fullName),
                   ("@dataBase", self.archive.database
                    if self.archive.database != 'VSAVVV' else 'VSA')]

        # UCD strings for the standard broad-band filters
        bandUcdDict = {'z': 'em.opt.I',
                       'y': 'em.IR.NIR',
                       'j': 'em.IR.J',
                       'h': 'em.IR.H',
                       'ks': 'em.IR.K'}

        statusForTable = {}  # table name -> updated status (return value)
        synSetup = SynopticSetup(self.programme, self.dateRange)
        for templatePath, schemaPath in schemaFiles:
            status = Status(self, progIDs, synSetup)
            if self.programme.getAttr("neighboursSchema") in schemaPath:
                status.setNeighbours()
                if not status.neighTables:
                    Logger.addMessage("No tables in RequiredNeighbours")
                    continue

            # Index/view scripts are shared and appended to; anything else is
            # a per-programme script that is recreated from scratch here
            if not any(indexScript in schemaPath for indexScript
                       in (self.sysc.indexScript, self.sysc.nsIndexScript,
                           self.sysc.surveyViewScript)):

                Logger.addMessage(
                    "Creating script %s" % os.path.basename(schemaPath))

                file(schemaPath, 'w').write('')  # initialise

            # @TODO: Add views in for VSA
            templateScript = file(templatePath).readlines()

            # Set split tables
            lineNum = 0
            dataLines = {'main': []}   # split-table name -> output lines
            tables = [('main')]        # NB: ('main') is just 'main'
            dictFiltData = None        # per-filter line buffers
            dictColData = None         # per-colour line buffers
            dictNeighData = None       # per-neighbour-table line buffers
            dictBorData = None         # per-border line buffers
            tableOrder = [('main', True)]  # (table, isPublic) write order
            while lineNum < len(templateScript):
                line = templateScript[lineNum]
                if line.startswith(("+", "=")):
                    # Directive line: attributes after this
                    oldDropStatus = status.dropTables
                    status.update(line)

                    # Compare splitTables to dataLines: if the split-table set
                    # changed, flush what has accumulated and start new buffers
                    if status.splitTables:
                        if any(table not in dataLines for table
                               in status.getAllTables(status.splitTables)):

                            for key, _cre in tableOrder:
                                statusForTable.update(
                                    extractTables(dataLines[key], status))
                                # write everything so far
                                file(schemaPath, 'a').writelines(dataLines[key])

                            dataLines = status.getAllTables(status.splitTables)
                            tableOrder = status.tableOrder

                    if not status.splitTables:
                        # Leaving split mode: flush and revert to 'main' only
                        if 'main' not in dataLines:
                            for key, _cre in tableOrder:
                                statusForTable.update(
                                    extractTables(dataLines[key], status))
                                # write everything so far
                                file(schemaPath, 'a').writelines(dataLines[key])

                            dataLines = {'main': []}
                            tableOrder = [('main', True)]

                    if status.dropTables != oldDropStatus:
                        # Drop status changed: flush buffered lines under the
                        # OLD drop status, then restore the new one
                        dlDict = {}
                        curDropStatus = status.dropTables
                        status.dropTables = oldDropStatus
                        for key, _cre in tableOrder:
                            statusForTable.update(
                                extractTables(dataLines[key], status))
                            # write everything so far
                            file(schemaPath, 'a').writelines(dataLines[key])
                            dlDict[key] = []
                        dataLines = dlDict
                        status.dropTables = curDropStatus

                    # Entering filter mode: set up per-filter buffers;
                    # leaving it: splice buffered lines back in filter order
                    if status.useFilters and status.writeLine:
                        if not dictFiltData:
                            dictFiltData, dictOrder = \
                                status.initialise('filter', dataLines)

                    if dictFiltData and not status.useFilters:
                        for key in dataLines:
                            filtDataList = []
                            for filtName in dictOrder:
                                filtDataList.extend(dictFiltData[key][filtName])
                            dataLines[key].extend(filtDataList)
                        dictFiltData = None

                    # Same pattern for colour mode
                    if status.useColours and status.writeLine:
                        if not dictColData:
                            dictColData, dictOrder = \
                                status.initialise('colour', dataLines)

                    if dictColData and not status.useColours:
                        for key in dataLines:
                            colDataList = []
                            for colName in dictOrder:
                                colDataList.extend(dictColData[key][colName])

                            dataLines[key] += colDataList

                        dictColData = None

                    # Same pattern for border mode
                    if status.useBorders and status.writeLine:
                        if not dictBorData:
                            dictBorData, dictOrder = \
                                status.initialise('border', dataLines)

                    if dictBorData and not status.useBorders:
                        for key in dataLines:
                            borDataList = []
                            for borName in dictOrder:
                                borDataList.extend(dictBorData[key][borName])

                            dataLines[key] += borDataList

                        dictBorData = None

                    # Same pattern for neighbour-table mode
                    if status.useNeighs and status.writeLine:
                        # If dict is not not empty
                        if not dictNeighData:
                            dictNeighData, dictOrder = status.initialise(
                                'neigh', dataLines)

                    if dictNeighData and not status.useNeighs:
                        for key in dataLines:
                            neighDataList = []
                            for neighName in dictOrder:
                                if neighName in dictNeighData[key]:
                                    neighDataList.extend(
                                        dictNeighData[key][neighName])

                            dataLines[key] += neighDataList

                        dictNeighData = None

                    # skip (directive lines are never copied to the output)
                    lineNum += 1
                    continue

                # Route the line to its destination split table(s)
                if not line.startswith("*"):
                    tables = ['main']
                else:
                    tables = status.splitTables.get(line[2])
                    if tables is not None:
                        line = line[4:]  # strip the '*?x ' routing prefix
                    else:
                        Logger.addMessage("<Info> Table set %s does not have "
                            "option %s available for programme %s"
                            % (status.splitTables, line[2],
                               self.programme.getAcronym().upper()),
                            alwaysLog=False)

                        lineNum += 1
                        continue

                if status.writeLine:
                    # Now replace any strings and repeat any filters
                    # constant strings first
                    line = utils.multiSub(line, stdSubs)

                    # Replace filterlist with correct list
                    # @@TODO Replace with new method
                    if '&filterlist' in line:
                        listType = line.split('&filterlist:')[1].split('&')[0]
                        filterList = status.filterGroups[listType]
                        line = line.replace('&filterlist:%s&' % listType,
                                            ', '.join(filterList).title())
                    if '$[' in line:
                        line = status.complexSub(line)

                    # if in filter or colour mode, cycle through
                    if status.useFilters:
                        # produce the right number of lines
                        for ii, shtName in enumerate(status.filters):
                            subs = [("&A&", shtName.upper()),
                                    ("&a&", shtName.lower()),
                                    ("&As&", shtName.title()),
                                    ("&n&", 'n' if ii > 0 else ''),
                                    ("&MagUCD&", (";%s" %
                                     bandUcdDict[shtName.lower()]
                                     if shtName.lower() in bandUcdDict
                                     else ""))]

                            fline = utils.multiSub(line, subs)
                            for table in tables:
                                dictFiltData[table][shtName].append(fline)

                        lineNum += 1
                        continue

                    elif status.useColours:
                        # produce the right number of lines
                        for shtName1, shtName2 in status.colours:
                            colUcd = ""
                            colUcd += (";%s" % bandUcdDict[shtName1.lower()]
                                       if shtName1.lower() in bandUcdDict
                                       else "")
                            colUcd += (";%s" % bandUcdDict[shtName2.lower()]
                                       if shtName2.lower() in bandUcdDict
                                       else "")
                            colName = '%sm%s' % (shtName1, shtName2)
                            subs = [("&A&", shtName1.upper()),
                                    ("&a&", shtName1.lower()),
                                    ("&As&", shtName1.title()),
                                    ("&B&", shtName2.upper()),
                                    ("&b&", shtName2.lower()),
                                    ("&Bs&", shtName2.title()),
                                    ("&ColorUCD&", colUcd)]

                            fline = utils.multiSub(line, subs)
                            for table in tables:
                                dictColData[table][colName].append(fline)

                        lineNum += 1
                        continue

                    elif status.useBorders:
                        for border in status.borders:
                            subs = [("&A&", border.upper()),
                                    ("&a&", border.lower())]
                            fline = utils.multiSub(line, subs)
                            for table in tables:
                                dictBorData[table][border].append(fline)
                        lineNum += 1
                        continue

                    elif status.useNeighs:
                        for table in tables:
                            for ii, nTable in \
                                    enumerate(status.neighs[table]['neightable']):

                                # matchdist is in degrees; convert to arcsec
                                matchDist = round(3600
                                    * status.neighs[table]['matchdist'][ii], 1)

                                # Replace all terms, careful if they may or may
                                # not exist
                                subs = [("&neighTable&", nTable),
                                        ("&MatchDist&", str(matchDist)),
                                        ("&PrimeTable&",
                                         status.neighs[table]['primetable'][ii]),
                                        ("&PrimeID&",
                                         str(status.neighs[table]['primeid'][ii]))
                                        ]

                                fline = utils.multiSub(line, subs)
                                if 'externtable' in status.neighs[table]:
                                    fline = fline.replace("&ExternTable&",
                                        status.neighs[table]['externtable'][ii])

                                if 'externname' in status.neighs[table]:
                                    fline = fline.replace("&Extern&",
                                        status.neighs[table]['externname'][ii])

                                if 'externid' in status.neighs[table]:
                                    fline = fline.replace("&ExternID&",
                                        str(status.neighs[table]['externid'][ii]))

                                if 'externdb' in status.neighs[table]:
                                    fline = fline.replace("&ExternDB&",
                                        status.neighs[table]['externdb'][ii])

                                if 'extprogid' in status.neighs[table]:
                                    extProg = self.programme.getAcronym(progID=
                                        int(status.neighs[table]['extprogid'][ii]))

                                    fline = fline.replace("&SecProg&",
                                                          extProg.upper())

                                # NOTE(review): relies on 'and' binding tighter
                                # than 'or' in the second parenthesised clause
                                if (not status.maxIndex or status.maxIndex > ii)\
                                        and (not status.checkSpecNeigh or
                                             table == 'XMatch' and
                                             status.checkSpecNeigh.upper() in
                                             status.neighs[table]['externname'][ii]
                                             .upper()):

                                    dictNeighData[table][nTable].append(fline)

                        lineNum += 1
                        continue

                    # Plain output line: apply split/flux/create substitutions
                    isPublicForTable = dict(status.tableOrder)
                    for table in tables:
                        # Replace split string if necessary
                        fline = line.replace('&split&', table)
                        if "&fluxTable&" in fline:
                            fline = fline.replace('&fluxTable&',
                                self.programme.getDetectionTable() + '.'
                                if table in ("Photometry", "Astrometry")
                                else '')
                        dataLines[table].append(fline.replace('&createTable&',
                            "CREATE TABLE" if isPublicForTable[table] else
                            "create table"))

                # Put data in memory ...
                # If in split mode..
                lineNum += 1

            for key, _cre in status.tableOrder:
                # write everything else
                statusForTable.update(extractTables(dataLines[key], status))
                file(schemaPath, 'a').writelines(dataLines[key])

        return statusForTable
580 581 #-------------------------------------------------------------------------- 582
583 - def createTables(self):
584 """ Creates all tables for the current list of programmes, where they 585 do not already exist, inserting default rows where necessary. 586 """ 587 for progID in sorted(self.programmeID): 588 script = self.programme.getSchemaScript(programmeID=progID) 589 progSchema = schema.parseTables(script) 590 for table in progSchema: 591 Logger.addMessage("Creating table %s" % table) 592 self.archive.createTable(table, progSchema) 593 if table.name.endswith(("Raw", "Astrometry", "Photometry")): 594 self.archive.insertData(table.name, 595 [column.getDefaultValue() for column in table.columns])
596 597 #-------------------------------------------------------------------------- 598
599 - def getMaxReleaseOnDisk(self):
600 """ 601 Check available disks to see what the maximum releaseNum is for products 602 on the main disks, for the given programme. 603 604 @return: Highest release number in use in the disk file system. 605 @rtype: int 606 607 """ 608 # @TODO: This code replicates FitsUtils.findProduct()! The common code 609 # needs to be compartmentalised into one place to prevent future 610 # bugs. Even better - this search should only be done once, but 611 # with both goals. 612 dirDict = {'stack': self.sysc.stackDir, 'mosaic': self.sysc.mosaicDir} 613 if self.sysc.hasOffsetPos: 614 dirDict['tile'] = self.sysc.tileDir 615 616 progStr = "%05d" % self.programme.getAttr("programmeID") 617 fileNameParts = [progStr, self.sysc.deepSuffix, self.sysc.stackSuffix, 618 self.sysc.mefType] 619 620 notFileNameParts = [self.sysc.confSuffix, self.sysc.catType, 621 self.sysc.catSuffix, self.sysc.filtSuffix] 622 623 releaseNums = [] 624 for prodType in self.sysc.productTypes: 625 for disk in self.sysc.availableRaidFileSystem(): 626 productDir = os.path.join(disk, dirDict[prodType]) 627 if os.path.exists(productDir): 628 # Check subdirectories 629 subDirs = [subDir for subDir in os.listdir(productDir) 630 if subDir.split('_')[0].isdigit() 631 and subDir.startswith('20')] 632 633 for subDir in subDirs: 634 subDirPath = os.path.join(productDir, subDir) 635 636 # Check files 637 for fileName in os.listdir(subDirPath): 638 filePathName = os.path.join(subDirPath, fileName) 639 640 if (all(part in filePathName 641 for part in fileNameParts) 642 and all(part not in filePathName 643 for part in notFileNameParts)): 644 645 # Make sure programmeID part is in correct place 646 if fileName.split('_')[1][:5] == progStr: 647 releaseNum = int(subDir.split("_v")[1]) 648 releaseNums.append(int(releaseNum)) 649 650 return max(releaseNums) if releaseNums else 0
651 652 #-------------------------------------------------------------------------- 653
654 - def getReleaseNum(self):
655 """ 656 Queries database to find the next available release number for a given 657 survey. 658 659 @return: Next available release number. 660 @rtype: int 661 662 """ 663 progID = self.programme.getAttr("programmeID") 664 665 if self.programme.getAttr("sourceProdType") == 'mosaic': 666 # Use latest releaseNum 667 relNum = self.archive.queryAttrMax("releaseNum", "ExternalProduct", 668 where="productType='mosaic' AND programmeID=%s" % progID) 669 670 if not relNum: 671 raise ProgrammeBuilder.CuError( 672 "There are no existing mosaics ready for curation. " 673 "Please set up ExternalProduct and re-run.") 674 675 return relNum 676 677 diskRelNum = self.getMaxReleaseOnDisk() 678 dbRelNum = self.archive.queryAttrMax("releaseNum", "ProgrammeFrame", 679 where="productID>0 AND programmeID=%s" % progID) or 0 680 681 return max(diskRelNum, dbRelNum) + 1
682 683 #-------------------------------------------------------------------------- 684
685 - def initialiseNewRelease(self):
686 """ 687 Clear ProductProcessing table entries for the programme and 688 determine/announce the new release number. 689 690 """ 691 name = self.programme.getAcronym() 692 Logger.addMessage("Wiping %s ProductProcessing table entries..." % name) 693 Logger.addMessage("...%s rows deleted." 694 % self.archive.delete("ProductProcessing", 695 whereStr="programmeID=%s" % self.programme.getAttr("programmeID"))) 696 697 # @TODO: test grouping 698 releaseNum = self.getReleaseNum() 699 Logger.addMessage("This is %s release number: %s" % (name, releaseNum)) 700 banner.horizontal(str(releaseNum))
701 702 #-------------------------------------------------------------------------- 703
    def setUpNewProgs(self):
        """ Updates the Programme table entries for new programmes.

        Finds non-survey programmes that are registered (programmeID at or
        above the non-survey offset) but not yet configured (detectionTable
        still at its default), derives an acronym from the DFS ID string,
        then fills in their Programme and ProgrammeTable entries. Every
        programme set up here is added to self.programmeID.
        """
        self.onlyNonSurveys = True
        Logger.addMessage("Checking for new non-survey programmes...")

        # A default detectionTable marks a programme as not yet set up
        newProgs = self.archive.query("programmeID, dfsIdString", "Programme",
            whereStr="programmeID >= %s AND detectionTable = %r" %
                     (dbc.nonSurveyProgrammeOffset(), dbc.charDefault()))

        Logger.addMessage("found %s new programmes." % len(newProgs))

        for progID, dfsIdString in sorted(newProgs):
            # @TODO: OSA?
            if self.sysc.isWSA():
                acronym = dfsIdString.replace('/', '').lower()
            else:
                # NOTE: str.translate(None, chars) is the Python 2 form
                # (delete characters)
                acronym = dfsIdString.translate(None, '.-()').lower()
                # Prefix digit-leading IDs with a letter - presumably because
                # the acronym becomes a table-name prefix; confirm intent of
                # the specific 'd'/'s'/'n' choices
                if acronym.startswith('2'):
                    acronym = "d%s" % acronym
                elif acronym.startswith('3'):
                    acronym = "s%s" % acronym
                elif acronym[:3].isdigit():
                    acronym = "n%s" % acronym

            # @@NOTE: This should really be in a transaction block in case of
            #         database failure, instead just log what we are doing.
            Logger.addMessage("Updating Programme for " + acronym)
            entries = [("detectionTable", repr(acronym + "Detection")),
                       ("catalogueSchema", "'NonSurvey/%sNS%s_%sSchema.sql'" %
                        (self.sysc.loadDatabase, acronym, acronym))]

            rowIndex = [("programmeID", progID)]
            self.archive.updateEntries("Programme", entries, rowIndex)
            self.programmeID.add(progID)

            # @@TODO: The 1 and objID symbols should be database constants,
            #         need them to be defined and used in WSA_InitArchive.sql
            #         too...
            self.archive.insertData("ProgrammeTable",
                [progID, 1, acronym + "Detection", "objID"])

        if self.programmeID:
            # Refresh cached programme metadata to pick up the new rows
            self.programme.updateDetails()
747 748 #-------------------------------------------------------------------------- 749
750 - def setUpProgramme(self):
751 """ 752 Set up programme using the latest quality controlled data. 753 754 @return: True if the schema now needs updating. 755 @rtype: bool 756 757 """ 758 pAcronym = self.programme.getAcronym() 759 Logger.addMessage( 760 "Determining best setup for programme %s..." % pAcronym.upper()) 761 762 filterTable = df.Table("Filter", self.archive) 763 newSetup = BestSetup(self.programme, self.redoSetup, self.dateRange, 764 self.displayDeeps) 765 synSetup = SynopticSetup(self.programme, self.dateRange, 766 newSetup.isSurveyDeep, self.bandMergeCrit) 767 768 if self.programme.isNonSurvey(): 769 Logger.addMessage( 770 "RequiredFilters are:\n" 771 "| filter | isSynoptic |\n" + '\n'.join( 772 "| %6s | %3s |" 773 % (filterTable.getAttr("shortName", filterID=filterID), 774 ("Y" if isSynoptic else "N")) 775 for filterID, nPass, isSynoptic in newSetup.newFiltData)) 776 777 Logger.addMessage("<Info> %s is a %s survey" % (pAcronym.upper(), 778 ("DEEP" if newSetup.isSurveyDeep else "SHALLOW"))) 779 780 # Do nothing else for WSA surveys! 
781 if self.programmeID in self.sysc.manualProgs: 782 Logger.addMessage( 783 "<Info> The data suggest the following filter setup:\n%s\n" 784 "and the following pointing setup:\n%s" 785 % (newSetup.newFiltData, newSetup.newPosData)) 786 787 if not self.archive.isTrialRun: 788 raise ProgrammeBuilder.CuError( 789 "This is a UKIDSS programme, so requirements will not be " 790 "overwritten.") 791 792 return False 793 794 # Check to see if programme should use external deep data 795 reqTables = [] 796 for pType in newSetup.productData: 797 if len(newSetup.productData[pType]): 798 reqTables.append("Required%s" % pType.title()) 799 Logger.addMessage("%s %ss will be inserted into Required%s" 800 % (len(newSetup.productData[pType]), 801 pType.lower(), pType.title())) 802 803 if newSetup.productLinks: 804 reqTables += ["ProductLinks"] 805 806 Logger.addMessage("%s links will be inserted into ProductLinks" 807 % len(newSetup.productLinks)) 808 809 if self.programme.isNonSurvey(): 810 reqTables.append("RequiredFilters") 811 # Why wipe this - just clean SourceXDetection and add in any new 812 # if not self.newPoints: 813 # reqTables.append("RequiredNeighbours") 814 815 # Wipe existing requirements 816 if self.redoSetup: 817 Logger.addMessage( 818 "Wiping old programme requirements from database...") 819 820 for tableName in reqTables: 821 self.archive.delete(tableName, 822 whereStr="programmeID=%s" % self.programmeID) 823 else: 824 Logger.addMessage("Wiping ProductLinks from database...") 825 self.archive.delete("ProductLinks", 826 whereStr="programmeID=%s" % self.programmeID) 827 828 Logger.addMessage("Updating %s..." 
% ", ".join(reqTables)) 829 830 # Update RequiredFilters 831 for filterID, nPass, isSynoptic in newSetup.newFiltData: 832 if self.programme.isNonSurvey(): 833 self.archive.insertData("RequiredFilters", 834 [self.programmeID, filterID, nPass, isSynoptic]) 835 else: 836 # Just update synoptic status 837 self.archive.updateEntries("RequiredFilters", 838 entryList=[("isSynoptic", isSynoptic)], 839 rowIndexList=[("programmeID", self.programmeID), 840 ("filterID", filterID)]) 841 842 # Update RequiredStack 843 for point in newSetup.productData["stack"]: 844 flName = filterTable.getAttr("shortName", filterID=point.filterID) 845 nameStr = "%s_%s_%s" % (pAcronym, point.fieldID, flName.title()) 846 desc = ("Deep %s stack: %s, pointing %11.7f, %11.7f %s, nustep %d" 847 % (pAcronym, nameStr, point.ra, point.dec, flName, point.nuStep) 848 if self.sysc.hasMicroStep else 849 "Deep %s stack: %s, pointing %11.7f, %11.7f %s" 850 % (pAcronym, nameStr, point.ra, point.dec, flName)) 851 entryList = [self.programmeID, point.productID, nameStr, desc, 852 point.filterID, point.ra, point.dec, self.numberStks] 853 if self.sysc.hasMicroStep: 854 entryList.append(point.nuStep) 855 856 if self.sysc.hasOffsetPos: 857 entryList.append(point.offsetPos) 858 859 entryList += [point.stackRadius, point.fieldID] 860 if self.sysc.hasVarOrient: 861 # Position angle 862 entryList.append(point.posAngle) 863 864 self.archive.insertData("RequiredStack", entryList) 865 866 for point in newSetup.productData["tile"]: 867 flName = filterTable.getAttr("shortName", filterID=point.filterID) 868 nameStr = "%s_%s_%s" % (pAcronym, point.fieldID, flName.title()) 869 desc = "Deep %s tile: %s, pointing %11.7f, %11.7f %s, nustep %d" \ 870 % (pAcronym, nameStr, point.ra, point.dec, flName, point.nuStep) 871 # @FIXME: No nustep in OSA 872 entryList = [self.programmeID, point.productID, nameStr, desc, 873 point.filterID, point.ra, point.dec, point.nuStep, 874 point.stackRadius, point.fieldID, point.posAngle] 875 876 
self.archive.insertData("RequiredTile", entryList) 877 878 for point in newSetup.productData["mosaic"]: 879 flName = filterTable.getAttr("shortName", filterID=point.filterID) 880 nameStr = "%s_%s_%s" % (pAcronym, point.fieldID, flName.title()) 881 desc = ("Deep %s mosaic: %s, pointing %11.7f, %11.7f %s, " 882 "pixel size %3.2f, size %3.2f x %3.2f sq. degrees" \ 883 % (pAcronym, nameStr, point.ra, point.dec, flName, 884 point.pixelSize, point.raExtent, point.decExtent)) 885 886 dualFilterID = 5 887 888 # @FIXME: no nustep in OSA 889 # No mosaics in OSA (yet) 890 entryList = [self.programmeID, point.productID, nameStr, desc, 891 point.filterID, point.ra, point.dec, point.raExtent, 892 point.decExtent, point.nuStep, point.pixelSize, dualFilterID, 893 point.fieldID] 894 if self.sysc.hasVarOrient: 895 entryList.append(point.posAngle) 896 897 self.archive.insertData("RequiredMosaic", entryList) 898 899 for combiProdID, intProdID, combiProdType, intProdType \ 900 in newSetup.productLinks: 901 902 entryList = [self.programmeID, combiProdID, intProdID, 903 combiProdType, intProdType] 904 905 self.archive.insertData("ProductLinks", entryList) 906 907 if self.newPoints: 908 return False # Go no further if just updating required pointings 909 910 # Parse registration file for registration description 911 Logger.addMessage("Parsing registration files...") 912 registration = Registration(self.programme) 913 914 # New Programme table entries 915 entryList = [] 916 if self.programme.isNonSurvey(): 917 entryList += [ 918 ("description", "'%s'" % registration.description), 919 ("extractTool", "'CASU'"), 920 ("sourceTable", "'%sSource'" % pAcronym), 921 ("mergeLogTable", "'%sMergeLog'" % pAcronym), 922 ("neighboursSchema", "'NonSurvey/%sNS%s_%sNeighbourSchema.sql'" 923 % (self.sysc.loadDatabase, pAcronym, pAcronym)), 924 ("isLowGalacticLat", int(newSetup.isLowGalLat))] 925 926 927 # If band-merging criterion not supplied, see if this is already a 928 # previously set up correlated 
survey. 929 self.bandMergeCrit = (self.bandMergeCrit 930 or self.programme.getAttr("bandMergingCriterion")) 931 932 # Define deep table entries for Programme 933 if newSetup.isSurveyDeep: 934 synNeighTableDetails = [] 935 for ngTable in synSetup.neighTables: 936 synID = synSetup.getSynopticTableID(ngTable) 937 synNeighTableDetails.append((ngTable, synID)) 938 939 if synSetup.bestMatchTables: 940 entryList += [ 941 ("synopticBestMatchTable", repr(synSetup.varBMTable)), 942 ("variabilityTable", "'%sVariability'" % pAcronym)] 943 944 if synSetup.bandMergingCrit: 945 entryList += [ 946 ("synopticSourceTable", repr( 947 synSetup.getSynopticSourceTable())), 948 ("bandMergingCriterion", synSetup.bandMergingCrit)] 949 950 else: 951 entryList += [("synopticBestMatchTable", repr(dbc.charDefault())), 952 ("variabilityTable", repr(dbc.charDefault())), 953 ("synopticSourceTable", repr(dbc.charDefault()))] 954 955 # Update Programme 956 Logger.addMessage("Updating Programme with " + 957 ("deep table information" if not self.programme.isNonSurvey() else 958 "table names and neighbour schema") + 959 " for programme %s" % pAcronym.upper()) 960 961 self.archive.update("Programme", entryList, 962 where="programmeID=%s" % self.programmeID) 963 964 self.programme.updateDetails() 965 966 # For newly registered programmes only 967 if self.programme.isNonSurvey() \ 968 and not self.archive.queryEntriesExist("Survey", 969 where="surveyID=%s" % self.programmeID): 970 971 Logger.addMessage(pAcronym.upper() + " is a newly registered " 972 "programme. 
Adding entries into Survey, SurveyProgrammes and " 973 "ProgrammeTable...") 974 975 # Update Survey 976 self.archive.insertData("Survey", rowData=[self.programmeID, 977 self.programme.getAttr("dfsIDString"), "%s: %s" % 978 (self.programme.getName().replace("programme", "project"), 979 registration.description), pAcronym.upper(), dbc.yes()]) 980 981 # Update SurveyProgrammes - assuming just one programme in survey 982 self.archive.insertData("SurveyProgrammes", 983 [self.programmeID, self.programmeID, dbc.no()]) 984 985 # - and programme entry in world survey 986 if self.sysc.isWSA(): 987 self.archive.insertData("SurveyProgrammes", 988 [dbc.worldSurveyID(), self.programmeID, dbc.yes()]) 989 990 # Update ProgrammeTable to add in source table only 991 self.archive.insertData("ProgrammeTable", [self.programmeID, 2, 992 self.programme.getSourceTable(), "sourceID"]) 993 994 # If a newly correlated survey, need to update ProgrammeTable 995 if synSetup.bandMergingCrit: 996 synID = 3 997 if not self.archive.queryEntriesExist("ProgrammeTable", 998 where="programmeID=%s AND tableID=%s" % (self.programmeID, synID)): 999 1000 Logger.addMessage(pAcronym.upper() + " is now a correlated " 1001 "survey. Adding SynopticSource table entry " 1002 "into ProgrammeTable...") 1003 1004 self.archive.insertData("ProgrammeTable", [ 1005 self.programmeID, synID, 1006 synSetup.getSynopticSourceTable(), "synopticID"]) 1007 1008 Logger.addMessage("Updating RequiredNeighbours...") 1009 joinCriterion = 10.0 / 3600.0 # 10 arcsec 1010 1011 if self.programme.isNonSurvey(): 1012 # @TODO: Don't clean - either update or append. 1013 # - SourceNeighbours 1014 # @TODO: SDSS - WFCAM non-surveys, latest SDSS... 
1015 twomassID = self.getExternalSurveyID('TWOMASS') 1016 ssaID = self.getExternalSurveyID('SSA') 1017 wiseAssID = self.getExternalSurveyID('WISE') 1018 neighbourEntries = [ 1019 [self.programmeID, 2, dbc.intDefault(), dbc.intDefault(), joinCriterion, 1020 "%sSourceNeighbours" % pAcronym, dbc.intDefault()], # - SourceNeighbours 1021 [self.programmeID, 2, twomassID, 1, joinCriterion, 1022 "%sSourceXtwomass_psc" % pAcronym, dbc.intDefault()], # - SourceXtwomass_psc 1023 [self.programmeID, 2, ssaID, 1, joinCriterion, 1024 "%sSourceXSSASource" % pAcronym, dbc.intDefault()], # - SourceXSSASource 1025 [self.programmeID, 2, wiseAssID, 2, joinCriterion, 1026 "%sSourceXwise_allskysc" % pAcronym, dbc.intDefault()], # - SourceXwise_allskysc 1027 ] 1028 if self.sysc.isWSA(): 1029 # Find latest SDSS 1030 latestSDSSsID = self.archive.queryAttrMax("surveyID", "ExternalSurvey", 1031 "surveyName like '%SDSS%-DR%'") 1032 surveyName, extTableName, tableID = self.archive.query( 1033 "surveyName,extTableName,extTableID", 1034 "ExternalSurveyTable as t, ExternalSurvey as s", 1035 "s.surveyID=t.surveyID and s.surveyID=%s and " 1036 "extTableName='PhotoObj'" % latestSDSSsID, firstOnly=True) 1037 neighTableName = self.getSDSSNeighTable(surveyName, extTableName) 1038 neighbourEntries += [[self.programmeID, 2, latestSDSSsID, tableID, joinCriterion, 1039 neighTableName, dbc.intDefault()]] 1040 for reqNeigh in neighbourEntries: 1041 if not self.archive.queryEntriesExist("RequiredNeighbours", 1042 "programmeID=%s and tableID=2 and surveyID=%s and extTableID=%s and extProgID=%s" 1043 % (self.programmeID, reqNeigh[2], reqNeigh[3], reqNeigh[6])): 1044 1045 self.archive.insertData("RequiredNeighbours", 1046 reqNeigh) 1047 1048 1049 self.archive.delete("RequiredNeighbours", 1050 whereStr="programmeID=extProgID AND programmeID=%s" % self.programmeID 1051 + " AND (neighbourTable LIKE '%XDetection'" 1052 " OR neighbourTable LIKE '%XSynopticSource')") 1053 1054 # - SourceXDetection or 
SourceXSynopticSource 1055 if newSetup.isSurveyDeep: 1056 for synNeighTable, synID in synNeighTableDetails: 1057 self.archive.insertData("RequiredNeighbours", 1058 [self.programmeID, 2, dbc.intDefault(), synID, 1059 joinCriterion, synNeighTable, self.programmeID]) 1060 1061 return True
1062 #-------------------------------------------------------------------------- 1063
1064 - def getExternalSurveyID(self, databaseName):
1065 """ 1066 """ 1067 return self.archive.query("surveyID", "ExternalSurvey", "databaseName='%s'" 1068 % databaseName, firstOnly=True, default=dbc.intDefault())
1069 #-------------------------------------------------------------------------- 1070
1071 - def getSDSSNeighTable(self, surveyName, extTableName):
1072 """ returns name of neighbour table between programme and SDSS survey 1073 """ 1074 surveyNameParts = surveyName.split('-') 1075 survName = ''.join([part.title() for part in surveyNameParts 1076 if part.upper() != 'SDSS']) 1077 return "%sX%s%s" % (self.programme.getSourceTable(), survName, extTableName)
1078 #------------------------------------------------------------------------------ 1079
class Status(object):
    """This translates schema template grammar into a current status

       Template directives (parsed by parseString()) switch parts of this
       state on and off; update() combines the individual flags into the
       final writeLine decision for each schema line.
    """
    #: Two-character directive prefix -> action name.
    actionDict = {
        '++':'start',
        '+-':'startNot',
        '==':'finish',
        '=-':'finishNot'}

    #: Single-character directive code -> handler method category.
    methodDict = {
        'p':'specificProgrammes',
        'r':'repeatTable',
        'x':'sextractorValues',
        'h':'highGalacticLat',
        'd':'synTables',
        'f':'cycleThroughFilters',
        'c':'cycleThroughColours',
        's':'sourceTables',
        't':'dropTables',
        'e':'externalTable',
        'n':'cycleThroughNeighs',
        'o':'onceOnly',
        'b':'cycleThroughBorders',
        }

    #: Dict of colour (filter-pair) groups, keyed by group code.
    colourGroups = None
    #: Dict of filter short-name groups, keyed by group code.
    filterGroups = None
    #: Dict of detector/dither border position-name groups.
    borderGroups = None
    #: Dict of programme-ID groups ('early', 'ps', 'ns', 'allProgs').
    progGroups = None
    # Default, not split table
    splitTables = None
    # Default condition use programme, write line.
    useProgSP = True
    useProgSX = True
    useProgHL = True
    useProgSY = True
    useProgSR = True
    useProgFO = True
    #: External table code currently being checked, or None.
    checkSpecNeigh = None
    #: Combined decision: write the current schema line?
    writeLine = True
    #: Are drop-table directives active for this programme?
    dropTables = False
    # Normally don't cycle through colours or filters
    filters = None
    colours = None
    borders = None
    useColours = False
    useFilters = False
    useBorders = False
    #: Ordered (tableSuffix, isPrimary) pairs for split-table output.
    tableOrder = [('main', True)]
    neighView = None
    neighs = None
    useNeighs = False
    #: Index of the last programme in the processed list.
    lastIndex = None
    maxIndex = None
    #: Index of the current programme in the processed list.
    progIndex = None

    #: DbSession connection to database.
    archive = None
    #: DataFactory.ProgrammeTable set to the current programme.
    programme = None
    #: SystemConstants initialised for current archive.
    sysc = None
    #: SynopticSetup from Programme table details for current programme.
    synSetup = None

    #--------------------------------------------------------------------------
    def __init__(self, cu, progIDs, synSetup):
        """
        Initialise status from current CuSession task.

        @param cu:       Curation task.
        @type  cu:       CuSession
        @param progIDs:  List of programme IDs being processed.
        @type  progIDs:  list(int)
        @param synSetup: Synoptic setup for the current programme.
        @type  synSetup: SynopticSetup

        """
        self.archive = cu.archive
        self.programme = cu.programme
        self.sysc = cu.sysc
        self.synSetup = synSetup
        programmeID = self.programme.getAttr("programmeID")
        # Position of this programme within the processed list drives the
        # 'onceOnly' first/last directives in update().
        self.progIndex = progIDs.index(programmeID)
        self.lastIndex = len(progIDs) - 1
        surveyIDs = self.programme.getProgIDList(onlySurvey=True)
        nonSurveyIDs = self.programme.getProgIDList(onlyNonSurvey=True)
        minSurveyProgID = min(self.sysc.scienceProgs.values())
        # Programme groups referenced by name in template directives
        self.progGroups = {
            'early': [progID for progID in surveyIDs if progID < minSurveyProgID],
            'ps': [progID for progID in surveyIDs if progID >= minSurveyProgID],
            'ns': nonSurveyIDs,
            'allProgs': surveyIDs + nonSurveyIDs}

        # Filter sets
        allFilters = \
            list(df.PassbandList(self.programme).getPassbandsLowerCase())

        bbFilters = df.PassbandList(self.programme).getBroadbandPassbands()

        synFilters = list(df.PassbandList(self.programme, isSynoptic=True)
                          .getPassbandsLowerCase())

        bbSynFilters = df.PassbandList(self.programme, isSynoptic=True)\
                       .getBroadbandPassbands()

        # @@TODO one row only
        self.filterGroups = {'a': allFilters, 's': synFilters}

        if self.sysc.hasOffsetPos:
            # Offset-position "filter" group o1..oN; fall back to 6 offsets
            # when the query returns nothing
            offSetMax = self.archive.queryAttrMax("noffsets",
                table=Join(["Multiframe", "ProgrammeFrame"], ["multiframeID"]),
                where="frameType LIKE '%tile%stack' AND " +
                DepCodes.selectNonDeprecated) or 6

            self.filterGroups['o'] = \
                ['o%d' % off for off in range(1, offSetMax + 1)]

        # Build colour pairs: adjacent broadbands, plus each narrow-band
        # paired with its associated broadband
        l1 = bbFilters[:-1]
        l2 = bbFilters[1:]
        for fName in allFilters:
            if fName not in bbFilters:
                bbF = queries.getBBFilter(self.archive, fName, programmeID)
                if bbF:
                    l1.append(fName)
                    l2.append(bbF.lower())

        # Sort out NB filters to nearest BB - need to use wvl info
        self.colourGroups = {
            'a': list(zip(l1, l2)),
            'sb': list(zip(bbSynFilters[:-1], bbSynFilters[1:]))}

        # Border position names: 2 edge types x 2 bounds x 4 positions
        posList = []
        for edge in ['detEdge', 'dithEdge']:
            for bound in ['Inner', 'Outer']:
                for pos in range(4):
                    posList.append('%s%sPos%d' % (edge, bound, pos + 1))

        self.borderGroups = {'io': posList}
    def complexSub(self, line):
        """
        Expand every inline '$[...;join;method:info$' substitution in the
        given template line, cycling through filters, colours, borders or
        applying a specific-programme choice, and return the expanded line.

        @param line: Schema template line possibly containing one or more
                     '$[' substitution groups.
        @type  line: str

        @return: Line with all substitution groups expanded.
        @rtype:  str

        @raise ProgrammeBuilder.CuError: If a substitution names a method
               this expander does not handle.

        """
        # Get string to work on
        # One pass per '$[' group; line shrinks as each group is replaced
        for _substitution in range(len(line.split('$[')) - 1):
            # The full '$[...$' group, the text to repeat, the separator
            # and the method code with its ':'-prefixed argument list
            subString = '$[' + line.split('$[')[1].split('$')[0] + '$'
            repeatString = subString.split('[')[1].split(']')[0]
            joinStr = subString.split(';')[1]
            methodArg = subString.split(';')[2][0]
            addInfo = \
                subString.split(';')[2].split(':')[1].split('$')[0].split(',')
            #
            method = self.methodDict[methodArg]
            if method == 'cycleThroughFilters':
                # &A&/&a&/&As& are case variants of the filter name; &n&
                # is 'n' for every repeat after the first
                repeats = []
                for ii, shtName in enumerate(self.filterGroups[addInfo[0]]):
                    subs = [("&A&", shtName.upper()),
                            ("&a&", shtName.lower()),
                            ("&As&", shtName.title()),
                            ("&n&", 'n' if ii > 0 else '')]
                    repeats.append(utils.multiSub(repeatString, subs))
                line = utils.multiSub(line, [(subString, joinStr.join(repeats))])
            elif method == 'cycleThroughColours':
                # &A&/&B& variants are the two filters of each colour pair
                repeats = []
                for shtName1, shtName2 in self.colourGroups[addInfo[0]]:
                    subs = [("&A&", shtName1.upper()),
                            ("&a&", shtName1.lower()),
                            ("&As&", shtName1.title()),
                            ("&B&", shtName2.upper()),
                            ("&b&", shtName2.lower()),
                            ("&Bs&", shtName2.title())]
                    repeats.append(utils.multiSub(repeatString, subs))
                line = utils.multiSub(line, [(subString, joinStr.join(repeats))])
            elif method == 'cycleThroughBorders':
                repeats = []
                for border in self.borderGroups[addInfo[0]]:
                    subs = [("&A&", border.upper()),
                            ("&a&", border.lower())]
                    repeats.append(utils.multiSub(repeatString, subs))
                line = utils.multiSub(line, [(subString, joinStr.join(repeats))])
            elif method == 'specificProgrammes':
                # Keep the repeat text when this programme matches the
                # info list, otherwise substitute the join string
                self.addInfo = addInfo
                if self.isProgMatch():
                    line = utils.multiSub(line, [(subString, repeatString)])
                else:
                    line = utils.multiSub(line, [(subString, joinStr)])
            else:
                raise ProgrammeBuilder.CuError("Schema inconsistency")
        return line
1270 1271 #-------------------------------------------------------------------------- 1272
    def setNeighbours(self):
        """
        Query RequiredNeighbours for this programme and populate
        self.neighTables with a df.View per neighbour-table category
        ('Neigh', 'XMatch', 'Synop', 'Inter', 'Prog', plus 'All'), and
        self.neighTypes with the category names found (excluding 'All').
        """
        # Different types of neighbour table need different info
        # 4 types: n - neighbour with self
        #          x - neighbour with external survey
        #          s - neighbour with another table in programme
        #          p - neighbour with table in another programme
        viewStringN = ("neighTable,matchDist,primeTable,primeID")
        viewStringX = viewStringN + (",externName,externDb,externTable,externID")
        viewStringS = viewStringN + (",externTable,externID")
        viewStringP = viewStringS + (",extProgID,externName")
        selectStringN = ("r.neighbourTable,r.joinCriterion,pt.tableName,"
                         "pt.sourceIDName")
        selectStringX = selectStringN + (",es.surveyName,es.databaseName,"
                                         "est.extTableName,est.sourceIDName")
        selectStringS = selectStringN + (",pt2.tableName,pt2.sourceIDName")
        selectStringP = selectStringS + (",p.programmeID,p.title")
        # ##
        fromStringN = ("RequiredNeighbours as r,ProgrammeTable as pt")
        fromStringX = fromStringN + (",ExternalSurvey as es,"
                                     "ExternalSurveyTable as est")
        fromStringS = fromStringN + (",ProgrammeTable as pt2")
        fromStringP = fromStringS + (",Programme as p")
        # ##
        whereStringN = ("r.programmeID=%d and r.programmeID=pt.programmeID and "
                        "r.tableID=pt.tableID" % self.programme.getAttr("programmeID"))
        whereStringX = whereStringN + (" and r.surveyID=es.surveyID and "
                                       "r.surveyID=est.surveyID and "
                                       "r.extTableID=est.extTableID ")
        whereStringS = whereStringN + (" and r.programmeID=pt2.programmeID and "
                                       "r.extTableID=pt2.tableID")
        whereStringP = whereStringN + (" and r.extProgID=pt2.programmeID and "
                                       "r.extTableID=pt2.tableID and "
                                       "p.programmeID=r.extProgID")
        #
        # Check numbers of each table.
        isNeigh = self.archive.queryEntriesExist(
            fromStringN, whereStringN +
            " and r.surveyID<0 and r.extProgID<0")
        isXMatch = self.archive.queryEntriesExist(
            fromStringX, whereStringX + " and r.surveyID>0")
        # Should not call synoptic - e.g. if want SourceXPawprints
        # NOTE: Synop and Inter use the same query; only the programme's
        # deep/non-deep status distinguishes them.
        isSynop = self.archive.queryEntriesExist(
            fromStringS, whereStringS + " and r.surveyID<0 and "
            "r.extProgID=r.programmeID") and self.programme.isDeep()
        isInter = self.archive.queryEntriesExist(
            fromStringS, whereStringS + " and r.surveyID<0 and "
            "r.extProgID=r.programmeID") and not self.programme.isDeep()
        isProg = self.archive.queryEntriesExist(
            fromStringP, whereStringP + " and r.surveyID<0 and "
            "r.extProgID!=r.programmeID")
        self.neighTables = {}
        if isNeigh:
            self.neighTables['Neigh'] = df.View(viewStringN.split(','),
                self.archive.query(selectStringN, fromStringN,
                                   whereStringN +
                                   " and r.surveyID<0 and r.extProgID<0"))
        if isXMatch:
            self.neighTables['XMatch'] = df.View(viewStringX.split(','),
                self.archive.query(selectStringX, fromStringX,
                                   whereStringX + " and r.surveyID>0"))
        if isSynop:
            self.neighTables['Synop'] = df.View(viewStringS.split(','),
                self.archive.query(selectStringS, fromStringS,
                                   whereStringS + " and r.surveyID<0 and "
                                   "r.extProgID=r.programmeID"))
        if isInter:
            self.neighTables['Inter'] = df.View(viewStringS.split(','),
                self.archive.query(selectStringS, fromStringS,
                                   whereStringS + " and r.surveyID<0 and "
                                   "r.extProgID=r.programmeID"))
        if isProg:
            self.neighTables['Prog'] = df.View(viewStringP.split(','),
                self.archive.query(selectStringP, fromStringP,
                                   whereStringP + " and r.surveyID<0 and "
                                   "r.extProgID!=r.programmeID"))
        # Capture type names BEFORE adding the catch-all 'All' view, so
        # 'All' never appears in neighTypes.
        self.neighTypes = self.neighTables.keys()
        if self.neighTables:
            self.neighTables['All'] = df.View(viewStringN.split(','),
                self.archive.query(selectStringN, fromStringN, whereStringN))
1354 1355 #-------------------------------------------------------------------------- 1356
    def update(self, line):
        """
        Parse a template directive line and update the internal state
        flags; the combined result is stored in self.writeLine, which
        says whether subsequent schema lines should be written for the
        current programme.

        @param line: Schema template directive line.
        @type  line: str

        @raise ProgrammeBuilder.CuError: On inconsistent directive usage.

        """
        self.parseString(line)

        # Combine programme information to decide whether to ignore
        # subsequent lines, or repeat them.
        #
        # Split tables
        #
        if (self.method == 'repeatTable' and
            self.action == 'start'):
            if self.addInfo[0] == 'd':
                # Detection-style split into Raw/Astrometry/Photometry
                self.tableOrder = [('', True), ('Raw', False), ('Astrometry', False),
                                   ('Photometry', False)]
                self.splitTables = {'d':['', 'Raw', 'Astrometry', 'Photometry'],
                                    'r':['', 'Raw'],
                                    'a':['', 'Astrometry'],
                                    'p':['', 'Photometry'],
                                    'D':[''],
                                    'R':['Raw'],
                                    'A':['Astrometry'],
                                    'P':['Photometry'],
                                    'S':['Raw', 'Astrometry', 'Photometry'], }
            elif self.addInfo[0] == 's':
                # Source-style split into main/Merge
                self.tableOrder = [('', True), ('Merge', False)]
                self.splitTables = {'m':['', 'Merge'],
                                    's':[''], }
            elif self.addInfo[0] == 'n':
                # Neighbour-style split: one entry per neighbour category
                # present for this programme (see setNeighbours())
                tableDict = {'Neigh':'n',
                             'XMatch':'x',
                             'Synop':'s',
                             'Inter':'i',
                             'Prog':'p'}
                self.tableOrder = []
                self.splitTables = {'a':self.neighTypes}
                for table in self.neighTypes:
                    self.tableOrder.append((table, True))
                    self.splitTables[tableDict[table]] = [table]
        elif (self.method == 'repeatTable' and
              self.action == 'finish'):
            # Reset
            self.splitTables = None
            self.tableOrder = [('main', True)]
        #
        # Specific Programme
        #
        if self.method == 'specificProgrammes':
            if self.action == 'start':
                if ((not self.isProgMatch() and not self.progSubtract) or
                    (self.isProgMatch() and self.progSubtract)):
                    self.useProgSP = False
            elif self.action == 'startNot':
                if ((self.isProgMatch() and not self.progSubtract) or
                    (not self.isProgMatch() and self.progSubtract)):
                    self.useProgSP = False
            elif self.action == 'finish':
                if ((not self.isProgMatch() and not self.progSubtract) or
                    (self.isProgMatch() and self.progSubtract)):
                    self.useProgSP = True
            elif self.action == 'finishNot':
                # Reset
                # NOTE(review): this condition reduces to isProgMatch()
                # alone; unlike 'startNot' the second term is not negated
                # - looks like it should mirror the startNot condition.
                # Confirm before changing.
                if ((self.isProgMatch() and not self.progSubtract) or
                    (self.isProgMatch() and self.progSubtract)):
                    self.useProgSP = True
        #
        # Source Tables
        #
        if (self.method == 'sourceTables' and
            self.action == 'start'):
            # Check to see if the programme has a source table defined
            if self.programme.getSourceTable() == dbc.charDefault():
                self.useProgSR = False
        elif (self.method == 'sourceTables' and
              self.action == 'finish'):
            # Reset
            self.useProgSR = True
        #
        # SExtractor values
        #
        if (self.method == 'sextractorValues' and
            self.action == 'start'):
            # If not a SExtractor programme no match
            if not self.programme.useSExtractor():
                self.useProgSX = False
            elif self.addInfo:
                # Should be programme removed for some reason
                if not self.progSubtract:
                    raise ProgrammeBuilder.CuError("Schema inconsistency")
                # Check to see if programme is mentioned.
                if self.isProgMatch():
                    self.useProgSX = False
        elif (self.method == 'sextractorValues' and
              self.action == 'startNot'):
            # Check if programme not SExtractor - CASU extractor
            if self.programme.useSExtractor():
                self.useProgSX = False
            elif self.addInfo:
                # Should be programme removed for some reason
                if not self.progSubtract:
                    raise ProgrammeBuilder.CuError("Schema inconsistency")
                # Check to see if programme is mentioned.
                if self.isProgMatch():
                    self.useProgSX = False

        elif (self.method == 'sextractorValues' and
              (self.action == 'finish' or
               self.action == 'finishNot')):
            # Reset
            self.useProgSX = True
        #
        # Low density extragalactic
        #
        if (self.method == 'highGalacticLat' and
            self.action == 'start'):

            # Starts off true - only set to false
            # Set to false if isLowGalacticLat=False
            # and if programme not
            if self.programme.isLowGalLat():
                self.useProgHL = False
            elif self.addInfo:
                if not self.progSubtract:
                    raise ProgrammeBuilder.CuError("Schema inconsistency")
                if self.isProgMatch():
                    self.useProgHL = False

        elif (self.method == 'highGalacticLat' and
              self.action == 'startNot'):

            if not self.programme.isLowGalLat():
                self.useProgHL = False

            elif self.addInfo:
                # Should be programme removed for some reason
                if not self.progSubtract:
                    raise ProgrammeBuilder.CuError("Schema inconsistency")
                if self.isProgMatch():
                    self.useProgHL = False

        elif (self.method == 'highGalacticLat' and
              (self.action == 'finish' or
               self.action == 'finishNot')):
            # Reset
            self.useProgHL = True
        #
        # Is synoptic
        #
        if (self.method == 'synTables' and
            self.action == 'start'):
            # Check synopticStatus
            if self.addInfo[0] == 'a':  # all Synoptic surveys
                if not self.programme.isDeep():
                    self.useProgSY = False
            elif self.addInfo[0] == 'c':  # Correlated only
                if not self.synSetup.bandMergingCrit:
                    self.useProgSY = False
            elif self.addInfo[0] == 'o':  # Correlated and single epoch
                if not (self.synSetup.bandMergingCrit and self.synSetup.isCorSingleEpoch):
                    self.useProgSY = False
            elif self.addInfo[0] == 'm':  # Only if
                if not (self.synSetup.isCorrelBMTable()):
                    self.useProgSY = False
            elif self.addInfo[0] == 'n':  # Not correlated only
                if not (self.synSetup.isNonCorrelBMTable()):
                    self.useProgSY = False
            elif self.addInfo[0] == 'k':  # Variability table correlated
                if not (self.synSetup.isVarTableCorrel()):
                    self.useProgSY = False
            elif self.addInfo[0] == 'v':  # Variability table not correlated
                if not (self.programme.isDeep() and not
                        self.synSetup.isVarTableCorrel()):
                    self.useProgSY = False
        elif (self.method == 'synTables' and
              self.action == 'finish'):
            # Reset
            self.useProgSY = True
        #
        # Use first programme only - indices file
        #
        if (self.method == 'onceOnly' and
            self.action == 'start'):
            if self.addInfo[0] == 'f':
                # 'f': only the first programme in the list writes this
                if not self.progIndex == 0:
                    self.useProgFO = False
            elif self.addInfo[0] == 'l':
                # 'l': only the last programme writes this
                if not self.progIndex == self.lastIndex:
                    self.useProgFO = False
        elif (self.method == 'onceOnly' and
              self.action == 'finish'):
            # reset
            self.useProgFO = True
        #
        # Specific External table
        #
        if (self.method == 'externalTable' and
            self.action == 'start'):
            if not self.addInfo:
                raise ProgrammeBuilder.CuError("Schema inconsistency")
            self.checkSpecNeigh = self.addInfo[0]
        if (self.method == 'externalTable' and
            self.action == 'finish'):
            self.checkSpecNeigh = None
        #
        # Decision
        #
        self.writeLine = (self.useProgSY and self.useProgHL and
                          self.useProgSX and self.useProgSP and
                          self.useProgSR and self.useProgFO)
        #
        # Set filters
        #
        if (self.method == 'cycleThroughFilters' and
            self.action == 'start'):
            self.useFilters = True
            self.filters = self.filterGroups[self.addInfo[0]]
        elif (self.method == 'cycleThroughFilters' and
              self.action == 'finish'):
            self.useFilters = False
            self.filters = None
        #
        # Set colours
        #
        if (self.method == 'cycleThroughColours' and
            self.action == 'start'):
            self.useColours = True
            # Copy the group so stripAddCols() edits don't mutate it
            self.colours = self.colourGroups[self.addInfo[0]][:]
            if len(self.addInfo) > 1:
                # Add or remove additional terms
                self.stripAddCols(','.join(self.addInfo[1:]))

        elif (self.method == 'cycleThroughColours' and
              self.action == 'finish'):
            self.colours = None
            self.useColours = False

        #
        # set neighs
        #
        if (self.method == 'cycleThroughNeighs' and
            self.action == 'start'):
            if self.addInfo:
                if self.addInfo[0] == 'o':
                    self.maxIndex = 1
            self.useNeighs = True
            self.neighs = self.neighTables
        elif (self.method == 'cycleThroughNeighs' and
              self.action == 'finish'):
            self.maxIndex = None
            self.neighs = None
            self.useNeighs = False

        #
        # set borders
        #
        if (self.method == 'cycleThroughBorders' and
            self.action == 'start'):
            self.useBorders = True
            self.borders = self.borderGroups[self.addInfo[0]]
        elif (self.method == 'cycleThroughBorders' and
              self.action == 'finish'):
            self.useBorders = False
            self.borders = None

        #
        # Drop tables
        #
        if (self.method == 'dropTables' and
            self.action == 'start'):
            if self.isProgMatch():
                self.dropTables = True
        elif (self.method == 'dropTables' and
              self.action == 'finish'):
            # Reset
            if self.isProgMatch():
                self.dropTables = False
1633 1634 #-------------------------------------------------------------------------- 1635
1636 - def isProgMatch(self):
1637 """ 1638 """ 1639 progID = self.programme.getAttr("programmeID") 1640 for info in self.addInfo: 1641 if info in self.progGroups and progID in self.progGroups[info] \ 1642 or info == self.programme.getAcronym(): 1643 return True 1644 1645 return False
1646 1647 #-------------------------------------------------------------------------- 1648
1649 - def parseString(self, line):
1650 """ 1651 """ 1652 self.action = self.actionDict[line[0:2]] 1653 self.method = self.methodDict[line[2]] 1654 self.progSubtract = False 1655 self.addInfo = None 1656 if line[3] == ":": 1657 if line[4] == '-': 1658 self.progSubtract = True 1659 self.addInfo = line[5:].split()[0].split(',') # remove any spaces 1660 else: 1661 self.addInfo = line[4:].split()[0].split(',') # remove any spaces
1662 1663 #-------------------------------------------------------------------------- 1664
1665 - def getAllTables(self, tableDict):
1666 """ 1667 """ 1668 tables = set(sum(tableDict.values(), [])) 1669 return dict([(table, []) for table in tables])
1670 1671 #-------------------------------------------------------------------------- 1672
1673 - def initialise(self, dictType, tableStruct):
1674 """Initialise dictionary of filter or colour info 1675 """ 1676 dictData = dict() 1677 if dictType == 'filter': 1678 filterList = self.filters 1679 for table in tableStruct: 1680 dictFilt = {} 1681 for shtName in filterList: 1682 dictFilt[shtName] = list() 1683 dictData[table] = dictFilt 1684 elif dictType == 'colour': 1685 filterList = ['%sm%s' % (c1, c2) for c1, c2 in self.colours] 1686 for table in tableStruct: 1687 dictFilt = {} 1688 for shtName in filterList: 1689 dictFilt[shtName] = list() 1690 dictData[table] = dictFilt 1691 elif dictType == 'neigh': 1692 # different list for each table 1693 for table in tableStruct: 1694 dictFilt = {} 1695 for nTable in self.neighs[table]['neightable']: 1696 dictFilt[nTable] = list() 1697 dictData[table] = dictFilt 1698 filterList = self.neighs['All']['neightable'] 1699 elif dictType == 'border': 1700 filterList = self.borders 1701 for table in tableStruct: 1702 dictFilt = {} 1703 for shtName in filterList: 1704 dictFilt[shtName] = list() 1705 dictData[table] = dictFilt 1706 return dictData, filterList
1707 1708 #-------------------------------------------------------------------------- 1709
    def stripAddCols(self, addColInfo):
        """
        Parses a string with additional colour info and edits colour group
        accordingly.

        The info string carries colours to add after a '+' and colours to
        remove after a '-', in either order; each list is comma-separated
        'm'-joined filter pairs, e.g. 'jmk'.

        @param addColInfo: Additional colour specification string.
        @type  addColInfo: str

        """
        # parse string
        addColsInd = addColInfo.find('+')
        subColsInd = addColInfo.find('-')
        # NOTE(review): when '+' precedes '-', parts[0] still carries the
        # leading '+' (and similarly parts[0] carries the '-' in the other
        # order); testColours() then silently rejects that first colour.
        # Also, a string with neither sign raises IndexError on parts[1].
        # Presumably the template grammar always supplies a leading sign -
        # confirm before changing.
        if addColsInd < subColsInd:
            parts = addColInfo.split('-')
            if addColsInd >= 0:
                newCols = parts[0]
                remCols = parts[1]
            else:
                newCols = None
                remCols = parts[1]
        else:
            parts = addColInfo.split('+')
            if subColsInd >= 0:
                remCols = parts[0]
                newCols = parts[1]
            else:
                remCols = None
                newCols = parts[1]

        # ii == 0: additions; ii == 1: removals
        for ii, subString in enumerate([newCols, remCols]):
            if subString:
                colList = subString.split(',')

                # Make sure new colours match possible filters
                colList = self.testColours(colList)

                # Add or subtract colours from/to self.colours
                if ii == 0:
                    self.colours.extend(tuple(col.split('m')) for col in colList)
                else:
                    # Remove by matching the 'm'-joined form, then rebuild
                    # the tuple list
                    oldCols = ['m'.join(cl) for cl in self.colours]
                    [oldCols.remove(col) for col in colList
                     if col in oldCols]
                    self.colours = [tuple(col.split('m')) for col in oldCols]
1751 1752 #-------------------------------------------------------------------------- 1753
1754 - def testColours(self, colList):
1755 """ 1756 Tests whether filters that make up colours are available for this 1757 programme. 1758 1759 """ 1760 finalColList = [] 1761 for col in colList: 1762 filComb = col.split('m') 1763 if (filComb[0] in self.filterGroups['a'] and 1764 filComb[1] in self.filterGroups['a']): 1765 finalColList.append(col) 1766 1767 return finalColList
1768 1769 #------------------------------------------------------------------------------ 1770
class Registration(object):
    """ Programme requirements from registration file.
    """
    comments = ''                    #: Extra comments on the programme.
    description = dbc.charDefault()  #: Project description.
    email = ''                       #: PI's e-mail address.
    expectedFilters = None           #: List of filters required by PI.
    password = ''                    #: PI password.
    piName = ''                      #: Name of the PI.

    #--------------------------------------------------------------------------

    def __init__(self, progTable):
        """
        Parses registration files, if not already done so, and sets registered
        requirements for the given programme.

        @param progTable: Programme table for current database with the
                          current row set to that of the desired programme.
        @type progTable: df.ProgrammeTable

        """
        self.expectedFilters = []
        if not progTable.isNonSurvey():
            # No registration file if not a non-survey, so use hard wired value
            self.description = progTable.getAttr("description")
            return

        # NOTE(review): attribute name casing here ("dfsIdString") differs
        # from "dfsIDString" used elsewhere in this module when inserting
        # into Survey - confirm getAttr() is case-insensitive or that both
        # spellings are valid.
        programme = progTable.getAttr("dfsIdString")
        programmeName = progTable.getAcronym().upper()
        sysc = progTable._db.sysc

        # Scan every .reg file until this programme's entry is found; once
        # found, harvest the key=value lines that follow it.
        foundReg = False
        for fileName in os.listdir(sysc.nonSurveyRegPath()):
            if '.reg' in fileName:
                for line in utils.ParsedFile(sysc.nonSurveyRegPath(fileName)):
                    if not foundReg \
                       and line.startswith("programme=" + programme):

                        foundReg = True

                    elif not foundReg and line.startswith("programmeName="):
                        if line.endswith(programmeName):
                            foundReg = True
                        else:
                            # Wrong programme's file: skip to the next file
                            break

                    elif foundReg and line.startswith("title="):
                        self.description = line.replace("title=", "")

                    elif foundReg and line.startswith("firstName="):
                        self.piName = line.replace("firstName=", "")

                    elif foundReg and line.startswith("lastName="):
                        self.piName += line.replace("lastName=", " ")

                    elif foundReg and line.startswith("email="):
                        self.email = line.replace("email=", "")

                    elif foundReg and line.startswith("password="):
                        self.password = line.replace("password=", "")

                    elif foundReg and "num=" in line:
                        # Stores the split pair, e.g. ['filterPart', 'count']
                        self.expectedFilters.append(line.split("num="))

                    elif foundReg and line.startswith("comments="):
                        self.comments = line.replace("comments=", "")

            if foundReg:
                break
1841 1842 #------------------------------------------------------------------------------ 1843
class CurrentSetup(object):
    """ Current requirements in archive: a snapshot of the programme's
        existing product setup as stored in the RequiredStack, RequiredTile
        and RequiredFilters curation tables.
    """
    currentFiltData = None  #: Current (filterID[, nuStep], nPass, isSynoptic).
    currentStackPosData = None  #: Sorted unique stack pointing tuples.
    currentTilePosData = None  #: Sorted unique tile pointing tuples (VSA).
    isSurveyDeep = False  #: Survey contains repeated observations?
    nCurrentStackProducts = 0  #: Number of required stack products.
    nCurrentTileProducts = 0  #: Number of required tile products.
    productData = None  #: Dict(List of required stacks / tiles).

    stackAttrs = "productID, fieldID, filterID, nuStep, ra, dec, stackRadius"
    tileAttrs = ("productID, fieldID, filterID, nuStep, ra, dec, stackRadius, "
                 "posAngle")
    mosaicAttrs = ("productID, fieldID, filterID, nuStep, pixelSize, ra, dec, "
                   "raExtent, decExtent, posAngle")

    #--------------------------------------------------------------------------

    def __init__(self, archive, programmeID):
        """ Queries the archive's curation tables for the given programme.

            @param archive:      Connection to the archive database.
            @param programmeID:  Unique ID of the programme being queried.
        """
        self.productData = {}
        sysc = archive.sysc

        # Archive flavour determines which pointing attributes exist.
        if sysc.isVSA():
            self.stackAttrs += ", offsetPos, posAngle"
        if sysc.isOSA():
            self.stackAttrs = ("productID, fieldID, filterID, ra, dec, "
                               "stackRadius, posAngle")

        progSel = "programmeID=%d" % programmeID
        reqStacks = archive.query(selectStr=self.stackAttrs,
                                  fromStr="RequiredStack",
                                  whereStr=progSel)
        self.nCurrentStackProducts = len(reqStacks)

        if sysc.isVSA():
            reqTiles = archive.query(selectStr=self.tileAttrs,
                                     fromStr="RequiredTile",
                                     whereStr=progSel)
            self.nCurrentTileProducts = len(reqTiles)

            stkPointings = {(row.fieldID, math.radians(row.ra),
                             math.radians(row.dec), row.stackRadius,
                             row.offsetPos, row.posAngle)
                            for row in reqStacks}

            # A dummy offsetPos of 0 lets tiles reuse the stack-grouping code.
            tilePointings = {(row.fieldID, math.radians(row.ra),
                              math.radians(row.dec), row.stackRadius, 0,
                              row.posAngle)
                             for row in reqTiles}

            self.currentTilePosData = sorted(tilePointings)
            self.productData["tile"] = reqTiles

        elif sysc.isOSA():
            stkPointings = {(row.fieldID, math.radians(row.ra),
                             math.radians(row.dec), row.stackRadius,
                             row.posAngle)
                            for row in reqStacks}
        else:
            stkPointings = {(row.fieldID, math.radians(row.ra),
                             math.radians(row.dec), row.stackRadius)
                            for row in reqStacks}

        self.currentStackPosData = sorted(stkPointings)
        self.productData["stack"] = reqStacks

        # Find first product ID of each pointing:
        reqFilters = archive.query(selectStr="filterID, nPass, isSynoptic",
                                   fromStr="RequiredFilters",
                                   whereStr=progSel)

        # Now match the typical microstep count to each required filter.
        self.currentFiltData = []
        self.isSurveyDeep = False
        hasMicroStep = sysc.hasMicroStep
        for filterID, nPass, isSynop in reqFilters:
            if isSynop == 1:
                self.isSurveyDeep = True

            if not hasMicroStep:
                self.currentFiltData.append((filterID, nPass, isSynop))
                continue

            nuSteps = [row.nuStep for row in reqStacks
                       if row.filterID == filterID]

            # Typical nuStep is the clipped median over this filter's stacks;
            # fall back to the database default when no stacks match.
            # @TODO: is it simpler just to add in a default value?
            typNuStep = (int(stats.clippedMADHistogram(
                             nuSteps, retValues="median")[0]) if nuSteps
                         else dbc.intDefault())
            self.currentFiltData.append((filterID, typNuStep, nPass, isSynop))
1940 #------------------------------------------------------------------------------ 1941
class BestSetup(object):
    """ Best new setup from stack data.
    """
    isSurveyDeep = False  #: Survey contains repeated observations?
    isTile = False  #: ? (presumably: source product is a tile -- confirm)
    newFiltData = None  #: List of ?
    newPosData = None  #: List of ?
    posAngleTol = None  #: ? NOTE(review): __init__ assigns self.posAngTol,
                        #  not posAngleTol -- confirm the intended name.
    productData = None  #: ?
    productLinks = None  #: ?
    productType = "stack"  #: Product type, e.g. stack, tile etc.
    progID = None  #: Unique ID of programme being set up.
    sysc = SystemConstants()  #: Initialised system constants object.
    displayDeep = False  #: Display deep info? Set from displayDeeps argument.
    #--------------------------------------------------------------------------
1958 - def __init__(self, progTable, recreate, dateRange, displayDeeps):
1959 """ 1960 """ 1961 self.productLinks = [] 1962 1963 # @@TODO if VISTA rotates by 180 degrees will stacking be a mess? 1964 # limit stacks by rotation angle too 1965 # then mosaic tiles at different rotation angles together. 1966 # Add in later. 1967 archive = progTable._db 1968 self.productData = defaultdict(list) 1969 self.sysc = archive.sysc 1970 self.displayDeep = displayDeeps 1971 self.progID = progTable.getAttr("programmeID") 1972 self.posAngTol = progTable.getAttr("posAngleTolerance") 1973 # @todo: Keep RequiredFilters for VISTA PSs 1974 currentSetup = CurrentSetup(archive, self.progID) 1975 Stack = namedtuple("Stack", currentSetup.stackAttrs) 1976 Tile = namedtuple("Tile", currentSetup.tileAttrs) 1977 Mosaic = namedtuple("Mosaic", currentSetup.mosaicAttrs) 1978 1979 stkAttr = "ra dec" 1980 posAttr = "fieldID ra dec gSize" 1981 if self.sysc.hasOffsetPos: 1982 posAttr += " off" 1983 stkAttr += " off" 1984 if self.sysc.hasVarOrient: 1985 posAttr += " pa" 1986 stkAttr += " pa" 1987 self.Position = namedtuple("Position", posAttr) 1988 ActStack = namedtuple("ActStack", stkAttr) 1989 1990 # First check to see in there are any external products 1991 prevReleaseNum = archive.queryAttrMax("releaseNum", 1992 table="ProgrammeFrame", 1993 where="programmeID=%s" % self.progID) or 0 1994 externalProd = archive.query("*", "ExternalProduct", 1995 "programmeID=%s and releaseNum>%s and productType in " 1996 "('mosaic')" % (self.progID, prevReleaseNum), firstOnly=True, 1997 default=None) 1998 if externalProd: 1999 extpd = ExternalProduct(archive, externalProd, progTable, dateRange) 2000 self.productData[externalProd.productType] = extpd.getProductList( 2001 {'mosaic': Mosaic, 2002 'stack': Stack, 'tile': Tile}[externalProd.productType]) 2003 2004 self.newPosData = extpd.getPosData() 2005 self.isLowGalLat = \ 2006 self.checkGalacticStatus(finalStacks=[], progTable=progTable) 2007 2008 self.isSurveyDeep = extpd.prepareProvList() 2009 self.newFiltData = extpd.getFiltData() 
2010 self.productLinks = extpd.getProductLinks() 2011 extpd.renameFiles() 2012 2013 return 2014 2015 # @TODO: NonSurveys - initial setup 2016 if progTable.isNonSurvey(): 2017 # Check what productType 2018 # @TODO: what about mosaic surveys? 2019 if not self.sysc.hasOffsetPos: 2020 archive.update("Programme", 2021 entryList=[("sourceProdType", "'stack'"), 2022 ("epochFrameType", "'stack'")], 2023 where="programmeID=%s" % self.progID) 2024 if self.sysc.hasOffsetPos: 2025 prodType = ('tile' if archive.queryEntriesExist( 2026 Join(["ProgrammeFrame", "Multiframe"], ["multiframeID"]), 2027 where="programmeID=%s AND frameType like 'tile%%stack'" 2028 % self.progID) else 'stack') 2029 archive.update("Programme", 2030 entryList=[("sourceProdType", "'%s'" % prodType), 2031 ("epochFrameType", "'%s'" % prodType), 2032 ("posAngleTolerance", 2033 self.sysc.defaultPosAngleTolerance)], 2034 where="programmeID=%s" % self.progID) 2035 # Update progTable 2036 progTable = df.ProgrammeTable(archive) 2037 progTable.setCurRow(programmeID=self.progID) 2038 self.posAngTol = progTable.getAttr("posAngleTolerance") 2039 2040 # @todo 2nd time round - fit multiframes into existing products. 2041 # then make new products of anything left over. 
2042 2043 dateRange = "utDate BETWEEN '%s' AND '%s'" % dateRange 2044 # Now find current archive setup 2045 # Case 2046 2047 frameTypeSel = queries.getFrameSelection('stack', noDeeps=True) 2048 nustepStr = ', nustep' if self.sysc.hasMicroStep else '' 2049 selectStr = ("m.multiframeID, (15.*rabase) as raBase, decBase, m.filterID%s, " 2050 % nustepStr 2051 + ("offsetX, offsetY, " if self.sysc.hasOffsetPos else "") 2052 + "AVG(totalExpTime) AS totalExpTime" 2053 + (", MIN(posAngle) AS posAngle" if self.sysc.hasVarOrient else 2054 "")) 2055 2056 fromStr = ("Multiframe as m,ProgrammeFrame as p,MultiframeDetector as d," 2057 "CurrentAstrometry as c") 2058 2059 whereStr = ("m.multiframeID=p.multiframeID and m.multiframeID=" 2060 "d.multiframeID and d.multiframeID=c.multiframeID and " 2061 "c.extNum=d.extNum AND %s" 2062 " AND programmeID=%s AND m.%s AND posAngle>=-180" 2063 " AND %s" % (frameTypeSel, self.progID, 2064 DepCodes.selectNonDeprecated, dateRange)) 2065 groupBy = ("m.multiframeID, raBase, decBase, " 2066 "m.filterID%s" % nustepStr 2067 + (",offsetX, offsetY" if self.sysc.hasOffsetPos else "") 2068 + ",telescope, instrument") 2069 orderBy = "decBase, m.filterID%s" % nustepStr 2070 2071 stacks = archive.query(selectStr, fromStr, whereStr, groupBy, orderBy) 2072 2073 if self.sysc.hasOffsetPos and progTable.getAttr('sourceProdType') == 'tile': 2074 mfIDSel = \ 2075 SelectSQL("m.multiframeID", fromStr, whereStr, groupBy=groupBy) 2076 2077 frameTypeSel = queries.getFrameSelection('tile', noDeeps=True) 2078 2079 # Make sure all mfIDs come from a good tile. 
2080 goodTileSel = SelectSQL( 2081 select="p.combiframeID", 2082 table="(%s) AS t, Provenance AS p" % SelectSQL( 2083 select="pv.combiframeID, nOffsets", 2084 table="Provenance AS pv, Multiframe AS m", 2085 where="m.multiframeID=pv.combiframeID AND %s AND %s" 2086 " AND pv.multiframeID IN (%s)" % (frameTypeSel, 2087 DepCodes.selectNonDeprecated, mfIDSel)), 2088 where="t.combiframeID=p.combiframeID AND p.multiframeID IN (%s)" 2089 % mfIDSel, 2090 groupBy="p.combiframeID, t.nOffSets" 2091 " HAVING COUNT(distinct p.multiframeID)=t.nOffsets") 2092 2093 # Make sure each of these tiles has right no of good mfs. 2094 goodMfIDs = archive.query("multiframeID", "Provenance", 2095 whereStr="combiframeID IN (%s)" % goodTileSel) 2096 2097 stacks = [stack for stack in stacks 2098 if stack.multiframeID in goodMfIDs] 2099 2100 if not stacks: 2101 raise ProgrammeBuilder.CuError( 2102 "Programme %s has no non-deprecated catalogue data assigned in " 2103 "%s.ProgrammeFrame" % (progTable.getAcronym(), archive.database)) 2104 if self.sysc.hasOffsetPos and self.sysc.hasVarOrient: 2105 offsets = [astro.getOffSetPos(stack.offsetX, stack.offsetY) 2106 for stack in stacks] 2107 2108 possStacks = [ActStack(math.radians(stack.raBase), 2109 math.radians(stack.decBase), offset, stack.posAngle) 2110 for stack, offset in zip(stacks, offsets) 2111 if offset >= 0] 2112 elif self.sysc.hasOffsetPos: 2113 offsets = [astro.getOffSetPos(stack.offsetX, stack.offsetY) 2114 for stack in stacks] 2115 2116 possStacks = [ActStack(math.radians(stack.raBase), 2117 math.radians(stack.decBase), offset) 2118 for stack, offset in zip(stacks, offsets) 2119 if offset >= 0] 2120 elif self.sysc.hasVarOrient: 2121 possStacks = [ActStack(math.radians(stack.raBase), 2122 math.radians(stack.decBase), stack.posAngle) 2123 for stack in stacks] 2124 else: 2125 possStacks = [ActStack(math.radians(stack.raBase), 2126 math.radians(stack.decBase)) for stack in stacks] 2127 # group by ra,dec 2128 finalStacks = 
self.groupbyPosition(possStacks, 2129 self.sysc.maxRaDecExtentStackDeg, currentSetup.currentStackPosData, 2130 recreate) 2131 2132 self.isLowGalLat = self.checkGalacticStatus(finalStacks, progTable) 2133 if self.sysc.hasOffsetPos and self.sysc.hasVarOrient: 2134 self.productType = "tile" 2135 2136 finalTiles = self.findTiles(finalStacks, 2137 self.sysc.maxRaDecExtentStackDeg, recreate, 2138 currentSetup.currentTilePosData) 2139 2140 finalStacks = [self.Position(pID, math.degrees(ra), math.degrees(dec), gSize, 2141 ofP, posAng) 2142 for pID, ra, dec, gSize, ofP, posAng in finalStacks] 2143 2144 finalTiles = [(pID, math.degrees(ra), math.degrees(dec), gSize, 2145 posAng) 2146 for pID, ra, dec, gSize, ofP, posAng in finalTiles] 2147 2148 elif self.sysc.hasOffsetPos: 2149 self.productType = "tile" 2150 2151 finalTiles = self.findTiles(finalStacks, 2152 self.sysc.maxRaDecExtentStackDeg, recreate, 2153 currentSetup.currentTilePosData) 2154 2155 finalStacks = [self.Position(pID, math.degrees(ra), math.degrees(dec), gSize, 2156 ofP) 2157 for pID, ra, dec, gSize, ofP in finalStacks] 2158 2159 finalTiles = [(pID, math.degrees(ra), math.degrees(dec), gSize) 2160 for pID, ra, dec, gSize, ofP in finalTiles] 2161 elif self.sysc.hasVarOrient: 2162 2163 finalStacks = [self.Position(pID, math.degrees(ra), math.degrees(dec), gSize, 2164 posAng) 2165 for pID, ra, dec, gSize, posAng in finalStacks] 2166 2167 2168 else: 2169 finalStacks = [self.Position(pID, math.degrees(ra), math.degrees(dec), gSize) 2170 for pID, ra, dec, gSize in finalStacks] 2171 2172 Logger.addMessage("Found all product positions") 2173 2174 # newFiltData 2175 allFilters