
Source Code for Module wsatools.ProgrammeBuilder

#------------------------------------------------------------------------------
# $Id: ProgrammeBuilder.py 10248 2014-03-17 15:55:01Z RossCollins $
"""
   Automatic programme setup tools to determine the correct curation settings.
   Updates the curation tables in the database and creates the necessary
   schema files.

   @author: N.J.G. Cross
   @org:    WFAU, IfA, University of Edinburgh

   @newfield contributors: Contributors, Contributors (Alphabetical Order)
   @contributors: R.S. Collins

   @todo: Make sure new pointingString and old pointingString refer to same
          positions
   @todo: Need to replace some hard wired constants with database constants
          parsed from WSA_InitiateArchive.sql. Therefore, may need to add more
          constants to that SQL script.
"""
#------------------------------------------------------------------------------
from __future__      import division, print_function
from future_builtins import zip

from   bisect      import bisect
from   collections import defaultdict, namedtuple
import math
import numpy
from   operator    import itemgetter
import os

import wsatools.Astrometry                   as astro
import wsatools.Banner                       as banner
import wsatools.DataFactory                  as df
import wsatools.DbConnect.CommonQueries      as queries
from   wsatools.DbConnect.CuSession      import CuSession
import wsatools.DbConnect.DbConstants        as dbc
from   wsatools.DbConnect.DbSession      import Join, SelectSQL
import wsatools.DbConnect.Schema             as schema
import wsatools.ExternalProcess              as extp
from   wsatools.ExternalProductConverter import ExternalProduct
from   wsatools.Logger                   import Logger
from   wsatools.ObsCalendar              import DateRange
import wsatools.Statistics                   as stats
from   wsatools.SystemConstants          import DepCodes, SystemConstants
import wsatools.Utilities                    as utils
#------------------------------------------------------------------------------

class ProgrammeBuilder(CuSession):
49 """ 50 Automatically determines the correct curation settings for a given 51 programme. Updates the curation tables in the database and creates the 52 necessary schema files. 53 54 """ 55 #-------------------------------------------------------------------------- 56 # Private class parameters for this procedure - should not be altered 57 58 _autoCommit = True # Overrides CuSession default 59 cuNum = 21 # Overrides CuSession default 60 61 #-------------------------------------------------------------------------- 62 # Define public member variable default values (access as obj.varName) 63 # these need to be set from command-line options 64 65 bandMergeCrit = None #: Band merging criterion. 66 createSchemaOnly = False #: Just update schema with latest template? 67 dateRange = CuSession.sysc.obsCal.dateRange() #: Range of nights to include. 68 maxEllipticity = 1.0 #: Maximum stellar ellipticity for deep stacks. 69 maxSeeing = 2.0 #: Maximum seeing for deep stacks. 70 newPoints = False #: Only modify RequiredStack with new pointings? 71 numberStks = dbc.intDefault() #: Max number of components of a deep stack. 72 redoSchema = False #: Recreate existing database schema? 73 redoSetup = True #: Recreate all RequiredStack pointings? 74 displayDeeps = False #: Output data for deep fields 75 76 #-------------------------------------------------------------------------- 77
    def _onRun(self):
79 """ Set up programme database settings and/or schema. 80 """ 81 if self.createSchemaOnly or self.redoSchema: 82 # No programme set up required, just re-parse schema, optionally 83 # updating existing schema in the database. 84 self.createSchema(syncToDb=self.redoSchema) 85 if self.redoSchema: 86 commitSchema(self.archive, "updated", 87 isNonSurvey=self.onlyNonSurveys) 88 if not self.onlyNonSurveys and not self.onlySurveys: 89 # Commit any left over non-survey schema files 90 commitSchema(self.archive, "updated") 91 92 elif not self.archive.isTrialRun: 93 raise ProgrammeBuilder.CuError("No database update requested.") 94 95 elif not self.programmeID: 96 # Find and set up new progs 97 self.setUpNewProgs() 98 if self.programmeID: # i.e. there are new progs. 99 self.createSchema() 100 self.createTables() 101 commitSchema(self.archive, "new") 102 103 elif self.setUpProgramme(): 104 # Recreate schema 105 self.createSchema(syncToDb=True) 106 if not self.programme.isNonSurvey(): 107 # Non-survey schema is committed by NonSurveyRelease 108 commitSchema(self.archive, "updated", isNonSurvey=False) 109 self.initialiseNewRelease()

    #--------------------------------------------------------------------------

    def createSchema(self, syncToDb=False):
114 """ 115 Update schema files for current non-survey programme using the latest 116 version of sql/Templates/*_autoTemplateSchema.sql and the latest 117 curation settings in the database for this programme. 118 119 @param syncToDb: If True, update existing schema in database with new 120 schema, for all post-Detection tables. 121 @type syncToDb: bool 122 123 @todo: Combine surveyparser.py functionality into this function, which 124 will allow surveyparser.py to be just an interface to this class 125 and improve consistency between survey and non-survey schema. 126 """ 127 try: 128 progIDs = list(self.programmeID if self.programmeID else 129 self.programme.getProgIDList(onlyNonSurvey=self.onlyNonSurveys, 130 onlySurvey=self.onlySurveys)) 131 except TypeError: 132 progIDs = [self.programmeID] 133 134 if any(progID in self.sysc.manualProgs for progID in progIDs): 135 raise ProgrammeBuilder.CuError("Can only create schema for " 136 "automatically set up programmes") 137 138 for progID in sorted(progIDs): 139 self.programme.setCurRow(programmeID=progID) 140 Logger.addMessage("Creating schema for programme " + 141 self.programme.getAcronym().upper()) 142 143 if syncToDb and self.programme.isNonSurvey(): 144 # Drop existing post-Detection tables before changing schema. 145 # NOTE: Cannot just overwrite when creating if schema has 146 # changed such that e.g. a BestMatch table is replaced by a 147 # SynopticBestMatch table. 148 progSchema = \ 149 schema.parseTables(self.programme.getSchemaScript()) 150 151 for table in reversed(progSchema): 152 if not table.name.endswith(("Raw", "Astrometry", 153 "Photometry", "Detection", 154 "TileSet", "TilePawPrints")): 155 156 Logger.addMessage("Dropping table %s" % table) 157 self.archive.dropTable(table, progSchema) 158 159 schemaPath = self.sysc.autoTemplateSchema 160 if self.programme.isSatelliteProg(): 161 schemaPath = \ 162 self.sysc.sqlTemplatePath("WSA_SatelliteTemplateSchema.sql") 163 164 schemaFiles = [(schemaPath, os.path.join(self.sysc.sqlScriptPath, 165 self.programme.getSchemaScript()))] 166 167 if self.programme.getAttr("neighboursSchema") \ 168 not in (dbc.charDefault(), "WSA_NeighboursSchema.sql"): 169 schemaFiles.append( 170 (self.sysc.autoTemplateNeighboursSchema, 171 os.path.join(self.sysc.sqlScriptPath, 172 self.programme.getAttr("neighboursSchema")))) 173 174 # Update the schema definition files 175 isUpdatedForTable = self.createProgSchema(schemaFiles, progIDs) 176 177 # Recreate all post-Detection tables from updated schema file 178 if syncToDb: 179 progSchema = \ 180 schema.parseTables(self.programme.getSchemaScript()) 181 182 Logger.addMessage("Synchronising database to latest schema...") 183 for table in progSchema: 184 if self.archive.createTable(table, progSchema, 185 overWrite=isUpdatedForTable.get(table.name)): 186 Logger.addMessage("Created table %s" % table) 187 188 # @TODO: ViewSchema like IndexSchema 189 indexFiles = [] 190 viewFiles = [] 191 if self.onlyNonSurveys or not self.onlySurveys: 192 indexFiles.append((self.sysc.nsIndexScript, 193 self.programme.getProgIDList(onlyNonSurvey=True))) 194 195 if not self.sysc.isWSA() \ 196 and (self.onlySurveys or not self.onlyNonSurveys): 197 198 indexFiles.append((self.sysc.indexScript, 199 self.programme.getProgIDList(onlySurvey=True))) 200 201 viewFiles.append((self.sysc.surveyViewScript, 202 self.programme.getProgIDList(onlySurvey=True))) 203 204 try: 205 for schemaFileName, progIDs in indexFiles: 206 self.createIndexSchema(schemaFileName, progIDs) 207 208 for schemaFileName, 
progIDs in viewFiles: 209 self.createIndexSchema(schemaFileName, progIDs, isView=True) 210 211 finally: 212 # Reset programme to the current programme 213 if not isinstance(self.programmeID, set): 214 self.programme.setCurRow(programmeID=self.programmeID)

    #--------------------------------------------------------------------------

    def createIndexSchema(self, schemaFileName, progIDs, isView=False):
219 """ 220 The index schema script is recreated from scratch each time for all 221 programmes that have detection tables. 222 223 @param schemaFileName: Schema file to update. 224 @type schemaFileName: str 225 @param progIDs: List of programme IDs for this file. 226 @type progIDs: list(int) 227 @param isView: If True, template is to be used to create a view. 228 @type isView: bool 229 230 """ 231 Logger.addMessage("Updating %s..." % schemaFileName) 232 schemaPath = os.path.join(self.sysc.sqlScriptPath, schemaFileName) 233 schemaFiles = [(self.sysc.autoTemplateViewSchema if isView else 234 self.sysc.autoTemplateIndexSchema, schemaPath)] 235 236 file(schemaPath, 'w').write('') # initialise 237 238 for progID in progIDs: 239 self.programme.setCurRow(programmeID=progID) 240 self.createProgSchema(schemaFiles, progIDs)

    #--------------------------------------------------------------------------

    def createProgSchema(self, schemaFiles, progIDs):
245 """ 246 Parses *autoTemplate.sql to produce the custom schema for the current 247 programme. 248 249 @param schemaFiles: List of schema template and programme files. 250 @type schemaFiles: list(tuple(str, str)) 251 @param progIDs: List of programme IDs currently being processed. 252 @type progIDs: list(int) 253 254 @returns: Dictionary indicating the updated status of each table. 255 @rtype: dict(str: bool) 256 257 """ 258 # Use programmeInfo to drive curation 259 # isDeep, isLowLat, useSEx, isCorr, isSurvey, isNs, isEarly 260 # Detection tables - use **flags to create all 4 schemas 261 # Source tables - use **flags to create mergeSource and Source 262 # @TODO: Get wavelength info - if NB filter, do colours compared to 263 # nearest BB filter 264 fullName = self.programme.getName() 265 if self.programme.isNonSurvey(): 266 fullName = fullName[0].lower() + fullName[1:] 267 268 stdSubs = [("&template&", self.programme.getAcronym()), 269 ("&TEMPLATE&", self.programme.getAcronym().upper()), 270 ("&templatelong&", fullName), 271 ("@dataBase", self.archive.database if self.archive.database != 'VSAVVV' else 'VSA')] 272 bandUcdDict = {'z':'em.opt.I', 273 'y':'em.IR.NIR', 274 'j':'em.IR.J', 275 'h':'em.IR.H', 276 'ks':'em.IR.K'} 277 statusForTable = {} 278 synSetup = SynopticSetup(self.programme, self.dateRange) 279 for templatePath, schemaPath in schemaFiles: 280 status = Status(self, progIDs, synSetup) 281 if self.programme.getAttr("neighboursSchema") in schemaPath: 282 status.setNeighbours() 283 if not status.neighTables: 284 Logger.addMessage("No tables in RequiredNeighbours") 285 continue 286 287 if not any(indexScript in schemaPath for indexScript 288 in (self.sysc.indexScript, self.sysc.nsIndexScript, 289 self.sysc.surveyViewScript)): 290 291 Logger.addMessage( 292 "Creating script %s" % os.path.basename(schemaPath)) 293 294 file(schemaPath, 'w').write('') # initialise 295 296 # @TODO: Add views in for VSA 297 templateScript = file(templatePath).readlines() 298 299 # Set split tables 300 lineNum = 0 301 dataLines = {'main':[]} 302 tables = [('main')] 303 dictFiltData = None 304 dictColData = None 305 dictNeighData = None 306 dictBorData = None 307 tableOrder = [('main', True)] 308 while lineNum < len(templateScript): 309 line = templateScript[lineNum] 310 if line.startswith(("+", "=")): 311 # attributes after this 312 oldDropStatus = status.dropTables 313 status.update(line) 314 315 # Compare splitTables to dataLines 316 if status.splitTables: 317 if any(table not in dataLines for table 318 in status.getAllTables(status.splitTables)): 319 320 for key, _cre in tableOrder: 321 statusForTable.update( 322 extractTables(dataLines[key], status)) 323 324 # write everything so far 325 file(schemaPath, 'a').writelines(dataLines[key]) 326 327 dataLines = status.getAllTables(status.splitTables) 328 tableOrder = status.tableOrder 329 330 if not status.splitTables: 331 if 'main' not in dataLines: 332 for key, _cre in tableOrder: 333 statusForTable.update( 334 extractTables(dataLines[key], status)) 335 336 # write everything so far 337 file(schemaPath, 'a').writelines(dataLines[key]) 338 339 dataLines = {'main':[]} 340 tableOrder = [('main', True)] 341 342 if status.dropTables != oldDropStatus: 343 dlDict = {} 344 curDropStatus = status.dropTables 345 status.dropTables = oldDropStatus 346 for key, _cre in tableOrder: 347 statusForTable.update( 348 extractTables(dataLines[key], status)) 349 # write everything so far 350 file(schemaPath, 'a').writelines(dataLines[key]) 351 dlDict[key] = [] 352 dataLines 
= dlDict 353 status.dropTables = curDropStatus 354 355 if status.useFilters and status.writeLine: 356 if not dictFiltData: 357 dictFiltData, dictOrder = \ 358 status.initialise('filter', dataLines) 359 360 if dictFiltData and not status.useFilters: 361 for key in dataLines: 362 filtDataList = [] 363 for filtName in dictOrder: 364 filtDataList.extend(dictFiltData[key][filtName]) 365 dataLines[key].extend(filtDataList) 366 dictFiltData = None 367 368 if status.useColours and status.writeLine: 369 if not dictColData: 370 dictColData, dictOrder = \ 371 status.initialise('colour', dataLines) 372 373 if dictColData and not status.useColours: 374 for key in dataLines: 375 colDataList = [] 376 for colName in dictOrder: 377 colDataList.extend(dictColData[key][colName]) 378 379 dataLines[key] += colDataList 380 381 dictColData = None 382 383 if status.useBorders and status.writeLine: 384 if not dictBorData: 385 dictBorData, dictOrder = \ 386 status.initialise('border', dataLines) 387 388 if dictBorData and not status.useBorders: 389 for key in dataLines: 390 borDataList = [] 391 for borName in dictOrder: 392 borDataList.extend(dictBorData[key][borName]) 393 394 dataLines[key] += borDataList 395 396 dictBorData = None 397 398 if status.useNeighs and status.writeLine: 399 # If dict is not not empty 400 if not dictNeighData: 401 dictNeighData, dictOrder = status.initialise( 402 'neigh', dataLines) 403 404 if dictNeighData and not status.useNeighs: 405 for key in dataLines: 406 neighDataList = [] 407 for neighName in dictOrder: 408 if neighName in dictNeighData[key]: 409 neighDataList.extend( 410 dictNeighData[key][neighName]) 411 412 dataLines[key] += neighDataList 413 414 dictNeighData = None 415 416 # skip 417 lineNum += 1 418 continue 419 420 if not line.startswith("*"): 421 tables = ['main'] 422 else: 423 tables = status.splitTables.get(line[2]) 424 if tables is not None: 425 line = line[4:] 426 else: 427 Logger.addMessage("<Info> Table set %s does not have " 428 "option %s available for programme %s" 429 % (status.splitTables, line[2], 430 self.programme.getAcronym().upper()), 431 alwaysLog=False) 432 433 lineNum += 1 434 continue 435 436 if status.writeLine: 437 # Now replace any strings and repeat any filters 438 # constant strings first 439 line = utils.multiSub(line, stdSubs) 440 441 # Replace filterlist with correct list 442 # @@TODO Replace with new method 443 if '&filterlist' in line: 444 listType = line.split('&filterlist:')[1].split('&')[0] 445 filterList = status.filterGroups[listType] 446 line = line.replace('&filterlist:%s&' % listType, 447 ', '.join(filterList).title()) 448 if '$[' in line: 449 line = status.complexSub(line) 450 451 # if in filter or colour mode, cycle through 452 if status.useFilters: 453 # produce the right number of lines 454 for ii, shtName in enumerate(status.filters): 455 subs = [("&A&", shtName.upper()), 456 ("&a&", shtName.lower()), 457 ("&As&", shtName.title()), 458 ("&n&", 'n' if ii > 0 else ''), 459 ("&MagUCD&", (";%s" % 460 bandUcdDict[shtName.lower()] 461 if shtName.lower() in bandUcdDict else ""))] 462 463 fline = utils.multiSub(line, subs) 464 for table in tables: 465 dictFiltData[table][shtName].append(fline) 466 467 lineNum += 1 468 continue 469 470 elif status.useColours: 471 # produce the right number of lines 472 for shtName1, shtName2 in status.colours: 473 colUcd = "" 474 colUcd += (";%s" % bandUcdDict[shtName1.lower()] 475 if shtName1.lower() in bandUcdDict else "") 476 colUcd += (";%s" % bandUcdDict[shtName2.lower()] 477 if shtName2.lower() in 
bandUcdDict else "") 478 colName = '%sm%s' % (shtName1, shtName2) 479 subs = [("&A&", shtName1.upper()), 480 ("&a&", shtName1.lower()), 481 ("&As&", shtName1.title()), 482 ("&B&", shtName2.upper()), 483 ("&b&", shtName2.lower()), 484 ("&Bs&", shtName2.title()), 485 ("&ColorUCD&", colUcd)] 486 487 fline = utils.multiSub(line, subs) 488 for table in tables: 489 dictColData[table][colName].append(fline) 490 491 lineNum += 1 492 continue 493 494 elif status.useBorders: 495 for border in status.borders: 496 subs = [("&A&", border.upper()), 497 ("&a&", border.lower())] 498 fline = utils.multiSub(line, subs) 499 for table in tables: 500 dictBorData[table][border].append(fline) 501 lineNum += 1 502 continue 503 504 elif status.useNeighs: 505 for table in tables: 506 for ii, nTable in \ 507 enumerate(status.neighs[table]['neightable']): 508 509 matchDist = round(3600 510 * status.neighs[table]['matchdist'][ii], 1) 511 512 # Replace all terms, careful if they may or may 513 # not exist 514 subs = [("&neighTable&", nTable), 515 ("&MatchDist&", str(matchDist)), 516 ("&PrimeTable&", 517 status.neighs[table]['primetable'][ii]), 518 ("&PrimeID&", 519 str(status.neighs[table]['primeid'][ii])) 520 ] 521 522 fline = utils.multiSub(line, subs) 523 if 'externtable' in status.neighs[table]: 524 fline = fline.replace("&ExternTable&", 525 status.neighs[table]['externtable'][ii]) 526 527 if 'externname' in status.neighs[table]: 528 fline = fline.replace("&Extern&", 529 status.neighs[table]['externname'][ii]) 530 531 if 'externid' in status.neighs[table]: 532 fline = fline.replace("&ExternID&", 533 str(status.neighs[table]['externid'][ii])) 534 535 if 'externdb' in status.neighs[table]: 536 fline = fline.replace("&ExternDB&", 537 status.neighs[table]['externdb'][ii]) 538 539 if 'extprogid' in status.neighs[table]: 540 extProg = self.programme.getAcronym(progID= 541 int(status.neighs[table]['extprogid'][ii])) 542 543 fline = fline.replace("&SecProg&", 544 extProg.upper()) 545 546 if (not status.maxIndex or status.maxIndex > ii)\ 547 and (not status.checkSpecNeigh or 548 table == 'XMatch' and 549 status.checkSpecNeigh.upper() in 550 status.neighs[table]['externname'][ii] 551 .upper()): 552 553 dictNeighData[table][nTable].append(fline) 554 555 lineNum += 1 556 continue 557 558 isPublicForTable = dict(status.tableOrder) 559 for table in tables: 560 # Replace split string if necessary 561 fline = line.replace('&split&', table) 562 if "&fluxTable&" in fline: 563 fline = fline.replace('&fluxTable&', 564 self.programme.getDetectionTable() + '.' 565 if table in ("Photometry", "Astrometry") else '') 566 dataLines[table].append(fline.replace('&createTable&', 567 "CREATE TABLE" if isPublicForTable[table] else 568 "create table")) 569 570 # Put data in memory ... 571 # If in split mode.. 572 lineNum += 1 573 574 for key, _cre in status.tableOrder: 575 # write everything else 576 statusForTable.update(extractTables(dataLines[key], status)) 577 file(schemaPath, 'a').writelines(dataLines[key]) 578 579 return statusForTable
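    # Illustrative example (assumed template fragment, not from the original
    # source): for a programme with acronym 'las', the stdSubs list above
    # would rewrite a template line such as
    #     &createTable& &template&Source ...
    # to "CREATE TABLE lasSource ..." once '&createTable&' is also expanded,
    # which happens per table ("CREATE TABLE" for public tables, lower case
    # otherwise).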

    #--------------------------------------------------------------------------

    def createTables(self):
584 """ Creates all tables for the current list of programmes, where they 585 do not already exist, inserting default rows where necessary. 586 """ 587 for progID in sorted(self.programmeID): 588 script = self.programme.getSchemaScript(programmeID=progID) 589 progSchema = schema.parseTables(script) 590 for table in progSchema: 591 Logger.addMessage("Creating table %s" % table) 592 self.archive.createTable(table, progSchema) 593 if table.name.endswith(("Raw", "Astrometry", "Photometry")): 594 self.archive.insertData(table.name, 595 [column.getDefaultValue() for column in table.columns])

    #--------------------------------------------------------------------------

    def getMaxReleaseOnDisk(self):
600 """ 601 Check available disks to see what the maximum releaseNum is for products 602 on the main disks, for the given programme. 603 604 @return: Highest release number in use in the disk file system. 605 @rtype: int 606 607 """ 608 # @TODO: This code replicates FitsUtils.findProduct()! The common code 609 # needs to be compartmentalised into one place to prevent future 610 # bugs. Even better - this search should only be done once, but 611 # with both goals. 612 dirDict = {'stack': self.sysc.stackDir, 'mosaic': self.sysc.mosaicDir} 613 if self.sysc.hasOffsetPos: 614 dirDict['tile'] = self.sysc.tileDir 615 616 progStr = "%05d" % self.programme.getAttr("programmeID") 617 fileNameParts = [progStr, self.sysc.deepSuffix, self.sysc.stackSuffix, 618 self.sysc.mefType] 619 620 notFileNameParts = [self.sysc.confSuffix, self.sysc.catType, 621 self.sysc.catSuffix, self.sysc.filtSuffix] 622 623 releaseNums = [] 624 for prodType in self.sysc.productTypes: 625 for disk in self.sysc.availableRaidFileSystem(): 626 productDir = os.path.join(disk, dirDict[prodType]) 627 if os.path.exists(productDir): 628 # Check subdirectories 629 subDirs = [subDir for subDir in os.listdir(productDir) 630 if subDir.split('_')[0].isdigit() 631 and subDir.startswith('20')] 632 633 for subDir in subDirs: 634 subDirPath = os.path.join(productDir, subDir) 635 636 # Check files 637 for fileName in os.listdir(subDirPath): 638 filePathName = os.path.join(subDirPath, fileName) 639 640 if (all(part in filePathName 641 for part in fileNameParts) 642 and all(part not in filePathName 643 for part in notFileNameParts)): 644 645 # Make sure programmeID part is in correct place 646 if fileName.split('_')[1][:5] == progStr: 647 releaseNum = int(subDir.split("_v")[1]) 648 releaseNums.append(int(releaseNum)) 649 650 return max(releaseNums) if releaseNums else 0

    #--------------------------------------------------------------------------

    def getReleaseNum(self):
655 """ 656 Queries database to find the next available release number for a given 657 survey. 658 659 @return: Next available release number. 660 @rtype: int 661 662 """ 663 progID = self.programme.getAttr("programmeID") 664 665 if self.programme.getAttr("sourceProdType") == 'mosaic': 666 # Use latest releaseNum 667 relNum = self.archive.queryAttrMax("releaseNum", "ExternalProduct", 668 where="productType='mosaic' AND programmeID=%s" % progID) 669 670 if not relNum: 671 raise ProgrammeBuilder.CuError( 672 "There are no existing mosaics ready for curation. " 673 "Please set up ExternalProduct and re-run.") 674 675 return relNum 676 677 diskRelNum = self.getMaxReleaseOnDisk() 678 dbRelNum = self.archive.queryAttrMax("releaseNum", "ProgrammeFrame", 679 where="productID>0 AND programmeID=%s" % progID) or 0 680 681 return max(diskRelNum, dbRelNum) + 1

    #--------------------------------------------------------------------------

    def initialiseNewRelease(self):
686 """ 687 Clear ProductProcessing table entries for the programme and 688 determine/announce the new release number. 689 690 """ 691 name = self.programme.getAcronym() 692 Logger.addMessage("Wiping %s ProductProcessing table entries..." % name) 693 Logger.addMessage("...%s rows deleted." 694 % self.archive.delete("ProductProcessing", 695 whereStr="programmeID=%s" % self.programme.getAttr("programmeID"))) 696 697 # @TODO: test grouping 698 releaseNum = self.getReleaseNum() 699 Logger.addMessage("This is %s release number: %s" % (name, releaseNum)) 700 banner.horizontal(str(releaseNum))

    #--------------------------------------------------------------------------

    def setUpNewProgs(self):
705 """ Updates the Programme table entries for new programmes. 706 """ 707 self.onlyNonSurveys = True 708 Logger.addMessage("Checking for new non-survey programmes...") 709 710 newProgs = self.archive.query("programmeID, dfsIdString", "Programme", 711 whereStr="programmeID >= %s AND detectionTable = %r" % 712 (dbc.nonSurveyProgrammeOffset(), dbc.charDefault())) 713 714 Logger.addMessage("found %s new programmes." % len(newProgs)) 715 716 for progID, dfsIdString in sorted(newProgs): 717 # @TODO: OSA? 718 if self.sysc.isWSA(): 719 acronym = dfsIdString.replace('/', '').lower() 720 else: 721 acronym = dfsIdString.translate(None, '.-()').lower() 722 if acronym.startswith('2'): 723 acronym = "d%s" % acronym 724 elif acronym.startswith('3'): 725 acronym = "s%s" % acronym 726 elif acronym[:3].isdigit(): 727 acronym = "n%s" % acronym 728 729 # @@NOTE: This should really be in a transaction block in case of 730 # database failure, instead just log what we are doing. 731 Logger.addMessage("Updating Programme for " + acronym) 732 entries = [("detectionTable", repr(acronym + "Detection")), 733 ("catalogueSchema", "'NonSurvey/%sNS%s_%sSchema.sql'" % 734 (self.sysc.loadDatabase, acronym, acronym))] 735 736 rowIndex = [("programmeID", progID)] 737 self.archive.updateEntries("Programme", entries, rowIndex) 738 self.programmeID.add(progID) 739 740 # @@TODO: The 1 and objID symbols should be database constants, 741 # need them to be defined and used in WSA_InitArchive.sql too... 742 self.archive.insertData("ProgrammeTable", 743 [progID, 1, acronym + "Detection", "objID"]) 744 745 if self.programmeID: 746 self.programme.updateDetails()

    #--------------------------------------------------------------------------

    def setUpProgramme(self):
751 """ 752 Set up programme using the latest quality controlled data. 753 754 @return: True if the schema now needs updating. 755 @rtype: bool 756 757 """ 758 pAcronym = self.programme.getAcronym() 759 Logger.addMessage( 760 "Determining best setup for programme %s..." % pAcronym.upper()) 761 762 filterTable = df.Table("Filter", self.archive) 763 newSetup = BestSetup(self.programme, self.redoSetup, self.dateRange, 764 self.displayDeeps) 765 synSetup = SynopticSetup(self.programme, self.dateRange, 766 newSetup.isSurveyDeep, self.bandMergeCrit) 767 768 if self.programme.isNonSurvey(): 769 Logger.addMessage( 770 "RequiredFilters are:\n" 771 "| filter | isSynoptic |\n" + '\n'.join( 772 "| %6s | %3s |" 773 % (filterTable.getAttr("shortName", filterID=filterID), 774 ("Y" if isSynoptic else "N")) 775 for filterID, nPass, isSynoptic in newSetup.newFiltData)) 776 777 Logger.addMessage("<Info> %s is a %s survey" % (pAcronym.upper(), 778 ("DEEP" if newSetup.isSurveyDeep else "SHALLOW"))) 779 780 # Do nothing else for WSA surveys! 781 if self.programmeID in self.sysc.manualProgs: 782 Logger.addMessage( 783 "<Info> The data suggest the following filter setup:\n%s\n" 784 "and the following pointing setup:\n%s" 785 % (newSetup.newFiltData, newSetup.newPosData)) 786 787 if not self.archive.isTrialRun: 788 raise ProgrammeBuilder.CuError( 789 "This is a UKIDSS programme, so requirements will not be " 790 "overwritten.") 791 792 return False 793 794 # Check to see if programme should use external deep data 795 reqTables = [] 796 for pType in newSetup.productData: 797 if len(newSetup.productData[pType]): 798 reqTables.append("Required%s" % pType.title()) 799 Logger.addMessage("%s %ss will be inserted into Required%s" 800 % (len(newSetup.productData[pType]), 801 pType.lower(), pType.title())) 802 803 if newSetup.productLinks: 804 reqTables += ["ProductLinks"] 805 806 Logger.addMessage("%s links will be inserted into ProductLinks" 807 % len(newSetup.productLinks)) 808 809 if self.programme.isNonSurvey(): 810 reqTables.append("RequiredFilters") 811 # Why wipe this - just clean SourceXDetection and add in any new 812 # if not self.newPoints: 813 # reqTables.append("RequiredNeighbours") 814 815 # Wipe existing requirements 816 if self.redoSetup: 817 Logger.addMessage( 818 "Wiping old programme requirements from database...") 819 820 for tableName in reqTables: 821 self.archive.delete(tableName, 822 whereStr="programmeID=%s" % self.programmeID) 823 else: 824 Logger.addMessage("Wiping ProductLinks from database...") 825 self.archive.delete("ProductLinks", 826 whereStr="programmeID=%s" % self.programmeID) 827 828 Logger.addMessage("Updating %s..." 
% ", ".join(reqTables)) 829 830 # Update RequiredFilters 831 for filterID, nPass, isSynoptic in newSetup.newFiltData: 832 if self.programme.isNonSurvey(): 833 self.archive.insertData("RequiredFilters", 834 [self.programmeID, filterID, nPass, isSynoptic]) 835 else: 836 # Just update synoptic status 837 self.archive.updateEntries("RequiredFilters", 838 entryList=[("isSynoptic", isSynoptic)], 839 rowIndexList=[("programmeID", self.programmeID), 840 ("filterID", filterID)]) 841 842 # Update RequiredStack 843 for point in newSetup.productData["stack"]: 844 flName = filterTable.getAttr("shortName", filterID=point.filterID) 845 nameStr = "%s_%s_%s" % (pAcronym, point.fieldID, flName.title()) 846 desc = ("Deep %s stack: %s, pointing %11.7f, %11.7f %s, nustep %d" 847 % (pAcronym, nameStr, point.ra, point.dec, flName, point.nuStep) 848 if self.sysc.hasMicroStep else 849 "Deep %s stack: %s, pointing %11.7f, %11.7f %s" 850 % (pAcronym, nameStr, point.ra, point.dec, flName)) 851 entryList = [self.programmeID, point.productID, nameStr, desc, 852 point.filterID, point.ra, point.dec, self.numberStks] 853 if self.sysc.hasMicroStep: 854 entryList.append(point.nuStep) 855 856 if self.sysc.hasOffsetPos: 857 entryList.append(point.offsetPos) 858 859 entryList += [point.stackRadius, point.fieldID] 860 if self.sysc.hasVarOrient: 861 # Position angle 862 entryList.append(point.posAngle) 863 864 self.archive.insertData("RequiredStack", entryList) 865 866 for point in newSetup.productData["tile"]: 867 flName = filterTable.getAttr("shortName", filterID=point.filterID) 868 nameStr = "%s_%s_%s" % (pAcronym, point.fieldID, flName.title()) 869 desc = "Deep %s tile: %s, pointing %11.7f, %11.7f %s, nustep %d" \ 870 % (pAcronym, nameStr, point.ra, point.dec, flName, point.nuStep) 871 # @FIXME: No nustep in OSA 872 entryList = [self.programmeID, point.productID, nameStr, desc, 873 point.filterID, point.ra, point.dec, point.nuStep, 874 point.stackRadius, point.fieldID, point.posAngle] 875 876 self.archive.insertData("RequiredTile", entryList) 877 878 for point in newSetup.productData["mosaic"]: 879 flName = filterTable.getAttr("shortName", filterID=point.filterID) 880 nameStr = "%s_%s_%s" % (pAcronym, point.fieldID, flName.title()) 881 desc = ("Deep %s mosaic: %s, pointing %11.7f, %11.7f %s, " 882 "pixel size %3.2f, size %3.2f x %3.2f sq. 
degrees" \ 883 % (pAcronym, nameStr, point.ra, point.dec, flName, 884 point.pixelSize, point.raExtent, point.decExtent)) 885 886 dualFilterID = 5 887 888 # @FIXME: no nustep in OSA 889 # No mosaics in OSA (yet) 890 entryList = [self.programmeID, point.productID, nameStr, desc, 891 point.filterID, point.ra, point.dec, point.raExtent, 892 point.decExtent, point.nuStep, point.pixelSize, dualFilterID, 893 point.fieldID] 894 if self.sysc.hasVarOrient: 895 entryList.append(point.posAngle) 896 897 self.archive.insertData("RequiredMosaic", entryList) 898 899 for combiProdID, intProdID, combiProdType, intProdType \ 900 in newSetup.productLinks: 901 902 entryList = [self.programmeID, combiProdID, intProdID, 903 combiProdType, intProdType] 904 905 self.archive.insertData("ProductLinks", entryList) 906 907 if self.newPoints: 908 return False # Go no further if just updating required pointings 909 910 # Parse registration file for registration description 911 Logger.addMessage("Parsing registration files...") 912 registration = Registration(self.programme) 913 914 # New Programme table entries 915 entryList = [] 916 if self.programme.isNonSurvey(): 917 entryList += [ 918 ("description", "'%s'" % registration.description), 919 ("extractTool", "'CASU'"), 920 ("sourceTable", "'%sSource'" % pAcronym), 921 ("mergeLogTable", "'%sMergeLog'" % pAcronym), 922 ("neighboursSchema", "'NonSurvey/%sNS%s_%sNeighbourSchema.sql'" 923 % (self.sysc.loadDatabase, pAcronym, pAcronym)), 924 ("isLowGalacticLat", int(newSetup.isLowGalLat))] 925 926 927 # If band-merging criterion not supplied, see if this is already a 928 # previously set up correlated survey. 929 self.bandMergeCrit = (self.bandMergeCrit 930 or self.programme.getAttr("bandMergingCriterion")) 931 932 # Define deep table entries for Programme 933 if newSetup.isSurveyDeep: 934 synNeighTableDetails = [] 935 for ngTable in synSetup.neighTables: 936 synID = synSetup.getSynopticTableID(ngTable) 937 synNeighTableDetails.append((ngTable, synID)) 938 939 if synSetup.bestMatchTables: 940 entryList += [ 941 ("synopticBestMatchTable", repr(synSetup.varBMTable)), 942 ("variabilityTable", "'%sVariability'" % pAcronym)] 943 944 if synSetup.bandMergingCrit: 945 entryList += [ 946 ("synopticSourceTable", repr( 947 synSetup.getSynopticSourceTable())), 948 ("bandMergingCriterion", synSetup.bandMergingCrit)] 949 950 else: 951 entryList += [("synopticBestMatchTable", repr(dbc.charDefault())), 952 ("variabilityTable", repr(dbc.charDefault())), 953 ("synopticSourceTable", repr(dbc.charDefault()))] 954 955 # Update Programme 956 Logger.addMessage("Updating Programme with " + 957 ("deep table information" if not self.programme.isNonSurvey() else 958 "table names and neighbour schema") + 959 " for programme %s" % pAcronym.upper()) 960 961 self.archive.update("Programme", entryList, 962 where="programmeID=%s" % self.programmeID) 963 964 self.programme.updateDetails() 965 966 # For newly registered programmes only 967 if self.programme.isNonSurvey() \ 968 and not self.archive.queryEntriesExist("Survey", 969 where="surveyID=%s" % self.programmeID): 970 971 Logger.addMessage(pAcronym.upper() + " is a newly registered " 972 "programme. 
Adding entries into Survey, SurveyProgrammes and " 973 "ProgrammeTable...") 974 975 # Update Survey 976 self.archive.insertData("Survey", rowData=[self.programmeID, 977 self.programme.getAttr("dfsIDString"), "%s: %s" % 978 (self.programme.getName().replace("programme", "project"), 979 registration.description), pAcronym.upper(), dbc.yes()]) 980 981 # Update SurveyProgrammes - assuming just one programme in survey 982 self.archive.insertData("SurveyProgrammes", 983 [self.programmeID, self.programmeID, dbc.no()]) 984 985 # - and programme entry in world survey 986 if self.sysc.isWSA(): 987 self.archive.insertData("SurveyProgrammes", 988 [dbc.worldSurveyID(), self.programmeID, dbc.yes()]) 989 990 # Update ProgrammeTable to add in source table only 991 self.archive.insertData("ProgrammeTable", [self.programmeID, 2, 992 self.programme.getSourceTable(), "sourceID"]) 993 994 # If a newly correlated survey, need to update ProgrammeTable 995 if synSetup.bandMergingCrit: 996 synID = 3 997 if not self.archive.queryEntriesExist("ProgrammeTable", 998 where="programmeID=%s AND tableID=%s" % (self.programmeID, synID)): 999 1000 Logger.addMessage(pAcronym.upper() + " is now a correlated " 1001 "survey. Adding SynopticSource table entry " 1002 "into ProgrammeTable...") 1003 1004 self.archive.insertData("ProgrammeTable", [ 1005 self.programmeID, synID, 1006 synSetup.getSynopticSourceTable(), "synopticID"]) 1007 1008 Logger.addMessage("Updating RequiredNeighbours...") 1009 joinCriterion = 10.0 / 3600.0 # 10 arcsec 1010 1011 if self.programme.isNonSurvey(): 1012 # @TODO: Don't clean - either update or append. 1013 # - SourceNeighbours 1014 # @TODO: SDSS - WFCAM non-surveys, latest SDSS... 1015 twomassID = self.getExternalSurveyID('TWOMASS') 1016 ssaID = self.getExternalSurveyID('SSA') 1017 wiseAssID = self.getExternalSurveyID('WISE') 1018 neighbourEntries = [ 1019 [self.programmeID, 2, dbc.intDefault(), dbc.intDefault(), joinCriterion, 1020 "%sSourceNeighbours" % pAcronym, dbc.intDefault()], # - SourceNeighbours 1021 [self.programmeID, 2, twomassID, 1, joinCriterion, 1022 "%sSourceXtwomass_psc" % pAcronym, dbc.intDefault()], # - SourceXtwomass_psc 1023 [self.programmeID, 2, ssaID, 1, joinCriterion, 1024 "%sSourceXSSASource" % pAcronym, dbc.intDefault()], # - SourceXSSASource 1025 [self.programmeID, 2, wiseAssID, 2, joinCriterion, 1026 "%sSourceXwise_allskysc" % pAcronym, dbc.intDefault()], # - SourceXwise_allskysc 1027 ] 1028 if self.sysc.isWSA(): 1029 # Find latest SDSS 1030 latestSDSSsID = self.archive.queryAttrMax("surveyID", "ExternalSurvey", 1031 "surveyName like '%SDSS%-DR%'") 1032 surveyName, extTableName, tableID = self.archive.query( 1033 "surveyName,extTableName,extTableID", 1034 "ExternalSurveyTable as t, ExternalSurvey as s", 1035 "s.surveyID=t.surveyID and s.surveyID=%s and " 1036 "extTableName='PhotoObj'" % latestSDSSsID, firstOnly=True) 1037 neighTableName = self.getSDSSNeighTable(surveyName, extTableName) 1038 neighbourEntries += [[self.programmeID, 2, latestSDSSsID, tableID, joinCriterion, 1039 neighTableName, dbc.intDefault()]] 1040 for reqNeigh in neighbourEntries: 1041 if not self.archive.queryEntriesExist("RequiredNeighbours", 1042 "programmeID=%s and tableID=2 and surveyID=%s and extTableID=%s and extProgID=%s" 1043 % (self.programmeID, reqNeigh[2], reqNeigh[3], reqNeigh[6])): 1044 1045 self.archive.insertData("RequiredNeighbours", 1046 reqNeigh) 1047 1048 1049 self.archive.delete("RequiredNeighbours", 1050 whereStr="programmeID=extProgID AND programmeID=%s" % self.programmeID 1051 + " 
AND (neighbourTable LIKE '%XDetection'" 1052 " OR neighbourTable LIKE '%XSynopticSource')") 1053 1054 # - SourceXDetection or SourceXSynopticSource 1055 if newSetup.isSurveyDeep: 1056 for synNeighTable, synID in synNeighTableDetails: 1057 self.archive.insertData("RequiredNeighbours", 1058 [self.programmeID, 2, dbc.intDefault(), synID, 1059 joinCriterion, synNeighTable, self.programmeID]) 1060 1061 return True

    #--------------------------------------------------------------------------

    def getExternalSurveyID(self, databaseName):
1065 """ 1066 """ 1067 return self.archive.query("surveyID", "ExternalSurvey", "databaseName='%s'" 1068 % databaseName, firstOnly=True, default=dbc.intDefault())

    #--------------------------------------------------------------------------

    def getSDSSNeighTable(self, surveyName, extTableName):
1072 """ returns name of neighbour table between programme and SDSS survey 1073 """ 1074 surveyNameParts = surveyName.split('-') 1075 survName = ''.join([part.title() for part in surveyNameParts 1076 if part.upper() != 'SDSS']) 1077 return "%sX%s%s" % (self.programme.getSourceTable(), survName, extTableName)

#------------------------------------------------------------------------------

class Status(object):
1081 """This translates schema template grammar into a current status 1082 """ 1083 actionDict = { 1084 '++':'start', 1085 '+-':'startNot', 1086 '==':'finish', 1087 '=-':'finishNot'} 1088 1089 methodDict = { 1090 'p':'specificProgrammes', 1091 'r':'repeatTable', 1092 'x':'sextractorValues', 1093 'h':'highGalacticLat', 1094 'd':'synTables', 1095 'f':'cycleThroughFilters', 1096 'c':'cycleThroughColours', 1097 's':'sourceTables', 1098 't':'dropTables', 1099 'e':'externalTable', 1100 'n':'cycleThroughNeighs', 1101 'o':'onceOnly', 1102 'b':'cycleThroughBorders', 1103 } 1104 1105 colourGroups = None 1106 filterGroups = None 1107 borderGroups = None 1108 progGroups = None 1109 # Default, not split table 1110 splitTables = None 1111 # Default condition use programme, write line. 1112 useProgSP = True 1113 useProgSX = True 1114 useProgHL = True 1115 useProgSY = True 1116 useProgSR = True 1117 useProgFO = True 1118 checkSpecNeigh = None 1119 writeLine = True 1120 dropTables = False 1121 # Normally don't cycle through colours or filters 1122 filters = None 1123 colours = None 1124 borders = None 1125 useColours = False 1126 useFilters = False 1127 useBorders = False 1128 tableOrder = [('main', True)] 1129 neighView = None 1130 neighs = None 1131 useNeighs = False 1132 lastIndex = None 1133 maxIndex = None 1134 progIndex = None 1135 1136 #: DbSession connection to database. 1137 archive = None 1138 #: DataFactory.ProgrammeTable set to the current programme. 1139 programme = None 1140 #: SystemConstants initialised for current archive. 1141 sysc = None 1142 #: SynopticSetup from Programme table details for current programme. 1143 synSetup = None 1144 1145 #-------------------------------------------------------------------------- 1146
    def __init__(self, cu, progIDs, synSetup):
1148 """ 1149 Initialise status from current CuSession task. 1150 1151 @param cu: Curation task. 1152 @type cu: CuSession 1153 @param progIDs: List of programme IDs being processed. 1154 @type progIDs: list(int) 1155 1156 """ 1157 self.archive = cu.archive 1158 self.programme = cu.programme 1159 self.sysc = cu.sysc 1160 self.synSetup = synSetup 1161 programmeID = self.programme.getAttr("programmeID") 1162 self.progIndex = progIDs.index(programmeID) 1163 self.lastIndex = len(progIDs) - 1 1164 surveyIDs = self.programme.getProgIDList(onlySurvey=True) 1165 nonSurveyIDs = self.programme.getProgIDList(onlyNonSurvey=True) 1166 minSurveyProgID = min(self.sysc.scienceProgs.values()) 1167 self.progGroups = { 1168 'early': [progID for progID in surveyIDs if progID < minSurveyProgID], 1169 'ps': [progID for progID in surveyIDs if progID >= minSurveyProgID], 1170 'ns': nonSurveyIDs, 1171 'allProgs': surveyIDs + nonSurveyIDs} 1172 1173 # Filter sets 1174 allFilters = \ 1175 list(df.PassbandList(self.programme).getPassbandsLowerCase()) 1176 1177 bbFilters = df.PassbandList(self.programme).getBroadbandPassbands() 1178 1179 synFilters = list(df.PassbandList(self.programme, isSynoptic=True) 1180 .getPassbandsLowerCase()) 1181 1182 bbSynFilters = df.PassbandList(self.programme, isSynoptic=True)\ 1183 .getBroadbandPassbands() 1184 1185 # @@TODO one row only 1186 self.filterGroups = {'a': allFilters, 's': synFilters} 1187 1188 if self.sysc.hasOffsetPos: 1189 offSetMax = self.archive.queryAttrMax("noffsets", 1190 table=Join(["Multiframe", "ProgrammeFrame"], ["multiframeID"]), 1191 where="frameType LIKE '%tile%stack' AND " + 1192 DepCodes.selectNonDeprecated) or 6 1193 1194 self.filterGroups['o'] = \ 1195 ['o%d' % off for off in range(1, offSetMax + 1)] 1196 1197 l1 = bbFilters[:-1] 1198 l2 = bbFilters[1:] 1199 for fName in allFilters: 1200 if fName not in bbFilters: 1201 bbF = queries.getBBFilter(self.archive, fName, programmeID) 1202 if bbF: 1203 l1.append(fName) 1204 l2.append(bbF.lower()) 1205 1206 # Sort out NB filters to nearest BB - need to use wvl info 1207 self.colourGroups = { 1208 'a': list(zip(l1, l2)), 1209 'sb': list(zip(bbSynFilters[:-1], bbSynFilters[1:]))} 1210 1211 posList = [] 1212 for edge in ['detEdge', 'dithEdge']: 1213 for bound in ['Inner', 'Outer']: 1214 for pos in range(4): 1215 posList.append('%s%sPos%d' % (edge, bound, pos + 1)) 1216 1217 self.borderGroups = {'io': posList}

    #--------------------------------------------------------------------------

    def complexSub(self, line):
1222 """ 1223 """ 1224 # Get string to work on 1225 for _substitution in range(len(line.split('$[')) - 1): 1226 subString = '$[' + line.split('$[')[1].split('$')[0] + '$' 1227 repeatString = subString.split('[')[1].split(']')[0] 1228 joinStr = subString.split(';')[1] 1229 methodArg = subString.split(';')[2][0] 1230 addInfo = \ 1231 subString.split(';')[2].split(':')[1].split('$')[0].split(',') 1232 # 1233 method = self.methodDict[methodArg] 1234 if method == 'cycleThroughFilters': 1235 repeats = [] 1236 for ii, shtName in enumerate(self.filterGroups[addInfo[0]]): 1237 subs = [("&A&", shtName.upper()), 1238 ("&a&", shtName.lower()), 1239 ("&As&", shtName.title()), 1240 ("&n&", 'n' if ii > 0 else '')] 1241 repeats.append(utils.multiSub(repeatString, subs)) 1242 line = utils.multiSub(line, [(subString, joinStr.join(repeats))]) 1243 elif method == 'cycleThroughColours': 1244 repeats = [] 1245 for shtName1, shtName2 in self.colourGroups[addInfo[0]]: 1246 subs = [("&A&", shtName1.upper()), 1247 ("&a&", shtName1.lower()), 1248 ("&As&", shtName1.title()), 1249 ("&B&", shtName2.upper()), 1250 ("&b&", shtName2.lower()), 1251 ("&Bs&", shtName2.title())] 1252 repeats.append(utils.multiSub(repeatString, subs)) 1253 line = utils.multiSub(line, [(subString, joinStr.join(repeats))]) 1254 elif method == 'cycleThroughBorders': 1255 repeats = [] 1256 for border in self.borderGroups[addInfo[0]]: 1257 subs = [("&A&", border.upper()), 1258 ("&a&", border.lower())] 1259 repeats.append(utils.multiSub(repeatString, subs)) 1260 line = utils.multiSub(line, [(subString, joinStr.join(repeats))]) 1261 elif method == 'specificProgrammes': 1262 self.addInfo = addInfo 1263 if self.isProgMatch(): 1264 line = utils.multiSub(line, [(subString, repeatString)]) 1265 else: 1266 line = utils.multiSub(line, [(subString, joinStr)]) 1267 else: 1268 raise ProgrammeBuilder.CuError("Schema inconsistency") 1269 return line

    #--------------------------------------------------------------------------

    def setNeighbours(self):
1274 """ 1275 """ 1276 # Different types of neighbour table need different info 1277 # 4 types: n - neighbour with self 1278 # x - neighbour with external survey 1279 # s - neighbour with another table in programme 1280 # p - neighbour with table in another programme 1281 viewStringN = ("neighTable,matchDist,primeTable,primeID") 1282 viewStringX = viewStringN + (",externName,externDb,externTable,externID") 1283 viewStringS = viewStringN + (",externTable,externID") 1284 viewStringP = viewStringS + (",extProgID,externName") 1285 selectStringN = ("r.neighbourTable,r.joinCriterion,pt.tableName," 1286 "pt.sourceIDName") 1287 selectStringX = selectStringN + (",es.surveyName,es.databaseName," 1288 "est.extTableName,est.sourceIDName") 1289 selectStringS = selectStringN + (",pt2.tableName,pt2.sourceIDName") 1290 selectStringP = selectStringS + (",p.programmeID,p.title") 1291 # ## 1292 fromStringN = ("RequiredNeighbours as r,ProgrammeTable as pt") 1293 fromStringX = fromStringN + (",ExternalSurvey as es," 1294 "ExternalSurveyTable as est") 1295 fromStringS = fromStringN + (",ProgrammeTable as pt2") 1296 fromStringP = fromStringS + (",Programme as p") 1297 # ## 1298 whereStringN = ("r.programmeID=%d and r.programmeID=pt.programmeID and " 1299 "r.tableID=pt.tableID" % self.programme.getAttr("programmeID")) 1300 whereStringX = whereStringN + (" and r.surveyID=es.surveyID and " 1301 "r.surveyID=est.surveyID and " 1302 "r.extTableID=est.extTableID ") 1303 whereStringS = whereStringN + (" and r.programmeID=pt2.programmeID and " 1304 "r.extTableID=pt2.tableID") 1305 whereStringP = whereStringN + (" and r.extProgID=pt2.programmeID and " 1306 "r.extTableID=pt2.tableID and " 1307 "p.programmeID=r.extProgID") 1308 # 1309 # Check numbers of each table. 1310 isNeigh = self.archive.queryEntriesExist( 1311 fromStringN, whereStringN + 1312 " and r.surveyID<0 and r.extProgID<0") 1313 isXMatch = self.archive.queryEntriesExist( 1314 fromStringX, whereStringX + " and r.surveyID>0") 1315 # Should not call synoptic - e.g. 
if want SourceXPawprints 1316 isSynop = self.archive.queryEntriesExist( 1317 fromStringS, whereStringS + " and r.surveyID<0 and " 1318 "r.extProgID=r.programmeID") and self.programme.isDeep() 1319 isInter = self.archive.queryEntriesExist( 1320 fromStringS, whereStringS + " and r.surveyID<0 and " 1321 "r.extProgID=r.programmeID") and not self.programme.isDeep() 1322 isProg = self.archive.queryEntriesExist( 1323 fromStringP, whereStringP + " and r.surveyID<0 and " 1324 "r.extProgID!=r.programmeID") 1325 self.neighTables = {} 1326 if isNeigh: 1327 self.neighTables['Neigh'] = df.View(viewStringN.split(','), 1328 self.archive.query(selectStringN, fromStringN, 1329 whereStringN + 1330 " and r.surveyID<0 and r.extProgID<0")) 1331 if isXMatch: 1332 self.neighTables['XMatch'] = df.View(viewStringX.split(','), 1333 self.archive.query(selectStringX, fromStringX, 1334 whereStringX + " and r.surveyID>0")) 1335 if isSynop: 1336 self.neighTables['Synop'] = df.View(viewStringS.split(','), 1337 self.archive.query(selectStringS, fromStringS, 1338 whereStringS + " and r.surveyID<0 and " 1339 "r.extProgID=r.programmeID")) 1340 if isInter: 1341 self.neighTables['Inter'] = df.View(viewStringS.split(','), 1342 self.archive.query(selectStringS, fromStringS, 1343 whereStringS + " and r.surveyID<0 and " 1344 "r.extProgID=r.programmeID")) 1345 if isProg: 1346 self.neighTables['Prog'] = df.View(viewStringP.split(','), 1347 self.archive.query(selectStringP, fromStringP, 1348 whereStringP + " and r.surveyID<0 and " 1349 "r.extProgID!=r.programmeID")) 1350 self.neighTypes = self.neighTables.keys() 1351 if self.neighTables: 1352 self.neighTables['All'] = df.View(viewStringN.split(','), 1353 self.archive.query(selectStringN, fromStringN, whereStringN))

    #--------------------------------------------------------------------------

    def update(self, line):
1358 """ 1359 """ 1360 self.parseString(line) 1361 1362 # Combine programme information to decide whether to ignore 1363 # subsequent lines, or repeat them. 1364 # 1365 # Split tables 1366 # 1367 if (self.method == 'repeatTable' and 1368 self.action == 'start'): 1369 if self.addInfo[0] == 'd': 1370 self.tableOrder = [('', True), ('Raw', False), ('Astrometry', False), 1371 ('Photometry', False)] 1372 self.splitTables = {'d':['', 'Raw', 'Astrometry', 'Photometry'], 1373 'r':['', 'Raw'], 1374 'a':['', 'Astrometry'], 1375 'p':['', 'Photometry'], 1376 'D':[''], 1377 'R':['Raw'], 1378 'A':['Astrometry'], 1379 'P':['Photometry'], 1380 'S':['Raw', 'Astrometry', 'Photometry'], } 1381 elif self.addInfo[0] == 's': 1382 self.tableOrder = [('', True), ('Merge', False)] 1383 self.splitTables = {'m':['', 'Merge'], 1384 's':[''], } 1385 elif self.addInfo[0] == 'n': 1386 tableDict = {'Neigh':'n', 1387 'XMatch':'x', 1388 'Synop':'s', 1389 'Inter':'i', 1390 'Prog':'p'} 1391 self.tableOrder = [] 1392 self.splitTables = {'a':self.neighTypes} 1393 for table in self.neighTypes: 1394 self.tableOrder.append((table, True)) 1395 self.splitTables[tableDict[table]] = [table] 1396 elif (self.method == 'repeatTable' and 1397 self.action == 'finish'): 1398 # Reset 1399 self.splitTables = None 1400 self.tableOrder = [('main', True)] 1401 # 1402 # Specific Programme 1403 # 1404 if self.method == 'specificProgrammes': 1405 if self.action == 'start': 1406 if ((not self.isProgMatch() and not self.progSubtract) or 1407 (self.isProgMatch() and self.progSubtract)): 1408 self.useProgSP = False 1409 elif self.action == 'startNot': 1410 if ((self.isProgMatch() and not self.progSubtract) or 1411 (not self.isProgMatch() and self.progSubtract)): 1412 self.useProgSP = False 1413 elif self.action == 'finish': 1414 if ((not self.isProgMatch() and not self.progSubtract) or 1415 (self.isProgMatch() and self.progSubtract)): 1416 self.useProgSP = True 1417 elif self.action == 'finishNot': 1418 # Reset 1419 if ((self.isProgMatch() and not self.progSubtract) or 1420 (self.isProgMatch() and self.progSubtract)): 1421 self.useProgSP = True 1422 # 1423 # Source Tables 1424 # 1425 if (self.method == 'sourceTables' and 1426 self.action == 'start'): 1427 # Check to see if the programme has a source table defined 1428 if self.programme.getSourceTable() == dbc.charDefault(): 1429 self.useProgSR = False 1430 elif (self.method == 'sourceTables' and 1431 self.action == 'finish'): 1432 # Reset 1433 self.useProgSR = True 1434 # 1435 # SExtractor values 1436 # 1437 if (self.method == 'sextractorValues' and 1438 self.action == 'start'): 1439 # If not a SExtractor programme no match 1440 if not self.programme.useSExtractor(): 1441 self.useProgSX = False 1442 elif self.addInfo: 1443 # Should be programme removed for some reason 1444 if not self.progSubtract: 1445 raise ProgrammeBuilder.CuError("Schema inconsistency") 1446 # Check to see if programme is mentioned. 1447 if self.isProgMatch(): 1448 self.useProgSX = False 1449 elif (self.method == 'sextractorValues' and 1450 self.action == 'startNot'): 1451 # Check if programme not SExtractor - CASU extractor 1452 if self.programme.useSExtractor(): 1453 self.useProgSX = False 1454 elif self.addInfo: 1455 # Should be programme removed for some reason 1456 if not self.progSubtract: 1457 raise ProgrammeBuilder.CuError("Schema inconsistency") 1458 # Check to see if programme is mentioned. 
1459 if self.isProgMatch(): 1460 self.useProgSX = False 1461 1462 elif (self.method == 'sextractorValues' and 1463 (self.action == 'finish' or 1464 self.action == 'finishNot')): 1465 # Reset 1466 self.useProgSX = True 1467 # 1468 # Low density extragalactic 1469 # 1470 if (self.method == 'highGalacticLat' and 1471 self.action == 'start'): 1472 1473 # Starts off true - only set to false 1474 # Set to false if isLowGalacticLat=False 1475 # and if programme not 1476 if self.programme.isLowGalLat(): 1477 self.useProgHL = False 1478 elif self.addInfo: 1479 if not self.progSubtract: 1480 raise ProgrammeBuilder.CuError("Schema inconsistency") 1481 if self.isProgMatch(): 1482 self.useProgHL = False 1483 1484 elif (self.method == 'highGalacticLat' and 1485 self.action == 'startNot'): 1486 1487 if not self.programme.isLowGalLat(): 1488 self.useProgHL = False 1489 1490 elif self.addInfo: 1491 # Should be programme removed for some reason 1492 if not self.progSubtract: 1493 raise ProgrammeBuilder.CuError("Schema inconsistency") 1494 if self.isProgMatch(): 1495 self.useProgHL = False 1496 1497 elif (self.method == 'highGalacticLat' and 1498 (self.action == 'finish' or 1499 self.action == 'finishNot')): 1500 # Reset 1501 self.useProgHL = True 1502 # 1503 # Is synoptic 1504 # 1505 if (self.method == 'synTables' and 1506 self.action == 'start'): 1507 # Check synopticStatus 1508 if self.addInfo[0] == 'a': # all Synoptic surveys 1509 if not self.programme.isDeep(): 1510 self.useProgSY = False 1511 elif self.addInfo[0] == 'c': # Correlated only 1512 if not self.synSetup.bandMergingCrit: 1513 self.useProgSY = False 1514 elif self.addInfo[0] == 'o': # Correlated and single epoch 1515 if not (self.synSetup.bandMergingCrit and self.synSetup.isCorSingleEpoch): 1516 self.useProgSY = False 1517 elif self.addInfo[0] == 'm': # Only if 1518 if not (self.synSetup.isCorrelBMTable()): 1519 self.useProgSY = False 1520 elif self.addInfo[0] == 'n': # Not correlated only 1521 if not (self.synSetup.isNonCorrelBMTable()): 1522 self.useProgSY = False 1523 elif self.addInfo[0] == 'k': # Variability table correlated 1524 if not (self.synSetup.isVarTableCorrel()): 1525 self.useProgSY = False 1526 elif self.addInfo[0] == 'v': # Variability table not correlated 1527 if not (self.programme.isDeep() and not 1528 self.synSetup.isVarTableCorrel()): 1529 self.useProgSY = False 1530 elif (self.method == 'synTables' and 1531 self.action == 'finish'): 1532 # Reset 1533 self.useProgSY = True 1534 # 1535 # Use first programme only - indices file 1536 # 1537 if (self.method == 'onceOnly' and 1538 self.action == 'start'): 1539 if self.addInfo[0] == 'f': 1540 if not self.progIndex == 0: 1541 self.useProgFO = False 1542 elif self.addInfo[0] == 'l': 1543 if not self.progIndex == self.lastIndex: 1544 self.useProgFO = False 1545 elif (self.method == 'onceOnly' and 1546 self.action == 'finish'): 1547 # reset 1548 self.useProgFO = True 1549 # 1550 # Specific External table 1551 # 1552 if (self.method == 'externalTable' and 1553 self.action == 'start'): 1554 if not self.addInfo: 1555 raise ProgrammeBuilder.CuError("Schema inconsistency") 1556 self.checkSpecNeigh = self.addInfo[0] 1557 if (self.method == 'externalTable' and 1558 self.action == 'finish'): 1559 self.checkSpecNeigh = None 1560 # 1561 # Decision 1562 # 1563 self.writeLine = (self.useProgSY and self.useProgHL and 1564 self.useProgSX and self.useProgSP and 1565 self.useProgSR and self.useProgFO) 1566 # 1567 # Set filters 1568 # 1569 if (self.method == 'cycleThroughFilters' and 1570 
self.action == 'start'): 1571 self.useFilters = True 1572 self.filters = self.filterGroups[self.addInfo[0]] 1573 elif (self.method == 'cycleThroughFilters' and 1574 self.action == 'finish'): 1575 self.useFilters = False 1576 self.filters = None 1577 # 1578 # Set colours 1579 # 1580 if (self.method == 'cycleThroughColours' and 1581 self.action == 'start'): 1582 self.useColours = True 1583 self.colours = self.colourGroups[self.addInfo[0]][:] 1584 if len(self.addInfo) > 1: 1585 # Add or remove additional terms 1586 self.stripAddCols(','.join(self.addInfo[1:])) 1587 1588 elif (self.method == 'cycleThroughColours' and 1589 self.action == 'finish'): 1590 self.colours = None 1591 self.useColours = False 1592 1593 # 1594 # set neighs 1595 # 1596 if (self.method == 'cycleThroughNeighs' and 1597 self.action == 'start'): 1598 if self.addInfo: 1599 if self.addInfo[0] == 'o': 1600 self.maxIndex = 1 1601 self.useNeighs = True 1602 self.neighs = self.neighTables 1603 elif (self.method == 'cycleThroughNeighs' and 1604 self.action == 'finish'): 1605 self.maxIndex = None 1606 self.neighs = None 1607 self.useNeighs = False 1608 1609 # 1610 # set borders 1611 # 1612 if (self.method == 'cycleThroughBorders' and 1613 self.action == 'start'): 1614 self.useBorders = True 1615 self.borders = self.borderGroups[self.addInfo[0]] 1616 elif (self.method == 'cycleThroughBorders' and 1617 self.action == 'finish'): 1618 self.useBorders = False 1619 self.borders = None 1620 1621 # 1622 # Drop tables 1623 # 1624 if (self.method == 'dropTables' and 1625 self.action == 'start'): 1626 if self.isProgMatch(): 1627 self.dropTables = True 1628 elif (self.method == 'dropTables' and 1629 self.action == 'finish'): 1630 # Reset 1631 if self.isProgMatch(): 1632 self.dropTables = False
1633 1634 #-------------------------------------------------------------------------- 1635
1636 - def isProgMatch(self):
1637 """ 1638 """ 1639 progID = self.programme.getAttr("programmeID") 1640 for info in self.addInfo: 1641 if info in self.progGroups and progID in self.progGroups[info] \ 1642 or info == self.programme.getAcronym(): 1643 return True 1644 1645 return False
1646 1647 #-------------------------------------------------------------------------- 1648
1649 - def parseString(self, line):
1650 """ 1651 """ 1652 self.action = self.actionDict[line[0:2]] 1653 self.method = self.methodDict[line[2]] 1654 self.progSubtract = False 1655 self.addInfo = None 1656 if line[3] == ":": 1657 if line[4] == '-': 1658 self.progSubtract = True 1659 self.addInfo = line[5:].split()[0].split(',') # remove any spaces 1660 else: 1661 self.addInfo = line[4:].split()[0].split(',') # remove any spaces
1662 1663 #-------------------------------------------------------------------------- 1664
1665 - def getAllTables(self, tableDict):
1666 """ 1667 """ 1668 tables = set(sum(tableDict.values(), [])) 1669 return dict([(table, []) for table in tables])
1670 1671 #-------------------------------------------------------------------------- 1672
1673 - def initialise(self, dictType, tableStruct):
1674 """Initialise dictionary of filter or colour info 1675 """ 1676 dictData = dict() 1677 if dictType == 'filter': 1678 filterList = self.filters 1679 for table in tableStruct: 1680 dictFilt = {} 1681 for shtName in filterList: 1682 dictFilt[shtName] = list() 1683 dictData[table] = dictFilt 1684 elif dictType == 'colour': 1685 filterList = ['%sm%s' % (c1, c2) for c1, c2 in self.colours] 1686 for table in tableStruct: 1687 dictFilt = {} 1688 for shtName in filterList: 1689 dictFilt[shtName] = list() 1690 dictData[table] = dictFilt 1691 elif dictType == 'neigh': 1692 # different list for each table 1693 for table in tableStruct: 1694 dictFilt = {} 1695 for nTable in self.neighs[table]['neightable']: 1696 dictFilt[nTable] = list() 1697 dictData[table] = dictFilt 1698 filterList = self.neighs['All']['neightable'] 1699 elif dictType == 'border': 1700 filterList = self.borders 1701 for table in tableStruct: 1702 dictFilt = {} 1703 for shtName in filterList: 1704 dictFilt[shtName] = list() 1705 dictData[table] = dictFilt 1706 return dictData, filterList
1707 1708 #-------------------------------------------------------------------------- 1709
1710 - def stripAddCols(self, addColInfo):
1711 """ 1712 Parses a string with additional colour info and edits colour group 1713 accordingly. 1714 1715 """ 1716 # parse string 1717 addColsInd = addColInfo.find('+') 1718 subColsInd = addColInfo.find('-') 1719 if addColsInd < subColsInd: 1720 parts = addColInfo.split('-') 1721 if addColsInd >= 0: 1722 newCols = parts[0] 1723 remCols = parts[1] 1724 else: 1725 newCols = None 1726 remCols = parts[1] 1727 else: 1728 parts = addColInfo.split('+') 1729 if subColsInd >= 0: 1730 remCols = parts[0] 1731 newCols = parts[1] 1732 else: 1733 remCols = None 1734 newCols = parts[1] 1735 1736 for ii, subString in enumerate([newCols, remCols]): 1737 if subString: 1738 colList = subString.split(',') 1739 1740 # Make sure new colours match possible filters 1741 colList = self.testColours(colList) 1742 1743 # Add or subtract colours from/to self.colours 1744 if ii == 0: 1745 self.colours.extend(tuple(col.split('m')) for col in colList) 1746 else: 1747 oldCols = ['m'.join(cl) for cl in self.colours] 1748 [oldCols.remove(col) for col in colList 1749 if col in oldCols] 1750 self.colours = [tuple(col.split('m')) for col in oldCols]
1751 1752 #-------------------------------------------------------------------------- 1753
1754 - def testColours(self, colList):
1755 """ 1756 Tests whether filters that make up colours are available for this 1757 programme. 1758 1759 """ 1760 finalColList = [] 1761 for col in colList: 1762 filComb = col.split('m') 1763 if (filComb[0] in self.filterGroups['a'] and 1764 filComb[1] in self.filterGroups['a']): 1765 finalColList.append(col) 1766 1767 return finalColList
1768 1769 #------------------------------------------------------------------------------ 1770
1771 -class Registration(object):
1772 """ Programme requirements from registration file. 1773 """ 1774 comments = '' #: Extra comments on the programme. 1775 description = dbc.charDefault() #: Project description. 1776 email = '' #: PI's e-mail address. 1777 expectedFilters = None #: List of filters required by PI. 1778 password = '' #: PI password. 1779 piName = '' #: Name of the PI. 1780 1781 #-------------------------------------------------------------------------- 1782
1783 - def __init__(self, progTable):
1784 """ 1785 Parses registration files, if not already done so, and sets registered 1786 requirements for the given programme. 1787 1788 @param progTable: Programme table for current database with the 1789 current row set to that of the desired programme. 1790 @type progTable: df.ProgrammeTable 1791 1792 """ 1793 self.expectedFilters = [] 1794 if not progTable.isNonSurvey(): 1795 # No registration file if not a non-survey, so use hard wired value 1796 self.description = progTable.getAttr("description") 1797 return 1798 1799 programme = progTable.getAttr("dfsIdString") 1800 programmeName = progTable.getAcronym().upper() 1801 sysc = progTable._db.sysc 1802 1803 foundReg = False 1804 for fileName in os.listdir(sysc.nonSurveyRegPath()): 1805 if '.reg' in fileName: 1806 for line in utils.ParsedFile(sysc.nonSurveyRegPath(fileName)): 1807 if not foundReg \ 1808 and line.startswith("programme=" + programme): 1809 1810 foundReg = True 1811 1812 elif not foundReg and line.startswith("programmeName="): 1813 if line.endswith(programmeName): 1814 foundReg = True 1815 else: 1816 break 1817 1818 elif foundReg and line.startswith("title="): 1819 self.description = line.replace("title=", "") 1820 1821 elif foundReg and line.startswith("firstName="): 1822 self.piName = line.replace("firstName=", "") 1823 1824 elif foundReg and line.startswith("lastName="): 1825 self.piName += line.replace("lastName=", " ") 1826 1827 elif foundReg and line.startswith("email="): 1828 self.email = line.replace("email=", "") 1829 1830 elif foundReg and line.startswith("password="): 1831 self.password = line.replace("password=", "") 1832 1833 elif foundReg and "num=" in line: 1834 self.expectedFilters.append(line.split("num=")) 1835 1836 elif foundReg and line.startswith("comments="): 1837 self.comments = line.replace("comments=", "") 1838 1839 if foundReg: 1840 break
1841 1842 #------------------------------------------------------------------------------ 1843
1844 -class CurrentSetup(object):
1845 """ Current requirements in archive. 1846 """ 1847 currentFiltData = None #: List of ? 1848 currentStackPosData = None #: List of ? 1849 currentTilePosData = None #: List of ? 1850 isSurveyDeep = False #: Survey contains repeated observations? 1851 nCurrentStackProducts = 0 #: Number of required products. 1852 nCurrentTileProducts = 0 #: Number of required products. 1853 productData = None #: Dict(List of required stacks / tiles). 1854 1855 stackAttrs = "productID, fieldID, filterID, nuStep, ra, dec, stackRadius" 1856 1857 tileAttrs = "productID, fieldID, filterID, nuStep, ra, dec, stackRadius, "\ 1858 "posAngle" 1859 mosaicAttrs = "productID, fieldID, filterID, nuStep, pixelSize, ra, dec, " \ 1860 "raExtent, decExtent, posAngle" 1861 1862 #-------------------------------------------------------------------------- 1863
1864 - def __init__(self, archive, programmeID):
1865 """ 1866 """ 1867 1868 self.productData = {} 1869 if archive.sysc.isVSA(): 1870 self.stackAttrs += ", offsetPos, posAngle" 1871 if archive.sysc.isOSA(): 1872 self.stackAttrs = "productID, fieldID, filterID, ra, dec, " \ 1873 "stackRadius, posAngle" 1874 currentReqStacks = archive.query( 1875 selectStr=self.stackAttrs, 1876 fromStr="RequiredStack", 1877 whereStr="programmeID=%d" % programmeID) 1878 1879 self.nCurrentStackProducts = len(currentReqStacks) 1880 1881 if archive.sysc.isVSA(): 1882 currentReqTiles = archive.query( 1883 selectStr=self.tileAttrs, 1884 fromStr="RequiredTile", 1885 whereStr="programmeID=%d" % programmeID) 1886 1887 self.nCurrentTileProducts = len(currentReqTiles) 1888 1889 currentStkPointings = [(point.fieldID, math.radians(point.ra), 1890 math.radians(point.dec), point.stackRadius, point.offsetPos, 1891 point.posAngle) for point in currentReqStacks] 1892 1893 # Add in 0 for offsetPos to use same code for grouping stacks 1894 # and tiles 1895 currentTilePointings = [(point.fieldID, math.radians(point.ra), 1896 math.radians(point.dec), point.stackRadius, 0, point.posAngle) 1897 for point in currentReqTiles] 1898 1899 self.currentTilePosData = sorted(set(currentTilePointings)) 1900 self.productData["tile"] = currentReqTiles 1901 elif archive.sysc.isOSA(): 1902 currentStkPointings = [(point.fieldID, math.radians(point.ra), 1903 math.radians(point.dec), point.stackRadius, 1904 point.posAngle) for point in currentReqStacks] 1905 else: 1906 currentStkPointings = [(point.fieldID, math.radians(point.ra), 1907 math.radians(point.dec), point.stackRadius) 1908 for point in currentReqStacks] 1909 1910 self.currentStackPosData = sorted(set(currentStkPointings)) 1911 self.productData["stack"] = currentReqStacks 1912 1913 # Find first product ID of each pointing: 1914 currentRequiredFilters = archive.query( 1915 selectStr="filterID, nPass, isSynoptic", 1916 fromStr="RequiredFilters", 1917 whereStr="programmeID=%d" % programmeID) 1918 1919 # Now match microstep to filterID 1920 self.currentFiltData = [] 1921 self.isSurveyDeep = False 1922 for filterID, nPass, isSynop in currentRequiredFilters: 1923 if isSynop == 1: 1924 self.isSurveyDeep = True 1925 nusList = ([point.nuStep for point in currentReqStacks 1926 if filterID == point.filterID] 1927 if archive.sysc.hasMicroStep else []) 1928 1929 if len(nusList) > 0: 1930 typNustep = int(stats.clippedMADHistogram(nusList, 1931 retValues="median")[0]) 1932 else: 1933 typNustep = dbc.intDefault() 1934 # @TODO: is it simpler just to add in a default value? 1935 if archive.sysc.hasMicroStep: 1936 self.currentFiltData.append((filterID, typNustep, nPass, 1937 isSynop)) 1938 else: 1939 self.currentFiltData.append((filterID, nPass, isSynop))
1940 #------------------------------------------------------------------------------ 1941
1942 -class BestSetup(object):
1943 """ Best new setup from stack data. 1944 """ 1945 isSurveyDeep = False #: Survey contains repeated observations? 1946 isTile = False #: ? 1947 newFiltData = None #: List of ? 1948 newPosData = None #: List of ? 1949 posAngleTol = None #: ? 1950 productData = None #: ? 1951 productLinks = None #: ? 1952 productType = "stack" #: Product type, e.g. stack, tile etc. 1953 progID = None #: Unique ID of programme being set up. 1954 sysc = SystemConstants() #: Initialised system constants object. 1955 displayDeep = False 1956 #-------------------------------------------------------------------------- 1957
1958 - def __init__(self, progTable, recreate, dateRange, displayDeeps):
1959 """ 1960 """ 1961 self.productLinks = [] 1962 1963 # @@TODO if VISTA rotates by 180 degrees will stacking be a mess? 1964 # limit stacks by rotation angle too 1965 # then mosaic tiles at different rotation angles together. 1966 # Add in later. 1967 archive = progTable._db 1968 self.productData = defaultdict(list) 1969 self.sysc = archive.sysc 1970 self.displayDeep = displayDeeps 1971 self.progID = progTable.getAttr("programmeID") 1972 self.posAngTol = progTable.getAttr("posAngleTolerance") 1973 # @todo: Keep RequiredFilters for VISTA PSs 1974 currentSetup = CurrentSetup(archive, self.progID) 1975 Stack = namedtuple("Stack", currentSetup.stackAttrs) 1976 Tile = namedtuple("Tile", currentSetup.tileAttrs) 1977 Mosaic = namedtuple("Mosaic", currentSetup.mosaicAttrs) 1978 1979 stkAttr = "ra dec" 1980 posAttr = "fieldID ra dec gSize" 1981 if self.sysc.hasOffsetPos: 1982 posAttr += " off" 1983 stkAttr += " off" 1984 if self.sysc.hasVarOrient: 1985 posAttr += " pa" 1986 stkAttr += " pa" 1987 self.Position = namedtuple("Position", posAttr) 1988 ActStack = namedtuple("ActStack", stkAttr) 1989 1990 # First check to see in there are any external products 1991 prevReleaseNum = archive.queryAttrMax("releaseNum", 1992 table="ProgrammeFrame", 1993 where="programmeID=%s" % self.progID) or 0 1994 externalProd = archive.query("*", "ExternalProduct", 1995 "programmeID=%s and releaseNum>%s and productType in " 1996 "('mosaic')" % (self.progID, prevReleaseNum), firstOnly=True, 1997 default=None) 1998 if externalProd: 1999 extpd = ExternalProduct(archive, externalProd, progTable, dateRange) 2000 self.productData[externalProd.productType] = extpd.getProductList( 2001 {'mosaic': Mosaic, 2002 'stack': Stack, 'tile': Tile}[externalProd.productType]) 2003 2004 self.newPosData = extpd.getPosData() 2005 self.isLowGalLat = \ 2006 self.checkGalacticStatus(finalStacks=[], progTable=progTable) 2007 2008 self.isSurveyDeep = extpd.prepareProvList() 2009 self.newFiltData = extpd.getFiltData() 2010 self.productLinks = extpd.getProductLinks() 2011 extpd.renameFiles() 2012 2013 return 2014 2015 # @TODO: NonSurveys - initial setup 2016 if progTable.isNonSurvey(): 2017 # Check what productType 2018 # @TODO: what about mosaic surveys? 2019 if not self.sysc.hasOffsetPos: 2020 archive.update("Programme", 2021 entryList=[("sourceProdType", "'stack'"), 2022 ("epochFrameType", "'stack'")], 2023 where="programmeID=%s" % self.progID) 2024 if self.sysc.hasOffsetPos: 2025 prodType = ('tile' if archive.queryEntriesExist( 2026 Join(["ProgrammeFrame", "Multiframe"], ["multiframeID"]), 2027 where="programmeID=%s AND frameType like 'tile%%stack'" 2028 % self.progID) else 'stack') 2029 archive.update("Programme", 2030 entryList=[("sourceProdType", "'%s'" % prodType), 2031 ("epochFrameType", "'%s'" % prodType), 2032 ("posAngleTolerance", 2033 self.sysc.defaultPosAngleTolerance)], 2034 where="programmeID=%s" % self.progID) 2035 # Update progTable 2036 progTable = df.ProgrammeTable(archive) 2037 progTable.setCurRow(programmeID=self.progID) 2038 self.posAngTol = progTable.getAttr("posAngleTolerance") 2039 2040 # @todo 2nd time round - fit multiframes into existing products. 2041 # then make new products of anything left over. 
2042 2043 dateRange = "utDate BETWEEN '%s' AND '%s'" % dateRange 2044 # Now find current archive setup 2045 # Case 2046 2047 frameTypeSel = queries.getFrameSelection('stack', noDeeps=True) 2048 nustepStr = ', nustep' if self.sysc.hasMicroStep else '' 2049 selectStr = ("m.multiframeID, (15.*rabase) as raBase, decBase, m.filterID%s, " 2050 % nustepStr 2051 + ("offsetX, offsetY, " if self.sysc.hasOffsetPos else "") 2052 + "AVG(totalExpTime) AS totalExpTime" 2053 + (", MIN(posAngle) AS posAngle" if self.sysc.hasVarOrient else 2054 "")) 2055 2056 fromStr = ("Multiframe as m,ProgrammeFrame as p,MultiframeDetector as d," 2057 "CurrentAstrometry as c") 2058 2059 whereStr = ("m.multiframeID=p.multiframeID and m.multiframeID=" 2060 "d.multiframeID and d.multiframeID=c.multiframeID and " 2061 "c.extNum=d.extNum AND %s" 2062 " AND programmeID=%s AND m.%s AND posAngle>=-180" 2063 " AND %s" % (frameTypeSel, self.progID, 2064 DepCodes.selectNonDeprecated, dateRange)) 2065 groupBy = ("m.multiframeID, raBase, decBase, " 2066 "m.filterID%s" % nustepStr 2067 + (",offsetX, offsetY" if self.sysc.hasOffsetPos else "") 2068 + ",telescope, instrument") 2069 orderBy = "decBase, m.filterID%s" % nustepStr 2070 2071 stacks = archive.query(selectStr, fromStr, whereStr, groupBy, orderBy) 2072 2073 if self.sysc.hasOffsetPos and progTable.getAttr('sourceProdType') == 'tile': 2074 mfIDSel = \ 2075 SelectSQL("m.multiframeID", fromStr, whereStr, groupBy=groupBy) 2076 2077 frameTypeSel = queries.getFrameSelection('tile', noDeeps=True) 2078 2079 # Make sure all mfIDs come from a good tile. 2080 goodTileSel = SelectSQL( 2081 select="p.combiframeID", 2082 table="(%s) AS t, Provenance AS p" % SelectSQL( 2083 select="pv.combiframeID, nOffsets", 2084 table="Provenance AS pv, Multiframe AS m", 2085 where="m.multiframeID=pv.combiframeID AND %s AND %s" 2086 " AND pv.multiframeID IN (%s)" % (frameTypeSel, 2087 DepCodes.selectNonDeprecated, mfIDSel)), 2088 where="t.combiframeID=p.combiframeID AND p.multiframeID IN (%s)" 2089 % mfIDSel, 2090 groupBy="p.combiframeID, t.nOffSets" 2091 " HAVING COUNT(distinct p.multiframeID)=t.nOffsets") 2092 2093 # Make sure each of these tiles has right no of good mfs. 
2094 goodMfIDs = archive.query("multiframeID", "Provenance", 2095 whereStr="combiframeID IN (%s)" % goodTileSel) 2096 2097 stacks = [stack for stack in stacks 2098 if stack.multiframeID in goodMfIDs] 2099 2100 if not stacks: 2101 raise ProgrammeBuilder.CuError( 2102 "Programme %s has no non-deprecated catalogue data assigned in " 2103 "%s.ProgrammeFrame" % (progTable.getAcronym(), archive.database)) 2104 if self.sysc.hasOffsetPos and self.sysc.hasVarOrient: 2105 offsets = [astro.getOffSetPos(stack.offsetX, stack.offsetY) 2106 for stack in stacks] 2107 2108 possStacks = [ActStack(math.radians(stack.raBase), 2109 math.radians(stack.decBase), offset, stack.posAngle) 2110 for stack, offset in zip(stacks, offsets) 2111 if offset >= 0] 2112 elif self.sysc.hasOffsetPos: 2113 offsets = [astro.getOffSetPos(stack.offsetX, stack.offsetY) 2114 for stack in stacks] 2115 2116 possStacks = [ActStack(math.radians(stack.raBase), 2117 math.radians(stack.decBase), offset) 2118 for stack, offset in zip(stacks, offsets) 2119 if offset >= 0] 2120 elif self.sysc.hasVarOrient: 2121 possStacks = [ActStack(math.radians(stack.raBase), 2122 math.radians(stack.decBase), stack.posAngle) 2123 for stack in stacks] 2124 else: 2125 possStacks = [ActStack(math.radians(stack.raBase), 2126 math.radians(stack.decBase)) for stack in stacks] 2127 # group by ra,dec 2128 finalStacks = self.groupbyPosition(possStacks, 2129 self.sysc.maxRaDecExtentStackDeg, currentSetup.currentStackPosData, 2130 recreate) 2131 2132 self.isLowGalLat = self.checkGalacticStatus(finalStacks, progTable) 2133 if self.sysc.hasOffsetPos and self.sysc.hasVarOrient: 2134 self.productType = "tile" 2135 2136 finalTiles = self.findTiles(finalStacks, 2137 self.sysc.maxRaDecExtentStackDeg, recreate, 2138 currentSetup.currentTilePosData) 2139 2140 finalStacks = [self.Position(pID, math.degrees(ra), math.degrees(dec), gSize, 2141 ofP, posAng) 2142 for pID, ra, dec, gSize, ofP, posAng in finalStacks] 2143 2144 finalTiles = [(pID, math.degrees(ra), math.degrees(dec), gSize, 2145 posAng) 2146 for pID, ra, dec, gSize, ofP, posAng in finalTiles] 2147 2148 elif self.sysc.hasOffsetPos: 2149 self.productType = "tile" 2150 2151 finalTiles = self.findTiles(finalStacks, 2152 self.sysc.maxRaDecExtentStackDeg, recreate, 2153 currentSetup.currentTilePosData) 2154 2155 finalStacks = [self.Position(pID, math.degrees(ra), math.degrees(dec), gSize, 2156 ofP) 2157 for pID, ra, dec, gSize, ofP in finalStacks] 2158 2159 finalTiles = [(pID, math.degrees(ra), math.degrees(dec), gSize) 2160 for pID, ra, dec, gSize, ofP in finalTiles] 2161 elif self.sysc.hasVarOrient: 2162 2163 finalStacks = [self.Position(pID, math.degrees(ra), math.degrees(dec), gSize, 2164 posAng) 2165 for pID, ra, dec, gSize, posAng in finalStacks] 2166 2167 2168 else: 2169 finalStacks = [self.Position(pID, math.degrees(ra), math.degrees(dec), gSize) 2170 for pID, ra, dec, gSize in finalStacks] 2171 2172 Logger.addMessage("Found all product positions") 2173 2174 # newFiltData 2175 allFilters = sorted(set(stack.filterID for stack in stacks)) 2176 # @FIXME: no nustep in OSA 2177 2178 2179 filtNuGroups = ( 2180 sorted(set((stack.filterID, stack.nustep) for stack in stacks)) 2181 if self.sysc.hasMicroStep else None) 2182 # First sort out all groups of filterID,microstep - find average 2183 # number of stacks and totalExpTime 2184 # Rearrange, so best nustep in each pointing is chosen. 
2185 # @FIXME: no nustep in OSA 2186 microstepDict = {} 2187 self.newFiltData = [] 2188 nEpochDict = {} 2189 for filterID in allFilters: 2190 isDeepFilter = False 2191 for point in finalStacks: 2192 fieldID = point[0] 2193 pos = (math.radians(point.ra), math.radians(point.dec)) 2194 nuValsTExp = [] 2195 dataNGood = [] 2196 if self.sysc.hasMicroStep: 2197 for fID, microStep in filtNuGroups: 2198 if fID == filterID: 2199 times = [] 2200 for stack in stacks: 2201 if stack.filterID == filterID \ 2202 and stack.nustep == microStep \ 2203 and not (self.sysc.hasOffsetPos and point.off != 2204 astro.getOffSetPos(stack.offsetX, stack.offsetY)): 2205 # match Multiframe to bestPosition 2206 stackCentre = (math.radians(stack.raBase), 2207 math.radians(stack.decBase)) 2208 2209 if abs(pos[1] - stackCentre[1]) \ 2210 <= math.radians(point.gSize): 2211 if astro.angularSep(stackCentre, pos) \ 2212 < math.radians(point.gSize): 2213 times.append(stack.totalExpTime) 2214 2215 if times: 2216 tExpTime = sum(times) 2217 nuValsTExp.append((microStep, tExpTime)) 2218 dataNGood.append(len(times)) 2219 else: 2220 times = [] 2221 for stack in stacks: 2222 if stack.filterID == filterID \ 2223 and not (self.sysc.hasOffsetPos and point.off != 2224 astro.getOffSetPos(stack.offsetX, stack.offsetY)): 2225 # match Multiframe to bestPosition 2226 stackCentre = (math.radians(stack.raBase), 2227 math.radians(stack.decBase)) 2228 2229 if abs(pos[1] - stackCentre[1]) \ 2230 <= math.radians(point.gSize): 2231 if astro.angularSep(stackCentre, pos) \ 2232 < math.radians(point.gSize): 2233 times.append(stack.totalExpTime) 2234 2235 if times: 2236 tExpTime = sum(times) 2237 nuValsTExp.append((1, tExpTime)) 2238 dataNGood.append(len(times)) 2239 2240 # Find best microstepping level for filter/pointing and number 2241 # of stacks for pointing,filter,microstep 2242 if nuValsTExp: 2243 # @FIXME: No nus in OSA 2244 tExpMax = max(tExp for nus, tExp in nuValsTExp) 2245 microstepDict[(filterID, fieldID)] = \ 2246 [nus for nus, tExp in nuValsTExp if tExp == tExpMax][-1] 2247 2248 2249 index = [ii for ii, (nus, tExp) in enumerate(nuValsTExp) 2250 if tExp == tExpMax][-1] 2251 2252 # is nEpochs >1 2253 isDeepFilter = sum(dataNGood) > 1 or isDeepFilter 2254 if sum(dataNGood) > 1 and self.displayDeep: 2255 Logger.addMessage("Multiple epochs in pointing RA %s, " 2256 "DEC %s, filter %s" % 2257 (point.ra, point.dec, filterID)) 2258 # Want no of epochs of best nustep 2259 nEpochDict[(filterID, fieldID)] = dataNGood[index] 2260 2261 # Now decide if deep or shallow 2262 self.isSurveyDeep = self.isSurveyDeep or isDeepFilter 2263 self.newFiltData.append((filterID, 1, int(isDeepFilter))) 2264 2265 Logger.addMessage("Checked filters and epochs") 2266 2267 # Put filter and positions together to get products and sort out 2268 # final output. 2269 self.newPosData = [] 2270 tileInfo = [] 2271 newProdID = (len(currentSetup.productData["stack"]) + 1 2272 if not recreate else 1) 2273 2274 for pos in finalStacks: 2275 for filtID, _nPass, _isSyn in self.newFiltData: 2276 # check if this already exists. 2277 # Do products for all filters, 2278 # shallow or deep 2279 # @TODO: must tell if filter for this pointing. 2280 2281 2282 micro = microstepDict.get((filtID, pos.fieldID)) 2283 if micro: 2284 prodID = (self.getProdID(pos, filtID, 2285 currentSetup.productData["stack"], recreate) 2286 or newProdID) 2287 2288 isNew = prodID == newProdID 2289 if isNew: 2290 newProdID += 1 2291 # Make combinations of self.sysc.has?? 
2292 2293 stackProd = [prodID, pos.fieldID, filtID] 2294 if self.sysc.hasMicroStep: 2295 stackProd.extend([micro]) 2296 stackProd.extend([pos.ra, pos.dec, pos.gSize]) 2297 if self.sysc.hasOffsetPos: 2298 stackProd.extend([pos.off]) 2299 if self.sysc.hasVarOrient: 2300 stackProd.extend([pos.pa]) 2301 2302 if recreate or isNew: 2303 self.productData["stack"].append(Stack(*tuple(stackProd))) 2304 2305 if self.isTile and self.sysc.hasOffsetPos: 2306 if pos.fieldID in self.stackTileDict: 2307 tileID = self.stackTileDict[pos.fieldID] 2308 tileInfo.append((tileID, filtID, micro, pos.off, 2309 prodID)) 2310 else: 2311 Logger.addMessage( 2312 "Field ID %s not in self.stackTileDict for " 2313 "pos %s, filter %s" % (pos.fieldID, pos, filtID)) 2314 2315 self.newPosData.append((pos.fieldID, pos.ra, pos.dec, pos.gSize)) 2316 2317 if not self.sysc.hasOffsetPos or not self.isTile: 2318 return 2319 2320 # Do tiles 2321 newProdID = (len(currentSetup.productData["tile"]) + 1 2322 if not recreate else 1) 2323 2324 for pos in finalTiles: 2325 # PID,ra,dec,stkRad,fieldID,pA 2326 tInfo = [(filtID, micro, offPos, stkProdID) 2327 for tID, filtID, micro, offPos, stkProdID in tileInfo 2328 if tID == pos[0]] 2329 # Now sort into filters 2330 filterIDs = sorted(set(fID for fID, _ms, _oP, _sPID in tInfo)) 2331 for filtID in filterIDs: 2332 isNew = False 2333 # Make sure each tile product has enough stack products 2334 offPosList = sorted(oP for fID, _ms, oP, _sPID in tInfo 2335 if fID == filtID) 2336 if offPosList != range(6) and offPosList != range(3): 2337 Logger.addMessage("<Info> Tile product at position %s %s, " 2338 "filterID %s does not have the complete set of input " 2339 "stacks. Will not create it." % (pos[1], pos[2], filtID), 2340 echoOff=self.progID == self.sysc.scienceProgs.get("VHS")) 2341 else: 2342 tileProdID = self.getProdID(pos, filtID, 2343 currentSetup.productData["tile"], recreate) or newProdID 2344 2345 if tileProdID == newProdID: 2346 isNew = True 2347 newProdID += 1 2348 2349 micro = tInfo[0][1] 2350 2351 self.productLinks.extend((tileProdID, sPID, "tile", "stack") 2352 for fID, _ms, _oP, sPID in tInfo if fID == filtID) 2353 2354 tile = Tile(tileProdID, pos[0], filtID, micro, pos[1], 2355 pos[2], pos[3], pos[4]) 2356 2357 if recreate or isNew: 2358 self.productData["tile"].append(tile)
2359 2360 #-------------------------------------------------------------------------- 2361
2362 - def getProdID(self, pos, filterID, currentProdData, recreate):
2363 """ 2364 """ 2365 # @TODO: What type is pos? tuple or namedtuple? 2366 fieldID = pos[0] 2367 productID = 0 2368 if not recreate: 2369 productIDs = [prod.productID for prod in currentProdData 2370 if prod.fieldID == fieldID and prod.filterID == filterID] 2371 2372 if len(productIDs) == 1: 2373 productID = productIDs[0] 2374 2375 return productID
2376 2377 #-------------------------------------------------------------------------- 2378
2379 - def checkGalacticStatus(self, finalStacks, progTable):
2380 """ @return: True, if the programme is at low Galactic latitude. 2381 @rtype: bool 2382 """ 2383 if not progTable.isNonSurvey(): 2384 return 2385 2386 galCentre = [math.radians(192.25), math.radians(27.4)] 2387 coords = numpy.array([(stack[1], stack[2]) for stack in finalStacks]) 2388 bArray = numpy.arcsin(numpy.cos(coords[:, 1]) * numpy.cos(galCentre[1]) 2389 * numpy.cos(coords[:, 0] - galCentre[0]) + numpy.sin(coords[:, 1]) 2390 * numpy.sin(galCentre[1])) 2391 2392 medAbsGalLat = numpy.median(numpy.abs(bArray)) 2393 isLGL = medAbsGalLat < math.radians(15) 2394 Logger.addMessage("Median absolute galactic latitude is %4.1f: " 2395 "%sGALACTIC survey" % (math.degrees(medAbsGalLat), 2396 "a " if isLGL else "an EXTRA")) 2397 return isLGL
2398 2399 #-------------------------------------------------------------------------- 2400
2401 - def findTiles(self, finalStacks, maxSep, recreate, curTilePos):
2402 """ 2403 """ 2404 possTiles = [] 2405 tilAttr = "ra dec" 2406 if self.sysc.hasOffsetPos: 2407 tilAttr += " off" 2408 if self.sysc.hasVarOrient: 2409 tilAttr += " pa" 2410 2411 tilAttr += " fieldID" 2412 ActTile = namedtuple("ActTile", tilAttr) 2413 for stack in finalStacks: 2414 tilePos = astro.findTileCentre((stack.ra, stack.dec), 2415 stack.off, stack.pa) 2416 # Add in 0 offsetPos to use same tools 2417 possTiles.append(ActTile(tilePos[0], tilePos[1], 0, stack.pa, 2418 stack.fieldID)) 2419 2420 self.isTile = True 2421 possTiles = sorted(possTiles, key=itemgetter(1)) 2422 2423 # Simpler set up? 2424 finalTiles = self.groupbyPosition(possTiles, maxSep, curTilePos, 2425 recreate, onceOnly=True) 2426 2427 return self.matchStacksTiles(finalTiles, finalStacks)
2428 2429 #-------------------------------------------------------------------------- 2430
2431 - def groupbyPosition(self, interMedStacks, maxSep, currentPositions, 2432 recreate=False, onceOnly=False):
2433 """ Groups stacks according to following algorithm. 2434 """ 2435 Logger.addMessage("Number of intermediate %ss is %s" 2436 % (self.productType, len(interMedStacks)), 2437 alwaysLog=False) 2438 2439 numberGroups = len(currentPositions) 2440 nUngrouped = len(interMedStacks) 2441 hasImproved = True 2442 nIterations = 0 2443 # What is the point of orig positions? 2444 oldPositions = currentPositions[:] if not recreate else [] 2445 isComplete = False 2446 while (not isComplete and hasImproved and nIterations < 3): 2447 # try merging - recalculate centres and sizes 2448 # check if anything left over - if so, repeat process 2449 Logger.addMessage("Checking to see if %ss match current " 2450 "positions: iteration %d" % 2451 (self.productType, nIterations), 2452 alwaysLog=False) 2453 2454 possStacks = interMedStacks[:] 2455 Logger.addMessage("Grouping %d %ss" % (len(possStacks), 2456 self.productType), 2457 alwaysLog=False) 2458 2459 # Use initial positions,sizes (if any). 2460 if recreate: 2461 ungroupedStacks = possStacks 2462 groups = [] 2463 fieldID = 0 2464 else: 2465 groups, ungroupedStacks = \ 2466 self.compareStackPosit(currentPositions, possStacks) 2467 Logger.addMessage("Not recreating. No Groups: %d, no. " 2468 "ungrouped %ss: %d" 2469 % (len(groups), self.productType, 2470 len(ungroupedStacks)), 2471 alwaysLog=False) 2472 emptyGroups = [pID for pID, groupStacks in groups 2473 if not groupStacks] 2474 currentPositions = \ 2475 self.removeOldGroups(currentPositions, emptyGroups) 2476 fieldID = (currentPositions[-1][0] + 1 2477 if currentPositions else 0) 2478 if ungroupedStacks: 2479 Logger.addMessage("There are %d %ss that cannot be placed in " 2480 "current groups. New deep %s positions will be created" 2481 % (len(ungroupedStacks), self.productType, self.productType), 2482 alwaysLog=False) 2483 2484 # Check if any stacks left after assignment 2485 # Take left over objects - find new clusters 2486 newGroups = self.findNewGroups(ungroupedStacks, maxSep, fieldID) 2487 groups.extend(newGroups) 2488 Logger.addMessage("Found %d new groups" % len(newGroups), 2489 alwaysLog=False) 2490 2491 # find centres and sizes 2492 currentPositions = self.recalculateCentres(groups, maxSep) 2493 Logger.addMessage("Found new centres", alwaysLog=False) 2494 # @TODO: Why is this here? 2495 # self.compareStackPosit(currentPositions, possStacks) 2496 Logger.addMessage("Check exclusivity", alwaysLog=False) 2497 # now check for exclusivity 2498 closePairs = self.checkExclusivity(currentPositions) 2499 Logger.addMessage("Found %d close pairs" % len(closePairs), 2500 alwaysLog=False) 2501 while len(closePairs) > 0: 2502 fieldID = currentPositions[-1][0] + 1 2503 findNewPos = self.reJigClosePairs(closePairs, groups, fieldID, 2504 currentPositions) 2505 # remove old close pair positions and add in new positions. 
2506 currentPositions.extend(findNewPos) 2507 closePids = [pair[0] for pair in closePairs] 2508 posTupSize = len(currentPositions[0]) 2509 closePids.extend(pair[posTupSize] for pair in closePairs) 2510 depPids = sorted(set(closePids)) 2511 currentPositions = self.removeOldGroups(currentPositions, depPids) 2512 closePairs = self.checkExclusivity(currentPositions) 2513 Logger.addMessage("Iterating close pairs: Found %d close pairs" 2514 % len(closePairs), alwaysLog=False) 2515 2516 # find number of groups and number of ungrouped stacks 2517 groups, ungroupedStacks = self.compareStackPosit(currentPositions, 2518 possStacks) 2519 2520 # Do final testing 2521 if (len(ungroupedStacks) < nUngrouped or ( 2522 len(ungroupedStacks) == nUngrouped and 2523 len(groups) < numberGroups)): 2524 hasImproved = True 2525 oldPositions = currentPositions[:] 2526 else: 2527 hasImproved = False 2528 currentPositions = oldPositions[:] 2529 2530 if onceOnly: 2531 hasImproved = False 2532 2533 if not ungroupedStacks: 2534 isComplete = True 2535 2536 nUngrouped = len(ungroupedStacks) 2537 numberGroups = len(groups) 2538 nIterations += 1 2539 recreate = False 2540 Logger.addMessage("Number of groups is %d and number of ungrouped " 2541 "%ss is %d" % (len(groups), self.productType, 2542 len(ungroupedStacks)), 2543 alwaysLog=False) 2544 2545 # Make sure groups match currentPositions 2546 groups, ungroupedStacks = self.compareStackPosit(currentPositions, 2547 possStacks) 2548 2549 Logger.addMessage("Final number of groups is %d and number of " 2550 "ungrouped %ss is %d" 2551 % (len(groups), self.productType, len(ungroupedStacks)), 2552 alwaysLog=False) 2553 2554 return currentPositions
2555 2556 #-------------------------------------------------------------------------- 2557
2558 - def getMergedGroup(self, pair, groups, fieldID=None):
2559 """ Merges two groups into one. 2560 """ 2561 2562 2563 2564 gDict = dict(groups) 2565 fullGroup = gDict[pair.pID1] + gDict[pair.pID2] 2566 if fieldID is None: 2567 pos1 = [pair.pID1, pair.ra1, pair.dec1, pair.size1 * pair.frac] 2568 pos2 = [pair.pID2, pair.ra2, pair.dec2, pair.size2 * pair.frac] 2569 if self.sysc.hasOffsetPos: 2570 pos1.append(pair.off1) 2571 pos2.append(pair.off2) 2572 if self.sysc.hasVarOrient: 2573 pos1.append(pair.pa1) 2574 pos2.append(pair.pa2) 2575 newPositions = [self.Position(*tuple(pos1)), self.Position(*tuple(pos2))] 2576 2577 else: 2578 newPositions = \ 2579 self.recalculateCentres([(fieldID, fullGroup)], 2580 max(pair.size1, pair.size2)) 2581 2582 _newGroup, ungroupStacks = \ 2583 self.compareStackPosit(newPositions, fullGroup) 2584 2585 return len(ungroupStacks), newPositions
2586 2587 #-------------------------------------------------------------------------- 2588
2589 - def compareStackPosit(self, positions, possStacks):
2590 """ ? 2591 """ 2592 # @FIXME: positions named tuple 2593 2594 decList = [st[1] for st in possStacks] 2595 groups = [] 2596 self.tileStackMatch = [] 2597 stackGrp = [] 2598 usedStacks = [] 2599 for pos in positions: 2600 tileGrp = [] 2601 if self.sysc.hasOffsetPos: 2602 grpOffID = pos.off 2603 if self.sysc.hasVarOrient: 2604 grpPosAng = pos.pa 2605 gSize = math.radians(pos.gSize) 2606 groupPos = (pos.ra, pos.dec) 2607 groupStacks = [] 2608 2609 # Only select ones in range 2610 minDec, maxDec = pos.dec - gSize, pos.dec + gSize 2611 begIdx = max((bisect(decList, minDec) - 1), 0) 2612 endIdx = min((bisect(decList, maxDec) + 1), len(decList)) 2613 for index in range(begIdx, endIdx, 1): 2614 stInfo = possStacks[index] 2615 position = stInfo[:2] 2616 offsetMatch = (stInfo.off == grpOffID 2617 if self.sysc.hasOffsetPos else True) 2618 posAngMatch = (abs(astro.angDiff(grpPosAng, stInfo.pa)) < self.posAngTol 2619 if self.sysc.hasVarOrient else True) 2620 if abs(groupPos[1] - position[1]) >= gSize: 2621 continue 2622 if (astro.angularSep(groupPos, position) < gSize 2623 and index not in usedStacks): 2624 if (offsetMatch and posAngMatch): 2625 groupStacks.append(stInfo) 2626 usedStacks.append(index) 2627 if self.isTile: 2628 tileGrp.append(stInfo.fieldID) 2629 stackGrp.append((stInfo.fieldID, pos.fieldID)) 2630 if groupStacks: 2631 groups.append((pos.fieldID, groupStacks)) 2632 else: 2633 Logger.addMessage( 2634 "<WARNING> No intermediates found for groupID=%s" % pos.fieldID, 2635 echoOff=self.progID == self.sysc.scienceProgs.get("VHS")) 2636 2637 if self.isTile: 2638 self.tileStackMatch.append((pos.fieldID, tileGrp)) 2639 2640 if self.isTile: 2641 self.stackTileDict = dict(stackGrp) 2642 2643 unusedStacks = [st for ii, st in enumerate(possStacks) 2644 if ii not in usedStacks] 2645 2646 return groups, unusedStacks
2647 2648 #-------------------------------------------------------------------------- 2649
2650 - def reJigClosePairs(self, closePairs, groups, pID, positions):
2651 """ ? 2652 """ 2653 maxPairFrac = 0.1 2654 newPosits = [] 2655 if closePairs: 2656 if len(closePairs) < maxPairFrac * len(groups): 2657 # if only a few 2658 for pair in closePairs: 2659 nOutliersMerge, newPosMerge = self.getMergedGroup(pair, groups, pID) 2660 nOutliersResize, newPosResize = self.getMergedGroup(pair, groups) 2661 pID += 1 2662 newPosits += \ 2663 (newPosMerge if nOutliersMerge <= nOutliersResize else 2664 newPosResize) 2665 else: 2666 # lots - very difficult 2667 avFrac = numpy.median([pair[8] for pair in closePairs]) 2668 for pos in positions: 2669 ra, dec, gSize = pos[1:4] 2670 if self.sysc.isVSA(): 2671 newPosits.append((pID, ra, dec, 0.9 * avFrac * gSize, 2672 pos[4], pos[5])) 2673 else: 2674 newPosits.append((pID, ra, dec, 0.9 * avFrac * gSize)) 2675 2676 return newPosits
2677 2678 #-------------------------------------------------------------------------- 2679
2680 - def checkExclusivity(self, newPositions):
2681 """ Checks group positions to see if they are exclusive. 2682 """ 2683 # check later ones with 2 x maxDist 2684 # @TODO: namedtuple for pairs. 2685 2686 2687 attrs = ['pID', 'ra', 'dec', 'size'] 2688 if self.sysc.hasOffsetPos: 2689 attrs.append('off') 2690 if self.sysc.hasVarOrient: 2691 attrs.append('pa') 2692 pairAttr = '' 2693 for pairPos in ['1', '2']: 2694 pairAttr += ' '.join(['%s%s' % (attr, pairPos) for attr in attrs]) + ' ' 2695 2696 pairAttr += "frac" 2697 2698 2699 ClosePair = namedtuple("ClosePair", pairAttr) 2700 2701 finPosDec = sorted(newPositions, key=itemgetter(2)) 2702 closePairs = [] 2703 for ii, pos in enumerate(finPosDec): 2704 _pID, ra, dec, gSize = pos[:4] 2705 2706 isLessMaxDist = True 2707 index = ii + 1 2708 isPosMatch = True 2709 if self.sysc.isVSA(): 2710 # Check offsetIDs 2711 offID = pos[4] 2712 posAng = pos[5] 2713 2714 while isLessMaxDist and index < len(finPosDec): 2715 if self.sysc.isVSA() and (offID != finPosDec[index][4] or 2716 abs(astro.angDiff(posAng, finPosDec[index][5])) > 2717 2. * self.posAngTol): 2718 isPosMatch = False 2719 2720 totalSep = gSize + finPosDec[index][3] 2721 isLessMaxDist = finPosDec[index][2] - dec < math.radians( 2722 totalSep) 2723 2724 if isLessMaxDist and isPosMatch: 2725 dist = astro.angularSep((ra, dec), 2726 (finPosDec[index][1], finPosDec[index][2])) 2727 2728 if dist < math.radians(totalSep): 2729 dist /= math.radians(totalSep) 2730 closePairs.append(ClosePair(*pos + finPosDec[index] + (dist,))) 2731 2732 index += 1 2733 2734 return closePairs
2735 2736 #-------------------------------------------------------------------------- 2737
2738 - def recalculateCentres(self, groups, maxDist):
2739 """ Recalculate group centres and sizes. 2740 """ 2741 # @TODO - tangential coordinates from first one? 2742 minSize = self.sysc.minRaDecExtentStackDeg # 1 arcmin 2743 newPositions = [] 2744 for pID, groupStacks in groups: 2745 if len(groupStacks) == 0: 2746 Logger.addMessage("<WARNING> Group has no inputs: %s" % pID) 2747 continue 2748 if len(groupStacks) > 1: 2749 groupCartCoords = astro.convSkyToCartesian( 2750 numpy.array([(pos[0], pos[1]) for pos in groupStacks])) 2751 2752 gCentre = astro.calcGroupCentre(groupCartCoords) 2753 raMed, decMed = gCentre[:2] 2754 else: 2755 raMed, decMed = groupStacks[0][:2] 2756 2757 if self.sysc.hasOffsetPos: 2758 offID = groupStacks[0].off 2759 if self.sysc.hasVarOrient: 2760 grpPA = astro.getMedAngle([pos.pa for pos in groupStacks], 2761 1.5 * self.posAngTol) 2762 2763 distCentre = [] 2764 # @FIXME: groupStacks - namedtuple 2765 for index in range(len(groupStacks)): 2766 distCentre.append(astro.angularSep((raMed, decMed), ( 2767 groupStacks[index][0], groupStacks[index][1]))) 2768 2769 typDist = min(max(minSize, 1.1 * math.degrees(max(distCentre))), maxDist) 2770 2771 pos = [pID, raMed, decMed, typDist] 2772 2773 if self.sysc.hasOffsetPos: 2774 pos.append(offID) 2775 if self.sysc.hasVarOrient: 2776 pos.append(grpPA) 2777 2778 newPositions.append(self.Position(*tuple(pos))) 2779 2780 return newPositions
2781 2782 #------------------------------------------------------------------------- 2783
2784 - def findNewGroups(self, possStacks, maxSep, fieldID):
2785 """ Takes ungroupedStacks and groups them together. 2786 """ 2787 groups = [] 2788 for position in possStacks: 2789 # compare to each group. 2790 if groups: 2791 groups = self.findBestGroup(groups, position, maxSep) 2792 else: 2793 groups.append((fieldID, [position])) 2794 2795 return groups
2796 2797 #------------------------------------------------------------------------- 2798
2799 - def findBestGroup(self, groups, position, maxSep):
2800 """ 2801 Criteria: All objects must be within 2*maxSep (diameter) of all others 2802 Count best matches if <scale*maxSep. 2803 2804 """ 2805 # @TODO: Original settings were 1.5 and 2. I am not sure why. 2806 # 1.0 and 1.5 makes more sense 2807 scale = 1.0 # 1.5 2808 maxWidth = 1.5 # 2. 2809 nMtcGrp = [] 2810 for pID, groupStacks in groups: 2811 nMatch = 0 2812 isCorPoint = True 2813 # Can this be done more efficiently (using numpy?) 2814 for gPos in groupStacks: 2815 if abs(gPos[1] - position[1]) > maxWidth * math.radians(maxSep): 2816 nMatch = 0 2817 break 2818 rad = astro.angularSep(gPos[:2], position[:2]) 2819 if rad > 2. * math.radians(maxSep): 2820 nMatch = 0 2821 break 2822 if (self.sysc.hasOffsetPos and gPos.off != position.off): 2823 isCorPoint = False 2824 if (self.sysc.hasVarOrient and abs(astro.angDiff(gPos.pa, position.pa)) > 2825 scale * self.posAngTol): 2826 isCorPoint = False 2827 if isCorPoint and rad < scale * math.radians(maxSep): 2828 nMatch += 1 2829 2830 nMtcGrp.append((pID, nMatch)) 2831 2832 maxMatch = max(nMatch for pID, nMatch in nMtcGrp) 2833 if maxMatch > 0: 2834 match = False 2835 index = 0 2836 while not match: 2837 if nMtcGrp[index][1] == maxMatch: 2838 match = True 2839 fieldID = nMtcGrp[index][0] 2840 newGroups = [] 2841 for pID, groupStacks in groups: 2842 if pID == fieldID: 2843 groupStacks.extend([position]) 2844 2845 newGroups.append((pID, groupStacks)) 2846 2847 groups = newGroups 2848 2849 index += 1 2850 else: 2851 newPid = max(pID for pID, groupStacks in groups) + 1 2852 groups.append((newPid, [position])) 2853 2854 return groups
2855 2856 #-------------------------------------------------------------------------- 2857
2858 - def matchStacksTiles(self, finalTiles, finalStack):
2859 """ Have matches, just make sure complete and consistent. 2860 """ 2861 # @TODO: Is this the correct place for bad tiles. Should do it at 2862 # a product level, not a pointing... 2863 stacks = sorted(finalStack) 2864 matches = dict(self.tileStackMatch) 2865 badTiles = [] 2866 tilefIDs = set() 2867 for tile in finalTiles: 2868 if tile[0] in matches: 2869 fIDs = matches[tile[0]] 2870 offsets = sorted(stacks[fID][4] for fID in fIDs) 2871 if offsets != range(6) and offsets != range(3): 2872 badTiles.append(tile[0]) 2873 Logger.addMessage("<WARNING> Not enough pawprints to " 2874 "create a complete tile in tile %d: excluding tile " 2875 "pointing from list and resorting" % tile[0]) 2876 else: 2877 tilefIDs.update(fIDs) 2878 else: 2879 badTiles.append(tile[0]) 2880 Logger.addMessage("<WARNING> No pawprints found in tile " 2881 "field %d: excluding tile pointing from list and resorting" 2882 % tile[0]) 2883 2884 # Raise any issues, else return 2885 allPPts = set(stack[0] for stack in finalStack) 2886 2887 # Compare sets 2888 if allPPts.difference(tilefIDs): 2889 Logger.addMessage("<WARNING> The following pawprint fields are not" 2890 " being tiled: %s" 2891 % ','.join(str(fID) for fID in allPPts.difference(tilefIDs))) 2892 2893 # remove bad tiles 2894 finalTiles = sorted(tile for tile in finalTiles 2895 if tile[0] not in badTiles) 2896 2897 return finalTiles
2898 2899 #-------------------------------------------------------------------------- 2900
2901 - def removeOldGroups(self, finalPositions, depPids):
2902 """ Removes deprecated Pids and then reindexes. 2903 """ 2904 dodgyGrp = [] 2905 for index, pos in enumerate(finalPositions): 2906 if pos[0] in depPids: 2907 dodgyGrp.append(index) 2908 2909 for ii, index in enumerate(dodgyGrp): 2910 finalPositions.pop(index - ii) 2911 2912 return [self.Position(*tuple([ii] + list(pos[1:]))) 2913 for ii, pos in enumerate(finalPositions)]
2914 2915 #------------------------------------------------------------------------------ 2916
2917 -class SynopticSetup(object):
2918 """ 2919 This determines the setup of the multi-epoch tables based on the details in 2920 Programme. 2921 2922 """ 2923 bandMergingCrit = None #: Correlated band pass time scale. 2924 bestMatchTables = None #: List of best match tables for this programme. 2925 corBMTable = None #: Correlated best-match table name. 2926 dateRangeOfTable = None #: DateRange applied to a particular table. 2927 isDeep = False #: Set up survey as a deep survey? 2928 mainDateRange = None #: DateRange of synoptic data. 2929 programme = None #: Initialised Programme table for this archive. 2930 synopticMergeLogTable = None #: Synoptic merge-log table name. 2931 synopticSourceTable = None #: Synoptic source table name. 2932 sysc = None #: Initialised SystemConstants for this archive. 2933 uncorBMTable = None #: Uncorrelated best-match table name. 2934 varBMTable = None #: BestMatch table used for the Variability table. 2935 isCorSingleEpoch = False #: Use single epoch in SynopticSource table 2936 specialFilter = None #: Is there a special filter 2937 #-------------------------------------------------------------------------- 2938
2939 - def __init__(self, progTable, dateRange=None, isDeep=None, 2940 extBandMergeCrit=None):
2941 """ 2942 Processes the information in Programme to get the correct setup. 2943 2944 @param progTable: Programme table for current database with the 2945 current row set to that of the desired programme. 2946 @type progTable: df.ProgrammeTable 2947 @param dateRange: Date range this synoptic data covers (default: all). 2948 @type dateRange: DateRange 2949 @param isDeep: If True, override Programme default to suggest this 2950 is a deep survey. 2951 @type isDeep: bool 2952 @param extBandMergeCrit: Optionally override Programme default band 2953 merge criterion with this value. 2954 @type extBandMergeCrit: float 2955 2956 """ 2957 self.neighTables = [] 2958 self.bestMatchTables = [] 2959 self.dateRangeOfTable = {} 2960 self.programme = progTable 2961 self.sysc = self.programme._db.sysc 2962 self.mainDateRange = dateRange or self.sysc.obsCal.dateRange() 2963 2964 self.corBMTable = \ 2965 self.programme.getAcronym() + 'SourceXSynopticSourceBestMatch' 2966 2967 self.uncorBMTable = \ 2968 self.programme.getAcronym() + 'SourceXDetectionBestMatch' 2969 2970 # Parse synopticSetup string to setup the rest 2971 isDeep = self.programme.isDeep() if isDeep is None else isDeep 2972 if isDeep: 2973 self.bandMergingCrit = \ 2974 self.programme.getAttr("bandMergingCriterion") 2975 2976 if self.bandMergingCrit <= 0: 2977 self.bandMergingCrit = extBandMergeCrit 2978 2979 if self.bandMergingCrit: 2980 self.synopticMergeLogTable = \ 2981 self.programme.getAcronym() + 'SynopticMergeLog' 2982 2983 self.synopticSourceTable = \ 2984 self.programme.getAcronym() + 'SynopticSource' 2985 2986 self.parseSynString(self.programme.getAttr("synopticSetup")) 2987 2988 # Consistency checks 2989 if not self.bandMergingCrit \ 2990 and self.corBMTable in self.bestMatchTables: 2991 2992 raise ProgrammeBuilder.CuError("Correlated synoptic programme" 2993 " specified, but no correlation time scale. Please update" 2994 " bandMergingCriterion in Programme.") 2995 2996 if (self.bandMergingCrit and not self.isCorSingleEpoch and 2997 self.corBMTable not in self.bestMatchTables): 2998 2999 Logger.addMessage("<WARNING> A correlation time scale is" 3000 " specified, but synopticSetup string specifies uncorrelated" 3001 " tables only. Please update Programme.") 3002 3003 if not self.varBMTable: 3004 if len(self.bestMatchTables) > 1: 3005 raise ProgrammeBuilder.CuError("More than one best match " 3006 "table is specified, but the variability table is not. " 3007 "Please update synopticSetup in Programme.") 3008 3009 self.varBMTable = self.bestMatchTables[0] 3010 if len(self.bestMatchTables) > 1 and not (self.dateRangeOfTable or self.isCorSingleEpoch): 3011 Logger.addMessage("<WARNING> There are two Best Match tables, but" 3012 " no date range specified. This is inefficient, and unnecessary." 3013 " Please correct this in the future using synopticSetup in" 3014 " Programme.") 3015 if len(self.bestMatchTables) > 1 and self.isCorSingleEpoch and self.varBMTable == self.corBMTable: 3016 Logger.addMessage("<WARNING> There are two Best Match tables, but" 3017 " the Variability table is assigned to the correlated Best Match table, " 3018 " which only has one epoch") 3019 for bmTable in self.dateRangeOfTable: 3020 if bmTable not in self.bestMatchTables: 3021 Logger.addMessage("<WARNING> Date range is specified for %s," 3022 " but this table is not in the list of BestMatch tables." 3023 " Please correct this inconsistency in the future using" 3024 " synopticSetup in Programme." 
% bmTable) 3025 3026 # Fill in missing values 3027 for bmTable in self.bestMatchTables: 3028 if bmTable not in self.dateRangeOfTable: 3029 self.dateRangeOfTable[bmTable] = self.mainDateRange
3030 3031 #-------------------------------------------------------------------------- 3032
3033 - def getBMTableAttr(self, bmTable):
3034 """@return: Named tuple of the BestMatch table attributes 3035 @rtype: namedtuple() 3036 """ 3037 isCorrel = 'SynopticSource' in bmTable 3038 if isCorrel: 3039 secObjID = 'synopticID' 3040 secName = self.programme.getSynopticSourceTable() 3041 else: 3042 secObjID = 'objID' 3043 secName = self.programme.getDetectionTable() 3044 3045 dateRange = self.getTimeRange(isCorrel) 3046 3047 BestMatch = namedtuple("BestMatch", 3048 "secObjID secName isCorrel dateRange tableName") 3049 3050 return BestMatch(secObjID, secName, isCorrel, dateRange, bmTable)
3051 3052 #-------------------------------------------------------------------------- 3053
3054 - def getSynopticSourceTable(self):
3055 """@return: SynopticSourceTable name 3056 @rtype: str 3057 """ 3058 return self.synopticSourceTable
3059 3060 #-------------------------------------------------------------------------- 3061
3062 - def getCorrelNeighTable(self):
3063 """@return: Returns correlated neighbour table name if it exists 3064 @rtype: str 3065 """ 3066 corTables = [table for table in self.neighTables 3067 if 'SynopticSource' in table] 3068 3069 if corTables: 3070 return corTables[0]
3071 3072 #-------------------------------------------------------------------------- 3073
3074 - def getSynopticMergeLogTable(self):
3075 """@return: SynopticMergeLogTable name 3076 @rtype: str 3077 """ 3078 return self.synopticMergeLogTable
3079 3080 #-------------------------------------------------------------------------- 3081
3082 - def getSynopNeighTables(self):
3083 """@return: List of all the Synoptic neighbour tables produced 3084 @rtype: list(str) 3085 """ 3086 return self.neighTables
3087 3088 #-------------------------------------------------------------------------- 3089
3090 - def getVarNeighTable(self):
3091 """@return: Name of the Neighbour table used by in the 3092 Variability table 3093 @rtype: str 3094 """ 3095 return self.varBMTable.replace('BestMatch', '')
3096 3097 #-------------------------------------------------------------------------- 3098
3099 - def getTimeRange(self, isCorrelated):
3100 """@return: The time range for a given BestMatch table 3101 @rtype: namedtuple(DateTime, DateTime) 3102 """ 3103 for bestMatchTable in self.bestMatchTables: 3104 isCorrect = (('SynopticSource' in bestMatchTable and isCorrelated) 3105 or ('Detection' in bestMatchTable and not isCorrelated)) 3106 3107 if isCorrect: 3108 return self.dateRangeOfTable[bestMatchTable] 3109 3110 return self.mainDateRange
3111 3112 #-------------------------------------------------------------------------- 3113
3114 - def getSynopticTableID(self, neighTable):
3115 """@return: The time range for a given BestMatch table 3116 @rtype: namedtuple(DateTime, DateTime) 3117 """ 3118 if neighTable not in self.getSynopNeighTables(): 3119 raise ProgrammeBuilder.CuError( 3120 "Table %s is not a neighbour table in this programme %s" 3121 % (neighTable, self.programme.getAcronym())) 3122 3123 return {self.uncorBMTable.replace("BestMatch", ''): 1, 3124 self.corBMTable.replace('BestMatch', ''): 3}[neighTable]
3125 3126 #-------------------------------------------------------------------------- 3127
3128 - def isVarTableCorrel(self):
3129 """@return: Is the variability table correlated 3130 @rtype: bool 3131 """ 3132 return self.varBMTable == self.corBMTable
3133 3134 #-------------------------------------------------------------------------- 3135
3136 - def isNonCorrelBMTable(self):
3137 """@return: Is there a SourceXDetectionBM table? 3138 @rtype: bool 3139 """ 3140 return self.uncorBMTable in self.bestMatchTables
3141 3142 #-------------------------------------------------------------------------- 3143
3144 - def isCorrelBMTable(self):
3145 """@return: Is there a SourceXDetectionBM table? 3146 @rtype: bool 3147 """ 3148 return self.corBMTable in self.bestMatchTables
3149 3150 #-------------------------------------------------------------------------- 3151
3152 - def parseSynString(self, synString):
3153 """ Parses the synopticSetup string and processes each substring. 3154 """ 3155 if synString and synString != dbc.charDefault(): 3156 subStrings = synString.split(':') 3157 for sStr in subStrings: 3158 self.process(sStr) 3159 3160 if not self.bestMatchTables: 3161 self.bestMatchTables = ([self.corBMTable] 3162 if self.bandMergingCrit else [self.uncorBMTable]) 3163 self.neighTables = [bmTable.replace('BestMatch', '') 3164 for bmTable in self.bestMatchTables]
3165 #-------------------------------------------------------------------------- 3166
3167 - def process(self, subString):
3168 """ Processes each substring. 3169 """ 3170 if subString.upper() == SystemConstants.bthSubStr: 3171 3172 self.neighTables = [self.uncorBMTable.replace('BestMatch', ''), 3173 self.corBMTable.replace('BestMatch', '')] 3174 self.bestMatchTables = [self.uncorBMTable, self.corBMTable] 3175 3176 elif subString.upper() == SystemConstants.uncSubStr: 3177 self.neighTables = [self.uncorBMTable.replace('BestMatch', '')] 3178 self.bestMatchTables = [self.uncorBMTable] 3179 3180 elif subString.upper() == SystemConstants.corSubStr: 3181 self.neighTables = [self.corBMTable.replace('BestMatch', '')] 3182 self.bestMatchTables = [self.corBMTable] 3183 3184 elif subString.upper().startswith(SystemConstants.varSubStr): 3185 varTabType = subString.split('-')[-1].upper() 3186 if varTabType == SystemConstants.uncSubStr: 3187 self.varBMTable = self.uncorBMTable 3188 3189 elif varTabType == SystemConstants.corSubStr: 3190 self.varBMTable = self.corBMTable 3191 elif (subString.upper().startswith(SystemConstants.corSubStr) and 3192 'ONEEPOCH' in subString): 3193 self.isCorSingleEpoch = True 3194 # Remove correlated BM table from BM tables 3195 self.bestMatchTables = [bmTable for bmTable in self.bestMatchTables 3196 if bmTable != self.corBMTable] 3197 elif (subString.upper().startswith('FIL')): 3198 # information about how to assign a particular filter (e.g. VVV) 3199 parts = subString.split(',') 3200 self.specialFilter = {'Name':parts[1]} 3201 orderList = [] 3202 for ii, part in enumerate(parts): 3203 if ii > 1: 3204 if '-' in part: 3205 key, value = part.split('-') 3206 self.specialFilter[key] = value 3207 orderList.append(key) 3208 self.specialFilter['order'] = orderList 3209 elif ',' in subString: 3210 # time range of component 3211 prts = subString.split(',') 3212 if len(prts) == 3: 3213 # Check to see if dates in cal 3214 dRange = self.sysc.obsCal.dateRange(prts[1], prts[2]) 3215 beginDT = max(dRange.begin, self.mainDateRange.begin) 3216 endDT = min(dRange.end, self.mainDateRange.end) 3217 tableName = {SystemConstants.uncSubStr: self.uncorBMTable, 3218 SystemConstants.corSubStr: self.corBMTable}[prts[0]] 3219 3220 self.dateRangeOfTable[tableName] = DateRange(beginDT, endDT)
3221 3222 #-------------------------------------------------------------------------- 3223 
3224 - def updateSynopticMatchRadius(self):
3225 """ 3226 Calculates a matching radius for the synoptic neighbour table, in the 3227 range 1 to 10 arcsec, based on the density of sources in the source 3228 table and updates RequiredNeighbours in the current database. 3229 3230 """ 3231 Logger.addMessage("Calculating optimal synoptic neighbour radius...") 3232 db = self.programme._db 3233 passbands = df.PassbandList(self.programme).getMfdCols() 3234 areaInfo = [db.query( 3235 selectStr="frameSetID, axis1length*axis2length*xPixSize*yPixSize", 3236 fromStr=self.programme.getMergeLogTable() 3237 + " AS ML, MultiframeDetector AS M, CurrentAstrometry AS C", 3238 whereStr="ML.%s=M.multiframeID AND M.multiframeID=C.multiframeID" 3239 " AND ML.%s=M.extNum AND M.extNum=C.extNum AND M.extNum>0" 3240 " AND M.multiframeID>0 AND M.%s" 3241 % (pFrame + (DepCodes.selectNonDeprecated,))) 3242 for pFrame in passbands] 3243 3244 totalAreaAsecSq = 0 3245 for frameSetID in \ 3246 db.query("frameSetID", self.programme.getMergeLogTable()): 3247 3248 areas = [area for frame in areaInfo for fSetID, area in frame 3249 if fSetID == frameSetID] 3250 3251 totalAreaAsecSq += max(numpy.median(areas), 0) 3252 3253 numSources = db.queryNumRows(self.programme.getSourceTable()) 3254 3255 density = numSources / totalAreaAsecSq if totalAreaAsecSq > 0 else 0 3256 radius = 1 / math.sqrt(math.pi * density) if density > 0 else 0 3257 minMatchRadius = 1 3258 maxMatchRadius = 10 3259 joinCriterion = max(min(radius, maxMatchRadius), minMatchRadius) / 3600 3260 for neighbourTable in self.getSynopNeighTables(): 3261 done = db.update("RequiredNeighbours", 3262 "joinCriterion=%s" % joinCriterion, 3263 where="neighbourTable=%r" % neighbourTable) 3264 3265 Logger.addMessage( 3266 "...updated RequiredNeighbours for " + neighbourTable 3267 if done else "...failed on " + neighbourTable)
3268 3269 #------------------------------------------------------------------------------ 3270
3271 -def commitSchema(archive, comment, isNonSurvey=True):
3272 """ 3273 Commits schema to SVN if new tables created on load database. 3274 3275 @param archive: Connection to database where new tables have been created. 3276 @type archive: DbSession 3277 @param comment: Comment, i.e. new or updated. 3278 @type comment: str 3279 @param isNonSurvey: Commit non-survey schema changes. 3280 @type isNonSurvey: bool 3281 3282 """ 3283 if not archive.isTrialRun \ 3284 and archive.database.startswith(archive.sysc.loadDatabase): 3285 3286 Logger.addMessage("Committing %s schema to SVN..." % comment) 3287 files = (SystemConstants.sqlNonSurveyPath('*.sql') if isNonSurvey else 3288 SystemConstants.sqlScriptPath + os.sep + '*.sql') 3289 3290 # Need to add new neighbour schema scripts etc. even if updating 3291 extp.run("svn add --quiet --force --depth=files " + files) 3292 lines = extp.run("svn commit %s -m '%s %ssurvey programme schema'" % 3293 (files, comment.title(), "non-" if isNonSurvey else '')) 3294 3295 if lines: # Log revision number committed. 3296 Logger.addMessage(lines[-1]) 3297 Logger.addMessage("<IMPORTANT> Please remember to merge SQL " 3298 "schema changes into all active software branches.")
3299 3300 #------------------------------------------------------------------------------ 3301
3302 -def extractTables(tableLines, status):
3303 """ 3304 """ 3305 return dict((line.split()[2][:-1], status.dropTables) 3306 for line in tableLines if 'create table' in line.lower())
3307 3308 #------------------------------------------------------------------------------ 3309