1
2
3
4 """
5 Invoke CU9. Produce list-driven intra-WFCAM remeasurements for a given
6 programme.
7
@TODO: Update to newer CU7 style way of working (e.g. enable ingest,
logging, dropping indices, updating tables, tidying up files etc.)
10
11 @author: N.J.G. Cross
12 @org: WFAU, IfA, University of Edinburgh
13
14 @newfield contributors: Contributors, Contributors (Alphabetical Order)
15 @contributors: R.S. Collins
16 """
17
18 from __future__ import division, print_function
19
20 import os
21 import time
22 from collections import defaultdict
23 import dircache
24 import numpy
25 import pyfits
26 from pyfits import Column
27
28 from wsatools.CLI import CLI
29 import wsatools.DataFactory as df
30 import wsatools.DbConnect.CommonQueries as queries
31 from wsatools.DbConnect.CuSession import CuSession
32 from wsatools.DbConnect.DbSession import Ingester
33 import wsatools.DbConnect.Schema as schema
34 import wsatools.ExternalProcess as extp
35 import wsatools.FitsUtils as fits
36 from wsatools.Logger import Logger
37 import wsatools.Utilities as utils
38 import wsatools.WfcamsrcInterface as wfcamsrc
39 from wsatools.File import File
40
41
42
class Cu9(CuSession):
    """ Produce list-driven remeasurements (data with a survey)
    for a given programme.
    """

    #: Curation Use number for this task.
    cuNum = 9

    #: Never use more than half of the available system memory.
    memoryLimit = 0.5 * utils.getSystemMemoryKb()

    #: Name of the CASU list-driven extraction library routine.
    casuToolName = "cir_imcore_list"

    dateRange = CuSession.sysc.obsCal.dateRange()
    """ Range of observation dates to include.
    """
    dateString = time.strftime("%Y%m%d", time.gmtime())
    """ Date string for output dir date.
    """
    internalOnly = False
    """ Only run internal list-driven photometry programmes.
    """
    runSynop = False
    """ Run list-driven photometry on each individual stack? Just those that
    contribute to the MergeLog table.
    """
    oneOnly = False
    """ Use one FrameGroup only for testing
    """
    skipCats = False
    """ Skip catalogue creation and move directly onto ingest (Testing only)
    """
    isTrialRun = False
    """ Trial run only - set from the --test command-line option; suppresses
    catalogue extraction/ingest.
    """

89
91 """
92 Invoke list-driven photometry for a specified programme in the WFCAM
93 Science Archive. The technique produces consistent photometry across
94 all WFCAM filters for the image data within a given field.
95
96 Method: for the given programme ID, the procedure is: i) establish the
97 list of fields to be analysed; ii) remove any previous remeasurements
98 based on deprecated or updated frame sets;
99 iii)
100
101 @TODO: Break up into functions.
102 @TODO: First just work on MergeLog - forget multi-epoch.
103 @TODO: A lot of work is needed on the schema - the tags etc.
104 @TODO: Run imcore_list detector by detector for increased efficiency?
105 @TODO: TILES -- do not have filtered tiles for OB level data.
106 Have to reproduce!! or run list-driven on pawprints and
107 calculate weighted mean (may also work on overlaps,
108 but seeing issues)
109 @TODO: Run list-driven on pawprints for all tiles.
110 @TODO: Luptitudes instead of magnitudes
111 @TODO: Run from J-band list, not main Source table - option in survey requirements
112 @TODO: Update RequiredListDrivenProduct - like RequiredNeighbours?
113 @TODO: Add column for filtered tiles into Multiframe (for VISTA).
114 @TPDP: Add in option for input list...
115 """
116
117
118
119
120 listDrivReq = self.archive.query(
121 "tableID,filterID,transFile,extractor",
122 "RequiredListDrivenProduct",
123 "programmeID=%s and surveyID<0" % self.programmeID)
124 if len(listDrivReq) > 1:
125
126 raise Exception
127 tableInfo = self.archive.query(
128 "tableName,sourceIDName",
129 "ProgrammeTable",
130 "programmeID=%s AND tableID=%s" % (self.programmeID,
131 listDrivReq[0].tableID),
132 firstOnly=True)
133
134 self.passbands = df.PassbandList(self.programme).passbands
135 spaceRequired = (len(self.passbands) * self.archive.queryNumRows(
136 self.programme.getSourceTable()) * self.archive.queryRowSize(
137 self.programme.getListRemeasTable()))
138 self.setUpWorkingDirs(spaceRequired)
139 self.prodType = self.programme.getAttr('sourceProdType')
140 self.procDateSet = set()
141 self._prefix = ''.join(["cu", "%02d" % (self.cuNum), "id",
142 str(self.cuEventID),
143 '_', os.getenv('HOST'), '_',
144 self.archive.database])
145 filePidPathName = fits.createFileListDict(self.programme)[self.prodType]
146
147 Logger.addMessage("Will write list-driven catalogues to %s"
148 % self.outputDirPath)
149
150 whereString = ''.join("listproc%s=0 or " % pband
151 for pband in self.passbands)
152
153 frameSets = self.archive.query(
154 selectStr="frameSetID",
155 fromStr=self.programme.getMergeLogTable(),
156 whereStr=whereString[:-4])
157
158
159
160 frameGroups = queries.getFrameSetGroups(self.programme, frameSets)
161
162 frameGroups = [frameGroups[0]] if self.oneOnly else frameGroups
163 Logger.addMessage("Produced list of %d frameSet groups to process"
164 % len(frameGroups))
165
166
167 procLDCatList = []
168 progSchema = schema.parseTables(self.programme.getSchemaScript())
169 ingester = Ingester(self.archive, progSchema, self.shareFileID)
170 for ii, frmGroup in enumerate(frameGroups):
171 Logger.addMessage("Getting multiframes for frameGroup %d" % (ii + 1))
172
173 mfList = self.getMultiframeList(frmGroup)
174
175 isPwprint = getIsPPT(mfList)
176 mxImgSizeDeg = max([(max(mfl.imgSizeX, mfl.imgSizeY) *
177 self.sysc.instrumentPixScale /
178 (3600.*mfl.micro)) for mfl in mfList])
179
180 mxDithSizeDeg = max([(max((mfl.imgSizeX - mfl.micro * mfl.detX),
181 (mfl.imgSizeY - mfl.micro * mfl.detY)) *
182 self.sysc.instrumentPixScale /
183 (3600.*mfl.micro)) for mfl in mfList])
184 if isPwprint:
185
186 basicSize = 0.5 * max(self.sysc.fieldOfView)
187
188
189 radiusDeg = basicSize + mxDithSizeDeg
190 else:
191
192
193 radiusDeg = 0.5 * mxImgSizeDeg
194
195
196 tolerance = self.getTolerance(frmGroup)
197 radiusDeg = 1.1 * (radiusDeg + tolerance)
198 centre = (mfList[0].ra, mfList[0].dec)
199
200 sourceList = self.getSources(centre, radiusDeg)
201 Logger.addMessage("Number of sources is %d" % len(sourceList))
202 file(self.tempSourceFileName, 'w').writelines(
203 ["%s %s\n" % (src.ra, src.dec) for src in sourceList])
204 Logger.addMessage("Selected source list for this frameSet group")
205
206
207
208 for mfl in mfList:
209 self.procDateSet.add(fits.getDate(mfl.fileName))
210
211 listDrivCatName = self.runLDExtractor(mfl, sourceList)
212
213
214
215
216
217
218 procLDCatList.append("%s %s\n" %
219 (fits.stripServer(mfl.fileName),
220 listDrivCatName))
221
222
223
224
225 os.remove(self.tempSourceFileName)
226 file(filePidPathName, "w").writelines(procLDCatList)
227
228
229
230
231
232
233
234
235
236 self.createSchemaList()
237
238
239 self.setTableVariables()
240
241
242 self.cleanUpFileShare()
243
244
245
246 self.createIlluminationTableList()
247
248
249
250 if not self.isTrialRun and os.path.getsize(filePidPathName) > 0:
251
252
253
254
255
256
257
258 wfcamsrc.extractDetections(self._schemaFileList[0],
259 filePidPathName, self.cuEventID, self.archive.sharePath(),
260 self._prefix, self.illumFile.name, self._schemaFileList[1],
261 0, self.sysc)
262 ingestFilePathList = [(tableName,
263 os.path.join(self.archive.sharePath(), "%s_%s.dat"
264 % (self._prefix, tableName)))
265 for tableName in self._schemaFileList[1].split(',')]
266 for tableName, ingestPathFile in ingestFilePathList:
267 ingester.ingestTable(tableName, ingestPathFile,
268 isCsv=False)
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
285 """Make sure there are no old .dat files in the dbSharePath.
286 """
287 for table in self._tableDict:
288 csvFileName = os.path.join(
289 self.archive.sharePath(), self._tableDict[table]["ingfile"])
290 if os.path.exists(csvFileName):
291 os.remove(csvFileName)
292
293
294
296 """Create the list of used illumination tables and write it for use
297 with exnumeric to the working directory.
298 @TODO: Move CU4 version to Astrometry?? and link to that.
299
300 """
301 illumDict = defaultdict(list)
302 illumFiles = [illumFileName
303 for illumFileName in dircache.listdir(self.sysc.illuminationDir())
304 if not "_200" in illumFileName]
305
306 for dateStr in self.procDateSet:
307
308 for illumFileName in illumFiles:
309 if illumFileName.endswith(".table"):
310 _prefix, semester, month, _band = \
311 illumFileName.partition(".table")[0].split('_')
312 semester = "20%s" % semester.upper()
313 if semester == self.sysc.obsCal.checkDate(dateStr) \
314 and month.lower() == self.sysc.obsCal.getMonth(dateStr):
315 illumDict[dateStr].append(os.path.join(
316 self.sysc.illuminationDir(), illumFileName))
317
318
319 if dateStr not in illumDict:
320 illumDict[dateStr] = [
321 os.path.join(self.sysc.illuminationDir(), fn)
322 for fn in illumFiles
323 if '_00x_def_' in fn and fn.endswith(".table")]
324 Logger.addMessage("<Warning> No illumination correction table"
325 " available, using default table!")
326
327
328 self.illumFile = File(os.path.join(self.tempWorkingDirPath,
329 "illumTables.dat"))
330 self.illumFile.wopen()
331 for dateStr in self.procDateSet:
332 for illumTableName in illumDict[dateStr]:
333 if not os.path.exists(illumTableName + ".csv"):
334 self.parseIllumTable(illumTableName)
335 self.illumFile.writetheline(illumTableName + ".csv")
336 self.illumFile.close()
337
338
339
340
359
360
361
362 - def filterCat(self, listDrivCatName, origCatName, sourceList):
363 """ Remove sources outside edge. Add in sourceIDs
364 @TODO: Do this in C++ code -- more efficient.
365 """
366 sourceIDs = numpy.array([sourceID for sourceID, _ra, _dec in sourceList])
367 newCol = Column(name='Source_ID', format='K', unit='Number',
368 array=sourceIDs)
369 tmpCatName = os.path.join(os.path.dirname(listDrivCatName), "temp.fits")
370 hdulist = pyfits.open(listDrivCatName)
371 hdulist2 = pyfits.HDUList()
372 hdulistC = fits.open(origCatName)
373 for ii, hdu in enumerate(hdulist):
374 if ii > 0:
375 t = hdu.columns + newCol
376 newhdu = pyfits.new_table(t)
377 newhdu.header = self.updateHeader(newhdu.header, hdu.header)
378 newhdu.header = self.updateHeader(newhdu.header,
379 hdulistC[ii].header)
380 hdulist2.append(newhdu)
381 else:
382 hdulist2.append(hdu)
383 hdulist2.writeto(tmpCatName)
384 hdulist.close()
385 hdulist2.close()
386 hdulistC.close()
387 os.remove(listDrivCatName)
388 hdulist = pyfits.open(tmpCatName)
389 hdulist2 = pyfits.HDUList()
390 for ii, hdu in enumerate(hdulist):
391 if ii > 0:
392 nxout = hdu.header['NXOUT']
393 nyout = hdu.header['NYOUT']
394 tbdata = hdu.data
395
396 mask = numpy.logical_and(numpy.logical_and(numpy.logical_and(
397 numpy.greater_equal(tbdata.field("X_coordinate"), 1),
398 numpy.less_equal(tbdata.field("X_coordinate"), nxout)),
399 numpy.greater_equal(tbdata.field("Y_coordinate"), 1)),
400 numpy.less_equal(tbdata.field("Y_coordinate"), nyout))
401 newhdu = pyfits.new_table(hdu.columns)
402 newhdu.data = hdu.data[mask]
403 newhdu.header = self.updateHeader(newhdu.header, hdu.header)
404 hdulist2.append(newhdu)
405 else:
406 hdulist2.append(hdu)
407 hdulist2.writeto(listDrivCatName)
408 hdulist.close()
409 hdulist2.close()
410 os.remove(tmpCatName)
411
412
413
415 """Gets list of multiframes
416 """
417 multiframeList = list()
418 for passband in self.passbands:
419 multiframeList += self.archive.query(
420 selectStr="m.multiframeID as mfID, (15.*rabase) as ra,"
421 "decbase as dec, frameType, "
422 "cast(sqrt(nuStep) as int) as micro, fileName, "
423 "max(axis1length) as imgSizeX, "
424 "max(axis2length) as imgSizeY, "
425 "max(detRows) as detX, max(detCols) as detY",
426 fromStr="Multiframe as m,MultiframeDetector as md,%s as l"
427 % self.programme.getMergeLogTable(),
428 whereStr="frameSetID in (%s) and m.multiframeID=l.%smfID and "
429 "m.multiframeID=md.multiframeID and l.listProc%s=0 "
430 "and m.multiframeID>0 "
431 "group by m.multiframeID,frameType,(15.*rabase),"
432 "decbase,nustep,fileName"
433 % (','.join(str(fsID) for fsID in frmGroup),
434 passband, passband))
435
436 return multiframeList
437
438
439
441 """Returns all primary sources within circle
442 """
443 return self.archive.query(
444 selectStr="sourceID,ra,dec",
445 fromStr=self.programme.getSourceTable(),
446 whereStr="(priOrSec=0 or priOrSec=frameSetID) and "
447 "dbo.fGreatCircleDist(ra,dec,%f,%f)<%f"
448 % (centre[0], centre[1], 60.*radiusDeg))
449
450
452 """
453 """
454 fieldIDList = []
455 for fSetID in frmGroup:
456 fieldIDList.append(queries.getFieldID(
457 self.archive, fSetID, self.programme))
458 return self.archive.queryAttrMax(
459 "stackRadius", "Required%s" %
460 self.prodType,
461 where="fieldID in (%s)" % ','.join(map(str, fieldIDList)))
462
463
464
465
467 """
468 Run CASU Extractor.
469
470 @TODO: Parallelise - like CU13
471 @TODO: Describe inputs, e.g. args.
472 @TODO: Add in ZP etc...
473 """
474 thresh = self.sysc.casuDetThresh[mfl.micro * mfl.micro]
475 rCore = mfl.micro * self.sysc.rCoreBasic
476
477 imgName = fits.stripServer(mfl.fileName)
478 cnfName = fits.getConfMap(imgName)
479 catName = fits.getCatalogue(imgName)
480 listCatName = fits.getListDrivenCat(self.outputDirPath, imgName)
481
482
483
484
485
486
487
488
489
490 argstr = ' '.join([imgName, cnfName, self.tempSourceFileName,
491 listCatName, str(thresh)])
492 argstr += " --rcore %s " % rCore
493 argstr += " --cattype %s" % self.sysc.casuCatType
494
495 Logger.addMessage("Extracting for camera %s" %
496 self.sysc.instrumentName,
497 alwaysLog=False)
498 commands = ["imcore_list " + argstr]
499 for cmd in commands:
500
501 if not self.skipCats:
502 extp.run(cmd)
503 fits.removeDuplicateKeywords(listCatName)
504 self.filterCat(listCatName, catName, sourceList)
505
506 return listCatName
507
508
509
510
512 """ Set the variables dependent of the tables used in this cu.
513 """
514 tableList = []
515 processedTables = []
516 tableList += zip(
517 utils.extractColumn(self._schemaFileList[0], 1),
518 utils.extractColumn(self._schemaFileList[0], 0))
519 processedTables += self._schemaFileList[1].split(',')
520
521 self._ingestOrder = []
522 self._tableDict = {}
523 for table, schema in tableList:
524 if table in processedTables:
525 self._ingestOrder.append(table)
526 self._tableDict[table] = dict(schema=schema,
527 ingfile="%s_%s.dat" % (self._prefix, table))
528
529 self._ingestOrder = utils.orderedSet(self._ingestOrder)
530
531
532
562
563
564
566 """ Add keywords that are not in header from old header
567 """
568 origList = [card.key for card in header.ascardlist()]
569
570 for card in oldheader.ascardlist():
571 if card.key not in origList:
572 header.update(card.key, card.value, card.comment)
573 return header
574
575
577 """
578 """
579 fTypes = set(not ('tile' in mfInfo.frameType.lower() or
580 'mosaic' in mfInfo.frameType.lower())
581 for mfInfo in mfList)
582
583 if len(fTypes) > 1:
584 raise Exception("?")
585
586 return fTypes.pop()
587
588
589
590
591
if __name__ == '__main__':
    # Define the command-line interface: positional arguments first,
    # then the option flags this CU understands.
    CLI.progArgs.append(CLI.Argument('programmeID', 'DXS'))
    CLI.progArgs.append(CLI.Argument('releaseNum', '3',
                                     isValOK=lambda x: x.isdigit()))

    CLI.progOpts += [
        CLI.Option('i', 'internalOnly',
                   'Only do internal list-driven photometry'),
        CLI.Option('o', 'oneFrameGroupOnly', 'Only test one frameGroup'),
        CLI.Option('v', 'verbose', 'more verbose logging'),
        CLI.Option('s', 'synoptic', 'Run synoptic list-driven photometry'),
        CLI.Option('S', 'skipCats', 'Skip catalogue creation')]

    cmdLine = CLI(Cu9, "$Revision: 9470 $")
    Logger.isVerbose = cmdLine.getOpt('verbose')
    Logger.addMessage(cmdLine.getProgDetails())

    # Configure the curation task from the parsed command line and run it.
    curator = Cu9(cmdLine.getArg('programmeID'), cli=cmdLine)
    curator.internalOnly = cmdLine.getOpt('internalOnly')
    curator.releaseNum = int(cmdLine.getArg('releaseNum'))
    curator.runSynop = cmdLine.getOpt('synoptic')
    curator.skipCats = cmdLine.getOpt('skipCats')
    curator.isTrialRun = cmdLine.getOpt('test')
    curator.oneOnly = cmdLine.getOpt('oneFrameGroupOnly')
    curator.run()
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649