1
2
3
4 """
5 Invoke CU4. Ingest single frame source detections.
6
7 @author: E. Sutorius
8 @org: WFAU, IfA, University of Edinburgh
9
10 @newfield contributors: Contributors, Contributors (Alphabetical Order)
11 @contributors: I. Bond, J. Bryant, R.S. Collins, N.C. Hambly
12 """
13
14 from collections import defaultdict
15 import dircache
16 import inspect
17 import itertools
18 import math
19 import mx.DateTime as mxdt
20 from operator import itemgetter
21 import os
22 import string
23
24 from wsatools.CLI import CLI
25 import wsatools.DataFactory as df
26 import wsatools.DbConnect.DbConstants as dbc
27 from wsatools.DbConnect.DbSession import DbSession
28 from wsatools.DbConnect.IngCuSession import IngCuSession
29 from wsatools.DbConnect.IngIngester import IngestLogger as Logger, \
30 IngestLogFile
31 from wsatools.File import File
32 import wsatools.Utilities as utils
33 import wsatools.WfcamsrcInterface as wfcamsrc
34
35
class Cu4(IngCuSession):
    """
    Invoke single frame source detections ingestion given a log of files
    transferred from the data processing centre.

    """
    # NOTE(review): the declaration line of this nested exception was lost in
    # the extraction; ``Exception`` as base class is an assumption inferred
    # from ``raise Cu4.NoFilesError()`` / ``except Cu4.NoFilesError`` in
    # run() -- confirm against the repository original.
    class NoFilesError(Exception):
        """ Exception in case the given programme has no catalogues to process.
        """
        pass

    # Number of FITS file pairs written per extraction list file (run() logs
    # "listfiles (cont. 2 FITS files)" and multiplies counts by this).
    maxSplit = 2
    # Set True by run() when a CU<nn>ED marker file is found in a date dir;
    # adds the -F flag to the suggested IngestCUFiles.py command.
    cuedFileExists = False
49
50
51 - def __init__(self,
52 curator=CLI.getOptDef("curator"),
53 database=DbSession.database,
54 csvSharePath=CLI.getOptDef("outpath"),
55 programmeList=CLI.getOptDef("programmes"),
56 progOrder=CLI.getOptDef("progorder"),
57 detectionSubset=CLI.getOptDef("subset"),
58 excludeFiles=CLI.getOptDef("exclude"),
59 isTrialRun=DbSession.isTrialRun,
60 ReDo=CLI.getOptDef("redo"),
61 keepWorkDir=CLI.getOptDef("keepwork"),
62 xferlog=CLI.getArgDef("xferlog"),
63 comment=CLI.getArgDef("comment")):
64 """
65 @param curator: Name of curator.
66 @type curator: str
67 @param comment: Descriptive comment as to why curation task is
68 being performed.
69 @type comment: str
70 @param database: Name of the database to connect to.
71 @type database: str
72 @param detectionSubset: Process subset of [Astrometry table,
73 Photometry table, Raw table].
74 @type detectionSubset: list(str)
75 @param excludeFiles: Excluded files from processing.
76 @type excludeFiles: list(str)
77 @param keepWorkDir: Don't remove working directory.
78 @type keepWorkDir: bool
79 @param programmeList: Only process data for given programmes (accepts
80 keywords 'all', 'ns' (non-survey),
81 'ukidss' (all 5 main surveys)).
82 @type programmeList: list(str)
83 @param progOrder: The order of processing of the programmes, all
84 programmes not explicitely named can be put
85 anywhere in the list as 'others'.
86 @type progOrder: list(str)
87 @param ReDo: If True, overwrite existing monthly detection schema.
88 @type ReDo: bool
89 @param xferlog: Logfile containing files to be ingested.
90 @type xferlog: str
91 @param isTrialRun: If True, do not perform database modifications.
92 @type isTrialRun: bool
93
94 """
95 typeTranslation = {"curator":str,
96 "database":str,
97 "csvSharePath":str,
98 "programmeList":list,
99 "progOrder":list,
100 "detectionSubset":list,
101 "excludeFiles":list,
102 "isTrialRun":bool,
103 "xferlog":str,
104 "ReDo":bool,
105 "keepWorkDir":bool,
106 "comment":str}
107
108 super(Cu4, self).attributesFromArguments(
109 inspect.getargspec(Cu4.__init__)[0], locals(),
110 types=typeTranslation)
111
112 self.comment += '[' + ','.join(n for n in programmeList) + ']'
113
114
115 super(Cu4, self).__init__(cuNum=4,
116 curator=self.curator,
117 comment=self.comment,
118 reqWorkDir=True,
119 keepWorkDir=self.keepWorkDir,
120 database=self.database,
121 autoCommit=False,
122 isTrialRun=self.isTrialRun)
123
124
125 self.detectionSubset = tuple(x.title() for x in self.detectionSubset)
126
127
128 self.createFileList()
129 self.outPrefixList = []
130
131 self._initFromDB()
132
133 utils.ensureDirExist(self.csvSharePath)
134 self.obsCal = self.sysc.obsCal
135
136
137
139 """ Initialize constants from the DB
140 """
141
142 self._connectToDb()
143
144 try:
145 if not self.isTrialRun:
146 self._cuEventID = self._getNextCuEventID()
147
148 if self.cuNum != dbc.smallIntDefault():
149 try:
150 self._cuTable = df.CurationTaskTable(self.archive,
151 self.cuNum)
152 Logger.addMessage("Initialising Curation Use case:")
153 Logger.addMessage(self._cuTable.getDescription())
154 Logger.addMessage("on list: " + self.xferlog)
155 except ValueError, details:
156 Logger.addMessage("<Warning> " + str(details))
157 self._cuTable = None
158
159
160
161 self._createProgrammeTranslation()
162
163
164
165
166 self._shortListFile = \
167 File(os.path.join(self._workPath, "shortlist.dat"))
168 self._shortListFile.wopen()
169
170 notInDbLogFile = \
171 File("notcu3ed_%s.log" % mxdt.utc().strftime("%Y%m%d_%H%M%S"))
172 notInDbLogFile.wopen()
173
174
175 tmpList = []
176 if self.excludeFiles:
177 Logger.addMessage("Excluding %s" % ','.join(self.excludeFiles))
178 for excl in self.excludeFiles:
179 for entry in self._fileList:
180 if not (excl in entry[0] or excl in entry[1]):
181 tmpList.append(entry)
182 self._fileList = tmpList[:]
183
184 catFiles = []
185 self.procDateSet = set()
186 self.isWfauProduct = False
187 notcu3ed = 0
188
189 for entry in self._fileList:
190 if entry[1] != self.sysc.emptyFitsCataloguePathName():
191
192 fileEntry = self.sysc.pixelServerHostName + entry[0]
193
194 mfID = self.archive.query(
195 "multiframeID", "Multiframe",
196 whereStr="deprecated<128 AND filename LIKE %r" %
197 fileEntry)
198 if mfID:
199 catFiles.append(list(entry) + mfID)
200 self._shortListFile.writetheline(entry[0] + ' ' + entry[1])
201
202 if "e20" in entry[1]:
203 self.isWfauProduct = True
204
205 if self.sysc.isVSA() and "e20" in entry[1] \
206 and self.database.rpartition('.')[2] == "VSAVVV" :
207 month = os.path.basename(os.path.dirname(
208 entry[0])).partition('_v')[0][:4] + "0000"
209 else:
210 month = os.path.basename(os.path.dirname(
211 entry[0])).partition('_v')[0]
212
213 self.procDateSet.add(month)
214 else:
215 notcu3ed += 1
216 notInDbLogFile.writetheline(entry[0])
217
218 self._fileList = catFiles
219
220 self._shortListFile.close()
221 notInDbLogFile.close()
222
223 if notcu3ed > 0:
224 Logger.addMessage("%s files not ingested by CU3 are in %s" %
225 (notcu3ed, notInDbLogFile.name))
226 else:
227 try:
228 os.remove(notInDbLogFile.name)
229 except OSError:
230 pass
231
232
233 self._progTable = df.ProgrammeTable(self.archive)
234
235 finally:
236 self._disconnectFromDb()
237
238
239
241 """ Do CU4.
242 """
243 Logger.addMessage("[%d] Started CU%s on list: %s in order: %r" %
244 (self._cuEventID, self.cuNum, self.xferlog, self.progOrder))
245
246
247 log = Logger(self.createLogFileName())
248
249 try:
250
251 if not self._fileList:
252
253 Logger.addMessage(
254 "[%d] No new source data are required to be loaded." % self._cuEventID)
255 return
256
257
258 self.createIlluminationTableList()
259
260
261 self.monthlyMonths = []
262 for dateStr in sorted(self.procDateSet):
263 self.monthlyMonths.append(dateStr[:6])
264
265
266 catsForProg = self.getProgIDs()
267
268 Logger.addMessage(
269 "[%d] Number of files containing source detections to be "
270 "ingested: %d" % (self._cuEventID, len(list(
271 utils.unpackList(catsForProg.values())))))
272
273
274 self.createSchemaDict()
275
276
277 self._progOrderRatedDict = self.getProcessOrder(self._progIDs,
278 include=self.programmeList, progOrder=self.progOrder)
279
280 if not set(self._progIDs).intersection(self._progOrderRatedDict):
281 raise Cu4.NoFilesError()
282
283
284 extractDict = self.createSplitListFiles(catsForProg,
285 splitNum=self.maxSplit)
286
287 if not self._progOrderRatedDict:
288 Logger.addMessage("[%d] No data for %s." % (
289 self._cuEventID, ','.join(self.programmeList)))
290 else:
291 Logger.addMessage("[%d] Running exnumeric..." % self._cuEventID)
292 print("Full log is no longer echoed to terminal, but "
293 "continues to be written to " + log.pathName)
294
295
296 timestamp = utils.makeMssqlTimeStamp()
297 histories = {"DCH": [], "ACH": [], "PCH": []}
298
299 histories["DCH"].extend(self._dateList)
300
301
302 histories["ACH"].append(
303 [self._cuEventID, self.cuNum, log.pathName, dbc.charDefault(),
304 timestamp, self.curator, self.comment, dbc.yes()])
305
306
307 for dateDirPath in histories["DCH"]:
308 cuedFileName = "CU%02dED_%s" % (
309 self.cuNum, self.database.rpartition('.')[2])
310 if os.path.exists(os.path.join(dateDirPath, cuedFileName)):
311 self.cuedFileExists = True
312
313 self._subNum = 0
314 totalProcNum = 0
315 for progID in self._progOrderRatedDict:
316
317 if progID in self.sysc.monthlyDetSurveys:
318 self.setupDetectionTable(
319 self._progNameOfID[progID].lower().replace(
320 "u/ukidss/",'').replace('/',''), self.monthlyMonths,
321 self.ReDo)
322
323
324 if progID == 120:
325 if len(extractDict[progID]) >= 50:
326 splitNum = int(len(extractDict[progID]) / 10.)
327 elif len(extractDict[progID]) >= 10:
328 splitNum = int(len(extractDict[progID]) / 5.)
329 else:
330 splitNum = len(extractDict[progID])
331 else:
332 splitNum = 1
333
334 self._progTable.setCurRow(programmeID=int(progID))
335 Logger.addMessage("[%d] Extracting data for %s" % (
336 self._cuEventID, self._progTable.getName()))
337
338 subLists = self.splitList(extractDict[progID], splitNum)
339
340 Logger.addMessage(
341 "[%d] Number of listfiles (cont. 2 FITS files) to be "
342 "processed: %d (split into %d sublist(s))" % (
343 self._cuEventID, len(extractDict[progID]),
344 len(subLists)))
345
346 for subFileList in subLists:
347 self._subNum += 1
348 Logger.addMessage("Processing sublist #%d.%d..." % (
349 self._cuEventID, self._subNum))
350
351 hostID = "%s-%d" % (self._hostName, self._subNum)
352 if progID == 120:
353 hostID += "-%s" % self.monthlyMonths[0]
354
355 self._outPrefix = self.createFilePrefix(
356 self.cuNum, self._cuEventID, hostID,
357 self.database.rpartition('.')[2])
358 self.outPrefixList.append((self._outPrefix,
359 self._cuEventID))
360
361
362 self.setTableVariables()
363
364
365 self.cleanUpFileShare()
366
367
368 ingestLogFile = IngestLogFile(os.path.join(
369 self.dbMntPath, self._outPrefix + ".log"))
370
371 self.runExtractions(progID, subFileList)
372
373
374 tmpTableDict, tmpIngestOrder = self.checkCreated(progID)
375
376
377 histories["PCH"] =[[progID, self._cuEventID, timestamp, -1]]
378 ingestLogFile.pickleWrite(tmpIngestOrder, tmpTableDict,
379 histories)
380 Logger.addMessage(
381 "[%d] Curation History info written to %s" % (
382 self._cuEventID, ingestLogFile.name))
383
384 Logger.addMessage(
385 "Processing of %s listfiles of sublist #%d.%d finished." % (
386 len(subFileList), self._cuEventID, self._subNum))
387 totalProcNum += len(subFileList)
388 forceFlag = (" -F" if self.cuedFileExists else '')
389 Logger.addMessage(
390 "[%d] Please, run ./IngestCUFiles.py -i -r %s%s %s" % (
391 self._cuEventID, ingestLogFile.name, forceFlag,
392 self.database))
393
394 preComment = self._historyCommentPrefix(self._success, dbc.yes())
395 row = (self._cuEventID, self.cuNum, log.pathName,
396 dbc.charDefault(), timestamp, self.curator,
397 preComment + self.comment, dbc.yes())
398
399 self._connectToDb()
400 self._updateArchiveCurationHistory(row)
401 self.archive.commitTransaction()
402 self._disconnectFromDb()
403 Logger.addMessage("[%d] Processing of %s files finished." % (
404 self._cuEventID, totalProcNum * self.maxSplit))
405
406 except Cu4.NoFilesError:
407 Logger.addMessage(
408 "[%d] No catalogue data available for programmes: %s" % (
409 self._cuEventID, ', '.join(self.programmeList)))
410 except Exception as error:
411
412
413 Logger.addExceptionDetails("[%d] %r" % (self._cuEventID, error))
414 finally:
415 Logger.addMessage("[%d] Log written to %s" %(
416 self._cuEventID, log.pathName))
417 Logger.dump(file(log.pathName, 'w'))
418 Logger.reset()
419
420
421
423 """ Check that all files are created.
424 """
425 tmpTableDict = {}
426 tmpIngestOrder = []
427 identity = string.maketrans('', '')
428 for table in self._ingestOrder:
429 progToCheck = self._progNameOfID[progID].lower()
430 progToCheck = progToCheck.replace("u/ukidss/", '')
431 progToCheck = progToCheck.translate(identity, '/.-()')
432 if progToCheck in table.partition("Detection")[0].lower():
433 if not os.path.exists(os.path.join(
434 self.csvSharePath, self._tableDict[table]["ingfile"])):
435 self._ingestOrder.remove(table)
436 Logger.addMessage(
437 "[%d] <ERROR> Data for %s not available!" % (
438 self._cuEventID, table))
439 else:
440 tmpTableDict[table] = self._tableDict[table]
441 tmpIngestOrder.append(table)
442
443 return tmpTableDict, tmpIngestOrder
444
445
446
448 """Make sure there are no old .dat files in the dbSharePath.
449 """
450 for table in self._tableDict:
451 csvFileName = os.path.join(
452 self.csvSharePath, self._tableDict[table]["ingfile"])
453 if os.path.exists(csvFileName):
454 os.remove(csvFileName)
455
456
457
459 """Create the list of used illumination tables and write it for use
460 with exnumeric to the working directory.
461 """
462 illumDict = defaultdict(list)
463 illumFiles = dircache.listdir(self.sysc.illuminationDir())
464
465 for dateStr in self.procDateSet:
466
467 for illumFileName in illumFiles:
468 if illumFileName.endswith(".table"):
469 _prefix, semester, month, _band = \
470 illumFileName.partition(".table")[0].split('_')
471
472 semester = ("20%s" % semester.upper() if self.sysc.isWSA()
473 else semester)
474
475 if not self.isWfauProduct and \
476 (semester == self.obsCal.checkDate(dateStr)
477 or dateStr.startswith(semester)) \
478 and month.lower() == self.obsCal.getMonth(dateStr):
479 illumDict[dateStr].append(os.path.join(
480 self.sysc.illuminationDir(), illumFileName))
481
482
483 if dateStr not in illumDict:
484 illumDict[dateStr] = [
485 os.path.join(self.sysc.illuminationDir(), fn)
486 for fn in illumFiles
487 if any(k in fn for k in ['_00x_def_', '_0000_def_'])
488 and fn.endswith(".table")]
489 Logger.addMessage("<Warning> No illumination correction table"
490 " available, using default table!")
491
492
493 self.illumFile = File(os.path.join(self._workPath, "illumTables.dat"))
494 self.illumFile.wopen()
495 for dateStr in self.procDateSet:
496 for illumTableName in illumDict[dateStr]:
497 if not os.path.exists(illumTableName + ".csv"):
498 self.parseIllumTable(illumTableName)
499 self.illumFile.writetheline(illumTableName + ".csv")
500 self.illumFile.close()
501
502
503
505 """ Create schema dictionary for every table.
506 """
507 self._schemaFileDict = {}
508 for progID in self._progIDs:
509 self._progTable.setCurRow(programmeID=progID)
510
511 rawTable = self._progTable.getDetectionTable()
512 astroTable = self._progTable.getAstrometryTable()
513 photoTable = self._progTable.getPhotometryTable()
514
515 if progID in self.sysc.monthlyDetSurveys:
516 rawTable += self.monthlyMonths[0]
517 astroTable += self.monthlyMonths[0]
518 photoTable += self.monthlyMonths[0]
519
520 tables = [rawTable, astroTable, photoTable]
521
522
523 processTables = [table for table in tables
524 if any(x in table for x in self.detectionSubset)]
525
526 self._schemaFileDict[progID] = [
527 os.path.join(self._workPath, "schema_%s.dat" % progID),
528 ','.join(processTables)]
529
530 schema = self._progTable.getAttr('catalogueSchema')
531 if progID in self.sysc.monthlyDetSurveys:
532 schema = self.sysc.sqlMonthlyDetPath(
533 schema.replace(".sql", "%s.sql" % self.monthlyMonths[0]),
534 full=False)
535
536
537 writeTables = (processTables if rawTable in processTables else
538 [rawTable] + processTables)
539 file(self._schemaFileDict[progID][0], 'w').writelines(
540 "%s %s\n" % (schema, table) for table in writeTables)
541
542
543
544
546 """Split list p into sublists of length n.
547 """
548 result = [ [ ] for _x in itertools.repeat(0, n) ]
549 resiter = itertools.cycle(result)
550 for item, sublist in itertools.izip(p, resiter):
551 sublist.append(item)
552 return result
553
554
555
575
576
577
579 """
580 Reads FITS files to determine the set of programme IDs for the
581 catalogues that will be ingested in this curation event.
582
583 @return: A programme ID -> catalogues dictionary.
584 @rtype: defaultdict(int, list(str))
585
586 """
587
588 Logger.addMessage("[%d] Getting all programme IDs." % self._cuEventID)
589 progOfFile, newProgOfFile = \
590 self.getProgramIDs(self._shortListFile.name)
591
592 newProgFiles = newProgOfFile.keys()
593 progIDs = set(progOfFile[fileName][0] for fileName in progOfFile)
594
595
596 catsForProg = defaultdict(list)
597 for imgName, catName, mfID in self._fileList:
598 if not imgName in newProgFiles:
599
600 catsForProg[progOfFile[imgName][0]].append(
601 [imgName, catName, mfID])
602
603
604 tmpProgs = set()
605 for entry in self.programmeList:
606 if any(eql in entry for eql in ('+=', '==')):
607 inProg, _eql, outProg = entry.rpartition('=')
608 inProg, _met = inProg[:-1], inProg[-1:]
609 inPID = self._progIDofName[inProg.upper()]
610 if self.sysc.isWSA():
611 outPID = self._progIDofName[
612 outProg.upper() if '/' in outProg
613 else "U/UKIDSS/%s" % outProg.upper()]
614 else:
615 outPID = self._progIDofName[outProg.upper()]
616
617 progIDs.add(outPID)
618 if _met == '+':
619 progIDs.add(inPID)
620 if outProg in self.programmeList:
621 catsForProg[outPID].extend(catsForProg[inPID])
622 else:
623 catsForProg[outPID] = catsForProg[inPID][:]
624 tmpProgs.add(inProg)
625 tmpProgs.add(outProg)
626 elif _met == '=':
627 catsForProg[outPID] = catsForProg[inPID][:]
628 del catsForProg[inPID]
629 progIDs.discard(inPID)
630 tmpProgs.add(outProg)
631 else:
632 tmpProgs.add(entry)
633 else:
634 tmpProgs.add(entry)
635 self.programmeList = list(tmpProgs)[:]
636
637
638
639 for progID in catsForProg:
640 catsForProg[progID][:] = \
641 sorted(catsForProg[progID], key=itemgetter(2))
642
643 self._progIDs = sorted(progIDs)
644 Logger.addMessage(
645 "[%d] Got all programme IDs: %s" % (
646 self._cuEventID, ','.join(map(str, self._progIDs))))
647
648 return catsForProg
649
650
651
653 """ Parse the CASU illumination table file into a csv file.
654 """
655 casuIllumFile = File(illumTableName)
656 casuIllumFile.ropen()
657
658 illumDict = {}
659 for line in casuIllumFile.readlines():
660 if "Xi" not in line:
661 xi, eta, delMag, _binNo = line.split()
662 key = (math.radians(float(xi)), math.radians(float(eta)))
663 illumDict[key] = float(delMag)
664
665 filterName = casuIllumFile.root.rpartition('_')[2]
666 casuIllumFile.close()
667
668 xiList = sorted(set(k[0] for k in illumDict))
669 etaList = sorted(set(k[1] for k in illumDict))
670
671 illumDataFile = File(casuIllumFile.name + ".csv")
672 illumDataFile.wopen()
673 illumDataFile.writetheline('%s,%s,%s' %
674 (len(xiList), len(etaList), filterName))
675 for xi in xiList:
676 for eta in etaList:
677 illumDataFile.writetheline('%.16f,%.16f,%.3f' %
678 (xi, eta, illumDict[(xi, eta)]))
679 illumDataFile.close()
680
681
682
683
685 """ Run the C++ code to extract the detections from the catalogues.
686 """
687
688 nextObjID = -1
689
690 for filePidPathName in subFileList:
691
692 if not self.isTrialRun and os.path.getsize(filePidPathName) > 0:
693 notIngList, lastObjID = wfcamsrc.extractDetections(
694 self._schemaFileDict[progID][0], filePidPathName,
695 self._cuEventID, self.csvSharePath, self._outPrefix,
696 self.illumFile.name, self._schemaFileDict[progID][1],
697 nextObjID, self.sysc)
698 if notIngList:
699 self.writeErrorFiles(notIngList)
700
701
702 nextObjID = min(nextObjID, lastObjID)
703
704
705
707 """ Set the variables dependend of the tables used in this cu.
708 """
709 tableList = []
710 processedTables = []
711 for progID in self._progOrderRatedDict:
712 tableList += zip(
713 utils.extractColumn(self._schemaFileDict[progID][0], 1),
714 utils.extractColumn(self._schemaFileDict[progID][0], 0))
715 processedTables += self._schemaFileDict[progID][1].split(',')
716
717 self._ingestOrder = []
718 self._tableDict = {}
719 for table, schema in tableList:
720 if table in processedTables:
721 self._ingestOrder.append(table)
722 self._tableDict[table] = dict(schema=schema,
723 ingfile="%s_%s.dat" % (self._outPrefix, table))
724
725 self._ingestOrder = utils.orderedSet(self._ingestOrder)
726
727
728
729
if __name__ == '__main__':
    # Register the CU4-specific command-line argument and options.
    CLI.progArgs.append(CLI.Argument("xferlog", "xferlog.log"))
    CLI.progOpts += [
        CLI.Option('o', "outpath",
                   "directory where the csv file is written to",
                   "PATH"),
        CLI.Option('p', "progorder",
                   "the order of processing of the programmes, all programmes not "
                   "explicitly named can be put anywhere in the list as 'others'",
                   "LIST", ''),
        CLI.Option('r', "redo",
                   "enable overwriting of existing MfIDs"),
        CLI.Option('K', "keepwork", "Don't remove working dir."),
        CLI.Option('P', "programmes",
                   "only process data for given programmes (accepts keywords 'all', 'ns' "
                   "(non-survey), 'ukidss' (all 5 main surveys); one programme suffixed "
                   "with 'x-' excludes this programme.)",
                   "LIST", "all"),
        CLI.Option('S', "subset",
                   "process subset of Photometry table, Raw table and Astrometry table ",
                   "LIST", "Photometry,Raw,Astrometry"),
        CLI.Option('X', "exclude",
                   "exclude given files/file patterns from processing",
                   "LIST", '')]

    cli = CLI(Cu4.__name__, "$Revision: 10181 $", Cu4.__doc__)
    Logger.addMessage(cli.getProgDetails())

    # Build the curation task from the parsed command line and run it.
    cuTask = Cu4(cli.getOpt("curator"),
                 cli.getArg("database"),
                 cli.getOpt("outpath"),
                 cli.getOpt("programmes"),
                 cli.getOpt("progorder"),
                 cli.getOpt("subset"),
                 cli.getOpt("exclude"),
                 cli.getOpt("test"),
                 cli.getOpt("redo"),
                 cli.getOpt("keepwork"),
                 cli.getArg("xferlog"),
                 cli.getArg("comment"))
    cuTask.run()
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844