rem (string, 0–322k chars) | add (string, 0–2.05M chars) | context (string, 8–228k chars) |
---|---|---|
parser.add_option("-s", "--snap", action="append", type="int",
|
parser.add_option("-S", "--snap", action="append", type="int",
|
def lsstSimMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-R", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit number (can be repeated)") if "snap" in need: parser.add_option("-s", "--snap", action="append", type="int", help="snap number (can be repeated)") if "sensor" in need: parser.add_option("-r", "--raft", action="append", help="raft coords (can be repeated)") parser.add_option("-c", "--sensor", action="append", help="sensor coords (can be repeated)") if "channel" in need: parser.add_option("-a", "--channel", action="append", help="channel coords (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") elif os.path.exists("/lsst/DC3/data/obstest/ImSim/registry.sqlite3"): options.registry = "/lsst/DC3/data/obstest/ImSim/registry.sqlite3" if "calib" in need: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "snap" in need: if options.snap is None: print >>sys.stderr, "Running over all snaps" options.snap = [x[0] for x in inButler.queryMetadata("raw", "snap", ("snap",))] elif not hasattr(options.snap, "__iter__"): options.snap = [options.snap] else: setattr(options, "snap", [0]) if "sensor" in need or "channel" in need: if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = [x[0] for x in inButler.queryMetadata("raw", "raft", ("raft",))] elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if "sensor" in need or "channel" in need: if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = [x[0] for x in inButler.queryMetadata("raw", "sensor", ("sensor",))] elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] if "channel" in need: if options.channel is None: print >>sys.stderr, "Running over all channels" options.channel = [x[0] for x in inButler.queryMetadata("raw", "channel", ("channel",))] elif not hasattr(options.channel, "__iter__"): options.channel = [options.channel] for visit in options.visit: if "sensor" in need or "channel" in need: if "snap" in need: for snap in options.snap: for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( 
outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ ("***** Processing " + \ "visit %d snap %d raft %s " + \ "sensor %s channel %s") % \ (visit, snap, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor): print >>sys.stderr, \ ("***** Processing visit %d " + \ "snap %d raft %s sensor %s") % \ (visit, snap, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor) else: # snap for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ ("***** Processing visit %d " + \ "raft %s sensor %s channel %s") % \ (visit, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, raft=raft, sensor=sensor): print >>sys.stderr, \ ("***** Processing visit %d " + \ "raft %s sensor %s") % \ (visit, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor) else: # raft, sensor if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
|
parser.add_option("-c", "--sensor", action="append",
|
parser.add_option("-s", "--sensor", action="append",
|
def lsstSimMain(processFunction, outDatasetType, need=(), defaultRoot="."): parser = OptionParser() parser.add_option("-i", "--input", dest="root", default=defaultRoot, help="input root") parser.add_option("-o", "--output", dest="outRoot", default=".", help="output root") parser.add_option("-f", "--force", action="store_true", default=False, help="execute even if output dataset exists") if "calib" in need: parser.add_option("-C", "--calibRoot", dest="calibRoot", help="calibration root") parser.add_option("-R", "--registry", help="registry") parser.add_option("-v", "--visit", action="append", type="int", help="visit number (can be repeated)") if "snap" in need: parser.add_option("-s", "--snap", action="append", type="int", help="snap number (can be repeated)") if "sensor" in need: parser.add_option("-r", "--raft", action="append", help="raft coords (can be repeated)") parser.add_option("-c", "--sensor", action="append", help="sensor coords (can be repeated)") if "channel" in need: parser.add_option("-a", "--channel", action="append", help="channel coords (can be repeated)") (options, args) = parser.parse_args() if options.registry is None: if os.path.exists(os.path.join(options.root, "registry.sqlite3")): options.registry = os.path.join(options.root, "registry.sqlite3") elif os.path.exists("/lsst/DC3/data/obstest/ImSim/registry.sqlite3"): options.registry = "/lsst/DC3/data/obstest/ImSim/registry.sqlite3" if "calib" in need: if os.path.exists("/lsst/DC3/data/obstest/ImSim"): options.calibRoot = "/lsst/DC3/data/obstest/ImSim" bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, calibRoot=options.calibRoot, registry=options.registry)) else: bf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.root, registry=options.registry)) inButler = bf.create() obf = dafPersist.ButlerFactory(mapper=LsstSimMapper( root=options.outRoot, registry=options.registry)) outButler = obf.create() if options.visit is None: print >>sys.stderr, "Running over all input visits" options.visit = [x[0] for x in inButler.queryMetadata("raw", "visit", ("visit",))] elif not hasattr(options.visit, "__iter__"): options.visit = [options.visit] if "snap" in need: if options.snap is None: print >>sys.stderr, "Running over all snaps" options.snap = [x[0] for x in inButler.queryMetadata("raw", "snap", ("snap",))] elif not hasattr(options.snap, "__iter__"): options.snap = [options.snap] else: setattr(options, "snap", [0]) if "sensor" in need or "channel" in need: if options.raft is None: print >>sys.stderr, "Running over all rafts" options.raft = [x[0] for x in inButler.queryMetadata("raw", "raft", ("raft",))] elif not hasattr(options.raft, "__iter__"): options.raft = [options.raft] if "sensor" in need or "channel" in need: if options.sensor is None: print >>sys.stderr, "Running over all sensors" options.sensor = [x[0] for x in inButler.queryMetadata("raw", "sensor", ("sensor",))] elif not hasattr(options.sensor, "__iter__"): options.sensor = [options.sensor] if "channel" in need: if options.channel is None: print >>sys.stderr, "Running over all channels" options.channel = [x[0] for x in inButler.queryMetadata("raw", "channel", ("channel",))] elif not hasattr(options.channel, "__iter__"): options.channel = [options.channel] for visit in options.visit: if "sensor" in need or "channel" in need: if "snap" in need: for snap in options.snap: for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( 
outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ ("***** Processing " + \ "visit %d snap %d raft %s " + \ "sensor %s channel %s") % \ (visit, snap, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, snap=snap, raft=raft, sensor=sensor): print >>sys.stderr, \ ("***** Processing visit %d " + \ "snap %d raft %s sensor %s") % \ (visit, snap, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, snap=snap, raft=raft, sensor=sensor) else: # snap for raft in options.raft: for sensor in options.sensor: if "channel" in need: for channel in options.channel: if options.force or \ not outButler.fileExists( outDatasetType, visit=visit, raft=raft, sensor=sensor, channel=channel): print >>sys.stderr, \ ("***** Processing visit %d " + \ "raft %s sensor %s channel %s") % \ (visit, raft, sensor, channel) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor, channel=channel) else: if options.force or \ not outButler.fileExists(outDatasetType, visit=visit, raft=raft, sensor=sensor): print >>sys.stderr, \ ("***** Processing visit %d " + \ "raft %s sensor %s") % \ (visit, raft, sensor) processFunction(inButler=inButler, outButler=outButler, visit=visit, raft=raft, sensor=sensor) else: # raft, sensor if options.force or \ not outButler.fileExists(outDatasetType, visit=visit): print >>sys.stderr, "***** Processing visit %d" % (visit,) processFunction(inButler=inButler, outButler=outButler, visit=visit)
|
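The two rows above move the single-letter flags (--snap from -s to -S, --sensor from -c to -s). optparse requires every short option string in a parser to be unique, so a rename like this only works if the freed letter is reassigned in the same change. A minimal sketch of how optparse reports a duplicate flag (hypothetical parser, Python 2 style to match the surrounding code):

from optparse import OptionParser, OptionConflictError

parser = OptionParser()
parser.add_option("-s", "--snap", action="append", type="int",
                  help="snap number (can be repeated)")
try:
    # Registering a second option with the same short flag is rejected,
    # which is why every letter must stay unique after a rename like the one above.
    parser.add_option("-s", "--sensor", action="append",
                      help="sensor coords (can be repeated)")
except OptionConflictError, exc:
    print "conflict:", exc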
'uCov,gCov,rCov,iCov,zCov,yCov', '-f', 'objectId,ra,dec,epoch'])
|
'uCov,gCov,rCov,iCov,zCov,yCov', '-f', 'objectId,ra,decl,epoch'])
|
def referenceMatch(inputRoot, outputRoot, database, refCatalog, radius, tableSuffix=""): objectCsv = os.path.abspath(os.path.join(outputRoot, 'objDump.csv')) filtCsv = os.path.abspath(os.path.join(outputRoot, 'refFilt.csv')) matchCsv = os.path.abspath(os.path.join(outputRoot, 'refObjMatch.csv')) # Filter reference catalog subprocess.call(['python', refCcdFilter, refCatalog, filtCsv, inputRoot, '-F', 'refObjectId,isStar,ra,decl,gLat,gLon,sedName,' + 'uMag,gMag,rMag,iMag,zMag,yMag,muRa,muDecl,parallax,vRad,isVar,redshift']) # Dump object table execStmt("""SELECT o.objectId, o.ra_PS, o.decl_PS, AVG(s.taiMidPoint) FROM %s.Object%s AS o INNER JOIN %s.Source%s AS s ON (s.objectId = o.objectId) GROUP BY o.objectId ORDER BY o.decl_PS INTO OUTFILE '%s' FIELDS TERMINATED BY ','; """ % (database, tableSuffix, database, tableSuffix, objectCsv)) # Match reference objects to objects subprocess.call(['python', refPosMatch, filtCsv, objectCsv, matchCsv, '-s', '-r', str(radius), '-F', 'refObjectId,isStar,ra,decl,gLat,gLon,sedName,' + 'uMag,gMag,rMag,iMag,zMag,yMag,muRa,muDecl,parallax,vRad,isVar,redshift,' + 'uCov,gCov,rCov,iCov,zCov,yCov', '-f', 'objectId,ra,dec,epoch']) # Load filtered reference catalog and matches execStmt("""LOAD DATA INFILE '%s' INTO TABLE %s.SimRefObject%s FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"';""" % (filtCsv, database, tableSuffix)) execStmt("""LOAD DATA INFILE '%s' INTO TABLE %s.RefObjMatch%s FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"';""" % (matchCsv, database, tableSuffix)) execStmt("ALTER TABLE %s.SimRefObject%s ADD PRIMARY KEY (refObjectId);" % (database, tableSuffix)) execStmt("ALTER TABLE %s.SimRefObject%s ADD KEY (decl);" % (database, tableSuffix)) execStmt("ALTER TABLE %s.RefObjMatch%s ADD KEY (refObjectId);" % (database, tableSuffix)) execStmt("ALTER TABLE %s.RefObjMatch%s ADD KEY (objectId);" % (database, tableSuffix))
|
for io in ["input", "output"]:
    policy.set("CrRejectStage.%sKeys.exposure" % io, policy.get("CrRejectStage.%sKeys.exposure" % io))
|
def removeCRsSilently(self, exposure):
    """Remove CRs without trace"""
    mask = exposure.getMaskedImage().getMask()
    mask = mask.Factory(mask, True)  # save initial mask
|
|
exposure = afwImage.ExposureF(filename, 0,bbox)
|
exposure = afwImage.ExposureF(filename, 0, bbox)
exposure.setWcs(afwImage.makeWcs(exposure.getWcs().getFitsMetadata()))
|
def setUp(self):
    filename = os.path.join(eups.productDir("afwdata"), "CFHT", "D4", "cal-53535-i-797722_1")
    bbox = afwImage.BBox(afwImage.PointI(32,32), 512, 512)
    exposure = afwImage.ExposureF(filename, 0,bbox)
|
butler: @butlerInput.paf
|
butler: @PT1Pipe/butlerInput.paf
|
def isrProcess(f): print >>f, """ appStage: { name: isrInputRaw parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerInput.paf inputItems: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): for snap in (0, 1): channelName = '"%d,%d"' % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ isrExposure""" + channelSnap + """: { datasetType: raw datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: """ + str(snap) + """ channel: """ + channelName + """ } } }""" print >>f, """ } } } }""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelName = '"%d,%d"' % (channelX, channelY) channelId = "%d%d" % (channelX, channelY) print >>f, """ appStage: { name: isrInput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerInput.paf inputItems: { biasExposure: { datasetType: bias datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } darkExposure: { datasetType: dark datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } flatExposure: { datasetType: flat datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } } } } }""" for snap in (0, 1): channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ appStage: { name: isrSaturation""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrSaturationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { saturationMaskedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrOverscan""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrOverscanStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { overscanCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrBias""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrBiasStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ biasexposure: biasExposure } outputKeys: { biasSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrVariance""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrVarianceStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { varianceAddedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrDark""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrDarkStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ darkexposure: darkExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrFlat""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrFlatStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ flatexposure: flatExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } parameters: @ISR-flat.paf outputKeys: { flatCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrSdqaAmp""" + channelSnap + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } parameters: @ISR-sdqaAmp.paf outputKeys: 
{ isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } } }""" pass # end of snap loop print >>f, """ appStage: { name: isrOutput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 channel: """ + channelName + """ } } } sdqaRatingVector1: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 channel: """ + channelName + """ } } } } } } }"""
|
parameters: @ISR-flat.paf
|
parameters: @PT1Pipe/ISR-flat.paf
|
def isrProcess(f): print >>f, """ appStage: { name: isrInputRaw parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerInput.paf inputItems: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): for snap in (0, 1): channelName = '"%d,%d"' % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ isrExposure""" + channelSnap + """: { datasetType: raw datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: """ + str(snap) + """ channel: """ + channelName + """ } } }""" print >>f, """ } } } }""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelName = '"%d,%d"' % (channelX, channelY) channelId = "%d%d" % (channelX, channelY) print >>f, """ appStage: { name: isrInput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerInput.paf inputItems: { biasExposure: { datasetType: bias datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } darkExposure: { datasetType: dark datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } flatExposure: { datasetType: flat datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } } } } }""" for snap in (0, 1): channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ appStage: { name: isrSaturation""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrSaturationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { saturationMaskedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrOverscan""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrOverscanStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { overscanCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrBias""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrBiasStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ biasexposure: biasExposure } outputKeys: { biasSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrVariance""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrVarianceStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { varianceAddedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrDark""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrDarkStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ darkexposure: darkExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrFlat""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrFlatStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ flatexposure: flatExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } parameters: @ISR-flat.paf outputKeys: { flatCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrSdqaAmp""" + channelSnap + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } parameters: @ISR-sdqaAmp.paf outputKeys: 
{ isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } } }""" pass # end of snap loop print >>f, """ appStage: { name: isrOutput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 channel: """ + channelName + """ } } } sdqaRatingVector1: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 channel: """ + channelName + """ } } } } } } }"""
|
parameters: @ISR-sdqaAmp.paf
|
parameters: @PT1Pipe/ISR-sdqaAmp.paf
|
def isrProcess(f): print >>f, """ appStage: { name: isrInputRaw parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerInput.paf inputItems: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): for snap in (0, 1): channelName = '"%d,%d"' % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ isrExposure""" + channelSnap + """: { datasetType: raw datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: """ + str(snap) + """ channel: """ + channelName + """ } } }""" print >>f, """ } } } }""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelName = '"%d,%d"' % (channelX, channelY) channelId = "%d%d" % (channelX, channelY) print >>f, """ appStage: { name: isrInput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerInput.paf inputItems: { biasExposure: { datasetType: bias datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } darkExposure: { datasetType: dark datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } flatExposure: { datasetType: flat datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } } } } }""" for snap in (0, 1): channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ appStage: { name: isrSaturation""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrSaturationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { saturationMaskedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrOverscan""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrOverscanStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { overscanCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrBias""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrBiasStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ biasexposure: biasExposure } outputKeys: { biasSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrVariance""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrVarianceStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { varianceAddedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrDark""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrDarkStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ darkexposure: darkExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrFlat""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrFlatStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ flatexposure: flatExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } parameters: @ISR-flat.paf outputKeys: { flatCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrSdqaAmp""" + channelSnap + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } parameters: @ISR-sdqaAmp.paf outputKeys: 
{ isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } } }""" pass # end of snap loop print >>f, """ appStage: { name: isrOutput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 channel: """ + channelName + """ } } } sdqaRatingVector1: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 channel: """ + channelName + """ } } } } } } }"""
|
butler: @butlerUpdate.paf
|
butler: @PT1Pipe/butlerUpdate.paf
|
def isrProcess(f): print >>f, """ appStage: { name: isrInputRaw parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerInput.paf inputItems: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): for snap in (0, 1): channelName = '"%d,%d"' % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ isrExposure""" + channelSnap + """: { datasetType: raw datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: """ + str(snap) + """ channel: """ + channelName + """ } } }""" print >>f, """ } } } }""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelName = '"%d,%d"' % (channelX, channelY) channelId = "%d%d" % (channelX, channelY) print >>f, """ appStage: { name: isrInput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.InputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerInput.paf inputItems: { biasExposure: { datasetType: bias datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } darkExposure: { datasetType: dark datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } flatExposure: { datasetType: flat datasetId: { fromJobIdentity: "raft" "sensor" set: { channel: """ + channelName + """ } } } } } } }""" for snap in (0, 1): channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, """ appStage: { name: isrSaturation""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrSaturationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { saturationMaskedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrOverscan""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrOverscanStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { overscanCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrBias""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrBiasStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ biasexposure: biasExposure } outputKeys: { biasSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrVariance""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrVarianceStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } outputKeys: { varianceAddedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrDark""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrDarkStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ darkexposure: darkExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrFlat""" + channelSnap + """ parallelClass: lsst.ip.pipeline.IsrFlatStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ flatexposure: flatExposure } outputKeys: { darkSubtractedExposure: isrExposure""" + channelSnap + """ } parameters: @ISR-flat.paf outputKeys: { flatCorrectedExposure: isrExposure""" + channelSnap + """ } } } appStage: { name: isrSdqaAmp""" + channelSnap + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrExposure""" + channelSnap + """ } parameters: @ISR-sdqaAmp.paf outputKeys: 
{ isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } } }""" pass # end of snap loop print >>f, """ appStage: { name: isrOutput""" + channelId + """ parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 channel: """ + channelName + """ } } } sdqaRatingVector1: { datasetType: sdqaAmp datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 channel: """ + channelName + """ } } } } } } }"""
|
parameters: @ISR-sdqaCcd.paf
|
parameters: @PT1Pipe/ISR-sdqaCcd.paf
|
def ccdAssemblyProcess(f): for snap in (0, 1): print >>f, """ appStage: { name: ccdAssemblyCcdList""" + str(snap) + """ parallelClass: lsst.datarel.ObjectListStageParallel eventTopic: None stagePolicy: { inputKeys: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelId = "%d%d" % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, " object: isrExposure" + channelSnap print >>f, """ } outputKeys: { objectList: exposureList""" + str(snap) + """ } } } appStage: { name: ccdAssemblyIsrCcdAssembly""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdAssemblyStageParallel eventTopic: None inputKeys: { exposureList: exposureList""" + str(snap) + """ } outputKeys: { assembledCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdDefect""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdDefectStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdSdqa""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdSdqaStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { sdqaCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblySdqaCcd""" + str(snap) + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None inputKeys: { exposureKey: isrExposure""" + str(snap) + """ } parameters: @ISR-sdqaCcd.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } }""" print >>f, """ appStage: { name: ccdAssemblyOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 } } } sdqaRatingVector1: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 } } } } } } }""" print >>f, """ appStage: { name: ccdAssemblyFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { isrCcdExposure0: isrExposure0 isrCcdExposure1: isrExposure1 } parameters: { pipeline: CcdAssembly } outputKeys: { isrCcdExposure0: isrCcdExposure0 isrCcdExposure1: isrCcdExposure1 } } }"""
|
butler: @butlerUpdate.paf
|
butler: @PT1Pipe/butlerUpdate.paf
|
def ccdAssemblyProcess(f): for snap in (0, 1): print >>f, """ appStage: { name: ccdAssemblyCcdList""" + str(snap) + """ parallelClass: lsst.datarel.ObjectListStageParallel eventTopic: None stagePolicy: { inputKeys: {""" for channelX in (0, 1): for channelY in (0, 1, 2, 3, 4, 5, 6, 7): channelId = "%d%d" % (channelX, channelY) channelSnap = "%d%d_%d" % (channelX, channelY, snap) print >>f, " object: isrExposure" + channelSnap print >>f, """ } outputKeys: { objectList: exposureList""" + str(snap) + """ } } } appStage: { name: ccdAssemblyIsrCcdAssembly""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdAssemblyStageParallel eventTopic: None inputKeys: { exposureList: exposureList""" + str(snap) + """ } outputKeys: { assembledCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdDefect""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdDefectStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblyIsrCcdSdqa""" + str(snap) + """ parallelClass: lsst.ip.pipeline.IsrCcdSdqaStageParallel eventTopic: None inputKeys: { ccdExposure: isrExposure""" + str(snap) + """ } outputKeys: { sdqaCcdExposure: isrExposure""" + str(snap) + """ } } appStage: { name: ccdAssemblySdqaCcd""" + str(snap) + """ parallelClass: lsst.sdqa.pipeline.IsrSdqaStageParallel eventTopic: None inputKeys: { exposureKey: isrExposure""" + str(snap) + """ } parameters: @ISR-sdqaCcd.paf outputKeys: { isrPersistableSdqaRatingVectorKey: sdqaRatingVector""" + str(snap) + """ } }""" print >>f, """ appStage: { name: ccdAssemblyOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sdqaRatingVector0: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 0 } } } sdqaRatingVector1: { datasetType: sdqaCcd datasetId: { fromJobIdentity: "visit" "raft" "sensor" set: { snap: 1 } } } } } } }""" print >>f, """ appStage: { name: ccdAssemblyFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { isrCcdExposure0: isrExposure0 isrCcdExposure1: isrExposure1 } parameters: { pipeline: CcdAssembly } outputKeys: { isrCcdExposure0: isrCcdExposure0 isrCcdExposure1: isrCcdExposure1 } } }"""
|
parameters: @CrSplit-backgroundEstimation.paf
|
parameters: @PT1Pipe/CrSplit-backgroundEstimation.paf
|
def crSplitProcess(f): print >>f, """ appStage: { name: crSplitBackgroundEstimation0 parallelClass: lsst.meas.pipeline.BackgroundEstimationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrCcdExposure0 } outputKeys: { backgroundSubtractedExposure: bkgSubCcdExposure0 } parameters: @CrSplit-backgroundEstimation.paf } appStage: { name: crSplitBackgroundEstimation1 parallelClass: lsst.meas.pipeline.BackgroundEstimationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrCcdExposure1 } outputKeys: { backgroundSubtractedExposure: bkgSubCcdExposure1 } parameters: @CrSplit-backgroundEstimation.paf } } appStage: { name: crSplitCrReject0 parallelClass: lsst.ip.pipeline.CrRejectStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: bkgSubCcdExposure0 } outputKeys: { exposure: crSubCcdExposure0 } parameters: @CrSplit-crReject.paf crRejectPolicy: @CrSplit-crReject-algorithm.paf } } appStage: { name: crSplitCrReject1 parallelClass: lsst.ip.pipeline.CrRejectStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: bkgSubCcdExposure1 } outputKeys: { exposure: crSubCcdExposure1 } parameters: @CrSplit-crReject.paf crRejectPolicy: @CrSplit-crReject-algorithm.paf } } """ print >>f, """ appStage: { name: crSplitFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { visitExposure: crSubCcdExposure0 } parameters: { pipeline: CrSplit } outputKeys: { visitExposure: visitExposure } } }"""
|
parameters: @CrSplit-crReject.paf crRejectPolicy: @CrSplit-crReject-algorithm.paf
|
parameters: @PT1Pipe/CrSplit-crReject.paf crRejectPolicy: @PT1Pipe/CrSplit-crReject-algorithm.paf
|
def crSplitProcess(f): print >>f, """ appStage: { name: crSplitBackgroundEstimation0 parallelClass: lsst.meas.pipeline.BackgroundEstimationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrCcdExposure0 } outputKeys: { backgroundSubtractedExposure: bkgSubCcdExposure0 } parameters: @CrSplit-backgroundEstimation.paf } appStage: { name: crSplitBackgroundEstimation1 parallelClass: lsst.meas.pipeline.BackgroundEstimationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrCcdExposure1 } outputKeys: { backgroundSubtractedExposure: bkgSubCcdExposure1 } parameters: @CrSplit-backgroundEstimation.paf } } appStage: { name: crSplitCrReject0 parallelClass: lsst.ip.pipeline.CrRejectStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: bkgSubCcdExposure0 } outputKeys: { exposure: crSubCcdExposure0 } parameters: @CrSplit-crReject.paf crRejectPolicy: @CrSplit-crReject-algorithm.paf } } appStage: { name: crSplitCrReject1 parallelClass: lsst.ip.pipeline.CrRejectStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: bkgSubCcdExposure1 } outputKeys: { exposure: crSubCcdExposure1 } parameters: @CrSplit-crReject.paf crRejectPolicy: @CrSplit-crReject-algorithm.paf } } """ print >>f, """ appStage: { name: crSplitFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { visitExposure: crSubCcdExposure0 } parameters: { pipeline: CrSplit } outputKeys: { visitExposure: visitExposure } } }"""
|
psfPolicy: @ImgChar-sourceDetect-psf.paf backgroundPolicy: @ImgChar-sourceDetect-background.paf
|
psfPolicy: @PT1Pipe/ImgChar-sourceDetect-psf.paf backgroundPolicy: @PT1Pipe/ImgChar-sourceDetect-background.paf
|
def imgCharProcess(f): print >>f, """ appStage: { name: icSourceDetect parallelClass: lsst.meas.pipeline.SourceDetectionStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure } outputKeys: { positiveDetection: positiveFootprintSet negativeDetection: negativeFootprintSet psf: simplePsf } psfPolicy: @ImgChar-sourceDetect-psf.paf backgroundPolicy: @ImgChar-sourceDetect-background.paf } } appStage: { name: icSourceMeasure parallelClass: lsst.meas.pipeline.SourceMeasurementStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure psf: simplePsf positiveDetection: positiveFootprintSet negativeDetection: negativeFootprintSet } outputKeys: { sources: sourceSet } } } appStage: { name: icPsfDetermination parallelClass: lsst.meas.pipeline.PsfDeterminationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure sourceSet: sourceSet } outputKeys: { psf: measuredPsf cellSet: cellSet sdqa: sdqa } } } appStage: { name: icWcsDetermination parallelClass: lsst.meas.pipeline.WcsDeterminationStageParallel eventTopic: None stagePolicy: @ImgChar-wcsDetermination.paf } appStage: { name: icWcsVerification parallelClass: lsst.meas.pipeline.WcsVerificationStageParallel eventTopic: None stagePolicy: { sourceMatchSetKey: matchList } } appStage: { name: icPhotoCal parallelClass: lsst.meas.pipeline.PhotoCalStageParallel eventTopic: None stagePolicy: { sourceMatchSetKey: matchList outputValueKey: photometricMagnitudeObject } }""" print >>f, """ appStage: { name: icOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sourceSet_persistable: { datasetType: icSrc datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } measuredPsf: { datasetType: psf datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } visitExposure: { datasetType: calexp datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } } } } }""" print >>f, """ appStage: { name: icFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { calibratedExposure: visitExposure psf: measuredPsf } parameters: { pipeline: ImgChar } outputKeys: { calibratedExposure: scienceExposure psf: psf } } }"""
|
stagePolicy: @ImgChar-wcsDetermination.paf
|
stagePolicy: @PT1Pipe/ImgChar-wcsDetermination.paf
|
def imgCharProcess(f): print >>f, """ appStage: { name: icSourceDetect parallelClass: lsst.meas.pipeline.SourceDetectionStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure } outputKeys: { positiveDetection: positiveFootprintSet negativeDetection: negativeFootprintSet psf: simplePsf } psfPolicy: @ImgChar-sourceDetect-psf.paf backgroundPolicy: @ImgChar-sourceDetect-background.paf } } appStage: { name: icSourceMeasure parallelClass: lsst.meas.pipeline.SourceMeasurementStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure psf: simplePsf positiveDetection: positiveFootprintSet negativeDetection: negativeFootprintSet } outputKeys: { sources: sourceSet } } } appStage: { name: icPsfDetermination parallelClass: lsst.meas.pipeline.PsfDeterminationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure sourceSet: sourceSet } outputKeys: { psf: measuredPsf cellSet: cellSet sdqa: sdqa } } } appStage: { name: icWcsDetermination parallelClass: lsst.meas.pipeline.WcsDeterminationStageParallel eventTopic: None stagePolicy: @ImgChar-wcsDetermination.paf } appStage: { name: icWcsVerification parallelClass: lsst.meas.pipeline.WcsVerificationStageParallel eventTopic: None stagePolicy: { sourceMatchSetKey: matchList } } appStage: { name: icPhotoCal parallelClass: lsst.meas.pipeline.PhotoCalStageParallel eventTopic: None stagePolicy: { sourceMatchSetKey: matchList outputValueKey: photometricMagnitudeObject } }""" print >>f, """ appStage: { name: icOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sourceSet_persistable: { datasetType: icSrc datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } measuredPsf: { datasetType: psf datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } visitExposure: { datasetType: calexp datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } } } } }""" print >>f, """ appStage: { name: icFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { calibratedExposure: visitExposure psf: measuredPsf } parameters: { pipeline: ImgChar } outputKeys: { calibratedExposure: scienceExposure psf: psf } } }"""
|
butler: @butlerUpdate.paf
|
butler: @PT1Pipe/butlerUpdate.paf
|
def imgCharProcess(f): print >>f, """ appStage: { name: icSourceDetect parallelClass: lsst.meas.pipeline.SourceDetectionStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure } outputKeys: { positiveDetection: positiveFootprintSet negativeDetection: negativeFootprintSet psf: simplePsf } psfPolicy: @ImgChar-sourceDetect-psf.paf backgroundPolicy: @ImgChar-sourceDetect-background.paf } } appStage: { name: icSourceMeasure parallelClass: lsst.meas.pipeline.SourceMeasurementStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure psf: simplePsf positiveDetection: positiveFootprintSet negativeDetection: negativeFootprintSet } outputKeys: { sources: sourceSet } } } appStage: { name: icPsfDetermination parallelClass: lsst.meas.pipeline.PsfDeterminationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: visitExposure sourceSet: sourceSet } outputKeys: { psf: measuredPsf cellSet: cellSet sdqa: sdqa } } } appStage: { name: icWcsDetermination parallelClass: lsst.meas.pipeline.WcsDeterminationStageParallel eventTopic: None stagePolicy: @ImgChar-wcsDetermination.paf } appStage: { name: icWcsVerification parallelClass: lsst.meas.pipeline.WcsVerificationStageParallel eventTopic: None stagePolicy: { sourceMatchSetKey: matchList } } appStage: { name: icPhotoCal parallelClass: lsst.meas.pipeline.PhotoCalStageParallel eventTopic: None stagePolicy: { sourceMatchSetKey: matchList outputValueKey: photometricMagnitudeObject } }""" print >>f, """ appStage: { name: icOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sourceSet_persistable: { datasetType: icSrc datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } measuredPsf: { datasetType: psf datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } visitExposure: { datasetType: calexp datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } } } } }""" print >>f, """ appStage: { name: icFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { calibratedExposure: visitExposure psf: measuredPsf } parameters: { pipeline: ImgChar } outputKeys: { calibratedExposure: scienceExposure psf: psf } } }"""
|
backgroundPolicy: @SFM-sourceDetect-background.paf
|
backgroundPolicy: @PT1Pipe/SFM-sourceDetect-background.paf
|
def sfmProcess(f): print >>f, """ appStage: { name: sfmSourceDetect parallelClass: lsst.meas.pipeline.SourceDetectionStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: scienceExposure psf: psf } outputKeys: { positiveDetection: positiveFootprintSet } backgroundPolicy: @SFM-sourceDetect-background.paf } } appStage: { name: sfmSourceMeasure parallelClass: lsst.meas.pipeline.SourceMeasurementStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: scienceExposure psf: psf positiveDetection: positiveFootprintSet } outputKeys: { sources: sourceSet } } } appStage: { name: sfmComputeSourceSkyCoords parallelClass: lsst.meas.pipeline.ComputeSourceSkyCoordsStageParallel eventTopic: None stagePolicy: { inputKeys: { sources: sourceSet exposure: scienceExposure } } }""" print >>f, """ appStage: { name: sfmOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sourceSet_persistable: { datasetType: src datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } } } } }"""
|
butler: @butlerUpdate.paf
|
butler: @PT1Pipe/butlerUpdate.paf
|
def sfmProcess(f): print >>f, """ appStage: { name: sfmSourceDetect parallelClass: lsst.meas.pipeline.SourceDetectionStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: scienceExposure psf: psf } outputKeys: { positiveDetection: positiveFootprintSet } backgroundPolicy: @SFM-sourceDetect-background.paf } } appStage: { name: sfmSourceMeasure parallelClass: lsst.meas.pipeline.SourceMeasurementStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: scienceExposure psf: psf positiveDetection: positiveFootprintSet } outputKeys: { sources: sourceSet } } } appStage: { name: sfmComputeSourceSkyCoords parallelClass: lsst.meas.pipeline.ComputeSourceSkyCoordsStageParallel eventTopic: None stagePolicy: { inputKeys: { sources: sourceSet exposure: scienceExposure } } }""" print >>f, """ appStage: { name: sfmOutput parallelClass: lsst.pex.harness.IOStage.OutputStageParallel eventTopic: None stagePolicy: { parameters: { butler: @butlerUpdate.paf outputItems: { sourceSet_persistable: { datasetType: src datasetId: { fromJobIdentity: "visit" "raft" "sensor" } } } } } }"""
|
sql.createDb(database)
sql.execScript(os.path.join(catDir, 'sql', 'lsstSchema4mysqlDC3b.sql'))
|
def main():
    usage = dedent("""\
    usage: %prog [options] <database>

    Program which creates an LSST run database and instantiates the LSST schema
    therein. Indexes on tables which will be loaded by the various datarel ingest
    scripts are disabled. Once loading has finished, the finishDb.py script should
    be run to re-enable them.

    <database>: Name of database to create and instantiate the LSST schema in.
    """)
    parser = optparse.OptionParser(usage)
    addDbOptions(parser)
    opts, args = parser.parse_args()
    if len(args) != 1:
        parser.error("A single argument (database name) must be supplied.")
    database = args[0]
    if opts.user == None:
        parser.error("No database user name specified and $USER is undefined or empty")
    if 'CAT_DIR' not in os.environ or len(os.environ['CAT_DIR']) == 0:
        parser.error("$CAT_DIR is undefined or empty - please setup the cat " +
                     "package and try again.")
    catDir = os.environ['CAT_DIR']
    sql = MysqlExecutor(opts.host, database, opts.user, opts.port)
    #sql.createDb(database)
    #try:
    #    sql.execScript(os.path.join(catDir, 'sql', 'lsstSchema4mysqlDC3b.sql'))
    #except:
    #    pass
    sql.execScript(os.path.join(catDir, 'sql', 'setup_perRunTables.sql'))
    sql.execScript(os.path.join(catDir, 'sql', 'setup_storedFunctions.sql'))
    sql.execScript(os.path.join(catDir, 'sql', 'setup_sdqa.sql'))
    # BadSource is exactly like Source, and may not be present in the schema
    sql.execStmt("CREATE TABLE IF NOT EXISTS BadSource LIKE Source;")
    # Disable indexes on tables for faster loading
    for table in loadTables:
        sql.execStmt("ALTER TABLE %s DISABLE KEYS;" % table)
|
|
for tableName in ('BadSource', 'Source', 'Object'):
|
for tableName in ('BadSource', 'Source', 'Object', 'SimRefObject', 'RefObjMatch'):
|
def setupDb(database, tableSuffix=""):
    execStmt("CREATE DATABASE IF NOT EXISTS %s;" % database)
    for tableName in ('BadSource', 'Source', 'Object'):
        execStmt("CREATE TABLE %s.%s%s LIKE pt1_templates.%s" % (database, tableName, tableSuffix, tableName))
|
filename="/lsst/daues/ipac/add_ons/afwdata-ImSim/processed/imsim_85751839_R23_S11_C00_E000.fits" bbox = afwImage.BBox(afwImage.PointI(32,32), 512, 512) exposure = afwImage.ExposureF(filename, 0,bbox)
|
filename = os.path.join(eups.productDir("afwdata"), "ImSim", "postISR", "v85751839-fr", "s0", "R23", "S11", "C00.fits")
bbox = afwImage.BBox(afwImage.PointI(0,0), 512, 512)
exposure = afwImage.ExposureF(filename, 0, bbox)
|
def setUp(self):
    # filename = os.path.join(eups.productDir("afwdata"), "CFHT", "D4", "cal-53535-i-797722_1")
|
exposure: visitim
|
def crSplitProcess(f): print >>f, """ appStage: { name: crSplitBackgroundEstimation0 parallelClass: lsst.meas.pipeline.BackgroundEstimationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrCcdExposure0 } outputKeys: { backgroundSubtractedExposure: bkgSubCcdExposure0 } parameters: @CrSplit-backgroundEstimation.paf } appStage: { name: crSplitBackgroundEstimation1 parallelClass: lsst.meas.pipeline.BackgroundEstimationStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: isrCcdExposure1 } outputKeys: { backgroundSubtractedExposure: bkgSubCcdExposure1 } parameters: @CrSplit-backgroundEstimation.paf } } appStage: { name: crSplitCrReject0 parallelClass: lsst.ip.pipeline.CrRejectStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: bkgSubCcdExposure0 } outputKeys: { exposure: visitim exposure: crSubCcdExposure0 } parameters: @CrSplit-crReject.paf crRejectPolicy: @CrSplit-crReject-algorithm.paf } } appStage: { name: crSplitCrReject1 parallelClass: lsst.ip.pipeline.CrRejectStageParallel eventTopic: None stagePolicy: { inputKeys: { exposure: bkgSubCcdExposure1 } outputKeys: { exposure: crSubCcdExposure1 } parameters: @CrSplit-crReject.paf crRejectPolicy: @CrSplit-crReject-algorithm.paf } } """ print >>f, """ appStage: { name: crSplitFixup parallelClass: lsst.datarel.FixupStageParallel eventTopic: None stagePolicy: { inputKeys: { visitExposure: crSubCcdExposure0 } parameters: { pipeline: CrSplit } outputKeys: { visitExposure: visitExposure } } }"""
|
|
num, cluster, password = station
num = int(num)
station_list[num] = (cluster, password)
|
if station:
    num, cluster, password = station
    num = int(num)
    station_list[num] = (cluster, password)
|
def do_init(configfile):
    """Load configuration and passwords and set up a logger handler

    This function will do one-time initialization. By using global variables,
    we eliminate the need to reread configuration and passwords on every request.
    """
    # set up config
    global config
    try:
        config
    except NameError:
        config = ConfigParser.ConfigParser()
        config.read(configfile)

    # set up logger
    if not logger.handlers:
        file = config.get('General', 'log') + '-wsgi.%d' % os.getpid()
        handler = logging.handlers.TimedRotatingFileHandler(file, when='midnight', backupCount=14)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        level = LEVELS.get(config.get('General', 'loglevel'), logging.NOTSET)
        logger.setLevel(level=level)

    # read station list
    global station_list
    try:
        station_list
    except NameError:
        station_list = {}
        with open(config.get('General', 'station_list')) as file:
            reader = csv.reader(file)
            for station in reader:
                num, cluster, password = station
                num = int(num)
                station_list[num] = (cluster, password)
|
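The "if station:" guard added in this row skips blank lines in the station list: csv.reader yields an empty list for an empty row, which would otherwise break the three-element unpack. A self-contained sketch with made-up station data (Python 2, matching the surrounding code):

import csv
from StringIO import StringIO   # Python 2, matching the snippet above

data = "501,amsterdam,secret\n\n502,utrecht,hunter2\n"
station_list = {}
for station in csv.reader(StringIO(data)):
    if station:                        # blank lines come through as []
        num, cluster, password = station
        station_list[int(num)] = (cluster, password)

assert station_list == {501: ('amsterdam', 'secret'), 502: ('utrecht', 'hunter2')}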
system = models.BooleanField(blank=True, null=True)
|
system = models.BooleanField(blank=True, default=False)
|
def get_protocol(self):
    return PROTOCOLS[str(self.protocol)]
|
allow_express_pay = models.BooleanField(verbose_name=u'Оплата экспресс картами', blank=True, null=True, default=False)
|
allow_express_pay = models.BooleanField(verbose_name=u'Оплата экспресс картами', blank=True, default=False)
|
def __unicode__(self):
    return u"%s" % self.name
|
database.flush()
|
def update(self, documents=None, after_index=None, per_page=10000, commit_each=False):
    """
    Update the database with the documents.
    There are some default value and terms in a document:

     * Values:
        1. Used to store the ID of the document
        2. Store the model of the object (in the string format, like "project.app.model")
        3. Store the indexer descriptor (module path)
        4..10. Free
|
|
def handle(self, verbose=False, make_daemon=False, timeout=10,
|
def handle(self, verbose=False, make_daemon=False, loop=False, timeout=10,
|
def handle(self, verbose=False, make_daemon=False, timeout=10,
           rebuild_index=False, per_page=1000, commit_each=False, *args, **options):
    utils.load_indexes()
|
update_changes(verbose, timeout, not make_daemon, per_page, commit_each)
|
update_changes(verbose, timeout, not (loop or make_daemon), per_page, commit_each)
|
def handle(self, verbose=False, make_daemon=False, timeout=10,
           rebuild_index=False, per_page=1000, commit_each=False, *args, **options):
    utils.load_indexes()
|
return list(self._clone(offset=k, limit=1))[k]
|
return iter(self._clone(offset=k, limit=1)).next()
|
def __getitem__(self, k):
    if not isinstance(k, (slice, int, long)):
        raise TypeError
    if not ((not isinstance(k, slice) and (k >= 0))
            or (isinstance(k, slice) and (k.start is None or k.start >= 0)
                and (k.stop is None or k.stop >= 0))):
        raise IndexError, "Negative indexing is not supported."
|
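The replacement in this row works because self._clone(offset=k, limit=1) already positions the result set at element k, leaving a one-element window; indexing that window with [k] again raises IndexError for any k > 0, while taking the first element of its iterator returns the intended object. A plain-list sketch of the same pattern (Python 2, matching the .next() call in the snippet):

items = ['a', 'b', 'c', 'd']
k = 2

window = items[k:k + 1]      # offset=k, limit=1 -> a one-element window
try:
    value = window[k]        # the removed form: IndexError for k > 0
except IndexError:
    value = iter(window).next()   # the added form: take the single element
assert value == 'c'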
from djapian.tests.utils import WeightenedIndexerTest, WeightenedEntry
|
def test_result(self):
    self.assertEqual(len(self.result), 2)
|
|
print list(self._current_index.search(query)[self._parse_slice(_slice)])
|
try:
    _slice = self._parse_slice(_slice)
except ValueError:
    print 'Error: second argument must be slice'
    return
print list(self._current_index.search(query)[_slice])
|
def do_query(self, query, _slice=''):
    """
    Returns objects fetched by given query
    """
    print list(self._current_index.search(query)[self._parse_slice(_slice)])
|
print 'Illegal index alias `%s`. See `list` command for available aliases' % index
|
print 'Error: illegal index alias `%s`. See `list` command for available aliases' % index
|
def _get_indexer(self, index):
    try:
        _space, _model, _indexer = map(int, index.split('.'))
|
def get_content_types(*actions):
    types = Change.objects.filter(action__in=actions)\
|
def get_content_types(app_models, *actions):
    lookup_args = dict(action__in=actions)
    if app_models is not None:
        ct_list = [ContentType.objects.get_for_model(model) for model in app_models]
        lookup_args.update(dict(content_type__in=ct_list))
    types = Change.objects.filter(models.Q(**lookup_args))\
|
def get_content_types(*actions):
    types = Change.objects.filter(action__in=actions)\
                          .values_list('content_type', flat=True)\
                          .distinct()
    return ContentType.objects.filter(pk__in=types)
|
def update_changes(verbose, timeout, once, per_page, commit_each):
|
def update_changes(verbose, timeout, once, per_page, commit_each, app_models=None):
|
def update_changes(verbose, timeout, once, per_page, commit_each): counter = [0] def reset_counter(): counter[0] = 0 def after_index(obj): counter[0] += 1 if verbose: sys.stdout.write('.') sys.stdout.flush() commiter = Commiter.create(commit_each)( lambda: None, transaction.commit, transaction.rollback ) while True: count = Change.objects.count() if count > 0 and verbose: print 'There are %d objects to update' % count for ct in get_content_types('add', 'edit'): indexers = get_indexers(ct) for page in paginate( Change.objects.filter(content_type=ct, action__in=('add', 'edit'))\ .select_related('content_type')\ .order_by('object_id'), per_page ):# The objects must be sorted by date commiter.begin_page() try: for indexer in indexers: indexer.update( ct.model_class()._default_manager.filter( pk__in=[c.object_id for c in page.object_list] ).order_by('pk'), after_index, per_page, commit_each ) for change in page.object_list: change.delete() commiter.commit_page() except Exception: if commit_each: for change in page.object_list[:counter[0]]: change.delete() commiter.commit_object() else: commiter.cancel_page() raise reset_counter() for ct in get_content_types('delete'): indexers = get_indexers(ct) for change in Change.objects.filter(content_type=ct, action='delete'): for indexer in indexers: indexer.delete(change.object_id) change.delete() # If using transactions and running Djapian as a daemon, transactions # need to be committed on each iteration, otherwise Djapian will not # catch changes. We also need to use the commit_manually decorator. # # Background information: # # Autocommit is turned off by default according to PEP 249. # PEP 249 states "Database modules that do not support transactions # should implement this method with void functionality". # Consistent Nonlocking Reads (InnoDB): # http://dev.mysql.com/doc/refman/5.0/en/innodb-consistent-read-example.html transaction.commit() if once: break time.sleep(timeout)
|
for ct in get_content_types('add', 'edit'):
|
for ct in get_content_types(app_models, 'add', 'edit'):
|
def after_index(obj): counter[0] += 1
|
for ct in get_content_types('delete'):
|
for ct in get_content_types(app_models, 'delete'):
|
def after_index(obj): counter[0] += 1
|
def rebuild(verbose, per_page, commit_each):
|
def rebuild(verbose, per_page, commit_each, app_models=None):
|
def rebuild(verbose, per_page, commit_each): def after_index(obj): if verbose: sys.stdout.write('.') sys.stdout.flush() for space in IndexSpace.instances: for model, indexers in space.get_indexers().iteritems(): for indexer in indexers: indexer.clear() indexer.update(None, after_index, per_page, commit_each)
|
for indexer in indexers: indexer.clear() indexer.update(None, after_index, per_page, commit_each)
|
if app_models is None or model in app_models: for indexer in indexers: indexer.clear() indexer.update(None, after_index, per_page, commit_each)
|
def after_index(obj): if verbose: sys.stdout.write('.') sys.stdout.flush()
|
make_option('--verbose', action='store_true', default=False,
|
make_option('--verbose', dest='verbose', default=False, action='store_true',
|
def after_index(obj): if verbose: sys.stdout.write('.') sys.stdout.flush()
|
make_option('--time-out', dest='timeout', default=10, type='int',
|
make_option('--time-out', dest='timeout', default=10, action='store', type='int',
|
def after_index(obj): if verbose: sys.stdout.write('.') sys.stdout.flush()
|
action='store', type=int,
|
action='store', type='int',
|
def after_index(obj): if verbose: sys.stdout.write('.') sys.stdout.flush()
|
def handle(self, verbose=False, make_daemon=False, loop=False, timeout=10, rebuild_index=False, per_page=1000, commit_each=False, *args, **options):
|
def handle(self, *app_labels, **options): verbose = options['verbose'] make_daemon = options['make_daemon'] loop = options['loop'] timeout = options['timeout'] rebuild_index = options['rebuild_index'] per_page = options['per_page'] commit_each = options['commit_each']
|
def handle(self, verbose=False, make_daemon=False, loop=False, timeout=10, rebuild_index=False, per_page=1000, commit_each=False, *args, **options): utils.load_indexes()
|
if rebuild_index: rebuild(verbose, per_page, commit_each)
|
if app_labels: try: app_list = [models.get_app(app_label) for app_label in app_labels] except (ImproperlyConfigured, ImportError), e: raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e) for app in app_list: app_models = models.get_models(app, include_auto_created=True) if rebuild_index: rebuild(verbose, per_page, commit_each, app_models) else: update_changes(verbose, timeout, not (loop or make_daemon), per_page, commit_each, app_models)
|
def handle(self, verbose=False, make_daemon=False, loop=False, timeout=10, rebuild_index=False, per_page=1000, commit_each=False, *args, **options): utils.load_indexes()
|
update_changes(verbose, timeout, not (loop or make_daemon), per_page, commit_each)
|
if rebuild_index: rebuild(verbose, per_page, commit_each) else: update_changes(verbose, timeout, not (loop or make_daemon), per_page, commit_each)
|
def handle(self, verbose=False, make_daemon=False, loop=False, timeout=10, rebuild_index=False, per_page=1000, commit_each=False, *args, **options): utils.load_indexes()
|
return value
|
return smart_str(value)
|
def convert(self, field_value): """ Generates index values (for sorting) for given field value and its content type """ if field_value is None: return None
|
return u', '.join(map(smart_str, value))
|
return ', '.join(map(smart_str, value))
|
def resolve(self, value): bits = self.path.split(".")
|
index_value = field.convert(value) if index_value is not None: doc.add_value(field.number, smart_str(index_value))
|
doc.add_value(field.number, field.convert(value))
|
def update(self, documents=None, after_index=None, per_page=10000, commit_each=False): """ Update the database with the documents. There are some default value and terms in a document: * Values: 1. Used to store the ID of the document 2. Store the model of the object (in the string format, like "project.app.model") 3. Store the indexer descriptor (module path) 4..10. Free
|
generator.index_text(smart_str(value), field.weight, prefix)
|
value = smart_str(value) generator.index_text(value, field.weight, prefix)
|
def update(self, documents=None, after_index=None, per_page=10000, commit_each=False): """ Update the database with the documents. There are some default value and terms in a document: * Values: 1. Used to store the ID of the document 2. Store the model of the object (in the string format, like "project.app.model") 3. Store the indexer descriptor (module path) 4..10. Free
|
generator.index_text(smart_str(value), field.weight)
|
generator.index_text(value, field.weight)
|
def update(self, documents=None, after_index=None, per_page=10000, commit_each=False): """ Update the database with the documents. There are some default value and terms in a document: * Values: 1. Used to store the ID of the document 2. Store the model of the object (in the string format, like "project.app.model") 3. Store the indexer descriptor (module path) 4..10. Free
|
doc = match.get_document()
|
doc = match.document
|
def _parse_results(self): self._resultset_cache = []
|
percent = match.get_percent() rank = match.get_rank() weight = match.get_weight()
|
percent = match.percent rank = match.rank weight = match.weight
|
def _parse_results(self): self._resultset_cache = []
|
collapse_count = match.get_collapse_count() or None collapse_key = match.get_collapse_key() or None
|
collapse_count = match.collapse_count or None collapse_key = match.collapse_key or None
|
def _parse_results(self): self._resultset_cache = []
|
value = u', '.join(
|
value = ', '.join(
|
def resolve(self, value): bits = self.path.split(".")
|
assert ((not isinstance(k, slice) and (k >= 0))
|
if not ((not isinstance(k, slice) and (k >= 0))
|
def __getitem__(self, k): if not isinstance(k, (slice, int, long)): raise TypeError assert ((not isinstance(k, slice) and (k >= 0)) or (isinstance(k, slice) and (k.start is None or k.start >= 0) and (k.stop is None or k.stop >= 0))), \ "Negative indexing is not supported."
|
and (k.stop is None or k.stop >= 0))), \ "Negative indexing is not supported."
|
and (k.stop is None or k.stop >= 0))): raise IndexError, "Negative indexing is not supported."
|
def __getitem__(self, k): if not isinstance(k, (slice, int, long)): raise TypeError assert ((not isinstance(k, slice) and (k >= 0)) or (isinstance(k, slice) and (k.start is None or k.start >= 0) and (k.stop is None or k.stop >= 0))), \ "Negative indexing is not supported."
|
if indices_tally == 2: indices_tally = 0
|
else: continue if indices_tally == 1: continue elif indices_tally == 2:
|
def read_pb_partitions(sampled_partitions_file, from_row=0, to_row=None, read_rates=True): site_indices_regex = r'^\d+\t\d+\t\d+\t\d+\t\d+\t\d+\t\d+\t\d+\t\d+\t\d+' site_indices_pattern = re.compile(site_indices_regex) posterior = PosteriorOfPartitions() line_iter = iter(sampled_partitions_file) indices_tally = 0 sample_tally = 0 for line_num, line in enumerate(line_iter): x = line.strip() indices_match = site_indices_pattern.search(x) if indices_match: indices_tally += 1 if indices_tally == 2: indices_tally = 0 sample_tally += 1 if from_row and from_row > sample_tally-1: _LOG.info("ignoring sample %d from '%s'\n" % (sample_tally-1, sampled_partitions_file.name)) continue if to_row and to_row < sample_tally-1: break _LOG.info("reading sample %d from '%s'\n" % (sample_tally-1, sampled_partitions_file.name)) next_line = line_iter.next().strip() if next_line == 'rates': number_of_subsets = int(line_iter.next().strip()) else: number_of_subsets = int(next_line) partition = Partition([], id=str(sample_tally)) rates = [] for i in xrange(number_of_subsets): r = line_iter.next().strip().split() assert len(r) == 2 if read_rates: rate = float(r[0]) else: rate = 1.0 subset = Subset(set(), rate = rate) rates.append(rate) partition.add_subset(subset) assignments = line_iter.next().strip().split() for index, el in enumerate(assignments): iel = int(el) partition.subsets[iel].add_index(index) if read_rates: assert partition.subsets[iel].rate == float(rates[iel]) assert partition.number_of_subsets == len(rates) == number_of_subsets posterior.add_partition(partition) return posterior
|
if hasattr(self, 'pack'): del self.pack if hasattr(self, 'part'): del self.part
|
def teardown_class(cls): if hasattr(self, 'pack'): del self.pack if hasattr(self, 'part'): del self.part cls.remove_testfile()
|
|
part_edit(args.path)
|
part_edit(args.path, args.reformat_xml)
|
def part_edit_cmd(): 'Edit a part from an OOXML Package without unzipping it' parser = argparse.ArgumentParser(description=inspect.getdoc(part_edit_cmd)) parser.add_argument('path', help='Path to part (including path to zip file, i.e. ./file.zipx/part)') args = parser.parse_args() part_edit(args.path)
|
def part_edit(path):
|
def part_edit(path, reformat_xml):
|
def part_edit(path): file, ipath = find_file(path) pkg = Package.from_file(file) part = pkg['/'+ipath] ef = EditableFile(part.dump()) ef.edit(ipath) if ef.changed: part.load(ef.data) pkg.save()
|
ef = EditableFile(part.dump())
|
data = part.dump() if reformat_xml: data = etree.tostring(etree.fromstring(data), pretty_print=True) ef = EditableFile(data)
|
def part_edit(path): file, ipath = find_file(path) pkg = Package.from_file(file) part = pkg['/'+ipath] ef = EditableFile(part.dump()) ef.edit(ipath) if ef.changed: part.load(ef.data) pkg.save()
|
base, rname = posixpath.split(item.name) base = base[1:]
|
base, rname = posixpath.split(to_zip_name(item.name))
|
def ropen(item): if isinstance(item, Relationships): return if isinstance(item, Part): base, rname = posixpath.split(item.name) base = base[1:] relname = posixpath.join(base, '_rels', '%s.rels' % rname) if relname in zf.namelist(): item._load_rels(zf.read(relname)) for rel in item.relationships: pname = posixpath.join(item.base, rel.target) if pname in self: # This item is already in self. continue data = "".join(self._get_matching_segments(rel.target)) # get a handler for the relationship type or use a default add_part = get_handler(rel.type, ZipPackage._load_part) add_part(self, pname, data) ropen(self[pname])
|
data = "".join(self._get_matching_segments(rel.target))
|
target_path = to_zip_name(pname) data = "".join(self._get_matching_segments(target_path))
|
def ropen(item): if isinstance(item, Relationships): return if isinstance(item, Part): base, rname = posixpath.split(item.name) base = base[1:] relname = posixpath.join(base, '_rels', '%s.rels' % rname) if relname in zf.namelist(): item._load_rels(zf.read(relname)) for rel in item.relationships: pname = posixpath.join(item.base, rel.target) if pname in self: # This item is already in self. continue data = "".join(self._get_matching_segments(rel.target)) # get a handler for the relationship type or use a default add_part = get_handler(rel.type, ZipPackage._load_part) add_part(self, pname, data) ropen(self[pname])
|
localtime = time.localtime(time.time())
|
now = time.localtime(time.time()) ZipInfoNow = functools.partial(ZipInfo, date_time = now)
|
def save(self, target=None): localtime = time.localtime(time.time()) zf = ZipFile(target or self.name, mode='w', compression=ZIP_DEFLATED) ct_info = ZipInfo('[Content_Types].xml', localtime) ct_info.create_system = SYSUNIX ct_info.flag_bits = 8 ct_info.external_attr = USER_READ_WRITE ct_info.compress_type = ZIP_DEFLATED zf.writestr(ct_info, self.content_types.dump()) rel_info = ZipInfo('_rels/.rels', localtime) rel_info.create_system = SYSUNIX rel_info.flag_bits = 8 rel_info.external_attr = USER_READ_WRITE rel_info.compress_type = ZIP_DEFLATED zf.writestr(rel_info, self.relationships.dump()) for name in self.parts: if name == '/_rels/.rels': continue part = self[name] content = part.dump() if not content: continue part_info = ZipInfo(name[1:], localtime) part_info.create_system = SYSUNIX part_info.flag_bits = 8 part_info.external_attr = USER_READ_WRITE part_info.compress_type = ZIP_DEFLATED zf.writestr(part_info, content)
|
ct_info = ZipInfo('[Content_Types].xml', localtime)
|
ct_info = ZipInfoNow('[Content_Types].xml')
|
def save(self, target=None): localtime = time.localtime(time.time()) zf = ZipFile(target or self.name, mode='w', compression=ZIP_DEFLATED) ct_info = ZipInfo('[Content_Types].xml', localtime) ct_info.create_system = SYSUNIX ct_info.flag_bits = 8 ct_info.external_attr = USER_READ_WRITE ct_info.compress_type = ZIP_DEFLATED zf.writestr(ct_info, self.content_types.dump()) rel_info = ZipInfo('_rels/.rels', localtime) rel_info.create_system = SYSUNIX rel_info.flag_bits = 8 rel_info.external_attr = USER_READ_WRITE rel_info.compress_type = ZIP_DEFLATED zf.writestr(rel_info, self.relationships.dump()) for name in self.parts: if name == '/_rels/.rels': continue part = self[name] content = part.dump() if not content: continue part_info = ZipInfo(name[1:], localtime) part_info.create_system = SYSUNIX part_info.flag_bits = 8 part_info.external_attr = USER_READ_WRITE part_info.compress_type = ZIP_DEFLATED zf.writestr(part_info, content)
|
rel_info = ZipInfo('_rels/.rels', localtime)
|
rel_info = ZipInfoNow('_rels/.rels')
|
def save(self, target=None): localtime = time.localtime(time.time()) zf = ZipFile(target or self.name, mode='w', compression=ZIP_DEFLATED) ct_info = ZipInfo('[Content_Types].xml', localtime) ct_info.create_system = SYSUNIX ct_info.flag_bits = 8 ct_info.external_attr = USER_READ_WRITE ct_info.compress_type = ZIP_DEFLATED zf.writestr(ct_info, self.content_types.dump()) rel_info = ZipInfo('_rels/.rels', localtime) rel_info.create_system = SYSUNIX rel_info.flag_bits = 8 rel_info.external_attr = USER_READ_WRITE rel_info.compress_type = ZIP_DEFLATED zf.writestr(rel_info, self.relationships.dump()) for name in self.parts: if name == '/_rels/.rels': continue part = self[name] content = part.dump() if not content: continue part_info = ZipInfo(name[1:], localtime) part_info.create_system = SYSUNIX part_info.flag_bits = 8 part_info.external_attr = USER_READ_WRITE part_info.compress_type = ZIP_DEFLATED zf.writestr(part_info, content)
|
part_info = ZipInfo(name[1:], localtime)
|
part_info = ZipInfoNow(to_zip_name(name))
|
def save(self, target=None): localtime = time.localtime(time.time()) zf = ZipFile(target or self.name, mode='w', compression=ZIP_DEFLATED) ct_info = ZipInfo('[Content_Types].xml', localtime) ct_info.create_system = SYSUNIX ct_info.flag_bits = 8 ct_info.external_attr = USER_READ_WRITE ct_info.compress_type = ZIP_DEFLATED zf.writestr(ct_info, self.content_types.dump()) rel_info = ZipInfo('_rels/.rels', localtime) rel_info.create_system = SYSUNIX rel_info.flag_bits = 8 rel_info.external_attr = USER_READ_WRITE rel_info.compress_type = ZIP_DEFLATED zf.writestr(rel_info, self.relationships.dump()) for name in self.parts: if name == '/_rels/.rels': continue part = self[name] content = part.dump() if not content: continue part_info = ZipInfo(name[1:], localtime) part_info.create_system = SYSUNIX part_info.flag_bits = 8 part_info.external_attr = USER_READ_WRITE part_info.compress_type = ZIP_DEFLATED zf.writestr(part_info, content)
|
self.core_properties = None
|
def __init__(self): self.parts = {} self.base = '/' self.relationships = rels = Relationships(self, self) self[rels.name] = rels self.content_types = ContentTypes() self.content_types.add(ContentType.Default(rels.content_type, 'rels')) self.core_properties = None
|
|
assert ct.extension not in self.defaults
|
if ct.extension in self.defaults: assert self.defaults[ct.extension] == ct
|
def _validate_default(self, ct): assert ct.extension not in self.defaults return ct
|
if elem and elem.text:
|
if elem is not None and elem.text:
|
def set_attr_if_tag(tag, attr=None, transform=identity): if attr is None: ns, attr = parse_tag(tag) elem = xml.find(tag) if elem and elem.text: value = transform(elem.text) setattr(self, attr, value)
|
J.Mx = [49, 193, 769, 3073, 12289]
|
J.Mx = [49, 98, 196, 392, 784]
|
def define_refinement_paths(KSPRTOL, SSARTOL): # Define all the supported refinement paths: tests = {} # A A = PISMVerificationTest() A.name = "A" A.test = "isothermal SIA with a marine margin" A.path = "(refine dx=53.33,40,26.67,20,13.33,km, dx=dy and Mx=My=31,41,61,81,121)" A.Mx = [31, 41, 61, 81, 121] A.My = A.Mx A.opts = "-y 25000.0" tests['A'] = A # B B = PISMVerificationTest() B.name = "B" B.test = "isothermal SIA with a moving margin" B.path = "(refine dx=80,60,40,30,20,km, dx=dy and Mx=My=31,41,61,81,121)" B.Mx = [31, 41, 61, 81, 121] B.My = B.Mx B.opts = "-ys 422.45 -y 25000.0" tests['B'] = B # C C = PISMVerificationTest() C.name = "C" C.test = "isothermal SIA w moving margin" C.path = "(refine dx=50,33.33,25,20,16,km, dx=dy and Mx=My=41,61,81,101,121)" C.Mx = [41, 61, 81, 101, 121] C.My = C.Mx C.opts = "-y 15208.0" tests['C'] = C # D D = PISMVerificationTest() D.name = "D" D.test = "isothermal SIA with variable accumulation" D.path = "(refine dx=50,33.33,25,20,16.67,km, dx=dy and Mx=My=41,61,81,101,121)" D.Mx = [41, 61, 81, 101, 121] D.My = D.Mx D.opts = "-y 25000.0" tests['D'] = D # E E = PISMVerificationTest() E.name = "E" E.test = "isothermal SIA with sliding" E.path = "(refine dx=53.33,40,26.67,20,13.33,km, dx=dy and Mx=My=31,41,61,81,121)" E.Mx = [31, 41, 61, 81, 121] E.My = E.Mx E.opts = "-y 25000.0" tests['E'] = E # F F = PISMVerificationTest() F.name = "F" F.test = "thermocoupled SIA" F.path = "(refine dx=30,20,15,10,7.5,km, dx=dy, dz=66.67,44.44,33.33,22.22,16.67 m\n and Mx=My=Mz=61,91,121,181,241)" F.Mx = [61, 91, 121, 181, 241] F.My = F.Mx F.Mz = F.Mx F.opts = "-y 25000.0" tests['F'] = F # G G = PISMVerificationTest() G.name = "G" G.test = "thermocoupled SIA with variable accumulation" G.path = "(refine dx=30,20,15,10,7.5,km, dx=dy, dz=66.67,44.44,33.33,22.22,16.67 m\n and Mx=My=Mz=61,91,121,181,241)" G.Mx = [61, 91, 121, 181, 241] G.My = G.Mx G.Mz = G.Mx G.opts = "-y 25000.0" tests['G'] = G # H H = PISMVerificationTest() H.name = "H" H.test = "isothermal SIA with a moving margin and isostatic bed deformation" H.path = "(refine dx=80,60,40,30,20,km, dx=dy and Mx=My=31,41,61,81,121)" H.Mx = [31, 41, 61, 81, 121] H.My = H.Mx H.opts = "-bed_def_iso -y 60000.0" tests['H'] = H # I I = PISMVerificationTest() I.name = "I" I.test = "plastic till ice stream" I.path = "(refine dy=5000,1250,312.5,78.13,19.53,m, My=49,193,769,3073,12289)" I.Mx = [5] * 5 I.My = [49, 193, 769, 3073, 12289] I.opts = "-ssa_rtol %1.e -ksp_rtol %1.e" % (SSARTOL, KSPRTOL) tests['I'] = I # J J = PISMVerificationTest() J.name = "J" J.test = "linearized periodic ice shelf" J.path = "(refine dy=5000,1250,312.5,78.13,19.53,m, Mx=49,193,769,3073,12289)" J.Mx = [49, 193, 769, 3073, 12289] J.My = J.Mx J.Mz = [11] * 5 J.opts = "-pc_type asm -sub_pc_type lu -ksp_rtol %1.e" % KSPRTOL tests['J'] = J # K K = PISMVerificationTest() K.name = "K" K.test = "pure conduction problem in ice and bedrock" K.path = "(refine dz=100,50,25,12.5,6.25,m, Mz=41,81,161,321,641)" K.Mx = [4] * 5 K.My = K.Mx K.Mz = array([41, 81, 161, 321, 641]) K.Mbz = (K.Mz - 1) / 4 + 1 K.opts = "-y 130000.0 -Lbz 1000" tests['K'] = K # L L = PISMVerificationTest() L.name = "L" L.test = "isothermal SIA with a non-flat bed" L.path = "(refine dx=60,30,20,15,10,km, dx=dy and Mx=My=31,61,91,121,181)" L.Mx = [31, 61, 91, 121, 181] L.My = L.Mx L.opts = "-y 25000.0" tests['L'] = L # M M = PISMVerificationTest() M.name = "M" M.test = "annular ice shelf with a calving front" M.path = "(refine dx=50,25,16.666,12.5,8.333 km; dx=dy and My=31,61,91,121,181)" M.Mx = 
[31, 61, 91, 121, 181] M.My = M.Mx M.Mz = [11] * 5 M.opts = "-ssa_rtol %1.e -ksp_rtol %1.e" % (SSARTOL, KSPRTOL) tests['M'] = M # test K (for a figure in the User's Manual) K = PISMVerificationTest() K.name = "K" K.test = "pure conduction problem in ice and bedrock" K.path = "(lots of levels)" K.Mz = array([21, 41, 61, 81, 101, 121, 141, 161, 181, 201, 221, 241, 261, 281, 301, 321]) K.Mbz = (K.Mz - 1) / 4 + 1 K.Mx = [4] * len(K.Mz) K.My = K.Mx tests['K_userman'] = K # test B (for a figure in the User's Manual) B = PISMVerificationTest() B.name = "B" B.test = "isothermal SIA with a moving margin" B.path = "(lots of levels)" B.Mx = [31, 41, 51, 61, 71, 81, 91, 101, 111, 121] B.My = B.Mx B.Mz = [31] * len(B.Mx) B.Mbz = [1] * len(B.Mx) B.opts = "-ys 422.45 -y 25000.0" tests['B_userman'] = B # test G (for a figure in the User's Manual) G = PISMVerificationTest() G.name = "G" G.test = "thermocoupled SIA with variable accumulation" G.path = "(lots of levels)" G.Mx = [61, 71, 81, 91, 101, 111, 121, 151, 181] G.My = G.Mx G.Mz = G.Mx tests['G_userman'] = G # test I (for a figure in the User's Manual) I = PISMVerificationTest() I.name = "I" I.test = "plastic till ice stream" I.path = "(lots of levels)" I.My = [51, 101, 151, 201, 401, 601, 801, 1001, 1501, 2001, 2501, 3073] I.Mx = [5] * len(I.My) tests['I_userman'] = I return tests
|
dt = np.diff(t[:],axis=0)
|
dt = np.diff(t[::stride],axis=0)
|
def getRateOfChange(t,X,p,varname): # differenetiate time along time axis dt = np.diff(t[:],axis=0) Xdim = X.ndim Xunits = X.units if Xdim == 1: X = X[:] dXp = np.diff(X) dXpdt = dX/dt elif Xdim == 3: if 'mask' in nc.variables.keys(): mask = np.array(nc.variables['mask'][:]) # (t,y,x) k = np.nonzero((mask==1) ^ (mask==2) ^ (mask==3)) mask2 = np.ones_like(mask) mask2[k] = 0 # use masked values (i.e. ignore ocean and ice-free land) X = np.ma.array(data=X[:],mask=mask2) else: X = np.array(X[:]) dX = np.diff(X,axis=0) nt,ny,nx = dX.shape dX = dX.reshape(nt,nx*ny) # convert (t,y,x) -> (t,y*x) so that np.nansum needs to be applied only once dXp = (np.nansum(dX**p,axis=1))**(1./p) dXpdt = dXp/dt else: print('error: dim n = %i of variable %s not supported, must be 1 or 3' % (Xdim, varname)) return dXpdt
|
X = X[:]
|
X = X[::stride]
|
def getRateOfChange(t,X,p,varname): # differenetiate time along time axis dt = np.diff(t[:],axis=0) Xdim = X.ndim Xunits = X.units if Xdim == 1: X = X[:] dXp = np.diff(X) dXpdt = dX/dt elif Xdim == 3: if 'mask' in nc.variables.keys(): mask = np.array(nc.variables['mask'][:]) # (t,y,x) k = np.nonzero((mask==1) ^ (mask==2) ^ (mask==3)) mask2 = np.ones_like(mask) mask2[k] = 0 # use masked values (i.e. ignore ocean and ice-free land) X = np.ma.array(data=X[:],mask=mask2) else: X = np.array(X[:]) dX = np.diff(X,axis=0) nt,ny,nx = dX.shape dX = dX.reshape(nt,nx*ny) # convert (t,y,x) -> (t,y*x) so that np.nansum needs to be applied only once dXp = (np.nansum(dX**p,axis=1))**(1./p) dXpdt = dXp/dt else: print('error: dim n = %i of variable %s not supported, must be 1 or 3' % (Xdim, varname)) return dXpdt
|
dXpdt = dX/dt
|
dXpdt = dXp/dt
|
def getRateOfChange(t,X,p,varname): # differenetiate time along time axis dt = np.diff(t[:],axis=0) Xdim = X.ndim Xunits = X.units if Xdim == 1: X = X[:] dXp = np.diff(X) dXpdt = dX/dt elif Xdim == 3: if 'mask' in nc.variables.keys(): mask = np.array(nc.variables['mask'][:]) # (t,y,x) k = np.nonzero((mask==1) ^ (mask==2) ^ (mask==3)) mask2 = np.ones_like(mask) mask2[k] = 0 # use masked values (i.e. ignore ocean and ice-free land) X = np.ma.array(data=X[:],mask=mask2) else: X = np.array(X[:]) dX = np.diff(X,axis=0) nt,ny,nx = dX.shape dX = dX.reshape(nt,nx*ny) # convert (t,y,x) -> (t,y*x) so that np.nansum needs to be applied only once dXp = (np.nansum(dX**p,axis=1))**(1./p) dXpdt = dXp/dt else: print('error: dim n = %i of variable %s not supported, must be 1 or 3' % (Xdim, varname)) return dXpdt
|
mask = np.array(nc.variables['mask'][:])
|
mask = np.array(nc.variables['mask'][::stride,:,:])
|
def getRateOfChange(t,X,p,varname): # differenetiate time along time axis dt = np.diff(t[:],axis=0) Xdim = X.ndim Xunits = X.units if Xdim == 1: X = X[:] dXp = np.diff(X) dXpdt = dX/dt elif Xdim == 3: if 'mask' in nc.variables.keys(): mask = np.array(nc.variables['mask'][:]) # (t,y,x) k = np.nonzero((mask==1) ^ (mask==2) ^ (mask==3)) mask2 = np.ones_like(mask) mask2[k] = 0 # use masked values (i.e. ignore ocean and ice-free land) X = np.ma.array(data=X[:],mask=mask2) else: X = np.array(X[:]) dX = np.diff(X,axis=0) nt,ny,nx = dX.shape dX = dX.reshape(nt,nx*ny) # convert (t,y,x) -> (t,y*x) so that np.nansum needs to be applied only once dXp = (np.nansum(dX**p,axis=1))**(1./p) dXpdt = dXp/dt else: print('error: dim n = %i of variable %s not supported, must be 1 or 3' % (Xdim, varname)) return dXpdt
|
X = np.ma.array(data=X[:],mask=mask2)
|
X = np.ma.array(data=X[::stride,:,:],mask=mask2)
|
def getRateOfChange(t,X,p,varname): # differenetiate time along time axis dt = np.diff(t[:],axis=0) Xdim = X.ndim Xunits = X.units if Xdim == 1: X = X[:] dXp = np.diff(X) dXpdt = dX/dt elif Xdim == 3: if 'mask' in nc.variables.keys(): mask = np.array(nc.variables['mask'][:]) # (t,y,x) k = np.nonzero((mask==1) ^ (mask==2) ^ (mask==3)) mask2 = np.ones_like(mask) mask2[k] = 0 # use masked values (i.e. ignore ocean and ice-free land) X = np.ma.array(data=X[:],mask=mask2) else: X = np.array(X[:]) dX = np.diff(X,axis=0) nt,ny,nx = dX.shape dX = dX.reshape(nt,nx*ny) # convert (t,y,x) -> (t,y*x) so that np.nansum needs to be applied only once dXp = (np.nansum(dX**p,axis=1))**(1./p) dXpdt = dXp/dt else: print('error: dim n = %i of variable %s not supported, must be 1 or 3' % (Xdim, varname)) return dXpdt
|
X = np.array(X[:])
|
X = np.array(X[::stride,:,:])
|
def getRateOfChange(t,X,p,varname): # differenetiate time along time axis dt = np.diff(t[:],axis=0) Xdim = X.ndim Xunits = X.units if Xdim == 1: X = X[:] dXp = np.diff(X) dXpdt = dX/dt elif Xdim == 3: if 'mask' in nc.variables.keys(): mask = np.array(nc.variables['mask'][:]) # (t,y,x) k = np.nonzero((mask==1) ^ (mask==2) ^ (mask==3)) mask2 = np.ones_like(mask) mask2[k] = 0 # use masked values (i.e. ignore ocean and ice-free land) X = np.ma.array(data=X[:],mask=mask2) else: X = np.array(X[:]) dX = np.diff(X,axis=0) nt,ny,nx = dX.shape dX = dX.reshape(nt,nx*ny) # convert (t,y,x) -> (t,y*x) so that np.nansum needs to be applied only once dXp = (np.nansum(dX**p,axis=1))**(1./p) dXpdt = dXp/dt else: print('error: dim n = %i of variable %s not supported, must be 1 or 3' % (Xdim, varname)) return dXpdt
|
plt.semilogy(t[1:],dVpdt, 'b', lw = 2)
|
plt.semilogy(t[1::stride],dVpdt, 'b', lw = 2)
|
def getRateOfChange(t,X,p,varname): # differenetiate time along time axis dt = np.diff(t[:],axis=0) Xdim = X.ndim Xunits = X.units if Xdim == 1: X = X[:] dXp = np.diff(X) dXpdt = dX/dt elif Xdim == 3: if 'mask' in nc.variables.keys(): mask = np.array(nc.variables['mask'][:]) # (t,y,x) k = np.nonzero((mask==1) ^ (mask==2) ^ (mask==3)) mask2 = np.ones_like(mask) mask2[k] = 0 # use masked values (i.e. ignore ocean and ice-free land) X = np.ma.array(data=X[:],mask=mask2) else: X = np.array(X[:]) dX = np.diff(X,axis=0) nt,ny,nx = dX.shape dX = dX.reshape(nt,nx*ny) # convert (t,y,x) -> (t,y*x) so that np.nansum needs to be applied only once dXp = (np.nansum(dX**p,axis=1))**(1./p) dXpdt = dXp/dt else: print('error: dim n = %i of variable %s not supported, must be 1 or 3' % (Xdim, varname)) return dXpdt
|
pism_var = squeeze(nc_pism.variables[dovars[0]]) ref_var = squeeze(nc_reference.variables[dovars[1]])
|
pism_var = squeeze(nc_pism.variables[dovars[0]][:]) ref_var = squeeze(nc_reference.variables[dovars[1]][:])
|
def usagefailure(message): print message print print usage exit(2)
|
thk_var = squeeze(nc_thk.variables["thk"])
|
thk_var = squeeze(nc_thk.variables["thk"][:])
|
def usagefailure(message): print message print print usage exit(2)
|
print "Files are the same within given tolerance."
|
print "Files are the same within tolerance %.1e" % tol
|
def success(): print "Files are the same within given tolerance." exit(0)
|
failure()
|
usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 2" % name)
|
def compare_vars(nc1, nc2, name, tol): from numpy import squeeze # Find variables: try: var1 = squeeze(nc1.variables[name][:]) var2 = squeeze(nc2.variables[name][:]) except: # This can happen if one of the files does not have the variable. failure() try: delta = abs(var1 - var2).max() except: # This can happen if variables have different shapes. failure() # The actual check: if (delta > tol): print "name = %s, delta = %e, tol = %e" % (name, delta, tol) failure()
|
failure()
|
usagefailure("ERROR: VARIABLE '%s' OF INCOMPATIBLE SHAPES (?) IN FILES" % name)
|
def compare_vars(nc1, nc2, name, tol): from numpy import squeeze # Find variables: try: var1 = squeeze(nc1.variables[name][:]) var2 = squeeze(nc2.variables[name][:]) except: # This can happen if one of the files does not have the variable. failure() try: delta = abs(var1 - var2).max() except: # This can happen if variables have different shapes. failure() # The actual check: if (delta > tol): print "name = %s, delta = %e, tol = %e" % (name, delta, tol) failure()
|
failure()
|
usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file2)
|
def compare(file1, file2, variables, exclude, tol): try: from netCDF4 import Dataset as NC except: from netCDF3 import Dataset as NC from numpy import unique, r_ try: nc1 = NC(file1, 'r') nc2 = NC(file2, 'r') except: # This can happen if one of the files could not be opened. failure() if (exclude == False): if len(variables) == 0: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() variables = unique(r_[vars1, vars2]) for each in variables: compare_vars(nc1, nc2, each, tol) else: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() vars = unique(r_[vars1, vars2]) for each in vars: if (each in variables): continue compare_vars(nc1, nc2, each, tol)
|
opts, args = getopt(argv[1:], "t:v:x") tol = 0
|
try: opts, args = getopt(argv[1:], "t:v:x", ["help","usage"]) except GetoptError: usagefailure('ERROR: INCORRECT COMMAND LINE ARGUMENTS FOR nccmp.py')
|
def compare(file1, file2, variables, exclude, tol): try: from netCDF4 import Dataset as NC except: from netCDF3 import Dataset as NC from numpy import unique, r_ try: nc1 = NC(file1, 'r') nc2 = NC(file2, 'r') except: # This can happen if one of the files could not be opened. failure() if (exclude == False): if len(variables) == 0: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() variables = unique(r_[vars1, vars2]) for each in variables: compare_vars(nc1, nc2, each, tol) else: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() vars = unique(r_[vars1, vars2]) for each in vars: if (each in variables): continue compare_vars(nc1, nc2, each, tol)
|
failure()
|
usagefailure('ERROR: WRONG NUMBER OF ARGUMENTS FOR nccmp.py')
|
def compare(file1, file2, variables, exclude, tol): try: from netCDF4 import Dataset as NC except: from netCDF3 import Dataset as NC from numpy import unique, r_ try: nc1 = NC(file1, 'r') nc2 = NC(file2, 'r') except: # This can happen if one of the files could not be opened. failure() if (exclude == False): if len(variables) == 0: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() variables = unique(r_[vars1, vars2]) for each in variables: compare_vars(nc1, nc2, each, tol) else: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() vars = unique(r_[vars1, vars2]) for each in vars: if (each in variables): continue compare_vars(nc1, nc2, each, tol)
|
nc.history += historystr
|
nc.history = historystr + nc.history
|
def laplace(data, mask, eps1, eps2, initial_guess='mean', max_iter=10000): """laplace solves the Laplace equation using the SOR method with Chebyshev acceleration as described in 'Numerical Recipes in Fortran: the art of scientific computing' by William H. Press et al -- 2nd edition, section 19.5. data is a 2-d array (computation grid) mask is a boolean array; setting mask to 'data == 0', for example, results in only modifying points where 'data' is zero, all the other points are left as is. Intended use: if in an array the value of -9999.0 signifies a missing value, then setting mask to 'data == -9999.0' fills in all the missing values. eps1 is the first stopping criterion: the iterations stop if the norm of residual becomes less than eps1*initial_norm, where 'initial_norm' is the initial norm of residual. Setting eps1 to zero or a negative number disables this stopping criterion. eps2 is the second stopping criterion: the iterations stop if the absolute value of the maximal change in value between successive iterations is less than eps2. Setting eps2 to zero or a negative number disables this stopping criterion. initial_guess is the initial guess used for all the values in the domain; the default is 'mean', i.e. use the mean of all the present values as the initial guess for missing values. initial_guess has to be 'mean' or a number. max_iter is the maximum number of iterations allowed. The default is 10000. """ dimensions = data.shape rjac = rho_jacobi(dimensions) i, j = indices(dimensions) # This splits the grid into 'odd' and 'even' parts, according to the # checkerboard pattern: odd = (i % 2 == 1) ^ (j % 2 == 0) even = (i % 2 == 0) ^ (j % 2 == 0) # odd and even parts _in_ the domain: odd_part = zip(i[mask & odd], j[mask & odd]) even_part = zip(i[mask & even], j[mask & even]) # relative indices of the stencil points: k = array([0, 1, 0, -1]) l = array([-1, 0, 1, 0]) parts = [odd_part, even_part] if type(initial_guess) == type('string'): if initial_guess == 'mean': present = array(ones_like(mask) - mask, dtype=bool) initial_guess = mean(data[present]) else: print """ERROR: initial_guess of '%s' is not supported (it should be a number or 'mean').
|
from shutil import copyfile, move
|
from shutil import copy, move
|
def laplace(data, mask, eps1, eps2, initial_guess='mean', max_iter=10000): """laplace solves the Laplace equation using the SOR method with Chebyshev acceleration as described in 'Numerical Recipes in Fortran: the art of scientific computing' by William H. Press et al -- 2nd edition, section 19.5. data is a 2-d array (computation grid) mask is a boolean array; setting mask to 'data == 0', for example, results in only modifying points where 'data' is zero, all the other points are left as is. Intended use: if in an array the value of -9999.0 signifies a missing value, then setting mask to 'data == -9999.0' fills in all the missing values. eps1 is the first stopping criterion: the iterations stop if the norm of residual becomes less than eps1*initial_norm, where 'initial_norm' is the initial norm of residual. Setting eps1 to zero or a negative number disables this stopping criterion. eps2 is the second stopping criterion: the iterations stop if the absolute value of the maximal change in value between successive iterations is less than eps2. Setting eps2 to zero or a negative number disables this stopping criterion. initial_guess is the initial guess used for all the values in the domain; the default is 'mean', i.e. use the mean of all the present values as the initial guess for missing values. initial_guess has to be 'mean' or a number. max_iter is the maximum number of iterations allowed. The default is 10000. """ dimensions = data.shape rjac = rho_jacobi(dimensions) i, j = indices(dimensions) # This splits the grid into 'odd' and 'even' parts, according to the # checkerboard pattern: odd = (i % 2 == 1) ^ (j % 2 == 0) even = (i % 2 == 0) ^ (j % 2 == 0) # odd and even parts _in_ the domain: odd_part = zip(i[mask & odd], j[mask & odd]) even_part = zip(i[mask & even], j[mask & even]) # relative indices of the stencil points: k = array([0, 1, 0, -1]) l = array([-1, 0, 1, 0]) parts = [odd_part, even_part] if type(initial_guess) == type('string'): if initial_guess == 'mean': present = array(ones_like(mask) - mask, dtype=bool) initial_guess = mean(data[present]) else: print """ERROR: initial_guess of '%s' is not supported (it should be a number or 'mean').
|
copyfile(input_filename, tmp_filename)
|
copy(input_filename, tmp_filename)
|
def laplace(data, mask, eps1, eps2, initial_guess='mean', max_iter=10000): """laplace solves the Laplace equation using the SOR method with Chebyshev acceleration as described in 'Numerical Recipes in Fortran: the art of scientific computing' by William H. Press et al -- 2nd edition, section 19.5. data is a 2-d array (computation grid) mask is a boolean array; setting mask to 'data == 0', for example, results in only modifying points where 'data' is zero, all the other points are left as is. Intended use: if in an array the value of -9999.0 signifies a missing value, then setting mask to 'data == -9999.0' fills in all the missing values. eps1 is the first stopping criterion: the iterations stop if the norm of residual becomes less than eps1*initial_norm, where 'initial_norm' is the initial norm of residual. Setting eps1 to zero or a negative number disables this stopping criterion. eps2 is the second stopping criterion: the iterations stop if the absolute value of the maximal change in value between successive iterations is less than eps2. Setting eps2 to zero or a negative number disables this stopping criterion. initial_guess is the initial guess used for all the values in the domain; the default is 'mean', i.e. use the mean of all the present values as the initial guess for missing values. initial_guess has to be 'mean' or a number. max_iter is the maximum number of iterations allowed. The default is 10000. """ dimensions = data.shape rjac = rho_jacobi(dimensions) i, j = indices(dimensions) # This splits the grid into 'odd' and 'even' parts, according to the # checkerboard pattern: odd = (i % 2 == 1) ^ (j % 2 == 0) even = (i % 2 == 0) ^ (j % 2 == 0) # odd and even parts _in_ the domain: odd_part = zip(i[mask & odd], j[mask & odd]) even_part = zip(i[mask & even], j[mask & even]) # relative indices of the stencil points: k = array([0, 1, 0, -1]) l = array([-1, 0, 1, 0]) parts = [odd_part, even_part] if type(initial_guess) == type('string'): if initial_guess == 'mean': present = array(ones_like(mask) - mask, dtype=bool) initial_guess = mean(data[present]) else: print """ERROR: initial_guess of '%s' is not supported (it should be a number or 'mean').
|
nccmp.py -x C foo.nc bar.nc compare all variables except C
|
nccmp.py -x -v C foo.nc bar.nc compare all variables except C
|
def failure(): print "Files are different." exit(1)
|
from numpy import squeeze
|
from numpy import squeeze, isnan, ma
|
def compare_vars(nc1, nc2, name, tol): from numpy import squeeze try: var1 = squeeze(nc1.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 1" % name) try: var2 = squeeze(nc2.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 2" % name) try: delta = abs(var1 - var2).max() except: usagefailure("ERROR: VARIABLE '%s' OF INCOMPATIBLE SHAPES (?) IN FILES" % name) # The actual check: if (delta > tol): print "name = %s, delta = %e, tol = %e" % (name, delta, tol) failure()
|
delta = abs(var1 - var2).max()
|
mask = isnan(var1) | isnan(var2)
|
def compare_vars(nc1, nc2, name, tol): from numpy import squeeze try: var1 = squeeze(nc1.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 1" % name) try: var2 = squeeze(nc2.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 2" % name) try: delta = abs(var1 - var2).max() except: usagefailure("ERROR: VARIABLE '%s' OF INCOMPATIBLE SHAPES (?) IN FILES" % name) # The actual check: if (delta > tol): print "name = %s, delta = %e, tol = %e" % (name, delta, tol) failure()
|
print delta, denom
|
print "name = %s, delta = %e, denom = %e" % (name, delta, denom)
|
def compare_vars(nc1, nc2, name, tol, relative=False): from numpy import squeeze, isnan, ma try: var1 = squeeze(nc1.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 1" % name) try: var2 = squeeze(nc2.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 2" % name) try: mask = isnan(var1) | isnan(var2) except: usagefailure("ERROR: VARIABLE '%s' OF INCOMPATIBLE SHAPES (?) IN FILES" % name) if mask.all(): print 'Variable %10s: no values to compare.' % name return var1 = ma.array(var1, mask = mask) var2 = ma.array(var2, mask = mask) delta = abs(var1 - var2).max() if relative: denom = max(abs(var1).max(), abs(var2).max()) print delta, denom if denom > 0: delta = delta / denom # The actual check: if (delta > tol): print "name = %s, delta = %e, tol = %e" % (name, delta, tol) failure()
|
var[thk<thktol] = -9999.0
|
if 't' in dims: for j in range(var.shape[0]): tmp = var[j] tmp[thk<thktol] = fill_value var[j] = tmp else: var[thk<thktol] = fill_value
|
def maskout(nc, thk, name, thktol=1.0): from numpy import squeeze, isnan, ma try: var = squeeze(nc.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE" % name) var[thk<thktol] = -9999.0 return var
|
def success(): print "Files are the same within tolerance %.1e" % tol
|
def success(relative): if relative: print "Files are the same within relative tolerance %.1e" % tol else: print "Files are the same within tolerance %.1e" % tol
|
def success(): print "Files are the same within tolerance %.1e" % tol exit(0)
|
def compare_vars(nc1, nc2, name, tol):
|
def compare_vars(nc1, nc2, name, tol, relative=False):
|
def compare_vars(nc1, nc2, name, tol): from numpy import squeeze, isnan, ma try: var1 = squeeze(nc1.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 1" % name) try: var2 = squeeze(nc2.variables[name][:]) except: usagefailure("ERROR: VARIABLE '%s' NOT FOUND IN FILE 2" % name) try: mask = isnan(var1) | isnan(var2) except: usagefailure("ERROR: VARIABLE '%s' OF INCOMPATIBLE SHAPES (?) IN FILES" % name) if mask.all(): print 'Variable %10s: no values to compare.' % name return var1 = ma.array(var1, mask = mask) var2 = ma.array(var2, mask = mask) delta = abs(var1 - var2).max() # The actual check: if (delta > tol): print "name = %s, delta = %e, tol = %e" % (name, delta, tol) failure()
|
def compare(file1, file2, variables, exclude, tol):
|
def compare(file1, file2, variables, exclude, tol, relative):
|
def compare(file1, file2, variables, exclude, tol): try: from netCDF4 import Dataset as NC except: from netCDF3 import Dataset as NC from numpy import unique, r_ try: nc1 = NC(file1, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file1) try: nc2 = NC(file2, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file2) if (exclude == False): if len(variables) == 0: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() variables = unique(r_[vars1, vars2]) for each in variables: compare_vars(nc1, nc2, each, tol) else: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() vars = unique(r_[vars1, vars2]) for each in vars: if (each in variables): continue compare_vars(nc1, nc2, each, tol)
|
compare_vars(nc1, nc2, each, tol)
|
compare_vars(nc1, nc2, each, tol, relative)
|
def compare(file1, file2, variables, exclude, tol): try: from netCDF4 import Dataset as NC except: from netCDF3 import Dataset as NC from numpy import unique, r_ try: nc1 = NC(file1, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file1) try: nc2 = NC(file2, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file2) if (exclude == False): if len(variables) == 0: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() variables = unique(r_[vars1, vars2]) for each in variables: compare_vars(nc1, nc2, each, tol) else: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() vars = unique(r_[vars1, vars2]) for each in vars: if (each in variables): continue compare_vars(nc1, nc2, each, tol)
|
opts, args = getopt(argv[1:], "t:v:x", ["help","usage"])
|
opts, args = getopt(argv[1:], "t:v:xr", ["help","usage"])
|
def compare(file1, file2, variables, exclude, tol): try: from netCDF4 import Dataset as NC except: from netCDF3 import Dataset as NC from numpy import unique, r_ try: nc1 = NC(file1, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file1) try: nc2 = NC(file2, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file2) if (exclude == False): if len(variables) == 0: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() variables = unique(r_[vars1, vars2]) for each in variables: compare_vars(nc1, nc2, each, tol) else: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() vars = unique(r_[vars1, vars2]) for each in vars: if (each in variables): continue compare_vars(nc1, nc2, each, tol)
|
compare(args[0],args[1], variables, exclude, tol)
|
compare(args[0],args[1], variables, exclude, tol, relative)
|
def compare(file1, file2, variables, exclude, tol): try: from netCDF4 import Dataset as NC except: from netCDF3 import Dataset as NC from numpy import unique, r_ try: nc1 = NC(file1, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file1) try: nc2 = NC(file2, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file2) if (exclude == False): if len(variables) == 0: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() variables = unique(r_[vars1, vars2]) for each in variables: compare_vars(nc1, nc2, each, tol) else: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() vars = unique(r_[vars1, vars2]) for each in vars: if (each in variables): continue compare_vars(nc1, nc2, each, tol)
|
success()
|
success(relative)
|
def compare(file1, file2, variables, exclude, tol): try: from netCDF4 import Dataset as NC except: from netCDF3 import Dataset as NC from numpy import unique, r_ try: nc1 = NC(file1, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file1) try: nc2 = NC(file2, 'r') except: usagefailure("ERROR: FILE '%s' CANNOT BE OPENED FOR READING" % file2) if (exclude == False): if len(variables) == 0: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() variables = unique(r_[vars1, vars2]) for each in variables: compare_vars(nc1, nc2, each, tol) else: vars1 = nc1.variables.keys() vars2 = nc2.variables.keys() vars = unique(r_[vars1, vars2]) for each in vars: if (each in variables): continue compare_vars(nc1, nc2, each, tol)
|