rem (stringlengths 0 to 322k) | add (stringlengths 0 to 2.05M) | context (stringlengths 8 to 228k) |
---|---|---|
lumis = [] | runLumiDict = {} | def splitLumiRanges(lumis, off = 0, singlemode = False): # Split into single runs (todel, toadd) = (set(), set()) for (s, e) in lumis: if s[0] and e[0] and s[0] != e[0]: todel.add((s, e)) toadd.add((s, (s[0],None))) toadd.add(((e[0],1),e)) for x in range(s[0] + 1, e[0]): toadd.add(((x, 1), (x, None))) lumis.difference_update(todel) lumis.update(toadd) |
lumis.append(([run_id, lumi_id], [run_id, lumi_id])) | if run_id not in runLumiDict: runLumiDict[run_id] = set() runLumiDict[run_id].add(lumi_id) | def splitLumiRanges(lumis, off = 0, singlemode = False): # Split into single runs (todel, toadd) = (set(), set()) for (s, e) in lumis: if s[0] and e[0] and s[0] != e[0]: todel.add((s, e)) toadd.add((s, (s[0],None))) toadd.add(((e[0],1),e)) for x in range(s[0] + 1, e[0]): toadd.add(((x, 1), (x, None))) lumis.difference_update(todel) lumis.update(toadd) |
sys.stderr.write(se_utils.se_rm.lastlog) | utils.eprint(procRM.getMessage()) | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
if not se_utils.se_rm(os.path.join(pathSE, name_dest)): | procRM = se_utils.se_rm(os.path.join(pathSE, name_dest)) if procRM.wait() != 0: | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
if True in map(lambda x in state, ['h', 's', 'S', 'T', 'w']): | if True in map(lambda x: x in state, ['h', 's', 'S', 'T', 'w']): | def parseJobState(self, state): if True in map(lambda x in state, ['h', 's', 'S', 'T', 'w']): return Job.QUEUED if True in map(lambda x in state, ['r', 't']): return Job.RUNNING return Job.READY |
if True in map(lambda x in state, ['r', 't']): | if True in map(lambda x: x in state, ['r', 't']): | def parseJobState(self, state): if True in map(lambda x in state, ['h', 's', 'S', 'T', 'w']): return Job.QUEUED if True in map(lambda x in state, ['r', 't']): return Job.RUNNING return Job.READY |
['CMSSW_DIR_UI', 'CMSSW_DIR_PRO'])) | ['CMSSW_DIR_USER', 'CMSSW_DIR_UI', 'CMSSW_DIR_PRO'])) | def isInstrumented(cfgName): cfg = open(cfgName, 'r').read() for tag in [ "FILE_NAMES", "MAX_EVENTS", "SKIP_EVENTS" ]: if (not "__%s__" % tag in cfg) and (not "@%s@" % tag in cfg): return False return True |
submodules = map(str.strip, submodules.split(",")) | submodules = filter(lambda x: x != '', map(str.strip, submodules.split(","))) | def __init__(self, config, module, submodules): Monitoring.__init__(self, config, module) submodules = map(str.strip, submodules.split(",")) self.submodules = map(lambda x: Monitoring.open(x, config, module), submodules) |
print "You have selected the following runs and lumi sections:" | print "The following runs and lumi sections are selected:" | def __init__(self, config, datasetExpr, datasetNick, datasetID = 0): DataProvider.__init__(self, config, datasetExpr, datasetNick, datasetID) DataProvider.providers.update({'DBSApiv2': 'dbs'}) if config.getBool('CMSSW', 'dbs blacklist T1', True): T1SEs = ["-srmcms.pic.es", "-ccsrm.in2p3.fr", "-storm-fe-cms.cr.cnaf.infn.it", "-srm-cms.gridpp.rl.ac.uk", "-srm.grid.sinica.edu.tw", "-srm2.grid.sinica.edu.tw"] self.sitefilter.extend(T1SEs) |
else | else: | def __init__(self, config, datasetExpr, datasetNick, datasetID = 0): DataProvider.__init__(self, config, datasetExpr, datasetNick, datasetID) DataProvider.providers.update({'DBSApiv2': 'dbs'}) if config.getBool('CMSSW', 'dbs blacklist T1', True): T1SEs = ["-srmcms.pic.es", "-ccsrm.in2p3.fr", "-storm-fe-cms.cr.cnaf.infn.it", "-srm-cms.gridpp.rl.ac.uk", "-srm.grid.sinica.edu.tw", "-srm2.grid.sinica.edu.tw"] self.sitefilter.extend(T1SEs) |
result.append(blockInfo) | if len(blockInfo[DataProvider.FileList]) > 0: result.append(blockInfo) | def lumiFilter(lfn): for lumi in listLumiInfo[lfn]: if selectLumi(lumi, self.selectedLumis): return True return self.selectedLumis == None |
if opts.save_exprgc: outputGC(lumis) if opts.save_exprjson: outputJSON(lumis) | if opts.diff: print "Unchanged:\n", 30 * "=" outputGC(mergeLumi(list(lumis_uc))) print "\nOnly in reference file:\n", 30 * "=" outputGC(mergeLumi(list(lumis_b))) print "\nNot in reference file:\n", 30 * "=" outputGC(mergeLumi(list(lumis_a))) else: if opts.save_exprgc: outputGC(lumis) if opts.save_exprjson: outputJSON(lumis) | def outputJSON(lumis, stream = sys.stdout): tmp = {} for rlrange in lumis: start, end = rlrange if start[0] != end[0]: raise if start[0] not in tmp: tmp[start[0]] = [] tmp[start[0]].append([start[1], end[1]]) stream.write("{\n") entries = map(lambda run: '\t"%d": %s' % (run, tmp[run]), sorted(tmp.keys())) stream.write("%s\n" % str.join(',\n', entries)) stream.write("}\n") |
splitter = DataSplitter.loadState(workDir) | splitter = DataSplitter.loadState(os.path.join(workDir, 'datamap.tar')) | def outputJSON(lumis, stream = sys.stdout): tmp = {} for rlrange in lumis: start, end = rlrange if start[0] != end[0]: raise if start[0] not in tmp: tmp[start[0]] = [] tmp[start[0]].append([start[1], end[1]]) stream.write("{\n") entries = map(lambda run: '\t"%d": %s' % (run, tmp[run]), sorted(tmp.keys())) stream.write("%s\n" % str.join(',\n', entries)) stream.write("}\n") |
args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file://'), urls)) | args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file:////'), urls)) | def se_runcmd(cmd, varDict = {}, *urls): runLib = utils.pathGC('share', 'gc-run.lib') args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file://'), urls)) varString = str.join(' ', map(lambda x: 'export %s="%s";' % (x, varDict[x]), varDict)) return utils.LoggedProcess('source %s || exit 1; %s %s %s' % (runLib, varString, cmd, args)) |
params += " -l h_cpu=%s" % strTime(reqs[WMS.WALLTIME]) | params += " -l h_cpu=%s" % strTime(reqs[WMS.CPUTIME]) | def getSubmitArguments(self, jobNum, sandbox, stdout, stderr): # Restart jobs = no, job name params = ' -r n -N %s' % self.wms.getJobName(jobNum) |
yield utils.pathGC('python', 'grid_control_cms', 'share', 'DashboardAPI', file) | yield utils.pathGC('python', 'grid_control_cms', 'DashboardAPI', file) | def getFiles(self): for file in ('DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'report.py'): yield utils.pathGC('python', 'grid_control_cms', 'share', 'DashboardAPI', file) |
self.addAttr = dict(map(lambda item: (item, config.get(wmsapi, item)), config.parser.options(wmsapi))) | self.addAttr = {} if wmsapi in config.parser.sections(): self.addAttr = dict(map(lambda item: (item, config.get(wmsapi, item)), config.parser.options(wmsapi))) | def __init__(self, config, module, monitor): WMS.__init__(self, config, module, monitor, 'local') |
jobList = utils.sorted(map(lambda (jobNum, path): jobNum, Job.readJobs(opts.workDir))) | jobList = utils.sorted(map(lambda (jobNum, path): jobNum, Job.readJobs(os.path.join(opts.workDir, 'jobs')))) | def getOutputDatasets(opts): # Get job numbers, task id, ... log = utils.ActivityLog(' * Reading task info...') jobList = utils.sorted(map(lambda (jobNum, path): jobNum, Job.readJobs(opts.workDir))) taskInfo = utils.PersistentDict(os.path.join(opts.workDir, 'task.dat'), ' = ') del log print " * Reading task info - done" # Get all config and output data log = None configData = {} outputData = {} dbsLog = utils.PersistentDict(os.path.join(opts.workDir, 'dbs.log'), ' = ', False) for jobNum in jobList: if jobNum % 10 == 0: del log log = utils.ActivityLog(' * Reading job logs - [%d / %d]' % (jobNum, jobList[-1])) (output, config) = readDBSJobInfo(opts, opts.workDir, jobNum) # ignore already registed files in incremental mode for lfn in filter(lambda x: not (opts.incremental and x in dbsLog), output): outputData.update({lfn: output[lfn]}) configData.update(config) print " * Reading job logs - done" # Merge parent infos into output file data if os.path.exists(os.path.join(opts.workDir, 'datacache.dat')): # Get parent infos provider = DataProvider.loadState(Config(), opts.workDir, 'datacache.dat') log = utils.ActivityLog(' * Processing parent infos...') blocks = provider.getBlocks() parentMap = {} for block in blocks: blockInfo = (block[DataProvider.Dataset], block[DataProvider.BlockName]) lfns = map(lambda x: (x[DataProvider.lfn], blockInfo), block[DataProvider.FileList]) parentMap.update(dict(lfns)) # Insert parentage infos for lfn in outputData.keys(): for parentLFN in filter(lambda x: x, outputData[lfn][DBS.PARENT_FILES]): if not DBS.PARENT_INFO in outputData[lfn]: outputData[lfn][DBS.PARENT_INFO] = [] if not parentMap[parentLFN] in outputData[lfn][DBS.PARENT_INFO]: outputData[lfn][DBS.PARENT_INFO].append(parentMap[parentLFN]) del log print " * Processing parent infos - done" # Sort output files into blocks log = None metadata = {} datasets = {} datasetInfos = {} for idx, lfn in enumerate(outputData): if idx % 10 == 0: del log log = utils.ActivityLog(' * Dividing output into blocks - [%d / %d]' % (idx, len(outputData))) # Define dataset split criteria def generateDatasetKey(fileInfo): # Split by dataset parent and config hash (+ job config hash) parentDS = map(lambda (ds, b): ds, fileInfo.get(DBS.PARENT_INFO, [])) jobHash = ('', str(fileInfo[DBS.JOBHASH]))[opts.useJobHash] dsKey = utils.md5(str((fileInfo[DBS.CONFIGHASH], jobHash, parentDS))).hexdigest() # Write summary information: if not dsKey in datasetInfos: if parentDS == []: parentDS = ['None'] datasetInfos[dsKey] = ("%15s: %s\n%15s: %s\n" % ( "Config hash", fileInfo[DBS.CONFIGHASH], "Parent datasets", str.join("\n" + 17*" ", parentDS))) annotation = getAnnotation(fileInfo[DBS.CONFIGHASH], configData) if annotation: datasetInfos[dsKey] += "%15s: %s\n" % ("Annotation", annotation) return dsKey # Define block split criteria def generateBlockKey(fileInfo): # Split by SE and block parent (parent is left out in case of merging) key = utils.md5(str(fileInfo[DBS.SE]) + generateDatasetKey(fileInfo)) if not opts.doMerge: key.update(str(map(lambda (ds, b): b, fileInfo.get(DBS.PARENT_INFO, [])))) return key.hexdigest() dsKey = generateDatasetKey(outputData[lfn]) blockKey = generateBlockKey(outputData[lfn]) if not dsKey in datasets: datasets[dsKey] = {} metadata[dsKey] = {DBS.SIZE: 0, DBS.EVENTS: 0} if not blockKey in datasets[dsKey]: 
datasets[dsKey][blockKey] = [] metadata[blockKey] = {DBS.SIZE: 0, DBS.EVENTS: 0} # Calculate def incStats(x, info): x[DBS.SIZE] += int(info[DBS.SIZE]) x[DBS.EVENTS] += int(info[DBS.EVENTS]) incStats(metadata[dsKey], outputData[lfn]) incStats(metadata[blockKey], outputData[lfn]) datasets[dsKey][blockKey].append(lfn) print " * Dividing output into blocks - done" # Display dataset information print print " => Identified the following output datasets:" for ds in datasets.keys(): print "%4s * Key %s [%d block(s), %d file(s)]" % ("", ds, len(datasets[ds]), sum(map(len, datasets[ds].values()))) print 7*" " + datasetInfos[ds].replace("\n", "\n" + 7*" ") return (taskInfo['task id'], datasets, metadata, outputData, configData) |
tmp.sort(cmp=cmpLumi) | tmp.sort(cmpLumi) | def cmpLumi(a,b): (start_a_run, start_a_lumi) = a[0] (start_b_run, start_b_lumi) = b[0] if start_a_run == start_b_run: return cmp(start_a_lumi, start_b_lumi) else: return cmp(start_a_run, start_b_run) |
realmain(opts, args) | def processShorthand(optSet): if optSet: parser.parse_args(args = optSet.split() + sys.argv[1:], values = opts) |
|
if job.get('download') == 'True' and not opt.markIgnoreDL: | if job.get('download') == 'True' and not opts.markIgnoreDL: | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
if not 'http://' in src: src = "http://cmsdbsprod.cern.ch/%s/servlet/DBSServlet" % src self.args['url'] = src | if src != '': if not 'http://' in src: src = "http://cmsdbsprod.cern.ch/%s/servlet/DBSServlet" % src self.args['url'] = src | def __init__(self, config, datasetExpr, datasetNick, datasetID = 0): DataProvider.__init__(self, config, datasetExpr, datasetNick, datasetID) DataProvider.providers.update({'DBSApiv2': 'dbs'}) |
WMS.__init__(self, config, module, monitor, 'local', wmsapi) | WMS.__init__(self, config, module, monitor, 'local', self.api) | def __init__(self, config, module, monitor): wmsapi = config.get('local', 'wms', self._guessWMS()) if wmsapi != self._guessWMS(): utils.vprint('Default batch system on this host is: %s' % self._guessWMS(), -1, once = True) self.api = LocalWMSApi.open(wmsapi, config, self) utils.vprint('Using batch system: %s' % self.api.__class__.__name__, -1) self.addAttr = {} if config.parser.has_section(wmsapi): self.addAttr = dict(map(lambda item: (item, config.get(wmsapi, item)), config.parser.options(wmsapi))) |
list = filter(lambda x: self._jobs.get(x, Job()).attempt < self.maxRetry, self.ready) | list = filter(lambda x: self._jobs.get(x, Job()).attempt - 1 < self.maxRetry, self.ready) | def getSubmissionJobs(self, maxsample): # Determine number of jobs to submit submit = self.nJobs nQueued = len(self.queued) if self.inQueue > 0: submit = min(submit, self.inQueue - nQueued) if self.inFlight > 0: submit = min(submit, self.inFlight - nQueued - len(self.running)) if self.config.opts.continuous: submit = min(submit, maxsample) submit = max(submit, 0) |
opts.output = os.path.abspath(os.path.join(workDir, 'se_output')) | opts.output = os.path.join(workDir, 'se_output') opts.output = os.path.abspath(opts.output) | def main(args): help = \ |
if (realmain(opts, args) or not opts.loop) and not opts.infinite: break time.sleep(60) | try: if (realmain(opts, args) or not opts.loop) and not opts.infinite: break time.sleep(60) except KeyboardInterrupt: print "\n\nDownload aborted!\n" sys.exit(1) | def processShorthand(optSet): if optSet: parser.parse_args(args = optSet.split() + sys.argv[1:], values = opts) |
utils.eprint(procRM.getMessage()) | utils.eprint("%s\n\n" % procRM.getMessage()) | def dlfs_rm(path, msg): procRM = se_utils.se_rm(path) if procRM.wait() != 0: print "\t\tUnable to remove %s!" % msg utils.eprint(procRM.getMessage()) |
procCP = se_utils.se_copy(os.path.join(pathSE, name_dest), outFilePath) if procCP.wait() != 0: | myGetSize = lambda x: "(%7s)" % gcSupport.prettySize(os.path.getsize(x.replace('file://', ''))) def monitorFile(path, lock): while not lock.acquire(False): try: print "\r\t", name_dest, myGetSize(path), sys.stdout.flush() except: pass time.sleep(1) lock.release() monitorLock = threading.Lock() monitorLock.acquire() monitor = threading.Thread(target = monitorFile, args = (checkPath, monitorLock)) monitor.start() try: procCP = se_utils.se_copy(os.path.join(pathSE, name_dest), outFilePath, tmp = checkPath) result = procCP.wait() finally: monitorLock.release() monitor.join() if result != 0: | def processSingleJob(jobNum): print "Job %d:" % jobNum, |
checkPath = checkPath.replace('file://', '') print "(%s)" % gcSupport.prettySize(os.path.getsize(checkPath)), hashLocal = md5sum(checkPath) if 'file://' not in outFilePath: | hashLocal = md5sum(checkPath.replace('file://', '')) if not ('file://' in outFilePath): | def processSingleJob(jobNum): print "Job %d:" % jobNum, |
lumirange = __LUMI_RANGE__ | lumirange = [__LUMI_RANGE__] | def customise_for_gc(process): try: maxevents = __MAX_EVENTS__ process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(maxevents) ) except: pass # Dataset related setup try: tmp = __SKIP_EVENTS__ process.source = cms.Source("PoolSource", skipEvents = cms.untracked.uint32(__SKIP_EVENTS__), fileNames = cms.untracked.vstring(__FILE_NAMES__) ) try: secondary = __FILE_NAMES2__ process.source.secondaryFileNames = cms.untracked.vstring(secondary) except: pass try: lumirange = __LUMI_RANGE__ process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange(lumirange) except: pass except: pass # Generator related setup try: if hasattr(process, "generator"): process.source.firstLuminosityBlock = cms.untracked.uint32(1+__MY_JOBID__) except: pass if hasattr(process, "RandomNumberGeneratorService"): randSvc = RandomNumberServiceHelper(process.RandomNumberGeneratorService) randSvc.populate() process.AdaptorConfig = cms.Service("AdaptorConfig", enable=cms.untracked.bool(True), stats = cms.untracked.bool(True), ) return (process) |
self.sandPath = config.getPath('local', 'sandbox path', os.path.join(config.workDir, 'sandbox')) | self.sandPath = config.getPath('local', 'sandbox path', os.path.join(config.workDir, 'sandbox'), check=False) | def __init__(self, config, module, monitor): wmsapi = config.get('local', 'wms', self._guessWMS()) if wmsapi != self._guessWMS(): utils.vprint('Default batch system on this host is: %s' % self._guessWMS(), -1, once = True) self.api = LocalWMSApi.open(wmsapi, config, self) utils.vprint('Using batch system: %s' % self.api.__class__.__name__, -1) self.addAttr = {} if config.parser.has_section(wmsapi): self.addAttr = dict(map(lambda item: (item, config.get(wmsapi, item)), config.parser.options(wmsapi))) |
try: listBlockInfo = api.listBlocks(self.datasetPath, nosite=True) (listFileInfo, seList) = ([], {}) def listFileInfoThread(self, result): result.extend(api.listFiles(self.datasetPath, retriveList=QM(self.selectedLumis, ['retrive_lumi'], []))) tFile = utils.gcStartThread(listFileInfoThread, self, listFileInfo) | def getWithPhedex(listBlockInfo, seList): | def getBlocksInternal(self): import urllib2 api = createDBSAPI(self.url) try: listBlockInfo = api.listBlocks(self.datasetPath, nosite=True) # Start thread to retrieve list of files (listFileInfo, seList) = ([], {}) def listFileInfoThread(self, result): result.extend(api.listFiles(self.datasetPath, retriveList=QM(self.selectedLumis, ['retrive_lumi'], []))) tFile = utils.gcStartThread(listFileInfoThread, self, listFileInfo) # Get dataset list from PhEDex (concurrent with listFiles) phedexArgFmt = lambda x: ('block=%s' % x['Name']).replace('/', '%2F').replace('#', '%23') phedexArg = str.join('&', map(phedexArgFmt, listBlockInfo)) phedexData = urllib2.urlopen('https://cmsweb.cern.ch/phedex/datasvc/json/prod/blockreplicas', phedexArg).read() if str(phedexData).lower().find('error') != -1: raise DatasetError("Phedex error '%s'" % phedexData) phedexDict = eval(compile(phedexData.replace('null','None'), '<string>', 'eval'))['phedex']['block'] for phedexBlock in phedexDict: phedexSelector = lambda x: (x['complete'] == 'y') or not self.onlyComplete phedexSites = dict(map(lambda x: (x['node'], x['se']), filter(phedexSelector, phedexBlock['replica']))) phedexSitesOK = utils.doBlackWhiteList(phedexSites.keys(), self.phedexBL) seList[phedexBlock['name']] = map(lambda x: phedexSites[x], phedexSitesOK) tFile.join() except: raise RethrowError('DBS exception') |
blockInfo[DataProvider.SEList] = seList.get(block['Name'], []) | if self.phedex: blockInfo[DataProvider.SEList] = seList.get(block['Name'], []) else: blockInfo[DataProvider.SEList] = map(lambda x: x['Name'], block['StorageElementList']) | def lumiFilter(lumilist): if self.selectedLumis: for lumi in lumilist: if selectLumi((lumi['RunNumber'], lumi['LumiSectionNumber']), self.selectedLumis): return True return self.selectedLumis == None |
params += ' -c %d' % ((reqs[WMS.WALLTIME] + 59) / 60) | params += ' -W %d' % ((reqs[WMS.WALLTIME] + 59) / 60) if WMS.CPUTIME in reqs: params += ' -c %d' % ((reqs[WMS.CPUTIME] + 59) / 60) | def getSubmitArguments(self, jobNum, sandbox, stdout, stderr, addAttr): # Job name params = ' -J %s' % self.wms.getJobName(jobNum) # Job requirements reqs = dict(self.wms.getRequirements(jobNum)) if WMS.SITES in reqs: params += ' -q %s' % reqs[WMS.SITES][0] if WMS.WALLTIME in reqs: params += ' -c %d' % ((reqs[WMS.WALLTIME] + 59) / 60) # IO paths params += ' -o %s -e %s' % (stdout, stderr) return params |
if opts.shuffle: random.shuffle(jobList) else: jobList.sort() for jobNum in jobList: | def processSingleJob(jobNum): | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
continue | return | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
incInfo("Processing") continue | return incInfo("Processing") | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
incInfo("Downloaded") continue | return incInfo("Downloaded") | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
break | sys.exit(1) | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
incInfo("No files") continue | return incInfo("No files") | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
dlfs_rm('file://%s' % checkPath, 'SE file') | if 'file://' not in outFilePath: dlfs_rm('file://%s' % checkPath, 'SE file') | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
userDefaultsFile = resolvePath("~/.grid-control.conf", check = False) | userDefaultsFile = utils.resolvePath("~/.grid-control.conf", check = False) | def parseFileInt(fn): try: parser.readfp(open(fn, 'r')) except IOError: raise ConfigError("Error while reading configuration file '%s'!" % fn) except cp.Error: print "Configuration file `%s' contains an error:" % fn raise |
args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file:////'), urls)) | args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file://'), urls)) | def se_runcmd(cmd, varDict = {}, *urls): runLib = utils.pathGC('share', 'gc-run.lib') args = str.join(' ', map(lambda x: '"%s"' % ensurePrefix(x).replace('dir://', 'file:////'), urls)) varString = str.join(' ', map(lambda x: 'export %s="%s";' % (x, varDict[x]), varDict)) return utils.LoggedProcess('source %s || exit 1; %s %s %s' % (runLib, varString, cmd, args)) |
if infos["Downloaded"] == len(jobList): | if ("Downloaded" in infos) and (infos["Downloaded"] == len(jobList)): | def incInfo(x): infos[x] = infos.get(x, 0) + 1 |
lambda se_rm(target) = utils.LoggedProcess(se_runcmd("url_rm", se_url(target))) | se_rm = lambda target: utils.LoggedProcess(se_runcmd("url_rm", se_url(target))) | def se_runcmd(cmd, urls): runLib = utils.pathGC('share', 'gc-run.lib') urlargs = str.join(' ', map(lambda x: '"%s"' % x.replace('dir://', 'file://'), urls)) return 'source %s || exit 1; print_and_eval "%s" %s' % (runLib, cmd, urlargs) |
predefined = { 'TODO': 'SUBMITTED,WAITING,READY,QUEUED', 'ALL': 'SUBMITTED,WAITING,READY,QUEUED,RUNNING', 'COMPLETE': str.join(',', Job.states)} | predefined = { 'TODO': 'SUBMITTED,WAITING,READY,QUEUED', 'ALL': str.join(',', Job.states)} | def getJobs(self, selector): predefined = { 'TODO': 'SUBMITTED,WAITING,READY,QUEUED', 'ALL': 'SUBMITTED,WAITING,READY,QUEUED,RUNNING', 'COMPLETE': str.join(',', Job.states)} jobFilter = predefined.get(selector.upper(), selector.upper()) |
regex = re.compile(site) if regex.search(dest) and jobObj.state not in (Job.SUCCESS, Job.FAILED): | if re.compile(site).search(dest): | def siteFilter(jobObj): dest = jobObj.get("dest") if not dest: return False dest = str.join("/", map(lambda x: x.split(":")[0], dest.upper().split("/"))) for site in jobFilter.split(','): regex = re.compile(site) if regex.search(dest) and jobObj.state not in (Job.SUCCESS, Job.FAILED): return True return False |
infos[dsName][DataProvider.lfn] = block[DataProvider.FileList][0][DataProvider.lfn] | if len(block[DataProvider.FileList]): infos[dsName][DataProvider.lfn] = block[DataProvider.FileList][0][DataProvider.lfn] | def unique(seq): set = {} map(set.__setitem__, seq, []) return set.keys() |
wms = config.get(backend, 'wms', defaultwms[backend]) wms = WMS.open(wms, config, module, monitor) | if backend == 'grid': wms = WMS.open(config.get(backend, 'wms', 'GliteWMS'), config, module, monitor) elif backend == 'local': wms = WMS.open(defaultwms[backend], config, module, monitor) else: raise UserError("Invalid backend specified!" % config.workDir) | def interrupt(sig, frame): global opts, log, handler opts.abort = True log = utils.ActivityLog('Quitting grid-control! (This can take a few seconds...)') signal.signal(signal.SIGINT, handler) |
def getMissing(self, nJobs): | def extendJobDB(self, nJobs): | def getMissing(self, nJobs): self.nJobs = nJobs if len(self._jobs) < nJobs: return filter(lambda x: x not in self._jobs, range(nJobs)) return [] |
return jobNum in map(int, arg.split(",")) | def checkID(idArg): (start, end) = (idArg.split('-')[0], idArg.split('-')[-1]) if (start == '') or jobNum >= int(start): if (end == '') or jobNum <= int(end): return True return False return reduce(operator.or_, map(checkID, arg.split(","))) | def selectByID(jobNum, jobObj, arg): try: return jobNum in map(int, arg.split(",")) except: raise UserError('Job identifiers must be integers.') |
raise UserError('Job identifiers must be integers.') | raise UserError('Job identifiers must be integers or ranges.') | def selectByID(jobNum, jobObj, arg): try: return jobNum in map(int, arg.split(",")) except: raise UserError('Job identifiers must be integers.') |
def selectSpecific(specific): selectorType = QM(sepcific.isdigit(), 'id', 'state') | def selectSpecific(specific): cmpValue = QM(specific[0] == '~', False, True) specific = specific.lstrip('~') selectorType = QM(specific[0].isdigit(), 'id', 'state') | def selectSpecific(specific): selectorType = QM(sepcific.isdigit(), 'id', 'state') if ':' in specific: selectorType = specific.split(':', 1)[0].lower() return selectorMap[selectorType](jobNum, jobObj, specific.split(':', 1)[-1]) |
return selectorMap[selectorType](jobNum, jobObj, specific.split(':', 1)[-1]) | return selectorMap[selectorType](jobNum, jobObj, specific.split(':', 1)[-1]) == cmpValue | def selectSpecific(specific): selectorType = QM(sepcific.isdigit(), 'id', 'state') if ':' in specific: selectorType = specific.split(':', 1)[0].lower() return selectorMap[selectorType](jobNum, jobObj, specific.split(':', 1)[-1]) |
self.ready.extend(self.jobDB.getMissing(self.nJobs)) | self.ready.extend(self.jobDB.extendJobDB(self.nJobs)) | def __init__(self, config, module, monitor): (self.module, self.monitor) = (module, monitor) self.errorDict = module.errorDict self._dbPath = os.path.join(config.workDir, 'jobs') self.disableLog = os.path.join(config.workDir, 'disabled') try: if not os.path.exists(self._dbPath): if config.opts.init: os.mkdir(self._dbPath) else: raise ConfigError("Not a properly initialized work directory '%s'." % config.workDir) except IOError: raise RethrowError("Problem creating work directory '%s'" % self._dbPath) |
self.ready.extend(self.jobDB.getMissing(self.nJobs)) | self.ready.extend(self.jobDB.extendJobDB(self.nJobs)) | def resetState(jobs, newState): jobSet = utils.set(jobs) for jobNum in jobs: jobObj = self.jobDB.get(jobNum) if jobObj.state in [ Job.INIT, Job.DISABLED, Job.ABORTED, Job.CANCELLED, Job.DONE, Job.FAILED, Job.SUCCESS ]: self._update(jobObj, jobNum, newState) jobSet.remove(jobNum) jobObj.attempt = 0 if len(jobSet) > 0: output = (Job.states[newState], str.join(', ', map(str, jobSet))) raise RuntimeError('For the following jobs it was not possible to reset the state to %s:\n%s' % output) |
params = PBSGE.getSubmitArguments(self, jobNum, sandbox, stdout, stderr, addAttr, reqMap) | params = PBSGECommon.getSubmitArguments(self, jobNum, sandbox, stdout, stderr, addAttr, reqMap) | def getSubmitArguments(self, jobNum, sandbox, stdout, stderr, addAttr): reqMap = { WMS.MEMORY: ("pvmem", lambda m: "%dmb" % m) } params = PBSGE.getSubmitArguments(self, jobNum, sandbox, stdout, stderr, addAttr, reqMap) # Job requirements reqs = dict(self.wms.getRequirements(jobNum)) if reqs.get(WMS.SITES, (None, None))[1]: params += ' -l host=%s' % str.join("+", reqs[WMS.SITES][1]) return params |
if len(self.sePaths) <= 1: | if len(self.sePaths) == 1: | def getTaskConfig(self): taskConfig = { # Space limits 'SCRATCH_UL' : self.seSDUpperLimit, 'SCRATCH_LL' : self.seSDLowerLimit, 'LANDINGZONE_UL': self.seLZUpperLimit, 'LANDINGZONE_LL': self.seLZLowerLimit, # Storage element 'SE_MINFILESIZE': self.seMinSize, 'SE_OUTPUT_FILES': str.join(' ', self.seOutputFiles), 'SE_INPUT_FILES': str.join(' ', self.seInputFiles), 'SE_OUTPUT_PATTERN': self.seOutputPattern, 'SE_INPUT_PATTERN': self.seInputPattern, # Sandbox 'SB_OUTPUT_FILES': str.join(' ', self.getOutFiles()), 'SB_INPUT_FILES': str.join(' ', map(utils.shellEscape, map(os.path.basename, self.getInFiles()))), # Runtime 'DOBREAK': self.nodeTimeout, 'MY_RUNTIME': self.getCommand(), 'GC_DEPFILES': str.join(' ', self.getDependencies()), # Seeds and substitutions 'SEEDS': str.join(' ', map(str, self.seeds)), 'SUBST_FILES': str.join(' ', map(os.path.basename, self.getSubstFiles())), # Task infos 'TASK_ID': self.taskID, 'GC_CONF': self.config.confName, 'GC_VERSION': utils.getVersion(), 'DB_EXEC': 'shellscript' } if len(self.sePaths) <= 1: taskConfig['SE_PATH'] = self.sePaths[0] return dict(taskConfig.items() + self.constants.items()) |
tmp = map(str.strip, rsplit(line, '=', 1)) | tmp = map(str.strip, [i[::-1] for i in line[::-1].split("=",1)[::-1]]) | def doFilter(blockinfo): name = self._filter if self._filter: name = blockinfo[DataProvider.Dataset] if DataProvider.BlockName in blockinfo and "#" in self._filter: name = "%s#%s" % (name, blockinfo[DataProvider.BlockName]) if name.startswith(self._filter): return True return False |
selist = self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList, []) if selist != None: | selist = self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList, False) if selist != False: | def getRequirements(self, jobNum): reqs = Module.getRequirements(self, jobNum) if self.dataSplitter != None: selist = self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList, []) if selist != None: reqs.append((WMS.STORAGE, selist)) return reqs |
result.extend(api.listFiles(self.datasetPath, retriveList=([], ['retrive_lumi'])[self.selectedLumis])) | result.extend(api.listFiles(self.datasetPath, retriveList=(['retrive_lumi'], [])[self.selectedLumis == '' ])) | def listFileInfoThread(self, result): result.extend(api.listFiles(self.datasetPath, retriveList=([], ['retrive_lumi'])[self.selectedLumis])) |
if 'config file' in config.parser.options(self.__class__.__name__): raise ConfigError("Please use 'nickname config' instead of 'config file'") | def parseMap(x, parser): result = {} for entry in x.split('\n'): if "=>" in entry: nick, value = map(str.strip, entry.split('=>')) else: nick, value = (None, entry) result[nick] = filter(lambda x: x, parser(value.strip())) return result |
|
return self.selectedLumis == None | return False | def lumiFilter(lfn): for lumi in listLumiInfo[lfn]: if selectLumi(lumi, self.selectedLumis): return True return self.selectedLumis == None |
se_rm = lambda target: utils.LoggedProcess(se_runcmd("url_rm", se_url(target))) | se_rm = lambda target: utils.LoggedProcess(se_runcmd("url_rm", target)) | def se_runcmd(cmd, urls): runLib = utils.pathGC('share', 'gc-run.lib') urlargs = str.join(' ', map(lambda x: '"%s"' % x.replace('dir://', 'file://'), urls)) return 'source %s || exit 1; print_and_eval "%s" %s' % (runLib, cmd, urlargs) |
def __cmp__(self, other): return cmp(self.message, other.message) or cmp(self.new_message, self.other_messge) | def __cmp__(self, other): return cmp(self.message, other.message) or cmp(self.new_message, self.other_messge) |
|
object_suffix=None, schema_suffix=None): | schema_suffix=None): | def _report_error(self, legacy_message, new_message=None, object_suffix=None, schema_suffix=None): """ Report an error during validation. |
if object_suffix: object_expr += object_suffix | def _report_error(self, legacy_message, new_message=None, object_suffix=None, schema_suffix=None): """ Report an error during validation. |
|
This works with all public attributes: >>> class Person(PlainOldData): ... def __init__(self, name): ... self.name = name >>> joe = Person('Joe') >>> joe.pod_attrs ('name',) | This works with all public attributes:: | def pod_attrs(self): """ Return a list of sorted attributes. |
With private attributes exposed as properties: >>> class Person(PlainOldData): ... def __init__(self, name): ... self._name = name ... @property ... def name(self): ... return self._name >>> joe = Person('Joe') >>> joe.pod_attrs ('name',) | >>> class Person(PlainOldData): ... def __init__(self, name): ... self.name = name >>> joe = Person('Joe') >>> joe.pod_attrs ('name',) | ... def __init__(self, name): |
And with __slots__: >>> class Person(PlainOldData): ... __slots__ = ('_name',) ... def __init__(self, name): ... self._name = name ... @property ... def name(self): ... return self._name >>> joe = Person('Joe') >>> joe.pod_attrs ('name',) | With private attributes exposed as properties:: >>> class Person(PlainOldData): ... def __init__(self, name): ... self._name = name ... @property ... def name(self): ... return self._name >>> joe = Person('Joe') >>> joe.pod_attrs ('name',) And with __slots__:: >>> class Person(PlainOldData): ... __slots__ = ('_name',) ... def __init__(self, name): ... self._name = name ... @property ... def name(self): ... return self._name >>> joe = Person('Joe') >>> joe.pod_attrs ('name',) | ... def name(self): |
This function simply shows all fields in a simple format: >>> class Person(PlainOldData): ... def __init__(self, name): ... self._name = name ... @property ... def name(self): ... return self._name >>> Person("Bob") <Person name:'Bob'> | This function simply shows all fields in a simple format:: >>> class Person(PlainOldData): ... def __init__(self, name): ... self._name = name ... @property ... def name(self): ... return self._name >>> Person("Bob") <Person name:'Bob'> | def __repr__(self): """ Produce more-less human readable encoding of all fields. |
def nonum(name): return filter(lambda x: not x.isdigit(), name) | def make_head(header): return """// file: %s\n// dealer: %s\n// programmer: %s\n// hint: %s\n\ndigraph {"""%( header['file'],header['dealer'],header['programer'],header['hint']) def make_tail(): return "}" | def nonum(name): return filter(lambda x: not x.isdigit(), name) |
head = """// file: %s\n// dealer: %s\n// programmer: %s\n// hint: %s\n\ndigraph {"""%( header['file'],header['dealer'],header['programer'],header['hint']) tail = """}""" dot_file = [head] | dot_file = [make_head(header)] | def make_dot(header, signals, modules): head = """// file: %s\n// dealer: %s\n// programmer: %s\n// hint: %s\n\ndigraph {"""%( header['file'],header['dealer'],header['programer'],header['hint']) tail = """}""" dot_file = [head] checkT = False checkF = False limit_mods = filter(lambda m: adv_len(modules[m]['ins']) or adv_len(modules[m]['outs']), modules) limit_mods.sort() for m in limit_mods: dot_file.append(make_node(m, modules[m])) checkT = 2 in modules[m]['ins'] or checkT checkF = 1 in modules[m]['ins'] or checkF if checkF: dot_file.append(' 0 [shape="circle"];') if checkT: dot_file.append(' 1 [shape="circle"];') dot_file.append('') limit_sigs = filter(lambda s: is_significant(signals[s][0]), signals) extended_sigs = {} #extended_sigs = { (src,dst):[(name0,type0),(name1,type1)] } for s in limit_sigs: sname, stype = signals[s] outs = filter(lambda m: s in modules[m]['outs'], limit_mods) if sname == '0': outs.append('0') if sname == '1': outs.append('1') ins = filter(lambda m: s in modules[m]['ins'], limit_mods) pack = set() for o in outs: for i in ins: pack.add((o,i)) for line in pack: if extended_sigs.get(line) is None: extended_sigs[line]=[] extended_sigs[line].append(signals[s]) # bus signals agregate for addr in extended_sigs: if len(extended_sigs[addr])==1: sname, stype = extended_sigs[addr][0] dot_file.append(make_direct_signal(addr,sname,stype)) else: pack = {} for n,t in extended_sigs[addr]: tag = (nonum(n),t) if pack.get(tag) is None: pack[tag]=[] pack[tag].append(n) for bulk in pack: if len(pack[bulk])==1: dot_file.append(make_direct_signal(addr,pack[bulk][0],bulk[1])) else: dot_file.append(make_signal_bus(addr,pack[bulk],bulk[1])) dot_file.append(tail) return "\n".join(dot_file) |
dot_file.append(tail) | dot_file.append(make_tail()) | def make_dot(header, signals, modules): head = """// file: %s\n// dealer: %s\n// programmer: %s\n// hint: %s\n\ndigraph {"""%( header['file'],header['dealer'],header['programer'],header['hint']) tail = """}""" dot_file = [head] checkT = False checkF = False limit_mods = filter(lambda m: adv_len(modules[m]['ins']) or adv_len(modules[m]['outs']), modules) limit_mods.sort() for m in limit_mods: dot_file.append(make_node(m, modules[m])) checkT = 2 in modules[m]['ins'] or checkT checkF = 1 in modules[m]['ins'] or checkF if checkF: dot_file.append(' 0 [shape="circle"];') if checkT: dot_file.append(' 1 [shape="circle"];') dot_file.append('') limit_sigs = filter(lambda s: is_significant(signals[s][0]), signals) extended_sigs = {} #extended_sigs = { (src,dst):[(name0,type0),(name1,type1)] } for s in limit_sigs: sname, stype = signals[s] outs = filter(lambda m: s in modules[m]['outs'], limit_mods) if sname == '0': outs.append('0') if sname == '1': outs.append('1') ins = filter(lambda m: s in modules[m]['ins'], limit_mods) pack = set() for o in outs: for i in ins: pack.add((o,i)) for line in pack: if extended_sigs.get(line) is None: extended_sigs[line]=[] extended_sigs[line].append(signals[s]) # bus signals agregate for addr in extended_sigs: if len(extended_sigs[addr])==1: sname, stype = extended_sigs[addr][0] dot_file.append(make_direct_signal(addr,sname,stype)) else: pack = {} for n,t in extended_sigs[addr]: tag = (nonum(n),t) if pack.get(tag) is None: pack[tag]=[] pack[tag].append(n) for bulk in pack: if len(pack[bulk])==1: dot_file.append(make_direct_signal(addr,pack[bulk][0],bulk[1])) else: dot_file.append(make_signal_bus(addr,pack[bulk],bulk[1])) dot_file.append(tail) return "\n".join(dot_file) |
head = """// file: %s\n// dealer: %s\n// programmer: %s\n// hint: %s\n\ndigraph {"""%( header['file'],header['dealer'],header['programer'],header['hint']) tail = """}""" dot_file = [head] | dot_file = [make_head(header)] | def make_dot_merged(header, signals, modules): head = """// file: %s\n// dealer: %s\n// programmer: %s\n// hint: %s\n\ndigraph {"""%( header['file'],header['dealer'],header['programer'],header['hint']) tail = """}""" dot_file = [head] limit_mods = filter(lambda m: adv_len(modules[m]['ins']) or adv_len(modules[m]['outs']), modules) limit_mods.sort() dot_file.extend(map(lambda m: make_node(m, modules[m]), limit_mods)) dot_file.append('') limit_sigs = filter(lambda s: is_significant(signals[s][0]), signals) limit_sigs.sort() direct_sigs = {} # for every signal get set of pairs (signal,module) and (module, signal) # bus unable to create for s in limit_sigs: sname, stype = signals[s] stag = "s%d"%s outs = filter(lambda m: s in modules[m]['outs'], limit_mods) ins = filter(lambda m: s in modules[m]['ins'], limit_mods) # filtering out single signals if len(outs)==1 and len(ins)==1: line = (outs[0],ins[0]) if direct_sigs.get(line) is None: direct_sigs[line]=[] direct_sigs[line].append(signals[s]) # for single signals don't create dots # we will add them later continue # for others create packets pack = set() map(lambda m: pack.add((m,stag)), outs) map(lambda m: pack.add((stag,m)), ins) if len(pack)>1: dot_file.append(make_direct_signal(stag, sname, stype)) for src, dst in pack: dot_file.append(make_link(src,dst,stype)) elif len(pack)==1: dot_file.append(make_signal(stag, sname, stype)) for src, dst in pack: dot_file.append(make_link(src,dst,stype)) # adding single signals, some of them aggregating into bus for addr in direct_sigs: pack = {} for sname, stype in direct_sigs[addr]: tag = (nonum(sname),stype) if pack.get(tag) is None: pack[tag]=[] pack[tag].append(sname) for bulk in pack: if len(pack[bulk])==1: dot_file.append(make_direct_signal(addr,pack[bulk][0],bulk[1])) else: dot_file.append(make_signal_bus(addr,pack[bulk],bulk[1])) dot_file.append(tail) return "\n".join(dot_file) |
dot_file.append(make_direct_signal(stag, sname, stype)) | dot_file.append(make_signal(stag, sname, stype)) | def make_dot_merged(header, signals, modules): head = """// file: %s\n// dealer: %s\n// programmer: %s\n// hint: %s\n\ndigraph {"""%( header['file'],header['dealer'],header['programer'],header['hint']) tail = """}""" dot_file = [head] limit_mods = filter(lambda m: adv_len(modules[m]['ins']) or adv_len(modules[m]['outs']), modules) limit_mods.sort() dot_file.extend(map(lambda m: make_node(m, modules[m]), limit_mods)) dot_file.append('') limit_sigs = filter(lambda s: is_significant(signals[s][0]), signals) limit_sigs.sort() direct_sigs = {} # for every signal get set of pairs (signal,module) and (module, signal) # bus unable to create for s in limit_sigs: sname, stype = signals[s] stag = "s%d"%s outs = filter(lambda m: s in modules[m]['outs'], limit_mods) ins = filter(lambda m: s in modules[m]['ins'], limit_mods) # filtering out single signals if len(outs)==1 and len(ins)==1: line = (outs[0],ins[0]) if direct_sigs.get(line) is None: direct_sigs[line]=[] direct_sigs[line].append(signals[s]) # for single signals don't create dots # we will add them later continue # for others create packets pack = set() map(lambda m: pack.add((m,stag)), outs) map(lambda m: pack.add((stag,m)), ins) if len(pack)>1: dot_file.append(make_direct_signal(stag, sname, stype)) for src, dst in pack: dot_file.append(make_link(src,dst,stype)) elif len(pack)==1: dot_file.append(make_signal(stag, sname, stype)) for src, dst in pack: dot_file.append(make_link(src,dst,stype)) # adding single signals, some of them aggregating into bus for addr in direct_sigs: pack = {} for sname, stype in direct_sigs[addr]: tag = (nonum(sname),stype) if pack.get(tag) is None: pack[tag]=[] pack[tag].append(sname) for bulk in pack: if len(pack[bulk])==1: dot_file.append(make_direct_signal(addr,pack[bulk][0],bulk[1])) else: dot_file.append(make_signal_bus(addr,pack[bulk],bulk[1])) dot_file.append(tail) return "\n".join(dot_file) |
dot_file.append(tail) | dot_file.append(make_tail()) | def make_dot_merged(header, signals, modules): head = """// file: %s\n// dealer: %s\n// programmer: %s\n// hint: %s\n\ndigraph {"""%( header['file'],header['dealer'],header['programer'],header['hint']) tail = """}""" dot_file = [head] limit_mods = filter(lambda m: adv_len(modules[m]['ins']) or adv_len(modules[m]['outs']), modules) limit_mods.sort() dot_file.extend(map(lambda m: make_node(m, modules[m]), limit_mods)) dot_file.append('') limit_sigs = filter(lambda s: is_significant(signals[s][0]), signals) limit_sigs.sort() direct_sigs = {} # for every signal get set of pairs (signal,module) and (module, signal) # bus unable to create for s in limit_sigs: sname, stype = signals[s] stag = "s%d"%s outs = filter(lambda m: s in modules[m]['outs'], limit_mods) ins = filter(lambda m: s in modules[m]['ins'], limit_mods) # filtering out single signals if len(outs)==1 and len(ins)==1: line = (outs[0],ins[0]) if direct_sigs.get(line) is None: direct_sigs[line]=[] direct_sigs[line].append(signals[s]) # for single signals don't create dots # we will add them later continue # for others create packets pack = set() map(lambda m: pack.add((m,stag)), outs) map(lambda m: pack.add((stag,m)), ins) if len(pack)>1: dot_file.append(make_direct_signal(stag, sname, stype)) for src, dst in pack: dot_file.append(make_link(src,dst,stype)) elif len(pack)==1: dot_file.append(make_signal(stag, sname, stype)) for src, dst in pack: dot_file.append(make_link(src,dst,stype)) # adding single signals, some of them aggregating into bus for addr in direct_sigs: pack = {} for sname, stype in direct_sigs[addr]: tag = (nonum(sname),stype) if pack.get(tag) is None: pack[tag]=[] pack[tag].append(sname) for bulk in pack: if len(pack[bulk])==1: dot_file.append(make_direct_signal(addr,pack[bulk][0],bulk[1])) else: dot_file.append(make_signal_bus(addr,pack[bulk],bulk[1])) dot_file.append(tail) return "\n".join(dot_file) |
if len(sys.argv) != 2: print 'Please specify one filename on the command line.' sys.exit(1) text = open(sys.argv[1],"rt").read() | usage = "usage: %prog [options] inputfile\ninputfile of smw/umc/cmc Crestron's filetypes" parser = OptionParser(usage=usage) parser.add_option("-o","--output", dest="output", help="write result to FILE", metavar="FILE") parser.add_option("-m","--merged", dest="merge", action="store_true", help="make \"merged signals\" version", default=False) (options,args) = parser.parse_args() if len(args) != 1: parser.error("input file is required") (filename,) = args text = open(filename, "rt").read() | def main(): if len(sys.argv) != 2: print 'Please specify one filename on the command line.' sys.exit(1) text = open(sys.argv[1],"rt").read() tree = tokenize(text) header,sigs,mods = parse(tree) print make_dot(header,sigs,mods) #print make_dot_merged(header,sigs,mods) |
print make_dot(header,sigs,mods) | if options.merge: result = make_dot_merged(header,sigs,mods) else: result = make_dot(header,sigs,mods) if options.output is not None: open(options.output, "wt").write(result) else: print result | def main(): if len(sys.argv) != 2: print 'Please specify one filename on the command line.' sys.exit(1) text = open(sys.argv[1],"rt").read() tree = tokenize(text) header,sigs,mods = parse(tree) print make_dot(header,sigs,mods) #print make_dot_merged(header,sigs,mods) |
_description = "Hours summary by user" | _description = "Hours Summary by User" | def _user(self, cr, uid, ids, name, arg, context=None): res = {} cr.execute('SELECT MAX(id) FROM res_users') max_user = cr.fetchone()[0] for id in ids: ids2 = self.search(cr, uid, [('parent_id', 'child_of', [id])]) if ids2: cr.execute('SELECT DISTINCT("user") FROM account_analytic_analysis_summary_user ' \ 'WHERE account_id =ANY(%s) AND unit_amount <> 0.0',(ids2,)) res[id] = [int((id * max_user) + x[0]) for x in cr.fetchall()] else: res[id] = [] return res |
for account_id, sum in cr.fetchall(): res[account_id] = sum | for account_id, lid in cr.fetchall(): res[account_id][f] = lid | def _analysis_all(self, cr, uid, ids, fields, arg, context=None): dp = 2 res = dict([(i, {}) for i in ids]) |
for account_id, sum in cr.fetchall(): res[account_id][f] = sum | for account_id, lwd in cr.fetchall(): res[account_id][f] = lwd | def _analysis_all(self, cr, uid, ids, fields, arg, context=None): dp = 2 res = dict([(i, {}) for i in ids]) |
for account_id, sum in cr.fetchall(): res[account_id][f] = round(sum, dp) | for account_id, sua in cr.fetchall(): res[account_id][f] = round(sua, dp) | def _analysis_all(self, cr, uid, ids, fields, arg, context=None): dp = 2 res = dict([(i, {}) for i in ids]) |
for account_id, sum in ff: res[account_id][f] = round(sum, dp) | for account_id, hq in ff: res[account_id][f] = round(hq, dp) | def _analysis_all(self, cr, uid, ids, fields, arg, context=None): dp = 2 res = dict([(i, {}) for i in ids]) |
ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: acc_set = ",".join(map(str, ids2)) | parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)])) if parent_ids: | def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: acc_set = ",".join(map(str, ids2)) cr.execute("select account_analytic_line.account_id, COALESCE(sum(amount_currency),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type = 'sale' \ group by account_analytic_line.account_id" ,(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) return self._compute_currency_for_level_tree(cr, uid, ids, ids2, res, acc_set, context) |
where account_analytic_line.account_id =ANY(%s) \ | where account_analytic_line.account_id IN %s \ | def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: acc_set = ",".join(map(str, ids2)) cr.execute("select account_analytic_line.account_id, COALESCE(sum(amount_currency),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type = 'sale' \ group by account_analytic_line.account_id" ,(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) return self._compute_currency_for_level_tree(cr, uid, ids, ids2, res, acc_set, context) |
group by account_analytic_line.account_id" ,(ids2,)) | group by account_analytic_line.account_id" ,(parent_ids,)) | def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: acc_set = ",".join(map(str, ids2)) cr.execute("select account_analytic_line.account_id, COALESCE(sum(amount_currency),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type = 'sale' \ group by account_analytic_line.account_id" ,(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) return self._compute_currency_for_level_tree(cr, uid, ids, ids2, res, acc_set, context) |
return self._compute_currency_for_level_tree(cr, uid, ids, ids2, res, acc_set, context) | return self._compute_currency_for_level_tree(cr, uid, ids, parent_ids, res, context) | def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: acc_set = ",".join(map(str, ids2)) cr.execute("select account_analytic_line.account_id, COALESCE(sum(amount_currency),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type = 'sale' \ group by account_analytic_line.account_id" ,(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) return self._compute_currency_for_level_tree(cr, uid, ids, ids2, res, acc_set, context) |
ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: | parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)])) if parent_ids: | def _ca_to_invoice_calc(self, cr, uid, ids, name, arg, context={}): res = {} res2 = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: # Amount uninvoiced hours to invoice at sale price # Warning # This computation doesn't take care of pricelist ! # Just consider list_price acc_set = ",".join(map(str, ids2)) cr.execute("""SELECT account_analytic_account.id, \ COALESCE(sum (product_template.list_price * \ account_analytic_line.unit_amount * \ ((100-hr_timesheet_invoice_factor.factor)/100)),0.0) \ AS ca_to_invoice \ FROM product_template \ join product_product \ on product_template.id = product_product.product_tmpl_id \ JOIN account_analytic_line \ on account_analytic_line.product_id = product_product.id \ JOIN account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ JOIN account_analytic_account \ on account_analytic_account.id = account_analytic_line.account_id \ JOIN hr_timesheet_invoice_factor \ on hr_timesheet_invoice_factor.id = account_analytic_account.to_invoice \ WHERE account_analytic_account.id =ANY(%s) \ AND account_analytic_line.invoice_id is null \ AND account_analytic_line.to_invoice IS NOT NULL \ and account_analytic_journal.type in ('purchase','general') \ GROUP BY account_analytic_account.id;""",(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) |
acc_set = ",".join(map(str, ids2)) | def _ca_to_invoice_calc(self, cr, uid, ids, name, arg, context={}): res = {} res2 = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: # Amount uninvoiced hours to invoice at sale price # Warning # This computation doesn't take care of pricelist ! # Just consider list_price acc_set = ",".join(map(str, ids2)) cr.execute("""SELECT account_analytic_account.id, \ COALESCE(sum (product_template.list_price * \ account_analytic_line.unit_amount * \ ((100-hr_timesheet_invoice_factor.factor)/100)),0.0) \ AS ca_to_invoice \ FROM product_template \ join product_product \ on product_template.id = product_product.product_tmpl_id \ JOIN account_analytic_line \ on account_analytic_line.product_id = product_product.id \ JOIN account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ JOIN account_analytic_account \ on account_analytic_account.id = account_analytic_line.account_id \ JOIN hr_timesheet_invoice_factor \ on hr_timesheet_invoice_factor.id = account_analytic_account.to_invoice \ WHERE account_analytic_account.id =ANY(%s) \ AND account_analytic_line.invoice_id is null \ AND account_analytic_line.to_invoice IS NOT NULL \ and account_analytic_journal.type in ('purchase','general') \ GROUP BY account_analytic_account.id;""",(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) |
|
WHERE account_analytic_account.id =ANY(%s) \ | WHERE account_analytic_account.id IN %s \ | def _ca_to_invoice_calc(self, cr, uid, ids, name, arg, context={}): res = {} res2 = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: # Amount uninvoiced hours to invoice at sale price # Warning # This computation doesn't take care of pricelist ! # Just consider list_price acc_set = ",".join(map(str, ids2)) cr.execute("""SELECT account_analytic_account.id, \ COALESCE(sum (product_template.list_price * \ account_analytic_line.unit_amount * \ ((100-hr_timesheet_invoice_factor.factor)/100)),0.0) \ AS ca_to_invoice \ FROM product_template \ join product_product \ on product_template.id = product_product.product_tmpl_id \ JOIN account_analytic_line \ on account_analytic_line.product_id = product_product.id \ JOIN account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ JOIN account_analytic_account \ on account_analytic_account.id = account_analytic_line.account_id \ JOIN hr_timesheet_invoice_factor \ on hr_timesheet_invoice_factor.id = account_analytic_account.to_invoice \ WHERE account_analytic_account.id =ANY(%s) \ AND account_analytic_line.invoice_id is null \ AND account_analytic_line.to_invoice IS NOT NULL \ and account_analytic_journal.type in ('purchase','general') \ GROUP BY account_analytic_account.id;""",(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) |
GROUP BY account_analytic_account.id;""",(ids2,)) | GROUP BY account_analytic_account.id;""",(parent_ids,)) | def _ca_to_invoice_calc(self, cr, uid, ids, name, arg, context={}): res = {} res2 = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: # Amount uninvoiced hours to invoice at sale price # Warning # This computation doesn't take care of pricelist ! # Just consider list_price acc_set = ",".join(map(str, ids2)) cr.execute("""SELECT account_analytic_account.id, \ COALESCE(sum (product_template.list_price * \ account_analytic_line.unit_amount * \ ((100-hr_timesheet_invoice_factor.factor)/100)),0.0) \ AS ca_to_invoice \ FROM product_template \ join product_product \ on product_template.id = product_product.product_tmpl_id \ JOIN account_analytic_line \ on account_analytic_line.product_id = product_product.id \ JOIN account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ JOIN account_analytic_account \ on account_analytic_account.id = account_analytic_line.account_id \ JOIN hr_timesheet_invoice_factor \ on hr_timesheet_invoice_factor.id = account_analytic_account.to_invoice \ WHERE account_analytic_account.id =ANY(%s) \ AND account_analytic_line.invoice_id is null \ AND account_analytic_line.to_invoice IS NOT NULL \ and account_analytic_journal.type in ('purchase','general') \ GROUP BY account_analytic_account.id;""",(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) |
ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: | parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)])) if parent_ids: | def _hours_qtt_non_invoiced_calc (self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: cr.execute("select account_analytic_line.account_id, COALESCE(sum(unit_amount),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type='general' \ and invoice_id is null \ AND to_invoice IS NOT NULL \ GROUP BY account_analytic_line.account_id;",(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) for obj_id in ids: res.setdefault(obj_id, 0.0) for child_id in self.search(cr, uid, [('parent_id', 'child_of', [obj_id])]): if child_id != obj_id: res[obj_id] += res.get(child_id, 0.0) for id in ids: res[id] = round(res.get(id, 0.0),2) return res |
where account_analytic_line.account_id =ANY(%s) \ | where account_analytic_line.account_id IN %s \ | def _hours_qtt_non_invoiced_calc (self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: cr.execute("select account_analytic_line.account_id, COALESCE(sum(unit_amount),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type='general' \ and invoice_id is null \ AND to_invoice IS NOT NULL \ GROUP BY account_analytic_line.account_id;",(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) for obj_id in ids: res.setdefault(obj_id, 0.0) for child_id in self.search(cr, uid, [('parent_id', 'child_of', [obj_id])]): if child_id != obj_id: res[obj_id] += res.get(child_id, 0.0) for id in ids: res[id] = round(res.get(id, 0.0),2) return res |
GROUP BY account_analytic_line.account_id;",(ids2,)) | GROUP BY account_analytic_line.account_id;",(parent_ids,)) | def _hours_qtt_non_invoiced_calc (self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: cr.execute("select account_analytic_line.account_id, COALESCE(sum(unit_amount),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type='general' \ and invoice_id is null \ AND to_invoice IS NOT NULL \ GROUP BY account_analytic_line.account_id;",(ids2,)) for account_id, sum in cr.fetchall(): res[account_id] = round(sum,2) for obj_id in ids: res.setdefault(obj_id, 0.0) for child_id in self.search(cr, uid, [('parent_id', 'child_of', [obj_id])]): if child_id != obj_id: res[obj_id] += res.get(child_id, 0.0) for id in ids: res[id] = round(res.get(id, 0.0),2) return res |
ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: | parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)])) if parent_ids: | def _hours_quantity_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: cr.execute("select account_analytic_line.account_id,COALESCE(SUM(unit_amount),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type='general' \ GROUP BY account_analytic_line.account_id",(ids2,)) ff = cr.fetchall() for account_id, sum in ff: res[account_id] = round(sum,2) for obj_id in ids: res.setdefault(obj_id, 0.0) for child_id in self.search(cr, uid, [('parent_id', 'child_of', [obj_id])]): if child_id != obj_id: res[obj_id] += res.get(child_id, 0.0) for id in ids: res[id] = round(res.get(id, 0.0),2) return res |
where account_analytic_line.account_id =ANY(%s) \ | where account_analytic_line.account_id IN %s \ | def _hours_quantity_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: cr.execute("select account_analytic_line.account_id,COALESCE(SUM(unit_amount),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type='general' \ GROUP BY account_analytic_line.account_id",(ids2,)) ff = cr.fetchall() for account_id, sum in ff: res[account_id] = round(sum,2) for obj_id in ids: res.setdefault(obj_id, 0.0) for child_id in self.search(cr, uid, [('parent_id', 'child_of', [obj_id])]): if child_id != obj_id: res[obj_id] += res.get(child_id, 0.0) for id in ids: res[id] = round(res.get(id, 0.0),2) return res |
GROUP BY account_analytic_line.account_id",(ids2,)) | GROUP BY account_analytic_line.account_id",(parent_ids,)) | def _hours_quantity_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: cr.execute("select account_analytic_line.account_id,COALESCE(SUM(unit_amount),0.0) \ from account_analytic_line \ join account_analytic_journal \ on account_analytic_line.journal_id = account_analytic_journal.id \ where account_analytic_line.account_id =ANY(%s) \ and account_analytic_journal.type='general' \ GROUP BY account_analytic_line.account_id",(ids2,)) ff = cr.fetchall() for account_id, sum in ff: res[account_id] = round(sum,2) for obj_id in ids: res.setdefault(obj_id, 0.0) for child_id in self.search(cr, uid, [('parent_id', 'child_of', [obj_id])]): if child_id != obj_id: res[obj_id] += res.get(child_id, 0.0) for id in ids: res[id] = round(res.get(id, 0.0),2) return res |
ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: acc_set = ",".join(map(str, ids2)) | parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)])) if parent_ids: | def _total_cost_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: acc_set = ",".join(map(str, ids2)) cr.execute("""select account_analytic_line.account_id,COALESCE(sum(amount_currency),0.0) \ |
where account_analytic_line.account_id =ANY(%s) \ | where account_analytic_line.account_id IN %s \ | def _total_cost_calc(self, cr, uid, ids, name, arg, context={}): res = {} ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)]) if ids2: acc_set = ",".join(map(str, ids2)) cr.execute("""select account_analytic_line.account_id,COALESCE(sum(amount_currency),0.0) \ |