Column schema (name, type, observed range):

  body                    string, length 26 to 98.2k
  body_hash               int64, -9,222,864,604,528,158,000 to 9,221,803,474B
  docstring               string, length 1 to 16.8k
  path                    string, length 5 to 230
  name                    string, length 1 to 96
  repository_name         string, length 7 to 89
  lang                    string, 1 distinct value
  body_without_docstring  string, length 20 to 98.2k
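The records that follow repeat the eight fields in the order given above: body, body_hash, docstring, path, name, repository_name, lang, body_without_docstring. As a minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library (the dataset identifier and split below are hypothetical placeholders, not taken from this page):

```python
# Hypothetical loading sketch; the dataset id and split name are placeholders.
from datasets import load_dataset

ds = load_dataset("someuser/python-functions-with-docstrings", split="train")

for row in ds.select(range(3)):
    # Each row carries the eight columns described in the schema above.
    print(row["repository_name"], row["path"], row["name"], row["lang"])
    print(row["docstring"])
    print(row["body_without_docstring"][:120], "...")
```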
@property def local_needrun_jobs(self): 'Iterate over all jobs that need to be run and are marked as local.' return filter((lambda job: job.is_local), self.needrun_jobs)
5,180,558,367,343,746,000
Iterate over all jobs that need to be run and are marked as local.
snakemake/dag.py
local_needrun_jobs
baileythegreen/snakemake
python
@property def local_needrun_jobs(self): return filter((lambda job: job.is_local), self.needrun_jobs)
@property def finished_jobs(self): ' Iterate over all jobs that have been finished.' for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)): (yield job)
-962,557,771,915,621,600
Iterate over all jobs that have been finished.
snakemake/dag.py
finished_jobs
baileythegreen/snakemake
python
@property def finished_jobs(self): ' ' for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)): (yield job)
@property def ready_jobs(self): 'Jobs that are ready to execute.' return self._ready_jobs
7,521,555,611,777,074,000
Jobs that are ready to execute.
snakemake/dag.py
ready_jobs
baileythegreen/snakemake
python
@property def ready_jobs(self): return self._ready_jobs
def needrun(self, job): 'Return whether a given job needs to be executed.' return (job in self._needrun)
1,641,981,298,269,334,000
Return whether a given job needs to be executed.
snakemake/dag.py
needrun
baileythegreen/snakemake
python
def needrun(self, job): return (job in self._needrun)
def priority(self, job): 'Return priority of given job.' return self._priority[job]
526,851,191,883,921,540
Return priority of given job.
snakemake/dag.py
priority
baileythegreen/snakemake
python
def priority(self, job): return self._priority[job]
def noneedrun_finished(self, job): '\n Return whether a given job is finished or was not\n required to run at all.\n ' return ((not self.needrun(job)) or self.finished(job))
-2,352,851,572,218,798,600
Return whether a given job is finished or was not required to run at all.
snakemake/dag.py
noneedrun_finished
baileythegreen/snakemake
python
def noneedrun_finished(self, job): '\n Return whether a given job is finished or was not\n required to run at all.\n ' return ((not self.needrun(job)) or self.finished(job))
def reason(self, job): ' Return the reason of the job execution. ' return self._reason[job]
4,667,444,684,062,257,000
Return the reason of the job execution.
snakemake/dag.py
reason
baileythegreen/snakemake
python
def reason(self, job): ' ' return self._reason[job]
def finished(self, job): ' Return whether a job is finished. ' return (job in self._finished)
2,299,385,806,758,008,600
Return whether a job is finished.
snakemake/dag.py
finished
baileythegreen/snakemake
python
def finished(self, job): ' ' return (job in self._finished)
def dynamic(self, job): '\n Return whether a job is dynamic (i.e. it is only a placeholder\n for those that are created after the job with dynamic output has\n finished.\n ' if job.is_group(): for j in job: if (j in self._dynamic): return True else: return (job in self._dynamic)
477,988,465,187,231,500
Return whether a job is dynamic (i.e. it is only a placeholder for those that are created after the job with dynamic output has finished).
snakemake/dag.py
dynamic
baileythegreen/snakemake
python
def dynamic(self, job): '\n Return whether a job is dynamic (i.e. it is only a placeholder\n for those that are created after the job with dynamic output has\n finished.\n ' if job.is_group(): for j in job: if (j in self._dynamic): return True else: return (job in self._dynamic)
def requested_files(self, job): 'Return the files a job requests.' return set(*self.depending[job].values())
-3,774,339,331,432,726,000
Return the files a job requests.
snakemake/dag.py
requested_files
baileythegreen/snakemake
python
def requested_files(self, job): return set(*self.depending[job].values())
@property def incomplete_files(self): 'Return list of incomplete files.' return list(chain(*(job.output for job in filter(self.workflow.persistence.incomplete, filterfalse(self.needrun, self.jobs)))))
-7,766,036,957,068,657,000
Return list of incomplete files.
snakemake/dag.py
incomplete_files
baileythegreen/snakemake
python
@property def incomplete_files(self): return list(chain(*(job.output for job in filter(self.workflow.persistence.incomplete, filterfalse(self.needrun, self.jobs)))))
@property def newversion_files(self): 'Return list of files where the current version is newer than the\n recorded version.\n ' return list(chain(*(job.output for job in filter(self.workflow.persistence.newversion, self.jobs))))
1,139,763,053,158,034,400
Return list of files where the current version is newer than the recorded version.
snakemake/dag.py
newversion_files
baileythegreen/snakemake
python
@property def newversion_files(self): 'Return list of files where the current version is newer than the\n recorded version.\n ' return list(chain(*(job.output for job in filter(self.workflow.persistence.newversion, self.jobs))))
def missing_temp(self, job): '\n Return whether a temp file that is input of the given job is missing.\n ' for (job_, files) in self.depending[job].items(): if (self.needrun(job_) and any(((not f.exists) for f in files))): return True return False
-4,270,668,817,331,513,000
Return whether a temp file that is input of the given job is missing.
snakemake/dag.py
missing_temp
baileythegreen/snakemake
python
def missing_temp(self, job): '\n \n ' for (job_, files) in self.depending[job].items(): if (self.needrun(job_) and any(((not f.exists) for f in files))): return True return False
def check_and_touch_output(self, job, wait=3, ignore_missing_output=False, no_touch=False, force_stay_on_remote=False): ' Raise exception if output files of job are missing. ' expanded_output = [job.shadowed_path(path) for path in job.expanded_output] if job.benchmark: expanded_output.append(job.benchmark) if (not ignore_missing_output): try: wait_for_files(expanded_output, latency_wait=wait, force_stay_on_remote=force_stay_on_remote, ignore_pipe=True) except IOError as e: raise MissingOutputException((str(e) + '\nThis might be due to filesystem latency. If that is the case, consider to increase the wait time with --latency-wait.'), rule=job.rule) for f in expanded_output: if ((f.is_directory and (not os.path.isdir(f))) or (os.path.isdir(f) and (not f.is_directory))): raise ImproperOutputException(job.rule, [f]) if (not no_touch): for f in expanded_output: if f.exists_local: f.touch()
5,602,922,416,562,306,000
Raise exception if output files of job are missing.
snakemake/dag.py
check_and_touch_output
baileythegreen/snakemake
python
def check_and_touch_output(self, job, wait=3, ignore_missing_output=False, no_touch=False, force_stay_on_remote=False): ' ' expanded_output = [job.shadowed_path(path) for path in job.expanded_output] if job.benchmark: expanded_output.append(job.benchmark) if (not ignore_missing_output): try: wait_for_files(expanded_output, latency_wait=wait, force_stay_on_remote=force_stay_on_remote, ignore_pipe=True) except IOError as e: raise MissingOutputException((str(e) + '\nThis might be due to filesystem latency. If that is the case, consider to increase the wait time with --latency-wait.'), rule=job.rule) for f in expanded_output: if ((f.is_directory and (not os.path.isdir(f))) or (os.path.isdir(f) and (not f.is_directory))): raise ImproperOutputException(job.rule, [f]) if (not no_touch): for f in expanded_output: if f.exists_local: f.touch()
def unshadow_output(self, job, only_log=False): ' Move files from shadow directory to real output paths. ' if ((not job.shadow_dir) or (not job.expanded_output)): return files = (job.log if only_log else chain(job.expanded_output, job.log)) for real_output in files: shadow_output = job.shadowed_path(real_output).file if os.path.islink(shadow_output): dest = os.readlink(shadow_output) if os.path.isabs(dest): rel_dest = os.path.relpath(dest, job.shadow_dir) os.remove(shadow_output) os.symlink(rel_dest, shadow_output) if (os.path.realpath(shadow_output) == os.path.realpath(real_output)): continue logger.debug('Moving shadow output {} to destination {}'.format(shadow_output, real_output)) shutil.move(shadow_output, real_output) shutil.rmtree(job.shadow_dir)
3,537,507,914,298,335,000
Move files from shadow directory to real output paths.
snakemake/dag.py
unshadow_output
baileythegreen/snakemake
python
def unshadow_output(self, job, only_log=False): ' ' if ((not job.shadow_dir) or (not job.expanded_output)): return files = (job.log if only_log else chain(job.expanded_output, job.log)) for real_output in files: shadow_output = job.shadowed_path(real_output).file if os.path.islink(shadow_output): dest = os.readlink(shadow_output) if os.path.isabs(dest): rel_dest = os.path.relpath(dest, job.shadow_dir) os.remove(shadow_output) os.symlink(rel_dest, shadow_output) if (os.path.realpath(shadow_output) == os.path.realpath(real_output)): continue logger.debug('Moving shadow output {} to destination {}'.format(shadow_output, real_output)) shutil.move(shadow_output, real_output) shutil.rmtree(job.shadow_dir)
def check_periodic_wildcards(self, job): 'Raise an exception if a wildcard of the given job appears to be periodic,\n indicating a cyclic dependency.' for (wildcard, value) in job.wildcards_dict.items(): periodic_substring = self.periodic_wildcard_detector.is_periodic(value) if (periodic_substring is not None): raise PeriodicWildcardError('The value {} in wildcard {} is periodically repeated ({}). This would lead to an infinite recursion. To avoid this, e.g. restrict the wildcards in this rule to certain values.'.format(periodic_substring, wildcard, value), rule=job.rule)
-2,250,608,271,693,119,000
Raise an exception if a wildcard of the given job appears to be periodic, indicating a cyclic dependency.
snakemake/dag.py
check_periodic_wildcards
baileythegreen/snakemake
python
def check_periodic_wildcards(self, job): 'Raise an exception if a wildcard of the given job appears to be periodic,\n indicating a cyclic dependency.' for (wildcard, value) in job.wildcards_dict.items(): periodic_substring = self.periodic_wildcard_detector.is_periodic(value) if (periodic_substring is not None): raise PeriodicWildcardError('The value {} in wildcard {} is periodically repeated ({}). This would lead to an infinite recursion. To avoid this, e.g. restrict the wildcards in this rule to certain values.'.format(periodic_substring, wildcard, value), rule=job.rule)
def handle_protected(self, job): ' Write-protect output files that are marked with protected(). ' for f in job.expanded_output: if (f in job.protected_output): logger.info('Write-protecting output file {}.'.format(f)) f.protect()
-7,197,705,180,720,776,000
Write-protect output files that are marked with protected().
snakemake/dag.py
handle_protected
baileythegreen/snakemake
python
def handle_protected(self, job): ' ' for f in job.expanded_output: if (f in job.protected_output): logger.info('Write-protecting output file {}.'.format(f)) f.protect()
def handle_touch(self, job): ' Touches those output files that are marked for touching. ' for f in job.expanded_output: if (f in job.touch_output): f = job.shadowed_path(f) logger.info('Touching output file {}.'.format(f)) f.touch_or_create() assert os.path.exists(f)
-2,172,067,755,928,961,800
Touches those output files that are marked for touching.
snakemake/dag.py
handle_touch
baileythegreen/snakemake
python
def handle_touch(self, job): ' ' for f in job.expanded_output: if (f in job.touch_output): f = job.shadowed_path(f) logger.info('Touching output file {}.'.format(f)) f.touch_or_create() assert os.path.exists(f)
def temp_size(self, job): 'Return the total size of temporary input files of the job.\n If none, return 0.\n ' return sum((f.size for f in self.temp_input(job)))
-5,253,043,208,774,059,000
Return the total size of temporary input files of the job. If none, return 0.
snakemake/dag.py
temp_size
baileythegreen/snakemake
python
def temp_size(self, job): 'Return the total size of temporary input files of the job.\n If none, return 0.\n ' return sum((f.size for f in self.temp_input(job)))
def handle_temp(self, job): ' Remove temp files if they are no longer needed. Update temp_mtimes. ' if self.notemp: return is_temp = (lambda f: is_flagged(f, 'temp')) needed = (lambda job_, f: any(((f in files) for (j, files) in self.depending[job_].items() if ((not self.finished(j)) and self.needrun(j) and (j != job))))) def unneeded_files(): for (job_, files) in self.dependencies[job].items(): tempfiles = set((f for f in job_.expanded_output if is_temp(f))) (yield from filterfalse(partial(needed, job_), (tempfiles & files))) if ((not job.dynamic_output) and ((job not in self.targetjobs) or (job.rule.name == self.workflow.first_rule))): tempfiles = (f for f in job.expanded_output if (is_temp(f) and (f not in self.targetfiles))) (yield from filterfalse(partial(needed, job), tempfiles)) for f in unneeded_files(): logger.info('Removing temporary output file {}.'.format(f)) f.remove(remove_non_empty_dir=True)
-5,018,095,018,264,098,000
Remove temp files if they are no longer needed. Update temp_mtimes.
snakemake/dag.py
handle_temp
baileythegreen/snakemake
python
def handle_temp(self, job): ' ' if self.notemp: return is_temp = (lambda f: is_flagged(f, 'temp')) needed = (lambda job_, f: any(((f in files) for (j, files) in self.depending[job_].items() if ((not self.finished(j)) and self.needrun(j) and (j != job))))) def unneeded_files(): for (job_, files) in self.dependencies[job].items(): tempfiles = set((f for f in job_.expanded_output if is_temp(f))) (yield from filterfalse(partial(needed, job_), (tempfiles & files))) if ((not job.dynamic_output) and ((job not in self.targetjobs) or (job.rule.name == self.workflow.first_rule))): tempfiles = (f for f in job.expanded_output if (is_temp(f) and (f not in self.targetfiles))) (yield from filterfalse(partial(needed, job), tempfiles)) for f in unneeded_files(): logger.info('Removing temporary output file {}.'.format(f)) f.remove(remove_non_empty_dir=True)
def handle_remote(self, job, upload=True): ' Remove local files if they are no longer needed and upload. ' if upload: files = list(job.expanded_output) if job.benchmark: files.append(job.benchmark) for f in files: if (f.is_remote and (not f.should_stay_on_remote)): f.upload_to_remote() remote_mtime = f.mtime f.touch(times=(remote_mtime, remote_mtime)) if (not f.exists_remote): raise RemoteFileException('The file upload was attempted, but it does not exist on remote. Check that your credentials have read AND write permissions.') if (not self.keep_remote_local): needed = (lambda job_, f: any(((f in files) for (j, files) in self.depending[job_].items() if ((not self.finished(j)) and self.needrun(j) and (j != job))))) def unneeded_files(): putative = (lambda f: (f.is_remote and (not f.protected) and (not f.should_keep_local))) generated_input = set() for (job_, files) in self.dependencies[job].items(): generated_input |= files for f in filter(putative, files): if (not needed(job_, f)): (yield f) for (f, f_) in zip(job.output, job.rule.output): if (putative(f) and (not needed(job, f)) and (not (f in self.targetfiles))): if (f in job.dynamic_output): for f_ in job.expand_dynamic(f_): (yield f_) else: (yield f) for f in filter(putative, job.input): if (f not in generated_input): (yield f) for f in unneeded_files(): if f.exists_local: logger.info('Removing local output file: {}'.format(f)) f.remove()
3,578,904,658,369,048,600
Remove local files if they are no longer needed and upload.
snakemake/dag.py
handle_remote
baileythegreen/snakemake
python
def handle_remote(self, job, upload=True): ' ' if upload: files = list(job.expanded_output) if job.benchmark: files.append(job.benchmark) for f in files: if (f.is_remote and (not f.should_stay_on_remote)): f.upload_to_remote() remote_mtime = f.mtime f.touch(times=(remote_mtime, remote_mtime)) if (not f.exists_remote): raise RemoteFileException('The file upload was attempted, but it does not exist on remote. Check that your credentials have read AND write permissions.') if (not self.keep_remote_local): needed = (lambda job_, f: any(((f in files) for (j, files) in self.depending[job_].items() if ((not self.finished(j)) and self.needrun(j) and (j != job))))) def unneeded_files(): putative = (lambda f: (f.is_remote and (not f.protected) and (not f.should_keep_local))) generated_input = set() for (job_, files) in self.dependencies[job].items(): generated_input |= files for f in filter(putative, files): if (not needed(job_, f)): (yield f) for (f, f_) in zip(job.output, job.rule.output): if (putative(f) and (not needed(job, f)) and (not (f in self.targetfiles))): if (f in job.dynamic_output): for f_ in job.expand_dynamic(f_): (yield f_) else: (yield f) for f in filter(putative, job.input): if (f not in generated_input): (yield f) for f in unneeded_files(): if f.exists_local: logger.info('Removing local output file: {}'.format(f)) f.remove()
def jobid(self, job): 'Return job id of given job.' if job.is_group(): return job.jobid else: return self._jobid[job]
-2,560,642,363,251,344,000
Return job id of given job.
snakemake/dag.py
jobid
baileythegreen/snakemake
python
def jobid(self, job): if job.is_group(): return job.jobid else: return self._jobid[job]
def update(self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False): ' Update the DAG by adding given jobs and their dependencies. ' if (visited is None): visited = set() producer = None exceptions = list() jobs = sorted(jobs, reverse=(not self.ignore_ambiguity)) cycles = list() for job in jobs: logger.dag_debug(dict(status='candidate', job=job)) if (file in job.input): cycles.append(job) continue if (job in visited): cycles.append(job) continue try: self.check_periodic_wildcards(job) self.update_(job, visited=set(visited), skip_until_dynamic=skip_until_dynamic, progress=progress) if producer: if ((job < producer) or self.ignore_ambiguity): break elif (producer is not None): raise AmbiguousRuleException(file, job, producer) producer = job except (MissingInputException, CyclicGraphException, PeriodicWildcardError, WorkflowError) as ex: exceptions.append(ex) except RecursionError as e: raise WorkflowError(e, (('If building the DAG exceeds the recursion limit, this is likely due to a cyclic dependency.E.g. you might have a sequence of rules that can generate their own input. Try to make the output files more specific. A common pattern is to have different prefixes in the output files of different rules.' + '\nProblematic file pattern: {}'.format(file)) if file else '')) if (producer is None): if cycles: job = cycles[0] raise CyclicGraphException(job.rule, file, rule=job.rule) if (len(exceptions) > 1): raise WorkflowError(*exceptions) elif (len(exceptions) == 1): raise exceptions[0] else: logger.dag_debug(dict(status='selected', job=producer)) logger.dag_debug(dict(file=file, msg='Producer found, hence exceptions are ignored.', exception=WorkflowError(*exceptions))) n = len(self.dependencies) if (progress and ((n % 1000) == 0) and n and (self._progress != n)): logger.info('Processed {} potential jobs.'.format(n)) self._progress = n return producer
-1,266,715,212,862,555,600
Update the DAG by adding given jobs and their dependencies.
snakemake/dag.py
update
baileythegreen/snakemake
python
def update(self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False): ' ' if (visited is None): visited = set() producer = None exceptions = list() jobs = sorted(jobs, reverse=(not self.ignore_ambiguity)) cycles = list() for job in jobs: logger.dag_debug(dict(status='candidate', job=job)) if (file in job.input): cycles.append(job) continue if (job in visited): cycles.append(job) continue try: self.check_periodic_wildcards(job) self.update_(job, visited=set(visited), skip_until_dynamic=skip_until_dynamic, progress=progress) if producer: if ((job < producer) or self.ignore_ambiguity): break elif (producer is not None): raise AmbiguousRuleException(file, job, producer) producer = job except (MissingInputException, CyclicGraphException, PeriodicWildcardError, WorkflowError) as ex: exceptions.append(ex) except RecursionError as e: raise WorkflowError(e, (('If building the DAG exceeds the recursion limit, this is likely due to a cyclic dependency.E.g. you might have a sequence of rules that can generate their own input. Try to make the output files more specific. A common pattern is to have different prefixes in the output files of different rules.' + '\nProblematic file pattern: {}'.format(file)) if file else '')) if (producer is None): if cycles: job = cycles[0] raise CyclicGraphException(job.rule, file, rule=job.rule) if (len(exceptions) > 1): raise WorkflowError(*exceptions) elif (len(exceptions) == 1): raise exceptions[0] else: logger.dag_debug(dict(status='selected', job=producer)) logger.dag_debug(dict(file=file, msg='Producer found, hence exceptions are ignored.', exception=WorkflowError(*exceptions))) n = len(self.dependencies) if (progress and ((n % 1000) == 0) and n and (self._progress != n)): logger.info('Processed {} potential jobs.'.format(n)) self._progress = n return producer
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False): ' Update the DAG by adding the given job and its dependencies. ' if (job in self.dependencies): return if (visited is None): visited = set() visited.add(job) dependencies = self.dependencies[job] potential_dependencies = self.collect_potential_dependencies(job) skip_until_dynamic = (skip_until_dynamic and (not job.dynamic_output)) missing_input = set() producer = dict() exceptions = dict() for (file, jobs) in potential_dependencies.items(): file.inventory() if (not jobs): if (not file.exists): missing_input.add(file) continue try: selected_job = self.update(jobs, file=file, visited=visited, skip_until_dynamic=(skip_until_dynamic or (file in job.dynamic_input)), progress=progress) producer[file] = selected_job except (MissingInputException, CyclicGraphException, PeriodicWildcardError, WorkflowError) as ex: if (not file.exists): self.delete_job(job, recursive=False) raise ex else: logger.dag_debug(dict(file=file, msg='No producers found, but file is present on disk.', exception=ex)) for (file, job_) in producer.items(): dependencies[job_].add(file) self.depending[job_][job].add(file) if (self.is_batch_rule(job.rule) and self.batch.is_final): if any((f for f in job.input if ((f not in potential_dependencies) and (not f.exists)))): raise WorkflowError('Unable to execute batch {} because not all previous batches have been completed before or files have been deleted.'.format(self.batch)) if missing_input: self.delete_job(job, recursive=False) raise MissingInputException(job.rule, missing_input) if skip_until_dynamic: self._dynamic.add(job)
8,019,392,418,157,816,000
Update the DAG by adding the given job and its dependencies.
snakemake/dag.py
update_
baileythegreen/snakemake
python
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False): ' ' if (job in self.dependencies): return if (visited is None): visited = set() visited.add(job) dependencies = self.dependencies[job] potential_dependencies = self.collect_potential_dependencies(job) skip_until_dynamic = (skip_until_dynamic and (not job.dynamic_output)) missing_input = set() producer = dict() exceptions = dict() for (file, jobs) in potential_dependencies.items(): file.inventory() if (not jobs): if (not file.exists): missing_input.add(file) continue try: selected_job = self.update(jobs, file=file, visited=visited, skip_until_dynamic=(skip_until_dynamic or (file in job.dynamic_input)), progress=progress) producer[file] = selected_job except (MissingInputException, CyclicGraphException, PeriodicWildcardError, WorkflowError) as ex: if (not file.exists): self.delete_job(job, recursive=False) raise ex else: logger.dag_debug(dict(file=file, msg='No producers found, but file is present on disk.', exception=ex)) for (file, job_) in producer.items(): dependencies[job_].add(file) self.depending[job_][job].add(file) if (self.is_batch_rule(job.rule) and self.batch.is_final): if any((f for f in job.input if ((f not in potential_dependencies) and (not f.exists)))): raise WorkflowError('Unable to execute batch {} because not all previous batches have been completed before or files have been deleted.'.format(self.batch)) if missing_input: self.delete_job(job, recursive=False) raise MissingInputException(job.rule, missing_input) if skip_until_dynamic: self._dynamic.add(job)
def update_needrun(self): ' Update the information whether a job needs to be executed. ' output_mintime = dict() def update_output_mintime(job): try: return output_mintime[job] except KeyError: for job_ in chain([job], self.depending[job]): try: t = output_mintime[job_] except KeyError: t = job_.output_mintime if (t is not None): output_mintime[job] = t return output_mintime[job] = None def update_needrun(job): reason = self.reason(job) noinitreason = (not reason) updated_subworkflow_input = self.updated_subworkflow_files.intersection(job.input) if (((job not in self.omitforce) and (job.rule in self.forcerules)) or (not self.forcefiles.isdisjoint(job.output))): reason.forced = True elif updated_subworkflow_input: reason.updated_input.update(updated_subworkflow_input) elif (job in self.targetjobs): if ((not job.output) and (not job.benchmark)): if job.input: if job.rule.norun: reason.updated_input_run.update([f for f in job.input if (not f.exists)]) else: reason.nooutput = True else: reason.noio = True else: if (job.rule in self.targetrules): missing_output = job.missing_output() else: missing_output = job.missing_output(requested=(set(chain(*self.depending[job].values())) | self.targetfiles)) reason.missing_output.update(missing_output) if (not reason): output_mintime_ = output_mintime.get(job) if output_mintime_: updated_input = [f for f in job.input if (f.exists and f.is_newer(output_mintime_))] reason.updated_input.update(updated_input) if (noinitreason and reason): reason.derived = False reason = self.reason _needrun = self._needrun dependencies = self.dependencies depending = self.depending _needrun.clear() candidates = list(self.jobs) for job in candidates: update_output_mintime(job) for job in candidates: update_needrun(job) queue = list(filter(reason, candidates)) visited = set(queue) while queue: job = queue.pop(0) _needrun.add(job) for (job_, files) in dependencies[job].items(): missing_output = job_.missing_output(requested=files) reason(job_).missing_output.update(missing_output) if (missing_output and (not (job_ in visited))): visited.add(job_) queue.append(job_) for (job_, files) in depending[job].items(): if (job_ in candidates): reason(job_).updated_input_run.update(files) if (not (job_ in visited)): visited.add(job_) queue.append(job_) self._len = len((self._finished | self._needrun))
6,314,524,535,821,029,000
Update the information whether a job needs to be executed.
snakemake/dag.py
update_needrun
baileythegreen/snakemake
python
def update_needrun(self): ' ' output_mintime = dict() def update_output_mintime(job): try: return output_mintime[job] except KeyError: for job_ in chain([job], self.depending[job]): try: t = output_mintime[job_] except KeyError: t = job_.output_mintime if (t is not None): output_mintime[job] = t return output_mintime[job] = None def update_needrun(job): reason = self.reason(job) noinitreason = (not reason) updated_subworkflow_input = self.updated_subworkflow_files.intersection(job.input) if (((job not in self.omitforce) and (job.rule in self.forcerules)) or (not self.forcefiles.isdisjoint(job.output))): reason.forced = True elif updated_subworkflow_input: reason.updated_input.update(updated_subworkflow_input) elif (job in self.targetjobs): if ((not job.output) and (not job.benchmark)): if job.input: if job.rule.norun: reason.updated_input_run.update([f for f in job.input if (not f.exists)]) else: reason.nooutput = True else: reason.noio = True else: if (job.rule in self.targetrules): missing_output = job.missing_output() else: missing_output = job.missing_output(requested=(set(chain(*self.depending[job].values())) | self.targetfiles)) reason.missing_output.update(missing_output) if (not reason): output_mintime_ = output_mintime.get(job) if output_mintime_: updated_input = [f for f in job.input if (f.exists and f.is_newer(output_mintime_))] reason.updated_input.update(updated_input) if (noinitreason and reason): reason.derived = False reason = self.reason _needrun = self._needrun dependencies = self.dependencies depending = self.depending _needrun.clear() candidates = list(self.jobs) for job in candidates: update_output_mintime(job) for job in candidates: update_needrun(job) queue = list(filter(reason, candidates)) visited = set(queue) while queue: job = queue.pop(0) _needrun.add(job) for (job_, files) in dependencies[job].items(): missing_output = job_.missing_output(requested=files) reason(job_).missing_output.update(missing_output) if (missing_output and (not (job_ in visited))): visited.add(job_) queue.append(job_) for (job_, files) in depending[job].items(): if (job_ in candidates): reason(job_).updated_input_run.update(files) if (not (job_ in visited)): visited.add(job_) queue.append(job_) self._len = len((self._finished | self._needrun))
def in_until(self, job): 'Return whether given job has been specified via --until.' return ((job.rule.name in self.untilrules) or (not self.untilfiles.isdisjoint(job.output)))
-4,675,211,391,535,302,000
Return whether given job has been specified via --until.
snakemake/dag.py
in_until
baileythegreen/snakemake
python
def in_until(self, job): return ((job.rule.name in self.untilrules) or (not self.untilfiles.isdisjoint(job.output)))
def in_omitfrom(self, job): 'Return whether given job has been specified via --omit-from.' return ((job.rule.name in self.omitrules) or (not self.omitfiles.isdisjoint(job.output)))
-4,318,886,760,568,708,600
Return whether given job has been specified via --omit-from.
snakemake/dag.py
in_omitfrom
baileythegreen/snakemake
python
def in_omitfrom(self, job): return ((job.rule.name in self.omitrules) or (not self.omitfiles.isdisjoint(job.output)))
def until_jobs(self): 'Returns a generator of jobs specified by untiljobs.' return (job for job in self.jobs if self.in_until(job))
6,464,025,500,891,526,000
Returns a generator of jobs specified by untiljobs.
snakemake/dag.py
until_jobs
baileythegreen/snakemake
python
def until_jobs(self): return (job for job in self.jobs if self.in_until(job))
def omitfrom_jobs(self): 'Returns a generator of jobs specified by omitfromjobs.' return (job for job in self.jobs if self.in_omitfrom(job))
8,775,060,150,555,016,000
Returns a generator of jobs specified by omitfromjobs.
snakemake/dag.py
omitfrom_jobs
baileythegreen/snakemake
python
def omitfrom_jobs(self): return (job for job in self.jobs if self.in_omitfrom(job))
def downstream_of_omitfrom(self): 'Returns the downstream of --omit-from rules or files and themselves.' return self.bfs(self.depending, *self.omitfrom_jobs())
814,192,902,394,369
Returns the downstream of --omit-from rules or files and themselves.
snakemake/dag.py
downstream_of_omitfrom
baileythegreen/snakemake
python
def downstream_of_omitfrom(self): return self.bfs(self.depending, *self.omitfrom_jobs())
def delete_omitfrom_jobs(self): 'Removes jobs downstream of jobs specified by --omit-from.' if ((not self.omitrules) and (not self.omitfiles)): return downstream_jobs = list(self.downstream_of_omitfrom()) for job in downstream_jobs: self.delete_job(job, recursive=False, add_dependencies=True)
2,595,008,063,148,844,500
Removes jobs downstream of jobs specified by --omit-from.
snakemake/dag.py
delete_omitfrom_jobs
baileythegreen/snakemake
python
def delete_omitfrom_jobs(self): if ((not self.omitrules) and (not self.omitfiles)): return downstream_jobs = list(self.downstream_of_omitfrom()) for job in downstream_jobs: self.delete_job(job, recursive=False, add_dependencies=True)
def set_until_jobs(self): 'Removes jobs downstream of jobs specified by --omit-from.' if ((not self.untilrules) and (not self.untilfiles)): return self.targetjobs = set(self.until_jobs())
-7,642,303,915,606,476,000
Removes jobs downstream of jobs specified by --omit-from.
snakemake/dag.py
set_until_jobs
baileythegreen/snakemake
python
def set_until_jobs(self): if ((not self.untilrules) and (not self.untilfiles)): return self.targetjobs = set(self.until_jobs())
def update_priority(self): ' Update job priorities. ' prioritized = (lambda job: ((job.rule in self.priorityrules) or (not self.priorityfiles.isdisjoint(job.output)))) for job in self.needrun_jobs: self._priority[job] = job.rule.priority for job in self.bfs(self.dependencies, *filter(prioritized, self.needrun_jobs), stop=self.noneedrun_finished): self._priority[job] = Job.HIGHEST_PRIORITY
4,670,790,590,416,513,000
Update job priorities.
snakemake/dag.py
update_priority
baileythegreen/snakemake
python
def update_priority(self): ' ' prioritized = (lambda job: ((job.rule in self.priorityrules) or (not self.priorityfiles.isdisjoint(job.output)))) for job in self.needrun_jobs: self._priority[job] = job.rule.priority for job in self.bfs(self.dependencies, *filter(prioritized, self.needrun_jobs), stop=self.noneedrun_finished): self._priority[job] = Job.HIGHEST_PRIORITY
def update_ready(self, jobs=None): 'Update information whether a job is ready to execute.\n\n Given jobs must be needrun jobs!\n ' if (jobs is None): jobs = self.needrun_jobs candidate_groups = set() for job in jobs: if ((not self.finished(job)) and self._ready(job)): if (job.group is None): self._ready_jobs.add(job) else: group = self._group[job] group.finalize() candidate_groups.add(group) self._ready_jobs.update((group for group in candidate_groups if all((self._ready(job) for job in group))))
8,820,413,122,410,618,000
Update information whether a job is ready to execute. Given jobs must be needrun jobs!
snakemake/dag.py
update_ready
baileythegreen/snakemake
python
def update_ready(self, jobs=None): 'Update information whether a job is ready to execute.\n\n Given jobs must be needrun jobs!\n ' if (jobs is None): jobs = self.needrun_jobs candidate_groups = set() for job in jobs: if ((not self.finished(job)) and self._ready(job)): if (job.group is None): self._ready_jobs.add(job) else: group = self._group[job] group.finalize() candidate_groups.add(group) self._ready_jobs.update((group for group in candidate_groups if all((self._ready(job) for job in group))))
def close_remote_objects(self): 'Close all remote objects.' for job in self.jobs: if (not self.needrun(job)): job.close_remote()
-7,580,807,009,841,187,000
Close all remote objects.
snakemake/dag.py
close_remote_objects
baileythegreen/snakemake
python
def close_remote_objects(self): for job in self.jobs: if (not self.needrun(job)): job.close_remote()
def postprocess(self): 'Postprocess the DAG. This has to be invoked after any change to the\n DAG topology.' self.update_jobids() self.update_needrun() self.update_priority() self.handle_pipes() self.update_groups() self.update_ready() self.close_remote_objects() self.update_checkpoint_outputs()
-5,770,164,959,497,063,000
Postprocess the DAG. This has to be invoked after any change to the DAG topology.
snakemake/dag.py
postprocess
baileythegreen/snakemake
python
def postprocess(self): 'Postprocess the DAG. This has to be invoked after any change to the\n DAG topology.' self.update_jobids() self.update_needrun() self.update_priority() self.handle_pipes() self.update_groups() self.update_ready() self.close_remote_objects() self.update_checkpoint_outputs()
def handle_pipes(self): 'Use pipes to determine job groups. Check if every pipe has exactly\n one consumer' for job in self.needrun_jobs: candidate_groups = set() if (job.group is not None): candidate_groups.add(job.group) all_depending = set() has_pipe = False for f in job.output: if is_flagged(f, 'pipe'): if job.is_run: raise WorkflowError("Rule defines pipe output but uses a 'run' directive. This is not possible for technical reasons. Consider using 'shell' or 'script'.", rule=job.rule) has_pipe = True depending = [j for (j, files) in self.depending[job].items() if (f in files)] if (len(depending) > 1): raise WorkflowError('Output file {} is marked as pipe but more than one job depends on it. Make sure that any pipe output is only consumed by one job'.format(f), rule=job.rule) elif (len(depending) == 0): raise WorkflowError('Output file {} is marked as pipe but it has no consumer. This is invalid because it can lead to a dead lock.'.format(f), rule=job.rule) depending = depending[0] if depending.is_run: raise WorkflowError("Rule consumes pipe input but uses a 'run' directive. This is not possible for technical reasons. Consider using 'shell' or 'script'.", rule=job.rule) all_depending.add(depending) if (depending.group is not None): candidate_groups.add(depending.group) if (not has_pipe): continue if (len(candidate_groups) > 1): raise WorkflowError('An output file is marked as pipe, but consuming jobs are part of conflicting groups.', rule=job.rule) elif candidate_groups: group = candidate_groups.pop() else: group = str(uuid.uuid4()) job.group = group for j in all_depending: j.group = group
-6,062,718,041,752,702,000
Use pipes to determine job groups. Check if every pipe has exactly one consumer
snakemake/dag.py
handle_pipes
baileythegreen/snakemake
python
def handle_pipes(self): 'Use pipes to determine job groups. Check if every pipe has exactly\n one consumer' for job in self.needrun_jobs: candidate_groups = set() if (job.group is not None): candidate_groups.add(job.group) all_depending = set() has_pipe = False for f in job.output: if is_flagged(f, 'pipe'): if job.is_run: raise WorkflowError("Rule defines pipe output but uses a 'run' directive. This is not possible for technical reasons. Consider using 'shell' or 'script'.", rule=job.rule) has_pipe = True depending = [j for (j, files) in self.depending[job].items() if (f in files)] if (len(depending) > 1): raise WorkflowError('Output file {} is marked as pipe but more than one job depends on it. Make sure that any pipe output is only consumed by one job'.format(f), rule=job.rule) elif (len(depending) == 0): raise WorkflowError('Output file {} is marked as pipe but it has no consumer. This is invalid because it can lead to a dead lock.'.format(f), rule=job.rule) depending = depending[0] if depending.is_run: raise WorkflowError("Rule consumes pipe input but uses a 'run' directive. This is not possible for technical reasons. Consider using 'shell' or 'script'.", rule=job.rule) all_depending.add(depending) if (depending.group is not None): candidate_groups.add(depending.group) if (not has_pipe): continue if (len(candidate_groups) > 1): raise WorkflowError('An output file is marked as pipe, but consuming jobs are part of conflicting groups.', rule=job.rule) elif candidate_groups: group = candidate_groups.pop() else: group = str(uuid.uuid4()) job.group = group for j in all_depending: j.group = group
def _ready(self, job): 'Return whether the given job is ready to execute.' group = self._group.get(job, None) if (group is None): is_external_needrun_dep = self.needrun else: def is_external_needrun_dep(j): g = self._group.get(j, None) return (self.needrun(j) and ((g is None) or (g != group))) return self._finished.issuperset(filter(is_external_needrun_dep, self.dependencies[job]))
6,166,242,618,882,280,000
Return whether the given job is ready to execute.
snakemake/dag.py
_ready
baileythegreen/snakemake
python
def _ready(self, job): group = self._group.get(job, None) if (group is None): is_external_needrun_dep = self.needrun else: def is_external_needrun_dep(j): g = self._group.get(j, None) return (self.needrun(j) and ((g is None) or (g != group))) return self._finished.issuperset(filter(is_external_needrun_dep, self.dependencies[job]))
def update_checkpoint_dependencies(self, jobs=None): 'Update dependencies of checkpoints.' updated = False self.update_checkpoint_outputs() if (jobs is None): jobs = [job for job in self.jobs if (not self.needrun(job))] for job in jobs: if job.is_checkpoint: depending = list(self.depending[job]) for j in depending: logger.info('Updating job {} ({}).'.format(self.jobid(j), j)) newjob = j.updated() self.replace_job(j, newjob, recursive=False) updated = True if updated: self.postprocess() return updated
-1,335,494,655,616,101,000
Update dependencies of checkpoints.
snakemake/dag.py
update_checkpoint_dependencies
baileythegreen/snakemake
python
def update_checkpoint_dependencies(self, jobs=None): updated = False self.update_checkpoint_outputs() if (jobs is None): jobs = [job for job in self.jobs if (not self.needrun(job))] for job in jobs: if job.is_checkpoint: depending = list(self.depending[job]) for j in depending: logger.info('Updating job {} ({}).'.format(self.jobid(j), j)) newjob = j.updated() self.replace_job(j, newjob, recursive=False) updated = True if updated: self.postprocess() return updated
def finish(self, job, update_dynamic=True): 'Finish a given job (e.g. remove from ready jobs, mark depending jobs\n as ready).' try: self._ready_jobs.remove(job) except KeyError: pass if job.is_group(): jobs = job else: jobs = [job] self._finished.update(jobs) updated_dag = False if update_dynamic: updated_dag = self.update_checkpoint_dependencies(jobs) self.update_ready((j for job in jobs for j in self.depending[job] if ((not self.in_until(job)) and self.needrun(j)))) for job in jobs: if (update_dynamic and job.dynamic_output): logger.info('Dynamically updating jobs') newjob = self.update_dynamic(job) if newjob: self.omitforce.add(newjob) self._needrun.add(newjob) self._finished.add(newjob) updated_dag = True self.postprocess() self.handle_protected(newjob) self.handle_touch(newjob) if updated_dag: if self.workflow.use_singularity: self.pull_container_imgs() if self.workflow.use_conda: self.create_conda_envs()
-6,145,590,956,678,949,000
Finish a given job (e.g. remove from ready jobs, mark depending jobs as ready).
snakemake/dag.py
finish
baileythegreen/snakemake
python
def finish(self, job, update_dynamic=True): 'Finish a given job (e.g. remove from ready jobs, mark depending jobs\n as ready).' try: self._ready_jobs.remove(job) except KeyError: pass if job.is_group(): jobs = job else: jobs = [job] self._finished.update(jobs) updated_dag = False if update_dynamic: updated_dag = self.update_checkpoint_dependencies(jobs) self.update_ready((j for job in jobs for j in self.depending[job] if ((not self.in_until(job)) and self.needrun(j)))) for job in jobs: if (update_dynamic and job.dynamic_output): logger.info('Dynamically updating jobs') newjob = self.update_dynamic(job) if newjob: self.omitforce.add(newjob) self._needrun.add(newjob) self._finished.add(newjob) updated_dag = True self.postprocess() self.handle_protected(newjob) self.handle_touch(newjob) if updated_dag: if self.workflow.use_singularity: self.pull_container_imgs() if self.workflow.use_conda: self.create_conda_envs()
def new_job(self, rule, targetfile=None, format_wildcards=None): 'Create new job for given rule and (optional) targetfile.\n This will reuse existing jobs with the same wildcards.' key = (rule, targetfile) if (key in self.job_cache): assert (targetfile is not None) return self.job_cache[key] wildcards_dict = rule.get_wildcards(targetfile) job = self.job_factory.new(rule, self, wildcards_dict=wildcards_dict, format_wildcards=format_wildcards, targetfile=targetfile) self.cache_job(job) return job
-2,048,175,340,361,699,300
Create new job for given rule and (optional) targetfile. This will reuse existing jobs with the same wildcards.
snakemake/dag.py
new_job
baileythegreen/snakemake
python
def new_job(self, rule, targetfile=None, format_wildcards=None): 'Create new job for given rule and (optional) targetfile.\n This will reuse existing jobs with the same wildcards.' key = (rule, targetfile) if (key in self.job_cache): assert (targetfile is not None) return self.job_cache[key] wildcards_dict = rule.get_wildcards(targetfile) job = self.job_factory.new(rule, self, wildcards_dict=wildcards_dict, format_wildcards=format_wildcards, targetfile=targetfile) self.cache_job(job) return job
def update_dynamic(self, job): 'Update the DAG by evaluating the output of the given job that\n contains dynamic output files.' dynamic_wildcards = job.dynamic_wildcards if (not dynamic_wildcards): return depending = list(filter((lambda job_: (not self.finished(job_))), self.bfs(self.depending, job))) (newrule, non_dynamic_wildcards) = job.rule.dynamic_branch(dynamic_wildcards, input=False) self.specialize_rule(job.rule, newrule) newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards) self.replace_job(job, newjob) for job_ in depending: needs_update = any(((f.get_wildcard_names() & dynamic_wildcards.keys()) for f in job_.rule.dynamic_input)) if needs_update: newrule_ = job_.rule.dynamic_branch(dynamic_wildcards) if (newrule_ is not None): self.specialize_rule(job_.rule, newrule_) if (not self.dynamic(job_)): logger.debug('Updating job {}.'.format(job_)) newjob_ = self.new_job(newrule_, targetfile=(job_.output[0] if job_.output else None)) unexpected_output = self.reason(job_).missing_output.intersection(newjob.existing_output) if unexpected_output: logger.warning('Warning: the following output files of rule {} were not present when the DAG was created:\n{}'.format(newjob_.rule, unexpected_output)) self.replace_job(job_, newjob_) return newjob
-3,438,462,581,824,115,000
Update the DAG by evaluating the output of the given job that contains dynamic output files.
snakemake/dag.py
update_dynamic
baileythegreen/snakemake
python
def update_dynamic(self, job): 'Update the DAG by evaluating the output of the given job that\n contains dynamic output files.' dynamic_wildcards = job.dynamic_wildcards if (not dynamic_wildcards): return depending = list(filter((lambda job_: (not self.finished(job_))), self.bfs(self.depending, job))) (newrule, non_dynamic_wildcards) = job.rule.dynamic_branch(dynamic_wildcards, input=False) self.specialize_rule(job.rule, newrule) newjob = self.new_job(newrule, format_wildcards=non_dynamic_wildcards) self.replace_job(job, newjob) for job_ in depending: needs_update = any(((f.get_wildcard_names() & dynamic_wildcards.keys()) for f in job_.rule.dynamic_input)) if needs_update: newrule_ = job_.rule.dynamic_branch(dynamic_wildcards) if (newrule_ is not None): self.specialize_rule(job_.rule, newrule_) if (not self.dynamic(job_)): logger.debug('Updating job {}.'.format(job_)) newjob_ = self.new_job(newrule_, targetfile=(job_.output[0] if job_.output else None)) unexpected_output = self.reason(job_).missing_output.intersection(newjob.existing_output) if unexpected_output: logger.warning('Warning: the following output files of rule {} were not present when the DAG was created:\n{}'.format(newjob_.rule, unexpected_output)) self.replace_job(job_, newjob_) return newjob
def delete_job(self, job, recursive=True, add_dependencies=False): 'Delete given job from DAG.' if (job in self.targetjobs): self.targetjobs.remove(job) if add_dependencies: for _job in self.dependencies[job]: self.targetjobs.add(_job) for job_ in self.depending[job]: del self.dependencies[job_][job] del self.depending[job] for job_ in self.dependencies[job]: depending = self.depending[job_] del depending[job] if ((not depending) and recursive): self.delete_job(job_) del self.dependencies[job] if (job in self._needrun): self._len -= 1 self._needrun.remove(job) del self._reason[job] if (job in self._finished): self._finished.remove(job) if (job in self._dynamic): self._dynamic.remove(job) if (job in self._ready_jobs): self._ready_jobs.remove(job) for f in job.output: try: del self.job_cache[(job.rule, f)] except KeyError: pass
2,529,409,256,004,886,500
Delete given job from DAG.
snakemake/dag.py
delete_job
baileythegreen/snakemake
python
def delete_job(self, job, recursive=True, add_dependencies=False): if (job in self.targetjobs): self.targetjobs.remove(job) if add_dependencies: for _job in self.dependencies[job]: self.targetjobs.add(_job) for job_ in self.depending[job]: del self.dependencies[job_][job] del self.depending[job] for job_ in self.dependencies[job]: depending = self.depending[job_] del depending[job] if ((not depending) and recursive): self.delete_job(job_) del self.dependencies[job] if (job in self._needrun): self._len -= 1 self._needrun.remove(job) del self._reason[job] if (job in self._finished): self._finished.remove(job) if (job in self._dynamic): self._dynamic.remove(job) if (job in self._ready_jobs): self._ready_jobs.remove(job) for f in job.output: try: del self.job_cache[(job.rule, f)] except KeyError: pass
def replace_job(self, job, newjob, recursive=True): 'Replace given job with new job.' add_to_targetjobs = (job in self.targetjobs) depending = list(self.depending[job].items()) if self.finished(job): self._finished.add(newjob) self.delete_job(job, recursive=recursive) if add_to_targetjobs: self.targetjobs.add(newjob) self.cache_job(newjob) self.update([newjob]) logger.debug('Replace {} with dynamic branch {}'.format(job, newjob)) for (job_, files) in depending: logger.debug('updating depending job {}'.format(job_)) self.dependencies[job_][newjob].update(files) self.depending[newjob][job_].update(files)
-244,424,612,344,326,180
Replace given job with new job.
snakemake/dag.py
replace_job
baileythegreen/snakemake
python
def replace_job(self, job, newjob, recursive=True): add_to_targetjobs = (job in self.targetjobs) depending = list(self.depending[job].items()) if self.finished(job): self._finished.add(newjob) self.delete_job(job, recursive=recursive) if add_to_targetjobs: self.targetjobs.add(newjob) self.cache_job(newjob) self.update([newjob]) logger.debug('Replace {} with dynamic branch {}'.format(job, newjob)) for (job_, files) in depending: logger.debug('updating depending job {}'.format(job_)) self.dependencies[job_][newjob].update(files) self.depending[newjob][job_].update(files)
def specialize_rule(self, rule, newrule): 'Specialize the given rule by inserting newrule into the DAG.' assert (newrule is not None) self.rules.add(newrule) self.update_output_index()
-3,487,190,096,571,759,000
Specialize the given rule by inserting newrule into the DAG.
snakemake/dag.py
specialize_rule
baileythegreen/snakemake
python
def specialize_rule(self, rule, newrule): assert (newrule is not None) self.rules.add(newrule) self.update_output_index()
def is_batch_rule(self, rule): 'Return True if the underlying rule is to be used for batching the DAG.' return ((self.batch is not None) and (rule.name == self.batch.rulename))
2,566,255,858,520,121,000
Return True if the underlying rule is to be used for batching the DAG.
snakemake/dag.py
is_batch_rule
baileythegreen/snakemake
python
def is_batch_rule(self, rule): return ((self.batch is not None) and (rule.name == self.batch.rulename))
def collect_potential_dependencies(self, job): 'Collect all potential dependencies of a job. These might contain\n ambiguities. The keys of the returned dict represent the files to be considered.' dependencies = defaultdict(list) file2jobs = self.file2jobs input_files = list(job.unique_input) if self.is_batch_rule(job.rule): input_batch = self.batch.get_batch(input_files) if (len(input_batch) != len(input_files)): logger.info("Considering only batch {} for DAG computation.\nAll jobs beyond the batching rule are omitted until the final batch.\nDon't forget to run the other batches too.".format(self.batch)) input_files = input_batch for file in input_files: if (file in job.subworkflow_input): continue try: if (file in job.dependencies): jobs = [self.new_job(job.dependencies[file], targetfile=file)] else: jobs = file2jobs(file) dependencies[file].extend(jobs) except MissingRuleException as ex: dependencies[file] = [] return dependencies
5,342,551,899,153,103,000
Collect all potential dependencies of a job. These might contain ambiguities. The keys of the returned dict represent the files to be considered.
snakemake/dag.py
collect_potential_dependencies
baileythegreen/snakemake
python
def collect_potential_dependencies(self, job): 'Collect all potential dependencies of a job. These might contain\n ambiguities. The keys of the returned dict represent the files to be considered.' dependencies = defaultdict(list) file2jobs = self.file2jobs input_files = list(job.unique_input) if self.is_batch_rule(job.rule): input_batch = self.batch.get_batch(input_files) if (len(input_batch) != len(input_files)): logger.info("Considering only batch {} for DAG computation.\nAll jobs beyond the batching rule are omitted until the final batch.\nDon't forget to run the other batches too.".format(self.batch)) input_files = input_batch for file in input_files: if (file in job.subworkflow_input): continue try: if (file in job.dependencies): jobs = [self.new_job(job.dependencies[file], targetfile=file)] else: jobs = file2jobs(file) dependencies[file].extend(jobs) except MissingRuleException as ex: dependencies[file] = [] return dependencies
def bfs(self, direction, *jobs, stop=(lambda job: False)): 'Perform a breadth-first traversal of the DAG.' queue = list(jobs) visited = set(queue) while queue: job = queue.pop(0) if stop(job): continue (yield job) for (job_, _) in direction[job].items(): if (not (job_ in visited)): queue.append(job_) visited.add(job_)
-2,070,557,354,798,236,000
Perform a breadth-first traversal of the DAG.
snakemake/dag.py
bfs
baileythegreen/snakemake
python
def bfs(self, direction, *jobs, stop=(lambda job: False)): queue = list(jobs) visited = set(queue) while queue: job = queue.pop(0) if stop(job): continue (yield job) for (job_, _) in direction[job].items(): if (not (job_ in visited)): queue.append(job_) visited.add(job_)
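The bfs record above (and the level_bfs and dfs records that follow) traverse adjacency mappings of the form job -> {neighbouring job: set of connecting files}. A small self-contained sketch of the same traversal pattern over plain dictionaries, with hypothetical job and file names, may make that structure easier to see:

```python
# Illustrative only: a stand-in for DAG.depending / DAG.dependencies,
# i.e. job -> {neighbour_job: files_connecting_them}. Names are made up.
depending = {
    "align": {"sort": {"aligned.bam"}},
    "sort": {"index": {"sorted.bam"}},
    "index": {},
}

def bfs(direction, *jobs, stop=lambda job: False):
    """Same breadth-first pattern as DAG.bfs in the record above."""
    queue = list(jobs)
    visited = set(queue)
    while queue:
        job = queue.pop(0)
        if stop(job):
            continue
        yield job
        for job_, _files in direction[job].items():
            if job_ not in visited:
                visited.add(job_)
                queue.append(job_)

print(list(bfs(depending, "align")))  # -> ['align', 'sort', 'index']
```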
def level_bfs(self, direction, *jobs, stop=(lambda job: False)): 'Perform a breadth-first traversal of the DAG, but also yield the\n level together with each job.' queue = [(job, 0) for job in jobs] visited = set(jobs) while queue: (job, level) = queue.pop(0) if stop(job): continue (yield (level, job)) level += 1 for (job_, _) in direction[job].items(): if (not (job_ in visited)): queue.append((job_, level)) visited.add(job_)
-1,120,438,311,960,962,200
Perform a breadth-first traversal of the DAG, but also yield the level together with each job.
snakemake/dag.py
level_bfs
baileythegreen/snakemake
python
def level_bfs(self, direction, *jobs, stop=(lambda job: False)): 'Perform a breadth-first traversal of the DAG, but also yield the\n level together with each job.' queue = [(job, 0) for job in jobs] visited = set(jobs) while queue: (job, level) = queue.pop(0) if stop(job): continue (yield (level, job)) level += 1 for (job_, _) in direction[job].items(): if (not (job_ in visited)): queue.append((job_, level)) visited.add(job_)
def dfs(self, direction, *jobs, stop=(lambda job: False), post=True): 'Perform depth-first traversal of the DAG.' visited = set() def _dfs(job): 'Inner function for DFS traversal.' if stop(job): return if (not post): (yield job) for job_ in direction[job]: if (not (job_ in visited)): visited.add(job_) for j in _dfs(job_): (yield j) if post: (yield job) for job in jobs: for job_ in self._dfs(direction, job, visited, stop=stop, post=post): (yield job_)
-7,262,718,796,230,219,000
Perform depth-first traversal of the DAG.
snakemake/dag.py
dfs
baileythegreen/snakemake
python
def dfs(self, direction, *jobs, stop=(lambda job: False), post=True): visited = set() def _dfs(job): 'Inner function for DFS traversal.' if stop(job): return if (not post): (yield job) for job_ in direction[job]: if (not (job_ in visited)): visited.add(job_) for j in _dfs(job_): (yield j) if post: (yield job) for job in jobs: for job_ in self._dfs(direction, job, visited, stop=stop, post=post): (yield job_)
def new_wildcards(self, job): 'Return wildcards that are newly introduced in this job,\n compared to its ancestors.' new_wildcards = set(job.wildcards.items()) for job_ in self.dependencies[job]: if (not new_wildcards): return set() for wildcard in job_.wildcards.items(): new_wildcards.discard(wildcard) return new_wildcards
5,009,472,403,097,837,000
Return wildcards that are newly introduced in this job, compared to its ancestors.
snakemake/dag.py
new_wildcards
baileythegreen/snakemake
python
def new_wildcards(self, job): 'Return wildcards that are newly introduced in this job,\n compared to its ancestors.' new_wildcards = set(job.wildcards.items()) for job_ in self.dependencies[job]: if (not new_wildcards): return set() for wildcard in job_.wildcards.items(): new_wildcards.discard(wildcard) return new_wildcards
def rule2job(self, targetrule): 'Generate a new job from a given rule.' if targetrule.has_wildcards(): raise WorkflowError('Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards.') return self.new_job(targetrule)
-7,220,431,080,580,572,000
Generate a new job from a given rule.
snakemake/dag.py
rule2job
baileythegreen/snakemake
python
def rule2job(self, targetrule): if targetrule.has_wildcards(): raise WorkflowError('Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards.') return self.new_job(targetrule)
def archive(self, path): 'Archives workflow such that it can be re-run on a different system.\n\n Archiving includes git versioned files (i.e. Snakefiles, config files, ...),\n ancestral input files and conda environments.\n ' if path.endswith('.tar'): mode = 'x' elif path.endswith('tar.bz2'): mode = 'x:bz2' elif path.endswith('tar.xz'): mode = 'x:xz' elif path.endswith('tar.gz'): mode = 'x:gz' else: raise WorkflowError('Unsupported archive format (supported: .tar, .tar.gz, .tar.bz2, .tar.xz)') if os.path.exists(path): raise WorkflowError(('Archive already exists:\n' + path)) self.create_conda_envs(forceall=True) try: workdir = Path(os.path.abspath(os.getcwd())) with tarfile.open(path, mode=mode, dereference=True) as archive: archived = set() def add(path): if (workdir not in Path(os.path.abspath(path)).parents): logger.warning('Path {} cannot be archived: not within working directory.'.format(path)) else: f = os.path.relpath(path) if (f not in archived): archive.add(f) archived.add(f) logger.info(('archived ' + f)) logger.info('Archiving snakefiles, scripts and files under version control...') for f in self.workflow.get_sources(): add(f) logger.info('Archiving external input files...') for job in self.jobs: for f in job.input: if (not any(((f in files) for files in self.dependencies[job].values()))): add(f) logger.info('Archiving conda environments...') envs = set() for job in self.jobs: if job.conda_env_file: env_archive = job.archive_conda_env() envs.add(env_archive) for env in envs: add(env) except (Exception, BaseException) as e: os.remove(path) raise e
6,564,548,577,266,580,000
Archives workflow such that it can be re-run on a different system. Archiving includes git versioned files (i.e. Snakefiles, config files, ...), ancestral input files and conda environments.
snakemake/dag.py
archive
baileythegreen/snakemake
python
def archive(self, path): 'Archives workflow such that it can be re-run on a different system.\n\n Archiving includes git versioned files (i.e. Snakefiles, config files, ...),\n ancestral input files and conda environments.\n ' if path.endswith('.tar'): mode = 'x' elif path.endswith('tar.bz2'): mode = 'x:bz2' elif path.endswith('tar.xz'): mode = 'x:xz' elif path.endswith('tar.gz'): mode = 'x:gz' else: raise WorkflowError('Unsupported archive format (supported: .tar, .tar.gz, .tar.bz2, .tar.xz)') if os.path.exists(path): raise WorkflowError(('Archive already exists:\n' + path)) self.create_conda_envs(forceall=True) try: workdir = Path(os.path.abspath(os.getcwd())) with tarfile.open(path, mode=mode, dereference=True) as archive: archived = set() def add(path): if (workdir not in Path(os.path.abspath(path)).parents): logger.warning('Path {} cannot be archived: not within working directory.'.format(path)) else: f = os.path.relpath(path) if (f not in archived): archive.add(f) archived.add(f) logger.info(('archived ' + f)) logger.info('Archiving snakefiles, scripts and files under version control...') for f in self.workflow.get_sources(): add(f) logger.info('Archiving external input files...') for job in self.jobs: for f in job.input: if (not any(((f in files) for files in self.dependencies[job].values()))): add(f) logger.info('Archiving conda environments...') envs = set() for job in self.jobs: if job.conda_env_file: env_archive = job.archive_conda_env() envs.add(env_archive) for env in envs: add(env) except (Exception, BaseException) as e: os.remove(path) raise e
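The archive method picks the tarfile mode from the target suffix and uses the exclusive "x" modes so an existing archive is never overwritten. Below is a condensed sketch of that dispatch; open_archive is a hypothetical helper name, not part of the snakemake API.

import tarfile

# Suffix-to-mode table mirroring the dispatch in archive(); "x" modes fail if the file exists.
TAR_MODES = {".tar": "x", ".tar.gz": "x:gz", ".tar.bz2": "x:bz2", ".tar.xz": "x:xz"}

def open_archive(path):
    for suffix, mode in TAR_MODES.items():
        if path.endswith(suffix):
            return tarfile.open(path, mode=mode, dereference=True)
    raise ValueError("Unsupported archive format: " + path)

# Usage: with open_archive("workflow.tar.gz") as archive: archive.add("Snakefile")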
def clean(self, only_temp=False, dryrun=False): 'Removes files generated by the workflow.' for job in self.jobs: for f in job.output: if ((not only_temp) or is_flagged(f, 'temp')): if (f.exists or os.path.islink(f)): if f.protected: logger.error('Skipping write-protected file {}.'.format(f)) else: msg = ('Deleting {}' if (not dryrun) else 'Would delete {}') logger.info(msg.format(f)) if (not dryrun): f.remove(remove_non_empty_dir=only_temp)
-4,303,512,907,294,990,300
Removes files generated by the workflow.
snakemake/dag.py
clean
baileythegreen/snakemake
python
def clean(self, only_temp=False, dryrun=False): for job in self.jobs: for f in job.output: if ((not only_temp) or is_flagged(f, 'temp')): if (f.exists or os.path.islink(f)): if f.protected: logger.error('Skipping write-protected file {}.'.format(f)) else: msg = ('Deleting {}' if (not dryrun) else 'Would delete {}') logger.info(msg.format(f)) if (not dryrun): f.remove(remove_non_empty_dir=only_temp)
def list_untracked(self): 'List files in the workdir that are not in the dag.' used_files = set() files_in_cwd = set() for job in self.jobs: used_files.update((os.path.relpath(file) for file in chain(job.local_input, job.local_output, job.log))) for (root, dirs, files) in os.walk(os.getcwd()): files_in_cwd.update([os.path.relpath(os.path.join(root, f)) for f in files if (not (f[0] == '.'))]) dirs[:] = [d for d in dirs if (not (d[0] == '.'))] for f in sorted(list((files_in_cwd - used_files))): logger.info(f)
7,058,179,587,227,485,000
List files in the workdir that are not in the dag.
snakemake/dag.py
list_untracked
baileythegreen/snakemake
python
def list_untracked(self): used_files = set() files_in_cwd = set() for job in self.jobs: used_files.update((os.path.relpath(file) for file in chain(job.local_input, job.local_output, job.log))) for (root, dirs, files) in os.walk(os.getcwd()): files_in_cwd.update([os.path.relpath(os.path.join(root, f)) for f in files if (not (f[0] == '.'))]) dirs[:] = [d for d in dirs if (not (d[0] == '.'))] for f in sorted(list((files_in_cwd - used_files))): logger.info(f)
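list_untracked is essentially an os.walk over the working directory with hidden entries pruned, followed by a set difference against everything the DAG references. A self-contained sketch of that core, where workdir and used_files are hypothetical inputs:

import os

def untracked(workdir, used_files):
    # used_files: iterable of paths (relative to workdir) that the workflow knows about.
    found = set()
    for root, dirs, files in os.walk(workdir):
        dirs[:] = [d for d in dirs if not d.startswith(".")]  # prune hidden dirs in place
        found.update(
            os.path.relpath(os.path.join(root, f), workdir)
            for f in files
            if not f.startswith(".")
        )
    return sorted(found - set(used_files))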
def _dfs(job): 'Inner function for DFS traversal.' if stop(job): return if (not post): (yield job) for job_ in direction[job]: if (not (job_ in visited)): visited.add(job_) for j in _dfs(job_): (yield j) if post: (yield job)
-4,692,494,413,225,559,000
Inner function for DFS traversal.
snakemake/dag.py
_dfs
baileythegreen/snakemake
python
def _dfs(job): if stop(job): return if (not post): (yield job) for job_ in direction[job]: if (not (job_ in visited)): visited.add(job_) for j in _dfs(job_): (yield j) if post: (yield job)
def hsv_to_htmlhexrgb(h, s, v): 'Convert hsv colors to hex-encoded rgb colors usable by html.' import colorsys (hex_r, hex_g, hex_b) = (round((255 * x)) for x in colorsys.hsv_to_rgb(h, s, v)) return '#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}'.format(hex_r=hex_r, hex_g=hex_g, hex_b=hex_b)
7,796,969,250,204,814,000
Convert hsv colors to hex-encoded rgb colors usable by html.
snakemake/dag.py
hsv_to_htmlhexrgb
baileythegreen/snakemake
python
def hsv_to_htmlhexrgb(h, s, v): import colorsys (hex_r, hex_g, hex_b) = (round((255 * x)) for x in colorsys.hsv_to_rgb(h, s, v)) return '#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}'.format(hex_r=hex_r, hex_g=hex_g, hex_b=hex_b)
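Because the conversion is a thin wrapper around colorsys, it is easy to sanity-check: a fully saturated hue of 0 is red and a hue of 1/3 is green. The function is restated below in multi-line form purely so the check can be run as-is.

import colorsys

def hsv_to_htmlhexrgb(h, s, v):
    r, g, b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v))
    return "#{:0>2X}{:0>2X}{:0>2X}".format(r, g, b)

print(hsv_to_htmlhexrgb(0.0, 1.0, 1.0))    # '#FF0000'
print(hsv_to_htmlhexrgb(1 / 3, 1.0, 1.0))  # '#00FF00'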
def resolve_input_functions(input_files): 'Iterate over all input files and replace input functions\n with a fixed string.\n ' files = [] for f in input_files: if callable(f): files.append('<input function>') else: files.append(repr(f).strip("'")) return files
5,267,305,400,582,140,000
Iterate over all input files and replace input functions with a fixed string.
snakemake/dag.py
resolve_input_functions
baileythegreen/snakemake
python
def resolve_input_functions(input_files): 'Iterate over all input files and replace input functions\n with a fixed string.\n ' files = [] for f in input_files: if callable(f): files.append('<input function>') else: files.append(repr(f).strip("'")) return files
def html_node(node_id, node, color): 'Assemble a html style node for graphviz' input_files = resolve_input_functions(node._input) output_files = [repr(f).strip("'") for f in node._output] input_header = ('<b><font point-size="14">&#8618; input</font></b>' if input_files else '') output_header = ('<b><font point-size="14">output &rarr;</font></b>' if output_files else '') html_node = ['{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format(node_id=node_id, color=color), '<tr><td>', '<b><font point-size="18">{node.name}</font></b>'.format(node=node), '</td></tr>', '<hr/>', '<tr><td align="left"> {input_header} </td></tr>'.format(input_header=input_header)] for filename in sorted(input_files): in_file = html.escape(filename) html_node.extend(['<tr>', '<td align="left"><font face="monospace">{in_file}</font></td>'.format(in_file=in_file), '</tr>']) html_node.append('<hr/>') html_node.append('<tr><td align="right"> {output_header} </td> </tr>'.format(output_header=output_header)) for filename in sorted(output_files): out_file = html.escape(filename) html_node.extend(['<tr>', '<td align="left"><font face="monospace">{out_file}</font></td></tr>'.format(out_file=out_file)]) html_node.append('</table>>]') return '\n'.join(html_node)
-4,481,357,097,268,225,500
Assemble a html style node for graphviz
snakemake/dag.py
html_node
baileythegreen/snakemake
python
def html_node(node_id, node, color): input_files = resolve_input_functions(node._input) output_files = [repr(f).strip("'") for f in node._output] input_header = ('<b><font point-size="14">&#8618; input</font></b>' if input_files else '') output_header = ('<b><font point-size="14">output &rarr;</font></b>' if output_files else '') html_node = ['{node_id} [ shape=none, margin=0, label=<<table border="2" color="{color}" cellspacing="3" cellborder="0">'.format(node_id=node_id, color=color), '<tr><td>', '<b><font point-size="18">{node.name}</font></b>'.format(node=node), '</td></tr>', '<hr/>', '<tr><td align="left"> {input_header} </td></tr>'.format(input_header=input_header)] for filename in sorted(input_files): in_file = html.escape(filename) html_node.extend(['<tr>', '<td align="left"><font face="monospace">{in_file}</font></td>'.format(in_file=in_file), '</tr>']) html_node.append('<hr/>') html_node.append('<tr><td align="right"> {output_header} </td> </tr>'.format(output_header=output_header)) for filename in sorted(output_files): out_file = html.escape(filename) html_node.extend(['<tr>', '<td align="left"><font face="monospace">{out_file}</font></td></tr>'.format(out_file=out_file)]) html_node.append('</table>>]') return '\n'.join(html_node)
def test_single_page_does_not_include_any_pagination_controls(): '\n When there is only a single page, no pagination controls should render.\n ' url = URL('/') controls = get_page_controls(url, current_page=1, total_pages=1) assert (controls == [])
8,805,130,570,875,188,000
When there is only a single page, no pagination controls should render.
tests/test_pagination.py
test_single_page_does_not_include_any_pagination_controls
encode/dashboard
python
def test_single_page_does_not_include_any_pagination_controls(): '\n \n ' url = URL('/') controls = get_page_controls(url, current_page=1, total_pages=1) assert (controls == [])
def test_first_page_in_pagination_controls(): '\n First page in pagination controls, should render as:\n Previous [1] 2 3 4 5 Next\n ' url = URL('/') controls = get_page_controls(url, current_page=1, total_pages=5) assert (controls == [PageControl(text='Previous', is_disabled=True), PageControl(text='1', is_active=True, url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='Next', url=URL('/?page=2'))])
-1,796,014,110,327,840,500
First page in pagination controls, should render as: Previous [1] 2 3 4 5 Next
tests/test_pagination.py
test_first_page_in_pagination_controls
encode/dashboard
python
def test_first_page_in_pagination_controls(): '\n First page in pagination controls, should render as:\n Previous [1] 2 3 4 5 Next\n ' url = URL('/') controls = get_page_controls(url, current_page=1, total_pages=5) assert (controls == [PageControl(text='Previous', is_disabled=True), PageControl(text='1', is_active=True, url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='Next', url=URL('/?page=2'))])
def test_second_page_in_pagination_controls(): '\n Second page in pagination controls, should render as:\n Previous 1 [2] 3 4 5 Next\n ' url = URL('/') controls = get_page_controls(url, current_page=2, total_pages=5) assert (controls == [PageControl(text='Previous', url=URL('/')), PageControl(text='1', url=URL('/')), PageControl(text='2', is_active=True, url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='Next', url=URL('/?page=3'))])
-2,232,584,472,365,900,300
Second page in pagination controls, should render as: Previous 1 [2] 3 4 5 Next
tests/test_pagination.py
test_second_page_in_pagination_controls
encode/dashboard
python
def test_second_page_in_pagination_controls(): '\n Second page in pagination controls, should render as:\n Previous 1 [2] 3 4 5 Next\n ' url = URL('/') controls = get_page_controls(url, current_page=2, total_pages=5) assert (controls == [PageControl(text='Previous', url=URL('/')), PageControl(text='1', url=URL('/')), PageControl(text='2', is_active=True, url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='Next', url=URL('/?page=3'))])
def test_middle_page_in_pagination_controls(): '\n Middle page in pagination controls, should render as:\n Previous 1 2 [3] 4 5 Next\n ' url = URL('/?page=3') controls = get_page_controls(url, current_page=3, total_pages=5) assert (controls == [PageControl(text='Previous', url=URL('/?page=2')), PageControl(text='1', url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', is_active=True, url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='Next', url=URL('/?page=4'))])
7,354,472,169,436,877,000
Middle page in pagination controls, should render as: Previous 1 2 [3] 4 5 Next
tests/test_pagination.py
test_middle_page_in_pagination_controls
encode/dashboard
python
def test_middle_page_in_pagination_controls(): '\n Middle page in pagination controls, should render as:\n Previous 1 2 [3] 4 5 Next\n ' url = URL('/?page=3') controls = get_page_controls(url, current_page=3, total_pages=5) assert (controls == [PageControl(text='Previous', url=URL('/?page=2')), PageControl(text='1', url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', is_active=True, url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='Next', url=URL('/?page=4'))])
def test_last_page_in_pagination_controls(): '\n Last page in pagination controls, should render as:\n Previous 1 2 3 4 [5] Next\n ' url = URL('/?page=5') controls = get_page_controls(url, current_page=5, total_pages=5) assert (controls == [PageControl(text='Previous', url=URL('/?page=4')), PageControl(text='1', url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5'), is_active=True), PageControl(text='Next', is_disabled=True)])
9,106,975,628,763,386,000
Last page in pagination controls, should render as: Previous 1 2 3 4 [5] Next
tests/test_pagination.py
test_last_page_in_pagination_controls
encode/dashboard
python
def test_last_page_in_pagination_controls(): '\n Last page in pagination controls, should render as:\n Previous 1 2 3 4 [5] Next\n ' url = URL('/?page=5') controls = get_page_controls(url, current_page=5, total_pages=5) assert (controls == [PageControl(text='Previous', url=URL('/?page=4')), PageControl(text='1', url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5'), is_active=True), PageControl(text='Next', is_disabled=True)])
def test_first_page_in_long_pagination_controls(): '\n First page in long pagination controls, should render as:\n Previous [1] 2 3 4 5 ... 49 50 Next\n ' url = URL('/') controls = get_page_controls(url, current_page=1, total_pages=50) assert (controls == [PageControl(text='Previous', is_disabled=True), PageControl(text='1', is_active=True, url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='…', is_disabled=True), PageControl(text='49', url=URL('/?page=49')), PageControl(text='50', url=URL('/?page=50')), PageControl(text='Next', url=URL('/?page=2'))])
1,161,099,459,008,527,000
First page in long pagination controls, should render as: Previous [1] 2 3 4 5 ... 49 50 Next
tests/test_pagination.py
test_first_page_in_long_pagination_controls
encode/dashboard
python
def test_first_page_in_long_pagination_controls(): '\n First page in long pagination controls, should render as:\n Previous [1] 2 3 4 5 ... 49 50 Next\n ' url = URL('/') controls = get_page_controls(url, current_page=1, total_pages=50) assert (controls == [PageControl(text='Previous', is_disabled=True), PageControl(text='1', is_active=True, url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='…', is_disabled=True), PageControl(text='49', url=URL('/?page=49')), PageControl(text='50', url=URL('/?page=50')), PageControl(text='Next', url=URL('/?page=2'))])
def test_last_page_in_long_pagination_controls(): '\n Last page in long pagination controls, should render as:\n Previous 1 2 ... 46 47 48 49 [50] Next\n ' url = URL('/?page=50') controls = get_page_controls(url, current_page=50, total_pages=50) assert (controls == [PageControl(text='Previous', url=URL('/?page=49')), PageControl(text='1', url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='…', is_disabled=True), PageControl(text='46', url=URL('/?page=46')), PageControl(text='47', url=URL('/?page=47')), PageControl(text='48', url=URL('/?page=48')), PageControl(text='49', url=URL('/?page=49')), PageControl(text='50', is_active=True, url=URL('/?page=50')), PageControl(text='Next', is_disabled=True)])
2,766,726,091,038,919,700
Last page in long pagination controls, should render as: Previous 1 2 ... 46 47 48 49 [50] Next
tests/test_pagination.py
test_last_page_in_long_pagination_controls
encode/dashboard
python
def test_last_page_in_long_pagination_controls(): '\n Last page in long pagination controls, should render as:\n Previous 1 2 ... 46 47 48 49 [50] Next\n ' url = URL('/?page=50') controls = get_page_controls(url, current_page=50, total_pages=50) assert (controls == [PageControl(text='Previous', url=URL('/?page=49')), PageControl(text='1', url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='…', is_disabled=True), PageControl(text='46', url=URL('/?page=46')), PageControl(text='47', url=URL('/?page=47')), PageControl(text='48', url=URL('/?page=48')), PageControl(text='49', url=URL('/?page=49')), PageControl(text='50', is_active=True, url=URL('/?page=50')), PageControl(text='Next', is_disabled=True)])
def test_ellipsis_fill_in(): '\n If an ellipsis marker can be replaced with a single page marker, then\n we should do so.\n ' url = URL('/?page=6') controls = get_page_controls(url, current_page=6, total_pages=11) assert (controls == [PageControl(text='Previous', url=URL('/?page=5')), PageControl(text='1', url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='6', url=URL('/?page=6'), is_active=True), PageControl(text='7', url=URL('/?page=7')), PageControl(text='8', url=URL('/?page=8')), PageControl(text='9', url=URL('/?page=9')), PageControl(text='10', url=URL('/?page=10')), PageControl(text='11', url=URL('/?page=11')), PageControl(text='Next', url=URL('/?page=7'))])
-3,769,510,764,230,896,600
If an ellipsis marker can be replaced with a single page marker, then we should do so.
tests/test_pagination.py
test_ellipsis_fill_in
encode/dashboard
python
def test_ellipsis_fill_in(): '\n If an ellipsis marker can be replaced with a single page marker, then\n we should do so.\n ' url = URL('/?page=6') controls = get_page_controls(url, current_page=6, total_pages=11) assert (controls == [PageControl(text='Previous', url=URL('/?page=5')), PageControl(text='1', url=URL('/')), PageControl(text='2', url=URL('/?page=2')), PageControl(text='3', url=URL('/?page=3')), PageControl(text='4', url=URL('/?page=4')), PageControl(text='5', url=URL('/?page=5')), PageControl(text='6', url=URL('/?page=6'), is_active=True), PageControl(text='7', url=URL('/?page=7')), PageControl(text='8', url=URL('/?page=8')), PageControl(text='9', url=URL('/?page=9')), PageControl(text='10', url=URL('/?page=10')), PageControl(text='11', url=URL('/?page=11')), PageControl(text='Next', url=URL('/?page=7'))])
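Taken together, the pagination tests above pin down a windowing rule: keep a few pages around the current one, always keep the first and last couple of pages, collapse any larger gap into an ellipsis, and fill a gap of exactly one page with the page itself. The sketch below reproduces that selection for the cases shown; page_numbers is a hypothetical helper, and the real get_page_controls implementation in encode/dashboard may differ in its exact window sizes.

def page_numbers(current, total, window=4, edge=2):
    # Returns page numbers to display; None marks where an ellipsis control would go.
    keep = set(range(1, edge + 1)) | set(range(total - edge + 1, total + 1))
    keep |= set(range(current - window, current + window + 1))
    keep = {p for p in keep if 1 <= p <= total}
    out, previous = [], 0
    for page in sorted(keep):
        if page == previous + 2:    # gap of exactly one page: show it instead of "..."
            out.append(previous + 1)
        elif page > previous + 2:   # larger gap: collapse into an ellipsis
            out.append(None)
        out.append(page)
        previous = page
    return out

print(page_numbers(1, 50))   # [1, 2, 3, 4, 5, None, 49, 50]
print(page_numbers(50, 50))  # [1, 2, None, 46, 47, 48, 49, 50]
print(page_numbers(6, 11))   # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]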
def __init__(self, ds, fields, start_point, end_point, npoints, figure_size=5.0, fontsize=14.0, field_labels=None): '\n Sets up figure and axes\n ' line = LineBuffer(ds, start_point, end_point, npoints, label=None) self.lines = [line] self._initialize_instance(self, ds, fields, figure_size, fontsize, field_labels) self._setup_plots()
5,028,115,850,037,134,000
Sets up figure and axes
yt/visualization/line_plot.py
__init__
smressle/yt
python
def __init__(self, ds, fields, start_point, end_point, npoints, figure_size=5.0, fontsize=14.0, field_labels=None): '\n \n ' line = LineBuffer(ds, start_point, end_point, npoints, label=None) self.lines = [line] self._initialize_instance(self, ds, fields, figure_size, fontsize, field_labels) self._setup_plots()
@classmethod def from_lines(cls, ds, fields, lines, figure_size=5.0, font_size=14.0, field_labels=None): "\n A class method for constructing a line plot from multiple sampling lines\n\n Parameters\n ----------\n\n ds : :class:`yt.data_objects.static_output.Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n fields : field name or list of field names\n The name(s) of the field(s) to be plotted.\n lines : list of :class:`yt.visualization.line_plot.LineBuffer` instances\n The lines from which to sample data\n figure_size : int or two-element iterable of ints\n Size in inches of the image.\n Default: 5 (5x5)\n fontsize : int\n Font size for all text in the plot.\n Default: 14\n field_labels : dictionary\n Keys should be the field names. Values should be latex-formattable\n strings used in the LinePlot legend\n Default: None\n\n Example\n --------\n >>> ds = yt.load('SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e', step=-1)\n >>> fields = [field for field in ds.field_list if field[0] == 'all']\n >>> lines = []\n >>> lines.append(yt.LineBuffer(ds, [0.25, 0, 0], [0.25, 1, 0], 100, label='x = 0.25'))\n >>> lines.append(yt.LineBuffer(ds, [0.5, 0, 0], [0.5, 1, 0], 100, label='x = 0.5'))\n >>> plot = yt.LinePlot.from_lines(ds, fields, lines)\n >>> plot.save()\n\n " obj = cls.__new__(cls) obj.lines = lines cls._initialize_instance(obj, ds, fields, figure_size, font_size, field_labels) obj._setup_plots() return obj
-8,590,241,957,368,003,000
A class method for constructing a line plot from multiple sampling lines Parameters ---------- ds : :class:`yt.data_objects.static_output.Dataset` This is the dataset object corresponding to the simulation output to be plotted. fields : field name or list of field names The name(s) of the field(s) to be plotted. lines : list of :class:`yt.visualization.line_plot.LineBuffer` instances The lines from which to sample data figure_size : int or two-element iterable of ints Size in inches of the image. Default: 5 (5x5) fontsize : int Font size for all text in the plot. Default: 14 field_labels : dictionary Keys should be the field names. Values should be latex-formattable strings used in the LinePlot legend Default: None Example -------- >>> ds = yt.load('SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e', step=-1) >>> fields = [field for field in ds.field_list if field[0] == 'all'] >>> lines = [] >>> lines.append(yt.LineBuffer(ds, [0.25, 0, 0], [0.25, 1, 0], 100, label='x = 0.25')) >>> lines.append(yt.LineBuffer(ds, [0.5, 0, 0], [0.5, 1, 0], 100, label='x = 0.5')) >>> plot = yt.LinePlot.from_lines(ds, fields, lines) >>> plot.save()
yt/visualization/line_plot.py
from_lines
smressle/yt
python
@classmethod def from_lines(cls, ds, fields, lines, figure_size=5.0, font_size=14.0, field_labels=None): "\n A class method for constructing a line plot from multiple sampling lines\n\n Parameters\n ----------\n\n ds : :class:`yt.data_objects.static_output.Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n fields : field name or list of field names\n The name(s) of the field(s) to be plotted.\n lines : list of :class:`yt.visualization.line_plot.LineBuffer` instances\n The lines from which to sample data\n figure_size : int or two-element iterable of ints\n Size in inches of the image.\n Default: 5 (5x5)\n fontsize : int\n Font size for all text in the plot.\n Default: 14\n field_labels : dictionary\n Keys should be the field names. Values should be latex-formattable\n strings used in the LinePlot legend\n Default: None\n\n Example\n --------\n >>> ds = yt.load('SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e', step=-1)\n >>> fields = [field for field in ds.field_list if field[0] == 'all']\n >>> lines = []\n >>> lines.append(yt.LineBuffer(ds, [0.25, 0, 0], [0.25, 1, 0], 100, label='x = 0.25'))\n >>> lines.append(yt.LineBuffer(ds, [0.5, 0, 0], [0.5, 1, 0], 100, label='x = 0.5'))\n >>> plot = yt.LinePlot.from_lines(ds, fields, lines)\n >>> plot.save()\n\n " obj = cls.__new__(cls) obj.lines = lines cls._initialize_instance(obj, ds, fields, figure_size, font_size, field_labels) obj._setup_plots() return obj
@invalidate_plot def annotate_legend(self, field): '\n Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions`\n call ensures that a legend label will be added for every field of\n a multi-field plot\n ' dim_field = self.plots._sanitize_dimensions(field) self.include_legend[dim_field] = True
-7,778,326,172,680,734,000
Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions` call ensures that a legend label will be added for every field of a multi-field plot
yt/visualization/line_plot.py
annotate_legend
smressle/yt
python
@invalidate_plot def annotate_legend(self, field): '\n Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions`\n call ensures that a legend label will be added for every field of\n a multi-field plot\n ' dim_field = self.plots._sanitize_dimensions(field) self.include_legend[dim_field] = True
@invalidate_plot def set_x_unit(self, unit_name): 'Set the unit to use along the x-axis\n\n Parameters\n ----------\n unit_name: str\n The name of the unit to use for the x-axis unit\n ' self._x_unit = unit_name
-1,050,713,432,808,144,600
Set the unit to use along the x-axis Parameters ---------- unit_name: str The name of the unit to use for the x-axis unit
yt/visualization/line_plot.py
set_x_unit
smressle/yt
python
@invalidate_plot def set_x_unit(self, unit_name): 'Set the unit to use along the x-axis\n\n Parameters\n ----------\n unit_name: str\n The name of the unit to use for the x-axis unit\n ' self._x_unit = unit_name
@invalidate_plot def set_unit(self, field, unit_name): 'Set the unit used to plot the field\n\n Parameters\n ----------\n field: str or field tuple\n The name of the field to set the units for\n unit_name: str\n The name of the unit to use for this field\n ' self._y_units[self.data_source._determine_fields(field)[0]] = unit_name
-5,547,905,575,005,278,000
Set the unit used to plot the field Parameters ---------- field: str or field tuple The name of the field to set the units for unit_name: str The name of the unit to use for this field
yt/visualization/line_plot.py
set_unit
smressle/yt
python
@invalidate_plot def set_unit(self, field, unit_name): 'Set the unit used to plot the field\n\n Parameters\n ----------\n field: str or field tuple\n The name of the field to set the units for\n unit_name: str\n The name of the unit to use for this field\n ' self._y_units[self.data_source._determine_fields(field)[0]] = unit_name
@invalidate_plot def annotate_title(self, field, title): 'Set the unit used to plot the field\n\n Parameters\n ----------\n field: str or field tuple\n The name of the field to set the units for\n title: str\n The title to use for the plot\n ' self._titles[self.data_source._determine_fields(field)[0]] = title
-773,530,870,880,534,000
Set the unit used to plot the field Parameters ---------- field: str or field tuple The name of the field to set the units for title: str The title to use for the plot
yt/visualization/line_plot.py
annotate_title
smressle/yt
python
@invalidate_plot def annotate_title(self, field, title): 'Set the unit used to plot the field\n\n Parameters\n ----------\n field: str or field tuple\n The name of the field to set the units for\n title: str\n The title to use for the plot\n ' self._titles[self.data_source._determine_fields(field)[0]] = title
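The setters above (set_x_unit, set_unit, annotate_title, annotate_legend) are all plot-invalidating mutators, so they can be chained before saving. A hedged usage sketch; the sample dataset name, field, and unit strings below are illustrative assumptions and must exist in your own data for this to run.

import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder sample dataset
plot = yt.LinePlot(ds, ("gas", "density"), (0, 0, 0), (1, 1, 1), 512)
plot.set_x_unit("Mpc")                                 # unit along the sampling axis
plot.set_unit(("gas", "density"), "g/cm**3")           # unit for the plotted field
plot.annotate_title(("gas", "density"), "Density along the box diagonal")
plot.annotate_legend(("gas", "density"))
plot.save()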
def unset_macosx_deployment_target(): 'Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable\n libraries\n ' if ('MACOSX_DEPLOYMENT_TARGET' in os.environ): del os.environ['MACOSX_DEPLOYMENT_TARGET']
469,749,772,178,140,860
Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable libraries
numba/tests/test_pycc.py
unset_macosx_deployment_target
eric-erki/numba
python
def unset_macosx_deployment_target(): 'Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable\n libraries\n ' if ('MACOSX_DEPLOYMENT_TARGET' in os.environ): del os.environ['MACOSX_DEPLOYMENT_TARGET']
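An equivalent, slightly more compact idiom for the helper above is dict.pop with a default, which silently does nothing when the variable is absent:

import os

os.environ.pop("MACOSX_DEPLOYMENT_TARGET", None)  # no KeyError if it was never set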
def test_pycc_ctypes_lib(self): '\n Test creating a C shared library object using pycc.\n ' source = os.path.join(base_path, 'compile_with_pycc.py') cdll_modulename = ('test_dll_legacy' + find_shared_ending()) cdll_path = os.path.join(self.tmpdir, cdll_modulename) if os.path.exists(cdll_path): os.unlink(cdll_path) main(args=['--debug', '-o', cdll_path, source]) lib = CDLL(cdll_path) lib.mult.argtypes = [POINTER(c_double), c_void_p, c_double, c_double] lib.mult.restype = c_int lib.multf.argtypes = [POINTER(c_float), c_void_p, c_float, c_float] lib.multf.restype = c_int res = c_double() lib.mult(byref(res), None, 123, 321) self.assertEqual(res.value, (123 * 321)) res = c_float() lib.multf(byref(res), None, 987, 321) self.assertEqual(res.value, (987 * 321))
4,948,081,442,505,072,000
Test creating a C shared library object using pycc.
numba/tests/test_pycc.py
test_pycc_ctypes_lib
eric-erki/numba
python
def test_pycc_ctypes_lib(self): '\n \n ' source = os.path.join(base_path, 'compile_with_pycc.py') cdll_modulename = ('test_dll_legacy' + find_shared_ending()) cdll_path = os.path.join(self.tmpdir, cdll_modulename) if os.path.exists(cdll_path): os.unlink(cdll_path) main(args=['--debug', '-o', cdll_path, source]) lib = CDLL(cdll_path) lib.mult.argtypes = [POINTER(c_double), c_void_p, c_double, c_double] lib.mult.restype = c_int lib.multf.argtypes = [POINTER(c_float), c_void_p, c_float, c_float] lib.multf.restype = c_int res = c_double() lib.mult(byref(res), None, 123, 321) self.assertEqual(res.value, (123 * 321)) res = c_float() lib.multf(byref(res), None, 987, 321) self.assertEqual(res.value, (987 * 321))
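The test exercises the pycc-built shared library through ctypes by declaring argtypes and restype before calling in. A condensed sketch of that loading pattern; the library path and exported symbol below are hypothetical and the snippet only runs if such a library exists.

from ctypes import CDLL, POINTER, byref, c_double, c_int, c_void_p

# Hypothetical: ./libexample.so exports int mult(double *out, void *env, double a, double b)
lib = CDLL("./libexample.so")
lib.mult.argtypes = [POINTER(c_double), c_void_p, c_double, c_double]
lib.mult.restype = c_int

result = c_double()
lib.mult(byref(result), None, 123, 321)  # the product is written through the pointer
print(result.value)                      # 39483.0 if the library behaves like the test's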
def test_pycc_pymodule(self): '\n Test creating a CPython extension module using pycc.\n ' self.skipTest('lack of environment can make the extension crash') source = os.path.join(base_path, 'compile_with_pycc.py') modulename = 'test_pyext_legacy' out_modulename = os.path.join(self.tmpdir, (modulename + find_pyext_ending())) if os.path.exists(out_modulename): os.unlink(out_modulename) main(args=['--debug', '--python', '-o', out_modulename, source]) with self.check_c_ext(self.tmpdir, modulename) as lib: res = lib.multi(123, 321) self.assertPreciseEqual(res, (123 * 321)) res = lib.multf(987, 321) self.assertPreciseEqual(res, (987.0 * 321.0))
6,336,552,434,764,369,000
Test creating a CPython extension module using pycc.
numba/tests/test_pycc.py
test_pycc_pymodule
eric-erki/numba
python
def test_pycc_pymodule(self): '\n \n ' self.skipTest('lack of environment can make the extension crash') source = os.path.join(base_path, 'compile_with_pycc.py') modulename = 'test_pyext_legacy' out_modulename = os.path.join(self.tmpdir, (modulename + find_pyext_ending())) if os.path.exists(out_modulename): os.unlink(out_modulename) main(args=['--debug', '--python', '-o', out_modulename, source]) with self.check_c_ext(self.tmpdir, modulename) as lib: res = lib.multi(123, 321) self.assertPreciseEqual(res, (123 * 321)) res = lib.multf(987, 321) self.assertPreciseEqual(res, (987.0 * 321.0))
def test_pycc_bitcode(self): '\n Test creating a LLVM bitcode file using pycc.\n ' modulename = os.path.join(base_path, 'compile_with_pycc') bitcode_modulename = os.path.join(self.tmpdir, 'test_bitcode_legacy.bc') if os.path.exists(bitcode_modulename): os.unlink(bitcode_modulename) main(args=['--debug', '--llvm', '-o', bitcode_modulename, (modulename + '.py')]) with open(bitcode_modulename, 'rb') as f: bc = f.read() bitcode_wrapper_magic = b'\xde\xc0\x17\x0b' bitcode_magic = b'BC\xc0\xde' self.assertTrue(bc.startswith((bitcode_magic, bitcode_wrapper_magic)), bc)
1,490,123,849,608,073,500
Test creating a LLVM bitcode file using pycc.
numba/tests/test_pycc.py
test_pycc_bitcode
eric-erki/numba
python
def test_pycc_bitcode(self): '\n \n ' modulename = os.path.join(base_path, 'compile_with_pycc') bitcode_modulename = os.path.join(self.tmpdir, 'test_bitcode_legacy.bc') if os.path.exists(bitcode_modulename): os.unlink(bitcode_modulename) main(args=['--debug', '--llvm', '-o', bitcode_modulename, (modulename + '.py')]) with open(bitcode_modulename, 'rb') as f: bc = f.read() bitcode_wrapper_magic = b'\xde\xc0\x17\x0b' bitcode_magic = b'BC\xc0\xde' self.assertTrue(bc.startswith((bitcode_magic, bitcode_wrapper_magic)), bc)
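The final assertion only looks at the leading magic bytes of the emitted file: raw LLVM bitcode starts with b'BC\xc0\xde', while wrapped bitcode starts with the little-endian 0x0B17C0DE wrapper magic. A small stand-alone checker along the same lines; looks_like_llvm_bitcode is a hypothetical helper name.

BITCODE_MAGIC = b"BC\xc0\xde"                # raw LLVM bitcode
BITCODE_WRAPPER_MAGIC = b"\xde\xc0\x17\x0b"  # bitcode wrapper header (little-endian 0x0B17C0DE)

def looks_like_llvm_bitcode(path):
    with open(path, "rb") as f:
        head = f.read(4)
    return head.startswith((BITCODE_MAGIC, BITCODE_WRAPPER_MAGIC))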
async def get(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> '_models.AgentPool': 'Gets the detailed information for a given agent pool.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AgentPool, or the result of cls(response)\n :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-06-01-preview' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
6,863,132,846,442,995,000
Gets the detailed information for a given agent pool. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AgentPool, or the result of cls(response) :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool :raises: ~azure.core.exceptions.HttpResponseError
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py
get
AFengKK/azure-sdk-for-python
python
async def get(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> '_models.AgentPool': 'Gets the detailed information for a given agent pool.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AgentPool, or the result of cls(response)\n :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-06-01-preview' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
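This operations class is normally reached through the agent_pools attribute of the asynchronous management client rather than instantiated directly. A hedged sketch of that call path; the subscription, resource group, registry and pool names are placeholders, the snippet assumes azure-identity is installed, and the exact client construction can vary between package versions.

import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.containerregistry.aio import ContainerRegistryManagementClient

async def main():
    async with DefaultAzureCredential() as credential:
        async with ContainerRegistryManagementClient(
            credential, "<subscription-id>", api_version="2019-06-01-preview"
        ) as client:
            pool = await client.agent_pools.get("<resource-group>", "<registry>", "<agent-pool>")
            print(pool.provisioning_state)

asyncio.run(main())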
async def begin_create(self, resource_group_name: str, registry_name: str, agent_pool_name: str, agent_pool: '_models.AgentPool', **kwargs: Any) -> AsyncLROPoller['_models.AgentPool']: 'Creates an agent pool for a container registry with the specified parameters.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :param agent_pool: The parameters of an agent pool that needs to scheduled.\n :type agent_pool: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_initial(resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, agent_pool=agent_pool, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-1,030,239,877,524,509,000
Creates an agent pool for a container registry with the specified parameters. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :param agent_pool: The parameters of an agent pool that needs to scheduled. :type agent_pool: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError:
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py
begin_create
AFengKK/azure-sdk-for-python
python
async def begin_create(self, resource_group_name: str, registry_name: str, agent_pool_name: str, agent_pool: '_models.AgentPool', **kwargs: Any) -> AsyncLROPoller['_models.AgentPool']: 'Creates an agent pool for a container registry with the specified parameters.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :param agent_pool: The parameters of an agent pool that needs to scheduled.\n :type agent_pool: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_initial(resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, agent_pool=agent_pool, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def begin_delete(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> AsyncLROPoller[None]: 'Deletes a specified agent pool resource.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-4,383,831,536,828,033,000
Deletes a specified agent pool resource. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError:
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py
begin_delete
AFengKK/azure-sdk-for-python
python
async def begin_delete(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> AsyncLROPoller[None]: 'Deletes a specified agent pool resource.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def begin_update(self, resource_group_name: str, registry_name: str, agent_pool_name: str, update_parameters: '_models.AgentPoolUpdateParameters', **kwargs: Any) -> AsyncLROPoller['_models.AgentPool']: 'Updates an agent pool with the specified parameters.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :param update_parameters: The parameters for updating an agent pool.\n :type update_parameters: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolUpdateParameters\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._update_initial(resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, update_parameters=update_parameters, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-6,170,495,838,415,589,000
Updates an agent pool with the specified parameters. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :param update_parameters: The parameters for updating an agent pool. :type update_parameters: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolUpdateParameters :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool] :raises ~azure.core.exceptions.HttpResponseError:
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py
begin_update
AFengKK/azure-sdk-for-python
python
async def begin_update(self, resource_group_name: str, registry_name: str, agent_pool_name: str, update_parameters: '_models.AgentPoolUpdateParameters', **kwargs: Any) -> AsyncLROPoller['_models.AgentPool']: 'Updates an agent pool with the specified parameters.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :param update_parameters: The parameters for updating an agent pool.\n :type update_parameters: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolUpdateParameters\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPool]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._update_initial(resource_group_name=resource_group_name, registry_name=registry_name, agent_pool_name=agent_pool_name, update_parameters=update_parameters, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
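A minimal usage sketch for the begin_update record above; everything here (the aio client class, the credential choice, the api_version pin, and the count field) is an assumption layered on top of the record, not something the record states:

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.containerregistry.aio import ContainerRegistryManagementClient
from azure.mgmt.containerregistry.v2019_06_01_preview.models import AgentPoolUpdateParameters


async def main():
    async with DefaultAzureCredential() as credential:
        # api_version pinned to match the preview operations documented above (assumption).
        async with ContainerRegistryManagementClient(
            credential, "<subscription-id>", api_version="2019-06-01-preview"
        ) as client:
            # begin_update returns an AsyncLROPoller; result() awaits the final AgentPool.
            poller = await client.agent_pools.begin_update(
                "<resource-group>", "<registry-name>", "<agent-pool-name>",
                AgentPoolUpdateParameters(count=2),  # `count` assumed to be a valid update field
            )
            pool = await poller.result()
            print(pool.name)


asyncio.run(main())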
def list(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> AsyncIterable['_models.AgentPoolListResult']: 'Lists all the agent pools for a specified container registry.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either AgentPoolListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-06-01-preview' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('AgentPoolListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
4,120,430,077,457,207,300
Lists all the agent pools for a specified container registry. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AgentPoolListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolListResult] :raises: ~azure.core.exceptions.HttpResponseError
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py
list
AFengKK/azure-sdk-for-python
python
def list(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> AsyncIterable['_models.AgentPoolListResult']: 'Lists all the agent pools for a specified container registry.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either AgentPoolListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-06-01-preview' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('AgentPoolListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
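Because list returns an AsyncItemPaged, callers iterate it with async for; a hedged sketch assuming the same hypothetical async client as above:

# Sketch only: `client` is the hypothetical async management client from the previous
# example; the AgentPool attribute names are taken on good faith.
async def print_agent_pools(client, resource_group, registry_name):
    # AsyncItemPaged fetches follow-up pages lazily as the loop advances.
    async for pool in client.agent_pools.list(resource_group, registry_name):
        print(pool.name, pool.provisioning_state)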
async def get_queue_status(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> '_models.AgentPoolQueueStatus': 'Gets the count of queued runs for a given agent pool.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AgentPoolQueueStatus, or the result of cls(response)\n :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolQueueStatus\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-06-01-preview' accept = 'application/json' url = self.get_queue_status.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.post(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPoolQueueStatus', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
2,766,069,385,546,253,000
Gets the count of queued runs for a given agent pool. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AgentPoolQueueStatus, or the result of cls(response) :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolQueueStatus :raises: ~azure.core.exceptions.HttpResponseError
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py
get_queue_status
AFengKK/azure-sdk-for-python
python
async def get_queue_status(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> '_models.AgentPoolQueueStatus': 'Gets the count of queued runs for a given agent pool.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type resource_group_name: str\n :param registry_name: The name of the container registry.\n :type registry_name: str\n :param agent_pool_name: The name of the agent pool.\n :type agent_pool_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: AgentPoolQueueStatus, or the result of cls(response)\n :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentPoolQueueStatus\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-06-01-preview' accept = 'application/json' url = self.get_queue_status.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', min_length=1), 'registryName': self._serialize.url('registry_name', registry_name, 'str', max_length=50, min_length=5, pattern='^[a-zA-Z0-9]*$'), 'agentPoolName': self._serialize.url('agent_pool_name', agent_pool_name, 'str', max_length=20, min_length=3, pattern='^[a-zA-Z0-9-]*$')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.post(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPoolQueueStatus', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
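get_queue_status is a plain awaitable; a small sketch, again assuming the hypothetical client and that AgentPoolQueueStatus exposes a count attribute:

# Sketch: `client` is assumed to be an authenticated async client; names are placeholders.
async def queued_run_count(client, resource_group, registry_name, agent_pool_name):
    status = await client.agent_pools.get_queue_status(
        resource_group, registry_name, agent_pool_name
    )
    return status.count  # assumed to hold the number of queued runs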
def check_lnotab(self, code): 'Check that the lnotab byte offsets are sensible.' code = dis._get_code_object(code) lnotab = list(dis.findlinestarts(code)) min_bytecode = min((t[0] for t in lnotab)) max_bytecode = max((t[0] for t in lnotab)) self.assertGreaterEqual(min_bytecode, 0) self.assertLess(max_bytecode, len(code.co_code))
566,996,500,382,314,560
Check that the lnotab byte offsets are sensible.
www/src/Lib/test/test_peepholer.py
check_lnotab
Froggo8311/brython
python
def check_lnotab(self, code): code = dis._get_code_object(code) lnotab = list(dis.findlinestarts(code)) min_bytecode = min((t[0] for t in lnotab)) max_bytecode = max((t[0] for t in lnotab)) self.assertGreaterEqual(min_bytecode, 0) self.assertLess(max_bytecode, len(code.co_code))
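check_lnotab leans on dis.findlinestarts, which yields (bytecode offset, line number) pairs; a standalone illustration of the invariant it asserts (exact offsets vary across CPython versions):

import dis


def sample(x):
    y = x + 1
    return y * 2


# Each tuple pairs a bytecode offset with the source line that starts there.
for offset, lineno in dis.findlinestarts(sample.__code__):
    print(offset, lineno)

# Every offset is >= 0 and < len(sample.__code__.co_code), which is exactly
# what check_lnotab asserts about the lnotab of the code object under test.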
def f(): 'Adding a docstring made this test fail in Py2.5.0' return None
-8,453,757,884,726,220,000
Adding a docstring made this test fail in Py2.5.0
www/src/Lib/test/test_peepholer.py
f
Froggo8311/brython
python
def f(): return None
def _extract3d(zipFileDir, destDirectory, outputFileName): ' a wrapper function for the recursive file extraction function ' with zipfile.ZipFile(zipFileDir) as zipFile: _extract3dRecursively(zipFile.namelist(), zipFile, destDirectory, outputFileName)
6,251,360,792,502,066,000
a wrapper function for the recursive file extraction function
fusion123/converter.py
_extract3d
bennymeg/123-Fusion
python
def _extract3d(zipFileDir, destDirectory, outputFileName): ' ' with zipfile.ZipFile(zipFileDir) as zipFile: _extract3dRecursively(zipFile.namelist(), zipFile, destDirectory, outputFileName)
def _extract3dRecursively(fileList, baseZipFile, destDirectory, outputFileName, numOfFileExtracted=0): ' extracts all the illustations and models from the 123dx file recursively ' imageExtList = ['.jpg', '.png'] fusionExtList = ['.smt', '.smb', '.sat', '.igs', '.dxf', '.stp', '.stl'] for member in fileList: if os.path.isdir(member): _extract3dRecursively(os.listdir(member), baseZipFile, destDirectory, outputFileName) else: fileExt = os.path.splitext(member)[1] fileName = os.path.splitext(os.path.basename(member))[0] if (fileExt in (fusionExtList + imageExtList)): fullFileName = ''.join([outputFileName, '_', fileName, fileExt]) while os.path.exists(os.path.join(destDirectory, fullFileName)): fileName += '#' fullFileName = ''.join([outputFileName, '_', fileName, fileExt]) source = baseZipFile.open(member) target = open(os.path.join(destDirectory, fullFileName), 'wb') with source, target: shutil.copyfileobj(source, target) numOfFileExtracted += 1
3,192,290,431,466,725,000
extracts all the illustrations and models from the 123dx file recursively
fusion123/converter.py
_extract3dRecursively
bennymeg/123-Fusion
python
def _extract3dRecursively(fileList, baseZipFile, destDirectory, outputFileName, numOfFileExtracted=0): ' ' imageExtList = ['.jpg', '.png'] fusionExtList = ['.smt', '.smb', '.sat', '.igs', '.dxf', '.stp', '.stl'] for member in fileList: if os.path.isdir(member): _extract3dRecursively(os.listdir(member), baseZipFile, destDirectory, outputFileName) else: fileExt = os.path.splitext(member)[1] fileName = os.path.splitext(os.path.basename(member))[0] if (fileExt in (fusionExtList + imageExtList)): fullFileName = ''.join([outputFileName, '_', fileName, fileExt]) while os.path.exists(os.path.join(destDirectory, fullFileName)): fileName += '#' fullFileName = ''.join([outputFileName, '_', fileName, fileExt]) source = baseZipFile.open(member) target = open(os.path.join(destDirectory, fullFileName), 'wb') with source, target: shutil.copyfileobj(source, target) numOfFileExtracted += 1
def _execute(srcDirectory, destDirectory, filename): ' converts the file into fusion 360 file (this file might be usable in other CAD software as well) ' outputFileName = os.path.splitext(os.path.basename(filename))[0] newFileName = (outputFileName + '.zip') oldFilePath = os.path.join(srcDirectory, filename) newFilePath = os.path.join(srcDirectory, newFileName) os.rename(oldFilePath, newFilePath) print(('Extracting %s' % oldFilePath)) _extract3d(newFilePath, destDirectory, outputFileName) os.rename(newFilePath, oldFilePath)
7,805,116,768,547,185,000
converts the file into a Fusion 360 file (this file might be usable in other CAD software as well)
fusion123/converter.py
_execute
bennymeg/123-Fusion
python
def _execute(srcDirectory, destDirectory, filename): ' ' outputFileName = os.path.splitext(os.path.basename(filename))[0] newFileName = (outputFileName + '.zip') oldFilePath = os.path.join(srcDirectory, filename) newFilePath = os.path.join(srcDirectory, newFileName) os.rename(oldFilePath, newFilePath) print(('Extracting %s' % oldFilePath)) _extract3d(newFilePath, destDirectory, outputFileName) os.rename(newFilePath, oldFilePath)
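Putting the three converter helpers together, a hypothetical driver loop might look like this (the module name converter and both folder names are assumptions):

import os

# Hypothetical driver: assumes the functions above live in a module named `converter`
# and that the source folder holds .123dx archives.
import converter

src = "designs"      # folder containing .123dx files (placeholder)
dest = "extracted"   # output folder for the recovered models and images (placeholder)

os.makedirs(dest, exist_ok=True)
for filename in os.listdir(src):
    if filename.lower().endswith(".123dx"):
        converter._execute(src, dest, filename)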
def sumOfLeftLeaves(self, root): '\n :type root: TreeNode\n :rtype: int\n ' while (not root): return 0 if (root.left and (not root.left.left) and (not root.left.right)): return (root.left.val + self.sumOfLeftLeaves(root.right)) return (self.sumOfLeftLeaves(root.left) + self.sumOfLeftLeaves(root.right))
-1,436,847,296,584,454,100
:type root: TreeNode :rtype: int
Python/404sum_of_left_leaves.py
sumOfLeftLeaves
Apocrypse/LeetCode
python
def sumOfLeftLeaves(self, root): '\n :type root: TreeNode\n :rtype: int\n ' while (not root): return 0 if (root.left and (not root.left.left) and (not root.left.right)): return (root.left.val + self.sumOfLeftLeaves(root.right)) return (self.sumOfLeftLeaves(root.left) + self.sumOfLeftLeaves(root.right))
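A quick worked example for sumOfLeftLeaves, with a minimal TreeNode defined here purely for illustration and the usual assumption that the method lives on a Solution class:

class TreeNode:
    """Minimal stand-in for the LeetCode TreeNode used by sumOfLeftLeaves."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right


#        3
#       / \
#      9  20
#         /  \
#        15   7
root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))

# Left leaves are 9 and 15, so the expected result is 24
# (assuming the method above is attached to the usual `Solution` class).
print(Solution().sumOfLeftLeaves(root))  # 24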
def __init__(self, connection=None, nodename=None): 'Initialize this Item' self._connection = connection self._nodename = nodename self._nodepath = [] self._curobj = None self._xml = StringIO()
-429,282,253,661,561,800
Initialize this Item
desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py
__init__
10088/hue
python
def __init__(self, connection=None, nodename=None): self._connection = connection self._nodename = nodename self._nodepath = [] self._curobj = None self._xml = StringIO()
def __init__(self, connection=None): 'Initialize this Item' ResponseGroup.__init__(self, connection, 'Item')
-7,527,589,324,602,155,000
Initialize this Item
desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py
__init__
10088/hue
python
def __init__(self, connection=None): ResponseGroup.__init__(self, connection, 'Item')
def __next__(self): 'Special paging functionality' if (self.iter is None): self.iter = iter(self.objs) try: return next(self.iter) except StopIteration: self.iter = None self.objs = [] if (int(self.page) < int(self.total_pages)): self.page += 1 self._connection.get_response(self.action, self.params, self.page, self) return next(self) else: raise
4,520,251,078,756,440,600
Special paging functionality
desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py
__next__
10088/hue
python
def __next__(self): if (self.iter is None): self.iter = iter(self.objs) try: return next(self.iter) except StopIteration: self.iter = None self.objs = [] if (int(self.page) < int(self.total_pages)): self.page += 1 self._connection.get_response(self.action, self.params, self.page, self) return next(self) else: raise
def to_xml(self): 'Override to first fetch everything' for item in self: pass return ResponseGroup.to_xml(self)
-1,328,991,912,337,954,600
Override to first fetch everything
desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py
to_xml
10088/hue
python
def to_xml(self): for item in self: pass return ResponseGroup.to_xml(self)
def delete(self, resource_group_name, public_ip_prefix_name, custom_headers=None, raw=False, polling=True, **operation_config): 'Deletes the specified public IP prefix.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_name: The name of the PublicIpPrefix.\n :type public_ip_prefix_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse<None> if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' raw_result = self._delete_initial(resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, custom_headers=custom_headers, raw=True, **operation_config) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout) if (polling is True): polling_method = ARMPolling(lro_delay, **operation_config) elif (polling is False): polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
-5,602,891,413,988,677,000
Deletes the specified public IP prefix. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the PublicIpPrefix. :type public_ip_prefix_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py
delete
acured/azure-sdk-for-python
python
def delete(self, resource_group_name, public_ip_prefix_name, custom_headers=None, raw=False, polling=True, **operation_config): 'Deletes the specified public IP prefix.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_name: The name of the PublicIpPrefix.\n :type public_ip_prefix_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns None or\n ClientRawResponse<None> if raw==True\n :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' raw_result = self._delete_initial(resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, custom_headers=custom_headers, raw=True, **operation_config) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout) if (polling is True): polling_method = ARMPolling(lro_delay, **operation_config) elif (polling is False): polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
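A hedged sketch of driving this synchronous delete operation; the credential type, client class, api_version pin, and every name below are assumptions about a typical msrestazure-era setup, not part of the record:

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient

credentials = ServicePrincipalCredentials(
    client_id="<client-id>", secret="<secret>", tenant="<tenant-id>"
)
# api_version pinned to match the 2018-10-01 operations documented here (assumption).
client = NetworkManagementClient(credentials, "<subscription-id>", api_version="2018-10-01")

# delete() returns an LROPoller; wait() blocks until Azure finishes removing the prefix.
poller = client.public_ip_prefixes.delete("<resource-group>", "<prefix-name>")
poller.wait()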
def get(self, resource_group_name, public_ip_prefix_name, expand=None, custom_headers=None, raw=False, **operation_config): 'Gets the specified public IP prefix in a specified resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_name: The name of the PublicIPPrefx.\n :type public_ip_prefix_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: PublicIPPrefix or ClientRawResponse if raw=true\n :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix or\n ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' url = self.get.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'publicIpPrefixName': self._serialize.url('public_ip_prefix_name', public_ip_prefix_name, 'str'), 'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str') if (expand is not None): query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if (self.config.accept_language is not None): header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if (response.status_code not in [200]): exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if (response.status_code == 200): deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
2,187,649,534,079,012,600
Gets the specified public IP prefix in a specified resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the PublicIPPrefix. :type public_ip_prefix_name: str :param expand: Expands referenced resources. :type expand: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: PublicIPPrefix or ClientRawResponse if raw=true :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py
get
acured/azure-sdk-for-python
python
def get(self, resource_group_name, public_ip_prefix_name, expand=None, custom_headers=None, raw=False, **operation_config): 'Gets the specified public IP prefix in a specified resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_name: The name of the PublicIPPrefx.\n :type public_ip_prefix_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: PublicIPPrefix or ClientRawResponse if raw=true\n :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix or\n ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' url = self.get.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'publicIpPrefixName': self._serialize.url('public_ip_prefix_name', public_ip_prefix_name, 'str'), 'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str') if (expand is not None): query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if (self.config.accept_language is not None): header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if (response.status_code not in [200]): exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if (response.status_code == 200): deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
def create_or_update(self, resource_group_name, public_ip_prefix_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config): 'Creates or updates a static or dynamic public IP prefix.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_name: The name of the public IP prefix.\n :type public_ip_prefix_name: str\n :param parameters: Parameters supplied to the create or update public\n IP prefix operation.\n :type parameters:\n ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns PublicIPPrefix or\n ClientRawResponse<PublicIPPrefix> if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, parameters=parameters, custom_headers=custom_headers, raw=True, **operation_config) def get_long_running_output(response): deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout) if (polling is True): polling_method = ARMPolling(lro_delay, **operation_config) elif (polling is False): polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
-6,732,961,279,870,233,000
Creates or updates a static or dynamic public IP prefix. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the public IP prefix. :type public_ip_prefix_name: str :param parameters: Parameters supplied to the create or update public IP prefix operation. :type parameters: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns PublicIPPrefix or ClientRawResponse<PublicIPPrefix> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py
create_or_update
acured/azure-sdk-for-python
python
def create_or_update(self, resource_group_name, public_ip_prefix_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config): 'Creates or updates a static or dynamic public IP prefix.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_name: The name of the public IP prefix.\n :type public_ip_prefix_name: str\n :param parameters: Parameters supplied to the create or update public\n IP prefix operation.\n :type parameters:\n ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns PublicIPPrefix or\n ClientRawResponse<PublicIPPrefix> if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, parameters=parameters, custom_headers=custom_headers, raw=True, **operation_config) def get_long_running_output(response): deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout) if (polling is True): polling_method = ARMPolling(lro_delay, **operation_config) elif (polling is False): polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
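create_or_update follows the same LROPoller pattern but hands back the created PublicIPPrefix; a sketch reusing the hypothetical client from the delete example (the request-body shape is an assumption, relying on msrest accepting plain dicts for model parameters):

poller = client.public_ip_prefixes.create_or_update(
    "<resource-group>", "<prefix-name>",
    {
        "location": "westus",
        "sku": {"name": "Standard"},
        "prefix_length": 30,  # /30 reserves four addresses (illustrative value)
    },
)
prefix = poller.result()  # deserialized PublicIPPrefix once provisioning completes
print(prefix.ip_prefix)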
def update_tags(self, resource_group_name, public_ip_prefix_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config): 'Updates public IP prefix tags.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_name: The name of the public IP prefix.\n :type public_ip_prefix_name: str\n :param tags: Resource tags.\n :type tags: dict[str, str]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns PublicIPPrefix or\n ClientRawResponse<PublicIPPrefix> if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' raw_result = self._update_tags_initial(resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, tags=tags, custom_headers=custom_headers, raw=True, **operation_config) def get_long_running_output(response): deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout) if (polling is True): polling_method = ARMPolling(lro_delay, **operation_config) elif (polling is False): polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
-3,271,771,562,177,810,400
Updates public IP prefix tags. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_prefix_name: The name of the public IP prefix. :type public_ip_prefix_name: str :param tags: Resource tags. :type tags: dict[str, str] :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns PublicIPPrefix or ClientRawResponse<PublicIPPrefix> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py
update_tags
acured/azure-sdk-for-python
python
def update_tags(self, resource_group_name, public_ip_prefix_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config): 'Updates public IP prefix tags.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_name: The name of the public IP prefix.\n :type public_ip_prefix_name: str\n :param tags: Resource tags.\n :type tags: dict[str, str]\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: The poller return type is ClientRawResponse, the\n direct response alongside the deserialized response\n :param polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :return: An instance of LROPoller that returns PublicIPPrefix or\n ClientRawResponse<PublicIPPrefix> if raw==True\n :rtype:\n ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]\n or\n ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' raw_result = self._update_tags_initial(resource_group_name=resource_group_name, public_ip_prefix_name=public_ip_prefix_name, tags=tags, custom_headers=custom_headers, raw=True, **operation_config) def get_long_running_output(response): deserialized = self._deserialize('PublicIPPrefix', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout) if (polling is True): polling_method = ARMPolling(lro_delay, **operation_config) elif (polling is False): polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
def list_all(self, custom_headers=None, raw=False, **operation_config): 'Gets all the public IP prefixes in a subscription.\n\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: An iterator like instance of PublicIPPrefix\n :rtype:\n ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' def internal_paging(next_link=None, raw=False): if (not next_link): url = self.list_all.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if (self.config.accept_language is not None): header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if (response.status_code not in [200]): exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response deserialized = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
11,541,604,707,037,170
Gets all the public IP prefixes in a subscription. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of PublicIPPrefix :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py
list_all
acured/azure-sdk-for-python
python
def list_all(self, custom_headers=None, raw=False, **operation_config): 'Gets all the public IP prefixes in a subscription.\n\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: An iterator like instance of PublicIPPrefix\n :rtype:\n ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' def internal_paging(next_link=None, raw=False): if (not next_link): url = self.list_all.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if (self.config.accept_language is not None): header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if (response.status_code not in [200]): exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response deserialized = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
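list_all returns a lazily paged iterator; with the same hypothetical client, walking it is a one-liner:

# Paged results fetch subsequent pages on demand as the loop advances.
for prefix in client.public_ip_prefixes.list_all():
    print(prefix.name, prefix.ip_prefix)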
def list(self, resource_group_name, custom_headers=None, raw=False, **operation_config): 'Gets all public IP prefixes in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: An iterator like instance of PublicIPPrefix\n :rtype:\n ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' def internal_paging(next_link=None, raw=False): if (not next_link): url = self.list.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if (self.config.accept_language is not None): header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if (response.status_code not in [200]): exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response deserialized = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
1,095,154,879,440,661,000
Gets all public IP prefixes in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of PublicIPPrefix :rtype: ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py
list
acured/azure-sdk-for-python
python
def list(self, resource_group_name, custom_headers=None, raw=False, **operation_config): 'Gets all public IP prefixes in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: An iterator like instance of PublicIPPrefix\n :rtype:\n ~azure.mgmt.network.v2018_10_01.models.PublicIPPrefixPaged[~azure.mgmt.network.v2018_10_01.models.PublicIPPrefix]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n ' def internal_paging(next_link=None, raw=False): if (not next_link): url = self.list.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if (self.config.accept_language is not None): header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if (response.status_code not in [200]): exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response deserialized = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.PublicIPPrefixPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
def run(self): 'connect and poll messages to queue' sock = None print(('Connecting to synchronous uhd message tcp port ' + str(self.port))) while self.q_quit.empty(): try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.ip_address, self.port)) break except socket.error: print(('connecting to synchronous uhd message tcp port ' + str(self.port))) sock.close() time.sleep(0.5) print(('Connected to synchronous uhd message tcp port ' + str(self.port))) sock.settimeout(None) s = '' while self.q_quit.empty(): try: while self.q_quit.empty(): s += sock.recv(self.packet_size) if (len(s) >= self.packet_size): break res_tuple = struct.unpack(self.packet_type, s[:self.packet_size]) s = s[self.packet_size:] self.queue.put(res_tuple) except socket.timeout: self.stop() traceback.print_exc() pass sock.close()
-653,668,052,842,803,300
connect and poll messages to queue
src/tcp_sync.py
run
Opendigitalradio/ODR-StaticPrecorrection
python
def run(self): sock = None print(('Connecting to synchronous uhd message tcp port ' + str(self.port))) while self.q_quit.empty(): try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.ip_address, self.port)) break except socket.error: print(('connecting to synchronous uhd message tcp port ' + str(self.port))) sock.close() time.sleep(0.5) print(('Connected to synchronous uhd message tcp port ' + str(self.port))) sock.settimeout(None) s = '' while self.q_quit.empty(): try: while self.q_quit.empty(): s += sock.recv(self.packet_size) if (len(s) >= self.packet_size): break res_tuple = struct.unpack(self.packet_type, s[:self.packet_size]) s = s[self.packet_size:] self.queue.put(res_tuple) except socket.timeout: self.stop() traceback.print_exc() pass sock.close()
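run() fills self.queue with tuples unpacked by struct.unpack; a consumer on the main thread would typically drain that queue. A heavily hedged sketch follows: the owning class name, its constructor arguments, and the stop() method are all inferred from the attributes run() touches, not taken from the record.

import queue

from tcp_sync import UhdSyncMsg  # hypothetical: class name and module layout are assumptions

# Constructor arguments are illustrative guesses based on the attributes run() uses.
msgs = UhdSyncMsg(ip_address="127.0.0.1", port=47009,
                  packet_size=3 * 8, packet_type="ddd")
try:
    while True:
        res_tuple = msgs.queue.get(timeout=1.0)  # tuples produced by struct.unpack in run()
        print(res_tuple)
except queue.Empty:
    pass
finally:
    msgs.stop()  # assumed to signal q_quit so run() exits and closes the socket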