| body (string, 26 to 98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (string, 1 class) | body_without_docstring (string, 20 to 98.2k chars) |
|---|---|---|---|---|---|---|---|
@property
def local_needrun_jobs(self):
'Iterate over all jobs that need to be run and are marked as local.'
return filter((lambda job: job.is_local), self.needrun_jobs) | 5,180,558,367,343,746,000 | Iterate over all jobs that need to be run and are marked as local. | snakemake/dag.py | local_needrun_jobs | baileythegreen/snakemake | python | @property
def local_needrun_jobs(self):
return filter((lambda job: job.is_local), self.needrun_jobs) |
@property
def finished_jobs(self):
' Iterate over all jobs that have been finished.'
for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)):
(yield job) | -962,557,771,915,621,600 | Iterate over all jobs that have been finished. | snakemake/dag.py | finished_jobs | baileythegreen/snakemake | python | @property
def finished_jobs(self):
' '
for job in filter(self.finished, self.bfs(self.dependencies, *self.targetjobs)):
(yield job) |
@property
def ready_jobs(self):
'Jobs that are ready to execute.'
return self._ready_jobs | 7,521,555,611,777,074,000 | Jobs that are ready to execute. | snakemake/dag.py | ready_jobs | baileythegreen/snakemake | python | @property
def ready_jobs(self):
return self._ready_jobs |
def needrun(self, job):
'Return whether a given job needs to be executed.'
return (job in self._needrun) | 1,641,981,298,269,334,000 | Return whether a given job needs to be executed. | snakemake/dag.py | needrun | baileythegreen/snakemake | python | def needrun(self, job):
return (job in self._needrun) |
def priority(self, job):
'Return priority of given job.'
return self._priority[job] | 526,851,191,883,921,540 | Return priority of given job. | snakemake/dag.py | priority | baileythegreen/snakemake | python | def priority(self, job):
return self._priority[job] |
def noneedrun_finished(self, job):
'\n Return whether a given job is finished or was not\n required to run at all.\n '
return ((not self.needrun(job)) or self.finished(job)) | -2,352,851,572,218,798,600 | Return whether a given job is finished or was not
required to run at all. | snakemake/dag.py | noneedrun_finished | baileythegreen/snakemake | python | def noneedrun_finished(self, job):
'\n Return whether a given job is finished or was not\n required to run at all.\n '
return ((not self.needrun(job)) or self.finished(job)) |
def reason(self, job):
' Return the reason of the job execution. '
return self._reason[job] | 4,667,444,684,062,257,000 | Return the reason of the job execution. | snakemake/dag.py | reason | baileythegreen/snakemake | python | def reason(self, job):
' '
return self._reason[job] |
def finished(self, job):
' Return whether a job is finished. '
return (job in self._finished) | 2,299,385,806,758,008,600 | Return whether a job is finished. | snakemake/dag.py | finished | baileythegreen/snakemake | python | def finished(self, job):
' '
return (job in self._finished) |
def dynamic(self, job):
'\n Return whether a job is dynamic (i.e. it is only a placeholder\n for those that are created after the job with dynamic output has\n finished.\n '
if job.is_group():
for j in job:
if (j in self._dynamic):
return True
... | 477,988,465,187,231,500 | Return whether a job is dynamic (i.e. it is only a placeholder
for those that are created after the job with dynamic output has
finished. | snakemake/dag.py | dynamic | baileythegreen/snakemake | python | def dynamic(self, job):
'\n Return whether a job is dynamic (i.e. it is only a placeholder\n for those that are created after the job with dynamic output has\n finished.\n '
if job.is_group():
for j in job:
if (j in self._dynamic):
return True
... |
def requested_files(self, job):
'Return the files a job requests.'
return set(*self.depending[job].values()) | -3,774,339,331,432,726,000 | Return the files a job requests. | snakemake/dag.py | requested_files | baileythegreen/snakemake | python | def requested_files(self, job):
return set(*self.depending[job].values()) |
@property
def incomplete_files(self):
'Return list of incomplete files.'
return list(chain(*(job.output for job in filter(self.workflow.persistence.incomplete, filterfalse(self.needrun, self.jobs))))) | -7,766,036,957,068,657,000 | Return list of incomplete files. | snakemake/dag.py | incomplete_files | baileythegreen/snakemake | python | @property
def incomplete_files(self):
return list(chain(*(job.output for job in filter(self.workflow.persistence.incomplete, filterfalse(self.needrun, self.jobs))))) |
@property
def newversion_files(self):
'Return list of files where the current version is newer than the\n recorded version.\n '
return list(chain(*(job.output for job in filter(self.workflow.persistence.newversion, self.jobs)))) | 1,139,763,053,158,034,400 | Return list of files where the current version is newer than the
recorded version. | snakemake/dag.py | newversion_files | baileythegreen/snakemake | python | @property
def newversion_files(self):
'Return list of files where the current version is newer than the\n recorded version.\n '
return list(chain(*(job.output for job in filter(self.workflow.persistence.newversion, self.jobs)))) |
def missing_temp(self, job):
'\n Return whether a temp file that is input of the given job is missing.\n '
for (job_, files) in self.depending[job].items():
if (self.needrun(job_) and any(((not f.exists) for f in files))):
return True
return False | -4,270,668,817,331,513,000 | Return whether a temp file that is input of the given job is missing. | snakemake/dag.py | missing_temp | baileythegreen/snakemake | python | def missing_temp(self, job):
'\n \n '
for (job_, files) in self.depending[job].items():
if (self.needrun(job_) and any(((not f.exists) for f in files))):
return True
return False |
def check_and_touch_output(self, job, wait=3, ignore_missing_output=False, no_touch=False, force_stay_on_remote=False):
' Raise exception if output files of job are missing. '
expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
if job.benchmark:
expanded_output.append(job.ben... | 5,602,922,416,562,306,000 | Raise exception if output files of job are missing. | snakemake/dag.py | check_and_touch_output | baileythegreen/snakemake | python | def check_and_touch_output(self, job, wait=3, ignore_missing_output=False, no_touch=False, force_stay_on_remote=False):
' '
expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
if job.benchmark:
expanded_output.append(job.benchmark)
if (not ignore_missing_output):
... |
def unshadow_output(self, job, only_log=False):
' Move files from shadow directory to real output paths. '
if ((not job.shadow_dir) or (not job.expanded_output)):
return
files = (job.log if only_log else chain(job.expanded_output, job.log))
for real_output in files:
shadow_output = job.s... | 3,537,507,914,298,335,000 | Move files from shadow directory to real output paths. | snakemake/dag.py | unshadow_output | baileythegreen/snakemake | python | def unshadow_output(self, job, only_log=False):
' '
if ((not job.shadow_dir) or (not job.expanded_output)):
return
files = (job.log if only_log else chain(job.expanded_output, job.log))
for real_output in files:
shadow_output = job.shadowed_path(real_output).file
if os.path.isli... |
def check_periodic_wildcards(self, job):
'Raise an exception if a wildcard of the given job appears to be periodic,\n indicating a cyclic dependency.'
for (wildcard, value) in job.wildcards_dict.items():
periodic_substring = self.periodic_wildcard_detector.is_periodic(value)
if (periodic_... | -2,250,608,271,693,119,000 | Raise an exception if a wildcard of the given job appears to be periodic,
indicating a cyclic dependency. | snakemake/dag.py | check_periodic_wildcards | baileythegreen/snakemake | python | def check_periodic_wildcards(self, job):
'Raise an exception if a wildcard of the given job appears to be periodic,\n indicating a cyclic dependency.'
for (wildcard, value) in job.wildcards_dict.items():
periodic_substring = self.periodic_wildcard_detector.is_periodic(value)
if (periodic_... |
def handle_protected(self, job):
' Write-protect output files that are marked with protected(). '
for f in job.expanded_output:
if (f in job.protected_output):
logger.info('Write-protecting output file {}.'.format(f))
f.protect() | -7,197,705,180,720,776,000 | Write-protect output files that are marked with protected(). | snakemake/dag.py | handle_protected | baileythegreen/snakemake | python | def handle_protected(self, job):
' '
for f in job.expanded_output:
if (f in job.protected_output):
logger.info('Write-protecting output file {}.'.format(f))
f.protect() |
def handle_touch(self, job):
' Touches those output files that are marked for touching. '
for f in job.expanded_output:
if (f in job.touch_output):
f = job.shadowed_path(f)
logger.info('Touching output file {}.'.format(f))
f.touch_or_create()
assert os.pat... | -2,172,067,755,928,961,800 | Touches those output files that are marked for touching. | snakemake/dag.py | handle_touch | baileythegreen/snakemake | python | def handle_touch(self, job):
' '
for f in job.expanded_output:
if (f in job.touch_output):
f = job.shadowed_path(f)
logger.info('Touching output file {}.'.format(f))
f.touch_or_create()
assert os.path.exists(f) |
def temp_size(self, job):
'Return the total size of temporary input files of the job.\n If none, return 0.\n '
return sum((f.size for f in self.temp_input(job))) | -5,253,043,208,774,059,000 | Return the total size of temporary input files of the job.
If none, return 0. | snakemake/dag.py | temp_size | baileythegreen/snakemake | python | def temp_size(self, job):
'Return the total size of temporary input files of the job.\n If none, return 0.\n '
return sum((f.size for f in self.temp_input(job))) |
def handle_temp(self, job):
' Remove temp files if they are no longer needed. Update temp_mtimes. '
if self.notemp:
return
is_temp = (lambda f: is_flagged(f, 'temp'))
needed = (lambda job_, f: any(((f in files) for (j, files) in self.depending[job_].items() if ((not self.finished(j)) and self.ne... | -5,018,095,018,264,098,000 | Remove temp files if they are no longer needed. Update temp_mtimes. | snakemake/dag.py | handle_temp | baileythegreen/snakemake | python | def handle_temp(self, job):
' '
if self.notemp:
return
is_temp = (lambda f: is_flagged(f, 'temp'))
needed = (lambda job_, f: any(((f in files) for (j, files) in self.depending[job_].items() if ((not self.finished(j)) and self.needrun(j) and (j != job)))))
def unneeded_files():
for ... |
def handle_remote(self, job, upload=True):
' Remove local files if they are no longer needed and upload. '
if upload:
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if (f.is_remote and (not f.should_stay_on_remote))... | 3,578,904,658,369,048,600 | Remove local files if they are no longer needed and upload. | snakemake/dag.py | handle_remote | baileythegreen/snakemake | python | def handle_remote(self, job, upload=True):
' '
if upload:
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if (f.is_remote and (not f.should_stay_on_remote)):
f.upload_to_remote()
remo... |
def jobid(self, job):
'Return job id of given job.'
if job.is_group():
return job.jobid
else:
return self._jobid[job] | -2,560,642,363,251,344,000 | Return job id of given job. | snakemake/dag.py | jobid | baileythegreen/snakemake | python | def jobid(self, job):
if job.is_group():
return job.jobid
else:
return self._jobid[job] |
def update(self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False):
' Update the DAG by adding given jobs and their dependencies. '
if (visited is None):
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=(not self.ignore_ambiguity))
cy... | -1,266,715,212,862,555,600 | Update the DAG by adding given jobs and their dependencies. | snakemake/dag.py | update | baileythegreen/snakemake | python | def update(self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False):
' '
if (visited is None):
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=(not self.ignore_ambiguity))
cycles = list()
for job in jobs:
logger.dag_debug... |
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
' Update the DAG by adding the given job and its dependencies. '
if (job in self.dependencies):
return
if (visited is None):
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
pote... | 8,019,392,418,157,816,000 | Update the DAG by adding the given job and its dependencies. | snakemake/dag.py | update_ | baileythegreen/snakemake | python | def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
' '
if (job in self.dependencies):
return
if (visited is None):
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(job... |
def update_needrun(self):
' Update the information whether a job needs to be executed. '
output_mintime = dict()
def update_output_mintime(job):
try:
return output_mintime[job]
except KeyError:
for job_ in chain([job], self.depending[job]):
try:
... | 6,314,524,535,821,029,000 | Update the information whether a job needs to be executed. | snakemake/dag.py | update_needrun | baileythegreen/snakemake | python | def update_needrun(self):
' '
output_mintime = dict()
def update_output_mintime(job):
try:
return output_mintime[job]
except KeyError:
for job_ in chain([job], self.depending[job]):
try:
t = output_mintime[job_]
ex... |
def in_until(self, job):
'Return whether given job has been specified via --until.'
return ((job.rule.name in self.untilrules) or (not self.untilfiles.isdisjoint(job.output))) | -4,675,211,391,535,302,000 | Return whether given job has been specified via --until. | snakemake/dag.py | in_until | baileythegreen/snakemake | python | def in_until(self, job):
return ((job.rule.name in self.untilrules) or (not self.untilfiles.isdisjoint(job.output))) |
def in_omitfrom(self, job):
'Return whether given job has been specified via --omit-from.'
return ((job.rule.name in self.omitrules) or (not self.omitfiles.isdisjoint(job.output))) | -4,318,886,760,568,708,600 | Return whether given job has been specified via --omit-from. | snakemake/dag.py | in_omitfrom | baileythegreen/snakemake | python | def in_omitfrom(self, job):
return ((job.rule.name in self.omitrules) or (not self.omitfiles.isdisjoint(job.output))) |
def until_jobs(self):
'Returns a generator of jobs specified by untiljobs.'
return (job for job in self.jobs if self.in_until(job)) | 6,464,025,500,891,526,000 | Returns a generator of jobs specified by untiljobs. | snakemake/dag.py | until_jobs | baileythegreen/snakemake | python | def until_jobs(self):
return (job for job in self.jobs if self.in_until(job)) |
def omitfrom_jobs(self):
'Returns a generator of jobs specified by omitfromjobs.'
return (job for job in self.jobs if self.in_omitfrom(job)) | 8,775,060,150,555,016,000 | Returns a generator of jobs specified by omitfromjobs. | snakemake/dag.py | omitfrom_jobs | baileythegreen/snakemake | python | def omitfrom_jobs(self):
return (job for job in self.jobs if self.in_omitfrom(job)) |
def downstream_of_omitfrom(self):
'Returns the downstream of --omit-from rules or files and themselves.'
return self.bfs(self.depending, *self.omitfrom_jobs()) | 814,192,902,394,369 | Returns the downstream of --omit-from rules or files and themselves. | snakemake/dag.py | downstream_of_omitfrom | baileythegreen/snakemake | python | def downstream_of_omitfrom(self):
return self.bfs(self.depending, *self.omitfrom_jobs()) |
def delete_omitfrom_jobs(self):
'Removes jobs downstream of jobs specified by --omit-from.'
if ((not self.omitrules) and (not self.omitfiles)):
return
downstream_jobs = list(self.downstream_of_omitfrom())
for job in downstream_jobs:
self.delete_job(job, recursive=False, add_dependencies=... | 2,595,008,063,148,844,500 | Removes jobs downstream of jobs specified by --omit-from. | snakemake/dag.py | delete_omitfrom_jobs | baileythegreen/snakemake | python | def delete_omitfrom_jobs(self):
if ((not self.omitrules) and (not self.omitfiles)):
return
downstream_jobs = list(self.downstream_of_omitfrom())
for job in downstream_jobs:
self.delete_job(job, recursive=False, add_dependencies=True) |
def set_until_jobs(self):
'Removes jobs downstream of jobs specified by --omit-from.'
if ((not self.untilrules) and (not self.untilfiles)):
return
self.targetjobs = set(self.until_jobs()) | -7,642,303,915,606,476,000 | Removes jobs downstream of jobs specified by --omit-from. | snakemake/dag.py | set_until_jobs | baileythegreen/snakemake | python | def set_until_jobs(self):
if ((not self.untilrules) and (not self.untilfiles)):
return
self.targetjobs = set(self.until_jobs()) |
def update_priority(self):
' Update job priorities. '
prioritized = (lambda job: ((job.rule in self.priorityrules) or (not self.priorityfiles.isdisjoint(job.output))))
for job in self.needrun_jobs:
self._priority[job] = job.rule.priority
for job in self.bfs(self.dependencies, *filter(prioritized... | 4,670,790,590,416,513,000 | Update job priorities. | snakemake/dag.py | update_priority | baileythegreen/snakemake | python | def update_priority(self):
' '
prioritized = (lambda job: ((job.rule in self.priorityrules) or (not self.priorityfiles.isdisjoint(job.output))))
for job in self.needrun_jobs:
self._priority[job] = job.rule.priority
for job in self.bfs(self.dependencies, *filter(prioritized, self.needrun_jobs), ... |
def update_ready(self, jobs=None):
'Update information whether a job is ready to execute.\n\n Given jobs must be needrun jobs!\n '
if (jobs is None):
jobs = self.needrun_jobs
candidate_groups = set()
for job in jobs:
if ((not self.finished(job)) and self._ready(job)):
... | 8,820,413,122,410,618,000 | Update information whether a job is ready to execute.
Given jobs must be needrun jobs! | snakemake/dag.py | update_ready | baileythegreen/snakemake | python | def update_ready(self, jobs=None):
'Update information whether a job is ready to execute.\n\n Given jobs must be needrun jobs!\n '
if (jobs is None):
jobs = self.needrun_jobs
candidate_groups = set()
for job in jobs:
if ((not self.finished(job)) and self._ready(job)):
... |
def close_remote_objects(self):
'Close all remote objects.'
for job in self.jobs:
if (not self.needrun(job)):
job.close_remote() | -7,580,807,009,841,187,000 | Close all remote objects. | snakemake/dag.py | close_remote_objects | baileythegreen/snakemake | python | def close_remote_objects(self):
for job in self.jobs:
if (not self.needrun(job)):
job.close_remote() |
def postprocess(self):
'Postprocess the DAG. This has to be invoked after any change to the\n DAG topology.'
self.update_jobids()
self.update_needrun()
self.update_priority()
self.handle_pipes()
self.update_groups()
self.update_ready()
self.close_remote_objects()
self.update_c... | -5,770,164,959,497,063,000 | Postprocess the DAG. This has to be invoked after any change to the
DAG topology. | snakemake/dag.py | postprocess | baileythegreen/snakemake | python | def postprocess(self):
'Postprocess the DAG. This has to be invoked after any change to the\n DAG topology.'
self.update_jobids()
self.update_needrun()
self.update_priority()
self.handle_pipes()
self.update_groups()
self.update_ready()
self.close_remote_objects()
self.update_c... |
def handle_pipes(self):
'Use pipes to determine job groups. Check if every pipe has exactly\n one consumer'
for job in self.needrun_jobs:
candidate_groups = set()
if (job.group is not None):
candidate_groups.add(job.group)
all_depending = set()
has_pipe = False... | -6,062,718,041,752,702,000 | Use pipes to determine job groups. Check if every pipe has exactly
one consumer | snakemake/dag.py | handle_pipes | baileythegreen/snakemake | python | def handle_pipes(self):
'Use pipes to determine job groups. Check if every pipe has exactly\n one consumer'
for job in self.needrun_jobs:
candidate_groups = set()
if (job.group is not None):
candidate_groups.add(job.group)
all_depending = set()
has_pipe = False... |
def _ready(self, job):
'Return whether the given job is ready to execute.'
group = self._group.get(job, None)
if (group is None):
is_external_needrun_dep = self.needrun
else:
def is_external_needrun_dep(j):
g = self._group.get(j, None)
return (self.needrun(j) and... | 6,166,242,618,882,280,000 | Return whether the given job is ready to execute. | snakemake/dag.py | _ready | baileythegreen/snakemake | python | def _ready(self, job):
group = self._group.get(job, None)
if (group is None):
is_external_needrun_dep = self.needrun
else:
def is_external_needrun_dep(j):
g = self._group.get(j, None)
return (self.needrun(j) and ((g is None) or (g != group)))
return self._fi... |
def update_checkpoint_dependencies(self, jobs=None):
'Update dependencies of checkpoints.'
updated = False
self.update_checkpoint_outputs()
if (jobs is None):
jobs = [job for job in self.jobs if (not self.needrun(job))]
for job in jobs:
if job.is_checkpoint:
depending = l... | -1,335,494,655,616,101,000 | Update dependencies of checkpoints. | snakemake/dag.py | update_checkpoint_dependencies | baileythegreen/snakemake | python | def update_checkpoint_dependencies(self, jobs=None):
updated = False
self.update_checkpoint_outputs()
if (jobs is None):
jobs = [job for job in self.jobs if (not self.needrun(job))]
for job in jobs:
if job.is_checkpoint:
depending = list(self.depending[job])
... |
def finish(self, job, update_dynamic=True):
'Finish a given job (e.g. remove from ready jobs, mark depending jobs\n as ready).'
try:
self._ready_jobs.remove(job)
except KeyError:
pass
if job.is_group():
jobs = job
else:
jobs = [job]
self._finished.update(jo... | -6,145,590,956,678,949,000 | Finish a given job (e.g. remove from ready jobs, mark depending jobs
as ready). | snakemake/dag.py | finish | baileythegreen/snakemake | python | def finish(self, job, update_dynamic=True):
'Finish a given job (e.g. remove from ready jobs, mark depending jobs\n as ready).'
try:
self._ready_jobs.remove(job)
except KeyError:
pass
if job.is_group():
jobs = job
else:
jobs = [job]
self._finished.update(jo... |
def new_job(self, rule, targetfile=None, format_wildcards=None):
'Create new job for given rule and (optional) targetfile.\n This will reuse existing jobs with the same wildcards.'
key = (rule, targetfile)
if (key in self.job_cache):
assert (targetfile is not None)
return self.job_cac... | -2,048,175,340,361,699,300 | Create new job for given rule and (optional) targetfile.
This will reuse existing jobs with the same wildcards. | snakemake/dag.py | new_job | baileythegreen/snakemake | python | def new_job(self, rule, targetfile=None, format_wildcards=None):
'Create new job for given rule and (optional) targetfile.\n This will reuse existing jobs with the same wildcards.'
key = (rule, targetfile)
if (key in self.job_cache):
assert (targetfile is not None)
return self.job_cac... |
def update_dynamic(self, job):
'Update the DAG by evaluating the output of the given job that\n contains dynamic output files.'
dynamic_wildcards = job.dynamic_wildcards
if (not dynamic_wildcards):
return
depending = list(filter((lambda job_: (not self.finished(job_))), self.bfs(self.depe... | -3,438,462,581,824,115,000 | Update the DAG by evaluating the output of the given job that
contains dynamic output files. | snakemake/dag.py | update_dynamic | baileythegreen/snakemake | python | def update_dynamic(self, job):
'Update the DAG by evaluating the output of the given job that\n contains dynamic output files.'
dynamic_wildcards = job.dynamic_wildcards
if (not dynamic_wildcards):
return
depending = list(filter((lambda job_: (not self.finished(job_))), self.bfs(self.depe... |
def delete_job(self, job, recursive=True, add_dependencies=False):
'Delete given job from DAG.'
if (job in self.targetjobs):
self.targetjobs.remove(job)
if add_dependencies:
for _job in self.dependencies[job]:
self.targetjobs.add(_job)
for job_ in self.depending[job]:
... | 2,529,409,256,004,886,500 | Delete given job from DAG. | snakemake/dag.py | delete_job | baileythegreen/snakemake | python | def delete_job(self, job, recursive=True, add_dependencies=False):
if (job in self.targetjobs):
self.targetjobs.remove(job)
if add_dependencies:
for _job in self.dependencies[job]:
self.targetjobs.add(_job)
for job_ in self.depending[job]:
del self.dependencies[job_]... |
def replace_job(self, job, newjob, recursive=True):
'Replace given job with new job.'
add_to_targetjobs = (job in self.targetjobs)
depending = list(self.depending[job].items())
if self.finished(job):
self._finished.add(newjob)
self.delete_job(job, recursive=recursive)
if add_to_targetjob... | -244,424,612,344,326,180 | Replace given job with new job. | snakemake/dag.py | replace_job | baileythegreen/snakemake | python | def replace_job(self, job, newjob, recursive=True):
add_to_targetjobs = (job in self.targetjobs)
depending = list(self.depending[job].items())
if self.finished(job):
self._finished.add(newjob)
self.delete_job(job, recursive=recursive)
if add_to_targetjobs:
self.targetjobs.add(ne... |
def specialize_rule(self, rule, newrule):
'Specialize the given rule by inserting newrule into the DAG.'
assert (newrule is not None)
self.rules.add(newrule)
self.update_output_index() | -3,487,190,096,571,759,000 | Specialize the given rule by inserting newrule into the DAG. | snakemake/dag.py | specialize_rule | baileythegreen/snakemake | python | def specialize_rule(self, rule, newrule):
assert (newrule is not None)
self.rules.add(newrule)
self.update_output_index() |
def is_batch_rule(self, rule):
'Return True if the underlying rule is to be used for batching the DAG.'
return ((self.batch is not None) and (rule.name == self.batch.rulename)) | 2,566,255,858,520,121,000 | Return True if the underlying rule is to be used for batching the DAG. | snakemake/dag.py | is_batch_rule | baileythegreen/snakemake | python | def is_batch_rule(self, rule):
return ((self.batch is not None) and (rule.name == self.batch.rulename)) |
def collect_potential_dependencies(self, job):
'Collect all potential dependencies of a job. These might contain\n ambiguities. The keys of the returned dict represent the files to be considered.'
dependencies = defaultdict(list)
file2jobs = self.file2jobs
input_files = list(job.unique_input)
... | 5,342,551,899,153,103,000 | Collect all potential dependencies of a job. These might contain
ambiguities. The keys of the returned dict represent the files to be considered. | snakemake/dag.py | collect_potential_dependencies | baileythegreen/snakemake | python | def collect_potential_dependencies(self, job):
'Collect all potential dependencies of a job. These might contain\n ambiguities. The keys of the returned dict represent the files to be considered.'
dependencies = defaultdict(list)
file2jobs = self.file2jobs
input_files = list(job.unique_input)
... |
def bfs(self, direction, *jobs, stop=(lambda job: False)):
'Perform a breadth-first traversal of the DAG.'
queue = list(jobs)
visited = set(queue)
while queue:
job = queue.pop(0)
if stop(job):
continue
(yield job)
for (job_, _) in direction[job].items():
... | -2,070,557,354,798,236,000 | Perform a breadth-first traversal of the DAG. | snakemake/dag.py | bfs | baileythegreen/snakemake | python | def bfs(self, direction, *jobs, stop=(lambda job: False)):
queue = list(jobs)
visited = set(queue)
while queue:
job = queue.pop(0)
if stop(job):
continue
(yield job)
for (job_, _) in direction[job].items():
if (not (job_ in visited)):
... |
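The `bfs` row above implements a generic breadth-first traversal over a job mapping. A minimal self-contained sketch of the same pattern, assuming `direction` maps each node to an iterable of neighbors (the `deps` graph and job names below are hypothetical stand-ins, not from snakemake):

```python
from collections import deque

def bfs(direction, *starts, stop=lambda node: False):
    """Yield nodes reachable from *starts* in breadth-first order."""
    queue = deque(starts)
    visited = set(starts)
    while queue:
        node = queue.popleft()   # FIFO queue gives level-by-level order
        if stop(node):
            continue             # prune: do not expand this node's neighbors
        yield node
        for neighbor in direction[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)

# Hypothetical dependency mapping: job -> set of upstream jobs
deps = {"all": {"plot"}, "plot": {"sort"}, "sort": {"download"}, "download": set()}
print(list(bfs(deps, "all")))  # ['all', 'plot', 'sort', 'download']
```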
def level_bfs(self, direction, *jobs, stop=(lambda job: False)):
'Perform a breadth-first traversal of the DAG, but also yield the\n level together with each job.'
queue = [(job, 0) for job in jobs]
visited = set(jobs)
while queue:
(job, level) = queue.pop(0)
if stop(job):
... | -1,120,438,311,960,962,200 | Perform a breadth-first traversal of the DAG, but also yield the
level together with each job. | snakemake/dag.py | level_bfs | baileythegreen/snakemake | python | def level_bfs(self, direction, *jobs, stop=(lambda job: False)):
'Perform a breadth-first traversal of the DAG, but also yield the\n level together with each job.'
queue = [(job, 0) for job in jobs]
visited = set(jobs)
while queue:
(job, level) = queue.pop(0)
if stop(job):
... |
def dfs(self, direction, *jobs, stop=(lambda job: False), post=True):
'Perform depth-first traversal of the DAG.'
visited = set()
def _dfs(job):
'Inner function for DFS traversal.'
if stop(job):
return
if (not post):
(yield job)
for job_ in direction[... | -7,262,718,796,230,219,000 | Perform depth-first traversal of the DAG. | snakemake/dag.py | dfs | baileythegreen/snakemake | python | def dfs(self, direction, *jobs, stop=(lambda job: False), post=True):
visited = set()
def _dfs(job):
'Inner function for DFS traversal.'
if stop(job):
return
if (not post):
(yield job)
for job_ in direction[job]:
if (not (job_ in visited)... |
def new_wildcards(self, job):
'Return wildcards that are newly introduced in this job,\n compared to its ancestors.'
new_wildcards = set(job.wildcards.items())
for job_ in self.dependencies[job]:
if (not new_wildcards):
return set()
for wildcard in job_.wildcards.items():
... | 5,009,472,403,097,837,000 | Return wildcards that are newly introduced in this job,
compared to its ancestors. | snakemake/dag.py | new_wildcards | baileythegreen/snakemake | python | def new_wildcards(self, job):
'Return wildcards that are newly introduced in this job,\n compared to its ancestors.'
new_wildcards = set(job.wildcards.items())
for job_ in self.dependencies[job]:
if (not new_wildcards):
return set()
for wildcard in job_.wildcards.items():
... |
def rule2job(self, targetrule):
'Generate a new job from a given rule.'
if targetrule.has_wildcards():
raise WorkflowError('Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards.')
return self.new_job(targetrule) | -7,220,431,080,580,572,000 | Generate a new job from a given rule. | snakemake/dag.py | rule2job | baileythegreen/snakemake | python | def rule2job(self, targetrule):
if targetrule.has_wildcards():
raise WorkflowError('Target rules may not contain wildcards. Please specify concrete files or a rule without wildcards.')
return self.new_job(targetrule) |
def archive(self, path):
'Archives workflow such that it can be re-run on a different system.\n\n Archiving includes git versioned files (i.e. Snakefiles, config files, ...),\n ancestral input files and conda environments.\n '
if path.endswith('.tar'):
mode = 'x'
elif path.endsw... | 6,564,548,577,266,580,000 | Archives workflow such that it can be re-run on a different system.
Archiving includes git versioned files (i.e. Snakefiles, config files, ...),
ancestral input files and conda environments. | snakemake/dag.py | archive | baileythegreen/snakemake | python | def archive(self, path):
'Archives workflow such that it can be re-run on a different system.\n\n Archiving includes git versioned files (i.e. Snakefiles, config files, ...),\n ancestral input files and conda environments.\n '
if path.endswith('.tar'):
mode = 'x'
elif path.endsw... |
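The `archive` body is truncated above; the visible `if`/`elif` chain suggests the tar write mode is picked from the target file's extension. A hedged sketch of that mode selection (the exact mapping beyond `.tar` is an assumption):

```python
import tarfile

def open_archive(path):
    """Open path for exclusive tar writing, mode chosen by extension (sketch)."""
    if path.endswith(".tar"):
        mode = "x"
    elif path.endswith(".tar.gz"):
        mode = "x:gz"       # assumed: gzip-compressed archive
    elif path.endswith(".tar.bz2"):
        mode = "x:bz2"      # assumed: bzip2-compressed archive
    elif path.endswith(".tar.xz"):
        mode = "x:xz"       # assumed: lzma-compressed archive
    else:
        raise ValueError("unsupported archive format: " + path)
    return tarfile.open(path, mode=mode)
```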
def clean(self, only_temp=False, dryrun=False):
'Removes files generated by the workflow.'
for job in self.jobs:
for f in job.output:
if ((not only_temp) or is_flagged(f, 'temp')):
if (f.exists or os.path.islink(f)):
if f.protected:
... | -4,303,512,907,294,990,300 | Removes files generated by the workflow. | snakemake/dag.py | clean | baileythegreen/snakemake | python | def clean(self, only_temp=False, dryrun=False):
for job in self.jobs:
for f in job.output:
if ((not only_temp) or is_flagged(f, 'temp')):
if (f.exists or os.path.islink(f)):
if f.protected:
logger.error('Skipping write-protected fi... |
def list_untracked(self):
'List files in the workdir that are not in the dag.'
used_files = set()
files_in_cwd = set()
for job in self.jobs:
used_files.update((os.path.relpath(file) for file in chain(job.local_input, job.local_output, job.log)))
for (root, dirs, files) in os.walk(os.getcwd()... | 7,058,179,587,227,485,000 | List files in the workdir that are not in the dag. | snakemake/dag.py | list_untracked | baileythegreen/snakemake | python | def list_untracked(self):
used_files = set()
files_in_cwd = set()
for job in self.jobs:
used_files.update((os.path.relpath(file) for file in chain(job.local_input, job.local_output, job.log)))
for (root, dirs, files) in os.walk(os.getcwd()):
files_in_cwd.update([os.path.relpath(os.p... |
def _dfs(job):
'Inner function for DFS traversal.'
if stop(job):
return
if (not post):
(yield job)
for job_ in direction[job]:
if (not (job_ in visited)):
visited.add(job_)
for j in _dfs(job_):
(yield j)
if post:
(yield job) | -4,692,494,413,225,559,000 | Inner function for DFS traversal. | snakemake/dag.py | _dfs | baileythegreen/snakemake | python | def _dfs(job):
if stop(job):
return
if (not post):
(yield job)
for job_ in direction[job]:
if (not (job_ in visited)):
visited.add(job_)
for j in _dfs(job_):
(yield j)
if post:
(yield job) |
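For contrast with the breadth-first helper, here is a runnable sketch of the post-order pattern that `_dfs` expresses: with `post=True` a node is yielded only after all of its children (graph and names again hypothetical):

```python
def dfs(direction, *starts, stop=lambda node: False, post=True):
    """Depth-first traversal; post=True yields parents after their children."""
    visited = set()

    def _dfs(node):
        if stop(node):
            return
        if not post:
            yield node              # pre-order: emit before descending
        for child in direction[node]:
            if child not in visited:
                visited.add(child)
                yield from _dfs(child)
        if post:
            yield node              # post-order: emit after all children

    for start in starts:
        if start not in visited:
            visited.add(start)
            yield from _dfs(start)

deps = {"all": ["plot"], "plot": ["sort"], "sort": []}
print(list(dfs(deps, "all")))  # ['sort', 'plot', 'all']
```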
def hsv_to_htmlhexrgb(h, s, v):
'Convert hsv colors to hex-encoded rgb colors usable by html.'
import colorsys
(hex_r, hex_g, hex_b) = (round((255 * x)) for x in colorsys.hsv_to_rgb(h, s, v))
return '#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}'.format(hex_r=hex_r, hex_g=hex_g, hex_b=hex_b) | 7,796,969,250,204,814,000 | Convert hsv colors to hex-encoded rgb colors usable by html. | snakemake/dag.py | hsv_to_htmlhexrgb | baileythegreen/snakemake | python | def hsv_to_htmlhexrgb(h, s, v):
import colorsys
(hex_r, hex_g, hex_b) = (round((255 * x)) for x in colorsys.hsv_to_rgb(h, s, v))
return '#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}'.format(hex_r=hex_r, hex_g=hex_g, hex_b=hex_b) |
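`hsv_to_htmlhexrgb` is shown in full and is self-contained, so a quick usage check (the function is restated so the snippet runs standalone; input values chosen arbitrarily for illustration):

```python
import colorsys

def hsv_to_htmlhexrgb(h, s, v):
    "Convert hsv colors to hex-encoded rgb colors usable by html."
    hex_r, hex_g, hex_b = (round(255 * x) for x in colorsys.hsv_to_rgb(h, s, v))
    return "#{hex_r:0>2X}{hex_g:0>2X}{hex_b:0>2X}".format(hex_r=hex_r, hex_g=hex_g, hex_b=hex_b)

print(hsv_to_htmlhexrgb(0.0, 1.0, 1.0))    # '#FF0000' (pure red)
print(hsv_to_htmlhexrgb(1 / 3, 1.0, 1.0))  # '#00FF00' (pure green)
print(hsv_to_htmlhexrgb(2 / 3, 0.5, 1.0))  # '#8080FF' (desaturated blue)
```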
def resolve_input_functions(input_files):
'Iterate over all input files and replace input functions\n with a fixed string.\n '
files = []
for f in input_files:
if callable(f):
files.append('<input function>')
else:
files.append(repr(f).strip("'")... | 5,267,305,400,582,140,000 | Iterate over all input files and replace input functions
with a fixed string. | snakemake/dag.py | resolve_input_functions | baileythegreen/snakemake | python | def resolve_input_functions(input_files):
'Iterate over all input files and replace input functions\n with a fixed string.\n '
files = []
for f in input_files:
if callable(f):
files.append('<input function>')
else:
files.append(repr(f).strip("'")... |
def html_node(node_id, node, color):
'Assemble a html style node for graphviz'
input_files = resolve_input_functions(node._input)
output_files = [repr(f).strip("'") for f in node._output]
input_header = ('<b><font point-size="14">↪ input</font></b>' if input_files else '')
output_header = ('<b... | -4,481,357,097,268,225,500 | Assemble a html style node for graphviz | snakemake/dag.py | html_node | baileythegreen/snakemake | python | def html_node(node_id, node, color):
input_files = resolve_input_functions(node._input)
output_files = [repr(f).strip("'") for f in node._output]
input_header = ('<b><font point-size="14">&#8618; input</font></b>' if input_files else '')
output_header = ('<b><font point-size="14">output →</font>... |
def test_single_page_does_not_include_any_pagination_controls():
'\n When there is only a single page, no pagination controls should render.\n '
url = URL('/')
controls = get_page_controls(url, current_page=1, total_pages=1)
assert (controls == []) | 8,805,130,570,875,188,000 | When there is only a single page, no pagination controls should render. | tests/test_pagination.py | test_single_page_does_not_include_any_pagination_controls | encode/dashboard | python | def test_single_page_does_not_include_any_pagination_controls():
'\n \n '
url = URL('/')
controls = get_page_controls(url, current_page=1, total_pages=1)
assert (controls == []) |
def test_first_page_in_pagination_controls():
'\n First page in pagination controls, should render as:\n Previous [1] 2 3 4 5 Next\n '
url = URL('/')
controls = get_page_controls(url, current_page=1, total_pages=5)
assert (controls == [PageControl(text='Previous', is_disabled=True), PageControl... | -1,796,014,110,327,840,500 | First page in pagination controls, should render as:
Previous [1] 2 3 4 5 Next | tests/test_pagination.py | test_first_page_in_pagination_controls | encode/dashboard | python | def test_first_page_in_pagination_controls():
'\n First page in pagination controls, should render as:\n Previous [1] 2 3 4 5 Next\n '
url = URL('/')
controls = get_page_controls(url, current_page=1, total_pages=5)
assert (controls == [PageControl(text='Previous', is_disabled=True), PageControl... |
def test_second_page_in_pagination_controls():
'\n Second page in pagination controls, should render as:\n Previous 1 [2] 3 4 5 Next\n '
url = URL('/')
controls = get_page_controls(url, current_page=2, total_pages=5)
assert (controls == [PageControl(text='Previous', url=URL('/')), PageControl(t... | -2,232,584,472,365,900,300 | Second page in pagination controls, should render as:
Previous 1 [2] 3 4 5 Next | tests/test_pagination.py | test_second_page_in_pagination_controls | encode/dashboard | python | def test_second_page_in_pagination_controls():
'\n Second page in pagination controls, should render as:\n Previous 1 [2] 3 4 5 Next\n '
url = URL('/')
controls = get_page_controls(url, current_page=2, total_pages=5)
assert (controls == [PageControl(text='Previous', url=URL('/')), PageControl(t... |
def test_middle_page_in_pagination_controls():
'\n Middle page in pagination controls, should render as:\n Previous 1 2 [3] 4 5 Next\n '
url = URL('/?page=3')
controls = get_page_controls(url, current_page=3, total_pages=5)
assert (controls == [PageControl(text='Previous', url=URL('/?page=2')),... | 7,354,472,169,436,877,000 | Middle page in pagination controls, should render as:
Previous 1 2 [3] 4 5 Next | tests/test_pagination.py | test_middle_page_in_pagination_controls | encode/dashboard | python | def test_middle_page_in_pagination_controls():
'\n Middle page in pagination controls, should render as:\n Previous 1 2 [3] 4 5 Next\n '
url = URL('/?page=3')
controls = get_page_controls(url, current_page=3, total_pages=5)
assert (controls == [PageControl(text='Previous', url=URL('/?page=2')),... |
def test_last_page_in_pagination_controls():
'\n Last page in pagination controls, should render as:\n Previous 1 2 3 4 [5] Next\n '
url = URL('/?page=5')
controls = get_page_controls(url, current_page=5, total_pages=5)
assert (controls == [PageControl(text='Previous', url=URL('/?page=4')), Pag... | 9,106,975,628,763,386,000 | Last page in pagination controls, should render as:
Previous 1 2 3 4 [5] Next | tests/test_pagination.py | test_last_page_in_pagination_controls | encode/dashboard | python | def test_last_page_in_pagination_controls():
'\n Last page in pagination controls, should render as:\n Previous 1 2 3 4 [5] Next\n '
url = URL('/?page=5')
controls = get_page_controls(url, current_page=5, total_pages=5)
assert (controls == [PageControl(text='Previous', url=URL('/?page=4')), Pag... |
def test_first_page_in_long_pagination_controls():
'\n First page in long pagination controls, should render as:\n Previous [1] 2 3 4 5 ... 49 50 Next\n '
url = URL('/')
controls = get_page_controls(url, current_page=1, total_pages=50)
assert (controls == [PageControl(text='Previous', is_disabl... | 1,161,099,459,008,527,000 | First page in long pagination controls, should render as:
Previous [1] 2 3 4 5 ... 49 50 Next | tests/test_pagination.py | test_first_page_in_long_pagination_controls | encode/dashboard | python | def test_first_page_in_long_pagination_controls():
'\n First page in long pagination controls, should render as:\n Previous [1] 2 3 4 5 ... 49 50 Next\n '
url = URL('/')
controls = get_page_controls(url, current_page=1, total_pages=50)
assert (controls == [PageControl(text='Previous', is_disabl... |
def test_last_page_in_long_pagination_controls():
'\n Last page in long pagination controls, should render as:\n Previous 1 2 ... 46 47 48 49 [50] Next\n '
url = URL('/?page=50')
controls = get_page_controls(url, current_page=50, total_pages=50)
assert (controls == [PageControl(text='Previous',... | 2,766,726,091,038,919,700 | Last page in long pagination controls, should render as:
Previous 1 2 ... 46 47 48 49 [50] Next | tests/test_pagination.py | test_last_page_in_long_pagination_controls | encode/dashboard | python | def test_last_page_in_long_pagination_controls():
'\n Last page in long pagination controls, should render as:\n Previous 1 2 ... 46 47 48 49 [50] Next\n '
url = URL('/?page=50')
controls = get_page_controls(url, current_page=50, total_pages=50)
assert (controls == [PageControl(text='Previous',... |
def test_ellipsis_fill_in():
'\n If an ellipsis marker can be replaced with a single page marker, then\n we should do so.\n '
url = URL('/?page=6')
controls = get_page_controls(url, current_page=6, total_pages=11)
assert (controls == [PageControl(text='Previous', url=URL('/?page=5')), PageContr... | -3,769,510,764,230,896,600 | If an ellipsis marker can be replaced with a single page marker, then
we should do so. | tests/test_pagination.py | test_ellipsis_fill_in | encode/dashboard | python | def test_ellipsis_fill_in():
'\n If an ellipsis marker can be replaced with a single page marker, then\n we should do so.\n '
url = URL('/?page=6')
controls = get_page_controls(url, current_page=6, total_pages=11)
assert (controls == [PageControl(text='Previous', url=URL('/?page=5')), PageContr... |
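The pagination tests above collectively pin down the behavior of `get_page_controls`. A minimal sketch of the windowing logic they imply; note that the `PageControl` fields, the `window=4` radius, and the single-page gap fill-in rule are all inferred from the assertions, and the real encode/dashboard implementation may differ (for instance, the tests build the page-1 link as a plain `URL('/')`, while this sketch always appends `?page=N`):

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class PageControl:                      # hypothetical stand-in for the real class
    text: str
    url: Optional[str] = None
    is_active: bool = False
    is_disabled: bool = False

def get_page_controls(base_url: str, current_page: int,
                      total_pages: int, window: int = 4) -> List[PageControl]:
    if total_pages <= 1:
        return []                       # single page: no controls at all
    # Always show the first two and last two pages, plus a window around current.
    shown = {1, 2, total_pages - 1, total_pages}
    shown |= set(range(current_page - window, current_page + window + 1))
    shown = {p for p in shown if 1 <= p <= total_pages}
    # Fill in a gap of exactly one page rather than rendering an ellipsis for it.
    for p in range(2, total_pages):
        if p not in shown and (p - 1) in shown and (p + 1) in shown:
            shown.add(p)
    controls = [PageControl("Previous",
                            url=None if current_page == 1 else f"{base_url}?page={current_page - 1}",
                            is_disabled=current_page == 1)]
    previous = 0
    for p in sorted(shown):
        if p != previous + 1:
            controls.append(PageControl("...", is_disabled=True))   # gap marker
        controls.append(PageControl(str(p), url=f"{base_url}?page={p}",
                                    is_active=(p == current_page)))
        previous = p
    controls.append(PageControl("Next",
                                url=None if current_page == total_pages else f"{base_url}?page={current_page + 1}",
                                is_disabled=current_page == total_pages))
    return controls

# e.g. page 1 of 50 -> Previous 1 2 3 4 5 ... 49 50 Next
for c in get_page_controls("/", current_page=1, total_pages=50):
    print(c.text, end=" ")
```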
def __init__(self, ds, fields, start_point, end_point, npoints, figure_size=5.0, fontsize=14.0, field_labels=None):
'\n Sets up figure and axes\n '
line = LineBuffer(ds, start_point, end_point, npoints, label=None)
self.lines = [line]
self._initialize_instance(self, ds, fields, figure_size... | 5,028,115,850,037,134,000 | Sets up figure and axes | yt/visualization/line_plot.py | __init__ | smressle/yt | python | def __init__(self, ds, fields, start_point, end_point, npoints, figure_size=5.0, fontsize=14.0, field_labels=None):
'\n \n '
line = LineBuffer(ds, start_point, end_point, npoints, label=None)
self.lines = [line]
self._initialize_instance(self, ds, fields, figure_size, fontsize, field_label... |
@classmethod
def from_lines(cls, ds, fields, lines, figure_size=5.0, font_size=14.0, field_labels=None):
"\n A class method for constructing a line plot from multiple sampling lines\n\n Parameters\n ----------\n\n ds : :class:`yt.data_objects.static_output.Dataset`\n This is t... | -8,590,241,957,368,003,000 | A class method for constructing a line plot from multiple sampling lines
Parameters
----------
ds : :class:`yt.data_objects.static_output.Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
fields : field name or list of field names
The name(s) of the field(s) to be p... | yt/visualization/line_plot.py | from_lines | smressle/yt | python | @classmethod
def from_lines(cls, ds, fields, lines, figure_size=5.0, font_size=14.0, field_labels=None):
"\n A class method for constructing a line plot from multiple sampling lines\n\n Parameters\n ----------\n\n ds : :class:`yt.data_objects.static_output.Dataset`\n This is t... |
@invalidate_plot
def annotate_legend(self, field):
'\n Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions`\n call ensures that a legend label will be added for every field of\n a multi-field plot\n '
dim_field = self.plots._sanitize_dimensions(field)
self.include... | -7,778,326,172,680,734,000 | Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions`
call ensures that a legend label will be added for every field of
a multi-field plot | yt/visualization/line_plot.py | annotate_legend | smressle/yt | python | @invalidate_plot
def annotate_legend(self, field):
'\n Adds a legend to the `LinePlot` instance. The `_sanitize_dimensions`\n call ensures that a legend label will be added for every field of\n a multi-field plot\n '
dim_field = self.plots._sanitize_dimensions(field)
self.include... |
@invalidate_plot
def set_x_unit(self, unit_name):
'Set the unit to use along the x-axis\n\n Parameters\n ----------\n unit_name: str\n The name of the unit to use for the x-axis unit\n '
self._x_unit = unit_name | -1,050,713,432,808,144,600 | Set the unit to use along the x-axis
Parameters
----------
unit_name: str
The name of the unit to use for the x-axis unit | yt/visualization/line_plot.py | set_x_unit | smressle/yt | python | @invalidate_plot
def set_x_unit(self, unit_name):
'Set the unit to use along the x-axis\n\n Parameters\n ----------\n unit_name: str\n The name of the unit to use for the x-axis unit\n '
self._x_unit = unit_name |
@invalidate_plot
def set_unit(self, field, unit_name):
'Set the unit used to plot the field\n\n Parameters\n ----------\n field: str or field tuple\n The name of the field to set the units for\n unit_name: str\n The name of the unit to use for this field\n '
... | -5,547,905,575,005,278,000 | Set the unit used to plot the field
Parameters
----------
field: str or field tuple
The name of the field to set the units for
unit_name: str
The name of the unit to use for this field | yt/visualization/line_plot.py | set_unit | smressle/yt | python | @invalidate_plot
def set_unit(self, field, unit_name):
'Set the unit used to plot the field\n\n Parameters\n ----------\n field: str or field tuple\n The name of the field to set the units for\n unit_name: str\n The name of the unit to use for this field\n '
... |
@invalidate_plot
def annotate_title(self, field, title):
'Set the unit used to plot the field\n\n Parameters\n ----------\n field: str or field tuple\n The name of the field to set the units for\n title: str\n The title to use for the plot\n '
self._titles[... | -773,530,870,880,534,000 | Set the unit used to plot the field
Parameters
----------
field: str or field tuple
The name of the field to set the units for
title: str
The title to use for the plot | yt/visualization/line_plot.py | annotate_title | smressle/yt | python | @invalidate_plot
def annotate_title(self, field, title):
'Set the unit used to plot the field\n\n Parameters\n ----------\n field: str or field tuple\n The name of the field to set the units for\n title: str\n The title to use for the plot\n '
self._titles[... |
def unset_macosx_deployment_target():
'Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable\n libraries\n '
if ('MACOSX_DEPLOYMENT_TARGET' in os.environ):
del os.environ['MACOSX_DEPLOYMENT_TARGET'] | 469,749,772,178,140,860 | Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable
libraries | numba/tests/test_pycc.py | unset_macosx_deployment_target | eric-erki/numba | python | def unset_macosx_deployment_target():
'Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable\n libraries\n '
if ('MACOSX_DEPLOYMENT_TARGET' in os.environ):
del os.environ['MACOSX_DEPLOYMENT_TARGET'] |
def test_pycc_ctypes_lib(self):
'\n Test creating a C shared library object using pycc.\n '
source = os.path.join(base_path, 'compile_with_pycc.py')
cdll_modulename = ('test_dll_legacy' + find_shared_ending())
cdll_path = os.path.join(self.tmpdir, cdll_modulename)
if os.path.exists(cdl... | 4,948,081,442,505,072,000 | Test creating a C shared library object using pycc. | numba/tests/test_pycc.py | test_pycc_ctypes_lib | eric-erki/numba | python | def test_pycc_ctypes_lib(self):
'\n \n '
source = os.path.join(base_path, 'compile_with_pycc.py')
cdll_modulename = ('test_dll_legacy' + find_shared_ending())
cdll_path = os.path.join(self.tmpdir, cdll_modulename)
if os.path.exists(cdll_path):
os.unlink(cdll_path)
main(args... |
def test_pycc_pymodule(self):
'\n Test creating a CPython extension module using pycc.\n '
self.skipTest('lack of environment can make the extension crash')
source = os.path.join(base_path, 'compile_with_pycc.py')
modulename = 'test_pyext_legacy'
out_modulename = os.path.join(self.tmpd... | 6,336,552,434,764,369,000 | Test creating a CPython extension module using pycc. | numba/tests/test_pycc.py | test_pycc_pymodule | eric-erki/numba | python | def test_pycc_pymodule(self):
'\n \n '
self.skipTest('lack of environment can make the extension crash')
source = os.path.join(base_path, 'compile_with_pycc.py')
modulename = 'test_pyext_legacy'
out_modulename = os.path.join(self.tmpdir, (modulename + find_pyext_ending()))
if os.pa... |
def test_pycc_bitcode(self):
'\n Test creating a LLVM bitcode file using pycc.\n '
modulename = os.path.join(base_path, 'compile_with_pycc')
bitcode_modulename = os.path.join(self.tmpdir, 'test_bitcode_legacy.bc')
if os.path.exists(bitcode_modulename):
os.unlink(bitcode_modulename)... | 1,490,123,849,608,073,500 | Test creating a LLVM bitcode file using pycc. | numba/tests/test_pycc.py | test_pycc_bitcode | eric-erki/numba | python | def test_pycc_bitcode(self):
'\n \n '
modulename = os.path.join(base_path, 'compile_with_pycc')
bitcode_modulename = os.path.join(self.tmpdir, 'test_bitcode_legacy.bc')
if os.path.exists(bitcode_modulename):
os.unlink(bitcode_modulename)
main(args=['--debug', '--llvm', '-o', bi... |
async def get(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> '_models.AgentPool':
'Gets the detailed information for a given agent pool.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :typ... | 6,863,132,846,442,995,000 | Gets the detailed information for a given agent pool.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool... | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py | get | AFengKK/azure-sdk-for-python | python | async def get(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> '_models.AgentPool':
'Gets the detailed information for a given agent pool.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :typ... |
async def begin_create(self, resource_group_name: str, registry_name: str, agent_pool_name: str, agent_pool: '_models.AgentPool', **kwargs: Any) -> AsyncLROPoller['_models.AgentPool']:
'Creates an agent pool for a container registry with the specified parameters.\n\n :param resource_group_name: The name of t... | -1,030,239,877,524,509,000 | Creates an agent pool for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: Th... | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py | begin_create | AFengKK/azure-sdk-for-python | python | async def begin_create(self, resource_group_name: str, registry_name: str, agent_pool_name: str, agent_pool: '_models.AgentPool', **kwargs: Any) -> AsyncLROPoller['_models.AgentPool']:
'Creates an agent pool for a container registry with the specified parameters.\n\n :param resource_group_name: The name of t... |
async def begin_delete(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
'Deletes a specified agent pool resource.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type r... | -4,383,831,536,828,033,000 | Deletes a specified agent pool resource.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool.
:type agent... | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py | begin_delete | AFengKK/azure-sdk-for-python | python | async def begin_delete(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
'Deletes a specified agent pool resource.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n :type r... |
async def begin_update(self, resource_group_name: str, registry_name: str, agent_pool_name: str, update_parameters: '_models.AgentPoolUpdateParameters', **kwargs: Any) -> AsyncLROPoller['_models.AgentPool']:
'Updates an agent pool with the specified parameters.\n\n :param resource_group_name: The name of the... | -6,170,495,838,415,589,000 | Updates an agent pool with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool.... | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py | begin_update | AFengKK/azure-sdk-for-python | python | async def begin_update(self, resource_group_name: str, registry_name: str, agent_pool_name: str, update_parameters: '_models.AgentPoolUpdateParameters', **kwargs: Any) -> AsyncLROPoller['_models.AgentPool']:
'Updates an agent pool with the specified parameters.\n\n :param resource_group_name: The name of the... |
def list(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> AsyncIterable['_models.AgentPoolListResult']:
'Lists all the agent pools for a specified container registry.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n ... | 4,120,430,077,457,207,300 | Lists all the agent pools for a specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or fu... | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py | list | AFengKK/azure-sdk-for-python | python | def list(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> AsyncIterable['_models.AgentPoolListResult']:
'Lists all the agent pools for a specified container registry.\n\n :param resource_group_name: The name of the resource group to which the container registry\n belongs.\n ... |
async def get_queue_status(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> '_models.AgentPoolQueueStatus':
'Gets the count of queued runs for a given agent pool.\n\n :param resource_group_name: The name of the resource group to which the container registry\n ... | 2,766,069,385,546,253,000 | Gets the count of queued runs for a given agent pool.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param agent_pool_name: The name of the agent pool... | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_06_01_preview/aio/operations/_agent_pools_operations.py | get_queue_status | AFengKK/azure-sdk-for-python | python | async def get_queue_status(self, resource_group_name: str, registry_name: str, agent_pool_name: str, **kwargs: Any) -> '_models.AgentPoolQueueStatus':
'Gets the count of queued runs for a given agent pool.\n\n :param resource_group_name: The name of the resource group to which the container registry\n ... |
def check_lnotab(self, code):
'Check that the lnotab byte offsets are sensible.'
code = dis._get_code_object(code)
lnotab = list(dis.findlinestarts(code))
min_bytecode = min((t[0] for t in lnotab))
max_bytecode = max((t[0] for t in lnotab))
self.assertGreaterEqual(min_bytecode, 0)
self.asser... | 566,996,500,382,314,560 | Check that the lnotab byte offsets are sensible. | www/src/Lib/test/test_peepholer.py | check_lnotab | Froggo8311/brython | python | def check_lnotab(self, code):
code = dis._get_code_object(code)
lnotab = list(dis.findlinestarts(code))
min_bytecode = min((t[0] for t in lnotab))
max_bytecode = max((t[0] for t in lnotab))
self.assertGreaterEqual(min_bytecode, 0)
self.assertLess(max_bytecode, len(code.co_code)) |
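check_lnotab validates the line-number table of a code object via dis.findlinestarts, which yields (bytecode offset, line number) pairs. A small self-contained demonstration using only the public dis API (the test itself goes through the private dis._get_code_object so it can accept functions, strings, and raw code objects alike):

import dis

def sample():
    x = 1
    y = 2
    return x + y

# Each pair is (bytecode offset, source line number); the test above asserts
# that every offset lies within [0, len(code.co_code)).
for offset, lineno in dis.findlinestarts(sample.__code__):
    print(offset, lineno)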
def f():
'Adding a docstring made this test fail in Py2.5.0'
return None | -8,453,757,884,726,220,000 | Adding a docstring made this test fail in Py2.5.0 | www/src/Lib/test/test_peepholer.py | f | Froggo8311/brython | python | def f():
return None |
def _extract3d(zipFileDir, destDirectory, outputFileName):
' a wrapper function for the recursive file extraction function '
with zipfile.ZipFile(zipFileDir) as zipFile:
_extract3dRecursively(zipFile.namelist(), zipFile, destDirectory, outputFileName) | 6,251,360,792,502,066,000 | a wrapper function for the recursive file extraction function | fusion123/converter.py | _extract3d | bennymeg/123-Fusion | python | def _extract3d(zipFileDir, destDirectory, outputFileName):
' '
with zipfile.ZipFile(zipFileDir) as zipFile:
_extract3dRecursively(zipFile.namelist(), zipFile, destDirectory, outputFileName) |
def _extract3dRecursively(fileList, baseZipFile, destDirectory, outputFileName, numOfFileExtracted=0):
' extracts all the illustrations and models from the 123dx file recursively '
imageExtList = ['.jpg', '.png']
fusionExtList = ['.smt', '.smb', '.sat', '.igs', '.dxf', '.stp', '.stl']
for member in fileL... | 3,192,290,431,466,725,000 | extracts all the illustrations and models from the 123dx file recursively | fusion123/converter.py | _extract3dRecursively | bennymeg/123-Fusion | python | def _extract3dRecursively(fileList, baseZipFile, destDirectory, outputFileName, numOfFileExtracted=0):
' '
imageExtList = ['.jpg', '.png']
fusionExtList = ['.smt', '.smb', '.sat', '.igs', '.dxf', '.stp', '.stl']
for member in fileList:
if os.path.isdir(member):
_extract3dRecursively... |
def _execute(srcDirectory, destDirectory, filename):
' converts the file into a Fusion 360 file (this file might be usable in other CAD software as well) '
outputFileName = os.path.splitext(os.path.basename(filename))[0]
newFileName = (outputFileName + '.zip')
oldFilePath = os.path.join(srcDirectory, file... | 7,805,116,768,547,185,000 | converts the file into a Fusion 360 file (this file might be usable in other CAD software as well) | fusion123/converter.py | _execute | bennymeg/123-Fusion | python | def _execute(srcDirectory, destDirectory, filename):
' '
outputFileName = os.path.splitext(os.path.basename(filename))[0]
newFileName = (outputFileName + '.zip')
oldFilePath = os.path.join(srcDirectory, filename)
newFilePath = os.path.join(srcDirectory, newFileName)
os.rename(oldFilePath, newFi... |
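Taken together, the three converter functions treat a .123dx file as a zip container: _execute renames it to .zip, _extract3d opens it, and _extract3dRecursively pulls out members whose extensions match the image and CAD-model lists, recursing into nested archives. A flattened sketch of the same idea with hypothetical names — zipfile can open the archive without the rename, and this version skips the nested-zip recursion:

import os
import zipfile

IMAGE_EXTS = {'.jpg', '.png'}
MODEL_EXTS = {'.smt', '.smb', '.sat', '.igs', '.dxf', '.stp', '.stl'}

def extract_assets(archive_path, dest_dir):
    # Extract every member whose extension matches a known image or model format.
    with zipfile.ZipFile(archive_path) as zf:
        for member in zf.namelist():
            ext = os.path.splitext(member)[1].lower()
            if ext in IMAGE_EXTS | MODEL_EXTS:
                zf.extract(member, dest_dir)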
def sumOfLeftLeaves(self, root):
'\n :type root: TreeNode\n :rtype: int\n '
while (not root):
return 0
if (root.left and (not root.left.left) and (not root.left.right)):
return (root.left.val + self.sumOfLeftLeaves(root.right))
return (self.sumOfLeftLeaves(root.left)... | -1,436,847,296,584,454,100 | :type root: TreeNode
:rtype: int | Python/404sum_of_left_leaves.py | sumOfLeftLeaves | Apocrypse/LeetCode | python | def sumOfLeftLeaves(self, root):
'\n :type root: TreeNode\n :rtype: int\n '
while (not root):
return 0
if (root.left and (not root.left.left) and (not root.left.right)):
return (root.left.val + self.sumOfLeftLeaves(root.right))
return (self.sumOfLeftLeaves(root.left)... |
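The recursion above adds a left child's value whenever that child is a leaf, and otherwise descends into both subtrees (the while in the original body is effectively an if, since the loop body returns unconditionally). The same logic as a standalone function, with the usual LeetCode TreeNode shape assumed:

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def sum_of_left_leaves(root):
    if root is None:
        return 0
    if root.left and not root.left.left and not root.left.right:
        # Left child is a leaf: take its value, then only the right subtree remains.
        return root.left.val + sum_of_left_leaves(root.right)
    return sum_of_left_leaves(root.left) + sum_of_left_leaves(root.right)

# Example: the tree [3, 9, 20, None, None, 15, 7] has left leaves 9 and 15.
root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
assert sum_of_left_leaves(root) == 24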
def __init__(self, connection=None, nodename=None):
'Initialize this Item'
self._connection = connection
self._nodename = nodename
self._nodepath = []
self._curobj = None
self._xml = StringIO() | -429,282,253,661,561,800 | Initialize this Item | desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py | __init__ | 10088/hue | python | def __init__(self, connection=None, nodename=None):
self._connection = connection
self._nodename = nodename
self._nodepath = []
self._curobj = None
self._xml = StringIO() |
def __init__(self, connection=None):
'Initialize this Item'
ResponseGroup.__init__(self, connection, 'Item') | -7,527,589,324,602,155,000 | Initialize this Item | desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py | __init__ | 10088/hue | python | def __init__(self, connection=None):
ResponseGroup.__init__(self, connection, 'Item') |
def __next__(self):
'Special paging functionality'
if (self.iter is None):
self.iter = iter(self.objs)
try:
return next(self.iter)
except StopIteration:
self.iter = None
self.objs = []
if (int(self.page) < int(self.total_pages)):
self.page += 1
... | 4,520,251,078,756,440,600 | Special paging functionality | desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py | __next__ | 10088/hue | python | def __next__(self):
if (self.iter is None):
self.iter = iter(self.objs)
try:
return next(self.iter)
except StopIteration:
self.iter = None
self.objs = []
if (int(self.page) < int(self.total_pages)):
self.page += 1
self._connection.get_resp... |
def to_xml(self):
'Override to first fetch everything'
for item in self:
pass
return ResponseGroup.to_xml(self) | -1,328,991,912,337,954,600 | Override to first fetch everything | desktop/core/ext-py/boto-2.46.1/boto/ecs/item.py | to_xml | 10088/hue | python | def to_xml(self):
for item in self:
pass
return ResponseGroup.to_xml(self) |
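The four boto records above form a lazily paged result set: __next__ drains the objects parsed so far and, when they run out, bumps the page counter and asks the connection for the next page; to_xml forces a full fetch first by iterating everything. The same pattern reduced to a generic, self-contained sketch (fetch_page and total_pages stand in for the ECS connection call and the parsed response field):

class PagedCollection:
    def __init__(self, fetch_page, total_pages):
        self.fetch_page = fetch_page    # callable: page number -> list of items
        self.total_pages = total_pages
        self.page = 1
        self.objs = fetch_page(1)
        self._iter = None

    def __iter__(self):
        return self

    def __next__(self):
        if self._iter is None:
            self._iter = iter(self.objs)
        try:
            return next(self._iter)
        except StopIteration:
            if self.page >= self.total_pages:
                raise               # no pages left: end of iteration
            self.page += 1
            self.objs = self.fetch_page(self.page)
            self._iter = iter(self.objs)
            return next(self._iter)

# Two pages of fake data; iterating the collection crosses the page boundary.
pages = {1: ['a', 'b'], 2: ['c']}
items = PagedCollection(pages.get, total_pages=2)
assert list(items) == ['a', 'b', 'c']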
def delete(self, resource_group_name, public_ip_prefix_name, custom_headers=None, raw=False, polling=True, **operation_config):
'Deletes the specified public IP prefix.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_nam... | -5,602,891,413,988,677,000 | Deletes the specified public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIpPrefix.
:type public_ip_prefix_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The po... | azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py | delete | acured/azure-sdk-for-python | python | def delete(self, resource_group_name, public_ip_prefix_name, custom_headers=None, raw=False, polling=True, **operation_config):
'Deletes the specified public IP prefix.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_prefix_nam... |
def get(self, resource_group_name, public_ip_prefix_name, expand=None, custom_headers=None, raw=False, **operation_config):
'Gets the specified public IP prefix in a specified resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :par... | 2,187,649,534,079,012,600 | Gets the specified public IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIPPrefix.
:type public_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:par... | azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py | get | acured/azure-sdk-for-python | python | def get(self, resource_group_name, public_ip_prefix_name, expand=None, custom_headers=None, raw=False, **operation_config):
'Gets the specified public IP prefix in a specified resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :par... |
def create_or_update(self, resource_group_name, public_ip_prefix_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
'Creates or updates a static or dynamic public IP prefix.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: s... | -6,732,961,279,870,233,000 | Creates or updates a static or dynamic public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update public
IP ... | azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py | create_or_update | acured/azure-sdk-for-python | python | def create_or_update(self, resource_group_name, public_ip_prefix_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
'Creates or updates a static or dynamic public IP prefix.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: s... |
def update_tags(self, resource_group_name, public_ip_prefix_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
'Updates public IP prefix tags.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_pre... | -3,271,771,562,177,810,400 | Updates public IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will... | azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py | update_tags | acured/azure-sdk-for-python | python | def update_tags(self, resource_group_name, public_ip_prefix_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
'Updates public IP prefix tags.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param public_ip_pre... |
def list_all(self, custom_headers=None, raw=False, **operation_config):
'Gets all the public IP prefixes in a subscription.\n\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :p... | 11,541,604,707,037,170 | Gets all the public IP prefixes in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterato... | azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py | list_all | acured/azure-sdk-for-python | python | def list_all(self, custom_headers=None, raw=False, **operation_config):
'Gets all the public IP prefixes in a subscription.\n\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :p... |
def list(self, resource_group_name, custom_headers=None, raw=False, **operation_config):
'Gets all public IP prefixes in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param dict custom_headers: headers that will be added to... | 1,095,154,879,440,661,000 | Gets all public IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :r... | azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/public_ip_prefixes_operations.py | list | acured/azure-sdk-for-python | python | def list(self, resource_group_name, custom_headers=None, raw=False, **operation_config):
'Gets all public IP prefixes in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param dict custom_headers: headers that will be added to... |
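Of the six public-IP-prefix operations above, delete, create_or_update, and update_tags are long-running and (with polling=True, the default) return an LROPoller, while get, list, and list_all return results directly. A hedged usage sketch, assuming a configured NetworkManagementClient whose public_ip_prefixes attribute exposes these operations (that attribute name and the shape of the parameters model are not shown in the records):

def ensure_prefix(network_client, resource_group, prefix_name, parameters):
    # create_or_update hands back a poller; result() blocks until the ARM
    # operation completes and returns the resulting PublicIPPrefix.
    poller = network_client.public_ip_prefixes.create_or_update(
        resource_group, prefix_name, parameters)
    return poller.result()

def list_prefix_names(network_client, resource_group):
    # list() yields the PublicIPPrefix resources in one resource group.
    return [p.name for p in network_client.public_ip_prefixes.list(resource_group)]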
def run(self):
'connect and poll messages to queue'
sock = None
print(('Connecting to synchronous uhd message tcp port ' + str(self.port)))
while self.q_quit.empty():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.ip_address, self.port))
... | -653,668,052,842,803,300 | connect and poll messages to queue | src/tcp_sync.py | run | Opendigitalradio/ODR-StaticPrecorrection | python | def run(self):
sock = None
print(('Connecting to synchronous uhd message tcp port ' + str(self.port)))
while self.q_quit.empty():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.ip_address, self.port))
break
except soc... |
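The run method retries the TCP connection in a loop until it either connects or the q_quit queue signals shutdown (the body is truncated here, so the handling after the failed connect is not visible). The retry idiom as a standalone helper, with a back-off delay added for illustration:

import socket
import time

def connect_with_retry(host, port, q_quit, delay=1.0):
    # Keep attempting the connection until it succeeds or a quit is requested.
    while q_quit.empty():
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((host, port))
            return sock
        except OSError:
            sock.close()
            time.sleep(delay)   # pause before the next attempt
    return None

# Usage sketch: pass a queue.Queue(); putting anything on it stops the retries.
# sock = connect_with_retry('127.0.0.1', 12345, queue.Queue())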