| signature (8-3.44k chars) | body (0-1.41M chars) | docstring (1-122k chars) | id (5-17 chars) |
|---|---|---|---|
def actions(self, s):
|
<EOL>return [a for a in self._actions if self._is_valid(self.result(s, a))]<EOL>
|
Possible actions from a state.
|
f8648:c0:m1
|
def _is_valid(self, s):
|
<EOL>return ((s[<NUM_LIT:0>] >= s[<NUM_LIT:1>] or s[<NUM_LIT:0>] == <NUM_LIT:0>)) and((<NUM_LIT:3> - s[<NUM_LIT:0>]) >= (<NUM_LIT:3> - s[<NUM_LIT:1>]) or s[<NUM_LIT:0>] == <NUM_LIT:3>) and(<NUM_LIT:0> <= s[<NUM_LIT:0>] <= <NUM_LIT:3>) and(<NUM_LIT:0> <= s[<NUM_LIT:1>] <= <NUM_LIT:3>)<EOL>
|
Check if a state is valid.
|
f8648:c0:m2
|
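For readability, a plain-Python sketch of the tokenized `_is_valid` body above. It assumes the classic 3-missionaries / 3-cannibals puzzle with a state of the form (missionaries on start bank, cannibals on start bank, boat flag); the numeric literals are taken from the `<NUM_LIT>` placeholders, and the method is shown as a free function.

```python
# Hedged reconstruction of f8648:c0:m2 (assumed: 3 missionaries, 3 cannibals).
def _is_valid(s):
    # Missionaries may not be outnumbered on the start bank (unless absent) ...
    return ((s[0] >= s[1] or s[0] == 0)
            # ... nor on the far bank (unless all missionaries are still here) ...
            and ((3 - s[0]) >= (3 - s[1]) or s[0] == 3)
            # ... and both counts must stay in range.
            and (0 <= s[0] <= 3)
            and (0 <= s[1] <= 3))
```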
def result(self, s, a):
|
<EOL>if s[<NUM_LIT:2>] == <NUM_LIT:0>:<EOL><INDENT>return (s[<NUM_LIT:0>] - a[<NUM_LIT:1>][<NUM_LIT:0>], s[<NUM_LIT:1>] - a[<NUM_LIT:1>][<NUM_LIT:1>], <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>return (s[<NUM_LIT:0>] + a[<NUM_LIT:1>][<NUM_LIT:0>], s[<NUM_LIT:1>] + a[<NUM_LIT:1>][<NUM_LIT:1>], <NUM_LIT:0>)<EOL><DEDENT>
|
Result of applying an action to a state.
|
f8648:c0:m3
|
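A readable sketch of the tokenized `result` body above. It assumes an action is a pair whose second element holds the (missionaries, cannibals) delta and that `s[2]` is the boat flag.

```python
# Hedged sketch of f8648:c0:m3: move people across the river and flip the boat flag.
def result(s, a):
    if s[2] == 0:   # boat on the start bank: the group leaves this bank
        return (s[0] - a[1][0], s[1] - a[1][1], 1)
    else:           # boat on the far bank: the group comes back
        return (s[0] + a[1][0], s[1] + a[1][1], 0)
```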
def mkconstraints():
|
constraints = []<EOL>for j in range(<NUM_LIT:1>, <NUM_LIT:10>):<EOL><INDENT>vars = ["<STR_LIT>" % (i, j) for i in uppercase[:<NUM_LIT:9>]]<EOL>constraints.extend((c, const_different) for c in combinations(vars, <NUM_LIT:2>))<EOL><DEDENT>for i in uppercase[:<NUM_LIT:9>]:<EOL><INDENT>vars = ["<STR_LIT>" % (i, j) for j in range(<NUM_LIT:1>, <NUM_LIT:10>)]<EOL>constraints.extend((c, const_different) for c in combinations(vars, <NUM_LIT:2>))<EOL><DEDENT>for b0 in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>for b1 in [[<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>], [<NUM_LIT:4>, <NUM_LIT:5>, <NUM_LIT:6>], [<NUM_LIT:7>, <NUM_LIT:8>, <NUM_LIT:9>]]:<EOL><INDENT>vars = ["<STR_LIT>" % (i, j) for i in b0 for j in b1]<EOL>l = list((c, const_different) for c in combinations(vars, <NUM_LIT:2>))<EOL>constraints.extend(l)<EOL><DEDENT><DEDENT>return constraints<EOL>
|
Make constraint list for binary constraint problem.
|
f8653:m2
|
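A hedged reconstruction of the constraint builder above. The hidden string literal is assumed to be a "%s%d"-style cell name (e.g. "A1"), the block strings are assumed to be 'ABC'/'DEF'/'GHI', and `const_different` is assumed to be a binary "values differ" predicate.

```python
from itertools import combinations
from string import ascii_uppercase as uppercase

def const_different(variables, values):
    return values[0] != values[1]          # assumed semantics of the predicate

# Hedged sketch of f8653:m2: pairwise "all different" constraints for a Sudoku grid.
def mkconstraints():
    constraints = []
    # every pair of cells in the same row must differ
    for j in range(1, 10):
        vars = ["%s%d" % (i, j) for i in uppercase[:9]]
        constraints.extend((c, const_different) for c in combinations(vars, 2))
    # every pair of cells in the same column must differ
    for i in uppercase[:9]:
        vars = ["%s%d" % (i, j) for j in range(1, 10)]
        constraints.extend((c, const_different) for c in combinations(vars, 2))
    # every pair of cells in the same 3x3 block must differ
    for b0 in ['ABC', 'DEF', 'GHI']:       # assumed block letter groups
        for b1 in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]:
            vars = ["%s%d" % (i, j) for i in b0 for j in b1]
            constraints.extend((c, const_different) for c in combinations(vars, 2))
    return constraints
```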
def find_location(rows, element_to_find):
|
for ir, row in enumerate(rows):<EOL><INDENT>for ic, element in enumerate(row):<EOL><INDENT>if element == element_to_find:<EOL><INDENT>return ir, ic<EOL><DEDENT><DEDENT><DEDENT>
|
Find the location of a piece in the puzzle.
Returns a tuple: row, column
|
f8655:m2
|
def actions(self, state):
|
rows = string_to_list(state)<EOL>row_e, col_e = find_location(rows, '<STR_LIT:e>')<EOL>actions = []<EOL>if row_e > <NUM_LIT:0>:<EOL><INDENT>actions.append(rows[row_e - <NUM_LIT:1>][col_e])<EOL><DEDENT>if row_e < <NUM_LIT:2>:<EOL><INDENT>actions.append(rows[row_e + <NUM_LIT:1>][col_e])<EOL><DEDENT>if col_e > <NUM_LIT:0>:<EOL><INDENT>actions.append(rows[row_e][col_e - <NUM_LIT:1>])<EOL><DEDENT>if col_e < <NUM_LIT:2>:<EOL><INDENT>actions.append(rows[row_e][col_e + <NUM_LIT:1>])<EOL><DEDENT>return actions<EOL>
|
Returns a list of the pieces we can move to the empty space.
|
f8655:c0:m0
|
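A readable sketch of the 8-puzzle `actions` body above, assuming the state is rendered into a 3x3 list of lists by the module's `string_to_list` helper and that 'e' marks the empty square.

```python
# Hedged sketch of f8655:c0:m0: which pieces can slide into the empty square.
def actions(self, state):
    rows = string_to_list(state)              # assumed helper: state string -> 3x3 lists
    row_e, col_e = find_location(rows, 'e')   # 'e' marks the empty square
    actions = []
    if row_e > 0:
        actions.append(rows[row_e - 1][col_e])   # piece above can slide down
    if row_e < 2:
        actions.append(rows[row_e + 1][col_e])   # piece below can slide up
    if col_e > 0:
        actions.append(rows[row_e][col_e - 1])   # piece to the left
    if col_e < 2:
        actions.append(rows[row_e][col_e + 1])   # piece to the right
    return actions
```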
def result(self, state, action):
|
rows = string_to_list(state)<EOL>row_e, col_e = find_location(rows, '<STR_LIT:e>')<EOL>row_n, col_n = find_location(rows, action)<EOL>rows[row_e][col_e], rows[row_n][col_n] = rows[row_n][col_n], rows[row_e][col_e]<EOL>return list_to_string(rows)<EOL>
|
Return the resulting state after moving a piece to the empty space.
(the "action" parameter contains the piece to move)
|
f8655:c0:m1
|
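The matching `result` body, as a sketch: swap the chosen piece with the empty square and re-serialize the board. `string_to_list` and `list_to_string` are assumed module helpers.

```python
# Hedged sketch of f8655:c0:m1: swap the moved piece with the empty square.
def result(self, state, action):
    rows = string_to_list(state)
    row_e, col_e = find_location(rows, 'e')       # empty square
    row_n, col_n = find_location(rows, action)    # piece being moved
    rows[row_e][col_e], rows[row_n][col_n] = rows[row_n][col_n], rows[row_e][col_e]
    return list_to_string(rows)                   # assumed inverse helper
```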
def is_goal(self, state):
|
return state == GOAL<EOL>
|
Returns true if a state is the goal state.
|
f8655:c0:m2
|
def cost(self, state1, action, state2):
|
return <NUM_LIT:1><EOL>
|
Returns the cost of performing an action. Not useful in this problem,
but needed.
|
f8655:c0:m3
|
def heuristic(self, state):
|
rows = string_to_list(state)<EOL>distance = <NUM_LIT:0><EOL>for number in '<STR_LIT>':<EOL><INDENT>row_n, col_n = find_location(rows, number)<EOL>row_n_goal, col_n_goal = goal_positions[number]<EOL>distance += abs(row_n - row_n_goal) + abs(col_n - col_n_goal)<EOL><DEDENT>return distance<EOL>
|
Returns an *estimation* of the distance from a state to the goal.
We are using the manhattan distance.
|
f8655:c0:m4
|
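A sketch of the Manhattan-distance heuristic above. The hidden string literal is assumed to enumerate the piece labels '12345678', and `goal_positions` is assumed to map each piece to its (row, col) in the goal board; a possible construction of that map is shown for completeness.

```python
# Hedged sketch of f8655:c0:m4: sum of Manhattan distances to the goal layout.
GOAL_ROWS = [['1', '2', '3'], ['4', '5', '6'], ['7', '8', 'e']]   # assumed goal layout
goal_positions = {piece: (r, c)
                  for r, row in enumerate(GOAL_ROWS)
                  for c, piece in enumerate(row)}

def heuristic(self, state):
    rows = string_to_list(state)          # assumed helper: state string -> 3x3 lists
    distance = 0
    for number in '12345678':             # assumed piece labels (hidden literal)
        row_n, col_n = find_location(rows, number)
        row_goal, col_goal = goal_positions[number]
        distance += abs(row_n - row_goal) + abs(col_n - col_goal)
    return distance
```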
def setup():
|
for file in glob.glob('<STR_LIT>'.format(TEST_FILE_DIR)):<EOL><INDENT>new_dest = file.replace(TEST_FILE_DIR, '<STR_LIT>')<EOL>shutil.copy(file, new_dest)<EOL><DEDENT>
|
Copy the testing files to the current working dir.
|
f8669:m0
|
def teardown():
|
for file in glob.glob('<STR_LIT>'.format(TEST_FILE_DIR)):<EOL><INDENT>new_dest = file.replace(TEST_FILE_DIR, '<STR_LIT>')<EOL>os.remove(new_dest)<EOL><DEDENT>for file in glob.glob('<STR_LIT>'):<EOL><INDENT>os.remove(file)<EOL><DEDENT>for file in glob.glob('<STR_LIT>'):<EOL><INDENT>os.remove(file)<EOL><DEDENT>for file in glob.glob('<STR_LIT>'):<EOL><INDENT>os.remove(file)<EOL><DEDENT>for file in glob.glob('<STR_LIT>'):<EOL><INDENT>os.remove(file)<EOL><DEDENT>
|
Delete the files.
|
f8669:m1
|
def add(self, command_template, job_class):
|
job = JobTemplate(command_template.alias,<EOL>command_template=command_template,<EOL>depends_on=command_template.depends_on, queue=self.queue,<EOL>job_class=job_class)<EOL>self.queue.push(job)<EOL>
|
Given a command template, add it as a job to the queue.
|
f8677:c0:m1
|
def run(self):
|
iterations = <NUM_LIT:0><EOL>queue = self.queue.tick()<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>next(queue)<EOL><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT>iterations += <NUM_LIT:1><EOL>sleep(self.sleep_time)<EOL><DEDENT>return iterations<EOL>
|
Begins the runtime execution.
|
f8677:c0:m2
|
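A readable sketch of the `run` body above: it drains the queue's `tick()` generator one pass at a time, sleeping between passes. The `sleep` import and the `sleep_time` attribute are taken from the body itself.

```python
from time import sleep   # assumed import

# Hedged sketch of f8677:c0:m2: step the queue generator until it is exhausted.
def run(self):
    iterations = 0
    queue = self.queue.tick()      # tick() yields once per pass over the queue
    while True:
        try:
            next(queue)
        except StopIteration:
            break
        iterations += 1
        sleep(self.sleep_time)     # throttle polling between passes
    return iterations
```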
def consume(self, cwd=None):
|
first_pass = Grammar.overall.parseString(self.string)<EOL>lowered = { key.lower(): val for key, val in first_pass.iteritems() }<EOL>self.commands = ['<STR_LIT:\n>'.join(self._get('<STR_LIT>', lowered))]<EOL>self.job_options = self._get('<STR_LIT>', lowered)<EOL>self.global_options = self._get('<STR_LIT>', lowered)<EOL>self.files = self._get('<STR_LIT>', lowered)<EOL>self.paths = self._get('<STR_LIT>', lowered)<EOL>self.files = self._parse(self.files, Grammar.file, True)<EOL>self.paths = self._parse(self.paths, Grammar.path, True)<EOL>self.job_options = self._parse(self.job_options, Grammar.line)<EOL>try:<EOL><INDENT>command_lines = self._parse(self.commands, Grammar.command_lines)[<NUM_LIT:0>]<EOL><DEDENT>except IndexError:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.commands = []<EOL>for command_line in command_lines:<EOL><INDENT>comments, command = command_line<EOL>self.commands.append([comments.asList(),<EOL>self._parse(['<STR_LIT>'.join(command)], Grammar.command)])<EOL><DEDENT>self.job_options = [opt.asList() for opt in self.job_options]<EOL>self.paths = ctf.get_paths(self.paths)<EOL>self.files = ctf.get_files(self.files)<EOL>self.paths.reverse()<EOL>self.files.reverse()<EOL>self.commands.reverse()<EOL>return ctf.get_command_templates(self.commands, self.files[:],<EOL>self.paths[:], self.job_options)<EOL>
|
Converts the lexer tokens into valid statements. This process
also checks command syntax.
|
f8678:c0:m1
|
def _get(self, key, parser_result):
|
try:<EOL><INDENT>list_data = parser_result[key].asList()<EOL>if any(isinstance(obj, str) for obj in list_data):<EOL><INDENT>txt_lines = ['<STR_LIT>'.join(list_data)]<EOL><DEDENT>else:<EOL><INDENT>txt_lines = ['<STR_LIT>'.join(f) for f in list_data]<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>txt_lines = []<EOL><DEDENT>return txt_lines<EOL>
|
Given a type and a dict of parser results, return
the items as a list.
|
f8678:c0:m2
|
def _parse(self, lines, grammar, ignore_comments=False):
|
results = []<EOL>for c in lines:<EOL><INDENT>if c != '<STR_LIT>' and not (ignore_comments and c[<NUM_LIT:0>] == '<STR_LIT:#>'):<EOL><INDENT>try:<EOL><INDENT>results.append(grammar.parseString(c))<EOL><DEDENT>except pyparsing.ParseException as e:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(e.lineno, c, e))<EOL><DEDENT><DEDENT><DEDENT>return results<EOL>
|
Given a type and a list, parse it using the more detailed
parse grammar.
|
f8678:c0:m3
|
def is_running(self):
|
qstat = self._grep_qstat('<STR_LIT>')<EOL>if qstat:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
|
Checks to see if the job is running.
|
f8679:c0:m3
|
def is_queued(self):
|
qstat = self._grep_qstat('<STR_LIT>')<EOL>if qstat:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
|
Checks to see if the job is queued.
|
f8679:c0:m4
|
def is_complete(self):
|
qstat = self._grep_qstat('<STR_LIT>')<EOL>comp = self._grep_status('<STR_LIT>')<EOL>if qstat and comp:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
|
Checks the job's output or log file to determine if
the completion criteria were met.
|
f8679:c0:m5
|
def is_error(self):
|
qstat = self._grep_qstat('<STR_LIT:error>')<EOL>err = self._grep_status('<STR_LIT:error>')<EOL>if qstat and err:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
|
Checks to see if the job errored out.
|
f8679:c0:m7
|
def _grep_qstat(self, status_type='<STR_LIT>'):
|
args = "<STR_LIT>".format(self.id).split()<EOL>res, _ = call(args)<EOL>if res == '<STR_LIT>': return False<EOL>res = res.split('<STR_LIT:\n>')[<NUM_LIT:2>].split()[<NUM_LIT:4>]<EOL>if status_type == '<STR_LIT>' and res == '<STR_LIT:C>':<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT:error>' and (res == '<STR_LIT:E>' or res == '<STR_LIT:C>'):<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT>' and res == '<STR_LIT:R>':<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT>' and res == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT>' and '<STR_LIT>' in str(res).lower():<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
|
Greps qstat -e <job_id> for information from the queue.
:param status_type: complete, queued, running, error, gone
|
f8679:c0:m8
|
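A sketch of the qstat-grepping body above: it shells out through the module's `call` wrapper and reads the state letter from the fifth column of the third output line. The hidden command template and several state letters are assumptions (the docstring suggests `qstat -e <job_id>`; 'Q' and 'unknown' are guesses for the queued/gone cases).

```python
# Hedged sketch of f8679:c0:m8 (assumed command template and some state letters).
def _grep_qstat(self, status_type='complete'):
    args = "qstat -e {0}".format(self.id).split()   # assumed command template
    res, _ = call(args)                             # module's subprocess wrapper
    if res == '':                                   # job no longer known to the queue
        return False
    res = res.split('\n')[2].split()[4]             # state letter from the 5th column
    if status_type == 'complete' and res == 'C':
        return True
    elif status_type == 'error' and (res == 'E' or res == 'C'):
        return True
    elif status_type == 'running' and res == 'R':
        return True
    elif status_type == 'queued' and res == 'Q':    # assumed letter
        return True
    elif status_type == 'gone' and 'unknown' in str(res).lower():   # assumed marker
        return True
    return False
```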
def _grep_status(self, status_type):
|
args = "<STR_LIT>".format(self.id).split()<EOL>res, _ = call(args)<EOL>exit_status = [line for line in res.split('<STR_LIT:\n>')<EOL>if '<STR_LIT>' in line]<EOL>try:<EOL><INDENT>_, __, code = exit_status[<NUM_LIT:0>].split()<EOL><DEDENT>except IndexError:<EOL><INDENT>code = None<EOL><DEDENT>if status_type == '<STR_LIT>' and code == '<STR_LIT:0>':<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT:error>' and code != '<STR_LIT:0>':<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
|
Greps through the job's current status to see if
it returned with the requested status.
status_type: complete, error
|
f8679:c0:m9
|
def get_command_templates(command_tokens, file_tokens=[], path_tokens=[],<EOL>job_options=[]):
|
files = get_files(file_tokens)<EOL>paths = get_paths(path_tokens)<EOL>job_options = get_options(job_options)<EOL>templates = _get_command_templates(command_tokens, files, paths,<EOL>job_options)<EOL>for command_template in templates:<EOL><INDENT>command_template._dependencies = _get_prelim_dependencies(<EOL>command_template, templates)<EOL><DEDENT>return templates<EOL>
|
Given a list of tokens from the grammar, return a
list of commands.
|
f8680:m0
|
def get_files(file_tokens, cwd=None):
|
if not file_tokens:<EOL><INDENT>return []<EOL><DEDENT>token = file_tokens.pop()<EOL>try:<EOL><INDENT>filename = token.filename<EOL><DEDENT>except AttributeError:<EOL><INDENT>filename = '<STR_LIT>'<EOL><DEDENT>if cwd:<EOL><INDENT>input = Input(token.alias, filename, cwd=cwd)<EOL><DEDENT>else:<EOL><INDENT>input = Input(token.alias, filename)<EOL><DEDENT>return [input] + get_files(file_tokens)<EOL>
|
Given a list of parser file tokens, return a list of input objects
for them.
|
f8680:m1
|
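A readable sketch of the recursive `get_files` body above; `Input` is the module's file token class and the fallback filename literal is hidden, shown here as an empty string.

```python
# Hedged sketch of f8680:m1: turn parser file tokens into Input objects, recursively.
def get_files(file_tokens, cwd=None):
    if not file_tokens:
        return []
    token = file_tokens.pop()
    try:
        filename = token.filename
    except AttributeError:
        filename = ''                 # hidden literal; assumed fallback
    if cwd:
        input = Input(token.alias, filename, cwd=cwd)
    else:
        input = Input(token.alias, filename)
    # recurse on the remaining tokens (cwd is not forwarded, mirroring the body above)
    return [input] + get_files(file_tokens)
```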
def get_paths(path_tokens):
|
if len(path_tokens) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>token = path_tokens.pop()<EOL>path = PathToken(token.alias, token.path)<EOL>return [path] + get_paths(path_tokens)<EOL>
|
Given a list of parser path tokens, return a list of path objects
for them.
|
f8680:m2
|
def get_options(options):
|
return _get_comments(options)<EOL>
|
Given a list of options, tokenize them.
|
f8680:m3
|
def _get_command_templates(command_tokens, files=[], paths=[], job_options=[],<EOL>count=<NUM_LIT:1>):
|
if not command_tokens:<EOL><INDENT>return []<EOL><DEDENT>comment_tokens, command_token = command_tokens.pop()<EOL>parts = []<EOL>parts += job_options + _get_comments(comment_tokens)<EOL>for part in command_token[<NUM_LIT:0>]:<EOL><INDENT>try:<EOL><INDENT>parts.append(_get_file_by_alias(part, files))<EOL>continue<EOL><DEDENT>except (AttributeError, ValueError):<EOL><INDENT>pass<EOL><DEDENT>for cut in part.split():<EOL><INDENT>try:<EOL><INDENT>parts.append(_get_path_by_name(cut, paths))<EOL>continue<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>parts.append(cut)<EOL><DEDENT><DEDENT>command_template = CommandTemplate(alias=str(count), parts=parts)<EOL>[setattr(p, '<STR_LIT>', command_template.alias)<EOL>for p in command_template.output_parts]<EOL>return [command_template] + _get_command_templates(command_tokens,<EOL>files, paths, job_options, count+<NUM_LIT:1>)<EOL>
|
Recursively create command templates.
|
f8680:m4
|
def _get_prelim_dependencies(command_template, all_templates):
|
deps = []<EOL>for input in command_template.input_parts:<EOL><INDENT>if '<STR_LIT:.>' not in input.alias:<EOL><INDENT>continue<EOL><DEDENT>for template in all_templates:<EOL><INDENT>for output in template.output_parts:<EOL><INDENT>if input.fuzzy_match(output):<EOL><INDENT>deps.append(template)<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return list(set(deps))<EOL>
|
Given a command_template, determine which other templates it
depends on. This should not be used as the be-all and end-all of
dependencies; before calling each command, ensure that its
requirements are met.
|
f8680:m5
|
def _get_file_by_alias(part, files):
|
<EOL>if _is_output(part):<EOL><INDENT>return Output.from_string(part.pop())<EOL><DEDENT>else:<EOL><INDENT>inputs = [[]]<EOL>if part.magic_or:<EOL><INDENT>and_or = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>and_or = '<STR_LIT>'<EOL><DEDENT>for cut in part.asList():<EOL><INDENT>if cut == OR_TOKEN:<EOL><INDENT>inputs.append([])<EOL>continue<EOL><DEDENT>if cut == AND_TOKEN:<EOL><INDENT>continue<EOL><DEDENT>input = Input(cut, filename=cut, and_or=and_or)<EOL>for file in files:<EOL><INDENT>if file.alias == cut:<EOL><INDENT>input.filename = file.filename<EOL>inputs[-<NUM_LIT:1>].append(input)<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>inputs[-<NUM_LIT:1>].append(input)<EOL><DEDENT><DEDENT>return [input for input in inputs if input]<EOL><DEDENT>
|
Given a command part, find the file it represents. If not found,
then returns a new token representing that file.
:throws ValueError: if the value is not a command file alias.
|
f8680:m6
|
def _get_path_by_name(part, paths):
|
for path in paths:<EOL><INDENT>if path.alias == part:<EOL><INDENT>return path<EOL><DEDENT><DEDENT>raise ValueError<EOL>
|
Given a command part, find the path it represents.
:throws ValueError: if no valid file is found.
|
f8680:m7
|
def _get_comments(parts):
|
return [CommentToken(part) for part in parts]<EOL>
|
Given a list of parts representing a list of comments, return the list
of comment tokens.
|
f8680:m8
|
def _is_output(part):
|
if part[<NUM_LIT:0>].lower() == '<STR_LIT:o>':<EOL><INDENT>return True<EOL><DEDENT>elif part[<NUM_LIT:0>][:<NUM_LIT:2>].lower() == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>elif part[<NUM_LIT:0>][:<NUM_LIT:2>].lower() == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
|
Returns whether the given part represents an output variable.
|
f8680:m9
|
@property<EOL><INDENT>def active_jobs(self):<DEDENT>
|
return list(set(self.queue + self.running))<EOL>
|
Returns a list of all jobs that are waiting in the queue
or currently in progress.
|
f8681:c0:m3
|
@property<EOL><INDENT>def all_jobs(self):<DEDENT>
|
return list(set(self.complete + self.failed + self.queue + self.running))<EOL>
|
Returns a list of all jobs submitted to the queue: complete,
in progress, or failed.
|
f8681:c0:m4
|
@property<EOL><INDENT>def progress(self):<DEDENT>
|
total = len(self.all_jobs)<EOL>remaining = total - len(self.active_jobs) if total > <NUM_LIT:0> else <NUM_LIT:0><EOL>percent = int(<NUM_LIT:100> * (float(remaining) / total)) if total > <NUM_LIT:0> else <NUM_LIT:0><EOL>return percent<EOL>
|
Returns the percentage of submitted jobs that are no longer
active (complete or failed).
|
f8681:c0:m5
|
def ready(self, job):
|
no_deps = len(job.depends_on) == <NUM_LIT:0><EOL>all_complete = all(j.is_complete() for j in self.active_jobs<EOL>if j.alias in job.depends_on)<EOL>none_failed = not any(True for j in self.failed<EOL>if j.alias in job.depends_on)<EOL>queue_is_open = len(self.running) < self.MAX_CONCURRENT_JOBS<EOL>return queue_is_open and (no_deps or (all_complete and none_failed))<EOL>
|
Determines if the job is ready to be submitted to the
queue. It checks whether the job depends on any currently
running or queued operations.
|
f8681:c0:m6
|
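The readiness check above, as a sketch: a job can be submitted when there is room in the queue and either it has no dependencies or all of its dependencies have completed without failures. It relies on the surrounding queue class's attributes.

```python
# Hedged sketch of f8681:c0:m6: is this job ready to submit?
def ready(self, job):
    no_deps = len(job.depends_on) == 0
    all_complete = all(j.is_complete() for j in self.active_jobs
                       if j.alias in job.depends_on)
    none_failed = not any(True for j in self.failed
                          if j.alias in job.depends_on)
    queue_is_open = len(self.running) < self.MAX_CONCURRENT_JOBS
    return queue_is_open and (no_deps or (all_complete and none_failed))
```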
def locked(self):
|
if len(self.failed) == <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>for fail in self.failed:<EOL><INDENT>for job in self.active_jobs:<EOL><INDENT>if fail.alias in job.depends_on:<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>
|
Determines if the queue is locked.
|
f8681:c0:m7
|
def push(self, job):
|
self.queue.append(job)<EOL>
|
Push a job onto the queue. This does not submit the job.
|
f8681:c0:m8
|
def tick(self):
|
self.on_start()<EOL>while not self.is_empty:<EOL><INDENT>cruft = []<EOL>for job in self.queue:<EOL><INDENT>if not self.ready(job):<EOL><INDENT>continue<EOL><DEDENT>self.on_ready(job)<EOL>try:<EOL><INDENT>job.submit()<EOL><DEDENT>except ValueError:<EOL><INDENT>if job.should_retry:<EOL><INDENT>self.on_error(job)<EOL>job.attempts += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>self.on_fail(job)<EOL>cruft.append(job)<EOL>self.failed.append(job)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.running.append(job)<EOL>self.on_submit(job)<EOL>cruft.append(job)<EOL><DEDENT><DEDENT>self.queue = [job for job in self.queue if job not in cruft]<EOL>cruft = []<EOL>for job in self.running:<EOL><INDENT>if job.is_running() or job.is_queued():<EOL><INDENT>pass<EOL><DEDENT>elif job.is_complete():<EOL><INDENT>self.on_complete(job)<EOL>cruft.append(job)<EOL>self.complete.append(job)<EOL><DEDENT>elif job.is_fail():<EOL><INDENT>self.on_fail(job)<EOL>cruft.append(job)<EOL>self.failed.append(job)<EOL><DEDENT>elif job.is_error():<EOL><INDENT>self.on_error(job)<EOL>cruft.append(job)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>self.running = [job for job in self.running if job not in cruft]<EOL>if self.locked() and self.on_locked():<EOL><INDENT>raise RuntimeError<EOL><DEDENT>self.on_tick()<EOL>yield<EOL><DEDENT>self.on_end()<EOL>
|
Submits all the given jobs in the queue and watches their
progress as they proceed. This function yields at the end of
each iteration of the queue.
:raises RuntimeError: If queue is locked.
|
f8681:c0:m9
|
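A condensed sketch of the `tick` generator above. Each pass submits every ready job (retrying or failing on submission errors), sweeps the running list into complete/failed, checks for a locked queue, and then yields; the cruft-list bookkeeping of the original is replaced here by removing from copies of the lists.

```python
# Condensed, hedged sketch of f8681:c0:m9.
def tick(self):
    self.on_start()
    while not self.is_empty:
        # 1. submit every job whose dependencies are satisfied
        for job in list(self.queue):
            if not self.ready(job):
                continue
            self.on_ready(job)
            try:
                job.submit()
            except ValueError:
                if job.should_retry:
                    self.on_error(job)
                    job.attempts += 1          # leave it queued for another try
                else:
                    self.on_fail(job)
                    self.queue.remove(job)
                    self.failed.append(job)
            else:
                self.running.append(job)
                self.on_submit(job)
                self.queue.remove(job)
        # 2. sweep running jobs into complete / failed
        for job in list(self.running):
            if job.is_running() or job.is_queued():
                continue
            if job.is_complete():
                self.on_complete(job)
                self.running.remove(job)
                self.complete.append(job)
            elif job.is_fail():                # name mirrors the body above
                self.on_fail(job)
                self.running.remove(job)
                self.failed.append(job)
            elif job.is_error():
                self.on_error(job)
                self.running.remove(job)       # errored jobs are simply dropped
        # 3. a locked queue (failed dependency) aborts the run
        if self.locked() and self.on_locked():
            raise RuntimeError
        self.on_tick()
        yield                                  # one pass per yield
    self.on_end()
```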
def on_start(self):
|
pass<EOL>
|
Called when the queue is starting up.
|
f8681:c0:m10
|
def on_end(self):
|
pass<EOL>
|
Called when the queue is shutting down.
|
f8681:c0:m11
|
def on_locked(self):
|
return True<EOL>
|
Called when the queue is locked and no jobs can proceed.
If this callback returns True, then the queue will be restarted,
else it will be terminated.
|
f8681:c0:m12
|
def on_tick(self):
|
pass<EOL>
|
Called when a tick of the queue is complete.
|
f8681:c0:m13
|
def on_ready(self, job):
|
pass<EOL>
|
Called when a job is ready to be submitted.
:param job: The given job that is ready.
|
f8681:c0:m14
|
def on_submit(self, job):
|
pass<EOL>
|
Called when a job has been submitted.
:param job: The given job that has been submitted.
|
f8681:c0:m15
|
def on_complete(self, job):
|
pass<EOL>
|
Called when a job has completed.
:param job: The given job that has completed.
|
f8681:c0:m16
|
def on_error(self, job):
|
pass<EOL>
|
Called when a job has errored. By default, the job
is resubmitted until some max threshold is reached.
:param job: The given job that has errored.
|
f8681:c0:m17
|
def on_fail(self, job):
|
pass<EOL>
|
Called when a job has failed after multiple resubmissions. The
given job will be removed from the queue.
:param job: The given job that has failed.
|
f8681:c0:m18
|
@property<EOL><INDENT>def real_jobs(self):<DEDENT>
|
return [j for j in self.all_jobs if not isinstance(j, JobTemplate)]<EOL>
|
Returns all jobs that represent work.
|
f8681:c1:m0
|
def fuzzy_match(self, other):
|
magic, fuzzy = False, False<EOL>try:<EOL><INDENT>magic = self.alias == other.magic<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>if '<STR_LIT:.>' in self.alias:<EOL><INDENT>major = self.alias.split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>fuzzy = major == other.alias<EOL><DEDENT>return magic or fuzzy<EOL>
|
Given another token, see if either the major alias identifier
matches the other alias, or if magic matches the alias.
|
f8682:c3:m2
|
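A readable sketch of `fuzzy_match` above: a token matches either on its magic alias or when the major part of a dotted alias (the part before the first '.') equals the other token's alias.

```python
# Hedged sketch of f8682:c3:m2.
def fuzzy_match(self, other):
    magic, fuzzy = False, False
    try:
        magic = self.alias == other.magic
    except AttributeError:
        pass                                  # other token has no magic attribute
    if '.' in self.alias:
        major = self.alias.split('.')[0]      # major part of a dotted alias
        fuzzy = major == other.alias
    return magic or fuzzy
```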
def eval(self):
|
if self.and_or == '<STR_LIT>':<EOL><INDENT>return [Input(self.alias, file, self.cwd, '<STR_LIT>')<EOL>for file in self.files]<EOL><DEDENT>return '<STR_LIT:U+0020>'.join(self.files)<EOL>
|
Evaluates the given input and returns a string containing the
actual filenames represented. If the input token represents multiple
independent files, then eval will return a list of all the input files
needed, otherwise it returns the filenames in a string.
|
f8682:c3:m3
|
@property<EOL><INDENT>def command_alias(self):<DEDENT>
|
if '<STR_LIT:.>' in self.alias:<EOL><INDENT>return self.alias.split('<STR_LIT:->')[<NUM_LIT:0>]<EOL><DEDENT>return None<EOL>
|
Returns the command alias for a given input. In most cases this
is just the input's alias, but if the input is one of many, then
`command_alias` returns just the beginning of the alias corresponding to
the command's alias.
|
f8682:c3:m4
|
@property<EOL><INDENT>def files(self):<DEDENT>
|
res = None<EOL>if not res:<EOL><INDENT>res = glob.glob(self.path)<EOL><DEDENT>if not res and self.is_glob:<EOL><INDENT>res = glob.glob(self.magic_path)<EOL><DEDENT>if not res:<EOL><INDENT>res = glob.glob(self.alias)<EOL><DEDENT>if not res:<EOL><INDENT>raise ValueError('<STR_LIT>' % self)<EOL><DEDENT>return res<EOL>
|
Returns a list of all the files that match the given
input token.
|
f8682:c3:m8
|
@staticmethod<EOL><INDENT>def from_string(string, _or='<STR_LIT>'):<DEDENT>
|
if _or:<EOL><INDENT>and_or = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>and_or = '<STR_LIT>'<EOL><DEDENT>return Input(string, and_or=and_or)<EOL>
|
Parse a given string and turn it into an input token.
|
f8682:c3:m9
|
def __eq__(self, other):
|
try:<EOL><INDENT>return (self.magic == other.alias or<EOL>super(Output, self).__eq__(other))<EOL><DEDENT>except AttributeError:<EOL><INDENT>return False<EOL><DEDENT>
|
Overrides the token eq to allow for magic : alias comparison for
magic inputs. Defaults to the super() eq otherwise.
|
f8682:c4:m2
|
def eval(self):
|
if self.magic:<EOL><INDENT>return self.magic<EOL><DEDENT>if not self.filename:<EOL><INDENT>return file_pattern.format(self.alias, self.ext)<EOL><DEDENT>return self.path<EOL>
|
Returns a filename to be used for script output.
|
f8682:c4:m3
|
def as_input(self):
|
return Input(self.alias, self.eval())<EOL>
|
Returns an input token for the given output.
|
f8682:c4:m4
|
def _clean(self, magic):
|
if magic.lower() == '<STR_LIT:o>':<EOL><INDENT>self.magic = '<STR_LIT>'<EOL><DEDENT>elif magic[:<NUM_LIT:2>].lower() == '<STR_LIT>':<EOL><INDENT>self.magic = magic[<NUM_LIT:2>:]<EOL><DEDENT>elif magic[:<NUM_LIT:2>].lower() == '<STR_LIT>':<EOL><INDENT>self.ext = magic[<NUM_LIT:1>:]<EOL><DEDENT>
|
Given a magic string, remove the output tag designator.
|
f8682:c4:m5
|
@staticmethod<EOL><INDENT>def from_string(string):<DEDENT>
|
return Output('<STR_LIT>', magic=string)<EOL>
|
Parse a given string and turn it into an output token.
|
f8682:c4:m6
|
def call(args, stdout=PIPE, stderr=PIPE):
|
p = Popen(args, stdout=stdout, stderr=stderr)<EOL>out, err = p.communicate()<EOL>try:<EOL><INDENT>return out.decode(sys.stdout.encoding), err.decode(sys.stdout.encoding)<EOL><DEDENT>except Exception:<EOL><INDENT>return out, err<EOL><DEDENT>
|
Calls the given arguments in a separate process
and returns the contents of standard out and standard error.
|
f8683:m0
|
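The subprocess wrapper above, as a self-contained sketch: run the command, capture both streams, and decode them with the terminal's encoding, falling back to raw bytes.

```python
import sys
from subprocess import Popen, PIPE

# Hedged sketch of f8683:m0: run a command and capture stdout/stderr as text.
def call(args, stdout=PIPE, stderr=PIPE):
    p = Popen(args, stdout=stdout, stderr=stderr)
    out, err = p.communicate()
    try:
        return out.decode(sys.stdout.encoding), err.decode(sys.stdout.encoding)
    except Exception:
        return out, err      # fall back to raw bytes if decoding fails
```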
def __init__(self, alias, command, depends_on=[]):
|
self.command = command<EOL>self.depends_on = depends_on<EOL>self.alias = alias<EOL>self.attempts = <NUM_LIT:0><EOL>self.filename = self.JOB_FILE_PATTERN.format(self.alias)<EOL>
|
Create a new job with the given name and command.
|
f8683:c0:m0
|
def make(self):
|
eval = self.command.eval()<EOL>with open(self.filename, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(eval)<EOL><DEDENT>
|
Evaluate the command, and write it to a file.
|
f8683:c0:m3
|
@property<EOL><INDENT>def cmd(self):<DEDENT>
|
pass<EOL>
|
Returns the command needed to submit the calculations.
Normally this is just the command itself; if a queue system is
used, it should instead return the command that submits the job
to the queue.
|
f8683:c0:m5
|
def submit(self):
|
pass<EOL>
|
Submits the job to be run. If an external queue system is used,
this method submits the job to that queue; otherwise it runs the job itself.
:see: call
|
f8683:c0:m6
|
def is_running(self):
|
pass<EOL>
|
Returns whether the job is running or not.
|
f8683:c0:m7
|
def is_queued(self):
|
pass<EOL>
|
Returns whether the job is queued or not.
This function is only used if jobs are submitted to an external queue.
|
f8683:c0:m8
|
def is_complete(self):
|
pass<EOL>
|
Returns whether the job is complete or not.
|
f8683:c0:m9
|
def is_error(self):
|
pass<EOL>
|
Checks to see if the job errored out.
|
f8683:c0:m10
|
def is_failed(self):
|
pass<EOL>
|
Checks to see if the job has failed, which usually means the job
should not be resubmitted.
|
f8683:c0:m11
|
def render(self, message, progress):
|
pass<EOL>
|
Render the output of the report.
|
f8684:c0:m0
|
def _get_jobs_from_template(self, template, job_class):
|
jobs = []<EOL>for command in template.eval():<EOL><INDENT>alias = command.alias<EOL>depends_on = [job.alias<EOL>for job in self.queue.all_jobs<EOL>for deps in command.depends_on<EOL>if deps == job.alias]<EOL>command.update_dependent_files([job.command<EOL>for job in self.queue.all_jobs<EOL>if not isinstance(job, JobTemplate)])<EOL>job = job_class(alias, command, depends_on)<EOL>jobs.append(job)<EOL><DEDENT>return jobs<EOL>
|
Given a template and a job class, construct jobs from
the given template.
|
f8685:c0:m8
|
@classproperty<EOL><INDENT>@staticmethod<EOL>def overall():<DEDENT>
|
return ZeroOrMore(Grammar.comment) + Dict(ZeroOrMore(Group(<EOL>Grammar._section + ZeroOrMore(Group(Grammar.line)))<EOL>))<EOL>
|
The overall grammar for pulling apart the main input files.
|
f8686:c1:m0
|
@classproperty<EOL><INDENT>@staticmethod<EOL>def file():<DEDENT>
|
return (<EOL>Optional(Word(alphanums).setResultsName('<STR_LIT>') +<EOL>Suppress(Literal('<STR_LIT:.>'))) + Suppress(White()) +<EOL>Word(approved_printables).setResultsName('<STR_LIT:filename>')<EOL>)<EOL>
|
Grammar for files found in the overall input files.
|
f8686:c1:m2
|
@classproperty<EOL><INDENT>@staticmethod<EOL>def path():<DEDENT>
|
return (<EOL>Word(approved_printables).setResultsName('<STR_LIT>') +<EOL>Suppress(White()) +<EOL>restOfLine.setResultsName('<STR_LIT:path>')<EOL>)<EOL>
|
Grammar for paths found in the overall input files.
|
f8686:c1:m3
|
@classproperty<EOL><INDENT>@staticmethod<EOL>def command_lines():<DEDENT>
|
return ZeroOrMore(Group(<EOL>Group(ZeroOrMore(Group(Grammar.comment))) + Grammar._non_comment_line<EOL>))<EOL>
|
Grammar for commands found in the overall input files.
|
f8686:c1:m4
|
@classproperty<EOL><INDENT>@staticmethod<EOL>def command():<DEDENT>
|
return (<EOL>OneOrMore(<EOL>Word(approved_printables+'<STR_LIT:U+0020>').setResultsName('<STR_LIT>',<EOL>listAllMatches=True) ^<EOL>Grammar.__command_input_output.setResultsName('<STR_LIT>',<EOL>listAllMatches=True)<EOL>)<EOL>)<EOL>
|
Grammar for commands found in the overall input files.
|
f8686:c1:m5
|
@property<EOL><INDENT>def depends_on(self):<DEDENT>
|
return [part.command_alias for part in self.input_parts<EOL>if part.command_alias is not None]<EOL>
|
Returns a list of command template aliases that the given command
template depends on.
|
f8687:c0:m2
|
@property<EOL><INDENT>def input_parts(self):<DEDENT>
|
return [part for part in self.file_parts<EOL>if isinstance(part, Input)]<EOL>
|
Returns a list of the input tokens in the list of parts.
|
f8687:c0:m3
|
@property<EOL><INDENT>def output_parts(self):<DEDENT>
|
return [part for part in self.file_parts<EOL>if isinstance(part, Output)]<EOL>
|
Returns a list of the output tokens in the list of parts.
|
f8687:c0:m4
|
@property<EOL><INDENT>def file_parts(self):<DEDENT>
|
file_parts = []<EOL>for part in self.parts:<EOL><INDENT>try:<EOL><INDENT>for sub_part in part:<EOL><INDENT>if isinstance(sub_part, FileToken):<EOL><INDENT>file_parts.append(sub_part)<EOL><DEDENT><DEDENT><DEDENT>except TypeError:<EOL><INDENT>if isinstance(part, FileToken):<EOL><INDENT>file_parts.append(part)<EOL><DEDENT><DEDENT><DEDENT>return file_parts<EOL>
|
Returns a list of the file tokens in the list of parts.
|
f8687:c0:m5
|
@property<EOL><INDENT>def path_parts(self):<DEDENT>
|
return [part for part in self.parts<EOL>if isinstance(part, PathToken)]<EOL>
|
Returns a list of the path tokens in the list of parts.
|
f8687:c0:m6
|
def update_dependent_files(self, prev_commands=[]):
|
for command in prev_commands:<EOL><INDENT>for my_input in self.input_parts:<EOL><INDENT>for their_output in command.output_parts:<EOL><INDENT>if their_output == my_input:<EOL><INDENT>my_input.filename = their_output.eval()<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
|
Update the command's dependencies based on the evaluated input and
output of previous commands.
|
f8687:c0:m7
|
def eval(self):
|
eval = []<EOL>for part in self.parts:<EOL><INDENT>try:<EOL><INDENT>result = part.eval()<EOL><DEDENT>except AttributeError:<EOL><INDENT>result = part<EOL><DEDENT>if result[-<NUM_LIT:1>] != '<STR_LIT:\n>':<EOL><INDENT>result += '<STR_LIT:U+0020>'<EOL><DEDENT>eval.append(result)<EOL><DEDENT>return '<STR_LIT>'.join(eval).strip()<EOL>
|
Evaluate the given job and return a complete shell script to be run
by the job manager.
|
f8687:c0:m8
|
def is_queued(self):
|
return False<EOL>
|
Returns False since local jobs are not submitted to an
external queue.
|
f8688:c1:m5
|
def is_error(self):
|
try:<EOL><INDENT>if self._task.is_alive():<EOL><INDENT>if len(self._task.stderr.readlines()) > <NUM_LIT:0>:<EOL><INDENT>self._task.join()<EOL>self._write_log()<EOL>return True<EOL><DEDENT><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>return False<EOL>
|
Checks to see if the job errored out.
|
f8688:c1:m7
|
def _get_parts_list(to_go, so_far=[[]], ticker=None):
|
try:<EOL><INDENT>part = to_go.pop(<NUM_LIT:0>)<EOL><DEDENT>except IndexError:<EOL><INDENT>return so_far, ticker<EOL><DEDENT>if isinstance(part, list) and any(isinstance(e, list) for e in part):<EOL><INDENT>while len(part) > <NUM_LIT:0>:<EOL><INDENT>so_far, ticker = _get_parts_list(part, so_far, ticker)<EOL>ticker.tick()<EOL><DEDENT><DEDENT>elif isinstance(part, list) and any(isinstance(e, Input) for e in part):<EOL><INDENT>while len(part) > <NUM_LIT:0>:<EOL><INDENT>so_far, ticker = _get_parts_list(part, so_far, ticker)<EOL><DEDENT><DEDENT>elif isinstance(part, Input) and part.is_magic:<EOL><INDENT>inputs = part.eval()<EOL>while len(inputs) > <NUM_LIT:0>:<EOL><INDENT>so_far, ticker = _get_parts_list(inputs, so_far, ticker)<EOL>ticker.tick()<EOL><DEDENT><DEDENT>elif isinstance(part, Input) and not part.is_magic:<EOL><INDENT>so_far[ticker.value].append(part)<EOL><DEDENT>else:<EOL><INDENT>so_far = _append(so_far, part)<EOL><DEDENT>return so_far, ticker<EOL>
|
Iterates over to_go, building the list of parts. To provide
items for the beginning, use so_far.
|
f8690:m0
|
def _get_max_size(parts, size=<NUM_LIT:1>):
|
max_group_size = <NUM_LIT:0><EOL>for part in parts:<EOL><INDENT>if isinstance(part, list):<EOL><INDENT>group_size = <NUM_LIT:0><EOL>for input_group in part:<EOL><INDENT>group_size += <NUM_LIT:1><EOL><DEDENT>if group_size > max_group_size:<EOL><INDENT>max_group_size = group_size<EOL><DEDENT><DEDENT><DEDENT>magic_size = _get_magic_size(parts)<EOL>return max_group_size * magic_size<EOL>
|
Given a list of parts, find the maximum number of commands
contained in it.
|
f8690:m1
|
def _append(so_far, item):
|
for sub_list in so_far:<EOL><INDENT>sub_list.append(item)<EOL><DEDENT>return so_far<EOL>
|
Appends an item to all items in a list of lists.
|
f8690:m3
|
def _grow(list_of_lists, num_new):
|
first = list_of_lists[<NUM_LIT:0>]<EOL>for i in range(num_new):<EOL><INDENT>list_of_lists.append(copy.deepcopy(first))<EOL><DEDENT>return list_of_lists<EOL>
|
Given a list of lists, and a number of new lists to add, copy the
content of the first list into the new ones, and add them to the list
of lists.
|
f8690:m4
|
def _search_for_files(parts):
|
file_parts = []<EOL>for part in parts:<EOL><INDENT>if isinstance(part, list):<EOL><INDENT>file_parts.extend(_search_for_files(part))<EOL><DEDENT>elif isinstance(part, FileToken):<EOL><INDENT>file_parts.append(part)<EOL><DEDENT><DEDENT>return file_parts<EOL>
|
Given a list of parts, return all of the nested file parts.
|
f8690:m5
|
@property<EOL><INDENT>def depends_on(self):<DEDENT>
|
return [dep.alias for dep in self._dependencies]<EOL>
|
Returns a list of command template aliases that the given command
template depends on.
|
f8690:c1:m2
|
@property<EOL><INDENT>def file_parts(self):<DEDENT>
|
return _search_for_files(self.parts)<EOL>
|
Returns a list of the file tokens in the list of parts.
|
f8690:c1:m3
|
def eval(self):
|
max_size = _get_max_size(self.parts)<EOL>parts_list = _grow([[]], max_size-<NUM_LIT:1>)<EOL>counter = Ticker(max_size)<EOL>parts = self.parts[:]<EOL>while len(parts) > <NUM_LIT:0>:<EOL><INDENT>parts_list, counter = _get_parts_list(parts,<EOL>parts_list, counter)<EOL><DEDENT>commands = []<EOL>for i, parts in enumerate(parts_list):<EOL><INDENT>alias = self._get_alias(i+<NUM_LIT:1>)<EOL>new_parts = copy.deepcopy(parts)<EOL>commands.append(Command(alias=alias, parts=new_parts))<EOL><DEDENT>return commands<EOL>
|
Returns a list of Command objects that can be evaluated as their
string values. Each command will track its preliminary dependencies,
but these values should not be depended on for running commands.
|
f8690:c1:m4
|
def _get_alias(self, index):
|
return '<STR_LIT>'.format(self.alias, index)<EOL>
|
Given an index, return the string alias for that command.
|
f8690:c1:m5
|
def is_running(self):
|
qstat = self._grep_qstat('<STR_LIT>')<EOL>if qstat:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
|
Checks to see if the job is running.
|
f8691:c0:m3
|
def is_queued(self):
|
qstat = self._grep_qstat('<STR_LIT>')<EOL>if qstat:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
|
Checks to see if the job is queued.
|
f8691:c0:m4
|
def is_complete(self):
|
qstat = self._grep_qstat('<STR_LIT>')<EOL>comp = self._grep_status('<STR_LIT>')<EOL>if qstat and comp:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
|
Checks the job's output or log file to determine if
the completion criteria were met.
|
f8691:c0:m5
|
def is_error(self):
|
qstat = self._grep_qstat('<STR_LIT:error>')<EOL>err = self._grep_status('<STR_LIT:error>')<EOL>if qstat and err:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
|
Checks to see if the job errored out.
|
f8691:c0:m6
|
def _grep_qstat(self, status_type='<STR_LIT>'):
|
args = ("<STR_LIT>" % self.id).split()<EOL>res, _ = call(args)<EOL>if res == '<STR_LIT>': return False<EOL>res = res.split('<STR_LIT:\n>')[<NUM_LIT:2>].split()[<NUM_LIT:4>]<EOL>if status_type == '<STR_LIT>' and res == '<STR_LIT:c>':<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT:error>' and (res == '<STR_LIT:e>' or res == '<STR_LIT:c>'):<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT>' and res == '<STR_LIT:r>':<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT>' and res == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>elif status_type == '<STR_LIT>' and '<STR_LIT>' in str(res).lower():<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
|
Greps qstat -e <job_id> for information from the queue.
:param status_type: complete, queued, running, error, gone
|
f8691:c0:m7
|