| sentence1 | sentence2 | label |
|---|---|---|
def terminate(self):
"""Delete all files created by this server, invalidating `self`. Use with care."""
logger.info("deleting entire server %s" % self)
self.close()
try:
shutil.rmtree(self.basedir)
logger.info("deleted server under %s" % self.basedir)
# delete everything from self, so that any further use of this object
# results in an error as quickly as possible
for val in list(self.__dict__.keys()):
try:
delattr(self, val)
except Exception:
pass
except Exception as e:
logger.warning("failed to delete SessionServer: %s" % (e))
|
Delete all files created by this server, invalidating `self`. Use with care.
|
entailment
|
def find_similar(self, *args, **kwargs):
"""
Find similar articles.
With autosession off, use the index state *before* current session started,
so that changes made in the session will not be visible here. With autosession
on, close the current session first (so that session changes *are* committed
and visible).
"""
if self.session is not None and self.autosession:
# with autosession on, commit the pending transaction first
self.commit()
return self.stable.find_similar(*args, **kwargs)
|
Find similar articles.
With autosession off, use the index state *before* current session started,
so that changes made in the session will not be visible here. With autosession
on, close the current session first (so that session changes *are* committed
and visible).
|
entailment
|
async def profile(self, ctx, platform, name):
'''Fetch a profile.'''
player = await self.client.get_player(platform, name)
solos = await player.get_solos()
await ctx.send("# of kills in solos for {}: {}".format(name,solos.kills.value))
|
Fetch a profile.
|
entailment
|
def start(io_loop=None, check_time=2):
"""Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
io_loop = io_loop or asyncio.get_event_loop()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
logger.warning("aiohttp_autoreload started more than once in the same process")
# if _has_execv:
# add_reload_hook(functools.partial(io_loop.close, all_fds=True))
modify_times = {}
callback = functools.partial(_reload_on_update, modify_times)
logger.debug("Starting periodic checks for code changes")
call_periodic(check_time, callback, loop=io_loop)
|
Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
|
entailment
|
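The `call_periodic` helper used above is not shown in this snippet. A minimal sketch of how such periodic rescheduling could be built on asyncio (hypothetical helper, not the library's actual implementation):

```python
import asyncio

def call_periodic(interval, callback, loop=None):
    """Invoke `callback` every `interval` seconds on the given event loop (sketch only)."""
    loop = loop or asyncio.get_event_loop()

    def _tick():
        callback()
        # Re-arm the timer after every invocation.
        loop.call_later(interval, _tick)

    loop.call_later(interval, _tick)
```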
def generate_chunks(data, chunk_size=DEFAULT_CHUNK_SIZE):
"""Yield 'chunk_size' items from 'data' at a time."""
iterator = iter(repeated.getvalues(data))
while True:
chunk = list(itertools.islice(iterator, chunk_size))
if not chunk:
return
yield chunk
|
Yield 'chunk_size' items from 'data' at a time.
|
entailment
|
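A standard-library-only sketch of the same chunking pattern, assuming a plain iterable in place of `repeated.getvalues(data)`:

```python
import itertools

def chunks_of(iterable, chunk_size):
    """Yield lists of up to `chunk_size` items until the iterable is exhausted."""
    iterator = iter(iterable)
    while True:
        chunk = list(itertools.islice(iterator, chunk_size))
        if not chunk:
            return
        yield chunk

# list(chunks_of(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
```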
def reduce(reducer, data, chunk_size=DEFAULT_CHUNK_SIZE):
"""Repeatedly call fold and merge on data and then finalize.
Arguments:
data: Input for the fold function.
reducer: The IReducer to use.
chunk_size: How many items should be passed to fold at a time?
Returns:
Return value of finalize.
"""
if not chunk_size:
return finalize(reducer, fold(reducer, data))
# Splitting the work up into chunks allows us to, e.g. reduce a large file
# without loading everything into memory, while still being significantly
# faster than repeatedly calling the fold function for every element.
chunks = generate_chunks(data, chunk_size)
intermediate = fold(reducer, next(chunks))
for chunk in chunks:
intermediate = merge(reducer, intermediate, fold(reducer, chunk))
return finalize(reducer, intermediate)
|
Repeatedly call fold and merge on data and then finalize.
Arguments:
data: Input for the fold function.
reducer: The IReducer to use.
chunk_size: How many items should be passed to fold at a time?
Returns:
Return value of finalize.
|
entailment
|
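To make the fold/merge/finalize flow concrete, here is a hypothetical summing reducer expressed as plain callables rather than the IReducer protocol; it only illustrates the chunked data flow used above.

```python
def fold(chunk):                 # reduce one chunk to an intermediate value
    return sum(chunk)

def merge(left, right):          # combine two intermediate values
    return left + right

def finalize(intermediate):      # post-process the final intermediate value
    return intermediate

def chunked_reduce(chunks):
    """chunks: an iterable of lists, e.g. as produced by generate_chunks()."""
    chunks = iter(chunks)
    intermediate = fold(next(chunks))
    for chunk in chunks:
        intermediate = merge(intermediate, fold(chunk))
    return finalize(intermediate)

# chunked_reduce([[1, 2], [3, 4], [5]]) == 15
```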
def conditions(self):
"""The if-else pairs."""
for idx in six.moves.range(1, len(self.children), 2):
yield (self.children[idx - 1], self.children[idx])
|
The if-else pairs.
|
entailment
|
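The pairing logic above walks `self.children` two elements at a time; the same idea over a plain list (hypothetical data):

```python
def condition_pairs(children):
    """Yield (condition, branch) pairs from a flat [cond, branch, cond, branch, ...] list."""
    for idx in range(1, len(children), 2):
        yield (children[idx - 1], children[idx])

# list(condition_pairs(['c1', 'b1', 'c2', 'b2'])) == [('c1', 'b1'), ('c2', 'b2')]
```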
def resolve_placeholders(path, placeholder_dict):
"""
**Purpose**: Substitute placeholders in staging attributes of a Task with actual paths to the corresponding tasks.
:arguments:
:path: string describing the staging paths, possibly containing a placeholder
:placeholder_dict: dictionary holding the values for placeholders
"""
try:
if isinstance(path, unicode):
path = str(path)
if not isinstance(path, str):
raise TypeError(expected_type=str, actual_type=type(path))
if '$' not in path:
return path
# Extract placeholder from path
if len(path.split('>')) == 1:
placeholder = path.split('/')[0]
else:
if path.split('>')[0].strip().startswith('$'):
placeholder = path.split('>')[0].strip().split('/')[0]
else:
placeholder = path.split('>')[1].strip().split('/')[0]
# SHARED
if placeholder == "$SHARED":
return path.replace(placeholder, 'pilot://')
# Expected placeholder format:
# $Pipeline_{pipeline.uid}_Stage_{stage.uid}_Task_{task.uid}
broken_placeholder = placeholder.split('/')[0].split('_')
if not len(broken_placeholder) == 6:
raise ValueError(
obj='placeholder',
attribute='task',
expected_value='$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name) or $SHARED',
actual_value=broken_placeholder)
pipeline_name = broken_placeholder[1]
stage_name = broken_placeholder[3]
task_name = broken_placeholder[5]
resolved_placeholder = None
if pipeline_name in placeholder_dict.keys():
if stage_name in placeholder_dict[pipeline_name].keys():
if task_name in placeholder_dict[pipeline_name][stage_name].keys():
resolved_placeholder = path.replace(placeholder, placeholder_dict[
pipeline_name][stage_name][task_name]['path'])
else:
logger.warning('%s not assigned to any task in Stage %s Pipeline %s' %
(task_name, stage_name, pipeline_name))
else:
logger.warning('%s not assigned to any Stage in Pipeline %s' % (
stage_name, pipeline_name))
else:
logger.warning('%s not assigned to any Pipeline' % (pipeline_name))
if not resolved_placeholder:
logger.warning('No placeholder could be found for task name %s, '
'stage name %s and pipeline name %s. Please be sure to '
'use object names and not uids in your references, i.e., '
'$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name)' %
(task_name, stage_name, pipeline_name))
raise ValueError(
obj='placeholder',
attribute='task',
expected_value='$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name) or $SHARED',
actual_value=broken_placeholder)
return resolved_placeholder
except Exception as ex:
logger.exception('Failed to resolve placeholder %s, error: %s' %(path, ex))
raise
|
**Purpose**: Substitute placeholders in staging attributes of a Task with actual paths to the corresponding tasks.
:arguments:
:path: string describing the staging paths, possibly containing a placeholder
:placeholder_dict: dictionary holding the values for placeholders
|
entailment
|
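For reference, a hedged sketch of the placeholder convention the function expects; the names below are hypothetical, and the real function additionally validates types and raises EnTK-specific errors.

```python
# A staging path may reference another task's sandbox via a placeholder of the form
#   $Pipeline_<pipeline_name>_Stage_<stage_name>_Task_<task_name>/some/file
# or the special token $SHARED, which maps to the pilot sandbox.
placeholder_dict = {'p1': {'s1': {'t1': {'path': '/scratch/run/p1.s1.t1'}}}}
path = '$Pipeline_p1_Stage_s1_Task_t1/output.dat > input.dat'

placeholder = path.split('>')[0].strip().split('/')[0]
_, pipe, _, stage, _, task = placeholder.split('_')
resolved = path.replace(placeholder, placeholder_dict[pipe][stage][task]['path'])
# resolved == '/scratch/run/p1.s1.t1/output.dat > input.dat'
```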
def get_input_list_from_task(task, placeholder_dict):
"""
Purpose: Parse a Task object to extract the files to be staged as input.
Details: The extracted data are then converted into the appropriate RP directives depending on whether the data
are to be linked, uploaded, copied or moved.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged in
"""
try:
if not isinstance(task, Task):
raise TypeError(expected_type=Task, actual_type=type(task))
input_data = []
if task.link_input_data:
for path in task.link_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.LINK
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.LINK
}
input_data.append(temp)
if task.upload_input_data:
for path in task.upload_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip()
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip())
}
input_data.append(temp)
if task.copy_input_data:
for path in task.copy_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.COPY
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.COPY
}
input_data.append(temp)
if task.move_input_data:
for path in task.move_input_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.MOVE
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.MOVE
}
input_data.append(temp)
return input_data
except Exception as ex:
logger.exception('Failed to get input list of files from task, error: %s' % ex)
raise
|
Purpose: Parse a Task object to extract the files to be staged as input.
Details: The extracted data are then converted into the appropriate RP directives depending on whether the data
are to be linked, uploaded, copied or moved.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged in
|
entailment
|
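The `source > target` convention used by the staging lists can be illustrated with a small helper; this is a sketch only, while the real code also resolves placeholders and attaches rp.LINK/COPY/MOVE actions.

```python
import os

def to_directive(path, action=None):
    """Turn 'source > target' (or just 'source') into a staging directive dict."""
    parts = [p.strip() for p in path.split('>')]
    directive = {
        'source': parts[0],
        'target': parts[1] if len(parts) > 1 else os.path.basename(parts[0]),
    }
    if action:
        directive['action'] = action
    return directive

# to_directive('data/input.dat > staged.dat')
#   == {'source': 'data/input.dat', 'target': 'staged.dat'}
```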
def get_output_list_from_task(task, placeholder_dict):
"""
Purpose: Parse a Task object to extract the files to be staged as the output.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged out
"""
try:
if not isinstance(task, Task):
raise TypeError(expected_type=Task, actual_type=type(task))
output_data = []
if task.copy_output_data:
for path in task.copy_output_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.COPY
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.COPY
}
output_data.append(temp)
if task.download_output_data:
for path in task.download_output_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip()
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip())
}
output_data.append(temp)
if task.move_output_data:
for path in task.move_output_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.MOVE
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.MOVE
}
output_data.append(temp)
return output_data
except Exception as ex:
logger.exception('Failed to get output list of files from task, error: %s' % ex)
raise
|
Purpose: Parse a Task object to extract the files to be staged as the output.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged out
|
entailment
|
def create_cud_from_task(task, placeholder_dict, prof=None):
"""
Purpose: Create a Compute Unit description based on the defined Task.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: ComputeUnitDescription
"""
try:
logger.debug('Creating CU from Task %s' % (task.uid))
if prof:
prof.prof('cud from task - create', uid=task.uid)
cud = rp.ComputeUnitDescription()
cud.name = '%s,%s,%s,%s,%s,%s' % (task.uid, task.name,
task.parent_stage['uid'], task.parent_stage['name'],
task.parent_pipeline['uid'], task.parent_pipeline['name'])
cud.pre_exec = task.pre_exec
cud.executable = task.executable
cud.arguments = resolve_arguments(task.arguments, placeholder_dict)
cud.post_exec = task.post_exec
if task.tag:
if task.parent_pipeline['name']:
cud.tag = resolve_tags( tag=task.tag,
parent_pipeline_name=task.parent_pipeline['name'],
placeholder_dict=placeholder_dict)
cud.cpu_processes = task.cpu_reqs['processes']
cud.cpu_threads = task.cpu_reqs['threads_per_process']
cud.cpu_process_type = task.cpu_reqs['process_type']
cud.cpu_thread_type = task.cpu_reqs['thread_type']
cud.gpu_processes = task.gpu_reqs['processes']
cud.gpu_threads = task.gpu_reqs['threads_per_process']
cud.gpu_process_type = task.gpu_reqs['process_type']
cud.gpu_thread_type = task.gpu_reqs['thread_type']
if task.lfs_per_process:
cud.lfs_per_process = task.lfs_per_process
if task.stdout:
cud.stdout = task.stdout
if task.stderr:
cud.stderr = task.stderr
cud.input_staging = get_input_list_from_task(task, placeholder_dict)
cud.output_staging = get_output_list_from_task(task, placeholder_dict)
if prof:
prof.prof('cud from task - done', uid=task.uid)
logger.debug('CU %s created from Task %s' % (cud.name, task.uid))
return cud
except Exception as ex:
logger.exception('CU creation failed, error: %s' % ex)
raise
|
Purpose: Create a Compute Unit description based on the defined Task.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: ComputeUnitDescription
|
entailment
|
def create_task_from_cu(cu, prof=None):
"""
Purpose: Create a Task based on the Compute Unit.
Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was
converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD.
Also, this is not required for the most part.
TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU
:arguments:
:cu: RP Compute Unit
:return: Task
"""
try:
logger.debug('Create Task from CU %s' % cu.name)
if prof:
prof.prof('task from cu - create',
uid=cu.name.split(',')[0].strip())
task = Task()
task.uid = cu.name.split(',')[0].strip()
task.name = cu.name.split(',')[1].strip()
task.parent_stage['uid'] = cu.name.split(',')[2].strip()
task.parent_stage['name'] = cu.name.split(',')[3].strip()
task.parent_pipeline['uid'] = cu.name.split(',')[4].strip()
task.parent_pipeline['name'] = cu.name.split(',')[5].strip()
task.rts_uid = cu.uid
if cu.state == rp.DONE:
task.exit_code = 0
else:
task.exit_code = 1
task.path = ru.Url(cu.sandbox).path
if prof:
prof.prof('task from cu - done', uid=cu.name.split(',')[0].strip())
logger.debug('Task %s created from CU %s' % (task.uid, cu.name))
return task
except Exception as ex:
logger.exception('Task creation from CU failed, error: %s' % ex)
raise
|
Purpose: Create a Task based on the Compute Unit.
Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was
converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD.
Also, this is not required for the most part.
TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU
:arguments:
:cu: RP Compute Unit
:return: Task
|
entailment
|
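The CU name packs six fields in a fixed, comma-separated order, which is what makes the round trip between Task and CU possible; a minimal decoding sketch with a hypothetical name string:

```python
cu_name = 'task.0001,my_task,stage.0001,my_stage,pipe.0001,my_pipe'
(task_uid, task_name,
 stage_uid, stage_name,
 pipe_uid, pipe_name) = [field.strip() for field in cu_name.split(',')]
# task_uid == 'task.0001', pipe_name == 'my_pipe'
```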
def handle_noargs(self, **options):
"""Send Report E-mails."""
r = get_r()
since = datetime.utcnow() - timedelta(days=1)
metrics = {}
categories = r.metric_slugs_by_category()
for category_name, slug_list in categories.items():
metrics[category_name] = []
for slug in slug_list:
metric_values = r.get_metric_history(slug, since=since)
metrics[category_name].append(
(slug, metric_values)
)
# metrics is now:
# --------------
# { Category : [
# ('foo', [('m:foo:2012-07-18', 1), ('m:foo:2012-07-19', 2), ...])
# ],
# ...
# }
template = "redis_metrics/email/report.{fmt}"
data = {
'today': since,
'metrics': metrics,
}
message = render_to_string(template.format(fmt='txt'), data)
message_html = render_to_string(template.format(fmt='html'), data)
msg = EmailMultiAlternatives(
subject="Redis Metrics Report",
body=message,
from_email=settings.DEFAULT_FROM_EMAIL,
to=[email for name, email in settings.ADMINS]
)
msg.attach_alternative(message_html, "text/html")
msg.send()
|
Send Report E-mails.
|
entailment
|
def luid(self):
"""
Unique ID of the current stage (fully qualified).
example:
>>> stage.luid
pipe.0001.stage.0004
:getter: Returns the fully qualified uid of the current stage
:type: String
"""
p_elem = self.parent_pipeline.get('name')
if not p_elem:
p_elem = self.parent_pipeline['uid']
s_elem = self.name
if not s_elem:
s_elem = self.uid
return '%s.%s' % (p_elem, s_elem)
|
Unique ID of the current stage (fully qualified).
example:
>>> stage.luid
pipe.0001.stage.0004
:getter: Returns the fully qualified uid of the current stage
:type: String
|
entailment
|
def add_tasks(self, value):
"""
Adds tasks to the existing set of tasks of the Stage
:argument: set of tasks
"""
tasks = self._validate_entities(value)
self._tasks.update(tasks)
self._task_count = len(self._tasks)
|
Adds tasks to the existing set of tasks of the Stage
:argument: set of tasks
|
entailment
|
def to_dict(self):
"""
Convert current Stage into a dictionary
:return: python dictionary
"""
stage_desc_as_dict = {
'uid': self._uid,
'name': self._name,
'state': self._state,
'state_history': self._state_history,
'parent_pipeline': self._p_pipeline
}
return stage_desc_as_dict
|
Convert current Stage into a dictionary
:return: python dictionary
|
entailment
|
def from_dict(self, d):
"""
Create a Stage from a dictionary. The change is made in place.
:argument: python dictionary
:return: None
"""
if 'uid' in d:
if d['uid']:
self._uid = d['uid']
if 'name' in d:
if d['name']:
self._name = d['name']
if 'state' in d:
if isinstance(d['state'], str) or isinstance(d['state'], unicode):
if d['state'] in states._stage_state_values.keys():
self._state = d['state']
else:
raise ValueError(obj=self._uid,
attribute='state',
expected_value=states._stage_state_values.keys(),
actual_value=d['state'])
else:
raise TypeError(entity='state', expected_type=str, actual_type=type(d['state']))
else:
self._state = states.INITIAL
if 'state_history' in d:
if isinstance(d['state_history'], list):
self._state_history = d['state_history']
else:
raise TypeError(entity='state_history', expected_type=list, actual_type=type(d['state_history']))
if 'parent_pipeline' in d:
if isinstance(d['parent_pipeline'], dict):
self._p_pipeline = d['parent_pipeline']
else:
raise TypeError(entity='parent_pipeline', expected_type=dict, actual_type=type(d['parent_pipeline']))
|
Create a Stage from a dictionary. The change is made in place.
:argument: python dictionary
:return: None
|
entailment
|
def _set_tasks_state(self, value):
"""
Purpose: Set state of all tasks of the current stage.
:arguments: String
"""
if value not in states.state_numbers.keys():
raise ValueError(obj=self._uid,
attribute='set_tasks_state',
expected_value=states.state_numbers.keys(),
actual_value=value)
for task in self._tasks:
task.state = value
|
Purpose: Set state of all tasks of the current stage.
:arguments: String
|
entailment
|
def _check_stage_complete(self):
"""
Purpose: Check if all tasks of the current stage have completed, i.e., are in either DONE or FAILED state.
"""
try:
for task in self._tasks:
if task.state not in [states.DONE, states.FAILED]:
return False
return True
except Exception as ex:
raise EnTKError(ex)
|
Purpose: Check if all tasks of the current stage have completed, i.e., are in either DONE or FAILED state.
|
entailment
|
def _validate_entities(self, tasks):
"""
Purpose: Validate whether the 'tasks' is of type set. Validate the description of each Task.
"""
if not tasks:
raise TypeError(expected_type=Task, actual_type=type(tasks))
if not isinstance(tasks, set):
if not isinstance(tasks, list):
tasks = set([tasks])
else:
tasks = set(tasks)
for t in tasks:
if not isinstance(t, Task):
raise TypeError(expected_type=Task, actual_type=type(t))
return tasks
|
Purpose: Validate whether the 'tasks' is of type set. Validate the description of each Task.
|
entailment
|
def _assign_uid(self, sid):
"""
Purpose: Assign a uid to the current object based on the sid passed. Pass the current uid to children of
current object
"""
self._uid = ru.generate_id('stage.%(item_counter)04d', ru.ID_CUSTOM, namespace=sid)
for task in self._tasks:
task._assign_uid(sid)
self._pass_uid()
|
Purpose: Assign a uid to the current object based on the sid passed. Pass the current uid to children of
current object
|
entailment
|
def _pass_uid(self):
"""
Purpose: Assign the parent Stage and the parent Pipeline to all the tasks of the current stage.
:arguments: set of Tasks (optional)
:return: list of updated Tasks
"""
for task in self._tasks:
task.parent_stage['uid'] = self._uid
task.parent_stage['name'] = self._name
task.parent_pipeline['uid'] = self._p_pipeline['uid']
task.parent_pipeline['name'] = self._p_pipeline['name']
|
Purpose: Assign the parent Stage and the parent Pipeline to all the tasks of the current stage.
:arguments: set of Tasks (optional)
:return: list of updated Tasks
|
entailment
|
def application(tokens):
"""Matches function call (application)."""
tokens = iter(tokens)
func = next(tokens)
paren = next(tokens)
if func and func.name == "symbol" and paren.name == "lparen":
# We would be able to unambiguously parse function application with
# whitespace between the function name and the lparen, but let's not
# do that because it's unexpected in most languages.
if func.end != paren.start:
raise errors.EfilterParseError(
start=func.start, end=paren.end,
message="No whitespace allowed between function and paren.")
return common.TokenMatch(None, func.value, (func, paren))
|
Matches function call (application).
|
entailment
|
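The adjacency check above relies on token offsets; a standalone sketch with a hypothetical token type shows the idea:

```python
from collections import namedtuple

# Hypothetical token with the fields the matcher relies on.
Token = namedtuple('Token', 'name value start end')

func = Token('symbol', 'count', 0, 5)
paren = Token('lparen', '(', 5, 6)

# func.end == paren.start means there is no whitespace between 'count' and '(',
# so the pair is treated as a function application: count(...)
is_application = (func.name == 'symbol' and paren.name == 'lparen'
                  and func.end == paren.start)
```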
def _make_spec_file(self):
"""Generates the text of an RPM spec file.
Returns:
A list of strings containing the lines of text.
"""
# Note that bdist_rpm can be an old style class.
if issubclass(BdistRPMCommand, object):
spec_file = super(BdistRPMCommand, self)._make_spec_file()
else:
spec_file = bdist_rpm._make_spec_file(self)
if sys.version_info[0] < 3:
python_package = "python"
else:
python_package = "python3"
description = []
summary = ""
in_description = False
python_spec_file = []
for line in spec_file:
if line.startswith("Summary: "):
summary = line
elif line.startswith("BuildRequires: "):
line = "BuildRequires: {0:s}-setuptools".format(python_package)
elif line.startswith("Requires: "):
if python_package == "python3":
line = line.replace("python", "python3")
elif line.startswith("%description"):
in_description = True
elif line.startswith("%files"):
line = "%files -f INSTALLED_FILES -n {0:s}-%{{name}}".format(
python_package)
elif line.startswith("%prep"):
in_description = False
python_spec_file.append(
"%package -n {0:s}-%{{name}}".format(python_package))
python_spec_file.append("{0:s}".format(summary))
python_spec_file.append("")
python_spec_file.append(
"%description -n {0:s}-%{{name}}".format(python_package))
python_spec_file.extend(description)
elif in_description:
# Ignore leading white lines in the description.
if not description and not line:
continue
description.append(line)
python_spec_file.append(line)
return python_spec_file
|
Generates the text of an RPM spec file.
Returns:
A list of strings containing the lines of text.
|
entailment
|
def resolve(self, name):
"""Call IStructured.resolve across all scopes and return first hit."""
for scope in reversed(self.scopes):
try:
return structured.resolve(scope, name)
except (KeyError, AttributeError):
continue
raise AttributeError(name)
|
Call IStructured.resolve across all scopes and return first hit.
|
entailment
|
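A standalone sketch of the same most-local-scope-wins lookup, using plain dicts in place of IStructured scopes:

```python
def resolve_in_scopes(scopes, name):
    """Search scopes from most local (last) to most global (first)."""
    for scope in reversed(scopes):
        try:
            return scope[name]
        except KeyError:
            continue
    raise AttributeError(name)

# resolve_in_scopes([{'x': 1}, {'x': 2, 'y': 3}], 'x') == 2   # the local scope shadows the global one
```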
def getmembers(self):
"""Gets members (vars) from all scopes, using both runtime and static.
This method will attempt both static and runtime getmembers. This is the
recommended way of getting available members.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'.
"""
names = set()
for scope in self.scopes:
if isinstance(scope, type):
names.update(structured.getmembers_static(scope))
else:
names.update(structured.getmembers_runtime(scope))
return names
|
Gets members (vars) from all scopes, using both runtime and static.
This method will attempt both static and runtime getmembers. This is the
recommended way of getting available members.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'.
|
entailment
|
def getmembers_runtime(self):
"""Gets members (vars) from all scopes using ONLY runtime information.
You most likely want to use ScopeStack.getmembers instead.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'.
"""
names = set()
for scope in self.scopes:
names.update(structured.getmembers_runtime(scope))
return names
|
Gets members (vars) from all scopes using ONLY runtime information.
You most likely want to use ScopeStack.getmembers instead.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'.
|
entailment
|
def getmembers_static(cls):
"""Gets members (vars) from all scopes using ONLY static information.
You most likely want to use ScopeStack.getmembers instead.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'.
"""
names = set()
for scope in cls.scopes:
names.update(structured.getmembers_static(scope))
return names
|
Gets members (vars) from all scopes using ONLY static information.
You most likely want to use ScopeStack.getmembers instead.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'.
|
entailment
|
def reflect(self, name):
"""Reflect 'name' starting with local scope all the way up to global.
This method will attempt both static and runtime reflection. This is the
recommended way of using reflection.
Returns:
Type of 'name', or protocol.AnyType.
Caveat:
The type of 'name' does not necessarily have to be an instance of
Python's type - it depends on what the host application returns
through the reflection API. For example, Rekall uses objects
generated at runtime to simulate a native (C/C++) type system.
"""
# Return whatever the most local scope defines this as, or bubble all
# the way to the top.
result = None
for scope in reversed(self.scopes):
try:
if isinstance(scope, type):
result = structured.reflect_static_member(scope, name)
else:
result = structured.reflect_runtime_member(scope, name)
if result is not None:
return result
except (NotImplementedError, KeyError, AttributeError):
continue
return protocol.AnyType
|
Reflect 'name' starting with local scope all the way up to global.
This method will attempt both static and runtime reflection. This is the
recommended way of using reflection.
Returns:
Type of 'name', or protocol.AnyType.
Caveat:
The type of 'name' does not necessarily have to be an instance of
Python's type - it depends on what the host application returns
through the reflection API. For example, Rekall uses objects
generated at runtime to simulate a native (C/C++) type system.
|
entailment
|
def reflect_runtime_member(self, name):
"""Reflect 'name' using ONLY runtime reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType.
"""
for scope in reversed(self.scopes):
try:
return structured.reflect_runtime_member(scope, name)
except (NotImplementedError, KeyError, AttributeError):
continue
return protocol.AnyType
|
Reflect 'name' using ONLY runtime reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType.
|
entailment
|
def reflect_static_member(cls, name):
"""Reflect 'name' using ONLY static reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType.
"""
for scope in reversed(cls.scopes):
try:
return structured.reflect_static_member(scope, name)
except (NotImplementedError, KeyError, AttributeError):
continue
return protocol.AnyType
|
Reflect 'name' using ONLY static reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType.
|
entailment
|
def get_hostmap(profile):
'''
We abuse the profile combination to also derive a pilot-host map, which
will tell us on what exact host each pilot has been running. To do so, we
check for the PMGR_ACTIVE advance event in agent_0.prof, and use the NTP
sync info to associate a hostname.
'''
# FIXME: This should be replaced by proper hostname logging
# in `pilot.resource_details`.
hostmap = dict() # map pilot IDs to host names
for entry in profile:
if entry[ru.EVENT] == 'hostname':
hostmap[entry[ru.UID]] = entry[ru.MSG]
return hostmap
|
We abuse the profile combination to also derive a pilot-host map, which
will tell us on what exact host each pilot has been running. To do so, we
check for the PMGR_ACTIVE advance event in agent_0.prof, and use the NTP
sync info to associate a hostname.
|
entailment
|
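A minimal sketch of the same mapping over simplified, hypothetical profile rows; real rows are radical.utils profile tuples indexed by ru.EVENT, ru.UID and ru.MSG.

```python
# Hypothetical, simplified profile rows: (event, uid, msg)
profile = [
    ('hostname', 'pilot.0000', 'nodeA.cluster'),
    ('advance',  'pilot.0000', ''),
    ('hostname', 'pilot.0001', 'nodeB.cluster'),
]

hostmap = {uid: msg for event, uid, msg in profile if event == 'hostname'}
# hostmap == {'pilot.0000': 'nodeA.cluster', 'pilot.0001': 'nodeB.cluster'}
```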
def get_hostmap_deprecated(profiles):
'''
This method mangles combine_profiles and get_hostmap, and is deprecated. At
this point it only returns the hostmap
'''
hostmap = dict() # map pilot IDs to host names
for pname, prof in profiles.items():
if not len(prof):
continue
if not prof[0][ru.MSG]:
continue
host, ip, _, _, _ = prof[0][ru.MSG].split(':')
host_id = '%s:%s' % (host, ip)
for row in prof:
if 'agent_0.prof' in pname and \
row[ru.EVENT] == 'advance' and \
row[ru.STATE] == rps.PMGR_ACTIVE:
hostmap[row[ru.UID]] = host_id
break
return hostmap
|
This method mangles combine_profiles and get_hostmap, and is deprecated. At
this point it only returns the hostmap
|
entailment
|
def run(self):
"""
**Purpose**: Run the application manager. Once the workflow and resource manager have been assigned, invoking this
method sets up the communication infrastructure, submits a resource request and then submits all the tasks for
execution.
"""
try:
# Set None objects local to each run
self._wfp = None
self._sync_thread = None
self._terminate_sync = Event()
self._resubmit_failed = False
self._cur_attempt = 1
if not self._workflow:
self._logger.error('No workflow assigned currently, please check your script')
raise MissingError(obj=self._uid, missing_attribute='workflow')
if not self._resource_manager:
self._logger.error('No resource manager assigned currently, please create and add a valid resource manager')
raise MissingError(obj=self._uid, missing_attribute='resource_manager')
self._prof.prof('amgr run started', uid=self._uid)
# Setup rabbitmq stuff
if not self._mqs_setup:
self._report.info('Setting up RabbitMQ system')
setup = self._setup_mqs()
if not setup:
self._logger.error('RabbitMQ system not available')
raise EnTKError("RabbitMQ setup failed")
self._mqs_setup = True
self._report.ok('>>ok\n')
# Create WFProcessor object
self._prof.prof('creating wfp obj', uid=self._uid)
self._wfp = WFprocessor(sid=self._sid,
workflow=self._workflow,
pending_queue=self._pending_queue,
completed_queue=self._completed_queue,
mq_hostname=self._mq_hostname,
port=self._port,
resubmit_failed=self._resubmit_failed)
self._wfp._initialize_workflow()
self._workflow = self._wfp.workflow
# Submit resource request if not resource allocation done till now or
# resubmit a new one if the old one has completed
if self._resource_manager:
res_alloc_state = self._resource_manager.get_resource_allocation_state()
if (not res_alloc_state) or (res_alloc_state in self._resource_manager.get_completed_states()):
self._logger.info('Starting resource request submission')
self._prof.prof('init rreq submission', uid=self._uid)
self._resource_manager._submit_resource_request()
res_alloc_state = self._resource_manager.get_resource_allocation_state()
if res_alloc_state in self._resource_manager.get_completed_states():
raise EnTKError(msg="Cannot proceed. Resource allocation ended up in %s"%res_alloc_state)
else:
self._logger.exception('Cannot run without resource manager, please create and assign a resource manager')
raise EnTKError(text='Missing resource manager')
# Start synchronizer thread
if not self._sync_thread:
self._logger.info('Starting synchronizer thread')
self._sync_thread = Thread(target=self._synchronizer, name='synchronizer-thread')
self._prof.prof('starting synchronizer thread', uid=self._uid)
self._sync_thread.start()
# Start WFprocessor
self._logger.info('Starting WFProcessor process from AppManager')
self._wfp.start_processor()
self._report.ok('All components created\n')
# Create tmgr object only if it does not already exist
if self._rts == 'radical.pilot':
from radical.entk.execman.rp import TaskManager
elif self._rts == 'mock':
from radical.entk.execman.mock import TaskManager
if not self._task_manager:
self._prof.prof('creating tmgr obj', uid=self._uid)
self._task_manager = TaskManager(sid=self._sid,
pending_queue=self._pending_queue,
completed_queue=self._completed_queue,
mq_hostname=self._mq_hostname,
rmgr=self._resource_manager,
port=self._port
)
self._logger.info('Starting task manager process from AppManager')
self._task_manager.start_manager()
self._task_manager.start_heartbeat()
active_pipe_count = len(self._workflow)
finished_pipe_uids = []
# We wait till all pipelines of the workflow are marked
# complete
while ((active_pipe_count > 0) and
(self._wfp.workflow_incomplete()) and
(self._resource_manager.get_resource_allocation_state() not
in self._resource_manager.get_completed_states())):
if active_pipe_count > 0:
for pipe in self._workflow:
with pipe.lock:
if (pipe.completed) and (pipe.uid not in finished_pipe_uids):
self._logger.info('Pipe %s completed' % pipe.uid)
finished_pipe_uids.append(pipe.uid)
active_pipe_count -= 1
self._logger.info('Active pipes: %s' % active_pipe_count)
if (not self._sync_thread.is_alive()) and (self._cur_attempt <= self._reattempts):
self._sync_thread = Thread(target=self._synchronizer,
name='synchronizer-thread')
self._logger.info('Restarting synchronizer thread')
self._prof.prof('restarting synchronizer', uid=self._uid)
self._sync_thread.start()
self._cur_attempt += 1
if (not self._wfp.check_processor()) and (self._cur_attempt <= self._reattempts):
"""
If WFP dies, both child threads are also cleaned out.
We simply recreate the wfp object with a copy of the workflow
in the appmanager and start the processor.
"""
self._prof.prof('recreating wfp obj', uid=self._uid)
self._wfp = WFprocessor(
sid=self._sid,
workflow=self._workflow,
pending_queue=self._pending_queue,
completed_queue=self._completed_queue,
mq_hostname=self._mq_hostname,
port=self._port,
resubmit_failed=self._resubmit_failed)
self._logger.info('Restarting WFProcessor process from AppManager')
self._wfp.start_processor()
self._cur_attempt += 1
if (not self._task_manager.check_heartbeat()) and (self._cur_attempt <= self._reattempts):
"""
If the tmgr process or heartbeat dies, we simply start a
new process using the start_manager method. We do not
need to create a new instance of the TaskManager object
itself. We stop and start a new instance of the
heartbeat thread as well.
"""
self._prof.prof('restarting tmgr process and heartbeat', uid=self._uid)
self._logger.info('Terminating heartbeat thread')
self._task_manager.terminate_heartbeat()
self._logger.info('Terminating tmgr process')
self._task_manager.terminate_manager()
self._logger.info('Restarting task manager process')
self._task_manager.start_manager()
self._logger.info('Restarting heartbeat thread')
self._task_manager.start_heartbeat()
self._cur_attempt += 1
self._prof.prof('start termination', uid=self._uid)
# Terminate threads in following order: wfp, helper, synchronizer
self._logger.info('Terminating WFprocessor')
self._wfp.terminate_processor()
self._logger.info('Terminating synchronizer thread')
self._terminate_sync.set()
self._sync_thread.join()
self._logger.info('Synchronizer thread terminated')
if self._autoterminate:
self.resource_terminate()
if self._write_workflow:
write_workflow(self._workflow, self._sid)
self._prof.prof('termination done', uid=self._uid)
except KeyboardInterrupt:
self._prof.prof('start termination', uid=self._uid)
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to cancel enqueuer thread gracefully...')
# Terminate threads in following order: wfp, helper, synchronizer
if self._wfp:
self._logger.info('Terminating WFprocessor')
self._wfp.terminate_processor()
if self._task_manager:
self._logger.info('Terminating task manager process')
self._task_manager.terminate_manager()
self._task_manager.terminate_heartbeat()
if self._sync_thread:
self._logger.info('Terminating synchronizer thread')
self._terminate_sync.set()
self._sync_thread.join()
self._logger.info('Synchronizer thread terminated')
if self._resource_manager:
self._resource_manager._terminate_resource_request()
self._prof.prof('termination done', uid=self._uid)
raise KeyboardInterrupt
except Exception as ex:
self._prof.prof('start termination', uid=self._uid)
self._logger.exception('Error in AppManager: %s' % ex)
# Terminate threads in following order: wfp, helper, synchronizer
if self._wfp:
self._logger.info('Terminating WFprocessor')
self._wfp.terminate_processor()
if self._task_manager:
self._logger.info('Terminating task manager process')
self._task_manager.terminate_manager()
self._task_manager.terminate_heartbeat()
if self._sync_thread:
self._logger.info('Terminating synchronizer thread')
self._terminate_sync.set()
self._sync_thread.join()
self._logger.info('Synchronizer thread terminated')
if self._resource_manager:
self._resource_manager._terminate_resource_request()
self._prof.prof('termination done', uid=self._uid)
raise
|
**Purpose**: Run the application manager. Once the workflow and resource manager have been assigned, invoking this
method sets up the communication infrastructure, submits a resource request and then submits all the tasks for
execution.
|
entailment
|
def _setup_mqs(self):
"""
**Purpose**: Set up the RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication
between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for
communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for
communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue
'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager.
Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can
be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages
and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new
one.
"""
try:
self._prof.prof('init mqs setup', uid=self._uid)
self._logger.debug('Setting up mq connection and channel')
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
self._logger.debug('Connection and channel setup successful')
self._logger.debug('Setting up all exchanges and queues')
qs = [
'%s-tmgr-to-sync' % self._sid,
'%s-cb-to-sync' % self._sid,
'%s-enq-to-sync' % self._sid,
'%s-deq-to-sync' % self._sid,
'%s-sync-to-tmgr' % self._sid,
'%s-sync-to-cb' % self._sid,
'%s-sync-to-enq' % self._sid,
'%s-sync-to-deq' % self._sid
]
for i in range(1, self._num_pending_qs + 1):
queue_name = '%s-pendingq-%s' % (self._sid, i)
self._pending_queue.append(queue_name)
qs.append(queue_name)
for i in range(1, self._num_completed_qs + 1):
queue_name = '%s-completedq-%s' % (self._sid, i)
self._completed_queue.append(queue_name)
qs.append(queue_name)
f = open('.%s.txt' % self._sid, 'w')
for q in qs:
# Durable Qs will not be lost if rabbitmq server crashes
mq_channel.queue_declare(queue=q)
f.write(q + '\n')
f.close()
self._logger.debug('All exchanges and queues are setup')
self._prof.prof('mqs setup done', uid=self._uid)
return True
except Exception as ex:
self._logger.exception('Error setting up the RabbitMQ system: %s' % ex)
raise
|
**Purpose**: Set up the RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication
between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for
communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for
communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue
'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager.
Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can
be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages
and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new
one.
|
entailment
|
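The docstring above describes the queues as durable, but with pika durability has to be requested explicitly at declaration time. A hedged sketch with a hypothetical queue name (note that the code above calls `queue_declare` without this flag):

```python
import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost', port=5672))
channel = connection.channel()

# durable=True asks RabbitMQ to persist the queue definition across broker restarts;
# messages additionally need delivery_mode=2 at publish time to survive as well.
channel.queue_declare(queue='mysession-pendingq-1', durable=True)
connection.close()
```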
def _synchronizer(self):
"""
**Purpose**: Thread in the master process to keep the workflow data
structure in appmanager up to date. We receive pipelines, stages and
tasks objects directly. The respective object is updated in this master
process.
Details: It is important to note that an acknowledgement of the type
channel.basic_ack() is an acknowledgement to the server that the msg
was received. This is not to be confused with the ack sent to the
enqueuer/dequeuer/task_manager through the sync-ack queue.
"""
try:
self._prof.prof('synchronizer started', uid=self._uid)
self._logger.info('synchronizer thread started')
def task_update(msg, reply_to, corr_id, mq_channel):
completed_task = Task()
completed_task.from_dict(msg['object'])
self._logger.info('Received %s with state %s' % (completed_task.uid, completed_task.state))
found_task = False
# Traverse the entire workflow to find the correct task
for pipe in self._workflow:
if not pipe.completed:
if completed_task.parent_pipeline['uid'] == pipe.uid:
for stage in pipe.stages:
if completed_task.parent_stage['uid'] == stage.uid:
for task in stage.tasks:
if (completed_task.uid == task.uid) and (completed_task.state != task.state):
task.state = str(completed_task.state)
self._logger.debug('Found task %s with state %s' %
(task.uid, task.state))
if completed_task.path:
task.path = str(completed_task.path)
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % task.uid)
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
self._report.ok('Update: ')
self._report.info('%s state: %s\n' % (task.luid, task.state))
found_task = True
if not found_task:
# If there was a Task update, but the Task was not found in any of the Pipelines. This
# means that this was a Task that was added during runtime and the AppManager does not
# know about it. The current solution is going to be: add it to the workflow object in the
# AppManager via the synchronizer.
self._prof.prof('Adap: adding new task')
self._logger.info('Adding new task %s to parent stage: %s' % (completed_task.uid,
stage.uid))
stage.add_tasks(completed_task)
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % completed_task.uid)
self._prof.prof('Adap: added new task')
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
self._report.ok('Update: ')
self._report.info('%s state: %s\n' %
(completed_task.luid, completed_task.state))
def stage_update(msg, reply_to, corr_id, mq_channel):
completed_stage = Stage()
completed_stage.from_dict(msg['object'])
self._logger.info('Received %s with state %s' % (completed_stage.uid, completed_stage.state))
found_stage = False
# Traverse the entire workflow to find the correct stage
for pipe in self._workflow:
if not pipe.completed:
if completed_stage.parent_pipeline['uid'] == pipe.uid:
self._logger.info('Found parent pipeline: %s' % pipe.uid)
for stage in pipe.stages:
if (completed_stage.uid == stage.uid) and (completed_stage.state != stage.state):
self._logger.debug('Found stage %s' % stage.uid)
stage.state = str(completed_stage.state)
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % stage.uid)
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
self._report.ok('Update: ')
self._report.info('%s state: %s\n' % (stage.luid, stage.state))
found_stage = True
if not found_stage:
# If there was a Stage update, but the Stage was not found in any of the Pipelines. This
# means that this was a Stage that was added during runtime and the AppManager does not
# know about it. The current solution is going to be: add it to the workflow object in the
# AppManager via the synchronizer.
self._prof.prof('Adap: adding new stage', uid=self._uid)
self._logger.info('Adding new stage %s to parent pipeline: %s' % (completed_stage.uid,
pipe.uid))
pipe.add_stages(completed_stage)
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % completed_stage.uid)
self._prof.prof('Adap: added new stage', uid=self._uid)
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def pipeline_update(msg, reply_to, corr_id, mq_channel):
completed_pipeline = Pipeline()
completed_pipeline.from_dict(msg['object'])
self._logger.info('Received %s with state %s' % (completed_pipeline.uid, completed_pipeline.state))
# Traverse the entire workflow to find the correct pipeline
for pipe in self._workflow:
if not pipe.completed:
if (completed_pipeline.uid == pipe.uid) and (completed_pipeline.state != pipe.state):
pipe.state = str(completed_pipeline.state)
self._logger.info('Found pipeline %s, state %s, completed %s' % (pipe.uid,
pipe.state,
pipe.completed)
)
# Reply with ack msg to the sender
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % pipe.uid)
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
# Keep the assignment of the completed flag after sending the acknowledgment
# back. Otherwise the MainThread takes lock over the pipeline because of logging
# and profiling
if completed_pipeline.completed:
pipe._completed_flag.set()
self._report.ok('Update: ')
self._report.info('%s state: %s\n' % (pipe.luid, pipe.state))
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
last = time.time()
while not self._terminate_sync.is_set():
#-------------------------------------------------------------------------------------------------------
# Messages between tmgr Main thread and synchronizer -- only Task objects
method_frame, props, body = mq_channel.basic_get(queue='%s-tmgr-to-sync' % self._sid)
"""
The message received is a JSON object with the following structure:
msg = {
'type': 'Pipeline'/'Stage'/'Task',
'object': json/dict
}
"""
if body:
msg = json.loads(body)
self._prof.prof('received obj with state %s for sync' %
msg['object']['state'], uid=msg['object']['uid'])
self._logger.debug('received %s with state %s for sync' %
(msg['object']['uid'], msg['object']['state']))
if msg['type'] == 'Task':
task_update(msg, '%s-sync-to-tmgr' % self._sid, props.correlation_id, mq_channel)
#-------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------
# Messages between callback thread and synchronizer -- only Task objects
method_frame, props, body = mq_channel.basic_get(queue='%s-cb-to-sync' % self._sid)
"""
The message received is a JSON object with the following structure:
msg = {
'type': 'Pipeline'/'Stage'/'Task',
'object': json/dict
}
"""
if body:
msg = json.loads(body)
self._prof.prof('received obj with state %s for sync' %
msg['object']['state'], uid=msg['object']['uid'])
self._logger.debug('received %s with state %s for sync' %
(msg['object']['uid'], msg['object']['state']))
if msg['type'] == 'Task':
task_update(msg, '%s-sync-to-cb' % self._sid, props.correlation_id, mq_channel)
#-------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------
# Messages between enqueue thread and synchronizer -- Task, Stage or Pipeline
method_frame, props, body = mq_channel.basic_get(queue='%s-enq-to-sync' % self._sid)
if body:
msg = json.loads(body)
self._prof.prof('received obj with state %s for sync' %
msg['object']['state'], uid=msg['object']['uid'])
self._logger.debug('received %s with state %s for sync' %
(msg['object']['uid'], msg['object']['state']))
if msg['type'] == 'Task':
task_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel)
elif msg['type'] == 'Stage':
stage_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel)
elif msg['type'] == 'Pipeline':
pipeline_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel)
#-------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------
# Messages between dequeue thread and synchronizer -- Task, Stage or Pipeline
method_frame, props, body = mq_channel.basic_get(queue='%s-deq-to-sync' % self._sid)
if body:
msg = json.loads(body)
self._prof.prof('received obj with state %s for sync' %
msg['object']['state'], uid=msg['object']['uid'])
self._logger.debug('received %s with state %s for sync' %
(msg['object']['uid'], msg['object']['state']))
if msg['type'] == 'Task':
task_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel)
elif msg['type'] == 'Stage':
stage_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel)
elif msg['type'] == 'Pipeline':
pipeline_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel)
#-------------------------------------------------------------------------------------------------------
# Appease pika, which otherwise assumes the connection is dead
now = time.time()
if now - last >= self._rmq_ping_interval:
mq_connection.process_data_events()
last = now
self._prof.prof('terminating synchronizer', uid=self._uid)
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to terminate synchronizer thread gracefully...')
raise KeyboardInterrupt
except Exception as ex:
self._logger.exception('Unknown error in synchronizer: %s. \n Terminating thread' % ex)
raise
|
**Purpose**: Thread in the master process to keep the workflow data
structure in appmanager up to date. We receive pipelines, stages and
tasks objects directly. The respective object is updated in this master
process.
Details: It is important to note that an acknowledgement of the type
channel.basic_ack() is an acknowledgement to the server that the msg
was received. This is not to be confused with the ack sent to the
enqueuer/dequeuer/task_manager through the sync-ack queue.
|
entailment
|
def categorize_metrics(self):
"""Called only on a valid form, this method will place the chosen
metrics in the given category."""
category = self.cleaned_data['category_name']
metrics = self.cleaned_data['metrics']
self.r.reset_category(category, metrics)
|
Called only on a valid form, this method will place the chosen
metrics in the given category.
|
entailment
|
def _submit_resource_request(self):
"""
**Purpose**: Create and submit a RADICAL Pilot job as per the
user-provided resource description
"""
try:
self._prof.prof('creating rreq', uid=self._uid)
def _pilot_state_cb(pilot, state):
self._logger.info('Pilot %s state: %s' % (pilot.uid, state))
if state == rp.FAILED:
self._logger.error('Pilot has failed')
elif state == rp.DONE:
self._logger.error('Pilot has completed')
self._session = rp.Session(dburl=self._mlab_url, uid=self._sid)
self._pmgr = rp.PilotManager(session=self._session)
self._pmgr.register_callback(_pilot_state_cb)
pd_init = {
'resource': self._resource,
'runtime': self._walltime,
'cores': self._cpus,
'project': self._project,
}
if self._gpus:
pd_init['gpus'] = self._gpus
if self._access_schema:
pd_init['access_schema'] = self._access_schema
if self._queue:
pd_init['queue'] = self._queue
if self._rts_config.get('sandbox_cleanup', None):
pd_init['cleanup'] = True
# Create Compute Pilot with validated resource description
pdesc = rp.ComputePilotDescription(pd_init)
self._prof.prof('rreq created', uid=self._uid)
# Launch the pilot
self._pilot = self._pmgr.submit_pilots(pdesc)
self._prof.prof('rreq submitted', uid=self._uid)
shared_staging_directives = list()
for data in self._shared_data:
temp = {
'source': data,
'target': 'pilot:///' + os.path.basename(data)
}
shared_staging_directives.append(temp)
self._pilot.stage_in(shared_staging_directives)
self._prof.prof('shared data staging initiated', uid=self._uid)
self._logger.info('Resource request submission successful.. waiting for pilot to go Active')
# Wait for pilot to go active
self._pilot.wait([rp.PMGR_ACTIVE, rp.FAILED, rp.CANCELED])
self._prof.prof('resource active', uid=self._uid)
self._logger.info('Pilot is now active')
except KeyboardInterrupt:
if self._session:
self._session.close()
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to exit callback thread gracefully...')
raise KeyboardInterrupt
except Exception as ex:
self._logger.exception('Resource request submission failed')
raise
|
**Purpose**: Create and submit a RADICAL Pilot job as per the
user-provided resource description
|
entailment
|
def _terminate_resource_request(self):
"""
**Purpose**: Cancel the RADICAL Pilot Job
"""
try:
if self._pilot:
self._prof.prof('canceling resource allocation', uid=self._uid)
self._pilot.cancel()
download_rp_profile = os.environ.get('RADICAL_PILOT_PROFILE', False)
self._session.close(cleanup=self._rts_config.get('db_cleanup', False),
download=download_rp_profile)
self._prof.prof('resource allocation canceled', uid=self._uid)
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to exit callback thread gracefully...')
raise KeyboardInterrupt
except Exception as ex:
self._logger.exception('Could not cancel resource request, error: %s' % ex)
raise
|
**Purpose**: Cancel the RADICAL Pilot Job
|
entailment
|
def get_list(self, size=100, startIndex=0, searchText="", sortProperty="", sortOrder='ASC', status='Active,Pending'):
"""
Request service locations
Returns
-------
dict
"""
url = urljoin(BASEURL, "sites", "list")
params = {
'api_key': self.token,
'size': size,
'startIndex': startIndex,
'sortOrder': sortOrder,
'status': status
}
if searchText:
params['searchText'] = searchText
if sortProperty:
params['sortProperty'] = sortProperty
r = requests.get(url, params)
r.raise_for_status()
return r.json()
|
Request service locations
Returns
-------
dict
|
entailment
|
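A hedged usage sketch, assuming `client` is an instance of the class that defines this method and holds a valid API token; inspect the returned dict before paginating, since the exact response layout depends on the service.

```python
# Hypothetical client usage: fetch one page of service locations.
page = client.get_list(size=50, startIndex=0, searchText="Plant",
                       sortProperty="name", sortOrder='ASC')
# 'page' is the decoded JSON dict returned by the service; read the total count
# from it and request further pages by increasing startIndex in steps of 'size'.
```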
def match(self, f, *args):
"""Match grammar function 'f' against next token and set 'self.matched'.
Arguments:
f: A grammar function - see efilter.parsers.common.grammar. Must
return TokenMatch or None.
args: Passed to 'f', if any.
Returns:
Instance of efilter.parsers.common.grammar.TokenMatch or None.
Comment:
If a match is returned, it will also be stored in self.matched.
"""
try:
match = f(self.tokenizer, *args)
except StopIteration:
# The grammar function might have tried to access more tokens than
# are available. That's not really an error, it just means it didn't
# match.
return
if match is None:
return
if not isinstance(match, grammar.TokenMatch):
raise TypeError("Invalid grammar function %r returned %r."
% (f, match))
self.matched = match
return match
|
Match grammar function 'f' against next token and set 'self.matched'.
Arguments:
f: A grammar function - see efilter.parsers.common.grammar. Must
return TokenMatch or None.
args: Passed to 'f', if any.
Returns:
Instance of efilter.parsers.common.grammar.TokenMatch or None.
Comment:
If a match is returned, it will also be stored in self.matched.
|
entailment
|
def accept(self, f, *args):
"""Like 'match', but consume the token (tokenizer advances.)"""
match = self.match(f, *args)
if match is None:
return
self.tokenizer.skip(len(match.tokens))
return match
|
Like 'match', but consume the token (tokenizer advances.)
|
entailment
|
def reject(self, f, *args):
"""Like 'match', but throw a parse error if 'f' matches.
This is useful when a parser wants to be strict about specific things
being prohibited. For example, DottySQL bans the use of SQL keywords as
variable names.
"""
match = self.match(f, *args)
if match:
token = self.peek(0)
raise errors.EfilterParseError(
query=self.tokenizer.source, token=token,
message="Was not expecting a %s here." % token.name)
|
Like 'match', but throw a parse error if 'f' matches.
This is useful when a parser wants to be strict about specific things
being prohibited. For example, DottySQL bans the use of SQL keywords as
variable names.
|
entailment
|
def expect(self, f, *args):
"""Like 'accept' but throws a parse error if 'f' doesn't match."""
match = self.accept(f, *args)
if match:
return match
try:
func_name = f.func_name
except AttributeError:
func_name = "<unnamed grammar function>"
start, end = self.current_position()
raise errors.EfilterParseError(
query=self.tokenizer.source, start=start, end=end,
message="Was expecting %s here." % (func_name))
|
Like 'accept' but throws a parse error if 'f' doesn't match.
|
entailment
|
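To show how match/accept/expect are meant to compose, here is a minimal, self-contained sketch in the same spirit; the Toy* classes and the number() grammar function are stand-ins invented for the example and are not part of efilter.

from collections import namedtuple

Token = namedtuple("Token", "name value start end")
TokenMatch = namedtuple("TokenMatch", "operator value tokens")

class ToyTokenizer(object):
    def __init__(self, tokens):
        self._tokens = list(tokens)
        self.source = " ".join(t.value for t in self._tokens)
        self.position = 0

    def peek(self, n):
        idx = self.position + n
        return self._tokens[idx] if idx < len(self._tokens) else None

    def skip(self, n):
        self.position += n

def number(tokenizer):
    """Grammar function: match a single 'number' token, or return None."""
    token = tokenizer.peek(0)
    if token and token.name == "number":
        return TokenMatch(None, token.value, (token,))

class ToyParser(object):
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer
        self.matched = None

    def match(self, f, *args):          # look, don't consume
        m = f(self.tokenizer, *args)
        if m:
            self.matched = m
        return m

    def accept(self, f, *args):         # consume on success
        m = self.match(f, *args)
        if m:
            self.tokenizer.skip(len(m.tokens))
        return m

    def expect(self, f, *args):         # consume or fail loudly
        m = self.accept(f, *args)
        if not m:
            raise SyntaxError("was expecting %s" % f.__name__)
        return m

parser = ToyParser(ToyTokenizer([Token("number", "1", 0, 1),
                                 Token("number", "2", 2, 3)]))
print(parser.expect(number).value)   # '1' (consumed)
print(parser.accept(number).value)   # '2' (consumed)
print(parser.accept(number))         # None (input exhausted)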
def current_position(self):
"""Return a tuple of (start, end)."""
token = self.tokenizer.peek(0)
if token:
return token.start, token.end
return self.tokenizer.position, self.tokenizer.position + 1
|
Return a tuple of (start, end).
|
entailment
|
def ComplementEquivalence(*args, **kwargs):
"""Change x != y to not(x == y)."""
return ast.Complement(
ast.Equivalence(*args, **kwargs), **kwargs)
|
Change x != y to not(x == y).
|
entailment
|
def ComplementMembership(*args, **kwargs):
"""Change (x not in y) to not(x in y)."""
return ast.Complement(
ast.Membership(*args, **kwargs), **kwargs)
|
Change (x not in y) to not(x in y).
|
entailment
|
def ReverseComplementMembership(x, y, **kwargs):
"""Change (x doesn't contain y) to not(y in x)."""
return ast.Complement(
ast.Membership(y, x, **kwargs), **kwargs)
|
Change (x doesn't contain y) to not(y in x).
|
entailment
|
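These three helpers normalise negated operators so the solver only ever sees the positive form wrapped in a Complement. A rough, self-contained sketch of the resulting tree shapes, with namedtuples standing in for the real ast classes (an assumption made purely for illustration):

from collections import namedtuple

# Simplified stand-ins for ast.Complement / ast.Equivalence / ast.Membership.
Complement = namedtuple("Complement", "value")
Equivalence = namedtuple("Equivalence", "lhs rhs")
Membership = namedtuple("Membership", "element container")

def complement_equivalence(x, y):
    # "x != y"  ->  not (x == y)
    return Complement(Equivalence(x, y))

def complement_membership(x, y):
    # "x not in y"  ->  not (x in y)
    return Complement(Membership(x, y))

def reverse_complement_membership(x, y):
    # "x doesn't contain y"  ->  not (y in x)
    return Complement(Membership(y, x))

print(complement_equivalence("a", "b"))
# Complement(value=Equivalence(lhs='a', rhs='b'))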
def __solve_for_repeated(expr, vars):
"""Helper: solve 'expr' always returning an IRepeated.
If the result of solving 'expr' is a list or a tuple of IStructured objects
    then treat it as a repeated value of IStructured objects because that's
    what the caller meant to do. This is a convenience helper so users of the
API don't have to create IRepeated objects.
If the result of solving 'expr' is a scalar then return it as a repeated
value of one element.
Arguments:
expr: Expression to solve.
vars: The scope.
Returns:
IRepeated result of solving 'expr'.
        A boolean to indicate whether the original was repeating.
"""
var = solve(expr, vars).value
if (var and isinstance(var, (tuple, list))
and protocol.implements(var[0], structured.IStructured)):
return repeated.meld(*var), False
return var, repeated.isrepeating(var)
|
Helper: solve 'expr' always returning an IRepeated.
If the result of solving 'expr' is a list or a tuple of IStructured objects
then treat it as a repeated value of IStructured objects because that's
what the caller meant to do. This is a convenience helper so users of the
API don't have to create IRepeated objects.
If the result of solving 'expr' is a scalar then return it as a repeated
value of one element.
Arguments:
expr: Expression to solve.
vars: The scope.
Returns:
IRepeated result of solving 'expr'.
A boolean to indicate whether the original was repeating.
|
entailment
|
def __solve_for_scalar(expr, vars):
"""Helper: solve 'expr' always returning a scalar (not IRepeated).
If the output of 'expr' is a single value or a single RowTuple with a single
column then return the value in that column. Otherwise raise.
Arguments:
expr: Expression to solve.
vars: The scope.
Returns:
A scalar value (not an IRepeated).
Raises:
EfilterTypeError if it cannot get a scalar.
"""
var = solve(expr, vars).value
try:
scalar = repeated.getvalue(var)
except TypeError:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="Wasn't expecting more than one value here. Got %r."
% (var,))
if isinstance(scalar, row_tuple.RowTuple):
try:
return scalar.get_singleton()
except ValueError:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="Was expecting a scalar value here. Got %r."
% (scalar,))
else:
return scalar
|
Helper: solve 'expr' always returning a scalar (not IRepeated).
If the output of 'expr' is a single value or a single RowTuple with a single
column then return the value in that column. Otherwise raise.
Arguments:
expr: Expression to solve.
vars: The scope.
Returns:
A scalar value (not an IRepeated).
Raises:
EfilterTypeError if it cannot get a scalar.
|
entailment
|
def __solve_and_destructure_repeated(expr, vars):
"""Helper: solve 'expr' always returning a list of scalars.
If the output of 'expr' is one or more row tuples with only a single column
then return a repeated value of values in that column. If there are more
than one column per row then raise.
This returns a list because there's no point in wrapping the scalars in
a repeated value for use internal to the implementing solver.
Returns:
Two values:
- An iterator (not an IRepeated!) of scalars.
- A boolean to indicate whether the original value was repeating.
Raises:
EfilterTypeError if the values don't conform.
"""
iterable, isrepeating = __solve_for_repeated(expr, vars)
if iterable is None:
return (), isrepeating
if not isrepeating:
return [iterable], False
values = iter(iterable)
try:
value = next(values)
except StopIteration:
return (), True
if not isinstance(value, row_tuple.RowTuple):
result = [value]
# We skip type checking the remaining values because it'd be slow.
result.extend(values)
return result, True
try:
result = [value.get_singleton()]
for value in values:
result.append(value.get_singleton())
return result, True
except ValueError:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="Was expecting exactly one column in %r." % (value,))
|
Helper: solve 'expr' always returning a list of scalars.
If the output of 'expr' is one or more row tuples with only a single column
then return a repeated value of values in that column. If there are more
than one column per row then raise.
This returns a list because there's no point in wrapping the scalars in
a repeated value for use internal to the implementing solver.
Returns:
Two values:
- An iterator (not an IRepeated!) of scalars.
- A boolean to indicate whether the original value was repeating.
Raises:
EfilterTypeError if the values don't conform.
|
entailment
|
def solve_var(expr, vars):
"""Returns the value of the var named in the expression."""
try:
return Result(structured.resolve(vars, expr.value), ())
except (KeyError, AttributeError) as e:
# Raise a better exception for accessing a non-existent member.
raise errors.EfilterKeyError(root=expr, key=expr.value, message=e,
query=expr.source)
except (TypeError, ValueError) as e:
# Raise a better exception for what is probably a null pointer error.
if vars.locals is None:
raise errors.EfilterNoneError(
root=expr, query=expr.source,
message="Trying to access member %r of a null." % expr.value)
else:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="%r (vars: %r)" % (e, vars))
except NotImplementedError as e:
raise errors.EfilterError(
root=expr, query=expr.source,
message="Trying to access member %r of an instance of %r." %
(expr.value, type(vars)))
|
Returns the value of the var named in the expression.
|
entailment
|
def solve_select(expr, vars):
"""Use IAssociative.select to get key (rhs) from the data (lhs).
This operation supports both scalars and repeated values on the LHS -
selecting from a repeated value implies a map-like operation and returns a
new repeated value.
"""
data, _ = __solve_for_repeated(expr.lhs, vars)
key = solve(expr.rhs, vars).value
try:
results = [associative.select(d, key) for d in repeated.getvalues(data)]
except (KeyError, AttributeError):
# Raise a better exception for accessing a non-existent key.
raise errors.EfilterKeyError(root=expr, key=key, query=expr.source)
except (TypeError, ValueError):
# Raise a better exception for what is probably a null pointer error.
if vars.locals is None:
raise errors.EfilterNoneError(
root=expr, query=expr.source,
message="Cannot select key %r from a null." % key)
else:
raise
except NotImplementedError:
raise errors.EfilterError(
root=expr, query=expr.source,
message="Cannot select keys from a non-associative value.")
return Result(repeated.meld(*results), ())
|
Use IAssociative.select to get key (rhs) from the data (lhs).
This operation supports both scalars and repeated values on the LHS -
selecting from a repeated value implies a map-like operation and returns a
new repeated value.
|
entailment
|
def solve_resolve(expr, vars):
"""Use IStructured.resolve to get member (rhs) from the object (lhs).
This operation supports both scalars and repeated values on the LHS -
resolving from a repeated value implies a map-like operation and returns a
new repeated values.
"""
objs, _ = __solve_for_repeated(expr.lhs, vars)
member = solve(expr.rhs, vars).value
try:
results = [structured.resolve(o, member)
for o in repeated.getvalues(objs)]
except (KeyError, AttributeError):
# Raise a better exception for the non-existent member.
raise errors.EfilterKeyError(root=expr.rhs, key=member,
query=expr.source)
except (TypeError, ValueError):
# Is this a null object error?
if vars.locals is None:
raise errors.EfilterNoneError(
root=expr, query=expr.source,
message="Cannot resolve member %r from a null." % member)
else:
raise
except NotImplementedError:
raise errors.EfilterError(
root=expr, query=expr.source,
message="Cannot resolve members from a non-structured value.")
return Result(repeated.meld(*results), ())
|
Use IStructured.resolve to get member (rhs) from the object (lhs).
This operation supports both scalars and repeated values on the LHS -
resolving from a repeated value implies a map-like operation and returns a
new repeated values.
|
entailment
|
def solve_apply(expr, vars):
"""Returns the result of applying function (lhs) to its arguments (rest).
We use IApplicative to apply the function, because that gives the host
application an opportunity to compare the function being called against
a whitelist. EFILTER will never directly call a function that wasn't
provided through a protocol implementation.
"""
func = __solve_for_scalar(expr.func, vars)
args = []
kwargs = {}
for arg in expr.args:
if isinstance(arg, ast.Pair):
if not isinstance(arg.lhs, ast.Var):
raise errors.EfilterError(
root=arg.lhs,
message="Invalid argument name.")
kwargs[arg.key.value] = solve(arg.value, vars).value
else:
args.append(solve(arg, vars).value)
result = applicative.apply(func, args, kwargs)
return Result(result, ())
|
Returns the result of applying function (lhs) to its arguments (rest).
We use IApplicative to apply the function, because that gives the host
application an opportunity to compare the function being called against
a whitelist. EFILTER will never directly call a function that wasn't
provided through a protocol implementation.
|
entailment
|
def solve_bind(expr, vars):
"""Build a RowTuple from key/value pairs under the bind.
The Bind subtree is arranged as follows:
Bind
| First KV Pair
| | First Key Expression
| | First Value Expression
| Second KV Pair
| | Second Key Expression
| | Second Value Expression
Etc...
As we evaluate the subtree, each subsequent KV pair is evaluated with
    all the previous bindings already in scope. For example:
bind(x: 5, y: x + 5) # Will bind y = 10 because x is already available.
"""
value_expressions = []
keys = []
for pair in expr.children:
keys.append(solve(pair.key, vars).value)
value_expressions.append(pair.value)
result = row_tuple.RowTuple(ordered_columns=keys)
intermediate_scope = scope.ScopeStack(vars, result)
for idx, value_expression in enumerate(value_expressions):
value = solve(value_expression, intermediate_scope).value
# Update the intermediate bindings so as to make earlier bindings
# already available to the next child-expression.
result[keys[idx]] = value
return Result(result, ())
|
Build a RowTuple from key/value pairs under the bind.
The Bind subtree is arranged as follows:
Bind
| First KV Pair
| | First Key Expression
| | First Value Expression
| Second KV Pair
| | Second Key Expression
| | Second Value Expression
Etc...
As we evaluate the subtree, each subsequent KV pair is evaluated with
all the previous bindings already in scope. For example:
bind(x: 5, y: x + 5) # Will bind y = 10 because x is already available.
|
entailment
|
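The important detail in solve_bind is that each value expression sees every earlier binding. A stand-alone sketch of that sequential scoping, with plain dicts and callables standing in for RowTuple, ScopeStack and the solver (all assumptions for illustration only):

def bind_sketch(pairs, outer_scope):
    """pairs: ordered list of (key, value_expression) where each
    value_expression is a callable taking the current scope."""
    result = {}
    for key, value_expression in pairs:
        # Earlier bindings are layered on top of the outer scope.
        intermediate_scope = dict(outer_scope)
        intermediate_scope.update(result)
        result[key] = value_expression(intermediate_scope)
    return result

# bind(x: 5, y: x + 5)  ->  {'x': 5, 'y': 10}
print(bind_sketch([("x", lambda s: 5),
                   ("y", lambda s: s["x"] + 5)],
                  outer_scope={}))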
def solve_repeat(expr, vars):
"""Build a repeated value from subexpressions."""
try:
result = repeated.meld(*[solve(x, vars).value for x in expr.children])
return Result(result, ())
except TypeError:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="All values in a repeated value must be of the same type.")
|
Build a repeated value from subexpressions.
|
entailment
|
def solve_tuple(expr, vars):
"""Build a tuple from subexpressions."""
result = tuple(solve(x, vars).value for x in expr.children)
return Result(result, ())
|
Build a tuple from subexpressions.
|
entailment
|
def solve_ifelse(expr, vars):
"""Evaluate conditions and return the one that matches."""
for condition, result in expr.conditions():
if boolean.asbool(solve(condition, vars).value):
return solve(result, vars)
return solve(expr.default(), vars)
|
Evaluate conditions and return the one that matches.
|
entailment
|
def solve_map(expr, vars):
"""Solves the map-form, by recursively calling its RHS with new vars.
    Map-forms are binary expressions. The LHS should evaluate to an IAssociative
that can be used as new vars with which to solve a new query, of which
the RHS is the root. In most cases, the LHS will be a Var (var).
Typically, map-forms result from the dotty "dot" (.) operator. For example,
the query "User.name" will translate to a map-form with the var "User"
on LHS and a var to "name" on the RHS. With top-level vars being
something like {"User": {"name": "Bob"}}, the Var on the LHS will
evaluate to {"name": "Bob"}, which subdict will then be used on the RHS as
new vars, and that whole form will evaluate to "Bob".
"""
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
def lazy_map():
try:
for lhs_value in repeated.getvalues(lhs_values):
yield solve(expr.rhs,
__nest_scope(expr.lhs, vars, lhs_value)).value
except errors.EfilterNoneError as error:
error.root = expr
raise
return Result(repeated.lazy(lazy_map), ())
|
Solves the map-form, by recursively calling its RHS with new vars.
Map-forms are binary expressions. The LHS should evaluate to an IAssociative
that can be used as new vars with which to solve a new query, of which
the RHS is the root. In most cases, the LHS will be a Var (var).
Typically, map-forms result from the dotty "dot" (.) operator. For example,
the query "User.name" will translate to a map-form with the var "User"
on LHS and a var to "name" on the RHS. With top-level vars being
something like {"User": {"name": "Bob"}}, the Var on the LHS will
evaluate to {"name": "Bob"}, which subdict will then be used on the RHS as
new vars, and that whole form will evaluate to "Bob".
|
entailment
|
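Stripped of the protocol machinery, the map-form is "for each LHS value, evaluate the RHS in a scope nested on that value", done lazily. A plain-Python sketch of the "User.name" example from the docstring, with dict lookups standing in for the nested scope and the recursive solver:

def map_sketch(lhs_values, rhs_key):
    # Lazily resolve rhs_key against each LHS value, the way "User.name"
    # maps over a repeated User value.
    def lazy_map():
        for lhs_value in lhs_values:
            yield lhs_value[rhs_key]
    return lazy_map()

users = [{"name": "Bob"}, {"name": "Alice"}]
print(list(map_sketch(users, "name")))   # ['Bob', 'Alice']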
def solve_let(expr, vars):
"""Solves a let-form by calling RHS with nested scope."""
lhs_value = solve(expr.lhs, vars).value
if not isinstance(lhs_value, structured.IStructured):
raise errors.EfilterTypeError(
root=expr.lhs, query=expr.original,
message="The LHS of 'let' must evaluate to an IStructured. Got %r."
% (lhs_value,))
return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
|
Solves a let-form by calling RHS with nested scope.
|
entailment
|
def solve_filter(expr, vars):
"""Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value.
"""
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
def lazy_filter():
for lhs_value in repeated.getvalues(lhs_values):
if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value:
yield lhs_value
return Result(repeated.lazy(lazy_filter), ())
|
Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value.
|
entailment
|
def solve_sort(expr, vars):
"""Sort values on the LHS by the value they yield when passed to RHS."""
lhs_values = repeated.getvalues(__solve_for_repeated(expr.lhs, vars)[0])
sort_expression = expr.rhs
def _key_func(x):
return solve(sort_expression, __nest_scope(expr.lhs, vars, x)).value
results = ordered.ordered(lhs_values, key_func=_key_func)
return Result(repeated.meld(*results), ())
|
Sort values on the LHS by the value they yield when passed to RHS.
|
entailment
|
def solve_each(expr, vars):
"""Return True if RHS evaluates to a true value with each state of LHS.
If LHS evaluates to a normal IAssociative object then this is the same as
a regular let-form, except the return value is always a boolean. If LHS
    evaluates to a repeated var (see efilter.protocols.repeated) of
IAssociative objects then RHS will be evaluated with each state and True
will be returned only if each result is true.
"""
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
for lhs_value in repeated.getvalues(lhs_values):
result = solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
if not result.value:
# Each is required to return an actual boolean.
return result._replace(value=False)
return Result(True, ())
|
Return True if RHS evaluates to a true value with each state of LHS.
If LHS evaluates to a normal IAssociative object then this is the same as
a regular let-form, except the return value is always a boolean. If LHS
evaluates to a repeated var (see efilter.protocols.repeated) of
IAssociative objects then RHS will be evaluated with each state and True
will be returned only if each result is true.
|
entailment
|
def solve_cast(expr, vars):
"""Get cast LHS to RHS."""
lhs = solve(expr.lhs, vars).value
t = solve(expr.rhs, vars).value
if t is None:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="Cannot find type named %r." % expr.rhs.value)
if not isinstance(t, type):
raise errors.EfilterTypeError(
root=expr.rhs, query=expr.source,
message="%r is not a type and cannot be used with 'cast'." % (t,))
try:
cast_value = t(lhs)
except TypeError:
raise errors.EfilterTypeError(
root=expr, query=expr.source,
message="Invalid cast %s -> %s." % (type(lhs), t))
return Result(cast_value, ())
|
Cast the LHS value to the type given by the RHS.
|
entailment
|
def solve_isinstance(expr, vars):
"""Typecheck whether LHS is type on the RHS."""
lhs = solve(expr.lhs, vars)
try:
t = solve(expr.rhs, vars).value
except errors.EfilterKeyError:
t = None
if t is None:
raise errors.EfilterTypeError(
root=expr.rhs, query=expr.source,
message="Cannot find type named %r." % expr.rhs.value)
if not isinstance(t, type):
raise errors.EfilterTypeError(
root=expr.rhs, query=expr.source,
message="%r is not a type and cannot be used with 'isa'." % (t,))
return Result(protocol.implements(lhs.value, t), ())
|
Typecheck whether LHS is type on the RHS.
|
entailment
|
def set_version(mod_root):
"""
mod_root
        a VERSION file containing the version strings is created in mod_root
during installation. That file is used at runtime to get the version
information.
"""
try:
version_base = None
version_detail = None
# get version from './VERSION'
src_root = os.path.dirname(__file__)
if not src_root:
src_root = '.'
with open(src_root + '/VERSION', 'r') as f:
version_base = f.readline().strip()
# attempt to get version detail information from git
# We only do that though if we are in a repo root dir,
# ie. if 'git rev-parse --show-prefix' returns an empty string --
# otherwise we get confused if the ve lives beneath another repository,
# and the pip version used uses an install tmp dir in the ve space
# instead of /tmp (which seems to happen with some pip/setuptools
# versions).
p = sp.Popen('cd %s ; '
'test -z `git rev-parse --show-prefix` || exit -1; '
'tag=`git describe --tags --always` 2>/dev/null ; '
'branch=`git branch | grep -e "^*" | cut -f 2- -d " "` 2>/dev/null ; '
'echo $tag@$branch' % src_root,
stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
version_detail = str(p.communicate()[0].strip())
version_detail = version_detail.replace('detached from ', 'detached-')
# remove all non-alphanumeric (and then some) chars
version_detail = re.sub('[/ ]+', '-', version_detail)
version_detail = re.sub('[^a-zA-Z0-9_+@.-]+', '', version_detail)
if p.returncode != 0 or \
version_detail == '@' or \
'git-error' in version_detail or \
'not-a-git-repo' in version_detail or \
'not-found' in version_detail or \
'fatal' in version_detail :
version = version_base
elif '@' not in version_base:
version = '%s-%s' % (version_base, version_detail)
else:
version = version_base
# make sure the version files exist for the runtime version inspection
path = '%s/%s' % (src_root, mod_root)
with open(path + "/VERSION", "w") as f:
f.write(version + "\n")
sdist_name = "%s-%s.tar.gz" % (name, version)
sdist_name = sdist_name.replace('/', '-')
sdist_name = sdist_name.replace('@', '-')
sdist_name = sdist_name.replace('#', '-')
sdist_name = sdist_name.replace('_', '-')
if '--record' in sys.argv or \
'bdist_egg' in sys.argv or \
'bdist_wheel' in sys.argv :
# pip install stage 2 or easy_install stage 1
#
# pip install will untar the sdist in a tmp tree. In that tmp
# tree, we won't be able to derive git version tags -- so we pack the
# formerly derived version as ./VERSION
shutil.move("VERSION", "VERSION.bak") # backup version
shutil.copy("%s/VERSION" % path, "VERSION") # use full version instead
os.system ("python setup.py sdist") # build sdist
shutil.copy('dist/%s' % sdist_name,
'%s/%s' % (mod_root, sdist_name)) # copy into tree
shutil.move("VERSION.bak", "VERSION") # restore version
with open(path + "/SDIST", "w") as f:
f.write(sdist_name + "\n")
return version_base, version_detail, sdist_name
except Exception as e :
raise RuntimeError('Could not extract/set version: %s' % e)
|
mod_root
a VERSION file containing the version strings is created in mod_root
during installation. That file is used at runtime to get the version
information.
|
entailment
|
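The heart of the version-detail logic is 'git describe' plus the current branch name, sanitised into a filename-safe string. A reduced sketch using subprocess (the separate git invocations here are simplifications of the composite shell command above):

import re
import subprocess

def git_version_detail(src_root="."):
    try:
        tag = subprocess.check_output(
            ["git", "describe", "--tags", "--always"],
            cwd=src_root, stderr=subprocess.STDOUT).decode().strip()
        branch = subprocess.check_output(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=src_root, stderr=subprocess.STDOUT).decode().strip()
    except (OSError, subprocess.CalledProcessError):
        return None
    detail = "%s@%s" % (tag, branch)
    detail = detail.replace("detached from ", "detached-")
    detail = re.sub(r"[/ ]+", "-", detail)
    return re.sub(r"[^a-zA-Z0-9_+@.-]+", "", detail)

# print(git_version_detail())   # e.g. 'v0.7.9-3-gabc1234@devel'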
def makeDataFiles(prefix, dir):
""" Create distutils data_files structure from dir
    distutils will copy all files rooted under dir into prefix, excluding
    dir itself, just like 'ditto src dst' works, and unlike 'cp -r src
    dst', which copies src into dst.
Typical usage:
# install the contents of 'wiki' under sys.prefix+'share/moin'
data_files = makeDataFiles('share/moin', 'wiki')
For this directory structure:
root
file1
file2
dir
file
subdir
file
makeDataFiles('prefix', 'root') will create this distutil data_files structure:
[('prefix', ['file1', 'file2']),
('prefix/dir', ['file']),
('prefix/dir/subdir', ['file'])]
"""
# Strip 'dir/' from of path before joining with prefix
dir = dir.rstrip('/')
strip = len(dir) + 1
found = []
os.path.walk(dir, visit, (prefix, strip, found))
#print found[0]
return found[0]
|
Create distutils data_files structure from dir
distutils will copy all files rooted under dir into prefix, excluding
dir itself, just like 'ditto src dst' works, and unlike 'cp -r src
dst', which copies src into dst.
Typical usage:
# install the contents of 'wiki' under sys.prefix+'share/moin'
data_files = makeDataFiles('share/moin', 'wiki')
For this directory structure:
root
file1
file2
dir
file
subdir
file
makeDataFiles('prefix', 'root') will create this distutil data_files structure:
[('prefix', ['file1', 'file2']),
('prefix/dir', ['file']),
('prefix/dir/subdir', ['file'])]
|
entailment
|
def visit((prefix, strip, found), dirname, names):
""" Visit directory, create distutil tuple
Add distutil tuple for each directory using this format:
(destination, [dirname/file1, dirname/file2, ...])
    distutils will later copy file1, file2, ... into destination.
"""
files = []
# Iterate over a copy of names, modify names
for name in names[:]:
path = os.path.join(dirname, name)
# Ignore directories - we will visit later
if os.path.isdir(path):
# Remove directories we don't want to visit later
if isbad(name):
names.remove(name)
continue
elif isgood(name):
files.append(path)
destination = os.path.join(prefix, dirname[strip:])
found.append((destination, files))
|
Visit directory, create distutil tuple
Add distutil tuple for each directory using this format:
(destination, [dirname/file1, dirname/file2, ...])
distutils will later copy file1, file2, ... into destination.
|
entailment
|
def isgood(name):
""" Whether name should be installed """
if not isbad(name):
if name.endswith('.py') or name.endswith('.json') or name.endswith('.tar'):
return True
return False
|
Whether name should be installed
|
entailment
|
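os.path.walk was removed in Python 3, so the three helpers above only run under Python 2. A sketch of an equivalent data_files builder on top of os.walk, with the isgood/isbad filters reduced to a simple suffix check (an assumption; the real filters may exclude more):

import os

GOOD_SUFFIXES = ('.py', '.json', '.tar')

def make_data_files(prefix, root):
    """Return [(destination, [files...]), ...] for everything under root."""
    root = root.rstrip('/')
    strip = len(root) + 1
    found = []
    for dirname, dirnames, filenames in os.walk(root):
        files = [os.path.join(dirname, name)
                 for name in filenames
                 if name.endswith(GOOD_SUFFIXES)]
        destination = os.path.join(prefix, dirname[strip:])
        found.append((destination, files))
    return found

# data_files = make_data_files('share/moin', 'wiki')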
def _initialize_workflow(self):
"""
**Purpose**: Initialize the PST of the workflow with a uid and type checks
"""
try:
self._prof.prof('initializing workflow', uid=self._uid)
for p in self._workflow:
p._assign_uid(self._sid)
self._prof.prof('workflow initialized', uid=self._uid)
except Exception, ex:
self._logger.exception(
'Fatal error while initializing workflow: %s' % ex)
raise
|
**Purpose**: Initialize the PST of the workflow with a uid and type checks
|
entailment
|
def _enqueue(self, local_prof):
"""
**Purpose**: This is the function that is run in the enqueue thread. This function extracts Tasks from the
copy of workflow that exists in the WFprocessor object and pushes them to the queues in the pending_q list.
Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is
communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated
queues to communicate with the master.
Details: Termination condition of this thread is set by the wfp process.
"""
try:
local_prof.prof('enqueue-thread started', uid=self._uid)
self._logger.info('enqueue-thread started')
# Acquire a connection+channel to the rmq server
mq_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
last = time.time()
while not self._enqueue_thread_terminate.is_set():
'''
We iterate through all pipelines to collect tasks from
stages that are pending scheduling. Once collected, these tasks
will be communicated to the tmgr in bulk.
'''
workload = []
scheduled_stages = []
for pipe in self._workflow:
with pipe.lock:
if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)):
# Test if the pipeline is already in the final state
if pipe.state in states.FINAL:
continue
elif pipe.state == states.INITIAL:
# Set state of pipeline to SCHEDULING if it is in INITIAL
transition(obj=pipe,
obj_type='Pipeline',
new_state=states.SCHEDULING,
channel=mq_channel,
queue='%s-enq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
executable_stage = pipe.stages[pipe.current_stage - 1]
if not executable_stage.uid:
executable_stage.parent_pipeline['uid'] = pipe.uid
executable_stage.parent_pipeline['name'] = pipe.name
executable_stage._assign_uid(self._sid)
if executable_stage.state in [states.INITIAL, states.SCHEDULED]:
if executable_stage.state == states.INITIAL:
transition(obj=executable_stage,
obj_type='Stage',
new_state=states.SCHEDULING,
channel=mq_channel,
queue='%s-enq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
executable_tasks = executable_stage.tasks
for executable_task in executable_tasks:
if (executable_task.state == states.INITIAL)or \
((executable_task.state == states.FAILED)and(self._resubmit_failed)):
# Set state of Tasks in current Stage to SCHEDULING
transition(obj=executable_task,
obj_type='Task',
new_state=states.SCHEDULING,
channel=mq_channel,
queue='%s-enq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# task_as_dict = json.dumps(executable_task.to_dict())
workload.append(executable_task)
if executable_stage not in scheduled_stages:
scheduled_stages.append(
executable_stage)
if workload:
# Put the task on one of the pending_queues
workload_as_dict = list()
for task in workload:
workload_as_dict.append(task.to_dict())
workload_as_dict = json.dumps(workload_as_dict)
mq_channel.basic_publish(exchange='',
routing_key=self._pending_queue[0],
body=workload_as_dict
# properties=pika.BasicProperties(
# make message persistent
# delivery_mode = 2)
)
for task in workload:
# Set state of Tasks in current Stage to SCHEDULED
transition(obj=task,
obj_type='Task',
new_state=states.SCHEDULED,
channel=mq_channel,
queue='%s-enq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
self._logger.debug(
'Task %s published to pending queue' % task.uid)
if scheduled_stages:
for executable_stage in scheduled_stages:
transition(obj=executable_stage,
obj_type='Stage',
new_state=states.SCHEDULED,
channel=mq_channel,
queue='%s-enq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Appease pika cos it thinks the connection is dead
now = time.time()
if now - last >= self._rmq_ping_interval:
mq_connection.process_data_events()
last = now
self._logger.info('Enqueue thread terminated')
mq_connection.close()
local_prof.prof('terminating enqueue-thread', uid=self._uid)
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to cancel enqueuer thread gracefully...')
mq_connection.close()
raise KeyboardInterrupt
except Exception, ex:
self._logger.exception('Error in enqueue-thread: %s' % ex)
try:
mq_connection.close()
except Exception as ex:
self._logger.warning('mq_connection not created, %s' % ex)
raise
|
**Purpose**: This is the function that is run in the enqueue thread. This function extracts Tasks from the
copy of workflow that exists in the WFprocessor object and pushes them to the queues in the pending_q list.
Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is
communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated
queues to communicate with the master.
Details: Termination condition of this thread is set by the wfp process.
|
entailment
|
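Underneath the state bookkeeping, the enqueue thread's messaging step is simple: serialise the bulk of task dicts as one JSON body and publish it to a pending queue on the default exchange. A stripped-down sketch with pika (host, port and queue name are placeholders, and the queue is assumed to exist or be declarable with default properties):

import json
import pika

def publish_workload(task_dicts, queue_name, host="localhost", port=5672):
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()
    channel.queue_declare(queue=queue_name)
    # One message carries the whole bulk, mirroring workload_as_dict above.
    channel.basic_publish(exchange="",
                          routing_key=queue_name,
                          body=json.dumps(task_dicts))
    connection.close()

# publish_workload([{"uid": "task.0000"}], "pendingq-1")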
def _dequeue(self, local_prof):
"""
**Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the
        completed queues and updates the copy of the workflow that exists in the WFprocessor object.
Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is
communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated
queues to communicate with the master.
Details: Termination condition of this thread is set by the wfp process.
"""
try:
local_prof.prof('dequeue-thread started', uid=self._uid)
self._logger.info('Dequeue thread started')
mq_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
last = time.time()
while not self._dequeue_thread_terminate.is_set():
try:
method_frame, header_frame, body = mq_channel.basic_get(
queue=self._completed_queue[0])
if body:
# Get task from the message
completed_task = Task()
completed_task.from_dict(json.loads(body))
self._logger.info(
'Got finished task %s from queue' % (completed_task.uid))
transition(obj=completed_task,
obj_type='Task',
new_state=states.DEQUEUEING,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Traverse the entire workflow to find out the correct Task
for pipe in self._workflow:
with pipe.lock:
if ((not pipe.completed) and (not pipe.state == states.SUSPENDED)):
if completed_task.parent_pipeline['uid'] == pipe.uid:
self._logger.debug(
'Found parent pipeline: %s' % pipe.uid)
for stage in pipe.stages:
if completed_task.parent_stage['uid'] == stage.uid:
self._logger.debug(
'Found parent stage: %s' % (stage.uid))
transition(obj=completed_task,
obj_type='Task',
new_state=states.DEQUEUED,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
if not completed_task.exit_code:
completed_task.state = states.DONE
else:
completed_task.state = states.FAILED
for task in stage.tasks:
if task.uid == completed_task.uid:
task.state = str(
completed_task.state)
if (task.state == states.FAILED) and (self._resubmit_failed):
task.state = states.INITIAL
transition(obj=task,
obj_type='Task',
new_state=task.state,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
if stage._check_stage_complete():
transition(obj=stage,
obj_type='Stage',
new_state=states.DONE,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Check if Stage has a post-exec that needs to be
# executed
if stage.post_exec:
try:
self._logger.info('Executing post-exec for stage %s'
% stage.uid)
self._prof.prof('Adap: executing post-exec',
uid=self._uid)
stage.post_exec()
self._logger.info(
'Post-exec executed for stage %s' % stage.uid)
self._prof.prof(
'Adap: post-exec executed', uid=self._uid)
except Exception, ex:
self._logger.exception('Execution failed in post_exec of stage %s' % stage.uid)
raise
pipe._increment_stage()
if pipe.completed:
transition(obj=pipe,
obj_type='Pipeline',
new_state=states.DONE,
channel=mq_channel,
queue='%s-deq-to-sync' % self._sid,
profiler=local_prof,
logger=self._logger)
# Found the task and processed it -- no more iterations needed
break
                                            # Found the stage and processed it -- no more iterations needed
break
                                        # Found the pipeline and processed it -- no more iterations needed
break
mq_channel.basic_ack(
delivery_tag=method_frame.delivery_tag)
# Appease pika cos it thinks the connection is dead
now = time.time()
if now - last >= self._rmq_ping_interval:
mq_connection.process_data_events()
last = now
except Exception, ex:
self._logger.exception(
'Unable to receive message from completed queue: %s' % ex)
raise
self._logger.info('Terminated dequeue thread')
mq_connection.close()
local_prof.prof('terminating dequeue-thread', uid=self._uid)
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to exit gracefully...')
mq_connection.close()
raise KeyboardInterrupt
except Exception, ex:
self._logger.exception('Error in dequeue-thread: %s' % ex)
try:
mq_connection.close()
except:
self._logger.warning('mq_connection not created')
raise EnTKError(ex)
|
**Purpose**: This is the function that is run in the dequeue thread. This function extracts Tasks from the
completed queues and updates the copy of the workflow that exists in the WFprocessor object.
Since this thread works on the copy of the workflow, every state update to the Task, Stage and Pipeline is
communicated back to the AppManager (master process) via the 'sync_with_master' function that has dedicated
queues to communicate with the master.
Details: Termination condition of this thread is set by the wfp process.
|
entailment
|
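The dequeue side is a polling consumer: basic_get from the completed queue, process the JSON body, then basic_ack with the delivery tag. A minimal sketch of that loop (host, port and queue name are placeholders):

import json
import pika

def drain_completed(queue_name, handle, host="localhost", port=5672):
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()
    while True:
        method_frame, header_frame, body = channel.basic_get(queue=queue_name)
        if body is None:
            break   # queue is drained
        handle(json.loads(body))
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)
    connection.close()

# drain_completed("completedq-1", print)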
def _wfp(self):
"""
**Purpose**: This is the function executed in the wfp process. The function is used to simply create
        and spawn two threads: enqueue and dequeue. The enqueue thread pushes ready tasks to the queues in the pending_q
        list, whereas the dequeue thread pulls completed tasks from the queues in the completed_q. This function is also
        responsible for the termination of these threads and is hence blocking.
"""
try:
local_prof = ru.Profiler(
name='radical.entk.%s' % self._uid + '-proc', path=self._path)
local_prof.prof('wfp process started', uid=self._uid)
self._logger.info('WFprocessor started')
            # Process should run till the terminate condition is encountered
while (not self._wfp_terminate.is_set()):
try:
# Start dequeue thread
if (not self._dequeue_thread) or (not self._dequeue_thread.is_alive()):
local_prof.prof(
'creating dequeue-thread', uid=self._uid)
self._dequeue_thread = threading.Thread(
target=self._dequeue, args=(local_prof,), name='dequeue-thread')
self._logger.info('Starting dequeue-thread')
local_prof.prof(
'starting dequeue-thread', uid=self._uid)
self._dequeue_thread.start()
# Start enqueue thread
if (not self._enqueue_thread) or (not self._enqueue_thread.is_alive()):
local_prof.prof(
'creating enqueue-thread', uid=self._uid)
self._enqueue_thread = threading.Thread(
target=self._enqueue, args=(local_prof,), name='enqueue-thread')
self._logger.info('Starting enqueue-thread')
local_prof.prof(
'starting enqueue-thread', uid=self._uid)
self._enqueue_thread.start()
except Exception, ex:
self._logger.exception('WFProcessor interrupted')
raise
local_prof.prof('start termination', uid=self._uid)
self._logger.info('Terminating enqueue-thread')
self._enqueue_thread_terminate.set()
self._enqueue_thread.join()
self._logger.info('Terminating dequeue-thread')
self._dequeue_thread_terminate.set()
self._dequeue_thread.join()
local_prof.prof('termination done', uid=self._uid)
local_prof.prof('terminating wfp process', uid=self._uid)
local_prof.close()
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to cancel wfprocessor process gracefully...')
if self._enqueue_thread:
if not self._enqueue_thread_terminate.is_set():
self._logger.info('Terminating enqueue-thread')
self._enqueue_thread_terminate.set()
self._enqueue_thread.join()
if self._dequeue_thread:
if not self._dequeue_thread_terminate.is_set():
self._logger.info('Terminating dequeue-thread')
self._dequeue_thread_terminate.set()
self._dequeue_thread.join()
self._logger.info('WFprocessor process terminated')
raise KeyboardInterrupt
except Exception, ex:
self._logger.exception(
'Error in wfp process: %s. \n Closing enqueue, dequeue threads' % ex)
if self._enqueue_thread:
if not self._enqueue_thread_terminate.is_set():
self._logger.info('Terminating enqueue-thread')
self._enqueue_thread_terminate.set()
self._enqueue_thread.join()
if self._dequeue_thread:
if not self._dequeue_thread_terminate.is_set():
self._logger.info('Terminating dequeue-thread')
self._dequeue_thread_terminate.set()
self._dequeue_thread.join()
self._logger.info('WFprocessor process terminated')
self._logger.exception('%s failed with %s'%(self._uid, ex))
raise EnTKError(ex)
|
**Purpose**: This is the function executed in the wfp process. The function is used to simply create
and spawn two threads: enqueue and dequeue. The enqueue thread pushes ready tasks to the queues in the pending_q
list, whereas the dequeue thread pulls completed tasks from the queues in the completed_q. This function is also
responsible for the termination of these threads and is hence blocking.
|
entailment
|
def start_processor(self):
"""
**Purpose**: Method to start the wfp process. The wfp function
is not to be accessed directly. The function is started in a separate
process using this method.
"""
if not self._wfp_process:
try:
self._prof.prof('creating wfp process', uid=self._uid)
self._wfp_process = Process(
target=self._wfp, name='wfprocessor')
self._enqueue_thread = None
self._dequeue_thread = None
self._enqueue_thread_terminate = threading.Event()
self._dequeue_thread_terminate = threading.Event()
self._wfp_terminate = Event()
self._logger.info('Starting WFprocessor process')
self._prof.prof('starting wfp process', uid=self._uid)
self._wfp_process.start()
return True
except Exception, ex:
self._logger.exception('WFprocessor not started')
self.terminate_processor()
raise
else:
self._logger.warn(
'Wfp process already running, attempted to restart!')
|
**Purpose**: Method to start the wfp process. The wfp function
is not to be accessed directly. The function is started in a separate
process using this method.
|
entailment
|
def terminate_processor(self):
"""
**Purpose**: Method to terminate the wfp process. This method is
blocking as it waits for the wfp process to terminate (aka join).
"""
try:
if self.check_processor():
self._logger.debug(
'Attempting to end WFprocessor... event: %s' % self._wfp_terminate.is_set())
self._wfp_terminate.set()
self._wfp_process.join()
self._wfp_process = None
self._logger.debug('WFprocessor process terminated')
else:
self._logger.debug('WFprocessor process already terminated')
self._prof.prof('wfp process terminated', uid=self._uid)
self._prof.close()
except Exception, ex:
self._logger.exception('Could not terminate wfprocessor process')
raise
|
**Purpose**: Method to terminate the wfp process. This method is
blocking as it waits for the wfp process to terminate (aka join).
|
entailment
|
def workflow_incomplete(self):
"""
**Purpose**: Method to check if the workflow execution is incomplete.
"""
try:
for pipe in self._workflow:
with pipe.lock:
if pipe.completed:
pass
else:
return True
return False
except Exception, ex:
self._logger.exception(
'Could not check if workflow is incomplete, error:%s' % ex)
raise
|
**Purpose**: Method to check if the workflow execution is incomplete.
|
entailment
|
def meld(*values):
"""Return the repeated value, or the first value if there's only one.
This is a convenience function, equivalent to calling
getvalue(repeated(x)) to get x.
This function skips over instances of None in values (None is not allowed
in repeated variables).
Examples:
meld("foo", "bar") # => ListRepetition("foo", "bar")
meld("foo", "foo") # => ListRepetition("foo", "foo")
meld("foo", None) # => "foo"
meld(None) # => None
"""
values = [x for x in values if x is not None]
if not values:
return None
result = repeated(*values)
if isrepeating(result):
return result
return getvalue(result)
|
Return the repeated value, or the first value if there's only one.
This is a convenience function, equivalent to calling
getvalue(repeated(x)) to get x.
This function skips over instances of None in values (None is not allowed
in repeated variables).
Examples:
meld("foo", "bar") # => ListRepetition("foo", "bar")
meld("foo", "foo") # => ListRepetition("foo", "foo")
meld("foo", None) # => "foo"
meld(None) # => None
|
entailment
|
def getvalue(x):
"""Return the single value of x or raise TypError if more than one value."""
if isrepeating(x):
raise TypeError(
"Ambiguous call to getvalue for %r which has more than one value."
% x)
for value in getvalues(x):
return value
|
Return the single value of x or raise TypeError if more than one value.
|
entailment
|
def luid(self):
"""
Unique ID of the current task (fully qualified).
example:
>>> task.luid
pipe.0001.stage.0004.task.0234
:getter: Returns the fully qualified uid of the current task
:type: String
"""
p_elem = self.parent_pipeline.get('name')
if not p_elem:
p_elem = self.parent_pipeline['uid']
s_elem = self.parent_stage.get('name')
if not s_elem:
s_elem = self.parent_stage['uid']
t_elem = self.name
if not t_elem:
t_elem = self.uid
return '%s.%s.%s' % (p_elem, s_elem, t_elem)
|
Unique ID of the current task (fully qualified).
example:
>>> task.luid
pipe.0001.stage.0004.task.0234
:getter: Returns the fully qualified uid of the current task
:type: String
|
entailment
|
def to_dict(self):
"""
Convert current Task into a dictionary
:return: python dictionary
"""
task_desc_as_dict = {
'uid': self._uid,
'name': self._name,
'state': self._state,
'state_history': self._state_history,
'pre_exec': self._pre_exec,
'executable': self._executable,
'arguments': self._arguments,
'post_exec': self._post_exec,
'cpu_reqs': self._cpu_reqs,
'gpu_reqs': self._gpu_reqs,
'lfs_per_process': self._lfs_per_process,
'upload_input_data': self._upload_input_data,
'copy_input_data': self._copy_input_data,
'link_input_data': self._link_input_data,
'move_input_data': self._move_input_data,
'copy_output_data': self._copy_output_data,
'move_output_data': self._move_output_data,
'download_output_data': self._download_output_data,
'stdout': self._stdout,
'stderr': self._stderr,
'exit_code': self._exit_code,
'path': self._path,
'tag': self._tag,
'parent_stage': self._p_stage,
'parent_pipeline': self._p_pipeline,
}
return task_desc_as_dict
|
Convert current Task into a dictionary
:return: python dictionary
|
entailment
|
def from_dict(self, d):
"""
        Create a Task from a dictionary. The change is made in place.
:argument: python dictionary
:return: None
"""
if 'uid' in d:
if d['uid']:
self._uid = d['uid']
if 'name' in d:
if d['name']:
self._name = d['name']
if 'state' in d:
if isinstance(d['state'], str) or isinstance(d['state'], unicode):
self._state = d['state']
else:
raise TypeError(entity='state', expected_type=str,
actual_type=type(d['state']))
else:
self._state = states.INITIAL
if 'state_history' in d:
if isinstance(d['state_history'], list):
self._state_history = d['state_history']
else:
raise TypeError(entity='state_history', expected_type=list, actual_type=type(
d['state_history']))
if 'pre_exec' in d:
if isinstance(d['pre_exec'], list):
self._pre_exec = d['pre_exec']
else:
raise TypeError(expected_type=list,
actual_type=type(d['pre_exec']))
if 'executable' in d:
if isinstance(d['executable'], str) or isinstance(d['executable'], unicode):
self._executable = d['executable']
else:
raise TypeError(expected_type=str,
actual_type=type(d['executable']))
if 'arguments' in d:
if isinstance(d['arguments'], list):
self._arguments = d['arguments']
else:
raise TypeError(expected_type=list,
actual_type=type(d['arguments']))
if 'post_exec' in d:
if isinstance(d['post_exec'], list):
self._post_exec = d['post_exec']
else:
raise TypeError(expected_type=list,
actual_type=type(d['post_exec']))
if 'cpu_reqs' in d:
if isinstance(d['cpu_reqs'], dict):
self._cpu_reqs = d['cpu_reqs']
else:
raise TypeError(expected_type=dict,
actual_type=type(d['cpu_reqs']))
if 'gpu_reqs' in d:
if isinstance(d['gpu_reqs'], dict):
self._gpu_reqs = d['gpu_reqs']
else:
raise TypeError(expected_type=dict,
actual_type=type(d['gpu_reqs']))
if 'lfs_per_process' in d:
if d['lfs_per_process']:
if isinstance(d['lfs_per_process'], int):
self._lfs_per_process = d['lfs_per_process']
else:
raise TypeError(expected_type=int,
actual_type=type(d['lfs_per_process']))
if 'upload_input_data' in d:
if isinstance(d['upload_input_data'], list):
self._upload_input_data = d['upload_input_data']
else:
raise TypeError(expected_type=list,
actual_type=type(d['upload_input_data']))
if 'copy_input_data' in d:
if isinstance(d['copy_input_data'], list):
self._copy_input_data = d['copy_input_data']
else:
raise TypeError(expected_type=list,
actual_type=type(d['copy_input_data']))
if 'link_input_data' in d:
if isinstance(d['link_input_data'], list):
self._link_input_data = d['link_input_data']
else:
raise TypeError(expected_type=list,
actual_type=type(d['link_input_data']))
if 'move_input_data' in d:
if isinstance(d['move_input_data'], list):
self._move_input_data = d['move_input_data']
else:
raise TypeError(expected_type=list,
actual_type=type(d['move_input_data']))
if 'copy_output_data' in d:
if isinstance(d['copy_output_data'], list):
self._copy_output_data = d['copy_output_data']
else:
raise TypeError(expected_type=list,
actual_type=type(d['copy_output_data']))
if 'move_output_data' in d:
if isinstance(d['move_output_data'], list):
self._move_output_data = d['move_output_data']
else:
raise TypeError(expected_type=list,
actual_type=type(d['move_output_data']))
if 'download_output_data' in d:
if isinstance(d['download_output_data'], list):
self._download_output_data = d['download_output_data']
else:
raise TypeError(expected_type=list, actual_type=type(
d['download_output_data']))
if 'stdout' in d:
if d['stdout']:
if isinstance(d['stdout'], str) or isinstance(d['stdout'], unicode):
self._stdout = d['stdout']
else:
raise TypeError(expected_type=str, actual_type=type(d['stdout']))
if 'stderr' in d:
if d['stderr']:
if isinstance(d['stderr'], str) or isinstance(d['stderr'], unicode):
self._stderr = d['stderr']
else:
raise TypeError(expected_type=str, actual_type=type(d['stderr']))
if 'exit_code' in d:
if d['exit_code']:
if isinstance(d['exit_code'], int):
self._exit_code = d['exit_code']
else:
raise TypeError(
entity='exit_code', expected_type=int, actual_type=type(d['exit_code']))
if 'path' in d:
if d['path']:
if isinstance(d['path'], str) or isinstance(d['path'], unicode):
self._path = d['path']
else:
raise TypeError(entity='path', expected_type=str,
actual_type=type(d['path']))
if 'tag' in d:
if d['tag']:
if isinstance(d['tag'], str) or isinstance(d['tag'], unicode):
self._tag = str(d['tag'])
else:
raise TypeError(expected_type=str,
actual_type=type(d['tag']))
if 'parent_stage' in d:
if isinstance(d['parent_stage'], dict):
self._p_stage = d['parent_stage']
else:
raise TypeError(
entity='parent_stage', expected_type=dict, actual_type=type(d['parent_stage']))
if 'parent_pipeline' in d:
if isinstance(d['parent_pipeline'], dict):
self._p_pipeline = d['parent_pipeline']
else:
raise TypeError(entity='parent_pipeline', expected_type=dict, actual_type=type(
d['parent_pipeline']))
|
Create a Task from a dictionary. The change is made in place.
:argument: python dictionary
:return: None
|
entailment
|
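to_dict/from_dict give the JSON-safe round trip that the queues above rely on. A usage sketch, assuming the Task class shown here is the one exposed by the radical.entk package:

import json
from radical.entk import Task   # assumed import path for the class above

original = Task()
original.name = 'stage-in'
original.executable = '/bin/date'

wire = json.dumps(original.to_dict())    # what actually travels on the queue

restored = Task()
restored.from_dict(json.loads(wire))     # inverse operation, in place
assert restored.name == original.name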
def _assign_uid(self, sid):
"""
Purpose: Assign a uid to the current object based on the sid passed
"""
self._uid = ru.generate_id(
'task.%(item_counter)04d', ru.ID_CUSTOM, namespace=sid)
|
Purpose: Assign a uid to the current object based on the sid passed
|
entailment
|
def _validate(self):
"""
Purpose: Validate that the state of the task is 'DESCRIBED' and that an executable has been specified for the
task.
"""
if self._state is not states.INITIAL:
raise ValueError(obj=self._uid,
attribute='state',
expected_value=states.INITIAL,
actual_value=self._state)
if not self._executable:
raise MissingError(obj=self._uid,
missing_attribute='executable')
|
Purpose: Validate that the state of the task is 'DESCRIBED' and that an executable has been specified for the
task.
|
entailment
|
def _process_tasks(self, task_queue, rmgr, logger, mq_hostname, port, local_prof, sid):
'''
**Purpose**: The new thread that gets spawned by the main tmgr process invokes this function. This
function receives tasks from 'task_queue' and submits them to the RADICAL Pilot RTS.
'''
placeholder_dict = dict()
def load_placeholder(task, rts_uid):
parent_pipeline = str(task.parent_pipeline['name'])
parent_stage = str(task.parent_stage['name'])
if parent_pipeline not in placeholder_dict:
placeholder_dict[parent_pipeline] = dict()
if parent_stage not in placeholder_dict[parent_pipeline]:
placeholder_dict[parent_pipeline][parent_stage] = dict()
if None not in [parent_pipeline, parent_stage, task.name]:
placeholder_dict[parent_pipeline][parent_stage][str(task.name)] = {'path': str(task.path),
'rts_uid': rts_uid}
def unit_state_cb(unit, state):
try:
logger.debug('Unit %s in state %s' % (unit.uid, unit.state))
if unit.state in rp.FINAL:
# Acquire a connection+channel to the rmq server
mq_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=mq_hostname, port=port))
mq_channel = mq_connection.channel()
task = None
task = create_task_from_cu(unit, local_prof)
transition(obj=task,
obj_type='Task',
new_state=states.COMPLETED,
channel=mq_channel,
queue='%s-cb-to-sync' % sid,
profiler=local_prof,
logger=logger)
load_placeholder(task, unit.uid)
task_as_dict = json.dumps(task.to_dict())
mq_channel.basic_publish(exchange='',
routing_key='%s-completedq-1' % sid,
body=task_as_dict
# properties=pika.BasicProperties(
# make message persistent
# delivery_mode = 2,
# )
)
logger.info('Pushed task %s with state %s to completed queue %s-completedq-1' % (task.uid, task.state,
sid))
mq_connection.close()
except KeyboardInterrupt:
logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to exit callback thread gracefully...')
raise KeyboardInterrupt
except Exception, ex:
logger.exception('Error in RP callback thread: %s' % ex)
umgr = rp.UnitManager(session=rmgr._session)
umgr.add_pilots(rmgr.pilot)
umgr.register_callback(unit_state_cb)
try:
while not self._tmgr_terminate.is_set():
body = None
try:
body = task_queue.get(block=True, timeout=10)
except Queue.Empty:
# Ignore empty exception, we don't always have new tasks to run
pass
if body:
task_queue.task_done()
bulk_tasks = list()
bulk_cuds = list()
for task in body:
t = Task()
t.from_dict(task)
bulk_tasks.append(t)
bulk_cuds.append(create_cud_from_task(
t, placeholder_dict, local_prof))
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=mq_hostname, port=port))
mq_channel = mq_connection.channel()
transition(obj=t,
obj_type='Task',
new_state=states.SUBMITTING,
channel=mq_channel,
queue='%s-tmgr-to-sync' % sid,
profiler=local_prof,
logger=logger)
mq_connection.close()
umgr.submit_units(bulk_cuds)
for task in bulk_tasks:
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=mq_hostname, port=port))
mq_channel = mq_connection.channel()
transition(obj=task,
obj_type='Task',
new_state=states.SUBMITTED,
channel=mq_channel,
queue='%s-tmgr-to-sync' % sid,
profiler=local_prof,
logger=logger)
mq_connection.close()
except KeyboardInterrupt as ex:
logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to cancel task processor gracefully...')
except Exception as ex:
logger.exception('%s failed with %s'%(self._uid, ex))
raise EnTKError(ex)
|
**Purpose**: The new thread that gets spawned by the main tmgr process invokes this function. This
function receives tasks from 'task_queue' and submits them to the RADICAL Pilot RTS.
|
entailment
|
def infer_type(expr, scope):
"""Try to infer the type of x[y] if y is a known value (literal)."""
# Do we know what the key even is?
if isinstance(expr.key, ast.Literal):
key = expr.key.value
else:
return protocol.AnyType
container_type = infer_type(expr.value, scope)
try:
# Associative types are not subject to scoping rules so we can just
# reflect using IAssociative.
return associative.reflect(container_type, key) or protocol.AnyType
except NotImplementedError:
return protocol.AnyType
|
Try to infer the type of x[y] if y is a known value (literal).
|
entailment
|
def infer_type(expr, scope):
"""Try to infer the type of x.y if y is a known value (literal)."""
# Do we know what the member is?
if isinstance(expr.member, ast.Literal):
member = expr.member.value
else:
return protocol.AnyType
container_type = infer_type(expr.obj, scope)
try:
# We are not using lexical scope here on purpose - we want to see what
# the type of the member is only on the container_type.
return structured.reflect(container_type, member) or protocol.AnyType
except NotImplementedError:
return protocol.AnyType
|
Try to infer the type of x.y if y is a known value (literal).
|
entailment
|
def _tmgr(self, uid, rmgr, logger, mq_hostname, port, pending_queue, completed_queue):
"""
**Purpose**: Method to be run by the tmgr process. This method receives a Task from the pending_queue
and submits it to the RTS. Currently, it also converts Tasks into CUDs and CUs into (partially described) Tasks.
This conversion is necessary since the current RTS is RADICAL Pilot. Once Tasks are recovered from a CU, they
        are then pushed to the completed_queue. At all state transitions, they are synced (blocking) with the AppManager
in the master process.
In addition the tmgr also receives heartbeat 'request' msgs from the heartbeat-req queue. It responds with a
        'response' message to the 'heartbeat-res' queue.
**Details**: The AppManager can re-invoke the tmgr process with this function if the execution of the workflow is
still incomplete. There is also population of a dictionary, placeholder_dict, which stores the path of each of
the tasks on the remote machine.
"""
try:
def heartbeat_response(mq_channel):
try:
# Get request from heartbeat-req for heartbeat response
hb_method_frame, hb_props, hb_body = mq_channel.basic_get(
queue=self._hb_request_q)
if hb_body:
logger.info('Received heartbeat request')
mq_channel.basic_publish(exchange='',
routing_key=self._hb_response_q,
properties=pika.BasicProperties(
correlation_id=hb_props.correlation_id),
body='response')
logger.info('Sent heartbeat response')
mq_channel.basic_ack(
delivery_tag=hb_method_frame.delivery_tag)
except Exception, ex:
logger.exception(
'Failed to respond to heartbeat request, error: %s' % ex)
raise
local_prof = ru.Profiler(
name='radical.entk.%s' % self._uid + '-proc', path=self._path)
local_prof.prof('tmgr process started', uid=self._uid)
logger.info('Task Manager process started')
            # Thread should run till the terminate condition is encountered
mq_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=mq_hostname, port=port))
mq_channel = mq_connection.channel()
# Queue for communication between threads of this process
task_queue = Queue.Queue()
# Start second thread to receive tasks and push to RTS
self._rts_runner = threading.Thread(target=self._process_tasks,
args=(task_queue,
rmgr,
logger,
mq_hostname,
port,
local_prof,
self._sid))
self._rts_runner.start()
local_prof.prof('tmgr infrastructure setup done', uid=uid)
last = time.time()
while not self._tmgr_terminate.is_set():
try:
method_frame, header_frame, body = mq_channel.basic_get(
queue=pending_queue[0])
if body:
body = json.loads(body)
task_queue.put(body)
mq_channel.basic_ack(
delivery_tag=method_frame.delivery_tag)
heartbeat_response(mq_channel)
except Exception as ex:
logger.exception('Error in task execution: %s' % ex)
raise
except KeyboardInterrupt:
logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to cancel tmgr process gracefully...')
except Exception as ex:
logger.exception('%s failed with %s'%(self._uid, ex))
raise EnTKError(ex)
finally:
local_prof.prof('terminating tmgr process', uid=uid)
if self._rts_runner:
self._rts_runner.join()
mq_connection.close()
local_prof.close()
|
**Purpose**: Method to be run by the tmgr process. This method receives a Task from the pending_queue
and submits it to the RTS. Currently, it also converts Tasks into CUDs and CUs into (partially described) Tasks.
This conversion is necessary since the current RTS is RADICAL Pilot. Once Tasks are recovered from a CU, they
are then pushed to the completed_queue. At all state transitions, they are synced (blocking) with the AppManager
in the master process.
In addition, the tmgr receives heartbeat 'request' messages from the 'heartbeat-req' queue and responds with a
'response' message to the 'heartbeat-res' queue.
**Details**: The AppManager can re-invoke the tmgr process with this function if the execution of the workflow is
still incomplete. The tmgr also populates a dictionary, placeholder_dict, which stores the path of each
task on the remote machine.
|
entailment
|
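The docstring above describes a request/response heartbeat over two RabbitMQ queues, with the task manager answering on the response queue using the request's correlation_id. The requesting side (in EnTK that is the AppManager) could look roughly like the following pika sketch; the queue names and the polling loop are assumptions made for illustration, not the actual EnTK code:

import uuid
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='heartbeat-req')
channel.queue_declare(queue='heartbeat-res')

corr_id = str(uuid.uuid4())
channel.basic_publish(exchange='',
                      routing_key='heartbeat-req',
                      properties=pika.BasicProperties(correlation_id=corr_id),
                      body='request')

# Poll the response queue; a real implementation would add a timeout.
while True:
    method, props, body = channel.basic_get(queue='heartbeat-res')
    if body and props.correlation_id == corr_id:
        channel.basic_ack(delivery_tag=method.delivery_tag)
        print('task manager is alive')
        break

connection.close()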
def start_manager(self):
"""
**Purpose**: Method to start the tmgr process. The tmgr function
is not to be accessed directly. The function is started in a separate
process using this method.
"""
if not self._tmgr_process:
try:
self._prof.prof('creating tmgr process', uid=self._uid)
self._tmgr_terminate = Event()
self._tmgr_process = Process(target=self._tmgr,
name='task-manager',
args=(
self._uid,
self._rmgr,
self._logger,
self._mq_hostname,
self._port,
self._pending_queue,
self._completed_queue)
)
self._logger.info('Starting task manager process')
self._prof.prof('starting tmgr process', uid=self._uid)
self._tmgr_process.start()
return True
except Exception as ex:
self._logger.exception('Task manager not started, error: %s' % ex)
self.terminate_manager()
raise
else:
self._logger.warn(
'tmgr process already running, but attempted to restart!')
|
**Purpose**: Method to start the tmgr process. The tmgr function
is not to be accessed directly. The function is started in a separate
process using this method.
|
entailment
|
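The start_manager() logic above boils down to a start-once guard around a multiprocessing.Process plus a terminate Event kept for later shutdown. A generic sketch of that pattern (not the EnTK TaskManager class; the worker loop is a placeholder):

import time
from multiprocessing import Event, Process

def _work(terminate):
    # Placeholder for the real task-manager loop.
    while not terminate.is_set():
        time.sleep(0.1)

class Manager(object):
    def __init__(self):
        self._proc = None
        self._terminate = None

    def start(self):
        if self._proc:                    # refuse to start a second copy
            print('manager already running')
            return False
        self._terminate = Event()
        self._proc = Process(target=_work, name='task-manager',
                             args=(self._terminate,))
        self._proc.start()
        return True

    def stop(self):
        if self._proc:
            self._terminate.set()
            self._proc.join()
            self._proc = None

if __name__ == '__main__':
    m = Manager()
    m.start()
    m.stop()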
def keyword(tokens, expected):
"""Case-insensitive keyword match."""
try:
token = next(iter(tokens))
except StopIteration:
return
if token and token.name == "symbol" and token.value.lower() == expected:
return TokenMatch(None, token.value, (token,))
|
Case-insensitive keyword match.
|
entailment
|
def multi_keyword(tokens, keyword_parts):
"""Match a case-insensitive keyword consisting of multiple tokens."""
tokens = iter(tokens)
matched_tokens = []
limit = len(keyword_parts)
for idx in six.moves.range(limit):
try:
token = next(tokens)
except StopIteration:
return
if (not token or token.name != "symbol" or
token.value.lower() != keyword_parts[idx]):
return
matched_tokens.append(token)
return TokenMatch(None, token.value, matched_tokens)
|
Match a case-insensitive keyword consisting of multiple tokens.
|
entailment
|
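keyword() and multi_keyword() above only look at a token's name and value, so their behaviour is easy to demonstrate with stand-in tokens. A small usage sketch, given the two functions above (the Token field names are assumed for the example; the real grammar module defines its own Token/TokenMatch tuples):

import collections

Token = collections.namedtuple('Token', 'name value start end')

# Token stream for the text "ORDER BY name".
stream = (Token('symbol', 'ORDER', 0, 5),
          Token('symbol', 'BY', 6, 8),
          Token('symbol', 'name', 9, 13))

print(keyword(stream, 'order'))                # matches the single ORDER token
print(multi_keyword(stream, ('order', 'by')))  # matches ORDER and BY together
print(keyword(stream, 'select'))               # None: no match, nothing consumed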
def prefix(tokens, operator_table):
"""Match a prefix of an operator."""
operator, matched_tokens = operator_table.prefix.match(tokens)
if operator:
return TokenMatch(operator, None, matched_tokens)
|
Match a prefix of an operator.
|
entailment
|
def infix(tokens, operator_table):
"""Match an infix of an operator."""
operator, matched_tokens = operator_table.infix.match(tokens)
if operator:
return TokenMatch(operator, None, matched_tokens)
|
Match an infix of an operator.
|
entailment
|
def suffix(tokens, operator_table):
"""Match a suffix of an operator."""
operator, matched_tokens = operator_table.suffix.match(tokens)
if operator:
return TokenMatch(operator, None, matched_tokens)
|
Match a suffix of an operator.
|
entailment
|
def token_name(tokens, expected):
"""Match a token name (type)."""
try:
token = next(iter(tokens))
except StopIteration:
return
if token and token.name == expected:
return TokenMatch(None, token.value, (token,))
|
Match a token name (type).
|
entailment
|
def match_tokens(expected_tokens):
"""Generate a grammar function that will match 'expected_tokens' only."""
if isinstance(expected_tokens, Token):
# Match a single token.
def _grammar_func(tokens):
try:
next_token = next(iter(tokens))
except StopIteration:
return
if next_token == expected_tokens:
return TokenMatch(None, next_token.value, (next_token,))
elif isinstance(expected_tokens, tuple):
# Match multiple tokens.
match_len = len(expected_tokens)
def _grammar_func(tokens):
upcoming = tuple(itertools.islice(tokens, match_len))
if upcoming == expected_tokens:
return TokenMatch(None, None, upcoming)
else:
raise TypeError(
"'expected_tokens' must be an instance of Token or a tuple "
"thereof. Got %r." % expected_tokens)
return _grammar_func
|
Generate a grammar function that will match 'expected_tokens' only.
|
entailment
|
def set_metric(slug, value, category=None, expire=None, date=None):
"""Create/Increment a metric."""
get_r().set_metric(slug, value, category=category, expire=expire, date=date)
|
Set a metric to a specific value.
|
entailment
|
def metric(slug, num=1, category=None, expire=None, date=None):
"""Create/Increment a metric."""
get_r().metric(slug, num=num, category=category, expire=expire, date=date)
|
Create/Increment a metric.
|
entailment
|
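Both wrappers above simply forward to the shared client returned by get_r(), so callers never touch the Redis connection directly. Typical usage of the two helpers (a sketch based only on the signatures shown above; category/expire behaviour depends on the underlying R client):

# Count an event: increments the 'new-user-signup' metric by 1.
metric('new-user-signup')

# Record five downloads at once, filed under a category.
metric('downloads', num=5, category='Products')

# Overwrite a metric with an absolute value instead of incrementing it.
set_metric('active-subscriptions', 1042)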
def expression(self, previous_precedence=0):
"""An expression is an atom or an infix expression.
Grammar (sort of, actually a precedence-climbing parser):
expression = atom [ binary_operator expression ] .
Args:
previous_precedence: What operator precedence should we start with?
"""
lhs = self.atom()
return self.operator(lhs, previous_precedence)
|
An expression is an atom or an infix expression.
Grammar (sort of, actually a precedence-climbing parser):
expression = atom [ binary_operator expression ] .
Args:
previous_precedence: What operator precedence should we start with?
|
entailment
|
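expression() above is the entry point of a precedence-climbing parser: parse one atom, then let the operator loop decide how much of the remaining input binds to it at the current precedence. A self-contained miniature of the technique, independent of EFILTER's grammar (only integer literals, parentheses, '+' and '*'):

PRECEDENCE = {'+': 1, '*': 2}

def parse_expression(tokens, pos=0, min_prec=0):
    # Atom: an integer literal or a parenthesised sub-expression.
    if tokens[pos] == '(':
        lhs, pos = parse_expression(tokens, pos + 1, 0)
        pos += 1                      # consume the closing ')'
    else:
        lhs, pos = int(tokens[pos]), pos + 1

    # Operator loop: absorb operators whose precedence is high enough.
    while (pos < len(tokens) and tokens[pos] in PRECEDENCE
           and PRECEDENCE[tokens[pos]] >= min_prec):
        op = tokens[pos]
        rhs, pos = parse_expression(tokens, pos + 1, PRECEDENCE[op] + 1)
        lhs = (op, lhs, rhs)
    return lhs, pos

tree, _ = parse_expression(['2', '+', '3', '*', '4'])
print(tree)   # ('+', 2, ('*', 3, 4)): '*' binds tighter than '+'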
def atom(self):
"""Parse an atom, which is most things.
Grammar:
atom =
[ prefix ]
( select_expression
| any_expression
| func_application
| let_expr
| var
| literal
| list
| "(" expression ")" ) .
"""
# Parameter replacement with literals.
if self.tokens.accept(grammar.param):
return self.param()
# Let expressions (let(x = 5, y = 10) x + y)
if self.tokens.accept(grammar.let):
return self.let()
# At the top level, we try to see if we are recursing into an SQL query.
if self.tokens.accept(grammar.select):
return self.select()
# A SELECT query can also start with 'ANY'.
if self.tokens.accept(grammar.select_any):
return self.select_any()
# Explicitly reject any keywords from SQL other than SELECT and ANY.
# If we don't do this they will match as valid symbols (variables)
# and that might be confusing to the user.
self.tokens.reject(grammar.sql_keyword)
# Match if-else before other things that consume symbols.
if self.tokens.accept(grammar.if_if):
return self.if_if()
# Operators must be matched first because the same symbols could also
# be vars or applications.
if self.tokens.accept(grammar.prefix):
operator = self.tokens.matched.operator
start = self.tokens.matched.start
expr = self.expression(operator.precedence)
return operator.handler(expr, start=start, end=expr.end,
source=self.original)
if self.tokens.accept(grammar.literal):
return ast.Literal(self.tokens.matched.value, source=self.original,
start=self.tokens.matched.start,
end=self.tokens.matched.end)
# Match builtin pseudo-functions before functions and vars to prevent
# overrides.
if self.tokens.accept(grammar.builtin):
return self.builtin(self.tokens.matched.value)
# Match applications before vars, because obviously.
if self.tokens.accept(grammar.application):
return self.application(
ast.Var(self.tokens.matched.value, source=self.original,
start=self.tokens.matched.start,
end=self.tokens.matched.first.end))
if self.tokens.accept(common_grammar.symbol):
return ast.Var(self.tokens.matched.value, source=self.original,
start=self.tokens.matched.start,
end=self.tokens.matched.end)
if self.tokens.accept(common_grammar.lparen):
# Parens will contain one or more expressions. If there are several
# expressions, separated by commas, then they are a repeated value.
#
# Unlike lists, repeated values must all be of the same type,
# otherwise evaluation of the query will fail at runtime (or
# type-check time, for simple cases.)
start = self.tokens.matched.start
expressions = [self.expression()]
while self.tokens.accept(common_grammar.comma):
expressions.append(self.expression())
self.tokens.expect(common_grammar.rparen)
if len(expressions) == 1:
return expressions[0]
else:
return ast.Repeat(*expressions, source=self.original,
start=start, end=self.tokens.matched.end)
if self.tokens.accept(common_grammar.lbracket):
return self.list()
# We've run out of things we know the next atom could be. If there is
# still input left then it's illegal syntax. If there is nothing then
# the input cuts off when we still need an atom. Either is an error.
if self.tokens.peek(0):
return self.error(
"Was not expecting %r here." % self.tokens.peek(0).name,
start_token=self.tokens.peek(0))
else:
return self.error("Unexpected end of input.")
|
Parse an atom, which is most things.
Grammar:
atom =
[ prefix ]
( select_expression
| any_expression
| func_application
| let_expr
| var
| literal
| list
| "(" expression ")" ) .
|
entailment
|
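The comments inside atom() stress that match order matters: prefix operators before symbols, function applications before bare variables, because the same leading token could start either. A toy dispatcher, unrelated to EFILTER's real grammar, showing why the application rule has to run before the variable rule:

import re

def parse_atom(text):
    # Application ('f(x)') must be tried before a bare variable ('f');
    # otherwise the variable rule would claim 'f' and leave '(x)' dangling.
    application = re.match(r'(\w+)\((\w*)\)', text)
    if application:
        return ('apply', application.group(1), application.group(2))
    variable = re.match(r'\w+', text)
    if variable:
        return ('var', variable.group(0))
    return None

print(parse_atom('count(pid)'))   # ('apply', 'count', 'pid')
print(parse_atom('pid'))          # ('var', 'pid')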