sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def run_shell(args: dict) -> int:
    """Starts the interactive Cauldron shell sub command."""
    # Batch (project-directory) execution takes precedence over an
    # interactive session.
    if args.get('project_directory'):
        return run_batch(args)

    cauldron_shell = CauldronShell()

    # When launched from inside a project folder, queue an "open" command
    # so that project is loaded automatically before the loop begins.
    if in_project_directory():
        current_directory = os.path.realpath(os.curdir)
        cauldron_shell.cmdqueue.append('open "{}"'.format(current_directory))

    cauldron_shell.cmdloop()
    return 0
def run(action: str, args: dict) -> int:
    """
    Runs the specified command action and returns the return status code
    for exit.
    :param action:
        The action to run
    :param args:
        The arguments parsed for the specified action
    """
    # A version-info request overrides whatever action was specified.
    if args.get('show_version_info'):
        return run_version(args)

    runners = {
        'shell': run_shell,
        'kernel': run_kernel,
        'serve': run_kernel,
        'version': run_version,
    }

    try:
        runner = runners[action]
    except KeyError:
        print('[ERROR]: Unrecognized sub command "{}"'.format(action))
        parser = args['parser']  # type: ArgumentParser
        parser.print_help()
        return 1

    return runner(args)
def get_running_step_changes(write: bool = False) -> list:
    """
    Returns a list of change entries for every step of the internally
    loaded project that is currently running.
    :param write:
        Whether or not the serialized step data should also be written to
        disk while assembling the change entries.
    :return:
        A list of dictionaries, one per running step, containing the step
        name, the serialized step data and whether it was written to disk.
    """
    project = cd.project.get_internal_project()
    running_steps = [step for step in project.steps if step.is_running]

    def get_changes(step) -> dict:
        """Serializes a single step into its change entry."""
        step_data = writing.step_writer.serialize(step)
        if write:
            writing.save(project, step_data.file_writes)
        return dict(
            name=step.definition.name,
            action='updated',
            step=step_data._asdict(),
            written=write
        )

    return [get_changes(step) for step in running_steps]
def parse(
        args: typing.List[str] = None,
        arg_parser: ArgumentParser = None
) -> dict:
    """Parses the arguments for the cauldron server"""
    # Fall back on a freshly created parser when none was supplied.
    active_parser = arg_parser if arg_parser else create_parser()
    return vars(active_parser.parse_args(args))
def create_parser(arg_parser: ArgumentParser = None) -> ArgumentParser:
    """
    Creates an argument parser populated with the arg formats for the server
    command.
    """
    parser = arg_parser or ArgumentParser()
    parser.description = 'Cauldron kernel server'

    # Port on which the kernel server listens.
    parser.add_argument('-p', '--port', dest='port', type=int, default=5010)
    # Flask debug mode toggle.
    parser.add_argument(
        '-d', '--debug',
        dest='debug',
        default=False,
        action='store_true'
    )
    # Print version information instead of starting the server.
    parser.add_argument(
        '-v', '--version',
        dest='version',
        default=False,
        action='store_true'
    )
    # Optional authentication code required by connecting clients.
    parser.add_argument(
        '-c', '--code',
        dest='authentication_code',
        type=str,
        default=''
    )
    # Host name/interface on which to listen.
    parser.add_argument('-n', '--name', dest='host', type=str, default=None)
    return parser
def match_rules(tree, rules, fun=None, multi=False):
    """Matches a Tree structure with the given query rules.
    Query rules are represented as a dictionary of template to action.
    Action is either a function, or a dictionary of subtemplate parameter to rules::
        rules = { 'template' : { 'key': rules } }
              | { 'template' : {} }
    Args:
        tree (Tree): Parsed tree structure
        rules (dict): A dictionary of query rules
        fun (function): Function to call with context (set to None if you want to return context)
        multi (Bool): If True, returns all matched contexts, else returns first matched context
    Returns:
        Contexts from matched rules
    """
    matcher = match_rules_context_multi if multi else match_rules_context
    context = matcher(tree, rules)
    if not context:
        return None
    if not fun:
        return context

    accepted_names = fun.__code__.co_varnames

    def build_kwargs(ctx):
        # Forward only the context entries the callback can accept.
        return {name: ctx[name] for name in accepted_names if name in ctx}

    if multi:
        return [fun(**build_kwargs(ctx)) for ctx in context]
    return fun(**build_kwargs(context))
def match_rules_context(tree, rules, parent_context=None):
    """Recursively matches a Tree structure with rules and returns context
    Args:
        tree (Tree): Parsed tree structure
        rules (dict): See match_rules
        parent_context (dict): Context of parent call
    Returns:
        dict: Context matched dictionary of matched rules or
        None if no match
    """
    # BUG FIX: avoid a shared mutable default argument; create a fresh
    # dict per call instead.
    if parent_context is None:
        parent_context = {}
    for template, match_rules in rules.items():
        # Work on a copy so a failed template does not pollute the parent.
        context = parent_context.copy()
        if match_template(tree, template, context):
            for key, child_rules in match_rules.items():
                child_context = match_rules_context(
                    context[key], child_rules, context
                )
                if child_context:
                    context.update(child_context)
                else:
                    # A sub-rule failed to match, so the whole rule fails.
                    return None
            return context
    return None
def cross_context(contextss):
    """
    Cross product of all contexts
    [[a], [b], [c]] -> [[a] x [b] x [c]]
    """
    if not contextss:
        return []
    # Fold each layer of contexts into the accumulated product. On key
    # collisions the previously accumulated value wins.
    merged = [{}]
    for layer in contextss:
        merged = [
            {**candidate, **existing}
            for candidate in layer
            for existing in merged
        ]
    return merged
def match_rules_context_multi(tree, rules, parent_context=None):
    """Recursively matches a Tree structure with rules and returns contexts
    Args:
        tree (Tree): Parsed tree structure
        rules (dict): See match_rules
        parent_context (dict): Context of parent call
    Returns:
        list: All matched context dictionaries (empty list when nothing
        matches)
    """
    # BUG FIX: avoid a shared mutable default argument; create a fresh
    # dict per call instead.
    if parent_context is None:
        parent_context = {}
    all_contexts = []
    for template, match_rules in rules.items():
        context = parent_context.copy()
        if match_template(tree, template, context):
            child_contextss = []
            if not match_rules:
                # Leaf rule: the current context is a complete match.
                all_contexts += [context]
            else:
                # Gather the contexts of every sub-rule and combine them.
                for key, child_rules in match_rules.items():
                    child_contextss.append(
                        match_rules_context_multi(
                            context[key], child_rules, context
                        )
                    )
                all_contexts += cross_context(child_contextss)
    return all_contexts
def match_template(tree, template, args=None):
    """Check if match string matches Tree structure
    Args:
        tree (Tree): Parsed Tree structure of a sentence
        template (str): String template to match. Example: "( S ( NP ) )"
    Returns:
        bool: If they match or not
    """
    token_stack = get_tokens(template.split())
    captured = {}
    if not match_tokens(tree, token_stack, captured):
        return False
    # Only export the captured labels into the caller's context on success.
    if args is not None:
        args.update(captured)
    logger.debug('MATCHED: {0}'.format(template))
    return True
def match_tokens(tree, tokens, args):
    """Check if stack of tokens matches the Tree structure
    Special matching rules that can be specified in the template::
        ':label': Label a token, the token will be returned as part of the context with key 'label'.
        '-@': Additional single letter argument determining return format of labeled token. Valid options are:
            '-r': Return token as word
            '-o': Return token as object
        '=word|word2|....|wordn': Force match raw lower case
        '$': Match end of tree
    Args:
        tree : Parsed tree structure
        tokens : Stack of tokens
    Returns:
        Boolean if they match or not

    Note:
        ``args`` is mutated in place; labeled captures are recorded even
        along branches that ultimately fail to match.
    """
    # Maps the single-letter format specifier (e.g. ':name-r') to the
    # extraction function used on the captured subtree.
    arg_type_to_func = {
        'r': get_raw_lower,
        'R': get_raw,
        'o': get_object_lower,
        'O': get_object,
    }
    # An exhausted token stack matches anything.
    if len(tokens) == 0:
        return True
    # A leaf cannot match a non-empty token pattern.
    if not isinstance(tree, Tree):
        return False
    root_token = tokens[0]
    # Equality: '=' forces a raw lower-case word comparison against the
    # '|'-separated alternatives.
    if root_token.find('=') >= 0:
        eq_tokens = root_token.split('=')[1].lower().split('|')
        root_token = root_token.split('=')[0]
        word = get_raw_lower(tree)
        if word not in eq_tokens:
            return False
    # Get arg: ':' labels this subtree and stores it (optionally converted
    # via the '-X' format specifier) into the args context.
    if root_token.find(':') >= 0:
        arg_tokens = root_token.split(':')[1].split('-')
        if len(arg_tokens) == 1:
            arg_name = arg_tokens[0]
            args[arg_name] = tree
        else:
            arg_name = arg_tokens[0]
            arg_type = arg_tokens[1]
            args[arg_name] = arg_type_to_func[arg_type](tree)
        root_token = root_token.split(':')[0]
    # Does not match wild card ('.') and label does not match any of the
    # '/'-separated alternatives.
    if root_token != '.' and tree.label() not in root_token.split('/'):
        return False
    # Check end symbol: '$' requires the tree to have exactly as many
    # children as there are remaining child tokens.
    if tokens[-1] == '$':
        if len(tree) != len(tokens[:-1]) - 1:
            return False
        else:
            tokens = tokens[:-1]
    # Check # of tokens: the tree must have at least enough children to
    # cover the remaining child tokens.
    if len(tree) < len(tokens) - 1:
        return False
    # Recursively match each child token against the corresponding subtree.
    for i in range(len(tokens) - 1):
        if not match_tokens(tree[i], tokens[i + 1], args):
            return False
    return True
def get_tokens(tokens):
    """Recursively gets tokens from a match list
    Args:
        tokens : List of tokens ['(', 'S', '(', 'NP', ')', ')']
    Returns:
        Stack of tokens
    """
    # Drop the enclosing parentheses, then walk the remaining tokens,
    # recursing into each balanced parenthesized group.
    inner = tokens[1:-1]
    result = []
    depth = 0
    group_start = 0
    for index, token in enumerate(inner):
        if token == '(':
            if depth == 0:
                group_start = index
            depth += 1
        elif token == ')':
            depth -= 1
            if depth < 0:
                raise Exception('Bracket mismatch: ' + str(inner))
            if depth == 0:
                result.append(get_tokens(inner[group_start:index + 1]))
        elif depth == 0:
            # Bare token at the top level of this group.
            result.append(token)
    if depth != 0:
        raise Exception('Bracket mismatch: ' + str(inner))
    return result
def get_object(tree):
    """Get the object in the tree object.
    Method should remove unnecessary letters and words::
        the
        a/an
        's
    Args:
        tree (Tree): Parsed tree structure
    Returns:
        Resulting string of tree ``(Ex: "red car")``
    """
    # Leaves are returned verbatim.
    if not isinstance(tree, Tree):
        return tree
    # Skip determiners (DT) and possessive markers (POS) entirely.
    if tree.label() in ('DT', 'POS'):
        return ''
    parts = (get_object(child) for child in tree)
    return ' '.join(part for part in parts if part)
def get_raw(tree):
    """Get the exact words of the tree object, preserving original case.

    (Unlike ``get_raw_lower``, this function does not lower-case anything —
    the previous docstring's "lowercase" claim was incorrect.)
    Args:
        tree (Tree): Parsed tree structure
    Returns:
        Resulting string of tree ``(Ex: "The red car")``
    """
    if isinstance(tree, Tree):
        words = []
        for child in tree:
            words.append(get_raw(child))
        return ' '.join(words)
    else:
        # Leaves are plain word strings and are returned verbatim.
        return tree
def initialize(spark_home_path: str = None):
    """
    Registers and initializes the PySpark library dependencies so that the
    pyspark package can be imported and used within the notebook.
    If you specify the path to the spark home folder, the PySpark libraries
    from that location will be loaded. If a value is omitted, the $SPARK_HOME
    environmental variable will be used to determine from where to load the
    libraries.
    :param spark_home_path:
        The path to the spark folder on your system. Leave this blank if you
        want to use the $SPARK_HOME environmental variable default instead.
    :return:
    :raises FileNotFoundError:
        If the spark home, python or python/lib directory does not exist.
    """
    def require_exists(path: str):
        """Raises FileNotFoundError when the given path does not exist."""
        if not os.path.exists(path):
            raise FileNotFoundError(
                errno.ENOENT,
                os.strerror(errno.ENOENT),
                path
            )

    if not spark_home_path:
        spark_home_path = os.environ.get('SPARK_HOME')
    spark_home_path = environ.paths.clean(spark_home_path)
    require_exists(spark_home_path)

    spark_python_path = os.path.join(spark_home_path, 'python')
    require_exists(spark_python_path)

    spark_pylib_path = os.path.join(spark_python_path, 'lib')
    # BUG FIX: the error previously reported spark_python_path even though
    # it is spark_pylib_path that is missing here.
    require_exists(spark_pylib_path)

    # Every zipped library shipped with PySpark gets added to sys.path.
    lib_glob = os.path.join(spark_pylib_path, '*.zip')
    lib_sources = list(glob.iglob(lib_glob))

    unload()
    for p in lib_sources:
        if p not in sys.path:
            sys.path.append(p)

    spark_environment.update(dict(
        spark_home_path=spark_home_path,
        spark_python_path=spark_python_path,
        spark_pylib_path=spark_pylib_path,
        libs=lib_sources
    ))
def is_remote_project(self) -> bool:
    """Whether or not this project is remote"""
    cleaned_path = environ.paths.clean(self.source_directory)
    # Remote projects live under a directory containing this marker.
    return 'cd-remote-project' in cleaned_path
def library_directories(self) -> typing.List[str]:
    """
    The list of directories to all of the library locations
    """
    def as_list(value):
        return [value] if isinstance(value, str) else list(value)

    # If this is a project running remotely remove external library
    # folders as the remote shared libraries folder will contain all
    # of the necessary dependencies
    is_local_project = not self.is_remote_project
    configured = as_list(self.settings.fetch('library_folders', ['libs']))
    folders = [
        folder
        for folder in configured
        if is_local_project or not folder.startswith('..')
    ]
    # Include the remote shared library folder as well
    folders.append('../__cauldron_shared_libs')
    # Include the project directory as well
    folders.append(self.source_directory)
    return [
        environ.paths.clean(os.path.join(self.source_directory, folder))
        for folder in folders
    ]
def results_path(self) -> str:
    """The path where the project results will be written"""
    # Candidates are yielded lazily so that later fallbacks are only
    # computed when all of the earlier ones are undefined.
    def candidates():
        yield self._results_path
        yield self.settings.fetch('path_results')
        yield environ.configs.fetch('results_directory')
        yield environ.paths.results(self.uuid)

    return next(path for path in candidates() if path is not None)
def url(self) -> str:
    """
    Returns the URL that will open this project results file in the browser
    :return:
    """
    html_path = os.path.join(self.results_path, 'project.html')
    return 'file://{path}?id={id}'.format(path=html_path, id=self.uuid)
def output_directory(self) -> str:
    """
    Returns the directory where the project results files will be written
    """
    return os.path.join(
        self.results_path,
        'reports',
        self.uuid,
        'latest'
    )
def refresh(self, force: bool = False) -> bool:
    """
    Loads the cauldron.json definition file for the project and populates
    the project with the loaded data. Any existing data will be overwritten,
    if the new definition file differs from the previous one.
    If the project has already loaded with the most recent version of the
    cauldron.json file, this method will return without making any changes
    to the project.
    :param force:
        If true the project will be refreshed even if the project file
        modified timestamp doesn't indicate that it needs to be refreshed.
    :return:
        Whether or not a refresh was needed and carried out
    """
    # Skip the refresh when the in-memory state is at least as new as the
    # definition file on disk (unless forced).
    lm = self.last_modified
    is_newer = lm is not None and lm >= os.path.getmtime(self.source_path)
    if not force and is_newer:
        return False
    old_definition = self.settings.fetch(None)
    new_definition = definitions.load_project_definition(
        self.source_directory
    )
    # Identical definitions require no changes at all.
    if not force and old_definition == new_definition:
        return False
    self.settings.clear().put(**new_definition)
    old_step_definitions = old_definition.get('steps', [])
    new_step_definitions = new_definition.get('steps', [])
    # When the step definitions are unchanged, the settings update above
    # was the only work needed, but a refresh did occur.
    if not force and old_step_definitions == new_step_definitions:
        return True
    # Rebuild the step list: reuse existing step objects whose definitions
    # are unchanged and create new steps for definitions that are new.
    old_steps = self.steps
    self.steps = []
    for step_data in new_step_definitions:
        matches = [s for s in old_step_definitions if s == step_data]
        if len(matches) > 0:
            index = old_step_definitions.index(matches[0])
            self.steps.append(old_steps[index])
        else:
            self.add_step(step_data)
    self.last_modified = time.time()
    return True
def preformatted_text(source: str) -> str:
    """Renders preformatted text box"""
    environ.abort_thread()
    if not source:
        return ''
    # Escape first so the contents render literally inside the <pre> tag.
    escaped = render_utils.html_escape(source)
    dedented = str(textwrap.dedent(escaped))
    return '<pre class="preformatted-textbox">{text}</pre>'.format(
        text=dedented
    )
def markdown(
        source: str = None,
        source_path: str = None,
        preserve_lines: bool = False,
        font_size: float = None,
        **kwargs
) -> dict:
    """
    Renders a markdown file with support for Jinja2 templating. Any keyword
    arguments will be passed to Jinja2 for templating prior to rendering the
    markdown to HTML for display within the notebook.
    :param source:
        A string of markdown text that should be rendered to HTML for
        notebook display.
    :param source_path:
        The path to a markdown file that should be rendered to HTML for
        notebook display.
    :param preserve_lines:
        If True, all line breaks will be treated as hard breaks. Use this
        for pre-formatted markdown text where newlines should be retained
        during rendering.
    :param font_size:
        Specifies a relative font size adjustment. The default value is 1.0,
        which preserves the inherited font size values. Set it to a value
        below 1.0 for smaller font-size rendering and greater than 1.0 for
        larger font size rendering.
    :return:
        The HTML results of rendering the specified markdown string or file.
    """
    environ.abort_thread()
    library_includes = []

    # Apply Jinja2 templating before any markdown processing occurs.
    rendered = textwrap.dedent(
        templating.render_file(source_path, **kwargs)
        if source_path else
        templating.render(source or '', **kwargs)
    )

    if md is None:
        raise ImportError('Unable to import the markdown package')

    # Scan for "$$...$$" (inline) and "$$$...$$$" (block) LaTeX segments
    # and replace each one with its rendered KaTeX markup.
    offset = 0
    while offset < len(rendered):
        bound_chars = '$$'
        start_index = rendered.find(bound_chars, offset)
        if start_index < 0:
            break

        # BUG FIX: guard the look-ahead so a "$$" at the very end of the
        # rendered string no longer raises an IndexError.
        inline = (
            start_index + 2 >= len(rendered)
            or rendered[start_index + 2] != '$'
        )
        bound_chars = '$$' if inline else '$$$'

        end_index = rendered.find(
            bound_chars,
            start_index + len(bound_chars)
        )
        if end_index < 0:
            break
        end_index += len(bound_chars)

        # '@' acts as an escape character for backslashes within the
        # LaTeX source.
        chunk = (
            rendered[start_index:end_index]
            .strip('$')
            .strip()
            .replace('@', '\\')
        )
        if inline:
            chunk = chunk.replace('\\', '\\\\')
        chunk = latex(chunk, inline)

        rendered = '{pre}{gap}{latex}{gap}{post}'.format(
            pre=rendered[:start_index],
            latex=chunk,
            post=rendered[end_index:],
            gap='' if inline else '\n\n'
        )
        if 'katex' not in library_includes:
            library_includes.append('katex')
        offset = end_index

    extensions = [
        'markdown.extensions.extra',
        'markdown.extensions.admonition',
        'markdown.extensions.sane_lists',
        'markdown.extensions.nl2br' if preserve_lines else None
    ]
    body = templating.render_template(
        'markdown-block.html',
        text=md.markdown(rendered, extensions=[
            e for e in extensions if e is not None
        ]),
        font_size=font_size
    )

    # Defer image loading in the notebook by rewriting src attributes to
    # data-src attributes.
    pattern = re.compile('src="(?P<url>[^"]+)"')
    body = pattern.sub(r'data-src="\g<url>"', body)

    return dict(
        body=body,
        library_includes=library_includes,
        rendered=rendered
    )
def populate_extra_files():
    """
    Creates a list of non-python data files to include in package distribution
    """
    patterns = (
        'cauldron/resources/examples/**/*',
        'cauldron/resources/templates/**/*',
        'cauldron/resources/web/**/*',
    )
    found = ['cauldron/settings.json']
    for pattern in patterns:
        found.extend(glob.iglob(pattern, recursive=True))
    return found
def create_rename_entry(
        step: 'projects.ProjectStep',
        insertion_index: int = None,
        stash_path: str = None
) -> typing.Union[None, STEP_RENAME]:
    """
    Creates a STEP_RENAME for the given ProjectStep instance
    :param step:
        The ProjectStep instance for which the STEP_RENAME will be created
    :param insertion_index:
        An optional index where a step will be inserted as part of this
        renaming process. Allows files to be renamed prior to the insertion
        of the step to prevent conflicts.
    :param stash_path:
        Optional pre-existing stash file path; a temporary file is created
        when omitted.
    :return:
        The STEP_RENAME entry, or None when the step's name would not
        change.
    """
    project = step.project
    name = step.definition.name
    name_parts = naming.explode_filename(name, project.naming_scheme)
    index = project.index_of_step(name)

    # Adjusts indexing when renaming is for the purpose of
    # inserting a new step
    shifted = insertion_index is not None and insertion_index <= index
    name_parts['index'] = index + 1 if shifted else index

    new_name = naming.assemble_filename(
        scheme=project.naming_scheme,
        **name_parts
    )
    if name == new_name:
        return None

    if not stash_path:
        descriptor, stash_path = tempfile.mkstemp(
            prefix='{}-{}--{}--'.format(step.reference_id, name, new_name)
        )
        # Only the path is needed; close the open descriptor immediately.
        os.close(descriptor)

    return STEP_RENAME(
        id=step.reference_id,
        index=index,
        old_name=name,
        new_name=new_name,
        old_path=step.source_path,
        stash_path=stash_path,
        new_path=os.path.join(step.project.source_directory, new_name)
    )
def synchronize_step_names(
        project: 'projects.Project',
        insert_index: int = None
) -> Response:
    """
    Renames the project's step files so that they conform to the project's
    naming scheme, optionally making room for a new step at the given
    insertion index.
    :param project:
        The project whose step names should be synchronized
    :param insert_index:
        Optional index at which a new step will be inserted; existing steps
        at or after this index are renamed to make room for it
    """
    response = Response()
    response.returned = dict()

    # Nothing to synchronize when the project has no naming scheme.
    if not project.naming_scheme:
        return response

    create_mapper_func = functools.partial(
        create_rename_entry,
        insertion_index=insert_index
    )
    step_renames = list([create_mapper_func(s) for s in project.steps])
    # Entries are None for steps whose names are already correct.
    step_renames = list(filter(lambda sr: (sr is not None), step_renames))
    if not step_renames:
        return response
    try:
        backup_path = create_backup(project)
    except Exception as err:
        return response.fail(
            code='RENAME_BACKUP_ERROR',
            message='Unable to create backup name',
            error=err
        ).response
    try:
        # All sources are stashed first and only then unstashed into their
        # new names, so renames cannot collide with not-yet-moved files.
        step_renames = list([stash_source(sr) for sr in step_renames])
        step_renames = list([unstash_source(sr) for sr in step_renames])
    except Exception as err:
        return response.fail(
            code='RENAME_FILE_ERROR',
            message='Unable to rename files',
            error=err
        ).response
    response.returned = update_steps(project, step_renames)
    project.save()
    try:
        os.remove(backup_path)
    except PermissionError:
        # Best-effort cleanup: the backup file is left behind when it
        # cannot be removed.
        pass
    return response
def assemble_url(
        endpoint: str,
        remote_connection: 'environ.RemoteConnection' = None
) -> str:
    """
    Assembles a fully-resolved remote connection URL from the given endpoint
    and remote_connection structure. If the remote_connection is omitted, the
    global remote_connection object stored in the environ module will be
    used in its place.
    :param endpoint:
        The endpoint for the API call
    :param remote_connection:
        The remote connection definition data structure
    :return:
        The fully-resolved URL for the given endpoint
    """
    connection = (
        remote_connection
        if remote_connection else
        environ.remote_connection
    )
    # Fall back on the default local kernel address when no URL is set.
    root = connection.url or 'localhost:5010'
    scheme = '' if root.startswith('http') else 'http://'
    return '{}{}/{}'.format(scheme, root.rstrip('/'), endpoint.lstrip('/'))
def parse_http_response(http_response: HttpResponse) -> 'environ.Response':
    """
    Returns a Cauldron response object parsed from the serialized JSON data
    specified in the http_response argument. If the response doesn't contain
    valid Cauldron response data, an error Cauldron response object is
    returned instead.
    :param http_response:
        The response object from an http request that contains a JSON
        serialized Cauldron response object as its body
    :return:
        The Cauldron response object for the given http response
    """
    try:
        response = environ.Response.deserialize(http_response.json())
    except Exception as error:
        # Anything that cannot be deserialized becomes an error response.
        failed = environ.Response().fail(
            code='INVALID_REMOTE_RESPONSE',
            error=error,
            message='Invalid HTTP response from remote connection'
        )
        response = failed.console(whitespace=1).response

    # Attach the raw HTTP response for downstream inspection.
    response.http_response = http_response
    return response
def send_request(
        endpoint: str,
        data: dict = None,
        remote_connection: 'environ.RemoteConnection' = None,
        method: str = None,
        timeout: int = 10,
        max_retries: int = 10,
        **kwargs
) -> 'environ.Response':
    """
    Sends a request to the remote kernel specified by the RemoteConnection
    object and processes the result. If the request fails or times out it
    will be retried until the max retries is reached. After that a failed
    response will be returned instead.
    :param endpoint:
        Remote endpoint where the request will be directed.
    :param data:
        An optional JSON-serializable dictionary containing the request
        body data.
    :param remote_connection:
        Defines the connection to the remote server where the request will
        be sent.
    :param method:
        The HTTP method type for the request, e.g. GET, POST.
    :param timeout:
        Number of seconds before the request aborts when trying to either
        connect to the target endpoint or receive data from the server.
    :param max_retries:
        Number of retry attempts to make before giving up if a non-HTTP
        error is encountered during communication.
    :return:
        The Cauldron response object parsed from the remote reply, or a
        failed response when retries are exhausted.
    """
    # Retries exhausted: give up with a communication failure response.
    if max_retries < 0:
        return environ.Response().fail(
            code='COMMUNICATION_ERROR',
            error=None,
            message='Unable to communicate with the remote kernel.'
        ).console(whitespace=1).response

    url = assemble_url(endpoint, remote_connection)
    retriable_errors = (
        requests.ConnectionError,
        requests.HTTPError,
        requests.Timeout
    )
    default_method = 'POST' if data is not None else 'GET'

    try:
        http_response = requests.request(
            method=method or default_method,
            url=url,
            json=data,
            # BUG FIX: honor the caller-specified timeout instead of a
            # hard-coded 10 seconds.
            timeout=timeout,
            **kwargs
        )
    except retriable_errors:
        # Retry with one fewer attempt remaining.
        return send_request(
            endpoint=endpoint,
            data=data,
            remote_connection=remote_connection,
            method=method,
            timeout=timeout,
            max_retries=max_retries - 1,
            **kwargs
        )

    return parse_http_response(http_response)
def view(route: str):
    """
    Retrieves the contents of the file specified by the view route if it
    exists.
    """
    project = cauldron.project.get_internal_project()
    results_path = project.results_path if project else None

    # Respond with "no content" when there is no loaded project, no results
    # directory, or the requested file does not exist.
    if not results_path:
        return '', 204
    path = os.path.join(results_path, route)
    if not os.path.exists(path):
        return '', 204

    return flask.send_file(
        path,
        mimetype=mimetypes.guess_type(path)[0],
        cache_timeout=-1
    )
def save(
        project: 'projects.Project',
        write_list: typing.List[tuple] = None
) -> typing.List[tuple]:
    """
    Computes the file write list for the current state of the project if no
    write_list was specified in the arguments, and then writes each entry in
    that list to disk.
    :param project:
        The project to be saved
    :param write_list:
        The file writes list for the project if one already exists, or None
        if a new writes list should be computed
    :return:
        The file write list that was used to save the project to disk
    """
    # The previous implementation wrapped this in a try/except that only
    # re-raised, which had no effect; compute the writes list directly.
    # A copy of the supplied list is used so the caller's list is never
    # mutated by the deployment below.
    writes = (
        to_write_list(project)
        if write_list is None
        else write_list.copy()
    )

    # Start from a clean output directory before deploying the writes.
    environ.systems.remove(project.output_directory)
    os.makedirs(project.output_directory)
    file_io.deploy(writes)
    return writes
def list_asset_writes(
project: 'projects.Project'
) -> typing.List[file_io.FILE_COPY_ENTRY]:
"""
Returns a list containing the file/directory writes that should be executed
to deploy the project assets to the results folder. If the project has no
assets an empty list is returned.
:param project:
The project for which the assets should be copied
:return:
A list containing the file copy entries for deploying project assets
"""
def make_asset_copy(directory: str) -> file_io.FILE_COPY_ENTRY:
output_directory = os.path.join(
project.output_directory,
directory[len(project.source_directory):].lstrip(os.sep)
)
return file_io.FILE_COPY_ENTRY(
source=directory,
destination=output_directory
)
copies = [
make_asset_copy(asset_directory)
for asset_directory in project.asset_directories
]
return list(filter(
lambda fc: os.path.exists(fc.source),
copies
)) | Returns a list containing the file/directory writes that should be executed
to deploy the project assets to the results folder. If the project has no
assets an empty list is returned.
:param project:
The project for which the assets should be copied
:return:
A list containing the file copy entries for deploying project assets | entailment |
def add_library_path(path: str) -> bool:
"""
Adds the path to the Python system path if not already added and the path
exists.
:param path:
The path to add to the system paths
:return:
Whether or not the path was added. Only returns False if the path was
not added because it doesn't exist
"""
if not os.path.exists(path):
return False
if path not in sys.path:
sys.path.append(path)
return True | Adds the path to the Python system path if not already added and the path
exists.
:param path:
The path to add to the system paths
:return:
Whether or not the path was added. Only returns False if the path was
not added because it doesn't exist | entailment |
def remove_library_path(path: str) -> bool:
"""
Removes the path from the Python system path if it is found in the system
paths.
:param path:
The path to remove from the system paths
:return:
Whether or not the path was removed.
"""
if path in sys.path:
sys.path.remove(path)
return True
return False | Removes the path from the Python system path if it is found in the system
paths.
:param path:
The path to remove from the system paths
:return:
Whether or not the path was removed. | entailment |
def close():
"""..."""
os.chdir(os.path.expanduser('~'))
project = cauldron.project.internal_project
if not project:
return False
[remove_library_path(path) for path in project.library_directories]
remove_library_path(project.source_directory)
cauldron.project.unload()
return True | ... | entailment |
def reload_libraries(library_directories: list = None):
"""
Reload the libraries stored in the project's local and shared library
directories
"""
directories = library_directories or []
project = cauldron.project.get_internal_project()
if project:
directories += project.library_directories
if not directories:
return
def reload_module(path: str, library_directory: str):
path = os.path.dirname(path) if path.endswith('__init__.py') else path
start_index = len(library_directory) + 1
end_index = -3 if path.endswith('.py') else None
package_path = path[start_index:end_index]
module = sys.modules.get(package_path.replace(os.sep, '.'))
return importlib.reload(module) if module is not None else None
def reload_library(directory: str) -> list:
if not add_library_path(directory):
# If the library wasn't added because it doesn't exist, remove it
# in case the directory has recently been deleted and then return
# an empty result
remove_library_path(directory)
return []
glob_path = os.path.join(directory, '**', '*.py')
return [
reload_module(path, directory)
for path in glob.glob(glob_path, recursive=True)
]
return [
reloaded_module
for directory in directories
for reloaded_module in reload_library(directory)
if reload_module is not None
] | Reload the libraries stored in the project's local and shared library
directories | entailment |
def complete(
response: Response,
project: typing.Union[Project, None],
starting: ProjectStep = None,
force: bool = False,
limit: int = -1
) -> list:
"""
Runs the entire project, writes the results files, and returns the URL to
the report file
:param response:
:param project:
:param starting:
:param force:
:param limit:
:return:
Local URL to the report path
"""
if project is None:
project = cauldron.project.get_internal_project()
starting_index = 0
if starting:
starting_index = project.steps.index(starting)
count = 0
steps_run = []
for ps in project.steps:
if 0 < limit <= count:
break
if ps.index < starting_index:
continue
if not force and not ps.is_dirty():
if limit < 1:
environ.log(
'[{}]: Nothing to update'.format(ps.definition.name)
)
continue
count += 1
steps_run.append(ps)
success = source.run_step(response, project, ps, force=True)
if not success or project.stop_condition.halt:
return steps_run
return steps_run | Runs the entire project, writes the results files, and returns the URL to
the report file
:param response:
:param project:
:param starting:
:param force:
:param limit:
:return:
Local URL to the report path | entailment |
def elapsed_time(self) -> float:
"""
The number of seconds that has elapsed since the step started running
if the step is still running. Or, if the step has already finished
running, the amount of time that elapsed during the last execution of
the step.
"""
current_time = datetime.utcnow()
start = self.start_time or current_time
end = self.end_time or current_time
return (end - start).total_seconds() | The number of seconds that has elapsed since the step started running
if the step is still running. Or, if the step has already finished
running, the amount of time that elapsed during the last execution of
the step. | entailment |
def get_elapsed_timestamp(self) -> str:
"""
A human-readable version of the elapsed time for the last execution
of the step. The value is derived from the `ProjectStep.elapsed_time`
property.
"""
t = self.elapsed_time
minutes = int(t / 60)
seconds = int(t - (60 * minutes))
millis = int(100 * (t - int(t)))
return '{:>02d}:{:>02d}.{:<02d}'.format(minutes, seconds, millis) | A human-readable version of the elapsed time for the last execution
of the step. The value is derived from the `ProjectStep.elapsed_time`
property. | entailment |
def get_dom(self) -> str:
""" Retrieves the current value of the DOM for the step """
if self.is_running:
return self.dumps()
if self.dom is not None:
return self.dom
dom = self.dumps()
self.dom = dom
return dom | Retrieves the current value of the DOM for the step | entailment |
def dumps(self) -> str:
"""Writes the step information to an HTML-formatted string"""
code_file_path = os.path.join(
self.project.source_directory,
self.filename
)
code = dict(
filename=self.filename,
path=code_file_path,
code=render.code_file(code_file_path)
)
if not self.is_running:
# If no longer running, make sure to flush the stdout buffer so
# any print statements at the end of the step get included in
# the body
self.report.flush_stdout()
# Create a copy of the body for dumping
body = self.report.body[:]
if self.is_running:
# If still running add a temporary copy of anything not flushed
# from the stdout buffer to the copy of the body for display. Do
# not flush the buffer though until the step is done running or
# it gets flushed by another display call.
body.append(self.report.read_stdout())
body = ''.join(body)
has_body = len(body) > 0 and (
body.find('<div') != -1 or
body.find('<span') != -1 or
body.find('<p') != -1 or
body.find('<pre') != -1 or
body.find('<h') != -1 or
body.find('<ol') != -1 or
body.find('<ul') != -1 or
body.find('<li') != -1
)
std_err = (
self.report.read_stderr()
if self.is_running else
self.report.flush_stderr()
).strip('\n').rstrip()
# The step will be visible in the display if any of the following
# conditions are true.
is_visible = self.is_visible or self.is_running or self.error
dom = templating.render_template(
'step-body.html',
last_display_update=self.report.last_update_time,
elapsed_time=self.get_elapsed_timestamp(),
code=code,
body=body,
has_body=has_body,
id=self.definition.name,
title=self.report.title,
subtitle=self.report.subtitle,
summary=self.report.summary,
error=self.error,
index=self.index,
is_running=self.is_running,
is_visible=is_visible,
progress_message=self.progress_message,
progress=int(round(max(0, min(100, 100 * self.progress)))),
sub_progress_message=self.sub_progress_message,
sub_progress=int(round(max(0, min(100, 100 * self.sub_progress)))),
std_err=std_err
)
if not self.is_running:
self.dom = dom
return dom | Writes the step information to an HTML-formatted string | entailment |
def retry(*excepts):
'''A decorator to specify a bunch of exceptions that should be caught
and the job retried. It turns out this comes up with relative frequency'''
@decorator.decorator
def new_func(func, job):
'''No docstring'''
try:
func(job)
except tuple(excepts):
job.retry()
return new_func | A decorator to specify a bunch of exceptions that should be caught
and the job retried. It turns out this comes up with relative frequency | entailment |
def tracked(self):
'''Return an array of job objects that are being tracked'''
results = json.loads(self.client('track'))
results['jobs'] = [Job(self, **job) for job in results['jobs']]
return results | Return an array of job objects that are being tracked | entailment |
def tagged(self, tag, offset=0, count=25):
'''Return the paginated jids of jobs tagged with a tag'''
return json.loads(self.client('tag', 'get', tag, offset, count)) | Return the paginated jids of jobs tagged with a tag | entailment |
def failed(self, group=None, start=0, limit=25):
'''If no group is provided, this returns a JSON blob of the counts of
the various types of failures known. If a type is provided, returns
paginated job objects affected by that kind of failure.'''
if not group:
return json.loads(self.client('failed'))
else:
results = json.loads(
self.client('failed', group, start, limit))
results['jobs'] = self.get(*results['jobs'])
return results | If no group is provided, this returns a JSON blob of the counts of
the various types of failures known. If a type is provided, returns
paginated job objects affected by that kind of failure. | entailment |
def get(self, *jids):
'''Return jobs objects for all the jids'''
if jids:
return [
Job(self.client, **j) for j in
json.loads(self.client('multiget', *jids))]
return [] | Return jobs objects for all the jids | entailment |
def title(cls, message=None):
'''Set the title of the process'''
if message == None:
return getproctitle()
else:
setproctitle('qless-py-worker %s' % message)
logger.info(message) | Set the title of the process | entailment |
def divide(cls, jobs, count):
'''Divide up the provided jobs into count evenly-sized groups'''
jobs = list(zip(*zip_longest(*[iter(jobs)] * count)))
# If we had no jobs to resume, then we get an empty list
jobs = jobs or [()] * count
for index in range(count):
# Filter out the items in jobs that are Nones
jobs[index] = [j for j in jobs[index] if j != None]
return jobs | Divide up the provided jobs into count evenly-sized groups | entailment |
def clean(cls, path):
'''Clean up all the files in a provided path'''
for pth in os.listdir(path):
pth = os.path.abspath(os.path.join(path, pth))
if os.path.isdir(pth):
logger.debug('Removing directory %s' % pth)
shutil.rmtree(pth)
else:
logger.debug('Removing file %s' % pth)
os.remove(pth) | Clean up all the files in a provided path | entailment |
def sandbox(cls, path):
'''Ensures path exists before yielding, cleans up after'''
# Ensure the path exists and is clean
try:
os.makedirs(path)
logger.debug('Making %s' % path)
except OSError:
if not os.path.isdir(path):
raise
finally:
cls.clean(path)
# Then yield, but make sure to clean up the directory afterwards
try:
yield
finally:
cls.clean(path) | Ensures path exists before yielding, cleans up after | entailment |
def resumable(self):
'''Find all the jobs that we'd previously been working on'''
# First, find the jids of all the jobs registered to this client.
# Then, get the corresponding job objects
jids = self.client.workers[self.client.worker_name]['jobs']
jobs = self.client.jobs.get(*jids)
# We'll filter out all the jobs that aren't in any of the queues
# we're working on.
queue_names = set([queue.name for queue in self.queues])
return [job for job in jobs if job.queue_name in queue_names] | Find all the jobs that we'd previously been working on | entailment |
def jobs(self):
'''Generator for all the jobs'''
# If we should resume work, then we should hand those out first,
# assuming we can still heartbeat them
for job in self.resume:
try:
if job.heartbeat():
yield job
except exceptions.LostLockException:
logger.exception('Cannot resume %s' % job.jid)
while True:
seen = False
for queue in self.queues:
job = queue.pop()
if job:
seen = True
yield job
if not seen:
yield None | Generator for all the jobs | entailment |
def listener(self):
'''Listen for pubsub messages relevant to this worker in a thread'''
channels = ['ql:w:' + self.client.worker_name]
listener = Listener(self.client.redis, channels)
thread = threading.Thread(target=self.listen, args=(listener,))
thread.start()
try:
yield
finally:
listener.unlisten()
thread.join() | Listen for pubsub messages relevant to this worker in a thread | entailment |
def listen(self, listener):
'''Listen for events that affect our ownership of a job'''
for message in listener.listen():
try:
data = json.loads(message['data'])
if data['event'] in ('canceled', 'lock_lost', 'put'):
self.kill(data['jid'])
except:
logger.exception('Pubsub error') | Listen for events that affect our ownership of a job | entailment |
def signals(self, signals=('QUIT', 'USR1', 'USR2')):
'''Register our signal handler'''
for sig in signals:
signal.signal(getattr(signal, 'SIG' + sig), self.handler) | Register our signal handler | entailment |
def handler(self, signum, frame): # pragma: no cover
'''Signal handler for this process'''
if signum == signal.SIGQUIT:
# QUIT - Finish processing, but don't do any more work after that
self.stop()
elif signum == signal.SIGUSR1:
# USR1 - Print the backtrace
message = ''.join(traceback.format_stack(frame))
message = 'Signaled traceback for %s:\n%s' % (os.getpid(), message)
print(message, file=sys.stderr)
logger.warn(message)
elif signum == signal.SIGUSR2:
# USR2 - Enter a debugger
# Much thanks to http://stackoverflow.com/questions/132058
data = {'_frame': frame} # Allow access to frame object.
data.update(frame.f_globals) # Unless shadowed by global
data.update(frame.f_locals)
# Build up a message with a traceback
message = ''.join(traceback.format_stack(frame))
message = 'Traceback:\n%s' % message
code.InteractiveConsole(data).interact(message) | Signal handler for this process | entailment |
def truncate_entry_titles(apps, schema_editor):
"""This function will truncate the values of Entry.title so they are
255 characters or less.
"""
Entry = apps.get_model("andablog", "Entry")
for entry in Entry.objects.all():
# Truncate to 255 characters (or less) but keep whole words intact.
while len(entry.title) > TITLE_LENGTH:
entry.title = ' '.join(entry.title.split()[:-1])
entry.save() | This function will truncate the values of Entry.title so they are
255 characters or less. | entailment |
def process(self, job):
'''Process a job'''
sandbox = self.sandboxes.pop(0)
try:
with Worker.sandbox(sandbox):
job.sandbox = sandbox
job.process()
finally:
# Delete its entry from our greenlets mapping
self.greenlets.pop(job.jid, None)
self.sandboxes.append(sandbox) | Process a job | entailment |
def kill(self, jid):
'''Stop the greenlet processing the provided jid'''
greenlet = self.greenlets.get(jid)
if greenlet is not None:
logger.warn('Lost ownership of %s' % jid)
greenlet.kill() | Stop the greenlet processing the provided jid | entailment |
def run(self):
'''Work on jobs'''
# Register signal handlers
self.signals()
# Start listening
with self.listener():
try:
generator = self.jobs()
while not self.shutdown:
self.pool.wait_available()
job = next(generator)
if job:
# For whatever reason, doing imports within a greenlet
# (there's one implicitly invoked in job.process), was
# throwing exceptions. The hacky way to get around this
# is to force the import to happen before the greenlet
# is spawned.
job.klass
greenlet = gevent.Greenlet(self.process, job)
self.greenlets[job.jid] = greenlet
self.pool.start(greenlet)
else:
logger.debug('Sleeping for %fs' % self.interval)
gevent.sleep(self.interval)
except StopIteration:
logger.info('Exhausted jobs')
finally:
logger.info('Waiting for greenlets to finish')
self.pool.join() | Work on jobs | entailment |
def init_db_conn(connection_name, connection_string, scopefunc=None):
"""
Initialize a postgresql connection by each connection string
defined in the configuration file
"""
engine = create_engine(connection_string)
session = scoped_session(sessionmaker(), scopefunc=scopefunc)
session.configure(bind=engine)
pool.connections[connection_name] = Connection(engine, session) | Initialize a postgresql connection by each connection string
defined in the configuration file | entailment |
def initialize(g, app):
"""
If postgresql url is defined in configuration params a
scoped session will be created
"""
if 'DATABASES' in app.config and 'POSTGRESQL' in app.config['DATABASES']:
# Database connection established for console commands
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v)
if 'test' not in sys.argv:
# Establish a new connection every request
@app.before_request
def before_request():
"""
Assign postgresql connection pool to the global
flask object at the beginning of every request
"""
# inject stack context if not testing
from flask import _app_ctx_stack
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v, scopefunc=_app_ctx_stack)
g.postgresql_pool = pool
# avoid to close connections if testing
@app.teardown_request
def teardown_request(exception):
"""
Releasing connection after finish request, not required in unit
testing
"""
pool = getattr(g, 'postgresql_pool', None)
if pool is not None:
for k, v in pool.connections.items():
v.session.remove()
else:
@app.before_request
def before_request():
"""
Assign postgresql connection pool to the global
flask object at the beginning of every request
"""
for k, v in app.config['DATABASES']['POSTGRESQL'].items():
init_db_conn(k, v)
g.postgresql_pool = pool | If postgresql url is defined in configuration params a
scoped session will be created | entailment |
def load_extensions_from_config(**config):
"""
Loads extensions
"""
extensions = []
if 'EXTENSIONS' in config:
for ext in config['EXTENSIONS']:
try:
extensions.append(locate(ext))
except Exception as e:
print(e)
return extensions | Loads extensions | entailment |
def set_request(self, r):
"""
Appends request object to the globals dict
"""
for k in self.environments.keys():
self.environments[k].globals['REQUEST'] = r | Appends request object to the globals dict | entailment |
def init_db_conn(connection_name, HOSTS=None):
"""
Initialize a redis connection by each connection string
defined in the configuration file
"""
el = elasticsearch.Elasticsearch(hosts=HOSTS)
el_pool.connections[connection_name] = ElasticSearchClient(el) | Initialize a redis connection by each connection string
defined in the configuration file | entailment |
def parse_tags(targs):
"""
    Tags can be in the format key:value or simply value
"""
tags = {}
for t in targs:
split_tag = t.split(':')
if len(split_tag) > 1:
tags['tag:' + split_tag[0]] = split_tag[1]
else:
tags['tag:' + split_tag[0]] = ''
    return tags | Tags can be in the format key:value or simply value | entailment |
def json_response(self, status=200, data={}, headers={}):
'''
To set flask to inject specific headers on response request,
such as CORS_ORIGIN headers
'''
mimetype = 'application/json'
header_dict = {}
for k, v in headers.items():
header_dict[k] = v
return Response(
json.dumps(data),
status=status,
mimetype=mimetype,
headers=header_dict) | To set flask to inject specific headers on response request,
such as CORS_ORIGIN headers | entailment |
def template_response(self, template_name, headers={}, **values):
"""
Constructs a response, allowing custom template name and content_type
"""
response = make_response(
self.render_template(template_name, **values))
for field, value in headers.items():
response.headers.set(field, value)
return response | Constructs a response, allowing custom template name and content_type | entailment |
def describe_key_pairs():
"""
Returns all key pairs for region
"""
region_keys = {}
for r in boto3.client('ec2', 'us-west-2').describe_regions()['Regions']:
region = r['RegionName']
client = boto3.client('ec2', region_name=region)
try:
pairs = client.describe_key_pairs()
if pairs:
region_keys[region] = pairs
except Exception as e:
app.logger.info(e)
return region_keys | Returns all key pairs for region | entailment |
def init_app(module, BASE_DIR, **kwargs):
"""
    Initalize an app, call this method once from start_app
    Initialize an app, call this method once from start_app
"""
global app
def init_config():
"""
Load settings module and attach values to the application
config dictionary
"""
if 'FLASK_PHILO_SETTINGS_MODULE' not in os.environ:
raise ConfigurationError('No settings has been defined')
app.config['BASE_DIR'] = BASE_DIR
# default settings
for v in dir(default_settings):
if not v.startswith('_'):
app.config[v] = getattr(default_settings, v)
app.debug = app.config['DEBUG']
# app settings
settings = importlib.import_module(
os.environ['FLASK_PHILO_SETTINGS_MODULE'])
for v in dir(settings):
if not v.startswith('_'):
app.config[v] = getattr(settings, v)
def init_urls():
# Reads urls definition from URLs file and bind routes and views
urls_module = importlib.import_module(app.config['URLS'])
for route in urls_module.URLS:
app.add_url_rule(
route[0], view_func=route[1].as_view(route[2]))
def init_logging():
"""
initialize logger for the app
"""
hndlr = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
hndlr.setFormatter(formatter)
app.logger.addHandler(hndlr)
log_level = app.config['LOG_LEVEL']
app.logger.setLevel(getattr(logging, log_level))
def init_flask_oauthlib():
"""
http://flask-oauthlib.readthedocs.io/en/latest/oauth2.html
"""
oauth.init_app(app)
def init_cors(app):
"""
Initializes cors protection if config
"""
if 'CORS' in app.config:
CORS(
app,
resources=app.config['CORS'],
supports_credentials=app.config.get(
"CORS_SUPPORT_CREDENTIALS",
False
),
allow_headers=app.config.get(
"CORS_ALLOW_HEADERS",
"Content-Type,Authorization,accept-language,accept"
)
)
init_db(g, app)
init_logging()
init_urls()
init_flask_oauthlib()
init_jinja2(g, app)
init_cors(app)
app = Flask(module)
init_config()
    return app | Initialize an app, call this method once from start_app | entailment |
def execute_command(cmd, **kwargs):
"""
execute a console command
"""
cmd_dict = {
c: 'flask_philo.commands_flask_philo.' + c for c
in dir(commands_flask_philo) if not c.startswith('_') and c != 'os' # noqa
}
# loading specific app commands
try:
import console_commands
for cm in console_commands.__all__:
if not cm.startswith('_'):
cmd_dict[cm] = 'console_commands.' + cm
except Exception:
pass
if cmd not in cmd_dict:
raise ConfigurationError('command {} does not exists'.format(cmd))
cmd_module = importlib.import_module(cmd_dict[cmd])
kwargs['app'] = app
cmd_module.run(**kwargs) | execute a console command | entailment |
def syncdb():
"""
Create tables if they don't exist
"""
from flask_philo.db.postgresql.schema import Base
from flask_philo.db.postgresql.orm import BaseModel # noqa
from flask_philo.db.postgresql.connection import get_pool
for conn_name, conn in get_pool().connections.items():
Base.metadata.create_all(conn.engine) | Create tables if they don't exist | entailment |
def new(cls, password, rounds):
"""Creates a PasswordHash from the given password."""
if isinstance(password, str):
password = password.encode('utf8')
value = bcrypt.hashpw(password, bcrypt.gensalt(rounds))
return cls(value) | Creates a PasswordHash from the given password. | entailment |
def _convert(self, value):
"""Returns a PasswordHash from the given string.
PasswordHash instances or None values will return unchanged.
Strings will be hashed and the resulting PasswordHash returned.
Any other input will result in a TypeError.
"""
if isinstance(value, PasswordHash):
return value
elif isinstance(value, str):
value = value.encode('utf-8')
return PasswordHash.new(value, self.rounds)
elif value is not None:
raise TypeError(
'Cannot convert {} to a PasswordHash'.format(type(value))) | Returns a PasswordHash from the given string.
PasswordHash instances or None values will return unchanged.
Strings will be hashed and the resulting PasswordHash returned.
Any other input will result in a TypeError. | entailment |
def init_db_conn(
connection_name, HOST=None, PORT=None, DB=None, PASSWORD=None):
"""
Initialize a redis connection by each connection string
defined in the configuration file
"""
rpool = redis.ConnectionPool(
host=HOST, port=PORT, db=DB, password=PASSWORD)
r = redis.Redis(connection_pool=rpool)
redis_pool.connections[connection_name] = RedisClient(r) | Initialize a redis connection by each connection string
defined in the configuration file | entailment |
def initialize(g, app):
"""
If redis connection parameters are defined in configuration params a
session will be created
"""
if 'DATABASES' in app.config and 'REDIS' in app.config['DATABASES']:
# Initialize connections for console commands
for k, v in app.config['DATABASES']['REDIS'].items():
init_db_conn(k, **v)
@app.before_request
def before_request():
"""
Assign redis connection pool to the global
flask object at the beginning of every request
"""
for k, v in app.config['DATABASES']['REDIS'].items():
init_db_conn(k, **v)
g.redis_pool = redis_pool
if 'test' not in sys.argv:
@app.teardown_request
def teardown_request(exception):
pool = getattr(g, 'redis_pool', None)
if pool is not None:
pool.close() | If redis connection parameters are defined in configuration params a
session will be created | entailment |
def _initialize_from_dict(self, data):
"""
Loads serializer from a request object
"""
self._json = data
self._validate()
for name, value in self._json.items():
if name in self._properties:
if '$ref' in self._properties[name]:
if 'decimal' in self._properties[name]['$ref']:
value = Decimal(value)
# applying proper formatting when required
if 'format' in self._properties[name]:
format = self._properties[name]['format']
if 'date-time' == format:
value = utils.string_to_datetime(value)
elif 'date' == format:
value = utils.string_to_date(value)
setattr(self, name, value) | Loads serializer from a request object | entailment |
def _initialize_from_model(self, model):
"""
        Loads a model from a model instance
"""
for name, value in model.__dict__.items():
if name in self._properties:
                setattr(self, name, value) | Loads a model from a model instance | entailment |
def update(self):
"""
Finds record and update it based in serializer values
"""
obj = self.__model__.objects.get_for_update(id=self.id)
for name, value in self.__dict__.items():
if name in self._properties:
setattr(obj, name, value)
obj.update()
return obj | Finds record and update it based in serializer values | entailment |
def to_json(self):
"""
Returns a json representation
"""
data = {}
for k, v in self.__dict__.items():
if not k.startswith('_'):
# values not serializable, should be converted to strings
if isinstance(v, datetime):
v = utils.datetime_to_string(v)
elif isinstance(v, date):
v = utils.date_to_string(v)
elif isinstance(v, uuid.UUID):
v = str(v)
elif isinstance(v, Decimal):
v = str(v)
data[k] = v
return data | Returns a json representation | entailment |
def OPERATING_SYSTEM(stats, info):
"""General information about the operating system.
This is a flag you can pass to `Stats.submit()`.
"""
info.append(('architecture', platform.machine().lower()))
info.append(('distribution',
"%s;%s" % (platform.linux_distribution()[0:2])))
info.append(('system',
"%s;%s" % (platform.system(), platform.release()))) | General information about the operating system.
This is a flag you can pass to `Stats.submit()`. | entailment |
def SESSION_TIME(stats, info):
"""Total time of this session.
Reports the time elapsed from the construction of the `Stats` object to
this `submit()` call.
This is a flag you can pass to `Stats.submit()`.
"""
duration = time.time() - stats.started_time
secs = int(duration)
msecs = int((duration - secs) * 1000)
info.append(('session_time', '%d.%d' % (secs, msecs))) | Total time of this session.
Reports the time elapsed from the construction of the `Stats` object to
this `submit()` call.
This is a flag you can pass to `Stats.submit()`. | entailment |
def PYTHON_VERSION(stats, info):
"""Python interpreter version.
This is a flag you can pass to `Stats.submit()`.
"""
# Some versions of Python have a \n in sys.version!
version = sys.version.replace(' \n', ' ').replace('\n', ' ')
python = ';'.join([str(c) for c in sys.version_info] + [version])
info.append(('python', python)) | Python interpreter version.
This is a flag you can pass to `Stats.submit()`. | entailment |
def read_config(self):
"""Reads the configuration.
This method can be overloaded to integrate with your application's own
configuration mechanism. By default, a single 'status' file is read
from the reports' directory.
This should set `self.status` to one of the state constants, and make
sure `self.location` points to a writable directory where the reports
will be written.
The possible values for `self.status` are:
- `UNSET`: nothing has been selected and the user should be prompted
- `ENABLED`: collect and upload reports
- `DISABLED`: don't collect or upload anything, stop prompting
- `ERRORED`: something is broken, and we can't do anything in this
session (for example, the configuration directory is not writable)
"""
if self.enabled and not os.path.isdir(self.location):
try:
os.makedirs(self.location, 0o700)
except OSError:
logger.warning("Couldn't create %s, usage statistics won't be "
"collected", self.location)
self.status = Stats.ERRORED
status_file = os.path.join(self.location, 'status')
if self.enabled and os.path.exists(status_file):
with open(status_file, 'r') as fp:
status = fp.read().strip()
if status == 'ENABLED':
self.status = Stats.ENABLED
elif status == 'DISABLED':
self.status = Stats.DISABLED | Reads the configuration.
This method can be overloaded to integrate with your application's own
configuration mechanism. By default, a single 'status' file is read
from the reports' directory.
This should set `self.status` to one of the state constants, and make
sure `self.location` points to a writable directory where the reports
will be written.
The possible values for `self.status` are:
- `UNSET`: nothing has been selected and the user should be prompted
- `ENABLED`: collect and upload reports
- `DISABLED`: don't collect or upload anything, stop prompting
- `ERRORED`: something is broken, and we can't do anything in this
session (for example, the configuration directory is not writable) | entailment |
def write_config(self, enabled):
"""Writes the configuration.
This method can be overloaded to integrate with your application's own
configuration mechanism. By default, a single 'status' file is written
in the reports' directory, containing either ``ENABLED`` or
``DISABLED``; if the file doesn't exist, `UNSET` is assumed.
:param enabled: Either `Stats.UNSET`, `Stats.DISABLED` or
`Stats.ENABLED`.
"""
status_file = os.path.join(self.location, 'status')
with open(status_file, 'w') as fp:
if enabled is Stats.ENABLED:
fp.write('ENABLED')
elif enabled is Stats.DISABLED:
fp.write('DISABLED')
else:
raise ValueError("Unknown reporting state %r" % enabled) | Writes the configuration.
This method can be overloaded to integrate with your application's own
configuration mechanism. By default, a single 'status' file is written
in the reports' directory, containing either ``ENABLED`` or
``DISABLED``; if the file doesn't exist, `UNSET` is assumed.
:param enabled: Either `Stats.UNSET`, `Stats.DISABLED` or
`Stats.ENABLED`. | entailment |
def enable_reporting(self):
"""Call this method to explicitly enable reporting.
The current report will be uploaded, plus the previously recorded ones,
and the configuration will be updated so that future runs also upload
automatically.
"""
if self.status == Stats.ENABLED:
return
if not self.enableable:
logger.critical("Can't enable reporting")
return
self.status = Stats.ENABLED
self.write_config(self.status) | Call this method to explicitly enable reporting.
The current report will be uploaded, plus the previously recorded ones,
and the configuration will be updated so that future runs also upload
automatically. | entailment |
def disable_reporting(self):
"""Call this method to explicitly disable reporting.
The current report will be discarded, along with the previously
recorded ones that haven't been uploaded. The configuration is updated
so that future runs do not record or upload reports.
"""
if self.status == Stats.DISABLED:
return
if not self.disableable:
logger.critical("Can't disable reporting")
return
self.status = Stats.DISABLED
self.write_config(self.status)
if os.path.exists(self.location):
old_reports = [f for f in os.listdir(self.location)
if f.startswith('report_')]
for old_filename in old_reports:
fullname = os.path.join(self.location, old_filename)
os.remove(fullname)
logger.info("Deleted %d pending reports", len(old_reports)) | Call this method to explicitly disable reporting.
The current report will be discarded, along with the previously
recorded ones that haven't been uploaded. The configuration is updated
so that future runs do not record or upload reports. | entailment |
def note(self, info):
"""Record some info to the report.
:param info: Dictionary of info to record. Note that previous info
recorded under the same keys will not be overwritten.
"""
if self.recording:
if self.notes is None:
raise ValueError("This report has already been submitted")
self.notes.extend(self._to_notes(info)) | Record some info to the report.
:param info: Dictionary of info to record. Note that previous info
recorded under the same keys will not be overwritten. | entailment |
def submit(self, info, *flags):
"""Finish recording and upload or save the report.
This closes the `Stats` object, no further methods should be called.
The report is either saved, uploaded or discarded, depending on
configuration. If uploading is enabled, previous reports might be
uploaded too. If uploading is not explicitly enabled or disabled, the
prompt will be shown, to ask the user to enable or disable it.
"""
if not self.recording:
return
env_val = os.environ.get(self.env_var, '').lower()
if env_val not in (None, '', '1', 'on', 'enabled', 'yes', 'true'):
self.status = Stats.DISABLED_ENV
self.notes = None
return
if self.notes is None:
raise ValueError("This report has already been submitted")
all_info, self.notes = self.notes, None
all_info.extend(self._to_notes(info))
for flag in flags:
flag(self, all_info)
now = time.time()
secs = int(now)
msecs = int((now - secs) * 1000)
all_info.insert(0, ('date', '%d.%d' % (secs, msecs)))
if self.user_id:
all_info.insert(1, ('user', self.user_id))
logger.debug("Generated report:\n%r", (all_info,))
# Current report
def generator():
for key, value in all_info:
yield _encode(key) + b':' + _encode(value) + b'\n'
filename = 'report_%d_%d.txt' % (secs, msecs)
# Save current report and exit, unless user has opted in
if not self.sending:
fullname = os.path.join(self.location, filename)
with open(fullname, 'wb') as fp:
for l in generator():
fp.write(l)
# Show prompt
sys.stderr.write(self.prompt.prompt)
return
# Post previous reports
old_reports = [f for f in os.listdir(self.location)
if f.startswith('report_')]
old_reports.sort()
old_reports = old_reports[:4] # Only upload 5 at a time
for old_filename in old_reports:
fullname = os.path.join(self.location, old_filename)
try:
with open(fullname, 'rb') as fp:
# `data=fp` would make requests stream, which is currently
# not a good idea (WSGI chokes on it)
r = requests.post(self.drop_point, data=fp.read(),
timeout=1, verify=self.ssl_verify)
r.raise_for_status()
except Exception as e:
logger.warning("Couldn't upload %s: %s", old_filename, str(e))
break
else:
logger.info("Submitted report %s", old_filename)
os.remove(fullname)
# Post current report
try:
# `data=generator()` would make requests stream, which is currently
# not a good idea (WSGI chokes on it)
r = requests.post(self.drop_point, data=b''.join(generator()),
timeout=1, verify=self.ssl_verify)
except requests.RequestException as e:
logger.warning("Couldn't upload report: %s", str(e))
fullname = os.path.join(self.location, filename)
with open(fullname, 'wb') as fp:
for l in generator():
fp.write(l)
else:
try:
r.raise_for_status()
logger.info("Submitted report")
except requests.RequestException as e:
logger.warning("Server rejected report: %s", str(e)) | Finish recording and upload or save the report.
This closes the `Stats` object, no further methods should be called.
The report is either saved, uploaded or discarded, depending on
configuration. If uploading is enabled, previous reports might be
uploaded too. If uploading is not explicitly enabled or disabled, the
prompt will be shown, to ask the user to enable or disable it. | entailment |
def store(report, address):
"""Stores the report on disk.
"""
now = time.time()
secs = int(now)
msecs = int((now - secs) * 1000)
submitted_date = filename = None # avoids warnings
while True:
submitted_date = '%d.%03d' % (secs, msecs)
filename = 'report_%s.txt' % submitted_date
filename = os.path.join(DESTINATION, filename)
if not os.path.exists(filename):
break
msecs += 1
lines = [l for l in report.split(b'\n') if l]
for line in lines:
if line.startswith(b'date:'):
date = line[5:]
if date_format.match(date):
with open(filename, 'wb') as fp:
if not isinstance(address, bytes):
address = address.encode('ascii')
fp.write(b'submitted_from:' + address + b'\n')
fp.write(('submitted_date:%s\n' % submitted_date)
.encode('ascii'))
fp.write(report)
return None
else:
return "invalid date"
return "missing date field" | Stores the report on disk. | entailment |
def application(environ, start_response):
"""WSGI interface.
"""
def send_response(status, body):
if not isinstance(body, bytes):
body = body.encode('utf-8')
start_response(status, [('Content-Type', 'text/plain'),
('Content-Length', '%d' % len(body))])
return [body]
if environ['REQUEST_METHOD'] != 'POST':
return send_response('403 Forbidden', "invalid request")
# Gets the posted input
try:
request_body_size = int(environ['CONTENT_LENGTH'])
except (KeyError, ValueError):
return send_response('400 Bad Request', "invalid content length")
if request_body_size > MAX_SIZE:
return send_response('403 Forbidden', "report too big")
request_body = environ['wsgi.input'].read(request_body_size)
# Tries to store
response_body = store(request_body, environ.get('REMOTE_ADDR'))
if not response_body:
status = '200 OK'
response_body = "stored"
else:
status = '501 Server Error'
# Sends the response
return send_response(status, response_body) | WSGI interface. | entailment |
def read(self, pin):
""" Read the pin state of an input pin.
Make sure you put the pin in input modus with the IODIR* register or direction_* attribute first.
:Example:
>>> expander = MCP23017I2C(gw)
>>> # Read the logic level on pin B3
>>> expander.read('B3')
False
>>> # Read the logic level on pin A1
>>> expander.read('A1')
True
:param pin: The label for the pin to read. (Ex. A0)
:return: Boolean representing the input level
"""
port, pin = self.pin_to_port(pin)
self.i2c_write([0x12 + port])
raw = self.i2c_read(1)
value = struct.unpack('>B', raw)[0]
return (value & (1 << pin)) > 0 | Read the pin state of an input pin.
Make sure you put the pin in input modus with the IODIR* register or direction_* attribute first.
:Example:
>>> expander = MCP23017I2C(gw)
>>> # Read the logic level on pin B3
>>> expander.read('B3')
False
>>> # Read the logic level on pin A1
>>> expander.read('A1')
True
:param pin: The label for the pin to read. (Ex. A0)
:return: Boolean representing the input level | entailment |
def read_port(self, port):
""" Read the pin state of a whole port (8 pins)
:Example:
>>> expander = MCP23017I2C(gw)
>>> # Read pin A0-A7 as a int (A0 and A1 are high)
>>> expander.read_port('A')
3
>>> # Read pin B0-B7 as a int (B2 is high)
>>> expander.read_port('B')
4
:param port: use 'A' to read port A and 'B' for port b
:return: An int where every bit represents the input level.
"""
if port == 'A':
raw = self.i2c_read_register(0x12, 1)
elif port == 'B':
raw = self.i2c_read_register(0x13, 1)
return struct.unpack('>B', raw)[0] | Read the pin state of a whole port (8 pins)
:Example:
>>> expander = MCP23017I2C(gw)
>>> # Read pin A0-A7 as a int (A0 and A1 are high)
>>> expander.read_port('A')
3
>>> # Read pin B0-B7 as a int (B2 is high)
>>> expander.read_port('B')
4
:param port: use 'A' to read port A and 'B' for port b
:return: An int where every bit represents the input level. | entailment |
def write(self, pin, value):
""" Set the pin state.
Make sure you put the pin in output mode first.
:param pin: The label for the pin to write to. (Ex. A0)
:param value: Boolean representing the new state
"""
port, pin = self.pin_to_port(pin)
portname = 'A'
if port == 1:
portname = 'B'
self._update_register('GPIO' + portname, pin, value)
self.sync() | Set the pin state.
Make sure you put the pin in output mode first.
:param pin: The label for the pin to write to. (Ex. A0)
:param value: Boolean representing the new state | entailment |
def write_port(self, port, value):
""" Use a whole port as a bus and write a byte to it.
:param port: Name of the port ('A' or 'B')
:param value: Value to write (0-255)
"""
if port == 'A':
self.GPIOA = value
elif port == 'B':
self.GPIOB = value
else:
raise AttributeError('Port {} does not exist, use A or B'.format(port))
self.sync() | Use a whole port as a bus and write a byte to it.
:param port: Name of the port ('A' or 'B')
:param value: Value to write (0-255) | entailment |
def sync(self):
""" Upload the changed registers to the chip
This will check which register have been changed since the last sync and send them to the chip.
You need to call this method if you modify one of the register attributes (mcp23017.IODIRA for example) or
if you use one of the helper attributes (mcp23017.direction_A0 for example)
"""
registers = {
0x00: 'IODIRA',
0x01: 'IODIRB',
0x02: 'IPOLA',
0x03: 'IPOLB',
0x04: 'GPINTENA',
0x05: 'GPINTENB',
0x0C: 'GPPUA',
0x0D: 'GPPUB',
0x12: 'GPIOA',
0x13: 'GPIOB'
}
for reg in registers:
name = registers[reg]
if getattr(self, name) != getattr(self, '_' + name):
self.i2c_write_register(reg, [getattr(self, name)])
setattr(self, '_' + name, getattr(self, name)) | Upload the changed registers to the chip
This will check which register have been changed since the last sync and send them to the chip.
You need to call this method if you modify one of the register attributes (mcp23017.IODIRA for example) or
if you use one of the helper attributes (mcp23017.direction_A0 for example) | entailment |
def get_pins(self):
""" Get a list containing references to all 16 pins of the chip.
:Example:
>>> expander = MCP23017I2C(gw)
>>> pins = expander.get_pins()
>>> pprint.pprint(pins)
[<GPIOPin A0 on MCP23017I2C>,
<GPIOPin A1 on MCP23017I2C>,
<GPIOPin A2 on MCP23017I2C>,
<GPIOPin A3 on MCP23017I2C>,
<GPIOPin A4 on MCP23017I2C>,
<GPIOPin A5 on MCP23017I2C>,
<GPIOPin A6 on MCP23017I2C>,
<GPIOPin B0 on MCP23017I2C>,
<GPIOPin B1 on MCP23017I2C>,
<GPIOPin B2 on MCP23017I2C>,
<GPIOPin B3 on MCP23017I2C>,
<GPIOPin B4 on MCP23017I2C>,
<GPIOPin B5 on MCP23017I2C>,
<GPIOPin B6 on MCP23017I2C>]
"""
result = []
for a in range(0, 7):
result.append(GPIOPin(self, '_action', {'pin': 'A{}'.format(a)}, name='A{}'.format(a)))
for b in range(0, 7):
result.append(GPIOPin(self, '_action', {'pin': 'B{}'.format(b)}, name='B{}'.format(b)))
return result | Get a list containing references to all 16 pins of the chip.
:Example:
>>> expander = MCP23017I2C(gw)
>>> pins = expander.get_pins()
>>> pprint.pprint(pins)
[<GPIOPin A0 on MCP23017I2C>,
<GPIOPin A1 on MCP23017I2C>,
<GPIOPin A2 on MCP23017I2C>,
<GPIOPin A3 on MCP23017I2C>,
<GPIOPin A4 on MCP23017I2C>,
<GPIOPin A5 on MCP23017I2C>,
<GPIOPin A6 on MCP23017I2C>,
<GPIOPin B0 on MCP23017I2C>,
<GPIOPin B1 on MCP23017I2C>,
<GPIOPin B2 on MCP23017I2C>,
<GPIOPin B3 on MCP23017I2C>,
<GPIOPin B4 on MCP23017I2C>,
<GPIOPin B5 on MCP23017I2C>,
<GPIOPin B6 on MCP23017I2C>] | entailment |
def read(self):
""" Get the logic input level for the pin
:return: True if the input is high
"""
m = getattr(self.chip, self.method)
return m(**self.arguments) | Get the logic input level for the pin
:return: True if the input is high | entailment |
def write(self, value):
""" Set the logic output level for the pin.
:type value: bool
:param value: True for a logic high
"""
if self.inverted:
value = not value
m = getattr(self.chip, self.method)
m(value=value, **self.arguments) | Set the logic output level for the pin.
:type value: bool
:param value: True for a logic high | entailment |
def init_app(self, app, **kwargs):
"""Initialize application object.
:param app: An instance of :class:`~flask.Flask`.
"""
self.init_config(app)
# Initialize extensions
self.menu_ext.init_app(app)
self.menu = app.extensions['menu']
self.breadcrumbs.init_app(app)
# Register blueprint in order to register template and static folder.
app.register_blueprint(Blueprint(
'invenio_theme',
__name__,
template_folder='templates',
static_folder='static',
))
# Register frontpage blueprint if enabled.
if app.config['THEME_FRONTPAGE']:
app.register_blueprint(blueprint)
# Initialize breadcrumbs.
item = self.menu.submenu('breadcrumbs')
item.register(app.config['THEME_BREADCRUMB_ROOT_ENDPOINT'], _('Home'))
# Register errors handlers.
app.register_error_handler(401, unauthorized)
app.register_error_handler(403, insufficient_permissions)
app.register_error_handler(404, page_not_found)
app.register_error_handler(500, internal_error)
# Save reference to self on object
app.extensions['invenio-theme'] = self | Initialize application object.
:param app: An instance of :class:`~flask.Flask`. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.