sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def html(dom: str):
    """
    Adds a string containing a valid HTML snippet to the display.

    :param dom:
        The HTML string to add to the display.
    """
    r = _get_report()
    r.append_body(render.html(dom))
    r.stdout_interceptor.write_source('[ADDED] HTML\n')
def workspace(show_values: bool = True, show_types: bool = True):
    """
    Adds a list of the shared variables currently stored in the project
    workspace.

    :param show_values:
        When true the values for each variable will be shown in addition to
        their name.
    :param show_types:
        When true the data types for each shared variable will be shown in
        addition to their name.
    """
    r = _get_report()
    # Internal bookkeeping entries are prefixed with '__cauldron_' and are
    # hidden from the user-facing workspace listing.
    data = {
        key: value
        for key, value in r.project.shared.fetch(None).items()
        if not key.startswith('__cauldron_')
    }
    r.append_body(render.status(data, values=show_values, types=show_types))
def pyplot(
        figure=None,
        scale: float = 0.8,
        clear: bool = True,
        aspect_ratio: typing.Union[list, tuple] = None
):
    """
    Creates a matplotlib plot in the display for the specified figure. The
    size of the plot is determined automatically to best fit the notebook.

    :param figure:
        The matplotlib figure to plot. If omitted, the currently active
        figure will be used.
    :param scale:
        The display scale with units of fractional screen height. A value
        of 0.5 constrains the output to a maximum height equal to half the
        height of browser window when viewed. Values below 1.0 are usually
        recommended so the entire output can be viewed without scrolling.
    :param clear:
        Clears the figure after it has been rendered. This is useful to
        prevent persisting old plot data between repeated runs of the
        project files. This can be disabled if the plot is going to be
        used later in the project files.
    :param aspect_ratio:
        The aspect ratio for the displayed plot as a two-element list or
        tuple. The first element is the width and the second element the
        height. The units are "inches," which is an important consideration
        for the display of text within the figure. If no aspect ratio is
        specified, the currently assigned values to the plot will be used
        instead.
    """
    r = _get_report()
    r.append_body(render_plots.pyplot(
        figure,
        scale=scale,
        clear=clear,
        aspect_ratio=aspect_ratio
    ))
    r.stdout_interceptor.write_source('[ADDED] PyPlot plot\n')
def bokeh(model, scale: float = 0.7, responsive: bool = True):
    """
    Adds a Bokeh plot object to the notebook display.

    :param model:
        The plot object to be added to the notebook display.
    :param scale:
        How tall the plot should be in the notebook as a fraction of screen
        height. A number between 0.1 and 1.0. The default value is 0.7.
    :param responsive:
        Whether or not the plot should responsively scale to fill the width
        of the notebook. The default is True.
    """
    r = _get_report()
    # Bokeh requires its JS/CSS libraries to be included in the rendered
    # notebook exactly once.
    if 'bokeh' not in r.library_includes:
        r.library_includes.append('bokeh')
    r.append_body(render_plots.bokeh_plot(
        model=model,
        scale=scale,
        responsive=responsive
    ))
    r.stdout_interceptor.write_source('[ADDED] Bokeh plot\n')
def listing(
        source: list,
        ordered: bool = False,
        expand_full: bool = False
):
    """
    An unordered or ordered list of the specified *source* iterable where
    each element is converted to a string representation for display.

    :param source:
        The iterable to display as a list.
    :param ordered:
        Whether or not the list should be ordered. If False, which is the
        default, an unordered bulleted list is created.
    :param expand_full:
        Whether or not the list should expand to fill the screen
        horizontally. When defaulted to False, the list is constrained to
        the center view area of the screen along with other text. This can
        be useful to keep lists aligned with the text flow.
    """
    r = _get_report()
    r.append_body(render.listing(
        source=source,
        ordered=ordered,
        expand_full=expand_full
    ))
    r.stdout_interceptor.write_source('[ADDED] Listing\n')
def list_grid(
        source: list,
        expand_full: bool = False,
        column_count: int = 2,
        row_spacing: float = 1.0
):
    """
    A multi-column list of the specified *source* iterable where
    each element is converted to a string representation for display.

    :param source:
        The iterable to display as a list.
    :param expand_full:
        Whether or not the list should expand to fill the screen
        horizontally. When defaulted to False, the list is constrained to
        the center view area of the screen along with other text. This can
        be useful to keep lists aligned with the text flow.
    :param column_count:
        The number of columns to display. The specified count is applicable
        to high-definition screens. For lower definition screens the actual
        count displayed may be fewer as the layout responds to less
        available horizontal screen space.
    :param row_spacing:
        The number of lines of whitespace to include between each row in
        the grid. Set this to 0 for tightly displayed lists.
    """
    r = _get_report()
    r.append_body(render.list_grid(
        source=source,
        expand_full=expand_full,
        column_count=column_count,
        row_spacing=row_spacing
    ))
    r.stdout_interceptor.write_source('[ADDED] List grid\n')
def latex(source: str):
    """
    Add a mathematical equation in latex math-mode syntax to the display.
    Instead of the traditional backslash escape character, the @ character
    is used instead to prevent backslash conflicts with Python strings. For
    example, \\delta would be @delta.

    :param source:
        The string representing the latex equation to be rendered.
    """
    r = _get_report()
    # KaTeX assets must be included in the rendered notebook exactly once.
    if 'katex' not in r.library_includes:
        r.library_includes.append('katex')
    # Translate the @-escape convention back into latex backslashes.
    r.append_body(render_texts.latex(source.replace('@', '\\')))
    r.stdout_interceptor.write_source('[ADDED] Latex equation\n')
def head(source, count: int = 5):
    """
    Displays a specified number of elements in a source object of many
    different possible types.

    :param source:
        DataFrames will show *count* rows of that DataFrame. A list, tuple
        or other iterable, will show the first *count* rows. Dictionaries
        will show *count* keys from the dictionary, which will be randomly
        selected unless you are using an OrderedDict. Strings will show the
        first *count* characters.
    :param count:
        The number of elements to show from the source.
    """
    r = _get_report()
    r.append_body(render_texts.head(source, count=count))
    r.stdout_interceptor.write_source('[ADDED] Head\n')
def status(
        message: str = None,
        progress: float = None,
        section_message: str = None,
        section_progress: float = None,
):
    """
    Updates the status display, which is only visible while a step is
    running. This is useful for providing feedback and information during
    long-running steps.

    A section progress is also available for cases where long running tasks
    consist of multiple tasks and you want to display sub-progress messages
    within the context of the larger status.

    Note: this is only supported when running in the Cauldron desktop
    application.

    :param message:
        The status message you want to display. If left blank the previously
        set status message will be retained. Should you desire to remove an
        existing message, specify a blank string for this argument.
    :param progress:
        A number between zero and one that indicates the overall progress
        for the current status. If no value is specified, the previously
        assigned progress will be retained.
    :param section_message:
        The status message you want to display for a particular task within
        a long-running step. If left blank the previously set section
        message will be retained. Should you desire to remove an existing
        message, specify a blank string for this argument.
    :param section_progress:
        A number between zero and one that indicates the progress for the
        current section status. If no value is specified, the previously
        assigned section progress value will be retained.
    """
    environ.abort_thread()
    step = _cd.project.get_internal_project().current_step

    # None means "retain the previous value"; an explicit value (including
    # an empty string or 0) replaces it.
    if message is not None:
        step.progress_message = message
    if progress is not None:
        step.progress = max(0.0, min(1.0, progress))
    if section_message is not None:
        step.sub_progress_message = section_message
    if section_progress is not None:
        # Clamp to [0, 1] for consistency with the overall progress value.
        step.sub_progress = max(0.0, min(1.0, section_progress))
def code_block(
        code: str = None,
        path: str = None,
        language_id: str = None,
        title: str = None,
        caption: str = None
):
    """
    Adds a block of syntax highlighted code to the display from either
    the supplied code argument, or from the code file specified
    by the path argument.

    :param code:
        A string containing the code to be added to the display
    :param path:
        A path to a file containing code to be added to the display
    :param language_id:
        The language identifier that indicates what language should
        be used by the syntax highlighter. Valid values are any of the
        languages supported by the Pygments highlighter.
    :param title:
        If specified, the code block will include a title bar with the
        value of this argument
    :param caption:
        If specified, the code block will include a caption box below the
        code that contains the value of this argument
    """
    environ.abort_thread()
    r = _get_report()
    r.append_body(render.code_block(
        block=code,
        path=path,
        language=language_id,
        title=title,
        caption=caption
    ))
    r.stdout_interceptor.write_source('{}\n'.format(code))
def elapsed():
    """Displays the elapsed time since the step started running."""
    environ.abort_thread()
    step = _cd.project.get_internal_project().current_step
    r = _get_report()
    r.append_body(render.elapsed_time(step.elapsed_time))
    result = '[ELAPSED]: {}\n'.format(timedelta(seconds=step.elapsed_time))
    r.stdout_interceptor.write_source(result)
def get_module(name: str) -> typing.Union[types.ModuleType, None]:
    """
    Retrieves the loaded module for the given module name or returns None
    if no such module has been loaded.

    :param name:
        The name of the module to be retrieved
    :return:
        Either the loaded module with the specified name, or None if no
        such module has been imported.
    """
    return sys.modules.get(name)
def get_module_name(module: types.ModuleType) -> str:
    """
    Returns the name of the specified module by looking up its name in
    multiple ways to prevent incompatibility issues.

    :param module:
        A module object for which to retrieve the name.
    """
    try:
        # Prefer the import-spec name, which is fully qualified.
        return module.__spec__.name
    except AttributeError:
        # Fall back for modules without a spec (e.g. dynamically created).
        return module.__name__
def do_reload(module: types.ModuleType, newer_than: int) -> bool:
    """
    Executes the reload of the specified module if the source file that it
    was loaded from was updated more recently than the specified time.

    :param module:
        A module object to be reloaded
    :param newer_than:
        The time in seconds since epoch that should be used to determine if
        the module needs to be reloaded. If the module source was modified
        more recently than this time, the module will be refreshed.
    :return:
        Whether or not the module was reloaded
    """
    # Builtin/extension modules may have no __file__ at all; use a default
    # instead of letting getattr raise an AttributeError.
    path = getattr(module, '__file__', None)
    directory = getattr(module, '__path__', [None])[0]
    if path is None and directory:
        path = os.path.join(directory, '__init__.py')
    if path is None or not os.path.exists(path):
        # No source file to compare against, so a reload is not possible.
        return False

    last_modified = os.path.getmtime(path)
    if last_modified < newer_than:
        return False

    try:
        importlib.reload(module)
        return True
    except ImportError:
        return False
def reload_children(parent_module: types.ModuleType, newer_than: int) -> bool:
    """
    Reloads all imported children of the specified parent module object.

    :param parent_module:
        A module object whose children should be refreshed if their
        currently loaded versions are out of date.
    :param newer_than:
        An integer time in seconds for comparison. Any children modules
        that were modified more recently than this time will be reloaded.
    :return:
        Whether or not any children were reloaded
    """
    # Only packages have a __path__; plain modules cannot have children.
    if not hasattr(parent_module, '__path__'):
        return False

    parent_name = get_module_name(parent_module)
    children = filter(
        lambda item: item[0].startswith(parent_name),
        sys.modules.items()
    )
    # Materialize the list so every child is reloaded; any() on a generator
    # would short-circuit after the first successful reload.
    return any([do_reload(item[1], newer_than) for item in children])
def reload_module(
        module: typing.Union[str, types.ModuleType],
        recursive: bool,
        force: bool
) -> bool:
    """
    Reloads the specified module, which can either be a module object or
    a string name of a module. Will not reload a module that has not been
    imported.

    :param module:
        A module object or string module name that should be refreshed if
        its currently loaded version is out of date or the action is
        forced.
    :param recursive:
        When true, any imported sub-modules of this module will also be
        refreshed if they have been updated.
    :param force:
        When true, all modules will be refreshed even if it doesn't appear
        that they have been updated.
    :return:
        Whether or not the module (or any of its children) was reloaded.
    """
    if isinstance(module, str):
        module = get_module(module)

    if module is None or not isinstance(module, types.ModuleType):
        return False

    try:
        step = session.project.get_internal_project().current_step
        modified = step.last_modified if step else None
    except AttributeError:
        modified = 0

    if modified is None:
        # If the step has no modified time it hasn't been run yet and
        # a reload won't be needed
        return False

    # A zero threshold means "always newer", which forces the reload.
    newer_than = modified if not force and modified else 0

    children_reloaded = (
        reload_children(module, newer_than)
        if recursive
        else False
    )
    reloaded = do_reload(module, newer_than)
    return reloaded or children_reloaded
def refresh(
        *modules: typing.Union[str, types.ModuleType],
        recursive: bool = False,
        force: bool = False
) -> bool:
    """
    Checks the specified module or modules for changes and reloads them if
    they have been changed since the module was first imported or last
    refreshed.

    :param modules:
        One or more module objects that should be refreshed if the
        currently loaded versions are out of date. The package name for
        modules can also be used.
    :param recursive:
        When true, any imported sub-modules of this module will also be
        refreshed if they have been updated.
    :param force:
        When true, all modules will be refreshed even if it doesn't appear
        that they have been updated.
    :return:
        True or False depending on whether any modules were refreshed by
        this call.
    """
    # Build the full list first so every module is processed; any() on a
    # generator would short-circuit and skip remaining modules.
    results = [reload_module(module, recursive, force) for module in modules]
    return any(results)
def get_args_index(target) -> int:
    """
    Returns the index of the "*args" parameter if such a parameter exists
    in the function arguments or -1 otherwise.

    :param target:
        The target function for which the args index should be determined
    :return:
        The arguments index if it exists or -1 if not
    """
    code = target.__code__
    if not bool(code.co_flags & inspect.CO_VARARGS):
        return -1
    # In co_varnames, *args comes after all positional and keyword-only
    # parameters.
    return code.co_argcount + code.co_kwonlyargcount
def get_kwargs_index(target) -> int:
    """
    Returns the index of the "**kwargs" parameter if such a parameter
    exists in the function arguments or -1 otherwise.

    :param target:
        The target function for which the kwargs index should be determined
    :return:
        The keyword arguments index if it exists or -1 if not
    """
    code = target.__code__
    if not bool(code.co_flags & inspect.CO_VARKEYWORDS):
        return -1
    # In co_varnames, **kwargs comes after all positional parameters,
    # keyword-only parameters and (if present) *args.
    return (
        code.co_argcount +
        code.co_kwonlyargcount +
        (1 if code.co_flags & inspect.CO_VARARGS else 0)
    )
def get_arg_names(target) -> typing.List[str]:
    """
    Gets the list of named arguments for the target function.

    :param target:
        Function for which the argument names will be retrieved
    """
    # Use a default so non-function targets return an empty list instead of
    # raising an AttributeError.
    code = getattr(target, '__code__', None)
    if code is None:
        return []

    arg_count = code.co_argcount
    kwarg_count = code.co_kwonlyargcount
    args_index = get_args_index(target)
    kwargs_index = get_kwargs_index(target)

    # Positional parameters, then *args, then keyword-only parameters,
    # then **kwargs — matching the order of the call signature.
    arg_names = list(code.co_varnames[:arg_count])
    if args_index != -1:
        arg_names.append(code.co_varnames[args_index])
    arg_names += list(code.co_varnames[arg_count:(arg_count + kwarg_count)])
    if kwargs_index != -1:
        arg_names.append(code.co_varnames[kwargs_index])

    # Strip the implicit instance/class argument from methods.
    if len(arg_names) > 0 and arg_names[0] in ['self', 'cls']:
        arg_count -= 1
        arg_names.pop(0)

    return arg_names
def create_argument(target, name, description: str = '') -> dict:
    """
    Creates a dictionary representation of the parameter.

    :param target:
        The function object in which the parameter resides
    :param name:
        The name of the parameter
    :param description:
        The documentation description for the parameter
    """
    arg_names = get_arg_names(target)
    annotations = getattr(target, '__annotations__', {})
    out = dict(
        name=name,
        index=arg_names.index(name),
        description=description,
        type=conversions.arg_type_to_string(annotations.get(name, 'Any'))
    )
    # Merge in default-value information when the parameter is optional.
    out.update(get_optional_data(target, name, arg_names))
    return out
def explode_line(argument_line: str) -> typing.Tuple[str, str]:
    """
    Returns a tuple containing the parameter name and the description
    parsed from the given argument line (e.g. ":param name: description").
    """
    # Drop the leading ":param" token, then split "name: description".
    parts = tuple(argument_line.split(' ', 1)[-1].split(':', 1))
    # Lines without a description yield an empty description string.
    return parts if len(parts) > 1 else (parts[0], '')
def update_base_image(path: str):
    """
    Pulls the latest version of the base image referenced by the FROM
    statement of the Dockerfile at the given path. Returns the image name
    that was pulled, or None if no FROM statement was found.
    """
    with open(path, 'r') as file_handle:
        contents = file_handle.read()

    # Raw string avoids invalid-escape warnings for the \s sequences.
    regex = re.compile(r'from\s+(?P<source>[^\s]+)', re.IGNORECASE)
    matches = regex.findall(contents)
    if not matches:
        return None

    # Only the first FROM matters; it defines the base image.
    match = matches[0]
    os.system('docker pull {}'.format(match))
    return match
def build(path: str) -> dict:
    """
    Builds the container from the specified docker file path and returns a
    dictionary describing the build (id, path, command and tags).
    """
    update_base_image(path)

    match = file_pattern.search(os.path.basename(path))
    build_id = match.group('id')

    tags = [
        '{}:{}-{}'.format(HUB_PREFIX, version, build_id),
        '{}:latest-{}'.format(HUB_PREFIX, build_id),
        '{}:current-{}'.format(HUB_PREFIX, build_id)
    ]
    # The standard build also serves as the unqualified "latest" image.
    if build_id == 'standard':
        tags.append('{}:latest'.format(HUB_PREFIX))

    command = 'docker build --file "{}" {} .'.format(
        path,
        ' '.join(['-t {}'.format(t) for t in tags])
    )

    print('[BUILDING]:', build_id)
    os.system(command)

    return dict(
        id=build_id,
        path=path,
        command=command,
        tags=tags
    )
def publish(build_entry: dict):
    """Publishes each tag of the specified build entry to docker hub."""
    for tag in build_entry['tags']:
        print('[PUSHING]:', tag)
        os.system('docker push {}'.format(tag))
def parse(args: list = None) -> dict:
    """
    Parse command line arguments.

    :param args:
        Optional explicit argument list; defaults to sys.argv when omitted,
        which preserves the original command-line behavior.
    """
    parser = ArgumentParser()
    parser.add_argument('-p', '--publish', action='store_true', default=False)
    return vars(parser.parse_args(args))
def run():
    """Execute the build process and optionally publish the results."""
    args = parse()
    build_results = [build(p) for p in glob.iglob(glob_path)]

    if not args['publish']:
        return

    for entry in build_results:
        publish(entry)
def elapsed_time(seconds: float) -> str:
    """
    Renders the elapsed time since the current step started running as an
    HTML snippet.

    :param seconds:
        The elapsed time in seconds to render.
    """
    environ.abort_thread()

    # timedelta formats as "H:MM:SS[.ffffff]"; split off the fractional
    # part so it can be rendered separately.
    parts = (
        '{}'.format(timedelta(seconds=seconds))
        .rsplit('.', 1)
    )
    hours, minutes, seconds = parts[0].split(':')

    return templating.render_template(
        'elapsed_time.html',
        hours=hours.zfill(2),
        minutes=minutes.zfill(2),
        seconds=seconds.zfill(2),
        microseconds=parts[-1] if len(parts) > 1 else ''
    )
def image(
        rendered_path: str,
        width: int = None,
        height: int = None,
        justify: str = None
) -> str:
    """
    Renders an image block.

    :param rendered_path:
        The path to the image to display.
    :param width:
        Optional pixel width for the image.
    :param height:
        Optional pixel height for the image.
    :param justify:
        Optional justification ('left' by default); lower-cased before
        rendering.
    """
    environ.abort_thread()
    return templating.render_template(
        'image.html',
        path=rendered_path,
        width=width,
        height=height,
        justification=(justify or 'left').lower()
    )
def get_environment_info() -> dict:
    """
    Information about Cauldron and its Python interpreter.

    :return:
        A dictionary containing information about the Cauldron and its
        Python environment. This information is useful when providing
        feedback and bug reports.
    """
    data = _environ.systems.get_system_data()
    # Copy so callers cannot mutate the shared package settings.
    data['cauldron'] = _environ.package_settings.copy()
    return data
def run_server(port=5010, debug=False, **kwargs):
    """
    Run the cauldron http server used to interact with cauldron from a
    remote host.

    :param port:
        The port on which to bind the cauldron server.
    :param debug:
        Whether or not the server should be run in debug mode. If true, the
        server will echo debugging information during operation.
    :param kwargs:
        Custom properties to alter the way the server runs.
    """
    # Imported locally to avoid the server dependency cost unless needed.
    from cauldron.cli.server import run
    run.execute(port=port, debug=debug, **kwargs)
def run_project(
        project_directory: str,
        output_directory: str = None,
        logging_path: str = None,
        reader_path: str = None,
        reload_project_libraries: bool = False,
        **kwargs
) -> ExecutionResult:
    """
    Runs a project as a single command directly within the current Python
    interpreter.

    :param project_directory:
        The fully-qualified path to the directory where the Cauldron
        project is located
    :param output_directory:
        The fully-qualified path to the directory where the results will be
        written. All of the results files will be written within this
        directory. If the directory does not exist, it will be created.
    :param logging_path:
        The fully-qualified path to a file that will be used for logging.
        If a directory is specified instead of a file, a file will be
        created using the default filename of cauldron_run.log. If a file
        already exists at that location it will be removed and a new file
        created in its place.
    :param reader_path:
        Specifies a path where a reader file will be saved after the
        project has finished running. If no path is specified, no reader
        file will be saved. If the path is a directory, a reader file will
        be saved in that directory with the project name as the file name.
    :param reload_project_libraries:
        Whether or not to reload all project libraries prior to execution
        of the project. By default this is False, but can be enabled in
        cases where refreshing the project libraries before execution is
        needed.
    :param kwargs:
        Any variables to be available in the cauldron.shared object during
        execution of the project can be specified here as keyword
        arguments.
    :return:
        A response object that contains information about the run process
        and the shared data from the final state of the project.
    """
    # Imported locally to avoid a circular import at module load time.
    from cauldron.cli import batcher
    return batcher.run_project(
        project_directory=project_directory,
        output_directory=output_directory,
        log_path=logging_path,
        reader_path=reader_path,
        reload_project_libraries=reload_project_libraries,
        shared_data=kwargs
    )
def join(self, timeout: float = None) -> bool:
    """
    Waits on the response's thread if one exists; otherwise this is a
    no-op that returns immediately.

    :param timeout:
        Maximum number of seconds to block on the join before giving up.
        The default of None waits indefinitely.
    :return:
        True when a joinable thread existed, False otherwise.
    """
    thread = getattr(self, 'thread', None)
    try:
        # Any missing/None thread raises AttributeError here, which is
        # treated as "nothing to join".
        thread.join(timeout)
    except AttributeError:
        return False
    return True
just returns after a no-op if no thread exists to join.
:param timeout:
Maximum number of seconds to block on the join before given up
and continuing operation. The default `None` value will wait
forever.
:return:
A boolean indicating whether or not a thread existed to join
upon. | entailment |
def deserialize(serial_data: dict) -> 'Response':
    """Builds a Response object back from its serialized dictionary form."""
    response = Response(serial_data.get('id'))
    response.data.update(serial_data.get('data', {}))
    response.ended = serial_data.get('ended', False)
    response.failed = not serial_data.get('success', True)

    # Restore each message collection, appending restored messages to
    # whatever the freshly-constructed response already contains.
    for message_type in ('errors', 'warnings', 'messages'):
        restored = [
            ResponseMessage(**entry)
            for entry in serial_data.get(message_type, [])
        ]
        existing = getattr(response, message_type)
        setattr(response, message_type, existing + restored)

    return response
def abort_thread():
    """
    Raises a ThreadAbortError when the user has flagged the currently
    running CauldronThread for abort. Has no effect when called from the
    main thread or any non-Cauldron thread.
    """
    current = threading.current_thread()
    should_abort = (
        isinstance(current, CauldronThread)
        and current.is_executing
        and current.abort
    )
    if should_abort:
        raise ThreadAbortError('User Aborted Execution')
currently running execution to stop prematurely by marking the running
thread as aborted. It only applies to operations that are run within
CauldronThreads and not the main thread. | entailment |
def is_running(self) -> bool:
    """
    Whether the thread is considered running. True while the thread is
    alive, and also during a short (0.5 second) grace period after
    completion, or whenever no completion time has been recorded.
    """
    if self._has_started and self.is_alive():
        return True
    if self.completed_at is None:
        return True
    elapsed = (datetime.utcnow() - self.completed_at).total_seconds()
    return elapsed < 0.5
def run(self):
    """
    Executes the Cauldron command in a thread to prevent long-running
    computations from locking the main Cauldron thread, which is needed
    to serve and print status information.
    """
    async def run_command():
        # Invokes the target command, recording either its result or the
        # exception it raised so the thread's owner can inspect them.
        try:
            self.result = self.command(
                context=self.context,
                **self.kwargs
            )
        except Exception as error:
            self.exception = error
            print(error)
            import traceback
            traceback.print_exc()
            import sys
            # Surface the failure on the command's response so remote
            # callers see it, and echo it to the console.
            self.context.response.fail(
                code='COMMAND_EXECUTION_ERROR',
                message='Failed to execute command due to internal error',
                error=error
            ).console(
                whitespace=1
            )
    self._has_started = True
    # A dedicated event loop runs the command so a hard abort can stop
    # execution by stopping this loop (see abort_running).
    self._loop = asyncio.new_event_loop()
    self._loop.run_until_complete(run_command())
    self._loop.close()
    self._loop = None
    self.completed_at = datetime.utcnow()
computations from locking the main Cauldron thread, which is needed
to serve and print status information. | entailment |
def abort_running(self) -> bool:
    """
    Attempts a hard abort by stopping the event loop on which the running
    command is executing. Stopping via asyncio prevents the interrupted
    execution from destabilizing the Python environment.

    :return:
        True when the loop was successfully told to stop, False when no
        loop exists or stopping it failed.
    """
    loop = self._loop
    if not loop:
        return False

    try:
        loop.stop()
    except Exception:
        return False
    finally:
        # Record completion time regardless of whether the stop succeeded.
        self.completed_at = datetime.utcnow()
    return True
in which the running command was operating. This is carried out using
the asyncio library to prevent the stopped execution from destabilizing
the Python environment. | entailment |
def from_request(request=None) -> dict:
    """
    Extracts the arguments from the given Flask request, preferring the
    JSON body over query/form values. Falls back to the active Flask
    request when none is supplied, and to an empty dict when neither
    source yields arguments.
    """
    req = request if request else flask_request

    try:
        json_args = req.get_json(silent=True)
    except Exception:
        json_args = None

    try:
        query_args = req.values
    except Exception:
        query_args = None

    for candidate in (json_args, query_args):
        if candidate is not None:
            return candidate
    return {}
def print_rule(rule, feature_names=None, category_names=None, label="label", support=None):
    # type: (Rule, List[Union[str, None]], List[Union[List[str], None]], str, List[int]) -> str
    """
    Renders a rule as a human readable string.

    :param rule: the Rule instance to render
    :param feature_names: optional list of n_features feature names
    :param category_names: optional per-feature lists of category names
    :param label: 'label' prints only the predicted label; 'prob' prints
        the full probability distribution
    :param support: optional per-class support counts appended to the text
    :return: the formatted rule string
    """
    pred_label = np.argmax(rule.output)

    # Build the output ("THEN") part of the text.
    if label == "label":
        head = "{}: {} ({})".format(label, pred_label, rule.output[pred_label])
    elif label == "prob":
        probs = ", ".join("{:.4f}".format(p) for p in rule.output)
        head = "{}: [{}]".format(label, probs)
    else:
        raise ValueError("Unknown label {}".format(label))

    if rule.is_default():
        text = "DEFAULT " + head
    else:
        # Build one "(feature <op> category)" condition per clause.
        conditions = []
        for feature_idx, category in rule.clauses:
            if feature_names is None:
                feature = "X" + str(feature_idx)
            else:
                feature = feature_names[feature_idx]
            names = None if category_names is None else category_names[feature_idx]
            if names is None:
                conditions.append("({} = {})".format(feature, category))
            else:
                conditions.append("({} in {})".format(feature, names[category]))
        text = "IF " + " AND ".join(conditions) + " THEN " + head

    if support is not None:
        # "+" marks the predicted class's support, "-" all other classes.
        marks = [
            ("+" if i == pred_label else "-") + str(count)
            for i, count in enumerate(support)
        ]
        text += " [" + "/".join(marks) + "]"
    return text
:param rule: An instance of Rule
:param feature_names: a list of n_features feature names,
:param category_names: the names of the categories of each feature.
:param label: Can take two values, 'label': just print the label as output, or 'prob': print the prob distribution.
:param support: optional values of the data that support the rule
:return: str | entailment |
def rule_str2rule(rule_str, prob):
    # type: (str, float) -> Rule
    """
    Converts a rule string returned from the C backend into a Rule object.

    :param rule_str: string form of the rule, e.g. '{c2=1,c4=0}', or
        'default' for the fallback rule
    :param prob: the output probability of the rule
    :return: the equivalent Rule instance
    """
    if rule_str == "default":
        return Rule([], prob)

    clauses = []
    # Strip the surrounding braces and parse each "c<feature>=<category>".
    for token in rule_str[1:-1].split(","):
        separator = token.find("=")
        if separator == -1:
            raise ValueError("No \"=\" find in the rule!")
        feature = int(token[1:separator])  # skip the leading 'c'
        category = int(token[separator + 1:])
        clauses.append(Clause(feature, category))
    return Rule(clauses, prob)
:param rule_str: a string representing the rule
:param prob: the output probability
:return: a Rule object | entailment |
def is_satisfied(self, x_cat):
    """
    Evaluates the rule against a batch of categorical data.

    :param x_cat: 2D array of categorical data, shape [n_data, n_features]
    :return: bool array of shape [n_data,] marking which rows fire the rule
    """
    # The default rule matches every instance unconditionally.
    if self.is_default():
        return np.ones(x_cat.shape[0], dtype=bool)
    checks = [x_cat[:, idx] == cat for idx, cat in self.clauses]
    return reduce(np.logical_and, checks)
:return: a bool array of shape [n_data,] representing whether the rule is fired by each input data. | entailment |
def fit(self, x, y):
    """
    Trains the rule list on the provided categorical dataset.

    :param x: 2D np.ndarray (n_instances, n_features) of categorical data,
        must be of type int
    :param y: 1D np.ndarray (n_instances, ) labels
    :return:
    """
    verbose = self.verbose
    # Create temporary files used to hand the converted data to the C
    # implementation of SBRL.
    data_file = tempfile.NamedTemporaryFile("w+b", delete=False)
    label_file = tempfile.NamedTemporaryFile("w+b", delete=False)
    start = time.time()
    # Mine the candidate rules while writing the dataset in the format
    # the backend expects.
    raw_rules = categorical2pysbrl_data(x, y, data_file.name, label_file.name, supp=self.min_support,
                                        zmin=self.min_rule_len, zmax=self.max_rule_len, method=self.fim_method)
    if verbose > 1:
        print("Info: sbrl data files saved to %s and %s temporarily" % (data_file.name, label_file.name))
    data_file.close()
    label_file.close()
    cat_time = time.time() - start
    if verbose:
        print("Info: time for rule mining: %.4fs" % cat_time)
    # Assumes labels are consecutive integers starting at 0 — TODO confirm.
    n_labels = int(np.max(y)) + 1
    start = time.time()
    _model = train_sbrl(data_file.name, label_file.name, self.lambda_, eta=self.eta,
                        max_iters=self.iters, n_chains=self.n_chains, seed=self.seed,
                        alpha=self.alpha, verbose=verbose)
    train_time = time.time() - start
    if verbose:
        print("Info: training time: %.4fs" % train_time)
    # update model parameters
    self._n_classes = n_labels
    self._n_features = x.shape[1]
    # convert the raw parameters to rules
    self.from_raw(_model[0], _model[1], raw_rules)
    self._supports = self.compute_support(x, y)
    # Remove the temporary files now that training has finished.
    os.unlink(data_file.name)
    os.unlink(label_file.name)
:param y: 1D np.ndarray (n_instances, ) labels
:return: | entailment |
def from_raw(self, rule_ids, outputs, raw_rules):
    """
    Populates the rule list from the raw arrays returned by the C backend.

    :param rule_ids: indices into the rule pool, one per rule in the list
    :param outputs: per-rule output probability distributions
    :param raw_rules: the mined rule pool as (features, categories) pairs
    :return:
    """
    # Index 0 of the pool is reserved for the (empty) default rule.
    self._rule_pool = [([], [])] + raw_rules
    self._rule_list = [
        Rule(
            [Clause(f, c) for f, c in zip(*self._rule_pool[rule_id])],
            outputs[position]
        )
        for position, rule_id in enumerate(rule_ids)
    ]
    self._rule_ids = rule_ids
    self._rule_outputs = outputs
:param rule_ids:
:param outputs:
:param raw_rules:
:return: | entailment |
def compute_support(self, x, y):
    # type: (np.ndarray, np.ndarray) -> np.ndarray
    """
    Calculates the support for the rules.
    The support of each rule is a list of `n_classes` integers: [l1, l2, ...].
    Each integer represents the number of data of label i that is caught by
    this rule.

    :param x: 2D np.ndarray (n_instances, n_features) of categorical data,
        must be of type int
    :param y: 1D np.ndarray (n_instances, ) labels
    :return: a np.ndarray of shape (n_rules, n_classes)
    :raises RuntimeError: when the caught matrix does not cover every
        instance exactly once
    """
    caught_matrix = self.caught_matrix(x)
    # `np.int`/`np.bool` aliases were removed from NumPy; use builtins.
    if np.sum(caught_matrix.astype(int)) != x.shape[0]:
        raise RuntimeError("The sum of the support should equal to the number of instances!")
    support_summary = np.zeros((self.n_rules, self.n_classes), dtype=int)
    for i, support in enumerate(caught_matrix):
        support_labels = y[support]
        unique_labels, unique_counts = np.unique(support_labels, return_counts=True)
        if len(unique_labels) > 0:
            max_label = int(np.max(unique_labels))
            # Labels are 0-indexed, so a label equal to the current width
            # already falls outside the array. (The original `>` comparison
            # and `max - width` pad length were both off by one, raising
            # IndexError for an unseen label == n_classes.)
            if max_label >= support_summary.shape[1]:
                pad_len = max_label + 1 - support_summary.shape[1]
                support_summary = np.hstack(
                    (support_summary, np.zeros((self.n_rules, pad_len), dtype=int))
                )
        support_summary[i, unique_labels] = unique_counts
    return support_summary
The support of each rule is a list of `n_classes` integers: [l1, l2, ...].
Each integer represents the number of data of label i that is caught by this rule
:param x: 2D np.ndarray (n_instances, n_features) should be categorical data, must be of type int
:param y: 1D np.ndarray (n_instances, ) labels
:return: a np.ndarray of shape (n_rules, n_classes) | entailment |
def caught_matrix(self, x):
    # type: (np.ndarray) -> np.ndarray
    """
    Computes which rule catches each instance: an instance is caught by the
    first rule in the list that it satisfies.

    :param x: 2D np.ndarray (n_instances, n_features) of categorical data,
        must be of type int
    :return:
        a bool np.ndarray of shape (n_rules, n_instances)
    """
    # `np.bool` was removed from NumPy (>=1.24); use the builtin bool dtype.
    un_satisfied = np.ones((x.shape[0],), dtype=bool)
    supports = np.zeros((self.n_rules, x.shape[0]), dtype=bool)
    for i, rule in enumerate(self._rule_list):
        is_satisfied = rule.is_satisfied(x)
        satisfied = np.logical_and(is_satisfied, un_satisfied)
        # Instances caught here are no longer available to later rules.
        un_satisfied = np.logical_xor(satisfied, un_satisfied)
        supports[i, :] = satisfied
    return supports
Each rule has an array of bools, showing whether each instances is caught by this rule
:param x: 2D np.ndarray (n_instances, n_features) should be categorical data, must be of type int
:return:
a bool np.ndarray of shape (n_rules, n_instances) | entailment |
def decision_path(self, x):
    # type: (np.ndarray) -> np.ndarray
    """
    Computes the decision path of the rule list on x: which rules each
    instance consults before being caught.

    :param x: x should be already transformed
    :return:
        a np.ndarray of shape [n_rules, n_instances] of type bool,
        representing whether an instance has consulted a rule or not
    """
    # `np.bool` was removed from NumPy (>=1.24); use the builtin bool dtype.
    un_satisfied = np.ones([x.shape[0]], dtype=bool)
    paths = np.zeros((self.n_rules, x.shape[0]), dtype=bool)
    for i, rule in enumerate(self._rule_list):
        # An instance consults rule i iff no earlier rule satisfied it.
        paths[i, :] = un_satisfied
        satisfied = rule.is_satisfied(x)
        un_satisfied = np.logical_and(np.logical_not(satisfied), un_satisfied)
    return paths
:param x: x should be already transformed
:return:
return a np.ndarray of shape [n_rules, n_instances] of type bool,
representing whether an instance has consulted a rule or not | entailment |
def put(self, *args, **kwargs) -> 'SharedCache':
    """
    Stores one or more variables in the cache.

    :param args:
        Alternating key/value pairs, e.g. ``put('a', 1, 'b', False)``
        stores a=1 and b=False. A trailing key without a matching value
        is ignored.
    :param kwargs:
        Name/value pairs to store, e.g. ``put(a=1, b=False)``. Passing
        None for a key that is already cached removes that entry; note
        that None for an unknown key is stored as a None value.
    :return:
        This cache instance, enabling call chaining.
    """
    environ.abort_thread()

    # Pair positional arguments as (key, value); zip silently drops any
    # trailing unpaired key, matching the original while-loop behavior.
    for key, value in zip(args[0::2], args[1::2]):
        self._shared_cache_data[key] = value

    for key, value in kwargs.items():
        if value is None and key in self._shared_cache_data:
            del self._shared_cache_data[key]
        else:
            self._shared_cache_data[key] = value
    return self
:param args:
Variables can be specified by two consecutive arguments where the
first argument is a key and the second one the corresponding value.
For example:
```
put('a', 1, 'b', False)
```
would add two variables to the cache where the value of _a_ would
be 1 and the value of _b_ would be False.
:param kwargs:
Keyword arguments to be added to the cache, which are name value
pairs like standard keyword named arguments in Python. For example:
```
put(a=1, b=False)
```
would add two variables to the cache where the value of _a_ would
be 1 and the value of _b_ would be False. | entailment |
def grab(
        self,
        *keys: typing.List[str],
        default_value=None
) -> typing.Tuple:
    """
    Fetches multiple cached values at once.

    :param keys:
        Names of cached variables to fetch; their order is preserved in
        the returned tuple.
    :param default_value:
        Value substituted for any key missing from the cache.
    :return:
        A tuple with one entry per requested key.
    """
    return tuple(self.fetch(key, default_value) for key in keys)
the keys arguments
:param keys:
One or more variable names stored in the cache that should be
returned by the grab function. The order of these arguments are
preserved by the returned tuple.
:param default_value:
If one or more of the keys is not found within the cache, this
value will be returned as the missing value.
:return:
A tuple containing values for each of the keys specified in the
arguments | entailment |
def fetch(self, key: typing.Union[str, None], default_value=None):
    """
    Looks up a single variable in the cache.

    :param key:
        Name of the variable to look up. Passing None returns the entire
        underlying cache dictionary instead of a single value.
    :param default_value:
        Returned when the key is absent from the cache.
    :return:
        The cached value, the default value, or the whole cache dict.
    """
    environ.abort_thread()
    data = self._shared_cache_data
    return data if key is None else data.get(key, default_value)
:param key:
The name of the variable for which the value should be returned
:param default_value:
The value to return if the variable does not exist in the cache
:return:
The value of the specified key if it exists in the cache or the
default_Value if it does not | entailment |
def train_sbrl(data_file, label_file, lambda_=20, eta=2, max_iters=300000, n_chains=20, alpha=1, seed=None, verbose=0):
    """
    The basic training function of the scalable bayesian rule list.
    Users are suggested to use SBRL instead of this function.
    It takes the paths of the pre-processed data and label files as input,
    and return the parameters of the trained rule list.
    Check pysbrl.utils:categorical2pysbrl_data to see how to convert
    categorical data to the required format.

    :param data_file: The data file
    :param label_file: The label file
    :param lambda_: A hyper parameter, the prior representing the expected length of the rule list
    :param eta: A hyper parameter, the prior representing the expected length of each rule
    :param max_iters: The maximum iteration of the algo
    :param n_chains: The number of markov chains to run
    :param alpha: The prior of the output probability distribution, see the paper for more detail.
    :param seed: Optional random seed; None is passed to the backend as -1
        (non-deterministic).
    :param verbose: Verbosity level forwarded to the backend.
    :return: A tuple of (`rule_ids`, `outputs`, `rule_strings`)
        `rule_ids`: the list of ids of rules
        `outputs`: the outputs matrix (prob distribution as a vector per rule)
        `rule_strings`: the whole list of rules in the format of strings like `u'{c2=x,c4=o,c5=b}'`.
    :raises ValueError: when alpha is neither an int nor a list of ints
    :raises FileNotFoundError: when either input file is missing
    """
    # Validate alpha explicitly: raising ValueError (instead of `assert`,
    # which is stripped under python -O) keeps the check active always.
    if isinstance(alpha, int):
        alphas = np.array([alpha], dtype=np.int32)
    elif isinstance(alpha, list):
        if not all(isinstance(a, int) for a in alpha):
            raise ValueError('the argument alpha can only be int or List[int]')
        alphas = np.array(alpha, dtype=np.int32)
    else:
        raise ValueError('the argument alpha can only be int or List[int]')
    if seed is None:
        seed = -1
    if not os.path.isfile(data_file):
        raise FileNotFoundError('data file %s does not exists!' % data_file)
    if not os.path.isfile(label_file):
        raise FileNotFoundError('label file %s does not exists!' % label_file)
    return _train(data_file, label_file, lambda_, eta, max_iters, n_chains, alphas, seed, verbose)
Users are suggested to use SBRL instead of this function.
It takes the paths of the pre-processed data and label files as input,
and return the parameters of the trained rule list.
Check pysbrl.utils:categorical2pysbrl_data to see how to convert categorical data to the required format
:param data_file: The data file
:param label_file: The label file
:param lambda_: A hyper parameter, the prior representing the expected length of the rule list
:param eta: A hyper parameter, the prior representing the expected length of each rule
:param max_iters: The maximum iteration of the algo
:param n_chains: The number of markov chains to run
:param alpha: The prior of the output probability distribution, see the paper for more detail.
:return: A tuple of (`rule_ids`, `outputs`, `rule_strings`)
`rule_ids`: the list of ids of rules
`outputs`: the outputs matrix (prob distribution as a vector per rule)
`rule_strings`: the whole list of rules in the format of strings like `u'{c2=x,c4=o,c5=b}'`. | entailment |
def parse(self, line):
    """Parses a sentence into a tree object.

    Args:
        line: Sentence to be parsed into a tree
    Returns:
        Tree object representing parsed sentence
        None if parse fails
    """
    # raw_parse yields candidate parses; take the first candidate's root.
    candidates = list(self.parser.raw_parse(line))
    return candidates[0][0]
Args:
line: Sentence to be parsed into a tree
Returns:
Tree object representing parsed sentence
None if parse fails | entailment |
def pack_chunk(source_data: bytes) -> str:
    """
    Compresses the given bytes with zlib and encodes the result as a
    base64 string for non-binary transmission.

    :param source_data:
        The raw bytes to pack; empty or None input yields ''.
    :return:
        The compressed, base64-encoded string.
    """
    if not source_data:
        return ''
    compressed = zlib.compress(source_data)
    encoded = binascii.b2a_base64(compressed)
    return encoded.decode('utf-8')
library and then converting the bytes to a base64 encoded string for
non-binary transmission.
:param source_data:
The data to be converted to a compressed, base64 string | entailment |
def unpack_chunk(chunk_data: str) -> bytes:
    """
    Restores the original bytes from a previously packed chunk by
    base64-decoding and then zlib-decompressing it.

    :param chunk_data:
        The compressed, base64-encoded string; empty or None yields b''.
    :return:
        The original bytes.
    """
    if not chunk_data:
        return b''
    compressed = binascii.a2b_base64(chunk_data.encode('utf-8'))
    return zlib.decompress(compressed)
bytes representation
:param chunk_data:
The compressed, base64 encoded string to convert back to the
source bytes object. | entailment |
def get_file_chunk_count(
        file_path: str,
        chunk_size: int = DEFAULT_CHUNK_SIZE
) -> int:
    """
    Computes how many chunks of at most ``chunk_size`` bytes are needed to
    transmit the file at ``file_path``.

    :param file_path:
        The absolute path to the file that will be synchronized in chunks.
    :param chunk_size:
        The maximum size of each chunk in bytes.
    :return
        0 when the file does not exist; otherwise at least 1 (an empty
        file still counts as a single chunk).
    """
    if not os.path.exists(file_path):
        return 0
    size = os.path.getsize(file_path)
    count = int(math.ceil(size / chunk_size))
    return count if count > 1 else 1
chunk size
:param file_path:
The absolute path to the file that will be synchronized in chunks
:param chunk_size:
The maximum size of each chunk in bytes
:return
The number of chunks necessary to send the entire contents of the
specified file for the given chunk size | entailment |
def read_file_chunks(
        file_path: str,
        chunk_size: int = DEFAULT_CHUNK_SIZE
) -> 'typing.Iterator[str]':
    """
    Generator that reads the specified file in chunks, yielding each chunk
    as a compressed, base64-encoded string (see ``pack_chunk``) for sync
    transmission. Yields nothing when the file does not exist.

    :param file_path:
        The path to the file to read in chunks.
    :param chunk_size:
        The size, in bytes, of each raw chunk. The final chunk holds the
        remainder and may be smaller.
    """
    chunk_count = get_file_chunk_count(file_path, chunk_size)
    if chunk_count < 1:
        # Bare return: in a generator this simply ends the iteration.
        # (The original `return ''` only set an unobservable StopIteration
        # value, and the old `-> bytes` annotation was wrong — the
        # generator yields base64 strings.)
        return
    with open(file_path, mode='rb') as fp:
        for _ in range(chunk_count):
            yield pack_chunk(fp.read(chunk_size))
each returned chunk is a compressed base64 encoded string for sync
transmission
:param file_path:
The path to the file to read in chunks
:param chunk_size:
The size, in bytes, of each chunk. The final chunk will be less than
or equal to this size as the remainder. | entailment |
def write_file_chunk(
        file_path: str,
        packed_chunk: str,
        append: bool = True,
        offset: int = -1
):
    """
    Unpacks the given chunk and writes it to ``file_path``, creating the
    file if it does not yet exist.

    :param file_path:
        The file where the chunk will be written or appended.
    :param packed_chunk:
        The packed chunk data; it is unpacked before writing.
    :param append:
        When True the chunk is appended to an existing file; when False
        the chunk data overwrites the file.
    :param offset:
        The byte offset in the file where the chunk should be written. A
        negative value defers to the append/overwrite mode. Note that
        specifying an offset with append mode forces a write instead of
        an append.
    """
    contents = unpack_chunk(packed_chunk)
    writer.write_file(
        file_path,
        contents,
        mode='ab' if append else 'wb',
        offset=offset
    )
the chunk before writing. If the file does not yet exist, it will be
created. Set the append argument to False if you do not want the chunk
to be appended to an existing file.
:param file_path:
The file where the chunk will be written or appended
:param packed_chunk:
The packed chunk data to write to the file. It will be unpacked before
the file is written.
:param append:
Whether or not the chunk should be appended to the existing file. If
False the chunk data will overwrite the existing file.
:param offset:
The byte offset in the file where the chunk should be written.
If the value is less than zero, the chunk will be written or appended
based on the `append` argument. Note that if you indicate an append
write mode and an offset, the mode will be forced to write instead of
append. | entailment |
def add_output_path(path: str = None) -> str:
    """
    Registers a path for logging output unless it is already registered.

    :param path:
        The path to register. When empty or None, the current working
        directory is used instead.
    :return:
        The cleaned version of the registered path.
    """
    target = paths.clean(path or os.getcwd())
    already_registered = target in _logging_paths
    if not already_registered:
        _logging_paths.append(target)
    return target
already in the listed paths.
:param path:
The path to add to the logging output paths. If the path is empty
or no path is given, the current working directory will be used
instead. | entailment |
def remove_output_path(path: str = None) -> str:
    """
    Unregisters a path from the logging output paths if it is registered.

    :param path:
        The path to unregister. When empty or None, the current working
        directory is used instead.
    :return:
        The cleaned version of the path.
    """
    cleaned = paths.clean(path or os.getcwd())
    if cleaned in _logging_paths:
        # Remove the cleaned path, which is what add_output_path stored.
        # (The original removed the raw `path`, which raised ValueError
        # whenever cleaning had altered the given path.)
        _logging_paths.remove(cleaned)
    return cleaned
in the listed paths.
:param path:
The path to remove from the logging output paths. If the path is empty
or no path is given, the current working directory will be used
instead. | entailment |
def log(
        message: typing.Union[str, typing.List[str]],
        whitespace: int = 0,
        whitespace_top: int = 0,
        whitespace_bottom: int = 0,
        indent_by: int = 0,
        trace: bool = True,
        file_path: str = None,
        append_to_file: bool = True,
        **kwargs
) -> str:
    """
    Logs a message to the console with formatting support beyond a simple
    print statement or logger statement.

    :param message:
        The primary log message for the entry
    :param whitespace:
        The number of lines of whitespace to append to the beginning and
        end of the log message when printed to the console
    :param whitespace_top:
        The number of lines of whitespace to append to the beginning only
        of the log message when printed to the console. If whitespace_top
        and whitespace are both specified, the larger of the two values
        will be used.
    :param whitespace_bottom:
        The number of lines of whitespace to append to the end of the log
        message when printed to the console. If whitespace_bottom and
        whitespace are both specified, the larger of the two values will
        be used.
    :param indent_by:
        The number of spaces that each line of text should be indented
    :param trace:
        Whether or not to trace the output to the console
    :param file_path:
        A path to a logging file where the output should be written
    :param append_to_file:
        Whether or not the log entry should be overwritten or appended to
        the log file specified in the file_path argument
    :param kwargs:
        Additional named values appended to the message as
        ``key: value`` lines.
    :return:
        The fully formatted message string that was logged.
    """
    m = add_to_message(message)
    for key, value in kwargs.items():
        m.append('{key}: {value}'.format(key=key, value=value))
    pre_whitespace = int(max(whitespace, whitespace_top))
    post_whitespace = int(max(whitespace, whitespace_bottom))
    # The '\n'.join below contributes one newline per entry, so one fewer
    # blank line is inserted explicitly — presumably intentional; verify
    # against rendered output if changing.
    if pre_whitespace:
        m.insert(0, max(0, pre_whitespace - 1) * '\n')
    if post_whitespace:
        m.append(max(0, post_whitespace - 1) * '\n')
    message = indent('\n'.join(m), ' ' * indent_by)
    raw(
        message=message,
        trace=trace,
        file_path=file_path,
        append_to_file=append_to_file
    )
    return message
print statement or logger statement.
:param message:
The primary log message for the entry
:param whitespace:
The number of lines of whitespace to append to the beginning and end
of the log message when printed to the console
:param whitespace_top:
The number of lines of whitespace to append to the beginning only of
the log message when printed to the console. If whitespace_top and
whitespace are both specified, the larger of the two values will be
used.
:param whitespace_bottom:
The number of lines of whitespace to append to the end of the log
message when printed to the console. If whitespace_bottom and
whitespace are both specified, the larger of hte two values will be
used.
:param indent_by:
The number of spaces that each line of text should be indented
:param trace:
Whether or not to trace the output to the console
:param file_path:
A path to a logging file where the output should be written
:param append_to_file:
Whether or not the log entry should be overwritten or appended to the
log file specified in the file_path argument
:param kwargs: | entailment |
def add_to_message(data, indent_level=0) -> list:
    """Recursively flattens ``data`` into a list of indented message lines.

    Strings are dedented, stripped, and indented ``indent_level`` spaces;
    nested iterables are flattened with one extra indent level per depth.
    """
    if isinstance(data, str):
        cleaned = dedent(data.strip('\n')).strip()
        return [indent(cleaned, indent_level * ' ')]

    lines = []
    for entry in data:
        bump = 0 if isinstance(entry, str) else 1
        lines.extend(add_to_message(entry, indent_level + bump))
    return lines
def add(mode_id: str) -> list:
    """Activates ``mode_id`` (if not already active) and returns a copy of
    the currently active modes list.
    """
    already_active = has(mode_id)
    if not already_active:
        _current_modes.append(mode_id)
    return _current_modes.copy()
def remove(mode_id: str) -> bool:
    """Deactivates ``mode_id`` and reports whether a removal actually
    happened; False means the mode was not active to begin with.
    """
    was_active = has(mode_id)
    if was_active:
        _current_modes.remove(mode_id)
    return was_active
def clone(client):
    """Builds a new Redis client equivalent to ``client`` but using the
    pure-Python response parser, which the slowlog output requires.

    The connection kwargs are copied before modification: the original
    implementation assigned into the pool's shared ``connection_kwargs``
    dict, silently altering the source client's connection pool.

    :param client: a qless client whose ``client.redis`` connection pool
        supplies the connection settings to reuse
    :return: a new ``redis.Redis`` instance backed by a fresh pool
    """
    kwargs = dict(client.redis.connection_pool.connection_kwargs)
    kwargs['parser_class'] = redis.connection.PythonParser
    pool = redis.connection.ConnectionPool(**kwargs)
    return redis.Redis(connection_pool=pool)
def pretty(timings, label):
    """Prints an aligned table of per-command timing statistics.

    ``timings`` maps command names to lists of observed durations (us).
    """
    rows = sorted(
        ((sum(values), len(values), name) for name, values in timings.items()),
        reverse=True
    )
    print(label)
    print('=' * 65)
    print('%20s => %13s | %8s | %13s' % (
        'Command', 'Average', '# Calls', 'Total time'))
    print('-' * 65)
    for total, count, name in rows:
        print('%20s => %10.5f us | %8i | %10i us' % (
            name, float(total) / count, count, total))
def start(self):
    """Configures the redis slowlog to capture every command for a
    profiling run; the previous settings are stashed on ``self._configs``
    so ``stop`` can restore them.
    """
    client = self._client
    self._configs = client.config_get('slow-*')
    for key, value in (('slowlog-max-len', 100000),
                       ('slowlog-log-slower-than', 0)):
        client.config_set(key, value)
    client.execute_command('slowlog', 'reset')
def stop(self):
    '''Set everything back to normal and collect our data'''
    # Restore the slowlog settings that start() overwrote.
    for key, value in self._configs.items():
        self._client.config_set(key, value)
    # Each slowlog entry is (id, timestamp, duration-in-us, request-args).
    logs = self._client.execute_command('slowlog', 'get', 100000)

    # 'current' tracks which qless script subcommand redis commands are
    # attributed to: commands seen after an 'eval' accumulate under that
    # script's subcommand until the next 'eval' appears.
    current = {
        'name': None, 'accumulated': defaultdict(list)
    }
    for _, _, duration, request in logs:
        command = request[0]
        # Skip the slowlog bookkeeping commands issued by this profiler.
        if command == 'slowlog':
            continue
        if 'eval' in command.lower():
            # The qless subcommand is the script's fourth request arg.
            subcommand = request[3]
            self._timings['qless-%s' % subcommand].append(duration)
            if current['name']:
                # Flush the previous subcommand's accumulated timings
                # into the per-command table before starting a new one.
                if current['name'] not in self._commands:
                    self._commands[current['name']] = defaultdict(list)
                for key, values in current['accumulated'].items():
                    self._commands[current['name']][key].extend(values)
            current = {
                'name': subcommand, 'accumulated': defaultdict(list)
            }
        else:
            self._timings[command].append(duration)
            if current['name']:
                current['accumulated'][command].append(duration)
    # Include the last
    if current['name']:
        if current['name'] not in self._commands:
            self._commands[current['name']] = defaultdict(list)
        for key, values in current['accumulated'].items():
            self._commands[current['name']][key].extend(values)
def display(self):
    """Prints the profiling summaries: raw redis command timings first,
    then one table per qless script subcommand."""
    self.pretty(self._timings, 'Raw Redis Commands')
    print()
    for name, timings in self._commands.items():
        self.pretty(timings, 'Qless "%s" Command' % name)
        print()
def add_shell_action(sub_parser: ArgumentParser) -> ArgumentParser:
    """Registers the shell command-line options on ``sub_parser`` and
    returns it for chaining. All options are optional string paths that
    default to None."""
    options = (
        ('-p', '--project', 'project_directory'),
        ('-l', '--log', 'logging_path'),
        ('-o', '--output', 'output_directory'),
        ('-s', '--shared', 'shared_data_path'),
    )
    for short_flag, long_flag, destination in options:
        sub_parser.add_argument(
            short_flag, long_flag,
            dest=destination,
            type=str,
            default=None
        )
    return sub_parser
def parse(args: list = None) -> dict:
    """
    Parses the command line arguments and returns a dictionary containing
    the results.

    :param args:
        The command line arguments to parse. If None, the system command
        line arguments will be used instead.
    :return:
        A dictionary of the parsed argument values, plus a 'parser' key
        holding the ArgumentParser instance that produced them.
    """
    parser = ArgumentParser(description='Cauldron command')

    parser.add_argument(
        'command',
        nargs='?',
        default='shell',
        help='The Cauldron command action to execute'
    )
    parser.add_argument(
        '-v', '--version',
        dest='show_version_info',
        default=False,
        action='store_true',
        help='show Cauldron version and exit'
    )

    sub_parsers = parser.add_subparsers(
        dest='command',
        title='Sub-Command Actions',
        description='The actions you can execute with the cauldron command',
    )
    # Register each sub-command exactly once. The original code registered
    # the 'shell' parser twice (once for its aliases, once for its
    # options), which left the aliased parser without the shell options
    # and conflicts with duplicate-name checks in newer argparse versions.
    add_shell_action(sub_parsers.add_parser('shell', aliases=['version']))
    add_kernel_action(sub_parsers.add_parser('kernel', aliases=['serve']))

    arguments = vars(parser.parse_args(args=args))
    arguments['parser'] = parser
    return arguments
def running(self, offset=0, count=25):
    """Returns the page of currently-running jobs in this queue, starting
    at ``offset`` with at most ``count`` entries."""
    query = ('jobs', 'running', self.name, offset, count)
    return self.client(*query)
def stalled(self, offset=0, count=25):
    """Returns the page of currently-stalled jobs in this queue, starting
    at ``offset`` with at most ``count`` entries."""
    query = ('jobs', 'stalled', self.name, offset, count)
    return self.client(*query)
def scheduled(self, offset=0, count=25):
    """Returns the page of currently-scheduled jobs in this queue,
    starting at ``offset`` with at most ``count`` entries."""
    query = ('jobs', 'scheduled', self.name, offset, count)
    return self.client(*query)
def depends(self, offset=0, count=25):
    """Returns the page of currently-dependent jobs in this queue,
    starting at ``offset`` with at most ``count`` entries."""
    query = ('jobs', 'depends', self.name, offset, count)
    return self.client(*query)
def recurring(self, offset=0, count=25):
    """Returns the page of recurring jobs in this queue, starting at
    ``offset`` with at most ``count`` entries."""
    query = ('jobs', 'recurring', self.name, offset, count)
    return self.client(*query)
def class_string(self, klass):
    """Returns the dotted-path string for ``klass``; a string argument is
    assumed to already be that path and is returned unchanged."""
    if isinstance(klass, string_types):
        return klass
    return '{}.{}'.format(klass.__module__, klass.__name__)
def put(self, klass, data, priority=None, tags=None, delay=None,
        retries=None, jid=None, depends=None):
    """Creates a job in this queue (or moves an existing one here).

    If a worker currently holds the job, its later heartbeat/complete
    calls will fail. ``priority`` is negative to run sooner and positive
    for less important work; ``tags`` is serialized as a JSON array; and
    ``delay`` is the number of seconds before the job becomes actionable.
    A random jid is generated when ``jid`` is not supplied.
    """
    job_id = jid or uuid.uuid4().hex
    return self.client(
        'put', self.worker_name, self.name, job_id,
        self.class_string(klass), json.dumps(data),
        delay or 0,
        'priority', priority or 0,
        'tags', json.dumps(tags or []),
        'retries', retries or 5,
        'depends', json.dumps(depends or [])
    )
def recur(self, klass, data, interval, offset=0, priority=None, tags=None,
          retries=None, jid=None):
    """Registers a recurring job on this queue that re-spawns every
    ``interval`` seconds, starting after ``offset`` seconds. A random jid
    is generated when ``jid`` is not supplied."""
    job_id = jid or uuid.uuid4().hex
    return self.client(
        'recur', self.name, job_id,
        self.class_string(klass), json.dumps(data),
        'interval', interval, offset,
        'priority', priority or 0,
        'tags', json.dumps(tags or []),
        'retries', retries or 5
    )
def pop(self, count=None):
    """Pops jobs off this queue for this worker. Returns a list when
    ``count`` is given; otherwise a single Job or None."""
    raw = self.client('pop', self.name, self.worker_name, count or 1)
    jobs = [Job(self.client, **spec) for spec in json.loads(raw)]
    if count is None:
        return (jobs and jobs[0]) or None
    return jobs
def peek(self, count=None):
    """Like ``pop`` but only inspects the next jobs without taking them.
    Returns a list when ``count`` is given; otherwise a single Job or
    None."""
    raw = self.client('peek', self.name, count or 1)
    jobs = [Job(self.client, **spec) for spec in json.loads(raw)]
    if count is None:
        return (jobs and jobs[0]) or None
    return jobs
def stats(self, date=None):
    """Returns the decoded statistics blob for this queue on ``date``
    (defaulting to now), shaped like::

        {
            'total': ..., 'mean': ..., 'variance': ...,
            'histogram': [...]
        }

    The histogram resolution coarsens with age: per-second for the first
    minute, per-minute for the first hour, 15-minute bins for the first
    day, hourly for three days, then daily.
    """
    when = date or repr(time.time())
    return json.loads(self.client('stats', self.name, when))
def run(
        project: 'projects.Project',
        step: 'projects.ProjectStep'
) -> dict:
    """Renders the step's markdown source file into the notebook display.

    :param project:
        The running project whose shared variables are passed to the
        markdown template for interpolation.
    :param step:
        The step whose source file will be read and rendered.
    :return:
        A run response dictionary with a ``success`` flag; failures also
        include an ``html_message`` describing the rendering error.
    """
    with open(step.source_path, 'r') as handle:
        source = handle.read()

    try:
        cauldron.display.markdown(source, **project.shared.fetch(None))
        return {'success': True}
    except Exception as error:
        return {
            'success': False,
            'html_message': templating.render_template(
                'markdown-error.html',
                error=error
            )
        }
def fetch(reload: bool = False) -> dict:
    """Returns the registry of available Cauldron commands, keyed by
    command NAME. The registry is cached; pass ``reload=True`` to rebuild
    it from the commands package."""
    if not reload and len(COMMANDS) > 0:
        return COMMANDS

    COMMANDS.clear()
    for attribute_name in dir(commands):
        entry = getattr(commands, attribute_name)
        # A command module is identified by NAME and DESCRIPTION attrs.
        if entry and hasattr(entry, 'NAME') and hasattr(entry, 'DESCRIPTION'):
            COMMANDS[entry.NAME] = entry
    return dict(COMMANDS.items())
def get_command_from_module(
        command_module,
        remote_connection: environ.RemoteConnection
):
    """Returns the executor callable for ``command_module``: its
    ``execute_remote`` when a remote connection is active and the module
    supports remote execution, otherwise its local ``execute``."""
    if remote_connection.active and hasattr(command_module, 'execute_remote'):
        return command_module.execute_remote
    return command_module.execute
def show_help(command_name: str = None, raw_args: str = '') -> Response:
    """Prints help to the console: the specific command's usage when
    ``command_name`` names a known command, otherwise the overview of all
    available commands plus a failure notification."""
    response = Response()
    commands_index = fetch()

    if command_name and command_name in commands_index:
        parser, _ = parse.get_parser(
            commands_index[command_name],
            parse.explode_line(raw_args),
            dict()
        )

        if parser is not None:
            help_text = parser.format_help()
            return response.notify(
                kind='INFO',
                code='COMMAND_DESCRIPTION'
            ).kernel(
                commands=help_text
            ).console(
                help_text,
                whitespace=1
            ).response

    environ.log_header('Available Commands')
    response.consume(print_module_help())

    return response.fail(
        code='NO_SUCH_COMMAND',
        message='Failed to show command help for "{}"'.format(command_name)
    ).console(
        """
        For more information on the various commands, enter help on the
        specific command:

            help [COMMAND]
        """,
        whitespace_bottom=1
    ).response
def _import(klass):
    '''1) Get a reference to the module
    2) Check the file that module's imported from
    3) If that file's been updated, force a reload of that module
    return it'''
    # Import the root package, then walk the dotted path down to the
    # module that defines the class (every segment before the last).
    mod = __import__(klass.rpartition('.')[0])
    for segment in klass.split('.')[1:-1]:
        mod = getattr(mod, segment)

    # Alright, now check the file associated with it. Note that classes
    # defined in __main__ don't have a __file__ attribute
    # Record when this class was first loaded so later modification
    # times can be compared against it.
    if klass not in BaseJob._loaded:
        BaseJob._loaded[klass] = time.time()
    if hasattr(mod, '__file__'):
        try:
            mtime = os.stat(mod.__file__).st_mtime
            # Reload only if the module's file changed after we first
            # loaded this class.
            if BaseJob._loaded[klass] < mtime:
                mod = reload_module(mod)
        except OSError:
            # NOTE(review): logger.warn is deprecated (removed in
            # Python 3.13); prefer logger.warning — confirm logger type.
            logger.warn('Could not check modification time of %s',
                        mod.__file__)
    # Return the class attribute from the (possibly reloaded) module.
    return getattr(mod, klass.rpartition('.')[2])
def process(self):
    '''Load the module containing your class, and run the appropriate
    method. For example, if this job was popped from the queue
    ``testing``, then this would invoke the ``testing`` staticmethod of
    your class.'''
    try:
        # Prefer a method named after the queue; fall back to a generic
        # 'process' method, else None.
        method = getattr(self.klass, self.queue_name,
                         getattr(self.klass, 'process', None))
    except Exception as exc:
        # We failed to import the module containing this class
        logger.exception('Failed to import %s', self.klass_name)
        return self.fail(self.queue_name + '-' + exc.__class__.__name__,
                         'Failed to import %s' % self.klass_name)

    if method:
        # The handler must be a plain function on the class (the 'is not
        # static' failure below indicates this is intended to enforce a
        # staticmethod calling convention).
        if isinstance(method, types.FunctionType):
            try:
                logger.info('Processing %s in %s',
                            self.jid, self.queue_name)
                method(self)
                logger.info('Completed %s in %s',
                            self.jid, self.queue_name)
            except Exception as exc:
                # Make error type based on exception type
                logger.exception('Failed %s in %s: %s',
                                 self.jid, self.queue_name, repr(method))
                self.fail(self.queue_name + '-' + exc.__class__.__name__,
                          traceback.format_exc())
        else:
            # Or fail with a message to that effect
            logger.error('Failed %s in %s : %s is not static',
                         self.jid, self.queue_name, repr(method))
            self.fail(self.queue_name + '-method-type',
                      repr(method) + ' is not static')
    else:
        # Or fail with a message to that effect
        logger.error(
            'Failed %s : %s is missing a method "%s" or "process"',
            self.jid, self.klass_name, self.queue_name)
        self.fail(self.queue_name + '-method-missing', self.klass_name +
                  ' is missing a method "' + self.queue_name + '" or "process"')
def move(self, queue, delay=0, depends=None):
    """Requeues this job onto ``queue``, voiding any worker's current
    lock (their heartbeats will then fail). Like ``Queue.put``, accepts a
    ``delay`` in seconds and a list of dependency jids."""
    logger.info('Moving %s to %s from %s',
                self.jid, queue, self.queue_name)
    return self.client(
        'put', self.worker_name, queue, self.jid, self.klass_name,
        json.dumps(self.data), delay,
        'depends', json.dumps(depends or [])
    )
def complete(self, nextq=None, delay=None, depends=None):
    """Turns this job in as complete, optionally advancing it to
    ``nextq``. Like ``Queue.put`` and ``move``, accepts a ``delay`` and a
    dependency list. Returns False when the server rejects the call."""
    worker = self.client.worker_name
    payload = json.dumps(self.data)

    if not nextq:
        logger.info('Completing %s', self.jid)
        return self.client(
            'complete', self.jid, worker, self.queue_name, payload
        ) or False

    logger.info('Advancing %s to %s from %s',
                self.jid, nextq, self.queue_name)
    return self.client(
        'complete', self.jid, worker, self.queue_name, payload,
        'next', nextq, 'delay', delay or 0,
        'depends', json.dumps(depends or [])
    ) or False
def heartbeat(self):
    """Renews this job's lock, refreshing ``expires_at`` from the server
    response. Raises LostLockException when the lock can no longer be
    renewed."""
    logger.debug('Heartbeating %s (ttl = %s)', self.jid, self.ttl)
    try:
        response = self.client('heartbeat', self.jid,
                               self.client.worker_name,
                               json.dumps(self.data))
        self.expires_at = float(response or 0)
    except QlessException:
        raise LostLockException(self.jid)
    logger.debug('Heartbeated %s (ttl = %s)', self.jid, self.ttl)
    return self.expires_at
def fail(self, group, message):
    '''Mark the particular job as failed, with the provided type, and a
    more specific message. By `type`, we mean some phrase that might be
    one of several categorical modes of failure. The `message` is
    something more job-specific, like perhaps a traceback.

    This method should __not__ be used to note that a job has been dropped
    or has failed in a transient way. This method __should__ be used to
    note that a job has something really wrong with it that must be
    remedied.

    The motivation behind the `type` is so that similar errors can be
    grouped together. Optionally, updated data can be provided for the job.
    A job in any state can be marked as failed. If it has been given to a
    worker as a job, then its subsequent requests to heartbeat or complete
    that job will fail. Failed jobs are kept until they are canceled or
    completed. __Returns__ the id of the failed job if successful, or
    `False` on failure.'''
    # Use logger.warning: Logger.warn was deprecated and removed in
    # Python 3.13.
    logger.warning('Failing %s (%s): %s', self.jid, group, message)
    return self.client('fail', self.jid, self.client.worker_name, group,
                       message, json.dumps(self.data)) or False
def retry(self, delay=0, group=None, message=None):
    """Re-enqueues this job in its own queue after ``delay`` seconds, for
    transient failures detected by the worker. When both ``group`` and
    ``message`` are provided the failure is recorded with them."""
    command = ['retry', self.jid, self.queue_name, self.worker_name, delay]
    if group is not None and message is not None:
        command += [group, message]
    return self.client(*command)
def undepend(self, *args, **kwargs):
    """Removes job dependencies from this job: either the specific jids
    passed positionally, or every dependency when ``all=True`` is given.
    Returns False when the server rejects the call."""
    if kwargs.get('all', False):
        return self.client('depends', self.jid, 'off', 'all') or False
    return self.client('depends', self.jid, 'off', *args) or False
def has_extension(file_path: str, *args: str) -> bool:
    """
    Checks to see if the given file path ends with any of the specified
    file extensions. If an extension does not begin with a '.' one is
    added automatically.

    :param file_path:
        The path on which the extensions will be tested for a match
    :param args:
        One or more extension strings to test against the file_path
        argument (the original ``typing.Tuple[str]`` annotation wrongly
        typed each vararg as a tuple; varargs are annotated per-element)
    :return:
        Whether or not the file_path ends with at least one of the
        specified extensions
    """
    def add_dot(extension: str) -> str:
        # Normalize 'py' -> '.py' while leaving '.py' untouched.
        return (
            extension
            if extension.startswith('.') else
            '.{}'.format(extension)
        )

    # Generator form short-circuits and avoids an intermediate list.
    return any(file_path.endswith(add_dot(ext)) for ext in args)
def get_docstring(target) -> str:
    """Returns ``target``'s docstring with common leading whitespace
    removed, or an empty string when no docstring exists."""
    raw = getattr(target, '__doc__')
    return '' if raw is None else textwrap.dedent(raw)
def get_doc_entries(target: typing.Callable) -> list:
    """Splits ``target``'s docstring into a list of entry blocks: each
    ``:``-prefixed directive line starts a new entry and other non-empty
    lines are folded into the preceding entry."""
    raw = get_docstring(target)
    if not raw:
        return []

    stripped_lines = [
        line.strip()
        for line in raw.replace('\r', '').split('\n')
    ]

    def fold(entries: list, line: str) -> list:
        content = line.strip()
        if not content:
            return entries
        if not entries or content.startswith(':'):
            entries.append(line.rstrip())
        else:
            entries[-1] = '{}\n{}'.format(entries[-1], line.rstrip())
        return entries

    return [
        textwrap.dedent(entry).strip()
        for entry in functools.reduce(fold, stripped_lines, [])
    ]
def parse_function(
        name: str,
        target: typing.Callable
) -> typing.Union[None, dict]:
    """Parses ``target``'s docstring into a documentation dictionary with
    ``name``, ``doc``, ``params`` and ``returns`` keys. Returns None when
    ``target`` is not a function (lacks ``__code__``)."""
    if not hasattr(target, '__code__'):
        return None

    entries = get_doc_entries(target)
    # Directive entries (':param ...', ':return: ...') are parsed
    # separately; the remainder forms the free-text description.
    description = ' '.join(
        entry for entry in entries if not entry.startswith(':')
    )

    return dict(
        name=getattr(target, '__name__'),
        doc=description,
        params=parse_params(target, entries),
        returns=parse_returns(target, entries)
    )
def variable(name: str, target: property) -> typing.Union[None, dict]:
    """Builds documentation for a property: parses its getter when one is
    present (marking the entry read-only if no setter exists), otherwise
    falls back to the target's own docstring."""
    if hasattr(target, 'fget'):
        entry = parse_function(name, target.fget)
        if entry:
            entry['read_only'] = bool(target.fset is None)
            return entry

    return dict(
        name=name,
        description=get_docstring(target)
    )
def read_all(self) -> str:
    """Returns the full contents of the byte buffer decoded with the
    source encoding (without consuming it), or an error string when
    reading or decoding fails."""
    try:
        raw = self.bytes_buffer.getvalue()
        return '' if raw is None else raw.decode(self.source_encoding)
    except Exception as error:
        return 'Redirect Buffer Error: {}'.format(error)
def flush_all(self) -> str:
    """
    Empties the byte buffer and returns everything it contained, decoded
    using the source encoding. (Removed the superseded commented-out
    seek/read implementation and filled in the empty docstring.)

    :return:
        The flushed buffer contents as a string; an empty string when the
        buffer held nothing.
    """
    contents = self.bytes_buffer.getvalue()
    self.bytes_buffer.truncate(0)
    self.bytes_buffer.seek(0)
    return contents.decode(self.source_encoding) if contents else ''
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.