sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def create_data(step: 'projects.ProjectStep') -> STEP_DATA:
"""
Creates the data object that stores the step information in the notebook
results JavaScript file.
:param step:
Project step for which to create the data
:return:
Step data tuple containing scaffold data structure for the step output.
The dictionary must then be populated with data from the step to
correctly reflect the current state of the step.
This is essentially a "blank" step dictionary, which is what the step
would look like if it had not yet run
"""
return STEP_DATA(
name=step.definition.name,
status=step.status(),
has_error=False,
body=None,
data=dict(),
includes=[],
cauldron_version=list(environ.version_info),
file_writes=[]
) | Creates the data object that stores the step information in the notebook
results JavaScript file.
:param step:
Project step for which to create the data
:return:
Step data tuple containing scaffold data structure for the step output.
The dictionary must then be populated with data from the step to
correctly reflect the current state of the step.
This is essentially a "blank" step dictionary, which is what the step
would look like if it had not yet run | entailment |
def get_cached_data(
step: 'projects.ProjectStep'
) -> typing.Union[None, STEP_DATA]:
"""
Attempts to load and return the cached step data for the specified step. If
no cached data exists, or the cached data is corrupt, a None value is
returned instead.
:param step:
The step for which the cached data should be loaded
:return:
Either a step data structure containing the cached step data or None
if no cached data exists for the step
"""
cache_path = step.report.results_cache_path
if not os.path.exists(cache_path):
return None
out = create_data(step)
try:
with open(cache_path, 'r') as f:
cached_data = json.load(f)
except Exception:
return None
file_writes = [
file_io.entry_from_dict(fw)
for fw in cached_data['file_writes']
]
return out \
._replace(**cached_data) \
._replace(file_writes=file_writes) | Attempts to load and return the cached step data for the specified step. If
no cached data exists, or the cached data is corrupt, a None value is
returned instead.
:param step:
The step for which the cached data should be loaded
:return:
Either a step data structure containing the cached step data or None
if no cached data exists for the step | entailment |
def initialize_logging_path(path: str = None) -> str:
"""
Initializes the logging path for running the project. If no logging path
is specified, the current directory will be used instead.
:param path:
Path to initialize for logging. Can be either a path to a file or
a path to a directory. If a directory is specified, the log file
written will be called "cauldron_run.log".
:return:
The absolute path to the log file that will be used when this project
is executed.
"""
path = environ.paths.clean(path if path else '.')
if os.path.isdir(path) and os.path.exists(path):
path = os.path.join(path, 'cauldron_run.log')
elif os.path.exists(path):
os.remove(path)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
return path | Initializes the logging path for running the project. If no logging path
is specified, the current directory will be used instead.
:param path:
Path to initialize for logging. Can be either a path to a file or
a path to a directory. If a directory is specified, the log file
written will be called "cauldron_run.log".
:return:
The absolute path to the log file that will be used when this project
is executed. | entailment |
def run_project(
project_directory: str,
output_directory: str = None,
log_path: str = None,
shared_data: dict = None,
reader_path: str = None,
reload_project_libraries: bool = False
) -> ExecutionResult:
"""
Opens, executes and closes a Cauldron project in a single command in
production mode (non-interactive).
:param project_directory:
Directory where the project to run is located
:param output_directory:
Directory where the project display data will be saved
:param log_path:
Path to a file or directory where logging information will be
written
:param shared_data:
Data to load into the cauldron.shared object prior to executing the
project
:param reader_path:
Specifies a path where a reader file will be saved after the project
has finished running. If no path is specified, no reader file will be
saved. If the path is a directory, a reader file will be saved in that
directory with the project name as the file name.
:param reload_project_libraries:
Whether or not to reload all project libraries prior to execution of
the project. By default this is False, but can be enabled in cases
where refreshing the project libraries before execution is needed.
:return:
The response result from the project execution
"""
log_path = initialize_logging_path(log_path)
logger.add_output_path(log_path)
def on_complete(
command_response: environ.Response,
project_data: SharedCache = None,
message: str = None
) -> ExecutionResult:
environ.modes.remove(environ.modes.SINGLE_RUN)
if message:
logger.log(message)
logger.remove_output_path(log_path)
return ExecutionResult(
command_response=command_response,
project_data=project_data or SharedCache()
)
environ.modes.add(environ.modes.SINGLE_RUN)
open_response = open_command.execute(
context=cli.make_command_context(open_command.NAME),
path=project_directory,
results_path=output_directory
)
if open_response.failed:
return on_complete(
command_response=open_response,
message='[ERROR]: Aborted trying to open project'
)
project = cauldron.project.get_internal_project()
project.shared.put(**(shared_data if shared_data is not None else dict()))
commander.preload()
run_response = run_command.execute(
context=cli.make_command_context(run_command.NAME),
skip_library_reload=not reload_project_libraries
)
project_cache = SharedCache().put(
**project.shared._shared_cache_data
)
if run_response.failed:
return on_complete(
command_response=run_response,
project_data=project_cache,
message='[ERROR]: Aborted trying to run project steps'
)
if reader_path:
save_command.execute(
context=cli.make_command_context(save_command.NAME),
path=reader_path
)
close_response = close_command.execute(
context=cli.make_command_context(close_command.NAME)
)
if close_response.failed:
return on_complete(
command_response=close_response,
project_data=project_cache,
message='[ERROR]: Failed to close project cleanly after run'
)
return on_complete(
command_response=run_response,
project_data=project_cache,
message='Project execution complete'
) | Opens, executes and closes a Cauldron project in a single command in
production mode (non-interactive).
:param project_directory:
Directory where the project to run is located
:param output_directory:
Directory where the project display data will be saved
:param log_path:
Path to a file or directory where logging information will be
written
:param shared_data:
Data to load into the cauldron.shared object prior to executing the
project
:param reader_path:
Specifies a path where a reader file will be saved after the project
has finished running. If no path is specified, no reader file will be
saved. If the path is a directory, a reader file will be saved in that
directory with the project name as the file name.
:param reload_project_libraries:
Whether or not to reload all project libraries prior to execution of
the project. By default this is False, but can be enabled in cases
where refreshing the project libraries before execution is needed.
:return:
The response result from the project execution | entailment |
def stop(self, sig=signal.SIGINT):
'''Stop all the workers, and then wait for them'''
for cpid in self.sandboxes:
logger.warn('Stopping %i...' % cpid)
try:
os.kill(cpid, sig)
except OSError: # pragma: no cover
logger.exception('Error stopping %s...' % cpid)
# While we still have children running, wait for them
# We edit the dictionary during the loop, so we need to copy its keys
for cpid in list(self.sandboxes):
try:
logger.info('Waiting for %i...' % cpid)
pid, status = os.waitpid(cpid, 0)
logger.warn('%i stopped with status %i' % (pid, status >> 8))
except OSError: # pragma: no cover
logger.exception('Error waiting for %i...' % cpid)
finally:
self.sandboxes.pop(cpid, None) | Stop all the workers, and then wait for them | entailment |
def spawn(self, **kwargs):
'''Return a new worker for a child process'''
copy = dict(self.kwargs)
copy.update(kwargs)
# Apparently there's an issue with importing gevent in the parent
process and then using it in the child. This is meant to relieve that
# problem by allowing `klass` to be specified as a string.
if isinstance(self.klass, string_types):
self.klass = util.import_class(self.klass)
return self.klass(self.queues, self.client, **copy) | Return a new worker for a child process | entailment |
def run(self):
'''Run this worker'''
self.signals(('TERM', 'INT', 'QUIT'))
# Divide up the jobs that we have to divvy up between the workers. This
# produces evenly-sized groups of jobs
resume = self.divide(self.resume, self.count)
for index in range(self.count):
# The sandbox for the child worker
sandbox = os.path.join(
os.getcwd(), 'qless-py-workers', 'sandbox-%s' % index)
cpid = os.fork()
if cpid:
logger.info('Spawned worker %i' % cpid)
self.sandboxes[cpid] = sandbox
else: # pragma: no cover
# Move to the sandbox as the current working directory
with Worker.sandbox(sandbox):
os.chdir(sandbox)
try:
self.spawn(resume=resume[index], sandbox=sandbox).run()
except:
logger.exception('Exception in spawned worker')
finally:
os._exit(0)
try:
while not self.shutdown:
pid, status = os.wait()
logger.warn('Worker %i died with status %i from signal %i' % (
pid, status >> 8, status & 0xff))
sandbox = self.sandboxes.pop(pid)
cpid = os.fork()
if cpid:
logger.info('Spawned replacement worker %i' % cpid)
self.sandboxes[cpid] = sandbox
else: # pragma: no cover
with Worker.sandbox(sandbox):
os.chdir(sandbox)
try:
self.spawn(sandbox=sandbox).run()
except:
logger.exception('Exception in spawned worker')
finally:
os._exit(0)
finally:
self.stop(signal.SIGKILL) | Run this worker | entailment |
def handler(self, signum, frame): # pragma: no cover
'''Signal handler for this process'''
if signum in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
self.stop(signum)
os._exit(0) | Signal handler for this process | entailment |
def get_stack_frames(error_stack: bool = True) -> list:
"""
Returns a list of the current stack frames, which are pruned to focus on the
Cauldron code where the relevant information resides.
"""
cauldron_path = environ.paths.package()
resources_path = environ.paths.resources()
frames = (
list(traceback.extract_tb(sys.exc_info()[-1]))
if error_stack else
traceback.extract_stack()
).copy()
def is_cauldron_code(test_filename: str) -> bool:
if not test_filename or not test_filename.startswith(cauldron_path):
return False
if test_filename.startswith(resources_path):
return False
return True
while len(frames) > 1 and is_cauldron_code(frames[0].filename):
frames.pop(0)
return frames | Returns a list of the current stack frames, which are pruned to focus on the
Cauldron code where the relevant information resides. | entailment |
def format_stack_frame(stack_frame, project: 'projects.Project') -> dict:
"""
Formats a raw stack frame into a dictionary formatted for render
templating and enriched with information from the currently open project.
:param stack_frame:
A raw stack frame to turn into an enriched version for templating.
:param project:
The currently open project, which is used to contextualize stack
information with project-specific information.
:return:
A dictionary containing the enriched stack frame data.
"""
filename = stack_frame.filename
if filename.startswith(project.source_directory):
filename = filename[len(project.source_directory) + 1:]
location = stack_frame.name
if location == '<module>':
location = None
return dict(
filename=filename,
location=location,
line_number=stack_frame.lineno,
line=stack_frame.line
) | Formats a raw stack frame into a dictionary formatted for render
templating and enriched with information from the currently open project.
:param stack_frame:
A raw stack frame to turn into an enriched version for templating.
:param project:
The currently open project, which is used to contextualize stack
information with project-specific information.
:return:
A dictionary containing the enriched stack frame data. | entailment |
def get_formatted_stack_frame(
project: 'projects.Project',
error_stack: bool = True
) -> list:
"""
Returns a list of the stack frames formatted for user display that has
been enriched by the project-specific data.
:param project:
The currently open project used to enrich the stack data.
:param error_stack:
Whether or not to return the error stack. When True the stack of the
last exception will be returned. If no such exception exists, an empty
list will be returned instead. When False the current execution stack
trace will be returned.
"""
return [
format_stack_frame(f, project)
for f in get_stack_frames(error_stack=error_stack)
] | Returns a list of the stack frames formatted for user display that has
been enriched by the project-specific data.
:param project:
The currently open project used to enrich the stack data.
:param error_stack:
Whether or not to return the error stack. When True the stack of the
last exception will be returned. If no such exception exists, an empty
list will be returned instead. When False the current execution stack
trace will be returned. | entailment |
def arg_type_to_string(arg_type) -> str:
"""
Converts the argument type to a string
:param arg_type:
:return:
String representation of the argument type. Multiple return types are
turned into a comma delimited list of type names
"""
union_params = (
getattr(arg_type, '__union_params__', None) or
getattr(arg_type, '__args__', None)
)
if union_params and isinstance(union_params, (list, tuple)):
return ', '.join([arg_type_to_string(item) for item in union_params])
try:
return arg_type.__name__
except AttributeError:
return '{}'.format(arg_type) | Converts the argument type to a string
:param arg_type:
:return:
String representation of the argument type. Multiple return types are
turned into a comma delimited list of type names | entailment |
def merge_components(
*components: typing.List[typing.Union[list, tuple, COMPONENT]]
) -> COMPONENT:
"""
Merges multiple COMPONENT instances into a single one by merging the
lists of includes and files. Has support for elements of the components
arguments list to be lists or tuples of COMPONENT instances as well.
:param components:
:return:
"""
flat_components = functools.reduce(flatten_reducer, components, [])
return COMPONENT(
includes=functools.reduce(
functools.partial(combine_lists_reducer, 'includes'),
flat_components,
[]
),
files=functools.reduce(
functools.partial(combine_lists_reducer, 'files'),
flat_components,
[]
)
) | Merges multiple COMPONENT instances into a single one by merging the
lists of includes and files. Has support for elements of the components
arguments list to be lists or tuples of COMPONENT instances as well.
:param components:
:return: | entailment |
def flatten_reducer(
flattened_list: list,
entry: typing.Union[list, tuple, COMPONENT]
) -> list:
"""
Flattens a list of COMPONENT instances to remove any lists or tuples
of COMPONENTS contained within the list
:param flattened_list:
The existing flattened list that has been populated from previous
calls of this reducer function
:param entry:
An entry to be reduced. Either a COMPONENT instance or a list/tuple
of COMPONENT instances
:return:
The flattened list with the entry flatly added to it
"""
if hasattr(entry, 'includes') and hasattr(entry, 'files'):
flattened_list.append(entry)
elif entry:
flattened_list.extend(entry)
return flattened_list | Flattens a list of COMPONENT instances to remove any lists or tuples
of COMPONENTS contained within the list
:param flattened_list:
The existing flattened list that has been populated from previous
calls of this reducer function
:param entry:
An entry to be reduced. Either a COMPONENT instance or a list/tuple
of COMPONENT instances
:return:
The flattened list with the entry flatly added to it | entailment |
def combine_lists_reducer(
key: str,
merged_list: list,
component: COMPONENT
) -> list:
"""
Reducer function to combine the lists for the specified key into a
single, flat list
:param key:
The key on the COMPONENT instances to operate upon
:param merged_list:
The accumulated list of values populated by previous calls to this
reducer function
:param component:
The COMPONENT instance from which to append values to the
merged_list
:return:
The updated merged_list with the values for the COMPONENT added
onto it
"""
merged_list.extend(getattr(component, key))
return merged_list | Reducer function to combine the lists for the specified key into a
single, flat list
:param key:
The key on the COMPONENT instances to operate upon
:param merged_list:
The accumulated list of values populated by previous calls to this
reducer function
:param component:
The COMPONENT instance from which to append values to the
merged_list
:return:
The updated merged_list with the values for the COMPONENT added
onto it | entailment |
def listen(self):
'''Listen for events as they come in'''
try:
self._pubsub.subscribe(self._channels)
for message in self._pubsub.listen():
if message['type'] == 'message':
yield message
finally:
self._channels = [] | Listen for events as they come in | entailment |
def thread(self):
'''Run in a thread'''
thread = threading.Thread(target=self.listen)
thread.start()
try:
yield self
finally:
self.unlisten()
thread.join() | Run in a thread | entailment |
def listen(self):
'''Listen for events'''
for message in Listener.listen(self):
logger.debug('Message: %s', message)
# Strip off the 'namespace' from the channel
channel = message['channel'][len(self.namespace):]
func = self._callbacks.get(channel)
if func:
func(message['data']) | Listen for events | entailment |
def on(self, evt, func):
'''Set a callback handler for a pubsub event'''
if evt not in self._callbacks:
raise NotImplementedError('callback "%s"' % evt)
else:
self._callbacks[evt] = func | Set a callback handler for a pubsub event | entailment |
def get(self, option, default=None):
'''Get a particular option, or the default if it's missing'''
val = self[option]
return (val is None and default) or val | Get a particular option, or the default if it's missing | entailment |
def pop(self, option, default=None):
'''Just like `dict.pop`'''
val = self[option]
del self[option]
return (val is None and default) or val | Just like `dict.pop` | entailment |
def update(self, other=(), **kwargs):
'''Just like `dict.update`'''
_kwargs = dict(kwargs)
_kwargs.update(other)
for key, value in _kwargs.items():
self[key] = value | Just like `dict.update` | entailment |
def touch_project():
"""
Touches the project to trigger refreshing its cauldron.json state.
"""
r = Response()
project = cd.project.get_internal_project()
if project:
project.refresh()
else:
r.fail(
code='NO_PROJECT',
message='No open project to refresh'
)
return r.update(
sync_time=sync_status.get('time', 0)
).flask_serialize() | Touches the project to trigger refreshing its cauldron.json state. | entailment |
def fetch_synchronize_status():
"""
Returns the synchronization status information for the currently opened
project
"""
r = Response()
project = cd.project.get_internal_project()
if not project:
r.fail(
code='NO_PROJECT',
message='No open project on which to retrieve status'
)
else:
with open(project.source_path, 'r') as f:
definition = json.load(f)
result = status.of_project(project)
r.update(
sync_time=sync_status.get('time', 0),
source_directory=project.source_directory,
remote_source_directory=project.remote_source_directory,
status=result,
definition=definition
)
return r.flask_serialize() | Returns the synchronization status information for the currently opened
project | entailment |
def download_file(filename: str):
""" downloads the specified project file if it exists """
project = cd.project.get_internal_project()
source_directory = project.source_directory if project else None
if not filename or not project or not source_directory:
return '', 204
path = os.path.realpath(os.path.join(
source_directory,
'..',
'__cauldron_downloads',
filename
))
if not os.path.exists(path):
return '', 204
return flask.send_file(path, mimetype=mimetypes.guess_type(path)[0]) | downloads the specified project file if it exists | entailment |
def get_project_source_path(path: str) -> str:
"""
Converts the given path into a project source path, to the cauldron.json
file. If the path already points to a cauldron.json file, the path is
returned without modification.
:param path:
The path to convert into a project source path
"""
path = environ.paths.clean(path)
if not path.endswith('cauldron.json'):
return os.path.join(path, 'cauldron.json')
return path | Converts the given path into a project source path, to the cauldron.json
file. If the path already points to a cauldron.json file, the path is
returned without modification.
:param path:
The path to convert into a project source path | entailment |
def load_project_definition(path: str) -> dict:
"""
Load the cauldron.json project definition file for the given path. The
path can be either a source path to the cauldron.json file or the source
directory where a cauldron.json file resides.
:param path:
The source path or directory where the definition file will be loaded
"""
source_path = get_project_source_path(path)
if not os.path.exists(source_path):
raise FileNotFoundError('Missing project file: {}'.format(source_path))
with open(source_path, 'r') as f:
out = json.load(f)
project_folder = os.path.split(os.path.dirname(source_path))[-1]
if 'id' not in out or not out['id']:
out['id'] = project_folder
return out | Load the cauldron.json project definition file for the given path. The
path can be either a source path to the cauldron.json file or the source
directory where a cauldron.json file resides.
:param path:
The source path or directory where the definition file will be loaded | entailment |
def simplify_path(path: str, path_prefixes: list = None) -> str:
"""
Simplifies package paths by replacing path prefixes with values specified
in the replacements list
:param path:
:param path_prefixes:
:return:
"""
test_path = '{}'.format(path if path else '')
replacements = (path_prefixes if path_prefixes else []).copy()
replacements.append(('~', os.path.expanduser('~')))
for key, value in replacements:
if test_path.startswith(value):
return '{}{}'.format(key, test_path[len(value):])
return test_path | Simplifies package paths by replacing path prefixes with values specified
in the replacements list
:param path:
:param path_prefixes:
:return: | entailment |
def module_to_package_data(
name: str,
entry,
path_prefixes: list = None
) -> typing.Union[dict, None]:
"""
Converts a module entry into a package data dictionary with information
about the module, including version and location on disk
:param name:
:param entry:
:param path_prefixes:
:return:
"""
if name.find('.') > -1:
# Not interested in sub-packages, only root ones
return None
version = getattr(entry, '__version__', None)
version = version if not hasattr(version, 'version') else version.version
location = getattr(entry, '__file__', sys.exec_prefix)
if version is None or location.startswith(sys.exec_prefix):
# Not interested in core packages. They obviously are standard and
# don't need to be included in an output.
return None
return dict(
name=name,
version=version,
location=simplify_path(location, path_prefixes)
) | Converts a module entry into a package data dictionary with information
about the module, including version and location on disk
:param name:
:param entry:
:param path_prefixes:
:return: | entailment |
def get_system_data() -> typing.Union[None, dict]:
"""
Returns information about the system in which Cauldron is running.
If the information cannot be found, None is returned instead.
:return:
Dictionary containing information about the Cauldron system, which
includes:
* name
* location
* version
"""
site_packages = get_site_packages()
path_prefixes = [('[SP]', p) for p in site_packages]
path_prefixes.append(('[CORE]', sys.exec_prefix))
packages = [
module_to_package_data(name, entry, path_prefixes)
for name, entry in list(sys.modules.items())
]
python_data = dict(
version=list(sys.version_info),
executable=simplify_path(sys.executable),
directory=simplify_path(sys.exec_prefix),
site_packages=[simplify_path(sp) for sp in site_packages]
)
return dict(
python=python_data,
packages=[p for p in packages if p is not None]
) | Returns information about the system in which Cauldron is running.
If the information cannot be found, None is returned instead.
:return:
Dictionary containing information about the Cauldron system, which
includes:
* name
* location
* version | entailment |
def remove(path: str, max_retries: int = 3) -> bool:
"""
Removes the specified path from the local filesystem if it exists.
Directories will be removed along with all files and folders within
them as well as files.
:param path:
The location of the file or folder to remove.
:param max_retries:
The number of times to retry before giving up.
:return:
A boolean indicating whether or not the removal was successful.
"""
if not path:
return False
if not os.path.exists(path):
return True
remover = os.remove if os.path.isfile(path) else shutil.rmtree
for attempt in range(max_retries):
try:
remover(path)
return True
except Exception:
# Pause briefly in case there's a race condition on lock
# for the target.
time.sleep(0.02)
return False | Removes the specified path from the local filesystem if it exists.
Directories will be removed along with all files and folders within
them as well as files.
:param path:
The location of the file or folder to remove.
:param max_retries:
The number of times to retry before giving up.
:return:
A boolean indicating whether or not the removal was successful. | entailment |
def end(code: int):
"""
Ends the application with the specified error code, adding whitespace to
the end of the console log output for clarity
:param code:
The integer status code to apply on exit. If the value is non-zero,
indicating an error, a message will be printed to the console to
inform the user that the application exited in error
"""
print('\n')
if code != 0:
log('Failed with status code: {}'.format(code), whitespace=1)
sys.exit(code) | Ends the application with the specified error code, adding whitespace to
the end of the console log output for clarity
:param code:
The integer status code to apply on exit. If the value is non-zero,
indicating an error, a message will be printed to the console to
inform the user that the application exited in error | entailment |
def folder(self) -> typing.Union[str, None]:
"""
The folder, relative to the project source_directory, where the file
resides
:return:
"""
if 'folder' in self.data:
return self.data.get('folder')
elif self.project_folder:
if callable(self.project_folder):
return self.project_folder()
else:
return self.project_folder
return None | The folder, relative to the project source_directory, where the file
resides
:return: | entailment |
def project_exists(response: 'environ.Response', path: str) -> bool:
"""
Determines whether or not a project exists at the specified path
:param response:
:param path:
:return:
"""
if os.path.exists(path):
return True
response.fail(
code='PROJECT_NOT_FOUND',
message='The project path does not exist',
path=path
).console(
"""
[ERROR]: Unable to open project. The specified path does not exist:
{path}
""".format(path=path)
)
return False | Determines whether or not a project exists at the specified path
:param response:
:param path:
:return: | entailment |
def update_recent_paths(response, path):
"""
:param response:
:param path:
:return:
"""
try:
recent_paths = environ.configs.fetch('recent_paths', [])
if path in recent_paths:
recent_paths.remove(path)
recent_paths.insert(0, path)
environ.configs.put(recent_paths=recent_paths[:10], persists=True)
environ.configs.save()
except Exception as error: # pragma: no cover
response.warn(
code='FAILED_RECENT_UPDATE',
message='Unable to update recently opened projects',
error=str(error)
).console(whitespace=1)
return True | :param response:
:param path:
:return: | entailment |
def split_line(line: str) -> typing.Tuple[str, str]:
"""
Separates the raw line string into two strings: (1) the command and (2) the
argument(s) string
:param line:
:return:
"""
index = line.find(' ')
if index == -1:
return line.lower(), ''
return line[:index].lower(), line[index:].strip() | Separates the raw line string into two strings: (1) the command and (2) the
argument(s) string
:param line:
:return: | entailment |
def render_stop_display(step: 'projects.ProjectStep', message: str):
"""Renders a stop action to the Cauldron display."""
stack = render_stack.get_formatted_stack_frame(
project=step.project,
error_stack=False
)
try:
names = [frame['filename'] for frame in stack]
index = names.index(os.path.realpath(__file__))
frame = stack[index - 1]
except Exception:
frame = {}
stop_message = (
'{}'.format(message)
if message else
'This step was explicitly stopped prior to its completion'
)
dom = templating.render_template(
'step-stop.html',
message=stop_message,
frame=frame
)
step.report.append_body(dom) | Renders a stop action to the Cauldron display. | entailment |
def id(self) -> typing.Union[str, None]:
"""Identifier for the project."""
return self._project.id if self._project else None | Identifier for the project. | entailment |
def display(self) -> typing.Union[None, report.Report]:
"""The display report for the current project."""
return (
self._project.current_step.report
if self._project and self._project.current_step else
None
) | The display report for the current project. | entailment |
def shared(self) -> typing.Union[None, SharedCache]:
"""The shared display object associated with this project."""
return self._project.shared if self._project else None | The shared display object associated with this project. | entailment |
def settings(self) -> typing.Union[None, SharedCache]:
"""The settings associated with this project."""
return self._project.settings if self._project else None | The settings associated with this project. | entailment |
def title(self) -> typing.Union[None, str]:
"""The title of this project."""
return self._project.title if self._project else None | The title of this project. | entailment |
def title(self, value: typing.Union[None, str]):
"""
Modifies the title of the project, which is initially loaded from the
`cauldron.json` file.
"""
if not self._project:
raise RuntimeError('Failed to assign title to an unloaded project')
self._project.title = value | Modifies the title of the project, which is initially loaded from the
`cauldron.json` file. | entailment |
def load(self, project: typing.Union[projects.Project, None]):
"""Connects this object to the specified source project."""
self._project = project | Connects this object to the specified source project. | entailment |
def path(self, *args: typing.List[str]) -> typing.Union[None, str]:
"""
Creates an absolute path in the project source directory from the
relative path components.
:param args:
Relative components for creating a path within the project source
directory
:return:
An absolute path to the specified file or directory within the
project source directory.
"""
if not self._project:
return None
return environ.paths.clean(os.path.join(
self._project.source_directory,
*args
)) | Creates an absolute path in the project source directory from the
relative path components.
:param args:
Relative components for creating a path within the project source
directory
:return:
An absolute path to the specified file or directory within the
project source directory. | entailment |
def stop(self, message: str = None, silent: bool = False):
"""
Stops the execution of the project at the current step immediately
without raising an error. Use this to abort running the project in
situations where some critical branching action should prevent the
project from continuing to run.
:param message:
A custom display message to include in the display for the stop
action. This message is ignored if silent is set to True.
:param silent:
When True nothing will be shown in the notebook display when the
step is stopped. When False, the notebook display will include
information relating to the stopped action.
"""
me = self.get_internal_project()
if not me or not me.current_step:
return
if not silent:
render_stop_display(me.current_step, message)
raise UserAbortError(halt=True) | Stops the execution of the project at the current step immediately
without raising an error. Use this to abort running the project in
situations where some critical branching action should prevent the
project from continuing to run.
:param message:
A custom display message to include in the display for the stop
action. This message is ignored if silent is set to True.
:param silent:
When True nothing will be shown in the notebook display when the
step is stopped. When False, the notebook display will include
information relating to the stopped action. | entailment |
def get_internal_project(
self,
timeout: float = 1
) -> typing.Union['projects.Project', None]:
"""
Attempts to return the internally loaded project. This function
prevents race condition issues where projects are loaded via threads
because the internal loop will try to continuously load the internal
project until it is available or until the timeout is reached.
:param timeout:
Maximum number of seconds to wait before giving up and returning
None.
"""
count = int(timeout / 0.1)
for _ in range(count):
project = self.internal_project
if project:
return project
time.sleep(0.1)
return self.internal_project | Attempts to return the internally loaded project. This function
prevents race condition issues where projects are loaded via threads
because the internal loop will try to continuously load the internal
project until it is available or until the timeout is reached.
:param timeout:
Maximum number of seconds to wait before giving up and returning
None. | entailment |
def _step(self) -> typing.Union[None, 'projects.ProjectStep']:
"""
Internal access to the source step. Should not be used outside
of Cauldron development.
:return:
The ProjectStep instance that this ExposedStep represents
"""
import cauldron
try:
return cauldron.project.get_internal_project().current_step
except Exception:
return None | Internal access to the source step. Should not be used outside
of Cauldron development.
:return:
The ProjectStep instance that this ExposedStep represents | entailment |
def stop(
self,
message: str = None,
silent: bool = False,
halt: bool = False
):
"""
Stops the execution of the current step immediately without raising
an error. Use this to abort the step running process if you want
to return early.
:param message:
A custom display message to include in the display for the stop
action. This message is ignored if silent is set to True.
:param silent:
When True nothing will be shown in the notebook display when the
step is stopped. When False, the notebook display will include
information relating to the stopped action.
:param halt:
Whether or not to keep running other steps in the project after
this step has been stopped. By default this is False and after this
stops running, future steps in the project will continue running
if they've been queued to run. If you want stop execution entirely,
set this value to True and the current run command will be aborted
entirely.
"""
step = self._step
if not step:
return
if not silent:
render_stop_display(step, message)
raise UserAbortError(halt=halt) | Stops the execution of the current step immediately without raising
an error. Use this to abort the step running process if you want
to return early.
:param message:
A custom display message to include in the display for the stop
action. This message is ignored if silent is set to True.
:param silent:
When True nothing will be shown in the notebook display when the
step is stopped. When False, the notebook display will include
information relating to the stopped action.
:param halt:
Whether or not to keep running other steps in the project after
this step has been stopped. By default this is False and after this
stops running, future steps in the project will continue running
if they've been queued to run. If you want stop execution entirely,
set this value to True and the current run command will be aborted
entirely. | entailment |
def write_to_console(self, message: str):
"""
Writes the specified message to the console stdout without including
it in the notebook display.
"""
if not self._step:
raise ValueError(
'Cannot write to the console stdout on an uninitialized step'
)
interceptor = self._step.report.stdout_interceptor
interceptor.write_source('{}'.format(message)) | Writes the specified message to the console stdout without including
it in the notebook display. | entailment |
def render_to_console(self, message: str, **kwargs):
"""
Renders the specified message to the console using Jinja2 template
rendering with the kwargs as render variables. The message will also
be dedented prior to rendering in the same fashion as other Cauldron
template rendering actions.
:param message:
Template string to be rendered.
:param kwargs:
Variables to be used in rendering the template.
"""
rendered = templating.render(message, **kwargs)
return self.write_to_console(rendered) | Renders the specified message to the console using Jinja2 template
rendering with the kwargs as render variables. The message will also
be dedented prior to rendering in the same fashion as other Cauldron
template rendering actions.
:param message:
Template string to be rendered.
:param kwargs:
Variables to be used in rendering the template. | entailment |
def get_operations():
"""
This will break things if you upgrade Django to 1.8 having already applied this migration in 1.7.
Since this is for a demo site it doesn't really matter (simply blow away the DB if you want to go to 1.8)
Our demo site is a unusual in that we want to run it's tests (for integration testing) in multiple Django versions.
Typical sites don't have to worry about that sort of thing.
"""
compatible = (1, 8) <= DJANGO_VERSION < (1, 10)
if not compatible:
return []
return [
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
] | This will break things if you upgrade Django to 1.8 having already applied this migration in 1.7.
Since this is for a demo site it doesn't really matter (simply blow away the DB if you want to go to 1.8)
Our demo site is a unusual in that we want to run it's tests (for integration testing) in multiple Django versions.
Typical sites don't have to worry about that sort of thing. | entailment |
def create(project: 'projects.Project') -> COMPONENT:
"""
:return:
"""
try:
from bokeh.resources import Resources as BokehResources
bokeh_resources = BokehResources(mode='absolute')
except Exception:
bokeh_resources = None
if bokeh_resources is None:
environ.log(BOKEH_WARNING)
return COMPONENT([], [])
return definitions.merge_components(
_assemble_component(
project,
'bokeh-css',
['bokeh', 'bokeh.css'],
bokeh_resources.css_files
),
_assemble_component(
project,
'bokeh-js',
['bokeh', 'bokeh.js'],
bokeh_resources.js_files
)
) | :return: | entailment |
def last_update_time(self) -> float:
"""The last time at which the report was modified."""
stdout = self.stdout_interceptor
stderr = self.stderr_interceptor
return max([
self._last_update_time,
stdout.last_write_time if stdout else 0,
stderr.last_write_time if stderr else 0,
]) | The last time at which the report was modified. | entailment |
def results_cache_path(self) -> str:
"""
Location where step report is cached between sessions to
prevent loss of display data between runs.
"""
if not self.project:
return ''
return os.path.join(
self.project.results_path,
'.cache',
'steps',
'{}.json'.format(self.id)
) | Location where step report is cached between sessions to
prevent loss of display data between runs. | entailment |
def clear(self) -> 'Report':
"""
Clear all user-data stored in this instance and reset it to its
originally loaded state
:return:
The instance that was called for method chaining
"""
self.body = []
self.data = SharedCache()
self.files = SharedCache()
self._last_update_time = time.time()
return self | Clear all user-data stored in this instance and reset it to its
originally loaded state
:return:
The instance that was called for method chaining | entailment |
def append_body(self, dom: str):
"""
Appends the specified HTML-formatted DOM string to the
currently stored report body for the step.
"""
self.flush_stdout()
self.body.append(dom)
self._last_update_time = time.time() | Appends the specified HTML-formatted DOM string to the
currently stored report body for the step. | entailment |
def read_stdout(self):
"""
Reads the current state of the print buffer (if it exists) and returns
a body-ready dom object of those contents without adding them to the
actual report body. This is useful for creating intermediate body
values for display while the method is still executing.
:return:
A dom string for the current state of the print buffer contents
"""
try:
contents = self.stdout_interceptor.read_all()
except Exception as err:
contents = ''
return render_texts.preformatted_text(contents) | Reads the current state of the print buffer (if it exists) and returns
a body-ready dom object of those contents without adding them to the
actual report body. This is useful for creating intermediate body
values for display while the method is still executing.
:return:
A dom string for the current state of the print buffer contents | entailment |
def flush_stdout(self):
"""
Empties the standard out redirect buffer and renders the
contents to the body as a preformatted text box.
"""
try:
contents = self.stdout_interceptor.flush_all()
except Exception:
return
if len(contents) > 0:
self.body.append(render_texts.preformatted_text(contents))
self._last_update_time = time.time()
return contents | Empties the standard out redirect buffer and renders the
contents to the body as a preformatted text box. | entailment |
def import_class(klass):
'''Import the named class and return that class'''
mod = __import__(klass.rpartition('.')[0])
for segment in klass.split('.')[1:-1]:
mod = getattr(mod, segment)
return getattr(mod, klass.rpartition('.')[2]) | Import the named class and return that class | entailment |
def create(
project: 'projects.Project',
include_path: str
) -> COMPONENT:
"""
Creates a COMPONENT instance for the project component specified by the
include path
:param project:
The project in which the component resides
:param include_path:
The relative path within the project where the component resides
:return:
The created COMPONENT instance
"""
source_path = environ.paths.clean(
os.path.join(project.source_directory, include_path)
)
if not os.path.exists(source_path):
return COMPONENT([], [])
if os.path.isdir(source_path):
glob_path = os.path.join(source_path, '**', '*')
include_paths = glob.iglob(glob_path, recursive=True)
else:
include_paths = [source_path]
destination_path = os.path.join(project.output_directory, include_path)
return COMPONENT(
includes=filter(
lambda web_include: web_include is not None,
map(functools.partial(to_web_include, project), include_paths)
),
files=[file_io.FILE_COPY_ENTRY(
source=source_path,
destination=destination_path
)]
) | Creates a COMPONENT instance for the project component specified by the
include path
:param project:
The project in which the component resides
:param include_path:
The relative path within the project where the component resides
:return:
The created COMPONENT instance | entailment |
def create_many(
project: 'projects.Project',
include_paths: typing.List[str]
) -> COMPONENT:
"""
Creates a single COMPONENT instance for all of the specified project
include paths
:param project:
Project where the components reside
:param include_paths:
A list of relative paths within the project directory to files or
directories that should be included in the project
:return:
The combined COMPONENT instance for all of the included paths
"""
return definitions.merge_components(*map(
functools.partial(create, project),
include_paths
)) | Creates a single COMPONENT instance for all of the specified project
include paths
:param project:
Project where the components reside
:param include_paths:
A list of relative paths within the project directory to files or
directories that should be included in the project
:return:
The combined COMPONENT instance for all of the included paths | entailment |
def to_web_include(
project: 'projects.Project',
file_path: str
) -> WEB_INCLUDE:
"""
Converts the given file_path into a WEB_INCLUDE instance that represents
the deployed version of this file to be loaded into the results project
page
:param project:
Project in which the file_path resides
:param file_path:
Absolute path to the source file for which the WEB_INCLUDE instance
will be created
:return:
The WEB_INCLUDE instance that represents the given source file
"""
if not file_path.endswith('.css') and not file_path.endswith('.js'):
return None
slug = file_path[len(project.source_directory):]
url = '/{}' \
.format(slug) \
.replace('\\', '/') \
.replace('//', '/')
return WEB_INCLUDE(name=':project:{}'.format(url), src=url) | Converts the given file_path into a WEB_INCLUDE instance that represents
the deployed version of this file to be loaded into the results project
page
:param project:
Project in which the file_path resides
:param file_path:
Absolute path to the source file for which the WEB_INCLUDE instance
will be created
:return:
The WEB_INCLUDE instance that represents the given source file | entailment |
def attempt_file_write(
path: str,
contents: typing.Union[str, bytes],
mode: str = 'w',
offset: int = 0
) -> typing.Union[None, Exception]:
"""
Attempts to write the specified contents to a file and returns None if
successful, or the raised exception if writing failed.
:param path:
The path to the file that will be written
:param contents:
The contents of the file to write
:param mode:
The mode in which the file will be opened when written
:param offset:
The byte offset in the file where the contents should be written.
If the value is zero, the offset information will be ignored and
the operation will write entirely based on mode. Note that if you
indicate an append write mode and an offset, the mode will be forced
to write instead of append.
:return:
None if the write operation succeeded. Otherwise, the exception that
was raised by the failed write action.
"""
try:
data = contents.encode()
except Exception:
data = contents
if offset > 0:
with open(path, 'rb') as f:
existing = f.read(offset)
else:
existing = None
append = 'a' in mode
write_mode = 'wb' if offset > 0 or not append else 'ab'
try:
with open(path, write_mode) as f:
if existing is not None:
f.write(existing)
f.write(data)
return None
except Exception as error:
return error | Attempts to write the specified contents to a file and returns None if
successful, or the raised exception if writing failed.
:param path:
The path to the file that will be written
:param contents:
The contents of the file to write
:param mode:
The mode in which the file will be opened when written
:param offset:
The byte offset in the file where the contents should be written.
If the value is zero, the offset information will be ignored and
the operation will write entirely based on mode. Note that if you
indicate an append write mode and an offset, the mode will be forced
to write instead of append.
:return:
None if the write operation succeeded. Otherwise, the exception that
was raised by the failed write action. | entailment |
def write_file(
path: str,
contents,
mode: str = 'w',
retry_count: int = 3,
offset: int = 0
) -> typing.Tuple[bool, typing.Union[None, Exception]]:
"""
Writes the specified contents to a file, with retry attempts if the write
operation fails. This is useful to prevent OS related write collisions with
files that are regularly written to and read from quickly.
:param path:
The path to the file that will be written
:param contents:
The contents of the file to write
:param mode:
The mode in which the file will be opened when written
:param retry_count:
The number of attempts to make before giving up and returning a
failed write.
:param offset:
The byte offset in the file where the contents should be written.
If the value is zero, the offset information will be ignored and the
operation will write entirely based on mode. Note that if you indicate
an append write mode and an offset, the mode will be forced to write
instead of append.
:return:
Returns two arguments. The first is a boolean specifying whether or
not the write operation succeeded. The second is the error result, which
is None if the write operation succeeded. Otherwise, it will be the
exception that was raised by the last failed write attempt.
"""
error = None
for i in range(retry_count):
error = attempt_file_write(path, contents, mode, offset)
if error is None:
return True, None
time.sleep(0.2)
return False, error | Writes the specified contents to a file, with retry attempts if the write
operation fails. This is useful to prevent OS related write collisions with
files that are regularly written to and read from quickly.
:param path:
The path to the file that will be written
:param contents:
The contents of the file to write
:param mode:
The mode in which the file will be opened when written
:param retry_count:
The number of attempts to make before giving up and returning a
failed write.
:param offset:
The byte offset in the file where the contents should be written.
If the value is zero, the offset information will be ignored and the
operation will write entirely based on mode. Note that if you indicate
an append write mode and an offset, the mode will be forced to write
instead of append.
:return:
Returns two arguments. The first is a boolean specifying whether or
not the write operation succeeded. The second is the error result, which
is None if the write operation succeeded. Otherwise, it will be the
exception that was raised by the last failed write attempt. | entailment |
def attempt_json_write(
path: str,
contents: dict,
mode: str = 'w'
) -> typing.Union[None, Exception]:
"""
Attempts to write the specified JSON content to file.
:param path:
The path to the file where the JSON serialized content will be written.
:param contents:
The JSON data to write to the file
:param mode:
The mode used to open the file where the content will be written.
:return:
None if the write operation succeeded. Otherwise, the exception that
was raised by the failed write operation.
"""
try:
with open(path, mode) as f:
json.dump(contents, f)
return None
except Exception as error:
return error | Attempts to write the specified JSON content to file.
:param path:
The path to the file where the JSON serialized content will be written.
:param contents:
The JSON data to write to the file
:param mode:
The mode used to open the file where the content will be written.
:return:
None if the write operation succeeded. Otherwise, the exception that
was raised by the failed write operation. | entailment |
def write_json_file(
path: str,
contents: dict,
mode: str = 'w',
retry_count: int = 3
) -> typing.Tuple[bool, typing.Union[None, Exception]]:
"""
Writes the specified dictionary to a file as a JSON-serialized string,
with retry attempts if the write operation fails. This is useful to prevent
OS related write collisions with files that are regularly written to and
read from quickly.
:param path:
The path to the file that will be written
:param contents:
The contents of the file to write
:param mode:
The mode in which the file will be opened when written
:param retry_count:
The number of attempts to make before giving up and returning a
failed write.
:return:
Returns two arguments. The first is a boolean specifying whether or
not the write operation succeeded. The second is the error result, which
is None if the write operation succeeded. Otherwise, it will be the
exception that was raised by the last failed write attempt.
"""
error = None
for i in range(retry_count):
error = attempt_json_write(path, contents, mode)
if error is None:
return True, None
time.sleep(0.2)
return False, error | Writes the specified dictionary to a file as a JSON-serialized string,
with retry attempts if the write operation fails. This is useful to prevent
OS related write collisions with files that are regularly written to and
read from quickly.
:param path:
The path to the file that will be written
:param contents:
The contents of the file to write
:param mode:
The mode in which the file will be opened when written
:param retry_count:
The number of attempts to make before giving up and returning a
failed write.
:return:
Returns two arguments. The first is a boolean specifying whether or
not the write operation succeeded. The second is the error result, which
is None if the write operation succeeded. Otherwise, it will be the
exception that was raised by the last failed write attempt. | entailment |
def reformat(source: str) -> str:
"""
Formats the source string to strip newlines on both ends and dedents the
the entire string
:param source:
The string to reformat
"""
value = source if source else ''
return dedent(value.strip('\n')).strip() | Formats the source string to strip newlines on both ends and dedents the
the entire string
:param source:
The string to reformat | entailment |
def get_environment() -> Environment:
"""
Returns the jinja2 templating environment updated with the most recent
cauldron environment configurations
:return:
"""
env = JINJA_ENVIRONMENT
loader = env.loader
resource_path = environ.configs.make_path(
'resources', 'templates',
override_key='template_path'
)
if not loader:
env.filters['id'] = get_id
env.filters['latex'] = get_latex
if not loader or resource_path not in loader.searchpath:
env.loader = FileSystemLoader(resource_path)
return env | Returns the jinja2 templating environment updated with the most recent
cauldron environment configurations
:return: | entailment |
def render(template: typing.Union[str, Template], **kwargs):
"""
Renders a template string using Jinja2 and the Cauldron templating
environment.
:param template:
The string containing the template to be rendered
:param kwargs:
Any named arguments to pass to Jinja2 for use in rendering
:return:
The rendered template string
"""
if not hasattr(template, 'render'):
template = get_environment().from_string(textwrap.dedent(template))
return template.render(
cauldron_template_uid=make_template_uid(),
**kwargs
) | Renders a template string using Jinja2 and the Cauldron templating
environment.
:param template:
The string containing the template to be rendered
:param kwargs:
Any named arguments to pass to Jinja2 for use in rendering
:return:
The rendered template string | entailment |
def render_file(path: str, **kwargs):
"""
Renders a file at the specified absolute path. The file can reside
anywhere on the local disk as Cauldron's template environment path
searching is ignored.
:param path:
Absolute path to a template file to render
:param kwargs:
Named arguments that should be passed to Jinja2 for rendering
:return:
The rendered template string
"""
with open(path, 'r') as f:
contents = f.read()
return get_environment().from_string(contents).render(
cauldron_template_uid=make_template_uid(),
**kwargs
) | Renders a file at the specified absolute path. The file can reside
anywhere on the local disk as Cauldron's template environment path
searching is ignored.
:param path:
Absolute path to a template file to render
:param kwargs:
Named arguments that should be passed to Jinja2 for rendering
:return:
The rendered template string | entailment |
def render_template(template_name: str, **kwargs):
"""
Renders the template file with the given filename from within Cauldron's
template environment folder.
:param template_name:
The filename of the template to render. Any path elements should be
relative to Cauldron's root template folder.
:param kwargs:
Any elements passed to Jinja2 for rendering the template
:return:
The rendered string
"""
return get_environment().get_template(template_name).render(
cauldron_template_uid=make_template_uid(),
**kwargs
) | Renders the template file with the given filename from within Cauldron's
template environment folder.
:param template_name:
The filename of the template to render. Any path elements should be
relative to Cauldron's root template folder.
:param kwargs:
Any elements passed to Jinja2 for rendering the template
:return:
The rendered string | entailment |
def clean(path: str) -> str:
"""
Cleans the specified path by expanding shorthand elements, redirecting to
the real path for symbolic links, and removing any relative components to
return a complete, absolute path to the specified location.
:param path:
The source path to be cleaned
"""
if not path or path == '.':
path = os.curdir
if path.startswith('~'):
path = os.path.expanduser(path)
return os.path.realpath(os.path.abspath(path)) | Cleans the specified path by expanding shorthand elements, redirecting to
the real path for symbolic links, and removing any relative components to
return a complete, absolute path to the specified location.
:param path:
The source path to be cleaned | entailment |
def package(*args: str) -> str:
"""
Creates an absolute path to a file or folder within the cauldron package
using the relative path elements specified by the args.
:param args:
Zero or more relative path elements that describe a file or folder
within the reporting
"""
return clean(os.path.join(os.path.dirname(__file__), '..', *args)) | Creates an absolute path to a file or folder within the cauldron package
using the relative path elements specified by the args.
:param args:
Zero or more relative path elements that describe a file or folder
within the reporting | entailment |
def confirm(question: str, default: bool = True) -> bool:
"""
Requests confirmation of the specified question and returns that result
:param question:
The question to print to the console for the confirmation
:param default:
The default value if the user hits enter without entering a value
"""
result = input('{question} [{yes}/{no}]:'.format(
question=question,
yes='(Y)' if default else 'Y',
no='N' if default else '(N)'
))
if not result:
return default
if result[0].lower() in ['y', 't', '1']:
return True
return False | Requests confirmation of the specified question and returns that result
:param question:
The question to print to the console for the confirmation
:param default:
The default value if the user hits enter without entering a value | entailment |
def fetch_last(response: Response) -> typing.Union[str, None]:
""" Returns the last opened project path if such a path exists """
recent_paths = environ.configs.fetch('recent_paths', [])
if len(recent_paths) < 1:
response.fail(
code='NO_RECENT_PROJECTS',
message='No projects have been opened recently'
).console()
return None
return recent_paths[0] | Returns the last opened project path if such a path exists | entailment |
def before_save(file_or_dir):
"""
make sure that the dedicated path exists (create if not exist)
:param file_or_dir:
:return: None
"""
dir_name = os.path.dirname(os.path.abspath(file_or_dir))
if not os.path.exists(dir_name):
os.makedirs(dir_name) | make sure that the dedicated path exists (create if not exist)
:param file_or_dir:
:return: None | entailment |
def categorical2pysbrl_data(
x,
y,
data_filename,
label_filename,
method='eclat',
supp=0.05,
zmin=1,
zmax=3):
"""
Run a frequent item mining algorithm to extract candidate rules.
:param x: 2D np.ndarray, categorical data of shape [n_instances, n_features]
:param y: 1D np.ndarray, label array of shape [n_instances, ]
:param data_filename: the path to store data file
:param label_filename: the path to store label file
:param method: a str denoting the method to use, default to 'eclat'
:param supp: the minimum support of a rule (item)
:param zmin:
:param zmax:
:return:
"""
# Safely cast data types
x = x.astype(np.int, casting='safe')
y = y.astype(np.int, casting='safe')
labels = np.unique(y)
labels = np.arange(np.max(labels) + 1)
# assert max(labels) + 1 == len(labels)
mine = get_fim_method(method)
x_by_labels = []
for label in labels:
x_by_labels.append(x[y == label])
transactions_by_labels = [categorical2transactions(_x) for _x in x_by_labels]
itemsets = transactions2freqitems(transactions_by_labels, mine, supp=supp, zmin=zmin, zmax=zmax)
rules = [itemset2feature_categories(itemset) for itemset in itemsets]
data_by_rule = []
for features, categories in rules:
satisfied = rule_satisfied(x, features, categories)
data_by_rule.append(satisfied)
# Write data file
# data_filename = get_path(_datasets_path, data_name+'.data')
before_save(data_filename)
with open(data_filename, 'w') as f:
f.write('n_items: %d\n' % len(itemsets))
f.write('n_samples: %d\n' % len(y))
for itemset, data in zip(itemsets, data_by_rule):
rule_str = '{' + ','.join(itemset) + '}' + ' '
f.write(rule_str)
bit_s = ' '.join(['1' if bit else '0' for bit in data])
f.write(bit_s)
f.write('\n')
# Write label file
# label_filename = get_path(_datasets_path, data_name+'.label')
before_save(label_filename)
with open(label_filename, 'w') as f:
f.write('n_items: %d\n' % len(labels))
f.write('n_samples: %d\n' % len(y))
for label in labels:
f.write('{label=%d} ' % label)
bits = np.equal(y, label)
bit_s = ' '.join(['1' if bit else '0' for bit in bits])
f.write(bit_s)
f.write('\n')
return rules | Run a frequent item mining algorithm to extract candidate rules.
:param x: 2D np.ndarray, categorical data of shape [n_instances, n_features]
:param y: 1D np.ndarray, label array of shape [n_instances, ]
:param data_filename: the path to store data file
:param label_filename: the path to store label file
:param method: a str denoting the method to use, default to 'eclat'
:param supp: the minimum support of a rule (item)
:param zmin:
:param zmax:
:return: | entailment |
def categorical2transactions(x):
# type: (np.ndarray) -> List
"""
Convert a 2D int array into a transaction list:
[
['x0=1', 'x1=0', ...],
...
]
:param x:
:return:
"""
assert len(x.shape) == 2
transactions = []
for entry in x:
transactions.append(['x%d=%d' % (i, val) for i, val in enumerate(entry)])
return transactions | Convert a 2D int array into a transaction list:
[
['x0=1', 'x1=0', ...],
...
]
:param x:
:return: | entailment |
def rule_satisfied(x, features, categories):
"""
return a logical array representing whether entries in x satisfied the rules denoted by features and categories
:param x: a categorical 2D array
:param features: a list of feature indices
:param categories: a list of categories
:return:
"""
satisfied = []
if features[0] == -1 and len(features) == 1:
# Default rule, all satisfied
return np.ones(x.shape[0], dtype=bool)
for idx, cat in zip(features, categories):
# Every single condition needs to be satisfied.
satisfied.append(x[:, idx] == cat)
return functools.reduce(np.logical_and, satisfied) | return a logical array representing whether entries in x satisfied the rules denoted by features and categories
:param x: a categorical 2D array
:param features: a list of feature indices
:param categories: a list of categories
:return: | entailment |
def of_project(project: 'projects.Project') -> dict:
    """
    Returns the file status information for every file within the project
    source directory and its shared library folders.

    :param project:
        The project for which the status information should be generated
    :return:
        A dictionary containing:
            - project: the status information for all files within
              the project's source directory
            - libraries: a list of status information dictionaries for all
              files within each of the project's library directories. If a
              library resides within the project source directory, the entry
              will be an empty dictionary to prevent duplication.
    """
    root = project.source_directory
    libraries = []
    for directory in project.library_directories:
        # Libraries nested inside the source directory are already covered
        # by the project status, so emit an empty placeholder for them.
        nested = directory.startswith(root)
        libraries.append({} if nested else of_directory(directory))
    return dict(
        project=of_directory(root),
        libraries=libraries
    )
source directory and its shared library folders.
:param project:
The project for which the status information should be generated
:return:
A dictionary containing:
- project: the status information for all files within
the projects source directory
- libraries: a list of status information dictionaries for all
files within each of the project's library directories. If a
library resides within the project source directory, the entry
will be an empty dictionary to prevent duplication. | entailment |
def of_file(path: str, root_directory: str = None) -> dict:
    """
    Returns a dictionary containing status information for the specified
    file: its path relative to the root directory, when it was last modified
    and its size in bytes.

    :param path:
        The absolute path to the file for which the status information
        should be generated
    :param root_directory:
        The directory to use for creating relative path names for the
        returned status. If this argument is None the path in the status
        will be the absolute path argument.
    :return:
        A dictionary containing the status information for the file at the
        specified path. If no such file exists (or the path is a directory),
        the dictionary will contain -1 values for both the file size and the
        last modified time.
    """
    if root_directory is None:
        slug = path
    else:
        slug = path[len(root_directory):].lstrip(os.sep)

    missing_or_directory = not os.path.exists(path) or os.path.isdir(path)
    if missing_or_directory:
        return dict(size=-1, modified=-1, path=slug)

    # Use the newer of mtime/ctime so metadata-only changes are also seen.
    last_modified = max(os.path.getmtime(path), os.path.getctime(path))
    return dict(
        modified=last_modified,
        path=slug,
        size=os.path.getsize(path)
    )
including when its name relative to the root directory, when it was last
modified and its size.
:param path:
The absolute path to the file for which the status information should
be generated
:param root_directory:
The directory to use for creating relative path names for the returned
status. If this argument is None the path in the status will be the
absolute path argument.
:return:
A dictionary containing the status information for the file at the
specified path. If no such file exists, then the dictionary will
contain -1 values for both the file size and the last modified time. | entailment |
def of_directory(directory: str, root_directory: str = None) -> dict:
    """
    Returns a dictionary containing status entries recursively for all files
    within the specified directory and its descendant directories.

    :param directory:
        The directory in which to retrieve status information
    :param root_directory:
        Directory relative to which all file status paths are related. If
        this argument is None then the directory argument itself will be
        used.
    :return:
        A dictionary containing status information for each file within the
        specified directory and its descendants, keyed by relative path.
    """
    root = root_directory or directory
    statuses = (
        of_file(found_path, root)
        for found_path in glob.iglob(os.path.join(directory, '**/*'), recursive=True)
    )
    # Entries with modified == -1 are directories or vanished files; skip them.
    return {
        status['path']: status
        for status in statuses
        if status['modified'] != -1
    }
within the specified directory and its descendant directories.
:param directory:
The directory in which to retrieve status information
:param root_directory:
Directory relative to which all file status paths are related. If this
argument is None then the directory argument itself will be used.
:return:
A dictionary containing status information for each file within the
specified directory and its descendants. The keys of the dictionary
are the relative path names for each of the files. | entailment |
def run_local(
        context: cli.CommandContext,
        project: projects.Project,
        project_steps: typing.List[projects.ProjectStep],
        force: bool,
        continue_after: bool,
        single_step: bool,
        limit: int,
        print_status: bool,
        skip_library_reload: bool = False
) -> environ.Response:
    """
    Execute the run command locally within this cauldron environment.

    :param context:
        The command context whose response object is populated with the
        results of the run.
    :param project:
        The project whose steps will be executed.
    :param project_steps:
        The explicitly specified steps to run. May be empty, in which case
        the entire project is run.
    :param force:
        Whether to run steps even when they are not marked as needing a run.
    :param continue_after:
        Whether to run the remainder of the project after the first
        specified step.
    :param single_step:
        Whether to run only a single step.
    :param limit:
        The maximum number of steps to run in a section.
    :param print_status:
        Whether to include the serialized project status in the response.
    :param skip_library_reload:
        Whether or not to skip reloading all project libraries prior to
        execution of the project. By default this is False in which case
        the project libraries are reloaded prior to execution.
    :return:
        The context's response object updated with the step changes and,
        when requested or on failure, the serialized project state.
    """
    # Library reloads are skipped in testing mode as well as when
    # explicitly requested by the caller.
    skip_reload = (
        skip_library_reload
        or environ.modes.has(environ.modes.TESTING)
    )

    if not skip_reload:
        runner.reload_libraries()

    environ.log_header('RUNNING', 5)

    steps_run = []

    if single_step:
        # If the user specifies the single step flag, only run one step.
        # Force the step to be run if they specified it explicitly.
        ps = project_steps[0] if len(project_steps) > 0 else None
        force = force or (single_step and bool(ps is not None))
        steps_run = runner.section(
            response=context.response,
            project=project,
            starting=ps,
            limit=1,
            force=force
        )
    elif continue_after or len(project_steps) == 0:
        # If the continue after flag is set, start with the specified step
        # and run the rest of the project after that. Or, if no steps were
        # specified, run the entire project with the specified flags.
        ps = project_steps[0] if len(project_steps) > 0 else None
        steps_run = runner.complete(
            context.response,
            project,
            ps,
            force=force,
            limit=limit
        )
    else:
        # Run one section per explicitly specified step, skipping steps
        # that already ran in an earlier section of this loop.
        for ps in project_steps:
            steps_run += runner.section(
                response=context.response,
                project=project,
                starting=ps,
                limit=max(1, limit),
                force=force or (limit < 1 and len(project_steps) < 2),
                skips=steps_run + []
            )

    project.write()
    environ.log_blanks()

    # Serialize each executed step as an "updated" change entry for the
    # response consumer (e.g. the UI).
    step_changes = []
    for ps in steps_run:
        step_changes.append(dict(
            name=ps.definition.name,
            action='updated',
            step=writing.step_writer.serialize(ps)._asdict()
        ))

    context.response.update(step_changes=step_changes)

    if print_status or context.response.failed:
        context.response.update(project=project.kernel_serialize())

    return context.response
:param context:
:param project:
:param project_steps:
:param force:
:param continue_after:
:param single_step:
:param limit:
:param print_status:
:param skip_library_reload:
Whether or not to skip reloading all project libraries prior to
execution of the project. By default this is False in which case
the project libraries are reloaded prior to execution.
:return: | entailment |
def loadalldatas():
    """Loads all demo fixtures."""
    # Apps are loaded in dependency order so relational fixtures resolve.
    for app_name in ('common', 'profiles', 'blog', 'democomments'):
        fixture_root = os.path.join(paths.project_paths.manage_root, app_name)
        project.recursive_load(fixture_root)
def project_status():
    """Returns the status of the currently opened project as a JSON response."""
    response = Response()

    try:
        internal_project = cauldron.project.get_internal_project()
        # A missing project is reported as None rather than as a failure.
        status = internal_project.status() if internal_project else None
        response.update(project=status)
    except Exception as err:
        response.fail(
            code='PROJECT_STATUS_ERROR',
            message='Unable to check status of currently opened project',
            error=err
        )

    response.update(server=server_runner.get_server_data())
    return flask.jsonify(response.serialize())
def clean_step(step_name: str):
    """Clears the dirty flag on the named step of the open project."""
    response = Response()
    project = cauldron.project.get_internal_project()

    if not project:
        failed = response.fail(
            code='PROJECT_FETCH_ERROR',
            message='No project is currently open'
        )
        return flask.jsonify(failed.response.serialize())

    step = project.get_step(step_name)
    if not step:
        failed = response.fail(
            code='STEP_FETCH_ERROR',
            message='No such step "{}" found'.format(step_name)
        )
        return flask.jsonify(failed.response.serialize())

    # Force-clear the dirty state so the step is treated as up to date.
    step.mark_dirty(False, force=True)

    updated = response.update(project=project.kernel_serialize())
    return flask.jsonify(updated.response.serialize())
def _insert_timestamp(self, slug, max_length=255):
"""Appends a timestamp integer to the given slug, yet ensuring the
result is less than the specified max_length.
"""
timestamp = str(int(time.time()))
ts_len = len(timestamp) + 1
while len(slug) + ts_len > max_length:
slug = '-'.join(slug.split('-')[:-1])
slug = '-'.join([slug, timestamp])
return slug | Appends a timestamp integer to the given slug, yet ensuring the
result is less than the specified max_length. | entailment |
def _slugify_title(self):
    """Slugify the Entry title, but ensure it's less than the maximum
    number of characters. This method also ensures that a slug is unique by
    appending a timestamp to any duplicate slugs.
    """
    candidate = slugify(self.title)
    # Restrict slugs to their maximum number of chars without splitting
    # mid-word: drop trailing hyphen-separated words until it fits.
    while len(candidate) > 255:
        candidate = '-'.join(candidate.split('-')[:-1])
    self.slug = candidate

    # Is the same slug as another entry? Append time to differentiate.
    collision = Entry.objects.filter(slug=self.slug).exclude(id=self.id).exists()
    if collision:
        self.slug = self._insert_timestamp(self.slug)
number of characters. This method also ensures that a slug is unique by
appending a timestamp to any duplicate slugs. | entailment |
def run(self):
    '''Run jobs, popping one after another'''
    # Register our signal handlers before entering the work loop.
    self.signals()
    with self.listener():
        for job in self.jobs():
            # If there was no job to be had, we should sleep a little bit
            if not job:
                self.jid = None
                self.title('Sleeping for %fs' % self.interval)
                time.sleep(self.interval)
            else:
                # Track the current job id and advertise it in the title.
                self.jid = job.jid
                self.title('Working on %s (%s)' % (job.jid, job.klass_name))
                # Run the job inside the sandbox context so any resources
                # it uses are cleaned up when processing finishes.
                with Worker.sandbox(self.sandbox):
                    job.sandbox = self.sandbox
                    job.process()
            # A signal handler may have requested shutdown; honor it
            # between jobs rather than mid-job.
            if self.shutdown:
                break
def initialize():
    """
    Initializes the cauldron library by confirming that it can be imported
    by the importlib library. If the attempt to import it fails, the system
    path will be modified and the attempt retried. If both attempts fail, an
    import error will be raised.

    :return:
        The imported cauldron module.
    :raises ImportError:
        If the cauldron package cannot be imported even after adding the
        root directory to the system path.
    """
    cauldron_module = get_cauldron_module()
    if cauldron_module is not None:
        return cauldron_module

    # Fall back to adding the package root to the path and retrying.
    sys.path.append(ROOT_DIRECTORY)

    cauldron_module = get_cauldron_module()
    if cauldron_module is not None:
        return cauldron_module

    # BUG FIX: the original tuple was missing a comma, so the adjacent
    # string literals were concatenated into one element and the joining
    # space was never inserted ("cauldron.The package").
    raise ImportError(' '.join((
        'Unable to import cauldron.',
        'The package was not installed in a known location.'
    )))
by the importlib library. If the attempt to import it fails, the system
path will be modified and the attempt retried. If both attempts fail, an
import error will be raised. | entailment |
def run(arguments: typing.List[str] = None):
    """Executes the cauldron command"""
    initialize()

    # Imported after initialize() so the cauldron package is importable.
    from cauldron.invoke import parser
    from cauldron.invoke import invoker

    parsed = parser.parse(arguments)
    sys.exit(invoker.run(parsed.get('command'), parsed))
def author_display(author, *args):
    """Returns either the linked or not-linked profile name."""
    # Call get_absolute_url, or a stand-in returning None if not defined.
    url = getattr(author, 'get_absolute_url', lambda: None)()
    # get_short_name, or the unicode representation as a fallback.
    display_name = getattr(
        author, 'get_short_name', lambda: six.text_type(author)
    )()
    if not url:
        return display_name
    # NOTE(review): display_name is interpolated unescaped into markup that
    # is marked safe — confirm upstream guarantees it contains no HTML.
    return mark_safe('<a href="{}">{}</a>'.format(url, display_name))
def echo_steps(response: Response, project: Project):
    """
    Populates the response with the list of the project's steps and writes
    a human-readable summary of them to the console.

    :param response:
        The response object to update with the serialized steps.
    :param project:
        The project whose steps should be echoed.
    :return:
        None. Results are communicated through the response object.
    """
    # An empty project gets a success notification with usage guidance
    # instead of a step listing.
    if len(project.steps) < 1:
        response.update(
            steps=[]
        ).notify(
            kind='SUCCESS',
            code='ECHO_STEPS',
            message='No steps in project'
        ).console(
            """
            [NONE]: This project does not have any steps yet. To add a new
            step use the command:
            steps add [YOUR_STEP_NAME]
            and a new step will be created in this project.
            """,
            whitespace=1
        )
        return

    # Serialize every step into the response and print a bulleted list of
    # step names to the console.
    response.update(
        steps=[ps.kernel_serialize() for ps in project.steps]
    ).notify(
        kind='SUCCESS',
        code='ECHO_STEPS'
    ).console_header(
        'Project Steps',
        level=3
    ).console(
        '\n'.join(['* {}'.format(ps.definition.name) for ps in project.steps]),
        indent_by=2,
        whitespace_bottom=1
    )
:param project:
:return: | entailment |
def explode_filename(name: str, scheme: str) -> dict:
    """
    Removes any path components from the input filename and returns a
    dictionary containing the name of the file without extension, the
    extension (if an extension exists) and, when the scheme defines an
    index placeholder, the zero-based index parsed from the name.

    :param name:
        The filename to deconstruct according to the naming scheme.
    :param scheme:
        A naming scheme that may contain the placeholders ``{{name}}``,
        ``{{ext}}`` and ``{{##}}`` (one ``#`` per index digit). If empty,
        the name is split naively via split_filename.
    :return:
        A dictionary with ``name`` and ``extension`` entries and, when the
        scheme matched, an ``index`` entry (zero-based, or None when the
        scheme defines no index placeholder).
    """
    if not scheme:
        return split_filename(name)

    replacements = {
        'name': '(?P<name>.*)',
        'ext': '(?P<extension>.+)$',
        'index': '(?P<index>[0-9]{{{length}}})'
    }

    scheme_pattern = '^'
    empty_scheme_pattern = ''
    offset = 0

    # Translate the scheme into a regex one character at a time, escaping
    # regex metacharacters and expanding {{...}} placeholders into named
    # capture groups. empty_scheme_pattern mirrors scheme_pattern but omits
    # the name/ext groups so it can match "decoration only" names.
    while offset < len(scheme):
        char = scheme[offset]
        next_char = scheme[offset + 1] if (offset + 1) < len(scheme) else None

        if char in r'.()^$?*+\[]|':
            addition = '\\{}'.format(char)
            scheme_pattern += addition
            empty_scheme_pattern += addition
            offset += 1
            continue

        # A placeholder only begins at a doubled brace; anything else is a
        # literal character.
        if char != '{' or next_char != '{':
            scheme_pattern += char
            empty_scheme_pattern += char
            offset += 1
            continue

        end_index = scheme.find('}}', offset)
        contents = scheme[offset:end_index].strip('{}').lower()

        if contents in replacements:
            scheme_pattern += replacements[contents]
        elif contents == ('#' * len(contents)):
            # A run of '#' characters denotes a fixed-width numeric index.
            addition = replacements['index'].format(length=len(contents))
            scheme_pattern += addition
            empty_scheme_pattern += addition
        else:
            # Unknown placeholder; keep it as a literal brace expression.
            addition = '{{{}}}'.format(contents)
            scheme_pattern += addition
            empty_scheme_pattern += addition

        offset = end_index + 2

    match = re.compile(scheme_pattern).match(name)

    if not match:
        # Retry against the bare name using the decoration-only pattern with
        # trailing separator characters stripped.
        parts = split_filename(name)
        comparison = re.compile(empty_scheme_pattern.rstrip('-_: .\\'))
        match = comparison.match(parts['name'])
        if not match:
            return parts

    parts = match.groupdict()
    index = parts.get('index')
    # BUG FIX: previously `index - 1` was computed unconditionally, raising
    # a TypeError whenever the scheme defined no index placeholder (index
    # was None). Preserve None in that case.
    index = int(index) - 1 if index else None

    return dict(
        index=index,
        name=parts.get('name', ''),
        extension=parts.get('extension', 'py')
    )
dictionary containing the name of the file without extension and the
extension (if an extension exists)
:param name:
:param scheme:
:return: | entailment |
def create(project: 'projects.Project') -> COMPONENT:
    """
    Creates the plotly component for the given project, consisting of the
    web include that references the plotly library and the file copy entry
    that places the library in the project's output directory.

    :param project:
        The project for which the component should be created.
    :return:
        A COMPONENT containing the includes and files, or an empty
        COMPONENT when no plotly source is available.
    """
    plotly_source = get_source_path()
    if not plotly_source:
        # Plotly isn't available; nothing to include or copy.
        return COMPONENT([], [])

    slug = 'components/plotly/plotly.min.js'
    destination = os.path.join(project.output_directory, slug)

    include = WEB_INCLUDE(name='plotly', src='/{}'.format(slug))
    copy_entry = file_io.FILE_COPY_ENTRY(
        source=plotly_source,
        destination=destination
    )
    return COMPONENT(includes=[include], files=[copy_entry])
:return: | entailment |
def in_project_directory() -> bool:
    """
    Returns whether or not the current working directory is a Cauldron
    project directory, which contains a cauldron.json file.
    """
    settings_path = os.path.join(
        os.path.realpath(os.curdir),
        'cauldron.json'
    )
    # isfile implies existence, matching the original exists-and-isfile check.
    return os.path.isfile(settings_path)
directory, which contains a cauldron.json file. | entailment |
def load_shared_data(path: typing.Union[str, None]) -> dict:
    """
    Load shared data from a JSON file stored on disk.

    :param path:
        The path to the JSON file to load, or None for no shared data.
    :return:
        The loaded dictionary, or an empty dictionary when path is None.
    :raises FileNotFoundError:
        If the path does not exist.
    :raises IOError:
        If the file cannot be read or parsed as JSON.
    :raises ValueError:
        If the parsed JSON is not a dictionary.
    """
    if path is None:
        return dict()

    if not os.path.exists(path):
        raise FileNotFoundError('No such shared data file "{}"'.format(path))

    try:
        with open(path, 'r') as fp:
            data = json.load(fp)
    except Exception as error:
        # Chain the original exception so the root cause (permissions,
        # malformed JSON, ...) remains visible in the traceback.
        raise IOError(
            'Unable to read shared data file "{}"'.format(path)
        ) from error

    if not isinstance(data, dict):
        raise ValueError('Shared data must load into a dictionary object')

    return data
def run_version(args: dict) -> int:
    """Displays the current version"""
    current = environ.package_settings.get('version', 'unknown')
    print('VERSION: {}'.format(current))
    return 0
def run_batch(args: dict) -> int:
    """Runs a batch operation for the given arguments"""
    shared = load_shared_data(args.get('shared_data_path'))
    batcher.run_project(
        project_directory=args.get('project_directory'),
        log_path=args.get('logging_path'),
        output_directory=args.get('output_directory'),
        shared_data=shared
    )
    return 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.