id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
9,500
|
vatlab/SoS
|
src/sos/utils.py
|
sample_lines
|
def sample_lines(lines, n):
'''Draw a sample of n lines from filename, largely evenly.'''
if len(lines) <= n:
return ''.join(lines)
else:
m = len(lines)
return ''.join([lines[x * m // n + m // (2 * n)] for x in range(n)])
|
python
|
def sample_lines(lines, n):
'''Draw a sample of n lines from filename, largely evenly.'''
if len(lines) <= n:
return ''.join(lines)
else:
m = len(lines)
return ''.join([lines[x * m // n + m // (2 * n)] for x in range(n)])
|
[
"def",
"sample_lines",
"(",
"lines",
",",
"n",
")",
":",
"if",
"len",
"(",
"lines",
")",
"<=",
"n",
":",
"return",
"''",
".",
"join",
"(",
"lines",
")",
"else",
":",
"m",
"=",
"len",
"(",
"lines",
")",
"return",
"''",
".",
"join",
"(",
"[",
"lines",
"[",
"x",
"*",
"m",
"//",
"n",
"+",
"m",
"//",
"(",
"2",
"*",
"n",
")",
"]",
"for",
"x",
"in",
"range",
"(",
"n",
")",
"]",
")"
] |
Draw a sample of n lines from filename, largely evenly.
|
[
"Draw",
"a",
"sample",
"of",
"n",
"lines",
"from",
"filename",
"largely",
"evenly",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/utils.py#L1498-L1504
|
9,501
|
vatlab/SoS
|
src/sos/utils.py
|
WorkflowDict.set
|
def set(self, key, value):
'''A short cut to set value to key without triggering any logging
or warning message.'''
if hasattr(value, 'labels'):
if 'VARIABLE' in env.config['SOS_DEBUG'] or 'ALL' in env.config[
'SOS_DEBUG']:
env.log_to_file(
'VARIABLE',
f"Set {key} to {short_repr(value)} with labels {short_repr(value.labels)}"
)
else:
if 'VARIABLE' in env.config['SOS_DEBUG'] or 'ALL' in env.config[
'SOS_DEBUG']:
env.log_to_file(
'VARIABLE',
f"Set {key} to {short_repr(value)} of type {value.__class__.__name__}"
)
self._dict[key] = value
|
python
|
def set(self, key, value):
'''A short cut to set value to key without triggering any logging
or warning message.'''
if hasattr(value, 'labels'):
if 'VARIABLE' in env.config['SOS_DEBUG'] or 'ALL' in env.config[
'SOS_DEBUG']:
env.log_to_file(
'VARIABLE',
f"Set {key} to {short_repr(value)} with labels {short_repr(value.labels)}"
)
else:
if 'VARIABLE' in env.config['SOS_DEBUG'] or 'ALL' in env.config[
'SOS_DEBUG']:
env.log_to_file(
'VARIABLE',
f"Set {key} to {short_repr(value)} of type {value.__class__.__name__}"
)
self._dict[key] = value
|
[
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"'labels'",
")",
":",
"if",
"'VARIABLE'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'VARIABLE'",
",",
"f\"Set {key} to {short_repr(value)} with labels {short_repr(value.labels)}\"",
")",
"else",
":",
"if",
"'VARIABLE'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'VARIABLE'",
",",
"f\"Set {key} to {short_repr(value)} of type {value.__class__.__name__}\"",
")",
"self",
".",
"_dict",
"[",
"key",
"]",
"=",
"value"
] |
A short cut to set value to key without triggering any logging
or warning message.
|
[
"A",
"short",
"cut",
"to",
"set",
"value",
"to",
"key",
"without",
"triggering",
"any",
"logging",
"or",
"warning",
"message",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/utils.py#L209-L226
|
9,502
|
vatlab/SoS
|
src/sos/utils.py
|
WorkflowDict.update
|
def update(self, obj):
'''Redefine update to trigger logging message'''
self._dict.update(obj)
for k, v in obj.items():
# if k.isupper():
# self._check_readonly(k, v)
if env.verbosity > 2:
self._log(k, v)
|
python
|
def update(self, obj):
'''Redefine update to trigger logging message'''
self._dict.update(obj)
for k, v in obj.items():
# if k.isupper():
# self._check_readonly(k, v)
if env.verbosity > 2:
self._log(k, v)
|
[
"def",
"update",
"(",
"self",
",",
"obj",
")",
":",
"self",
".",
"_dict",
".",
"update",
"(",
"obj",
")",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"items",
"(",
")",
":",
"# if k.isupper():",
"# self._check_readonly(k, v)",
"if",
"env",
".",
"verbosity",
">",
"2",
":",
"self",
".",
"_log",
"(",
"k",
",",
"v",
")"
] |
Redefine update to trigger logging message
|
[
"Redefine",
"update",
"to",
"trigger",
"logging",
"message"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/utils.py#L234-L241
|
9,503
|
vatlab/SoS
|
src/sos/substep_executor.py
|
execute_substep
|
def execute_substep(stmt,
global_def,
global_vars,
task='',
task_params='',
proc_vars={},
shared_vars=[],
config={}):
'''Execute a substep with specific input etc
Substep executed by this function should be self-contained. It can contain
tasks (which will be sent to the master process) but not nested workflows.
The executor checks step signatures and might skip the substep if it has
been executed and the signature matches.
The executor accepts connections to the controller, and a socket using
which the results will be returned. However, the calling process should
take care of the connection and disconnection of controller sockets and
this function only takes care of the connection and disconnection of
result socket.
stmt:
Main statement of the substep
global_def:
Global definitions, might define functions useful to the substep
task:
External task
proc_vars:
Environmental variables, signature variables etc
shared_vars:
Variables that should be returned after the execution
config:
Runmode, signature mode, verbosity, etc.
The return value should be a dictionary with the following keys:
index: index of the substep within the step
ret_code: (all) return code, 0 for successful
sig_skipped: (optional) return if the step is skipped due to signature
shared: (optional) shared variable as specified by 'shared_vars'
stdout: (optional) if in interactive mode
stderr: (optional) if in interactive mode
exception: (optional) if an exception occures
'''
assert not env.zmq_context.closed
assert 'workflow_id' in proc_vars
assert 'step_id' in proc_vars
assert '_input' in proc_vars
assert '_output' in proc_vars
assert '_depends' in proc_vars
assert 'step_output' in proc_vars
assert '_index' in proc_vars
assert 'result_push_socket' in config["sockets"]
# this should not happen but check nevertheless
if env.result_socket_port is not None and env.result_socket_port != config[
"sockets"]["result_push_socket"]:
close_socket(env.result_socket)
env.result_socket = None
if env.result_socket is None:
env.result_socket = create_socket(env.zmq_context, zmq.PUSH)
env.result_socket_port = config["sockets"]["result_push_socket"]
env.result_socket.connect(f'tcp://127.0.0.1:{env.result_socket_port}')
res = _execute_substep(
stmt=stmt,
global_def=global_def,
global_vars=global_vars,
task=task,
task_params=task_params,
proc_vars=proc_vars,
shared_vars=shared_vars,
config=config)
env.result_socket.send_pyobj(res)
|
python
|
def execute_substep(stmt,
global_def,
global_vars,
task='',
task_params='',
proc_vars={},
shared_vars=[],
config={}):
'''Execute a substep with specific input etc
Substep executed by this function should be self-contained. It can contain
tasks (which will be sent to the master process) but not nested workflows.
The executor checks step signatures and might skip the substep if it has
been executed and the signature matches.
The executor accepts connections to the controller, and a socket using
which the results will be returned. However, the calling process should
take care of the connection and disconnection of controller sockets and
this function only takes care of the connection and disconnection of
result socket.
stmt:
Main statement of the substep
global_def:
Global definitions, might define functions useful to the substep
task:
External task
proc_vars:
Environmental variables, signature variables etc
shared_vars:
Variables that should be returned after the execution
config:
Runmode, signature mode, verbosity, etc.
The return value should be a dictionary with the following keys:
index: index of the substep within the step
ret_code: (all) return code, 0 for successful
sig_skipped: (optional) return if the step is skipped due to signature
shared: (optional) shared variable as specified by 'shared_vars'
stdout: (optional) if in interactive mode
stderr: (optional) if in interactive mode
exception: (optional) if an exception occures
'''
assert not env.zmq_context.closed
assert 'workflow_id' in proc_vars
assert 'step_id' in proc_vars
assert '_input' in proc_vars
assert '_output' in proc_vars
assert '_depends' in proc_vars
assert 'step_output' in proc_vars
assert '_index' in proc_vars
assert 'result_push_socket' in config["sockets"]
# this should not happen but check nevertheless
if env.result_socket_port is not None and env.result_socket_port != config[
"sockets"]["result_push_socket"]:
close_socket(env.result_socket)
env.result_socket = None
if env.result_socket is None:
env.result_socket = create_socket(env.zmq_context, zmq.PUSH)
env.result_socket_port = config["sockets"]["result_push_socket"]
env.result_socket.connect(f'tcp://127.0.0.1:{env.result_socket_port}')
res = _execute_substep(
stmt=stmt,
global_def=global_def,
global_vars=global_vars,
task=task,
task_params=task_params,
proc_vars=proc_vars,
shared_vars=shared_vars,
config=config)
env.result_socket.send_pyobj(res)
|
[
"def",
"execute_substep",
"(",
"stmt",
",",
"global_def",
",",
"global_vars",
",",
"task",
"=",
"''",
",",
"task_params",
"=",
"''",
",",
"proc_vars",
"=",
"{",
"}",
",",
"shared_vars",
"=",
"[",
"]",
",",
"config",
"=",
"{",
"}",
")",
":",
"assert",
"not",
"env",
".",
"zmq_context",
".",
"closed",
"assert",
"'workflow_id'",
"in",
"proc_vars",
"assert",
"'step_id'",
"in",
"proc_vars",
"assert",
"'_input'",
"in",
"proc_vars",
"assert",
"'_output'",
"in",
"proc_vars",
"assert",
"'_depends'",
"in",
"proc_vars",
"assert",
"'step_output'",
"in",
"proc_vars",
"assert",
"'_index'",
"in",
"proc_vars",
"assert",
"'result_push_socket'",
"in",
"config",
"[",
"\"sockets\"",
"]",
"# this should not happen but check nevertheless",
"if",
"env",
".",
"result_socket_port",
"is",
"not",
"None",
"and",
"env",
".",
"result_socket_port",
"!=",
"config",
"[",
"\"sockets\"",
"]",
"[",
"\"result_push_socket\"",
"]",
":",
"close_socket",
"(",
"env",
".",
"result_socket",
")",
"env",
".",
"result_socket",
"=",
"None",
"if",
"env",
".",
"result_socket",
"is",
"None",
":",
"env",
".",
"result_socket",
"=",
"create_socket",
"(",
"env",
".",
"zmq_context",
",",
"zmq",
".",
"PUSH",
")",
"env",
".",
"result_socket_port",
"=",
"config",
"[",
"\"sockets\"",
"]",
"[",
"\"result_push_socket\"",
"]",
"env",
".",
"result_socket",
".",
"connect",
"(",
"f'tcp://127.0.0.1:{env.result_socket_port}'",
")",
"res",
"=",
"_execute_substep",
"(",
"stmt",
"=",
"stmt",
",",
"global_def",
"=",
"global_def",
",",
"global_vars",
"=",
"global_vars",
",",
"task",
"=",
"task",
",",
"task_params",
"=",
"task_params",
",",
"proc_vars",
"=",
"proc_vars",
",",
"shared_vars",
"=",
"shared_vars",
",",
"config",
"=",
"config",
")",
"env",
".",
"result_socket",
".",
"send_pyobj",
"(",
"res",
")"
] |
Execute a substep with specific input etc
Substep executed by this function should be self-contained. It can contain
tasks (which will be sent to the master process) but not nested workflows.
The executor checks step signatures and might skip the substep if it has
been executed and the signature matches.
The executor accepts connections to the controller, and a socket using
which the results will be returned. However, the calling process should
take care of the connection and disconnection of controller sockets and
this function only takes care of the connection and disconnection of
result socket.
stmt:
Main statement of the substep
global_def:
Global definitions, might define functions useful to the substep
task:
External task
proc_vars:
Environmental variables, signature variables etc
shared_vars:
Variables that should be returned after the execution
config:
Runmode, signature mode, verbosity, etc.
The return value should be a dictionary with the following keys:
index: index of the substep within the step
ret_code: (all) return code, 0 for successful
sig_skipped: (optional) return if the step is skipped due to signature
shared: (optional) shared variable as specified by 'shared_vars'
stdout: (optional) if in interactive mode
stderr: (optional) if in interactive mode
exception: (optional) if an exception occures
|
[
"Execute",
"a",
"substep",
"with",
"specific",
"input",
"etc"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/substep_executor.py#L36-L116
|
9,504
|
vatlab/SoS
|
src/sos/signatures.py
|
WorkflowSignatures.files
|
def files(self):
'''Listing files related to workflows related to current directory'''
try:
cur = self.conn.cursor()
cur.execute(
'SELECT id, item FROM workflows WHERE entry_type = "tracked_files"'
)
return [(x[0], eval(x[1])) for x in cur.fetchall()]
except sqlite3.DatabaseError as e:
env.logger.warning(
f'Failed to get files from signature database: {e}')
return []
|
python
|
def files(self):
'''Listing files related to workflows related to current directory'''
try:
cur = self.conn.cursor()
cur.execute(
'SELECT id, item FROM workflows WHERE entry_type = "tracked_files"'
)
return [(x[0], eval(x[1])) for x in cur.fetchall()]
except sqlite3.DatabaseError as e:
env.logger.warning(
f'Failed to get files from signature database: {e}')
return []
|
[
"def",
"files",
"(",
"self",
")",
":",
"try",
":",
"cur",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"'SELECT id, item FROM workflows WHERE entry_type = \"tracked_files\"'",
")",
"return",
"[",
"(",
"x",
"[",
"0",
"]",
",",
"eval",
"(",
"x",
"[",
"1",
"]",
")",
")",
"for",
"x",
"in",
"cur",
".",
"fetchall",
"(",
")",
"]",
"except",
"sqlite3",
".",
"DatabaseError",
"as",
"e",
":",
"env",
".",
"logger",
".",
"warning",
"(",
"f'Failed to get files from signature database: {e}'",
")",
"return",
"[",
"]"
] |
Listing files related to workflows related to current directory
|
[
"Listing",
"files",
"related",
"to",
"workflows",
"related",
"to",
"current",
"directory"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/signatures.py#L176-L187
|
9,505
|
vatlab/SoS
|
src/sos/dag.py
|
SoS_DAG.find_executable
|
def find_executable(self):
'''Find an executable node, which means nodes that has not been completed
and has no input dependency.'''
if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('DAG', 'find_executable')
for node in self.nodes():
# if it has not been executed
if node._status is None:
with_dependency = False
for edge in self.in_edges(node):
if edge[0]._status != 'completed':
with_dependency = True
break
if not with_dependency:
return node
# if no node could be found, let use try pending ones
pending_jobs = [
x for x in self.nodes() if x._status == 'signature_pending'
]
if pending_jobs:
try:
notifier = ActivityNotifier(
f'Waiting for {len(pending_jobs)} pending job{"s: e.g." if len(pending_jobs) > 1 else ":"} output {short_repr(pending_jobs[0]._signature[0])} with signature file {pending_jobs[0]._signature[1] + "_"}. You can manually remove this lock file if you are certain that no other process is working on the output.'
)
while True:
for node in pending_jobs:
# if it has not been executed
lock = fasteners.InterProcessLock(node._signature[1] +
'_')
if lock.acquire(blocking=False):
lock.release()
node._status = None
return node
time.sleep(0.1)
except Exception as e:
env.logger.error(e)
finally:
notifier.stop()
return None
|
python
|
def find_executable(self):
'''Find an executable node, which means nodes that has not been completed
and has no input dependency.'''
if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('DAG', 'find_executable')
for node in self.nodes():
# if it has not been executed
if node._status is None:
with_dependency = False
for edge in self.in_edges(node):
if edge[0]._status != 'completed':
with_dependency = True
break
if not with_dependency:
return node
# if no node could be found, let use try pending ones
pending_jobs = [
x for x in self.nodes() if x._status == 'signature_pending'
]
if pending_jobs:
try:
notifier = ActivityNotifier(
f'Waiting for {len(pending_jobs)} pending job{"s: e.g." if len(pending_jobs) > 1 else ":"} output {short_repr(pending_jobs[0]._signature[0])} with signature file {pending_jobs[0]._signature[1] + "_"}. You can manually remove this lock file if you are certain that no other process is working on the output.'
)
while True:
for node in pending_jobs:
# if it has not been executed
lock = fasteners.InterProcessLock(node._signature[1] +
'_')
if lock.acquire(blocking=False):
lock.release()
node._status = None
return node
time.sleep(0.1)
except Exception as e:
env.logger.error(e)
finally:
notifier.stop()
return None
|
[
"def",
"find_executable",
"(",
"self",
")",
":",
"if",
"'DAG'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'DAG'",
",",
"'find_executable'",
")",
"for",
"node",
"in",
"self",
".",
"nodes",
"(",
")",
":",
"# if it has not been executed",
"if",
"node",
".",
"_status",
"is",
"None",
":",
"with_dependency",
"=",
"False",
"for",
"edge",
"in",
"self",
".",
"in_edges",
"(",
"node",
")",
":",
"if",
"edge",
"[",
"0",
"]",
".",
"_status",
"!=",
"'completed'",
":",
"with_dependency",
"=",
"True",
"break",
"if",
"not",
"with_dependency",
":",
"return",
"node",
"# if no node could be found, let use try pending ones",
"pending_jobs",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"nodes",
"(",
")",
"if",
"x",
".",
"_status",
"==",
"'signature_pending'",
"]",
"if",
"pending_jobs",
":",
"try",
":",
"notifier",
"=",
"ActivityNotifier",
"(",
"f'Waiting for {len(pending_jobs)} pending job{\"s: e.g.\" if len(pending_jobs) > 1 else \":\"} output {short_repr(pending_jobs[0]._signature[0])} with signature file {pending_jobs[0]._signature[1] + \"_\"}. You can manually remove this lock file if you are certain that no other process is working on the output.'",
")",
"while",
"True",
":",
"for",
"node",
"in",
"pending_jobs",
":",
"# if it has not been executed",
"lock",
"=",
"fasteners",
".",
"InterProcessLock",
"(",
"node",
".",
"_signature",
"[",
"1",
"]",
"+",
"'_'",
")",
"if",
"lock",
".",
"acquire",
"(",
"blocking",
"=",
"False",
")",
":",
"lock",
".",
"release",
"(",
")",
"node",
".",
"_status",
"=",
"None",
"return",
"node",
"time",
".",
"sleep",
"(",
"0.1",
")",
"except",
"Exception",
"as",
"e",
":",
"env",
".",
"logger",
".",
"error",
"(",
"e",
")",
"finally",
":",
"notifier",
".",
"stop",
"(",
")",
"return",
"None"
] |
Find an executable node, which means nodes that has not been completed
and has no input dependency.
|
[
"Find",
"an",
"executable",
"node",
"which",
"means",
"nodes",
"that",
"has",
"not",
"been",
"completed",
"and",
"has",
"no",
"input",
"dependency",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/dag.py#L184-L222
|
9,506
|
vatlab/SoS
|
src/sos/dag.py
|
SoS_DAG.dangling
|
def dangling(self, targets: sos_targets):
'''returns
1. missing targets, which are missing from the DAG or from the provided targets
2. existing targets of provided target list, not in DAG
'''
existing = []
missing = []
if env.config['trace_existing']:
for x in self._all_depends_files.keys():
if x not in self._all_output_files:
if x.target_exists():
existing.append(x)
else:
missing.append(x)
else:
missing = [
x for x in self._all_depends_files.keys()
if x not in self._all_output_files and not x.target_exists()
]
for x in targets:
if x not in self._all_output_files:
if x.target_exists('target'):
existing.append(x)
else:
missing.append(x)
return missing, existing
|
python
|
def dangling(self, targets: sos_targets):
'''returns
1. missing targets, which are missing from the DAG or from the provided targets
2. existing targets of provided target list, not in DAG
'''
existing = []
missing = []
if env.config['trace_existing']:
for x in self._all_depends_files.keys():
if x not in self._all_output_files:
if x.target_exists():
existing.append(x)
else:
missing.append(x)
else:
missing = [
x for x in self._all_depends_files.keys()
if x not in self._all_output_files and not x.target_exists()
]
for x in targets:
if x not in self._all_output_files:
if x.target_exists('target'):
existing.append(x)
else:
missing.append(x)
return missing, existing
|
[
"def",
"dangling",
"(",
"self",
",",
"targets",
":",
"sos_targets",
")",
":",
"existing",
"=",
"[",
"]",
"missing",
"=",
"[",
"]",
"if",
"env",
".",
"config",
"[",
"'trace_existing'",
"]",
":",
"for",
"x",
"in",
"self",
".",
"_all_depends_files",
".",
"keys",
"(",
")",
":",
"if",
"x",
"not",
"in",
"self",
".",
"_all_output_files",
":",
"if",
"x",
".",
"target_exists",
"(",
")",
":",
"existing",
".",
"append",
"(",
"x",
")",
"else",
":",
"missing",
".",
"append",
"(",
"x",
")",
"else",
":",
"missing",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"_all_depends_files",
".",
"keys",
"(",
")",
"if",
"x",
"not",
"in",
"self",
".",
"_all_output_files",
"and",
"not",
"x",
".",
"target_exists",
"(",
")",
"]",
"for",
"x",
"in",
"targets",
":",
"if",
"x",
"not",
"in",
"self",
".",
"_all_output_files",
":",
"if",
"x",
".",
"target_exists",
"(",
"'target'",
")",
":",
"existing",
".",
"append",
"(",
"x",
")",
"else",
":",
"missing",
".",
"append",
"(",
"x",
")",
"return",
"missing",
",",
"existing"
] |
returns
1. missing targets, which are missing from the DAG or from the provided targets
2. existing targets of provided target list, not in DAG
|
[
"returns",
"1",
".",
"missing",
"targets",
"which",
"are",
"missing",
"from",
"the",
"DAG",
"or",
"from",
"the",
"provided",
"targets",
"2",
".",
"existing",
"targets",
"of",
"provided",
"target",
"list",
"not",
"in",
"DAG"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/dag.py#L258-L283
|
9,507
|
vatlab/SoS
|
src/sos/dag.py
|
SoS_DAG.subgraph_from
|
def subgraph_from(self, targets: sos_targets):
'''Trim DAG to keep only nodes that produce targets'''
if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('DAG', 'create subgraph')
# first, find all nodes with targets
subnodes = []
for node in self.nodes():
if node._output_targets.valid() and any(
x in node._output_targets for x in targets):
subnodes.append(node)
#
ancestors = set()
for node in subnodes:
ancestors |= nx.ancestors(self, node)
return SoS_DAG(nx.subgraph(self, subnodes + list(ancestors)))
|
python
|
def subgraph_from(self, targets: sos_targets):
'''Trim DAG to keep only nodes that produce targets'''
if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('DAG', 'create subgraph')
# first, find all nodes with targets
subnodes = []
for node in self.nodes():
if node._output_targets.valid() and any(
x in node._output_targets for x in targets):
subnodes.append(node)
#
ancestors = set()
for node in subnodes:
ancestors |= nx.ancestors(self, node)
return SoS_DAG(nx.subgraph(self, subnodes + list(ancestors)))
|
[
"def",
"subgraph_from",
"(",
"self",
",",
"targets",
":",
"sos_targets",
")",
":",
"if",
"'DAG'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'DAG'",
",",
"'create subgraph'",
")",
"# first, find all nodes with targets",
"subnodes",
"=",
"[",
"]",
"for",
"node",
"in",
"self",
".",
"nodes",
"(",
")",
":",
"if",
"node",
".",
"_output_targets",
".",
"valid",
"(",
")",
"and",
"any",
"(",
"x",
"in",
"node",
".",
"_output_targets",
"for",
"x",
"in",
"targets",
")",
":",
"subnodes",
".",
"append",
"(",
"node",
")",
"#",
"ancestors",
"=",
"set",
"(",
")",
"for",
"node",
"in",
"subnodes",
":",
"ancestors",
"|=",
"nx",
".",
"ancestors",
"(",
"self",
",",
"node",
")",
"return",
"SoS_DAG",
"(",
"nx",
".",
"subgraph",
"(",
"self",
",",
"subnodes",
"+",
"list",
"(",
"ancestors",
")",
")",
")"
] |
Trim DAG to keep only nodes that produce targets
|
[
"Trim",
"DAG",
"to",
"keep",
"only",
"nodes",
"that",
"produce",
"targets"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/dag.py#L301-L315
|
9,508
|
vatlab/SoS
|
src/sos/dag.py
|
SoS_DAG.build
|
def build(self):
'''Connect nodes according to status of targets'''
# right now we do not worry about status of nodes
# connecting the output to the input of other nodes
#
# NOTE: This is implemented in the least efficient way just for
# testing. It has to be re-implemented.
#
# refer to http://stackoverflow.com/questions/33494376/networkx-add-edges-to-graph-from-node-attributes
#
# several cases triggers dependency.
if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('DAG', 'build DAG')
for wf in range(self._forward_workflow_id + 1):
indexed = [x for x in self.nodes() if x._wf_index == wf]
indexed.sort(key=lambda x: x._node_index)
for idx, node in enumerate(indexed):
# 1. if a node changes context (using option alias), all later steps
# has to rely on it.
if node._context['__changed_vars__']:
for later_node in indexed[idx + 1:]:
if node._context['__changed_vars__'] & (
later_node._context['__signature_vars__']
| later_node._context['__environ_vars__']):
self.add_edge(node, later_node)
# 2. if the input of a step is undetermined, it has to be executed
# after all its previous steps.
if not node._input_targets.valid() and idx > 0:
# if there is some input specified, it does not use default
# input, so the relationship can be further looked before
if node._input_targets.undetermined():
# if the input is dynamic, has to rely on previous step...
if 'dynamic' in node._context['__environ_vars__']:
self.add_edge(indexed[idx - 1], node)
else:
# otherwise let us look back.
for prev_node in indexed[idx - 1::-1]:
if node._context[
'__environ_vars__'] & prev_node._context[
'__changed_vars__']:
self.add_edge(prev_node, node)
else:
self.add_edge(indexed[idx - 1], node)
#
# 3. if the input of a step depends on the output of another step
for target, in_node in self._all_depends_files.items():
if target not in self._all_output_files:
continue
# it is possible that multiple nodes satisfy the same target
out_node = self._all_output_files[target]
for i in in_node:
for j in out_node:
if j != i:
self.add_edge(j, i)
self.mark_dirty()
|
python
|
def build(self):
'''Connect nodes according to status of targets'''
# right now we do not worry about status of nodes
# connecting the output to the input of other nodes
#
# NOTE: This is implemented in the least efficient way just for
# testing. It has to be re-implemented.
#
# refer to http://stackoverflow.com/questions/33494376/networkx-add-edges-to-graph-from-node-attributes
#
# several cases triggers dependency.
if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('DAG', 'build DAG')
for wf in range(self._forward_workflow_id + 1):
indexed = [x for x in self.nodes() if x._wf_index == wf]
indexed.sort(key=lambda x: x._node_index)
for idx, node in enumerate(indexed):
# 1. if a node changes context (using option alias), all later steps
# has to rely on it.
if node._context['__changed_vars__']:
for later_node in indexed[idx + 1:]:
if node._context['__changed_vars__'] & (
later_node._context['__signature_vars__']
| later_node._context['__environ_vars__']):
self.add_edge(node, later_node)
# 2. if the input of a step is undetermined, it has to be executed
# after all its previous steps.
if not node._input_targets.valid() and idx > 0:
# if there is some input specified, it does not use default
# input, so the relationship can be further looked before
if node._input_targets.undetermined():
# if the input is dynamic, has to rely on previous step...
if 'dynamic' in node._context['__environ_vars__']:
self.add_edge(indexed[idx - 1], node)
else:
# otherwise let us look back.
for prev_node in indexed[idx - 1::-1]:
if node._context[
'__environ_vars__'] & prev_node._context[
'__changed_vars__']:
self.add_edge(prev_node, node)
else:
self.add_edge(indexed[idx - 1], node)
#
# 3. if the input of a step depends on the output of another step
for target, in_node in self._all_depends_files.items():
if target not in self._all_output_files:
continue
# it is possible that multiple nodes satisfy the same target
out_node = self._all_output_files[target]
for i in in_node:
for j in out_node:
if j != i:
self.add_edge(j, i)
self.mark_dirty()
|
[
"def",
"build",
"(",
"self",
")",
":",
"# right now we do not worry about status of nodes",
"# connecting the output to the input of other nodes",
"#",
"# NOTE: This is implemented in the least efficient way just for",
"# testing. It has to be re-implemented.",
"#",
"# refer to http://stackoverflow.com/questions/33494376/networkx-add-edges-to-graph-from-node-attributes",
"#",
"# several cases triggers dependency.",
"if",
"'DAG'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'DAG'",
",",
"'build DAG'",
")",
"for",
"wf",
"in",
"range",
"(",
"self",
".",
"_forward_workflow_id",
"+",
"1",
")",
":",
"indexed",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"nodes",
"(",
")",
"if",
"x",
".",
"_wf_index",
"==",
"wf",
"]",
"indexed",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"_node_index",
")",
"for",
"idx",
",",
"node",
"in",
"enumerate",
"(",
"indexed",
")",
":",
"# 1. if a node changes context (using option alias), all later steps",
"# has to rely on it.",
"if",
"node",
".",
"_context",
"[",
"'__changed_vars__'",
"]",
":",
"for",
"later_node",
"in",
"indexed",
"[",
"idx",
"+",
"1",
":",
"]",
":",
"if",
"node",
".",
"_context",
"[",
"'__changed_vars__'",
"]",
"&",
"(",
"later_node",
".",
"_context",
"[",
"'__signature_vars__'",
"]",
"|",
"later_node",
".",
"_context",
"[",
"'__environ_vars__'",
"]",
")",
":",
"self",
".",
"add_edge",
"(",
"node",
",",
"later_node",
")",
"# 2. if the input of a step is undetermined, it has to be executed",
"# after all its previous steps.",
"if",
"not",
"node",
".",
"_input_targets",
".",
"valid",
"(",
")",
"and",
"idx",
">",
"0",
":",
"# if there is some input specified, it does not use default",
"# input, so the relationship can be further looked before",
"if",
"node",
".",
"_input_targets",
".",
"undetermined",
"(",
")",
":",
"# if the input is dynamic, has to rely on previous step...",
"if",
"'dynamic'",
"in",
"node",
".",
"_context",
"[",
"'__environ_vars__'",
"]",
":",
"self",
".",
"add_edge",
"(",
"indexed",
"[",
"idx",
"-",
"1",
"]",
",",
"node",
")",
"else",
":",
"# otherwise let us look back.",
"for",
"prev_node",
"in",
"indexed",
"[",
"idx",
"-",
"1",
":",
":",
"-",
"1",
"]",
":",
"if",
"node",
".",
"_context",
"[",
"'__environ_vars__'",
"]",
"&",
"prev_node",
".",
"_context",
"[",
"'__changed_vars__'",
"]",
":",
"self",
".",
"add_edge",
"(",
"prev_node",
",",
"node",
")",
"else",
":",
"self",
".",
"add_edge",
"(",
"indexed",
"[",
"idx",
"-",
"1",
"]",
",",
"node",
")",
"#",
"# 3. if the input of a step depends on the output of another step",
"for",
"target",
",",
"in_node",
"in",
"self",
".",
"_all_depends_files",
".",
"items",
"(",
")",
":",
"if",
"target",
"not",
"in",
"self",
".",
"_all_output_files",
":",
"continue",
"# it is possible that multiple nodes satisfy the same target",
"out_node",
"=",
"self",
".",
"_all_output_files",
"[",
"target",
"]",
"for",
"i",
"in",
"in_node",
":",
"for",
"j",
"in",
"out_node",
":",
"if",
"j",
"!=",
"i",
":",
"self",
".",
"add_edge",
"(",
"j",
",",
"i",
")",
"self",
".",
"mark_dirty",
"(",
")"
] |
Connect nodes according to status of targets
|
[
"Connect",
"nodes",
"according",
"to",
"status",
"of",
"targets"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/dag.py#L317-L373
|
9,509
|
vatlab/SoS
|
src/sos/task_engines.py
|
TaskEngine.monitor_tasks
|
def monitor_tasks(self, tasks=None, status=None, age=None):
'''Start monitoring specified or all tasks'''
self.engine_ready.wait()
if not tasks:
tasks = self.task_status.keys()
else:
tasks = [x for x in tasks if x in self.task_status]
# we only monitor running tasks
with threading.Lock():
for task in tasks:
if self.task_status[task] in (
'submitted',
'running') and task not in self.running_tasks:
# these tasks will be actively monitored
self.running_tasks.append(task)
#
if age is not None:
age = expand_time(age, default_unit='d')
return sorted([
(x, self.task_status[x], self.task_info[x].get(
'data', (time.time(), None, None)))
for x in tasks
if (status is None or self.task_status[x] in status) and
(age is None or (
(age > 0 and time.time() -
self.task_info[x].get('date',
(time.time(), None, None))[0] > age) or
(age < 0 and time.time() -
self.task_info[x].get('date',
(time.time(), None, None))[0] < -age)))
],
key=lambda x: -x[2][0])
|
python
|
def monitor_tasks(self, tasks=None, status=None, age=None):
'''Start monitoring specified or all tasks'''
self.engine_ready.wait()
if not tasks:
tasks = self.task_status.keys()
else:
tasks = [x for x in tasks if x in self.task_status]
# we only monitor running tasks
with threading.Lock():
for task in tasks:
if self.task_status[task] in (
'submitted',
'running') and task not in self.running_tasks:
# these tasks will be actively monitored
self.running_tasks.append(task)
#
if age is not None:
age = expand_time(age, default_unit='d')
return sorted([
(x, self.task_status[x], self.task_info[x].get(
'data', (time.time(), None, None)))
for x in tasks
if (status is None or self.task_status[x] in status) and
(age is None or (
(age > 0 and time.time() -
self.task_info[x].get('date',
(time.time(), None, None))[0] > age) or
(age < 0 and time.time() -
self.task_info[x].get('date',
(time.time(), None, None))[0] < -age)))
],
key=lambda x: -x[2][0])
|
[
"def",
"monitor_tasks",
"(",
"self",
",",
"tasks",
"=",
"None",
",",
"status",
"=",
"None",
",",
"age",
"=",
"None",
")",
":",
"self",
".",
"engine_ready",
".",
"wait",
"(",
")",
"if",
"not",
"tasks",
":",
"tasks",
"=",
"self",
".",
"task_status",
".",
"keys",
"(",
")",
"else",
":",
"tasks",
"=",
"[",
"x",
"for",
"x",
"in",
"tasks",
"if",
"x",
"in",
"self",
".",
"task_status",
"]",
"# we only monitor running tasks",
"with",
"threading",
".",
"Lock",
"(",
")",
":",
"for",
"task",
"in",
"tasks",
":",
"if",
"self",
".",
"task_status",
"[",
"task",
"]",
"in",
"(",
"'submitted'",
",",
"'running'",
")",
"and",
"task",
"not",
"in",
"self",
".",
"running_tasks",
":",
"# these tasks will be actively monitored",
"self",
".",
"running_tasks",
".",
"append",
"(",
"task",
")",
"#",
"if",
"age",
"is",
"not",
"None",
":",
"age",
"=",
"expand_time",
"(",
"age",
",",
"default_unit",
"=",
"'d'",
")",
"return",
"sorted",
"(",
"[",
"(",
"x",
",",
"self",
".",
"task_status",
"[",
"x",
"]",
",",
"self",
".",
"task_info",
"[",
"x",
"]",
".",
"get",
"(",
"'data'",
",",
"(",
"time",
".",
"time",
"(",
")",
",",
"None",
",",
"None",
")",
")",
")",
"for",
"x",
"in",
"tasks",
"if",
"(",
"status",
"is",
"None",
"or",
"self",
".",
"task_status",
"[",
"x",
"]",
"in",
"status",
")",
"and",
"(",
"age",
"is",
"None",
"or",
"(",
"(",
"age",
">",
"0",
"and",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"task_info",
"[",
"x",
"]",
".",
"get",
"(",
"'date'",
",",
"(",
"time",
".",
"time",
"(",
")",
",",
"None",
",",
"None",
")",
")",
"[",
"0",
"]",
">",
"age",
")",
"or",
"(",
"age",
"<",
"0",
"and",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"task_info",
"[",
"x",
"]",
".",
"get",
"(",
"'date'",
",",
"(",
"time",
".",
"time",
"(",
")",
",",
"None",
",",
"None",
")",
")",
"[",
"0",
"]",
"<",
"-",
"age",
")",
")",
")",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"-",
"x",
"[",
"2",
"]",
"[",
"0",
"]",
")"
] |
Start monitoring specified or all tasks
|
[
"Start",
"monitoring",
"specified",
"or",
"all",
"tasks"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/task_engines.py#L97-L129
|
9,510
|
vatlab/SoS
|
src/sos/task_engines.py
|
BackgroundProcess_TaskEngine._submit_task_with_template
|
def _submit_task_with_template(self, task_ids):
'''Submit tasks by interpolating a shell script defined in job_template'''
runtime = self.config
runtime.update({
'workdir': os.getcwd(),
'cur_dir': os.getcwd(), # for backward compatibility
'verbosity': env.verbosity,
'sig_mode': env.config.get('sig_mode', 'default'),
'run_mode': env.config.get('run_mode', 'run'),
'home_dir': os.path.expanduser('~')
})
if '_runtime' in env.sos_dict:
runtime.update({
x: env.sos_dict['_runtime'][x]
for x in ('nodes', 'cores', 'workdir', 'mem', 'walltime')
if x in env.sos_dict['_runtime']
})
if 'nodes' not in runtime:
runtime['nodes'] = 1
if 'cores' not in runtime:
runtime['cores'] = 1
# let us first prepare a task file
job_text = ''
for task_id in task_ids:
runtime['task'] = task_id
try:
job_text += cfg_interpolate(self.job_template, runtime)
job_text += '\n'
except Exception as e:
raise ValueError(
f'Failed to generate job file for task {task_id}: {e}')
filename = task_ids[0] + ('.sh' if len(task_ids) == 1 else
f'-{task_ids[-1]}.sh')
# now we need to write a job file
job_file = os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', filename)
# do not translate newline under windows because the script will be executed
# under linux/mac
with open(job_file, 'w', newline='') as job:
job.write(job_text)
# then copy the job file to remote host if necessary
self.agent.send_task_file(job_file)
try:
cmd = f'bash ~/.sos/tasks/{filename}'
self.agent.run_command(cmd, wait_for_task=self.wait_for_task)
except Exception as e:
raise RuntimeError(f'Failed to submit task {task_ids}: {e}')
return True
|
python
|
def _submit_task_with_template(self, task_ids):
'''Submit tasks by interpolating a shell script defined in job_template'''
runtime = self.config
runtime.update({
'workdir': os.getcwd(),
'cur_dir': os.getcwd(), # for backward compatibility
'verbosity': env.verbosity,
'sig_mode': env.config.get('sig_mode', 'default'),
'run_mode': env.config.get('run_mode', 'run'),
'home_dir': os.path.expanduser('~')
})
if '_runtime' in env.sos_dict:
runtime.update({
x: env.sos_dict['_runtime'][x]
for x in ('nodes', 'cores', 'workdir', 'mem', 'walltime')
if x in env.sos_dict['_runtime']
})
if 'nodes' not in runtime:
runtime['nodes'] = 1
if 'cores' not in runtime:
runtime['cores'] = 1
# let us first prepare a task file
job_text = ''
for task_id in task_ids:
runtime['task'] = task_id
try:
job_text += cfg_interpolate(self.job_template, runtime)
job_text += '\n'
except Exception as e:
raise ValueError(
f'Failed to generate job file for task {task_id}: {e}')
filename = task_ids[0] + ('.sh' if len(task_ids) == 1 else
f'-{task_ids[-1]}.sh')
# now we need to write a job file
job_file = os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', filename)
# do not translate newline under windows because the script will be executed
# under linux/mac
with open(job_file, 'w', newline='') as job:
job.write(job_text)
# then copy the job file to remote host if necessary
self.agent.send_task_file(job_file)
try:
cmd = f'bash ~/.sos/tasks/{filename}'
self.agent.run_command(cmd, wait_for_task=self.wait_for_task)
except Exception as e:
raise RuntimeError(f'Failed to submit task {task_ids}: {e}')
return True
|
[
"def",
"_submit_task_with_template",
"(",
"self",
",",
"task_ids",
")",
":",
"runtime",
"=",
"self",
".",
"config",
"runtime",
".",
"update",
"(",
"{",
"'workdir'",
":",
"os",
".",
"getcwd",
"(",
")",
",",
"'cur_dir'",
":",
"os",
".",
"getcwd",
"(",
")",
",",
"# for backward compatibility",
"'verbosity'",
":",
"env",
".",
"verbosity",
",",
"'sig_mode'",
":",
"env",
".",
"config",
".",
"get",
"(",
"'sig_mode'",
",",
"'default'",
")",
",",
"'run_mode'",
":",
"env",
".",
"config",
".",
"get",
"(",
"'run_mode'",
",",
"'run'",
")",
",",
"'home_dir'",
":",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
"}",
")",
"if",
"'_runtime'",
"in",
"env",
".",
"sos_dict",
":",
"runtime",
".",
"update",
"(",
"{",
"x",
":",
"env",
".",
"sos_dict",
"[",
"'_runtime'",
"]",
"[",
"x",
"]",
"for",
"x",
"in",
"(",
"'nodes'",
",",
"'cores'",
",",
"'workdir'",
",",
"'mem'",
",",
"'walltime'",
")",
"if",
"x",
"in",
"env",
".",
"sos_dict",
"[",
"'_runtime'",
"]",
"}",
")",
"if",
"'nodes'",
"not",
"in",
"runtime",
":",
"runtime",
"[",
"'nodes'",
"]",
"=",
"1",
"if",
"'cores'",
"not",
"in",
"runtime",
":",
"runtime",
"[",
"'cores'",
"]",
"=",
"1",
"# let us first prepare a task file",
"job_text",
"=",
"''",
"for",
"task_id",
"in",
"task_ids",
":",
"runtime",
"[",
"'task'",
"]",
"=",
"task_id",
"try",
":",
"job_text",
"+=",
"cfg_interpolate",
"(",
"self",
".",
"job_template",
",",
"runtime",
")",
"job_text",
"+=",
"'\\n'",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"f'Failed to generate job file for task {task_id}: {e}'",
")",
"filename",
"=",
"task_ids",
"[",
"0",
"]",
"+",
"(",
"'.sh'",
"if",
"len",
"(",
"task_ids",
")",
"==",
"1",
"else",
"f'-{task_ids[-1]}.sh'",
")",
"# now we need to write a job file",
"job_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.sos'",
",",
"'tasks'",
",",
"filename",
")",
"# do not translate newline under windows because the script will be executed",
"# under linux/mac",
"with",
"open",
"(",
"job_file",
",",
"'w'",
",",
"newline",
"=",
"''",
")",
"as",
"job",
":",
"job",
".",
"write",
"(",
"job_text",
")",
"# then copy the job file to remote host if necessary",
"self",
".",
"agent",
".",
"send_task_file",
"(",
"job_file",
")",
"try",
":",
"cmd",
"=",
"f'bash ~/.sos/tasks/{filename}'",
"self",
".",
"agent",
".",
"run_command",
"(",
"cmd",
",",
"wait_for_task",
"=",
"self",
".",
"wait_for_task",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"RuntimeError",
"(",
"f'Failed to submit task {task_ids}: {e}'",
")",
"return",
"True"
] |
Submit tasks by interpolating a shell script defined in job_template
|
[
"Submit",
"tasks",
"by",
"interpolating",
"a",
"shell",
"script",
"defined",
"in",
"job_template"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/task_engines.py#L651-L702
|
9,511
|
vatlab/SoS
|
src/sos/parser.py
|
is_type_hint
|
def is_type_hint(stmt: str) -> bool:
'''Try to differentiate
var: type = value
with
action: input = whatever
'''
if stmt.count('=') > 1:
return False
if ':' not in stmt:
return False
#
# action:
if not stmt.split(':')[1].strip():
return False
#
# action: int
#
# or
#
# input: variable
#
if '=' not in stmt:
action, par = [x.strip() for x in stmt.split(':', 1)]
else:
# one parameter?
#
# action: input={'a': b}
#
action, par = [x.strip() for x in stmt.split('=', 1)[0].split(':', 1)]
if action in SOS_DIRECTIVES:
return False
if par in SOS_ACTION_OPTIONS:
return False
# if par is something like List[Any], or 'classname'
if not par.isidentifier():
return True
# if action is a builtin function, such as sort, it cannot be
# a variable assignment.
if action in dir(builtins):
return False
# if action is registered
global _action_list
if _action_list is None:
import pkg_resources
_action_list = [
x.name for x in pkg_resources.iter_entry_points(group='sos_actions')
]
if action in _action_list:
return False
# if par is something like List, Tuple, str
if par in dir(typing) or par in dir(builtins):
return True
# if not quite sure???
env.logger.debug(
f"Failed to tell if '{stmt}' is an assignment with type hint or function in script format. Assuming type hint."
)
# regular function written in this format?
return True
|
python
|
def is_type_hint(stmt: str) -> bool:
'''Try to differentiate
var: type = value
with
action: input = whatever
'''
if stmt.count('=') > 1:
return False
if ':' not in stmt:
return False
#
# action:
if not stmt.split(':')[1].strip():
return False
#
# action: int
#
# or
#
# input: variable
#
if '=' not in stmt:
action, par = [x.strip() for x in stmt.split(':', 1)]
else:
# one parameter?
#
# action: input={'a': b}
#
action, par = [x.strip() for x in stmt.split('=', 1)[0].split(':', 1)]
if action in SOS_DIRECTIVES:
return False
if par in SOS_ACTION_OPTIONS:
return False
# if par is something like List[Any], or 'classname'
if not par.isidentifier():
return True
# if action is a builtin function, such as sort, it cannot be
# a variable assignment.
if action in dir(builtins):
return False
# if action is registered
global _action_list
if _action_list is None:
import pkg_resources
_action_list = [
x.name for x in pkg_resources.iter_entry_points(group='sos_actions')
]
if action in _action_list:
return False
# if par is something like List, Tuple, str
if par in dir(typing) or par in dir(builtins):
return True
# if not quite sure???
env.logger.debug(
f"Failed to tell if '{stmt}' is an assignment with type hint or function in script format. Assuming type hint."
)
# regular function written in this format?
return True
|
[
"def",
"is_type_hint",
"(",
"stmt",
":",
"str",
")",
"->",
"bool",
":",
"if",
"stmt",
".",
"count",
"(",
"'='",
")",
">",
"1",
":",
"return",
"False",
"if",
"':'",
"not",
"in",
"stmt",
":",
"return",
"False",
"#",
"# action:",
"if",
"not",
"stmt",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
":",
"return",
"False",
"#",
"# action: int",
"#",
"# or",
"#",
"# input: variable",
"#",
"if",
"'='",
"not",
"in",
"stmt",
":",
"action",
",",
"par",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"stmt",
".",
"split",
"(",
"':'",
",",
"1",
")",
"]",
"else",
":",
"# one parameter?",
"#",
"# action: input={'a': b}",
"#",
"action",
",",
"par",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"stmt",
".",
"split",
"(",
"'='",
",",
"1",
")",
"[",
"0",
"]",
".",
"split",
"(",
"':'",
",",
"1",
")",
"]",
"if",
"action",
"in",
"SOS_DIRECTIVES",
":",
"return",
"False",
"if",
"par",
"in",
"SOS_ACTION_OPTIONS",
":",
"return",
"False",
"# if par is something like List[Any], or 'classname'",
"if",
"not",
"par",
".",
"isidentifier",
"(",
")",
":",
"return",
"True",
"# if action is a builtin function, such as sort, it cannot be",
"# a variable assignment.",
"if",
"action",
"in",
"dir",
"(",
"builtins",
")",
":",
"return",
"False",
"# if action is registered",
"global",
"_action_list",
"if",
"_action_list",
"is",
"None",
":",
"import",
"pkg_resources",
"_action_list",
"=",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"pkg_resources",
".",
"iter_entry_points",
"(",
"group",
"=",
"'sos_actions'",
")",
"]",
"if",
"action",
"in",
"_action_list",
":",
"return",
"False",
"# if par is something like List, Tuple, str",
"if",
"par",
"in",
"dir",
"(",
"typing",
")",
"or",
"par",
"in",
"dir",
"(",
"builtins",
")",
":",
"return",
"True",
"# if not quite sure???",
"env",
".",
"logger",
".",
"debug",
"(",
"f\"Failed to tell if '{stmt}' is an assignment with type hint or function in script format. Assuming type hint.\"",
")",
"# regular function written in this format?",
"return",
"True"
] |
Try to differentiate
var: type = value
with
action: input = whatever
|
[
"Try",
"to",
"differentiate"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L60-L130
|
9,512
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Step.indented_script
|
def indented_script(self) -> bool:
''' check self._script and see if it is indented '''
# get all leading space, tab and newline
leading = INDENTED.match(self._script)
return 0 if leading is None else len(leading.group(2))
|
python
|
def indented_script(self) -> bool:
''' check self._script and see if it is indented '''
# get all leading space, tab and newline
leading = INDENTED.match(self._script)
return 0 if leading is None else len(leading.group(2))
|
[
"def",
"indented_script",
"(",
"self",
")",
"->",
"bool",
":",
"# get all leading space, tab and newline",
"leading",
"=",
"INDENTED",
".",
"match",
"(",
"self",
".",
"_script",
")",
"return",
"0",
"if",
"leading",
"is",
"None",
"else",
"len",
"(",
"leading",
".",
"group",
"(",
"2",
")",
")"
] |
check self._script and see if it is indented
|
[
"check",
"self",
".",
"_script",
"and",
"see",
"if",
"it",
"is",
"indented"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L299-L303
|
9,513
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Step.category
|
def category(self) -> Optional[str]:
'''Determine the category of existing statement'''
if self.statements:
if self.statements[-1][0] == ':':
# a hack. ... to avoid calling isValid recursively
def validDirective():
if not self.values:
return True
if self.values[-1].strip().endswith(','):
return False
try:
compile(
'func(' + ''.join(self.values) + ')',
filename='<string>',
mode='eval')
except Exception:
return False
return True
if validDirective() and self._action is not None:
return 'script'
return 'directive'
return 'statements'
return None
|
python
|
def category(self) -> Optional[str]:
'''Determine the category of existing statement'''
if self.statements:
if self.statements[-1][0] == ':':
# a hack. ... to avoid calling isValid recursively
def validDirective():
if not self.values:
return True
if self.values[-1].strip().endswith(','):
return False
try:
compile(
'func(' + ''.join(self.values) + ')',
filename='<string>',
mode='eval')
except Exception:
return False
return True
if validDirective() and self._action is not None:
return 'script'
return 'directive'
return 'statements'
return None
|
[
"def",
"category",
"(",
"self",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"if",
"self",
".",
"statements",
":",
"if",
"self",
".",
"statements",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"':'",
":",
"# a hack. ... to avoid calling isValid recursively",
"def",
"validDirective",
"(",
")",
":",
"if",
"not",
"self",
".",
"values",
":",
"return",
"True",
"if",
"self",
".",
"values",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
".",
"endswith",
"(",
"','",
")",
":",
"return",
"False",
"try",
":",
"compile",
"(",
"'func('",
"+",
"''",
".",
"join",
"(",
"self",
".",
"values",
")",
"+",
"')'",
",",
"filename",
"=",
"'<string>'",
",",
"mode",
"=",
"'eval'",
")",
"except",
"Exception",
":",
"return",
"False",
"return",
"True",
"if",
"validDirective",
"(",
")",
"and",
"self",
".",
"_action",
"is",
"not",
"None",
":",
"return",
"'script'",
"return",
"'directive'",
"return",
"'statements'",
"return",
"None"
] |
Determine the category of existing statement
|
[
"Determine",
"the",
"category",
"of",
"existing",
"statement"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L305-L328
|
9,514
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Step.isValid
|
def isValid(self) -> bool:
'''Determine if the statement, expression or directive is valid. Otherwise
the parser will continue until a valid multi-line expression or statement
can be found.'''
if not self.values:
return True
try:
if self.category() == 'directive':
# we add func() because the expression can be multi-line and
# can have keyword-argument like options
#
# However, python considers
#
# func('value', )
#
# a valid syntax but we do want , to continue to the next line
if self.values[-1].strip().endswith(','):
self.error_msg = 'Trailing ,'
return False
# to allow type trait, we will have to test the expression as if in a function
# definition, with something like "def func(a : str, b : list=[])"
try:
compile(
'func(' + ''.join(self.values) + ')',
filename='<string>',
mode='eval')
except:
compile(
'def func(' + ''.join(self.values) + '):\n pass',
filename='<string>',
mode='exec')
elif self.category() == 'statements':
compile((''.join(self.values)),
filename='<string>',
mode='exec')
elif self.category() == 'script':
#
# A valid script has an identation defined at the first line. That is to say
#
# line 1
# line 2
#
# is allowed
#
# line 1
# line 2
#
# line 3
#
# is not so the addition of line 3 would fail. However, the last line
# will be tested before inserted so this function will always return True
return True
else:
raise RuntimeError(
f'Unrecognized expression type {self.category()}')
return True
except Exception as e:
self.error_msg = repr(e)
return False
|
python
|
def isValid(self) -> bool:
'''Determine if the statement, expression or directive is valid. Otherwise
the parser will continue until a valid multi-line expression or statement
can be found.'''
if not self.values:
return True
try:
if self.category() == 'directive':
# we add func() because the expression can be multi-line and
# can have keyword-argument like options
#
# However, python considers
#
# func('value', )
#
# a valid syntax but we do want , to continue to the next line
if self.values[-1].strip().endswith(','):
self.error_msg = 'Trailing ,'
return False
# to allow type trait, we will have to test the expression as if in a function
# definition, with something like "def func(a : str, b : list=[])"
try:
compile(
'func(' + ''.join(self.values) + ')',
filename='<string>',
mode='eval')
except:
compile(
'def func(' + ''.join(self.values) + '):\n pass',
filename='<string>',
mode='exec')
elif self.category() == 'statements':
compile((''.join(self.values)),
filename='<string>',
mode='exec')
elif self.category() == 'script':
#
# A valid script has an identation defined at the first line. That is to say
#
# line 1
# line 2
#
# is allowed
#
# line 1
# line 2
#
# line 3
#
# is not so the addition of line 3 would fail. However, the last line
# will be tested before inserted so this function will always return True
return True
else:
raise RuntimeError(
f'Unrecognized expression type {self.category()}')
return True
except Exception as e:
self.error_msg = repr(e)
return False
|
[
"def",
"isValid",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"not",
"self",
".",
"values",
":",
"return",
"True",
"try",
":",
"if",
"self",
".",
"category",
"(",
")",
"==",
"'directive'",
":",
"# we add func() because the expression can be multi-line and",
"# can have keyword-argument like options",
"#",
"# However, python considers",
"#",
"# func('value', )",
"#",
"# a valid syntax but we do want , to continue to the next line",
"if",
"self",
".",
"values",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
".",
"endswith",
"(",
"','",
")",
":",
"self",
".",
"error_msg",
"=",
"'Trailing ,'",
"return",
"False",
"# to allow type trait, we will have to test the expression as if in a function",
"# definition, with something like \"def func(a : str, b : list=[])\"",
"try",
":",
"compile",
"(",
"'func('",
"+",
"''",
".",
"join",
"(",
"self",
".",
"values",
")",
"+",
"')'",
",",
"filename",
"=",
"'<string>'",
",",
"mode",
"=",
"'eval'",
")",
"except",
":",
"compile",
"(",
"'def func('",
"+",
"''",
".",
"join",
"(",
"self",
".",
"values",
")",
"+",
"'):\\n pass'",
",",
"filename",
"=",
"'<string>'",
",",
"mode",
"=",
"'exec'",
")",
"elif",
"self",
".",
"category",
"(",
")",
"==",
"'statements'",
":",
"compile",
"(",
"(",
"''",
".",
"join",
"(",
"self",
".",
"values",
")",
")",
",",
"filename",
"=",
"'<string>'",
",",
"mode",
"=",
"'exec'",
")",
"elif",
"self",
".",
"category",
"(",
")",
"==",
"'script'",
":",
"#",
"# A valid script has an identation defined at the first line. That is to say",
"#",
"# line 1",
"# line 2",
"#",
"# is allowed",
"#",
"# line 1",
"# line 2",
"#",
"# line 3",
"#",
"# is not so the addition of line 3 would fail. However, the last line",
"# will be tested before inserted so this function will always return True",
"return",
"True",
"else",
":",
"raise",
"RuntimeError",
"(",
"f'Unrecognized expression type {self.category()}'",
")",
"return",
"True",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"error_msg",
"=",
"repr",
"(",
"e",
")",
"return",
"False"
] |
Determine if the statement, expression or directive is valid. Otherwise
the parser will continue until a valid multi-line expression or statement
can be found.
|
[
"Determine",
"if",
"the",
"statement",
"expression",
"or",
"directive",
"is",
"valid",
".",
"Otherwise",
"the",
"parser",
"will",
"continue",
"until",
"a",
"valid",
"multi",
"-",
"line",
"expression",
"or",
"statement",
"can",
"be",
"found",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L330-L388
|
9,515
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Step.extend
|
def extend(self, line: str) -> None:
'''Extend the current directive, expression or script'''
if self.category() == 'directive':
self.add_directive(None, line)
elif self.category() == 'script':
self._script += line
else:
self.add_statement(line)
|
python
|
def extend(self, line: str) -> None:
'''Extend the current directive, expression or script'''
if self.category() == 'directive':
self.add_directive(None, line)
elif self.category() == 'script':
self._script += line
else:
self.add_statement(line)
|
[
"def",
"extend",
"(",
"self",
",",
"line",
":",
"str",
")",
"->",
"None",
":",
"if",
"self",
".",
"category",
"(",
")",
"==",
"'directive'",
":",
"self",
".",
"add_directive",
"(",
"None",
",",
"line",
")",
"elif",
"self",
".",
"category",
"(",
")",
"==",
"'script'",
":",
"self",
".",
"_script",
"+=",
"line",
"else",
":",
"self",
".",
"add_statement",
"(",
"line",
")"
] |
Extend the current directive, expression or script
|
[
"Extend",
"the",
"current",
"directive",
"expression",
"or",
"script"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L394-L401
|
9,516
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Step.add_statement
|
def add_statement(self, line: str, lineno: Optional[int] = None) -> None:
'''statements are regular python statements'''
# there can be only one statement block
if self.category() != 'statements':
self.values = [line]
else:
self.values.append(line)
if self.statements and self.statements[-1][0] == '!':
self.statements[-1][-1] += line
else:
self.statements.append(['!', line])
if lineno:
self.lineno = lineno
|
python
|
def add_statement(self, line: str, lineno: Optional[int] = None) -> None:
'''statements are regular python statements'''
# there can be only one statement block
if self.category() != 'statements':
self.values = [line]
else:
self.values.append(line)
if self.statements and self.statements[-1][0] == '!':
self.statements[-1][-1] += line
else:
self.statements.append(['!', line])
if lineno:
self.lineno = lineno
|
[
"def",
"add_statement",
"(",
"self",
",",
"line",
":",
"str",
",",
"lineno",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"None",
":",
"# there can be only one statement block",
"if",
"self",
".",
"category",
"(",
")",
"!=",
"'statements'",
":",
"self",
".",
"values",
"=",
"[",
"line",
"]",
"else",
":",
"self",
".",
"values",
".",
"append",
"(",
"line",
")",
"if",
"self",
".",
"statements",
"and",
"self",
".",
"statements",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"'!'",
":",
"self",
".",
"statements",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"+=",
"line",
"else",
":",
"self",
".",
"statements",
".",
"append",
"(",
"[",
"'!'",
",",
"line",
"]",
")",
"if",
"lineno",
":",
"self",
".",
"lineno",
"=",
"lineno"
] |
statements are regular python statements
|
[
"statements",
"are",
"regular",
"python",
"statements"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L434-L446
|
9,517
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Step.get_tokens
|
def get_tokens(self) -> str:
'''Get tokens after input statement'''
def _get_tokens(statement):
return [
x[1]
for x in generate_tokens(StringIO(statement).readline)
if x[1] not in ('', '\n')
]
tokens: List = []
for statement in self.statements:
tokens.extend(
_get_tokens(statement[2] if statement[0] ==
':' else statement[1]))
if self.task:
tokens.extend(_get_tokens(self.task))
return ' '.join(tokens)
|
python
|
def get_tokens(self) -> str:
'''Get tokens after input statement'''
def _get_tokens(statement):
return [
x[1]
for x in generate_tokens(StringIO(statement).readline)
if x[1] not in ('', '\n')
]
tokens: List = []
for statement in self.statements:
tokens.extend(
_get_tokens(statement[2] if statement[0] ==
':' else statement[1]))
if self.task:
tokens.extend(_get_tokens(self.task))
return ' '.join(tokens)
|
[
"def",
"get_tokens",
"(",
"self",
")",
"->",
"str",
":",
"def",
"_get_tokens",
"(",
"statement",
")",
":",
"return",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"generate_tokens",
"(",
"StringIO",
"(",
"statement",
")",
".",
"readline",
")",
"if",
"x",
"[",
"1",
"]",
"not",
"in",
"(",
"''",
",",
"'\\n'",
")",
"]",
"tokens",
":",
"List",
"=",
"[",
"]",
"for",
"statement",
"in",
"self",
".",
"statements",
":",
"tokens",
".",
"extend",
"(",
"_get_tokens",
"(",
"statement",
"[",
"2",
"]",
"if",
"statement",
"[",
"0",
"]",
"==",
"':'",
"else",
"statement",
"[",
"1",
"]",
")",
")",
"if",
"self",
".",
"task",
":",
"tokens",
".",
"extend",
"(",
"_get_tokens",
"(",
"self",
".",
"task",
")",
")",
"return",
"' '",
".",
"join",
"(",
"tokens",
")"
] |
Get tokens after input statement
|
[
"Get",
"tokens",
"after",
"input",
"statement"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L484-L503
|
9,518
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Step.show
|
def show(self):
'''Output for command sos show'''
textWidth = max(60, shutil.get_terminal_size((80, 20)).columns)
text = f' {self.step_name() + ":":<21} ' + self.comment
print('\n'.join(
textwrap.wrap(
text,
width=textWidth,
initial_indent='',
subsequent_indent=' ' * 24)))
local_parameters = {
x: y
for x, y in self.parameters.items()
if x not in self.global_parameters
}
if local_parameters:
print(' Workflow Options:')
for name, (value, comment) in local_parameters.items():
par_str = f' {format_par(name, value)}'
print(par_str)
if comment:
print('\n'.join(
textwrap.wrap(
comment,
width=textWidth,
initial_indent=' ' * 24,
subsequent_indent=' ' * 24)))
|
python
|
def show(self):
'''Output for command sos show'''
textWidth = max(60, shutil.get_terminal_size((80, 20)).columns)
text = f' {self.step_name() + ":":<21} ' + self.comment
print('\n'.join(
textwrap.wrap(
text,
width=textWidth,
initial_indent='',
subsequent_indent=' ' * 24)))
local_parameters = {
x: y
for x, y in self.parameters.items()
if x not in self.global_parameters
}
if local_parameters:
print(' Workflow Options:')
for name, (value, comment) in local_parameters.items():
par_str = f' {format_par(name, value)}'
print(par_str)
if comment:
print('\n'.join(
textwrap.wrap(
comment,
width=textWidth,
initial_indent=' ' * 24,
subsequent_indent=' ' * 24)))
|
[
"def",
"show",
"(",
"self",
")",
":",
"textWidth",
"=",
"max",
"(",
"60",
",",
"shutil",
".",
"get_terminal_size",
"(",
"(",
"80",
",",
"20",
")",
")",
".",
"columns",
")",
"text",
"=",
"f' {self.step_name() + \":\":<21} '",
"+",
"self",
".",
"comment",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"textwrap",
".",
"wrap",
"(",
"text",
",",
"width",
"=",
"textWidth",
",",
"initial_indent",
"=",
"''",
",",
"subsequent_indent",
"=",
"' '",
"*",
"24",
")",
")",
")",
"local_parameters",
"=",
"{",
"x",
":",
"y",
"for",
"x",
",",
"y",
"in",
"self",
".",
"parameters",
".",
"items",
"(",
")",
"if",
"x",
"not",
"in",
"self",
".",
"global_parameters",
"}",
"if",
"local_parameters",
":",
"print",
"(",
"' Workflow Options:'",
")",
"for",
"name",
",",
"(",
"value",
",",
"comment",
")",
"in",
"local_parameters",
".",
"items",
"(",
")",
":",
"par_str",
"=",
"f' {format_par(name, value)}'",
"print",
"(",
"par_str",
")",
"if",
"comment",
":",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"textwrap",
".",
"wrap",
"(",
"comment",
",",
"width",
"=",
"textWidth",
",",
"initial_indent",
"=",
"' '",
"*",
"24",
",",
"subsequent_indent",
"=",
"' '",
"*",
"24",
")",
")",
")"
] |
Output for command sos show
|
[
"Output",
"for",
"command",
"sos",
"show"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L610-L636
|
9,519
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Workflow.extend
|
def extend(self, workflow: 'SoS_Workflow') -> None:
'''Append another workflow to existing one to created a combined workflow'''
# all sections are simply appended ...
# but we will need to make sure that the new workflow is
# executed after the previous one.
if not workflow.sections:
return
if not self.sections:
self.sections = workflow.sections
return
section = workflow.sections[0]
depends_idx = [
idx for idx, stmt in enumerate(section.statements)
if stmt[0] == ':' and stmt[1] == 'depends'
]
if not depends_idx:
section.statements.insert(0, [
':', 'depends', f"sos_step('{self.sections[-1].step_name()}')"
])
else:
section.statements[depends_idx[0]][2] = section.statements[depends_idx[0]][2].strip() + \
(", " if section.statements[depends_idx[0]][2].strip() else "") + \
f"sos_step('{self.sections[-1].step_name()}')\n"
self.sections.extend(workflow.sections)
|
python
|
def extend(self, workflow: 'SoS_Workflow') -> None:
'''Append another workflow to existing one to created a combined workflow'''
# all sections are simply appended ...
# but we will need to make sure that the new workflow is
# executed after the previous one.
if not workflow.sections:
return
if not self.sections:
self.sections = workflow.sections
return
section = workflow.sections[0]
depends_idx = [
idx for idx, stmt in enumerate(section.statements)
if stmt[0] == ':' and stmt[1] == 'depends'
]
if not depends_idx:
section.statements.insert(0, [
':', 'depends', f"sos_step('{self.sections[-1].step_name()}')"
])
else:
section.statements[depends_idx[0]][2] = section.statements[depends_idx[0]][2].strip() + \
(", " if section.statements[depends_idx[0]][2].strip() else "") + \
f"sos_step('{self.sections[-1].step_name()}')\n"
self.sections.extend(workflow.sections)
|
[
"def",
"extend",
"(",
"self",
",",
"workflow",
":",
"'SoS_Workflow'",
")",
"->",
"None",
":",
"# all sections are simply appended ...",
"# but we will need to make sure that the new workflow is",
"# executed after the previous one.",
"if",
"not",
"workflow",
".",
"sections",
":",
"return",
"if",
"not",
"self",
".",
"sections",
":",
"self",
".",
"sections",
"=",
"workflow",
".",
"sections",
"return",
"section",
"=",
"workflow",
".",
"sections",
"[",
"0",
"]",
"depends_idx",
"=",
"[",
"idx",
"for",
"idx",
",",
"stmt",
"in",
"enumerate",
"(",
"section",
".",
"statements",
")",
"if",
"stmt",
"[",
"0",
"]",
"==",
"':'",
"and",
"stmt",
"[",
"1",
"]",
"==",
"'depends'",
"]",
"if",
"not",
"depends_idx",
":",
"section",
".",
"statements",
".",
"insert",
"(",
"0",
",",
"[",
"':'",
",",
"'depends'",
",",
"f\"sos_step('{self.sections[-1].step_name()}')\"",
"]",
")",
"else",
":",
"section",
".",
"statements",
"[",
"depends_idx",
"[",
"0",
"]",
"]",
"[",
"2",
"]",
"=",
"section",
".",
"statements",
"[",
"depends_idx",
"[",
"0",
"]",
"]",
"[",
"2",
"]",
".",
"strip",
"(",
")",
"+",
"(",
"\", \"",
"if",
"section",
".",
"statements",
"[",
"depends_idx",
"[",
"0",
"]",
"]",
"[",
"2",
"]",
".",
"strip",
"(",
")",
"else",
"\"\"",
")",
"+",
"f\"sos_step('{self.sections[-1].step_name()}')\\n\"",
"self",
".",
"sections",
".",
"extend",
"(",
"workflow",
".",
"sections",
")"
] |
Append another workflow to existing one to created a combined workflow
|
[
"Append",
"another",
"workflow",
"to",
"existing",
"one",
"to",
"created",
"a",
"combined",
"workflow"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L732-L755
|
9,520
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Script.add_comment
|
def add_comment(self, line: str) -> None:
'''Keeping track of "last comment" for section and parameter '''
# the rule is like
#
# # comment line --> add to last comment
# blank line --> clears last comment
# [ ] --> use last comment
# parameter: --> use last comment
# All others: clear last comment
self._last_comment += (' ' if self._last_comment else '') + \
line.lstrip('#').strip()
|
python
|
def add_comment(self, line: str) -> None:
'''Keeping track of "last comment" for section and parameter '''
# the rule is like
#
# # comment line --> add to last comment
# blank line --> clears last comment
# [ ] --> use last comment
# parameter: --> use last comment
# All others: clear last comment
self._last_comment += (' ' if self._last_comment else '') + \
line.lstrip('#').strip()
|
[
"def",
"add_comment",
"(",
"self",
",",
"line",
":",
"str",
")",
"->",
"None",
":",
"# the rule is like",
"#",
"# # comment line --> add to last comment",
"# blank line --> clears last comment",
"# [ ] --> use last comment",
"# parameter: --> use last comment",
"# All others: clear last comment",
"self",
".",
"_last_comment",
"+=",
"(",
"' '",
"if",
"self",
".",
"_last_comment",
"else",
"''",
")",
"+",
"line",
".",
"lstrip",
"(",
"'#'",
")",
".",
"strip",
"(",
")"
] |
Keeping track of "last comment" for section and parameter
|
[
"Keeping",
"track",
"of",
"last",
"comment",
"for",
"section",
"and",
"parameter"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L924-L934
|
9,521
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Script.workflow
|
def workflow(self,
workflow_name: Optional[str] = None,
use_default: bool = True) -> SoS_Workflow:
'''Return a workflow with name_step+name_step specified in wf_name
This function might be called recursively because of nested
workflow.'''
if workflow_name is None and not use_default:
return SoS_Workflow(self.content, '', '', self.sections,
self.global_stmts)
allowed_steps = None
if not workflow_name:
wf_name = ''
else:
# if consists of multiple workflows
if '+' in workflow_name:
wfs = []
for wf in workflow_name.split('+'):
if not SOS_SUBWORKFLOW.match(wf):
raise ValueError(
f'Incorrect workflow name {workflow_name}')
# if this is a combined workflow, extra_section might be specied.
wfs.append(self.workflow(wf))
combined_wf = wfs[0]
for wf in wfs[1:]:
combined_wf.extend(wf)
combined_wf.name = workflow_name
return combined_wf
# if a single workflow
# workflow_10:15 etc
mo = SOS_SUBWORKFLOW.match(workflow_name)
if not mo:
raise ValueError(f'Incorrect workflow name {workflow_name}')
wf_name, allowed_steps = mo.group('name', 'steps')
# check source
if not wf_name:
if len(self.workflows) == 1:
wf_name = list(self.workflows)[0]
elif self.default_workflow:
wf_name = self.default_workflow
elif 'default' in self.workflows or '' in self.workflows:
wf_name = 'default'
else:
raise ValueError(
'Name of workflow should be specified because '
'the script defines more than one pipelines without a default one. '
'Available pipelines are: {}.'.format(', '.join(
self.workflows)))
elif wf_name not in self.workflows and wf_name != 'default':
raise ValueError(
f'Workflow {wf_name} is undefined. Available workflows are: {", ".join(self.workflows)}'
)
return SoS_Workflow(self.content, wf_name, allowed_steps, self.sections,
self.global_stmts)
|
python
|
def workflow(self,
workflow_name: Optional[str] = None,
use_default: bool = True) -> SoS_Workflow:
'''Return a workflow with name_step+name_step specified in wf_name
This function might be called recursively because of nested
workflow.'''
if workflow_name is None and not use_default:
return SoS_Workflow(self.content, '', '', self.sections,
self.global_stmts)
allowed_steps = None
if not workflow_name:
wf_name = ''
else:
# if consists of multiple workflows
if '+' in workflow_name:
wfs = []
for wf in workflow_name.split('+'):
if not SOS_SUBWORKFLOW.match(wf):
raise ValueError(
f'Incorrect workflow name {workflow_name}')
# if this is a combined workflow, extra_section might be specied.
wfs.append(self.workflow(wf))
combined_wf = wfs[0]
for wf in wfs[1:]:
combined_wf.extend(wf)
combined_wf.name = workflow_name
return combined_wf
# if a single workflow
# workflow_10:15 etc
mo = SOS_SUBWORKFLOW.match(workflow_name)
if not mo:
raise ValueError(f'Incorrect workflow name {workflow_name}')
wf_name, allowed_steps = mo.group('name', 'steps')
# check source
if not wf_name:
if len(self.workflows) == 1:
wf_name = list(self.workflows)[0]
elif self.default_workflow:
wf_name = self.default_workflow
elif 'default' in self.workflows or '' in self.workflows:
wf_name = 'default'
else:
raise ValueError(
'Name of workflow should be specified because '
'the script defines more than one pipelines without a default one. '
'Available pipelines are: {}.'.format(', '.join(
self.workflows)))
elif wf_name not in self.workflows and wf_name != 'default':
raise ValueError(
f'Workflow {wf_name} is undefined. Available workflows are: {", ".join(self.workflows)}'
)
return SoS_Workflow(self.content, wf_name, allowed_steps, self.sections,
self.global_stmts)
|
[
"def",
"workflow",
"(",
"self",
",",
"workflow_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"use_default",
":",
"bool",
"=",
"True",
")",
"->",
"SoS_Workflow",
":",
"if",
"workflow_name",
"is",
"None",
"and",
"not",
"use_default",
":",
"return",
"SoS_Workflow",
"(",
"self",
".",
"content",
",",
"''",
",",
"''",
",",
"self",
".",
"sections",
",",
"self",
".",
"global_stmts",
")",
"allowed_steps",
"=",
"None",
"if",
"not",
"workflow_name",
":",
"wf_name",
"=",
"''",
"else",
":",
"# if consists of multiple workflows",
"if",
"'+'",
"in",
"workflow_name",
":",
"wfs",
"=",
"[",
"]",
"for",
"wf",
"in",
"workflow_name",
".",
"split",
"(",
"'+'",
")",
":",
"if",
"not",
"SOS_SUBWORKFLOW",
".",
"match",
"(",
"wf",
")",
":",
"raise",
"ValueError",
"(",
"f'Incorrect workflow name {workflow_name}'",
")",
"# if this is a combined workflow, extra_section might be specied.",
"wfs",
".",
"append",
"(",
"self",
".",
"workflow",
"(",
"wf",
")",
")",
"combined_wf",
"=",
"wfs",
"[",
"0",
"]",
"for",
"wf",
"in",
"wfs",
"[",
"1",
":",
"]",
":",
"combined_wf",
".",
"extend",
"(",
"wf",
")",
"combined_wf",
".",
"name",
"=",
"workflow_name",
"return",
"combined_wf",
"# if a single workflow",
"# workflow_10:15 etc",
"mo",
"=",
"SOS_SUBWORKFLOW",
".",
"match",
"(",
"workflow_name",
")",
"if",
"not",
"mo",
":",
"raise",
"ValueError",
"(",
"f'Incorrect workflow name {workflow_name}'",
")",
"wf_name",
",",
"allowed_steps",
"=",
"mo",
".",
"group",
"(",
"'name'",
",",
"'steps'",
")",
"# check source",
"if",
"not",
"wf_name",
":",
"if",
"len",
"(",
"self",
".",
"workflows",
")",
"==",
"1",
":",
"wf_name",
"=",
"list",
"(",
"self",
".",
"workflows",
")",
"[",
"0",
"]",
"elif",
"self",
".",
"default_workflow",
":",
"wf_name",
"=",
"self",
".",
"default_workflow",
"elif",
"'default'",
"in",
"self",
".",
"workflows",
"or",
"''",
"in",
"self",
".",
"workflows",
":",
"wf_name",
"=",
"'default'",
"else",
":",
"raise",
"ValueError",
"(",
"'Name of workflow should be specified because '",
"'the script defines more than one pipelines without a default one. '",
"'Available pipelines are: {}.'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"self",
".",
"workflows",
")",
")",
")",
"elif",
"wf_name",
"not",
"in",
"self",
".",
"workflows",
"and",
"wf_name",
"!=",
"'default'",
":",
"raise",
"ValueError",
"(",
"f'Workflow {wf_name} is undefined. Available workflows are: {\", \".join(self.workflows)}'",
")",
"return",
"SoS_Workflow",
"(",
"self",
".",
"content",
",",
"wf_name",
",",
"allowed_steps",
",",
"self",
".",
"sections",
",",
"self",
".",
"global_stmts",
")"
] |
Return a workflow with name_step+name_step specified in wf_name
This function might be called recursively because of nested
workflow.
|
[
"Return",
"a",
"workflow",
"with",
"name_step",
"+",
"name_step",
"specified",
"in",
"wf_name",
"This",
"function",
"might",
"be",
"called",
"recursively",
"because",
"of",
"nested",
"workflow",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L1368-L1421
|
9,522
|
vatlab/SoS
|
src/sos/parser.py
|
SoS_Script.print_help
|
def print_help(self, script_name: str):
'''print a help message from the script'''
textWidth = max(60, shutil.get_terminal_size((80, 20)).columns)
if len(script_name) > 20:
print(f'usage: sos run {script_name}')
print(
' [workflow_name | -t targets] [options] [workflow_options]'
)
else:
print(
f'usage: sos run {script_name} [workflow_name | -t targets] [options] [workflow_options]'
)
print(
' workflow_name: Single or combined workflows defined in this script'
)
print(' targets: One or more targets to generate')
print(
' options: Single-hyphen sos parameters (see "sos run -h" for details)'
)
print(
' workflow_options: Double-hyphen workflow-specific parameters'
)
description = [x.lstrip('# ').strip() for x in self.description]
description = textwrap.dedent('\n'.join(description)).strip()
if description:
print('\n' + description)
#
print('\nWorkflows:')
print(' ' + '\n '.join(self.workflows))
#
global_parameters = {}
for section in self.sections:
global_parameters.update(section.global_parameters)
if global_parameters:
print('\nGlobal Workflow Options:')
for name, (value, comment) in global_parameters.items():
par_str = f' {format_par(name, value)}'
print(par_str)
if comment:
print('\n'.join(
textwrap.wrap(
comment,
width=textWidth,
initial_indent=' ' * 24,
subsequent_indent=' ' * 24)))
#
print('\nSections')
for section in self.sections:
section.show()
|
python
|
def print_help(self, script_name: str):
'''print a help message from the script'''
textWidth = max(60, shutil.get_terminal_size((80, 20)).columns)
if len(script_name) > 20:
print(f'usage: sos run {script_name}')
print(
' [workflow_name | -t targets] [options] [workflow_options]'
)
else:
print(
f'usage: sos run {script_name} [workflow_name | -t targets] [options] [workflow_options]'
)
print(
' workflow_name: Single or combined workflows defined in this script'
)
print(' targets: One or more targets to generate')
print(
' options: Single-hyphen sos parameters (see "sos run -h" for details)'
)
print(
' workflow_options: Double-hyphen workflow-specific parameters'
)
description = [x.lstrip('# ').strip() for x in self.description]
description = textwrap.dedent('\n'.join(description)).strip()
if description:
print('\n' + description)
#
print('\nWorkflows:')
print(' ' + '\n '.join(self.workflows))
#
global_parameters = {}
for section in self.sections:
global_parameters.update(section.global_parameters)
if global_parameters:
print('\nGlobal Workflow Options:')
for name, (value, comment) in global_parameters.items():
par_str = f' {format_par(name, value)}'
print(par_str)
if comment:
print('\n'.join(
textwrap.wrap(
comment,
width=textWidth,
initial_indent=' ' * 24,
subsequent_indent=' ' * 24)))
#
print('\nSections')
for section in self.sections:
section.show()
|
[
"def",
"print_help",
"(",
"self",
",",
"script_name",
":",
"str",
")",
":",
"textWidth",
"=",
"max",
"(",
"60",
",",
"shutil",
".",
"get_terminal_size",
"(",
"(",
"80",
",",
"20",
")",
")",
".",
"columns",
")",
"if",
"len",
"(",
"script_name",
")",
">",
"20",
":",
"print",
"(",
"f'usage: sos run {script_name}'",
")",
"print",
"(",
"' [workflow_name | -t targets] [options] [workflow_options]'",
")",
"else",
":",
"print",
"(",
"f'usage: sos run {script_name} [workflow_name | -t targets] [options] [workflow_options]'",
")",
"print",
"(",
"' workflow_name: Single or combined workflows defined in this script'",
")",
"print",
"(",
"' targets: One or more targets to generate'",
")",
"print",
"(",
"' options: Single-hyphen sos parameters (see \"sos run -h\" for details)'",
")",
"print",
"(",
"' workflow_options: Double-hyphen workflow-specific parameters'",
")",
"description",
"=",
"[",
"x",
".",
"lstrip",
"(",
"'# '",
")",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"self",
".",
"description",
"]",
"description",
"=",
"textwrap",
".",
"dedent",
"(",
"'\\n'",
".",
"join",
"(",
"description",
")",
")",
".",
"strip",
"(",
")",
"if",
"description",
":",
"print",
"(",
"'\\n'",
"+",
"description",
")",
"#",
"print",
"(",
"'\\nWorkflows:'",
")",
"print",
"(",
"' '",
"+",
"'\\n '",
".",
"join",
"(",
"self",
".",
"workflows",
")",
")",
"#",
"global_parameters",
"=",
"{",
"}",
"for",
"section",
"in",
"self",
".",
"sections",
":",
"global_parameters",
".",
"update",
"(",
"section",
".",
"global_parameters",
")",
"if",
"global_parameters",
":",
"print",
"(",
"'\\nGlobal Workflow Options:'",
")",
"for",
"name",
",",
"(",
"value",
",",
"comment",
")",
"in",
"global_parameters",
".",
"items",
"(",
")",
":",
"par_str",
"=",
"f' {format_par(name, value)}'",
"print",
"(",
"par_str",
")",
"if",
"comment",
":",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"textwrap",
".",
"wrap",
"(",
"comment",
",",
"width",
"=",
"textWidth",
",",
"initial_indent",
"=",
"' '",
"*",
"24",
",",
"subsequent_indent",
"=",
"' '",
"*",
"24",
")",
")",
")",
"#",
"print",
"(",
"'\\nSections'",
")",
"for",
"section",
"in",
"self",
".",
"sections",
":",
"section",
".",
"show",
"(",
")"
] |
print a help message from the script
|
[
"print",
"a",
"help",
"message",
"from",
"the",
"script"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/parser.py#L1423-L1472
|
9,523
|
vatlab/SoS
|
src/sos/pattern.py
|
glob_wildcards
|
def glob_wildcards(pattern: str, files: Optional[List[str]] = None
) -> Dict[str, Union[List[Any], List[str]]]:
"""
Glob the values of the wildcards by matching the given pattern to the filesystem.
Returns a named tuple with a list of values for each wildcard.
"""
pattern = os.path.normpath(pattern)
if sys.platform == 'win32':
# we perform path matching with / slash only
pattern = pattern.replace('\\', '/')
first_wildcard = re.search("{[^{]", pattern)
dirname = os.path.dirname(pattern[:first_wildcard.start()]
) if first_wildcard else os.path.dirname(pattern)
if not dirname:
dirname = "."
names = [match.group('name') for match in SOS_WILDCARD.finditer(pattern)]
res = {x: [] for x in names}
pattern = re.compile(regex(pattern))
if files is None:
files = ((os.path.join(dirpath, f) if dirpath != "." else f)
for dirpath, dirnames, filenames in os.walk(dirname)
for f in chain(filenames, dirnames))
for f in files:
# we perform path matching with only / slash
match = re.match(pattern, str(f).replace('\\', '/'))
if match:
for name, value in match.groupdict().items():
res[name].append(value)
return res
|
python
|
def glob_wildcards(pattern: str, files: Optional[List[str]] = None
) -> Dict[str, Union[List[Any], List[str]]]:
"""
Glob the values of the wildcards by matching the given pattern to the filesystem.
Returns a named tuple with a list of values for each wildcard.
"""
pattern = os.path.normpath(pattern)
if sys.platform == 'win32':
# we perform path matching with / slash only
pattern = pattern.replace('\\', '/')
first_wildcard = re.search("{[^{]", pattern)
dirname = os.path.dirname(pattern[:first_wildcard.start()]
) if first_wildcard else os.path.dirname(pattern)
if not dirname:
dirname = "."
names = [match.group('name') for match in SOS_WILDCARD.finditer(pattern)]
res = {x: [] for x in names}
pattern = re.compile(regex(pattern))
if files is None:
files = ((os.path.join(dirpath, f) if dirpath != "." else f)
for dirpath, dirnames, filenames in os.walk(dirname)
for f in chain(filenames, dirnames))
for f in files:
# we perform path matching with only / slash
match = re.match(pattern, str(f).replace('\\', '/'))
if match:
for name, value in match.groupdict().items():
res[name].append(value)
return res
|
[
"def",
"glob_wildcards",
"(",
"pattern",
":",
"str",
",",
"files",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
"->",
"Dict",
"[",
"str",
",",
"Union",
"[",
"List",
"[",
"Any",
"]",
",",
"List",
"[",
"str",
"]",
"]",
"]",
":",
"pattern",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"pattern",
")",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"# we perform path matching with / slash only",
"pattern",
"=",
"pattern",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"first_wildcard",
"=",
"re",
".",
"search",
"(",
"\"{[^{]\"",
",",
"pattern",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"pattern",
"[",
":",
"first_wildcard",
".",
"start",
"(",
")",
"]",
")",
"if",
"first_wildcard",
"else",
"os",
".",
"path",
".",
"dirname",
"(",
"pattern",
")",
"if",
"not",
"dirname",
":",
"dirname",
"=",
"\".\"",
"names",
"=",
"[",
"match",
".",
"group",
"(",
"'name'",
")",
"for",
"match",
"in",
"SOS_WILDCARD",
".",
"finditer",
"(",
"pattern",
")",
"]",
"res",
"=",
"{",
"x",
":",
"[",
"]",
"for",
"x",
"in",
"names",
"}",
"pattern",
"=",
"re",
".",
"compile",
"(",
"regex",
"(",
"pattern",
")",
")",
"if",
"files",
"is",
"None",
":",
"files",
"=",
"(",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"f",
")",
"if",
"dirpath",
"!=",
"\".\"",
"else",
"f",
")",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"dirname",
")",
"for",
"f",
"in",
"chain",
"(",
"filenames",
",",
"dirnames",
")",
")",
"for",
"f",
"in",
"files",
":",
"# we perform path matching with only / slash",
"match",
"=",
"re",
".",
"match",
"(",
"pattern",
",",
"str",
"(",
"f",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
")",
"if",
"match",
":",
"for",
"name",
",",
"value",
"in",
"match",
".",
"groupdict",
"(",
")",
".",
"items",
"(",
")",
":",
"res",
"[",
"name",
"]",
".",
"append",
"(",
"value",
")",
"return",
"res"
] |
Glob the values of the wildcards by matching the given pattern to the filesystem.
Returns a named tuple with a list of values for each wildcard.
|
[
"Glob",
"the",
"values",
"of",
"the",
"wildcards",
"by",
"matching",
"the",
"given",
"pattern",
"to",
"the",
"filesystem",
".",
"Returns",
"a",
"named",
"tuple",
"with",
"a",
"list",
"of",
"values",
"for",
"each",
"wildcard",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/pattern.py#L56-L87
|
9,524
|
vatlab/SoS
|
src/sos/pattern.py
|
extract_pattern
|
def extract_pattern(pattern: str, ifiles: List[str]) -> Dict[str, any]:
'''This function match pattern to a list of input files, extract and return
pieces of filenames as a list of variables with keys defined by pattern.'''
res = glob_wildcards(pattern, [])
for ifile in ifiles:
matched = glob_wildcards(pattern, [ifile])
for key in matched.keys():
if not matched[key]:
#env.logger.warning('Filename {} does not match pattern {}. None returned.'.format(ifile, pattern))
res[key].append(None)
else:
res[key].extend(matched[key])
return res
|
python
|
def extract_pattern(pattern: str, ifiles: List[str]) -> Dict[str, any]:
'''This function match pattern to a list of input files, extract and return
pieces of filenames as a list of variables with keys defined by pattern.'''
res = glob_wildcards(pattern, [])
for ifile in ifiles:
matched = glob_wildcards(pattern, [ifile])
for key in matched.keys():
if not matched[key]:
#env.logger.warning('Filename {} does not match pattern {}. None returned.'.format(ifile, pattern))
res[key].append(None)
else:
res[key].extend(matched[key])
return res
|
[
"def",
"extract_pattern",
"(",
"pattern",
":",
"str",
",",
"ifiles",
":",
"List",
"[",
"str",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"any",
"]",
":",
"res",
"=",
"glob_wildcards",
"(",
"pattern",
",",
"[",
"]",
")",
"for",
"ifile",
"in",
"ifiles",
":",
"matched",
"=",
"glob_wildcards",
"(",
"pattern",
",",
"[",
"ifile",
"]",
")",
"for",
"key",
"in",
"matched",
".",
"keys",
"(",
")",
":",
"if",
"not",
"matched",
"[",
"key",
"]",
":",
"#env.logger.warning('Filename {} does not match pattern {}. None returned.'.format(ifile, pattern))",
"res",
"[",
"key",
"]",
".",
"append",
"(",
"None",
")",
"else",
":",
"res",
"[",
"key",
"]",
".",
"extend",
"(",
"matched",
"[",
"key",
"]",
")",
"return",
"res"
] |
This function match pattern to a list of input files, extract and return
pieces of filenames as a list of variables with keys defined by pattern.
|
[
"This",
"function",
"match",
"pattern",
"to",
"a",
"list",
"of",
"input",
"files",
"extract",
"and",
"return",
"pieces",
"of",
"filenames",
"as",
"a",
"list",
"of",
"variables",
"with",
"keys",
"defined",
"by",
"pattern",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/pattern.py#L115-L127
|
9,525
|
vatlab/SoS
|
src/sos/pattern.py
|
expand_pattern
|
def expand_pattern(pattern: str) -> List[str]:
'''This function expand patterns against the current namespace
and return a list of filenames'''
ofiles = []
sz = None
res = glob_wildcards(pattern, [])
sz = None
wildcard = [{}]
for key in res.keys():
if key not in env.sos_dict:
raise ValueError(f'Undefined variable {key} in pattern {pattern}')
if not isinstance(env.sos_dict[key], str) and isinstance(
env.sos_dict[key], collections.Sequence):
if sz is None:
sz = len(env.sos_dict[key])
wildcard = [copy.deepcopy(wildcard[0]) for x in range(sz)]
elif sz != len(env.sos_dict[key]):
raise ValueError(
f'Variables in output pattern should have the same length (other={sz}, len({key})={len(env.sos_dict[key])})'
)
for idx, value in enumerate(env.sos_dict[key]):
wildcard[idx][key] = value
else:
for v in wildcard:
v[key] = env.sos_dict[key]
#
for card in wildcard:
ofiles.append(
apply_wildcards(
pattern,
card,
fill_missing=False,
fail_dynamic=False,
dynamic_fill=None,
keep_dynamic=False))
return ofiles
|
python
|
def expand_pattern(pattern: str) -> List[str]:
'''This function expand patterns against the current namespace
and return a list of filenames'''
ofiles = []
sz = None
res = glob_wildcards(pattern, [])
sz = None
wildcard = [{}]
for key in res.keys():
if key not in env.sos_dict:
raise ValueError(f'Undefined variable {key} in pattern {pattern}')
if not isinstance(env.sos_dict[key], str) and isinstance(
env.sos_dict[key], collections.Sequence):
if sz is None:
sz = len(env.sos_dict[key])
wildcard = [copy.deepcopy(wildcard[0]) for x in range(sz)]
elif sz != len(env.sos_dict[key]):
raise ValueError(
f'Variables in output pattern should have the same length (other={sz}, len({key})={len(env.sos_dict[key])})'
)
for idx, value in enumerate(env.sos_dict[key]):
wildcard[idx][key] = value
else:
for v in wildcard:
v[key] = env.sos_dict[key]
#
for card in wildcard:
ofiles.append(
apply_wildcards(
pattern,
card,
fill_missing=False,
fail_dynamic=False,
dynamic_fill=None,
keep_dynamic=False))
return ofiles
|
[
"def",
"expand_pattern",
"(",
"pattern",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"ofiles",
"=",
"[",
"]",
"sz",
"=",
"None",
"res",
"=",
"glob_wildcards",
"(",
"pattern",
",",
"[",
"]",
")",
"sz",
"=",
"None",
"wildcard",
"=",
"[",
"{",
"}",
"]",
"for",
"key",
"in",
"res",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"env",
".",
"sos_dict",
":",
"raise",
"ValueError",
"(",
"f'Undefined variable {key} in pattern {pattern}'",
")",
"if",
"not",
"isinstance",
"(",
"env",
".",
"sos_dict",
"[",
"key",
"]",
",",
"str",
")",
"and",
"isinstance",
"(",
"env",
".",
"sos_dict",
"[",
"key",
"]",
",",
"collections",
".",
"Sequence",
")",
":",
"if",
"sz",
"is",
"None",
":",
"sz",
"=",
"len",
"(",
"env",
".",
"sos_dict",
"[",
"key",
"]",
")",
"wildcard",
"=",
"[",
"copy",
".",
"deepcopy",
"(",
"wildcard",
"[",
"0",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"sz",
")",
"]",
"elif",
"sz",
"!=",
"len",
"(",
"env",
".",
"sos_dict",
"[",
"key",
"]",
")",
":",
"raise",
"ValueError",
"(",
"f'Variables in output pattern should have the same length (other={sz}, len({key})={len(env.sos_dict[key])})'",
")",
"for",
"idx",
",",
"value",
"in",
"enumerate",
"(",
"env",
".",
"sos_dict",
"[",
"key",
"]",
")",
":",
"wildcard",
"[",
"idx",
"]",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"for",
"v",
"in",
"wildcard",
":",
"v",
"[",
"key",
"]",
"=",
"env",
".",
"sos_dict",
"[",
"key",
"]",
"#",
"for",
"card",
"in",
"wildcard",
":",
"ofiles",
".",
"append",
"(",
"apply_wildcards",
"(",
"pattern",
",",
"card",
",",
"fill_missing",
"=",
"False",
",",
"fail_dynamic",
"=",
"False",
",",
"dynamic_fill",
"=",
"None",
",",
"keep_dynamic",
"=",
"False",
")",
")",
"return",
"ofiles"
] |
This function expand patterns against the current namespace
and return a list of filenames
|
[
"This",
"function",
"expand",
"patterns",
"against",
"the",
"current",
"namespace",
"and",
"return",
"a",
"list",
"of",
"filenames"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/pattern.py#L130-L165
|
9,526
|
vatlab/SoS
|
src/sos/eval.py
|
interpolate
|
def interpolate(text, global_dict=None, local_dict=None):
'''Evaluate expressions in `text` '''
# step 1, make it a f-string (add quotation marks and f
# step 2, evaluate as a string
try:
return eval(as_fstring(text), global_dict, local_dict)
except Exception as e:
raise ValueError(f'Failed to interpolate {text}: {e}')
|
python
|
def interpolate(text, global_dict=None, local_dict=None):
'''Evaluate expressions in `text` '''
# step 1, make it a f-string (add quotation marks and f
# step 2, evaluate as a string
try:
return eval(as_fstring(text), global_dict, local_dict)
except Exception as e:
raise ValueError(f'Failed to interpolate {text}: {e}')
|
[
"def",
"interpolate",
"(",
"text",
",",
"global_dict",
"=",
"None",
",",
"local_dict",
"=",
"None",
")",
":",
"# step 1, make it a f-string (add quotation marks and f",
"# step 2, evaluate as a string",
"try",
":",
"return",
"eval",
"(",
"as_fstring",
"(",
"text",
")",
",",
"global_dict",
",",
"local_dict",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"f'Failed to interpolate {text}: {e}'",
")"
] |
Evaluate expressions in `text`
|
[
"Evaluate",
"expressions",
"in",
"text"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/eval.py#L15-L22
|
9,527
|
vatlab/SoS
|
src/sos/eval.py
|
SoS_eval
|
def SoS_eval(expr: str, extra_dict: dict = {}) -> Any:
'''Evaluate an expression with sos dict.'''
return eval(expr, env.sos_dict.dict(), extra_dict)
|
python
|
def SoS_eval(expr: str, extra_dict: dict = {}) -> Any:
'''Evaluate an expression with sos dict.'''
return eval(expr, env.sos_dict.dict(), extra_dict)
|
[
"def",
"SoS_eval",
"(",
"expr",
":",
"str",
",",
"extra_dict",
":",
"dict",
"=",
"{",
"}",
")",
"->",
"Any",
":",
"return",
"eval",
"(",
"expr",
",",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
",",
"extra_dict",
")"
] |
Evaluate an expression with sos dict.
|
[
"Evaluate",
"an",
"expression",
"with",
"sos",
"dict",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/eval.py#L96-L98
|
9,528
|
vatlab/SoS
|
src/sos/eval.py
|
SoS_exec
|
def SoS_exec(script: str, _dict: dict = None,
return_result: bool = True) -> None:
'''Execute a statement.'''
if _dict is None:
_dict = env.sos_dict.dict()
if not return_result:
exec(
compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
return None
try:
stmts = list(ast.iter_child_nodes(ast.parse(script)))
if not stmts:
return
if isinstance(stmts[-1], ast.Expr):
# the last one is an expression and we will try to return the results
# so we first execute the previous statements
if len(stmts) > 1:
exec(
compile(
ast.Module(body=stmts[:-1]),
filename=stmtHash.hash(script),
mode="exec"), _dict)
# then we eval the last one
res = eval(
compile(
ast.Expression(body=stmts[-1].value),
filename=stmtHash.hash(script),
mode="eval"), _dict)
else:
# otherwise we just execute the entire code
exec(
compile(script, filename=stmtHash.hash(script), mode='exec'),
_dict)
res = None
except SyntaxError as e:
raise SyntaxError(f"Invalid code {script}: {e}")
# if check_readonly:
# env.sos_dict.check_readonly_vars()
return res
|
python
|
def SoS_exec(script: str, _dict: dict = None,
return_result: bool = True) -> None:
'''Execute a statement.'''
if _dict is None:
_dict = env.sos_dict.dict()
if not return_result:
exec(
compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
return None
try:
stmts = list(ast.iter_child_nodes(ast.parse(script)))
if not stmts:
return
if isinstance(stmts[-1], ast.Expr):
# the last one is an expression and we will try to return the results
# so we first execute the previous statements
if len(stmts) > 1:
exec(
compile(
ast.Module(body=stmts[:-1]),
filename=stmtHash.hash(script),
mode="exec"), _dict)
# then we eval the last one
res = eval(
compile(
ast.Expression(body=stmts[-1].value),
filename=stmtHash.hash(script),
mode="eval"), _dict)
else:
# otherwise we just execute the entire code
exec(
compile(script, filename=stmtHash.hash(script), mode='exec'),
_dict)
res = None
except SyntaxError as e:
raise SyntaxError(f"Invalid code {script}: {e}")
# if check_readonly:
# env.sos_dict.check_readonly_vars()
return res
|
[
"def",
"SoS_exec",
"(",
"script",
":",
"str",
",",
"_dict",
":",
"dict",
"=",
"None",
",",
"return_result",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"if",
"_dict",
"is",
"None",
":",
"_dict",
"=",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
"if",
"not",
"return_result",
":",
"exec",
"(",
"compile",
"(",
"script",
",",
"filename",
"=",
"stmtHash",
".",
"hash",
"(",
"script",
")",
",",
"mode",
"=",
"'exec'",
")",
",",
"_dict",
")",
"return",
"None",
"try",
":",
"stmts",
"=",
"list",
"(",
"ast",
".",
"iter_child_nodes",
"(",
"ast",
".",
"parse",
"(",
"script",
")",
")",
")",
"if",
"not",
"stmts",
":",
"return",
"if",
"isinstance",
"(",
"stmts",
"[",
"-",
"1",
"]",
",",
"ast",
".",
"Expr",
")",
":",
"# the last one is an expression and we will try to return the results",
"# so we first execute the previous statements",
"if",
"len",
"(",
"stmts",
")",
">",
"1",
":",
"exec",
"(",
"compile",
"(",
"ast",
".",
"Module",
"(",
"body",
"=",
"stmts",
"[",
":",
"-",
"1",
"]",
")",
",",
"filename",
"=",
"stmtHash",
".",
"hash",
"(",
"script",
")",
",",
"mode",
"=",
"\"exec\"",
")",
",",
"_dict",
")",
"# then we eval the last one",
"res",
"=",
"eval",
"(",
"compile",
"(",
"ast",
".",
"Expression",
"(",
"body",
"=",
"stmts",
"[",
"-",
"1",
"]",
".",
"value",
")",
",",
"filename",
"=",
"stmtHash",
".",
"hash",
"(",
"script",
")",
",",
"mode",
"=",
"\"eval\"",
")",
",",
"_dict",
")",
"else",
":",
"# otherwise we just execute the entire code",
"exec",
"(",
"compile",
"(",
"script",
",",
"filename",
"=",
"stmtHash",
".",
"hash",
"(",
"script",
")",
",",
"mode",
"=",
"'exec'",
")",
",",
"_dict",
")",
"res",
"=",
"None",
"except",
"SyntaxError",
"as",
"e",
":",
"raise",
"SyntaxError",
"(",
"f\"Invalid code {script}: {e}\"",
")",
"# if check_readonly:",
"# env.sos_dict.check_readonly_vars()",
"return",
"res"
] |
Execute a statement.
|
[
"Execute",
"a",
"statement",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/eval.py#L127-L168
|
9,529
|
vatlab/SoS
|
src/sos/step_executor.py
|
expand_depends_files
|
def expand_depends_files(*args, **kwargs):
'''handle directive depends'''
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y)
for x, y in kwargs.items()
}
return sos_targets(
*args,
**kwargs,
_verify_existence=True,
_undetermined=False,
_source=env.sos_dict['step_name'])
|
python
|
def expand_depends_files(*args, **kwargs):
'''handle directive depends'''
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y)
for x, y in kwargs.items()
}
return sos_targets(
*args,
**kwargs,
_verify_existence=True,
_undetermined=False,
_source=env.sos_dict['step_name'])
|
[
"def",
"expand_depends_files",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"[",
"x",
".",
"resolve",
"(",
")",
"if",
"isinstance",
"(",
"x",
",",
"dynamic",
")",
"else",
"x",
"for",
"x",
"in",
"args",
"]",
"kwargs",
"=",
"{",
"x",
":",
"(",
"y",
".",
"resolve",
"(",
")",
"if",
"isinstance",
"(",
"y",
",",
"dynamic",
")",
"else",
"y",
")",
"for",
"x",
",",
"y",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"return",
"sos_targets",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
"_verify_existence",
"=",
"True",
",",
"_undetermined",
"=",
"False",
",",
"_source",
"=",
"env",
".",
"sos_dict",
"[",
"'step_name'",
"]",
")"
] |
handle directive depends
|
[
"handle",
"directive",
"depends"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/step_executor.py#L209-L221
|
9,530
|
vatlab/SoS
|
src/sos/step_executor.py
|
Step_Executor.wait_for_subworkflows
|
def wait_for_subworkflows(self, workflow_results):
'''Wait for results from subworkflows'''
wf_ids = sum([x['pending_workflows'] for x in workflow_results], [])
for wf_id in wf_ids:
# here we did not check if workflow ids match
yield self.socket
res = self.socket.recv_pyobj()
if res is None:
sys.exit(0)
elif isinstance(res, Exception):
raise res
|
python
|
def wait_for_subworkflows(self, workflow_results):
'''Wait for results from subworkflows'''
wf_ids = sum([x['pending_workflows'] for x in workflow_results], [])
for wf_id in wf_ids:
# here we did not check if workflow ids match
yield self.socket
res = self.socket.recv_pyobj()
if res is None:
sys.exit(0)
elif isinstance(res, Exception):
raise res
|
[
"def",
"wait_for_subworkflows",
"(",
"self",
",",
"workflow_results",
")",
":",
"wf_ids",
"=",
"sum",
"(",
"[",
"x",
"[",
"'pending_workflows'",
"]",
"for",
"x",
"in",
"workflow_results",
"]",
",",
"[",
"]",
")",
"for",
"wf_id",
"in",
"wf_ids",
":",
"# here we did not check if workflow ids match",
"yield",
"self",
".",
"socket",
"res",
"=",
"self",
".",
"socket",
".",
"recv_pyobj",
"(",
")",
"if",
"res",
"is",
"None",
":",
"sys",
".",
"exit",
"(",
"0",
")",
"elif",
"isinstance",
"(",
"res",
",",
"Exception",
")",
":",
"raise",
"res"
] |
Wait for results from subworkflows
|
[
"Wait",
"for",
"results",
"from",
"subworkflows"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/step_executor.py#L1852-L1862
|
9,531
|
vatlab/SoS
|
src/sos/actions_r.py
|
Rmarkdown
|
def Rmarkdown(script=None,
input=None,
output=None,
args='{input:r}, output_file={output:ar}',
**kwargs):
'''Convert input file to output using Rmarkdown
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
Rmarkdown: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
Rmarkdown(input, output='report.html')
3. input file specified by command line option `-r` .
Rmarkdown(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options using the args parameter of the action. The default value
of args is `${input!r} --output ${output!ar}'
'''
if not R_library('rmarkdown').target_exists():
raise RuntimeError('Library rmarkdown does not exist')
input = sos_targets(collect_input(script, input))
output = sos_targets(output)
if len(output) == 0:
write_to_stdout = True
output = sos_targets(
tempfile.NamedTemporaryFile(
mode='w+t', suffix='.html', delete=False).name)
else:
write_to_stdout = False
#
ret = 1
try:
# render(input, output_format = NULL, output_file = NULL, output_dir = NULL,
# output_options = NULL, intermediates_dir = NULL,
# runtime = c("auto", "static", "shiny"),
# clean = TRUE, params = NULL, knit_meta = NULL, envir = parent.frame(),
# run_Rmarkdown = TRUE, quiet = FALSE, encoding = getOption("encoding"))
cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"', {
'input': input,
'output': output
})
if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('ACTION', f'Running command "{cmd}"')
if env.config['run_mode'] == 'interactive':
# need to catch output and send to python output, which will in trun be hijacked by SoS notebook
p = subprocess.Popen(
cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
#pid = p.pid
out, err = p.communicate()
sys.stdout.write(out.decode())
sys.stderr.write(err.decode())
ret = p.returncode
else:
p = subprocess.Popen(cmd, shell=True)
#pid = p.pid
ret = p.wait()
except Exception as e:
env.logger.error(e)
if ret != 0:
temp_file = os.path.join('.sos', f'{"Rmarkdown"}_{os.getpid()}.md')
shutil.copyfile(str(input), temp_file)
cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"', {
'input': input,
'output': sos_targets(temp_file)
})
raise RuntimeError(
f'Failed to execute script. Please use command \n"{cmd}"\nunder {os.getcwd()} to test it.'
)
if write_to_stdout:
with open(str(output[0])) as out:
sys.stdout.write(out.read())
else:
env.logger.info(f'Report saved to {output}')
|
python
|
def Rmarkdown(script=None,
input=None,
output=None,
args='{input:r}, output_file={output:ar}',
**kwargs):
'''Convert input file to output using Rmarkdown
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
Rmarkdown: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
Rmarkdown(input, output='report.html')
3. input file specified by command line option `-r` .
Rmarkdown(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options using the args parameter of the action. The default value
of args is `${input!r} --output ${output!ar}'
'''
if not R_library('rmarkdown').target_exists():
raise RuntimeError('Library rmarkdown does not exist')
input = sos_targets(collect_input(script, input))
output = sos_targets(output)
if len(output) == 0:
write_to_stdout = True
output = sos_targets(
tempfile.NamedTemporaryFile(
mode='w+t', suffix='.html', delete=False).name)
else:
write_to_stdout = False
#
ret = 1
try:
# render(input, output_format = NULL, output_file = NULL, output_dir = NULL,
# output_options = NULL, intermediates_dir = NULL,
# runtime = c("auto", "static", "shiny"),
# clean = TRUE, params = NULL, knit_meta = NULL, envir = parent.frame(),
# run_Rmarkdown = TRUE, quiet = FALSE, encoding = getOption("encoding"))
cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"', {
'input': input,
'output': output
})
if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('ACTION', f'Running command "{cmd}"')
if env.config['run_mode'] == 'interactive':
# need to catch output and send to python output, which will in trun be hijacked by SoS notebook
p = subprocess.Popen(
cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
#pid = p.pid
out, err = p.communicate()
sys.stdout.write(out.decode())
sys.stderr.write(err.decode())
ret = p.returncode
else:
p = subprocess.Popen(cmd, shell=True)
#pid = p.pid
ret = p.wait()
except Exception as e:
env.logger.error(e)
if ret != 0:
temp_file = os.path.join('.sos', f'{"Rmarkdown"}_{os.getpid()}.md')
shutil.copyfile(str(input), temp_file)
cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"', {
'input': input,
'output': sos_targets(temp_file)
})
raise RuntimeError(
f'Failed to execute script. Please use command \n"{cmd}"\nunder {os.getcwd()} to test it.'
)
if write_to_stdout:
with open(str(output[0])) as out:
sys.stdout.write(out.read())
else:
env.logger.info(f'Report saved to {output}')
|
[
"def",
"Rmarkdown",
"(",
"script",
"=",
"None",
",",
"input",
"=",
"None",
",",
"output",
"=",
"None",
",",
"args",
"=",
"'{input:r}, output_file={output:ar}'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"R_library",
"(",
"'rmarkdown'",
")",
".",
"target_exists",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'Library rmarkdown does not exist'",
")",
"input",
"=",
"sos_targets",
"(",
"collect_input",
"(",
"script",
",",
"input",
")",
")",
"output",
"=",
"sos_targets",
"(",
"output",
")",
"if",
"len",
"(",
"output",
")",
"==",
"0",
":",
"write_to_stdout",
"=",
"True",
"output",
"=",
"sos_targets",
"(",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+t'",
",",
"suffix",
"=",
"'.html'",
",",
"delete",
"=",
"False",
")",
".",
"name",
")",
"else",
":",
"write_to_stdout",
"=",
"False",
"#",
"ret",
"=",
"1",
"try",
":",
"# render(input, output_format = NULL, output_file = NULL, output_dir = NULL,",
"# output_options = NULL, intermediates_dir = NULL,",
"# runtime = c(\"auto\", \"static\", \"shiny\"),",
"# clean = TRUE, params = NULL, knit_meta = NULL, envir = parent.frame(),",
"# run_Rmarkdown = TRUE, quiet = FALSE, encoding = getOption(\"encoding\"))",
"cmd",
"=",
"interpolate",
"(",
"f'Rscript -e \"rmarkdown::render({args})\"'",
",",
"{",
"'input'",
":",
"input",
",",
"'output'",
":",
"output",
"}",
")",
"if",
"'ACTION'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'ACTION'",
",",
"f'Running command \"{cmd}\"'",
")",
"if",
"env",
".",
"config",
"[",
"'run_mode'",
"]",
"==",
"'interactive'",
":",
"# need to catch output and send to python output, which will in trun be hijacked by SoS notebook",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"#pid = p.pid",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"out",
".",
"decode",
"(",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"err",
".",
"decode",
"(",
")",
")",
"ret",
"=",
"p",
".",
"returncode",
"else",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
"#pid = p.pid",
"ret",
"=",
"p",
".",
"wait",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"env",
".",
"logger",
".",
"error",
"(",
"e",
")",
"if",
"ret",
"!=",
"0",
":",
"temp_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'.sos'",
",",
"f'{\"Rmarkdown\"}_{os.getpid()}.md'",
")",
"shutil",
".",
"copyfile",
"(",
"str",
"(",
"input",
")",
",",
"temp_file",
")",
"cmd",
"=",
"interpolate",
"(",
"f'Rscript -e \"rmarkdown::render({args})\"'",
",",
"{",
"'input'",
":",
"input",
",",
"'output'",
":",
"sos_targets",
"(",
"temp_file",
")",
"}",
")",
"raise",
"RuntimeError",
"(",
"f'Failed to execute script. Please use command \\n\"{cmd}\"\\nunder {os.getcwd()} to test it.'",
")",
"if",
"write_to_stdout",
":",
"with",
"open",
"(",
"str",
"(",
"output",
"[",
"0",
"]",
")",
")",
"as",
"out",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"out",
".",
"read",
"(",
")",
")",
"else",
":",
"env",
".",
"logger",
".",
"info",
"(",
"f'Report saved to {output}'",
")"
] |
Convert input file to output using Rmarkdown
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
Rmarkdown: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
Rmarkdown(input, output='report.html')
3. input file specified by command line option `-r` .
Rmarkdown(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options using the args parameter of the action. The default value
of args is `${input!r} --output ${output!ar}'
|
[
"Convert",
"input",
"file",
"to",
"output",
"using",
"Rmarkdown"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions_r.py#L41-L124
|
9,532
|
vatlab/SoS
|
src/sos/docker/client.py
|
SoS_DockerClient.total_memory
|
def total_memory(self, image='ubuntu'):
'''Get the available ram fo the docker machine in Kb'''
try:
ret = subprocess.check_output(
f'''docker run -t {image} cat /proc/meminfo | grep MemTotal''',
shell=True,
stdin=subprocess.DEVNULL)
# ret: MemTotal: 30208916 kB
self.tot_mem = int(ret.split()[1])
except Exception:
# some system does not have cat or grep
self.tot_mem = None
return self.tot_mem
|
python
|
def total_memory(self, image='ubuntu'):
'''Get the available ram fo the docker machine in Kb'''
try:
ret = subprocess.check_output(
f'''docker run -t {image} cat /proc/meminfo | grep MemTotal''',
shell=True,
stdin=subprocess.DEVNULL)
# ret: MemTotal: 30208916 kB
self.tot_mem = int(ret.split()[1])
except Exception:
# some system does not have cat or grep
self.tot_mem = None
return self.tot_mem
|
[
"def",
"total_memory",
"(",
"self",
",",
"image",
"=",
"'ubuntu'",
")",
":",
"try",
":",
"ret",
"=",
"subprocess",
".",
"check_output",
"(",
"f'''docker run -t {image} cat /proc/meminfo | grep MemTotal'''",
",",
"shell",
"=",
"True",
",",
"stdin",
"=",
"subprocess",
".",
"DEVNULL",
")",
"# ret: MemTotal: 30208916 kB",
"self",
".",
"tot_mem",
"=",
"int",
"(",
"ret",
".",
"split",
"(",
")",
"[",
"1",
"]",
")",
"except",
"Exception",
":",
"# some system does not have cat or grep",
"self",
".",
"tot_mem",
"=",
"None",
"return",
"self",
".",
"tot_mem"
] |
Get the available ram fo the docker machine in Kb
|
[
"Get",
"the",
"available",
"ram",
"fo",
"the",
"docker",
"machine",
"in",
"Kb"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/docker/client.py#L34-L46
|
9,533
|
vatlab/SoS
|
src/sos/actions.py
|
script
|
def script(script, interpreter='', suffix='', args='', **kwargs):
'''Execute specified script using specified interpreter. This action accepts common
action arguments such as input, active, workdir, docker_image and args. In particular,
content of one or more files specified by option input would be prepended before
the specified script.'''
return SoS_ExecuteScript(script, interpreter, suffix, args).run(**kwargs)
|
python
|
def script(script, interpreter='', suffix='', args='', **kwargs):
'''Execute specified script using specified interpreter. This action accepts common
action arguments such as input, active, workdir, docker_image and args. In particular,
content of one or more files specified by option input would be prepended before
the specified script.'''
return SoS_ExecuteScript(script, interpreter, suffix, args).run(**kwargs)
|
[
"def",
"script",
"(",
"script",
",",
"interpreter",
"=",
"''",
",",
"suffix",
"=",
"''",
",",
"args",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"SoS_ExecuteScript",
"(",
"script",
",",
"interpreter",
",",
"suffix",
",",
"args",
")",
".",
"run",
"(",
"*",
"*",
"kwargs",
")"
] |
Execute specified script using specified interpreter. This action accepts common
action arguments such as input, active, workdir, docker_image and args. In particular,
content of one or more files specified by option input would be prepended before
the specified script.
|
[
"Execute",
"specified",
"script",
"using",
"specified",
"interpreter",
".",
"This",
"action",
"accepts",
"common",
"action",
"arguments",
"such",
"as",
"input",
"active",
"workdir",
"docker_image",
"and",
"args",
".",
"In",
"particular",
"content",
"of",
"one",
"or",
"more",
"files",
"specified",
"by",
"option",
"input",
"would",
"be",
"prepended",
"before",
"the",
"specified",
"script",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions.py#L656-L661
|
9,534
|
vatlab/SoS
|
src/sos/actions.py
|
stop_if
|
def stop_if(expr, msg='', no_output=False):
'''Abort the execution of the current step or loop and yield
an warning message `msg` if `expr` is False '''
if expr:
raise StopInputGroup(msg=msg, keep_output=not no_output)
return 0
|
python
|
def stop_if(expr, msg='', no_output=False):
'''Abort the execution of the current step or loop and yield
an warning message `msg` if `expr` is False '''
if expr:
raise StopInputGroup(msg=msg, keep_output=not no_output)
return 0
|
[
"def",
"stop_if",
"(",
"expr",
",",
"msg",
"=",
"''",
",",
"no_output",
"=",
"False",
")",
":",
"if",
"expr",
":",
"raise",
"StopInputGroup",
"(",
"msg",
"=",
"msg",
",",
"keep_output",
"=",
"not",
"no_output",
")",
"return",
"0"
] |
Abort the execution of the current step or loop and yield
an warning message `msg` if `expr` is False
|
[
"Abort",
"the",
"execution",
"of",
"the",
"current",
"step",
"or",
"loop",
"and",
"yield",
"an",
"warning",
"message",
"msg",
"if",
"expr",
"is",
"False"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions.py#L682-L687
|
9,535
|
vatlab/SoS
|
src/sos/actions.py
|
download
|
def download(URLs, dest_dir='.', dest_file=None, decompress=False, max_jobs=5):
'''Download files from specified URL, which should be space, tab or
newline separated URLs. The files will be downloaded to specified
destination. If `filename.md5` files are downloaded, they are used to
validate downloaded `filename`. Unless otherwise specified, compressed
files are decompressed. If `max_jobs` is given, a maximum of `max_jobs`
concurrent download jobs will be used for each domain. This restriction
applies to domain names and will be applied to multiple download
instances.
'''
if env.config['run_mode'] == 'dryrun':
print(f'HINT: download\n{URLs}\n')
return None
if isinstance(URLs, str):
urls = [x.strip() for x in URLs.split() if x.strip()]
else:
urls = list(URLs)
if not urls:
env.logger.debug(f'No download URL specified: {URLs}')
return
#
if dest_file is not None and len(urls) != 1:
raise RuntimeError(
'Only one URL is allowed if a destination file is specified.')
#
if dest_file is None:
filenames = []
for idx, url in enumerate(urls):
token = urllib.parse.urlparse(url)
# if no scheme or netloc, the URL is not acceptable
if not all([
getattr(token, qualifying_attr)
for qualifying_attr in ('scheme', 'netloc')
]):
raise ValueError(f'Invalid URL {url}')
filename = os.path.split(token.path)[-1]
if not filename:
raise ValueError(f'Cannot determine destination file for {url}')
filenames.append(os.path.join(dest_dir, filename))
else:
token = urllib.parse.urlparse(urls[0])
if not all([
getattr(token, qualifying_attr)
for qualifying_attr in ('scheme', 'netloc')
]):
raise ValueError(f'Invalid URL {url}')
filenames = [dest_file]
#
succ = [(False, None) for x in urls]
with ProcessPoolExecutor(max_workers=max_jobs) as executor:
for idx, (url, filename) in enumerate(zip(urls, filenames)):
# if there is alot, start download
succ[idx] = executor.submit(downloadURL, url, filename, decompress,
idx)
succ = [x.result() for x in succ]
# for su, url in zip(succ, urls):
# if not su:
# env.logger.warning('Failed to download {}'.format(url))
failed = [y for x, y in zip(succ, urls) if not x]
if failed:
if len(urls) == 1:
raise RuntimeError('Failed to download {urls[0]}')
else:
raise RuntimeError(
f'Failed to download {failed[0]} ({len(failed)} out of {len(urls)})'
)
return 0
|
python
|
def download(URLs, dest_dir='.', dest_file=None, decompress=False, max_jobs=5):
'''Download files from specified URL, which should be space, tab or
newline separated URLs. The files will be downloaded to specified
destination. If `filename.md5` files are downloaded, they are used to
validate downloaded `filename`. Unless otherwise specified, compressed
files are decompressed. If `max_jobs` is given, a maximum of `max_jobs`
concurrent download jobs will be used for each domain. This restriction
applies to domain names and will be applied to multiple download
instances.
'''
if env.config['run_mode'] == 'dryrun':
print(f'HINT: download\n{URLs}\n')
return None
if isinstance(URLs, str):
urls = [x.strip() for x in URLs.split() if x.strip()]
else:
urls = list(URLs)
if not urls:
env.logger.debug(f'No download URL specified: {URLs}')
return
#
if dest_file is not None and len(urls) != 1:
raise RuntimeError(
'Only one URL is allowed if a destination file is specified.')
#
if dest_file is None:
filenames = []
for idx, url in enumerate(urls):
token = urllib.parse.urlparse(url)
# if no scheme or netloc, the URL is not acceptable
if not all([
getattr(token, qualifying_attr)
for qualifying_attr in ('scheme', 'netloc')
]):
raise ValueError(f'Invalid URL {url}')
filename = os.path.split(token.path)[-1]
if not filename:
raise ValueError(f'Cannot determine destination file for {url}')
filenames.append(os.path.join(dest_dir, filename))
else:
token = urllib.parse.urlparse(urls[0])
if not all([
getattr(token, qualifying_attr)
for qualifying_attr in ('scheme', 'netloc')
]):
raise ValueError(f'Invalid URL {url}')
filenames = [dest_file]
#
succ = [(False, None) for x in urls]
with ProcessPoolExecutor(max_workers=max_jobs) as executor:
for idx, (url, filename) in enumerate(zip(urls, filenames)):
# if there is alot, start download
succ[idx] = executor.submit(downloadURL, url, filename, decompress,
idx)
succ = [x.result() for x in succ]
# for su, url in zip(succ, urls):
# if not su:
# env.logger.warning('Failed to download {}'.format(url))
failed = [y for x, y in zip(succ, urls) if not x]
if failed:
if len(urls) == 1:
raise RuntimeError('Failed to download {urls[0]}')
else:
raise RuntimeError(
f'Failed to download {failed[0]} ({len(failed)} out of {len(urls)})'
)
return 0
|
[
"def",
"download",
"(",
"URLs",
",",
"dest_dir",
"=",
"'.'",
",",
"dest_file",
"=",
"None",
",",
"decompress",
"=",
"False",
",",
"max_jobs",
"=",
"5",
")",
":",
"if",
"env",
".",
"config",
"[",
"'run_mode'",
"]",
"==",
"'dryrun'",
":",
"print",
"(",
"f'HINT: download\\n{URLs}\\n'",
")",
"return",
"None",
"if",
"isinstance",
"(",
"URLs",
",",
"str",
")",
":",
"urls",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"URLs",
".",
"split",
"(",
")",
"if",
"x",
".",
"strip",
"(",
")",
"]",
"else",
":",
"urls",
"=",
"list",
"(",
"URLs",
")",
"if",
"not",
"urls",
":",
"env",
".",
"logger",
".",
"debug",
"(",
"f'No download URL specified: {URLs}'",
")",
"return",
"#",
"if",
"dest_file",
"is",
"not",
"None",
"and",
"len",
"(",
"urls",
")",
"!=",
"1",
":",
"raise",
"RuntimeError",
"(",
"'Only one URL is allowed if a destination file is specified.'",
")",
"#",
"if",
"dest_file",
"is",
"None",
":",
"filenames",
"=",
"[",
"]",
"for",
"idx",
",",
"url",
"in",
"enumerate",
"(",
"urls",
")",
":",
"token",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
"# if no scheme or netloc, the URL is not acceptable",
"if",
"not",
"all",
"(",
"[",
"getattr",
"(",
"token",
",",
"qualifying_attr",
")",
"for",
"qualifying_attr",
"in",
"(",
"'scheme'",
",",
"'netloc'",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"f'Invalid URL {url}'",
")",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"token",
".",
"path",
")",
"[",
"-",
"1",
"]",
"if",
"not",
"filename",
":",
"raise",
"ValueError",
"(",
"f'Cannot determine destination file for {url}'",
")",
"filenames",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dest_dir",
",",
"filename",
")",
")",
"else",
":",
"token",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"urls",
"[",
"0",
"]",
")",
"if",
"not",
"all",
"(",
"[",
"getattr",
"(",
"token",
",",
"qualifying_attr",
")",
"for",
"qualifying_attr",
"in",
"(",
"'scheme'",
",",
"'netloc'",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"f'Invalid URL {url}'",
")",
"filenames",
"=",
"[",
"dest_file",
"]",
"#",
"succ",
"=",
"[",
"(",
"False",
",",
"None",
")",
"for",
"x",
"in",
"urls",
"]",
"with",
"ProcessPoolExecutor",
"(",
"max_workers",
"=",
"max_jobs",
")",
"as",
"executor",
":",
"for",
"idx",
",",
"(",
"url",
",",
"filename",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"urls",
",",
"filenames",
")",
")",
":",
"# if there is alot, start download",
"succ",
"[",
"idx",
"]",
"=",
"executor",
".",
"submit",
"(",
"downloadURL",
",",
"url",
",",
"filename",
",",
"decompress",
",",
"idx",
")",
"succ",
"=",
"[",
"x",
".",
"result",
"(",
")",
"for",
"x",
"in",
"succ",
"]",
"# for su, url in zip(succ, urls):",
"# if not su:",
"# env.logger.warning('Failed to download {}'.format(url))",
"failed",
"=",
"[",
"y",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"succ",
",",
"urls",
")",
"if",
"not",
"x",
"]",
"if",
"failed",
":",
"if",
"len",
"(",
"urls",
")",
"==",
"1",
":",
"raise",
"RuntimeError",
"(",
"'Failed to download {urls[0]}'",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"f'Failed to download {failed[0]} ({len(failed)} out of {len(urls)})'",
")",
"return",
"0"
] |
Download files from specified URL, which should be space, tab or
newline separated URLs. The files will be downloaded to specified
destination. If `filename.md5` files are downloaded, they are used to
validate downloaded `filename`. Unless otherwise specified, compressed
files are decompressed. If `max_jobs` is given, a maximum of `max_jobs`
concurrent download jobs will be used for each domain. This restriction
applies to domain names and will be applied to multiple download
instances.
|
[
"Download",
"files",
"from",
"specified",
"URL",
"which",
"should",
"be",
"space",
"tab",
"or",
"newline",
"separated",
"URLs",
".",
"The",
"files",
"will",
"be",
"downloaded",
"to",
"specified",
"destination",
".",
"If",
"filename",
".",
"md5",
"files",
"are",
"downloaded",
"they",
"are",
"used",
"to",
"validate",
"downloaded",
"filename",
".",
"Unless",
"otherwise",
"specified",
"compressed",
"files",
"are",
"decompressed",
".",
"If",
"max_jobs",
"is",
"given",
"a",
"maximum",
"of",
"max_jobs",
"concurrent",
"download",
"jobs",
"will",
"be",
"used",
"for",
"each",
"domain",
".",
"This",
"restriction",
"applies",
"to",
"domain",
"names",
"and",
"will",
"be",
"applied",
"to",
"multiple",
"download",
"instances",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions.py#L923-L991
|
9,536
|
vatlab/SoS
|
src/sos/actions.py
|
run
|
def run(script, args='', **kwargs):
'''Execute specified script using bash. This action accepts common action arguments such as
input, active, workdir, docker_image and args. In particular, content of one or more files
specified by option input would be prepended before the specified script.'''
if sys.platform == 'win32':
# in the case there is no interpreter, we put the script
# at first (this is the case for windows)
# and we donot add default args.
interpreter = ''
else:
# if there is a shebang line, we ...
if not script.startswith('#!'):
interpreter = '/bin/bash'
if not args:
args = '-ev {filename:q}'
else:
# execute script directly
interpreter = ''
return SoS_ExecuteScript(script, interpreter, '', args).run(**kwargs)
|
python
|
def run(script, args='', **kwargs):
'''Execute specified script using bash. This action accepts common action arguments such as
input, active, workdir, docker_image and args. In particular, content of one or more files
specified by option input would be prepended before the specified script.'''
if sys.platform == 'win32':
# in the case there is no interpreter, we put the script
# at first (this is the case for windows)
# and we donot add default args.
interpreter = ''
else:
# if there is a shebang line, we ...
if not script.startswith('#!'):
interpreter = '/bin/bash'
if not args:
args = '-ev {filename:q}'
else:
# execute script directly
interpreter = ''
return SoS_ExecuteScript(script, interpreter, '', args).run(**kwargs)
|
[
"def",
"run",
"(",
"script",
",",
"args",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"# in the case there is no interpreter, we put the script",
"# at first (this is the case for windows)",
"# and we donot add default args.",
"interpreter",
"=",
"''",
"else",
":",
"# if there is a shebang line, we ...",
"if",
"not",
"script",
".",
"startswith",
"(",
"'#!'",
")",
":",
"interpreter",
"=",
"'/bin/bash'",
"if",
"not",
"args",
":",
"args",
"=",
"'-ev {filename:q}'",
"else",
":",
"# execute script directly",
"interpreter",
"=",
"''",
"return",
"SoS_ExecuteScript",
"(",
"script",
",",
"interpreter",
",",
"''",
",",
"args",
")",
".",
"run",
"(",
"*",
"*",
"kwargs",
")"
] |
Execute specified script using bash. This action accepts common action arguments such as
input, active, workdir, docker_image and args. In particular, content of one or more files
specified by option input would be prepended before the specified script.
|
[
"Execute",
"specified",
"script",
"using",
"bash",
".",
"This",
"action",
"accepts",
"common",
"action",
"arguments",
"such",
"as",
"input",
"active",
"workdir",
"docker_image",
"and",
"args",
".",
"In",
"particular",
"content",
"of",
"one",
"or",
"more",
"files",
"specified",
"by",
"option",
"input",
"would",
"be",
"prepended",
"before",
"the",
"specified",
"script",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions.py#L995-L1013
|
9,537
|
vatlab/SoS
|
src/sos/actions.py
|
pandoc
|
def pandoc(script=None,
input=None,
output=None,
args='{input:q} --output {output:q}',
**kwargs):
'''Convert input file to output using pandoc
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
pandoc: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
pandoc(input, output='report.html')
3. input file specified by command line option `-r` .
pandoc(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options such as "from" and "to" by customizing
the args parameter of the action. The default value of args is
`{input:q} --output {output:q}'
'''
#
# # this is output format
# pandoc [OPTIONS] [FILES]
# Input formats: commonmark, docbook, docx, epub, haddock, html, json*, latex,
# markdown, markdown_github, markdown_mmd, markdown_phpextra,
# markdown_strict, mediawiki, native, odt, opml, org, rst, t2t,
# textile, twiki
# [ *only Pandoc's JSON version of native AST]
# Output formats: asciidoc, beamer, commonmark, context, docbook, docx, dokuwiki,
# dzslides, epub, epub3, fb2, haddock, html, html5, icml, json*,
# latex, man, markdown, markdown_github, markdown_mmd,
# markdown_phpextra, markdown_strict, mediawiki, native, odt,
# opendocument, opml, org, pdf**, plain, revealjs, rst, rtf, s5,
# slideous, slidy, tei, texinfo, textile
# [**for pdf output, use latex or beamer and -o FILENAME.pdf]
# Options:
# -f FORMAT, -r FORMAT --from=FORMAT, --read=FORMAT
# -t FORMAT, -w FORMAT --to=FORMAT, --write=FORMAT
# -o FILENAME --output=FILENAME
# --data-dir=DIRECTORY
# -R --parse-raw
# -S --smart
#
# IGNORED
#
if not executable('pandoc').target_exists():
raise RuntimeError('pandoc not found')
input = sos_targets(collect_input(script, input))
output = sos_targets(output)
if len(output) == 0:
write_to_stdout = True
output = sos_targets(
tempfile.NamedTemporaryFile(
mode='w+t', suffix='.html', delete=False).name)
else:
write_to_stdout = False
#
ret = 1
try:
p = None
cmd = interpolate(f'pandoc {args}', {'input': input, 'output': output})
if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('ACTION', f'Running command "{cmd}"')
if env.config['run_mode'] == 'interactive':
# need to catch output and send to python output, which will in trun be hijacked by SoS notebook
from .utils import pexpect_run
ret = pexpect_run(cmd)
else:
p = subprocess.Popen(cmd, shell=True)
ret = p.wait()
except Exception as e:
env.logger.error(e)
if ret != 0:
temp_file = os.path.join('.sos', f'pandoc_{os.getpid()}.md')
shutil.copyfile(input, temp_file)
cmd = interpolate(f'pandoc {args}', {
'input': sos_targets(temp_file),
'output': sos_targets(output)
})
raise RuntimeError(
f'Failed to execute script. Please use command \n{cmd}\nunder {os.getcwd()} to test it.'
)
if write_to_stdout:
with open(output[0].fullname()) as out:
sys.stdout.write(out.read())
else:
env.logger.info(f'Report saved to {output}')
try:
os.remove(input)
except Exception:
pass
|
python
|
def pandoc(script=None,
input=None,
output=None,
args='{input:q} --output {output:q}',
**kwargs):
'''Convert input file to output using pandoc
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
pandoc: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
pandoc(input, output='report.html')
3. input file specified by command line option `-r` .
pandoc(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options such as "from" and "to" by customizing
the args parameter of the action. The default value of args is
`{input:q} --output {output:q}'
'''
#
# # this is output format
# pandoc [OPTIONS] [FILES]
# Input formats: commonmark, docbook, docx, epub, haddock, html, json*, latex,
# markdown, markdown_github, markdown_mmd, markdown_phpextra,
# markdown_strict, mediawiki, native, odt, opml, org, rst, t2t,
# textile, twiki
# [ *only Pandoc's JSON version of native AST]
# Output formats: asciidoc, beamer, commonmark, context, docbook, docx, dokuwiki,
# dzslides, epub, epub3, fb2, haddock, html, html5, icml, json*,
# latex, man, markdown, markdown_github, markdown_mmd,
# markdown_phpextra, markdown_strict, mediawiki, native, odt,
# opendocument, opml, org, pdf**, plain, revealjs, rst, rtf, s5,
# slideous, slidy, tei, texinfo, textile
# [**for pdf output, use latex or beamer and -o FILENAME.pdf]
# Options:
# -f FORMAT, -r FORMAT --from=FORMAT, --read=FORMAT
# -t FORMAT, -w FORMAT --to=FORMAT, --write=FORMAT
# -o FILENAME --output=FILENAME
# --data-dir=DIRECTORY
# -R --parse-raw
# -S --smart
#
# IGNORED
#
if not executable('pandoc').target_exists():
raise RuntimeError('pandoc not found')
input = sos_targets(collect_input(script, input))
output = sos_targets(output)
if len(output) == 0:
write_to_stdout = True
output = sos_targets(
tempfile.NamedTemporaryFile(
mode='w+t', suffix='.html', delete=False).name)
else:
write_to_stdout = False
#
ret = 1
try:
p = None
cmd = interpolate(f'pandoc {args}', {'input': input, 'output': output})
if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('ACTION', f'Running command "{cmd}"')
if env.config['run_mode'] == 'interactive':
# need to catch output and send to python output, which will in trun be hijacked by SoS notebook
from .utils import pexpect_run
ret = pexpect_run(cmd)
else:
p = subprocess.Popen(cmd, shell=True)
ret = p.wait()
except Exception as e:
env.logger.error(e)
if ret != 0:
temp_file = os.path.join('.sos', f'pandoc_{os.getpid()}.md')
shutil.copyfile(input, temp_file)
cmd = interpolate(f'pandoc {args}', {
'input': sos_targets(temp_file),
'output': sos_targets(output)
})
raise RuntimeError(
f'Failed to execute script. Please use command \n{cmd}\nunder {os.getcwd()} to test it.'
)
if write_to_stdout:
with open(output[0].fullname()) as out:
sys.stdout.write(out.read())
else:
env.logger.info(f'Report saved to {output}')
try:
os.remove(input)
except Exception:
pass
|
[
"def",
"pandoc",
"(",
"script",
"=",
"None",
",",
"input",
"=",
"None",
",",
"output",
"=",
"None",
",",
"args",
"=",
"'{input:q} --output {output:q}'",
",",
"*",
"*",
"kwargs",
")",
":",
"#",
"# # this is output format",
"# pandoc [OPTIONS] [FILES]",
"# Input formats: commonmark, docbook, docx, epub, haddock, html, json*, latex,",
"# markdown, markdown_github, markdown_mmd, markdown_phpextra,",
"# markdown_strict, mediawiki, native, odt, opml, org, rst, t2t,",
"# textile, twiki",
"# [ *only Pandoc's JSON version of native AST]",
"# Output formats: asciidoc, beamer, commonmark, context, docbook, docx, dokuwiki,",
"# dzslides, epub, epub3, fb2, haddock, html, html5, icml, json*,",
"# latex, man, markdown, markdown_github, markdown_mmd,",
"# markdown_phpextra, markdown_strict, mediawiki, native, odt,",
"# opendocument, opml, org, pdf**, plain, revealjs, rst, rtf, s5,",
"# slideous, slidy, tei, texinfo, textile",
"# [**for pdf output, use latex or beamer and -o FILENAME.pdf]",
"# Options:",
"# -f FORMAT, -r FORMAT --from=FORMAT, --read=FORMAT",
"# -t FORMAT, -w FORMAT --to=FORMAT, --write=FORMAT",
"# -o FILENAME --output=FILENAME",
"# --data-dir=DIRECTORY",
"# -R --parse-raw",
"# -S --smart",
"#",
"# IGNORED",
"#",
"if",
"not",
"executable",
"(",
"'pandoc'",
")",
".",
"target_exists",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"'pandoc not found'",
")",
"input",
"=",
"sos_targets",
"(",
"collect_input",
"(",
"script",
",",
"input",
")",
")",
"output",
"=",
"sos_targets",
"(",
"output",
")",
"if",
"len",
"(",
"output",
")",
"==",
"0",
":",
"write_to_stdout",
"=",
"True",
"output",
"=",
"sos_targets",
"(",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+t'",
",",
"suffix",
"=",
"'.html'",
",",
"delete",
"=",
"False",
")",
".",
"name",
")",
"else",
":",
"write_to_stdout",
"=",
"False",
"#",
"ret",
"=",
"1",
"try",
":",
"p",
"=",
"None",
"cmd",
"=",
"interpolate",
"(",
"f'pandoc {args}'",
",",
"{",
"'input'",
":",
"input",
",",
"'output'",
":",
"output",
"}",
")",
"if",
"'ACTION'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'ACTION'",
",",
"f'Running command \"{cmd}\"'",
")",
"if",
"env",
".",
"config",
"[",
"'run_mode'",
"]",
"==",
"'interactive'",
":",
"# need to catch output and send to python output, which will in trun be hijacked by SoS notebook",
"from",
".",
"utils",
"import",
"pexpect_run",
"ret",
"=",
"pexpect_run",
"(",
"cmd",
")",
"else",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
"ret",
"=",
"p",
".",
"wait",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"env",
".",
"logger",
".",
"error",
"(",
"e",
")",
"if",
"ret",
"!=",
"0",
":",
"temp_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'.sos'",
",",
"f'pandoc_{os.getpid()}.md'",
")",
"shutil",
".",
"copyfile",
"(",
"input",
",",
"temp_file",
")",
"cmd",
"=",
"interpolate",
"(",
"f'pandoc {args}'",
",",
"{",
"'input'",
":",
"sos_targets",
"(",
"temp_file",
")",
",",
"'output'",
":",
"sos_targets",
"(",
"output",
")",
"}",
")",
"raise",
"RuntimeError",
"(",
"f'Failed to execute script. Please use command \\n{cmd}\\nunder {os.getcwd()} to test it.'",
")",
"if",
"write_to_stdout",
":",
"with",
"open",
"(",
"output",
"[",
"0",
"]",
".",
"fullname",
"(",
")",
")",
"as",
"out",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"out",
".",
"read",
"(",
")",
")",
"else",
":",
"env",
".",
"logger",
".",
"info",
"(",
"f'Report saved to {output}'",
")",
"try",
":",
"os",
".",
"remove",
"(",
"input",
")",
"except",
"Exception",
":",
"pass"
] |
Convert input file to output using pandoc
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
pandoc: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
pandoc(input, output='report.html')
3. input file specified by command line option `-r` .
pandoc(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options such as "from" and "to" by customizing
the args parameter of the action. The default value of args is
`{input:q} --output {output:q}'
|
[
"Convert",
"input",
"file",
"to",
"output",
"using",
"pandoc"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions.py#L1134-L1234
|
9,538
|
vatlab/SoS
|
src/sos/section_analyzer.py
|
get_changed_vars
|
def get_changed_vars(section: SoS_Step):
'''changed vars are variables that are "shared" and therefore "provides"
to others '''
if 'shared' not in section.options:
return set()
changed_vars = set()
svars = section.options['shared']
if isinstance(svars, str):
changed_vars.add(svars)
svars = {svars: svars}
elif isinstance(svars, Sequence):
for item in svars:
if isinstance(item, str):
changed_vars.add(item)
elif isinstance(item, Mapping):
changed_vars |= set(item.keys())
else:
raise ValueError(
f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'
)
elif isinstance(svars, Mapping):
changed_vars |= set(svars.keys())
else:
raise ValueError(
f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'
)
return changed_vars
|
python
|
def get_changed_vars(section: SoS_Step):
'''changed vars are variables that are "shared" and therefore "provides"
to others '''
if 'shared' not in section.options:
return set()
changed_vars = set()
svars = section.options['shared']
if isinstance(svars, str):
changed_vars.add(svars)
svars = {svars: svars}
elif isinstance(svars, Sequence):
for item in svars:
if isinstance(item, str):
changed_vars.add(item)
elif isinstance(item, Mapping):
changed_vars |= set(item.keys())
else:
raise ValueError(
f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'
)
elif isinstance(svars, Mapping):
changed_vars |= set(svars.keys())
else:
raise ValueError(
f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'
)
return changed_vars
|
[
"def",
"get_changed_vars",
"(",
"section",
":",
"SoS_Step",
")",
":",
"if",
"'shared'",
"not",
"in",
"section",
".",
"options",
":",
"return",
"set",
"(",
")",
"changed_vars",
"=",
"set",
"(",
")",
"svars",
"=",
"section",
".",
"options",
"[",
"'shared'",
"]",
"if",
"isinstance",
"(",
"svars",
",",
"str",
")",
":",
"changed_vars",
".",
"add",
"(",
"svars",
")",
"svars",
"=",
"{",
"svars",
":",
"svars",
"}",
"elif",
"isinstance",
"(",
"svars",
",",
"Sequence",
")",
":",
"for",
"item",
"in",
"svars",
":",
"if",
"isinstance",
"(",
"item",
",",
"str",
")",
":",
"changed_vars",
".",
"add",
"(",
"item",
")",
"elif",
"isinstance",
"(",
"item",
",",
"Mapping",
")",
":",
"changed_vars",
"|=",
"set",
"(",
"item",
".",
"keys",
"(",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'",
")",
"elif",
"isinstance",
"(",
"svars",
",",
"Mapping",
")",
":",
"changed_vars",
"|=",
"set",
"(",
"svars",
".",
"keys",
"(",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f'Option shared should be a string, a mapping of expression, or list of string or mappings. {svars} provided'",
")",
"return",
"changed_vars"
] |
changed vars are variables that are "shared" and therefore "provides"
to others
|
[
"changed",
"vars",
"are",
"variables",
"that",
"are",
"shared",
"and",
"therefore",
"provides",
"to",
"others"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/section_analyzer.py#L138-L165
|
9,539
|
vatlab/SoS
|
src/sos/section_analyzer.py
|
get_all_used_vars
|
def get_all_used_vars(section):
'''Get variables which are variables used by input statement and statements before it'''
all_used_vars = set()
for statement in section.statements:
if statement[0] == '=':
all_used_vars |= accessed_vars('='.join(statement[1:3]))
elif statement[0] == '!':
all_used_vars |= accessed_vars(statement[1])
elif statement[0] == ':':
all_used_vars |= accessed_vars(statement[2], mode='eval')
if statement[1] != 'input':
continue
if 'paired_with' in statement[2]:
try:
pws = get_names_of_param(
'paired_with',
statement[2],
extra_dict=env.sos_dict.dict())
all_used_vars |= set(pws)
except Exception as e:
raise ValueError(
f'Failed to parse parameter paired_with: {e}')
if 'group_with' in statement[2]:
try:
pws = get_names_of_param(
'group_with',
statement[2],
extra_dict=env.sos_dict.dict())
all_used_vars |= set(pws)
except Exception as e:
raise ValueError(
f'Failed to parse parameter group_with: {e}')
if 'for_each' in statement[2]:
try:
pws = get_names_of_param(
'for_each',
statement[2],
extra_dict=env.sos_dict.dict())
for pw in pws:
all_used_vars |= set(pw.split(','))
except Exception as e:
raise ValueError(f'Failed to parse parameter for_each: {e}')
if section.task:
all_used_vars |= accessed_vars(section.task)
# now we have a list of global variables that are actually used in the functions
# this is specifically designed to handle the last case in #1225
func_with_vars = [
y for x, y in used_in_func(section.global_stmts).items()
if x in all_used_vars
]
return set.union(all_used_vars, *func_with_vars)
|
python
|
def get_all_used_vars(section):
'''Get variables which are variables used by input statement and statements before it'''
all_used_vars = set()
for statement in section.statements:
if statement[0] == '=':
all_used_vars |= accessed_vars('='.join(statement[1:3]))
elif statement[0] == '!':
all_used_vars |= accessed_vars(statement[1])
elif statement[0] == ':':
all_used_vars |= accessed_vars(statement[2], mode='eval')
if statement[1] != 'input':
continue
if 'paired_with' in statement[2]:
try:
pws = get_names_of_param(
'paired_with',
statement[2],
extra_dict=env.sos_dict.dict())
all_used_vars |= set(pws)
except Exception as e:
raise ValueError(
f'Failed to parse parameter paired_with: {e}')
if 'group_with' in statement[2]:
try:
pws = get_names_of_param(
'group_with',
statement[2],
extra_dict=env.sos_dict.dict())
all_used_vars |= set(pws)
except Exception as e:
raise ValueError(
f'Failed to parse parameter group_with: {e}')
if 'for_each' in statement[2]:
try:
pws = get_names_of_param(
'for_each',
statement[2],
extra_dict=env.sos_dict.dict())
for pw in pws:
all_used_vars |= set(pw.split(','))
except Exception as e:
raise ValueError(f'Failed to parse parameter for_each: {e}')
if section.task:
all_used_vars |= accessed_vars(section.task)
# now we have a list of global variables that are actually used in the functions
# this is specifically designed to handle the last case in #1225
func_with_vars = [
y for x, y in used_in_func(section.global_stmts).items()
if x in all_used_vars
]
return set.union(all_used_vars, *func_with_vars)
|
[
"def",
"get_all_used_vars",
"(",
"section",
")",
":",
"all_used_vars",
"=",
"set",
"(",
")",
"for",
"statement",
"in",
"section",
".",
"statements",
":",
"if",
"statement",
"[",
"0",
"]",
"==",
"'='",
":",
"all_used_vars",
"|=",
"accessed_vars",
"(",
"'='",
".",
"join",
"(",
"statement",
"[",
"1",
":",
"3",
"]",
")",
")",
"elif",
"statement",
"[",
"0",
"]",
"==",
"'!'",
":",
"all_used_vars",
"|=",
"accessed_vars",
"(",
"statement",
"[",
"1",
"]",
")",
"elif",
"statement",
"[",
"0",
"]",
"==",
"':'",
":",
"all_used_vars",
"|=",
"accessed_vars",
"(",
"statement",
"[",
"2",
"]",
",",
"mode",
"=",
"'eval'",
")",
"if",
"statement",
"[",
"1",
"]",
"!=",
"'input'",
":",
"continue",
"if",
"'paired_with'",
"in",
"statement",
"[",
"2",
"]",
":",
"try",
":",
"pws",
"=",
"get_names_of_param",
"(",
"'paired_with'",
",",
"statement",
"[",
"2",
"]",
",",
"extra_dict",
"=",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
")",
"all_used_vars",
"|=",
"set",
"(",
"pws",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"f'Failed to parse parameter paired_with: {e}'",
")",
"if",
"'group_with'",
"in",
"statement",
"[",
"2",
"]",
":",
"try",
":",
"pws",
"=",
"get_names_of_param",
"(",
"'group_with'",
",",
"statement",
"[",
"2",
"]",
",",
"extra_dict",
"=",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
")",
"all_used_vars",
"|=",
"set",
"(",
"pws",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"f'Failed to parse parameter group_with: {e}'",
")",
"if",
"'for_each'",
"in",
"statement",
"[",
"2",
"]",
":",
"try",
":",
"pws",
"=",
"get_names_of_param",
"(",
"'for_each'",
",",
"statement",
"[",
"2",
"]",
",",
"extra_dict",
"=",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
")",
"for",
"pw",
"in",
"pws",
":",
"all_used_vars",
"|=",
"set",
"(",
"pw",
".",
"split",
"(",
"','",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"f'Failed to parse parameter for_each: {e}'",
")",
"if",
"section",
".",
"task",
":",
"all_used_vars",
"|=",
"accessed_vars",
"(",
"section",
".",
"task",
")",
"# now we have a list of global variables that are actually used in the functions",
"# this is specifically designed to handle the last case in #1225",
"func_with_vars",
"=",
"[",
"y",
"for",
"x",
",",
"y",
"in",
"used_in_func",
"(",
"section",
".",
"global_stmts",
")",
".",
"items",
"(",
")",
"if",
"x",
"in",
"all_used_vars",
"]",
"return",
"set",
".",
"union",
"(",
"all_used_vars",
",",
"*",
"func_with_vars",
")"
] |
Get variables which are variables used by input statement and statements before it
|
[
"Get",
"variables",
"which",
"are",
"variables",
"used",
"by",
"input",
"statement",
"and",
"statements",
"before",
"it"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/section_analyzer.py#L185-L236
|
9,540
|
vatlab/SoS
|
src/sos/section_analyzer.py
|
get_signature_vars
|
def get_signature_vars(section):
'''Get signature variables which are variables that will be
saved with step signatures'''
# signature vars should contain parameters defined in global section
# #1155
signature_vars = set(
section.parameters.keys()
& accessed_vars(strip_param_defs(section.global_stmts)))
input_idx = find_statement(section, 'input')
after_input_idx = 0 if input_idx is None else input_idx + 1
for statement in section.statements[after_input_idx:]:
if statement[0] == '=':
signature_vars |= accessed_vars('='.join(statement[1:3]))
elif statement[0] == '!':
signature_vars |= accessed_vars(statement[1])
# finally, tasks..
if section.task:
signature_vars |= accessed_vars(section.task)
return {x for x in signature_vars if not x.startswith('__')}
|
python
|
def get_signature_vars(section):
'''Get signature variables which are variables that will be
saved with step signatures'''
# signature vars should contain parameters defined in global section
# #1155
signature_vars = set(
section.parameters.keys()
& accessed_vars(strip_param_defs(section.global_stmts)))
input_idx = find_statement(section, 'input')
after_input_idx = 0 if input_idx is None else input_idx + 1
for statement in section.statements[after_input_idx:]:
if statement[0] == '=':
signature_vars |= accessed_vars('='.join(statement[1:3]))
elif statement[0] == '!':
signature_vars |= accessed_vars(statement[1])
# finally, tasks..
if section.task:
signature_vars |= accessed_vars(section.task)
return {x for x in signature_vars if not x.startswith('__')}
|
[
"def",
"get_signature_vars",
"(",
"section",
")",
":",
"# signature vars should contain parameters defined in global section",
"# #1155",
"signature_vars",
"=",
"set",
"(",
"section",
".",
"parameters",
".",
"keys",
"(",
")",
"&",
"accessed_vars",
"(",
"strip_param_defs",
"(",
"section",
".",
"global_stmts",
")",
")",
")",
"input_idx",
"=",
"find_statement",
"(",
"section",
",",
"'input'",
")",
"after_input_idx",
"=",
"0",
"if",
"input_idx",
"is",
"None",
"else",
"input_idx",
"+",
"1",
"for",
"statement",
"in",
"section",
".",
"statements",
"[",
"after_input_idx",
":",
"]",
":",
"if",
"statement",
"[",
"0",
"]",
"==",
"'='",
":",
"signature_vars",
"|=",
"accessed_vars",
"(",
"'='",
".",
"join",
"(",
"statement",
"[",
"1",
":",
"3",
"]",
")",
")",
"elif",
"statement",
"[",
"0",
"]",
"==",
"'!'",
":",
"signature_vars",
"|=",
"accessed_vars",
"(",
"statement",
"[",
"1",
"]",
")",
"# finally, tasks..",
"if",
"section",
".",
"task",
":",
"signature_vars",
"|=",
"accessed_vars",
"(",
"section",
".",
"task",
")",
"return",
"{",
"x",
"for",
"x",
"in",
"signature_vars",
"if",
"not",
"x",
".",
"startswith",
"(",
"'__'",
")",
"}"
] |
Get signature variables which are variables that will be
saved with step signatures
|
[
"Get",
"signature",
"variables",
"which",
"are",
"variables",
"that",
"will",
"be",
"saved",
"with",
"step",
"signatures"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/section_analyzer.py#L239-L261
|
9,541
|
vatlab/SoS
|
src/sos/section_analyzer.py
|
get_step_input
|
def get_step_input(section, default_input):
'''Find step input
'''
step_input: sos_targets = sos_targets()
dynamic_input = True
# look for input statement.
input_idx = find_statement(section, 'input')
if input_idx is None:
return step_input, dynamic_input
# input statement
stmt = section.statements[input_idx][2]
try:
svars = ['output_from', 'named_output', 'sos_step', 'sos_variable']
old_values = {
x: env.sos_dict.dict()[x]
for x in svars
if x in env.sos_dict.dict()
}
env.sos_dict.quick_update({
'output_from': lambda *args, **kwargs: None,
'named_output': lambda *args, **kwargs: None,
'traced': lambda *args, **kwargs: sos_targets(*args, **kwargs),
'sos_step': no_sos_step,
'sos_variable': no_sos_variable,
})
args, kwargs = SoS_eval(
f'__null_func__({stmt})', extra_dict=env.sos_dict.dict())
if not args:
if default_input is None:
step_input = sos_targets()
else:
step_input = default_input
elif not any(isinstance(x, (dynamic, remote)) for x in args):
step_input = sos_targets(*args)
except SyntaxError:
raise
except Exception as e:
# if anything is not evalutable, keep Undetermined
env.logger.debug(
f'Input of step {section.name if section.index is None else f"{section.name}_{section.index}"} is set to Undertermined: {e}'
)
# expression ...
step_input = sos_targets(_undetermined=stmt)
finally:
[env.sos_dict.dict().pop(x) for x in svars]
env.sos_dict.quick_update(old_values)
return step_input, dynamic_input
|
python
|
def get_step_input(section, default_input):
'''Find step input
'''
step_input: sos_targets = sos_targets()
dynamic_input = True
# look for input statement.
input_idx = find_statement(section, 'input')
if input_idx is None:
return step_input, dynamic_input
# input statement
stmt = section.statements[input_idx][2]
try:
svars = ['output_from', 'named_output', 'sos_step', 'sos_variable']
old_values = {
x: env.sos_dict.dict()[x]
for x in svars
if x in env.sos_dict.dict()
}
env.sos_dict.quick_update({
'output_from': lambda *args, **kwargs: None,
'named_output': lambda *args, **kwargs: None,
'traced': lambda *args, **kwargs: sos_targets(*args, **kwargs),
'sos_step': no_sos_step,
'sos_variable': no_sos_variable,
})
args, kwargs = SoS_eval(
f'__null_func__({stmt})', extra_dict=env.sos_dict.dict())
if not args:
if default_input is None:
step_input = sos_targets()
else:
step_input = default_input
elif not any(isinstance(x, (dynamic, remote)) for x in args):
step_input = sos_targets(*args)
except SyntaxError:
raise
except Exception as e:
# if anything is not evalutable, keep Undetermined
env.logger.debug(
f'Input of step {section.name if section.index is None else f"{section.name}_{section.index}"} is set to Undertermined: {e}'
)
# expression ...
step_input = sos_targets(_undetermined=stmt)
finally:
[env.sos_dict.dict().pop(x) for x in svars]
env.sos_dict.quick_update(old_values)
return step_input, dynamic_input
|
[
"def",
"get_step_input",
"(",
"section",
",",
"default_input",
")",
":",
"step_input",
":",
"sos_targets",
"=",
"sos_targets",
"(",
")",
"dynamic_input",
"=",
"True",
"# look for input statement.",
"input_idx",
"=",
"find_statement",
"(",
"section",
",",
"'input'",
")",
"if",
"input_idx",
"is",
"None",
":",
"return",
"step_input",
",",
"dynamic_input",
"# input statement",
"stmt",
"=",
"section",
".",
"statements",
"[",
"input_idx",
"]",
"[",
"2",
"]",
"try",
":",
"svars",
"=",
"[",
"'output_from'",
",",
"'named_output'",
",",
"'sos_step'",
",",
"'sos_variable'",
"]",
"old_values",
"=",
"{",
"x",
":",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
"[",
"x",
"]",
"for",
"x",
"in",
"svars",
"if",
"x",
"in",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
"}",
"env",
".",
"sos_dict",
".",
"quick_update",
"(",
"{",
"'output_from'",
":",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"None",
",",
"'named_output'",
":",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"None",
",",
"'traced'",
":",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"sos_targets",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"'sos_step'",
":",
"no_sos_step",
",",
"'sos_variable'",
":",
"no_sos_variable",
",",
"}",
")",
"args",
",",
"kwargs",
"=",
"SoS_eval",
"(",
"f'__null_func__({stmt})'",
",",
"extra_dict",
"=",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
")",
"if",
"not",
"args",
":",
"if",
"default_input",
"is",
"None",
":",
"step_input",
"=",
"sos_targets",
"(",
")",
"else",
":",
"step_input",
"=",
"default_input",
"elif",
"not",
"any",
"(",
"isinstance",
"(",
"x",
",",
"(",
"dynamic",
",",
"remote",
")",
")",
"for",
"x",
"in",
"args",
")",
":",
"step_input",
"=",
"sos_targets",
"(",
"*",
"args",
")",
"except",
"SyntaxError",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"# if anything is not evalutable, keep Undetermined",
"env",
".",
"logger",
".",
"debug",
"(",
"f'Input of step {section.name if section.index is None else f\"{section.name}_{section.index}\"} is set to Undertermined: {e}'",
")",
"# expression ...",
"step_input",
"=",
"sos_targets",
"(",
"_undetermined",
"=",
"stmt",
")",
"finally",
":",
"[",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
".",
"pop",
"(",
"x",
")",
"for",
"x",
"in",
"svars",
"]",
"env",
".",
"sos_dict",
".",
"quick_update",
"(",
"old_values",
")",
"return",
"step_input",
",",
"dynamic_input"
] |
Find step input
|
[
"Find",
"step",
"input"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/section_analyzer.py#L343-L391
|
9,542
|
vatlab/SoS
|
src/sos/section_analyzer.py
|
get_step_output
|
def get_step_output(section, default_output):
'''determine step output'''
step_output: sos_targets = sos_targets()
#
if 'provides' in section.options and default_output:
step_output = default_output
# look for input statement.
output_idx = find_statement(section, 'output')
if output_idx is None:
return step_output
# output statement
value = section.statements[output_idx][2]
# output, depends, and process can be processed multiple times
try:
svars = ['output_from', 'named_output', 'sos_step', 'sos_variable']
old_values = {
x: env.sos_dict.dict()[x]
for x in svars
if x in env.sos_dict.dict()
}
env.sos_dict.quick_update({
'output_from': no_output_from,
'named_output': no_named_output,
'sos_step': no_sos_step,
'sos_variable': no_sos_variable,
})
args, kwargs = SoS_eval(
f'__null_func__({value})', extra_dict=env.sos_dict.dict())
if not any(isinstance(x, (dynamic, remote)) for x in args):
step_output = sos_targets(
*args, **{
x: y
for x, y in kwargs.items()
if x not in SOS_TARGETS_OPTIONS
})
except SyntaxError:
raise
except Exception as e:
if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('STEP', f"Args {value} cannot be determined: {e}")
finally:
[env.sos_dict.dict().pop(x) for x in svars]
env.sos_dict.quick_update(old_values)
if 'provides' in section.options and default_output is not None and step_output.valid(
):
for out in default_output:
# 981
if not isinstance(out, sos_step) and out not in step_output:
raise ValueError(
f'Defined output fail to produce expected output: {step_output} generated, {default_output} expected.'
)
return step_output
|
python
|
def get_step_output(section, default_output):
'''determine step output'''
step_output: sos_targets = sos_targets()
#
if 'provides' in section.options and default_output:
step_output = default_output
# look for input statement.
output_idx = find_statement(section, 'output')
if output_idx is None:
return step_output
# output statement
value = section.statements[output_idx][2]
# output, depends, and process can be processed multiple times
try:
svars = ['output_from', 'named_output', 'sos_step', 'sos_variable']
old_values = {
x: env.sos_dict.dict()[x]
for x in svars
if x in env.sos_dict.dict()
}
env.sos_dict.quick_update({
'output_from': no_output_from,
'named_output': no_named_output,
'sos_step': no_sos_step,
'sos_variable': no_sos_variable,
})
args, kwargs = SoS_eval(
f'__null_func__({value})', extra_dict=env.sos_dict.dict())
if not any(isinstance(x, (dynamic, remote)) for x in args):
step_output = sos_targets(
*args, **{
x: y
for x, y in kwargs.items()
if x not in SOS_TARGETS_OPTIONS
})
except SyntaxError:
raise
except Exception as e:
if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('STEP', f"Args {value} cannot be determined: {e}")
finally:
[env.sos_dict.dict().pop(x) for x in svars]
env.sos_dict.quick_update(old_values)
if 'provides' in section.options and default_output is not None and step_output.valid(
):
for out in default_output:
# 981
if not isinstance(out, sos_step) and out not in step_output:
raise ValueError(
f'Defined output fail to produce expected output: {step_output} generated, {default_output} expected.'
)
return step_output
|
[
"def",
"get_step_output",
"(",
"section",
",",
"default_output",
")",
":",
"step_output",
":",
"sos_targets",
"=",
"sos_targets",
"(",
")",
"#",
"if",
"'provides'",
"in",
"section",
".",
"options",
"and",
"default_output",
":",
"step_output",
"=",
"default_output",
"# look for input statement.",
"output_idx",
"=",
"find_statement",
"(",
"section",
",",
"'output'",
")",
"if",
"output_idx",
"is",
"None",
":",
"return",
"step_output",
"# output statement",
"value",
"=",
"section",
".",
"statements",
"[",
"output_idx",
"]",
"[",
"2",
"]",
"# output, depends, and process can be processed multiple times",
"try",
":",
"svars",
"=",
"[",
"'output_from'",
",",
"'named_output'",
",",
"'sos_step'",
",",
"'sos_variable'",
"]",
"old_values",
"=",
"{",
"x",
":",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
"[",
"x",
"]",
"for",
"x",
"in",
"svars",
"if",
"x",
"in",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
"}",
"env",
".",
"sos_dict",
".",
"quick_update",
"(",
"{",
"'output_from'",
":",
"no_output_from",
",",
"'named_output'",
":",
"no_named_output",
",",
"'sos_step'",
":",
"no_sos_step",
",",
"'sos_variable'",
":",
"no_sos_variable",
",",
"}",
")",
"args",
",",
"kwargs",
"=",
"SoS_eval",
"(",
"f'__null_func__({value})'",
",",
"extra_dict",
"=",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
")",
"if",
"not",
"any",
"(",
"isinstance",
"(",
"x",
",",
"(",
"dynamic",
",",
"remote",
")",
")",
"for",
"x",
"in",
"args",
")",
":",
"step_output",
"=",
"sos_targets",
"(",
"*",
"args",
",",
"*",
"*",
"{",
"x",
":",
"y",
"for",
"x",
",",
"y",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"x",
"not",
"in",
"SOS_TARGETS_OPTIONS",
"}",
")",
"except",
"SyntaxError",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"if",
"'STEP'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'STEP'",
",",
"f\"Args {value} cannot be determined: {e}\"",
")",
"finally",
":",
"[",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
".",
"pop",
"(",
"x",
")",
"for",
"x",
"in",
"svars",
"]",
"env",
".",
"sos_dict",
".",
"quick_update",
"(",
"old_values",
")",
"if",
"'provides'",
"in",
"section",
".",
"options",
"and",
"default_output",
"is",
"not",
"None",
"and",
"step_output",
".",
"valid",
"(",
")",
":",
"for",
"out",
"in",
"default_output",
":",
"# 981",
"if",
"not",
"isinstance",
"(",
"out",
",",
"sos_step",
")",
"and",
"out",
"not",
"in",
"step_output",
":",
"raise",
"ValueError",
"(",
"f'Defined output fail to produce expected output: {step_output} generated, {default_output} expected.'",
")",
"return",
"step_output"
] |
determine step output
|
[
"determine",
"step",
"output"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/section_analyzer.py#L394-L448
|
9,543
|
vatlab/SoS
|
src/sos/section_analyzer.py
|
analyze_section
|
def analyze_section(section: SoS_Step,
default_input: Optional[sos_targets] = None,
default_output: Optional[sos_targets] = None,
context={},
vars_and_output_only: bool = False) -> Dict[str, Any]:
'''Analyze a section for how it uses input and output, what variables
it uses, and input, output, etc.'''
# analysis_key = (section.md5, section.step_name(),
# default_input.target_name() if hasattr(default_input, 'target_name') else '',
# default_output.target_name() if hasattr(default_output, 'target_name') else '', vars_and_output_only)
#if analysis_key in analysis_cache:
# return analysis_cache[analysis_key]
# use a fresh env for analysis
new_env, old_env = env.request_new()
try:
prepare_env(section.global_def, section.global_vars, context)
env.sos_dict.set('step_name', section.step_name())
env.sos_dict.set('__null_func__', __null_func__)
if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file(
'STEP',
f'Analyzing {section.step_name()} {"(output only)" if vars_and_output_only else ""}'
)
res = {
'step_name': section.step_name(),
'step_output': get_step_output(section, default_output),
# variables starting with __ are internals...
'environ_vars': get_environ_vars(section),
'signature_vars': get_signature_vars(section),
'changed_vars': get_changed_vars(section)
}
if not vars_and_output_only:
inps = get_step_input(section, default_input)
res['step_input'] = inps[0]
res['dynamic_input'] = inps[1]
deps = get_step_depends(section)
res['step_depends'] = deps[0]
res['dynamic_depends'] = deps[1]
# analysis_cache[analysis_key] = res
finally:
# restore env
env.restore_to_old(new_env, old_env)
# #1225
# The global section can contain a lot of variables, some of which can be large. Here we
# found all variables that will be used in the step, including ones used in substep (signature_vars)
# and ones that will be used in input statement etc.
section.global_vars = {
x: y
for x, y in section.global_vars.items()
if x in get_all_used_vars(section)
}
return res
|
python
|
def analyze_section(section: SoS_Step,
default_input: Optional[sos_targets] = None,
default_output: Optional[sos_targets] = None,
context={},
vars_and_output_only: bool = False) -> Dict[str, Any]:
'''Analyze a section for how it uses input and output, what variables
it uses, and input, output, etc.'''
# analysis_key = (section.md5, section.step_name(),
# default_input.target_name() if hasattr(default_input, 'target_name') else '',
# default_output.target_name() if hasattr(default_output, 'target_name') else '', vars_and_output_only)
#if analysis_key in analysis_cache:
# return analysis_cache[analysis_key]
# use a fresh env for analysis
new_env, old_env = env.request_new()
try:
prepare_env(section.global_def, section.global_vars, context)
env.sos_dict.set('step_name', section.step_name())
env.sos_dict.set('__null_func__', __null_func__)
if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file(
'STEP',
f'Analyzing {section.step_name()} {"(output only)" if vars_and_output_only else ""}'
)
res = {
'step_name': section.step_name(),
'step_output': get_step_output(section, default_output),
# variables starting with __ are internals...
'environ_vars': get_environ_vars(section),
'signature_vars': get_signature_vars(section),
'changed_vars': get_changed_vars(section)
}
if not vars_and_output_only:
inps = get_step_input(section, default_input)
res['step_input'] = inps[0]
res['dynamic_input'] = inps[1]
deps = get_step_depends(section)
res['step_depends'] = deps[0]
res['dynamic_depends'] = deps[1]
# analysis_cache[analysis_key] = res
finally:
# restore env
env.restore_to_old(new_env, old_env)
# #1225
# The global section can contain a lot of variables, some of which can be large. Here we
# found all variables that will be used in the step, including ones used in substep (signature_vars)
# and ones that will be used in input statement etc.
section.global_vars = {
x: y
for x, y in section.global_vars.items()
if x in get_all_used_vars(section)
}
return res
|
[
"def",
"analyze_section",
"(",
"section",
":",
"SoS_Step",
",",
"default_input",
":",
"Optional",
"[",
"sos_targets",
"]",
"=",
"None",
",",
"default_output",
":",
"Optional",
"[",
"sos_targets",
"]",
"=",
"None",
",",
"context",
"=",
"{",
"}",
",",
"vars_and_output_only",
":",
"bool",
"=",
"False",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"# analysis_key = (section.md5, section.step_name(),",
"# default_input.target_name() if hasattr(default_input, 'target_name') else '',",
"# default_output.target_name() if hasattr(default_output, 'target_name') else '', vars_and_output_only)",
"#if analysis_key in analysis_cache:",
"# return analysis_cache[analysis_key]",
"# use a fresh env for analysis",
"new_env",
",",
"old_env",
"=",
"env",
".",
"request_new",
"(",
")",
"try",
":",
"prepare_env",
"(",
"section",
".",
"global_def",
",",
"section",
".",
"global_vars",
",",
"context",
")",
"env",
".",
"sos_dict",
".",
"set",
"(",
"'step_name'",
",",
"section",
".",
"step_name",
"(",
")",
")",
"env",
".",
"sos_dict",
".",
"set",
"(",
"'__null_func__'",
",",
"__null_func__",
")",
"if",
"'STEP'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'STEP'",
",",
"f'Analyzing {section.step_name()} {\"(output only)\" if vars_and_output_only else \"\"}'",
")",
"res",
"=",
"{",
"'step_name'",
":",
"section",
".",
"step_name",
"(",
")",
",",
"'step_output'",
":",
"get_step_output",
"(",
"section",
",",
"default_output",
")",
",",
"# variables starting with __ are internals...",
"'environ_vars'",
":",
"get_environ_vars",
"(",
"section",
")",
",",
"'signature_vars'",
":",
"get_signature_vars",
"(",
"section",
")",
",",
"'changed_vars'",
":",
"get_changed_vars",
"(",
"section",
")",
"}",
"if",
"not",
"vars_and_output_only",
":",
"inps",
"=",
"get_step_input",
"(",
"section",
",",
"default_input",
")",
"res",
"[",
"'step_input'",
"]",
"=",
"inps",
"[",
"0",
"]",
"res",
"[",
"'dynamic_input'",
"]",
"=",
"inps",
"[",
"1",
"]",
"deps",
"=",
"get_step_depends",
"(",
"section",
")",
"res",
"[",
"'step_depends'",
"]",
"=",
"deps",
"[",
"0",
"]",
"res",
"[",
"'dynamic_depends'",
"]",
"=",
"deps",
"[",
"1",
"]",
"# analysis_cache[analysis_key] = res",
"finally",
":",
"# restore env",
"env",
".",
"restore_to_old",
"(",
"new_env",
",",
"old_env",
")",
"# #1225",
"# The global section can contain a lot of variables, some of which can be large. Here we",
"# found all variables that will be used in the step, including ones used in substep (signature_vars)",
"# and ones that will be used in input statement etc.",
"section",
".",
"global_vars",
"=",
"{",
"x",
":",
"y",
"for",
"x",
",",
"y",
"in",
"section",
".",
"global_vars",
".",
"items",
"(",
")",
"if",
"x",
"in",
"get_all_used_vars",
"(",
"section",
")",
"}",
"return",
"res"
] |
Analyze a section for how it uses input and output, what variables
it uses, and input, output, etc.
|
[
"Analyze",
"a",
"section",
"for",
"how",
"it",
"uses",
"input",
"and",
"output",
"what",
"variables",
"it",
"uses",
"and",
"input",
"output",
"etc",
"."
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/section_analyzer.py#L514-L569
|
9,544
|
vatlab/SoS
|
src/sos/converter.py
|
extract_workflow
|
def extract_workflow(notebook):
'''Extract workflow from a notebook file or notebook JSON instance'''
if isinstance(notebook, str):
nb = nbformat.read(notebook, nbformat.NO_CONVERT)
else:
nb = notebook
cells = nb.cells
content = '#!/usr/bin/env sos-runner\n#fileformat=SOS1.0\n\n'
for cell in cells:
if cell.cell_type != "code":
continue
# Non-sos code cells are also ignored
if 'kernel' in cell.metadata and cell.metadata['kernel'] not in ('sos',
'SoS',
None):
continue
lines = cell.source.split('\n')
valid_cell = False
for idx, line in enumerate(lines):
if valid_cell or (line.startswith('%include') or
line.startswith('%from')):
content += line + '\n'
elif SOS_SECTION_HEADER.match(line):
valid_cell = True
# look retrospectively for comments
c = idx - 1
comment = ''
while c >= 0 and lines[c].startswith('#'):
comment = lines[c] + '\n' + comment
c -= 1
content += comment + line + '\n'
if valid_cell:
content += '\n'
return content
|
python
|
def extract_workflow(notebook):
'''Extract workflow from a notebook file or notebook JSON instance'''
if isinstance(notebook, str):
nb = nbformat.read(notebook, nbformat.NO_CONVERT)
else:
nb = notebook
cells = nb.cells
content = '#!/usr/bin/env sos-runner\n#fileformat=SOS1.0\n\n'
for cell in cells:
if cell.cell_type != "code":
continue
# Non-sos code cells are also ignored
if 'kernel' in cell.metadata and cell.metadata['kernel'] not in ('sos',
'SoS',
None):
continue
lines = cell.source.split('\n')
valid_cell = False
for idx, line in enumerate(lines):
if valid_cell or (line.startswith('%include') or
line.startswith('%from')):
content += line + '\n'
elif SOS_SECTION_HEADER.match(line):
valid_cell = True
# look retrospectively for comments
c = idx - 1
comment = ''
while c >= 0 and lines[c].startswith('#'):
comment = lines[c] + '\n' + comment
c -= 1
content += comment + line + '\n'
if valid_cell:
content += '\n'
return content
|
[
"def",
"extract_workflow",
"(",
"notebook",
")",
":",
"if",
"isinstance",
"(",
"notebook",
",",
"str",
")",
":",
"nb",
"=",
"nbformat",
".",
"read",
"(",
"notebook",
",",
"nbformat",
".",
"NO_CONVERT",
")",
"else",
":",
"nb",
"=",
"notebook",
"cells",
"=",
"nb",
".",
"cells",
"content",
"=",
"'#!/usr/bin/env sos-runner\\n#fileformat=SOS1.0\\n\\n'",
"for",
"cell",
"in",
"cells",
":",
"if",
"cell",
".",
"cell_type",
"!=",
"\"code\"",
":",
"continue",
"# Non-sos code cells are also ignored",
"if",
"'kernel'",
"in",
"cell",
".",
"metadata",
"and",
"cell",
".",
"metadata",
"[",
"'kernel'",
"]",
"not",
"in",
"(",
"'sos'",
",",
"'SoS'",
",",
"None",
")",
":",
"continue",
"lines",
"=",
"cell",
".",
"source",
".",
"split",
"(",
"'\\n'",
")",
"valid_cell",
"=",
"False",
"for",
"idx",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"valid_cell",
"or",
"(",
"line",
".",
"startswith",
"(",
"'%include'",
")",
"or",
"line",
".",
"startswith",
"(",
"'%from'",
")",
")",
":",
"content",
"+=",
"line",
"+",
"'\\n'",
"elif",
"SOS_SECTION_HEADER",
".",
"match",
"(",
"line",
")",
":",
"valid_cell",
"=",
"True",
"# look retrospectively for comments",
"c",
"=",
"idx",
"-",
"1",
"comment",
"=",
"''",
"while",
"c",
">=",
"0",
"and",
"lines",
"[",
"c",
"]",
".",
"startswith",
"(",
"'#'",
")",
":",
"comment",
"=",
"lines",
"[",
"c",
"]",
"+",
"'\\n'",
"+",
"comment",
"c",
"-=",
"1",
"content",
"+=",
"comment",
"+",
"line",
"+",
"'\\n'",
"if",
"valid_cell",
":",
"content",
"+=",
"'\\n'",
"return",
"content"
] |
Extract workflow from a notebook file or notebook JSON instance
|
[
"Extract",
"workflow",
"from",
"a",
"notebook",
"file",
"or",
"notebook",
"JSON",
"instance"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/converter.py#L166-L199
|
9,545
|
vatlab/SoS
|
misc/vim-ipython/vim_ipython.py
|
vim_ipython_is_open
|
def vim_ipython_is_open():
"""
Helper function to let us know if the vim-ipython shell is currently
visible
"""
for w in vim.windows:
if w.buffer.name is not None and w.buffer.name.endswith("vim-ipython"):
return True
return False
|
python
|
def vim_ipython_is_open():
"""
Helper function to let us know if the vim-ipython shell is currently
visible
"""
for w in vim.windows:
if w.buffer.name is not None and w.buffer.name.endswith("vim-ipython"):
return True
return False
|
[
"def",
"vim_ipython_is_open",
"(",
")",
":",
"for",
"w",
"in",
"vim",
".",
"windows",
":",
"if",
"w",
".",
"buffer",
".",
"name",
"is",
"not",
"None",
"and",
"w",
".",
"buffer",
".",
"name",
".",
"endswith",
"(",
"\"vim-ipython\"",
")",
":",
"return",
"True",
"return",
"False"
] |
Helper function to let us know if the vim-ipython shell is currently
visible
|
[
"Helper",
"function",
"to",
"let",
"us",
"know",
"if",
"the",
"vim",
"-",
"ipython",
"shell",
"is",
"currently",
"visible"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/misc/vim-ipython/vim_ipython.py#L345-L353
|
9,546
|
vatlab/SoS
|
misc/vim-ipython/vim_ipython.py
|
with_subchannel
|
def with_subchannel(f,*args):
"conditionally monitor subchannel"
def f_with_update(*args):
try:
f(*args)
if monitor_subchannel:
update_subchannel_msgs(force=True)
except AttributeError: #if kc is None
echo("not connected to IPython", 'Error')
return f_with_update
|
python
|
def with_subchannel(f,*args):
"conditionally monitor subchannel"
def f_with_update(*args):
try:
f(*args)
if monitor_subchannel:
update_subchannel_msgs(force=True)
except AttributeError: #if kc is None
echo("not connected to IPython", 'Error')
return f_with_update
|
[
"def",
"with_subchannel",
"(",
"f",
",",
"*",
"args",
")",
":",
"def",
"f_with_update",
"(",
"*",
"args",
")",
":",
"try",
":",
"f",
"(",
"*",
"args",
")",
"if",
"monitor_subchannel",
":",
"update_subchannel_msgs",
"(",
"force",
"=",
"True",
")",
"except",
"AttributeError",
":",
"#if kc is None",
"echo",
"(",
"\"not connected to IPython\"",
",",
"'Error'",
")",
"return",
"f_with_update"
] |
conditionally monitor subchannel
|
[
"conditionally",
"monitor",
"subchannel"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/misc/vim-ipython/vim_ipython.py#L570-L579
|
9,547
|
vatlab/SoS
|
misc/vim-ipython/vim_ipython.py
|
set_pid
|
def set_pid():
"""
Explicitly ask the ipython kernel for its pid
"""
global pid
lines = '\n'.join(['import os', '_pid = os.getpid()'])
try:
msg_id = send(lines, silent=True, user_variables=['_pid'])
except TypeError: # change in IPython 3.0+
msg_id = send(lines, silent=True, user_expressions={'_pid':'_pid'})
# wait to get message back from kernel
try:
child = get_child_msg(msg_id)
except Empty:
echo("no reply from IPython kernel")
return
try:
pid = int(child['content']['user_variables']['_pid'])
except TypeError: # change in IPython 1.0.dev moved this out
pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
except KeyError: # change in IPython 3.0+
pid = int(
child['content']['user_expressions']['_pid']['data']['text/plain'])
except KeyError: # change in IPython 1.0.dev moved this out
echo("Could not get PID information, kernel not running Python?")
return pid
|
python
|
def set_pid():
"""
Explicitly ask the ipython kernel for its pid
"""
global pid
lines = '\n'.join(['import os', '_pid = os.getpid()'])
try:
msg_id = send(lines, silent=True, user_variables=['_pid'])
except TypeError: # change in IPython 3.0+
msg_id = send(lines, silent=True, user_expressions={'_pid':'_pid'})
# wait to get message back from kernel
try:
child = get_child_msg(msg_id)
except Empty:
echo("no reply from IPython kernel")
return
try:
pid = int(child['content']['user_variables']['_pid'])
except TypeError: # change in IPython 1.0.dev moved this out
pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
except KeyError: # change in IPython 3.0+
pid = int(
child['content']['user_expressions']['_pid']['data']['text/plain'])
except KeyError: # change in IPython 1.0.dev moved this out
echo("Could not get PID information, kernel not running Python?")
return pid
|
[
"def",
"set_pid",
"(",
")",
":",
"global",
"pid",
"lines",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"'import os'",
",",
"'_pid = os.getpid()'",
"]",
")",
"try",
":",
"msg_id",
"=",
"send",
"(",
"lines",
",",
"silent",
"=",
"True",
",",
"user_variables",
"=",
"[",
"'_pid'",
"]",
")",
"except",
"TypeError",
":",
"# change in IPython 3.0+",
"msg_id",
"=",
"send",
"(",
"lines",
",",
"silent",
"=",
"True",
",",
"user_expressions",
"=",
"{",
"'_pid'",
":",
"'_pid'",
"}",
")",
"# wait to get message back from kernel",
"try",
":",
"child",
"=",
"get_child_msg",
"(",
"msg_id",
")",
"except",
"Empty",
":",
"echo",
"(",
"\"no reply from IPython kernel\"",
")",
"return",
"try",
":",
"pid",
"=",
"int",
"(",
"child",
"[",
"'content'",
"]",
"[",
"'user_variables'",
"]",
"[",
"'_pid'",
"]",
")",
"except",
"TypeError",
":",
"# change in IPython 1.0.dev moved this out",
"pid",
"=",
"int",
"(",
"child",
"[",
"'content'",
"]",
"[",
"'user_variables'",
"]",
"[",
"'_pid'",
"]",
"[",
"'data'",
"]",
"[",
"'text/plain'",
"]",
")",
"except",
"KeyError",
":",
"# change in IPython 3.0+",
"pid",
"=",
"int",
"(",
"child",
"[",
"'content'",
"]",
"[",
"'user_expressions'",
"]",
"[",
"'_pid'",
"]",
"[",
"'data'",
"]",
"[",
"'text/plain'",
"]",
")",
"except",
"KeyError",
":",
"# change in IPython 1.0.dev moved this out",
"echo",
"(",
"\"Could not get PID information, kernel not running Python?\"",
")",
"return",
"pid"
] |
Explicitly ask the ipython kernel for its pid
|
[
"Explicitly",
"ask",
"the",
"ipython",
"kernel",
"for",
"its",
"pid"
] |
6b60ed0770916d135e17322e469520d778e9d4e7
|
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/misc/vim-ipython/vim_ipython.py#L646-L673
|
9,548
|
BradRuderman/pyhs2
|
pyhs2/cursor.py
|
Cursor.fetchmany
|
def fetchmany(self,size=-1):
""" return a sequential set of records. This is guaranteed by locking,
so that no other thread can grab a few records while a set is fetched.
this has the side effect that other threads may have to wait for
an arbitrary long time for the completion of the current request.
"""
self._cursorLock.acquire()
# default value (or just checking that someone did not put a ridiculous size)
if size < 0 or size > self.MAX_BLOCK_SIZE:
size = self.arraysize
recs = []
for i in range(0,size):
recs.append(self.fetchone())
self._cursorLock.release()
return recs
|
python
|
def fetchmany(self,size=-1):
""" return a sequential set of records. This is guaranteed by locking,
so that no other thread can grab a few records while a set is fetched.
this has the side effect that other threads may have to wait for
an arbitrary long time for the completion of the current request.
"""
self._cursorLock.acquire()
# default value (or just checking that someone did not put a ridiculous size)
if size < 0 or size > self.MAX_BLOCK_SIZE:
size = self.arraysize
recs = []
for i in range(0,size):
recs.append(self.fetchone())
self._cursorLock.release()
return recs
|
[
"def",
"fetchmany",
"(",
"self",
",",
"size",
"=",
"-",
"1",
")",
":",
"self",
".",
"_cursorLock",
".",
"acquire",
"(",
")",
"# default value (or just checking that someone did not put a ridiculous size)",
"if",
"size",
"<",
"0",
"or",
"size",
">",
"self",
".",
"MAX_BLOCK_SIZE",
":",
"size",
"=",
"self",
".",
"arraysize",
"recs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"size",
")",
":",
"recs",
".",
"append",
"(",
"self",
".",
"fetchone",
"(",
")",
")",
"self",
".",
"_cursorLock",
".",
"release",
"(",
")",
"return",
"recs"
] |
return a sequential set of records. This is guaranteed by locking,
so that no other thread can grab a few records while a set is fetched.
this has the side effect that other threads may have to wait for
an arbitrary long time for the completion of the current request.
|
[
"return",
"a",
"sequential",
"set",
"of",
"records",
".",
"This",
"is",
"guaranteed",
"by",
"locking",
"so",
"that",
"no",
"other",
"thread",
"can",
"grab",
"a",
"few",
"records",
"while",
"a",
"set",
"is",
"fetched",
".",
"this",
"has",
"the",
"side",
"effect",
"that",
"other",
"threads",
"may",
"have",
"to",
"wait",
"for",
"an",
"arbitrary",
"long",
"time",
"for",
"the",
"completion",
"of",
"the",
"current",
"request",
"."
] |
1094d4b3a1e9032ee17eeb41f3381bbbd95862c1
|
https://github.com/BradRuderman/pyhs2/blob/1094d4b3a1e9032ee17eeb41f3381bbbd95862c1/pyhs2/cursor.py#L143-L159
|
9,549
|
kashifrazzaqui/json-streamer
|
jsonstreamer/jsonstreamer.py
|
JSONStreamer.on_number
|
def on_number(self, ctx, value):
''' Since this is defined both integer and double callbacks are useless '''
value = int(value) if value.isdigit() else float(value)
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
self.fire(JSONStreamer.VALUE_EVENT, value)
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, value)
else:
raise RuntimeError('Invalid json-streamer state')
|
python
|
def on_number(self, ctx, value):
''' Since this is defined both integer and double callbacks are useless '''
value = int(value) if value.isdigit() else float(value)
top = self._stack[-1]
if top is JSONCompositeType.OBJECT:
self.fire(JSONStreamer.VALUE_EVENT, value)
elif top is JSONCompositeType.ARRAY:
self.fire(JSONStreamer.ELEMENT_EVENT, value)
else:
raise RuntimeError('Invalid json-streamer state')
|
[
"def",
"on_number",
"(",
"self",
",",
"ctx",
",",
"value",
")",
":",
"value",
"=",
"int",
"(",
"value",
")",
"if",
"value",
".",
"isdigit",
"(",
")",
"else",
"float",
"(",
"value",
")",
"top",
"=",
"self",
".",
"_stack",
"[",
"-",
"1",
"]",
"if",
"top",
"is",
"JSONCompositeType",
".",
"OBJECT",
":",
"self",
".",
"fire",
"(",
"JSONStreamer",
".",
"VALUE_EVENT",
",",
"value",
")",
"elif",
"top",
"is",
"JSONCompositeType",
".",
"ARRAY",
":",
"self",
".",
"fire",
"(",
"JSONStreamer",
".",
"ELEMENT_EVENT",
",",
"value",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Invalid json-streamer state'",
")"
] |
Since this is defined both integer and double callbacks are useless
|
[
"Since",
"this",
"is",
"defined",
"both",
"integer",
"and",
"double",
"callbacks",
"are",
"useless"
] |
f87527d57557d11682c12727a1a4eeda9cca3c8f
|
https://github.com/kashifrazzaqui/json-streamer/blob/f87527d57557d11682c12727a1a4eeda9cca3c8f/jsonstreamer/jsonstreamer.py#L147-L156
|
9,550
|
kashifrazzaqui/json-streamer
|
jsonstreamer/jsonstreamer.py
|
JSONStreamer.close
|
def close(self):
"""Closes the streamer which causes a `DOC_END_EVENT` to be fired and frees up memory used by yajl"""
self.fire(JSONStreamer.DOC_END_EVENT)
self._stack = None
self._parser.close()
|
python
|
def close(self):
"""Closes the streamer which causes a `DOC_END_EVENT` to be fired and frees up memory used by yajl"""
self.fire(JSONStreamer.DOC_END_EVENT)
self._stack = None
self._parser.close()
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"fire",
"(",
"JSONStreamer",
".",
"DOC_END_EVENT",
")",
"self",
".",
"_stack",
"=",
"None",
"self",
".",
"_parser",
".",
"close",
"(",
")"
] |
Closes the streamer which causes a `DOC_END_EVENT` to be fired and frees up memory used by yajl
|
[
"Closes",
"the",
"streamer",
"which",
"causes",
"a",
"DOC_END_EVENT",
"to",
"be",
"fired",
"and",
"frees",
"up",
"memory",
"used",
"by",
"yajl"
] |
f87527d57557d11682c12727a1a4eeda9cca3c8f
|
https://github.com/kashifrazzaqui/json-streamer/blob/f87527d57557d11682c12727a1a4eeda9cca3c8f/jsonstreamer/jsonstreamer.py#L190-L194
|
9,551
|
paolodragone/pymzn
|
pymzn/mzn/aio/minizinc.py
|
minizinc
|
async def minizinc(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='dict',
solver=None, timeout=None, two_pass=None, pre_passes=None,
output_objective=False, non_unique=False, all_solutions=False,
num_solutions=None, free_search=False, parallel=None, seed=None,
rebase_arrays=True, keep_solutions=True, return_enums=False,
max_queue_size=0, **kwargs
):
"""Coroutine version of the ``pymzn.minizinc`` function.
Parameters
----------
max_queue_size : int
Maximum number of solutions in the queue between the solution parser and
the returned solution stream. When the queue is full, the solver
execution will halt untill an item of the queue is consumed. This option
is useful for memory management in cases where the solution stream gets
very large and the caller cannot consume solutions as fast as they are
produced. Use with care, if the full solution stream is not consumed
before the execution of the Python program ends it may result in the
solver becoming a zombie process. Default is ``0``, meaning an infinite
queue.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
if not solver:
solver = config.get('solver', gecode)
solver_args = {**kwargs, **config.get('solver_args', {})}
proc = await solve(
solver, mzn_file, *dzn_files, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
pre_passes=pre_passes, output_objective=output_objective,
non_unique=non_unique, all_solutions=all_solutions,
num_solutions=num_solutions, free_search=free_search, parallel=parallel,
seed=seed, allow_multiple_assignments=allow_multiple_assignments,
**solver_args
)
if output_mode == 'raw':
solns = asyncio.Queue(maxsize=max_queue_size)
task = asyncio.create_task(_collect(proc, solns))
else:
parser = AsyncSolutionParser(
solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
types=types, keep_solutions=keep_solutions,
return_enums=return_enums, max_queue_size=max_queue_size
)
solns = await parser.parse(proc)
task = parser.parse_task
if not keep:
task.add_done_callback(partial(_cleanup_cb, [mzn_file, data_file]))
return solns
|
python
|
async def minizinc(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='dict',
solver=None, timeout=None, two_pass=None, pre_passes=None,
output_objective=False, non_unique=False, all_solutions=False,
num_solutions=None, free_search=False, parallel=None, seed=None,
rebase_arrays=True, keep_solutions=True, return_enums=False,
max_queue_size=0, **kwargs
):
"""Coroutine version of the ``pymzn.minizinc`` function.
Parameters
----------
max_queue_size : int
Maximum number of solutions in the queue between the solution parser and
the returned solution stream. When the queue is full, the solver
execution will halt untill an item of the queue is consumed. This option
is useful for memory management in cases where the solution stream gets
very large and the caller cannot consume solutions as fast as they are
produced. Use with care, if the full solution stream is not consumed
before the execution of the Python program ends it may result in the
solver becoming a zombie process. Default is ``0``, meaning an infinite
queue.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
if not solver:
solver = config.get('solver', gecode)
solver_args = {**kwargs, **config.get('solver_args', {})}
proc = await solve(
solver, mzn_file, *dzn_files, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
pre_passes=pre_passes, output_objective=output_objective,
non_unique=non_unique, all_solutions=all_solutions,
num_solutions=num_solutions, free_search=free_search, parallel=parallel,
seed=seed, allow_multiple_assignments=allow_multiple_assignments,
**solver_args
)
if output_mode == 'raw':
solns = asyncio.Queue(maxsize=max_queue_size)
task = asyncio.create_task(_collect(proc, solns))
else:
parser = AsyncSolutionParser(
solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
types=types, keep_solutions=keep_solutions,
return_enums=return_enums, max_queue_size=max_queue_size
)
solns = await parser.parse(proc)
task = parser.parse_task
if not keep:
task.add_done_callback(partial(_cleanup_cb, [mzn_file, data_file]))
return solns
|
[
"async",
"def",
"minizinc",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"None",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"declare_enums",
"=",
"True",
",",
"allow_multiple_assignments",
"=",
"False",
",",
"keep",
"=",
"False",
",",
"output_vars",
"=",
"None",
",",
"output_base",
"=",
"None",
",",
"output_mode",
"=",
"'dict'",
",",
"solver",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"two_pass",
"=",
"None",
",",
"pre_passes",
"=",
"None",
",",
"output_objective",
"=",
"False",
",",
"non_unique",
"=",
"False",
",",
"all_solutions",
"=",
"False",
",",
"num_solutions",
"=",
"None",
",",
"free_search",
"=",
"False",
",",
"parallel",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"rebase_arrays",
"=",
"True",
",",
"keep_solutions",
"=",
"True",
",",
"return_enums",
"=",
"False",
",",
"max_queue_size",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"mzn_file",
",",
"dzn_files",
",",
"data_file",
",",
"data",
",",
"keep",
",",
"_output_mode",
",",
"types",
"=",
"_minizinc_preliminaries",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"args",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_vars",
"=",
"output_vars",
",",
"keep",
"=",
"keep",
",",
"output_base",
"=",
"output_base",
",",
"output_mode",
"=",
"output_mode",
",",
"declare_enums",
"=",
"declare_enums",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"if",
"not",
"solver",
":",
"solver",
"=",
"config",
".",
"get",
"(",
"'solver'",
",",
"gecode",
")",
"solver_args",
"=",
"{",
"*",
"*",
"kwargs",
",",
"*",
"*",
"config",
".",
"get",
"(",
"'solver_args'",
",",
"{",
"}",
")",
"}",
"proc",
"=",
"await",
"solve",
"(",
"solver",
",",
"mzn_file",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_mode",
"=",
"_output_mode",
",",
"timeout",
"=",
"timeout",
",",
"two_pass",
"=",
"two_pass",
",",
"pre_passes",
"=",
"pre_passes",
",",
"output_objective",
"=",
"output_objective",
",",
"non_unique",
"=",
"non_unique",
",",
"all_solutions",
"=",
"all_solutions",
",",
"num_solutions",
"=",
"num_solutions",
",",
"free_search",
"=",
"free_search",
",",
"parallel",
"=",
"parallel",
",",
"seed",
"=",
"seed",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
",",
"*",
"*",
"solver_args",
")",
"if",
"output_mode",
"==",
"'raw'",
":",
"solns",
"=",
"asyncio",
".",
"Queue",
"(",
"maxsize",
"=",
"max_queue_size",
")",
"task",
"=",
"asyncio",
".",
"create_task",
"(",
"_collect",
"(",
"proc",
",",
"solns",
")",
")",
"else",
":",
"parser",
"=",
"AsyncSolutionParser",
"(",
"solver",
",",
"output_mode",
"=",
"output_mode",
",",
"rebase_arrays",
"=",
"rebase_arrays",
",",
"types",
"=",
"types",
",",
"keep_solutions",
"=",
"keep_solutions",
",",
"return_enums",
"=",
"return_enums",
",",
"max_queue_size",
"=",
"max_queue_size",
")",
"solns",
"=",
"await",
"parser",
".",
"parse",
"(",
"proc",
")",
"task",
"=",
"parser",
".",
"parse_task",
"if",
"not",
"keep",
":",
"task",
".",
"add_done_callback",
"(",
"partial",
"(",
"_cleanup_cb",
",",
"[",
"mzn_file",
",",
"data_file",
"]",
")",
")",
"return",
"solns"
] |
Coroutine version of the ``pymzn.minizinc`` function.
Parameters
----------
max_queue_size : int
Maximum number of solutions in the queue between the solution parser and
the returned solution stream. When the queue is full, the solver
execution will halt untill an item of the queue is consumed. This option
is useful for memory management in cases where the solution stream gets
very large and the caller cannot consume solutions as fast as they are
produced. Use with care, if the full solution stream is not consumed
before the execution of the Python program ends it may result in the
solver becoming a zombie process. Default is ``0``, meaning an infinite
queue.
|
[
"Coroutine",
"version",
"of",
"the",
"pymzn",
".",
"minizinc",
"function",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/aio/minizinc.py#L35-L101
|
9,552
|
paolodragone/pymzn
|
pymzn/dzn/parse.py
|
parse_value
|
def parse_value(val, var_type=None, enums=None, rebase_arrays=True):
"""Parses the value of a dzn statement.
Parameters
----------
val : str
A value in dzn format.
var_type : dict
The dictionary of variable type as returned by the command ``minizinc
--model-types-only``. Default is ``None``, in which case the type of the
variable is inferred from its value. To parse an enum value as a Python
enum type its ``var_type`` is required, otherwise the value is simply
returned as a string.
enums : dict of IntEnum
A dictionary containing Python enums, with their respective names as
keys. These enums are available to the parser to convert enum values
into corresponding values of their respective enum types. Enum values
can only be parsed if also the ``var_type`` of the variable is
available.
rebase_arrays : bool
If the parsed value is an array and ``rebase_arrays`` is ``True``,
return it as zero-based lists. If ``rebase_arrays`` is ``False``,
instead, return it as a dictionary, preserving the original index-set.
Returns
-------
object
The parsed object. The type of the object depends on the dzn value.
"""
if not var_type:
p_val = _parse_array(
val, rebase_arrays=rebase_arrays, enums=enums, raise_errors=False
)
if p_val is not None:
return p_val
return _parse_val(val, enums=enums)
if 'dims' in var_type:
return _parse_array(
val, rebase_arrays=rebase_arrays, var_type=var_type, enums=enums
)
return _parse_val(val, var_type=var_type, enums=enums)
|
python
|
def parse_value(val, var_type=None, enums=None, rebase_arrays=True):
"""Parses the value of a dzn statement.
Parameters
----------
val : str
A value in dzn format.
var_type : dict
The dictionary of variable type as returned by the command ``minizinc
--model-types-only``. Default is ``None``, in which case the type of the
variable is inferred from its value. To parse an enum value as a Python
enum type its ``var_type`` is required, otherwise the value is simply
returned as a string.
enums : dict of IntEnum
A dictionary containing Python enums, with their respective names as
keys. These enums are available to the parser to convert enum values
into corresponding values of their respective enum types. Enum values
can only be parsed if also the ``var_type`` of the variable is
available.
rebase_arrays : bool
If the parsed value is an array and ``rebase_arrays`` is ``True``,
return it as zero-based lists. If ``rebase_arrays`` is ``False``,
instead, return it as a dictionary, preserving the original index-set.
Returns
-------
object
The parsed object. The type of the object depends on the dzn value.
"""
if not var_type:
p_val = _parse_array(
val, rebase_arrays=rebase_arrays, enums=enums, raise_errors=False
)
if p_val is not None:
return p_val
return _parse_val(val, enums=enums)
if 'dims' in var_type:
return _parse_array(
val, rebase_arrays=rebase_arrays, var_type=var_type, enums=enums
)
return _parse_val(val, var_type=var_type, enums=enums)
|
[
"def",
"parse_value",
"(",
"val",
",",
"var_type",
"=",
"None",
",",
"enums",
"=",
"None",
",",
"rebase_arrays",
"=",
"True",
")",
":",
"if",
"not",
"var_type",
":",
"p_val",
"=",
"_parse_array",
"(",
"val",
",",
"rebase_arrays",
"=",
"rebase_arrays",
",",
"enums",
"=",
"enums",
",",
"raise_errors",
"=",
"False",
")",
"if",
"p_val",
"is",
"not",
"None",
":",
"return",
"p_val",
"return",
"_parse_val",
"(",
"val",
",",
"enums",
"=",
"enums",
")",
"if",
"'dims'",
"in",
"var_type",
":",
"return",
"_parse_array",
"(",
"val",
",",
"rebase_arrays",
"=",
"rebase_arrays",
",",
"var_type",
"=",
"var_type",
",",
"enums",
"=",
"enums",
")",
"return",
"_parse_val",
"(",
"val",
",",
"var_type",
"=",
"var_type",
",",
"enums",
"=",
"enums",
")"
] |
Parses the value of a dzn statement.
Parameters
----------
val : str
A value in dzn format.
var_type : dict
The dictionary of variable type as returned by the command ``minizinc
--model-types-only``. Default is ``None``, in which case the type of the
variable is inferred from its value. To parse an enum value as a Python
enum type its ``var_type`` is required, otherwise the value is simply
returned as a string.
enums : dict of IntEnum
A dictionary containing Python enums, with their respective names as
keys. These enums are available to the parser to convert enum values
into corresponding values of their respective enum types. Enum values
can only be parsed if also the ``var_type`` of the variable is
available.
rebase_arrays : bool
If the parsed value is an array and ``rebase_arrays`` is ``True``,
return it as zero-based lists. If ``rebase_arrays`` is ``False``,
instead, return it as a dictionary, preserving the original index-set.
Returns
-------
object
The parsed object. The type of the object depends on the dzn value.
|
[
"Parses",
"the",
"value",
"of",
"a",
"dzn",
"statement",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/parse.py#L395-L438
|
9,553
|
paolodragone/pymzn
|
pymzn/dzn/parse.py
|
dzn2dict
|
def dzn2dict(dzn, *, rebase_arrays=True, types=None, return_enums=False):
"""Parses a dzn string or file into a dictionary of variable assignments.
Parameters
----------
dzn : str
A dzn content string or a path to a dzn file.
rebase_arrays : bool
Whether to return arrays as zero-based lists or to return them as
dictionaries, preserving the original index-sets.
types : dict
Dictionary of variable types. Types can either be dictionaries, as
returned by the ``minizinc --model-types-only``, or strings containing a
type in dzn format. If the type is a string, it can either be the name
of an enum type or one of the following: ``bool``, ``int``, ``float``,
``enum``, ``set of <type>``, ``array[<index_sets>] of <type>``. The
default value for ``var_types`` is ``None``, in which case the type of
most dzn assignments will be inferred automatically from the value. Enum
values can only be parsed if their respective types are available.
return_enums : bool
Whether to return the parsed enum types included in the dzn content.
Returns
-------
dict
A dictionary containing the variable assignments parsed from the
input file or string.
"""
dzn_ext = os.path.splitext(dzn)[1]
if dzn_ext == '.dzn':
with open(dzn) as f:
dzn = f.read()
var_types = None
if types:
var_types = {}
for var, var_type in types.items():
if isinstance(var_type, str):
var_types[var] = _to_var_type(var, var_type)
elif isinstance(var_type, dict):
var_types[var] = var_type
else:
err = 'Type of variable {} must be a string or a dict.'
raise ValueError(err.format(var))
enum_types = None
if var_types:
enum_types = []
for var, var_type in var_types.items():
if 'enum_type' in var_type and var_type['enum_type'] == var:
enum_types.append(var)
var_list = []
dzn = _comm_p.sub('\n', dzn)
stmts = _stmt_p.findall(dzn)
for stmt in stmts:
var_m = _var_p.match(stmt)
if var_m:
var = var_m.group('var')
val = var_m.group('val')
var_list.append((var, val))
else:
raise ValueError(
'Unsupported parsing for statement:\n{}'.format(repr(stmt))
)
enums = None
if enum_types:
enums = {}
remaining = []
while len(var_list) > 0:
var, val = var_list.pop(0)
if var in enum_types:
enum = None
enum_m = _enum_p.match(val)
if enum_m:
vals = enum_m.group('vals').strip()
if vals:
enum_vals = _parse_enum_vals(vals.split(','))
enum = IntEnum(
var, {v: i + 1 for i, v in enumerate(enum_vals)}
)
if enum is None:
raise ValueError(
'Cannot parse enum type \'{} = {}\'.'.format(var, val)
)
enums[var] = enum
else:
remaining.append((var, val))
var_list = remaining
assign = {}
for var, val in var_list:
var_type = None
if var_types:
var_type = var_types.get(var, None)
assign[var] = parse_value(
val, var_type=var_type, enums=enums, rebase_arrays=rebase_arrays
)
if return_enums and enums:
assign.update(enums)
return assign
|
python
|
def dzn2dict(dzn, *, rebase_arrays=True, types=None, return_enums=False):
"""Parses a dzn string or file into a dictionary of variable assignments.
Parameters
----------
dzn : str
A dzn content string or a path to a dzn file.
rebase_arrays : bool
Whether to return arrays as zero-based lists or to return them as
dictionaries, preserving the original index-sets.
types : dict
Dictionary of variable types. Types can either be dictionaries, as
returned by the ``minizinc --model-types-only``, or strings containing a
type in dzn format. If the type is a string, it can either be the name
of an enum type or one of the following: ``bool``, ``int``, ``float``,
``enum``, ``set of <type>``, ``array[<index_sets>] of <type>``. The
default value for ``var_types`` is ``None``, in which case the type of
most dzn assignments will be inferred automatically from the value. Enum
values can only be parsed if their respective types are available.
return_enums : bool
Whether to return the parsed enum types included in the dzn content.
Returns
-------
dict
A dictionary containing the variable assignments parsed from the
input file or string.
"""
dzn_ext = os.path.splitext(dzn)[1]
if dzn_ext == '.dzn':
with open(dzn) as f:
dzn = f.read()
var_types = None
if types:
var_types = {}
for var, var_type in types.items():
if isinstance(var_type, str):
var_types[var] = _to_var_type(var, var_type)
elif isinstance(var_type, dict):
var_types[var] = var_type
else:
err = 'Type of variable {} must be a string or a dict.'
raise ValueError(err.format(var))
enum_types = None
if var_types:
enum_types = []
for var, var_type in var_types.items():
if 'enum_type' in var_type and var_type['enum_type'] == var:
enum_types.append(var)
var_list = []
dzn = _comm_p.sub('\n', dzn)
stmts = _stmt_p.findall(dzn)
for stmt in stmts:
var_m = _var_p.match(stmt)
if var_m:
var = var_m.group('var')
val = var_m.group('val')
var_list.append((var, val))
else:
raise ValueError(
'Unsupported parsing for statement:\n{}'.format(repr(stmt))
)
enums = None
if enum_types:
enums = {}
remaining = []
while len(var_list) > 0:
var, val = var_list.pop(0)
if var in enum_types:
enum = None
enum_m = _enum_p.match(val)
if enum_m:
vals = enum_m.group('vals').strip()
if vals:
enum_vals = _parse_enum_vals(vals.split(','))
enum = IntEnum(
var, {v: i + 1 for i, v in enumerate(enum_vals)}
)
if enum is None:
raise ValueError(
'Cannot parse enum type \'{} = {}\'.'.format(var, val)
)
enums[var] = enum
else:
remaining.append((var, val))
var_list = remaining
assign = {}
for var, val in var_list:
var_type = None
if var_types:
var_type = var_types.get(var, None)
assign[var] = parse_value(
val, var_type=var_type, enums=enums, rebase_arrays=rebase_arrays
)
if return_enums and enums:
assign.update(enums)
return assign
|
[
"def",
"dzn2dict",
"(",
"dzn",
",",
"*",
",",
"rebase_arrays",
"=",
"True",
",",
"types",
"=",
"None",
",",
"return_enums",
"=",
"False",
")",
":",
"dzn_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"dzn",
")",
"[",
"1",
"]",
"if",
"dzn_ext",
"==",
"'.dzn'",
":",
"with",
"open",
"(",
"dzn",
")",
"as",
"f",
":",
"dzn",
"=",
"f",
".",
"read",
"(",
")",
"var_types",
"=",
"None",
"if",
"types",
":",
"var_types",
"=",
"{",
"}",
"for",
"var",
",",
"var_type",
"in",
"types",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"var_type",
",",
"str",
")",
":",
"var_types",
"[",
"var",
"]",
"=",
"_to_var_type",
"(",
"var",
",",
"var_type",
")",
"elif",
"isinstance",
"(",
"var_type",
",",
"dict",
")",
":",
"var_types",
"[",
"var",
"]",
"=",
"var_type",
"else",
":",
"err",
"=",
"'Type of variable {} must be a string or a dict.'",
"raise",
"ValueError",
"(",
"err",
".",
"format",
"(",
"var",
")",
")",
"enum_types",
"=",
"None",
"if",
"var_types",
":",
"enum_types",
"=",
"[",
"]",
"for",
"var",
",",
"var_type",
"in",
"var_types",
".",
"items",
"(",
")",
":",
"if",
"'enum_type'",
"in",
"var_type",
"and",
"var_type",
"[",
"'enum_type'",
"]",
"==",
"var",
":",
"enum_types",
".",
"append",
"(",
"var",
")",
"var_list",
"=",
"[",
"]",
"dzn",
"=",
"_comm_p",
".",
"sub",
"(",
"'\\n'",
",",
"dzn",
")",
"stmts",
"=",
"_stmt_p",
".",
"findall",
"(",
"dzn",
")",
"for",
"stmt",
"in",
"stmts",
":",
"var_m",
"=",
"_var_p",
".",
"match",
"(",
"stmt",
")",
"if",
"var_m",
":",
"var",
"=",
"var_m",
".",
"group",
"(",
"'var'",
")",
"val",
"=",
"var_m",
".",
"group",
"(",
"'val'",
")",
"var_list",
".",
"append",
"(",
"(",
"var",
",",
"val",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported parsing for statement:\\n{}'",
".",
"format",
"(",
"repr",
"(",
"stmt",
")",
")",
")",
"enums",
"=",
"None",
"if",
"enum_types",
":",
"enums",
"=",
"{",
"}",
"remaining",
"=",
"[",
"]",
"while",
"len",
"(",
"var_list",
")",
">",
"0",
":",
"var",
",",
"val",
"=",
"var_list",
".",
"pop",
"(",
"0",
")",
"if",
"var",
"in",
"enum_types",
":",
"enum",
"=",
"None",
"enum_m",
"=",
"_enum_p",
".",
"match",
"(",
"val",
")",
"if",
"enum_m",
":",
"vals",
"=",
"enum_m",
".",
"group",
"(",
"'vals'",
")",
".",
"strip",
"(",
")",
"if",
"vals",
":",
"enum_vals",
"=",
"_parse_enum_vals",
"(",
"vals",
".",
"split",
"(",
"','",
")",
")",
"enum",
"=",
"IntEnum",
"(",
"var",
",",
"{",
"v",
":",
"i",
"+",
"1",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"enum_vals",
")",
"}",
")",
"if",
"enum",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Cannot parse enum type \\'{} = {}\\'.'",
".",
"format",
"(",
"var",
",",
"val",
")",
")",
"enums",
"[",
"var",
"]",
"=",
"enum",
"else",
":",
"remaining",
".",
"append",
"(",
"(",
"var",
",",
"val",
")",
")",
"var_list",
"=",
"remaining",
"assign",
"=",
"{",
"}",
"for",
"var",
",",
"val",
"in",
"var_list",
":",
"var_type",
"=",
"None",
"if",
"var_types",
":",
"var_type",
"=",
"var_types",
".",
"get",
"(",
"var",
",",
"None",
")",
"assign",
"[",
"var",
"]",
"=",
"parse_value",
"(",
"val",
",",
"var_type",
"=",
"var_type",
",",
"enums",
"=",
"enums",
",",
"rebase_arrays",
"=",
"rebase_arrays",
")",
"if",
"return_enums",
"and",
"enums",
":",
"assign",
".",
"update",
"(",
"enums",
")",
"return",
"assign"
] |
Parses a dzn string or file into a dictionary of variable assignments.
Parameters
----------
dzn : str
A dzn content string or a path to a dzn file.
rebase_arrays : bool
Whether to return arrays as zero-based lists or to return them as
dictionaries, preserving the original index-sets.
types : dict
Dictionary of variable types. Types can either be dictionaries, as
returned by the ``minizinc --model-types-only``, or strings containing a
type in dzn format. If the type is a string, it can either be the name
of an enum type or one of the following: ``bool``, ``int``, ``float``,
``enum``, ``set of <type>``, ``array[<index_sets>] of <type>``. The
default value for ``var_types`` is ``None``, in which case the type of
most dzn assignments will be inferred automatically from the value. Enum
values can only be parsed if their respective types are available.
return_enums : bool
Whether to return the parsed enum types included in the dzn content.
Returns
-------
dict
A dictionary containing the variable assignments parsed from the
input file or string.
|
[
"Parses",
"a",
"dzn",
"string",
"or",
"file",
"into",
"a",
"dictionary",
"of",
"variable",
"assignments",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/parse.py#L490-L593
|
9,554
|
paolodragone/pymzn
|
pymzn/mzn/solvers.py
|
Solver.args
|
def args(
self, all_solutions=False, num_solutions=None, free_search=False,
parallel=None, seed=None, **kwargs
):
"""Returns a list of command line arguments for the specified options.
If the solver parser is able to parse statistics, this function should
always add options to display statistics.
Parameters
----------
all_solutions : bool
Whether all the solutions must be returned (default is False).
num_solutions : int
The maximum number of solutions to be returned (only used in
satisfation problems).
free_search : bool
Whether the solver should be instructed to perform a free search.
parallel : int
The number of parallel threads the solver should use.
seed : int
The random number generator seed to pass to the solver.
"""
args = ['-s', '-v']
if all_solutions:
args.append('-a')
if num_solutions is not None:
args += ['-n', num_solutions]
if free_search:
args.append('-f')
if parallel is not None:
args += ['-p', parallel]
if seed is not None:
args += ['-r', seed]
return args
|
python
|
def args(
self, all_solutions=False, num_solutions=None, free_search=False,
parallel=None, seed=None, **kwargs
):
"""Returns a list of command line arguments for the specified options.
If the solver parser is able to parse statistics, this function should
always add options to display statistics.
Parameters
----------
all_solutions : bool
Whether all the solutions must be returned (default is False).
num_solutions : int
The maximum number of solutions to be returned (only used in
satisfation problems).
free_search : bool
Whether the solver should be instructed to perform a free search.
parallel : int
The number of parallel threads the solver should use.
seed : int
The random number generator seed to pass to the solver.
"""
args = ['-s', '-v']
if all_solutions:
args.append('-a')
if num_solutions is not None:
args += ['-n', num_solutions]
if free_search:
args.append('-f')
if parallel is not None:
args += ['-p', parallel]
if seed is not None:
args += ['-r', seed]
return args
|
[
"def",
"args",
"(",
"self",
",",
"all_solutions",
"=",
"False",
",",
"num_solutions",
"=",
"None",
",",
"free_search",
"=",
"False",
",",
"parallel",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"[",
"'-s'",
",",
"'-v'",
"]",
"if",
"all_solutions",
":",
"args",
".",
"append",
"(",
"'-a'",
")",
"if",
"num_solutions",
"is",
"not",
"None",
":",
"args",
"+=",
"[",
"'-n'",
",",
"num_solutions",
"]",
"if",
"free_search",
":",
"args",
".",
"append",
"(",
"'-f'",
")",
"if",
"parallel",
"is",
"not",
"None",
":",
"args",
"+=",
"[",
"'-p'",
",",
"parallel",
"]",
"if",
"seed",
"is",
"not",
"None",
":",
"args",
"+=",
"[",
"'-r'",
",",
"seed",
"]",
"return",
"args"
] |
Returns a list of command line arguments for the specified options.
If the solver parser is able to parse statistics, this function should
always add options to display statistics.
Parameters
----------
all_solutions : bool
Whether all the solutions must be returned (default is False).
num_solutions : int
The maximum number of solutions to be returned (only used in
satisfation problems).
free_search : bool
Whether the solver should be instructed to perform a free search.
parallel : int
The number of parallel threads the solver should use.
seed : int
The random number generator seed to pass to the solver.
|
[
"Returns",
"a",
"list",
"of",
"command",
"line",
"arguments",
"for",
"the",
"specified",
"options",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/solvers.py#L96-L130
|
9,555
|
paolodragone/pymzn
|
pymzn/log.py
|
debug
|
def debug(dbg=True):
"""Enables or disables debugging messages on the standard output."""
global _debug_handler
if dbg and _debug_handler is None:
_debug_handler = logging.StreamHandler()
logger.addHandler(_debug_handler)
logger.setLevel(logging.DEBUG)
elif not dbg and _debug_handler is not None:
logger.removeHandler(_debug_handler)
_debug_handler = None
logger.setLevel(logging.WARNING)
|
python
|
def debug(dbg=True):
"""Enables or disables debugging messages on the standard output."""
global _debug_handler
if dbg and _debug_handler is None:
_debug_handler = logging.StreamHandler()
logger.addHandler(_debug_handler)
logger.setLevel(logging.DEBUG)
elif not dbg and _debug_handler is not None:
logger.removeHandler(_debug_handler)
_debug_handler = None
logger.setLevel(logging.WARNING)
|
[
"def",
"debug",
"(",
"dbg",
"=",
"True",
")",
":",
"global",
"_debug_handler",
"if",
"dbg",
"and",
"_debug_handler",
"is",
"None",
":",
"_debug_handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"logger",
".",
"addHandler",
"(",
"_debug_handler",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"elif",
"not",
"dbg",
"and",
"_debug_handler",
"is",
"not",
"None",
":",
"logger",
".",
"removeHandler",
"(",
"_debug_handler",
")",
"_debug_handler",
"=",
"None",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")"
] |
Enables or disables debugging messages on the standard output.
|
[
"Enables",
"or",
"disables",
"debugging",
"messages",
"on",
"the",
"standard",
"output",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/log.py#L15-L25
|
9,556
|
paolodragone/pymzn
|
pymzn/mzn/minizinc.py
|
minizinc_version
|
def minizinc_version():
"""Returns the version of the found minizinc executable."""
vs = _run_minizinc('--version')
m = re.findall('version ([\d\.]+)', vs)
if not m:
raise RuntimeError('MiniZinc executable not found.')
return m[0]
|
python
|
def minizinc_version():
"""Returns the version of the found minizinc executable."""
vs = _run_minizinc('--version')
m = re.findall('version ([\d\.]+)', vs)
if not m:
raise RuntimeError('MiniZinc executable not found.')
return m[0]
|
[
"def",
"minizinc_version",
"(",
")",
":",
"vs",
"=",
"_run_minizinc",
"(",
"'--version'",
")",
"m",
"=",
"re",
".",
"findall",
"(",
"'version ([\\d\\.]+)'",
",",
"vs",
")",
"if",
"not",
"m",
":",
"raise",
"RuntimeError",
"(",
"'MiniZinc executable not found.'",
")",
"return",
"m",
"[",
"0",
"]"
] |
Returns the version of the found minizinc executable.
|
[
"Returns",
"the",
"version",
"of",
"the",
"found",
"minizinc",
"executable",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L64-L70
|
9,557
|
paolodragone/pymzn
|
pymzn/mzn/minizinc.py
|
preprocess_model
|
def preprocess_model(model, rewrap=True, **kwargs):
"""Preprocess a MiniZinc model.
This function takes care of preprocessing the model by resolving the
template using the arguments passed as keyword arguments to this function.
Optionally, this function can also "rewrap" the model, deleting spaces at
the beginning of the lines while preserving indentation.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
rewrap : bool
Whether to "rewrap" the model, i.e. to delete leading spaces, while
preserving indentation. Default is ``True``.
**kwargs
Additional arguments to pass to the template engine.
Returns
-------
str
The preprocessed model.
"""
args = {**kwargs, **config.get('args', {})}
model = _process_template(model, **args)
if rewrap:
model = rewrap_model(model)
return model
|
python
|
def preprocess_model(model, rewrap=True, **kwargs):
"""Preprocess a MiniZinc model.
This function takes care of preprocessing the model by resolving the
template using the arguments passed as keyword arguments to this function.
Optionally, this function can also "rewrap" the model, deleting spaces at
the beginning of the lines while preserving indentation.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
rewrap : bool
Whether to "rewrap" the model, i.e. to delete leading spaces, while
preserving indentation. Default is ``True``.
**kwargs
Additional arguments to pass to the template engine.
Returns
-------
str
The preprocessed model.
"""
args = {**kwargs, **config.get('args', {})}
model = _process_template(model, **args)
if rewrap:
model = rewrap_model(model)
return model
|
[
"def",
"preprocess_model",
"(",
"model",
",",
"rewrap",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"{",
"*",
"*",
"kwargs",
",",
"*",
"*",
"config",
".",
"get",
"(",
"'args'",
",",
"{",
"}",
")",
"}",
"model",
"=",
"_process_template",
"(",
"model",
",",
"*",
"*",
"args",
")",
"if",
"rewrap",
":",
"model",
"=",
"rewrap_model",
"(",
"model",
")",
"return",
"model"
] |
Preprocess a MiniZinc model.
This function takes care of preprocessing the model by resolving the
template using the arguments passed as keyword arguments to this function.
Optionally, this function can also "rewrap" the model, deleting spaces at
the beginning of the lines while preserving indentation.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
rewrap : bool
Whether to "rewrap" the model, i.e. to delete leading spaces, while
preserving indentation. Default is ``True``.
**kwargs
Additional arguments to pass to the template engine.
Returns
-------
str
The preprocessed model.
|
[
"Preprocess",
"a",
"MiniZinc",
"model",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L179-L209
|
9,558
|
paolodragone/pymzn
|
pymzn/mzn/minizinc.py
|
save_model
|
def save_model(model, output_file=None, output_dir=None, output_prefix='pymzn'):
"""Save a model to file.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
output_file : str
The path to the output file. If this parameter is ``None`` (default), a
temporary file is created with the given model in the specified output
directory, using the specified prefix.
output_dir : str
The directory where to create the file in case ``output_file`` is None.
Default is ``None``, which creates a file in the system temporary directory.
output_prefix : str
The prefix for the output file if created. Default is ``'pymzn'``.
Returns
-------
str
The path to the newly created ``.mzn`` file.
"""
if output_file:
mzn_file = output_file
output_file = open(output_file, 'w+', buffering=1)
else:
output_prefix += '_'
output_file = NamedTemporaryFile(
dir=output_dir, prefix=output_prefix, suffix='.mzn', delete=False,
mode='w+', buffering=1
)
mzn_file = output_file.name
output_file.write(model)
output_file.close()
logger.info('Generated file {}'.format(mzn_file))
return mzn_file
|
python
|
def save_model(model, output_file=None, output_dir=None, output_prefix='pymzn'):
"""Save a model to file.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
output_file : str
The path to the output file. If this parameter is ``None`` (default), a
temporary file is created with the given model in the specified output
directory, using the specified prefix.
output_dir : str
The directory where to create the file in case ``output_file`` is None.
Default is ``None``, which creates a file in the system temporary directory.
output_prefix : str
The prefix for the output file if created. Default is ``'pymzn'``.
Returns
-------
str
The path to the newly created ``.mzn`` file.
"""
if output_file:
mzn_file = output_file
output_file = open(output_file, 'w+', buffering=1)
else:
output_prefix += '_'
output_file = NamedTemporaryFile(
dir=output_dir, prefix=output_prefix, suffix='.mzn', delete=False,
mode='w+', buffering=1
)
mzn_file = output_file.name
output_file.write(model)
output_file.close()
logger.info('Generated file {}'.format(mzn_file))
return mzn_file
|
[
"def",
"save_model",
"(",
"model",
",",
"output_file",
"=",
"None",
",",
"output_dir",
"=",
"None",
",",
"output_prefix",
"=",
"'pymzn'",
")",
":",
"if",
"output_file",
":",
"mzn_file",
"=",
"output_file",
"output_file",
"=",
"open",
"(",
"output_file",
",",
"'w+'",
",",
"buffering",
"=",
"1",
")",
"else",
":",
"output_prefix",
"+=",
"'_'",
"output_file",
"=",
"NamedTemporaryFile",
"(",
"dir",
"=",
"output_dir",
",",
"prefix",
"=",
"output_prefix",
",",
"suffix",
"=",
"'.mzn'",
",",
"delete",
"=",
"False",
",",
"mode",
"=",
"'w+'",
",",
"buffering",
"=",
"1",
")",
"mzn_file",
"=",
"output_file",
".",
"name",
"output_file",
".",
"write",
"(",
"model",
")",
"output_file",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"'Generated file {}'",
".",
"format",
"(",
"mzn_file",
")",
")",
"return",
"mzn_file"
] |
Save a model to file.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
output_file : str
The path to the output file. If this parameter is ``None`` (default), a
temporary file is created with the given model in the specified output
directory, using the specified prefix.
output_dir : str
The directory where to create the file in case ``output_file`` is None.
Default is ``None``, which creates a file in the system temporary directory.
output_prefix : str
The prefix for the output file if created. Default is ``'pymzn'``.
Returns
-------
str
The path to the newly created ``.mzn`` file.
|
[
"Save",
"a",
"model",
"to",
"file",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L212-L249
|
9,559
|
paolodragone/pymzn
|
pymzn/mzn/minizinc.py
|
check_instance
|
def check_instance(
mzn, *dzn_files, data=None, include=None, stdlib_dir=None, globals_dir=None,
allow_multiple_assignments=False
):
"""Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : dict
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails.
"""
args = ['--instance-check-only']
args += _flattening_args(
mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
globals_dir=globals_dir,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
)
|
python
|
def check_instance(
mzn, *dzn_files, data=None, include=None, stdlib_dir=None, globals_dir=None,
allow_multiple_assignments=False
):
"""Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : dict
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails.
"""
args = ['--instance-check-only']
args += _flattening_args(
mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
globals_dir=globals_dir,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
)
|
[
"def",
"check_instance",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"allow_multiple_assignments",
"=",
"False",
")",
":",
"args",
"=",
"[",
"'--instance-check-only'",
"]",
"args",
"+=",
"_flattening_args",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"input",
"=",
"mzn",
"if",
"args",
"[",
"-",
"1",
"]",
"==",
"'-'",
"else",
"None",
"proc",
"=",
"_run_minizinc_proc",
"(",
"*",
"args",
",",
"input",
"=",
"input",
")",
"if",
"proc",
".",
"stderr_data",
":",
"raise",
"MiniZincError",
"(",
"mzn",
"if",
"input",
"is",
"None",
"else",
"'\\n'",
"+",
"mzn",
"+",
"'\\n'",
",",
"args",
",",
"proc",
".",
"stderr_data",
")"
] |
Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : dict
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails.
|
[
"Perform",
"instance",
"checking",
"on",
"a",
"model",
"+",
"data",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L327-L378
|
9,560
|
paolodragone/pymzn
|
pymzn/mzn/minizinc.py
|
check_model
|
def check_model(
mzn, *, include=None, stdlib_dir=None, globals_dir=None
):
"""Perform model checking on a given model.
This function calls the command ``minizinc --model-check-only`` to check
for consistency of the given model.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
Raises
------
``MiniZincError`` if model checking fails.
"""
args = ['--model-check-only']
args += _flattening_args(
mzn, include=include, stdlib_dir=stdlib_dir, globals_dir=globals_dir
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
)
|
python
|
def check_model(
mzn, *, include=None, stdlib_dir=None, globals_dir=None
):
"""Perform model checking on a given model.
This function calls the command ``minizinc --model-check-only`` to check
for consistency of the given model.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
Raises
------
``MiniZincError`` if model checking fails.
"""
args = ['--model-check-only']
args += _flattening_args(
mzn, include=include, stdlib_dir=stdlib_dir, globals_dir=globals_dir
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
)
|
[
"def",
"check_model",
"(",
"mzn",
",",
"*",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
")",
":",
"args",
"=",
"[",
"'--model-check-only'",
"]",
"args",
"+=",
"_flattening_args",
"(",
"mzn",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
")",
"input",
"=",
"mzn",
"if",
"args",
"[",
"-",
"1",
"]",
"==",
"'-'",
"else",
"None",
"proc",
"=",
"_run_minizinc_proc",
"(",
"*",
"args",
",",
"input",
"=",
"input",
")",
"if",
"proc",
".",
"stderr_data",
":",
"raise",
"MiniZincError",
"(",
"mzn",
"if",
"input",
"is",
"None",
"else",
"'\\n'",
"+",
"mzn",
"+",
"'\\n'",
",",
"args",
",",
"proc",
".",
"stderr_data",
")"
] |
Perform model checking on a given model.
This function calls the command ``minizinc --model-check-only`` to check
for consistency of the given model.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
Raises
------
``MiniZincError`` if model checking fails.
|
[
"Perform",
"model",
"checking",
"on",
"a",
"given",
"model",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L381-L419
|
9,561
|
paolodragone/pymzn
|
pymzn/mzn/minizinc.py
|
minizinc
|
def minizinc(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='dict',
solver=None, timeout=None, two_pass=None, pre_passes=None,
output_objective=False, non_unique=False, all_solutions=False,
num_solutions=None, free_search=False, parallel=None, seed=None,
rebase_arrays=True, keep_solutions=True, return_enums=False, **kwargs
):
"""Implements the workflow for solving a CSP problem encoded with MiniZinc.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variables assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this property can
be used for debugging purpose. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``ouptut_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn of json respectively. The ``'raw'`` format, instead
returns the whole solution stream, without parsing.
solver : Solver
The ``Solver`` instance to use. The default solver is ``gecode``.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If ``True``, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for the solving.
seed : int
The random number generator seed to pass to the solver.
rebase_arrays : bool
Whether to "rebase" parsed arrays (see the `Dzn files
<http://paolodragone.com/pymzn/reference/dzn>`__ section). Default is
True.
keep_solutions : bool
Whether to store the solutions in memory after solving is done. If
``keep_solutions`` is ``False``, the returned solution stream can only
be iterated once and cannot be addressed as a list.
return_enums : bool
Wheter to return enum types along with the variable assignments in the
solutions. Only used if ``output_mode='dict'``. Default is ``False``.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Solutions or str
If ``output_mode`` is not ``'raw'``, returns a list-like object
containing the solutions found by the solver. The format of the solution
depends on the specified ``output_mode``. If ``keep_solutions=False``,
the returned object cannot be addressed as a list and can only be
iterated once. If ``output_mode='raw'``, the function returns the whole
solution stream as a single string.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
if not solver:
solver = config.get('solver', gecode)
solver_args = {**kwargs, **config.get('solver_args', {})}
proc = solve(
solver, mzn_file, *dzn_files, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
pre_passes=pre_passes, output_objective=output_objective,
non_unique=non_unique, all_solutions=all_solutions,
num_solutions=num_solutions, free_search=free_search, parallel=parallel,
seed=seed, allow_multiple_assignments=allow_multiple_assignments,
**solver_args
)
if not keep:
_cleanup([mzn_file, data_file])
if output_mode == 'raw':
return proc.stdout_data
parser = SolutionParser(
solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
types=types, keep_solutions=keep_solutions, return_enums=return_enums
)
solns = parser.parse(proc)
return solns
|
python
|
def minizinc(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='dict',
solver=None, timeout=None, two_pass=None, pre_passes=None,
output_objective=False, non_unique=False, all_solutions=False,
num_solutions=None, free_search=False, parallel=None, seed=None,
rebase_arrays=True, keep_solutions=True, return_enums=False, **kwargs
):
"""Implements the workflow for solving a CSP problem encoded with MiniZinc.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variables assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this property can
be used for debugging purpose. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``ouptut_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn of json respectively. The ``'raw'`` format, instead
returns the whole solution stream, without parsing.
solver : Solver
The ``Solver`` instance to use. The default solver is ``gecode``.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If ``True``, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for the solving.
seed : int
The random number generator seed to pass to the solver.
rebase_arrays : bool
Whether to "rebase" parsed arrays (see the `Dzn files
<http://paolodragone.com/pymzn/reference/dzn>`__ section). Default is
True.
keep_solutions : bool
Whether to store the solutions in memory after solving is done. If
``keep_solutions`` is ``False``, the returned solution stream can only
be iterated once and cannot be addressed as a list.
return_enums : bool
Wheter to return enum types along with the variable assignments in the
solutions. Only used if ``output_mode='dict'``. Default is ``False``.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Solutions or str
If ``output_mode`` is not ``'raw'``, returns a list-like object
containing the solutions found by the solver. The format of the solution
depends on the specified ``output_mode``. If ``keep_solutions=False``,
the returned object cannot be addressed as a list and can only be
iterated once. If ``output_mode='raw'``, the function returns the whole
solution stream as a single string.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
if not solver:
solver = config.get('solver', gecode)
solver_args = {**kwargs, **config.get('solver_args', {})}
proc = solve(
solver, mzn_file, *dzn_files, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
pre_passes=pre_passes, output_objective=output_objective,
non_unique=non_unique, all_solutions=all_solutions,
num_solutions=num_solutions, free_search=free_search, parallel=parallel,
seed=seed, allow_multiple_assignments=allow_multiple_assignments,
**solver_args
)
if not keep:
_cleanup([mzn_file, data_file])
if output_mode == 'raw':
return proc.stdout_data
parser = SolutionParser(
solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
types=types, keep_solutions=keep_solutions, return_enums=return_enums
)
solns = parser.parse(proc)
return solns
|
[
"def",
"minizinc",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"None",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"declare_enums",
"=",
"True",
",",
"allow_multiple_assignments",
"=",
"False",
",",
"keep",
"=",
"False",
",",
"output_vars",
"=",
"None",
",",
"output_base",
"=",
"None",
",",
"output_mode",
"=",
"'dict'",
",",
"solver",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"two_pass",
"=",
"None",
",",
"pre_passes",
"=",
"None",
",",
"output_objective",
"=",
"False",
",",
"non_unique",
"=",
"False",
",",
"all_solutions",
"=",
"False",
",",
"num_solutions",
"=",
"None",
",",
"free_search",
"=",
"False",
",",
"parallel",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"rebase_arrays",
"=",
"True",
",",
"keep_solutions",
"=",
"True",
",",
"return_enums",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"mzn_file",
",",
"dzn_files",
",",
"data_file",
",",
"data",
",",
"keep",
",",
"_output_mode",
",",
"types",
"=",
"_minizinc_preliminaries",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"args",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_vars",
"=",
"output_vars",
",",
"keep",
"=",
"keep",
",",
"output_base",
"=",
"output_base",
",",
"output_mode",
"=",
"output_mode",
",",
"declare_enums",
"=",
"declare_enums",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"if",
"not",
"solver",
":",
"solver",
"=",
"config",
".",
"get",
"(",
"'solver'",
",",
"gecode",
")",
"solver_args",
"=",
"{",
"*",
"*",
"kwargs",
",",
"*",
"*",
"config",
".",
"get",
"(",
"'solver_args'",
",",
"{",
"}",
")",
"}",
"proc",
"=",
"solve",
"(",
"solver",
",",
"mzn_file",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_mode",
"=",
"_output_mode",
",",
"timeout",
"=",
"timeout",
",",
"two_pass",
"=",
"two_pass",
",",
"pre_passes",
"=",
"pre_passes",
",",
"output_objective",
"=",
"output_objective",
",",
"non_unique",
"=",
"non_unique",
",",
"all_solutions",
"=",
"all_solutions",
",",
"num_solutions",
"=",
"num_solutions",
",",
"free_search",
"=",
"free_search",
",",
"parallel",
"=",
"parallel",
",",
"seed",
"=",
"seed",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
",",
"*",
"*",
"solver_args",
")",
"if",
"not",
"keep",
":",
"_cleanup",
"(",
"[",
"mzn_file",
",",
"data_file",
"]",
")",
"if",
"output_mode",
"==",
"'raw'",
":",
"return",
"proc",
".",
"stdout_data",
"parser",
"=",
"SolutionParser",
"(",
"solver",
",",
"output_mode",
"=",
"output_mode",
",",
"rebase_arrays",
"=",
"rebase_arrays",
",",
"types",
"=",
"types",
",",
"keep_solutions",
"=",
"keep_solutions",
",",
"return_enums",
"=",
"return_enums",
")",
"solns",
"=",
"parser",
".",
"parse",
"(",
"proc",
")",
"return",
"solns"
] |
Implements the workflow for solving a CSP problem encoded with MiniZinc.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variables assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this property can
be used for debugging purpose. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``ouptut_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn of json respectively. The ``'raw'`` format, instead
returns the whole solution stream, without parsing.
solver : Solver
The ``Solver`` instance to use. The default solver is ``gecode``.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If ``True``, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for the solving.
seed : int
The random number generator seed to pass to the solver.
rebase_arrays : bool
Whether to "rebase" parsed arrays (see the `Dzn files
<http://paolodragone.com/pymzn/reference/dzn>`__ section). Default is
True.
keep_solutions : bool
Whether to store the solutions in memory after solving is done. If
``keep_solutions`` is ``False``, the returned solution stream can only
be iterated once and cannot be addressed as a list.
return_enums : bool
Wheter to return enum types along with the variable assignments in the
solutions. Only used if ``output_mode='dict'``. Default is ``False``.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Solutions or str
If ``output_mode`` is not ``'raw'``, returns a list-like object
containing the solutions found by the solver. The format of the solution
depends on the specified ``output_mode``. If ``keep_solutions=False``,
the returned object cannot be addressed as a list and can only be
iterated once. If ``output_mode='raw'``, the function returns the whole
solution stream as a single string.
|
[
"Implements",
"the",
"workflow",
"for",
"solving",
"a",
"CSP",
"problem",
"encoded",
"with",
"MiniZinc",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L502-L658
|
9,562
|
paolodragone/pymzn
|
pymzn/mzn/minizinc.py
|
solve
|
def solve(
solver, mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
globals_dir=None, allow_multiple_assignments=False, output_mode='item',
timeout=None, two_pass=None, pre_passes=None, output_objective=False,
non_unique=False, all_solutions=False, num_solutions=None,
free_search=False, parallel=None, seed=None, **kwargs
):
"""Flatten and solve a MiniZinc program.
Parameters
----------
solver : Solver
The ``Solver`` instance to use.
mzn : str
The path to the minizinc model file.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
output_mode : {'item', 'dzn', 'json'}
The desired output format. The default is ``'item'`` which outputs a
stream of strings as returned by the ``solns2out`` tool, formatted
according to the output statement of the MiniZinc model. The ``'dzn'``
and ``'json'`` formats output a stream of strings formatted in dzn and
json respectively.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If True, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for the solving.
seed : int
The random number generator seed to pass to the solver.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Object wrapping the executed process.
"""
args = _solve_args(
solver, timeout=timeout, two_pass=two_pass, pre_passes=pre_passes,
output_objective=output_objective, non_unique=non_unique,
all_solutions=all_solutions, num_solutions=num_solutions,
free_search=free_search, parallel=parallel, seed=seed, **kwargs
)
args += _flattening_args(
mzn, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
t0 = _time()
try:
proc = _run_minizinc_proc(*args, input=input)
except RuntimeError as err:
raise MiniZincError(mzn_file, args) from err
solve_time = _time() - t0
logger.info('Solving completed in {:>3.2f} sec'.format(solve_time))
return proc
|
python
|
def solve(
solver, mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
globals_dir=None, allow_multiple_assignments=False, output_mode='item',
timeout=None, two_pass=None, pre_passes=None, output_objective=False,
non_unique=False, all_solutions=False, num_solutions=None,
free_search=False, parallel=None, seed=None, **kwargs
):
"""Flatten and solve a MiniZinc program.
Parameters
----------
solver : Solver
The ``Solver`` instance to use.
mzn : str
The path to the minizinc model file.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
output_mode : {'item', 'dzn', 'json'}
The desired output format. The default is ``'item'`` which outputs a
stream of strings as returned by the ``solns2out`` tool, formatted
according to the output statement of the MiniZinc model. The ``'dzn'``
and ``'json'`` formats output a stream of strings formatted in dzn and
json respectively.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If True, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for the solving.
seed : int
The random number generator seed to pass to the solver.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Object wrapping the executed process.
"""
args = _solve_args(
solver, timeout=timeout, two_pass=two_pass, pre_passes=pre_passes,
output_objective=output_objective, non_unique=non_unique,
all_solutions=all_solutions, num_solutions=num_solutions,
free_search=free_search, parallel=parallel, seed=seed, **kwargs
)
args += _flattening_args(
mzn, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
t0 = _time()
try:
proc = _run_minizinc_proc(*args, input=input)
except RuntimeError as err:
raise MiniZincError(mzn_file, args) from err
solve_time = _time() - t0
logger.info('Solving completed in {:>3.2f} sec'.format(solve_time))
return proc
|
[
"def",
"solve",
"(",
"solver",
",",
"mzn",
",",
"*",
"dzn_files",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"allow_multiple_assignments",
"=",
"False",
",",
"output_mode",
"=",
"'item'",
",",
"timeout",
"=",
"None",
",",
"two_pass",
"=",
"None",
",",
"pre_passes",
"=",
"None",
",",
"output_objective",
"=",
"False",
",",
"non_unique",
"=",
"False",
",",
"all_solutions",
"=",
"False",
",",
"num_solutions",
"=",
"None",
",",
"free_search",
"=",
"False",
",",
"parallel",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"_solve_args",
"(",
"solver",
",",
"timeout",
"=",
"timeout",
",",
"two_pass",
"=",
"two_pass",
",",
"pre_passes",
"=",
"pre_passes",
",",
"output_objective",
"=",
"output_objective",
",",
"non_unique",
"=",
"non_unique",
",",
"all_solutions",
"=",
"all_solutions",
",",
"num_solutions",
"=",
"num_solutions",
",",
"free_search",
"=",
"free_search",
",",
"parallel",
"=",
"parallel",
",",
"seed",
"=",
"seed",
",",
"*",
"*",
"kwargs",
")",
"args",
"+=",
"_flattening_args",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_mode",
"=",
"output_mode",
",",
"include",
"=",
"include",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"input",
"=",
"mzn",
"if",
"args",
"[",
"-",
"1",
"]",
"==",
"'-'",
"else",
"None",
"t0",
"=",
"_time",
"(",
")",
"try",
":",
"proc",
"=",
"_run_minizinc_proc",
"(",
"*",
"args",
",",
"input",
"=",
"input",
")",
"except",
"RuntimeError",
"as",
"err",
":",
"raise",
"MiniZincError",
"(",
"mzn_file",
",",
"args",
")",
"from",
"err",
"solve_time",
"=",
"_time",
"(",
")",
"-",
"t0",
"logger",
".",
"info",
"(",
"'Solving completed in {:>3.2f} sec'",
".",
"format",
"(",
"solve_time",
")",
")",
"return",
"proc"
] |
Flatten and solve a MiniZinc program.
Parameters
----------
solver : Solver
The ``Solver`` instance to use.
mzn : str
The path to the minizinc model file.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variables
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
output_mode : {'item', 'dzn', 'json'}
The desired output format. The default is ``'item'`` which outputs a
stream of strings as returned by the ``solns2out`` tool, formatted
according to the output statement of the MiniZinc model. The ``'dzn'``
and ``'json'`` formats output a stream of strings formatted in dzn and
json respectively.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If True, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for the solving.
seed : int
The random number generator seed to pass to the solver.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Object wrapping the executed process.
|
[
"Flatten",
"and",
"solve",
"a",
"MiniZinc",
"program",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L694-L796
|
9,563
|
paolodragone/pymzn
|
pymzn/mzn/minizinc.py
|
mzn2fzn
|
def mzn2fzn(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='item',
no_ozn=False
):
"""Flatten a MiniZinc model into a FlatZinc one.
This function is equivalent to the command ``minizinc --compile``.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variables assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this property can
be used for debugging purpose. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``ouptut_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn and json respectively. The ``'raw'`` format, instead
returns the whole solution stream, without parsing.
no_ozn : bool
If ``True``, the ozn file is not produced, ``False`` otherwise.
Returns
-------
tuple (str, str)
The paths to the generated fzn and ozn files. If ``no_ozn=True``, the
second argument is ``None``.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
args = ['--compile']
args += _flattening_args(
mzn_file, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
no_ozn=no_ozn, output_base=output_base,
allow_multiple_assignments=allow_multiple_assignments
)
t0 = _time()
_run_minizinc(*args)
flattening_time = _time() - t0
logger.info('Flattening completed in {:>3.2f} sec'.format(flattening_time))
if not keep:
with contextlib.suppress(FileNotFoundError):
if data_file:
os.remove(data_file)
logger.info('Deleted file: {}'.format(data_file))
if output_base:
mzn_base = output_base
else:
mzn_base = os.path.splitext(mzn_file)[0]
fzn_file = '.'.join([mzn_base, 'fzn'])
fzn_file = fzn_file if os.path.isfile(fzn_file) else None
ozn_file = '.'.join([mzn_base, 'ozn'])
ozn_file = ozn_file if os.path.isfile(ozn_file) else None
if fzn_file:
logger.info('Generated file: {}'.format(fzn_file))
if ozn_file:
logger.info('Generated file: {}'.format(ozn_file))
return fzn_file, ozn_file
|
python
|
def mzn2fzn(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='item',
no_ozn=False
):
"""Flatten a MiniZinc model into a FlatZinc one.
This function is equivalent to the command ``minizinc --compile``.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variables assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this property can
be used for debugging purpose. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``ouptut_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn and json respectively. The ``'raw'`` format, instead
returns the whole solution stream, without parsing.
no_ozn : bool
If ``True``, the ozn file is not produced, ``False`` otherwise.
Returns
-------
tuple (str, str)
The paths to the generated fzn and ozn files. If ``no_ozn=True``, the
second argument is ``None``.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
args = ['--compile']
args += _flattening_args(
mzn_file, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
no_ozn=no_ozn, output_base=output_base,
allow_multiple_assignments=allow_multiple_assignments
)
t0 = _time()
_run_minizinc(*args)
flattening_time = _time() - t0
logger.info('Flattening completed in {:>3.2f} sec'.format(flattening_time))
if not keep:
with contextlib.suppress(FileNotFoundError):
if data_file:
os.remove(data_file)
logger.info('Deleted file: {}'.format(data_file))
if output_base:
mzn_base = output_base
else:
mzn_base = os.path.splitext(mzn_file)[0]
fzn_file = '.'.join([mzn_base, 'fzn'])
fzn_file = fzn_file if os.path.isfile(fzn_file) else None
ozn_file = '.'.join([mzn_base, 'ozn'])
ozn_file = ozn_file if os.path.isfile(ozn_file) else None
if fzn_file:
logger.info('Generated file: {}'.format(fzn_file))
if ozn_file:
logger.info('Generated file: {}'.format(ozn_file))
return fzn_file, ozn_file
|
[
"def",
"mzn2fzn",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"None",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"declare_enums",
"=",
"True",
",",
"allow_multiple_assignments",
"=",
"False",
",",
"keep",
"=",
"False",
",",
"output_vars",
"=",
"None",
",",
"output_base",
"=",
"None",
",",
"output_mode",
"=",
"'item'",
",",
"no_ozn",
"=",
"False",
")",
":",
"mzn_file",
",",
"dzn_files",
",",
"data_file",
",",
"data",
",",
"keep",
",",
"_output_mode",
",",
"types",
"=",
"_minizinc_preliminaries",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"args",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_vars",
"=",
"output_vars",
",",
"keep",
"=",
"keep",
",",
"output_base",
"=",
"output_base",
",",
"output_mode",
"=",
"output_mode",
",",
"declare_enums",
"=",
"declare_enums",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"args",
"=",
"[",
"'--compile'",
"]",
"args",
"+=",
"_flattening_args",
"(",
"mzn_file",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_mode",
"=",
"output_mode",
",",
"include",
"=",
"include",
",",
"no_ozn",
"=",
"no_ozn",
",",
"output_base",
"=",
"output_base",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"t0",
"=",
"_time",
"(",
")",
"_run_minizinc",
"(",
"*",
"args",
")",
"flattening_time",
"=",
"_time",
"(",
")",
"-",
"t0",
"logger",
".",
"info",
"(",
"'Flattening completed in {:>3.2f} sec'",
".",
"format",
"(",
"flattening_time",
")",
")",
"if",
"not",
"keep",
":",
"with",
"contextlib",
".",
"suppress",
"(",
"FileNotFoundError",
")",
":",
"if",
"data_file",
":",
"os",
".",
"remove",
"(",
"data_file",
")",
"logger",
".",
"info",
"(",
"'Deleted file: {}'",
".",
"format",
"(",
"data_file",
")",
")",
"if",
"output_base",
":",
"mzn_base",
"=",
"output_base",
"else",
":",
"mzn_base",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"mzn_file",
")",
"[",
"0",
"]",
"fzn_file",
"=",
"'.'",
".",
"join",
"(",
"[",
"mzn_base",
",",
"'fzn'",
"]",
")",
"fzn_file",
"=",
"fzn_file",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fzn_file",
")",
"else",
"None",
"ozn_file",
"=",
"'.'",
".",
"join",
"(",
"[",
"mzn_base",
",",
"'ozn'",
"]",
")",
"ozn_file",
"=",
"ozn_file",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"ozn_file",
")",
"else",
"None",
"if",
"fzn_file",
":",
"logger",
".",
"info",
"(",
"'Generated file: {}'",
".",
"format",
"(",
"fzn_file",
")",
")",
"if",
"ozn_file",
":",
"logger",
".",
"info",
"(",
"'Generated file: {}'",
".",
"format",
"(",
"ozn_file",
")",
")",
"return",
"fzn_file",
",",
"ozn_file"
] |
Flatten a MiniZinc model into a FlatZinc one.
This function is equivalent to the command ``minizinc --compile``.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variables assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this property can
be used for debugging purpose. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``ouptut_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn and json respectively. The ``'raw'`` format, instead
returns the whole solution stream, without parsing.
no_ozn : bool
If ``True``, the ozn file is not produced, ``False`` otherwise.
Returns
-------
tuple (str, str)
The paths to the generated fzn and ozn files. If ``no_ozn=True``, the
second argument is ``None``.
|
[
"Flatten",
"a",
"MiniZinc",
"model",
"into",
"a",
"FlatZinc",
"one",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L799-L914
|
9,564
|
paolodragone/pymzn
|
pymzn/mzn/output.py
|
Solutions.print
|
def print(self, output_file=sys.stdout, log=False):
"""Print the solution stream"""
for soln in iter(self):
print(soln, file=output_file)
print(SOLN_SEP, file=output_file)
if self.status == 0:
print(SEARCH_COMPLETE, file=output_file)
if (self.status == 1 and self._n_solns == 0) or self.status >= 2:
print({
Status.INCOMPLETE : ERROR,
Status.UNKNOWN: UNKNOWN,
Status.UNSATISFIABLE: UNSATISFIABLE,
Status.UNBOUNDED: UNBOUNDED,
Status.UNSATorUNBOUNDED: UNSATorUNBOUNDED,
Status.ERROR: ERROR
}[self.status], file=output_file)
if self.stderr:
print(self.stderr.strip(), file=sys.stderr)
elif log:
print(str(self.log), file=output_file)
|
python
|
def print(self, output_file=sys.stdout, log=False):
"""Print the solution stream"""
for soln in iter(self):
print(soln, file=output_file)
print(SOLN_SEP, file=output_file)
if self.status == 0:
print(SEARCH_COMPLETE, file=output_file)
if (self.status == 1 and self._n_solns == 0) or self.status >= 2:
print({
Status.INCOMPLETE : ERROR,
Status.UNKNOWN: UNKNOWN,
Status.UNSATISFIABLE: UNSATISFIABLE,
Status.UNBOUNDED: UNBOUNDED,
Status.UNSATorUNBOUNDED: UNSATorUNBOUNDED,
Status.ERROR: ERROR
}[self.status], file=output_file)
if self.stderr:
print(self.stderr.strip(), file=sys.stderr)
elif log:
print(str(self.log), file=output_file)
|
[
"def",
"print",
"(",
"self",
",",
"output_file",
"=",
"sys",
".",
"stdout",
",",
"log",
"=",
"False",
")",
":",
"for",
"soln",
"in",
"iter",
"(",
"self",
")",
":",
"print",
"(",
"soln",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"SOLN_SEP",
",",
"file",
"=",
"output_file",
")",
"if",
"self",
".",
"status",
"==",
"0",
":",
"print",
"(",
"SEARCH_COMPLETE",
",",
"file",
"=",
"output_file",
")",
"if",
"(",
"self",
".",
"status",
"==",
"1",
"and",
"self",
".",
"_n_solns",
"==",
"0",
")",
"or",
"self",
".",
"status",
">=",
"2",
":",
"print",
"(",
"{",
"Status",
".",
"INCOMPLETE",
":",
"ERROR",
",",
"Status",
".",
"UNKNOWN",
":",
"UNKNOWN",
",",
"Status",
".",
"UNSATISFIABLE",
":",
"UNSATISFIABLE",
",",
"Status",
".",
"UNBOUNDED",
":",
"UNBOUNDED",
",",
"Status",
".",
"UNSATorUNBOUNDED",
":",
"UNSATorUNBOUNDED",
",",
"Status",
".",
"ERROR",
":",
"ERROR",
"}",
"[",
"self",
".",
"status",
"]",
",",
"file",
"=",
"output_file",
")",
"if",
"self",
".",
"stderr",
":",
"print",
"(",
"self",
".",
"stderr",
".",
"strip",
"(",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"elif",
"log",
":",
"print",
"(",
"str",
"(",
"self",
".",
"log",
")",
",",
"file",
"=",
"output_file",
")"
] |
Print the solution stream
|
[
"Print",
"the",
"solution",
"stream"
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/output.py#L143-L167
|
9,565
|
paolodragone/pymzn
|
pymzn/config.py
|
Config.dump
|
def dump(self):
"""Writes the changes to the configuration file."""
try:
import yaml
cfg_file = self._cfg_file()
cfg_dir, __ = os.path.split(cfg_file)
os.makedirs(cfg_dir, exist_ok=True)
with open(cfg_file, 'w') as f:
yaml.dump(self, f)
except ImportError as err:
raise RuntimeError(
'Cannot dump the configuration settings to file. You need to '
'install the necessary dependencies (pyyaml, appdirs).'
) from err
|
python
|
def dump(self):
"""Writes the changes to the configuration file."""
try:
import yaml
cfg_file = self._cfg_file()
cfg_dir, __ = os.path.split(cfg_file)
os.makedirs(cfg_dir, exist_ok=True)
with open(cfg_file, 'w') as f:
yaml.dump(self, f)
except ImportError as err:
raise RuntimeError(
'Cannot dump the configuration settings to file. You need to '
'install the necessary dependencies (pyyaml, appdirs).'
) from err
|
[
"def",
"dump",
"(",
"self",
")",
":",
"try",
":",
"import",
"yaml",
"cfg_file",
"=",
"self",
".",
"_cfg_file",
"(",
")",
"cfg_dir",
",",
"__",
"=",
"os",
".",
"path",
".",
"split",
"(",
"cfg_file",
")",
"os",
".",
"makedirs",
"(",
"cfg_dir",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"cfg_file",
",",
"'w'",
")",
"as",
"f",
":",
"yaml",
".",
"dump",
"(",
"self",
",",
"f",
")",
"except",
"ImportError",
"as",
"err",
":",
"raise",
"RuntimeError",
"(",
"'Cannot dump the configuration settings to file. You need to '",
"'install the necessary dependencies (pyyaml, appdirs).'",
")",
"from",
"err"
] |
Writes the changes to the configuration file.
|
[
"Writes",
"the",
"changes",
"to",
"the",
"configuration",
"file",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/config.py#L102-L115
|
9,566
|
paolodragone/pymzn
|
pymzn/mzn/templates.py
|
discretize
|
def discretize(value, factor=100):
"""Discretize the given value, pre-multiplying by the given factor"""
if not isinstance(value, Iterable):
return int(value * factor)
int_value = list(deepcopy(value))
for i in range(len(int_value)):
int_value[i] = int(int_value[i] * factor)
return int_value
|
python
|
def discretize(value, factor=100):
"""Discretize the given value, pre-multiplying by the given factor"""
if not isinstance(value, Iterable):
return int(value * factor)
int_value = list(deepcopy(value))
for i in range(len(int_value)):
int_value[i] = int(int_value[i] * factor)
return int_value
|
[
"def",
"discretize",
"(",
"value",
",",
"factor",
"=",
"100",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"Iterable",
")",
":",
"return",
"int",
"(",
"value",
"*",
"factor",
")",
"int_value",
"=",
"list",
"(",
"deepcopy",
"(",
"value",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"int_value",
")",
")",
":",
"int_value",
"[",
"i",
"]",
"=",
"int",
"(",
"int_value",
"[",
"i",
"]",
"*",
"factor",
")",
"return",
"int_value"
] |
Discretize the given value, pre-multiplying by the given factor
|
[
"Discretize",
"the",
"given",
"value",
"pre",
"-",
"multiplying",
"by",
"the",
"given",
"factor"
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/templates.py#L87-L94
|
9,567
|
paolodragone/pymzn
|
pymzn/mzn/templates.py
|
from_string
|
def from_string(source, args=None):
"""Renders a template string"""
if _has_jinja:
logger.info('Precompiling model with arguments: {}'.format(args))
return _jenv.from_string(source).render(args or {})
if args:
raise RuntimeError(_except_text)
return source
|
python
|
def from_string(source, args=None):
"""Renders a template string"""
if _has_jinja:
logger.info('Precompiling model with arguments: {}'.format(args))
return _jenv.from_string(source).render(args or {})
if args:
raise RuntimeError(_except_text)
return source
|
[
"def",
"from_string",
"(",
"source",
",",
"args",
"=",
"None",
")",
":",
"if",
"_has_jinja",
":",
"logger",
".",
"info",
"(",
"'Precompiling model with arguments: {}'",
".",
"format",
"(",
"args",
")",
")",
"return",
"_jenv",
".",
"from_string",
"(",
"source",
")",
".",
"render",
"(",
"args",
"or",
"{",
"}",
")",
"if",
"args",
":",
"raise",
"RuntimeError",
"(",
"_except_text",
")",
"return",
"source"
] |
Renders a template string
|
[
"Renders",
"a",
"template",
"string"
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/templates.py#L146-L153
|
9,568
|
paolodragone/pymzn
|
pymzn/mzn/templates.py
|
add_package
|
def add_package(package_name, package_path='templates', encoding='utf-8'):
"""Adds the given package to the template search routine"""
if not _has_jinja:
raise RuntimeError(_except_text)
_jload.add_loader(PackageLoader(package_name, package_path, encoding))
|
python
|
def add_package(package_name, package_path='templates', encoding='utf-8'):
"""Adds the given package to the template search routine"""
if not _has_jinja:
raise RuntimeError(_except_text)
_jload.add_loader(PackageLoader(package_name, package_path, encoding))
|
[
"def",
"add_package",
"(",
"package_name",
",",
"package_path",
"=",
"'templates'",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"not",
"_has_jinja",
":",
"raise",
"RuntimeError",
"(",
"_except_text",
")",
"_jload",
".",
"add_loader",
"(",
"PackageLoader",
"(",
"package_name",
",",
"package_path",
",",
"encoding",
")",
")"
] |
Adds the given package to the template search routine
|
[
"Adds",
"the",
"given",
"package",
"to",
"the",
"template",
"search",
"routine"
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/templates.py#L155-L159
|
9,569
|
paolodragone/pymzn
|
pymzn/mzn/templates.py
|
add_path
|
def add_path(searchpath, encoding='utf-8', followlinks=False):
"""Adds the given path to the template search routine"""
if not _has_jinja:
raise RuntimeError(_except_text)
_jload.add_loader(FileSystemLoader(searchpath, encoding, followlinks))
|
python
|
def add_path(searchpath, encoding='utf-8', followlinks=False):
"""Adds the given path to the template search routine"""
if not _has_jinja:
raise RuntimeError(_except_text)
_jload.add_loader(FileSystemLoader(searchpath, encoding, followlinks))
|
[
"def",
"add_path",
"(",
"searchpath",
",",
"encoding",
"=",
"'utf-8'",
",",
"followlinks",
"=",
"False",
")",
":",
"if",
"not",
"_has_jinja",
":",
"raise",
"RuntimeError",
"(",
"_except_text",
")",
"_jload",
".",
"add_loader",
"(",
"FileSystemLoader",
"(",
"searchpath",
",",
"encoding",
",",
"followlinks",
")",
")"
] |
Adds the given path to the template search routine
|
[
"Adds",
"the",
"given",
"path",
"to",
"the",
"template",
"search",
"routine"
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/templates.py#L162-L166
|
9,570
|
paolodragone/pymzn
|
pymzn/dzn/marsh.py
|
val2dzn
|
def val2dzn(val, wrap=True):
"""Serializes a value into its dzn representation.
The supported types are ``bool``, ``int``, ``float``, ``set``, ``array``.
Parameters
----------
val
The value to serialize
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the given value.
"""
if _is_value(val):
dzn_val = _dzn_val(val)
elif _is_set(val):
dzn_val = _dzn_set(val)
elif _is_array_type(val):
dzn_val =_dzn_array_nd(val)
else:
raise TypeError(
'Unsupported serialization of value: {}'.format(repr(val))
)
if wrap:
wrapper = _get_wrapper()
dzn_val = wrapper.fill(dzn_val)
return dzn_val
|
python
|
def val2dzn(val, wrap=True):
"""Serializes a value into its dzn representation.
The supported types are ``bool``, ``int``, ``float``, ``set``, ``array``.
Parameters
----------
val
The value to serialize
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the given value.
"""
if _is_value(val):
dzn_val = _dzn_val(val)
elif _is_set(val):
dzn_val = _dzn_set(val)
elif _is_array_type(val):
dzn_val =_dzn_array_nd(val)
else:
raise TypeError(
'Unsupported serialization of value: {}'.format(repr(val))
)
if wrap:
wrapper = _get_wrapper()
dzn_val = wrapper.fill(dzn_val)
return dzn_val
|
[
"def",
"val2dzn",
"(",
"val",
",",
"wrap",
"=",
"True",
")",
":",
"if",
"_is_value",
"(",
"val",
")",
":",
"dzn_val",
"=",
"_dzn_val",
"(",
"val",
")",
"elif",
"_is_set",
"(",
"val",
")",
":",
"dzn_val",
"=",
"_dzn_set",
"(",
"val",
")",
"elif",
"_is_array_type",
"(",
"val",
")",
":",
"dzn_val",
"=",
"_dzn_array_nd",
"(",
"val",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unsupported serialization of value: {}'",
".",
"format",
"(",
"repr",
"(",
"val",
")",
")",
")",
"if",
"wrap",
":",
"wrapper",
"=",
"_get_wrapper",
"(",
")",
"dzn_val",
"=",
"wrapper",
".",
"fill",
"(",
"dzn_val",
")",
"return",
"dzn_val"
] |
Serializes a value into its dzn representation.
The supported types are ``bool``, ``int``, ``float``, ``set``, ``array``.
Parameters
----------
val
The value to serialize
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the given value.
|
[
"Serializes",
"a",
"value",
"into",
"its",
"dzn",
"representation",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L215-L247
|
9,571
|
paolodragone/pymzn
|
pymzn/dzn/marsh.py
|
stmt2dzn
|
def stmt2dzn(name, val, declare=True, assign=True, wrap=True):
"""Returns a dzn statement declaring and assigning the given value.
Parameters
----------
val
The value to serialize.
declare : bool
Whether to include the declaration of the variable in the statement or
just the assignment.
assign : bool
Wheter to include the assignment of the value in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the value.
"""
if not (declare or assign):
raise ValueError(
'The statement must be a declaration or an assignment.'
)
stmt = []
if declare:
val_type = _dzn_type(val)
stmt.append('{}: '.format(val_type))
stmt.append(name)
if assign:
val_str = val2dzn(val, wrap=wrap)
stmt.append(' = {}'.format(val_str))
stmt.append(';')
return ''.join(stmt)
|
python
|
def stmt2dzn(name, val, declare=True, assign=True, wrap=True):
"""Returns a dzn statement declaring and assigning the given value.
Parameters
----------
val
The value to serialize.
declare : bool
Whether to include the declaration of the variable in the statement or
just the assignment.
assign : bool
Wheter to include the assignment of the value in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the value.
"""
if not (declare or assign):
raise ValueError(
'The statement must be a declaration or an assignment.'
)
stmt = []
if declare:
val_type = _dzn_type(val)
stmt.append('{}: '.format(val_type))
stmt.append(name)
if assign:
val_str = val2dzn(val, wrap=wrap)
stmt.append(' = {}'.format(val_str))
stmt.append(';')
return ''.join(stmt)
|
[
"def",
"stmt2dzn",
"(",
"name",
",",
"val",
",",
"declare",
"=",
"True",
",",
"assign",
"=",
"True",
",",
"wrap",
"=",
"True",
")",
":",
"if",
"not",
"(",
"declare",
"or",
"assign",
")",
":",
"raise",
"ValueError",
"(",
"'The statement must be a declaration or an assignment.'",
")",
"stmt",
"=",
"[",
"]",
"if",
"declare",
":",
"val_type",
"=",
"_dzn_type",
"(",
"val",
")",
"stmt",
".",
"append",
"(",
"'{}: '",
".",
"format",
"(",
"val_type",
")",
")",
"stmt",
".",
"append",
"(",
"name",
")",
"if",
"assign",
":",
"val_str",
"=",
"val2dzn",
"(",
"val",
",",
"wrap",
"=",
"wrap",
")",
"stmt",
".",
"append",
"(",
"' = {}'",
".",
"format",
"(",
"val_str",
")",
")",
"stmt",
".",
"append",
"(",
"';'",
")",
"return",
"''",
".",
"join",
"(",
"stmt",
")"
] |
Returns a dzn statement declaring and assigning the given value.
Parameters
----------
val
The value to serialize.
declare : bool
Whether to include the declaration of the variable in the statement or
just the assignment.
assign : bool
Wheter to include the assignment of the value in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the value.
|
[
"Returns",
"a",
"dzn",
"statement",
"declaring",
"and",
"assigning",
"the",
"given",
"value",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L250-L285
|
9,572
|
paolodragone/pymzn
|
pymzn/dzn/marsh.py
|
stmt2enum
|
def stmt2enum(enum_type, declare=True, assign=True, wrap=True):
"""Returns a dzn enum declaration from an enum type.
Parameters
----------
enum_type : Enum
The enum to serialize.
declare : bool
Whether to include the ``enum`` declatation keyword in the statement or
just the assignment.
assign : bool
Wheter to include the assignment of the enum in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized enum.
Returns
-------
str
The serialized dzn representation of the enum.
"""
if not (declare or assign):
raise ValueError(
'The statement must be a declaration or an assignment.'
)
stmt = []
if declare:
stmt.append('enum ')
stmt.append(enum_type.__name__)
if assign:
val_str = []
for v in list(enum_type):
val_str.append(v.name)
val_str = ''.join(['{', ','.join(val_str), '}'])
if wrap:
wrapper = _get_wrapper()
val_str = wrapper.fill(val_str)
stmt.append(' = {}'.format(val_str))
stmt.append(';')
return ''.join(stmt)
|
python
|
def stmt2enum(enum_type, declare=True, assign=True, wrap=True):
"""Returns a dzn enum declaration from an enum type.
Parameters
----------
enum_type : Enum
The enum to serialize.
declare : bool
Whether to include the ``enum`` declatation keyword in the statement or
just the assignment.
assign : bool
Wheter to include the assignment of the enum in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized enum.
Returns
-------
str
The serialized dzn representation of the enum.
"""
if not (declare or assign):
raise ValueError(
'The statement must be a declaration or an assignment.'
)
stmt = []
if declare:
stmt.append('enum ')
stmt.append(enum_type.__name__)
if assign:
val_str = []
for v in list(enum_type):
val_str.append(v.name)
val_str = ''.join(['{', ','.join(val_str), '}'])
if wrap:
wrapper = _get_wrapper()
val_str = wrapper.fill(val_str)
stmt.append(' = {}'.format(val_str))
stmt.append(';')
return ''.join(stmt)
|
[
"def",
"stmt2enum",
"(",
"enum_type",
",",
"declare",
"=",
"True",
",",
"assign",
"=",
"True",
",",
"wrap",
"=",
"True",
")",
":",
"if",
"not",
"(",
"declare",
"or",
"assign",
")",
":",
"raise",
"ValueError",
"(",
"'The statement must be a declaration or an assignment.'",
")",
"stmt",
"=",
"[",
"]",
"if",
"declare",
":",
"stmt",
".",
"append",
"(",
"'enum '",
")",
"stmt",
".",
"append",
"(",
"enum_type",
".",
"__name__",
")",
"if",
"assign",
":",
"val_str",
"=",
"[",
"]",
"for",
"v",
"in",
"list",
"(",
"enum_type",
")",
":",
"val_str",
".",
"append",
"(",
"v",
".",
"name",
")",
"val_str",
"=",
"''",
".",
"join",
"(",
"[",
"'{'",
",",
"','",
".",
"join",
"(",
"val_str",
")",
",",
"'}'",
"]",
")",
"if",
"wrap",
":",
"wrapper",
"=",
"_get_wrapper",
"(",
")",
"val_str",
"=",
"wrapper",
".",
"fill",
"(",
"val_str",
")",
"stmt",
".",
"append",
"(",
"' = {}'",
".",
"format",
"(",
"val_str",
")",
")",
"stmt",
".",
"append",
"(",
"';'",
")",
"return",
"''",
".",
"join",
"(",
"stmt",
")"
] |
Returns a dzn enum declaration from an enum type.
Parameters
----------
enum_type : Enum
The enum to serialize.
declare : bool
Whether to include the ``enum`` declatation keyword in the statement or
just the assignment.
assign : bool
Wheter to include the assignment of the enum in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized enum.
Returns
-------
str
The serialized dzn representation of the enum.
|
[
"Returns",
"a",
"dzn",
"enum",
"declaration",
"from",
"an",
"enum",
"type",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L288-L331
|
9,573
|
paolodragone/pymzn
|
pymzn/dzn/marsh.py
|
dict2dzn
|
def dict2dzn(
objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None
):
"""Serializes the objects in input and produces a list of strings encoding
them into dzn format. Optionally, the produced dzn is written on a file.
Supported types of objects include: ``str``, ``int``, ``float``, ``set``,
``list`` or ``dict``. List and dict are serialized into dzn
(multi-dimensional) arrays. The key-set of a dict is used as index-set of
dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``.
Parameters
----------
objs : dict
A dictionary containing the objects to serialize, the keys are the names
of the variables.
declare : bool
Whether to include the declaration of the variable in the statements or
just the assignment. Default is ``False``.
assign : bool
Whether to include assignment of the value in the statements or just the
declaration.
declare_enums : bool
Whether to declare the enums found as types of the objects to serialize.
Default is ``True``.
wrap : bool
Whether to wrap the serialized values.
fout : str
Path to the output file, if None no output file is written.
Returns
-------
list
List of strings containing the dzn-encoded objects.
"""
log = logging.getLogger(__name__)
vals = []
enums = set()
for key, val in objs.items():
if _is_enum(val) and declare_enums:
enum_type = type(val)
enum_name = enum_type.__name__
if enum_name not in enums:
enum_stmt = stmt2enum(
enum_type, declare=declare, assign=assign, wrap=wrap
)
vals.append(enum_stmt)
enums.add(enum_name)
stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap)
vals.append(stmt)
if fout:
log.debug('Writing file: {}'.format(fout))
with open(fout, 'w') as f:
for val in vals:
f.write('{}\n\n'.format(val))
return vals
|
python
|
def dict2dzn(
objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None
):
"""Serializes the objects in input and produces a list of strings encoding
them into dzn format. Optionally, the produced dzn is written on a file.
Supported types of objects include: ``str``, ``int``, ``float``, ``set``,
``list`` or ``dict``. List and dict are serialized into dzn
(multi-dimensional) arrays. The key-set of a dict is used as index-set of
dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``.
Parameters
----------
objs : dict
A dictionary containing the objects to serialize, the keys are the names
of the variables.
declare : bool
Whether to include the declaration of the variable in the statements or
just the assignment. Default is ``False``.
assign : bool
Whether to include assignment of the value in the statements or just the
declaration.
declare_enums : bool
Whether to declare the enums found as types of the objects to serialize.
Default is ``True``.
wrap : bool
Whether to wrap the serialized values.
fout : str
Path to the output file, if None no output file is written.
Returns
-------
list
List of strings containing the dzn-encoded objects.
"""
log = logging.getLogger(__name__)
vals = []
enums = set()
for key, val in objs.items():
if _is_enum(val) and declare_enums:
enum_type = type(val)
enum_name = enum_type.__name__
if enum_name not in enums:
enum_stmt = stmt2enum(
enum_type, declare=declare, assign=assign, wrap=wrap
)
vals.append(enum_stmt)
enums.add(enum_name)
stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap)
vals.append(stmt)
if fout:
log.debug('Writing file: {}'.format(fout))
with open(fout, 'w') as f:
for val in vals:
f.write('{}\n\n'.format(val))
return vals
|
[
"def",
"dict2dzn",
"(",
"objs",
",",
"declare",
"=",
"False",
",",
"assign",
"=",
"True",
",",
"declare_enums",
"=",
"True",
",",
"wrap",
"=",
"True",
",",
"fout",
"=",
"None",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"vals",
"=",
"[",
"]",
"enums",
"=",
"set",
"(",
")",
"for",
"key",
",",
"val",
"in",
"objs",
".",
"items",
"(",
")",
":",
"if",
"_is_enum",
"(",
"val",
")",
"and",
"declare_enums",
":",
"enum_type",
"=",
"type",
"(",
"val",
")",
"enum_name",
"=",
"enum_type",
".",
"__name__",
"if",
"enum_name",
"not",
"in",
"enums",
":",
"enum_stmt",
"=",
"stmt2enum",
"(",
"enum_type",
",",
"declare",
"=",
"declare",
",",
"assign",
"=",
"assign",
",",
"wrap",
"=",
"wrap",
")",
"vals",
".",
"append",
"(",
"enum_stmt",
")",
"enums",
".",
"add",
"(",
"enum_name",
")",
"stmt",
"=",
"stmt2dzn",
"(",
"key",
",",
"val",
",",
"declare",
"=",
"declare",
",",
"assign",
"=",
"assign",
",",
"wrap",
"=",
"wrap",
")",
"vals",
".",
"append",
"(",
"stmt",
")",
"if",
"fout",
":",
"log",
".",
"debug",
"(",
"'Writing file: {}'",
".",
"format",
"(",
"fout",
")",
")",
"with",
"open",
"(",
"fout",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"val",
"in",
"vals",
":",
"f",
".",
"write",
"(",
"'{}\\n\\n'",
".",
"format",
"(",
"val",
")",
")",
"return",
"vals"
] |
Serializes the objects in input and produces a list of strings encoding
them into dzn format. Optionally, the produced dzn is written on a file.
Supported types of objects include: ``str``, ``int``, ``float``, ``set``,
``list`` or ``dict``. List and dict are serialized into dzn
(multi-dimensional) arrays. The key-set of a dict is used as index-set of
dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``.
Parameters
----------
objs : dict
A dictionary containing the objects to serialize, the keys are the names
of the variables.
declare : bool
Whether to include the declaration of the variable in the statements or
just the assignment. Default is ``False``.
assign : bool
Whether to include assignment of the value in the statements or just the
declaration.
declare_enums : bool
Whether to declare the enums found as types of the objects to serialize.
Default is ``True``.
wrap : bool
Whether to wrap the serialized values.
fout : str
Path to the output file, if None no output file is written.
Returns
-------
list
List of strings containing the dzn-encoded objects.
|
[
"Serializes",
"the",
"objects",
"in",
"input",
"and",
"produces",
"a",
"list",
"of",
"strings",
"encoding",
"them",
"into",
"dzn",
"format",
".",
"Optionally",
"the",
"produced",
"dzn",
"is",
"written",
"on",
"a",
"file",
"."
] |
35b04cfb244918551649b9bb8a0ab65d37c31fe4
|
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L334-L391
|
9,574
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.async_or_eager
|
def async_or_eager(self, **options):
"""
Attempt to call self.apply_async, or if that fails because of a problem
with the broker, run the task eagerly and return an EagerResult.
"""
args = options.pop("args", None)
kwargs = options.pop("kwargs", None)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args, kwargs, **options)
except possible_broker_errors:
return self.apply(args, kwargs, **options)
|
python
|
def async_or_eager(self, **options):
"""
Attempt to call self.apply_async, or if that fails because of a problem
with the broker, run the task eagerly and return an EagerResult.
"""
args = options.pop("args", None)
kwargs = options.pop("kwargs", None)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args, kwargs, **options)
except possible_broker_errors:
return self.apply(args, kwargs, **options)
|
[
"def",
"async_or_eager",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"args",
"=",
"options",
".",
"pop",
"(",
"\"args\"",
",",
"None",
")",
"kwargs",
"=",
"options",
".",
"pop",
"(",
"\"kwargs\"",
",",
"None",
")",
"possible_broker_errors",
"=",
"self",
".",
"_get_possible_broker_errors_tuple",
"(",
")",
"try",
":",
"return",
"self",
".",
"apply_async",
"(",
"args",
",",
"kwargs",
",",
"*",
"*",
"options",
")",
"except",
"possible_broker_errors",
":",
"return",
"self",
".",
"apply",
"(",
"args",
",",
"kwargs",
",",
"*",
"*",
"options",
")"
] |
Attempt to call self.apply_async, or if that fails because of a problem
with the broker, run the task eagerly and return an EagerResult.
|
[
"Attempt",
"to",
"call",
"self",
".",
"apply_async",
"or",
"if",
"that",
"fails",
"because",
"of",
"a",
"problem",
"with",
"the",
"broker",
"run",
"the",
"task",
"eagerly",
"and",
"return",
"an",
"EagerResult",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L90-L101
|
9,575
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.async_or_fail
|
def async_or_fail(self, **options):
"""
Attempt to call self.apply_async, but if that fails with an exception,
we fake the task completion using the exception as the result. This
allows us to seamlessly handle errors on task creation the same way we
handle errors when a task runs, simplifying the user interface.
"""
args = options.pop("args", None)
kwargs = options.pop("kwargs", None)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args, kwargs, **options)
except possible_broker_errors as e:
return self.simulate_async_error(e)
|
python
|
def async_or_fail(self, **options):
"""
Attempt to call self.apply_async, but if that fails with an exception,
we fake the task completion using the exception as the result. This
allows us to seamlessly handle errors on task creation the same way we
handle errors when a task runs, simplifying the user interface.
"""
args = options.pop("args", None)
kwargs = options.pop("kwargs", None)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args, kwargs, **options)
except possible_broker_errors as e:
return self.simulate_async_error(e)
|
[
"def",
"async_or_fail",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"args",
"=",
"options",
".",
"pop",
"(",
"\"args\"",
",",
"None",
")",
"kwargs",
"=",
"options",
".",
"pop",
"(",
"\"kwargs\"",
",",
"None",
")",
"possible_broker_errors",
"=",
"self",
".",
"_get_possible_broker_errors_tuple",
"(",
")",
"try",
":",
"return",
"self",
".",
"apply_async",
"(",
"args",
",",
"kwargs",
",",
"*",
"*",
"options",
")",
"except",
"possible_broker_errors",
"as",
"e",
":",
"return",
"self",
".",
"simulate_async_error",
"(",
"e",
")"
] |
Attempt to call self.apply_async, but if that fails with an exception,
we fake the task completion using the exception as the result. This
allows us to seamlessly handle errors on task creation the same way we
handle errors when a task runs, simplifying the user interface.
|
[
"Attempt",
"to",
"call",
"self",
".",
"apply_async",
"but",
"if",
"that",
"fails",
"with",
"an",
"exception",
"we",
"fake",
"the",
"task",
"completion",
"using",
"the",
"exception",
"as",
"the",
"result",
".",
"This",
"allows",
"us",
"to",
"seamlessly",
"handle",
"errors",
"on",
"task",
"creation",
"the",
"same",
"way",
"we",
"handle",
"errors",
"when",
"a",
"task",
"runs",
"simplifying",
"the",
"user",
"interface",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L104-L117
|
9,576
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.delay_or_eager
|
def delay_or_eager(self, *args, **kwargs):
"""
Wrap async_or_eager with a convenience signiture like delay
"""
return self.async_or_eager(args=args, kwargs=kwargs)
|
python
|
def delay_or_eager(self, *args, **kwargs):
"""
Wrap async_or_eager with a convenience signiture like delay
"""
return self.async_or_eager(args=args, kwargs=kwargs)
|
[
"def",
"delay_or_eager",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"async_or_eager",
"(",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")"
] |
Wrap async_or_eager with a convenience signiture like delay
|
[
"Wrap",
"async_or_eager",
"with",
"a",
"convenience",
"signiture",
"like",
"delay"
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L120-L124
|
9,577
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.delay_or_run
|
def delay_or_run(self, *args, **kwargs):
"""
Attempt to call self.delay, or if that fails, call self.run.
Returns a tuple, (result, required_fallback). ``result`` is the result
of calling delay or run. ``required_fallback`` is True if the broker
failed we had to resort to `self.run`.
"""
warnings.warn(
"delay_or_run is deprecated. Please use delay_or_eager",
DeprecationWarning,
)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
result = self.apply_async(args=args, kwargs=kwargs)
required_fallback = False
except possible_broker_errors:
result = self().run(*args, **kwargs)
required_fallback = True
return result, required_fallback
|
python
|
def delay_or_run(self, *args, **kwargs):
"""
Attempt to call self.delay, or if that fails, call self.run.
Returns a tuple, (result, required_fallback). ``result`` is the result
of calling delay or run. ``required_fallback`` is True if the broker
failed we had to resort to `self.run`.
"""
warnings.warn(
"delay_or_run is deprecated. Please use delay_or_eager",
DeprecationWarning,
)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
result = self.apply_async(args=args, kwargs=kwargs)
required_fallback = False
except possible_broker_errors:
result = self().run(*args, **kwargs)
required_fallback = True
return result, required_fallback
|
[
"def",
"delay_or_run",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"delay_or_run is deprecated. Please use delay_or_eager\"",
",",
"DeprecationWarning",
",",
")",
"possible_broker_errors",
"=",
"self",
".",
"_get_possible_broker_errors_tuple",
"(",
")",
"try",
":",
"result",
"=",
"self",
".",
"apply_async",
"(",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
"required_fallback",
"=",
"False",
"except",
"possible_broker_errors",
":",
"result",
"=",
"self",
"(",
")",
".",
"run",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"required_fallback",
"=",
"True",
"return",
"result",
",",
"required_fallback"
] |
Attempt to call self.delay, or if that fails, call self.run.
Returns a tuple, (result, required_fallback). ``result`` is the result
of calling delay or run. ``required_fallback`` is True if the broker
failed we had to resort to `self.run`.
|
[
"Attempt",
"to",
"call",
"self",
".",
"delay",
"or",
"if",
"that",
"fails",
"call",
"self",
".",
"run",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L127-L146
|
9,578
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.delay_or_fail
|
def delay_or_fail(self, *args, **kwargs):
"""
Wrap async_or_fail with a convenience signiture like delay
"""
return self.async_or_fail(args=args, kwargs=kwargs)
|
python
|
def delay_or_fail(self, *args, **kwargs):
"""
Wrap async_or_fail with a convenience signiture like delay
"""
return self.async_or_fail(args=args, kwargs=kwargs)
|
[
"def",
"delay_or_fail",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"async_or_fail",
"(",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")"
] |
Wrap async_or_fail with a convenience signiture like delay
|
[
"Wrap",
"async_or_fail",
"with",
"a",
"convenience",
"signiture",
"like",
"delay"
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L149-L153
|
9,579
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.simulate_async_error
|
def simulate_async_error(self, exception):
"""
Take this exception and store it as an error in the result backend.
This unifies the handling of broker-connection errors with any other
type of error that might occur when running the task. So the same
error-handling that might retry a task or display a useful message to
the user can also handle this error.
"""
task_id = gen_unique_id()
async_result = self.AsyncResult(task_id)
einfo = ExceptionInfo(sys.exc_info())
async_result.backend.mark_as_failure(
task_id,
exception,
traceback=einfo.traceback,
)
return async_result
|
python
|
def simulate_async_error(self, exception):
"""
Take this exception and store it as an error in the result backend.
This unifies the handling of broker-connection errors with any other
type of error that might occur when running the task. So the same
error-handling that might retry a task or display a useful message to
the user can also handle this error.
"""
task_id = gen_unique_id()
async_result = self.AsyncResult(task_id)
einfo = ExceptionInfo(sys.exc_info())
async_result.backend.mark_as_failure(
task_id,
exception,
traceback=einfo.traceback,
)
return async_result
|
[
"def",
"simulate_async_error",
"(",
"self",
",",
"exception",
")",
":",
"task_id",
"=",
"gen_unique_id",
"(",
")",
"async_result",
"=",
"self",
".",
"AsyncResult",
"(",
"task_id",
")",
"einfo",
"=",
"ExceptionInfo",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
"async_result",
".",
"backend",
".",
"mark_as_failure",
"(",
"task_id",
",",
"exception",
",",
"traceback",
"=",
"einfo",
".",
"traceback",
",",
")",
"return",
"async_result"
] |
Take this exception and store it as an error in the result backend.
This unifies the handling of broker-connection errors with any other
type of error that might occur when running the task. So the same
error-handling that might retry a task or display a useful message to
the user can also handle this error.
|
[
"Take",
"this",
"exception",
"and",
"store",
"it",
"as",
"an",
"error",
"in",
"the",
"result",
"backend",
".",
"This",
"unifies",
"the",
"handling",
"of",
"broker",
"-",
"connection",
"errors",
"with",
"any",
"other",
"type",
"of",
"error",
"that",
"might",
"occur",
"when",
"running",
"the",
"task",
".",
"So",
"the",
"same",
"error",
"-",
"handling",
"that",
"might",
"retry",
"a",
"task",
"or",
"display",
"a",
"useful",
"message",
"to",
"the",
"user",
"can",
"also",
"handle",
"this",
"error",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L178-L196
|
9,580
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.calc_progress
|
def calc_progress(self, completed_count, total_count):
"""
Calculate the percentage progress and estimated remaining time based on
the current number of items completed of the total.
Returns a tuple of ``(percentage_complete, seconds_remaining)``.
"""
self.logger.debug(
"calc_progress(%s, %s)",
completed_count,
total_count,
)
current_time = time.time()
time_spent = current_time - self.start_time
self.logger.debug("Progress time spent: %s", time_spent)
if total_count == 0:
return 100, 1
completion_fraction = completed_count / total_count
if completion_fraction == 0:
completion_fraction = 1
total_time = 0
total_time = time_spent / completion_fraction
time_remaining = total_time - time_spent
completion_display = completion_fraction * 100
if completion_display == 100:
return 100, 1 # 1 second to finish up
return completion_display, time_remaining
|
python
|
def calc_progress(self, completed_count, total_count):
"""
Calculate the percentage progress and estimated remaining time based on
the current number of items completed of the total.
Returns a tuple of ``(percentage_complete, seconds_remaining)``.
"""
self.logger.debug(
"calc_progress(%s, %s)",
completed_count,
total_count,
)
current_time = time.time()
time_spent = current_time - self.start_time
self.logger.debug("Progress time spent: %s", time_spent)
if total_count == 0:
return 100, 1
completion_fraction = completed_count / total_count
if completion_fraction == 0:
completion_fraction = 1
total_time = 0
total_time = time_spent / completion_fraction
time_remaining = total_time - time_spent
completion_display = completion_fraction * 100
if completion_display == 100:
return 100, 1 # 1 second to finish up
return completion_display, time_remaining
|
[
"def",
"calc_progress",
"(",
"self",
",",
"completed_count",
",",
"total_count",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"calc_progress(%s, %s)\"",
",",
"completed_count",
",",
"total_count",
",",
")",
"current_time",
"=",
"time",
".",
"time",
"(",
")",
"time_spent",
"=",
"current_time",
"-",
"self",
".",
"start_time",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Progress time spent: %s\"",
",",
"time_spent",
")",
"if",
"total_count",
"==",
"0",
":",
"return",
"100",
",",
"1",
"completion_fraction",
"=",
"completed_count",
"/",
"total_count",
"if",
"completion_fraction",
"==",
"0",
":",
"completion_fraction",
"=",
"1",
"total_time",
"=",
"0",
"total_time",
"=",
"time_spent",
"/",
"completion_fraction",
"time_remaining",
"=",
"total_time",
"-",
"time_spent",
"completion_display",
"=",
"completion_fraction",
"*",
"100",
"if",
"completion_display",
"==",
"100",
":",
"return",
"100",
",",
"1",
"# 1 second to finish up",
"return",
"completion_display",
",",
"time_remaining"
] |
Calculate the percentage progress and estimated remaining time based on
the current number of items completed of the total.
Returns a tuple of ``(percentage_complete, seconds_remaining)``.
|
[
"Calculate",
"the",
"percentage",
"progress",
"and",
"estimated",
"remaining",
"time",
"based",
"on",
"the",
"current",
"number",
"of",
"items",
"completed",
"of",
"the",
"total",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L246-L278
|
9,581
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.update_progress
|
def update_progress(
self,
completed_count,
total_count,
update_frequency=1,
):
"""
Update the task backend with both an estimated percentage complete and
number of seconds remaining until completion.
``completed_count`` Number of task "units" that have been completed out
of ``total_count`` total "units."
``update_frequency`` Only actually store the updated progress in the
background at most every ``N`` ``completed_count``.
"""
if completed_count - self._last_update_count < update_frequency:
# We've updated the progress too recently. Don't stress out the
# result backend
return
# Store progress for display
progress_percent, time_remaining = self.calc_progress(
completed_count, total_count)
self.logger.debug(
"Updating progress: %s percent, %s remaining",
progress_percent,
time_remaining)
if self.request.id:
self._last_update_count = completed_count
self.update_state(None, PROGRESS, {
"progress_percent": progress_percent,
"time_remaining": time_remaining,
})
|
python
|
def update_progress(
self,
completed_count,
total_count,
update_frequency=1,
):
"""
Update the task backend with both an estimated percentage complete and
number of seconds remaining until completion.
``completed_count`` Number of task "units" that have been completed out
of ``total_count`` total "units."
``update_frequency`` Only actually store the updated progress in the
background at most every ``N`` ``completed_count``.
"""
if completed_count - self._last_update_count < update_frequency:
# We've updated the progress too recently. Don't stress out the
# result backend
return
# Store progress for display
progress_percent, time_remaining = self.calc_progress(
completed_count, total_count)
self.logger.debug(
"Updating progress: %s percent, %s remaining",
progress_percent,
time_remaining)
if self.request.id:
self._last_update_count = completed_count
self.update_state(None, PROGRESS, {
"progress_percent": progress_percent,
"time_remaining": time_remaining,
})
|
[
"def",
"update_progress",
"(",
"self",
",",
"completed_count",
",",
"total_count",
",",
"update_frequency",
"=",
"1",
",",
")",
":",
"if",
"completed_count",
"-",
"self",
".",
"_last_update_count",
"<",
"update_frequency",
":",
"# We've updated the progress too recently. Don't stress out the",
"# result backend",
"return",
"# Store progress for display",
"progress_percent",
",",
"time_remaining",
"=",
"self",
".",
"calc_progress",
"(",
"completed_count",
",",
"total_count",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Updating progress: %s percent, %s remaining\"",
",",
"progress_percent",
",",
"time_remaining",
")",
"if",
"self",
".",
"request",
".",
"id",
":",
"self",
".",
"_last_update_count",
"=",
"completed_count",
"self",
".",
"update_state",
"(",
"None",
",",
"PROGRESS",
",",
"{",
"\"progress_percent\"",
":",
"progress_percent",
",",
"\"time_remaining\"",
":",
"time_remaining",
",",
"}",
")"
] |
Update the task backend with both an estimated percentage complete and
number of seconds remaining until completion.
``completed_count`` Number of task "units" that have been completed out
of ``total_count`` total "units."
``update_frequency`` Only actually store the updated progress in the
background at most every ``N`` ``completed_count``.
|
[
"Update",
"the",
"task",
"backend",
"with",
"both",
"an",
"estimated",
"percentage",
"complete",
"and",
"number",
"of",
"seconds",
"remaining",
"until",
"completion",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L280-L311
|
9,582
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask._validate_required_class_vars
|
def _validate_required_class_vars(self):
"""
Ensure that this subclass has defined all of the required class
variables.
"""
required_members = (
'significant_kwargs',
'herd_avoidance_timeout',
)
for required_member in required_members:
if not hasattr(self, required_member):
raise Exception(
"JobtasticTask's must define a %s" % required_member)
|
python
|
def _validate_required_class_vars(self):
"""
Ensure that this subclass has defined all of the required class
variables.
"""
required_members = (
'significant_kwargs',
'herd_avoidance_timeout',
)
for required_member in required_members:
if not hasattr(self, required_member):
raise Exception(
"JobtasticTask's must define a %s" % required_member)
|
[
"def",
"_validate_required_class_vars",
"(",
"self",
")",
":",
"required_members",
"=",
"(",
"'significant_kwargs'",
",",
"'herd_avoidance_timeout'",
",",
")",
"for",
"required_member",
"in",
"required_members",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"required_member",
")",
":",
"raise",
"Exception",
"(",
"\"JobtasticTask's must define a %s\"",
"%",
"required_member",
")"
] |
Ensure that this subclass has defined all of the required class
variables.
|
[
"Ensure",
"that",
"this",
"subclass",
"has",
"defined",
"all",
"of",
"the",
"required",
"class",
"variables",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L378-L390
|
9,583
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask.on_success
|
def on_success(self, retval, task_id, args, kwargs):
"""
Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results.
"""
if self.request.is_eager:
# Store the result because celery wouldn't otherwise
self.update_state(task_id, SUCCESS, retval)
|
python
|
def on_success(self, retval, task_id, args, kwargs):
"""
Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results.
"""
if self.request.is_eager:
# Store the result because celery wouldn't otherwise
self.update_state(task_id, SUCCESS, retval)
|
[
"def",
"on_success",
"(",
"self",
",",
"retval",
",",
"task_id",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"self",
".",
"request",
".",
"is_eager",
":",
"# Store the result because celery wouldn't otherwise",
"self",
".",
"update_state",
"(",
"task_id",
",",
"SUCCESS",
",",
"retval",
")"
] |
Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results.
|
[
"Store",
"results",
"in",
"the",
"backend",
"even",
"if",
"we",
"re",
"always",
"eager",
".",
"This",
"ensures",
"the",
"delay_or_run",
"calls",
"always",
"at",
"least",
"have",
"results",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L392-L399
|
9,584
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask._get_cache
|
def _get_cache(self):
"""
Return the cache to use for thundering herd protection, etc.
"""
if not self._cache:
self._cache = get_cache(self.app)
return self._cache
|
python
|
def _get_cache(self):
"""
Return the cache to use for thundering herd protection, etc.
"""
if not self._cache:
self._cache = get_cache(self.app)
return self._cache
|
[
"def",
"_get_cache",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_cache",
":",
"self",
".",
"_cache",
"=",
"get_cache",
"(",
"self",
".",
"app",
")",
"return",
"self",
".",
"_cache"
] |
Return the cache to use for thundering herd protection, etc.
|
[
"Return",
"the",
"cache",
"to",
"use",
"for",
"thundering",
"herd",
"protection",
"etc",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L405-L411
|
9,585
|
PolicyStat/jobtastic
|
jobtastic/task.py
|
JobtasticTask._get_cache_key
|
def _get_cache_key(self, **kwargs):
"""
Take this task's configured ``significant_kwargs`` and build a hash
that all equivalent task calls will match.
Takes in kwargs and returns a string.
To change the way the cache key is generated or do more in-depth
processing, override this method.
"""
m = md5()
for significant_kwarg in self.significant_kwargs:
key, to_str = significant_kwarg
try:
m.update(to_str(kwargs[key]))
except (TypeError, UnicodeEncodeError):
# Python 3.x strings aren't accepted by hash.update().
# String should be byte-encoded first.
m.update(to_str(kwargs[key]).encode('utf-8'))
if hasattr(self, 'cache_prefix'):
cache_prefix = self.cache_prefix
else:
cache_prefix = '%s.%s' % (self.__module__, self.__name__)
return '%s:%s' % (cache_prefix, m.hexdigest())
|
python
|
def _get_cache_key(self, **kwargs):
"""
Take this task's configured ``significant_kwargs`` and build a hash
that all equivalent task calls will match.
Takes in kwargs and returns a string.
To change the way the cache key is generated or do more in-depth
processing, override this method.
"""
m = md5()
for significant_kwarg in self.significant_kwargs:
key, to_str = significant_kwarg
try:
m.update(to_str(kwargs[key]))
except (TypeError, UnicodeEncodeError):
# Python 3.x strings aren't accepted by hash.update().
# String should be byte-encoded first.
m.update(to_str(kwargs[key]).encode('utf-8'))
if hasattr(self, 'cache_prefix'):
cache_prefix = self.cache_prefix
else:
cache_prefix = '%s.%s' % (self.__module__, self.__name__)
return '%s:%s' % (cache_prefix, m.hexdigest())
|
[
"def",
"_get_cache_key",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"m",
"=",
"md5",
"(",
")",
"for",
"significant_kwarg",
"in",
"self",
".",
"significant_kwargs",
":",
"key",
",",
"to_str",
"=",
"significant_kwarg",
"try",
":",
"m",
".",
"update",
"(",
"to_str",
"(",
"kwargs",
"[",
"key",
"]",
")",
")",
"except",
"(",
"TypeError",
",",
"UnicodeEncodeError",
")",
":",
"# Python 3.x strings aren't accepted by hash.update().",
"# String should be byte-encoded first.",
"m",
".",
"update",
"(",
"to_str",
"(",
"kwargs",
"[",
"key",
"]",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"if",
"hasattr",
"(",
"self",
",",
"'cache_prefix'",
")",
":",
"cache_prefix",
"=",
"self",
".",
"cache_prefix",
"else",
":",
"cache_prefix",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"__module__",
",",
"self",
".",
"__name__",
")",
"return",
"'%s:%s'",
"%",
"(",
"cache_prefix",
",",
"m",
".",
"hexdigest",
"(",
")",
")"
] |
Take this task's configured ``significant_kwargs`` and build a hash
that all equivalent task calls will match.
Takes in kwargs and returns a string.
To change the way the cache key is generated or do more in-depth
processing, override this method.
|
[
"Take",
"this",
"task",
"s",
"configured",
"significant_kwargs",
"and",
"build",
"a",
"hash",
"that",
"all",
"equivalent",
"task",
"calls",
"will",
"match",
"."
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L426-L450
|
9,586
|
PolicyStat/jobtastic
|
jobtastic/cache/__init__.py
|
get_cache
|
def get_cache(app):
"""
Attempt to find a valid cache from the Celery configuration
If the setting is a valid cache, just use it.
Otherwise, if Django is installed, then:
If the setting is a valid Django cache entry, then use that.
If the setting is empty use the default cache
Otherwise, if Werkzeug is installed, then:
If the setting is a valid Celery Memcache or Redis Backend, then use
that.
If the setting is empty and the default Celery Result Backend is
Memcache or Redis, then use that
Otherwise fail
"""
jobtastic_cache_setting = app.conf.get('JOBTASTIC_CACHE')
if isinstance(jobtastic_cache_setting, BaseCache):
return jobtastic_cache_setting
if 'Django' in CACHES:
if jobtastic_cache_setting:
try:
return WrappedCache(get_django_cache(jobtastic_cache_setting))
except InvalidCacheBackendError:
pass
else:
return WrappedCache(get_django_cache('default'))
if 'Werkzeug' in CACHES:
if jobtastic_cache_setting:
backend, url = get_backend_by_url(jobtastic_cache_setting)
backend = backend(app=app, url=url)
else:
backend = app.backend
if isinstance(backend, CacheBackend):
return WrappedCache(MemcachedCache(backend.client))
elif isinstance(backend, RedisBackend):
return WrappedCache(RedisCache(backend.client))
# Give up
raise RuntimeError('Cannot find a suitable cache for Jobtastic')
|
python
|
def get_cache(app):
"""
Attempt to find a valid cache from the Celery configuration
If the setting is a valid cache, just use it.
Otherwise, if Django is installed, then:
If the setting is a valid Django cache entry, then use that.
If the setting is empty use the default cache
Otherwise, if Werkzeug is installed, then:
If the setting is a valid Celery Memcache or Redis Backend, then use
that.
If the setting is empty and the default Celery Result Backend is
Memcache or Redis, then use that
Otherwise fail
"""
jobtastic_cache_setting = app.conf.get('JOBTASTIC_CACHE')
if isinstance(jobtastic_cache_setting, BaseCache):
return jobtastic_cache_setting
if 'Django' in CACHES:
if jobtastic_cache_setting:
try:
return WrappedCache(get_django_cache(jobtastic_cache_setting))
except InvalidCacheBackendError:
pass
else:
return WrappedCache(get_django_cache('default'))
if 'Werkzeug' in CACHES:
if jobtastic_cache_setting:
backend, url = get_backend_by_url(jobtastic_cache_setting)
backend = backend(app=app, url=url)
else:
backend = app.backend
if isinstance(backend, CacheBackend):
return WrappedCache(MemcachedCache(backend.client))
elif isinstance(backend, RedisBackend):
return WrappedCache(RedisCache(backend.client))
# Give up
raise RuntimeError('Cannot find a suitable cache for Jobtastic')
|
[
"def",
"get_cache",
"(",
"app",
")",
":",
"jobtastic_cache_setting",
"=",
"app",
".",
"conf",
".",
"get",
"(",
"'JOBTASTIC_CACHE'",
")",
"if",
"isinstance",
"(",
"jobtastic_cache_setting",
",",
"BaseCache",
")",
":",
"return",
"jobtastic_cache_setting",
"if",
"'Django'",
"in",
"CACHES",
":",
"if",
"jobtastic_cache_setting",
":",
"try",
":",
"return",
"WrappedCache",
"(",
"get_django_cache",
"(",
"jobtastic_cache_setting",
")",
")",
"except",
"InvalidCacheBackendError",
":",
"pass",
"else",
":",
"return",
"WrappedCache",
"(",
"get_django_cache",
"(",
"'default'",
")",
")",
"if",
"'Werkzeug'",
"in",
"CACHES",
":",
"if",
"jobtastic_cache_setting",
":",
"backend",
",",
"url",
"=",
"get_backend_by_url",
"(",
"jobtastic_cache_setting",
")",
"backend",
"=",
"backend",
"(",
"app",
"=",
"app",
",",
"url",
"=",
"url",
")",
"else",
":",
"backend",
"=",
"app",
".",
"backend",
"if",
"isinstance",
"(",
"backend",
",",
"CacheBackend",
")",
":",
"return",
"WrappedCache",
"(",
"MemcachedCache",
"(",
"backend",
".",
"client",
")",
")",
"elif",
"isinstance",
"(",
"backend",
",",
"RedisBackend",
")",
":",
"return",
"WrappedCache",
"(",
"RedisCache",
"(",
"backend",
".",
"client",
")",
")",
"# Give up",
"raise",
"RuntimeError",
"(",
"'Cannot find a suitable cache for Jobtastic'",
")"
] |
Attempt to find a valid cache from the Celery configuration
If the setting is a valid cache, just use it.
Otherwise, if Django is installed, then:
If the setting is a valid Django cache entry, then use that.
If the setting is empty use the default cache
Otherwise, if Werkzeug is installed, then:
If the setting is a valid Celery Memcache or Redis Backend, then use
that.
If the setting is empty and the default Celery Result Backend is
Memcache or Redis, then use that
Otherwise fail
|
[
"Attempt",
"to",
"find",
"a",
"valid",
"cache",
"from",
"the",
"Celery",
"configuration"
] |
19cd3137ebf46877cee1ee5155d318bb6261ee1c
|
https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/cache/__init__.py#L29-L70
|
9,587
|
dodger487/dplython
|
dplython/dplython.py
|
select
|
def select(*args):
"""Select specific columns from DataFrame.
Output will be DplyFrame type. Order of columns will be the same as input into
select.
>>> diamonds >> select(X.color, X.carat) >> head(3)
Out:
color carat
0 E 0.23
1 E 0.21
2 E 0.23
Grouping variables are implied in selection.
>>> df >> group_by(X.a, X.b) >> select(X.c)
returns a dataframe like `df[[X.a, X.b, X.c]]` with the variables appearing in
grouped order before the selected column(s), unless a grouped variable is
explicitly selected
>>> df >> group_by(X.a, X.b) >> select(X.c, X.b)
returns a dataframe like `df[[X.a, X.c, X.b]]`
"""
def select_columns(df, args):
columns = [column._name for column in args]
if df._grouped_on:
for col in df._grouped_on[::-1]:
if col not in columns:
columns.insert(0, col)
return columns
return lambda df: df[select_columns(df, args)]
|
python
|
def select(*args):
"""Select specific columns from DataFrame.
Output will be DplyFrame type. Order of columns will be the same as input into
select.
>>> diamonds >> select(X.color, X.carat) >> head(3)
Out:
color carat
0 E 0.23
1 E 0.21
2 E 0.23
Grouping variables are implied in selection.
>>> df >> group_by(X.a, X.b) >> select(X.c)
returns a dataframe like `df[[X.a, X.b, X.c]]` with the variables appearing in
grouped order before the selected column(s), unless a grouped variable is
explicitly selected
>>> df >> group_by(X.a, X.b) >> select(X.c, X.b)
returns a dataframe like `df[[X.a, X.c, X.b]]`
"""
def select_columns(df, args):
columns = [column._name for column in args]
if df._grouped_on:
for col in df._grouped_on[::-1]:
if col not in columns:
columns.insert(0, col)
return columns
return lambda df: df[select_columns(df, args)]
|
[
"def",
"select",
"(",
"*",
"args",
")",
":",
"def",
"select_columns",
"(",
"df",
",",
"args",
")",
":",
"columns",
"=",
"[",
"column",
".",
"_name",
"for",
"column",
"in",
"args",
"]",
"if",
"df",
".",
"_grouped_on",
":",
"for",
"col",
"in",
"df",
".",
"_grouped_on",
"[",
":",
":",
"-",
"1",
"]",
":",
"if",
"col",
"not",
"in",
"columns",
":",
"columns",
".",
"insert",
"(",
"0",
",",
"col",
")",
"return",
"columns",
"return",
"lambda",
"df",
":",
"df",
"[",
"select_columns",
"(",
"df",
",",
"args",
")",
"]"
] |
Select specific columns from DataFrame.
Output will be DplyFrame type. Order of columns will be the same as input into
select.
>>> diamonds >> select(X.color, X.carat) >> head(3)
Out:
color carat
0 E 0.23
1 E 0.21
2 E 0.23
Grouping variables are implied in selection.
>>> df >> group_by(X.a, X.b) >> select(X.c)
returns a dataframe like `df[[X.a, X.b, X.c]]` with the variables appearing in
grouped order before the selected column(s), unless a grouped variable is
explicitly selected
>>> df >> group_by(X.a, X.b) >> select(X.c, X.b)
returns a dataframe like `df[[X.a, X.c, X.b]]`
|
[
"Select",
"specific",
"columns",
"from",
"DataFrame",
"."
] |
09c2a5f4ca67221b2a59928366ca8274357f7234
|
https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L203-L232
|
9,588
|
dodger487/dplython
|
dplython/dplython.py
|
arrange
|
def arrange(*args):
"""Sort DataFrame by the input column arguments.
>>> diamonds >> sample_n(5) >> arrange(X.price) >> select(X.depth, X.price)
Out:
depth price
28547 61.0 675
35132 59.1 889
42526 61.3 1323
3468 61.6 3392
23829 62.0 11903
"""
names = [column._name for column in args]
def f(df):
sortby_df = df >> mutate(*args)
index = sortby_df.sort_values([str(arg) for arg in args]).index
return df.loc[index]
return f
|
python
|
def arrange(*args):
"""Sort DataFrame by the input column arguments.
>>> diamonds >> sample_n(5) >> arrange(X.price) >> select(X.depth, X.price)
Out:
depth price
28547 61.0 675
35132 59.1 889
42526 61.3 1323
3468 61.6 3392
23829 62.0 11903
"""
names = [column._name for column in args]
def f(df):
sortby_df = df >> mutate(*args)
index = sortby_df.sort_values([str(arg) for arg in args]).index
return df.loc[index]
return f
|
[
"def",
"arrange",
"(",
"*",
"args",
")",
":",
"names",
"=",
"[",
"column",
".",
"_name",
"for",
"column",
"in",
"args",
"]",
"def",
"f",
"(",
"df",
")",
":",
"sortby_df",
"=",
"df",
">>",
"mutate",
"(",
"*",
"args",
")",
"index",
"=",
"sortby_df",
".",
"sort_values",
"(",
"[",
"str",
"(",
"arg",
")",
"for",
"arg",
"in",
"args",
"]",
")",
".",
"index",
"return",
"df",
".",
"loc",
"[",
"index",
"]",
"return",
"f"
] |
Sort DataFrame by the input column arguments.
>>> diamonds >> sample_n(5) >> arrange(X.price) >> select(X.depth, X.price)
Out:
depth price
28547 61.0 675
35132 59.1 889
42526 61.3 1323
3468 61.6 3392
23829 62.0 11903
|
[
"Sort",
"DataFrame",
"by",
"the",
"input",
"column",
"arguments",
"."
] |
09c2a5f4ca67221b2a59928366ca8274357f7234
|
https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L377-L394
|
9,589
|
dodger487/dplython
|
dplython/dplython.py
|
rename
|
def rename(**kwargs):
"""Rename one or more columns, leaving other columns unchanged
Example usage:
diamonds >> rename(new_name=old_name)
"""
def rename_columns(df):
column_assignments = {old_name_later._name: new_name
for new_name, old_name_later in kwargs.items()}
return df.rename(columns=column_assignments)
return rename_columns
|
python
|
def rename(**kwargs):
"""Rename one or more columns, leaving other columns unchanged
Example usage:
diamonds >> rename(new_name=old_name)
"""
def rename_columns(df):
column_assignments = {old_name_later._name: new_name
for new_name, old_name_later in kwargs.items()}
return df.rename(columns=column_assignments)
return rename_columns
|
[
"def",
"rename",
"(",
"*",
"*",
"kwargs",
")",
":",
"def",
"rename_columns",
"(",
"df",
")",
":",
"column_assignments",
"=",
"{",
"old_name_later",
".",
"_name",
":",
"new_name",
"for",
"new_name",
",",
"old_name_later",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"return",
"df",
".",
"rename",
"(",
"columns",
"=",
"column_assignments",
")",
"return",
"rename_columns"
] |
Rename one or more columns, leaving other columns unchanged
Example usage:
diamonds >> rename(new_name=old_name)
|
[
"Rename",
"one",
"or",
"more",
"columns",
"leaving",
"other",
"columns",
"unchanged"
] |
09c2a5f4ca67221b2a59928366ca8274357f7234
|
https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L453-L463
|
9,590
|
dodger487/dplython
|
dplython/dplython.py
|
transmute
|
def transmute(*args, **kwargs):
""" Similar to `select` but allows mutation in column definitions.
In : (diamonds >>
head(3) >>
transmute(new_price=X.price * 2, x_plus_y=X.x + X.y))
Out:
new_price x_plus_y
0 652 7.93
1 652 7.73
2 654 8.12
"""
mutate_dateframe_fn = mutate(*args, **dict(kwargs))
column_names_args = [str(arg) for arg in args]
column_names_kwargs = [name for name, _
in _dict_to_possibly_ordered_tuples(kwargs)]
column_names = column_names_args + column_names_kwargs
return lambda df: mutate_dateframe_fn(df)[column_names]
|
python
|
def transmute(*args, **kwargs):
""" Similar to `select` but allows mutation in column definitions.
In : (diamonds >>
head(3) >>
transmute(new_price=X.price * 2, x_plus_y=X.x + X.y))
Out:
new_price x_plus_y
0 652 7.93
1 652 7.73
2 654 8.12
"""
mutate_dateframe_fn = mutate(*args, **dict(kwargs))
column_names_args = [str(arg) for arg in args]
column_names_kwargs = [name for name, _
in _dict_to_possibly_ordered_tuples(kwargs)]
column_names = column_names_args + column_names_kwargs
return lambda df: mutate_dateframe_fn(df)[column_names]
|
[
"def",
"transmute",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"mutate_dateframe_fn",
"=",
"mutate",
"(",
"*",
"args",
",",
"*",
"*",
"dict",
"(",
"kwargs",
")",
")",
"column_names_args",
"=",
"[",
"str",
"(",
"arg",
")",
"for",
"arg",
"in",
"args",
"]",
"column_names_kwargs",
"=",
"[",
"name",
"for",
"name",
",",
"_",
"in",
"_dict_to_possibly_ordered_tuples",
"(",
"kwargs",
")",
"]",
"column_names",
"=",
"column_names_args",
"+",
"column_names_kwargs",
"return",
"lambda",
"df",
":",
"mutate_dateframe_fn",
"(",
"df",
")",
"[",
"column_names",
"]"
] |
Similar to `select` but allows mutation in column definitions.
In : (diamonds >>
head(3) >>
transmute(new_price=X.price * 2, x_plus_y=X.x + X.y))
Out:
new_price x_plus_y
0 652 7.93
1 652 7.73
2 654 8.12
|
[
"Similar",
"to",
"select",
"but",
"allows",
"mutation",
"in",
"column",
"definitions",
"."
] |
09c2a5f4ca67221b2a59928366ca8274357f7234
|
https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L467-L484
|
9,591
|
dodger487/dplython
|
dplython/dplython.py
|
get_join_cols
|
def get_join_cols(by_entry):
""" helper function used for joins
builds left and right join list for join function
"""
left_cols = []
right_cols = []
for col in by_entry:
if isinstance(col, str):
left_cols.append(col)
right_cols.append(col)
else:
left_cols.append(col[0])
right_cols.append(col[1])
return left_cols, right_cols
|
python
|
def get_join_cols(by_entry):
""" helper function used for joins
builds left and right join list for join function
"""
left_cols = []
right_cols = []
for col in by_entry:
if isinstance(col, str):
left_cols.append(col)
right_cols.append(col)
else:
left_cols.append(col[0])
right_cols.append(col[1])
return left_cols, right_cols
|
[
"def",
"get_join_cols",
"(",
"by_entry",
")",
":",
"left_cols",
"=",
"[",
"]",
"right_cols",
"=",
"[",
"]",
"for",
"col",
"in",
"by_entry",
":",
"if",
"isinstance",
"(",
"col",
",",
"str",
")",
":",
"left_cols",
".",
"append",
"(",
"col",
")",
"right_cols",
".",
"append",
"(",
"col",
")",
"else",
":",
"left_cols",
".",
"append",
"(",
"col",
"[",
"0",
"]",
")",
"right_cols",
".",
"append",
"(",
"col",
"[",
"1",
"]",
")",
"return",
"left_cols",
",",
"right_cols"
] |
helper function used for joins
builds left and right join list for join function
|
[
"helper",
"function",
"used",
"for",
"joins",
"builds",
"left",
"and",
"right",
"join",
"list",
"for",
"join",
"function"
] |
09c2a5f4ca67221b2a59928366ca8274357f7234
|
https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L504-L517
|
9,592
|
dodger487/dplython
|
dplython/dplython.py
|
mutating_join
|
def mutating_join(*args, **kwargs):
""" generic function for mutating dplyr-style joins
"""
# candidate for improvement
left = args[0]
right = args[1]
if 'by' in kwargs:
left_cols, right_cols = get_join_cols(kwargs['by'])
else:
left_cols, right_cols = None, None
if 'suffixes' in kwargs:
dsuffixes = kwargs['suffixes']
else:
dsuffixes = ('_x', '_y')
if left._grouped_on:
outDf = (DplyFrame((left >> ungroup())
.merge(right, how=kwargs['how'], left_on=left_cols,
right_on=right_cols, suffixes=dsuffixes))
.regroup(left._grouped_on))
else:
outDf = DplyFrame(left.merge(right, how=kwargs['how'], left_on=left_cols,
right_on=right_cols, suffixes=dsuffixes))
return outDf
|
python
|
def mutating_join(*args, **kwargs):
""" generic function for mutating dplyr-style joins
"""
# candidate for improvement
left = args[0]
right = args[1]
if 'by' in kwargs:
left_cols, right_cols = get_join_cols(kwargs['by'])
else:
left_cols, right_cols = None, None
if 'suffixes' in kwargs:
dsuffixes = kwargs['suffixes']
else:
dsuffixes = ('_x', '_y')
if left._grouped_on:
outDf = (DplyFrame((left >> ungroup())
.merge(right, how=kwargs['how'], left_on=left_cols,
right_on=right_cols, suffixes=dsuffixes))
.regroup(left._grouped_on))
else:
outDf = DplyFrame(left.merge(right, how=kwargs['how'], left_on=left_cols,
right_on=right_cols, suffixes=dsuffixes))
return outDf
|
[
"def",
"mutating_join",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# candidate for improvement",
"left",
"=",
"args",
"[",
"0",
"]",
"right",
"=",
"args",
"[",
"1",
"]",
"if",
"'by'",
"in",
"kwargs",
":",
"left_cols",
",",
"right_cols",
"=",
"get_join_cols",
"(",
"kwargs",
"[",
"'by'",
"]",
")",
"else",
":",
"left_cols",
",",
"right_cols",
"=",
"None",
",",
"None",
"if",
"'suffixes'",
"in",
"kwargs",
":",
"dsuffixes",
"=",
"kwargs",
"[",
"'suffixes'",
"]",
"else",
":",
"dsuffixes",
"=",
"(",
"'_x'",
",",
"'_y'",
")",
"if",
"left",
".",
"_grouped_on",
":",
"outDf",
"=",
"(",
"DplyFrame",
"(",
"(",
"left",
">>",
"ungroup",
"(",
")",
")",
".",
"merge",
"(",
"right",
",",
"how",
"=",
"kwargs",
"[",
"'how'",
"]",
",",
"left_on",
"=",
"left_cols",
",",
"right_on",
"=",
"right_cols",
",",
"suffixes",
"=",
"dsuffixes",
")",
")",
".",
"regroup",
"(",
"left",
".",
"_grouped_on",
")",
")",
"else",
":",
"outDf",
"=",
"DplyFrame",
"(",
"left",
".",
"merge",
"(",
"right",
",",
"how",
"=",
"kwargs",
"[",
"'how'",
"]",
",",
"left_on",
"=",
"left_cols",
",",
"right_on",
"=",
"right_cols",
",",
"suffixes",
"=",
"dsuffixes",
")",
")",
"return",
"outDf"
] |
generic function for mutating dplyr-style joins
|
[
"generic",
"function",
"for",
"mutating",
"dplyr",
"-",
"style",
"joins"
] |
09c2a5f4ca67221b2a59928366ca8274357f7234
|
https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L520-L542
|
9,593
|
mher/chartkick.py
|
chartkick/ext.py
|
ChartExtension._chart_support
|
def _chart_support(self, name, data, caller, **kwargs):
"template chart support function"
id = 'chart-%s' % next(self.id)
name = self._chart_class_name(name)
options = dict(self.environment.options)
options.update(name=name, id=id)
# jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys
if jinja2.__version__ >= '2.9':
kwargs = dict((k[4:], v) for (k, v) in kwargs.items())
else:
kwargs = dict((k[2:], v) for (k, v) in kwargs.items())
if self._library is None:
self._library = self.load_library()
id = kwargs.get('id', '')
library = self._library.get(id, {})
# apply options from a tag
library.update(kwargs.get('library', {}))
# apply options from chartkick.json
kwargs.update(library=library)
options.update(kwargs)
return CHART_HTML.format(data=data, options=json.dumps(kwargs),
**options)
|
python
|
def _chart_support(self, name, data, caller, **kwargs):
"template chart support function"
id = 'chart-%s' % next(self.id)
name = self._chart_class_name(name)
options = dict(self.environment.options)
options.update(name=name, id=id)
# jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys
if jinja2.__version__ >= '2.9':
kwargs = dict((k[4:], v) for (k, v) in kwargs.items())
else:
kwargs = dict((k[2:], v) for (k, v) in kwargs.items())
if self._library is None:
self._library = self.load_library()
id = kwargs.get('id', '')
library = self._library.get(id, {})
# apply options from a tag
library.update(kwargs.get('library', {}))
# apply options from chartkick.json
kwargs.update(library=library)
options.update(kwargs)
return CHART_HTML.format(data=data, options=json.dumps(kwargs),
**options)
|
[
"def",
"_chart_support",
"(",
"self",
",",
"name",
",",
"data",
",",
"caller",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"'chart-%s'",
"%",
"next",
"(",
"self",
".",
"id",
")",
"name",
"=",
"self",
".",
"_chart_class_name",
"(",
"name",
")",
"options",
"=",
"dict",
"(",
"self",
".",
"environment",
".",
"options",
")",
"options",
".",
"update",
"(",
"name",
"=",
"name",
",",
"id",
"=",
"id",
")",
"# jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys",
"if",
"jinja2",
".",
"__version__",
">=",
"'2.9'",
":",
"kwargs",
"=",
"dict",
"(",
"(",
"k",
"[",
"4",
":",
"]",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
"else",
":",
"kwargs",
"=",
"dict",
"(",
"(",
"k",
"[",
"2",
":",
"]",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
"if",
"self",
".",
"_library",
"is",
"None",
":",
"self",
".",
"_library",
"=",
"self",
".",
"load_library",
"(",
")",
"id",
"=",
"kwargs",
".",
"get",
"(",
"'id'",
",",
"''",
")",
"library",
"=",
"self",
".",
"_library",
".",
"get",
"(",
"id",
",",
"{",
"}",
")",
"# apply options from a tag",
"library",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'library'",
",",
"{",
"}",
")",
")",
"# apply options from chartkick.json",
"kwargs",
".",
"update",
"(",
"library",
"=",
"library",
")",
"options",
".",
"update",
"(",
"kwargs",
")",
"return",
"CHART_HTML",
".",
"format",
"(",
"data",
"=",
"data",
",",
"options",
"=",
"json",
".",
"dumps",
"(",
"kwargs",
")",
",",
"*",
"*",
"options",
")"
] |
template chart support function
|
[
"template",
"chart",
"support",
"function"
] |
3411f36a069560fe1ba218e0a35f68c413332f63
|
https://github.com/mher/chartkick.py/blob/3411f36a069560fe1ba218e0a35f68c413332f63/chartkick/ext.py#L63-L88
|
9,594
|
mher/chartkick.py
|
chartkick/ext.py
|
ChartExtension.load_library
|
def load_library(self):
"loads configuration options"
try:
filename = self.environment.get_template('chartkick.json').filename
except TemplateNotFound:
return {}
else:
options = Options()
options.load(filename)
return options
|
python
|
def load_library(self):
"loads configuration options"
try:
filename = self.environment.get_template('chartkick.json').filename
except TemplateNotFound:
return {}
else:
options = Options()
options.load(filename)
return options
|
[
"def",
"load_library",
"(",
"self",
")",
":",
"try",
":",
"filename",
"=",
"self",
".",
"environment",
".",
"get_template",
"(",
"'chartkick.json'",
")",
".",
"filename",
"except",
"TemplateNotFound",
":",
"return",
"{",
"}",
"else",
":",
"options",
"=",
"Options",
"(",
")",
"options",
".",
"load",
"(",
"filename",
")",
"return",
"options"
] |
loads configuration options
|
[
"loads",
"configuration",
"options"
] |
3411f36a069560fe1ba218e0a35f68c413332f63
|
https://github.com/mher/chartkick.py/blob/3411f36a069560fe1ba218e0a35f68c413332f63/chartkick/ext.py#L94-L103
|
9,595
|
mher/chartkick.py
|
chartkick/__init__.py
|
js
|
def js():
"returns home directory of js"
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'js')
|
python
|
def js():
"returns home directory of js"
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'js')
|
[
"def",
"js",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'js'",
")"
] |
returns home directory of js
|
[
"returns",
"home",
"directory",
"of",
"js"
] |
3411f36a069560fe1ba218e0a35f68c413332f63
|
https://github.com/mher/chartkick.py/blob/3411f36a069560fe1ba218e0a35f68c413332f63/chartkick/__init__.py#L8-L10
|
9,596
|
mher/chartkick.py
|
chartkick/templatetags/chartkick.py
|
parse_options
|
def parse_options(source):
"""parses chart tag options"""
options = {}
tokens = [t.strip() for t in source.split('=')]
name = tokens[0]
for token in tokens[1:-1]:
value, next_name = token.rsplit(' ', 1)
options[name.strip()] = value
name = next_name
options[name.strip()] = tokens[-1].strip()
return options
|
python
|
def parse_options(source):
"""parses chart tag options"""
options = {}
tokens = [t.strip() for t in source.split('=')]
name = tokens[0]
for token in tokens[1:-1]:
value, next_name = token.rsplit(' ', 1)
options[name.strip()] = value
name = next_name
options[name.strip()] = tokens[-1].strip()
return options
|
[
"def",
"parse_options",
"(",
"source",
")",
":",
"options",
"=",
"{",
"}",
"tokens",
"=",
"[",
"t",
".",
"strip",
"(",
")",
"for",
"t",
"in",
"source",
".",
"split",
"(",
"'='",
")",
"]",
"name",
"=",
"tokens",
"[",
"0",
"]",
"for",
"token",
"in",
"tokens",
"[",
"1",
":",
"-",
"1",
"]",
":",
"value",
",",
"next_name",
"=",
"token",
".",
"rsplit",
"(",
"' '",
",",
"1",
")",
"options",
"[",
"name",
".",
"strip",
"(",
")",
"]",
"=",
"value",
"name",
"=",
"next_name",
"options",
"[",
"name",
".",
"strip",
"(",
")",
"]",
"=",
"tokens",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"options"
] |
parses chart tag options
|
[
"parses",
"chart",
"tag",
"options"
] |
3411f36a069560fe1ba218e0a35f68c413332f63
|
https://github.com/mher/chartkick.py/blob/3411f36a069560fe1ba218e0a35f68c413332f63/chartkick/templatetags/chartkick.py#L91-L102
|
9,597
|
BerkeleyAutomation/autolab_core
|
autolab_core/rigid_transformations.py
|
RigidTransform.copy
|
def copy(self):
"""Returns a copy of the RigidTransform.
Returns
-------
:obj:`RigidTransform`
A deep copy of the RigidTransform.
"""
return RigidTransform(np.copy(self.rotation), np.copy(self.translation), self.from_frame, self.to_frame)
|
python
|
def copy(self):
"""Returns a copy of the RigidTransform.
Returns
-------
:obj:`RigidTransform`
A deep copy of the RigidTransform.
"""
return RigidTransform(np.copy(self.rotation), np.copy(self.translation), self.from_frame, self.to_frame)
|
[
"def",
"copy",
"(",
"self",
")",
":",
"return",
"RigidTransform",
"(",
"np",
".",
"copy",
"(",
"self",
".",
"rotation",
")",
",",
"np",
".",
"copy",
"(",
"self",
".",
"translation",
")",
",",
"self",
".",
"from_frame",
",",
"self",
".",
"to_frame",
")"
] |
Returns a copy of the RigidTransform.
Returns
-------
:obj:`RigidTransform`
A deep copy of the RigidTransform.
|
[
"Returns",
"a",
"copy",
"of",
"the",
"RigidTransform",
"."
] |
8f3813f6401972868cc5e3981ba1b4382d4418d5
|
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L81-L89
|
9,598
|
BerkeleyAutomation/autolab_core
|
autolab_core/rigid_transformations.py
|
RigidTransform._check_valid_rotation
|
def _check_valid_rotation(self, rotation):
"""Checks that the given rotation matrix is valid.
"""
if not isinstance(rotation, np.ndarray) or not np.issubdtype(rotation.dtype, np.number):
raise ValueError('Rotation must be specified as numeric numpy array')
if len(rotation.shape) != 2 or rotation.shape[0] != 3 or rotation.shape[1] != 3:
raise ValueError('Rotation must be specified as a 3x3 ndarray')
if np.abs(np.linalg.det(rotation) - 1.0) > 1e-3:
raise ValueError('Illegal rotation. Must have determinant == 1.0')
|
python
|
def _check_valid_rotation(self, rotation):
"""Checks that the given rotation matrix is valid.
"""
if not isinstance(rotation, np.ndarray) or not np.issubdtype(rotation.dtype, np.number):
raise ValueError('Rotation must be specified as numeric numpy array')
if len(rotation.shape) != 2 or rotation.shape[0] != 3 or rotation.shape[1] != 3:
raise ValueError('Rotation must be specified as a 3x3 ndarray')
if np.abs(np.linalg.det(rotation) - 1.0) > 1e-3:
raise ValueError('Illegal rotation. Must have determinant == 1.0')
|
[
"def",
"_check_valid_rotation",
"(",
"self",
",",
"rotation",
")",
":",
"if",
"not",
"isinstance",
"(",
"rotation",
",",
"np",
".",
"ndarray",
")",
"or",
"not",
"np",
".",
"issubdtype",
"(",
"rotation",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"raise",
"ValueError",
"(",
"'Rotation must be specified as numeric numpy array'",
")",
"if",
"len",
"(",
"rotation",
".",
"shape",
")",
"!=",
"2",
"or",
"rotation",
".",
"shape",
"[",
"0",
"]",
"!=",
"3",
"or",
"rotation",
".",
"shape",
"[",
"1",
"]",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Rotation must be specified as a 3x3 ndarray'",
")",
"if",
"np",
".",
"abs",
"(",
"np",
".",
"linalg",
".",
"det",
"(",
"rotation",
")",
"-",
"1.0",
")",
">",
"1e-3",
":",
"raise",
"ValueError",
"(",
"'Illegal rotation. Must have determinant == 1.0'",
")"
] |
Checks that the given rotation matrix is valid.
|
[
"Checks",
"that",
"the",
"given",
"rotation",
"matrix",
"is",
"valid",
"."
] |
8f3813f6401972868cc5e3981ba1b4382d4418d5
|
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L91-L101
|
9,599
|
BerkeleyAutomation/autolab_core
|
autolab_core/rigid_transformations.py
|
RigidTransform._check_valid_translation
|
def _check_valid_translation(self, translation):
"""Checks that the translation vector is valid.
"""
if not isinstance(translation, np.ndarray) or not np.issubdtype(translation.dtype, np.number):
raise ValueError('Translation must be specified as numeric numpy array')
t = translation.squeeze()
if len(t.shape) != 1 or t.shape[0] != 3:
raise ValueError('Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray')
|
python
|
def _check_valid_translation(self, translation):
"""Checks that the translation vector is valid.
"""
if not isinstance(translation, np.ndarray) or not np.issubdtype(translation.dtype, np.number):
raise ValueError('Translation must be specified as numeric numpy array')
t = translation.squeeze()
if len(t.shape) != 1 or t.shape[0] != 3:
raise ValueError('Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray')
|
[
"def",
"_check_valid_translation",
"(",
"self",
",",
"translation",
")",
":",
"if",
"not",
"isinstance",
"(",
"translation",
",",
"np",
".",
"ndarray",
")",
"or",
"not",
"np",
".",
"issubdtype",
"(",
"translation",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"raise",
"ValueError",
"(",
"'Translation must be specified as numeric numpy array'",
")",
"t",
"=",
"translation",
".",
"squeeze",
"(",
")",
"if",
"len",
"(",
"t",
".",
"shape",
")",
"!=",
"1",
"or",
"t",
".",
"shape",
"[",
"0",
"]",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray'",
")"
] |
Checks that the translation vector is valid.
|
[
"Checks",
"that",
"the",
"translation",
"vector",
"is",
"valid",
"."
] |
8f3813f6401972868cc5e3981ba1b4382d4418d5
|
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L103-L111
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.