code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def populate(self, priority, address, rtr, data):
    """Decode a raw message payload into the error-counter fields.

    :param priority: message priority; validated by ``needs_low_priority``
    :param address: source address of the message (not used here)
    :param rtr: remote-transmission-request flag; must be unset
        (validated by ``needs_no_rtr``)
    :param data: raw payload bytes; length validated against 3 by
        ``needs_data``
    :return: None
    """
    assert isinstance(data, bytes)
    self.needs_low_priority(priority)
    self.needs_no_rtr(rtr)
    self.needs_data(data, 3)
    # Byte layout: [0] transmit errors, [1] receive errors, [2] bus-off count.
    self.transmit_error_counter = data[0]
    self.receive_error_counter = data[1]
    self.bus_off_counter = data[2]
def toimage(self, width=None, height=None):
    '''Return the current scene as a PIL Image.

    :param width: target image width in pixels; if either *width* or
        *height* is None, the widget's current size is used instead.
    :param height: target image height in pixels.
    :return: a ``PIL.Image`` in RGBA mode.

    **Example**

    You can build your molecular viewer as usual and dump an image
    at any resolution supported by the video card (up to the
    memory limits)::

        v = QtViewer()

        # Add the renderers
        v.add_renderer(...)

        # Add post processing effects
        v.add_post_processing(...)

        # Move the camera
        v.widget.camera.autozoom(...)
        v.widget.camera.orbit_x(...)
        v.widget.camera.orbit_y(...)

        # Save the image
        image = v.widget.toimage(1024, 768)
        image.save("mol.png")

    .. seealso::

        https://pillow.readthedocs.org/en/latest/PIL.html#module-PIL.Image
    '''
    from .postprocessing import NoEffect
    # NoEffect passes the frame through unchanged but captures it into a
    # texture we can read back.
    effect = NoEffect(self)
    self.post_processing.append(effect)
    # NOTE(review): old size is saved but the restore calls below are
    # commented out, so a custom-size capture leaves the widget resized.
    oldwidth, oldheight = self.width(), self.height()
    #self.initializeGL()
    if None not in (width, height):
        self.resize(width, height)
        self.resizeGL(width, height)
    else:
        width = self.width()
        height = self.height()
    # Render one frame so the effect's texture holds the scene.
    self.paintGL()
    self.post_processing.remove(effect)
    coltex = effect.texture
    coltex.bind()
    glActiveTexture(GL_TEXTURE0)
    data = glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE)
    # stride=0, orientation=-1: GL framebuffers are bottom-up, PIL images
    # are top-down, so the buffer is flipped vertically on import.
    image = pil_Image.frombuffer('RGBA', (width, height), data, 'raw', 'RGBA', 0, -1)
    #self.resize(oldwidth, oldheight)
    #self.resizeGL(oldwidth, oldheight)
    return image
def get_new_document(  # noqa: C901
        include_wdom_js: bool = True,
        include_skeleton: bool = False,
        include_normalizecss: bool = False,
        autoreload: bool = None,
        reload_wait: float = None,
        log_level: Union[int, str] = None,
        log_prefix: str = None,
        log_console: bool = False,
        ws_url: str = None,
        message_wait: float = None,
        document_factory: Callable[..., Document] = WdomDocument,
        **kwargs: Any) -> Document:
    """Create new :class:`Document` object with options.

    :arg bool include_wdom_js: Include wdom.js file. Usually should be True.
    :arg bool include_skeleton: Include skelton.css.
    :arg bool include_normalizecss: Include normalize.css.
    :arg bool autoreload: Enable autoreload flag. This flag overwrites
        ``--debug`` flag, which automatically enables autoreload.
    :arg float reload_wait: Seconds to wait until reload when autoreload is
        enabled.
    :arg str log_level: Log level string, chosen from DEBUG, INFO, WARN, ERROR.
        Integer values are also acceptable like ``logging.INFO``. By default
        use ``wdom.config.options.log_level``, which default is ``INFO``.
    :arg str log_prefix: Prefix of log outputs.
    :arg bool log_console: Flag to show wdom log on browser console.
    :arg str ws_url: URL string to the ws url.
        Default: ``ws://localhost:8888/wdom_ws``.
    :arg float message_wait: Duration (seconds) to send WS messages.
    :arg Callable document_factory: Factory function/class to create Document
        object.
    :rtype: Document
    """
    document = document_factory(
        autoreload=autoreload,
        reload_wait=reload_wait,
        **kwargs
    )
    # Fall back to configured defaults when the caller did not override.
    # NOTE(review): docstring says default comes from
    # ``wdom.config.options.log_level`` but the code reads
    # ``config.logging`` — confirm which is authoritative.
    if log_level is None:
        log_level = config.logging
    if message_wait is None:
        message_wait = config.message_wait
    # Inject JS globals that configure the client-side wdom runtime.
    log_script = []
    log_script.append('var WDOM_MESSAGE_WAIT = {}'.format(message_wait))
    if isinstance(log_level, str):
        log_script.append('var WDOM_LOG_LEVEL = \'{}\''.format(log_level))
    elif isinstance(log_level, int):
        log_script.append('var WDOM_LOG_LEVEL = {}'.format(log_level))
    if log_prefix:
        log_script.append('var WDOM_LOG_PREFIX = \'{}\''.format(log_prefix))
    if log_console:
        log_script.append('var WDOM_LOG_CONSOLE = true')
    if log_script:
        _s = Script(parent=document.head)
        _s.textContent = '\n{}\n'.format('\n'.join(log_script))
    if ws_url:
        _s = Script(parent=document.head)
        _s.textContent = '\nvar WDOM_WS_URL = \'{}\'\n'.format(ws_url)
    if include_wdom_js:
        document.add_jsfile_head('_static/js/wdom.js')
    return document
def _group_filter_values(seg, filter_indices, ms_per_input):
"""
Takes a list of 1s and 0s and returns a list of tuples of the form:
['y/n', timestamp].
"""
ret = []
for filter_value, (_segment, timestamp) in zip(filter_indices, seg.generate_frames_as_segments(ms_per_input)):
if filter_value == 1:
if len(ret) > 0 and ret[-1][0] == 'n':
ret.append(['y', timestamp]) # The last one was different, so we create a new one
elif len(ret) > 0 and ret[-1][0] == 'y':
ret[-1][1] = timestamp # The last one was the same as this one, so just update the timestamp
else:
ret.append(['y', timestamp]) # This is the first one
else:
if len(ret) > 0 and ret[-1][0] == 'n':
ret[-1][1] = timestamp
elif len(ret) > 0 and ret[-1][0] == 'y':
ret.append(['n', timestamp])
else:
ret.append(['n', timestamp])
return ret | Takes a list of 1s and 0s and returns a list of tuples of the form:
['y/n', timestamp]. |
def _remap_key(key):
    """Return *key* with canonical casing when it is a known parameter."""
    if key in KNOWN_PARAMS:
        # Already canonically cased.
        return key
    lowered = key.lower()
    if lowered in known_params:
        # Map the lower-cased spelling back to its canonical form.
        return KNOWN_PARAMS[known_params.index(lowered)]
    return key
def _instruction_list(self, filters):
"""Generates the instructions for a bot and its filters.
Note:
The guidance for each filter is generated by combining the
docstrings of the predicate filter and resulting dispatch
function with a single space between. The class's
:py:attr:`INSTRUCTIONS` and the default help command are
added.
Arguments:
filters (:py:class:`list`): The filters to apply to incoming
messages.
Returns:
:py:class:`str`: The bot's instructions.
"""
return '\n\n'.join([
self.INSTRUCTIONS.strip(),
'*Supported methods:*',
'If you send "@{}: help" to me I reply with these '
'instructions.'.format(self.user),
'If you send "@{}: version" to me I reply with my current '
'version.'.format(self.user),
] + [filter.description() for filter in filters]) | Generates the instructions for a bot and its filters.
Note:
The guidance for each filter is generated by combining the
docstrings of the predicate filter and resulting dispatch
function with a single space between. The class's
:py:attr:`INSTRUCTIONS` and the default help command are
added.
Arguments:
filters (:py:class:`list`): The filters to apply to incoming
messages.
Returns:
:py:class:`str`: The bot's instructions. |
def get_ancestors_through_subont(self, go_term, relations):
    """Return the ancestors of *go_term*, filtered by *relations*, computed
    on a reduced subontology.

    First builds the set of *all* ancestors of ``go_term`` (regardless of
    relation), then restricts the ontology to that smaller graph before
    applying the relation filter — filtering the small graph is faster
    than filtering the whole ontology.
    """
    unfiltered_ancestors = self.ontology.ancestors(go_term, reflexive=True)
    reduced = self.ontology.subontology(unfiltered_ancestors)
    return reduced.ancestors(go_term, relations)
def stddev(values, meanval=None):  # from AI: A Modern Approach
    """Return the sample standard deviation of *values*.

    Uses Bessel's correction (divisor ``len(values) - 1``), so *values*
    must contain at least two items.

    :param values: sequence of numbers
    :param meanval: the mean of *values* if already known; computed with
        ``mean(values)`` otherwise
    :return: the sample standard deviation as a float
    """
    if meanval is None:  # 'is None', not '== None' (PEP 8)
        meanval = mean(values)
    # Generator expression avoids materialising the squared deviations.
    return math.sqrt(sum((x - meanval) ** 2 for x in values) / (len(values) - 1))
def extend_service_volume(self, stack, service, volume, args):
    """Extend a storage volume.

    Adds storage-volume resources to the named service and mounts them
    in the deployed container.

    Args:
        - stack: name of the stack (service group) the service belongs to
        - service: service name
        - volume: storage volume name
        - args: request parameters (json), see
          http://kirk-docs.qiniu.com/apidocs/

    Returns:
        A tuple ``(<result>, <ResponseInfo>)``:
        - result: empty dict ``{}`` on success, otherwise
          ``{"error": "<errMsg string>"}``
        - ResponseInfo: response information for the request
    """
    url = '{0}/v3/stacks/{1}/services/{2}/volumes/{3}/extend'.format(self.host, stack, service, volume)
    return self.__post(url, args)
async def delete(query):
    """Perform DELETE query asynchronously. Returns number of rows deleted.

    :param query: a ``peewee.Delete`` instance (enforced by assertion)
    :return: number of rows deleted (the cursor's rowcount)
    """
    assert isinstance(query, peewee.Delete),\
        ("Error, trying to run delete coroutine"
         "with wrong query class %s" % str(query))
    # NOTE(review): the cursor is released only on the success path; an
    # exception between execute and release would leak it — verify whether
    # _execute_query_async guarantees cleanup.
    cursor = await _execute_query_async(query)
    rowcount = cursor.rowcount
    await cursor.release()
    return rowcount
def register(self, name, *slots):
    """Register a signal under *name* and connect any given slots to it.

    :param name: the signal to register
    :param slots: callables to connect to the (possibly pre-existing) signal
    """
    # Deliberately avoid dict.setdefault: it would construct a Signal()
    # even when one is already registered for this name.
    if name not in self:
        self[name] = Signal()
    signal = self[name]
    for slot in slots:
        signal.connect(slot)
def thread_details(io_handler, thread_id, max_depth=0):
    """
    Prints details about the thread with the given ID (not its name)

    :param io_handler: output handler providing ``write`` / ``write_line``
    :param thread_id: numeric identifier of the thread to inspect
    :param max_depth: maximum number of stack frames to print;
        values < 1 (or non-numeric) mean "unlimited"
    """
    # Normalize maximum depth: anything unusable becomes None (unlimited).
    try:
        max_depth = int(max_depth)
        if max_depth < 1:
            max_depth = None
    except (ValueError, TypeError):
        max_depth = None
    # pylint: disable=W0212
    try:
        # Get the stack
        thread_id = int(thread_id)
        stack = sys._current_frames()[thread_id]
    except KeyError:
        io_handler.write_line("Unknown thread ID: {0}", thread_id)
    except ValueError:
        io_handler.write_line("Invalid thread ID: {0}", thread_id)
    except AttributeError:
        io_handler.write_line("sys._current_frames() is not available.")
    else:
        # Get the name from the private threading registry.
        try:
            name = threading._active[thread_id].name
        except KeyError:
            name = "<unknown>"
        lines = [
            "Thread ID: {0} - Name: {1}".format(thread_id, name),
            "Stack trace:",
        ]
        trace_lines = []
        depth = 0
        frame = stack
        # Walk frames from innermost outward, honouring max_depth.
        while frame is not None and (
            max_depth is None or depth < max_depth
        ):
            # Store the line information
            trace_lines.append(format_frame_info(frame))
            # Previous frame...
            frame = frame.f_back
            depth += 1
        # Reverse so the trace reads outermost-first, like a traceback.
        trace_lines.reverse()
        # Add them to the printed lines
        lines.extend(trace_lines)
        lines.append("")
        io_handler.write("\n".join(lines))
def street(random=random, *args, **kwargs):
    """Produce something that sounds like a street name.

    Chooses one of several templates and fills it with randomly generated
    nouns, adjectives, verbs, directions and last names.

    # NOTE(review): *args/**kwargs (e.g. capitalize/slugify seen in the
    # original examples) are accepted but unused here — presumably handled
    # by a wrapping decorator; verify.
    """
    templates = [
        "{noun} {street_type}",
        "{adjective}{verb} {street_type}",
        "{direction} {adjective}{verb} {street_type}",
        "{direction} {noun} {street_type}",
        "{direction} {lastname} {street_type}",
    ]
    chosen = random.choice(templates)
    return chosen.format(
        noun=noun(random=random),
        lastname=lastname(random=random),
        direction=direction(random=random),
        adjective=adjective(random=random),
        verb=verb(random=random),
        street_type=random.choice(streets),
    )
def check_running(self):
    '''
    Check if a pid file exists and if it is associated with
    a running process.

    :return: True when the pid file points at a live daemonized process
        that is not our own parent, False otherwise.
    '''
    if self.check_pidfile():
        pid = self.get_pidfile()
        if not salt.utils.platform.is_windows():
            # Comparing against os.getppid() avoids treating our own
            # parent process as an already-running daemon.
            if self.check_pidfile() and self.is_daemonized(pid) and os.getppid() != pid:
                return True
        else:
            # We have no os.getppid() on Windows. Use salt.utils.win_functions.get_parent_pid
            if self.check_pidfile() and self.is_daemonized(pid) and salt.utils.win_functions.get_parent_pid() != pid:
                return True
    return False
def context(self):
    """ Convenient access to shared context """
    if self._context is None:
        # Fall back to the executor-wide shared context; reading it
        # without holding a lock is potentially racy, hence the warning.
        logger.warning("Using shared context without a lock")
        return self._executor._shared_context
    return self._context
def _init_data_line(self, fnc, lnum, line):
    """Process Data line.

    :param fnc: callback invoked with the converted field list
    :param lnum: zero-based line number (used in the error message)
    :param line: raw text line, split on ``self.sep``
    :raises Exception: when the line has fewer columns than the user requested
    """
    fld = re.split(self.sep, line)
    # Lines may contain different numbers of items.
    # The line should have all columns requested by the user.
    if self.usr_max_idx < len(fld):
        self.convert_ints_floats(fld)
        fnc(fld)
    else:
        # Diagnostic dump before raising.
        # NOTE(review): this loop rebinds 'fld', so the "# ITEMS ON A LINE"
        # count below reflects the last enumerate tuple, not the original
        # column count — confirm whether that is intended.
        for fld in enumerate(zip(self.hdr2idx.keys(), fld)):
            print(fld)
        for hdr in self.hdrs_usr:
            print(hdr)
        print('# ITEMS ON A LINE:', len(fld))
        print('MAX USR IDX:', self.usr_max_idx)
        raise Exception("ERROR ON LINE {} IN {}".format(lnum+1, self.fin))
def get_stats_display(self, args=None, max_width=None):
"""Return a dict with all the information needed to display the stat.
key | description
----------------------------
display | Display the stat (True or False)
msgdict | Message to display (list of dict [{ 'msg': msg, 'decoration': decoration } ... ])
align | Message position
"""
display_curse = False
if hasattr(self, 'display_curse'):
display_curse = self.display_curse
if hasattr(self, 'align'):
align_curse = self._align
if max_width is not None:
ret = {'display': display_curse,
'msgdict': self.msg_curse(args, max_width=max_width),
'align': align_curse}
else:
ret = {'display': display_curse,
'msgdict': self.msg_curse(args),
'align': align_curse}
return ret | Return a dict with all the information needed to display the stat.
key | description
----------------------------
display | Display the stat (True or False)
msgdict | Message to display (list of dict [{ 'msg': msg, 'decoration': decoration } ... ])
align | Message position |
def process_LANGUAGE_CODE(self, language_code, data):
    '''
    Fix language code when set to non included default `en`
    and add the extra variables ``LANGUAGE_NAME`` and ``LANGUAGE_NAME_LOCAL``.

    :param language_code: Django LANGUAGE_CODE setting value
    :param data: mutable mapping the extra variables are written into
    :return: the (possibly rewritten) language code
    '''
    # Dirty hack to fix non included default: Django ships info for
    # 'en-us' but not for plain 'en'.
    language_code = 'en-us' if language_code == 'en' else language_code
    language = translation.get_language_info('en' if language_code == 'en-us' else language_code)
    # Only expose each variable when the JS_CONTEXT settings allow it.
    if not settings.JS_CONTEXT or 'LANGUAGE_NAME' in settings.JS_CONTEXT \
            or (settings.JS_CONTEXT_EXCLUDE and 'LANGUAGE_NAME' in settings.JS_CONTEXT_EXCLUDE):
        data['LANGUAGE_NAME'] = language['name']
    if not settings.JS_CONTEXT or 'LANGUAGE_NAME_LOCAL' in settings.JS_CONTEXT \
            or (settings.JS_CONTEXT_EXCLUDE and 'LANGUAGE_NAME_LOCAL' in settings.JS_CONTEXT_EXCLUDE):
        data['LANGUAGE_NAME_LOCAL'] = language['name_local']
    return language_code
def maximum_likelihood_estimator(self, data, states):
    """
    Fit using MLE method.

    Parameters
    ----------
    data: pandas.DataFrame or 2D array
        Dataframe of values containing samples from the conditional distribution, (Y|X)
        and corresponding X values.

    states: All the input states that are jointly gaussian.

    Returns
    -------
    beta, variance (tuple): Returns estimated betas and the variance.
    """
    x_df = pd.DataFrame(data, columns=states)
    x_len = len(self.evidence)
    # Symbolic column names b1_coef..bN_coef for the normal-equation matrix.
    sym_coefs = []
    for i in range(0, x_len):
        sym_coefs.append('b' + str(i + 1) + '_coef')
    sum_x = x_df.sum()
    x = [sum_x['(Y|X)']]
    coef_matrix = pd.DataFrame(columns=sym_coefs)
    # First we compute just the coefficients of beta_1 to beta_N.
    # Later we compute beta_0 and append it.
    for i in range(0, x_len):
        x.append(self.sum_of_product(x_df['(Y|X)'], x_df[self.evidence[i]]))
        for j in range(0, x_len):
            coef_matrix.loc[i, sym_coefs[j]] = self.sum_of_product(
                x_df[self.evidence[i]], x_df[self.evidence[j]])
    coef_matrix.insert(0, 'b0_coef', sum_x[self.evidence].values)
    # Prepend the beta_0 row: [n, sum(x_1), ..., sum(x_N)].
    row_1 = np.append([len(x_df)], sum_x[self.evidence].values)
    coef_matrix.loc[-1] = row_1
    coef_matrix.index = coef_matrix.index + 1  # shifting index
    coef_matrix.sort_index(inplace=True)
    # NOTE(review): np.matrix is deprecated by NumPy; consider np.asarray
    # here (verify downstream matmul/inv behaviour before changing).
    beta_coef_matrix = np.matrix(coef_matrix.values, dtype='float')
    # Solve the normal equations: beta = (X^T X)^-1 X^T y.
    coef_inv = np.linalg.inv(beta_coef_matrix)
    beta_est = np.array(np.matmul(coef_inv, np.transpose(x)))
    self.beta = beta_est[0]
    # Estimate the conditional variance from the fitted betas.
    sigma_est = 0
    x_len_df = len(x_df)
    for i in range(0, x_len):
        for j in range(0, x_len):
            sigma_est += self.beta[i + 1] * self.beta[j + 1] * (self.sum_of_product(
                x_df[self.evidence[i]], x_df[self.evidence[j]]) / x_len_df - np.mean(x_df[self.evidence[i]]) * np.mean(x_df[self.evidence[j]]))
    sigma_est = np.sqrt(
        self.sum_of_product(
            x_df['(Y|X)'],
            x_df['(Y|X)']) /
        x_len_df -
        np.mean(
            x_df['(Y|X)']) *
        np.mean(
            x_df['(Y|X)']) -
        sigma_est)
    self.sigma_yx = sigma_est
    return self.beta, self.sigma_yx
def from_series(series):
    """
    Deserialize a PercentRankTransform from the given pandas.Series, as
    returned by `to_series()`.

    Parameters
    ----------
    series : pandas.Series

    Returns
    -------
    PercentRankTransform
    """
    transform = PercentRankTransform()
    transform.cdf = series.values
    # The outermost two index entries are dropped — presumably sentinel
    # values added by to_series(); verify against that method.
    transform.bin_edges = series.index.values[1:-1]
    return transform
def _parse_known_pattern(self, pattern: str) -> List[str]:
"""
Expand pattern if identified as a directory and return found sub packages
"""
if pattern.endswith(os.path.sep):
patterns = [
filename
for filename in os.listdir(pattern)
if os.path.isdir(os.path.join(pattern, filename))
]
else:
patterns = [pattern]
return patterns | Expand pattern if identified as a directory and return found sub packages |
def create_container_student(self, parent_container_id, environment, network_grading, mem_limit, student_path,
                             socket_path, systemfiles_path, course_common_student_path):
    """
    Creates a student container

    :param parent_container_id: id of the "parent" container
    :param environment: env to start (name/id of a docker image)
    :param network_grading: boolean to indicate if the network should be enabled in the container or not (share the parent stack)
    :param mem_limit: in Mo
    :param student_path: path to the task directory that will be mounted in the container
    :param socket_path: path to the socket that will be mounted in the container
    :param systemfiles_path: path to the systemfiles folder containing files that can override partially some defined system files
    :param course_common_student_path: path to the course-wide common student folder, mounted read-only
    :return: the container id
    """
    # Docker requires absolute host paths for bind mounts.
    student_path = os.path.abspath(student_path)
    socket_path = os.path.abspath(socket_path)
    systemfiles_path = os.path.abspath(systemfiles_path)
    course_common_student_path = os.path.abspath(course_common_student_path)
    response = self._docker.containers.create(
        environment,
        stdin_open=True,
        command="_run_student_intern",
        mem_limit=str(mem_limit) + "M",
        # memswap == mem limit and swappiness 0: no swap for the student.
        memswap_limit=str(mem_limit) + "M",
        mem_swappiness=0,
        oom_kill_disable=True,
        # Either no network at all, or share the parent container's stack.
        network_mode=('none' if not network_grading else ('container:' + parent_container_id)),
        volumes={
            student_path: {'bind': '/task/student'},
            socket_path: {'bind': '/__parent.sock'},
            systemfiles_path: {'bind': '/task/systemfiles', 'mode': 'ro'},
            course_common_student_path: {'bind': '/course/common/student', 'mode': 'ro'}
        }
    )
    return response.id
def UpdateFlows(self,
                client_id_flow_id_pairs,
                pending_termination=db.Database.unchanged):
    """Updates flow objects in the database.

    :param client_id_flow_id_pairs: iterable of (client_id, flow_id) tuples
        identifying the flows to update
    :param pending_termination: value forwarded to ``UpdateFlow``; defaults
        to the sentinel meaning "leave unchanged"
    """
    for client_id, flow_id in client_id_flow_id_pairs:
        try:
            self.UpdateFlow(
                client_id, flow_id, pending_termination=pending_termination)
        except db.UnknownFlowError:
            # Unknown flows are skipped silently; the remaining pairs are
            # still processed.
            pass
def bundle_lambda(zipfile):
    """Write zipfile contents to file.

    :param zipfile: bytes of the zip archive to write (the name shadows the
        stdlib ``zipfile`` module; kept for interface compatibility)
    :return: exit_code (0 on success, 1 when no content was given)
    """
    # TODO have 'bundle.zip' as default config
    if not zipfile:
        return 1
    with open('bundle.zip', 'wb') as bundle_file:
        bundle_file.write(zipfile)
    log.info('Finished - a bundle.zip is waiting for you...')
    return 0
def parseLine(line, lineNumber=None):
    """Parse a single content line into its components.

    :param line: raw text line to parse
    :param lineNumber: optional line number, forwarded to ParseError
    :return: tuple ``(name, params, value, group)``
    :raises ParseError: if the line does not match the expected grammar
    """
    match = line_re.match(line)
    if match is None:
        raise ParseError("Failed to parse line: {0!s}".format(line), lineNumber)
    # Underscores are replaced with dash to work around Lotus Notes
    name = match.group('name').replace('_', '-')
    params = parseParams(match.group('params'))
    return (name, params, match.group('value'), match.group('group'))
def to_json_data(self, model_name=None):
    """
    Parameters
    ----------
    model_name: str, default None
        if given, will be used as external file directory base name

    Returns
    -------
    A dictionary of serialized data.
    """
    serialized = collections.OrderedDict()
    for key in self._data:
        serialized[key] = self.get_serialized_value(key, model_name=model_name)
    return serialized
def to_record(self):
    """Create a CertStore record from this TLSFileBundle

    Note: mutates ``self.record`` in place (sets the 'files' key) and
    returns it.
    """
    # Gather the TLS file attribute for each TLSFileType value, if set.
    tf_list = [getattr(self, k, None) for k in
               [_.value for _ in TLSFileType]]
    # If a cert isn't defined in this bundle, remove it
    tf_list = filter(lambda x: x, tf_list)
    files = {tf.file_type.value: tf.file_path for tf in tf_list}
    self.record['files'] = files
    return self.record
def _8bit_oper(op1, op2=None, reversed_=False):
    """ Returns pop sequence for 8 bits operands
    1st operand in H, 2nd operand in A (accumulator)

    For some operations (like comparisons), you can swap
    operands extraction by setting reversed_ = True

    Operand prefixes: '*' marks an indirect operand, '#' an immediate one,
    a leading '_' a global label; anything else is popped from the stack.
    """
    output = []
    if op2 is not None and reversed_:
        # Swap the extraction order of the two operands.
        op1, op2 = op2, op1
    # --- First operand -> A ---
    op = op1
    indirect = (op[0] == '*')
    if indirect:
        op = op[1:]
    immediate = (op[0] == '#')
    if immediate:
        op = op[1:]
    if is_int(op):
        op = int(op)
        if indirect:
            output.append('ld a, (%i)' % op)
        else:
            if op == 0:
                output.append('xor a')  # shorter/faster than 'ld a, 0'
            else:
                output.append('ld a, %i' % int8(op))
    else:
        if immediate:
            if indirect:
                output.append('ld a, (%s)' % op)
            else:
                output.append('ld a, %s' % op)
        elif op[0] == '_':
            if indirect:
                idx = 'bc' if reversed_ else 'hl'
                output.append('ld %s, (%s)' % (idx, op))  # can't use HL
                output.append('ld a, (%s)' % idx)
            else:
                output.append('ld a, (%s)' % op)
        else:
            # Stack operand (the immediate case is already handled above;
            # the original had an unreachable duplicate branch here).
            if indirect:
                idx = 'bc' if reversed_ else 'hl'
                output.append('pop %s' % idx)
                output.append('ld a, (%s)' % idx)
            else:
                output.append('pop af')
    if op2 is None:
        return output
    if not reversed_:
        # Extract the 2nd operand first, then prepend the code computed
        # above, so A is loaded last.
        tmp = output
        output = []
    # --- Second operand -> H ---
    op = op2
    indirect = (op[0] == '*')
    if indirect:
        op = op[1:]
    immediate = (op[0] == '#')
    if immediate:
        op = op[1:]
    if is_int(op):
        op = int(op)
        if indirect:
            output.append('ld hl, (%i - 1)' % op)
        else:
            output.append('ld h, %i' % int8(op))
    else:
        if immediate:
            if indirect:
                output.append('ld hl, %s' % op)
                output.append('ld h, (hl)')
            else:
                output.append('ld h, %s' % op)
        elif op[0] == '_':
            if indirect:
                output.append('ld hl, (%s)' % op)
                # Bug fix: the original emitted "'ld h, (hl)' % op", which
                # raises TypeError at runtime (no placeholder in the string).
                output.append('ld h, (hl)')
            else:
                output.append('ld hl, (%s - 1)' % op)
        else:
            output.append('pop hl')
            if indirect:
                output.append('ld h, (hl)')
    if not reversed_:
        output.extend(tmp)
    return output
def addSpecfile(self, specfiles, path):
    """Prepares the container for loading ``mrc`` files by adding specfile
    entries to ``self.info``. Use :func:`MsrunContainer.load()` afterwards
    to actually import the files

    :param specfiles: the name of an ms-run file or a list of names
    :type specfiles: str or [str, str, ...]
    :param path: filedirectory used for loading and saving ``mrc`` files
    """
    for specfile in aux.toList(specfiles):
        if specfile not in self.info:
            self._addSpecfile(specfile, path)
        else:
            # Already registered: warn instead of overwriting the entry.
            warntext = 'Error while calling "MsrunContainer.addSpecfile()"'\
                       ': "%s" is already present "MsrunContainer.info"'\
                       % (specfile, )
            warnings.warn(warntext)
def set_window_title(self):
    """Set window title.

    Builds the title from the Spyder/Python versions, appends debug-mode
    and custom-title markers, prefixes the active project path if any,
    then applies it to the window. Stores the project-less base title in
    ``self.base_title``.
    """
    # Dev builds include the Spyder version; releases show Python only.
    if DEV is not None:
        title = u"Spyder %s (Python %s.%s)" % (__version__,
                                               sys.version_info[0],
                                               sys.version_info[1])
    else:
        title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
                                            sys.version_info[1])
    if get_debug_level():
        title += u" [DEBUG MODE %d]" % get_debug_level()
    if self.window_title is not None:
        title += u' -- ' + to_text_string(self.window_title)
    if self.projects is not None:
        path = self.projects.get_active_project_path()
        if path:
            # Abbreviate the home directory for readability.
            path = path.replace(get_home_dir(), u'~')
            title = u'{0} - {1}'.format(path, title)
    self.base_title = title
    self.setWindowTitle(self.base_title)
def detect_number_of_threads():
    """
    DEPRECATED: use `_init_num_threads` instead.

    Determines the thread count from NUMEXPR_NUM_THREADS, then
    OMP_NUM_THREADS, then the detected core count, capped at MAX_THREADS.

    If this is modified, please update the note in: https://github.com/pydata/numexpr/wiki/Numexpr-Users-Guide
    """
    log.warning('Deprecated, use `init_num_threads` instead.')
    # Unset or non-numeric env vars raise ValueError and fall through to
    # the next source.
    try:
        nthreads = int(os.environ.get('NUMEXPR_NUM_THREADS', ''))
    except ValueError:
        try:
            nthreads = int(os.environ.get('OMP_NUM_THREADS', ''))
        except ValueError:
            nthreads = detect_number_of_cores()
    # Check that we don't surpass the MAX_THREADS in interpreter.cpp
    if nthreads > MAX_THREADS:
        nthreads = MAX_THREADS
    return nthreads
def validate_token(self, token):
    '''retrieve a subject based on a token. Valid means we return the
    participant's id; invalid means we return None.

    :param token: the token string to look up
    :return: the participant id, or None when no participant matches or
        the stored token is marked 'finished' or 'revoked'
    '''
    from expfactory.database.models import Participant
    p = Participant.query.filter(Participant.token == token).first()
    if p is not None:
        # Tokens suffixed with 'finished'/'revoked' are no longer valid.
        if p.token.endswith(('finished','revoked')):
            p = None
        else:
            p = p.id
    return p
def import_list(
        self,
        listName,
        pathToTaskpaperDoc
):
    """
    *import tasks from a reminder.app list into a given taskpaper document*

    **Key Arguments:**
        - ``listName`` -- the name of the reminders list
        - ``pathToTaskpaperDoc`` -- the path to the taskpaper document to import the tasks into

    **Usage:**
        The following will import tasks from a Reminder.app list into a
        taskpaper document. Tasks are added to any existing content in the
        taskpaper document, or if the document doesn't yet exist it will be
        created for you. Tasks are deleted from the reminders list once
        import is complete.

        .. code-block:: python

            r.import_list(
                listName="listname",
                pathToTaskpaperDoc="/path/to/my/doc.taskpaper"
            )

    :return: the tasks fetched from the reminders list
    """
    self.log.info('starting the ``import_list`` method')
    newTasks = self._get_tasks_from_reminder_list(listName)
    self._add_tasks_to_taskpaper(
        pathToTaskpaperDoc=pathToTaskpaperDoc,
        taskString=newTasks
    )
    # Only remove from Reminders once the tasks are safely written out.
    self._delete_reminders_from_list(
        listName=listName
    )
    self.log.info('completed the ``import_list`` method')
    return newTasks
def percentile(self, percentile, axis, inclusive=True):
    """Returns d-1 dimensional histogram containing percentile of values along axis

    if inclusive=True, will report bin center of first bin for which percentile% of data lies in or below the bin
             =False, ... data lies strictly below the bin

    10% percentile is calculated as: value at least 10% data is LOWER than
    """
    axis = self.get_axis_number(axis)
    # Shape of histogram
    s = self.histogram.shape
    # Shape of histogram after axis has been collapsed to 1
    s_collapsed = list(s)
    s_collapsed[axis] = 1
    # Shape of histogram with axis removed entirely.
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    s_removed = np.concatenate([s[:axis], s[axis + 1:]]).astype(int)
    # Using np.where here is too tricky, as it may not return a value for each "bin-columns"
    # First, get an array which has a minimum at the percentile-containing bins
    # The minimum may not be unique: if later bins are empty, they will not be
    ecdf = self.cumulative_density(axis).histogram
    if not inclusive:
        # "Strictly below": subtract each bin's own density from the ECDF.
        # (Bug fix: the original referenced ecdf before assignment here.)
        ecdf = ecdf - self.normalize(axis).histogram
    ecdf = np.nan_to_num(ecdf)  # Since we're relying on self-equality later
    x = ecdf - 2 * (ecdf >= percentile / 100)
    # We now want to get the location of the minimum
    # To ensure it is unique, add a very very very small monotonously increasing bit to x
    # Nobody will want 1e-9th percentiles, right? TODO
    sz = np.ones(len(s), dtype=int)
    sz[axis] = -1
    x += np.linspace(0, 1e-9, s[axis]).reshape(sz)
    # 1. Find the minimum along the axis
    # 2. Reshape to s_collapsed and perform == to get a mask
    # 3. Apply the mask to the bin centers along axis
    # 4. Unflatten with reshape
    result = self.all_axis_bin_centers(axis)[
        x == np.min(x, axis=axis).reshape(s_collapsed)
    ]
    result = result.reshape(s_removed)
    if self.dimensions == 2:
        new_hist = Hist1d
    else:
        new_hist = Histdd
    return new_hist.from_histogram(histogram=result,
                                   bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges),
                                   axis_names=self.axis_names_without(axis))
def handle_keywords(self, func, node, offset=0):
    '''
    Gather keywords to positional argument information.
    Assumes the named parameters exist; raises a KeyError otherwise.
    Returns a dict mapping positional index -> copied value expression for
    every argument slot that was supplied via keyword or must come from a
    default.
    '''
    # Map each formal parameter name (past `offset`) to its positional index.
    func_argument_names = {}
    for i, arg in enumerate(func.args.args[offset:]):
        assert isinstance(arg, ast.Name)
        func_argument_names[arg.id] = i
    nargs = len(func.args.args) - offset
    defaults = func.args.defaults
    # Positional slot -> value expression for each keyword used at the call.
    keywords = {func_argument_names[kw.arg]: kw.value
                for kw in node.keywords}
    # Pad the call's positional args with None placeholders up to the highest
    # keyword slot so every slot has an entry to replace below.
    # NOTE(review): assumes node.keywords is non-empty -- max() on an empty
    # dict would raise ValueError; confirm callers guarantee this.
    node.args.extend([None] * (1 + max(keywords.keys()) - len(node.args)))
    replacements = {}
    for index, arg in enumerate(node.args):
        if arg is None:
            if index in keywords:
                replacements[index] = deepcopy(keywords[index])
            else: # must be a default value
                replacements[index] = deepcopy(defaults[index - nargs])
    return replacements | Gather keywords to positional argument information
Assumes the named parameters exist; raises a KeyError otherwise |
def _load_entries(self, func, count, page=1, entries=None, **kwargs):
    """
    Load entries, recursing page by page until `count` entries are collected.
    :param function func: function (:meth:`.API._req_files` or
        :meth:`.API._req_search`) that returns entries
    :param int count: number of entries to load. This value should never
        be greater than self.count
    :param int page: page number (starting from 1)
    :param list entries: accumulator carried through recursive calls
    :return: list of loaded entries
    """
    if entries is None:
        entries = []
    per_page = self.max_entries_per_load
    res = func(offset=(page - 1) * per_page,
               limit=per_page,
               **kwargs)
    batch = list(res['data'][:count])
    # `count` must never exceed the total number of entries.
    count = min(count, self.count)
    if count <= per_page:
        # This page satisfies the remaining request.
        return entries + batch
    # More entries remain: recurse into the next page.
    return self._load_entries(
        func=func, count=count - per_page, page=page + 1,
        entries=entries + batch, **kwargs)
:param function func: function (:meth:`.API._req_files` or
:meth:`.API._req_search`) that returns entries
:param int count: number of entries to load. This value should never
be greater than self.count
:param int page: page number (starting from 1) |
def valuemap(f):
    """
    Decorator to help PEG functions handle value conversions.
    When the produced parser was built with a ``value`` keyword, its parse
    result's object is replaced (or transformed, if ``value`` is callable)
    before being returned.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Without a 'value' keyword, behave exactly like the wrapped function.
        if 'value' not in kwargs:
            return f(*args, **kwargs)
        val = kwargs.pop('value')
        inner = f(*args, **kwargs)
        def valued_inner(*inner_args, **inner_kwargs):
            s, obj, span = inner(*inner_args, **inner_kwargs)
            # A callable `value` transforms the parsed object; anything else
            # replaces it outright.
            converted = val(obj) if callable(val) else val
            return PegreResult(s, converted, span)
        return valued_inner
    return wrapper
def gone_assignments(self):
    '''
    Returns the list of past assignments the user did not submit for
    before the hard deadline.
    :return: Assignment queryset, newest hard deadline first.
    '''
    # Include only assignments with past hard deadline
    qs = Assignment.objects.filter(hard_deadline__lt=timezone.now())
    # Include only assignments from courses this user is registered for
    qs = qs.filter(course__in=self.user_courses())
    # Include only assignments this user has no submission for
    # NOTE(review): despite the comment above, no submission-related filter
    # is applied here -- confirm whether an exclusion filter is missing or
    # is performed by the caller.
    return qs.order_by('-hard_deadline')
before the hard deadline. |
def handle(self, *args, **options):
    """Run the management command."""
    self.stdout.write('Apply settings to index:')
    requested = options.get('model', None)
    for registered in get_registered_model():
        # When a model list was given, only process the named models.
        if requested and registered.__name__ not in requested:
            continue
        get_adapter(registered).set_settings()
        self.stdout.write('\t* {}'.format(registered.__name__))
def init_config(app):
    """Initialize configuration."""
    # Copy every CLASSIFIER_* constant from the defaults module unless the
    # application has already overridden it.
    classifier_keys = (name for name in dir(config)
                       if name.startswith('CLASSIFIER_'))
    for key in classifier_keys:
        app.config.setdefault(key, getattr(config, key))
def WriteSessionCompletion(self, aborted=False):
    """Writes session completion information.
    Args:
      aborted (Optional[bool]): True if the session was aborted.
    Raises:
      IOError: if the storage type is not supported or
          when the storage writer is closed.
      OSError: if the storage type is not supported or
          when the storage writer is closed.
    """
    self._RaiseIfNotWritable()
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
        raise IOError('Unsupported storage type.')
    # Record the aborted flag on the session before building the completion
    # object, so the completion reflects it.
    self._session.aborted = aborted
    session_completion = self._session.CreateSessionCompletion()
    self._storage_file.WriteSessionCompletion(session_completion)
Args:
aborted (Optional[bool]): True if the session was aborted.
Raises:
IOError: if the storage type is not supported or
when the storage writer is closed.
OSError: if the storage type is not supported or
when the storage writer is closed. |
def break_on_error(self, pid, errorCode):
    """
    Sets or clears the system breakpoint for a given Win32 error code.
    Use L{Process.is_system_defined_breakpoint} to tell if a breakpoint
    exception was caused by a system breakpoint or by the application
    itself (for example because of a failed assertion in the code).
    @note: This functionality is only available since Windows Server 2003.
        In 2003 it only breaks on error values set externally to the
        kernel32.dll library, but this was fixed in Windows Vista.
    @warn: This method will fail if the debug symbols for ntdll (kernel32
        in Windows 2003) are not present. For more information see:
        L{System.fix_symbol_store_path}.
    @see: U{http://www.nynaeve.net/?p=147}
    @type pid: int
    @param pid: Process ID.
    @type errorCode: int
    @param errorCode: Win32 error code to stop on. Set to C{0} or
        C{ERROR_SUCCESS} to clear the breakpoint instead.
    @raise NotImplementedError:
        The functionality is not supported in this system.
    @raise WindowsError:
        An error occurred while processing this request.
    """
    aProcess = self.system.get_process(pid)
    # Address of the debuggee's break-on-error global; a falsy value means
    # the symbol could not be resolved (missing symbols or unsupported OS).
    address = aProcess.get_break_on_error_ptr()
    if not address:
        raise NotImplementedError(
            "The functionality is not supported in this system.")
    # Writing the error code arms the breakpoint; writing 0 disarms it.
    aProcess.write_dword(address, errorCode)
Use L{Process.is_system_defined_breakpoint} to tell if a breakpoint
exception was caused by a system breakpoint or by the application
itself (for example because of a failed assertion in the code).
@note: This functionality is only available since Windows Server 2003.
In 2003 it only breaks on error values set externally to the
kernel32.dll library, but this was fixed in Windows Vista.
@warn: This method will fail if the debug symbols for ntdll (kernel32
in Windows 2003) are not present. For more information see:
L{System.fix_symbol_store_path}.
@see: U{http://www.nynaeve.net/?p=147}
@type pid: int
@param pid: Process ID.
@type errorCode: int
@param errorCode: Win32 error code to stop on. Set to C{0} or
C{ERROR_SUCCESS} to clear the breakpoint instead.
@raise NotImplementedError:
The functionality is not supported in this system.
@raise WindowsError:
An error occurred while processing this request. |
def _friends_leaveoneout_radius(points, ftype):
    """Internal method used to compute the radius (half-side-length) for each
    ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
    leave-one-out (LOO) cross-validation.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        Points used to compute the radii.
    ftype : {'balls', 'cubes'}
        'balls' uses the Euclidean metric (radius); 'cubes' uses the
        Chebyshev metric (half-side-length).

    Returns
    -------
    dist : `~numpy.ndarray` with shape (npoints,)
        Distance from each point to its nearest other point.

    Raises
    ------
    ValueError
        If `ftype` is not 'balls' or 'cubes'.
    """
    # Construct KDTree to enable quick nearest-neighbor lookup for
    # our resampled objects.
    kdtree = spatial.KDTree(points)
    if ftype == 'balls':
        # Compute radius to two nearest neighbors (self + neighbor).
        dists, ids = kdtree.query(points, k=2, eps=0, p=2)
    elif ftype == 'cubes':
        # Compute half-side-length to two nearest neighbors (self + neighbor).
        dists, ids = kdtree.query(points, k=2, eps=0, p=np.inf)
    else:
        # BUGFIX: an unknown `ftype` previously fell through and crashed
        # with an unrelated NameError on `dists`; fail fast instead.
        raise ValueError("Unknown ftype: {0!r}".format(ftype))
    dist = dists[:, 1]  # distances to LOO nearest neighbor
    return dist
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
leave-one-out (LOO) cross-validation. |
def insert_many(conn, tablename, column_names, records, chunksize=2500):
    """Insert many records by chunking data into insert statements.
    :param conn: DB-API connection used as a transaction context manager.
    :param tablename: target table name (interpolated, NOT escaped --
        must come from trusted code, never from user input).
    :param column_names: iterable of column names to insert into.
    :param records: Iterable collection of namedtuples or tuples.
    :param chunksize: number of records per INSERT statement.
    Notes
    -----
    records should be Iterable collection of namedtuples or tuples.
    """
    # `chunks` is a sibling helper that yields `chunksize`-sized groups.
    groups = chunks(records, chunksize)
    column_str = ','.join(column_names)
    insert_template = 'INSERT INTO {table} ({columns}) VALUES {values}'.format(
        table=tablename, columns=column_str, values='{0}')
    with conn:
        with conn.cursor() as cursor:
            for recs in groups:
                record_group = list(recs)
                # One '%s' placeholder per record tuple; relies on the driver
                # (presumably psycopg2) adapting each tuple to a row literal
                # -- confirm against the driver in use.
                records_template_str = ','.join(['%s'] * len(record_group))
                insert_query = insert_template.format(records_template_str)
                cursor.execute(insert_query, record_group)
Notes
-----
records should be Iterable collection of namedtuples or tuples. |
def fetch_and_filter_tags(self):
    """
    Fetch and filter tags, fetch dates and sort them in time order.
    """
    self.all_tags = self.fetcher.get_all_tags()
    self.filtered_tags = self.get_filtered_tags(self.all_tags)
    # Presumably the time ordering promised by the docstring happens inside
    # fetch_tags_dates() -- confirm there; nothing is sorted here directly.
    self.fetch_tags_dates()
def compute_within_collection_vowel_duration(self, prefix, no_singletons=False):
    """ Computes the mean duration of vowels from Units within clusters.
    :param str prefix: Prefix for the key entry in self.measures
    :param bool no_singletons: If False, excludes collections of length 1 from calculations
        and adds "no_singletons" to the prefix
    Adds the following measures to the self.measures dictionary:
        - TIMING_(similarity_measure)_(collection_type)_within_collection_vowel_duration_mean
    """
    # NOTE(review): the docstring's description of `no_singletons` appears
    # inverted relative to the code below -- when no_singletons is True,
    # singletons are excluded (min_size=2); confirm intended wording.
    if no_singletons:
        min_size = 2
    else:
        prefix += "no_singletons_"
        min_size = 1
    durations = []
    for cluster in self.collection_list:
        if len(cluster) >= min_size:
            for word in cluster:
                # Look up the timed word so phone-level timings are available.
                word = self.full_timed_response[word.index_in_timed_response]
                for phone in word.phones:
                    # Only vowel phones contribute to the duration mean.
                    if phone.string in self.vowels:
                        durations.append(phone.end - phone.start)
    self.measures[prefix + 'within_collection_vowel_duration_mean'] = get_mean(durations) \
        if len(durations) > 0 else 'NA'
    if not self.quiet:
        if no_singletons:
            print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
                " vowel duration, excluding singletons:", \
                self.measures[prefix + 'within_collection_vowel_duration_mean']
        else:
            print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
                " vowel duration, including singletons:", \
                self.measures[prefix + 'within_collection_vowel_duration_mean']
:param str prefix: Prefix for the key entry in self.measures
:param bool no_singletons: If False, excludes collections of length 1 from calculations
and adds "no_singletons" to the prefix
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_within_collection_vowel_duration_mean |
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(DarnerCollector, self).get_default_config()
    defaults = {
        'path': 'darner',
        # Which rows of 'status' you would like to publish.
        # 'telnet host port' and type stats and hit enter to see the list
        # of possibilities.
        # Leave unset to publish all
        # 'publish': ''
        'publish_queues': True,
        # Connection settings
        'hosts': ['localhost:22133'],
    }
    config.update(defaults)
    return config
def scatter(slope, zero, x1, x2, x1err=[], x2err=[]):
"""
Used mainly to measure scatter for the BCES best-fit
"""
n = len(x1)
x2pred = zero + slope * x1
s = sum((x2 - x2pred) ** 2) / (n - 1)
if len(x2err) == n:
s_obs = sum((x2err / x2) ** 2) / n
s0 = s - s_obs
print numpy.sqrt(s), numpy.sqrt(s_obs), numpy.sqrt(s0)
return numpy.sqrt(s0) | Used mainly to measure scatter for the BCES best-fit |
def findAllSubstrings(string, substring):
    """ Returns a list of all substring starting positions in string or an empty
    list if substring is not present in string.
    :param string: a template string
    :param substring: a string, which is looked for in the ``string`` parameter.
    :returns: a list of substring starting positions in the template string
    """
    positions = []
    # Advance by one position (not by len(substring)) after each hit so
    # that overlapping occurrences are found as well.
    hit = string.find(substring)
    while hit != -1:
        positions.append(hit)
        hit = string.find(substring, hit + 1)
    return positions
list if substring is not present in string.
:param string: a template string
:param substring: a string, which is looked for in the ``string`` parameter.
:returns: a list of substring starting positions in the template string |
def is_link(self, path, use_sudo=False):
    """
    Check if a path exists, and is a symbolic link.
    """
    # `test -L` succeeds only for symlinks; warn_only keeps a non-zero
    # exit status from aborting the whole run.
    func = _sudo if use_sudo else _run
    with self.settings(hide('running', 'warnings'), warn_only=True):
        return func('[ -L "%(path)s" ]' % locals()).succeeded
def convolve(input, weights, mask=None, slow=False):
    """2 dimensional convolution.
    This is a Python implementation of what will be written in Fortran.
    Borders are handled with reflection.
    Masking is supported in the following way:
      * Masked points are skipped.
      * Parts of the input which are masked have weight 0 in the kernel.
      * Since the kernel as a whole needs to have value 1, the weights of the
        masked parts of the kernel are evenly distributed over the non-masked
        parts.
    Adapted from https://github.com/nicjhan/gaussian-filter
    :param input: 2-D array to convolve.
    :param weights: 2-D kernel; assumed to sum to 1.
    :param mask: optional boolean array (same shape as input); True = masked.
    :param slow: use the explicit per-weight loop (no masking support).
    :return: convolved array with the same shape as input.
    """
    assert (len(input.shape) == 2)
    assert (len(weights.shape) == 2)
    # Only one reflection is done on each side so the weights array cannot be
    # bigger than width/height of input +1.
    assert (weights.shape[0] < input.shape[0] + 1)
    assert (weights.shape[1] < input.shape[1] + 1)
    if mask is not None:
        # The slow convolve does not support masking.
        assert (not slow)
        assert (input.shape == mask.shape)
        tiled_mask = tile_and_reflect(mask)
    output = np.copy(input)
    tiled_input = tile_and_reflect(input)
    rows = input.shape[0]
    cols = input.shape[1]
    # Stands for half weights row (plain int() -- the np.int alias was
    # deprecated and removed in modern NumPy).
    hw_row = int(weights.shape[0] / 2)
    hw_col = int(weights.shape[1] / 2)
    # Stands for full weights row.
    fw_row = weights.shape[0]
    # BUGFIX: this previously read weights.shape[0], which made the
    # overlapping slices the wrong width (tripping the shape asserts below)
    # for non-square kernels.
    fw_col = weights.shape[1]
    # Now do convolution on central array.
    # Iterate over tiled_input.
    for i, io in zip(list(range(rows, rows * 2)), list(range(rows))):
        for j, jo in zip(list(range(cols, cols * 2)), list(range(cols))):
            # The current central pixel is at (i, j)
            # Skip masked points.
            if mask is not None and tiled_mask[i, j]:
                continue
            average = 0.0
            if slow:
                # Iterate over weights/kernel.
                for k in range(weights.shape[0]):
                    for l in range(weights.shape[1]):
                        # Get coordinates of tiled_input array that match given
                        # weights
                        m = i + k - hw_row
                        n = j + l - hw_col
                        average += tiled_input[m, n] * weights[k, l]
            else:
                # Find the part of the tiled_input array that overlaps with the
                # weights array.
                overlapping = tiled_input[
                    i - hw_row:i - hw_row + fw_row,
                    j - hw_col:j - hw_col + fw_col]
                assert (overlapping.shape == weights.shape)
                # If any of 'overlapping' is masked then set the corresponding
                # points in the weights matrix to 0 and redistribute these to
                # non-masked points.
                if mask is not None:
                    # BUGFIX: the column extent previously used fw_row.
                    overlapping_mask = tiled_mask[
                        i - hw_row:i - hw_row + fw_row,
                        j - hw_col:j - hw_col + fw_col]
                    assert (overlapping_mask.shape == weights.shape)
                    # Total value and number of weights clobbered by the mask.
                    clobber_total = np.sum(weights[overlapping_mask])
                    remaining_num = np.sum(np.logical_not(overlapping_mask))
                    # This is impossible since at least i, j is not masked.
                    assert (remaining_num > 0)
                    correction = clobber_total / remaining_num
                    # It is OK if nothing is masked - the weights will not be
                    # changed.
                    if correction == 0:
                        assert (not overlapping_mask.any())
                    # Redistribute to non-masked points.
                    tmp_weights = np.copy(weights)
                    tmp_weights[overlapping_mask] = 0.0
                    tmp_weights[np.where(tmp_weights != 0)] += correction
                    # Should be very close to 1. May not be exact due to
                    # rounding.
                    assert (abs(np.sum(tmp_weights) - 1) < 1e-15)
                else:
                    tmp_weights = weights
                merged = tmp_weights[:] * overlapping
                average = np.sum(merged)
            # Set new output value.
            output[io, jo] = average
    return output
This is a Python implementation of what will be written in Fortran.
Borders are handled with reflection.
Masking is supported in the following way:
* Masked points are skipped.
* Parts of the input which are masked have weight 0 in the kernel.
* Since the kernel as a whole needs to have value 1, the weights of the
masked parts of the kernel are evenly distributed over the non-masked
parts.
Adapted from https://github.com/nicjhan/gaussian-filter |
def collect(cls, result_key, func):
    """
    Sets the `result_key` to an iterable of objects for which `func(obj)`
    returns True
    """
    def scanner(self, obj):
        # Lazily create the result list on first use.
        if not getattr(self, result_key, None):
            setattr(self, result_key, [])
        rv = func(obj)
        if rv:
            # NOTE(review): this appends `rv` (func's return value), not
            # `obj` itself, despite the docstring above -- confirm which
            # is intended.
            getattr(self, result_key).append(rv)
    cls._scan(result_key, scanner)
returns True |
def gallery_images(self):
    """Instance depends on the API version:
    * 2018-06-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2018_06_01.operations.GalleryImagesOperations>`
    * 2019-03-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2019_03_01.operations.GalleryImagesOperations>`
    """
    # Resolve the configured API version, import the matching operations
    # class lazily, and build it with version-specific (de)serializers.
    api_version = self._get_api_version('gallery_images')
    if api_version == '2018-06-01':
        from .v2018_06_01.operations import GalleryImagesOperations as OperationClass
    elif api_version == '2019-03-01':
        from .v2019_03_01.operations import GalleryImagesOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
* 2018-06-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2018_06_01.operations.GalleryImagesOperations>`
* 2019-03-01: :class:`GalleryImagesOperations<azure.mgmt.compute.v2019_03_01.operations.GalleryImagesOperations>` |
def load_service(config):
    """
    Load a restful service specified by some YAML file at config_path.
    :param config: An already-parsed mapping, a string path, or a
        pathlib.Path pointing to the yaml config.
    :returns: A python module containing a Client class, call factory,
        and the definition of each of the APIs defined by the config.
    :raises TypeError: if `config` is none of the accepted types.
    """
    # Accept a parsed mapping directly, or load from a path-like value.
    if isinstance(config, collections.abc.Mapping):
        service_config = config
    elif isinstance(config, (str, pathlib.Path)):
        service_config = load_config(pathlib.Path(config))
    else:
        raise TypeError('Cannot load config from type: {}'.format(type(config)))
    api_definitions = [
        create_api_definition(api_name, definition, service_config['base_url'])
        for api_name, definition in service_config['apis'].items()
    ]
    return create_service_module(service_config['name'], api_definitions)
:param config_path: A pathlib Path object that points to the yaml
config
:returns: A python module containing a Client class, call factory,
and the definition of each of the APIs defined by the config. |
def stop(name):
    '''
    Stop the specified service
    CLI Example:
    .. code-block:: bash
        salt '*' service.stop <service name>
    '''
    # Upstart jobs are stopped with `stop NAME`; SysV services go through
    # /sbin/service.
    cmd = ('stop {0}'.format(name)
           if _service_is_upstart(name)
           else '/sbin/service {0} stop'.format(name))
    # A zero return code means success, hence the negation.
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name> |
def delete_object(self, id):
    """Deletes the object with the given ID from the graph."""
    # x=self.request(id, post_args={"method": "delete"})
    # NOTE(review): 'access_token' is set to str(id) -- the object ID, not a
    # real access token; presumably this should use the session's access
    # token instead. Also note this issues a GET with method=delete rather
    # than an HTTP DELETE. Confirm both against the Graph API usage.
    params = urllib.parse.urlencode({"method": "delete", 'access_token': str(id)})
    u = requests.get("https://graph.facebook.com/" + str(id) + "?" + params)
    groups = u.json()
    return groups
def register(**criteria):
    """
    class decorator to add :class:`Part <cqparts.Part>` or
    :class:`Assembly <cqparts.Assembly>` to the ``cqparts`` search index:
    .. testcode::
        import cqparts
        from cqparts.params import *
        # Created Part or Assembly
        @cqparts.search.register(
            type='motor',
            current_class='dc',
            part_number='ABC123X',
        )
        class SomeMotor(cqparts.Assembly):
            shaft_diam = PositiveFloat(5)
            def make_components(self):
                return {}  # build assembly content
        motor_class = cqparts.search.find(part_number='ABC123X')
        motor = motor_class(shaft_diam=6.0)
    Then use :meth:`find` &/or :meth:`search` to instantiate it.
    .. warning::
        Multiple classes *can* be registered with identical criteria, but
        should be avoided.
        If multiple classes share the same criteria, :meth:`find` will never
        yield the part you want.
        Try adding unique criteria, such as *make*, *model*, *part number*,
        *library name*, &/or *author*.
        To avoid this, learn more in :ref:`tutorial_component-index`.
    """
    def inner(cls):
        # Add class references to search index
        class_list.add(cls)
        for (category, value) in criteria.items():
            index[category][value].add(cls)
        # Retain search criteria
        # Each criterion value is stored as a set so repeated registrations
        # of the same class merge (set-union) rather than overwrite.
        _entry = dict((k, set([v])) for (k, v) in criteria.items())
        if cls not in class_criteria:
            class_criteria[cls] = _entry
        else:
            for key in _entry.keys():
                class_criteria[cls][key] = class_criteria[cls].get(key, set()) | _entry[key]
        # Return class
        return cls
    return inner
:class:`Assembly <cqparts.Assembly>` to the ``cqparts`` search index:
.. testcode::
import cqparts
from cqparts.params import *
# Created Part or Assembly
@cqparts.search.register(
type='motor',
current_class='dc',
part_number='ABC123X',
)
class SomeMotor(cqparts.Assembly):
shaft_diam = PositiveFloat(5)
def make_components(self):
return {} # build assembly content
motor_class = cqparts.search.find(part_number='ABC123X')
motor = motor_class(shaft_diam=6.0)
Then use :meth:`find` &/or :meth:`search` to instantiate it.
.. warning::
Multiple classes *can* be registered with identical criteria, but
should be avoided.
If multiple classes share the same criteria, :meth:`find` will never
yield the part you want.
Try adding unique criteria, such as *make*, *model*, *part number*,
*library name*, &/or *author*.
To avoid this, learn more in :ref:`tutorial_component-index`. |
def create_resource(self, path, transaction):
    """
    Render a POST request.
    :param path: the path of the request
    :param transaction: the transaction
    :return: the response
    """
    t = self._parent.root.with_prefix(path)
    # Find the deepest existing resource whose path prefixes `path`; if the
    # exact path already exists, this becomes an edit instead of a create.
    max_len = 0
    imax = None
    for i in t:
        if i == path:
            # Resource already present
            return self.edit_resource(transaction, path)
        elif len(i) > max_len:
            imax = i
            max_len = len(i)
    lp = path
    parent_resource = self._parent.root[imax]
    if parent_resource.allow_children:
        return self.add_resource(transaction, parent_resource, lp)
    else:
        # The closest ancestor refuses children: 4.05 Method Not Allowed.
        transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
        return transaction
:param path: the path of the request
:param transaction: the transaction
:return: the response |
def system_exit(object):
    """
    Handles proper system exit in case of critical exception.
    :param object: Object to decorate.
    :type object: object
    :return: Object.
    :rtype: object
    """
    @functools.wraps(object)
    def system_exit_wrapper(*args, **kwargs):
        """
        Handles proper system exit in case of critical exception.
        :param \*args: Arguments.
        :type \*args: \*
        :param \*\*kwargs: Keywords arguments.
        :type \*\*kwargs: \*\*
        """
        try:
            # Exit 0 only when the wrapped callable reports success (truthy).
            if object(*args, **kwargs):
                foundations.core.exit(0)
        except Exception as error:
            # Print the formatted traceback, then exit with failure status.
            sys.stderr.write("\n".join(foundations.exceptions.format_exception(*sys.exc_info())))
            foundations.core.exit(1)
    return system_exit_wrapper
:param object: Object to decorate.
:type object: object
:return: Object.
:rtype: object |
def guess_encoding(blob):
    """
    uses file magic to determine the encoding of the given data blob.
    :param blob: file content as read by file.read()
    :type blob: data
    :returns: encoding
    :rtype: str
    :raises Exception: if neither known python-magic API is available
    """
    # this is a bit of a hack to support different versions of python magic.
    # Hopefully at some point this will no longer be necessary
    #
    # the version with open() is the bindings shipped with the file source from
    # http://darwinsys.com/file/ - this is what is used by the python-magic
    # package on Debian/Ubuntu. However it is not available on pypi/via pip.
    #
    # the version with from_buffer() is available at
    # https://github.com/ahupp/python-magic and directly installable via pip.
    #
    # for more detail see https://github.com/pazz/alot/pull/588
    if hasattr(magic, 'open'):
        m = magic.open(magic.MAGIC_MIME_ENCODING)
        m.load()
        return m.buffer(blob)
    elif hasattr(magic, 'from_buffer'):
        m = magic.Magic(mime_encoding=True)
        return m.from_buffer(blob)
    else:
        raise Exception('Unknown magic API')
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str |
def add(self, agent_id, media_type, media_file):
    """
    Upload a permanent media asset of the given type.
    See the official documentation for details:
    https://qydev.weixin.qq.com/wiki/index.php?title=%E4%B8%8A%E4%BC%A0%E6%B0%B8%E4%B9%85%E7%B4%A0%E6%9D%90
    :param agent_id: ID of the enterprise application
    :param media_type: media file type; one of image, voice, video or
        plain file (``file``)
    :param media_file: the file to upload, a File-object
    :return: parsed JSON response
    """
    params = {
        'agentid': agent_id,
        'type': media_type,
    }
    # The file goes in the multipart body under the 'media' field.
    return self._post(
        url='material/add_material',
        params=params,
        files={
            'media': media_file
        }
    ) | 新增其它类型永久素材
详情请参考
https://qydev.weixin.qq.com/wiki/index.php?title=%E4%B8%8A%E4%BC%A0%E6%B0%B8%E4%B9%85%E7%B4%A0%E6%9D%90
:param agent_id: 企业应用的id
:param media_type: 媒体文件类型,分别有图片(image)、语音(voice)、视频(video)普通文件(file)
:param media_file: 要上传的文件,一个 File-object
:return: 返回的 JSON 数据包 |
def parse_declaration(self, i):
    """Treat a bogus SGML declaration as raw data. Treat a CDATA
    declaration as a CData object.
    :param i: index into self.rawdata where the declaration starts
    :returns: index just past the end of the consumed declaration
    """
    j = None
    if self.rawdata[i:i+9] == '<![CDATA[':
        # Explicit CDATA handling: capture up to ']]>' (or to end of input
        # if the section is unterminated).
        k = self.rawdata.find(']]>', i)
        if k == -1:
            k = len(self.rawdata)
        data = self.rawdata[i+9:k]
        j = k+3
        self._toStringSubclass(data, CData)
    else:
        try:
            j = SGMLParser.parse_declaration(self, i)
        except SGMLParseError:
            # Malformed declaration: emit the rest of the input as raw data.
            toHandle = self.rawdata[i:]
            self.handle_data(toHandle)
            j = i + len(toHandle)
    return j
declaration as a CData object. |
def _run(self):
    """Run method that can be profiled.
    Initializes the IOLoop, lock and sentry client, runs consumer setup,
    and then blocks in the IOLoop until stopped.
    """
    self.set_state(self.STATE_INITIALIZING)
    self.ioloop = ioloop.IOLoop.current()
    self.consumer_lock = locks.Lock()
    self.sentry_client = self.setup_sentry(
        self._kwargs['config'], self.consumer_name)
    try:
        self.setup()
    except (AttributeError, ImportError):
        # The consumer module could not be imported/resolved; report and
        # bail out before starting the loop.
        return self.on_startup_error(
            'Failed to import the Python module for {}'.format(
                self.consumer_name))
    if not self.is_stopped:
        try:
            # Blocks until the IOLoop is stopped.
            self.ioloop.start()
        except KeyboardInterrupt:
            LOGGER.warning('CTRL-C while waiting for clean shutdown')
def print_options(self):
    """
    Print all options as parsed by the script
    """
    options = []
    print("The script is running with the following options:")
    options.append(("dry_run", self.options.dry_run))
    options.append(("worker_config", self.__class__))
    # Prefer an explicitly supplied database config; otherwise fall back to
    # the queue model's connection settings.
    database_config = self.database_config or \
        self.options.queue_model.database.connection_settings
    options.append(("database", '%s:%s:%s' % (database_config['host'],
                                              database_config['port'],
                                              database_config['db'])))
    if self.options.worker_class is not None:
        options.append(("worker-class", self.options.worker_class))
    for name, value in options:
        print(" - %s = %s" % (name.replace('_', '-'), value))
    print("The worker will run with the following options:")
    for name in self.options.worker_class.parameters:
        option = getattr(self.worker, name)
        # A default callback means jobs run their own "run" method.
        if name == 'callback' and \
                self.options.worker_class.execute == Worker.execute:
            option = '<jobs "run" method>'
        elif isinstance(option, (list, tuple, set)):
            option = ','.join(option)
        print(" - %s = %s" % (name.replace('_', '-'), option))
def scalar_inc_dec(word, valence, is_cap_diff):
    """
    Check if the preceding words increase, decrease, or negate/nullify the
    valence
    """
    word_lower = word.lower()
    # Words outside the booster dictionary have no effect.
    if word_lower not in BOOSTER_DICT:
        return 0.0
    scalar = BOOSTER_DICT[word_lower]
    # Boosters push in the direction of the valence they modify.
    if valence < 0:
        scalar = -scalar
    # check if booster/dampener word is in ALLCAPS (while others aren't)
    if word.isupper() and is_cap_diff:
        scalar = scalar + C_INCR if valence > 0 else scalar - C_INCR
    return scalar
valence |
def EncodeMessages(self,
                   message_list,
                   result,
                   destination=None,
                   timestamp=None,
                   api_version=3):
    """Accepts a list of messages and encodes for transmission.
    This function signs and then encrypts the payload.
    Args:
      message_list: A MessageList rdfvalue containing a list of GrrMessages.
      result: A ClientCommunication rdfvalue which will be filled in.
      destination: The CN of the remote system this should go to.
      timestamp: A timestamp to use for the signed messages. If None - use the
        current time.
      api_version: The api version which this should be encoded in.
    Returns:
      A nonce (based on time) which is inserted to the encrypted payload. The
      client can verify that the server is able to decrypt the message and
      return the nonce.
    Raises:
      RuntimeError: If we do not support this api version.
    """
    if api_version not in [3]:
      raise RuntimeError(
          "Unsupported api version: %s, expected 3." % api_version)
    # TODO(amoser): This is actually not great, we have two
    # communicator classes already, one for the client, one for the
    # server. This should be different methods, not a single one that
    # gets passed a destination (server side) or not (client side).
    if destination is None:
      destination = self.server_name
      # For the client it makes sense to cache the server cipher since
      # it's the only cipher it ever uses.
      cipher = self._GetServerCipher()
    else:
      remote_public_key = self._GetRemotePublicKey(destination)
      cipher = Cipher(self.common_name, self.private_key, remote_public_key)
    # Make a nonce for this transaction
    # NOTE(review): self.timestamp is only updated when the caller passes
    # timestamp=None; an explicit timestamp is used but not recorded --
    # confirm whether that asymmetry is intended.
    if timestamp is None:
      self.timestamp = timestamp = int(time.time() * 1000000)
    packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp)
    self.EncodeMessageList(message_list, packed_message_list)
    result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata
    # Include the encrypted cipher.
    result.encrypted_cipher = cipher.encrypted_cipher
    serialized_message_list = packed_message_list.SerializeToString()
    # Encrypt the message symmetrically.
    # New scheme cipher is signed plus hmac over message list.
    result.packet_iv, result.encrypted = cipher.Encrypt(serialized_message_list)
    # This is to support older endpoints.
    result.hmac = cipher.HMAC(result.encrypted)
    # Newer endpoints only look at this HMAC. It is recalculated for each packet
    # in the session. Note that encrypted_cipher and encrypted_cipher_metadata
    # do not change between all packets in this session.
    result.full_hmac = cipher.HMAC(result.encrypted, result.encrypted_cipher,
                                   result.encrypted_cipher_metadata,
                                   result.packet_iv.SerializeToString(),
                                   struct.pack("<I", api_version))
    result.api_version = api_version
    if isinstance(result, rdfvalue.RDFValue):
      # Store the number of messages contained.
      result.num_messages = len(message_list)
    return timestamp
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version. |
def getRegexpsByName(regexpNames=None):
    '''
    Recover the <RegexpObject> instances whose names appear in a given list.

    :param regexpNames: list of strings containing the names of the regexps
        to select. The special value 'all' (also the default) selects every
        known regexp.
    :return: Array of <RegexpObject> classes.
    '''
    # Guard against the mutable-default-argument pitfall: build the default
    # selection at call time instead of sharing one list object across calls.
    if regexpNames is None:
        regexpNames = ['all']
    allRegexpList = getAllRegexp()
    if 'all' in regexpNames:
        return allRegexpList
    # Preserve the order of the requested names; each name contributes every
    # regexp whose .name matches it (same semantics as the original loops).
    return [r for name in regexpNames for r in allRegexpList if name == r.name]
:param regexpNames: list of strings containing the possible regexp.
:return: Array of <RegexpObject> classes. |
def import_class(import_path, setting_name=None):
    """
    Resolve a dotted ``import_path`` (``"pkg.module.ClassName"``) to a class.

    :param setting_name: optional setting name used to produce a clearer
        error message when resolution fails.
    :raises ImproperlyConfigured: when either the module or the attribute
        cannot be found.
    """
    module_path, attr_name = import_path.rsplit('.', 1)
    module = import_module_or_none(module_path)
    if module is not None:
        # Module loaded fine; a missing attribute falls through to the
        # shared error below so callers see one exception type.
        try:
            return getattr(module, attr_name)
        except AttributeError:
            pass
    if setting_name:
        raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path))
    raise ImproperlyConfigured("Class not found: {0}".format(import_path))
def get_mining_equipment():
    """Fetch all available mining equipment information.

    Returns:
        A ``(coin_data, mining_data)`` tuple. ``coin_data`` maps coin
        symbols to per-coin statistics (BlockNumber, BlockReward,
        BlockTime, NetHashesPerSecond, PriceUSD, TotalCoinsMined, ...).
        ``mining_data`` maps equipment ids to the description of each
        piece of hardware (Algorithm, Company, Cost, HashesPerSecond,
        PowerConsumption, Url, ...).
    """
    # One endpoint call returns both dictionaries bundled together.
    response = load_data(build_url('miningequipment'))
    return response['CoinData'], response['MiningData']
Returns:
This function returns two major dictionaries. The first one contains information about the coins for which mining equipment data is available.
coin_data:
{symbol1: {'BlockNumber': ...,
'BlockReward': ...,
'BlockRewardReduction': ...,
'BlockTime': ...,
'DifficultyAdjustment': ...,
'NetHashesPerSecond': ...,
'PreviousTotalCoinsMined': ...,
'PriceUSD': ...,
'Symbol': ...,
'TotalCoinsMined': ...},
symbol2: {...},
...}
The other one contains all the available mining equipment.
mining_data:
{id1: {'AffiliateURL': ...,
'Algorithm': ...,
'Company': ...,
'Cost': ...,
'CurrenciesAvailable': ...,
'CurrenciesAvailableLogo': ...,
'CurrenciesAvailableName': ...,
'Currency': ...,
'EquipmentType': ...,
'HashesPerSecond': ...,
'Id': ...,
'LogoUrl': ...,
'Name': ...,
'ParentId': ...,
'PowerConsumption': ...,
'Recommended': ...,
'Sponsored': ...,
'Url': ...},
id2: {...}, |
def paths_for_download(self):
    """List of SRA URLs available for downloading (computed lazily).

    On first access this resolves every 'SRA' relation of the underlying
    GSM sample to an SRX query, looks each query up via Entrez and
    collects the ``download_path`` column of the returned run info.
    The list is cached in ``self._paths_for_download``.

    Raises:
        NoSRARelationException: when the sample has no 'SRA' relation.
        ValueError: when a relation does not look like an SRX, or an SRX
            resolves to zero or more than one SRA id.
    """
    if self._paths_for_download is None:
        queries = list()
        try:
            for sra in self.gsm.relations['SRA']:
                # The SRX accession is the value after '=' in the relation URL.
                query = sra.split("=")[-1]
                if 'SRX' not in query:
                    raise ValueError(
                        "Sample looks like it is not an SRA: %s" % query)
                logger.info("Query: %s" % query)
                queries.append(query)
        except KeyError:
            raise NoSRARelationException(
                'No relation called SRA for %s' % self.gsm.get_accession())
        # Construction of DataFrame df with paths to download
        df = DataFrame(columns=['download_path'])
        for query in queries:
            # retrieve IDs for given SRX
            searchdata = Entrez.esearch(db='sra', term=query, usehistory='y',
                                        retmode='json')
            answer = json.loads(searchdata.read())
            ids = answer["esearchresult"]["idlist"]
            if len(ids) != 1:
                raise ValueError(
                    "There should be one and only one ID per SRX")
            # using ID fetch the info, retrying on transient HTTP errors
            # (502 Bad Gateway and 429 Too Many Requests)
            number_of_trials = 10
            wait_time = 30
            for trial in range(number_of_trials):
                try:
                    results = Entrez.efetch(db="sra", id=ids[0],
                                            rettype="runinfo",
                                            retmode="text").read()
                    break
                except HTTPError as httperr:
                    if "502" in str(httperr):
                        logger.warn(("%s, trial %i out of %i, waiting "
                                     "for %i seconds.") % (
                            str(httperr),
                            trial,
                            number_of_trials,
                            wait_time))
                        time.sleep(wait_time)
                    elif httperr.code == 429:
                        # Too many requests: honor the server's Retry-After
                        # header when present, else fall back to the default.
                        try:
                            header_wait_time = int(
                                httperr.headers["Retry-After"])
                        # NOTE(review): bare except — consider narrowing to
                        # (KeyError, TypeError, ValueError).
                        except:
                            header_wait_time = wait_time
                        logger.warn(("%s, trial %i out of %i, waiting "
                                     "for %i seconds.") % (
                            str(httperr),
                            trial,
                            number_of_trials,
                            header_wait_time))
                        time.sleep(header_wait_time)
                    else:
                        raise httperr
            # NOTE(review): if every trial fails with a retriable error,
            # `results` is unbound here and a NameError is raised — confirm
            # whether that is the intended failure mode.
            # Parse the run info CSV: first non-empty line is the header.
            try:
                df_tmp = DataFrame([i.split(',') for i in results.split('\n') if i != ''][1:],
                                   columns=[i.split(',') for i in results.split('\n') if i != ''][0])
            except IndexError:
                logger.error(("SRA is empty (ID: %s, query: %s). "
                              "Check if it is publicly available.") %
                             (ids[0], query))
                continue
            # Check that the expected column exists before concatenating;
            # a missing column is logged but the frame is still appended.
            try:
                df_tmp['download_path']
            except KeyError as e:
                logger.error('KeyError: ' + str(e) + '\n')
                logger.error(str(results) + '\n')
            df = concat([df, df_tmp], sort=True)
        self._paths_for_download = [path for path in df['download_path']]
    return self._paths_for_download
def checksum(file_path, hash_type='md5', block_size=65536):
    """Return the hex digest of the file at ``file_path``.

    ``hash_type`` selects the algorithm: ``'md5'`` (the default, as it is
    faster) or ``'sha256'``. The file is streamed in ``block_size``-byte
    chunks (64 KiB by default, a common choice per
    https://stackoverflow.com/a/44873382/2680) so arbitrarily large files
    are hashed without loading them fully into memory.

    Raises:
        ValueError: if ``hash_type`` is neither ``'md5'`` nor ``'sha256'``.
    """
    constructors = {'md5': hashlib.md5, 'sha256': hashlib.sha256}
    if hash_type not in constructors:
        raise ValueError(
            "{} is an invalid hash_type. Expected 'md5' or 'sha256'."
            .format(hash_type)
        )
    hasher = constructors[hash_type]()
    with open(file_path, 'rb') as stream:
        for chunk in iter(lambda: stream.read(block_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
md5 is the default hash_type as it is faster than sha256
The default block size is 64 kb, which appears to be one of a few command
choices according to https://stackoverflow.com/a/44873382/2680. The code
below is an extension of the example presented in that post. |
def is_quoted(value):
    '''
    Return a single or double quote, if a string is wrapped in extra quotes.
    Otherwise return an empty string.
    '''
    # Bug fix: check the string is non-empty before indexing — the original
    # evaluated value[0] first and raised IndexError for value == ''.
    ret = ''
    if isinstance(value, six.string_types) \
            and value \
            and value[0] == value[-1] \
            and value.startswith(('\'', '"')):
        ret = value[0]
    return ret
Otherwise return an empty string. |
def get_status_job(self, id_job, hub=None, group=None, project=None,
                   access_token=None, user_id=None):
    """
    Return the status of the job identified by ``id_job``.

    ``access_token``/``user_id``, when given, refresh the stored
    credential before the request; ``hub``/``group``/``project`` scope
    the job URL. On invalid credentials or a missing job id an error
    dictionary is returned instead of raising.
    """
    if access_token:
        self.req.credential.set_token(access_token)
    if user_id:
        self.req.credential.set_user_id(user_id)
    # Fail fast with an error payload rather than an exception.
    if not self.check_credentials():
        return {"status": 'Error', "error": "Not credentials valid"}
    if not id_job:
        return {"status": 'Error', "error": "Job ID not specified"}
    job_url = get_job_url(self.config, hub, group, project)
    return self.req.get(job_url + '/' + id_job + '/status')
def normalize_string(mac_type, resource, content_hash):
    """Serializes mac_type and resource into a HAWK string."""
    parts = ['hawk.' + str(HAWK_VER) + '.' + mac_type]
    parts.append(normalize_header_attr(resource.timestamp))
    parts.append(normalize_header_attr(resource.nonce))
    parts.append(normalize_header_attr(resource.method or ''))
    parts.append(normalize_header_attr(resource.name or ''))
    parts.append(normalize_header_attr(resource.host))
    parts.append(normalize_header_attr(resource.port))
    parts.append(normalize_header_attr(content_hash or ''))
    # The (possibly blank) ext line is always present — this mirrors what
    # the Node Hawk library produces.
    parts.append(normalize_header_attr(resource.ext or ''))
    if resource.app:
        parts.append(normalize_header_attr(resource.app))
        parts.append(normalize_header_attr(resource.dlg or ''))
    # Trailing new line.
    parts.append('')
    return '\n'.join(parts)
def verify_verify(self, id, token):
    """Verify the token of a specific verification."""
    # Fetch the verification resource with the token and hydrate a Verify.
    response = self.request('verify/' + str(id), params={'token': token})
    return Verify().load(response)
def qtt_fft1(self,tol,inverse=False, bitReverse=True):
    """ Compute 1D (inverse) discrete Fourier Transform in the QTT format.
    :param tol: error tolerance.
    :type tol: float
    :param inverse: whether do an inverse FFT or not.
    :type inverse: Boolean
    :param bitReverse: whether do the bit reversion or not. If this function is used as a subroutine for multi-dimensional qtt-fft, this option
    need to be set False.
    :type bitReverse: Boolean.
    :returns: QTT-vector of FFT coefficients.
    This is a python translation of the Matlab function "qtt_fft1" in Ivan Oseledets' project TT-Toolbox(https://github.com/oseledets/TT-Toolbox)
    See S. Dolgov, B. Khoromskij, D. Savostyanov,
    Superfast Fourier transform using QTT approximation,
    J. Fourier Anal. Appl., 18(5), 2012.
    """
    d = self.d              # number of QTT cores (logical vector length 2**d)
    r = self.r.copy()       # working copy of the TT ranks, updated in place
    y = self.to_list(self)  # cores as 3D arrays of shape (head_rank, 2, tail_rank)
    # Twiddle base exp(+/-pi*1j), written as a literal constant to match the
    # original MATLAB implementation.
    if inverse:
        twiddle =-1+1.22e-16j # exp(pi*1j)
    else:
        twiddle =-1-1.22e-16j # exp(-pi*1j)
    for i in range(d-1, 0, -1):
        r1= y[i].shape[0] # head r
        r2= y[i].shape[2] # tail r
        crd2 = _np.zeros((r1, 2, r2), order='F', dtype=complex)
        # last block +- (butterfly: normalized sum/difference of the slices)
        crd2[:,0,:]= (y[i][:,0,:] + y[i][:,1,:])/_np.sqrt(2)
        crd2[:,1,:]= (y[i][:,0,:] - y[i][:,1,:])/_np.sqrt(2)
        # last block twiddles
        y[i]= _np.zeros((r1*2, 2, r2),order='F',dtype=complex)
        y[i][0:r1, 0, 0:r2]= crd2[:,0,:]
        y[i][r1:r1*2, 1, 0:r2]= crd2[:,1,:]
        #1..i-1 block twiddles and qr
        rv=1;
        for j in range(0, i):
            cr=y[j]
            r1= cr.shape[0] # head r
            r2= cr.shape[2] # tail r
            if j==0:
                # First core: only the tail rank doubles.
                r[j]=r1
                r[j+1] = r2*2
                y[j] = _np.zeros((r[j], 2, r[j+1]),order='F',dtype=complex)
                y[j][0:r1, :, 0:r2] = cr
                y[j][0:r1, 0, r2 :r[j+1]] = cr[:,0,:]
                y[j][0:r1, 1, r2 :r[j+1]] = twiddle**(1.0/(2**(i-j)))*cr[:,1,:]
            else:
                # Middle cores: both head and tail ranks double.
                r[j]=r1*2
                r[j+1] = r2*2
                y[j] = _np.zeros((r[j], 2, r[j+1]),order='F',dtype=complex)
                y[j][0:r1, :, 0:r2] = cr
                y[j][r1:r[j], 0, r2 :r[j+1]] = cr[:,0,:]
                y[j][r1:r[j], 1, r2 :r[j+1]] = twiddle**(1.0/(2**(i-j)))*cr[:,1,:]
            # Absorb the carry-over factor rv, then re-orthogonalize via QR;
            # the new rv is pushed to the next core.
            y[j] = _np.reshape(y[j],( r[j], 2*r[j+1]),order='F')
            y[j] = _np.dot(rv,y[j])
            r[j] = y[j].shape[0]
            y[j] = _np.reshape(y[j],( 2*r[j], r[j+1]),order='F')
            y[j], rv = _np.linalg.qr(y[j])
            y[j] = _np.reshape(y[j], (r[j], 2, rv.shape[0]),order='F')
        y[i] = _np.reshape(y[i], (r[i], 2*r[i+1]),order='F')
        y[i] = _np.dot(rv,y[i])
        r[i] = rv.shape[0]
        # backward svd: truncate ranks at relative tolerance tol/sqrt(i)
        for j in range(i, 0,-1):
            u,s,v = _np.linalg.svd(y[j], full_matrices=False)
            rnew = my_chop2(s, _np.linalg.norm(s)*tol/_np.sqrt(i))
            u=_np.dot(u[:, 0:rnew], _np.diag(s[0:rnew]))
            v= v[0:rnew, :]
            y[j] = _np.reshape(v, (rnew, 2, r[j+1]),order='F' )
            y[j-1] = _np.reshape(y[j-1], (r[j-1]*2,r[j] ),order='F' )
            y[j-1] = _np.dot(y[j-1], u)
            r[j] = rnew
            y[j-1] = _np.reshape(y[j-1], (r[j-1],r[j]*2 ),order='F' )
        y[0] = _np.reshape(y[0], (r[0],2, r[1]), order='F' )
    # FFT on the first block: 2-point DFT applied along the mode dimension
    y[0]=_np.transpose(y[0],(1,0,2))
    y[0]=_np.reshape(y[0],(2, r[0]*r[1]),order='F')
    y[0]= _np.dot( _np.array([[1,1],[1,-1]]), y[0])/_np.sqrt(2)
    y[0]=_np.reshape(y[0],(2, r[0], r[1]),order='F')
    y[0]=_np.transpose(y[0],(1,0,2))
    if bitReverse:
        # Reverse the train (bit-reversal permutation of the QTT cores)
        y2=[None]*d
        for i in range(d):
            y2[d-i-1]= _np.transpose(y[i],(2,1,0))
        y=self.from_list(y2)
    else: # for multi-dimensional qtt_fft
        y=self.from_list(y)
    return y
:param tol: error tolerance.
:type tol: float
:param inverse: whether do an inverse FFT or not.
:type inverse: Boolean
:param bitReverse: whether do the bit reversion or not. If this function is used as a subroutine for multi-dimensional qtt-fft, this option
need to be set False.
:type bitReverse: Boolean.
:returns: QTT-vector of FFT coefficients.
This is a python translation of the Matlab function "qtt_fft1" in Ivan Oseledets' project TT-Toolbox(https://github.com/oseledets/TT-Toolbox)
See S. Dolgov, B. Khoromskij, D. Savostyanov,
Superfast Fourier transform using QTT approximation,
J. Fourier Anal. Appl., 18(5), 2012. |
def digest(self, data=None):
    """
    Finalize the computation and return the keyed MAC value instead of a
    plain digest. Optionally absorbs ``data`` before finalizing.
    """
    if data is not None:
        self.update(data)
    out = create_string_buffer(256)
    out_len = c_size_t(256)
    # EVP_DigestSignFinal writes the MAC into `out` and its length into
    # `out_len`; a non-positive return value signals failure.
    if libcrypto.EVP_DigestSignFinal(self.ctx, out, pointer(out_len)) <= 0:
        raise DigestError('SignFinal')
    self.digest_finalized = True
    return out.raw[:out_len.value]
just digest. |
def equivalent_to(self, token):
    """
    Gets all tokens which match the character and scopes of a reference token
    :param token: :class:`esi.models.Token`
    :return: :class:`esi.managers.TokenQueryset`
    """
    same_character = self.filter(character_id=token.character_id)
    same_scopes = same_character.require_scopes_exact(token.scopes.all())
    # Keep tokens owned by the same user or by nobody, and never return
    # the reference token itself.
    ownership = models.Q(user=token.user) | models.Q(user__isnull=True)
    return same_scopes.filter(ownership).exclude(pk=token.pk)
:param token: :class:`esi.models.Token`
:return: :class:`esi.managers.TokenQueryset` |
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n | Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided. |
def parse(self, stream, full_statusline=None):
    """
    Parse ``stream`` for a status line and headers and return a
    StatusAndHeaders object. Continuation headers (lines beginning with
    a space or tab) are folded into the preceding header's value.

    :param stream: readable object yielding raw header lines.
    :param full_statusline: optional pre-read status line; when None the
        first line is read from ``stream``.
    :raises EOFError: when the stream is already exhausted.
    :raises StatusAndHeadersParserException: when ``self.verify`` is set
        and the status line does not start with an expected protocol.
    """
    # status line w newlines intact
    if full_statusline is None:
        full_statusline = stream.readline()
    full_statusline = self.decode_header(full_statusline)
    statusline, total_read = _strip_count(full_statusline, 0)
    headers = []
    # at end of stream
    if total_read == 0:
        raise EOFError()
    elif not statusline:
        # Blank status line: return an empty result instead of failing.
        return StatusAndHeaders(statusline=statusline,
                                headers=headers,
                                protocol='',
                                total_len=total_read)
    # validate only if verify is set
    if self.verify:
        protocol_status = self.split_prefix(statusline, self.statuslist)
        if not protocol_status:
            msg = 'Expected Status Line starting with {0} - Found: {1}'
            msg = msg.format(self.statuslist, statusline)
            raise StatusAndHeadersParserException(msg, full_statusline)
    else:
        protocol_status = statusline.split(' ', 1)
    line, total_read = _strip_count(self.decode_header(stream.readline()), total_read)
    while line:
        result = line.split(':', 1)
        if len(result) == 2:
            name = result[0].rstrip(' \t')
            value = result[1].lstrip()
        else:
            # Malformed header with no colon: value stays None and the
            # header is dropped below, though its continuations are consumed.
            name = result[0]
            value = None
        next_line, total_read = _strip_count(self.decode_header(stream.readline()),
                                             total_read)
        # append continuation lines, if any
        while next_line and next_line.startswith((' ', '\t')):
            if value is not None:
                value += next_line
            next_line, total_read = _strip_count(self.decode_header(stream.readline()),
                                                 total_read)
        if value is not None:
            header = (name, value)
            headers.append(header)
        line = next_line
    # The status line returned excludes the protocol token.
    if len(protocol_status) > 1:
        statusline = protocol_status[1].strip()
    else:
        statusline = ''
    return StatusAndHeaders(statusline=statusline,
                            headers=headers,
                            protocol=protocol_status[0],
                            total_len=total_read)
return a StatusAndHeaders object
support continuation headers starting with space or tab |
def indent_list(inlist, level):
    """Join a list of strings, one per line, prefixing each with ``level`` spaces."""
    prefix = ' ' * level
    # The separator carries the indent for every line after the first;
    # the leading prefix covers the first line.
    return prefix + ('\n' + prefix).join(inlist)
def _get_time_at_horizon(self, utc_time, obslon, obslat, **kwargs):
    """Get the time closest in time to *utc_time* when the
    satellite is at the horizon relative to the position of an observer on
    ground (altitude = 0)

    Keyword args:
        precision (timedelta): stop when successive time estimates differ
            by less than this (default 1 millisecond).
        max_iterations (int): iteration cap for the search (default 100).

    Note: This is considered deprecated and it's functionality is currently
    replaced by 'get_next_passes'.
    """
    warnings.warn("_get_time_at_horizon is replaced with get_next_passes",
                  DeprecationWarning)
    if "precision" in kwargs:
        precision = kwargs['precision']
    else:
        precision = timedelta(seconds=0.001)
    if "max_iterations" in kwargs:
        nmax_iter = kwargs["max_iterations"]
    else:
        nmax_iter = 100
    sec_step = 0.5
    t_step = timedelta(seconds=sec_step / 2.0)
    # Local derivative: central difference of the |elevation| over sec_step,
    # returned together with the elevation at the earlier sample.
    def fprime(timex):
        el0 = self.get_observer_look(timex - t_step,
                                     obslon, obslat, 0.0)[1]
        el1 = self.get_observer_look(timex + t_step,
                                     obslon, obslat, 0.0)[1]
        return el0, (abs(el1) - abs(el0)) / sec_step
    tx0 = utc_time - timedelta(seconds=1.0)
    tx1 = utc_time
    idx = 0
    # eps = 500.
    eps = 100.
    # Gradient-descent-style iteration on |elevation|: step size is scaled
    # by the current elevation magnitude until the update drops below
    # `precision` or the iteration budget runs out.
    while abs(tx1 - tx0) > precision and idx < nmax_iter:
        tx0 = tx1
        fpr = fprime(tx0)
        # When the elevation is high the scale is high, and when
        # the elevation is low the scale is low
        # var_scale = np.abs(np.sin(fpr[0] * np.pi/180.))
        # var_scale = np.sqrt(var_scale)
        var_scale = np.abs(fpr[0])
        tx1 = tx0 - timedelta(seconds=(eps * var_scale * fpr[1]))
        idx = idx + 1
        # print idx, tx0, tx1, var_scale, fpr
        # Nudge away from the start point on the first step so the search
        # does not immediately return utc_time itself.
        if abs(tx1 - utc_time) < precision and idx < 2:
            tx1 = tx1 + timedelta(seconds=1.0)
    if abs(tx1 - tx0) <= precision and idx < nmax_iter:
        return tx1
    else:
        # Did not converge within the iteration budget.
        return None
satellite is at the horizon relative to the position of an observer on
ground (altitude = 0)
Note: This is considered deprecated and it's functionality is currently
replaced by 'get_next_passes'. |
def cleanDir(self):
    ''' Remove existing json datafiles in the target directory. '''
    if not os.path.isdir(self.outdir):
        return
    # Only the three known output files are removed; anything else is kept.
    for name in ('tout.json', 'nout.json', 'hout.json'):
        target = os.path.join(self.outdir, name)
        if os.path.isfile(target):
            os.remove(target)
def get_image_hashes(image_path, version=None, levels=None):
    '''get_image_hashes returns the hash for an image across all levels. This is the quickest,
    easiest way to define a container's reproducibility on each level.
    '''
    if levels is None:
        levels = get_levels(version=version)
    # One hash per reproducibility level, keyed by the level's name.
    return {name: get_image_hash(image_path, level_filter=level_filter)
            for name, level_filter in levels.items()}
easiest way to define a container's reproducibility on each level. |
def get_line_relative_to_node(self, target_node: ast.AST, offset: int) -> str:
    """
    Return the source line ``offset`` lines away from ``target_node``'s
    first line, indexed into this Function's stored ``lines``.

    Raises:
        IndexError: when ``offset`` takes the request out of bounds of this
        Function's lines.
    """
    index = target_node.lineno - self.node.lineno + offset
    return self.lines[index]
IndexError: when ``offset`` takes the request out of bounds of this
Function's lines. |
def save(self, file_path):
    """
    Method to save the dataset to disk.
    Parameters
    ----------
    file_path : str
        File path to save the current dataset to
    Raises
    ------
    IOError
        If saving to disk is not successful.
    """
    # TODO need a file format that is flexible and efficient to allow the following:
    # 1) being able to read just meta info without having to load the ENTIRE dataset
    #       i.e. use case: compatibility check with #subjects, ids and their classes
    # 2) random access layout: being able to read features for a single subject!
    try:
        file_path = os.path.abspath(file_path)
        with open(file_path, 'wb') as df:
            pickle.dump((self.__data, self.__classes, self.__labels,
                         self.__dtype, self.__description, self.__num_features,
                         self.__feature_names),
                        df)
    except IOError as ioe:
        # Bug fix: the original passed ``format(ioe)`` as a second argument
        # ('...{}', format(ioe)) instead of calling str.format, so the
        # message was never interpolated.
        raise IOError('Unable to save the dataset to file: {}'.format(ioe))
    # Any other exception propagates unchanged; the former bare
    # ``except: raise`` was a no-op and has been removed.
Parameters
----------
file_path : str
File path to save the current dataset to
Raises
------
IOError
If saving to disk is not successful. |
def is_timed_out(self):
    """
    determines whether a Session has been inactive/idle for too long a time
    OR exceeds the absolute time that a Session may exist
    """
    # An already-expired session is trivially timed out.
    if (self.is_expired):
        return True
    # The timeout attributes may be absent entirely; the AttributeError
    # handler below treats that case as "not expired".
    try:
        if (not self.last_access_time):
            msg = ("session.last_access_time for session with id [" +
                   str(self.session_id) + "] is null. This value must be"
                   "set at least once, preferably at least upon "
                   "instantiation. Please check the " +
                   self.__class__.__name__ +
                   " implementation and ensure self value will be set "
                   "(perhaps in the constructor?)")
            raise ValueError(msg)
        """
        Calculate at what time a session would have been last accessed
        for it to be expired at this point. In other words, subtract
        from the current time the amount of time that a session can
        be inactive before expiring. If the session was last accessed
        before this time, it is expired.
        """
        # Either the absolute lifetime or the idle timeout being exceeded
        # counts as timed out.
        if self.is_absolute_timed_out:
            return True
        if self.is_idle_timed_out:
            return True
    except AttributeError:
        msg2 = ("Timeouts not set for session with id [" +
                str(self.session_id) + "]. Session is not considered "
                "expired.")
        logger.debug(msg2)
    # Neither timeout tripped (or timeouts were unset): not timed out.
    return False
OR exceeds the absolute time that a Session may exist |
def cas(self, key, value, cas, time=0, compress_level=-1):
    """
    Store ``value`` under ``key`` only if the server-side CAS value still
    matches ``cas``.

    :param key: Key's name
    :type key: six.string_types
    :param value: A value to be stored on server.
    :type value: object
    :param cas: The CAS value previously obtained from a call to get*.
    :type cas: int
    :param time: Time in seconds that your key will expire.
    :type time: int
    :param compress_level: How much to compress.
        0 = no compression, 1 = fastest, 9 = slowest but best,
        -1 = default compression level.
    :type compress_level: int
    :return: True in case of success and False in case of failure
    :rtype: bool
    """
    # Route to the server that owns this key and delegate the CAS there.
    target = self._get_server(key)
    return target.cas(key, value, cas, time, compress_level)
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param cas: The CAS value previously obtained from a call to get*.
:type cas: int
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool |
def get_tile_image(self, x, y, l):
    """ Get a tile image, respecting current animations
    :param x: x coordinate
    :param y: y coordinate
    :param l: layer
    :type x: int
    :type y: int
    :type l: int
    :rtype: pygame.Surface
    """
    # disabled for now, re-enable when support for generic maps is restored
    # # since the tile has been queried, assume it wants to be checked
    # # for animations sometime in the future
    # if self._animation_queue:
    #     self._tracked_tiles.add((x, y, l))
    position = (x, y, l)
    if position in self._animated_tile:
        # Animated tile: return the frame currently on display.
        return self._animated_tile[position]
    # Static tile: fall back to the underlying map data, if any.
    return self._get_tile_image(x, y, l)
:param x: x coordinate
:param y: y coordinate
:param l: layer
:type x: int
:type y: int
:type l: int
:rtype: pygame.Surface |
def on_click(self, button, **kwargs):
    """
    Scroll events (buttons 4 and 5) move between groups; every other
    click is forwarded to the currently active module, if there is one.
    """
    if button in (4, 5):
        # Scroll up/down: let the group container handle navigation.
        return super().on_click(button, **kwargs)
    active = self.get_active_module()
    if not active:
        return None
    return active.on_click(button, **kwargs)
Pass everything else to the module itself |
def _HuntFlowCondition(self, condition):
    """Builds an SQL condition matching db.HuntFlowsCondition."""
    # Alias the enum class once; each branch maps a condition to the SQL
    # fragment plus its bound flow-state parameter(s).
    flow_state = rdf_flow_objects.Flow.FlowState
    if condition == db.HuntFlowsCondition.UNSET:
        return "", []
    if condition == db.HuntFlowsCondition.FAILED_FLOWS_ONLY:
        return "AND flow_state = %s ", [int(flow_state.ERROR)]
    if condition == db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY:
        return "AND flow_state = %s ", [int(flow_state.FINISHED)]
    if condition == db.HuntFlowsCondition.COMPLETED_FLOWS_ONLY:
        # Completed covers either outcome: finished or errored.
        return ("AND (flow_state = %s OR flow_state = %s) ",
                [int(flow_state.FINISHED), int(flow_state.ERROR)])
    if condition == db.HuntFlowsCondition.FLOWS_IN_PROGRESS_ONLY:
        return "AND flow_state = %s ", [int(flow_state.RUNNING)]
    if condition == db.HuntFlowsCondition.CRASHED_FLOWS_ONLY:
        return "AND flow_state = %s ", [int(flow_state.CRASHED)]
    raise ValueError("Invalid condition value: %r" % condition)
def get_pending_servermanager():
    '''
    Determine whether there are pending Server Manager tasks that require a
    reboot.
    .. versionadded:: 2016.11.0
    Returns:
        bool: ``True`` if there are pending Server Manager tasks, otherwise
        ``False``
    CLI Example:
    .. code-block:: bash
        salt '*' system.get_pending_servermanager
    '''
    key = r'SOFTWARE\Microsoft\ServerManager'
    vname = 'CurrentRebootAttempts'
    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
    if not reg_ret['success']:
        log.debug('Unable to access key: %s', key)
        return False
    log.debug('Found key: %s', key)
    # The value data can be '(value not set)', in which case the int cast
    # fails — no reboot is actually pending then.
    try:
        return int(reg_ret['vdata']) > 0
    except ValueError:
        return False
return False | Determine whether there are pending Server Manager tasks that require a
reboot.
.. versionadded:: 2016.11.0
Returns:
bool: ``True`` if there are pending Server Manager tasks, otherwise
``False``
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_servermanager |
def start_instance(self, build):
    """
    I start a new instance of a VM.
    If a base_image is specified, I will make a clone of that otherwise i will
    use image directly.
    If i'm not given libvirt domain definition XML, I will look for my name
    in the list of defined virtual machines and start that.
    """
    if self.domain is not None:
        log.msg("Cannot start_instance '%s' as already active" %
                self.workername)
        return False
    yield self._prepare_base_image()
    try:
        if self.xml:
            # Create a transient domain from the supplied XML.
            self.domain = yield self.connection.create(self.xml)
        else:
            # No XML: look up a predefined domain by worker name.
            self.domain = yield self.connection.lookupByName(self.workername)
            yield self.domain.create()
    except Exception:
        # Bug fix: the two adjacent string literals were missing a space and
        # logged "...triggeringa new build check".
        log.err(failure.Failure(),
                "Cannot start a VM (%s), failing gracefully and triggering "
                "a new build check" % self.workername)
        self.domain = None
        return False
    return True
If a base_image is specified, I will make a clone of that otherwise i will
use image directly.
If i'm not given libvirt domain definition XML, I will look for my name
in the list of defined virtual machines and start that. |
def required_permission(f, level):
    """Assert that subject has access at given level or higher for object."""
    def guarded(request, pid, *args, **kwargs):
        # Reject the request up front unless the subject holds the required
        # access level for the object identified by ``pid``.
        d1_gmn.app.auth.assert_allowed(request, level, pid)
        return f(request, pid, *args, **kwargs)
    # Preserve the wrapped view's metadata (name, docstring, ...).
    return functools.wraps(f)(guarded)
def asm_binary(exprs, dst_reg, sym_to_reg, triple_or_target=None):
    '''
    Compile and assemble an expression for a given architecture.
    Arguments:
    * *exprs*: list of expressions to convert. This can represent a graph of
      expressions.
    * *dst_reg*: final register on which to store the result of the last
      expression. This is represented by a tuple ("reg_name", reg_size_bits).
      Example: ("rax", 64)
    * *sym_to_reg*: a dictionnary that maps Arybo variable name to registers
      (described as tuple, see *dst_reg*). Example: {"x": ("rdi",64), "y": ("rsi", 64)}
    * *triple_or_target*: LLVM architecture triple to use. Use by default the
      host architecture. Example: "x86_64-unknown-unknown"
    Output:
    * binary stream of the assembled expression for the given target
    Here is an example that will compile and assemble "x+y" for x86_64::
        from arybo.lib import MBA
        from arybo.lib import mba_exprs
        from arybo.lib.exprs_asm import asm_binary
        mba = MBA(64)
        x = mba.var("x")
        y = mba.var("y")
        e = mba_exprs.ExprBV(x) + mba_exprs.ExprBV(y)
        code = asm_binary([e], ("rax", 64), {"x": ("rdi", 64), "y": ("rsi", 64)}, "x86_64-unknown-unknown")
        print(code.hex())
    which outputs ``488d0437`` (which is equivalent to ``lea rax,[rdi+rsi*1]``).
    '''
    # llvmlite is an optional dependency; fail loudly when it is missing.
    if not llvmlite_available:
        raise RuntimeError("llvmlite module unavailable! can't assemble...")
    target = llvm_get_target(triple_or_target)
    # Build an LLVM module containing a single naked '__arybo' function.
    M = asm_module(exprs, dst_reg, sym_to_reg, target)
    # Use LLVM to compile the '__arybo' function. As the function is naked and
    # is the only, we just got to dump the .text section to get the binary
    # assembly.
    # No need for keystone or whatever hype stuff. llvmlite does the job.
    M = llvm.parse_assembly(str(M))
    M.verify()
    target_machine = target.create_target_machine()
    obj_bin = target_machine.emit_object(M)
    obj = llvm.ObjectFileRef.from_data(obj_bin)
    # The .text section holds the machine code of the only function.
    for s in obj.sections():
        if s.is_text():
            return s.data()
    raise RuntimeError("unable to get the assembled binary!")
Arguments:
* *exprs*: list of expressions to convert. This can represent a graph of
expressions.
* *dst_reg*: final register on which to store the result of the last
expression. This is represented by a tuple ("reg_name", reg_size_bits).
Example: ("rax", 64)
* *sym_to_reg*: a dictionnary that maps Arybo variable name to registers
(described as tuple, see *dst_reg*). Example: {"x": ("rdi",64), "y": ("rsi", 64)}
* *triple_or_target*: LLVM architecture triple to use. Use by default the
host architecture. Example: "x86_64-unknown-unknown"
Output:
* binary stream of the assembled expression for the given target
Here is an example that will compile and assemble "x+y" for x86_64::
from arybo.lib import MBA
from arybo.lib import mba_exprs
from arybo.lib.exprs_asm import asm_binary
mba = MBA(64)
x = mba.var("x")
y = mba.var("y")
e = mba_exprs.ExprBV(x) + mba_exprs.ExprBV(y)
code = asm_binary([e], ("rax", 64), {"x": ("rdi", 64), "y": ("rsi", 64)}, "x86_64-unknown-unknown")
print(code.hex())
which outputs ``488d0437`` (which is equivalent to ``lea rax,[rdi+rsi*1]``). |
async def _get_subscriptions(self) -> Tuple[Set[Text], Text]:
    """
    Return the currently active page subscriptions as a
    ``(field_names, callback_url)`` tuple. When no 'page' scope exists,
    an empty set and an empty URL are returned.
    """
    url, params = self._get_subscriptions_endpoint()
    async with self.session.get(url, params=params) as response:
        await self._handle_fb_response(response)
        payload = await response.json()
    # Only the 'page' scope carries the field subscriptions we care about.
    for scope in payload['data']:
        if scope['object'] != 'page':
            continue
        fields = set(field['name'] for field in scope['fields'])
        return fields, scope['callback_url']
    return set(), ''
def get_source_value(self, obj, source, **kwargs):
    """
    Treat ``field`` as a nested sub-Column instance, which explicitly stands in as the object
    to which term coercions and the query type lookup are delegated.
    """
    values = []
    # Delegate each expanded sub-source to the parent implementation and
    # flatten the per-source results into one list.
    for expanded in self.expand_source(source):
        partial = super(CompoundColumn, self).get_source_value(obj, expanded, **kwargs)
        values.extend(partial)
    return values
to which term coercions and the query type lookup are delegated. |
def has_snap(self):
    """ Return True when at least one snapshot is not being destroyed.

    Snaps in the "destroying" state are not counted.
    :return: false if no snaps or all snaps are destroying.
    """
    # any() short-circuits on the first live snapshot, unlike the original
    # len(list(filter(...))) > 0 which always materialized the whole list.
    return any(s.state != SnapStateEnum.DESTROYING for s in self.snapshots)
:return: false if no snaps or all snaps are destroying. |
def get_sequence_rules_by_ids(self, sequence_rule_ids):
    """Gets a ``SequenceRuleList`` corresponding to the given ``IdList``.
    arg: sequence_rule_ids (osid.id.IdList): the list of ``Ids``
        to retrieve
    return: (osid.assessment.authoring.SequenceRuleList) - the
        returned ``SequenceRule`` list
    raise: NotFound - an ``Id`` was not found
    raise: NullArgument - ``sequence_rule_ids`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_ids
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('assessment_authoring',
                                     collection='SequenceRule',
                                     runtime=self._runtime)
    # Translate the OSID Ids into Mongo ObjectIds for the $in query.
    object_id_list = [
        ObjectId(self._get_id(i, 'assessment_authoring').get_identifier())
        for i in sequence_rule_ids
    ]
    result = collection.find(
        dict({'_id': {'$in': object_id_list}},
             **self._view_filter()))
    # Index the documents by _id in one pass (keeping the FIRST document
    # per id, matching the original linear scan) so that restoring the
    # caller's requested order is O(n + m) instead of O(n * m).
    by_id = {}
    for object_map in result:
        by_id.setdefault(object_map['_id'], object_map)
    sorted_result = [by_id[object_id]
                     for object_id in object_id_list
                     if object_id in by_id]
    return objects.SequenceRuleList(sorted_result, runtime=self._runtime, proxy=self._proxy)
arg: sequence_rule_ids (osid.id.IdList): the list of ``Ids``
to retrieve
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: NotFound - an ``Id`` was not found
raise: NullArgument - ``sequence_rule_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def inject(self,
           require: Optional[List[Text]] = None,
           fail: Text = 'missing_context',
           var_name: Text = 'context'):
    """
    This is a decorator intended to be used on states (and actually only
    work on state handlers).
    The `require` argument is a list of keys to be checked in the context.
    If at least one of them is missing, then instead of calling the handler
    another method will be called. By default the method is
    `missing_context` but it can be configured using the `fail` argument.
    The context will be injected into the handler as a keyword arg. By
    default, the arg is expected to be named `context` but you can change
    it to anything you'd like using `var_name`.
    See `create_context_store()` for a full example.
    """
    def decorator(func):
        # Health check attached to the handler: verify that the state
        # class actually defines the fallback method named by `fail`.
        async def health_check(cls) -> Iterator[HealthCheckFail]:
            if not callable(getattr(cls, fail, None)):
                yield HealthCheckFail(
                    '00001',
                    f'State "{cls.__name__}" has no method "{fail}" to '
                    f'fall back to if required attributes are missing '
                    f'from the context.'
                )

        # The fallback is only ever invoked when keys are required, so
        # the health check is only attached in that case.
        if require:
            func.health_check = health_check

        @wraps(func)
        async def wrapper(state: Union[BaseState, BaseTrigger], **kwargs):
            # The context store key is scoped per-conversation.
            conv_id = state.request.conversation.id
            key = f'context::{self.name}::{conv_id}'

            x = self.open(key)

            async with x as context:
                # If any required key is missing, divert to the fallback
                # handler instead of calling the wrapped handler.
                for item in (require or []):
                    if item not in context:
                        return await getattr(state, fail)(state, **kwargs)

                # Inject the context under the configured keyword name.
                kwargs[var_name] = context
                return await func(state, **kwargs)

        return wrapper
    return decorator
work on state handlers).
The `require` argument is a list of keys to be checked in the context.
If at least one of them is missing, then instead of calling the handler
another method will be called. By default the method is
`missing_context` but it can be configured using the `fail` argument.
The context will be injected into the handler as a keyword arg. By
default, the arg is expected to be named `context` but you can change
it to anything you'd like using `var_name`.
See `create_context_store()` for a full example. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.