| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def _get_optimal_thresholds(nd_dict, quantized_dtype, num_bins=8001, num_quantized_bins=255, logger=None):
"""Given a ndarray dict, find the optimal threshold for quantizing each value of the key."""
if stats is None:
raise ImportError('scipy.stats is required for running entropy mode of calculating'
' the optimal thresholds for quantizing FP32 ndarrays into int8.'
' Please check if the scipy python bindings are installed.')
assert isinstance(nd_dict, dict)
if logger is not None:
logger.info('Calculating optimal thresholds for quantization using KL divergence'
' with num_bins=%d and num_quantized_bins=%d' % (num_bins, num_quantized_bins))
th_dict = {}
# copy nd_dict keys since the keys() only returns a view in python3
layer_names = list(nd_dict.keys())
for name in layer_names:
assert name in nd_dict
min_val, max_val, min_divergence, opt_th = \
_get_optimal_threshold(nd_dict[name], quantized_dtype, num_bins=num_bins,
num_quantized_bins=num_quantized_bins)
del nd_dict[name] # release the memory of ndarray
if min_val < 0:
th_dict[name] = (-opt_th, opt_th)
else:
th_dict[name] = (0, opt_th)
if logger is not None:
logger.info('layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f'
% (name, min_val, max_val, min_divergence, opt_th))
return th_dict | Given a ndarray dict, find the optimal threshold for quantizing each value of the key. |
def _parse_prior(self):
"""Read csv paths to list of dataframes."""
paths = self.prior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
for path in paths:
parsed_output = _read_output(path)
for sample, sample_stats, config, adaptation, timing in parsed_output:
chain_data.append(
{
"sample": sample,
"sample_stats": sample_stats,
"configuration_info": config,
"adaptation_info": adaptation,
"timing_info": timing,
}
)
self.prior = [item["sample"] for item in chain_data]
self.sample_stats_prior = [item["sample_stats"] for item in chain_data] | Read csv paths to list of dataframes. |
def url_with_auth(regex, view, kwargs=None, name=None, prefix=''):
"""
if view is string based, must be a full path
"""
from djapiauth.auth import api_auth
if isinstance(view, six.string_types): # view is a string, must be full path
return url(regex, api_auth(import_by_path(prefix + "." + view if prefix else view)))
elif isinstance(view, (list, tuple)): # include
return url(regex, view, name, prefix, **kwargs)
else: # view is an object
return url(regex, api_auth(view)) | if view is string based, must be a full path |
def prepare(self, start=-1):
    """Setup the parser for parsing.
    Takes the starting symbol as an argument.
    """
    if start == -1:
        # Default to the grammar's designated start symbol.
        start = self.grammar.start
    self.root = None
    current_node = Node(start, None, [], 0, 0)
    self.stack = []
    # NOTE(review): symbol numbers appear offset by 256 to index the DFA
    # table (non-terminals start at 256 in CPython-style grammars) -- confirm.
    self.stack.append((self.grammar.dfas[start - 256], 0, current_node)) | Setup the parser for parsing.
Takes the starting symbol as an argument. |
def GET(self, *args, **kwargs):
    """ GET request """
    # Thin dispatcher: forward positional/keyword args to the generic
    # API handler together with the GET method constant.
    return self._handle_api(self.API_GET, args, kwargs) | GET request |
def _chip_erase_program_double_buffer(self, progress_cb=_stub_progress):
    """! @brief Double-buffered program by first performing an erase all.

    Erases the whole chip in one operation, then programs every page that
    contains non-erased data, overlapping data transfer with programming by
    ping-ponging between two on-target page buffers. ``progress_cb`` is called
    with a float in [0, 1] weighted by erase/program cost.
    """
    LOG.debug("%i of %i pages have erased data", len(self.page_list) - self.chip_erase_count, len(self.page_list))
    progress_cb(0.0)
    progress = 0
    # Full-chip erase first; its cost is folded into the progress weight.
    self.flash.init(self.flash.Operation.ERASE)
    self.flash.erase_all()
    self.flash.uninit()
    progress += self.flash.get_flash_info().erase_weight
    progress_cb(float(progress) / float(self.chip_erase_weight))
    # Set up page and buffer info.
    current_buf = 0
    next_buf = 1
    page, i = self._next_unerased_page(0)
    assert page is not None
    # Load first page buffer
    self.flash.load_page_buffer(current_buf, page.addr, page.data)
    self.flash.init(self.flash.Operation.PROGRAM)
    while page is not None:
        # Kick off this page program.
        current_addr = page.addr
        current_weight = page.get_program_weight()
        self.flash.start_program_page_with_buffer(current_buf, current_addr)
        # Get next page and load it.
        # Preloading next_buf while current_buf programs is what makes this
        # double-buffered: transfer and programming overlap.
        page, i = self._next_unerased_page(i)
        if page is not None:
            self.flash.load_page_buffer(next_buf, page.addr, page.data)
        # Wait for the program to complete.
        result = self.flash.wait_for_completion()
        if result != 0:
            raise FlashProgramFailure('program_page(0x%x) error: %i'
                                      % (current_addr, result), current_addr, result)
        # Swap buffers.
        current_buf, next_buf = next_buf, current_buf
        # Update progress.
        progress += current_weight
        progress_cb(float(progress) / float(self.chip_erase_weight))
    self.flash.uninit()
    progress_cb(1.0)
    return FlashBuilder.FLASH_CHIP_ERASE | ! @brief Double-buffered program by first performing an erase all. |
def inc(self):
    """Corrected inclination, taking into account backsight and clino corrections."""
    front = self.get('INC', None)
    back = self.get('INC2', None)
    if front is None and back is None:
        # No inclination reading at all.
        return None
    if back is None:
        # Frontsight only.
        return front
    if front is None:
        # Backsight only: flip its sign.
        return -1 * back
    # Both sights present: average frontsight with the negated backsight.
    return (front - back) / 2.0
def list_storage_accounts(call=None):
'''
List storage accounts within the subscription.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_storage_accounts function must be called with '
'-f or --function'
)
storconn = get_conn(client_type='storage')
ret = {}
try:
accounts_query = storconn.storage_accounts.list()
accounts = __utils__['azurearm.paged_object_to_list'](accounts_query)
for account in accounts:
ret[account['name']] = account
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('storage', exc.message)
ret = {'Error': exc.message}
return ret | List storage accounts within the subscription. |
def update_module_state(cursor, module_ident,
                        state_name, recipe): # pragma: no cover
    """This updates the module's state in the database.

    Resolves ``state_name`` to its id via the ``modulestates`` table and
    stamps ``baked = now()`` on the matching row in one statement.
    """
    cursor.execute("""\
UPDATE modules
SET stateid = (
  SELECT stateid FROM modulestates WHERE statename = %s
), recipe = %s, baked = now() WHERE module_ident = %s""",
    (state_name, recipe, module_ident)) | This updates the module's state in the database. |
def move(x, y, absolute=True, duration=0):
    """
    Moves the mouse. If `absolute`, to position (x, y), otherwise move relative
    to the current position. If `duration` is non-zero, animates the movement.
    """
    x = int(x)
    y = int(y)
    # Requires an extra system call on Linux, but `move_relative` is measured
    # in millimeters so we would lose precision.
    position_x, position_y = get_position()
    if not absolute:
        # Convert relative coordinates to absolute once, up front.
        x = position_x + x
        y = position_y + y
    if duration:
        start_x = position_x
        start_y = position_y
        dx = x - start_x
        dy = y - start_y
        if dx == 0 and dy == 0:
            # Nothing to animate; just honor the requested duration.
            _time.sleep(duration)
        else:
            # 120 movements per second.
            # Round and keep float to ensure float division in Python 2
            steps = max(1.0, float(int(duration * 120.0)))
            # Recursive calls with duration=0 perform each interpolated hop.
            for i in range(int(steps)+1):
                move(start_x + dx*i/steps, start_y + dy*i/steps)
                _time.sleep(duration/steps)
    else:
        _os_mouse.move_to(x, y) | Moves the mouse. If `absolute`, to position (x, y), otherwise move relative
to the current position. If `duration` is non-zero, animates the movement. |
def check_no_element_by_selector(self, selector):
"""Assert an element does not exist matching the given selector."""
elems = find_elements_by_jquery(world.browser, selector)
if elems:
raise AssertionError("Expected no matching elements, found {}.".format(
len(elems))) | Assert an element does not exist matching the given selector. |
def isodate(datestamp=None, microseconds=False):
"""Return current or given time formatted according to ISO-8601."""
datestamp = datestamp or datetime.datetime.now()
if not microseconds:
usecs = datetime.timedelta(microseconds=datestamp.microsecond)
datestamp = datestamp - usecs
return datestamp.isoformat(b' ' if PY2 else u' ') | Return current or given time formatted according to ISO-8601. |
def add_lb_nodes(self, lb_id, nodes):
    """
    Adds nodes to an existing LBaaS instance
    :param string lb_id: Balancer id
    :param list nodes: Nodes to add. {address, port, [condition]}
    :rtype :class:`list`
    """
    log.info("Adding load balancer nodes %s" % nodes)
    resp, body = self._request(
        'post',
        '/loadbalancers/%s/nodes' % lb_id,
        data={'nodes': nodes})
    # Response object is discarded; only the parsed body is returned.
    return body | Adds nodes to an existing LBaaS instance
:param string lb_id: Balancer id
:param list nodes: Nodes to add. {address, port, [condition]}
:rtype :class:`list` |
def attributes(self, params=None):
    """
    Gets the attributes from a Group/Indicator or Victim
    Yields: attribute json
    """
    if params is None:
        params = {}
    if not self.can_update():
        # NOTE(review): 910 presumably signals "type not updatable/readable
        # here" -- confirm against the tcex error-code map.
        self._tcex.handle_error(910, [self.type])
    # Stream attributes straight from the API request generator.
    for a in self.tc_requests.attributes(
        self.api_type, self.api_sub_type, self.unique_id, owner=self.owner, params=params
    ):
        yield a | Gets the attributes from a Group/Indicator or Victim
Yields: attribute json |
def _build(self, inputs, keep_prob=None, is_training=None,
           test_local_stats=True):
    """Connects the AlexNet module into the graph.
    The is_training flag only controls the batch norm settings, if `False` it
    does not force no dropout by overriding any input `keep_prob`. To avoid any
    confusion this may cause, if `is_training=False` and `keep_prob` would cause
    dropout to be applied, an error is thrown.
    Args:
      inputs: A Tensor of size [batch_size, input_height, input_width,
        input_channels], representing a batch of input images.
      keep_prob: A scalar Tensor representing the dropout keep probability.
        When `is_training=False` this must be None or 1 to give no dropout.
      is_training: Boolean to indicate if we are currently training. Must be
        specified if batch normalization or dropout is used.
      test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
        normalization should use local batch statistics at test time.
        By default `True`.
    Returns:
      A Tensor of size [batch_size, output_size], where `output_size` depends
      on the mode the network was constructed in.
    Raises:
      base.IncompatibleShapeError: If any of the input image dimensions
        (input_height, input_width) are too small for the given network mode.
      ValueError: If `keep_prob` is not None or 1 when `is_training=False`.
      ValueError: If `is_training` is not explicitly specified when using
        batch normalization.
    """
    # Check input shape
    if (self._use_batch_norm or keep_prob is not None) and is_training is None:
        raise ValueError("Boolean is_training flag must be explicitly specified "
                         "when using batch normalization or dropout.")
    input_shape = inputs.get_shape().as_list()
    if input_shape[1] < self._min_size or input_shape[2] < self._min_size:
        raise base.IncompatibleShapeError(
            "Image shape too small: ({:d}, {:d}) < {:d}".format(
                input_shape[1], input_shape[2], self._min_size))
    net = inputs
    # Check keep prob
    # The check is deferred into the graph: at run time keep_prob must be 1
    # whenever is_training is False, enforced via a control dependency.
    if keep_prob is not None:
        valid_inputs = tf.logical_or(is_training, tf.equal(keep_prob, 1.))
        keep_prob_check = tf.assert_equal(
            valid_inputs, True,
            message="Input `keep_prob` must be None or 1 if `is_training=False`.")
        with tf.control_dependencies([keep_prob_check]):
            net = tf.identity(net)
    # Convolutional trunk: conv [-> batch norm] -> relu [-> max pool].
    for i, params in enumerate(self._conv_layers):
        output_channels, conv_params, max_pooling = params
        kernel_size, stride = conv_params
        conv_mod = conv.Conv2D(
            name="conv_{}".format(i),
            output_channels=output_channels,
            kernel_shape=kernel_size,
            stride=stride,
            padding=conv.VALID,
            initializers=self._initializers,
            partitioners=self._partitioners,
            regularizers=self._regularizers)
        # Only record submodules on the first connection to the graph.
        if not self.is_connected:
            self._conv_modules.append(conv_mod)
        net = conv_mod(net)
        if self._use_batch_norm:
            bn = batch_norm.BatchNorm(**self._batch_norm_config)
            net = bn(net, is_training, test_local_stats)
        net = tf.nn.relu(net)
        if max_pooling is not None:
            pooling_kernel_size, pooling_stride = max_pooling
            net = tf.nn.max_pool(
                net,
                ksize=[1, pooling_kernel_size, pooling_kernel_size, 1],
                strides=[1, pooling_stride, pooling_stride, 1],
                padding=conv.VALID)
    net = basic.BatchFlatten(name="flatten")(net)
    # Fully-connected head: linear [-> batch norm] -> relu [-> dropout].
    for i, output_size in enumerate(self._fc_layers):
        linear_mod = basic.Linear(
            name="fc_{}".format(i),
            output_size=output_size,
            initializers=self._initializers,
            partitioners=self._partitioners)
        if not self.is_connected:
            self._linear_modules.append(linear_mod)
        net = linear_mod(net)
        if self._use_batch_norm and self._bn_on_fc_layers:
            bn = batch_norm.BatchNorm(**self._batch_norm_config)
            net = bn(net, is_training, test_local_stats)
        net = tf.nn.relu(net)
        if keep_prob is not None:
            net = tf.nn.dropout(net, keep_prob=keep_prob)
    return net | Connects the AlexNet module into the graph.
The is_training flag only controls the batch norm settings, if `False` it
does not force no dropout by overriding any input `keep_prob`. To avoid any
confusion this may cause, if `is_training=False` and `keep_prob` would cause
dropout to be applied, an error is thrown.
Args:
inputs: A Tensor of size [batch_size, input_height, input_width,
input_channels], representing a batch of input images.
keep_prob: A scalar Tensor representing the dropout keep probability.
When `is_training=False` this must be None or 1 to give no dropout.
is_training: Boolean to indicate if we are currently training. Must be
specified if batch normalization or dropout is used.
test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
normalization should use local batch statistics at test time.
By default `True`.
Returns:
A Tensor of size [batch_size, output_size], where `output_size` depends
on the mode the network was constructed in.
Raises:
base.IncompatibleShapeError: If any of the input image dimensions
(input_height, input_width) are too small for the given network mode.
ValueError: If `keep_prob` is not None or 1 when `is_training=False`.
ValueError: If `is_training` is not explicitly specified when using
batch normalization. |
def btc_bitcoind_tx_serialize( tx ):
    """
    Convert a *Bitcoind*-given transaction into its hex string.
    tx format is {'vin': [...], 'vout': [...], 'locktime': ..., 'version': ...},
    with the same formatting rules as getrawtransaction.
    (in particular, each value in vout is a Decimal, in BTC)
    """
    tx_ins = []
    tx_outs = []
    try:
        # Translate each input to the internal format expected by
        # btc_tx_serialize.
        for inp in tx['vin']:
            next_inp = {
                "outpoint": {
                    "index": int(inp['vout']),
                    "hash": str(inp['txid'])
                }
            }
            if 'sequence' in inp:
                next_inp['sequence'] = int(inp['sequence'])
            else:
                # Default sequence: final (UINT_MAX).
                next_inp['sequence'] = UINT_MAX
            if 'scriptSig' in inp:
                next_inp['script'] = str(inp['scriptSig']['hex'])
            else:
                next_inp['script'] = ""
            if 'txinwitness' in inp:
                next_inp['witness_script'] = btc_witness_script_serialize(inp['txinwitness'])
            tx_ins.append(next_inp)
        for out in tx['vout']:
            # Sanity bound on output value (in BTC).
            assert out['value'] < 1000, "High transaction value\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True)
            next_out = {
                # Convert BTC (Decimal) to integer satoshis.
                'value': int(Decimal(out['value'] * 10**8)),
                'script': str(out['scriptPubKey']['hex'])
            }
            tx_outs.append(next_out)
        tx_fields = {
            "locktime": int(tx['locktime']),
            "version": int(tx['version']),
            "ins": tx_ins,
            "outs": tx_outs
        }
        tx_serialized = btc_tx_serialize( tx_fields )
        return str(tx_serialized)
    except KeyError, ke:
        # Coinbase transactions lack some vin fields; fall back to the raw
        # hex bitcoind already provides when available.
        if btc_bitcoind_tx_is_coinbase(tx) and 'hex' in tx.keys():
            tx_serialized = tx['hex']
            return str(tx_serialized)
        log.error("Key error in:\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True))
        traceback.print_exc()
        raise ke | Convert a *Bitcoind*-given transaction into its hex string.
tx format is {'vin': [...], 'vout': [...], 'locktime': ..., 'version': ...},
with the same formatting rules as getrawtransaction.
(in particular, each value in vout is a Decimal, in BTC) |
def setStr(self, name, n, value):
    """
    setStr(CHeaderMap self, std::string name, limix::muint_t n, std::string value)
    Parameters
    ----------
    name: std::string
    n: limix::muint_t
    value: std::string
    """
    # SWIG-generated thin wrapper: delegate straight to the C++ binding.
    return _core.CHeaderMap_setStr(self, name, n, value) | setStr(CHeaderMap self, std::string name, limix::muint_t n, std::string value)
Parameters
----------
name: std::string
n: limix::muint_t
value: std::string |
def createmeta(self,
               projectKeys=None,
               projectIds=[],
               issuetypeIds=None,
               issuetypeNames=None,
               expand=None,
               ):
    """Get the metadata required to create issues, optionally filtered by projects and issue types.
    :param projectKeys: keys of the projects to filter the results with.
        Can be a single value or a comma-delimited string. May be combined
        with projectIds.
    :type projectKeys: Union[None, Tuple[str, str], str]
    :param projectIds: IDs of the projects to filter the results with. Can
        be a single value or a comma-delimited string. May be combined with
        projectKeys.
    :type projectIds: Union[List, Tuple[str, str]]
    :param issuetypeIds: IDs of the issue types to filter the results with.
        Can be a single value or a comma-delimited string. May be combined
        with issuetypeNames.
    :type issuetypeIds: Optional[List[str]]
    :param issuetypeNames: Names of the issue types to filter the results
        with. Can be a single value or a comma-delimited string. May be
        combined with issuetypeIds.
    :type issuetypeNames: Optional[str]
    :param expand: extra information to fetch inside each resource.
    :type expand: Optional[str]
    :rtype: Dict[str, Any]
    """
    # NOTE(review): `projectIds=[]` is a mutable default argument. It is
    # never mutated here (split() rebinds), so it is benign, but it also
    # means the default request always includes an empty 'projectIds' param;
    # changing the default to None would alter the emitted query.
    params = {}
    if projectKeys is not None:
        params['projectKeys'] = projectKeys
    if projectIds is not None:
        if isinstance(projectIds, string_types):
            # Accept a comma-delimited string as well as a list.
            projectIds = projectIds.split(',')
        params['projectIds'] = projectIds
    if issuetypeIds is not None:
        params['issuetypeIds'] = issuetypeIds
    if issuetypeNames is not None:
        params['issuetypeNames'] = issuetypeNames
    if expand is not None:
        params['expand'] = expand
    return self._get_json('issue/createmeta', params) | Get the metadata required to create issues, optionally filtered by projects and issue types.
:param projectKeys: keys of the projects to filter the results with.
Can be a single value or a comma-delimited string. May be combined
with projectIds.
:type projectKeys: Union[None, Tuple[str, str], str]
:param projectIds: IDs of the projects to filter the results with. Can
be a single value or a comma-delimited string. May be combined with
projectKeys.
:type projectIds: Union[List, Tuple[str, str]]
:param issuetypeIds: IDs of the issue types to filter the results with.
Can be a single value or a comma-delimited string. May be combined
with issuetypeNames.
:type issuetypeIds: Optional[List[str]]
:param issuetypeNames: Names of the issue types to filter the results
with. Can be a single value or a comma-delimited string. May be
combined with issuetypeIds.
:type issuetypeNames: Optional[str]
:param expand: extra information to fetch inside each resource.
:type expand: Optional[str]
:rtype: Dict[str, Any] |
def on(self, event, handler=None):
    """Register an event handler.
    :param event: The event name. Can be ``'connect'``, ``'message'`` or
                  ``'disconnect'``.
    :param handler: The function that should be invoked to handle the
                    event. When this parameter is not given, the method
                    acts as a decorator for the handler function.
    Example usage::
        # as a decorator:
        @eio.on('connect')
        def connect_handler():
            print('Connection request')
        # as a method:
        def message_handler(msg):
            print('Received message: ', msg)
            eio.send('response')
        eio.on('message', message_handler)
    """
    if event not in self.event_names:
        raise ValueError('Invalid event')
    def set_handler(handler):
        self.handlers[event] = handler
        return handler
    if handler is None:
        # Decorator form: return the registrar so @eio.on(...) works.
        return set_handler
    # Direct form: register immediately (returns None).
    set_handler(handler) | Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler) |
def confindr_targets(self, database_name='ConFindr'):
    """
    Download OLC-specific ConFindr targets
    :param database_name: name of current database
    """
    logging.info('Downloading ConFindr databases.')
    # NOTE: Need ConFindr >= 0.5.0 for this to work.
    # The consumer secret is read from the credentials folder for the
    # download helper's authenticated access.
    secret_file = os.path.join(self.credentials, 'secret.txt')
    confindr_db_setup.setup_confindr_database(output_folder=os.path.join(self.databasepath, database_name),
                                              consumer_secret=secret_file) | Download OLC-specific ConFindr targets
:param database_name: name of current database |
def set_idlesleep(self, idlesleep):
    """
    Sets CPU idle sleep time value.
    :param idlesleep: idle sleep value (integer)
    """
    # Coroutine (yield-from style): only push the value to the hypervisor
    # when the router is currently running; the cached value is updated
    # either way.
    is_running = yield from self.is_running()
    if is_running:  # router is running
        yield from self._hypervisor.send('vm set_idle_sleep_time "{name}" 0 {idlesleep}'.format(name=self._name,
                                                                                                idlesleep=idlesleep))
    log.info('Router "{name}" [{id}]: idlesleep updated from {old_idlesleep} to {new_idlesleep}'.format(name=self._name,
                                                                                                        id=self._id,
                                                                                                        old_idlesleep=self._idlesleep,
                                                                                                        new_idlesleep=idlesleep))
    self._idlesleep = idlesleep | Sets CPU idle sleep time value.
:param idlesleep: idle sleep value (integer) |
def reverse_tree(tree):
    """Reverse the dependency tree.
    ie. the keys of the resulting dict are objects of type
    ReqPackage and the values are lists of DistPackage objects.
    :param dict tree: the pkg dependency tree obtained by calling
                     `construct_tree` function
    :returns: reversed tree
    :rtype: dict
    """
    rtree = defaultdict(list)
    # Keys of packages that appear as someone's dependency.
    child_keys = set(c.key for c in flatten(tree.values()))
    for k, vs in tree.items():
        for v in vs:
            # Reuse an existing node for this key if one is already in rtree.
            node = find_tree_root(rtree, v.key) or v
            rtree[node].append(k.as_required_by(v))
        # Top-level packages (required by nothing) get an empty entry.
        if k.key not in child_keys:
            rtree[k.as_requirement()] = []
    return rtree | Reverse the dependency tree.
ie. the keys of the resulting dict are objects of type
ReqPackage and the values are lists of DistPackage objects.
:param dict tree: the pkg dependency tree obtained by calling
`construct_tree` function
:returns: reversed tree
:rtype: dict |
def nifti2db(file_path, file_type, is_copy, step_id, db_conn, sid_by_patient=False, pid_in_vid=False):
    """Extract some meta-data from NIFTI files (actually mostly from their paths) and stores it in a DB.
    Arguments:
    :param file_path: File path.
    :param file_type: File type.
    :param is_copy: Indicate if this file is a copy.
    :param step_id: Step ID.
    :param db_conn: Database connection.
    :param sid_by_patient: Rarely, a data set might use study IDs which are unique by patient
        (not for the whole study).
        E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session ID.
    :param pid_in_vid: Rarely, a data set might mix patient IDs and visit IDs. E.g. : LREN data. In such a case, you
        to enable this flag. This will try to split PatientID into VisitID and PatientID.
    :return:
    """
    logging.info("Processing '%s'" % file_path)
    # Look up an existing DataFile record for this path (None if new).
    df = db_conn.db_session.query(db_conn.DataFile).filter_by(path=file_path).one_or_none()
    dataset = db_conn.get_dataset(step_id)
    # Build the participant -> visit -> session -> sequence -> repetition
    # hierarchy from the file path.
    _extract_participant(db_conn, file_path, pid_in_vid, dataset)
    visit_id = _extract_visit(db_conn, file_path, pid_in_vid, sid_by_patient, dataset)
    session_id = _extract_session(db_conn, file_path, visit_id)
    sequence_id = _extract_sequence(db_conn, file_path, session_id)
    repetition_id = _extract_repetition(db_conn, file_path, sequence_id)
    if not df:
        # New file: insert a fresh record.
        df = db_conn.DataFile(
            path=file_path,
            type=file_type,
            is_copy=is_copy,
            processing_step_id=step_id,
            repetition_id=repetition_id
        )
        db_conn.db_session.merge(df)
        db_conn.db_session.commit()
    else:
        # Existing file: update only the fields that actually changed,
        # committing after each change.
        if file_type not in [None, '', df.type]:
            df.type = file_type
            db_conn.db_session.commit()
        if is_copy not in [None, df.is_copy]:
            df.is_copy = is_copy
            db_conn.db_session.commit()
        if step_id not in [None, df.processing_step_id]:
            df.processing_step_id = step_id
            db_conn.db_session.commit()
        if repetition_id not in [None, df.repetition_id]:
            df.repetition_id = repetition_id
            db_conn.db_session.commit() | Extract some meta-data from NIFTI files (actually mostly from their paths) and stores it in a DB.
Arguments:
:param file_path: File path.
:param file_type: File type.
:param is_copy: Indicate if this file is a copy.
:param step_id: Step ID.
:param db_conn: Database connection.
:param sid_by_patient: Rarely, a data set might use study IDs which are unique by patient
(not for the whole study).
E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session ID.
:param pid_in_vid: Rarely, a data set might mix patient IDs and visit IDs. E.g. : LREN data. In such a case, you
to enable this flag. This will try to split PatientID into VisitID and PatientID.
:return: |
def inject_python_code2(fpath, patch_code, tag):
""" Does autogeneration stuff """
import utool as ut
text = ut.readfrom(fpath)
start_tag = '# <%s>' % tag
end_tag = '# </%s>' % tag
new_text = ut.replace_between_tags(text, patch_code, start_tag, end_tag)
ut.writeto(fpath, new_text) | Does autogeneration stuff |
def load_class_by_name(name: str):
    """Given a dotted path, returns the class"""
    # Split "pkg.mod.Class" into module path and attribute name.
    module_path, _, attribute = name.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attribute)
def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1,
                           database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-from-to query.
    The paths-from-to query finds the paths from a set of source genes to
    a set of target genes.
    http://www.pathwaycommons.org/pc2/#graph
    http://www.pathwaycommons.org/pc2/#graph_kind
    Parameters
    ----------
    source_genes : list
        A list of HGNC gene symbols that are the sources of paths being
        searched for.
        Examples: ['BRAF', 'RAF1', 'ARAF']
    target_genes : list
        A list of HGNC gene symbols that are the targets of paths being
        searched for.
        Examples: ['MAP2K1', 'MAP2K2']
    neighbor_limit : Optional[int]
        The number of steps to limit the length of the paths
        between the source genes and target genes being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see http://www.pathwaycommons.org/pc2/datasources
    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
        Returns None (implicitly) if the query yields no model.
    """
    model = pcc.graph_query('pathsfromto', source_genes,
                            target_genes, neighbor_limit=neighbor_limit,
                            database_filter=database_filter)
    # Implicitly returns None when the query failed / returned no model.
    if model is not None:
        return process_model(model) | Returns a BiopaxProcessor for a PathwayCommons paths-from-to query.
The paths-from-to query finds the paths from a set of source genes to
a set of target genes.
http://www.pathwaycommons.org/pc2/#graph
http://www.pathwaycommons.org/pc2/#graph_kind
Parameters
----------
source_genes : list
A list of HGNC gene symbols that are the sources of paths being
searched for.
Examples: ['BRAF', 'RAF1', 'ARAF']
target_genes : list
A list of HGNC gene symbols that are the targets of paths being
searched for.
Examples: ['MAP2K1', 'MAP2K2']
neighbor_limit : Optional[int]
The number of steps to limit the length of the paths
between the source genes and target genes being queried. Default: 1
database_filter : Optional[list]
A list of database identifiers to which the query is restricted.
Examples: ['reactome'], ['biogrid', 'pid', 'psp']
If not given, all databases are used in the query. For a full
list of databases see http://www.pathwaycommons.org/pc2/datasources
Returns
-------
bp : BiopaxProcessor
A BiopaxProcessor containing the obtained BioPAX model in bp.model. |
def _merge_expressions(self, other):
    """
    Merge the inputs of two NumericalExpressions into a single input tuple,
    rewriting their respective string expressions to make input names
    resolve correctly.
    Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
    """
    # Union of both input sets; note set() makes the resulting order
    # non-deterministic across runs.
    new_inputs = tuple(set(self.inputs).union(other.inputs))
    # Rewrite each expression's variable names against the merged inputs.
    new_self_expr = self._rebind_variables(new_inputs)
    new_other_expr = other._rebind_variables(new_inputs)
    return new_self_expr, new_other_expr, new_inputs | Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs) |
def prepare(self):
    """
    Prepare the Directory for use in an Environment.
    This will create the directory if the create flag is set.
    """
    if self._create:
        self.create()
    # Propagate the environment to children and prepare them recursively.
    for k in self._children:
        self._children[k]._env = self._env
        self._children[k].prepare() | Prepare the Directory for use in an Environment.
This will create the directory if the create flag is set. |
def load_package(package_dir, package=None, exclude=None, default_section=_DEFAULT_SECTION):
    """
    Load configuration files from a directory.
    :param package_dir: directory to scan for configuration files
    :param package: optional package name; when the directory is a Python
        package (has __init__.py), entries are loaded as ``package.module``
    :param exclude: iterable of resolved entries to skip
    :param default_section: default configuration section name
    :return: merged settings
    """
    init_py = '__init__.py'
    py_ext = '.py'
    files = os.listdir(package_dir)
    if init_py in files:
        # Python package: drop __init__.py and load the package itself first.
        files = [f for f in files if f != init_py]
        if package:
            files.insert(0, package)
    def init_package(item):
        # Resolve a directory entry to either a dotted module name or a
        # config-file path, falling back to the package name.
        if str(item).endswith(py_ext):
            item = item[:-3]
            if package:
                item = '{package}.{item}'.format(package=package, item=item)
        elif _is_conf(item):
            item = '{package_dir}/{item}'.format(package_dir=package_dir, item=item)
        else:
            item = package
        return str(item)
    logger.debug(files)
    files = [init_package(f) for f in files]
    if exclude:
        files = [f for f in files if f not in exclude]
    settings = load(files, default_section)
    return merge(settings) | 从目录中载入配置文件
:param package_dir:
:param package:
:param exclude:
:param default_section:
:return: |
def prettify_metrics(metrics: List[Tuple[str, float]], precision: int = 4) -> OrderedDict:
    """Round each metric value to *precision* digits, preserving order."""
    return OrderedDict((name, round(score, precision)) for name, score in metrics)
def fit_ahrs(A, H, Aoff, Arot, Hoff, Hrot):
    """Calculate yaw, pitch and roll for given A/H and calibration set.
    Author: Vladimir Kulikovsky
    Parameters
    ----------
    A: list, tuple or numpy.array of shape (3,)
    H: list, tuple or numpy.array of shape (3,)
    Aoff: numpy.array of shape(3,)
    Arot: numpy.array of shape(3, 3)
    Hoff: numpy.array of shape(3,)
    Hrot: numpy.array of shape(3, 3)
    Returns
    -------
    yaw, pitch, roll
        Angles in degrees; yaw is normalized to [0, 360).
    """
    # Apply the calibration: subtract offsets, then rotate.
    Acal = np.dot(A - Aoff, Arot)
    Hcal = np.dot(H - Hoff, Hrot)
    # invert axis for DOM upside down
    for i in (1, 2):
        Acal[i] = -Acal[i]
        Hcal[i] = -Hcal[i]
    roll = arctan2(-Acal[1], -Acal[2])
    pitch = arctan2(Acal[0], np.sqrt(Acal[1] * Acal[1] + Acal[2] * Acal[2]))
    # Tilt-compensated heading from the calibrated magnetic vector.
    yaw = arctan2(
        Hcal[2] * sin(roll) - Hcal[1] * cos(roll),
        sum((
            Hcal[0] * cos(pitch), Hcal[1] * sin(pitch) * sin(roll),
            Hcal[2] * sin(pitch) * cos(roll)
        ))
    )
    yaw = np.degrees(yaw)
    # Normalize yaw into [0, 360).
    while yaw < 0:
        yaw += 360
    # yaw = (yaw + magnetic_declination + 360 ) % 360
    roll = np.degrees(roll)
    pitch = np.degrees(pitch)
    return yaw, pitch, roll | Calculate yaw, pitch and roll for given A/H and calibration set.
Author: Vladimir Kulikovsky
Parameters
----------
A: list, tuple or numpy.array of shape (3,)
H: list, tuple or numpy.array of shape (3,)
Aoff: numpy.array of shape(3,)
Arot: numpy.array of shape(3, 3)
Hoff: numpy.array of shape(3,)
Hrot: numpy.array of shape(3, 3)
Returns
-------
yaw, pitch, roll |
def constant_jump_targets_and_jumpkinds(self):
    """
    A dict of the static jump targets of the basic block to their jumpkind.
    """
    targets = {}
    # Collect explicit exit statements first.
    for _, _, stmt in (self.exit_statements or ()):
        targets[stmt.dst.value] = stmt.jumpkind
    # The fall-through/default exit, when present, uses the block's jumpkind.
    default = self.default_exit_target
    if default is not None:
        targets[default] = self.jumpkind
    return targets
def active_element(self):
    """
    Returns the element with focus, or BODY if nothing has focus.
    :Usage:
        ::
            element = driver.switch_to.active_element
    """
    # W3C-compliant drivers use a different wire command than legacy JSONWP.
    if self._driver.w3c:
        return self._driver.execute(Command.W3C_GET_ACTIVE_ELEMENT)['value']
    else:
        return self._driver.execute(Command.GET_ACTIVE_ELEMENT)['value'] | Returns the element with focus, or BODY if nothing has focus.
:Usage:
::
element = driver.switch_to.active_element |
def find_file_ident_desc_by_name(self, currpath):
    # type: (bytes) -> UDFFileIdentifierDescriptor
    '''
    A method to find a UDF File Identifier descriptor by its name.

    Parameters:
     currpath - The UTF-8 encoded name to look up.
    Returns:
     The UDF File Identifier descriptor corresponding to the passed in name.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized')

    # Only directories (ICB file type 4) that have children can possibly
    # contain the requested name; bail out early otherwise.
    if self.icb_tag.file_type != 4 or not self.fi_descs:
        raise pycdlibexception.PyCdlibInvalidInput('Could not find path')

    decoded = currpath.decode('utf-8')
    # Pre-compute both on-disk encodings of the name.  Not every name is
    # representable in latin-1; in that case only the UCS-2 form can match.
    try:
        name_latin1 = decoded.encode('latin-1')
    except (UnicodeDecodeError, UnicodeEncodeError):
        name_latin1 = b''
    name_ucs2 = decoded.encode('utf-16_be')

    for fi_desc in self.fi_descs:
        if name_latin1 and fi_desc.encoding == 'latin-1':
            matched = fi_desc.fi == name_latin1
        else:
            matched = fi_desc.fi == name_ucs2
        if matched:
            return fi_desc

    raise pycdlibexception.PyCdlibInvalidInput('Could not find path')
Parameters:
currpath - The UTF-8 encoded name to look up.
Returns:
The UDF File Identifier descriptor corresponding to the passed in name. |
def split_prefix(self, ctx, item):
    """See if ``item`` has blueprint prefix, return (directory, rel_path).
    """
    app = ctx._app
    try:
        # A prefixed item looks like '<blueprint-or-module>/<rel_path>'.
        prefix, rel_name = item.split('/', 1)
        if hasattr(app, 'blueprints'):
            directory = get_static_folder(app.blueprints[prefix])
        else:
            # Module support for Flask < 0.7
            directory = get_static_folder(app.modules[prefix])
        endpoint = '%s.static' % prefix
        item = rel_name
    except (ValueError, KeyError):
        # No '/' in the item, or the prefix is not a known blueprint/module:
        # fall back to the application's own static folder.
        directory = get_static_folder(app)
        endpoint = 'static'
    return directory, item, endpoint
def _lib(self, name, only_if_have=False):
"""Specify a linker library.
Example:
LDFLAGS={{ lib("rt") }} {{ lib("pthread", True) }}
Will unconditionally add `-lrt` and check the environment if the key
`HAVE_LIBPTHREAD` is set to be true, then add `-lpthread`.
"""
emit = True
if only_if_have:
emit = self.env.get('HAVE_LIB' + self.env_key(name))
if emit:
return '-l' + name
return '' | Specify a linker library.
Example:
LDFLAGS={{ lib("rt") }} {{ lib("pthread", True) }}
Will unconditionally add `-lrt` and check the environment if the key
`HAVE_LIBPTHREAD` is set to be true, then add `-lpthread`. |
def ini_load_hook(cfg, **kwargs):
    """
    This handles automatically opening/creating the INI configuration files.

    >>> import configmaster.INIConfigFile
    >>> cfg = configmaster.INIConfigFile.INIConfigFile("tesr.ini") # Accepts a string for input

    >>> fd = open("test.ini") # Accepts a file descriptor too
    >>> cfg2 = configmaster.INIConfigFile.INIConfigFile(fd)

    ConfigMaster objects accepts either a string for the relative path of the INI file to load, or a :io.TextIOBase: object to read from.
    If you pass in a string, the file will automatically be created if it doesn't exist. However, if you do not have permission to write to it, a :PermissionError: will be raised.
    To access config objects programmatically, a config object is exposed via the use of cfg.config.
    These config objects can be accessed via cfg.config.attr, without having to resort to looking up objects in a dict.
    """
    # Parse the INI data from the already-open file descriptor.
    cfg.tmpini = configparser.ConfigParser()
    try:
        cfg.tmpini.read_file(cfg.fd)
    except ValueError as e:
        raise exc.LoaderException("Could not decode INI file: {}".format(e)) from e

    # Flatten each section proxy into a plain dict keyed by section name.
    sanitized = {name: dict(cfg.tmpini[name]) for name in cfg.tmpini.sections()}

    # Serialize the data into new sets of ConfigKey classes.
    cfg.config.load_from_dict(sanitized)
>>> import configmaster.INIConfigFile
>>> cfg = configmaster.INIConfigFile.INIConfigFile("tesr.ini") # Accepts a string for input
>>> fd = open("test.ini") # Accepts a file descriptor too
>>> cfg2 = configmaster.INIConfigFile.INIConfigFile(fd)
ConfigMaster objects accepts either a string for the relative path of the INI file to load, or a :io.TextIOBase: object to read from.
If you pass in a string, the file will automatically be created if it doesn't exist. However, if you do not have permission to write to it, a :PermissionError: will be raised.
To access config objects programmatically, a config object is exposed via the use of cfg.config.
These config objects can be accessed via cfg.config.attr, without having to resort to looking up objects in a dict. |
def check_precondition(self, key, value):
    '''
    Override to check for timeout
    '''
    # The stored value is an absolute expiry timestamp; the precondition
    # holds once the clock has moved past it.
    expiry = float(value)
    return self.get_current_time() > expiry
def execute(self, operation, *args, **kwargs):
    '''execute

    High-level api: Supported operations are get, get_config, get_schema,
    dispatch, edit_config, copy_config, validate, commit, discard_changes,
    delete_config, lock, unlock, close_session, kill_session,
    poweroff_machine and reboot_machine. Since ModelDevice is a subclass of
    manager in ncclient package, any method supported by ncclient is
    available here. Refer to ncclient document for more details.
    '''
    # Pop the optional 'models' kwarg and normalize it to a list (or None
    # when the caller did not supply it).
    def pop_models():
        models = kwargs.pop('models', None)
        if models is None:
            return None
        else:
            if isinstance(models, str):
                return [models]
            else:
                return models

    # Raise ModelMissing if any requested model has not been loaded yet.
    def check_models(models):
        missing_models = set(models) - set(self.models_loaded)
        if missing_models:
            raise ModelMissing('please load model {} by calling '
                               'method load_model() of device {}'
                               .format(str(list(missing_models))[1:-1],
                                       self))

    # Build a subtree <filter> element covering the given root tags.  Note
    # this overrides any user-supplied 'filter' argument.
    def build_filter(models, roots):
        if 'filter' in kwargs:
            logger.warning("argument 'filter' is ignored as argument "
                           "'models' is specified")
        if isinstance(models, str):
            models = [models]
        check_models(models)
        filter_ele = etree.Element(filter_tag, type='subtree')
        for root in roots:
            etree.SubElement(filter_ele, root)
        filter_xml = etree.tostring(filter_ele,
                                    encoding='unicode',
                                    pretty_print=False)
        logger.debug("argument 'filter' is set to '{}'".format(filter_xml))
        return filter_ele

    # Return the 'access' attribute of a root node in a model's tree.
    def get_access_type(model_name, root):
        check_models([model_name])
        node = list(self.models[model_name].tree.iterchildren(tag=root))[0]
        return node.get('access')

    # allow for operation string type
    if type(operation) is str:
        try:
            cls = manager.OPERATIONS[operation]
        except KeyError:
            supported_operations = list(manager.OPERATIONS.keys())
            raise ValueError("supported operations are {}, but not '{}'"
                             .format(str(supported_operations)[1:-1],
                                     operation))
    else:
        cls = operation
    if cls == operations.retrieve.Get:
        # <get>: restrict the reply to readable roots of the given models.
        models = pop_models()
        if models is not None:
            check_models(models)
            roots = [k for k, v in self.roots.items()
                     if v in models and
                     (get_access_type(v, k) == 'read-write' or
                      get_access_type(v, k) == 'read-only')]
            if not roots:
                raise ValueError('no readable roots found in your '
                                 'models: {}'.format(str(models)[1:-1]))
            kwargs['filter'] = build_filter(models, roots)
    elif cls == operations.retrieve.GetConfig:
        # <get-config>: default to the 'running' datastore and restrict the
        # reply to writable (read-write) roots of the given models.
        if not args and 'source' not in kwargs:
            args = tuple(['running'])
        models = pop_models()
        if models is not None:
            check_models(models)
            roots = [k for k, v in self.roots.items()
                     if v in models and
                     get_access_type(v, k) == 'read-write']
            if not roots:
                raise ValueError('no writable roots found in your '
                                 'models: {}'.format(str(models)[1:-1]))
            kwargs['filter'] = build_filter(models, roots)
    elif cls == operations.edit.EditConfig:
        # <edit-config>: accept a Config object as the config argument, and
        # fall back to the 'running' target when the server lacks the
        # candidate capability but advertises writable-running.
        if args and isinstance(args[0], Config):
            args_list = list(args)
            args_list[0] = args[0].ele
            args = tuple(args_list)
        if 'target' not in kwargs and \
           'urn:ietf:params:netconf:capability:candidate:1.0' not in \
           self.server_capabilities and \
           'urn:ietf:params:netconf:capability:writable-running:1.0' in \
           self.server_capabilities:
            kwargs['target'] = 'running'
    reply = super().execute(cls, *args, **kwargs)
    # Attach namespace information to replies/notifications so that later
    # parsing can resolve prefixes.
    if isinstance(reply, operations.rpc.RPCReply):
        reply.ns = self._get_ns(reply._root)
    if getattr(transport, 'notify', None) and \
       isinstance(reply, transport.notify.Notification):
        reply.ns = self._get_ns(reply._root_ele)
    return reply
High-level api: Supported operations are get, get_config, get_schema,
dispatch, edit_config, copy_config, validate, commit, discard_changes,
delete_config, lock, unlock, close_session, kill_session,
poweroff_machine and reboot_machine. Since ModelDevice is a subclass of
manager in ncclient package, any method supported by ncclient is
available here. Refer to ncclient document for more details. |
def _get_indexes_in_altered_table(self, diff):
    """
    Compute the indexes that must exist on the table after alteration.

    :param diff: The table diff
    :type diff: orator.dbal.table_diff.TableDiff

    :rtype: dict
    """
    indexes = diff.from_table.get_indexes()
    # Maps lowercased old column names to their (possibly renamed) new names.
    column_names = self._get_column_names_in_altered_table(diff)

    # Iterate over a snapshot, since ``indexes`` is mutated inside the loop.
    for key, index in OrderedDict([(k, v) for k, v in indexes.items()]).items():
        # Drop indexes being renamed; the renamed versions are re-added from
        # diff.renamed_indexes at the bottom of this method.
        for old_index_name, renamed_index in diff.renamed_indexes.items():
            if key.lower() == old_index_name.lower():
                del indexes[key]
        changed = False
        index_columns = []
        for column_name in index.get_columns():
            normalized_column_name = column_name.lower()
            # An index referencing a dropped column cannot survive.
            if normalized_column_name not in column_names:
                del indexes[key]
                break
            else:
                # Translate the column to its new name; remember whether
                # anything actually changed.
                index_columns.append(column_names[normalized_column_name])
                if column_name != column_names[normalized_column_name]:
                    changed = True
        if changed:
            # Rebuild the index definition with the translated column list.
            indexes[key] = Index(
                index.get_name(),
                index_columns,
                index.is_unique(),
                index.is_primary(),
                index.get_flags(),
            )

    # Explicitly removed indexes disappear from the altered table.
    for index in diff.removed_indexes.values():
        index_name = index.get_name().lower()
        if index_name and index_name in indexes:
            del indexes[index_name]

    # Changed, added and renamed indexes override (or extend) the survivors.
    changed_indexes = (
        list(diff.changed_indexes.values())
        + list(diff.added_indexes.values())
        + list(diff.renamed_indexes.values())
    )
    for index in changed_indexes:
        index_name = index.get_name().lower()
        if index_name:
            indexes[index_name] = index
        else:
            # Nameless indexes get a positional key.
            indexes[len(indexes)] = index

    return indexes
:type diff: orator.dbal.table_diff.TableDiff
:rtype: dict |
def changes_found(self):
    """
    Returns True if the target folder is older than the source folder.
    """
    if self.dest is None:
        warnings.warn("dest directory not found!")
    if self.src is None:
        warnings.warn("src directory not found!")
    if self.src is None or self.dest is None:
        return False

    # A missing destination compares older than anything.
    dest_mtime = os.path.getmtime(self.dest) if os.path.exists(self.dest) else -1
    if os.path.getmtime(self.src) >= dest_mtime:
        return True

    # Any *.scss file under src that is at least as new as dest counts as
    # a change.
    for folder, _, files in os.walk(self.src):
        for name in fnmatch.filter(files, '*.scss'):
            if os.path.getmtime(os.path.join(folder, name)) >= dest_mtime:
                return True
    return False
def main():
    '''
    Set up "optparse" and pass the options to
    a new instance of L{LatexMaker}.

    With no filename argument, exactly one *.tex file must exist in the
    current directory; otherwise the parser exits with an error.
    '''
    # NOTE: the original also built a description string from __doc__ here,
    # but never passed it anywhere; the dead computation (which crashed
    # under `python -OO` where __doc__ is None) has been removed.
    parser = OptionParser(prog='latexmk.py',
                          usage='%prog [options] [filename]',
                          version=__version__)
    parser.add_option('-c', '--clean',
                      action='store_true', dest='clean', default=False,
                      help='clean all temporary files after converting')
    parser.add_option('-q', '--quiet',
                      action='store_false', dest='verbose', default=True,
                      help='don\'t print status messages to stdout')
    parser.add_option('-n', '--no-exit',
                      action='store_false', dest='exit_on_error', default=True,
                      help='don\'t exit if error occurs')
    parser.add_option('-p', '--preview',
                      action='store_true', dest='preview', default=False,
                      help='try to open preview of generated document')
    parser.add_option('--dvi', action='store_false', dest='pdf',
                      default=True, help='use "latex" instead of pdflatex')
    parser.add_option('--check-cite', action='store_true', dest='check_cite',
                      default=False,
                      help='check bibtex file for uncited entries')

    opt, args = parser.parse_args()

    if len(args) > 1:
        parser.error('incorrect number of arguments')
    elif len(args) == 1:
        name = args[0]
    else:
        # No filename given: require exactly one *.tex file in the cwd.
        tex_files = fnmatch.filter(os.listdir(os.getcwd()), '*.tex')
        if len(tex_files) == 1:
            name = tex_files[0]
        else:
            parser.error('could not find a single *.tex file in current dir')

    LatexMaker(name, opt).run()
a new instance of L{LatexMaker}. |
def account_id(self, value):
    """The account_id property.

    Args:
        value (string). the property value.
    """
    key = 'ai.user.accountId'
    # Writing the default value back removes the explicit entry so the
    # serialized payload stays minimal.
    if value == self._defaults[key] and key in self._values:
        del self._values[key]
    else:
        self._values[key] = value
Args:
value (string). the property value. |
def _bfs(node, visited):
"""Iterate through nodes in BFS order."""
queue = collections.deque()
queue.appendleft(node)
while queue:
node = queue.pop()
if node not in visited:
if node.lo is not None:
queue.appendleft(node.lo)
if node.hi is not None:
queue.appendleft(node.hi)
visited.add(node)
yield node | Iterate through nodes in BFS order. |
def get(self):
    """Get the contents of a GValue.

    The contents of the GValue are read out as a Python type.
    """
    # logger.debug('GValue.get: self = %s', self)

    gtype = self.gvalue.g_type
    fundamental = gobject_lib.g_type_fundamental(gtype)

    result = None
    # Dispatch on the GType to the matching g_value_get_* accessor.
    if gtype == GValue.gbool_type:
        result = bool(gobject_lib.g_value_get_boolean(self.gvalue))
    elif gtype == GValue.gint_type:
        result = gobject_lib.g_value_get_int(self.gvalue)
    elif gtype == GValue.guint64_type:
        result = gobject_lib.g_value_get_uint64(self.gvalue)
    elif gtype == GValue.gdouble_type:
        result = gobject_lib.g_value_get_double(self.gvalue)
    elif fundamental == GValue.genum_type:
        # Enums are returned via from_enum (early return, bypasses result).
        return GValue.from_enum(gtype,
                                gobject_lib.g_value_get_enum(self.gvalue))
    elif fundamental == GValue.gflags_type:
        result = gobject_lib.g_value_get_flags(self.gvalue)
    elif gtype == GValue.gstr_type:
        pointer = gobject_lib.g_value_get_string(self.gvalue)
        # A NULL C string decodes to None (result stays None).
        if pointer != ffi.NULL:
            result = _to_string(pointer)
    elif gtype == GValue.refstr_type:
        psize = ffi.new('size_t *')
        pointer = vips_lib.vips_value_get_ref_string(self.gvalue, psize)
        # psize[0] will be number of bytes in string, but just assume it's
        # NULL-terminated
        result = _to_string(pointer)
    elif gtype == GValue.image_type:
        # g_value_get_object() will not add a ref ... that is
        # held by the gvalue
        go = gobject_lib.g_value_get_object(self.gvalue)
        vi = ffi.cast('VipsImage *', go)
        # we want a ref that will last with the life of the vimage:
        # this ref is matched by the unref that's attached to finalize
        # by Image()
        gobject_lib.g_object_ref(go)
        result = pyvips.Image(vi)
    elif gtype == GValue.array_int_type:
        pint = ffi.new('int *')
        array = vips_lib.vips_value_get_array_int(self.gvalue, pint)
        result = []
        for i in range(0, pint[0]):
            result.append(array[i])
    elif gtype == GValue.array_double_type:
        pint = ffi.new('int *')
        array = vips_lib.vips_value_get_array_double(self.gvalue, pint)
        result = []
        for i in range(0, pint[0]):
            result.append(array[i])
    elif gtype == GValue.array_image_type:
        pint = ffi.new('int *')
        array = vips_lib.vips_value_get_array_image(self.gvalue, pint)
        result = []
        for i in range(0, pint[0]):
            vi = array[i]
            # Each image in the array needs its own ref, matching the unref
            # attached by Image() (same pattern as the single-image case).
            gobject_lib.g_object_ref(vi)
            image = pyvips.Image(vi)
            result.append(image)
    elif gtype == GValue.blob_type:
        psize = ffi.new('size_t *')
        array = vips_lib.vips_value_get_blob(self.gvalue, psize)
        buf = ffi.cast('char*', array)
        # Blobs are copied out as Python bytes of exactly psize[0] bytes.
        result = ffi.unpack(buf, psize[0])
    else:
        raise Error('unsupported gtype for get {0}'.
                    format(type_name(gtype)))

    return result
The contents of the GValue are read out as a Python type. |
def decode(cls, string, errors='strict'):
    """Return the decoded version of a string.

    :param string:
        The input string to decode.
    :type string:
        `basestring`
    :param errors:
        The error handling scheme. Only 'strict' is supported.
    :type errors:
        `basestring`
    :return:
        Tuple of decoded string and number of input bytes consumed.
    :rtype:
        `tuple` (`unicode`, `int`)
    """
    if errors != 'strict':
        raise UnicodeError('Unsupported error handling {0}'.format(errors))
    # Coerce to unicode first, then map characters through the class's
    # decoding table.
    decoded = cls._ensure_unicode_string(string).translate(cls._decoding_table)
    return decoded, len(string)
:param string:
The input string to decode.
:type string:
`basestring`
:param errors:
The error handling scheme. Only 'strict' is supported.
:type errors:
`basestring`
:return:
Tuple of decoded string and number of input bytes consumed.
:rtype:
`tuple` (`unicode`, `int`) |
def _get_base_defaultLayer(self):
"""
This is the environment implementation of
:attr:`BaseFont.defaultLayer`. Return the
default layer as a :class:`BaseLayer` object.
The layer will be normalized with
:func:`normalizers.normalizeLayer`.
Subclasses must override this method.
"""
name = self.defaultLayerName
layer = self.getLayer(name)
return layer | This is the environment implementation of
:attr:`BaseFont.defaultLayer`. Return the
default layer as a :class:`BaseLayer` object.
The layer will be normalized with
:func:`normalizers.normalizeLayer`.
Subclasses must override this method. |
def tag_dssp_solvent_accessibility(self, force=False):
    """Tags each `Residues` Polymer with its solvent accessibility.

    Notes
    -----
    For more about DSSP's solvent accessibilty metric, see:
    http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC

    References
    ----------
    .. [1] Kabsch W, Sander C (1983) "Dictionary of protein
       secondary structure: pattern recognition of hydrogen-bonded
       and geometrical features", Biopolymers, 22, 2577-637.

    Parameters
    ----------
    force : bool, optional
        If `True` the tag will be run even if `Residues` are
        already tagged.
    """
    # Skip the (expensive) DSSP run when every monomer already carries
    # the tag, unless the caller forces a re-run.
    already_tagged = all('dssp_acc' in m.tags.keys() for m in self._monomers)
    if already_tagged and not force:
        return
    dssp_out = run_dssp(self.pdb, path=False)
    if dssp_out is None:
        return
    acc_records = extract_solvent_accessibility_dssp(
        dssp_out, path=False)
    # The accessibility value is the last field of each DSSP record.
    for monomer, record in zip(self._monomers, acc_records):
        monomer.tags['dssp_acc'] = record[-1]
    return
Notes
-----
For more about DSSP's solvent accessibilty metric, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged. |
def received_response(self, value):
    """The received_response property.

    Args:
        value (string). the property value.
    """
    key = 'receivedResponse'
    # Writing the default value back removes the explicit entry so the
    # serialized payload stays minimal.
    if value == self._defaults[key] and key in self._values:
        del self._values[key]
    else:
        self._values[key] = value
Args:
value (string). the property value. |
def _init_sqlite_functions(self):
    """additional SQL functions to the database"""
    # Scalar functions: (SQL name, argument count, implementation).
    scalar_funcs = (
        ("sqrt", 1, sqlfunctions._sqrt),
        ("sqr", 1, sqlfunctions._sqr),
        ("periodic", 1, sqlfunctions._periodic),
        ("pow", 2, sqlfunctions._pow),
        ("match", 2, sqlfunctions._match),
        ("regexp", 2, sqlfunctions._regexp),
        ("fformat", 2, sqlfunctions._fformat),
    )
    for name, nargs, impl in scalar_funcs:
        self.connection.create_function(name, nargs, impl)
    # Aggregate functions: (SQL name, argument count, aggregate class).
    aggregates = (
        ("std", 1, sqlfunctions._Stdev),
        ("stdN", 1, sqlfunctions._StdevN),
        ("median", 1, sqlfunctions._Median),
        ("array", 1, sqlfunctions._NumpyArray),
        ("histogram", 4, sqlfunctions._NumpyHistogram),
        ("distribution", 4, sqlfunctions._NormedNumpyHistogram),
        ("meanhistogram", 5, sqlfunctions._MeanHistogram),
        ("stdhistogram", 5, sqlfunctions._StdHistogram),
        ("minhistogram", 5, sqlfunctions._MinHistogram),
        ("maxhistogram", 5, sqlfunctions._MaxHistogram),
        ("medianhistogram", 5, sqlfunctions._MedianHistogram),
        ("zscorehistogram", 5, sqlfunctions._ZscoreHistogram),
    )
    for name, nargs, agg_cls in aggregates:
        self.connection.create_aggregate(name, nargs, agg_cls)
def on_start(self):
    """
    start the service
    """
    LOGGER.debug("natsd.Service.on_start")
    self.service = threading.Thread(
        target=self.run_event_loop,
        name=self.serviceQ + " service thread",
    )
    self.service.start()
    # Poll until the event-loop thread reports that it is running.
    while not self.is_started:
        time.sleep(0.01)
def create_handler(Model, name=None, **kwds):
    """
    This factory returns an action handler that creates a new instance of
    the specified model when a create action is received, assuming the
    action follows nautilus conventions.

    Args:
        Model (nautilus.BaseModel): The model to create when the action
            received.

    Returns:
        function(action_type, payload): The action handler for this model
    """
    async def action_handler(service, action_type, payload, props, notify=True, **kwds):
        # if the payload represents a new instance of `Model`
        if action_type == get_crud_action('create', name or Model):
            # print('handling create for ' + name or Model)
            try:
                # the props of the message
                message_props = {}
                # if there was a correlation id in the request
                if 'correlation_id' in props:
                    # make sure it ends up in the reply
                    message_props['correlation_id'] = props['correlation_id']
                # for each required field
                for requirement in Model.required_fields():
                    # save the name of the field
                    field_name = requirement.name
                    # ensure the value is in the payload
                    # TODO: check all required fields rather than failing on the first
                    if not field_name in payload and field_name != 'id':
                        # yell loudly
                        raise ValueError(
                            "Required field not found in payload: %s" %field_name
                        )
                # create a new model
                new_model = Model(**payload)
                # save the new model instance
                new_model.save()
                # if we need to tell someone about what happened
                if notify:
                    # publish the success event
                    await service.event_broker.send(
                        payload=ModelSerializer().serialize(new_model),
                        action_type=change_action_status(action_type, success_status()),
                        **message_props
                    )
            # if something goes wrong
            except Exception as err:
                # if we need to tell someone about what happened
                if notify:
                    # publish the error as an event
                    await service.event_broker.send(
                        payload=str(err),
                        action_type=change_action_status(action_type, error_status()),
                        **message_props
                    )
                # otherwise we aren't supposed to notify
                else:
                    # raise the exception normally
                    raise err
    # return the handler
    return action_handler
the specified model when a create action is recieved, assuming the
action follows nautilus convetions.
Args:
Model (nautilus.BaseModel): The model to create when the action
received.
Returns:
function(action_type, payload): The action handler for this model |
def delete_detector(self, detector_id, **kwargs):
    """Remove a detector.

    Args:
        detector_id (string): the ID of the detector.
    """
    url = self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id)
    resp = self._delete(url, **kwargs)
    resp.raise_for_status()
    # A successful delete returns 204 No Content, so there is no JSON body
    # to parse; hand back the raw response object.
    return resp
Args:
detector_id (string): the ID of the detector. |
def is_enable_action_dependent(self, hosts, services):
    """
    Check if dependencies states match dependencies statuses
    This basically means that a dependency is in a bad state and
    it can explain this object state.

    :param hosts: hosts objects, used to get object in act_depend_of
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to get object in act_depend_of
    :type services: alignak.objects.service.Services
    :return: True if all dependencies matches the status, false otherwise
    :rtype: bool
    """
    enable_action = False
    for (dep_id, status, _, _) in self.act_depend_of:
        # An inherited 'n' (none) state always allows the action for this
        # dependency.
        if 'n' in status:
            enable_action = True
            continue
        dep = hosts[dep_id] if dep_id in hosts else services[dep_id]
        # The parent counts as down when its current state matches any of
        # the dependency's configured failure states.
        parent_is_down = any(dep.is_state(stat) for stat in status)
        if not parent_is_down:
            enable_action = True
    return enable_action
This basically means that a dependency is in a bad state and
it can explain this object state.
:param hosts: hosts objects, used to get object in act_depend_of
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get object in act_depend_of
:type services: alignak.objects.service.Services
:return: True if all dependencies matches the status, false otherwise
:rtype: bool |
def dameraulevenshtein(seq1, seq2):
    """Calculate the Damerau-Levenshtein distance between sequences.

    This distance is the number of additions, deletions, substitutions,
    and transpositions needed to transform the first sequence into the
    second. Although generally used with strings, any sequences of
    comparable objects will work.

    Transpositions are exchanges of *consecutive* characters; all other
    operations are self-explanatory.

    This implementation is O(N*M) time and O(M) space, for N and M the
    lengths of the two sequences.

    >>> dameraulevenshtein('ba', 'abc')
    2
    >>> dameraulevenshtein('fee', 'deed')
    2

    It works with arbitrary sequences too:
    >>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
    2
    """
    # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F
    # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.
    # However, only the current and two previous rows are needed at once,
    # so we only store those.
    oneago = None
    thisrow = list(range_(1, len(seq2) + 1)) + [0]
    for x in range_(len(seq1)):
        # Python lists wrap around for negative indices, so put the
        # leftmost column at the *end* of the list. This matches with
        # the zero-indexed strings and saves extra calculation.
        twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]
        for y in range_(len(seq2)):
            delcost = oneago[y] + 1        # delete from seq1
            addcost = thisrow[y - 1] + 1   # insert into seq1
            # Substitution costs 1 only when the characters differ.
            subcost = oneago[y - 1] + (seq1[x] != seq2[y])
            thisrow[y] = min(delcost, addcost, subcost)
            # This block deals with transpositions
            if (x > 0 and y > 0 and seq1[x] == seq2[y - 1] and
                    seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):
                thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)
    # The answer sits just before the wrapped-around leftmost column.
    return thisrow[len(seq2) - 1]
This distance is the number of additions, deletions, substitutions,
and transpositions needed to transform the first sequence into the
second. Although generally used with strings, any sequences of
comparable objects will work.
Transpositions are exchanges of *consecutive* characters; all other
operations are self-explanatory.
This implementation is O(N*M) time and O(M) space, for N and M the
lengths of the two sequences.
>>> dameraulevenshtein('ba', 'abc')
2
>>> dameraulevenshtein('fee', 'deed')
2
It works with arbitrary sequences too:
>>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
2 |
def get(*args, **kwargs):
    """Get UserEXT objects."""
    # Support both the legacy monolithic and the standalone package layouts.
    try:
        from invenio.modules.accounts.models import UserEXT
    except ImportError:
        from invenio_accounts.models import UserEXT
    query = UserEXT.query
    return query.count(), query.all()
def search(self):
    """Search srt in project for cells matching list of terms."""
    # Deduplicate across patterns, then sort numerically.
    hits = set()
    for pattern in Config.patterns:
        hits.update(self.termfinder(pattern))
    return sorted(hits, key=int)
def set_wx_window_layout(wx_window, layout):
    '''set a WinLayout for a wx window'''
    # Best effort: the window may no longer accept geometry changes, in
    # which case we report the problem but never propagate it.
    try:
        wx_window.SetSize(layout.size)
        wx_window.SetPosition(layout.pos)
    except Exception as error:
        print(error)
def find_by_example(self, crash, offset = None, limit = None):
    """
    Find all crash dumps that have common properties with the crash dump
    provided.

    Results can be paged to avoid consuming too much memory if the database
    is large.

    @see: L{find}

    @type crash: L{Crash}
    @param crash: Crash object to compare with. Fields set to C{None} are
        ignored, all other fields but the signature are used in the
        comparison.
        To search for signature instead use the L{find} method.

    @type offset: int
    @param offset: (Optional) Skip the first I{offset} results.

    @type limit: int
    @param limit: (Optional) Return at most I{limit} results.

    @rtype: list(L{Crash})
    @return: List of similar crash dumps found.
    """
    # Validate the parameters.
    if limit is not None and not limit:
        warnings.warn("CrashDAO.find_by_example() was set a limit of 0"
                      " results, returning without executing a query.")
        return []

    # Build the query.
    query = self._session.query(CrashDTO)

    # Order by row ID to get consistent results.
    # Also some database engines require ordering when using offsets.
    # BUG FIX: Query objects have no .asc() method; ordering must go
    # through Query.order_by() with an ordered column expression.
    query = query.order_by(CrashDTO.id.asc())

    # Build a CrashDTO from the Crash object.
    dto = CrashDTO(crash)

    # Filter all the fields in the crashes table that are present in the
    # CrashDTO object and not set to None, except for the row ID.
    for name, column in compat.iteritems(CrashDTO.__dict__):
        if not name.startswith('__') and name not in ('id',
                                                      'signature',
                                                      'data'):
            if isinstance(column, Column):
                value = getattr(dto, name, None)
                if value is not None:
                    query = query.filter(column == value)

    # Page the query.
    if offset:
        query = query.offset(offset)
    if limit:
        query = query.limit(limit)

    # Execute the SQL query and convert the results.
    # (Renamed the loop variable so it no longer shadows the example DTO.)
    try:
        return [row.toCrash() for row in query.all()]
    except NoResultFound:
        return []
provided.
Results can be paged to avoid consuming too much memory if the database
is large.
@see: L{find}
@type crash: L{Crash}
@param crash: Crash object to compare with. Fields set to C{None} are
ignored, all other fields but the signature are used in the
comparison.
To search for signature instead use the L{find} method.
@type offset: int
@param offset: (Optional) Skip the first I{offset} results.
@type limit: int
@param limit: (Optional) Return at most I{limit} results.
@rtype: list(L{Crash})
@return: List of similar crash dumps found. |
def json_to_string(value, null_string_repr='[]', trimable=False):
    """
    Return a string representation of the specified JSON object.

    @param value: a JSON object.

    @param null_string_repr: the string representation of the null
        object.

    @return: a string representation of the specified JSON object.
    """
    if is_undefined(value):
        return null_string_repr
    return obj.jsonify(value, trimable=trimable)
@param value: a JSON object.
@param null_string_rep: the string representation of the null
object.
@return: a string representation of the specified JSON object. |
def silent_exec_method(self, code):
    """Silently execute a kernel method and save its reply

    The methods passed here **don't** involve getting the value
    of a variable but instead replies that can be handled by
    ast.literal_eval.

    To get a value see `get_value`

    Parameters
    ----------
    code : string
        Code that contains the kernel method as part of its
        string

    See Also
    --------
    handle_exec_method : Method that deals with the reply

    Note
    ----
    This is based on the _silent_exec_callback method of
    RichJupyterWidget. Therefore this is licensed BSD
    """
    # A fresh uuid marks the reply as originating from this call site.
    local_uuid = to_text_string(uuid.uuid1())
    code = to_text_string(code)
    if self.kernel_client is None:
        return
    msg_id = self.kernel_client.execute(
        '', silent=True, user_expressions={local_uuid: code})
    # Remember which method this uuid/msg_id pair belongs to so the reply
    # handler can route it.
    self._kernel_methods[local_uuid] = code
    self._request_info['execute'][msg_id] = self._ExecutionRequest(
        msg_id, 'silent_exec_method')
The methods passed here **don't** involve getting the value
of a variable but instead replies that can be handled by
ast.literal_eval.
To get a value see `get_value`
Parameters
----------
code : string
Code that contains the kernel method as part of its
string
See Also
--------
handle_exec_method : Method that deals with the reply
Note
----
This is based on the _silent_exec_callback method of
RichJupyterWidget. Therefore this is licensed BSD |
def build(self, docs=None, filename=None):
    """Build the FM-index.

    Params:
        <iterator> | <generator> docs
        <str> filename
    """
    if docs:
        if hasattr(docs, 'items'):
            # Mapping input: feed documents in ascending key order.
            for _, doc in sorted(docs.items(), key=lambda kv: kv[0]):
                self.fm.push_back(doc)
        else:
            # Plain iterable: skip falsy (e.g. empty) documents.
            for doc in (d for d in docs if d):
                self.fm.push_back(doc)
        self.fm.build()
    if filename:
        self.fm.write(filename)
Params:
<iterator> | <generator> docs
<str> filename |
def first_interesting_frame(self):
    """
    Walk down from the root frame until a frame with more than one
    child is found; fall back to the root when the tree never branches.
    """
    root = self.root_frame()
    current = root
    while len(current.children) <= 1:
        if not current.children:
            # The chain ended without ever branching.
            return root
        current = current.children[0]
    return current
def has_credentials(self):
    """Returns True if there are valid credentials for the current user
    and required scopes."""
    credentials = _credentials_from_request(self.request)
    # Mirrors the original chained expression: short-circuits to the
    # falsy credentials object itself when none are available.
    usable = credentials and not credentials.invalid
    return usable and credentials.has_scopes(self._get_scopes())
and required scopes. |
def stylize(obj, style='plastique', theme='projexui'):
    """
    Styles the inputed object with the given options.

    :param      obj   | <QtGui.QWidget> || <QtGui.QApplication>
                style | <str>
                base  | <str>
    """
    obj.setStyle(style)
    if not theme:
        return
    # Load the theme's stylesheet and apply it when one exists.
    sheet = resources.read('styles/{0}/style.css'.format(theme))
    if sheet:
        obj.setStyleSheet(sheet)
:param obj | <QtGui.QWidget> || <QtGui.QApplication>
style | <str>
base | <str> |
def delete_property(self, key):
    """Remove a property from the document.

    Calling code should use this method instead of modifying
    ``properties`` directly.  Raises ``KeyError`` when *key* is a
    reserved attribute name or is not present on the document.
    """
    # Reserved attributes may never be removed through this API.
    if key in self.RESERVED_ATTRIBUTE_NAMES:
        raise KeyError(key)
    # Plain dict deletion; a missing key raises KeyError naturally.
    del self.o[key]
Calling code should use this method to remove properties on the
document instead of modifying ``properties`` directly.
If there is a property with the name in ``key``, it will be removed.
Otherwise, a ``KeyError`` will be thrown. |
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        Optional vim.HostStorageSystem of the host; looked up from the
        host when not provided

    Raises salt.exceptions.VMwareApiError on API/permission faults and
    salt.exceptions.VMwareRuntimeError on vmodl runtime faults.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        # No storage system passed in: derive one from the host itself.
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Inspect the disk's current partition layout so a new partition can
    # be computed for the datastore.
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    # Assemble the VMFS creation spec for the computed partition.
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
host_ref
vim.HostSystem object referencing a host to create the datastore on
datastore_name
Name of the datastore
disk_ref
vim.HostScsiDislk on which the datastore is created
vmfs_major_version
VMFS major version to use |
def delete_bams(job, bams, patient_id):
    """
    Delete the bams from the job Store once their purpose has been achieved (i.e. after all
    mutation calling steps). Will also delete the chimeric junction file from Star.

    :param dict bams: Dict of bam and bai files, possibly holding a nested
        dict under the 'rna_genome' key
    :param str patient_id: The ID of the patient for logging purposes.
    """
    # BUGFIX: the original code reassigned `bams` to the .bam/.bai-filtered
    # dict *before* testing for the 'rna_genome'/'rnaChimeric.out.junction'
    # keys, so those elif branches could never fire.  Test the special keys
    # on the original dict first, then fall back to flat deletion.
    if 'rna_genome' in bams:
        # Nested dict from the RNA alignment step: recurse into it, then
        # drop the transcriptome bam stored alongside it.
        delete_bams(job, bams['rna_genome'], patient_id)
        job.fileStore.logToMaster('Deleting "rna_transcriptome.bam" for patient "%s".' %
                                  patient_id)
        job.fileStore.deleteGlobalFile(bams['rna_transcriptome.bam'])
    elif 'rnaChimeric.out.junction' in bams:
        job.fileStore.logToMaster('Deleting "rnaChimeric.out.junction" for patient "%s".' %
                                  patient_id)
        job.fileStore.deleteGlobalFile(bams['rnaChimeric.out.junction'])
    else:
        # Flat case: delete every non-null bam/bai entry.
        flat = {b: v for b, v in bams.items()
                if (b.endswith('.bam') or b.endswith('.bai')) and v is not None}
        for key, val in flat.items():
            job.fileStore.logToMaster('Deleting "%s" for patient "%s".' % (key, patient_id))
            job.fileStore.deleteGlobalFile(val)
mutation calling steps). Will also delete the chimeric junction file from Star.
:param dict bams: Dict of bam and bai files
:param str patient_id: The ID of the patient for logging purposes. |
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates a moving average for all losses and attaches summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Track an exponential moving average (decay 0.9) of every individual
    # loss plus the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    tracked = losses + [total_loss]
    loss_averages_op = loss_averages.apply(tracked)
    for loss in tracked:
        # The raw value gets a ' (raw)' suffixed scalar; the smoothed
        # average reuses the original op name.
        tf.summary.scalar(loss.op.name + ' (raw)', loss)
        tf.summary.scalar(loss.op.name, loss_averages.average(loss))
    return loss_averages_op
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses. |
def recurrent_transformer_decoder(
        decoder_input,
        encoder_output,
        decoder_self_attention_bias,
        encoder_decoder_attention_bias,
        hparams,
        name="decoder",
        nonpadding=None,
        save_weights_to=None,
        make_image_summary=True):
    """Recurrent decoder function.

    Builds a universal-transformer decoder layer out of an encoder-style
    ffn unit and a decoder attention unit.

    Args:
        decoder_input: decoder input tensor.
        encoder_output: encoder output, attended to by the decoder.
        decoder_self_attention_bias: bias for the decoder's self-attention.
        encoder_decoder_attention_bias: bias for encoder-decoder attention.
        hparams: hyperparameters object.
        name: variable scope name.
        nonpadding: optional non-padding mask passed to the ffn unit.
        save_weights_to: optional dict that collects attention weights.
        make_image_summary: whether to emit attention image summaries.

    Returns:
        Tuple of (layer-preprocessed output tensor, extra_output as
        returned by universal_transformer_layer).
    """
    x = decoder_input
    attention_dropout_broadcast_dims = (
        common_layers.comma_separated_string_to_integer_list(
            getattr(hparams, "attention_dropout_broadcast_dims", "")))
    with tf.variable_scope(name):
        ffn_unit = functools.partial(
            # use encoder ffn, since decoder ffn use left padding
            universal_transformer_util.transformer_encoder_ffn_unit,
            hparams=hparams,
            nonpadding_mask=nonpadding)
        attention_unit = functools.partial(
            universal_transformer_util.transformer_decoder_attention_unit,
            hparams=hparams,
            encoder_output=encoder_output,
            decoder_self_attention_bias=decoder_self_attention_bias,
            encoder_decoder_attention_bias=encoder_decoder_attention_bias,
            attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
            save_weights_to=save_weights_to,
            make_image_summary=make_image_summary)
        x, extra_output = universal_transformer_util.universal_transformer_layer(
            x, hparams, ffn_unit, attention_unit)
        return common_layers.layer_preprocess(x, hparams), extra_output
async def addMachines(self, params=None):
    """Add a machine to the model being deployed.

    :param params dict:
        Dictionary specifying the machine to add. All keys are optional.
        Keys include:

        series: string specifying the machine OS series.

        constraints: string holding machine constraints, if any. We'll
            parse this into the json friendly dict that the juju api
            expects.

        container_type: string holding the type of the container (for
            instance ""lxd" or kvm"). It is not specified for top level
            machines.

        parent_id: string holding a placeholder pointing to another
            machine change or to a unit change. This value is only
            specified in the case this machine is a container, in
            which case also ContainerType is set.
    """
    params = params or {}
    # Normalize keys
    params = {normalize_key(k): params[k] for k in params.keys()}
    # Fix up values, as necessary.
    if 'parent_id' in params:
        if params['parent_id'].startswith('$addUnit'):
            # Placeholder points at a unit change: host this container
            # on that unit's machine.
            unit = self.resolve(params['parent_id'])[0]
            params['parent_id'] = unit.machine.entity_id
        else:
            # Placeholder points at another machine change.
            params['parent_id'] = self.resolve(params['parent_id'])
    # Convert the constraints string into the dict the API expects.
    params['constraints'] = parse_constraints(
        params.get('constraints'))
    params['jobs'] = params.get('jobs', ['JobHostUnits'])
    if params.get('container_type') == 'lxc':
        # Juju 2.0 dropped lxc support; transparently upgrade to lxd.
        log.warning('Juju 2.0 does not support lxc containers. '
                    'Converting containers to lxd.')
        params['container_type'] = 'lxd'
    # Submit the request.
    params = client.AddMachineParams(**params)
    results = await self.client_facade.AddMachines([params])
    error = results.machines[0].error
    if error:
        raise ValueError("Error adding machine: %s" % error.message)
    machine = results.machines[0].machine
    log.debug('Added new machine %s', machine)
    return machine
Dictionary specifying the machine to add. All keys are optional.
Keys include:
series: string specifying the machine OS series.
constraints: string holding machine constraints, if any. We'll
parse this into the json friendly dict that the juju api
expects.
container_type: string holding the type of the container (for
instance ""lxd" or kvm"). It is not specified for top level
machines.
parent_id: string holding a placeholder pointing to another
machine change or to a unit change. This value is only
specified in the case this machine is a container, in
which case also ContainerType is set. |
def timedelta_seconds(td):
    '''
    Return the offset stored by a :class:`datetime.timedelta` object as an
    integer number of seconds.  Microseconds, if present, are rounded to
    the nearest second.

    Delegates to
    :meth:`timedelta.total_seconds() <datetime.timedelta.total_seconds()>`
    if available.

    >>> timedelta_seconds(timedelta(hours=1))
    3600
    >>> timedelta_seconds(timedelta(hours=-1))
    -3600
    >>> timedelta_seconds(timedelta(hours=1, minutes=30))
    5400
    >>> timedelta_seconds(timedelta(hours=1, minutes=30,
    ...     microseconds=300000))
    5400
    >>> timedelta_seconds(timedelta(hours=1, minutes=30,
    ...     microseconds=900000))
    5401
    '''
    if hasattr(td, 'total_seconds'):
        return int(round(td.total_seconds()))
    # Fallback for very old Pythons without total_seconds(): combine the
    # three stored components by hand.
    total = (td.days * 86400) + td.seconds + (td.microseconds / 1000000)
    return int(round(total))
integer number of seconds. Microseconds, if present, are rounded to
the nearest second.
Delegates to
:meth:`timedelta.total_seconds() <datetime.timedelta.total_seconds()>`
if available.
>>> timedelta_seconds(timedelta(hours=1))
3600
>>> timedelta_seconds(timedelta(hours=-1))
-3600
>>> timedelta_seconds(timedelta(hours=1, minutes=30))
5400
>>> timedelta_seconds(timedelta(hours=1, minutes=30,
... microseconds=300000))
5400
>>> timedelta_seconds(timedelta(hours=1, minutes=30,
... microseconds=900000))
5401 |
def is_path_python_module(thepath):
    """
    Given a path, find out if the path is a python module or is inside
    a python module.

    Returns ``False`` for paths that are neither files nor directories
    (the original implicitly returned ``None`` in that case).
    """
    thepath = path.normpath(thepath)
    if path.isfile(thepath):
        # A file is a module iff its extension is a recognised suffix.
        base, ext = path.splitext(thepath)
        return ext in _py_suffixes
    if path.isdir(thepath):
        # A directory is a package iff it carries an __init__ file with
        # any recognised suffix.
        return any(
            path.isfile(path.join(thepath, '__init__%s' % suffix))
            for suffix in _py_suffixes)
    # Neither a file nor a directory (e.g. the path does not exist).
    return False
a python module. |
def target_for_product(self, product):
    """Looks up the target key for a product.

    :API: public
    :param product: The product to search for
    :return: None if there is no target for the product
    """
    # First target (in mapping order) whose product list contains it.
    matches = (tgt for tgt, prods in self._products_by_target.items()
               if product in prods)
    return next(matches, None)
:API: public
:param product: The product to search for
:return: None if there is no target for the product |
def users(self):
    """
    List of users of this slack team.

    Fetched from the API on first (truthy-empty) access, then cached.
    """
    cached = self._users
    if not cached:
        cached = self._call_api('users.list')['members']
        self._users = cached
    return cached
def kinesia_scores(self, data_frame):
    """
    This method calculates the number of key taps.

    :param data_frame: the data frame
    :type data_frame: pandas.DataFrame
    :return ks: key taps
    :rtype ks: float
    :return duration: test duration (seconds)
    :rtype duration: float
    """
    # Taps are the rows flagged with action_type == 1.
    taps = sum(data_frame.action_type == 1)
    # Duration is the last timestamp, rounded up to whole seconds.
    duration = math.ceil(data_frame.td[-1])
    return taps, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ks: key taps
:rtype ks: float
:return duration: test duration (seconds)
:rtype duration: float |
def reverse_index_mapping(self):
    """Get mapping from this segment's indexes to the indexes of
    the base array.

    If the index is < 0, the index is out of range, meaning that it doesn't
    exist in this segment and is not mapped to the base array
    """
    # Computed lazily and cached on the instance after first access.
    if self._reverse_index_mapping is None:
        if self.is_indexed:
            # Initialize array to out of range
            r = np.zeros(self.base_length, dtype=np.int32) - 1
            # Each base index listed in self.order maps back to its
            # position within this segment.
            r[self.order] = np.arange(len(self.order), dtype=np.int32)
        elif self.data.base is None:
            # Starts at the beginning; produces the identity
            r = np.arange(self.data_length, dtype=np.int32)
        else:
            # self.data is a view into a larger base buffer: only the
            # covered span [data_start, data_end) maps; the rest stays -1.
            r = np.zeros(self.base_length, dtype=np.int32) - 1
            r[self.data_start - self.base_start:self.data_end - self.base_start] = np.arange(self.data_length, dtype=np.int32)
        self._reverse_index_mapping = r
    return self._reverse_index_mapping
the base array.
If the index is < 0, the index is out of range, meaning that it doesn't
exist in this segment and is not mapped to the base array |
def get(self, url, **kwargs):
    r"""Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :rtype: requests.Response
    """
    # GET requests follow redirects unless the caller says otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return self.request('GET', url, **kwargs)
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response |
def validate(self, arg=None):
    """Check that inputted path is valid - set validator accordingly"""
    # A valid (existing) directory clears the validator icon; anything
    # else shows the error icon.
    is_valid = os.path.isdir(self.path)
    self.validator.object = None if is_valid else ICONS['error']
def set_child_value(
        self, sensor_id, child_id, value_type, value, **kwargs):
    """Add a command to set a sensor value, to the queue.

    A queued command will be sent to the sensor when the gateway
    thread has sent all previously queued commands.

    If the sensor attribute new_state returns True, the command will be
    buffered in a queue on the sensor, and only the internal sensor state
    will be updated. When a smartsleep message is received, the internal
    state will be pushed to the sensor, via _handle_smartsleep method.
    """
    if not self.is_sensor(sensor_id, child_id):
        return
    sensor = self.sensors[sensor_id]
    if sensor.new_state:
        # Sleeping node: buffer on the sensor, update internal state only.
        sensor.set_child_value(
            child_id, value_type, value, children=sensor.new_state)
    else:
        # Awake node: queue the actual send on the gateway job queue.
        self.add_job(partial(
            sensor.set_child_value, child_id, value_type, value, **kwargs))
A queued command will be sent to the sensor when the gateway
thread has sent all previously queued commands.
If the sensor attribute new_state returns True, the command will be
buffered in a queue on the sensor, and only the internal sensor state
will be updated. When a smartsleep message is received, the internal
state will be pushed to the sensor, via _handle_smartsleep method. |
def remove_root_family(self, family_id):
    """Removes a root family.

    arg:    family_id (osid.id.Id): the ``Id`` of a family
    raise:  NotFound - ``family_id`` not a root
    raise:  NullArgument - ``family_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Mirrors osid.resource.BinHierarchyDesignSession.remove_root_bin:
    # prefer the catalog session when one is available.
    catalog_session = self._catalog_session
    if catalog_session is not None:
        return catalog_session.remove_root_catalog(catalog_id=family_id)
    return self._hierarchy_session.remove_root(id_=family_id)
arg: family_id (osid.id.Id): the ``Id`` of a family
raise: NotFound - ``family_id`` not a root
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def CLJP(S, color=False):
    """Compute a C/F splitting using the parallel CLJP algorithm.

    Parameters
    ----------
    S : csr_matrix
        Strength of connection matrix indicating the strength between nodes i
        and j (S_ij)
    color : bool
        use the CLJP coloring approach

    Returns
    -------
    splitting : array
        Array of length of S of ones (coarse) and zeros (fine)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical.split import CLJP
    >>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
    >>> splitting = CLJP(S)

    See Also
    --------
    MIS, PMIS, CLJPc

    References
    ----------
    .. [8] David M. Alber and Luke N. Olson
       "Parallel coarse-grid selection"
       Numerical Linear Algebra with Applications 2007; 14:611-643.
    """
    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')
    S = remove_diagonal(S)
    # The C routine also needs efficient column access, so hand it the
    # transpose of S in CSR form.
    T = S.T.tocsr()
    splitting = np.empty(S.shape[0], dtype='intc')
    amg_core.cljp_naive_splitting(S.shape[0],
                                  S.indptr, S.indices,
                                  T.indptr, T.indices,
                                  splitting,
                                  1 if color else 0)
    return splitting
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
color : bool
use the CLJP coloring approach
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJP
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJP(S)
See Also
--------
MIS, PMIS, CLJPc
References
----------
.. [8] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643. |
def set_up(self):
    """
    This class overrides this method
    """
    # Pause the menu loop before handing the terminal to external code.
    self.menu.pause()
    # Save the current terminal mode so it can be restored later
    # (a subsequent curses.reset_prog_mode can return to this state).
    curses.def_prog_mode()
    self.menu.clear_screen()
def Kn2Der(nu, y, n=0):
    r"""Find the derivatives of :math:`K_\nu(y^{1/2})`.

    Parameters
    ----------
    nu : float
        The order of the modified Bessel function of the second kind.
    y : array of float
        The values to evaluate at.
    n : nonnegative int, optional
        The order of derivative to take.
    """
    n = int(n)
    # Modern SciPy removed its top-level NumPy aliases (scipy.asarray,
    # scipy.sqrt, scipy.zeros_like, scipy.arange), so use numpy directly.
    y = np.asarray(y, dtype=float)
    sqrty = np.sqrt(y)
    if n == 0:
        return scipy.special.kv(nu, sqrty)
    # Faa di Bruno's formula: combine derivatives of K_nu evaluated at
    # sqrt(y) with incomplete Bell polynomials of the derivatives of y**0.5.
    K = np.zeros_like(y)
    x = np.asarray(
        [
            fixed_poch(1.5 - j, j) * y**(0.5 - j)
            for j in np.arange(1.0, n + 1.0, dtype=float)
        ]
    ).T
    for k in np.arange(1.0, n + 1.0, dtype=float):
        K += (
            scipy.special.kvp(nu, sqrty, n=int(k)) *
            incomplete_bell_poly(n, int(k), x)
        )
    return K
Parameters
----------
nu : float
The order of the modified Bessel function of the second kind.
y : array of float
The values to evaluate at.
n : nonnegative int, optional
The order of derivative to take. |
def send_all():
    """
    Send all eligible messages in the queue.

    Acquires a lock so only one sender runs at a time; sends queued
    messages through the configured backend, logging successes and
    deferring messages whose SMTP/socket delivery fails.
    """
    # The actual backend to use for sending, defaulting to the Django default.
    # To make testing easier this is not stored at module level.
    EMAIL_BACKEND = getattr(
        settings,
        "MAILER_EMAIL_BACKEND",
        "django.core.mail.backends.smtp.EmailBackend"
    )
    acquired, lock = acquire_lock()
    if not acquired:
        # Another sender already holds the lock; bail out quietly.
        return
    start_time = time.time()
    deferred = 0
    sent = 0
    try:
        connection = None
        for message in prioritize():
            try:
                if connection is None:
                    # Connection is opened lazily and reused across messages.
                    connection = get_connection(backend=EMAIL_BACKEND)
                logging.info("sending message '{0}' to {1}".format(
                    message.subject,
                    ", ".join(message.to_addresses))
                )
                email = message.email
                if email is not None:
                    email.connection = connection
                    if not hasattr(email, 'reply_to'):
                        # Compatability fix for EmailMessage objects
                        # pickled when running < Django 1.8 and then
                        # unpickled under Django 1.8
                        email.reply_to = []
                    ensure_message_id(email)
                    email.send()
                    # connection can't be stored in the MessageLog
                    email.connection = None
                    message.email = email  # For the sake of MessageLog
                    MessageLog.objects.log(message, RESULT_SUCCESS)
                    sent += 1
                else:
                    logging.warning("message discarded due to failure in converting from DB. Added on '%s' with priority '%s'" % (message.when_added, message.priority))  # noqa
                    message.delete()
            except (socket_error, smtplib.SMTPSenderRefused,
                    smtplib.SMTPRecipientsRefused,
                    smtplib.SMTPDataError,
                    smtplib.SMTPAuthenticationError) as err:
                # Transient delivery failure: defer the message for a
                # later run instead of dropping it.
                message.defer()
                logging.info("message deferred due to failure: %s" % err)
                MessageLog.objects.log(message, RESULT_FAILURE, log_message=str(err))
                deferred += 1
                # Get new connection, it case the connection itself has an error.
                connection = None
            # Check if we reached the limits for the current run
            if _limits_reached(sent, deferred):
                break
            _throttle_emails()
    finally:
        release_lock(lock)
    logging.info("")
    logging.info("%s sent; %s deferred;" % (sent, deferred))
    logging.info("done in %.2f seconds" % (time.time() - start_time))
def select(self):
    """Select Slackware command

    Prints the detected packages, shows the available Slackware
    commands and runs the one chosen by the user.
    """
    print("\nDetected Slackware binary package for installation:\n")
    for pkg in self.packages:
        print(" " + pkg.split("/")[-1])
    print("")
    self.msg.template(78)
    print("| Choose a Slackware command:")
    self.msg.template(78)
    # Commands are shown colourised, keyed by their shortcut letter.
    for com in sorted(self.commands):
        print("| {0}{1}{2}) {3}{4}{5}".format(
            self.meta.color["RED"], com, self.meta.color["ENDC"],
            self.meta.color["GREEN"], self.commands[com],
            self.meta.color["ENDC"]))
    self.msg.template(78)
    try:
        self.choice = raw_input(" > ")
    except EOFError:
        # Ctrl-D at the prompt aborts cleanly.
        print("")
        raise SystemExit()
    if self.choice in self.commands.keys():
        # Echo the chosen command over the prompt line (cursor-up escape),
        # then run it.
        sys.stdout.write(" \x1b[1A{0}{1}{2}\n\n".format(
            self.meta.color["CYAN"], self.commands[self.choice],
            self.meta.color["ENDC"]))
        sys.stdout.flush()
        self.execute()
def dict_to_path(as_dict):
    """
    Turn a pure dict into a dict containing entity objects that
    can be sent directly to a Path constructor.

    Parameters
    -----------
    as_dict : dict
        Has keys: 'vertices', 'entities'

    Returns
    ------------
    kwargs : dict
        Has keys: 'vertices', 'entities'
    """
    # Map serialized type names onto their constructors.
    loaders = {'Arc': Arc, 'Line': Line}
    result = as_dict.copy()
    # Rebuild each serialized entity with its matching constructor.
    result['entities'] = [
        loaders[entity['type']](points=entity['points'],
                                closed=entity['closed'])
        for entity in as_dict['entities']]
    return result
can be sent directly to a Path constructor.
Parameters
-----------
as_dict : dict
Has keys: 'vertices', 'entities'
Returns
------------
kwargs : dict
Has keys: 'vertices', 'entities' |
def subselect(self, obj):
    """
    Filter a dict of hyperparameter settings to only those keys defined
    in this HyperparameterDefaults.
    """
    known = self.defaults
    return {key: value for (key, value) in obj.items() if key in known}
in this HyperparameterDefaults . |
def p_statements(self, p):
    """statements : statements statement
                  | statement
    """
    # NOTE: the docstring above IS the PLY grammar rule for this
    # production -- do not edit it unless the grammar itself changes.
    n = len(p)
    if n == 3:
        # statements : statements statement  -> append to the running list
        p[0] = p[1] + [p[2]]
    elif n == 2:
        # statements : statement  -> start a new 'statements' AST node
        p[0] = ['statements', p[1]]
| statement |
def _build_cache():
    """Build sets cache."""
    sets = current_oaiserver.sets
    if sets is None:
        # Cache miss: collect the spec of every OAISet that has no
        # search pattern and memoize the list on the server object.
        sets = current_oaiserver.sets = [
            oaiset.spec
            for oaiset in OAISet.query.filter(
                OAISet.search_pattern.is_(None)).all()
        ]
    return sets
def resolve(self, var, context):
    """Resolves a variable out of context if it's not in quotes"""
    if var is None:
        return var
    quoted = var[0] in ('"', "'") and var[-1] == var[0]
    if quoted:
        # Literal string: strip the surrounding quotes.
        return var[1:-1]
    return template.Variable(var).resolve(context)
def record_get(self, creative_ids, nick=None):
    '''xxxxx.xxxxx.creatives.record.get
    ===================================
    Fetch the modification records for the given list of creative ids.'''
    request = TOPRequest('xxxxx.xxxxx.creatives.record.get')
    request['creative_ids'] = creative_ids
    if nick is not None:
        request['nick'] = nick
    self.create(self.execute(request), models={'result': CreativeRecord})
    return self.result
===================================
根据一个创意Id列表取得创意对应的修改记录 |
def Then(self, f, *args, **kwargs):
    """
    `Then(f, ...)` is equivalent to `ThenAt(1, f, ...)`. Checkout `phi.builder.Builder.ThenAt` for more information.
    """
    # Pure delegation: apply f at position 1.
    return self.ThenAt(1, f, *args, **kwargs)
def unit_client(self):
    # type: () -> TCPClient
    """Return a TCPClient with same settings of the batch TCP client"""
    single = TCPClient(self.host, self.port, self.prefix)
    self._configure_client(single)
    return single
def main(argv=None, directory=None):
    """
    Main entry point for the tool, used by setup.py
    Returns a value that can be passed into exit() specifying
    the exit code.
    1 is an error
    0 is successful run
    """
    logging.basicConfig(format='%(message)s')
    argv = argv or sys.argv
    arg_dict = parse_quality_args(argv[1:])
    GitPathTool.set_cwd(directory)
    fail_under = arg_dict.get('fail_under')
    tool = arg_dict['violations']
    user_options = arg_dict.get('options')
    if user_options:
        # strip quotes if present
        first_char = user_options[0]
        last_char = user_options[-1]
        if first_char == last_char and first_char in ('"', "'"):
            user_options = user_options[1:-1]
    driver = QUALITY_DRIVERS.get(tool)
    if driver is not None:
        # If we've been given pre-generated reports,
        # try to open the files
        input_reports = []
        for path in arg_dict['input_reports']:
            try:
                input_reports.append(open(path, 'rb'))
            except IOError:
                # A missing report is only a warning; the tool can still run.
                LOGGER.warning("Could not load '{}'".format(path))
        try:
            reporter = QualityReporter(driver, input_reports, user_options)
            percent_passing = generate_quality_report(
                reporter,
                arg_dict['compare_branch'],
                html_report=arg_dict['html_report'],
                css_file=arg_dict['external_css_file'],
                ignore_staged=arg_dict['ignore_staged'],
                ignore_unstaged=arg_dict['ignore_unstaged'],
                exclude=arg_dict['exclude'],
            )
            # Exit 0 only when the quality threshold is met.
            if percent_passing >= fail_under:
                return 0
            else:
                LOGGER.error("Failure. Quality is below {}%.".format(fail_under))
                return 1
        except (ImportError, EnvironmentError):
            LOGGER.error(
                "Quality tool not installed: '{}'".format(tool)
            )
            return 1
        # Close any reports we opened
        finally:
            for file_handle in input_reports:
                file_handle.close()
    else:
        LOGGER.error("Quality tool not recognized: '{}'".format(tool))
        return 1
Returns a value that can be passed into exit() specifying
the exit code.
1 is an error
0 is successful run |
def get_file_systems(filesystemid=None,
                     keyid=None,
                     key=None,
                     profile=None,
                     region=None,
                     creation_token=None,
                     **kwargs):
    '''
    Get all EFS properties or a specific instance property
    if filesystemid is specified

    filesystemid
        (string) - ID of the file system to retrieve properties

    creation_token
        (string) - A unique token that identifies an EFS.
        If fileysystem created via create_file_system this would
        either be explictitly passed in or set to name.
        You can limit your search with this.

    returns
        (list[dict]) - list of all elastic file system properties

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' boto_efs.get_file_systems efs-id
    '''
    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)

    # Build the filter arguments once instead of enumerating every
    # filesystemid/creation_token combination in separate branches.
    params = {}
    if filesystemid:
        params['FileSystemId'] = filesystemid
    if creation_token:
        params['CreationToken'] = creation_token

    response = client.describe_file_systems(**params)
    result = response["FileSystems"]

    if not params:
        # An unfiltered listing may be paginated; follow NextMarker.
        while "NextMarker" in response:
            response = client.describe_file_systems(
                Marker=response["NextMarker"])
            result.extend(response["FileSystems"])
    return result
if filesystemid is specified
filesystemid
(string) - ID of the file system to retrieve properties
creation_token
(string) - A unique token that identifies an EFS.
If fileysystem created via create_file_system this would
either be explictitly passed in or set to name.
You can limit your search with this.
returns
(list[dict]) - list of all elastic file system properties
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.get_file_systems efs-id |
def list(self):
    """Lists all sessions in the store.

    .. versionadded:: 0.6
    """
    prefix, suffix = self.filename_template.split('%s', 1)
    # Session ids are whatever sits between the template's prefix and
    # suffix; at least five characters are required to weed out junk.
    sid_re = re.compile(r'%s(.{5,})%s$' % (re.escape(prefix),
                                           re.escape(suffix)))
    sids = []
    for name in os.listdir(self.path):
        if name.endswith(_fs_transaction_suffix):
            # Skip sessions that are still being written out.
            continue
        match = sid_re.match(name)
        if match is not None:
            sids.append(match.group(1))
    return sids
.. versionadded:: 0.6 |
def single_val(self):
    """return relative error of worst point that might make the data none
    symmetric.
    """
    # NOTE(review): both calls below operate on self._tdsphere; the
    # second one looks like a copy-paste slip and was presumably meant
    # to use the phi-direction data (e.g. a ``self._pdsphere``) --
    # confirm against the rest of the class before changing.
    sv_t = self._sv(self._tdsphere)
    sv_p = self._sv(self._tdsphere)
    return (sv_t, sv_p)
symmetric. |
def output(self, output, status=None):
    """Output text to stdout or a pager command.

    The status text is not outputted to pager or files.

    The message will be logged in the audit log, if enabled. The
    message will be written to the tee file, if enabled. The
    message will be written to the output file, if enabled.
    """
    if output:
        size = self.cli.output.get_size()
        margin = self.get_output_margin(status)
        # Lines are buffered until we know whether the output fits the
        # terminal; once it doesn't, output switches to the pager (or the
        # buffer is flushed straight to the screen).
        fits = True
        buf = []
        output_via_pager = self.explicit_pager and special.is_pager_enabled()
        for i, line in enumerate(output, 1):
            special.write_tee(line)
            special.write_once(line)
            if fits or output_via_pager:
                # buffering
                buf.append(line)
                if len(line) > size.columns or i > (size.rows - margin):
                    fits = False
                    if not self.explicit_pager and special.is_pager_enabled():
                        # doesn't fit, use pager
                        output_via_pager = True
                    if not output_via_pager:
                        # doesn't fit, flush buffer
                        for line in buf:
                            click.secho(line)
                        buf = []
            else:
                click.secho(line)
        if buf:
            if output_via_pager:
                # sadly click.echo_via_pager doesn't accept generators
                click.echo_via_pager("\n".join(buf))
            else:
                for line in buf:
                    click.secho(line)
    if status:
        click.secho(status)
The status text is not outputted to pager or files.
The message will be logged in the audit log, if enabled. The
message will be written to the tee file, if enabled. The
message will be written to the output file, if enabled. |
def _set_automatic_tag(self, v, load=False):
    """
    Setter method for automatic_tag, mapped from YANG variable /routing_system/route_map/content/set/automatic_tag (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_automatic_tag is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_automatic_tag() directly.

    YANG Description: Automatically compute TAG value

    NOTE: machine-generated pyangbind setter — do not hand-edit the
    YANGDynClass arguments; they mirror the YANG model definition.
    """
    # If v is already a pyangbind-boxed value, unbox it through its
    # declared underlying type before re-validating below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # YANGDynClass validates v against the container type; it raises
        # TypeError/ValueError when v is incompatible with the model.
        t = YANGDynClass(v,base=automatic_tag.automatic_tag, is_container='container', presence=False, yang_name="automatic-tag", rest_name="automatic-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Automatically compute TAG value', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as a structured ValueError describing the expected type,
        # per the pyangbind error-reporting convention.
        raise ValueError({
            'error-string': """automatic_tag must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=automatic_tag.automatic_tag, is_container='container', presence=False, yang_name="automatic-tag", rest_name="automatic-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Automatically compute TAG value', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)""",
        })
    # Store the validated value and notify the parent tree, if supported.
    self.__automatic_tag = t
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_automatic_tag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_automatic_tag() directly.
YANG Description: Automatically compute TAG value |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.