content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def write_linux_hash_testvec(f, vec):
    """Format a hash test vector for Linux's crypto tests.

    Args:
        f: open file-like object the formatted fields are written to.
        vec: test-vector dict carrying ``vec['input']['key']``,
            ``vec['input']['message']`` and the expected ``vec['hash']``.
    """
    write_linux_testvec_hexfield(f, "key", vec['input']['key'])
    write_linux_testvec_field(f, "ksize", len(vec['input']['key']))
    write_linux_testvec_hexfield(f, "plaintext", vec['input']['message'])
    length = len(vec['input']['message'])
    write_linux_testvec_field(f, "psize", length)
    write_linux_testvec_hexfield(f, "digest", vec['hash'])
    # Emit scatterlist split points for the plaintext; the False flag
    # presumably disables key splitting -- confirm against the helper.
    write_scatterlist_splits(f, length, False)
def get_query_string_from_process_type_string(process_type_string: str) -> str:  # pylint: disable=invalid-name
    """
    Take the process type string of a Node and create the queryable type string.

    :param process_type_string: the process type string
    :type process_type_string: str
    :return: string that can be used to query for subclasses of the process type using 'LIKE <string>'
    :rtype: str
    """
    # Entry-point style strings ('group:name') are queryable as-is.
    if ':' in process_type_string:
        return process_type_string + '.'
    # Plain class paths: strip the class name and its module to get the
    # package prefix shared by subclasses.
    base_path = process_type_string.rsplit('.', 2)[0]
    return base_path + '.'
def test_key_to_fuzzer_and_benchmark():
    """Tests that key_to_fuzzer_and_benchmark returns the correct result."""
    # The key format is '<fuzzer> <benchmark>'; FUZZER and BENCHMARK are
    # module-level constants (presumably 'afl' and 'libpng-1.2.56').
    assert (coverage_data_utils.key_to_fuzzer_and_benchmark('afl libpng-1.2.56')
            == (FUZZER, BENCHMARK))
def get_column(fn):
    """Get column from Cellomics filename.

    Parameters
    ----------
    fn : string
        A filename from the Cellomics high-content screening system.

    Returns
    -------
    column : string
        The plate column of the filename: the digits of the well ID
        (e.g. '01' from well 'A01').

    Examples
    --------
    >>> fn = 'MFGTMP_140206180002_A01f00d0.TIF'
    >>> get_column(fn)
    '01'
    """
    sem = cellomics_semantic_filename(fn)
    # Well IDs look like 'A01': a row letter followed by the column digits,
    # so everything after the first character is the column.
    column = sem['well'][1:]
    return column
def json_to_csv(input_json, output_file):
    """Reads a JSON file, generated by the VGG Image Annotator, and generates a single CSV file"""
    with open(input_json) as handle:
        images = json.load(handle)
    rows = []
    for entry, image in images.items():
        filename = image['filename']
        for region in image['regions']:
            label = region['region_attributes']['class']
            shape = region['shape_attributes']
            left = shape['x']
            top = shape['y']
            right = left + shape['width']
            bottom = top + shape['height']
            # Image width/height are not recorded by VIA; emitted as 0.
            rows.append((filename, 0, 0, label, left, top, right, bottom))
    __list_to_csv(rows, output_file)
def add_tables():
    """
    Generates tables in postgres database according to SQLAlchemy
    model when this script is invoked directly via terminal.

    Returns the result of ``create_all`` (which is ``None`` in SQLAlchemy);
    existing tables are left untouched.
    """
    return database.Base.metadata.create_all(bind=database.engine)
def isloggedin(userdir):
    """If user has sent us an in date, valid cookie then return updated cookie header,
    otherwise return False.

    Returns a ``(user, cookie)`` tuple on success, ``False`` otherwise.
    ``userdir`` is forwarded to ``decodestring`` for cookie validation.
    """
    try:
        # CGI convention: the browser's cookies arrive in HTTP_COOKIE.
        rawcookie = os.environ['HTTP_COOKIE']
    except KeyError:
        # No cookie header sent at all.
        return False
    thecookie = SimpleCookie(rawcookie)
    try:
        cookiestring = thecookie['userid'].value
    except KeyError:
        # Cookies present, but not our 'userid' morsel.
        return False
    test = decodestring(cookiestring, userdir)
    if not test:
        return False
    user, password, cookiepath = test
    # Re-issue a fresh cookie so the session stays in date.
    thecookie = makecookie(user, password, cookiepath)
    return user, thecookie
def enable(name, start=False, **kwargs):
    """
    Start service ``name`` at boot.

    Returns ``True`` if operation is successful

    name
        the service's name

    start : False
        If ``True``, start the service once enabled.

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <name> [start=True]
    """
    # non-existent service
    if not available(name):
        return False
    # if service is aliased, refuse to enable it
    alias = get_svc_alias()
    if name in alias:
        log.error("This service is aliased, enable its alias instead")
        return False
    # down_file: file that disables sv autostart
    svc_realpath = _get_svc_path(name)[0]
    down_file = os.path.join(svc_realpath, "down")
    # if service already enabled, remove down_file to
    # let service starts on boot (as requested)
    if enabled(name):
        if os.path.exists(down_file):
            try:
                os.unlink(down_file)
            except OSError:
                log.error("Unable to remove file %s", down_file)
                return False
        return True
    # let's enable the service
    if not start:
        # create a temp 'down' file BEFORE enabling service.
        # will prevent sv from starting this service automatically.
        log.trace("need a temporary file %s", down_file)
        if not os.path.exists(down_file):
            try:
                # pylint: disable=resource-leakage
                salt.utils.files.fopen(down_file, "w").close()
                # pylint: enable=resource-leakage
            except OSError:
                log.error("Unable to create file %s", down_file)
                return False
    # enable the service
    try:
        os.symlink(svc_realpath, _service_path(name))
    except OSError:
        # (attempt to) remove temp down_file anyway
        log.error("Unable to create symlink %s", down_file)
        if not start:
            os.unlink(down_file)
        return False
    # ensure sv is aware of this new service before continuing.
    # if not, down_file might be removed too quickly,
    # before 'sv' have time to take care about it.
    # Documentation indicates that a change is handled within 5 seconds.
    cmd = "sv status {}".format(_service_path(name))
    retcode_sv = 1
    count_sv = 0
    while retcode_sv != 0 and count_sv < 10:
        time.sleep(0.5)
        count_sv += 1
        call = __salt__["cmd.run_all"](cmd)
        retcode_sv = call["retcode"]
    # remove the temp down_file in any case.
    if (not start) and os.path.exists(down_file):
        try:
            os.unlink(down_file)
        except OSError:
            log.error("Unable to remove temp file %s", down_file)
            retcode_sv = 1
    # if an error happened, revert our changes
    if retcode_sv != 0:
        # BUGFIX: this previously called os.path.join([path, name]) with a
        # list argument, which raises TypeError. The change to revert is the
        # symlink created above at _service_path(name).
        try:
            os.unlink(_service_path(name))
        except OSError:
            log.error("Unable to remove symlink %s", _service_path(name))
        return False
    return True
def docs(runtime, environment):
    """ Build the html documentation.

    ``runtime`` and ``environment`` select the edm environment used to run
    sphinx-build (resolved by ``get_parameters``).
    """
    parameters = get_parameters(runtime, environment)
    # {environment} is substituted later by execute() from `parameters`.
    commands = [
        "edm run -e {environment} -- sphinx-build -b html "
        "-d build/doctrees source build/html",
    ]
    # sphinx-build must run from the project's docs/ directory.
    with do_in_existingdir(os.path.join(os.getcwd(), 'docs')):
        execute(commands, parameters)
def label_vertices(ast, vi, vertices, var_v):
    """Label each node in the AST with a unique vertex id.

    vi : vertex id counter (starting value)
    vertices : list of all vertex tags (modified in place)
    var_v : unused here; kept for call-site compatibility
    Returns the (mutated) AST.
    """
    def visit(node):
        nonlocal vi
        # Only dict nodes are labelled; lists and scalars pass through.
        if type(node) is not dict:
            return node
        node["vertex_id"] = vi
        vertices.append(node["tag"])
        vi += 1
        # Recurse into every child value except the tag itself.
        for key, child in node.items():
            if key != "tag":
                visit(child)
        return node

    return visit(ast)
def standardize_batch(inputs,
                      is_training,
                      decay=0.999,
                      epsilon=1e-3,
                      data_format="NHWC",
                      use_moving_averages=True,
                      use_cross_replica_mean=None):
  """Adds TPU-enabled batch normalization layer.

  This version does not apply trainable scale or offset!
  It normalizes a tensor by mean and variance.

  Details on Batch Normalization can be found in "Batch Normalization:
  Accelerating Deep Network Training by Reducing Internal Covariate Shift",
  Ioffe S. and Szegedy C. 2015 [http://arxiv.org/abs/1502.03167].

  Note #1: This method computes the batch statistic across all TPU replicas,
  thus simulating the true batch norm in the distributed setting. If one wants
  to avoid the cross-replica communication set use_cross_replica_mean=False.

  Note #2: When is_training is True the moving_mean and moving_variance need
  to be updated in each training step. By default, the update_ops are placed
  in `tf.GraphKeys.UPDATE_OPS` and they need to be added as a dependency to
  the `train_op`. For example:

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    if update_ops:
      updates = tf.group(*update_ops)
      total_loss = control_flow_ops.with_dependencies([updates], total_loss)

  Note #3: Reasonable values for `decay` are close to 1.0, typically in the
  multiple-nines range: 0.999, 0.99, 0.9, etc. Lower the `decay` value (trying
  `decay`=0.9) if model experiences reasonably good training performance but
  poor validation and/or test performance.

  Args:
    inputs: A tensor with 2 or 4 dimensions, where the first dimension is
      `batch_size`. The normalization is over all but the last dimension if
      `data_format` is `NHWC`, and the second dimension if `data_format` is
      `NCHW`.
    is_training: Whether or not the layer is in training mode. In training
      mode it would accumulate the statistics of the moments into the
      `moving_mean` and `moving_variance` using an exponential moving average
      with the given `decay`. When is_training=False, these variables are not
      updated, and the precomputed values are used verbatim.
    decay: Decay for the moving averages. See notes above for reasonable
      values.
    epsilon: Small float added to variance to avoid dividing by zero.
    data_format: Input data format. NHWC or NCHW.
    use_moving_averages: If True keep moving averages of mean and variance that
      are used during inference. Otherwise use accumlators.
    use_cross_replica_mean: If True add operations to do computes batch norm
      statistics across all TPU cores. These ops are not compatible with other
      platforms. The default (None) will only add the operations if running
      on TPU.

  Returns:
    The normalized tensor with the same type and shape as `inputs`.
  """
  if data_format not in {"NCHW", "NHWC"}:
    raise ValueError(
        "Invalid data_format {}. Allowed: NCHW, NHWC.".format(data_format))
  if use_cross_replica_mean is None:
    # Default to global batch norm only on TPUs.
    use_cross_replica_mean = (
        tpu_function.get_tpu_context().number_of_shards is not None)
    logging.debug("Automatically determined use_cross_replica_mean=%s.",
                  use_cross_replica_mean)
  inputs = tf.convert_to_tensor(inputs)
  inputs_dtype = inputs.dtype
  inputs_shape = inputs.get_shape()
  # NOTE(review): channels are taken from the LAST dimension regardless of
  # data_format; for rank-4 NCHW inputs this reads W, not C -- confirm that
  # callers only pass NCHW for rank-2 inputs.
  num_channels = inputs.shape[-1].value
  if num_channels is None:
    raise ValueError("`C` dimension must be known but is None")
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError("Inputs %s has undefined rank" % inputs.name)
  elif inputs_rank not in [2, 4]:
    raise ValueError(
        "Inputs %s has unsupported rank."
        " Expected 2 or 4 but got %d" % (inputs.name, inputs_rank))
  # Bring 2-D inputs into 4-D format.
  if inputs_rank == 2:
    new_shape = [-1, 1, 1, num_channels]
    if data_format == "NCHW":
      new_shape = [-1, num_channels, 1, 1]
    inputs = tf.reshape(inputs, new_shape)

  # Execute a distributed batch normalization
  axis = 1 if data_format == "NCHW" else 3
  # Moments are computed in float32 for numerical stability; the result is
  # cast back to the original dtype at the end.
  inputs = tf.cast(inputs, tf.float32)
  reduction_axes = [i for i in range(4) if i != axis]
  if use_cross_replica_mean:
    mean, variance = tpu_ops.cross_replica_moments(inputs, reduction_axes)
  else:
    counts, mean_ss, variance_ss, _ = tf.nn.sufficient_statistics(
        inputs, reduction_axes, keep_dims=False)
    mean, variance = tf.nn.normalize_moments(
        counts, mean_ss, variance_ss, shift=None)

  if use_moving_averages:
    mean, variance = _moving_moments_for_inference(
        mean=mean, variance=variance, is_training=is_training, decay=decay)
  else:
    mean, variance = _accumulated_moments_for_inference(
        mean=mean, variance=variance, is_training=is_training)

  outputs = tf.nn.batch_normalization(
      inputs,
      mean=mean,
      variance=variance,
      offset=None,
      scale=None,
      variance_epsilon=epsilon)
  outputs = tf.cast(outputs, inputs_dtype)

  # Bring 2-D inputs back into 2-D format.
  if inputs_rank == 2:
    outputs = tf.reshape(outputs, [-1] + inputs_shape[1:].as_list())
  outputs.set_shape(inputs_shape)
  return outputs
def process_misc_info_txt(framework_target_files_temp_dir,
                          vendor_target_files_temp_dir,
                          output_target_files_temp_dir,
                          framework_misc_info_keys):
  """Performs special processing for META/misc_info.txt.

  This function merges the contents of the META/misc_info.txt files from the
  framework directory and the vendor directory, placing the merged result in the
  output directory. The precondition in that the files are already extracted.
  The post condition is that the output META/misc_info.txt contains the merged
  content.

  Args:
    framework_target_files_temp_dir: The name of a directory containing the
      special items extracted from the framework target files package.
    vendor_target_files_temp_dir: The name of a directory containing the special
      items extracted from the vendor target files package.
    output_target_files_temp_dir: The name of a directory that will be used to
      create the output target files package after all the special cases are
      processed.
    framework_misc_info_keys: A list of keys to obtain from the framework
      instance of META/misc_info.txt. The remaining keys from the vendor
      instance.
  """
  misc_info_path = ['META', 'misc_info.txt']
  framework_dict = common.LoadDictionaryFromFile(
      os.path.join(framework_target_files_temp_dir, *misc_info_path))

  # We take most of the misc info from the vendor target files.
  merged_dict = common.LoadDictionaryFromFile(
      os.path.join(vendor_target_files_temp_dir, *misc_info_path))

  # Replace certain values in merged_dict with values from
  # framework_dict. Raises KeyError if a requested key is missing from the
  # framework dict.
  for key in framework_misc_info_keys:
    merged_dict[key] = framework_dict[key]

  # Merge misc info keys used for Dynamic Partitions.
  if (merged_dict.get('use_dynamic_partitions') == 'true') and (
      framework_dict.get('use_dynamic_partitions') == 'true'):
    merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
        framework_dict=framework_dict, vendor_dict=merged_dict)
    merged_dict.update(merged_dynamic_partitions_dict)
    # Ensure that add_img_to_target_files rebuilds super split images for
    # devices that retrofit dynamic partitions. This flag may have been set to
    # false in the partial builds to prevent duplicate building of super.img.
    merged_dict['build_super_partition'] = 'true'

  # Replace <image>_selinux_fc values with framework or vendor file_contexts.bin
  # depending on which dictionary the key came from.
  # Only the file basename is required because all selinux_fc properties are
  # replaced with the full path to the file under META/ when misc_info.txt is
  # loaded from target files for repacking. See common.py LoadInfoDict().
  # The second loop deliberately overrides the first for keys present in both
  # dicts, so framework keys win for *_selinux_fc.
  for key in merged_dict:
    if key.endswith('_selinux_fc'):
      merged_dict[key] = 'vendor_file_contexts.bin'
  for key in framework_dict:
    if key.endswith('_selinux_fc'):
      merged_dict[key] = 'framework_file_contexts.bin'

  output_misc_info_txt = os.path.join(output_target_files_temp_dir, 'META',
                                      'misc_info.txt')
  write_sorted_data(data=merged_dict, path=output_misc_info_txt)
def test_relation_isattrs_return_right_bools(relation, m2m, multi, direct):
    """
    All "is" attributes on Relation objects should return the correct
    truth values for the type of relation that is represented.

    The `relation`, `m2m`, `multi` and `direct` arguments are supplied by a
    pytest fixture/parametrization defined elsewhere in the test module.
    """
    assert relation.is_m2m == m2m
    assert relation.is_multi == multi
    assert relation.is_direct == direct
def dict_to_hierarchy(tree, create_nodes=True, node_type="transform"):
    """Parent DAG nodes based on a dictionnary.

    Args:
        tree (dict): Dictionary representing the hierarchy; maps each parent
            node name to a dict of its children (falsy for leaf nodes).
        create_nodes (bool): If True, create missing children with
            ``cmds.createNode``; otherwise missing children are skipped.
        node_type (str): Type of node to be created if the children doesn't exist
    """
    if not tree:
        return
    # BUGFIX: dict.iteritems() only exists on Python 2; items() behaves the
    # same here and also works on Python 3.
    for parent, child_tree in tree.items():
        if not child_tree:
            continue
        for child in child_tree:
            if not cmds.objExists(child):
                if create_nodes:
                    cmds.createNode(node_type, name=child)
                else:
                    continue
            current_parent = cmds.listRelatives(child, parent=True)
            current_parent = (
                current_parent[0] if current_parent is not None else None
            )
            # Only reparent when the child is not already under `parent`,
            # avoiding redundant Maya reparent calls/warnings.
            if current_parent != parent:
                cmds.parent(child, parent)
        # Recurse to handle grandchildren.
        dict_to_hierarchy(
            child_tree, create_nodes=create_nodes, node_type=node_type
        )
def bias_variable(shape):
    """Create a bias variable with appropriate initialization.

    Returns a tf.Variable of the given shape, initialized to zeros.
    """
    # Zero init; the 0.1 constant (a common choice to avoid dead ReLUs)
    # was deliberately disabled.
    #initial = tf.constant(0.1, shape=shape)
    initial = tf.constant(0.0, shape=shape)
    return tf.Variable(initial)
def getPortNumber():
    """
    Check the command-line arguments for the port number.

    The program can exit in this method if
        Too few arguments are passed into the program
        Too many arguments are passed into the program
        The port number argument is non-numeric
        The port number argument is less than 0 since port numbers cannot be negative
        The port number argument is between 0 and 1023 since those ports are reserved
        The port number argument is larger than 65535 since that is the max port number

    Returns
        number: The port number passed into the program
    """
    if len(sys.argv) == 1:
        printHelpMessage('You passed too few command-line arguments into the application')
    elif len(sys.argv) > 2:
        printHelpMessage('You passed too many command-line arguments into the application')
    elif sys.argv[1].find('.') != -1:
        # Reject decimals explicitly so the message is clearer than the
        # generic ValueError path below.
        printHelpMessage('Port number `{}` is a decimal'.format(sys.argv[1]))
    try:
        portNumber = int(sys.argv[1])
        if portNumber < 0:
            printHelpMessage(
                'Port number `{}` is negative'.format(portNumber)
            )
        elif portNumber < 1024:
            printHelpMessage(
                'Port number `{}` is reserved for common TCP/IP applications'.format(portNumber)
            )
        # BUGFIX: the maximum TCP/UDP port is 65535 (2**16 - 1); the code and
        # docstring previously said 65353.
        elif portNumber > 65535:
            printHelpMessage(
                'Port number `{}` is higher than the maximum port number'.format(portNumber)
            )
        return portNumber
    except ValueError:
        printHelpMessage('You Passed a Non-Numeric Port Number Into the Application')
def object_gatekeeper(obj, is_auth, ignore_standalone=False):
    """
    Decide whether ``obj`` may be shown to the requester.

    It's OK to use available_to_public here because the underlying logic is
    identical. Authenticated callers see everything; anonymous callers only
    see objects whose ``available_to_public`` attribute is truthy.

    Args:
        obj: object to test; falsy values are always denied.
        is_auth: whether the requesting user is authenticated.
        ignore_standalone: unused here; kept for call-site compatibility.

    Returns:
        The truthiness decision (``available_to_public`` may be non-bool).
    """
    if not obj:
        return False
    if is_auth:
        return True
    # BUGFIX: the bare `except:` here used to swallow every exception
    # (including KeyboardInterrupt); only the missing-attribute case is a
    # legitimate "not public" signal.
    try:
        return obj.available_to_public
    except AttributeError:
        return False
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
  """Returns a function that can be used to apply L1 L2 regularizations.

  Args:
    scale_l1: A scalar multiplier `Tensor` for L1 regularization.
    scale_l2: A scalar multiplier `Tensor` for L2 regularization.
    scope: An optional scope name.

  Returns:
    A function with signature `l1_l2(weights)` that applies a weighted sum of
    L1 L2 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if not scope:
    scope = 'l1_l2_regularizer'
  regularizers = [l1_regularizer(scale_l1), l2_regularizer(scale_l2)]
  return sum_regularizer(regularizers, scope=scope)
def test013_ip_range():
    """
    Exercise the 'iprange' type for both IPv4 and IPv6 values.

    to run:

    kosmos 'j.data.types.test(name="iprange")'
    """
    ipv4 = j.data.types.get("iprange", default="192.168.0.0/28")
    assert ipv4.default_get() == "192.168.0.0/28"
    assert ipv4.check("192.168.23.255/28") is True
    # 300 is not a valid IPv4 octet.
    assert ipv4.check("192.168.23.300/28") is False
    assert ipv4.check("192.168.23.255/32") is True
    # Without an explicit default the type falls back to an IPv6 range.
    ipv6 = j.data.types.get("iprange")
    assert ipv6.default_get() == "::/128"
    assert ipv6.check("2001:db00::0/24") is True
    assert ipv6.check("2001:db00::1/24") is True
    # Netmask notation (instead of a prefix length) is rejected.
    assert ipv6.check("2001:db00::0/ffff:ff00::") is False
    j.data.types._log_info("TEST DONE LIST")
    return "OK"
def _create_hive_cursor():
    """
    Initializes a hive connection and returns a cursor to it

    :return: hive cursor
    """
    _print_info('Initializing hive cursor.')
    # Connection parameters are handled inside _initialize_hive_connection.
    return _initialize_hive_connection()
def load_pretrained_wts(featurizer_params, ExtendedEncoder_params):
    """Merging pre-trained and initialised parameters

    Resumes from a pickled checkpoint when one exists; otherwise copies
    matching pre-trained MLM weights (if configured) into the freshly
    initialised parameter trees and returns the combined dict.
    Relies on a module-level ``config`` dict.
    """
    # Which checkpoint shard to resume from (integer division on purpose).
    param_idx = config['restart_from']//config['total_steps']
    if os.path.isfile(config['params_dir']+f'params_{param_idx}'):
        # A saved checkpoint exists -- resume from it verbatim.
        with open(config['params_dir']+f'params_{param_idx}', 'rb') as f:
            params = pickle.load(f)
        return params
    if config['initialize_pretrained']!='':
        pt_wts = get_pretrained_weights(config)
        # Copy embedding weights from the pre-trained model where key paths
        # match; note the differing '~/' path layouts between the two trees.
        featurizer_params = to_mutable_dict(featurizer_params)
        featurizer_params = copy_available_keys(pt_wts, featurizer_params,
                         [('embeddings/word_embeddings/weight', ('encoder/embedding/~/embed', 'embeddings')),
                          ('embeddings/position_embeddings/weight', ('encoder/embedding/position_embeddings', 'position_embeddings')),
                          ('embeddings/LayerNorm', ('encoder/embedding/layer_norm',))])
        ExtendedEncoder_params = to_mutable_dict(ExtendedEncoder_params)
        ExtendedEncoder_params = copy_available_keys(pt_wts, ExtendedEncoder_params,
                         [('embeddings/word_embeddings/weight', ('encoder/~/embedding/~/embed', 'embeddings')),
                          ('embeddings/position_embeddings/weight', ('encoder/~/embedding/position_embeddings', 'position_embeddings')),
                          ('embeddings/LayerNorm', ('encoder/~/embedding/layer_norm',))])
    else:
        print("No pretrained MLM model (e.g. distilbert, roberta..) was specified, initializing with random wts. Provide a pretrained \
        model name in config['initialize_pretrained'], if you wish to use pretrained weights of that model.")
    params = to_immutable_dict( {'comments_encoder' : featurizer_params,
                                 'mlm_predictor' : ExtendedEncoder_params } )
    return params
def to_vector_single(text, embeddings, maxlen=300):
    """
    Given a string, tokenize it, then convert it to a sequence of word embedding
    vectors with the provided embeddings, introducing <PAD> and <UNK> padding token
    vector when appropriate

    Returns a (maxlen, embeddings.embed_size) float array; positions past the
    end of the token window stay zero (implicit padding).
    """
    tokens = tokenizeAndFilterSimple(clean_text(text))
    # Keep only the LAST maxlen tokens when the text is too long.
    window = tokens[-maxlen:]

    # TBD: use better initializers (uniform, etc.)
    x = np.zeros((maxlen, embeddings.embed_size), )

    # TBD: padding should be left and which vector do we use for padding?
    # and what about masking padding later for RNN?
    for i, word in enumerate(window):
        x[i,:] = embeddings.get_word_vector(word).astype('float32')
    return x
def eval_tensor_density(
    tens: tf_compat.Tensor, sess: tf_compat.Session = None
) -> float:
    """
    Get the density (fraction of non zero values) in a tensor

    :param tens: the tensor to get the density for
    :param sess: the session to use for evaluating the tensor,
        if not supplied will use the default session
    :return: the density of the tensor
    """
    session = sess if sess else tf_compat.get_default_session()
    values = session.run(tens)
    # Density = non-zero count over total element count.
    return float(numpy.count_nonzero(values)) / float(values.size)
def get(key, default):
    """Get a config bloc from the YAML config file.

    Args:
        key: Name of the bloc; matched case-insensitively against _YAML_DICT.
        default (dict): The default bloc if the key is not available

    Returns:
        dict: The config bloc (or the default one)
    """
    # BUGFIX: ``collections.Mapping`` was removed in Python 3.10; the ABC
    # lives in ``collections.abc``.
    from collections.abc import Mapping
    # NOTE(review): this returns `default` when the stored value IS a
    # Mapping, which looks inverted given that config blocs are dicts --
    # behavior preserved as-is; confirm the intent with the original author.
    if key.lower() not in _YAML_DICT or isinstance(_YAML_DICT[key.lower()], Mapping):
        return default
    return _YAML_DICT[key.lower()]
def adaptsim(f, a, b, eps=1e-8, max_iter=10000):
    """Adaptive Simpson quadrature.

    P.S. The function name comes from Gander, W. and W. Gautschi, "Adaptive
    Quadrature - Revisited," BIT, Vol. 40, 2000, pp. 84-101 (available at
    https://people.inf.ethz.ch/gander/), but this implementation does not
    use the recursive method from that paper.

    Args:
        f: the function to integrate
        a, b: the integration interval
        eps: target accuracy; once reached, iteration stops and the
            integral is returned
        max_iter: maximum number of iterations; if the target accuracy is
            not reached within this many iterations, an Exception is raised

    Returns: (I, m, p)
        I: approximate value of the integral
        m: number of subdivision levels
        p: the partition points

    Raises:
        Exception: the target accuracy could not be reached in max_iter steps
    """
    p = [a, b]  # partition points
    p0 = p
    ep = [eps]  # per-subinterval error budgets (parallel to p0's intervals)
    m = 0
    q = 0  # number of accepted (finished) subintervals
    I = 0
    for _iter_times in range(int(max_iter)):
        n1 = len(ep)
        n = len(p0)
        if n <= 1:
            break
        h = p0[1] - p0[0]
        # Simpson estimate over the whole first subinterval (s0) and over
        # its two halves (s1 + s2); their difference drives acceptance.
        s0 = h / 6 * ( f(p0[0]) + 4 * f(p0[0] + h/2) + f(p0[0] + h ) )
        s1 = h / 12 * ( f(p0[0]) + 4 * f(p0[0] + h/4) + f(p0[0] + h/2) )
        s2 = h / 12 * ( f(p0[0] + h/2) + 4 * f(p0[0] + 3*h/4) + f(p0[0] + h) )
        if abs(s0 - s1 - s2) <= 15 * ep[0]:
            # Accepted: accumulate the refined estimate, drop the interval.
            I += s1 + s2
            p0 = p0[1:]
            if n1 >= 2:
                ep = ep[1:]
            q += 1
        else:
            # Rejected: bisect the subinterval and halve its error budget.
            m += 1
            p0 = [p0[0], p0[0] + h/2] + p0[1:]
            if n1 == 1:
                ep = [ep[0]/2, ep[0]/2]
            else:
                ep = [ep[0]/2, ep[1]/2] + ep[1:]
        # Keep the full partition: accepted points plus remaining ones.
        if q == 0:
            p = p0
        else:
            p = p[:q] + p0
    else:
        raise Exception('无法在 max_iter 步内迭代到目标精度')
    return I, m, p
def runHmmer(args, list_path, file_path, f):
    """Run prodigal and hmmsearch on chr files.

    Predicts ORFs from ``file_path`` with prodigal, screens them with
    hmmsearch against resources/remove.hmm, and appends a
    '<basename>\\t<file_path>' line to ``f`` when any HMM hit is found.

    Args:
        args: parsed CLI namespace; ``args.data`` is the working directory.
        list_path: unused here; kept for call-site compatibility.
        file_path: path of the input sequence file.
        f: open output file the hit lines are written to.
    """
    if not os.path.exists( str(args.data) + '/tmp'):
        os.makedirs(str(args.data) + '/tmp')
    # get the sample group
    head, group = os.path.split(os.path.split(file_path)[0])
    basename = os.path.splitext(str(ntpath.basename(str(file_path))))[0]
    exportpath = str(args.data) + '/tmp/' + ntpath.basename(str(file_path))
    hmmpath = str(args.data) + '/tmp/' + ntpath.basename(str(file_path)) + '.out'
    print('Processing %s of group %s' % (basename, group))
    # Predict ORFs (metagenome mode); only the protein output is kept.
    s = ""
    cmd = ("prodigal -p meta -i ", str(file_path), " -a ", exportpath, ' -d /dev/null > /dev/null 2> /dev/null')
    os.system(s.join( cmd ))
    # run hmmsearch on faa ORF files
    s = " "
    cmd = ("hmmsearch -E 0.001 --domtblout", hmmpath, 'resources/remove.hmm', exportpath, '> /dev/null 2> /dev/null')
    os.system(s.join( cmd ))
    # write it to output file if there is a hit
    # BUGFIX: the 'rU' open mode was deprecated and removed in Python 3.11;
    # plain 'r' gives the same universal-newline behavior on Python 3.
    with open(hmmpath, 'r') as handle:
        try:
            for qresult in SearchIO.parse(handle, 'hmmscan3-domtab'):
                if len(qresult.hits) > 0:
                    f.write(''.join((basename, '\t', str(file_path),'\n')))
        except ValueError:
            print('parsing error on %s' % basename)
def get_next_position(grid):
    """Returns best next position to send."""
    width = len(grid[0])
    # Score every interior column, then pick the first highest-scoring one.
    scores = [inspect_around_position(grid, col) for col in range(1, width - 1)]
    best = max(range(len(scores)), key=scores.__getitem__)
    return best + 2
def which_subdir(sha: str) -> Optional[str]:
    """ Determine which subset (if any) sha is represented in """
    target = sha + '.json'
    # Scan each known subset; consume the entry on first match.
    for subdir, files in subdir_contents.items():
        if target in files:
            files.remove(target)
            return subdir
    # Not found anywhere: record it under the missing-file bucket.
    subdir_contents[MISSING_FILE].add(target)
    return MISSING_FILE
def smoothedEnsembles(data, lat_bounds, lon_bounds):
    """
    Smoothes all ensembles by taking subsamples
    """
    ### Import modules
    import numpy as np
    import sys
    print('\n------- Beginning of smoothing the ensembles per model -------')

    ### Save MM
    working = data.copy()
    multi_mean = working[-1, :, :, :, :]  # 7 for MMmean
    members = working[:7, :, :, :, :]

    n_models = members.shape[0]
    n_ens = members.shape[1]
    smoothed = np.empty(members.shape)
    for model_index in range(n_models):
        for subsample in range(n_ens):
            # Draw a random subset of ensemble members (without replacement)
            # and average them into one smoothed member.
            ensnum = np.arange(n_ens)
            picks = np.random.choice(ensnum, size=n_models, replace=False)
            smoothed[model_index, subsample, :, :, :] = np.nanmean(
                members[model_index][picks, :, :, :], axis=0)

    ### Add new class
    smoothClass = np.append(smoothed, multi_mean[np.newaxis, :, :, :], axis=0)
    print('--Size of smooth twin --->', smoothed.shape)
    print('--NEW Size of smoothedclass--->', smoothClass.shape)
    print('------- Ending of smoothing the ensembles per model -------')
    return smoothClass
def conv7x7_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=3,
                  use_bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation="relu",
                  data_format="channels_last",
                  **kwargs):
    """
    7x7 version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    ConvBlock
        The configured 7x7 convolution block; extra kwargs are forwarded
        to the ConvBlock constructor.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation,
        data_format=data_format,
        **kwargs)
def alpha_to_weights(alpha):
    """Normalize alpha into weights; each cross-section's absolute values sum to 2.

    Demeans each row (NaN-aware), then rescales the positive entries to sum
    to +1 and the negative entries to sum to -1. The input array itself is
    not modified (the first line rebinds to a new array); `imposter` is an
    external helper, presumably producing a NaN-filled array like alpha --
    confirm its definition.
    """
    alpha = alpha - np.nanmean(alpha, axis=1, keepdims=True)
    mask_pos = (alpha > 0)
    mask_neg = (alpha < 0)
    # Scale the positive side so it sums to +1 per row.
    alpha_pos = imposter(alpha)
    alpha_pos[mask_pos] = alpha[mask_pos]
    alpha_pos = alpha_pos / np.nansum(alpha_pos, 1, keepdims=True)
    # Scale the negative side so it sums to -1 per row.
    alpha_neg = imposter(alpha)
    alpha_neg[mask_neg] = alpha[mask_neg]
    alpha_neg = -alpha_neg / np.nansum(alpha_neg, 1, keepdims=True)
    alpha[mask_pos] = alpha_pos[mask_pos]
    alpha[mask_neg] = alpha_neg[mask_neg]
    return alpha
def complete_with_fake_data_for_warmup(minimum_n_rows_to_fit, X=None, fv_size=None):
    """Makes fake data to warmup a partial fit process.

    If no X is given, returns a random minimum_n_rows_to_fit x fv_size matrix
    (values in [0, 1)). If X is given, its rows are repeated in a cycle until
    minimum_n_rows_to_fit rows exist; if X is already long enough it is
    returned unchanged.

    >>> X = complete_with_fake_data_for_warmup(3, fv_size=2);
    >>> X.shape
    (3, 2)
    >>> import numpy as np
    >>> complete_with_fake_data_for_warmup(5, X=np.array([[1,2,3], [4,5,6]]))
    array([[1, 2, 3],
           [4, 5, 6],
           [1, 2, 3],
           [4, 5, 6],
           [1, 2, 3]])
    """
    if X is None:
        assert fv_size is not None, 'You need to have some data, or specify an fv_size'
        return np.random.rand(minimum_n_rows_to_fit, fv_size)

    n_rows = X.shape[0]
    if n_rows >= minimum_n_rows_to_fit:
        return X
    # Ceiling division: smallest repeat count reaching the minimum.
    repeats = -(-minimum_n_rows_to_fit // n_rows)
    return np.array(X.tolist() * repeats)[:minimum_n_rows_to_fit]
def check_integrity(signify: Dict[str, str], snapshot: Path, url: str) -> bool:
    """Check the integrity of the snapshot and retry once if failed files.

    signify -- the signify key and a signify signed file with SHA256 checksums
    snapshot -- the directory where the snapshot is stored
    url -- the snapshots' mirror URL

    Returns True when the whole snapshot verifies (possibly after the
    single retry).
    """
    whole, failed = verify(signify, snapshot)
    # if there are some failed files, retry once, five minutes
    # after. Downloads can fail or just get the mirror in the middle
    # of a sync.
    if failed:
        sleep(300)
        for f in failed:
            get_binary(parse.urljoin(url, f), f)
        whole, failed = verify(signify, snapshot)
    return whole
def exp_slow(b, c):
    """
    Returns the value b^c.

    Property: b^c = b * b^(c-1)

    Parameter b: the number to raise to a power
    Precondition: b is a number

    Parameter c: the exponent
    Precondition: c is an int >= 0
    """
    # Validate the preconditions up front.
    assert type(b) in [float, int], repr(b)+' is not a number'
    assert type(c) == int, repr(c)+' is not an int'
    assert c >= 0, repr(c)+' is negative'

    # Writes to a module-level counter used to count call frames. EVIL!
    global count_frames

    # Base case: anything to the zeroth power is 1.
    if c == 0:
        return 1

    # Recursive case: peel one factor of b off the exponent.
    remainder = exp_slow(b, c-1)
    count_frames = count_frames+1
    return b * remainder
def recur_gen3(a0,a1,a2,a3,a4,a5):
    """
    homogeneous general third-order linear recurrence generator with
    fixed coefficients

    a(0) = a0, a(1) = a1, a(2) = a2, a(n) = a3\*a(n-1) + a4\*a(n-2) +
    a5\*a(n-3)

    EXAMPLES::

        sage: from sage.combinat.sloane_functions import recur_gen3
        sage: it = recur_gen3(1,1,1,1,1,1)
        sage: [next(it) for i in range(10)]
        [1, 1, 1, 3, 5, 9, 17, 31, 57, 105]
    """
    # Invariant: x = a(n), y = a(n+1), z = a(n+2) at the top of each loop,
    # so a5*x + a4*y + a3*z = a(n+3) per the recurrence above.
    x, y ,z = Integer(a0), Integer(a1), Integer(a2)
    n = 0
    yield x
    while True:
        n = n+1
        x, y, z = y, z, a5*x+a4*y+a3*z
        yield x
def test_parser_label():
    """Labels are correctly recognised"""
    # Both symbolic and purely numeric labels must parse as 'label'
    # commands with a string label payload.
    for command in _parser(["(label1)",
                            "(102)",
                            ]):
        assert command["instruction"] == "label"
        assert isinstance(command["label"], str)
def pick_theme(manual):
    """
    Return theme name based on manual input, prefs file, or default to "plain".
    """
    # An explicit choice always wins.
    if manual:
        return manual

    # Otherwise consult the preferences file (created on demand).
    pref_init()
    prefs = cp.ConfigParser()
    prefs.read(PREFS_FILE)
    try:
        return prefs.get("theme", "default")
    except (cp.NoSectionError, cp.NoOptionError):
        return "plain"
def ip2host(ls_input):
    """
    Parameters : list of a ip addreses
    ----------
    Returns : list of tuples, n=2, consisting of the ip and hostname
    (hostname is None when reverse lookup fails)
    """
    resolved = []
    for addr in ls_input:
        try:
            hostname = socket.gethostbyaddr(addr)[0]
        except Exception as e:
            # Reverse DNS failed; record the address with no hostname.
            print('Error: ', e)
            resolved.append((addr, None))
        else:
            resolved.append((addr, hostname))
    return resolved
def process_all_content(file_list: list, text_path: str) -> Tuple[list, list]:
    """
    Analyze the whole content of the project, build and return lists
    if toc_items and landmarks.

    INPUTS:
    file_list: a list of all content files
    text_path: the path to the contents folder (src/epub/text)

    OUTPUTS:
    a tuple containing the list of Toc items and the list of landmark items
    """
    toc_list: List[TocItem] = []
    landmarks: List[TocItem] = []

    # We make two passes through the work, because we need to know
    # how many bodymatter items there are. So we do landmarks first.
    for textf in file_list:
        file_path = Path(text_path) / textf
        try:
            with open(file_path, encoding="utf8") as file:
                dom = se.easy_xml.EasyXhtmlTree(file.read())
        except Exception as ex:
            raise se.InvalidFileException(f"Couldn’t open file: [path][link=file://{file_path}]{file_path}[/][/]. Exception: {ex}") from ex
        add_landmark(dom, textf, landmarks)

    # Now we test to see if there is only one body item
    body_items = [item for item in landmarks if item.place == Position.BODY]
    single_file = (len(body_items) == 1)

    # Second pass: build the actual ToC entries.
    nest_under_halftitle = False
    place = Position.NONE
    for textf in file_list:
        with open(Path(text_path) / textf, "r", encoding="utf-8") as file:
            dom = se.easy_xml.EasyXhtmlTree(file.read())
        body = dom.xpath("//body")
        if body:
            place = get_place(body[0])
        else:
            raise se.InvalidInputException("Couldn't locate body node")
        # Backmatter entries are never nested under the half title.
        if place == Position.BACK:
            nest_under_halftitle = False
        process_headings(dom, textf, toc_list, nest_under_halftitle, single_file)
        # Everything after the half title page nests beneath it.
        if textf == "halftitlepage.xhtml":
            nest_under_halftitle = True

    # We add this dummy item because outputtoc always needs to look ahead to the next item.
    last_toc = TocItem()
    last_toc.level = 1
    last_toc.title = "dummy"
    toc_list.append(last_toc)
    return landmarks, toc_list
def fuel(bot, mask, target, args):
    """Show the current fuel for Erfurt
    %%fuel [<city> <value> <type>]...
    """
    # NOTE: the docstring above is parsed by the bot framework as the
    # command spec — do not edit it casually.
    """Load configuration"""
    # Default location (Erfurt) and search radius; overridable via bot config.
    config = {
        'lat': 50.9827792,
        'lng': 11.0394426,
        'rad': 10
    }
    config.update(bot.config.get(__name__, {}))
    sort_type = 'all'
    sort_value = 'dist'
    lat = config['lat']
    lng = config['lng']
    fuel_types = ['e5', 'e10', 'diesel', 'all']
    if config['api_key'] == "your_apikey":
        return "I don't have your api key!"
    # No city given: use the configured default location.
    if '<city>' not in args or len(args['<city>']) < 1:
        bot.log.info('Fetching fuel info for Erfurt')
        lat = config['lat']
        lng = config['lng']
    else:
        # "fuel sort <fuel>": default location, but sort by price of <fuel>.
        if " ".join(args['<city>']) == 'sort':
            bot.log.info('Fetching fuel info for Erfurt')
            lat = config['lat']
            lng = config['lng']
            if '<value>' not in args or len(args['<value>']) < 1:
                sort_type = 'all'
                sort_value = 'dist'
            else:
                sort_type = " ".join(args['<value>'])
                sort_value = 'price'
        else:
            if " ".join(args['<city>']) == 'help':
                bot.log.info('Printing some Help')
                cmd = '!'
                bot.privmsg(target, '( ͡° ͜ʖ ͡°)')
                bot.privmsg(target, 'Example commands:')
                bot.privmsg(target, cmd + 'fuel')
                bot.privmsg(target, cmd + 'fuel help')
                bot.privmsg(target, cmd + 'fuel sort <fuel>')
                bot.privmsg(target, cmd + 'fuel sort e5')
                bot.privmsg(target, cmd + 'fuel sort e10')
                bot.privmsg(target, cmd + 'fuel sort diesel')
                bot.privmsg(target, cmd + 'fuel <place>')
                bot.privmsg(target, cmd + 'fuel erfurt')
                bot.privmsg(target, cmd + 'fuel <place> sort <fuel>')
                bot.privmsg(target, cmd + 'fuel erfurt sort e5')
                bot.privmsg(target, cmd + 'fuel bytespeicher sort e10')
                bot.privmsg(target, cmd + 'fuel krautspace sort diesel')
                return ""
            else:
                # Geocode the requested place name to lat/lng.
                bot.log.info('Fetching fuel info for ' +
                             str(" ".join(args['<city>'])))
                geolocator = Nominatim()
                location = geolocator.geocode(" ".join(args['<city>']))
                lat = location.latitude
                lng = location.longitude
                # NOTE(review): '<value>' is joined unconditionally here —
                # assumes the arg parser always supplies the key; confirm.
                if " ".join(args['<value>']) == 'sort':
                    if '<type>' not in args or len(args['<type>']) < 1:
                        sort_type = 'all'
                        sort_value = 'dist'
                    else:
                        sort_type = " ".join(args['<type>'])
                        sort_value = 'price'
    if sort_type not in fuel_types:
        return "Not supported fuel."
    try:
        # Query the Tankerkönig list endpoint for stations near lat/lng.
        url = "https://creativecommons.tankerkoenig.de/json/list.php?" + \
              "lat=" + str(lat) + \
              "&lng=" + str(lng) + \
              "&rad=" + str(config['rad']) + \
              "&sort=" + str(sort_value) + \
              "&type=" + str(sort_type) + \
              "&apikey=" + str(config['api_key'])
        with aiohttp.Timeout(10):
            with aiohttp.ClientSession(loop=bot.loop) as session:
                resp = yield from session.get(url)
                if resp.status != 200:
                    bot.privmsg(target, "Error while retrieving station list")
                    raise Exception()
                r = yield from resp.read()
        data = json.loads(r.decode('utf-8'))
        messages = []
        # One detail request per station to obtain current prices.
        for x in range(len(data['stations'])):
            brand = data[u'stations'][x][u"brand"]
            station_id = data['stations'][x][u"id"]
            postCode = data['stations'][x][u"postCode"]
            bot.log.info('Fetching fuel info for Erfurt station ' +
                         str(station_id))
            url = \
                "https://creativecommons.tankerkoenig.de/json/detail.php?" + \
                "id=" + station_id + \
                "&apikey=" + str(config['api_key'])
            with aiohttp.Timeout(10):
                with aiohttp.ClientSession(loop=bot.loop) as session:
                    resp = yield from session.get(url)
                    if resp.status != 200:
                        bot.privmsg(target, "Error while retrieving fuel data")
                        raise Exception()
                    r = yield from resp.read()
            details = json.loads(r.decode('utf-8'))
            e5 = str(details['station']['e5'])
            e10 = str(details['station']['e10'])
            diesel = str(details['station']['diesel'])
            # Distance from the query point, formatted in km.
            dist = u"{:0.2} km".format(vincenty((details['station']['lat'],
                                                 details['station']['lng']),
                                                (lat, lng)).meters / 1000)
            # Empty brand fields are (empirically) GLOBUS stations.
            if brand == '':
                brand = 'GLOBUS'
            print_str = \
                u" {:20}".format(brand + ', ' + str(postCode) + ': ') + \
                u"{:5} ".format(e5) + \
                u"{:5} ".format(e10) + \
                u"{:5} ".format(diesel) + \
                u"{:5} ".format(dist)
            messages.append(print_str)
        headline = u"{:23}".format('fuel prices:') + \
                   u"{:6} ".format('e5') + \
                   u"{:6} ".format('e10') + \
                   u"{:6} ".format('diesel') + \
                   u"{:6} ".format('dist')
        if len(messages) > 0:
            bot.privmsg(target, headline)
            for m in messages:
                bot.privmsg(target, m)
        else:
            return "No fuel data found!"
    except KeyError:
        bot.privmsg(target, "Error while retrieving fuel data")
        raise Exception()
def is_executable_binary(file_path):
    """
    Return True when *file_path* is an executable binary.

    A path qualifies only if it is a regular file, carries the execute
    permission bit, and its content is binary (i.e. not a script).
    """
    # Reject anything that is not a regular, executable file up front.
    if not (os.path.isfile(file_path) and os.access(file_path, os.X_OK)):
        return False
    # Finally check the content itself.
    return is_binary(file_path)
def findElemArray2D(x, arr2d):
    """
    Locate every occurrence of a scalar in a 2-D array.

    :param x: a scalar
    :param arr2d: a 2-dimensional numpy ndarray or matrix
    Returns a tuple of arrays (rVec, cVec) such that arr2d[r, c] == x for
    each paired (r, c). Returns ([], []) when x does not occur in arr2d.

    Example:
        arr2d = np.array([[1, 2], [3, 1]]), x = 1
        findElemArray2D(x, arr2d) --> ([0, 1], [0, 1])
        i.e., arr2d[0][0] and arr2d[1][1] both == x.

    .. note::
        The type of each tuple member is the same as type(arr2d)
    """
    rows, cols = np.where(arr2d == x)
    if not len(rows):
        # No match anywhere: keep the historical plain-list return.
        return [], []
    return rows.flatten(), cols.flatten()
def merge(cluster_sentences):
    """
    Flatten a list of lists into one flat list.
    """
    flattened = list(itertools.chain.from_iterable(cluster_sentences))
    return flattened
def reverse_str(s: str) -> str:
    """Return *s* reversed.

    The original implementation copied the string into a list and swapped
    characters manually through an extra temp slot; slicing with a
    negative step is the idiomatic equivalent and runs at C speed.
    """
    return s[::-1]
def readme():
    """Return the full text of the README.rst file."""
    with open('README.rst') as readme_file:
        contents = readme_file.read()
    return contents
def exact_account(source_account_id):
    """
    Look up the management (BU) id and OU id for an account id in the
    DynamoDB table.

    :param source_account_id: the account id used as the table key
    :return: tuple of (mgt_account_id, ou_id, source_account_id)
    :raises: re-raises the DynamoDB exception after notifying on failure
    """
    try:
        response = dynamodb_table.get_item(Key={'AccountId': source_account_id})
    except Exception as e:
        failure_notify("Unable to query account id {0}, detailed exception {1}".format(source_account_id, e))
        # Bug fix: if failure_notify() returns instead of terminating, the
        # original code fell through and hit a NameError on the unbound
        # `response`. Re-raise the real exception instead.
        raise
    print(response)
    mgt_account_id = response['Item']['MgtId']
    ou_id = response['Item']['OuId']
    return mgt_account_id, ou_id, source_account_id
def test_catalog_to_markers_xy():
    """Test convert.catalog_to_markers using xy coords.

    Runs the converter on a fixture catalog and compares the emitted
    marker JS shard against a pre-computed expected file.
    """
    # Quiet progress bars and lay down the test fixture data.
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    wcs_file = os.path.join(out_dir, "test_image.fits")
    rows_per_col = np.inf  # never split the catalog into columns
    catalog_file = os.path.join(out_dir, "test_catalog_xy.cat")
    catalog_delim = " "
    n_per_catalog_shard = 250000
    pbar_loc = 0
    convert.catalog_to_markers(
        wcs_file,
        out_dir,
        catalog_delim,
        rows_per_col,
        n_per_catalog_shard,
        catalog_file,
        pbar_loc,
    )
    # Compare expected vs. actual shard contents and names.
    expected_json, expected_name = helpers.cat_to_json(
        os.path.join(out_dir, "expected_test_catalog_xy.cat.js")
    )
    actual_json, actual_name = helpers.cat_to_json(
        os.path.join(out_dir, "js", "test_catalog_xy_0.cat.js")
    )
    # Clean up before asserting so a failure doesn't leave files behind.
    helpers.tear_down()
    helpers.enable_tqdm()
    assert expected_json == actual_json
    assert expected_name == actual_name
def filter_output(output, regex):
    """Filter output by defined regex. Output can be either string, list or tuple.
    Every string is split into list line by line. After that regex is applied
    to filter only matching lines, which are returned back.

    Note that list/tuple input produces one nested result list per element.

    :returns: list of matching records
    """
    if isinstance(output, str):
        matches = []
        for line in output.splitlines():
            matches.extend(re.findall(regex, line))
        return matches
    if isinstance(output, (list, tuple)):
        # Recurse per element, preserving the nested shape of the input.
        return [filter_output(element, regex) for element in output]
    raise RuntimeError('Only strings and lists are supported by filter_output(), '
                       'but output has type {}'.format(type(output)))
def carteiralistar(request):
    """
    View that renders the wallet-listing template for the logged-in user.

    If the user already has a wallet, its API key, balance and trading
    settings are passed to the template; otherwise empty values are used
    and `existe_carteira` is False.
    """
    usuario = request.user
    try:
        # Fetch the wallet object if one already exists for this user
        carteira = CarteiraCriptomoeda.objects.get(usuario=usuario)
        # Pull the API key, balance and trading parameters
        chave_api = carteira.chave_api
        saldo = carteira.saldo
        valor_operacao = carteira.valor_operacao
        num_operacoes = carteira.num_operacoes
        simulacao = carteira.simulacao
        existe_carteira = True
    # No wallet registered yet: render the template with blank fields
    except ObjectDoesNotExist:
        chave_api = ""
        saldo = ""
        valor_operacao = ""
        num_operacoes = ""
        simulacao = ""
        existe_carteira = False
    return render(request, 'site-pytradebot/carteiralistar.html',
                  {'usuario':usuario, 'chave_api':chave_api, 'saldo':saldo,
                   'valor_operacao':valor_operacao, 'num_operacoes':num_operacoes,
                   'simulacao':simulacao, 'existe_carteira':existe_carteira})
def import_cson(ctx, filename, boostnote_json, workspace_id, folder_id):
    """Import a cson document from old Boostnote.

    Exactly one of workspace_id / folder_id must be supplied. When a
    boostnote.json is given, the document's original folder is looked up
    (and created if missing) as the destination.
    """
    # Require exactly one destination id.
    if workspace_id is None and folder_id is None:
        print('error: either workspace id or folder id is required',
              file=sys.stderr)
        sys.exit(1)
    if workspace_id is not None and folder_id is not None:
        print('error: give only one id', file=sys.stderr)
        sys.exit(1)
    # Map Boostnote folder keys -> folder names from boostnote.json.
    original_folders = {}
    if boostnote_json is not None:
        with open(boostnote_json, 'rb') as f:
            bn_json = json.load(f)
        if 'folders' not in bn_json:
            print('error: no folder information available: {}'.format(boostnote_json))
            sys.exit(1)
        for f in bn_json['folders']:
            original_folders[f['key']] = f['name']
    doc = parse_cson(filename)
    if len(original_folders) > 0 and 'folder' in doc:
        # Recreate the document inside its original folder, creating the
        # folder in the target workspace when it does not exist yet.
        destination_folder = original_folders[doc['folder']]
        destination_folder_id = find_folder(ctx, destination_folder, workspace_id, folder_id)
        if destination_folder_id is None:
            destination_folder_id = create_folder(ctx, destination_folder, workspace_id)
        doc = create_document(ctx, doc['title'], doc['content'],
                              workspace_id, destination_folder_id, tags=doc['tags'])
    else:
        doc = create_document(ctx, doc['title'], doc['content'],
                              workspace_id, folder_id, tags=doc['tags'])
    print(f'Created "{doc["title"]}" ({doc["id"]}) in {doc["workspace"]["name"]}{doc["folderPathname"]}')
def test_chain_tuner_classification_correct(data_fixture, request):
    """ Test ChainTuner for chain based on hyperopt library.

    Smoke test: tuning a simple and a complex classification chain for a
    single iteration must complete without raising.
    """
    data = request.getfixturevalue(data_fixture)
    train_data, test_data = train_test_data_setup(data=data)
    # Chains for classification task
    chain_simple = get_simple_class_chain()
    chain_complex = get_complex_class_chain()
    for chain in [chain_simple, chain_complex]:
        # Chain tuning (one iteration keeps the test fast)
        chain_tuner = ChainTuner(chain=chain,
                                 task=train_data.task,
                                 iterations=1)
        tuned_chain = chain_tuner.tune_chain(input_data=train_data,
                                             loss_function=roc)
    # Reaching this point means tuning finished without exceptions.
    is_tuning_finished = True
    assert is_tuning_finished
def scrub(text, stop_chars=DEFAULT_STOP_CHARS, reorder_chars=DEFAULT_REORDER_CHARS):
    """
    Scrub text by running the cleanup passes in their required order:
    reorder stop characters, drop column artifacts, split into one
    sentence per line, then collapse excessive whitespace.
    """
    reordered = reorder_stop_chars(text, stop_chars=stop_chars, reorder_chars=reorder_chars)
    decolumned = remove_columns(reordered)
    one_per_line = split_as_one_sentence_per_line(decolumned, stop_chars=stop_chars)
    return remove_excessive_whitespace(one_per_line)
def simulate_evoked_osc(info, fwd, n_trials, freq, label, loc_in_label=None,
                        picks=None, loc_seed=None, snr=None, mu=None,
                        noise_type="white", return_matrix=True,
                        filtering=None, phase_lock=False):
    """Simulate evoked oscillatory data based on a given fwd model and dipole.
    Parameters:
    -----------
    info : MNE info object
        data info, e.g., from raw
    fwd : MNE forward object
        forward model object
    freq : float
        freq of simulated oscillation
    n_trials : int
        number of trials
    label : MNE label
        source space label to simulate data in
    loc_in_label : None | int
        Specify the random generator state for dipole simulation within the
        label. Defaults to np.random.RandomState if None.
    picks : None | string
        Channel types to pick from evoked, can be 'mag' or 'grad'. None
        defaults to all.
    loc_seed : None | int
        Seed for the time series simulation, only relevant for location in
        label.
    snr : None | float
        If not None, signal-to-noise ratio in dB for resulting signal (adding
        noise).
    mu : None | float
        To directly manipulate noise level (e.g. to keep constant across
        conditions).
    noise_type : str
        Type of noise. Supported is at the moment: "white" and "brownian".
    return_matrix : bool
        If True, a matrix of epochs will be returned and the evoked object will
        be averaged across trials.
    filtering : None | dict
        If None (default), no filtering is done. If filtering should be done,
        the dictionary needs to contain the following keys:
            "hp" : high pass cutoff, float.
            "lp" : low pass cutoff, float.
            "fir_design" : FIR design, string, see evoked.filter()
            "lp_tw" : transition width for low pass, float, optional.
            "hp_tw" : transition width for high pass, float, optional.
    phase_lock : bool
        If True, the oscillation will be phase-locked across trials.
    Returns:
    --------
    evoked : MNE evoked object
        Simulated sensor data.
    stc : MNE source time course object
        Simulated source space data.
    epochs : np.array
        Matrix with epochs, if return_matrix is True.
    mu : noise scaling actually used (returned so it can be reused across
        conditions).
    """
    if loc_seed is not None:
        np.random.seed(loc_seed)
    if loc_in_label is None:
        loc_in_label = np.random.RandomState()
    np.random.seed()  # reset to random seed to not get funky results for noise
    # One continuous time axis of n_trials seconds; trials are carved out
    # of it later by reshaping.
    times = np.arange(0., n_trials, 1./info['sfreq'])
    stc = simulate_sparse_stc(fwd['src'], n_dipoles=1, times=times,
                              random_state=loc_in_label, labels=label,
                              data_fun=lambda
                              times: generate_signal(times, freq, n_trials,
                                                     phase_lock=phase_lock))
    # go to sensor space
    evoked = apply_forward(fwd, stc, info, verbose=False, use_cps=False)
    # pick channel types if applicable
    if picks is not None:
        evoked.pick_types(meg=picks)
    if filtering is not None:
        # Optional transition widths default to "auto".
        if "lp_tw" not in filtering:
            filtering["lp_tw"] = "auto"
        if "hp_tw" not in filtering:
            filtering["hp_tw"] = "auto"
    if snr is not None:
        snr = 10 ** (snr/20)  # convert dB to ratio
        if noise_type == "white":
            noise_data = np.random.randn(*evoked.data.shape)
        elif noise_type == "brownian":
            # make white noise first; it is integrated (cumsum) further below
            noise_data = np.random.randn(*evoked.data.shape)
        elif noise_type == "pink":
            noise_data = make_pink_noise(evoked.data.shape[1], 10,
                                         evoked.data.shape[0])
        else:
            raise ValueError('So far, only white, brownian, and pink noise is '
                             'implemented, got %s' % noise_type)
        if filtering is not None:
            # filter the noise with the same band as the signal
            noise_evoked = evoked.copy()
            noise_evoked.data[:] = noise_data
            noise_evoked.filter(filtering["hp"], filtering["lp"],
                                fir_design=filtering["fir_design"],
                                l_trans_bandwidth=filtering["hp_tw"],
                                h_trans_bandwidth=filtering["lp_tw"],
                                verbose=False)
            noise_data = noise_evoked.data
        # scale the noise
        # shape: trials x sensor x time
        noise_matrix = noise_data.reshape([len(evoked.ch_names),
                                           n_trials, -1]).transpose(
            1, 0, 2)
        signal_matrix = evoked._data.reshape([len(evoked.ch_names),
                                              n_trials, -1]).transpose(1, 0, 2)
        if mu is None:
            # Per-trial Frobenius norm of the signal sets the noise scale
            # for the requested SNR.
            mu = np.linalg.norm(signal_matrix, 'fro', axis=(1, 2))
            mu /= (snr * np.sqrt(len(evoked.ch_names) *
                                 (len(times) / n_trials)))
        if noise_type == 'brownian':
            # Brownian noise = integrated (cumulative) scaled white noise.
            noise_matrix = np.cumsum(mu[:, np.newaxis,
                                        np.newaxis] * noise_matrix,
                                     axis=1)
            signal_matrix += noise_matrix
        else:
            signal_matrix += (mu[:, np.newaxis, np.newaxis] * noise_matrix)
        evoked.data = signal_matrix.transpose(1, 0, 2).reshape(
            [len(evoked.ch_names), int(n_trials * (len(times) / n_trials))])
    # evoked.data *= 1e-11
    if filtering is not None:
        # filter all the data again (signal + noise together)
        evoked.filter(filtering["hp"], filtering["lp"],
                      fir_design=filtering["fir_design"],
                      l_trans_bandwidth=filtering["hp_tw"],
                      h_trans_bandwidth=filtering["lp_tw"],
                      verbose=False)
    # take care of trials:
    if return_matrix is True:
        # Split the continuous simulation into trials, then average into
        # the returned evoked object.
        epochs = evoked._data
        epochs = epochs.reshape([len(evoked.ch_names),
                                 n_trials, -1]).transpose(1, 0, 2)
        evoked.crop(0., evoked.times[int((times.shape[0] / n_trials) - 1)])
        evoked._data[:, :] = epochs.mean(axis=0)
        return evoked, stc, epochs, mu
    else:
        return evoked, stc, mu
def token_request():
    """
    Request an Access Token from Vipps.
    :return: the parsed JSON response containing the Access Token
    """
    response = requests.post(
        base_url + '/accesstoken/get',
        headers=config['token_request'],
    )
    return response.json()
async def test_cancel_not_joined_yet():
    """
    When we cancel the nursery, it hasn't been joined yet.
    This should cancel it anyway.
    """
    async def cleaner():
        # Cancel the enclosing scope shortly after startup; the trailing
        # sleep should never complete because the scope is torn down.
        await asyncio.sleep(0.2)
        Scope.get_current().cancel()
        await asyncio.sleep(10)
    before = time.time()
    async with Scope() as s:
        s << cleaner()
        # This long sleep is expected to be interrupted by the cancel.
        await asyncio.sleep(1)
        raise Exception('never called')
    after = time.time()
    # Cancellation should land well before the 1 s sleep finishes.
    assert (after - before) < 0.4, 'for now...'
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """ Train the model.

    Runs the standard transformers fine-tuning loop (optional fp16 / DDP /
    DataParallel), with periodic evaluation, TensorBoard logging and
    checkpointing.

    Returns:
        (global_step, average training loss per step)
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Derive total optimization steps either from max_steps or from epochs.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": args.weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
                                                num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            # Only tensor inputs at these batch positions are moved to GPU;
            # the rest (lists/sizes) stay on CPU.
            cuda_indices = [0, 1, 2, 3, 6, 7]
            batch = tuple(t.to(args.device) if i in cuda_indices else t for i, t in enumerate(batch))
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "token_type_ids": batch[2],
                      "span_labels": batch[3],
                      "span_size": batch[4],
                      "span_list": batch[5],
                      "slot_labels": batch[6],
                      "slot_mask": batch[7],
                      "rel_size": batch[8],
                      "rel_list": batch[9],
                      "question_length": batch[10],
                      "span_null_label_id": labels[0].index('O'),
                      "global_step": global_step,
                      "args": args}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                # Bug fix: step the optimizer BEFORE the LR scheduler.
                # The original order (scheduler first) is the deprecated
                # pre-PyTorch-1.1 convention and skips the first LR value.
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test",filename= os.path.join(args.data_dir, "{}.jsonl".format("test")))
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model,
                                                            "module") else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def get_gv(title, model_id, mojo_path):
    """ Utility function to generate graphviz dot file from h2o MOJO using
    a subprocess.
    Args:
        title: Title for displayed decision tree.
        model_id: h2o model identifier; also used as the output .gv filename.
        mojo_path: Path to saved model MOJO (Java scoring artifact);
                   generated by train_cv_dt function above.
    """
    # locate h2o jar
    hs = H2OLocalServer()
    h2o_jar_path = hs._find_jar()
    print('Discovered H2O jar path:\n', h2o_jar_path)
    # construct command line call to generate graphviz version of
    # tree, see for more information:
    # http://docs.h2o.ai/h2o/latest-stable/h2o-genmodel/javadoc/index.html
    gv_file_name = model_id + '.gv'
    # Split the argument string into a list, then prepend 'java' and
    # append the output filename so subprocess gets an argv list.
    gv_args = str('-cp ' + h2o_jar_path +
                  ' hex.genmodel.tools.PrintMojo --tree 0 -i '
                  + mojo_path + ' -o').split()
    gv_args.insert(0, 'java')
    gv_args.append(gv_file_name)
    if title is not None:
        gv_args = gv_args + ['--title', title]
    # call constructed command
    print()
    print('Calling external process ...')
    print(' '.join(gv_args))
    # if the line below is failing for you, try instead:
    # _ = subprocess.call(gv_args, shell=True)
    _ = subprocess.call(gv_args)
def extract_tunneled_layer(tunnel_packet: scapy.layers.l2.Ether, offset: int, protocol: str):
    """
    Rebuild the tunneled packet carried inside a captured PDU.

    Args:
        tunnel_packet (scapy.layers.l2.Ether): the outer PDU to extract from
        offset (int): byte offset of the tunneled protocol within the Raw payload
        protocol (str): the tunneled protocol to search for
            (note: currently not used by the implementation)

    Returns:
        scapy.layers.l2.Ether: a new frame reusing the outer src/dst/type,
        whose payload is the inner bytes parsed as IP.
    """
    inner_bytes = tunnel_packet[Raw].load[offset:]
    outer = tunnel_packet[Ether]
    extracted_packet = Ether(dst=outer.dst, src=outer.src, type=outer.type) / IP(inner_bytes)
    return extracted_packet
def draw_bbox(img, detections, cmap, random_color=True, figsize=(10, 10), show_text=True):
    """
    Draw bounding boxes on the img.
    :param img: BGR img.
    :param detections: pandas DataFrame containing detections; each row is
        expected to unpack as (x1, y1, x2, y2, cls, score, w, h).
    :param random_color: assign random color for each objects
    :param cmap: object colormap (class name -> color), used when
        random_color is False
    :param figsize: matplotlib figure size for the displayed image
    :param show_text: if True, draw "<class> <score>" labels on the boxes
    :return: the annotated image array (also shown via matplotlib)
    """
    img = np.array(img)
    # Scale line/font sizes relative to a 416-pixel reference resolution.
    scale = max(img.shape[0:2]) / 416
    line_width = int(2 * scale)
    for _, row in detections.iterrows():
        x1, y1, x2, y2, cls, score, w, h = row.values
        color = list(np.random.random(size=3) * 255) if random_color else cmap[cls]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, line_width)
        if show_text:
            text = f'{cls} {score:.2f}'
            font = cv2.FONT_HERSHEY_DUPLEX
            font_scale = max(0.3 * scale, 0.3)
            thickness = max(int(1 * scale), 1)
            (text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=thickness)[0]
            # Filled background rectangle behind the label for readability.
            cv2.rectangle(img, (x1 - line_width//2, y1 - text_height), (x1 + text_width, y1), color, cv2.FILLED)
            cv2.putText(img, text, (x1, y1), font, font_scale, (255, 255, 255), thickness, cv2.LINE_AA)
    plt.figure(figsize=figsize)
    plt.imshow(img)
    plt.show()
    return img
def print_total_eval_info(data_span_type2model_str2epoch_res_list,
                          metric_type='micro',
                          span_type='pred_span',
                          model_strs=('DCFEE-O', 'DCFEE-M', 'GreedyDec', 'Doc2EDAG'),
                          target_set='test'):
    """Print the final performance by selecting the best epoch on dev set and emitting performance on test set.

    Returns a list of (model_str, best_test_epoch) pairs for the models
    that had results on both dev and test.
    """
    dev_type = 'dev'
    test_type = 'test'
    # Keys into each eval-result dict for the chosen averaging scheme.
    avg_type2prf1_keys = {
        'macro': ('MacroPrecision', 'MacroRecall', 'MacroF1'),
        'micro': ('MicroPrecision', 'MicroRecall', 'MicroF1'),
    }
    name_key = 'EventType'
    p_key, r_key, f_key = avg_type2prf1_keys[metric_type]
    def get_avg_event_score(epoch_res):
        # epoch_res is (epoch, eval_res); the overall average is the last entry.
        eval_res = epoch_res[1]
        avg_event_score = eval_res[-1][f_key]
        return avg_event_score
    dev_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(dev_type, span_type)]
    test_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(test_type, span_type)]
    has_header = False
    mstr_bepoch_list = []
    print('=' * 15, 'Final Performance (%) (avg_type={})'.format(metric_type), '=' * 15)
    for model_str in model_strs:
        if model_str not in dev_model_str2epoch_res_list or model_str not in test_model_str2epoch_res_list:
            continue
        # get the best epoch on dev set
        dev_epoch_res_list = dev_model_str2epoch_res_list[model_str]
        best_dev_epoch, best_dev_res = max(dev_epoch_res_list, key=get_avg_event_score)
        # Fetch the test result recorded at that same epoch.
        test_epoch_res_list = test_model_str2epoch_res_list[model_str]
        best_test_epoch = None
        best_test_res = None
        for test_epoch, test_res in test_epoch_res_list:
            if test_epoch == best_dev_epoch:
                best_test_epoch = test_epoch
                best_test_res = test_res
        assert best_test_epoch is not None
        mstr_bepoch_list.append((model_str, best_test_epoch))
        if target_set == 'test':
            target_eval_res = best_test_res
        else:
            target_eval_res = best_dev_res
        # Build LaTeX-style aligned rows: per-event P/R/F1, then averages.
        align_temp = '{:20}'
        head_str = align_temp.format('ModelType')
        eval_str = align_temp.format(model_str)
        head_temp = ' \t {}'
        eval_temp = ' \t & {:.1f} & {:.1f} & {:.1f}'
        ps = []
        rs = []
        fs = []
        for tgt_event_res in target_eval_res[:-1]:
            head_str += align_temp.format(head_temp.format(tgt_event_res[0][name_key]))
            p, r, f1 = (100 * tgt_event_res[0][key] for key in [p_key, r_key, f_key])
            eval_str += align_temp.format(eval_temp.format(p, r, f1))
            ps.append(p)
            rs.append(r)
            fs.append(f1)
        head_str += align_temp.format(head_temp.format('Average'))
        # Unweighted mean across event types.
        ap, ar, af1 = (x for x in [np.mean(ps), np.mean(rs), np.mean(fs)])
        eval_str += align_temp.format(eval_temp.format(ap, ar, af1))
        head_str += align_temp.format(head_temp.format('Total ({})'.format(metric_type)))
        # Global (micro/macro) average stored as the final result entry.
        g_avg_res = target_eval_res[-1]
        ap, ar, af1 = (100 * g_avg_res[key] for key in [p_key, r_key, f_key])
        eval_str += align_temp.format(eval_temp.format(ap, ar, af1))
        if not has_header:
            print(head_str)
            has_header = True
        print(eval_str)
    return mstr_bepoch_list
def build_sentence_representation(s):
    """ Build representation of a sentence by analyzing predpatt output.
    Returns a weighted list of lists of terms.

    :param s: raw sentence text
    :return: tuple (rep_lists, rep_lists_alt) — the alternate list exists
        to stay consistent with double annotating for versions 3 and 3.1.
        Both are empty on PredPatt failure or when no events are found.
    """
    s = merge_citation_token_lists(s)
    s = remove_qutation_marks(s)
    lemmatizer = WordNetLemmatizer()
    raw_lists = []
    rep_lists = []
    rep_lists_alt = []  # to be consistent with double annotating for 3 and 3.1
    try:
        pp = PredPatt.from_sentence(s, cacheable=False)  # for speed tests
    except Exception as e:
        print('= = = PredPatt exception = = =')
        print('input:\n{}'.format(s))
        print('exception:\n{}'.format(e))
        return rep_lists, rep_lists_alt
    if len(pp.events) == 0:
        return rep_lists, rep_lists_alt
    if CIT_BASED:
        # Tree-based representation, halving the weight per depth level.
        for e in pp.events:
            depth, rep = build_tree_representation(e)
            if INCLUDE_PREDICATE:
                pred = get_predicate(e.root)
                rep = ['{}:{}'.format(pred, r) for r in rep]
            if len(rep) > 0:
                raw_lists.append([depth, rep])
        weight = 1
        for rl in sorted(raw_lists, key=itemgetter(0)):
            rep_lists.append([weight, rl[1]])
            weight *= .5
        if len(rep_lists) == 0:
            # Fall back to a low-weight noun representation of the first event.
            fallback = build_noun_representation(
                pp.events[0], global_root=True
            )
            if INCLUDE_PREDICATE:
                pred = get_predicate(pp.events[0].root)
                fallback = ['{}:{}'.format(pred, f) for f in fallback]
            if len(fallback) > 0:
                rep_lists = [[.25, fallback]]
    else:
        # make a PPv3 and a PPv3.1 representation
        # - - - 3.1 - - -
        reps = []
        for e in pp.events:
            rep = build_noun_representation(e)  # 3.1
            if INCLUDE_PREDICATE:
                pred = get_predicate(e.root)
                rep = ['{}:{}'.format(pred, f) for f in rep]
            reps.extend(rep)
        if len(reps) > 0:
            rep_lists = [[1, reps]]
        # - - - 3 - - -
        reps_alt = []
        for e in pp.events:
            rep = build_noun_representation(e, global_root=True)  # 3
            if INCLUDE_PREDICATE:
                pred = get_predicate(e.root)
                rep = ['{}:{}'.format(pred, f) for f in rep]
            reps_alt.extend(rep)
        # Bug fix: the original guarded this assignment with len(reps)
        # (the 3.1 list) instead of len(reps_alt) — a copy-paste error
        # that could assign an empty alt representation or skip a
        # non-empty one.
        if len(reps_alt) > 0:
            rep_lists_alt = [[1, reps_alt]]
    rep_lists = normalize_rep_lists(rep_lists, lemmatizer)
    rep_lists_alt = normalize_rep_lists(rep_lists_alt, lemmatizer)
    return rep_lists, rep_lists_alt
def load_qconfig():
    """
    Attempt to load the Qconfig.py module from the current environment.

    Returns:
        module: the loaded Qconfig module, or None when it cannot be
        found or fails to load (failures are logged at debug level).
    """
    try:
        spec = importlib.util.find_spec(_QCONFIG_NAME)
        if spec is None:
            return None
        module = importlib.util.module_from_spec(spec)
        if module is None:
            return None
        spec.loader.exec_module(module)
        logger.debug('Loaded {}'.format(_QCONFIG_NAME))
        return module
    except Exception as e:
        logger.debug('Failed to load {} error {}'.format(_QCONFIG_NAME, str(e)))
        return None
def scopus_serial_title(journal_title_list, apikey, d_path):
    """ Scrape the Scopus SerialTitle API for journal-level metadata.

    Writes one tab-separated row per returned entry to
    ``<d_path>/scopus/serialtitle/parsed/serial_title.tsv``.
    Missing or explicitly-null fields are recorded as 'N/A'.

    Args:
        journal_title_list: A list of unique journal ISSNs
        apikey: the same key used for the search queries
        d_path: output path for data
    """

    def meta(entry, key):
        # entry[key], or 'N/A' when the key is absent.
        try:
            return entry[key]
        except KeyError:
            return 'N/A'

    def meta_nonnull(entry, key):
        # Like meta(), but an explicit null value also maps to 'N/A'.
        value = meta(entry, key)
        return 'N/A' if value is None else value

    def meta_path(getter):
        # Evaluate a nested lookup; a missing key anywhere yields 'N/A'.
        # Only KeyError is translated (an IndexError from an empty metric
        # list still propagates, matching the previous behaviour).
        try:
            return getter()
        except KeyError:
            return 'N/A'

    print('************ Now Scraping Scopus for Journal Meta ***********')
    with open(os.path.abspath(
            os.path.join(d_path,
                         'scopus', 'serialtitle', 'parsed',
                         'serial_title.tsv')),
            'w', encoding='utf-8') as tsvfile:
        serial_title = csv.writer(tsvfile, delimiter='\t',
                                  lineterminator='\n')
        # @TODO this should be dynamicaly read from the api keys
        serial_keys = ['prismissn', 'dctitle', 'dcpublisher',
                       'prismaggregationtype', 'sourceid',
                       'prismeissn', 'openaccess', 'openaccessarticle',
                       'openarchivearticle', 'openaccesstype',
                       'openaccessstartdate', 'oaallowsauthorpaid',
                       'subjectarea', 'subjectcodes',
                       'subjectabvs', 'subjectvalues', 'sniplist',
                       'snipfa', 'snipyear', 'snipscore', 'sjrlist',
                       'sjrfa', 'sjryear', 'sjrscore',
                       'citescoreyearinfolist', 'citescorecurrentmetric',
                       'citescorecurrentmetricyear',
                       'citescoretracker', 'citescoretrackeryear']
        serial_title.writerow(serial_keys)
        base_url = 'https://api.elsevier.com/content/serial/title/issn/'
        for issn in tqdm(journal_title_list):
            url = base_url + str(issn)
            api_return = call_scopus_serialtitle_api(url, apikey)
            if api_return.status_code == 200:
                api_json = json.loads(api_return.text)
                for entry in api_json['serial-metadata-response']['entry']:
                    # Subject fields are aggregated into colon-joined
                    # strings; a malformed subject record blanks all four.
                    try:
                        subjectarea = entry['subject-area']
                        subjectcodes = ':'.join(s['@code'] for s in subjectarea)
                        subjectabvs = ':'.join(s['@abbrev'] for s in subjectarea)
                        subjectvalues = ':'.join(s['$'] for s in subjectarea)
                    except KeyError:
                        subjectarea = 'N/A'
                        subjectcodes = 'N/A'
                        subjectabvs = 'N/A'
                        subjectvalues = 'N/A'
                    serial_title.writerow([
                        issn,
                        meta(entry, 'dc:title'),
                        meta(entry, 'dc:publisher'),
                        meta(entry, 'prism:aggregationType'),
                        meta(entry, 'source-id'),
                        meta(entry, 'prism:eIssn'),
                        meta_nonnull(entry, 'openaccess'),
                        meta_nonnull(entry, 'openaccessArticle'),
                        meta_nonnull(entry, 'openArchiveArticle'),
                        meta_nonnull(entry, 'openaccessType'),
                        meta_nonnull(entry, 'openaccessStartDate'),
                        meta_nonnull(entry, 'oaAllowsAuthorPaid'),
                        subjectarea,
                        subjectcodes,
                        subjectabvs,
                        subjectvalues,
                        meta(entry, 'SNIPList'),
                        meta_path(lambda: entry['SNIPList']['SNIP'][0]['@_fa']),
                        meta_path(lambda: entry['SNIPList']['SNIP'][0]['@year']),
                        meta_path(lambda: entry['SNIPList']['SNIP'][0]['$']),
                        meta(entry, 'SJRList'),
                        meta_path(lambda: entry['SJRList']['SJR'][0]['@_fa']),
                        meta_path(lambda: entry['SJRList']['SJR'][0]['@year']),
                        meta_path(lambda: entry['SJRList']['SJR'][0]['$']),
                        meta(entry, 'citeScoreYearInfoList'),
                        meta_path(lambda: entry['citeScoreYearInfoList']['citeScoreCurrentMetric']),
                        meta_path(lambda: entry['citeScoreYearInfoList']['citeScoreCurrentMetricYear']),
                        # NOTE(review): both tracker columns read
                        # 'citeScoreTrackerYear'; the first probably should
                        # be 'citeScoreTracker' -- confirm against the
                        # Scopus API before changing the key.
                        meta_path(lambda: entry['citeScoreYearInfoList']['citeScoreTrackerYear']),
                        meta_path(lambda: entry['citeScoreYearInfoList']['citeScoreTrackerYear']),
                    ])
            elif api_return.status_code != 404:
                # 404s are silently skipped; anything else is unexpected.
                # (The previous guard here compared the already-known 404
                # status against 200/404 and could never fire, and non-200
                # responses were incorrectly parsed as JSON.)
                print('whats going on here? api status code is: ' +
                      str(api_return.status_code))
def create_glucose_previous_day_groups(day_groups: dict) -> dict:
    """
    Shift a day-keyed dictionary of glucose series forward by one day.

    Each key in the result is an original date plus one day, while the
    value is the unchanged subseries, so every entry's data lags its key
    by exactly one day.

    Args:
        day_groups: Mapping of dates to the glucose subseries recorded
            on that date.

    Returns: Mapping of (date + 1 day) -> glucose subseries of the prior day.
    """
    one_day = pd.Timedelta('1D')
    return {day + one_day: glucose for day, glucose in day_groups.items()}
def mean_abs_scaling(series: pd.Series, minimum_scale=1e-6):
    """Scale *series* by the mean of its absolute values.

    The divisor is floored at ``minimum_scale`` so an all-zero series
    cannot trigger a division by zero.

    Returns a ``(scaled_series, scale)`` tuple.
    """
    mean_abs = series.abs().mean()
    scale = mean_abs if mean_abs > minimum_scale else minimum_scale
    return series / scale, scale
def init_dmriprep_wf(
    anat_only,
    debug,
    force_syn,
    freesurfer,
    hires,
    ignore,
    layout,
    longitudinal,
    low_mem,
    omp_nthreads,
    output_dir,
    output_spaces,
    run_uuid,
    skull_strip_fixed_seed,
    skull_strip_template,
    subject_list,
    use_syn,
    work_dir,
):
    """
    Create the base workflow.
    This workflow organizes the execution of *dMRIPrep*, with a sub-workflow for
    each subject. If FreeSurfer's recon-all is to be run, a FreeSurfer derivatives folder is
    created and populated with any needed template subjects.
    Workflow Graph
    .. workflow::
        :graph2use: orig
        :simple_form: yes
        import os
        from collections import namedtuple, OrderedDict
        BIDSLayout = namedtuple('BIDSLayout', ['root'])
        from dmriprep.workflows.base import init_dmriprep_wf
        os.environ['FREESURFER_HOME'] = os.getcwd()
        wf = init_dmriprep_wf(
            anat_only=False,
            debug=False,
            force_syn=True,
            freesurfer=True,
            hires=True,
            ignore=[],
            layout=BIDSLayout('.'),
            longitudinal=False,
            low_mem=False,
            omp_nthreads=1,
            output_dir='.',
            output_spaces=OrderedDict([
                ('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
                ('T1w', {}), ('fsnative', {})]),
            run_uuid='X',
            skull_strip_fixed_seed=False,
            skull_strip_template=('OASIS30ANTs', {}),
            subject_list=['dmripreptest'],
            use_syn=True,
            work_dir='.',
        )
    Parameters
    ----------
    anat_only : bool
        Disable diffusion MRI workflows
    debug : bool
        Enable debugging outputs
    force_syn : bool
        **Temporary**: Always run SyN-based SDC
    freesurfer : bool
        Enable FreeSurfer surface reconstruction (may increase runtime)
    hires : bool
        Enable sub-millimeter preprocessing in FreeSurfer
    ignore : list
        Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
    layout : BIDSLayout object
        BIDS dataset layout
    longitudinal : bool
        Treat multiple sessions as longitudinal (may increase runtime)
        See sub-workflows for specific differences
    low_mem : bool
        Write uncompressed .nii files in some cases to reduce memory usage
    omp_nthreads : int
        Maximum number of threads an individual process may use
    output_dir : str
        Directory in which to save derivatives
    output_spaces : OrderedDict
        Ordered dictionary where keys are TemplateFlow ID strings (e.g., ``MNI152Lin``,
        ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``) strings designating
        nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
        or paths pointing to custom templates organized in a TemplateFlow-like structure.
        Values of the dictionary aggregate modifiers (e.g., the value for the key ``MNI152Lin``
        could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
        resolution version of the selected template).
    run_uuid : str
        Unique identifier for execution instance
    skull_strip_template : tuple
        Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
        and corresponding dictionary of output-space modifiers.
    skull_strip_fixed_seed : bool
        Do not use a random seed for skull-stripping - will ensure
        run-to-run replicability when used with --omp-nthreads 1
    subject_list : list
        List of subject labels
    use_syn : bool
        **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
        If fieldmaps are present and enabled, this is not run, by default.
    work_dir : str
        Directory in which to store workflow execution state and temporary files
    """
    # Top-level workflow; all per-subject workflows hang off this one.
    dmriprep_wf = Workflow(name='dmriprep_wf')
    dmriprep_wf.base_dir = work_dir
    if freesurfer:
        # Node that ensures the FreeSurfer derivatives folder exists and
        # contains the fsaverage/fsnative template subjects that are needed.
        fsdir = pe.Node(
            BIDSFreeSurferDir(
                derivatives=output_dir,
                freesurfer_home=os.getenv('FREESURFER_HOME'),
                spaces=[s for s in output_spaces.keys() if s.startswith('fsaverage')] + [
                    'fsnative'] * ('fsnative' in output_spaces)),
            name='fsdir_run_' + run_uuid.replace('-', '_'), run_without_submitting=True)
    reportlets_dir = os.path.join(work_dir, 'reportlets')
    # Build one independent sub-workflow per subject.
    for subject_id in subject_list:
        single_subject_wf = init_single_subject_wf(
            anat_only=anat_only,
            debug=debug,
            force_syn=force_syn,
            freesurfer=freesurfer,
            hires=hires,
            ignore=ignore,
            layout=layout,
            longitudinal=longitudinal,
            low_mem=low_mem,
            name="single_subject_" + subject_id + "_wf",
            omp_nthreads=omp_nthreads,
            output_dir=output_dir,
            output_spaces=output_spaces,
            reportlets_dir=reportlets_dir,
            skull_strip_fixed_seed=skull_strip_fixed_seed,
            skull_strip_template=skull_strip_template,
            subject_id=subject_id,
            use_syn=use_syn,
        )
        # Crash files land in the subject's derivatives log folder for this run.
        single_subject_wf.config['execution']['crashdump_dir'] = (
            os.path.join(output_dir, "dmriprep", "sub-" + subject_id, 'log', run_uuid)
        )
        # Propagate the (now customized) config to every node of the sub-workflow.
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)
        if freesurfer:
            # Connecting fsdir implicitly adds single_subject_wf to the graph.
            dmriprep_wf.connect(fsdir, 'subjects_dir',
                                single_subject_wf, 'inputnode.subjects_dir')
        else:
            dmriprep_wf.add_nodes([single_subject_wf])
    return dmriprep_wf
def count_total_parameters():
    """
    Returns total number of trainable parameters in the current tf graph.
    https://stackoverflow.com/a/38161314/1645784
    """
    total = 0
    for variable in tf.trainable_variables():
        # Multiply out the static shape to get this variable's element count.
        n_elements = 1
        for dim in variable.get_shape():
            n_elements *= dim.value
        total += n_elements
    return total
def create_gradient_rms_plot(sticher_dict: dict[str, GDEFSticher], cutoff_percent=8, moving_average_n=1,
                             x_offset=0, plotter_style: PlotterStyle = None) -> Figure:
    """
    Create a matplotlib figure graphing the RMS of the gradient of each
    GDEFSticher in *sticher_dict*; dictionary keys become legend labels.

    :param sticher_dict: mapping of legend label -> GDEFSticher
    :param cutoff_percent: gradient cutoff, in percent
    :param moving_average_n: window size of the moving average
    :param x_offset: horizontal shift applied to the plotted data
    :param plotter_style: optional style; a default is created when None
    :return: the assembled Figure
    """
    if plotter_style is None:
        plotter_style = PlotterStyle(300, (8, 4))
    plotter_style.set(y_label=f"roughness(gradient) (moving average n = {moving_average_n})")

    gradient_arrays = []
    pixel_widths = []
    labels = []
    for label, sticher in sticher_dict.items():
        gradient_arrays.append(create_absolute_gradient_array(sticher.values, cutoff_percent / 100.0))
        pixel_widths.append(sticher.pixel_width)
        labels.append(label)

    return create_rms_plot(gradient_arrays, pixel_width=pixel_widths, label_list=labels,
                           moving_average_n=moving_average_n, x_offset=x_offset,
                           plotter_style=plotter_style)
def disp_img(img_arr):
    """Show a numpy ndarray (height, width, channels) in an image viewer window."""
    Image.fromarray(img_arr).show()
def show_chart(graph: alt.Chart):
    """Render an Altair chart in Streamlit, stretched to the container width."""
    st.altair_chart(graph, use_container_width=True)
def reverse(ls: List[T]) -> List[T]:
    """
    Reverse a list in place.

    :param ls: The list to be reversed (mutated in place)
    :return: The same list object, now reversed
    """
    # list.reverse() is the idiomatic, C-implemented equivalent of the
    # manual two-pointer swap loop.
    ls.reverse()
    return ls
def test_post_self_links(
    app, client_with_login, location, minimal_community, headers
):
    """Test that API and HTML self links are generated after POSTing a community."""
    client = client_with_login
    # Create a community
    res = client.post(
        '/communities', headers=headers,
        json=minimal_community)
    assert res.status_code == 201
    _assert_single_item_response(res)
    created_community = res.json
    id_ = created_community['id']
    # Both self links must point at the newly minted community id.
    assert created_community['links']['self'] == f'https://127.0.0.1:5000/api/communities/{id_}'
    assert created_community['links']['self_html'] == f'https://127.0.0.1:5000/communities/{id_}'
    # Delete the community so the test leaves no state behind
    res = client.delete(f'/communities/{id_}', headers=headers)
    assert res.status_code == 204
def test_get_annotations_not_5(
    test_gb_file, test_accession, coordination_args, monkeypatch
):
    """Test get_annotations when length of protein data is not 5."""
    # Stub GenBank file retrieval so no disk/network access happens.
    def mock_get_gb_file(*args, **kwargs):
        gb_file = test_gb_file
        return gb_file
    # Stub record-feature extraction; its return value is unused here.
    def mock_get_record(*args, **kwargs):
        return
    monkeypatch.setattr(get_genbank_annotations, "get_genbank_file", mock_get_gb_file)
    monkeypatch.setattr(get_genbank_annotations, "get_record_feature", mock_get_record)
    # Should complete without raising despite the malformed protein data.
    get_genbank_annotations.get_annotations(
        test_accession, coordination_args["args"],
    )
def increment_with_offset(c: str, increment: int, offset: int) -> str:
    """Caesar shift cipher: rotate *c* by *increment* within a 26-letter
    alphabet whose first code point is *offset*."""
    alphabet_pos = ord(c) - offset
    shifted_pos = (alphabet_pos + increment) % 26
    return chr(shifted_pos + offset)
def uploadfiles():
    """
    Flask view: ingest an uploaded CSV file into the database.

    :return: renders success.html after a successful import, or
        startindex.html when no file was selected
    """
    # get the uploaded file
    uploaded_file = request.files['filename']
    if uploaded_file.filename != '':
        csv_to_db(uploaded_file)
        # Only report success when a file was actually imported; previously
        # this return sat outside the 'if', which made the no-file branch
        # below unreachable.
        return render_template('success.html')
    logging.info("No file uploaded")
    return render_template('startindex.html')
def static_html(route):
    """
    Resolve *route* to a stored Page and render it.

    :param route: URL fragment identifying the page
    :return: the rendered page, or a redirect to login for protected pages
    """
    page = get_page(route)
    if page is None:
        abort(404)
    # Protected pages require an authenticated session.
    if page.auth_required and authed() is False:
        return redirect(url_for("auth.login", next=request.full_path))
    return render_template("page.html", content=page.content)
def checkLogExistence():
    """Checks to see if the CrashTrakr_Log file exists and creates it with a
    timestamped "LOG START" header if it does not."""
    if os.path.isfile("CrashTrakr_Log.txt"):
        return
    log_start = ["LOG START @ {0}\n".format(str(datetime.datetime.now()))]
    # 'with' guarantees the handle is closed even if the write fails.
    with open("CrashTrakr_Log.txt", mode="wt", encoding="utf-8") as log_file:
        log_file.writelines(log_start)
def getCenterFrequency(filterBand):
    """
    Intermediate computation used by the mfcc function.

    Compute the center frequency (fc) of the specified filter band (l).
    This is where the mel-frequency scaling occurs: filters are specified
    so that their center frequencies are equally spaced on the mel scale.
    """
    if filterBand == 0:
        return 0
    if 1 <= filterBand <= 14:
        # Linearly spaced bands below ~1 kHz.
        return (200.0 * filterBand) / 3.0
    # Logarithmically spaced bands above ~1 kHz.
    return math.pow(1.0711703, filterBand - 14) * 1073.4
def phi_analytic(dist, t, t_0, k, phi_1, phi_2):
    """ the analytic solution to the Gaussian diffusion problem """
    decay = t_0 / (t + t_0)
    gaussian = numpy.exp(-0.25 * dist**2 / (k * (t + t_0)))
    return (phi_2 - phi_1) * decay * gaussian + phi_1
def getAll():
    """
    Populate the module-level ``cache`` from the IP-location cache file.
    """
    global cache
    with open(ipLocationCache, "r", encoding="utf-8") as cache_file:
        cache = json.load(cache_file)
def get_home_timeline(count=None, since_id=None, max_id=None, trim_user=None,
                      exclude_replies=None, include_entities=None, tweet_mode=None):
    """
    Returns a collection of the most recent Tweets and retweets
    posted by the authenticating user and the users they follow.
    :param count: (int - optional) Specifies the number of results to retrieve.
    :param since_id: (int - optional) Returns results with an ID greater than
    (that is, more recent than) the specified ID. There are limits to the number of
    Tweets which can be accessed through the API. If the limit of Tweets has
    occured since the since_id, the since_id will be forced to the oldest ID
    available.
    :param max_id: (int - optional) Returns results with an ID less than (that
    is, older than) or equal to the specified ID.
    :param trim_user: (bool - optional) When set to True, each Tweet returned
    in a timeline will include a user object including only the status authors
    numerical ID. Omit this parameter to receive the complete user object.
    :param exclude_replies: (bool - optional) This parameter will prevent
    replies from appearing in the returned timeline. Using exclude_replies with the
    count parameter will mean you will receive up-to count Tweets — this is because
    the count parameter retrieves that many Tweets before filtering out retweets
    and replies.
    :param include_entities: (bool - optional) The entities node will not be
    included when set to False.
    :param tweet_mode: (str - optional) Valid request values are compat and
    extended, which give compatibility mode and extended mode, respectively for
    Tweets that contain over 140 characters
    https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-home_timeline
    """
    # TODO: unimplemented stub -- no request to the Twitter API is made yet.
    pass
def get_changed_files(base_commit: str, head_commit: str,
                      subdir: str = '.'):
    """Get the files changed by the given range of commits, limited to
    *subdir* (relative to the repository root)."""
    diff_cmd = ['git', 'diff', '--name-only',
                base_commit, head_commit, '--', subdir]
    output = subprocess.check_output(diff_cmd)
    return output.decode('UTF-8').split('\n')
def geometric_median(X, eps=1e-5):
    """
    Geometric median via Weiszfeld's iteration, as implemented in
    https://stackoverflow.com/a/30305181
    :param X: 2D dataset
    :param eps: convergence threshold on the step size
    :return: median value from X
    """
    estimate = np.mean(X, 0)
    while True:
        dists = cdist(X, [estimate])
        noncoincident = (dists != 0)[:, 0]
        inv_dists = 1 / dists[noncoincident]
        inv_sum = np.sum(inv_dists)
        weights = inv_dists / inv_sum
        weighted_mean = np.sum(weights * X[noncoincident], 0)

        num_coincident = len(X) - np.sum(noncoincident)
        if num_coincident == 0:
            candidate = weighted_mean
        elif num_coincident == len(X):
            # Every point equals the current estimate: done.
            return estimate
        else:
            # Vardi-Zhang correction when the estimate sits on data points.
            step = (weighted_mean - estimate) * inv_sum
            step_norm = np.linalg.norm(step)
            rinv = 0 if step_norm == 0 else num_coincident / step_norm
            candidate = max(0, 1 - rinv) * weighted_mean + min(1, rinv) * estimate

        if euclidean(estimate, candidate) < eps:
            return candidate
        estimate = candidate
def cprint(*strings):
    """
    compile all given strings and print them
    """
    # NOTE(review): 'compile' here presumably refers to a project-local
    # helper shadowing the builtin (the builtin needs 3 args) -- confirm.
    print(*(compile(s) for s in strings))
def test_find_pyproject_toml():
    """
    Automatically find a pyproject.toml within the current current working directory.
    """
    # .parent == tests/, .parent.parent == repo root
    expected_pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
    # We want to find the pyproject.toml for THIS project.
    pyproject_path = find_caller_relative_path_to_pyproject()
    # samefile() compares the real files, not just the path strings.
    assert pyproject_path.samefile(expected_pyproject_path)
    assert isinstance(pyproject_path, Path)
    assert pyproject_path.is_file()
    # THIS project is called "project-paths", so we should probably find that.
    pyproject_text = pyproject_path.read_text(encoding="UTF-8")
    assert "project-paths" in pyproject_text
def distance(bbox, detection):
    """L1 distances between a bounding box and each detection.

    :param bbox: 1D array of box coordinates.
    :param detection: 2D array, one detection per row.
    :return: 1D array with the L1 distance from ``bbox`` to each row.
    """
    nDetections = detection.shape[0]
    d = np.zeros(nDetections)
    D = detection - np.ones([nDetections, 1]) * bbox
    # 'range' replaces the Python-2-only 'xrange', which raises a
    # NameError on Python 3.
    for i in range(nDetections):
        d[i] = np.linalg.norm(D[i], 1)
    return d
def _read_float(line: str,
                pos: int,
                line_buffer: TextIO
                ) -> Tuple[float, str, int]:
    """Parse the next token on *line* as a float.

    Args:
        line: line.
        pos: current position.
        line_buffer: line buffer for nnet3 file (accepted only for
            interface compatibility; not used here).

    Returns:
        float value, line string and current position.
    """
    del line_buffer  # Unused.
    token, next_pos = read_next_token(line, pos)
    return float(token), line, next_pos
def psd_explore(
        data_folder,
        channel_index,
        plot=True,
        relative=False,
        reverse=False,
        export_to_csv=False):
    """PSD Explore.
    This assumes use with VR300 for the AD Feedback experiment.
    data_folder: path to a BciPy data folder with raw data and triggers
    channel_index: channel to use for PSD calculation
    plot: whether or not to plot the filtered data and psd spectrum
    relative: whether or not to export relative PSD output
    reverse: whether the level estimations should be descending (default; ie band increases with attention) or ascending
    export_to_csv: whether or not to write output to csv
    returns: the per-sequence export data produced by create_sequence_exports
    """
    # construct the relevant data paths
    trigger_path = f'{data_folder}/{TRIGGERS_FN}'
    raw_data_path = f'{data_folder}/{RAW_DATA_FN}'
    # print helpful information to console
    print('CONFIGURATION:\n'
          f'Trial length: {TRIAL_LENGTH} \n'
          f'Downsample rate: {DOWNSAMPLE_RATE} \n'
          f'Notch Frequency: {NOTCH_FREQ} \n'
          f'Bandpass Range: [{FILTER_HP}-{FILTER_LP}] \n'
          f'Trigger Path: [{trigger_path}] \n'
          f'Raw Data Path: [{raw_data_path}] \n')
    # process and get the data from csv
    raw_data, _, channels, type_amp, fs = read_data_csv(raw_data_path)
    # print helpful information to console
    print(
        'DEVICE INFO:'
        f'\nChannels loaded: {channels}. \n'
        f'Using channel: {channels[channel_index]} \n'
        f'Using Device: {type_amp} - {fs} samples/sec \n')
    # filter the data
    filtered_data, sampling_rate_post_filter = filter_data(
        raw_data, fs, DOWNSAMPLE_RATE, NOTCH_FREQ)
    # decode triggers and get a channel map
    _, trigger_targetness, trigger_timing, offset = trigger_decoder(
        mode=MODE,
        trigger_path=trigger_path)
    # add a static offset of 100 ms [TODO load from parameters]
    offset = offset + .1
    # reshape the data
    x, y, num_seq, _ = trial_reshaper(
        trigger_targetness,
        trigger_timing,
        filtered_data,
        mode=MODE,
        fs=fs,
        k=DOWNSAMPLE_RATE,
        offset=offset,
        channel_map=analysis_channels(channels, type_amp),
        trial_length=TRIAL_LENGTH)
    # num_seq * 10: assumes 10 trials per sequence -- TODO confirm
    data = create_sequence_exports(
        x,
        num_seq * 10,
        channel_index,
        TRIAL_LENGTH,
        sampling_rate_post_filter,
        plot,
        relative,
        reverse)
    # plot raw data for the trial index given
    if plot:
        time = np.arange(
            data.size) / sampling_rate_post_filter
        fig, ax = plt.subplots(1, 1, figsize=(12, 4))
        plt.plot(time, data, lw=1.5, color='k')
        plt.xlabel('Time (seconds)')
        plt.ylabel('Voltage')
        plt.xlim([time.min(), time.max()])
        plt.title('Raw Data Plot')
        sns.set(font_scale=1.2)
        sns.despine()
        plt.show()
    if export_to_csv:
        # Previously referenced an undefined name 'exports' (NameError);
        # the computed result is bound to 'data'.
        export_data_to_csv(data)
    return data
def pk_to_p2wpkh_in_p2sh_addr(pk, testnet=False):
    """
    Compressed public key (hex string) -> p2wpkh nested in p2sh address. 'SegWit address.'
    """
    pk_bytes = bytes.fromhex(pk)
    assert is_compressed_pk(pk_bytes), \
        "Only compressed public keys are compatible with p2sh-p2wpkh addresses. See BIP49."
    # Redeem script: OP_0 followed by PUSH(20){HASH160(pubkey)}.
    witness_program = OP_0 + push_bytes(hash160_bytes(pk_bytes))
    # Address = Base58Check(prefix || HASH160(redeem script)).
    prefix = _prefix_bytes('p2sh', testnet=testnet)
    return Base58.check_encode(prefix + hash160_bytes(witness_program))
def remove_imaginary(pauli_sums):
    """
    Remove the imaginary component of each term in a Pauli sum
    :param PauliSum pauli_sums: The Pauli sum to process.
    :return: a purely hermitian Pauli sum.
    :rtype: PauliSum
    """
    if not isinstance(pauli_sums, PauliSum):
        raise TypeError("not a pauli sum. please give me one")
    # Start from a zero sum and accumulate each term with its real coefficient.
    hermitian_sum = sI(0) * 0.0
    for term in pauli_sums:
        hermitian_sum += term_with_coeff(term, term.coefficient.real)
    return hermitian_sum
def test_post_method_not_allowed_on_subscribers_api(flask_app):
    """Test Depot not known yet.

    Verify that post method is not allowed on IMEI-Pairings API.
    """
    # POST is not in the endpoint's allowed method set, so Flask answers 405.
    rv = flask_app.post(url_for('v2.imei_get_subscribers_api', imei='64220297727231'))
    assert rv.status_code == 405
    assert b'The method is not allowed for the requested URL' in rv.data
def log_sigmoid_deprecated(z):
    """
    Calculate the log of sigmoid, avoiding overflow/underflow.

    For |z| >= 30 the exact expression would under/overflow, so the
    asymptotic forms are used instead: log(sigmoid(z)) ~ -exp(-z) for
    large positive z, and log(sigmoid(z)) ~ z for large negative z.
    """
    if abs(z) < 30:
        return np.log(sigmoid(z))
    return -np.exp(-z) if z > 0 else z
def roty(t):
    """Return the 3x3 rotation matrix for angle *t* (radians) about the y-axis."""
    cos_t, sin_t = np.cos(t), np.sin(t)
    return np.array([
        [cos_t, 0, sin_t],
        [0, 1, 0],
        [-sin_t, 0, cos_t],
    ])
def ranks_to_metrics_dict(ranks):
    """Compute ranking metrics (mean rank, MRR, hits@k) as a dict."""
    return {
        'MR': np.mean(ranks),
        'MRR': np.mean(1. / ranks),
        # hits@k as a percentage, for the usual cutoffs.
        'hits@[1,3,10]': {k: np.mean(ranks <= k) * 100 for k in (1, 3, 10)},
    }
def bias_correction(input_data, output_filename='', mask_filename='', method="ants", command="/home/abeers/Software/ANTS/ANTs.2.1.0.Debian-Ubuntu_X64/N4BiasFieldCorrection", temp_dir='./'):
    """ A catch-all function for motion correction. Will perform motion correction on an input volume
        depending on the 'method' and 'command' inputted.
        Parameters
        ----------
        input_data: str or array
            Can be a 4D volume or a filename.
        output_filename: str
            Location to save output data to. If left as '', will return numpy array.
        method: str
            Will perform motion correction according to the provided method.
            Currently available: ['fsl']
        command: str
            The literal command-line string to be inputted via Python's subprocess module.
        temp_dir: str
            If temporary files are created, they will be saved here.
        Returns
        -------
        output: array
            Output data, only if output_filename is left as ''.
    """
    # NOTE(review): this is Python 2 code (print statements, basestring);
    # it will not run under Python 3 without conversion.
    bias_correction_methods = ['ants', 'slicer']
    if method not in bias_correction_methods:
        print 'Input \"method\" parameter is not available. Available methods: ', bias_correction_methods
        return
    if method == 'ants':
        # A good reason to have a Class for qtim methods is to cut through all of this extra code.
        temp_input, temp_output = False, False
        # Accept an in-memory array by round-tripping it through a temp NIfTI file.
        if not isinstance(input_data, basestring):
            input_filename = os.path.join(temp_dir, 'temp.nii.gz')
            save_numpy_2_nifti(input_data, input_filename)
            temp_input = True
        else:
            input_filename = input_data
        if output_filename == '':
            temp_output = True
            output_filename = os.path.join(temp_dir, 'temp_out.nii.gz')
        # Echo the external N4BiasFieldCorrection invocation for debugging.
        print ' '.join([command, '-i', input_filename, '-o', output_filename, '-x', mask_filename])
        if mask_filename != '':
            subprocess.call([command, '-i', input_filename, '-o', output_filename, '-x', mask_filename])
        else:
            subprocess.call([command, '-i', input_filename, '-o', output_filename])
        # Clean up any temporary input/output files we created above.
        if temp_input:
            os.remove(input_filename)
            pass
        if temp_output:
            output = convert_input_2_numpy(output_filename)
            os.remove(output_filename)
            return output
    if method == 'slicer':
        print 'Slicer method not yet implemented! Sorry...'
def execute_sql(
    connection: psycopg2.extensions.connection,
    sql_query: str,
    data: Union[dict, tuple],
    commit=True,
) -> None:
    """
    Execute and (optionally) commit a PostgreSQL query.

    Parameters
    ----------
    connection : psycopg2.extensions.connection
        Open database connection.
    sql_query : str
        SQL query with placeholders.
    data : Union[dict, tuple]
        Parameters bound to the query placeholders.
    commit : bool, optional
        Make database change persistent, by default True
    """
    cursor = connection.cursor()
    try:
        cursor.execute(sql_query, data)
        if commit:
            connection.commit()
            logging.info("PostgreSQL committed ...")
    except psycopg2.OperationalError as e:
        logging.error(f"{e}")
    finally:
        # Always release the cursor; previously it leaked on every call.
        cursor.close()
def randperm2d(H, W, number, population=None, mask=None):
    """Draw distinct random 2D positions from an H x W grid.

    Parameters
    ----------
    H : {integer}
        height
    W : {integer}
        width
    number : {integer}
        how many random positions to return
    population : {list or numpy array(1d or 2d)}
        part of population in range(0, H*W); defaults to the full grid
    mask : optional binary array selecting eligible population cells;
        ignored when it sums to zero

    Returns
    -------
    Two index arrays (rows, cols) of length ``number``.
    """
    if population is None:
        population = np.array(range(0, H * W)).reshape(H, W)
    population = np.array(population)
    if mask is not None and np.sum(mask) != 0:
        population = population[mask > 0]
    shuffled = np.random.permutation(population.flatten())
    # Recover 2D coordinates from the flat indices.
    rows = np.floor(shuffled / W).astype('int')
    cols = np.floor(shuffled - rows * W).astype('int')
    return rows[0:number], cols[0:number]
def get_engine(db_credentials):
    """
    Build a SQLAlchemy engine from a credentials dict.

    Expected keys: ``db`` (database name), ``user``, ``pwd`` (password),
    ``host`` and ``port``.
    """
    url = 'postgresql://{user}:{passwd}@{host}:{port}/{db}'.format(
        user=db_credentials['user'],
        passwd=db_credentials['pwd'],
        host=db_credentials['host'],
        port=db_credentials['port'],
        db=db_credentials['db'],
    )
    return create_engine(url, pool_size=50)
def numpy_to_vtkIdTypeArray(num_array, deep=0):
    """
    Convert a numpy integer array to a vtkIdTypeArray, validating that the
    dtype matches VTK's compiled id-type width.

    Notes
    -----
    This was pulled from VTK and modified to eliminate numpy 1.14 warnings.
    VTK uses a BSD license, so it's OK to do that.
    """
    isize = vtk.vtkIdTypeArray().GetDataTypeSize()
    dtype = num_array.dtype
    if isize == 4:
        if dtype != np.int32:
            raise ValueError(
                'Expecting a numpy.int32 array, got %s instead.' % (str(dtype)))
    elif dtype != np.int64:
        raise ValueError(
            'Expecting a numpy.int64 array, got %s instead.' % (str(dtype)))
    return numpy_to_vtk(num_array, deep, vtkConstants.VTK_ID_TYPE)
def denormalize_ged(g1, g2, nged):
    """
    Convert a normalized GED back into an absolute graph edit distance.

    The normalization divides by the mean node count of the two graphs,
    so multiply by that mean and round to recover the integer GED.
    """
    mean_num_nodes = (g1.num_nodes + g2.num_nodes) / 2
    return round(nged * mean_num_nodes)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.