| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import subprocess
def supported_camera_list():
""" Grabs the list of gphoto2 cameras and parses into a list
"""
check_gphoto2() # No reason to keep going if GPhoto2 isn't installed
# TODO: Error checking/Handling
# Capture and cleanup camera list output
cameras = subprocess.run("gphoto2 --list-cameras", shell=True, capture_output=True)
cameras = cameras.stdout.decode("utf-8").split("\n\t")
return [v.strip("\n").strip('"') for v in cameras][1:]
|
bcb9c69a56d8bcc9e613db818b8e38ca5f9e5ac8
| 3,642,400
|
from typing import Iterable
from typing import Callable
def get_features_and_labels(instances: Iterable[NewsHeadlineInstance],
feature_generator: Callable[[NewsHeadlineInstance],
dict[str]]) -> tuple[list[dict[str]], list[int]]:
""" Return a tuple of the features and labels for each instance within the dataset. """
features = []
labels = []
for instance in instances:
features.append(feature_generator(instance))
labels.append(instance.label)
return features, labels
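# Usage sketch (illustrative, not part of the original snippet): the real
# NewsHeadlineInstance class is defined elsewhere, so a namedtuple with a
# `headline` and a `label` field stands in for it here.
from collections import namedtuple
_FakeInstance = namedtuple("_FakeInstance", ["headline", "label"])
_examples = [_FakeInstance("Dog bites man", 0), _FakeInstance("Man bites dog", 1)]
_feats, _labels = get_features_and_labels(
    _examples, lambda inst: {"n_words": len(inst.headline.split())})
# _feats == [{'n_words': 3}, {'n_words': 3}], _labels == [0, 1]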
|
56d2f1a0a18eb1d1f8ecf9547184ae873d0b60e3
| 3,642,401
|
def countBarcodeStats(bcseqs,chopseqs='none',bcs = ["0","1"],use_specific_beginner=None):
"""this function uses edlib to count the number of matches to given bcseqs.
chopseqs can be left, right, both, or none. This tells the program to
chop off one barcode from either the left, right, both, or none of the
ends."""
x=[]
o1list = []
o2list = []
pcount = []
jcount = []
pjcount = []
jpcount = []
all_lists = {}
switch_lists = {}
run_lists = {}
first_last = {}
for bc in bcseqs:
if(bc=="conditions"):
continue
seqs = []
for seq in bcseqs[bc]:
#for every sequence we want to eliminate where it turns to -1
curseq = ""
if(len(seq)==0):
continue
elif((use_specific_beginner is not None) and (use_specific_beginner not in seq)):
continue
elif("B" in str(seq[0]) or "E" in str(seq[-1])):
#this sequence is already forwards
for element in seq:
if("B" in str(element)):
continue
elif(element == -1):
continue
elif('E' in str(element)):
break
else:
curseq+=str(element)
seqs += [curseq]
elif("E" in str(seq[0]) or "B" in str(seq[-1])):
#turn the seq forwards
for element in seq[::-1]:
if("B" in str(element)):
continue
elif(element == -1):
continue
elif('E' in str(element)):
break
else:
curseq+=str(element)
seqs += [curseq]
seqschop = []
curpcount = 0
curjcount = 0
curjpcount = 0
curpjcount = 0
curbclist = []
curswlist = []
currunslist = []
curfirstlast = [0,0,0]
for a in seqs:
anew = a
if(chopseqs=='right'):
anew = a[:-1]
elif(chopseqs == 'left'):
anew = a[1:]
elif(chopseqs == 'both'):
anew = a[1:-1]
#if(len(anew)>0):
seqschop+=[anew]
pct = anew.count(bcs[0])
jct = anew.count(bcs[1])
curbclist+=[[pct,jct]]
curpcount+=pct
curjcount+=jct
pjct = anew.count("".join(bcs))
jpct = anew.count("".join(bcs[::-1]))
curswlist += [[pjct,jpct]]
curpjcount+=pjct
curjpcount+=jpct
currunslist += [longestRun(a,"".join(bcs))]
if(len(anew)>1):
if(anew[0]==bcs[1]):
curfirstlast[0]+=1 #J in the first position
if(anew[-1]==bcs[1]):
curfirstlast[1]+=1 #J in the last position
curfirstlast[2]+=1 #this one counts all seqs
first_last.update({bc:tuple(curfirstlast)})
run_lists.update({bc:currunslist})
all_lists.update({bc:curbclist})
switch_lists.update({bc:curswlist})
pcount+=[curpcount]
jcount+=[curjcount]
jpcount +=[curjpcount]
pjcount +=[curpjcount]
return all_lists,run_lists,switch_lists,first_last
|
af19f5a77f241362d50245885ab15dabd5197dcd
| 3,642,402
|
def is_underflow(bin_nd, hist):
"""Retuns whether global bin number bin_nd is an underflow bin. Works
for any number of dimensions
"""
flat1d_bin = get_flat1d_bin(bin_nd, hist, False)
return flat1d_bin == 0
|
377c5a339f404ef4e55832f163952575f7b8d6a4
| 3,642,403
|
def deprecated_func_docstring(foo=None):
"""DEPRECATED. Deprecated function."""
return foo
|
f9c996c4f3735ed2767f0bbb139b1494e2a0fa39
| 3,642,404
|
def get_all_nodes(starting_node : 'NodeDHT') -> 'list[NodeDHT]':
"""Return all nodes in the DHT"""
    nodes = [starting_node]
    node = starting_node.succ
    while node != starting_node:
        nodes.append(node)
        node = node.succ
return nodes
|
91b2968b000abac3d6f9f51bad5889ccf0fe8388
| 3,642,405
|
import sys
import numpy as np
from astropy.io import ascii
def get_uvj(field, v4id):
"""Get the U-V and V-J for a given galaxy
Parameters:
field (str): field of the galaxy
v4id (int): v4id from 3DHST
Returns:
uvj_tuple (tuple): tuple of the form (U-V, V-J) for the input object from mosdef
"""
# Read the file
uvj_df = ascii.read(imd.loc_uvj).to_pandas()
# Get the object from mosdef_df, since we need id and not v4id
mosdef_obj = get_mosdef_obj(field, v4id)
# Get the input object
obj = uvj_df[np.logical_and(
uvj_df['field'] == field, uvj_df['id'] == mosdef_obj['ID'])]
# Get the U-V and V-J for that object
try:
u_v = obj['u_v'].iloc[0]
v_j = obj['v_j'].iloc[0]
uvj_tuple = (u_v, v_j)
except IndexError:
sys.exit(f'Could not find object ({field}, {v4id}) in uvj_df')
return uvj_tuple
|
39e1f6fd87ee4c7fc0f29fcfd18b7d780de4d532
| 3,642,406
|
import re
def by_regex(regex_tuples, default=True):
"""Only call function if
regex_tuples is a list of (regex, filter?) where if the regex matches the
requested URI, then the flow is applied or not based on if filter? is True
or False.
For example:
from aspen.flows.filter import by_regex
@by_regex( ( ("/secret/agenda", True), ( "/secret.*", False ) ) )
def use_public_formatting(request):
...
would call the 'use_public_formatting' flow step only on /secret/agenda
and any other URLs not starting with /secret.
"""
    regex_res = [(re.compile(regex), disposition)
                 for regex, disposition in regex_tuples]
def filter_function(function):
def function_filter(request, *args):
            for regex, disposition in regex_res:
                if regex.match(request.line.uri):
                    if disposition:
                        return function(request, *args)
                    return
            if default:
                return function(request, *args)
algorithm._transfer_func_name(function_filter, function)
return function_filter
return filter_function
|
a3d47690120a8091596047d73792b0d1f637132b
| 3,642,407
|
def deserialize(name):
"""Get the activation from name.
    :param name: name of the method, among the implemented Keras activation
        functions.
:return:
"""
name = name.lower()
if name == SOFTMAX:
return backward_softmax
if name == ELU:
return backward_elu
if name == SELU:
return backward_selu
if name == SOFTPLUS:
return backward_softplus
if name == SOFTSIGN:
return backward_softsign
if name == SIGMOID:
return backward_sigmoid
if name == TANH:
return backward_tanh
if name in [RELU, RELU_]:
return backward_relu
if name == EXPONENTIAL:
return backward_exponential
if name == LINEAR:
return backward_linear
raise ValueError("Could not interpret " "activation function identifier:", name)
|
133f01edaa678d60f85bf720590c0df3d1c552f3
| 3,642,408
|
def delete_item_image(itemid, imageid):
"""
Delete an image from item.
Args:
itemid (int) - item's id
imageid (int) - image's id
Status Codes:
204 No Content – when image deleted successfully
"""
path = '/items/{}/images/{}'.format(itemid, imageid)
return delete(path, auth=True, accepted_status_codes=[204])
|
28d3c7bea85cd7132de6010def1c2ec41a9cfc82
| 3,642,409
|
def bytes_(s, encoding='utf-8', errors='strict'): # pragma: no cover
"""Utility to ensure binary-like usability.
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``"""
if isinstance(s, text_type):
return s.encode(encoding, errors)
return s
|
269d315c1204be941766558fc3cbbc07c8e63657
| 3,642,410
|
import os
import uuid
def create_job_id(success_file_path):
"""Create job id prefix with a consistent naming convention based on the
success file path to give context of what caused this job to be submitted.
the rules for success file name -> job id are:
1. slashes to dashes
2. all non-alphanumeric dash or underscore will be replaced with underscore
Note, gcf-ingest- can be overridden with environment variable JOB_PREFIX
3. uuid for uniqueness
"""
clean_job_id = os.getenv('JOB_PREFIX', constants.DEFAULT_JOB_PREFIX)
clean_job_id += constants.NON_BQ_JOB_ID_REGEX.sub(
'_', success_file_path.replace('/', '-'))
# add uniqueness in case we have to "re-process" a success file that is
# republished (e.g. to fix a bad batch of data) or handle multiple load jobs
# for a single success file.
clean_job_id += str(uuid.uuid4())
return clean_job_id[:1024]
|
36417832ef7a7745af46798dbc0b83dcce5ba5f1
| 3,642,411
|
from numpy.linalg import inv
import numpy
def normal_transform(matrix):
"""Compute the 3x3 matrix which transforms normals given an affine vector transform."""
return inv(numpy.transpose(matrix[:3,:3]))
|
b7f7256b9057b9a77b074080e698ff859ccbefb2
| 3,642,412
|
async def async_unload_entry(hass, config_entry):
"""Unload OMV config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok:
controller = hass.data[DOMAIN][config_entry.entry_id]
await controller.async_reset()
hass.data[DOMAIN].pop(config_entry.entry_id)
return True
|
60955e2aac51d211a296de0736f784c2332f855b
| 3,642,413
|
import typing
import csv
import numpy as np
def create_prediction_data(validation_file: typing.IO) -> dict:
"""Create a dictionary object suitable for prediction."""
validation_data = csv.DictReader(validation_file)
races = {}
# Read each horse from each race
for row in validation_data:
race_id = row["EntryID"]
finish_pos = float(row["Placement"])
if race_id not in races:
races[race_id] = []
# Skip horses that didn't run
if finish_pos < 1:
continue
# Create validation array
data = np.array(
[
float(feat if len(str(feat)) > 0 else 0)
for feat in list(row.values())[4:]
]
)
data = data.reshape(1, -1)
races[race_id].append(
{"data": data, "prediction": None, "finish_pos": finish_pos}
)
return races
|
6ec67b277460feb5d80bf7a35e7bc40f3014e6ce
| 3,642,414
|
def username(request):
""" Returns ESA FTP username """
return request.config.getoption("--username")
|
2393884c2c9f65055cd7a14c1b732fccf70a6e28
| 3,642,415
|
def complete_data(df):
"""Add some temporal columns to the dataset
- day of the week
- hour of the day
- minute
Parameters
----------
df : pandas.DataFrame
Input data ; must contain a `ts` column
Returns
-------
pandas.DataFrame
Data with additional columns `day`, `hour` and `minute`
"""
logger.info("Complete some data")
df = df.copy()
df['day'] = df['ts'].apply(lambda x: x.weekday())
df['hour'] = df['ts'].apply(lambda x: x.hour)
df['minute'] = df['ts'].apply(lambda x: x.minute)
return df
|
be342df461c04fc4b7f5b757f8287973c8826bd8
| 3,642,416
|
import re
def is_valid_mac_address_normalized(mac):
"""Validates that the given MAC address has
what we call a normalized format.
We've accepted the HEX only format (lowercase, no separators) to be generic.
"""
return re.compile('^([a-f0-9]){12}$').match(mac) is not None
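# Quick illustrative check of the normalized format (lowercase hex, no
# separators); the example addresses below are arbitrary.
assert is_valid_mac_address_normalized("aabbccddeeff")
assert not is_valid_mac_address_normalized("AA:BB:CC:DD:EE:FF")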
|
7c4ea0a3353a3753907de21bbf114b2a228bb3c0
| 3,642,417
|
import numpy as np
def get_Y(data):
    """
    Function: convert pandas data table to sklearn Y variable
    Arguments
    ---------
    data: pandas data table
Result
------
Y[:,:]: float
sklearn Y variable
"""
return np.array((data["H"],data["sigma"])).T
|
d5e9d5b116fe8e82165d019c23394b6f1dfc4d9c
| 3,642,418
|
import cv2
import mmcv
import numpy as np
def get_bbox(mask, show=False):
    """
    Get the bbox for a binary mask
    Args:
        mask: a binary mask
    Returns:
        bbox: (row_min, row_max, col_min, col_max)
    """
area_obj = np.where(mask != 0)
bbox = np.min(area_obj[0]), np.max(area_obj[0]), np.min(area_obj[1]), np.max(area_obj[1])
if show:
cv2.rectangle(mask, (bbox[2], bbox[0]), (bbox[3], bbox[1]), (255, 255, 255), 1)
mmcv.imshow(mask, "test", 10)
exit()
return bbox
|
2e074d305d50334809eb0fe3e15def6fd4d21644
| 3,642,419
|
from pineboolib.core import settings
def check_mobile_mode() -> bool:
"""
Return if you are working in mobile mode, searching local settings or check QtCore.QSysInfo().productType().
@return True or False.
"""
return (
True
if QtCore.QSysInfo().productType() in ("android", "ios")
else settings.CONFIG.value(u"ebcomportamiento/mobileMode", False)
)
|
99327efbc3d329218d027e4451aae1979a9ebccc
| 3,642,420
|
def check_for_overflow_candidate(node):
"""
    Checks whether the node contains an expression that can potentially produce an overflow,
    i.e. an expression that is not wrapped by any cast and involves one of the operators
    +, ++, *, **. Note that the expression can have several sub-expressions, as in
    (a + 3 > 0 && a * 3 > 5). In that case the check is not only applied to the first
    expression (the &&) but recursively to all sub-expressions, until it finds an
    expression with one of the whitelisted operators.
    :param node: an Expression or AstNode (Tuple or Literal); in both cases it has a dictionary called 'dic'.
    :return: List of tuples [(AstNode, {exp_id: expression})], where the AstNode is a node of type Identifier
    referring to a newly created variable called exp_id. The second element of the tuple is the map
    between the name of the added variable and its expression.
"""
# Check if in all the expression (also in depth) there is some operations
expression_candidates = []
whitelist_operators = ['+', '++', '*', '**', '-', '--']
logic_operators = ['||', '&&', '>', '>=', '<', '<=', '==', '!=']
# to let find_parent works
if not node:
return None
if node.parent:
node.parent = None
first_expression = asthelper.find_node(node.dic, {'nodeType': r'.*Operation'})
if not first_expression:
# no expression it is or an identifier or a literal
return None
if asthelper.find_parent(first_expression, {'kind': 'typeConversion'}) is not None:
# The expression is wrapped by a cast, if wrapped, can't be a candidate
return None
if first_expression['operator'] in whitelist_operators:
exp_map = {}
if 'name' not in first_expression.dic:
# if not name, it is not a variable declaration
# so expression is identifier
exp_name = 'exp_{}'.format(first_expression.dic['id'])
exp_map[exp_name] = expressionhelper.Expression(first_expression.dic)
# override
first_expression.dic['name'] = exp_name
first_expression.dic['nodeType'] = 'Identifier'
return [(first_expression, exp_map)]
# recursive case
if first_expression['operator'] in logic_operators:
left_candidates = check_for_overflow_candidate(expressionhelper.Expression(first_expression['leftExpression']))
right_candidates = check_for_overflow_candidate(expressionhelper.Expression(first_expression['rightExpression']))
if left_candidates is not None: expression_candidates += left_candidates
if right_candidates is not None: expression_candidates += right_candidates
return expression_candidates
return None
|
77232f5d94a6cba6fef79bd51886145e2dfec4bf
| 3,642,421
|
import struct
def parse_monitor_message(msg):
"""decode zmq_monitor event messages.
Parameters
----------
msg : list(bytes)
zmq multipart message that has arrived on a monitor PAIR socket.
First frame is::
16 bit event id
32 bit event value
no padding
Second frame is the endpoint as a bytestring
Returns
-------
event : dict
event description as dict with the keys `event`, `value`, and `endpoint`.
"""
if len(msg) != 2 or len(msg[0]) != 6:
raise RuntimeError("Invalid event message format: %s" % msg)
event = {
'event': struct.unpack("=hi", msg[0])[0],
'value': struct.unpack("=hi", msg[0])[1],
'endpoint': msg[1],
}
return event
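# Usage sketch (illustrative): build a monitor frame by hand with the same
# "=hi" layout the parser expects (16-bit event id + 32-bit value, no
# padding) and decode it. The event id, value and endpoint are made up.
example_msg = [struct.pack("=hi", 1, 42), b"tcp://127.0.0.1:5555"]
print(parse_monitor_message(example_msg))
# {'event': 1, 'value': 42, 'endpoint': b'tcp://127.0.0.1:5555'}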
|
df71541d34bc04b1ac25c6435b1b298394e27362
| 3,642,422
|
import toml
import json
def load_config(fpath):
"""
Load configuration from fpath and return as AttrDict.
:param fpath: configuration file path, either TOML or JSON file
:return: configuration object
"""
if fpath.endswith(".toml"):
data = toml.load(fpath)
elif fpath.endswith(".json"):
with open(fpath, "rt", encoding="utf-8") as infp:
data = json.load(infp)
else:
raise Exception(f"Cannot load config file {fpath}, must be .toml or json file")
return AttrDict(data)
|
27c68c944a431b4d8b12c6b64609f33043363b03
| 3,642,423
|
import tensorflow as tf
def softmax_layer(inputs, n_hidden, random_base, drop_rate, l2_reg, n_class, scope_name='1'):
"""
Method adapted from Trusca et al. (2020). Encodes the sentence representation into a three dimensional vector
(sentiment classification) using a softmax function.
:param inputs:
:param n_hidden:
:param random_base:
:param drop_rate:
:param l2_reg:
:param n_class:
:param scope_name:
:return:
"""
w = tf.get_variable(
name='softmax_w' + scope_name,
shape=[n_hidden, n_class],
# initializer=tf.random_normal_initializer(mean=0., stddev=np.sqrt(2. / (n_hidden + n_class))),
initializer=tf.random_uniform_initializer(-random_base, random_base),
regularizer=tf.keras.regularizers.L2(l2_reg)
)
b = tf.get_variable(
name='softmax_b' + scope_name,
shape=[n_class],
# initializer=tf.random_normal_initializer(mean=0., stddev=np.sqrt(2. / (n_class))),
initializer=tf.random_uniform_initializer(-random_base, random_base),
regularizer=tf.keras.regularizers.L2(l2_reg)
)
with tf.name_scope('softmax'):
outputs = tf.nn.dropout(inputs, rate=drop_rate)
predict = tf.matmul(outputs, w) + b
predict = tf.nn.softmax(predict)
return predict, w
|
1f77d99d12c927c0d77e136098fe8f9c2bc458b8
| 3,642,424
|
def node2freqt(docgraph, node_id, child_str='', include_pos=False,
escape_func=FREQT_ESCAPE_FUNC):
"""convert a docgraph node into a FREQT string."""
node_attrs = docgraph.node[node_id]
if istoken(docgraph, node_id):
token_str = escape_func(node_attrs[docgraph.ns+':token'])
if include_pos:
pos_str = escape_func(node_attrs.get(docgraph.ns+':pos', ''))
return u"({pos}({token}){child})".format(
pos=pos_str, token=token_str, child=child_str)
else:
return u"({token}{child})".format(token=token_str, child=child_str)
else: # node is not a token
label_str=escape_func(node_attrs.get('label', node_id))
return u"({label}{child})".format(label=label_str, child=child_str)
|
8c6690e5fec41f98501060f5bf24ed823a2c31b6
| 3,642,425
|
import argparse
def build_arg_parser():
"""Build the ArgumentParser."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--fritzbox", default="fritz.box")
parser.add_argument("-u", "--username", default="dslf-config")
parser.add_argument("-p", "--password", required=True)
return parser
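# Usage sketch (illustrative): parse an example command line; the password
# value here is a placeholder.
example_args = build_arg_parser().parse_args(["-p", "example-password"])
# example_args.fritzbox == "fritz.box", example_args.username == "dslf-config",
# example_args.password == "example-password"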
|
acf1baafdedfa8db7328e095eac5324f4ddae1ee
| 3,642,426
|
import os
def get_marathon_url():
"""Get Marathon URL from the environment.
This is optional, default: http://leader.mesos:8080.
"""
marathon_url = os.environ.get("MARATHON_URL", None)
if marathon_url is None:
logger.warning("Unable to parse MARATHON_URL environment variable, using default: http://leader.mesos:8080")
marathon_url = "http://leader.mesos:8080"
return marathon_url
|
69ff96ad112897067a7301053031a78c30112d4a
| 3,642,427
|
import os
import zipfile
import numpy as np
import pandas as pd
def _load_dataset(name, split, return_X_y, extract_path=None):
"""Load time series classification datasets (helper function)."""
# Allow user to have non standard extract path
if extract_path is not None:
local_module = os.path.dirname(extract_path)
local_dirname = extract_path
else:
local_module = MODULE
local_dirname = DIRNAME
if not os.path.exists(os.path.join(local_module, local_dirname)):
os.makedirs(os.path.join(local_module, local_dirname))
if name not in _list_downloaded_datasets(extract_path):
url = "http://timeseriesclassification.com/Downloads/%s.zip" % name
        # This also tests the validity of the URL; can't rely on the HTML
        # status code as it always returns 200
try:
_download_and_extract(
url,
extract_path=extract_path,
)
except zipfile.BadZipFile as e:
raise ValueError(
"Invalid dataset name. ",
extract_path,
"Please make sure the dataset "
+ "is available on http://timeseriesclassification.com/.",
) from e
if isinstance(split, str):
split = split.upper()
if split in ("TRAIN", "TEST"):
fname = name + "_" + split + ".ts"
abspath = os.path.join(local_module, local_dirname, name, fname)
X, y = load_from_tsfile_to_dataframe(abspath)
# if split is None, load both train and test set
elif split is None:
X = pd.DataFrame(dtype="object")
y = pd.Series(dtype="object")
for split in ("TRAIN", "TEST"):
fname = name + "_" + split + ".ts"
abspath = os.path.join(local_module, local_dirname, name, fname)
result = load_from_tsfile_to_dataframe(abspath)
X = pd.concat([X, pd.DataFrame(result[0])])
y = pd.concat([y, pd.Series(result[1])])
        y = pd.Series.to_numpy(y, dtype=str)
else:
raise ValueError("Invalid `split` value =", split)
# Return appropriately
if return_X_y:
return X, y
else:
X["class_val"] = pd.Series(y)
return X
|
32d5b83951b81d35f4bb26056521e2a2ff076144
| 3,642,428
|
def search(news_name):
"""method to fetch search results"""
news_name_list = news_name.split(" ")
search_name_format = "+".join(news_name_list)
searched_results = search_news(search_name_format)
    sources = get_source_news()
    title = f'search results for {news_name}'
    return render_template('search.html', results=searched_results, my_sources=sources, title=title)
|
7521221b66a872b00310693a3ccc6c81013098a2
| 3,642,429
|
def encrypt_document(document):
"""
Useful method to encrypt a document using a random cipher
"""
cipher = generate_random_cipher()
return decrypt_document(document, cipher)
|
9a7e4bd79a83df261c4f946f62ff9bf40bfbf068
| 3,642,430
|
def bootstrap_alert(visitor, items):
"""
Format:
[[alert(class=error)]]:
message
"""
txt = []
for x in items:
cls = x['kwargs'].get('class', '')
if cls:
cls = 'alert-%s' % cls
txt.append('<div class="alert %s">' % cls)
if 'close' in x['kwargs']:
txt.append('<button class="close" data-dismiss="alert">×</button>')
text = visitor.parse_text(x['body'], 'article')
txt.append(text)
txt.append('</div>')
return '\n'.join(txt)
|
c2803176b2e1ed9b3d4aecd622eedcac673d4c42
| 3,642,431
|
def masked_mean(x, *, mask, axis,
paxis_name, keepdims):
"""Calculates the mean of a tensor, excluding masked-out entries.
Args:
x: Tensor to take the mean of.
mask: Boolean array of same shape as 'x'. True elements are included in the
mean, false elements are excluded.
axis: Axis of 'x' to compute the mean over.
paxis_name: Optional. If not None, will take a distributed mean of 'x'
across devices using the specified parallel axis.
keepdims: Same meaning as the corresponding parameter in `numpy.mean`.
Whether to keep the reduction axes or squeeze them out.
Returns:
Tensor resulting from reducing 'x' over axes in 'axis'.
"""
assert x.shape == mask.shape
x_masked_sum = masked_sum(
x, mask=mask, axis=axis, paxis_name=paxis_name, keepdims=keepdims)
mask_count = masked_sum(
x=mask, mask=None, axis=axis, paxis_name=paxis_name, keepdims=keepdims)
x_masked_mean = x_masked_sum / mask_count
return x_masked_mean
|
3242e86f571af61909efa63bd60158aa0f8eba88
| 3,642,432
|
def aspectRatioFix(preserve,anchor,x,y,width,height,imWidth,imHeight):
"""This function helps position an image within a box.
It first normalizes for two cases:
- if the width is None, it assumes imWidth
- ditto for height
- if width or height is negative, it adjusts x or y and makes them positive
Given
(a) the enclosing box (defined by x,y,width,height where x,y is the \
lower left corner) which you wish to position the image in, and
(b) the image size (imWidth, imHeight), and
(c) the 'anchor point' as a point of the compass - n,s,e,w,ne,se etc \
and c for centre,
this should return the position at which the image should be drawn,
as well as a scale factor indicating what scaling has happened.
It returns the parameters which would be used to draw the image
without any adjustments:
x,y, width, height, scale
used in canvas.drawImage and drawInlineImage
"""
scale = 1.0
if width is None:
width = imWidth
if height is None:
height = imHeight
if width<0:
width = -width
x -= width
if height<0:
height = -height
y -= height
if preserve:
imWidth = abs(imWidth)
imHeight = abs(imHeight)
scale = min(width/float(imWidth),height/float(imHeight))
owidth = width
oheight = height
width = scale*imWidth-1e-8
height = scale*imHeight-1e-8
if anchor not in ('nw','w','sw'):
dx = owidth-width
if anchor in ('n','c','s'):
x += dx/2.
else:
x += dx
if anchor not in ('sw','s','se'):
dy = oheight-height
if anchor in ('w','c','e'):
y += dy/2.
else:
y += dy
return x,y, width, height, scale
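# Worked example (illustrative): fit a 200x100 image into a 100x100 box,
# anchored at the centre, preserving the aspect ratio. The image is scaled
# by 0.5 and vertically centred inside the box.
example = aspectRatioFix(True, 'c', 0, 0, 100, 100, 200, 100)
# example ~= (0, 25, 100, 50, 0.5), i.e. (x, y, width, height, scale)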
|
73a686f122ad31ee6693641e1ef386f13b67b4d8
| 3,642,433
|
def __do_core(SM, ToDB):
"""RETURNS: Acceptance trace database:
map: state_index --> MergedTraces
___________________________________________________________________________
This function walks down almost each possible path trough a given state
machine. During the process of walking down the paths it develops for each
state its list of _Trace objects.
___________________________________________________________________________
IMPORTANT:
There is NO GUARANTEE that the paths from acceptance to 'state_index' or
the paths from input position storage to 'state_index' are complete! The
calling algorithm must walk these paths on its own.
This is due to a danger of exponential complexity with certain setups. Any
path analysis is dropped as soon as a state is reached with an equivalent
history.
___________________________________________________________________________
"""
def print_path(x):
print(x.state_index, " ", end=' ')
if x.parent is not None: print_path(x.parent)
else: print()
class TraceFinder(TreeWalker):
"""Determines _Trace objects for each state. The heart of this function is
the call to '_Trace.next_step()' which incrementally develops the
acceptance and position storage history of a path.
Recursion Terminal: When state has no target state that has not yet been
handled in the 'path' in the same manner. That means,
that if a state appears again in the path, its trace
must be different or the recursion terminates.
"""
def __init__(self, state_machine, ToDB):
self.sm = state_machine
self.to_db = ToDB
self.result = dict((i, []) for i in self.sm.states.keys())
self.path = []
# Under some circumstances, the init state may accept!
# (E.g. the appendix state machines of the 'loopers')
TreeWalker.__init__(self)
def on_enter(self, Args):
PreviousTrace = Args[0]
StateIndex = Args[1]
# (*) Update the information about the 'trace of acceptances'
dfa_state = self.sm.states[StateIndex]
if not self.path: trace = _Trace(self.sm.init_state_index, dfa_state)
else: trace = PreviousTrace.next_step(StateIndex, dfa_state)
target_index_list = self.to_db[StateIndex]
# (*) Recursion Termination:
#
# If a state has been analyzed before with the same trace as result,
# then it is not necessary dive into deeper investigations again. All
# of its successor paths have been walked along before. This catches
# two scenarios:
#
# (1) Loops: A state is reached through a loop and nothing
# changed during the walk through the loop since
# the last passing.
#
# There may be connected loops, so it is not sufficient
# to detect a loop and stop.
#
            #     (2) Knots: A state is reached through different branches.
# However, the traces through those branches are
# indifferent in their positioning and accepting
# behavior. Only one branch needs to consider the
# subsequent states.
#
# (There were cases where this blew the computation time
# see bug-2257908.sh in $QUEX_PATH/TEST).
#
existing_trace_list = self.result.get(StateIndex)
if existing_trace_list:
end_of_road_f = (len(target_index_list) == 0)
for pioneer in existing_trace_list:
if not trace.is_equivalent(pioneer, end_of_road_f):
continue
elif trace.has_parent(pioneer):
# Loop detected -- Continuation unnecessary.
# Nothing new happened since last passage.
# If trace was not equivalent, the loop would have to be stepped through again.
return None
else:
# Knot detected -- Continuation abbreviated.
# A state is reached twice via two separate paths with
# the same positioning_states and acceptance states. The
# analysis of subsequent states on the path is therefore
# complete. Almost: There is no alternative paths from
# store to restore that must added later on.
return None
# (*) Mark the current state with its acceptance trace
self.result[StateIndex].append(trace)
# (*) Add current state to path
self.path.append(StateIndex)
# (*) Recurse to all (undone) target states.
return [(trace, target_i) for target_i in target_index_list ]
def on_finished(self, Args):
# self.done_set.add(StateIndex)
self.path.pop()
trace_finder = TraceFinder(SM, ToDB)
trace_finder.do((None, SM.init_state_index))
return trace_finder.result
|
621c9c26f9a7054b2e1ef20984105b05738878e9
| 3,642,434
|
import math
import random
def circle_area(radius: int) -> float:
""" estimate the area of a circle using the monte carlo method.
Note that the decimal precision is log(n). So if you want a precision of
three decimal points, n should be $$ 10 ^ 3 $$.
:param r (int): the radius of the circle
:return (int): the estimated area of the circle to three decimal places
"""
hits = 0
n = 1000
left_bottom = -1 * radius
right_top = radius
for _ in range(n):
# get random coordinates
        # sample uniformly from the bounding square [-radius, radius] x [-radius, radius]
        x = left_bottom + (random.random() * (2 * radius))
        y = left_bottom + (random.random() * (2 * radius))
        # check if the point falls inside the circle
        if math.sqrt((x ** 2) + (y ** 2)) < radius:
hits += 1
return (hits / n) * ((2 * radius) ** 2)
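# Sanity check (illustrative): with the hard-coded n = 1000 samples the
# estimate is noisy, so only expect rough agreement with pi for radius 1.
print(circle_area(1))   # typically prints a value within roughly 0.1-0.2 of 3.14159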
|
2c85759ffbf798749263fca368cdfd159d67028b
| 3,642,435
|
import torch.nn as nn
import torch.optim as optim
def Quantized_MLP(pre_model, args):
"""
quantize the MLP model
:param pre_model:
:param args:
:return:
"""
#full-precision first and last layer
weights = [p for n, p in pre_model.named_parameters() if 'fp_layer' in n and 'weight' in n]
biases = [pre_model.fp_layer2.bias]
#layers that need to be quantized
ternary_weights = [p for n, p in pre_model.named_parameters() if 'ternary' in n]
params = [
{'params': weights},
{'params': ternary_weights},
{'params': biases}
]
optimizer = optim.SGD(params, lr=args.lr)
loss_fun = nn.CrossEntropyLoss()
return pre_model, loss_fun, optimizer
|
cd5b36c1b10567fee5a8b1f10679e6868f42f98f
| 3,642,436
|
import numpy as np
from qutip import Qobj
def _super_tofrom_choi(q_oper):
"""
We exploit that the basis transformation between Choi and supermatrix
representations squares to the identity, so that if we munge Qobj.type,
we can use the same function.
Since this function doesn't respect :attr:`Qobj.type`, we mark it as
private; only those functions which wrap this in a way so as to preserve
type should be called externally.
"""
data = q_oper.data.toarray()
dims = q_oper.dims
new_dims = [[dims[1][1], dims[0][1]], [dims[1][0], dims[0][0]]]
d0 = np.prod(np.ravel(new_dims[0]))
d1 = np.prod(np.ravel(new_dims[1]))
s0 = np.prod(dims[0][0])
s1 = np.prod(dims[1][1])
return Qobj(dims=new_dims,
inpt=data.reshape([s0, s1, s0, s1]).
transpose(3, 1, 2, 0).reshape((d0, d1)))
|
da91aff35d891000773100b998b80dc5d998414f
| 3,642,437
|
import numpy as np
import pandas as pd
def get_attention_weights(data):
"""Get the attention weights of the given function."""
# USE INTERACTIONS
token_interaction = data['tokeninteraction']
df_token_interaction = pd.DataFrame(token_interaction)
# check clicked tokens to draw squares around them
clicked_tokens = np.array(data['finalclickedtokens'])
clicked_tokens_indices = np.where(clicked_tokens == 1)[0].tolist()
# COMPUTE ATTENTION
attentions = []
for i, t in enumerate(data['tokens']):
new_attention = \
get_attention(index_token=t['id'],
df_interaction=df_token_interaction)
attentions.append(new_attention)
return attentions
|
e3189bd67f3da6ee8c1173348eec249d9c8cfa9a
| 3,642,438
|
import numpy as np
import matplotlib.pyplot as plt
def save_ecg_example(gen_data: np.ndarray, image_name, image_title='12-lead ECG'):
"""
Save 12-lead ecg signal in fancy .png
:param gen_data:
:param image_name:
:param image_title:
:return:
"""
fig = plt.figure(figsize=(12, 14))
for _lead_n in range(gen_data.shape[1]):
curr_lead_data = gen_data[:, _lead_n]
plt.subplot(4, 3, _lead_n + 1)
plt.plot(curr_lead_data, label=f'lead_{_lead_n + 1}')
plt.title(f'lead_{_lead_n + 1}')
fig.suptitle(image_title)
plt.savefig(f'out/{image_name}.png', bbox_inches='tight')
plt.close(fig)
return fig
|
456fa204b20eee53645a900614877a6fb6a53e9c
| 3,642,439
|
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload an entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_unload_entry(entry)
|
b4ae648493b63a27f5127139876cf0bca2a2dcbb
| 3,642,440
|
import os
import time
from tqdm import tqdm
def get_all_score_dicts(ref_punc_folder_name, res_punc_folder_name):
"""
    Return a list of score dictionaries for a pair of folders. This function assumes the naming
    of the files in the folders is correct according to the diagram, so that the files match up
    when sorted. Both folders should be in the same directory as this script.
:param ref_punc_folder_name: Filename of the reference punctuation folder
:param res_punc_folder_name: Filename of the restored punctuation folder
:return: A list of score dictionaries
"""
filenames_ref_punc = os.listdir(ref_punc_folder_name)
filenames_res_punc = os.listdir(res_punc_folder_name)
# print(f"Filenames Reference Punc: {filenames_ref_punc}")
# print(f"Filenames Restored Punc: {filenames_res_punc}")
# print(filenames_ref_punc)
print(f"Number of reference punctuation files: {len(filenames_ref_punc)}")
print(f"Number of restored punctuation files: {len(filenames_res_punc)}")
counter = 0
score_dicts_list = []
start_timer = time.time()
for i in tqdm(range(0, 461)): # 301, 461
# print(i)
fileName = str(i)
ref_punc_filename = ref_punc_folder_name + "\\" + fileName + "_reference_punc.txt"
res_punc_filename = res_punc_folder_name + "\\" + "pr_" + fileName + "_asr_output.txt"
if os.path.isfile(ref_punc_filename) == os.path.isfile(res_punc_filename) and os.path.isfile(
ref_punc_filename):
counter += 1
score_dicts_list.append(ref_and_res_to_scores(refPuncFileName=ref_punc_filename,
resPuncFileName=res_punc_filename))
# print("--- %s seconds ---" % (time.time() - start_time))
print(f"--- Processed {counter} files in {time.time() - start_timer} seconds ---")
# score_dicts_list = []
# assert len(filenames_ref_punc) == len(filenames_res_punc), "Amount of restored punctuation and reference punctuation files should be equal to calculate scores!"
# for i in range(len(filenames_ref_punc)):
# # print(f"ref file 0:3 {filenames_ref_punc[i][0:3]}")
# # print(f"res file 0:3 {filenames_res_punc[i][0:3]}")
# ref_path = ref_punc_folder_name + "\\" + filenames_ref_punc[i]
# res_path = res_punc_folder_name + "\\" + filenames_res_punc[i]
# score_dicts_list.append(ref_and_res_to_scores(refPuncFileName=ref_path,
# resPuncFileName=res_path))
return score_dicts_list
|
27943b7694f420123dcd8bfebb79b99fc5dab617
| 3,642,441
|
def run_random_climate(gdir, nyears=1000, y0=None, halfsize=15,
bias=None, seed=None, temperature_bias=None,
climate_filename='climate_monthly',
climate_input_filesuffix='', output_filesuffix='',
init_area_m2=None, unique_samples=False):
"""Runs the random mass balance model for a given number of years.
This initializes a :py:class:`oggm.core.vascaling.RandomVASMassBalance`,
and runs and stores a :py:class:`oggm.core.vascaling.VAScalingModel` with
the given mass balance model.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
nyears : int, optional
length of the simulation, default = 1000
y0 : int, optional
central year of the random climate period. The default is to be
centred on t*. Default = None
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1),
default = 15
bias : float, optional
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero. Default = None
seed : int
        seed for the random generator. If you ignore this, the runs will be
        different each time. Setting it to a fixed seed across glaciers can
        be useful if you want to have the same climate years for all of them
temperature_bias : float, optional
add a bias to the temperature timeseries, default = None
climate_filename : str, optional
name of the climate file, e.g. 'climate_monthly' (default) or
'gcm_data'
climate_input_filesuffix: str, optional
filesuffix for the input climate file
output_filesuffix : str, optional
        this adds a suffix to the output file (useful to avoid overwriting
previous experiments)
init_area_m2: float, optional
glacier area with which the model is initialized, default is RGI value
unique_samples: bool, optional
if true, chosen random mass-balance years will only be available once
per random climate period-length
if false, every model year will be chosen from the random climate
period with the same probability (default)
Returns
-------
:py:class:`oggm.core.vascaling.VAScalingModel`
"""
# instance mass balance model
mb_mod = RandomVASMassBalance(gdir, y0=y0, halfsize=halfsize, bias=bias,
seed=seed, filename=climate_filename,
input_filesuffix=climate_input_filesuffix,
unique_samples=unique_samples)
if temperature_bias is not None:
# add given temperature bias to mass balance model
mb_mod.temp_bias = temperature_bias
# where to store the model output
diag_path = gdir.get_filepath('model_diagnostics', filesuffix='vas',
delete=True)
# instance the model
min_hgt, max_hgt = get_min_max_elevation(gdir)
if init_area_m2 is None:
init_area_m2 = gdir.rgi_area_m2
model = VAScalingModel(year_0=0, area_m2_0=init_area_m2,
min_hgt=min_hgt, max_hgt=max_hgt,
mb_model=mb_mod)
# specify path where to store model diagnostics
diag_path = gdir.get_filepath('model_diagnostics',
filesuffix=output_filesuffix,
delete=True)
# run model
model.run_until_and_store(year_end=nyears, diag_path=diag_path)
return model
|
2887c1e62d3357e028c7be0539225bfb879323d9
| 3,642,442
|
from typing import Optional
def sync_get_ami_arch_from_instance_type(instance_type: str, region_name: Optional[str]=None) -> str:
"""For a given EC2 instance type, returns the AMI architecture associated with the instance type
Args:
instance_type (str): An EC2 instance type; e.g., "t2.micro"
region_name (Optional[str], optional): AWS region to use for query, or None to use the default region. Defaults to None.
Returns:
str: The AMI architecture associated with instance_type
"""
processor_arches = sync_get_processor_arches_from_instance_type(instance_type, region_name=region_name)
result = sync_get_ami_arch_from_processor_arches(processor_arches)
return result
|
2289deea91c9a9dafa0492fac9230292b546e9b7
| 3,642,443
|
import math
def atan2(y, x):
"""Returns angle of a 2D coordinate in the XY plane"""
return math.atan2(y, x)
|
ede5a647c175bebf2800c22d92e396deff6077e2
| 3,642,444
|
def index_objects(
*, ids, indexer_class, index=None, transforms=None, manager_name=None
):
"""
Index specified `ids` in ES using `indexer_class`. This is done in a single
bulk action.
Pass `index` to index on the specific index instead of the default index
alias from the `indexed_class`.
Pass `transforms` or `manager_name` to change the queryset used to fetch
the objects to index.
Unless an `index` is specified, if a reindexing is taking place for the
default index then this function will index on both the old and new indices
to allow indexing to still work while reindexing isn't complete yet.
"""
if index is None:
index = indexer_class.get_index_alias()
# If we didn't have an index passed as argument, then we should index
# on both old and new indexes during a reindex.
indices = Reindexing.objects.get_indices(index)
else:
# If we did have an index passed then the caller wanted us to only
# consider the index they specified, so we only consider that one.
indices = [index]
if manager_name is None:
manager_name = 'objects'
manager = getattr(indexer_class.get_model(), manager_name)
if transforms is None:
transforms = []
qs = manager.filter(id__in=ids)
for transform in transforms:
qs = qs.transform(transform)
bulk = []
es = amo_search.get_es()
major_version = get_major_version(es)
for obj in qs.order_by('pk'):
data = indexer_class.extract_document(obj)
for index in indices:
item = {
'_source': data,
'_id': obj.id,
'_index': index,
}
if major_version < 7:
# While on 6.x, we use the `addons` type when creating indices
# and when bulk-indexing. We completely ignore it on searches.
# When on 7.x, we don't pass type at all at creation or
# indexing, and continue to ignore it on searches.
# That should ensure we're compatible with both transparently.
item['_type'] = 'addons'
bulk.append(item)
return helpers.bulk(es, bulk)
|
c93ea99946bb1516a58bb39aa5d43b1644f4f4da
| 3,642,445
|
def get_attrs_titles_with_transl() -> dict:
"""Returns attribut titles and translation"""
attr_titles = []
attrs = Attribute.objects.filter(show_in_list=True).order_by('weight')
for attr in attrs:
attr_titles.append(attr.name)
result = {}
for title in attr_titles:
result[title] = _(title)
return result
|
167955e669ddb3f6d5bbbd48cc01d26155a9e4ba
| 3,642,446
|
import numpy as np
from numpy import trapz
def kde_KL_divergence_2d(x, y, h_x, h_y, nb_bins=100, fft=True):
    """Uses a Kernel Density Estimator with Gaussian kernel on two-
    dimensional samples x and y and returns the estimated Kullback-
    Leibler divergence.
    @param x, y: samples, given as (n, 2) shaped numpy arrays,
    @param h_x, h_y: widths of the Gaussian kernel along each axis,
    @param nb_bins: number of grid points to use,
    @param fft: whether to use FFT to compute the convolution.
    """
min_ = np.min(np.vstack([np.min(x, axis=0), np.min(y, axis=0)]), axis=0)
max_ = np.max(np.vstack([np.max(x, axis=0), np.max(y, axis=0)]), axis=0)
bounds_ = np.vstack((min_, max_))
(x_grid, y_grid, kde_x) = gaussian_kde_2d(x, h_x, h_y,
nb_bins=nb_bins,
fft=fft,
bounds=bounds_
)
(x_grid2, y_grid2, kde_y) = gaussian_kde_2d(y, h_x, h_y,
nb_bins=nb_bins,
fft=fft,
bounds=bounds_
)
delta_x = x_grid[1] - x_grid[0]
delta_y = y_grid[1] - y_grid[0]
plogp = - kde_x * np.log((kde_x + EPSILON) / (kde_y + EPSILON))
# Integrate
div = trapz(trapz(plogp, dx=delta_x, axis=1), dx=delta_y, axis=0)
return div
|
ce7ef19846dfd729fe5703aceaec69392f455ca6
| 3,642,447
|
def gml_init(code):
"""
Initializes a Group Membership List (GML) for schemes of the given type.
Parameters:
code: The code of the scheme.
Returns:
A native object representing the GML. Throws an Exception on error.
"""
gml = lib.gml_init(code)
if gml == ffi.NULL:
raise Exception('Error initializing GML.')
return gml
|
5558f2db6a1c2269796cd52f675d5579ce357949
| 3,642,448
|
def before_run(func, force=False):
"""
Adds a function *func* to the list of callbacks that are invoked right before luigi starts
running scheduled tasks. Unless *force* is *True*, a function that is already registered is not
added again and *False* is returned. Otherwise, *True* is returned.
"""
if func not in _before_run_funcs or force:
_before_run_funcs.append(func)
return True
else:
return False
|
378604f6c574345682d8bd3d155ef8e4344aac27
| 3,642,449
|
import numpy as np
def calc_z_scores(baseline, seizure):
""" This function is meant to generate the figures shown in the Brainstorm
demo used to select the 120-200 Hz frequency band. It should also
be similar to panel 2 in figure 1 in David et al 2011.
This function will compute a z-score for each value of the seizure power
spectrum using the mean and sd of the control power spectrum at each
frequency. In the demo, the power spectrum is calculated for the 1st
10 seconds of all three seizures and then averaged. Controls are
similarly averaged
Parameters
----------
baseline : ndarray
power spectrum of baseline EEG
seizure : ndarray
power spectrum of seizure EEG
Returns
-------
ndarray
seizure power spectrum scaled to a z-score by baseline power spectrum
mean and SD
"""
mean = np.mean(baseline, 1)
sd = np.std(baseline, 1)
z_scores = (seizure - mean)/sd
return z_scores
|
db3f6fbc42450658700ca2d120bf6faa31fccdfd
| 3,642,450
|
def get_column(data, column_index):
"""
Gets a column of data from the given data.
:param data: The data from the CSV file.
:param column_index: The column to copy.
:return: The column of data (as a list).
"""
return [row[column_index] for row in data]
|
3fd5c8c76ccfed145aba0e685aa57ad01b3695a5
| 3,642,451
|
import numpy as np
def analytic_solution(num_dims,
t_val,
x_val=None,
domain_bounds=(0.0, 1.0),
x_0=(0.5, 0.5),
d=1.0,
k_decay=0.0,
k_influx=0.0,
trunc_order=100,
num_points=None):
"""This function returns the analytic solution to the heat equation with decay i.e. du/dt = nabla^2 u + k_1 - k_2 u
k_1 is the production rate, k_2 is the decay rate
Returns x-axis values, followed by an array of the solutions at different time points"""
if isinstance(t_val, (int, float)):
t_val = np.array([t_val])
if isinstance(num_points, (int, float)):
num_points = [num_points, num_points]
if isinstance(x_0, (int, float)):
x_0 = np.array([x_0, x_0])
if len(domain_bounds) < 4:
domain_bounds = (domain_bounds[0], domain_bounds[1], domain_bounds[0], domain_bounds[1])
assert isinstance(t_val, (list, tuple, np.ndarray))
assert isinstance(x_val, (tuple, list, np.ndarray)) or x_val is None
assert isinstance(domain_bounds, (list, tuple, np.ndarray))
assert isinstance(x_0, (tuple, list, np.ndarray))
assert isinstance(d, (int, float))
assert isinstance(k_decay, (int, float))
assert isinstance(k_influx, (int, float))
assert isinstance(trunc_order, int)
length = float(domain_bounds[1] - domain_bounds[0])
t = np.array(t_val)
if x_val is None:
assert num_points is not None
x_val = [np.linspace(domain_bounds[0], domain_bounds[1], num_points[0]),
np.linspace(domain_bounds[0], domain_bounds[1], num_points[1])]
if num_dims == 1:
if isinstance(x_val[0], (tuple, list, np.ndarray)):
x = np.array(x_val[0])
y = np.array(x_val[0])
else:
x = np.array(x_val)
y = np.array(x_val)
assert t.ndim == 1
t = t.reshape([t.shape[0], 1])
u = 1.0 / length
for n in range(1, trunc_order):
u += (2/length)*np.cos((n*np.pi/length)*x_0[0])*np.cos((n*np.pi/length)*x)*np.exp(-d*(n*np.pi/length)**2*t)
else:
assert isinstance(x_val[0], (tuple, list, np.ndarray))
assert isinstance(x_val[1], (tuple, list, np.ndarray))
x = np.array(x_val[0])
y = np.array(x_val[1])
xx, yy = np.meshgrid(x, y)
assert t.ndim == 1
t = t.reshape([t.shape[0], 1, 1])
u = 1.0 / length ** 2
for k in range(1, trunc_order):
u += (2.0 / length ** 2) * np.cos(k * np.pi * x_0[1] / length) * np.cos(k * np.pi * yy / length) * np.exp(
-d * t * (k * np.pi / length) ** 2)
for j in range(1, trunc_order):
u += (2.0 / length ** 2) * np.cos(j * np.pi * x_0[0] / length) * np.cos(j * np.pi * xx / length) * np.exp(
-d * t * (j * np.pi / length) ** 2)
for j in range(1, trunc_order):
for k in range(1, trunc_order):
u += (4.0 / length ** 2) * np.cos(j * np.pi * x_0[0] / length) * np.cos(k * np.pi * x_0[1] / length) * \
np.cos(j * np.pi * xx / length) * np.cos(k * np.pi * yy / length) * \
np.exp(-d * t * ((j * np.pi / length) ** 2 + (k * np.pi / length) ** 2))
if k_decay > 0.0 and k_influx == 0.0:
u *= np.exp(- k_decay * t)
elif k_decay == 0.0 and k_influx > 0.0:
u += k_influx * t
elif k_decay > 0.0 and k_influx > 0.0:
u += k_influx * (1.0 - np.exp(-k_decay * t)) / k_decay
if num_dims == 1:
return u, x
else:
return u, x, y
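# Usage sketch (illustrative): evaluate the 1D solution at two time points on
# a 101-point grid, with the default unit domain, source at x = 0.5 and
# unit diffusivity.
u_example, x_example = analytic_solution(num_dims=1, t_val=[0.01, 0.1], num_points=101)
# u_example has shape (2, 101): one spatial profile per requested time point.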
|
0a920ec22fbe1ae3ff510ddd4389c1cf4ae0912d
| 3,642,452
|
def safe_gas_limit(*estimates: int) -> int:
"""Calculates a safe gas limit for a number of gas estimates
including a security margin
"""
assert None not in estimates, "if estimateGas returned None it should not reach here"
calculated_limit = max(estimates)
return int(calculated_limit * constants.GAS_FACTOR)
|
439eca363dc1fe1f53972c69191513913feef39b
| 3,642,453
|
import typing
def integer_years(dates: typing.Any) -> typing.List[int]:
"""Maps a list of 'normalized_date' strings to a sorted list of integer years.
Args:
dates: A list of strings containing dates in the 'normalized_date' format.
Returns:
A list of years extracted from "dates".
"""
if not isinstance(dates, typing.Iterable):
return []
years: typing.Set[int] = set()
for date in dates:
if not isinstance(date, str):
continue
match = RANGE.search(date)
if match:
start_str, end_str = match.groups()
start = get_year(start_str)
end = get_year(end_str)
if start and end:
years.update(range(start, end + 1))
else:
year = get_year(date)
if year:
years.add(year)
return sorted(years)
|
cdf14f0a2fee197177f12ead43346dfd4eabb5ef
| 3,642,454
|
from owslib.wmts import WebMapTileService
def add_wmts_gibs_basemap(ax, date='2016-02-05'):
"""http://gibs.earthdata.nasa.gov/"""
URL = 'http://gibs.earthdata.nasa.gov/wmts/epsg4326/best/wmts.cgi'
wmts = WebMapTileService(URL)
# Layers for MODIS true color and snow RGB
# NOTE: what other tiles available?: TONS!
#https://wiki.earthdata.nasa.gov/display/GIBS/GIBS+Available+Imagery+Products#expand-ReferenceLayers9Layers
#layer = 'MODIS_Terra_SurfaceReflectance_Bands143'
#layer = 'MODIS_Terra_CorrectedReflectance_Bands367'
#layer = 'ASTER_GDEM_Greyscale_Shaded_Relief' #better zoomed in
layer = 'SRTM_Color_Index'
#layer = 'BlueMarble_ShadedRelief' #static
#layer = 'BlueMarble_NextGeneration'
#layer = 'BlueMarble_ShadedRelief_Bathymetry'
#layer = 'Reference_Labels'
#layer = 'Reference_Features'
ax.add_wmts(wmts, layer, wmts_kwargs={'time': date}) # alpha=0.5
#NOTE: can access attributes:
#wmts[layer].title
return wmts
|
434ff85e1a721937ba83d0438bb7384d1a1f0600
| 3,642,455
|
import torch
import einops
def encode_position(
batch_size: int,
axis: list,
max_frequency: float,
num_frequency_bands: int,
sine_only: bool = False,
) -> torch.Tensor:
"""
Encode the Fourier Features and return them
Args:
batch_size: Batch size
axis: List containing the size of each axis
max_frequency: Max frequency
num_frequency_bands: Number of frequency bands to use
sine_only: (bool) Whether to only use Sine features or both Sine and Cosine, defaults to both
Returns:
Torch tensor containing the Fourier Features of shape [Batch, *axis]
"""
axis_pos = list(
map(
lambda size: torch.linspace(-1.0, 1.0, steps=size),
axis,
)
)
pos = torch.stack(torch.meshgrid(*axis_pos), dim=-1)
enc_pos = fourier_encode(
pos,
max_frequency,
num_frequency_bands,
sine_only=sine_only,
)
enc_pos = einops.rearrange(enc_pos, "... n d -> ... (n d)")
enc_pos = einops.repeat(enc_pos, "... -> b ...", b=batch_size)
return enc_pos
|
06a81219b85006226069b288cce8602fc62e7119
| 3,642,456
|
def expr_erode(src, size = 5):
"""
Same result as core.morpho.Erode(), faster and workable in 32 bit.
"""
expr = _morpho_matrix(size, mm = 'min')
return core.akarin.Expr(src, expr)
|
06f76f889cadcec538639ca1a920168c6a9ec467
| 3,642,457
|
def response_modification(response):
"""
Modify API response format.
"""
if (
status.is_client_error(response.status_code)
or status.is_server_error(response.status_code)
) and (status.HTTP_400_BAD_REQUEST != response.status_code):
return response
# Modify the response data
modified_data = {}
modified_data["code"] = response.status_code
modified_data["status"] = get_status(response.status_code)
modified_data["data"] = response.data
response.data = modified_data
return response
|
f8a3120f3a1671d71f32158b742212b896074bdc
| 3,642,458
|
import logging
import six
import base64

if six.PY2:
    import urllib2
    import Cookie
else:
    import urllib.request as urllib2
    import urllib.error
    import http.cookies as Cookie
def http_request(
url,
json_string,
username = None,
password = None,
timeout = None,
additional_headers = None,
content_type = None,
cookies = None,
gzipped = None,
ssl_context = None,
debug = None
):
"""
Fetch data from webserver (POST request)
:param json_string: JSON-String
:param username: If *username* is given, BASE authentication will be used.
:param timeout: Specifies a timeout in seconds for blocking operations
like the connection attempt (if not specified, the global default
timeout setting will be used).
See: https://github.com/gerold-penz/python-jsonrpc/pull/6
:param additional_headers: Dictionary with additional headers
See: https://github.com/gerold-penz/python-jsonrpc/issues/5
:param content_type: Possibility to change the content-type header.
:param cookies: Possibility to add simple cookie-items as key-value pairs.
The key and the value of each cookie-item must be a bytestring.
Unicode is not allowed here.
:param gzipped: If `True`, the JSON-String will be gzip-compressed.
:param ssl_context: Specifies custom TLS/SSL settings for connection.
Python > 2.7.9
See: https://docs.python.org/2/library/ssl.html#client-side-operation
:param debug: If `True` --> *logging.debug*
"""
# Debug
if debug:
logging.debug("Client-->Server: {json_string}".format(json_string=repr(json_string)))
# Create request and add data
request = urllib2.Request(url)
if gzipped:
# Compress content (SpooledTemporaryFile to reduce memory usage)
spooled_file = tools.SpooledFile()
tools.gzip_str_to_file(json_string, spooled_file)
del json_string
request.add_header("Content-Encoding", "gzip")
request.add_header("Accept-Encoding", "gzip")
spooled_file.seek(0)
if six.PY2:
request.add_data(spooled_file)
else:
request.data = spooled_file
else:
if six.PY2:
request.add_data(json_string)
else:
request.data = json_string
# Content Type
request.add_header("Content-Type", content_type or "application/json")
# Authorization
if username:
        credentials = "%s:%s" % (username, password)
        base64string = base64.b64encode(credentials.encode("utf-8")).decode("ascii").strip()
        request.add_unredirected_header("Authorization", "Basic %s" % base64string)
# Cookies
if cookies:
cookie = Cookie.SimpleCookie(cookies)
request.add_header("Cookie", cookie.output(header = "", sep = ";"))
# Additional headers (overrides other headers)
if additional_headers:
for key, val in six.iteritems(additional_headers):
request.add_header(key, val)
# Send request to server
http_error_exception = urllib2.HTTPError if six.PY2 else urllib.error.HTTPError
try:
if ssl_context:
try:
response = urllib2.urlopen(
request, timeout=timeout, context=ssl_context
)
except TypeError as err:
if "context" in unicode(err):
raise NotImplementedError("SSL-Context needs Python >= 2.7.9")
else:
raise
else:
response = urllib2.urlopen(request, timeout=timeout)
except http_error_exception as err:
if debug:
retval = err.read()
logging.debug("Client<--Server: {retval}".format(retval=repr(retval)))
raise
# Analyze response and return result
try:
if "gzip" in response.headers.get("Content-Encoding", ""):
response_file = tools.SpooledFile(source_file = response)
if debug:
retval = tools.gunzip_file(response_file)
logging.debug("Client<--Server: {retval}".format(retval = repr(retval)))
return retval
return tools.gunzip_file(response_file)
else:
if debug:
retval = response.read()
logging.debug("Client<--Server: {retval}".format(retval=repr(retval)))
return retval
return response.read()
finally:
response.close()
|
76e41483157fb01541aae5a29a12526f69a89326
| 3,642,459
|
import trace
import numpy as np
import arcpy
def process_source_lineage(grid_sdf, data_sdf, value_field=None):
"""
    performs the source lineage operation: for each grid cell, summarises the
    primary and secondary themes of the intersecting features
    """
try:
subtypes = arcpy.da.ListSubtypes(data_sdf)
st_dict = {}
for stcode, stdict in list(subtypes.items()):
st_dict[stcode] = subtypes[stcode]['Name']
fields = arcpy.ListFields(data_sdf)
use_subtypes = False
for field in fields:
if field.name == value_field and field.type == 'Integer':
arcpy.AddMessage("Field has subtypes")
use_subtypes = True
poly_desc = arcpy.Describe(grid_sdf)
fc_desc = arcpy.Describe(data_sdf)
if poly_desc.extent.within(fc_desc.extent):
temp_fc = 'in_memory/clip'
arcpy.AddMessage('Clipping features to polygon')
arcpy.Clip_analysis(data_sdf, grid_sdf, temp_fc)
arcpy.AddMessage('Created in_memory fc')
data_sdf = geomotion.SpatialDataFrame.from_featureclass(temp_fc,
fields=[value_field])
arcpy.AddMessage('features read into spatial dataframe after clipping')
else:
data_sdf = geomotion.SpatialDataFrame.from_featureclass(data_sdf, fields=[value_field])
arcpy.AddMessage('features read into spatial dataframe without clipping')
grid_sdf = geomotion.SpatialDataFrame.from_featureclass(grid_sdf)
#data_sdf = geomotion.SpatialDataFrame.from_featureclass(data_sdf, fields=[value_field])
index = data_sdf.sindex
results = []
for idx, row in enumerate(grid_sdf.iterrows()):
geom = row[1].SHAPE
ext = [geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y,
geom.extent.upperRight.X, geom.extent.upperRight.Y]
row_oids = list(index.intersect(ext))
df_current = data_sdf.loc[data_sdf.index.isin(row_oids)]
# disjoint == False means intersection with Grid polygon
df_sub = df_current.loc[df_current.disjoint(geom) == False].copy()
df_sub = df_sub.replace({np.nan: "NULL"})
grp = df_sub.groupby(by=value_field).size() # Get the counts.
# sort the values to get the biggest on the top
grp.sort_values(axis=0, ascending=False,
inplace=True, kind='quicksort',
na_position='last')
if use_subtypes:
if len(grp) > 1:
grp = grp.head(2)
results.append(
(
int(row[1].OBJECTID),
",".join([st_dict[i] for i in df_sub[value_field].unique().tolist()]),
st_dict[grp.index[0]],
int(grp[grp.index[0]]),
round(float(grp[grp.index[0]]) * 100.0 / float(len(df_sub)),1),
st_dict[grp.index[1]],
int(grp[grp.index[1]]),
round(float(grp[grp.index[1]]) * 100.0 / float(len(df_sub)),1),
)
)
elif len(grp) == 0:
results.append(
(int(row[1].OBJECTID),
'None',
'None',
0,
float(0),
'None',
0,
float(0))
)
elif len(grp) == 1:
results.append(
(
int(row[1].OBJECTID),
",".join([st_dict[i] for i in df_sub[value_field].unique().tolist()]),
st_dict[grp.index[0]],
int(grp[grp.index[0]]),
round(float(grp[grp.index[0]]) * 100.0 / float(len(df_sub)),1),
'None',
0,
float(0)
)
)
else:
if len(grp) > 1:
grp = grp.head(2)
results.append(
(
int(row[1].OBJECTID),
",".join(df_sub[value_field].unique().tolist()),
grp.index[0],
int(grp[0]),
round(float(grp[0]) * 100.0 / float(len(df_sub)),1),
grp.index[1],
int(grp[1]),
round(float(grp[1]) * 100.0 / float(len(df_sub)),1),
)
)
elif len(grp) == 0:
results.append(
(int(row[1].OBJECTID),
'None',
'None',
0,
float(0),
'None',
0,
float(0))
)
elif len(grp) == 1:
results.append(
(
int(row[1].OBJECTID),
",".join(df_sub[value_field].unique().tolist()),
grp.index[0],
int(grp[0]),
round(float(grp[0]) * 100.0 / float(len(df_sub)),1),
'None',
0,
float(0)
)
)
del grp
del df_sub
del row_oids
del df_current
del grid_sdf
del data_sdf
dtypes = np.dtype(
[
                ('_ID', np.int64),
('THEME_LIST', '|S1024'),
('PRI_THEME', '|S256'),
('PRI_THEME_CNT', np.int32),
('PRI_THEME_PER', np.float64),
('SEC_THEME', '|S256'),
('SEC_THEME_CNT', np.int32),
('SEC_THEME_PER', np.float64)
]
)
array = np.array(results, dtypes)
del results
return array
except:
line, filename, synerror = trace()
raise FunctionError(
{
"function": "process_source_lineage",
"line": line,
"filename": filename,
"synerror": synerror,
"arc" : str(arcpy.GetMessages(2))
}
)
|
298e615474debbb01addc583ae19fc1c5191084b
| 3,642,460
|
import numpy as np
def class_to_mask(classes: np.ndarray, class_colors: np.ndarray) -> np.ndarray:
    """Convert an array of class IDs into an RGB mask image.
    Args:
        classes: array of class IDs. shape=(H, W)
        class_colors: array of colors. shape=(num_classes, 3)
    Returns:
        ndarray shape=(H, W, 3)
    """
return np.asarray(class_colors)[classes]
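# Usage sketch (not from the original source): map a 2x2 array of class IDs onto an
# assumed 3-colour palette; the result is an (H, W, 3) RGB mask.
_demo_classes = np.array([[0, 1], [2, 1]])
_demo_colors = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0]])
_demo_mask = class_to_mask(_demo_classes, _demo_colors)
# _demo_mask.shape == (2, 2, 3) and _demo_mask[0, 1] == [255, 0, 0]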
|
c574594b18d312e9ce432b68c8c2ff4d73771e6f
| 3,642,461
|
from typing import List
import logging
def get_vocab(iob2_files:List[str]) -> List[str]:
"""Retrieve the vocabulary of the iob2 annotated files
Arguments:
iob2_files {List[str]} -- List of paths to the iob2 annotated files
Returns:
List[str] -- Returns the unique list of vocabulary found in the files
"""
    vocab = set()
    for iob2_file in iob2_files:
        logging.info("Loading file %s for creating corpus embeddings", iob2_file)
        with open(iob2_file) as handle:
            for line in handle:
                token = line.split("\t")[0]
                vocab.add(token)
return list(vocab)
|
0dc2a1f969ed6f92b36b1b31875c855d5efda2d9
| 3,642,462
|
import numpy
def taylor_green_vortex(x, y, t, nu):
"""Return the solution of the Taylor-Green vortex at given time.
Parameters
----------
x : numpy.ndarray
Gridline locations in the x direction as a 1D array of floats.
y : numpy.ndarray
Gridline locations in the y direction as a 1D array of floats.
t : float
Time value.
nu : float
Coefficient of viscosity.
Returns
-------
numpy.ndarray
x-component of the velocity field as a 2D array of floats.
numpy.ndarray
y-component of the velocity field as a 2D array of floats.
numpy.ndarray
pressure field as a 2D array of floats.
"""
X, Y = numpy.meshgrid(x, y)
a = 2 * numpy.pi
u = -numpy.cos(a * X) * numpy.sin(a * Y) * numpy.exp(-2 * a**2 * nu * t)
v = +numpy.sin(a * X) * numpy.cos(a * Y) * numpy.exp(-2 * a**2 * nu * t)
p = (-0.25 * (numpy.cos(2 * a * X) + numpy.cos(2 * a * Y)) *
numpy.exp(-4 * a**2 * nu * t))
return u, v, p
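# Usage sketch (not from the original source): evaluate the analytical solution on a
# coarse 3x3 grid at t=0 with an assumed viscosity nu=0.1; at t=0 the exponential decay
# factor equals 1, leaving only the trigonometric pattern.
_x = numpy.linspace(0.0, 1.0, 3)
_y = numpy.linspace(0.0, 1.0, 3)
_u, _v, _p = taylor_green_vortex(_x, _y, 0.0, 0.1)
# _u.shape == _v.shape == _p.shape == (3, 3)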
|
f47f4cdf11b81fe8b8c38ae50d708ec4361f7098
| 3,642,463
|
import jax.numpy as jnp
# `add_batch` is assumed to be a local helper that broadcasts a state along a new batch axis.
def static_initial_state(batch_size, h_size):
""" Function to make an initial state for a single GRU.
"""
state = jnp.zeros([h_size], dtype=jnp.complex64)
if batch_size is not None:
state = add_batch(state, batch_size)
return state
|
a803da5b0af0ce17fc7d1f303f6141416da6d120
| 3,642,464
|
import pytest
# `DeskLamp` is assumed to be the device-wrapper class provided by the surrounding test suite.
def get_desklamp(request, index):
"""
A pytest fixture to initialize and return the DeskLamp object with
the given index.
"""
desklamp = DeskLamp(index)
try:
desklamp.open()
except RuntimeError:
pytest.skip("Could not open desklamp connection")
def fin():
desklamp.unsubscribe()
desklamp.off()
desklamp.close()
request.addfinalizer(fin)
return desklamp
|
8f00296f5625c8a80bb094d1e470936a0733b83e
| 3,642,465
|
import torch
def conj(x):
"""
Calculate the complex conjugate of x
x is two-channels complex torch tensor
"""
assert x.shape[-1] == 2
return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
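# Usage sketch (not from the original source): the last axis holds (real, imag) pairs,
# so [1., 2.] encodes 1 + 2j and its conjugate is [1., -2.].
_z = torch.tensor([[1.0, 2.0], [0.5, -3.0]])
_z_conj = conj(_z)
# _z_conj == tensor([[1.0, -2.0], [0.5, 3.0]])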
|
b22cfd3f12759f9b237099ca0527f0cbe9b99348
| 3,642,466
|
def label_clusters(img, min_cluster_size=50, min_thresh=1e-6, max_thresh=1, fully_connected=False):
"""
    Label connected clusters of voxels whose intensity lies between min_thresh and max_thresh,
    discarding clusters smaller than min_cluster_size. The ANTsPy helpers `threshold_image`,
    `_int_antsProcessArguments` and `lib` are assumed to be imported in the surrounding module.
"""
dim = img.dimension
clust = threshold_image(img, min_thresh, max_thresh)
temp = int(fully_connected)
args = [dim, clust, clust, min_cluster_size, temp]
processed_args = _int_antsProcessArguments(args)
lib.LabelClustersUniquely(processed_args)
return clust
|
efe63ea0e71d3a5bf3b2f0a03f3c0f1c295c063b
| 3,642,467
|
def update_schema(schema_old, schema_new):
"""
Given an old BigQuery schema, update it with a new one.
Where a field name is the same, the new will replace the old. Any
new fields not present in the old schema will be added.
Arguments:
schema_old: the old schema to update
schema_new: the new schema which will overwrite/extend the old
"""
old_fields = schema_old["fields"]
new_fields = schema_new["fields"]
output_fields = list(old_fields)
field_indices = {field["name"]: i for i, field in enumerate(output_fields)}
for field in new_fields:
name = field["name"]
if name in field_indices:
# replace old field with new field of same name
output_fields[field_indices[name]] = field
else:
# add new field
output_fields.append(field)
return {"fields": output_fields}
|
e97827ac0d8ee943b88fc54506af3f6fc8285d71
| 3,642,468
|
from sklearn.neighbors import KernelDensity
# `LOGGER` and `KERNEL` (the keyword arguments passed to KernelDensity) are assumed to be
# defined at module level in the surrounding package.
def get_estimators(positions_all, positions_relevant):
"""
Extracts density estimators from a judged sample of paragraph positions.
Parameters
----------
positions_all : dict of (Path, float)
A sample of paragraph positions from various datasets in the NTCIR-11
Math-2, and NTCIR-12 MathIR format.
positions_relevant : dict of (Path, float)
        A subsample of relevant paragraph positions from various datasets in the
        NTCIR-11 Math-2, and NTCIR-12 MathIR format.
Returns
-------
(float, KernelDensity, KernelDensity)
An estimate of P(relevant), and estimators of p(position), and p(position | relevant).
"""
samples_all = [
(position,) for _, positions in positions_all.items() for position in positions]
samples_relevant = [
(position,) for _, positions in positions_relevant.items() for position in positions]
estimators = dict()
estimators["P(relevant)"] = len(samples_relevant) / len(samples_all)
LOGGER.info("Fitting prior p(position) density estimator")
estimators["p(position)"] = KernelDensity(**KERNEL).fit(samples_all)
LOGGER.info("Fitting conditional p(position | relevant) density estimator")
estimators["p(position|relevant)"] = KernelDensity(**KERNEL).fit(samples_relevant)
return (
estimators["P(relevant)"], estimators["p(position)"], estimators["p(position|relevant)"])
|
b5f95247ff683e6e7e86d425ec64c988daacab60
| 3,642,469
|
# Requires Open Babel's Python bindings (`openbabel` as `ob`, `pybel` as `pyb`) plus the
# project-level `converter`, `toOBMol` and `ConformerError` helpers, assumed to be imported above.
def openbabel_force_field(label, mol, num_confs=None, xyz=None, force_field='GAFF', return_xyz_strings=True,
method='diverse'):
"""
Optimize conformers using a force field (GAFF, MMFF94s, MMFF94, UFF, Ghemical)
Args:
label (str): The species' label.
mol (Molecule, optional): The RMG molecule object with connectivity and bond order information.
num_confs (int, optional): The number of random 3D conformations to generate.
xyz (list, optional): The 3D coordinates in an array format.
force_field (str, optional): The type of force field to use.
return_xyz_strings (bool, optional): Whether to return xyz in string or array format. True for string.
method (str, optional): The conformer searching method to use in open babel.
For method description, see http://openbabel.org/dev-api/group__conformer.shtml
Returns:
list: Entries are optimized xyz's in a list format.
Returns:
list: Entries are float numbers representing the energies in kJ/mol.
"""
xyzs, energies = list(), list()
ff = ob.OBForceField.FindForceField(force_field)
if xyz is not None:
        if isinstance(xyz, str):
xyz = converter.get_xyz_matrix(xyz)[0]
# generate an open babel molecule
obmol = ob.OBMol()
atoms = mol.vertices
ob_atom_ids = dict() # dictionary of OB atom IDs
for i, atom in enumerate(atoms):
a = obmol.NewAtom()
a.SetAtomicNum(atom.number)
a.SetVector(xyz[i][0], xyz[i][1], xyz[i][2]) # assume xyz is ordered like mol; line not in in toOBMol
if atom.element.isotope != -1:
a.SetIsotope(atom.element.isotope)
a.SetFormalCharge(atom.charge)
ob_atom_ids[atom] = a.GetId()
orders = {1: 1, 2: 2, 3: 3, 4: 4, 1.5: 5}
for atom1 in mol.vertices:
for atom2, bond in atom1.edges.items():
if bond.isHydrogenBond():
continue
index1 = atoms.index(atom1)
index2 = atoms.index(atom2)
if index1 < index2:
obmol.AddBond(index1 + 1, index2 + 1, orders[bond.order])
# optimize
ff.Setup(obmol)
ff.SetLogLevel(0)
ff.SetVDWCutOff(6.0) # The VDW cut-off distance (default=6.0)
ff.SetElectrostaticCutOff(10.0) # The Electrostatic cut-off distance (default=10.0)
ff.SetUpdateFrequency(10) # The frequency to update the non-bonded pairs (default=10)
ff.EnableCutOff(False) # Use cut-off (default=don't use cut-off)
# ff.SetLineSearchType('Newton2Num')
ff.SteepestDescentInitialize() # ConjugateGradientsInitialize
v = 1
while v:
v = ff.SteepestDescentTakeNSteps(1) # ConjugateGradientsTakeNSteps
if ff.DetectExplosion():
raise ConformerError('Force field {0} exploded with method {1} for {2}'.format(
force_field, 'SteepestDescent', label))
ff.GetCoordinates(obmol)
elif num_confs is not None:
obmol, ob_atom_ids = toOBMol(mol, returnMapping=True)
pybmol = pyb.Molecule(obmol)
pybmol.make3D()
ff.Setup(obmol)
if method.lower() == 'weighted':
ff.WeightedRotorSearch(num_confs, 2000)
elif method.lower() == 'random':
ff.RandomRotorSearch(num_confs, 2000)
elif method.lower() == 'diverse':
rmsd_cutoff = 0.5
energy_cutoff = 50.
confab_verbose = False
ff.DiverseConfGen(rmsd_cutoff, num_confs, energy_cutoff, confab_verbose)
elif method.lower() == 'systematic':
ff.SystematicRotorSearch(num_confs)
else:
raise ConformerError('Could not identify method {0} for {1}'.format(method, label))
else:
raise ConformerError('Either num_confs or xyz should be given for {0}'.format(label))
ff.GetConformers(obmol)
obconversion = ob.OBConversion()
obconversion.SetOutFormat('xyz')
for i in range(obmol.NumConformers()):
obmol.SetConformer(i)
ff.Setup(obmol)
xyz = '\n'.join(obconversion.WriteString(obmol).splitlines()[2:])
if not return_xyz_strings:
xyz = converter.get_xyz_matrix(xyz)[0]
xyz = [xyz[ob_atom_ids[mol.atoms[j]]] for j, _ in enumerate(xyz)] # reorder
xyzs.append(xyz)
energies.append(ff.Energy())
return xyzs, energies
|
9964d94d2601e5cd7871886e396778457bb6e2cd
| 3,642,470
|
def parse_flarelabels(label_file):
"""
Parses a flare-label file and generates a dictionary mapping residue identifiers (e.g. A:ARG:123) to a
user-specified label, trees that can be parsed by flareplots, and a color indicator for vertices.
Parameters
----------
label_file : file
A flare-label file where each line contains 2-3 columns formatted as
- CHAIN:RESN:RESI (e.g. A:ARG:123)
- [[TOPLEVEL.]MIDLEVEL.]LABEL (e.g. Receptor.Helix2.2x44)
- COLOR (e.g. #FF0000 or white)
Returns
-------
dict of str : (dict of str : str)
Keys are all residue identifiers and values are dicts that hold both the LABEL by itself (key "label", the full
tree-path (key "treepath") and a CSS-compatible color string (key "color").
Raises
------
AssertionError
if a residue identifier (CHAIN:RESN:RESI) is specified twice in the file, or if a LABEL appears twice.
"""
if label_file is None:
return None
ret = {}
flarelabels = set() # Only used to check for duplicates
for line in label_file:
line = line.strip()
if not line:
continue # Ignore empty lines
columns = line.split("\t")
residentifier = columns[0]
flaretreepath = columns[1] if len(columns) > 1 else columns[0]
flarelabel = flaretreepath.split(".")[-1]
flarecolor = columns[2] if len(columns) > 2 else "white"
if residentifier in ret:
raise AssertionError("Residue identifier '"+residentifier+"' appears twice in "+label_file.name)
if flarelabel in flarelabels:
raise AssertionError("Flare label '"+flarelabel+"' used twice in "+label_file.name)
ret[residentifier] = {"label": flarelabel, "treepath": flaretreepath, "color": flarecolor}
flarelabels.add(flarelabel)
return ret
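# Usage sketch (not from the original source): an in-memory, tab-separated label file with
# one fully specified row and one row that falls back to the default colour "white".
import io
_demo_file = io.StringIO("A:ARG:123\tReceptor.Helix2.2x44\t#FF0000\nA:LYS:140\tReceptor.Helix3.3x50\n")
_labels = parse_flarelabels(_demo_file)
# _labels["A:ARG:123"] == {"label": "2x44", "treepath": "Receptor.Helix2.2x44", "color": "#FF0000"}
# _labels["A:LYS:140"]["color"] == "white"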
|
23df49af14af720311b320f65894e995983365bf
| 3,642,471
|
def remove_background(data, dim="t2", deg=0, regions=None):
"""Remove polynomial background from data
Args:
data (DNPData): Data object
dim (str): Dimension to perform background fit
deg (int): Polynomial degree
regions (None, list): Background regions, by default entire region is background corrected. Regions can be specified as a list of tuples [(min, max), ...]
Returns:
DNPData: Background corrected data
"""
proc_parameters = {
"dim": dim,
"deg": deg,
"regions": regions,
}
fit = background(data, dim=dim, deg=deg, regions=regions)
data = data - fit
proc_attr_name = "remove_backround"
data.add_proc_attrs(proc_attr_name, proc_parameters)
return data
|
54141b6f28b7a21ebdf1b0b920af3bfea4303b07
| 3,642,472
|
def get_hmm_datatype(query_file):
"""Takes an HMM file (HMMer3 software package) and determines what data
type it has (i.e., generated from an amino acid or nucleic acid alignment).
Returns either "prot" or "nucl".
"""
datatype = None
with open(query_file) as infh:
for i in infh:
if i.startswith('ALPH'):
                dname = i.strip().split()[1]
if dname == 'amino':
datatype = 'prot'
elif dname == 'DNA':
datatype = 'nucl'
break
# Check that it worked.
assert datatype is not None, """Error: Data type could not be
determined for input file: %s""" % query_file
# Return the data type.
return datatype
|
27653784b8a9fbae92226f8ea7d7b6e2b647765e
| 3,642,473
|
def detect_min_threshold_outliers(series, threshold):
"""Detects the values that are lower than the threshold passed
series : series, mandatory
The series where to detect the outliers
threshold : integer, float, mandatory
The threshold of the minimum value that will be considered outliers.
"""
bool_outliers = series < threshold
return bool_outliers
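# Usage sketch (not from the original source): assuming `series` is a pandas Series
# (any array-like supporting `<` would work), values below the threshold are flagged.
import pandas as pd
_s = pd.Series([5, 12, 3, 20])
_flags = detect_min_threshold_outliers(_s, 10)
# _flags.tolist() == [True, False, True, False]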
|
6032693341073d101c0aad598a105f6cbc0ec578
| 3,642,474
|
from datetime import datetime
def new_datetime(d):
    """
    Generate a safe datetime from a datetime.date or datetime.datetime object.
    """
    kw = [d.year, d.month, d.day]
    if isinstance(d, datetime):
        kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
    return datetime(*kw)
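# Usage sketch (not from the original source): a plain date gains zeroed time fields,
# while a datetime keeps its time components and tzinfo.
from datetime import date
_d = new_datetime(date(2024, 5, 1))
# _d == datetime(2024, 5, 1, 0, 0)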
|
58479d70918dd287bfd29b1a15b6cd4dc1bfd695
| 3,642,475
|
import tensorflow as tf
def _to_str(x):
"""Converts a bool tensor to a string with True/False values."""
x = tf.convert_to_tensor(x)
if x.dtype == tf.bool:
return tf.where(x, 'True', 'False')
return x
|
7919139e0f2cb19cd0856110e962acb616193ada
| 3,642,476
|
import numpy as np
# The DCT/IDCT used below are assumed to be n-dimensional transforms (the weight matrix is 2-D);
# scipy's dctn/idctn are one concrete choice. `initial_nn` is assumed to be a project-level
# nearest-neighbour fill helper used when no initial guess is supplied.
from scipy.fft import dctn as dct, idctn as idct
def inpaintn(x, m=100, x0=None, alpha=2):
""" This function interpolates the input (2-dimensional) image 'x' with missing values (can be NaN of Inf). It is based on a recursive process
where at each step the discrete cosine transform (dct) is performed of the residue, multiplied by some weights, and then the inverse dct is taken.
The initial guess 'x0' for the interpolation can be provided by the user, otherwise it starts with a nearest neighbor filling.
Args
INPUTS:
        x (numpy array) - is the image with missing elements (either np.nan or np.inf) from which you want to perform interpolation
        m (int) - is the number of iterations; default=100
        x0 (numpy array) - can be your initial guess; default=None
        alpha (float) - some input number used as a power scaling; default=2
OUT:
y (numpy array) - is the interpolated image wrt proposed method
"""
sh = x.shape
ids0 = np.isfinite(x)
if ids0.all(): #Nothing to interpolate...
return x
# Smoothness paramaters:
s0 = 3
s1 = -6
s = np.logspace(s0,s1,num=m)
# Relaxation factor:
rf = 2
# Weight matrix, here we add some basis vectors to Lambda depending on original size of 'x':
Lambda = np.zeros(sh, float)
u0 = np.cos(np.pi*np.arange(0,sh[0]).reshape((sh[0],1))/sh[0])
u1 = np.cos(np.pi*np.arange(0,sh[1]).reshape((1,sh[1]))/sh[1])
Lambda = np.add(np.add(Lambda,u0),u1)
Lambda = 2*(2-Lambda)
Lambda = Lambda**alpha
# Starting interpolation:
if x0 is None:
y = initial_nn(x)
else:
y = np.copy(x0)
for mu in range(m):
Gamma = 1/(1+s[mu]*Lambda)
a = np.copy(y)
a[ids0] = (x-y)[ids0]+y[ids0]
y = rf*idct(Gamma*dct(a, norm='ortho'), norm='ortho')+(1-rf)*y
y[ids0] = x[ids0]
return y
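# Usage sketch (not from the original source): punch a NaN hole into a smooth synthetic
# image and fill it, passing an explicit initial guess so the nearest-neighbour helper
# `initial_nn` is not needed.
_img = np.outer(np.linspace(0.0, 1.0, 16), np.linspace(0.0, 1.0, 16))
_holed = _img.copy()
_holed[5:8, 5:8] = np.nan
_filled = inpaintn(_holed, m=50, x0=np.nan_to_num(_holed))
# _filled contains no NaNs and approximates _img inside the hole.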
|
2fddabc6e512f9fc1ae7e8298f8d44582eaf7c46
| 3,642,477
|
def obtain_bboxs(path) -> list:
"""
obatin bbox annotations from the file
"""
file = open(path, "r")
lines = file.read().split("\n")
lines = [x for x in lines if x and not x.startswith("%")]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
bboxs = []
for line in lines:
items = line.split(" ")
bboxs.append([items[0], float(items[1]), float(items[2]), float(items[3]), float(items[4])])
return bboxs
|
75ceaac4bd8500320007d2ffb4cf4c490bd29473
| 3,642,478
|
import numpy as np
def Timeline_Integral_with_cross_before(Tm,):
    """
    Compute the cumulative count of a golden-cross/death-cross signal over time
    (a death cross (1 --> 0) does not reset the counter; a golden cross (0 --> 1) resets it).
    (Original note: the author never managed to write this as a lambda/apply and used a
    for loop instead; pointers are welcome.)
    """
T = [Tm[0]]
for i in range(1,len(Tm)):
        if Tm[i] != 1:
            T.append(T[i - 1] + 1)
        else:
            T.append(0)
return np.array(T)
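# Usage sketch (not from the original source): the counter resets to 0 at every golden
# cross (value 1) and keeps accumulating through every other value.
_Tm = np.array([0, 1, 0, 0, 1, 0])
_T = Timeline_Integral_with_cross_before(_Tm)
# _T.tolist() == [0, 0, 1, 2, 0, 1]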
|
fdbd68e84e2a79a96c2078f92a7b69ab0138874e
| 3,642,479
|
from typing import Generator
def list_image_paths() -> Generator[str, None, None]:
"""List each image path in the input directory."""
return list_input_directory(INPUT_DIRECTORIES["image_dir"])
|
bce70f2af3c42905a27a30bf97de0a993161130f
| 3,642,480
|
# Assumes `pqdict` (indexed priority queue) and the project-level `Graph`, `Node`,
# `Connection` and `clean_route_list` helpers are imported in the surrounding module.
def a_star(graph: Graph, start: Node, goal: Node, heuristic):
"""
Standard A* search algorithm.
:param graph: Graph A graph with all nodes and connections
:param start: Node Start node, where the search starts
    :param goal: Node End node, the goal for the search
    :param heuristic: callable Estimates the remaining cost from a node's position to the goal's position
    :return: shortest_path: list|False Either a list of node ids or false
"""
# Indexed priority queue
queue = pqdict()
# All visited connections
visited_stack = {}
# Add start node
visited_stack[start] = True
# The costs from start to a node
cost_to_node = {}
# Full costs from a node to goal
full_costs = {}
# All paths that have been taken
shortest_path = []
# Create a dummy for the start node
dummy_connection = Connection(start, start)
# Assign it to the queue so we can start
queue[dummy_connection] = 0
while queue:
# Get next connection from top queue
# and remove it (its a get + pop)
connection = queue.pop()
# Add the node to the shortest path
# cause otherwise we would not be here
shortest_path.append(connection)
cost_to_node[connection.to_node] = connection.cost
# We have found the target
if connection.to_node.id == goal.id:
# Remove all unneded paths and return
# a sorted list
return clean_route_list(shortest_path, goal.id)
# Get all connected nodes
next_connections = graph.get_connections(connection.to_node)
# Iterate through all connected nodes
# and calculate the costs and stuff
for c in next_connections:
# Calculate total costs from start to the goal node
to_goal_cost = heuristic(goal.position, c.to_node.position)
# Calculate costs from start to this node
current_cost = cost_to_node[connection.to_node] + c.cost
            # Update lists and costs (A* priority = cost so far + heuristic estimate)
            queue[c] = current_cost + to_goal_cost
cost_to_node[c.to_node] = current_cost
full_costs[c.to_node] = current_cost + to_goal_cost
visited_stack[c.to_node] = True
# Never found the target, so sad ...
return False
|
ca25a15733d041cfca2560164ea8b047e55991b8
| 3,642,481
|
def buildAndTrainModel(model, learningRate, batchSize, epochs, trainingData, validationData, testingData, trainingLabels, validationLabels, testingLabels, MODEL_NAME, isPrintModel=True):
"""Take the model and model parameters, build and train the model"""
# Build and compile model
# To use other optimizers, refer to: https://keras.io/optimizers/
# Please do not change the loss function
    optimizer = tf.keras.optimizers.Adam(learning_rate=learningRate)
model.compile(optimizer=optimizer,
loss=tf.keras.losses.MeanSquaredError())
if isPrintModel:
print(model.summary())
for epoch in range(0, epochs):
model.fit(trainingData, trainingLabels,
epochs=1,
verbose=0,
batch_size=batchSize,
shuffle=False)
# Evaluate model
valLoss = model.evaluate(validationData, validationLabels, verbose=False)
## get metrics
predictions = model.predict(testingData)
    MSE, MAE, MAPE, RMSE, PR = getMetrics(testingLabels, predictions)
    # The accumulators below (MeanSquaredError, RootMeanSquaredError, ...) and getMetrics()
    # are assumed to be defined at module level in the surrounding script.
    MeanSquaredError.append(MSE)
RootMeanSquaredError.append(RMSE)
MeanAbsoluteError.append(MAE)
MeanAbsolutePercentageError.append(MAPE)
PearsonR.append(PR)
ValMSE.append(valLoss)
Epoch.append(epoch)
if valLoss <= min(ValMSE):
max_predictions = predictions
return MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, ValMSE, PearsonR, Epoch, max_predictions
|
af00f383311588525e66cff317908a99fa39859f
| 3,642,482
|
import numpy as np
from numpy import isnan
# `_kernel` is assumed to be a module-level helper returning Gaussian weights for the
# given time offsets and cutoff.
def gaussian_temporal_filter(tsincr: np.ndarray, cutoff: float, span: np.ndarray,
thr: int) -> np.ndarray:
"""
Function to apply a Gaussian temporal low-pass filter to a 1D time-series
vector for one pixel with irregular temporal sampling.
:param tsincr: 1D time-series vector to be filtered.
:param cutoff: filter cutoff in years.
:param span: 1D vector of cumulative time spans, in years.
:param thr: threshold for non-NaN values in tsincr.
:return: ts_lp: Low-pass filtered time series vector.
"""
nanmat = ~isnan(tsincr)
sel = np.nonzero(nanmat)[0] # don't select if nan
ts_lp = np.empty(tsincr.shape, dtype=np.float32) * np.nan
m = len(sel)
if m >= thr:
for k in range(m):
yr = span[sel] - span[sel[k]]
# apply Gaussian smoothing kernel
wgt = _kernel(yr, cutoff)
wgt /= np.sum(wgt)
ts_lp[sel[k]] = np.sum(tsincr[sel] * wgt)
return ts_lp
|
54060dbfc84ce1738698fda893afb556b48396e4
| 3,642,483
|
import requests
import json
def get_mactable(auth):
"""
Function to get list of mac-addresses from Aruba OS switch
:param auth: AOSSAuth class object returned by pyarubaoss.auth
:return list of mac-addresses
:rtype list
"""
url_mactable = "http://" + auth.ipaddr + "/rest/" + auth.version + "/mac-table"
try:
r = requests.get(url_mactable, headers=auth.cookie)
mactable = json.loads(r.text)['mac_table_entry_element']
return mactable
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " get_mactable: An Error has occurred"
|
8f81a03640d7a4ed0d6d70bcaf268b647dee987e
| 3,642,484
|
def presentations():
"""Shows a list of selected presentations"""
return render_template(
'table.html',
title='Presentations',
data=PRESENTATIONS,
target='_blank',
)
|
643c1b7a6595f4c8c84abc47019a0346b414df56
| 3,642,485
|
import numpy as np
# `trajectory` and `HIVreference` are assumed to come from the surrounding analysis package.
def get_consensus_mask(patient, region, aft, ref=HIVreference(subtype="any")):
"""
Returns a 1D vector of size aft.shape[-1] where True are the position that correspond to consensus sequences.
Position that are not mapped to reference or seen too often gapped are always False.
"""
ref_filter = trajectory.get_reference_filter(patient, region, aft, ref)
consensus_mask = trajectory.get_reversion_map(patient, region, aft, ref)
initial_idx = patient.get_initial_indices(region)
# gives reversion mask at initial majority nucleotide
consensus_mask = consensus_mask[initial_idx, np.arange(aft.shape[-1])]
return np.logical_and(ref_filter, consensus_mask)
|
da7699350609ffc29d20b9922fa03c0d1944b57d
| 3,642,486
|
import argparse
def parse_arguments():
""" Parse arguments """
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
type=str,
dest="input_pics",
help="A file consists of pics path with each pic on a single line.",
)
parser.add_argument("-o", type=str, dest="output_gif", help="Output gif path.")
parser.add_argument("-fps", type=float, dest="fps", help="FPS.")
parser.add_argument(
"-duration", type=float, dest="duration", help="Duration of each frame."
)
return parser.parse_args()
|
8956c690bfffbe2e93c40c98db0eb785ff440530
| 3,642,487
|
def return_next_entry_list_uri(links):
"""続くブログ記事一覧のエンドポイントを返す"""
for link in links:
if link.attrib["rel"] == "next":
return link.attrib["href"]
|
0c4c4139270ef8dedbb106f2db852097f4cd3028
| 3,642,488
|
def none(**_):
""" Input: anything
Return: 0.0 (float)
Descr.: Dummy method to handle no temperature correction"""
return 0.0
|
e06b22f91d5a73450ddb4ca53fbb2569d567dcf1
| 3,642,489
|
def paths_and_labels_to_rgb_dataset(image_paths, labels, num_classes, label_mode):
"""Constructs a dataset of images and labels."""
path_ds = dataset_ops.Dataset.from_tensor_slices(image_paths)
img_ds = path_ds.map(lambda path: load_rgb_img_from_path(path))
label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes)
img_ds = dataset_ops.Dataset.zip((img_ds, label_ds))
return img_ds
|
7c72b3d628937fe999d89f5524d4d079ef20d9da
| 3,642,490
|
def get_custom_headers(manifest_resource):
"""Generates the X-TAXII-Date-Added headers based on a manifest resource"""
headers = {}
times = sorted(map(lambda x: x["date_added"], manifest_resource.get("objects", [])))
if len(times) > 0:
headers["X-TAXII-Date-Added-First"] = times[0]
headers["X-TAXII-Date-Added-Last"] = times[-1]
return headers
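# Usage sketch (not from the original source): a hypothetical manifest resource with two
# objects; the headers report the earliest and latest `date_added` timestamps.
_manifest = {"objects": [
    {"id": "indicator--1", "date_added": "2021-01-01T00:00:00Z"},
    {"id": "indicator--2", "date_added": "2021-06-15T12:30:00Z"},
]}
_headers = get_custom_headers(_manifest)
# _headers == {"X-TAXII-Date-Added-First": "2021-01-01T00:00:00Z",
#              "X-TAXII-Date-Added-Last": "2021-06-15T12:30:00Z"}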
|
6c3acf2ea330b347387bfec574b4f8edfffa69ab
| 3,642,491
|
def checkCulling( errs, cullStrings ) :
"""
Removes all messages containing sub-strings listed in cullStrings. cullStrings can be either a string or a
    list of strings. If a list of strings, each string must be a sub-string of a message for the message to
be culled.
"""
def checkCullingMatch( message, cullStrings ) :
found = True
for cullString in cullStrings : found = found and ( cullString in message )
return( found )
def checkCulling2( message, cullStrings, level = 0 ) :
if( isinstance( message, list ) ) :
messages = []
for msg in message :
msg1 = checkCulling2( msg, cullStrings, level + 1 )
if( msg1 is not None ) : messages.append( msg1 )
if( len( messages ) < 2 ) : messages = None
return( messages )
else :
if( checkCullingMatch( message, cullStrings ) ) : return( None )
return( message )
if( isinstance( cullStrings, str ) ) : cullStrings = [ cullStrings ]
errs2 = []
for err in errs :
messages = []
if( isinstance( err.message, str ) ) :
if( not( checkCullingMatch( err.message, cullStrings ) ) ) : errs2.append( err )
else :
for message in err.message :
message = checkCulling2( message, cullStrings )
if( message is not None ) :
messages.append( message )
if( len( messages ) > 0 ) :
err.message = messages
errs2.append( err )
return( errs2 )
|
5414e52df999a8aef7ed34328a689efa1582aabb
| 3,642,492
|
from tensorflow.keras import backend as K
def gram_matrix(x, ba, hi, wi, ch):
    """Compute the normalised Gram matrix of a feature map of shape (batch, H*W, channels)."""
if ba is None:
ba = -1
feature = K.reshape(x, [ba, int(hi * wi), ch])
gram = K.batch_dot(feature, feature, axes=1)
return gram / (hi * wi * ch)
|
6e6145d9941c2e63120c7d030ac5b6b1ccd5d97e
| 3,642,493
|
import csv
def read_pinout_csv(csv_file, keyname="number"):
"""
read a csv file and return a dict with the given keyname as the keys
"""
    with open(csv_file) as handle:
        reader = csv.DictReader(handle)
        return {row[keyname]: row for row in reader}
|
07a30b1191d311fee315c87773e3b3c1111d7624
| 3,642,494
|
# `CloudError` is assumed to be imported in the surrounding module
# (typically `from msrestazure.azure_exceptions import CloudError`).
async def start(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Power on (start) a virtual machine.
:param name: The name of the virtual machine to start.
:param resource_group: The resource group name assigned to the
virtual machine.
CLI Example:
.. code-block:: bash
azurerm.compute.virtual_machine.start testvm testgroup
"""
compconn = await hub.exec.azurerm.utils.get_client(ctx, "compute", **kwargs)
try:
# pylint: disable=invalid-name
vm = compconn.virtual_machines.start(
resource_group_name=resource_group, vm_name=name
)
vm.wait()
vm_result = vm.result()
result = vm_result.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
|
74a09ef57ea735ea6a5af2ee5d10d3407e770980
| 3,642,495
|
def Render(request, template_file, params):
"""Render network test pages."""
return util.Render(request, template_file, params)
|
a30e34297de9ec44982dc8bc19231c471cc080c4
| 3,642,496
|
import logging
import logging.handlers
from pathlib import Path
# `LogDBHandler` is assumed to be a project-level logging handler that writes to a database.
def setup_global_logger(log_filepath=None):
"""Setup logger for logging
Args:
log_filepath: log file path. If not specified, only log to console
Returns:
logger that can log message at different level
"""
logger = logging.getLogger(__name__)
try:
if not logger.handlers:
logger.propagate = False
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)s:[%(asctime)s] - %(message)s")
logging.getLogger("tornado").propagate = False
logging.getLogger("livereload").propagate = False
# Add sysout handler
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
# Add DB handler
logger.addHandler(LogDBHandler())
if log_filepath:
formatter = logging.Formatter(
"%(levelname)s:[%(asctime)s] - "
+ "[%(filename)s, line-%(lineno)d] - %(message)s"
)
# Add file handler
Path("logs").mkdir(parents=True, exist_ok=True)
fh = logging.handlers.TimedRotatingFileHandler(
Path("logs") / log_filepath, when="midnight", interval=1
)
fh.suffix = "%Y%m%d"
fh.setFormatter(formatter)
logger.addHandler(fh)
except Exception as ex:
logger.error(ex)
return logger
|
74a8911243947387352b5c20e81ef0a304b48aa5
| 3,642,497
|
import numpy as np
def calculate_class_recall(conf_mat: np.array) -> np.array:
"""
Calculates the recall for each class from a confusion matrix.
"""
return np.diagonal(conf_mat) / np.sum(conf_mat, axis=1)
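# Usage sketch (not from the original source): rows of the confusion matrix are true
# classes, so recall is the diagonal divided by each row sum.
_conf = np.array([[5, 1], [2, 8]])
_recall = calculate_class_recall(_conf)
# _recall is approximately [0.833, 0.8]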
|
715f20b3e957dee25630bb413aff48140cf6aad3
| 3,642,498
|
def findall(element, path):
""" A helper function around a :attr:`lxml.etree._Element.findall` that passes the
element's namespace mapping.
"""
return element.findall(path, namespaces=element.nsmap)
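# Usage sketch (not from the original source): because the element's nsmap is passed
# through, prefixed paths resolve against the namespaces declared in the document.
from lxml import etree
_root = etree.fromstring('<x:root xmlns:x="urn:example"><x:item/><x:item/></x:root>')
_items = findall(_root, "x:item")
# len(_items) == 2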
|
20da8cb66ac591751501e5c944f6f95235582e80
| 3,642,499
|