content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def root():
    """
    Landing page for PaperScraper; renders the form on GET and runs the
    submitted parameters on POST.

    Returns:
        Rendered ``root.html`` template; on POST the template context also
        carries ``error`` (message or None) and ``source`` (or None).
    """
    if request.method == "GET":
        return render_template("root.html")
    elif request.method == "POST":
        error = None
        source = None
        try:
            source = get_source(request.form)
        except OAuthException:
            error = "Invalid username or password."
        except ValueError as exc:
            # Bind the exception to a separate name: Python deletes the name
            # bound by ``except ... as error`` when the handler exits, so
            # reassigning ``error`` inside the handler would leave ``error``
            # unbound afterwards and crash the render_template call below.
            error = str(exc)
        return render_template("root.html", error=error, source=source)
import os
def get_pkg_name_from_sxs_folder_name(path_folder):
    """
    Extract the package name from an SxS folder path.

    The folder name is split on underscores and the package name is picked
    according to how many parts the name has (package names themselves may
    contain up to two underscores).

    :param path_folder: path of the package folder
    :return: package name as a string, or "" when the folder name contains
        no underscore at all
    :raises ValueError: when the number of underscore-separated parts does
        not match any recognised SxS folder layout
    """
    folder_name = os.path.basename(path_folder)
    split_name = folder_name.split("_")
    if len(split_name) < 2:
        logger.info("could not find package name for {folder_name}".format(folder_name=folder_name))
        return ""
    if len(split_name) == 2:
        return split_name[0]
    elif len(split_name) == 3:
        return split_name[1]
    elif len(split_name) == 6:
        return split_name[1]
    elif len(split_name) == 7:
        return "_".join(split_name[1:3])  # package name itself contains 1 underscore
    elif len(split_name) == 8:
        return "_".join(split_name[1:4])  # package name itself contains 2 underscores
    # 4, 5 or more than 8 parts: not a recognised SxS folder layout.  The old
    # message claimed "more than 2 underscores" which was wrong for the 4/5
    # part cases that also end up here.
    raise ValueError("unexpected number of underscores in sxs folder name"
                     " {path_folder}".format(path_folder=path_folder))
from uncompyle6.scanner import get_scanner
import sys
def python_parser(version, co, out=sys.stdout, showasm=False,
                  parser_debug=PARSER_DEFAULT_DEBUG, is_pypy=False):
    """
    Parse a code object into an abstract syntax tree representation.

    :param version: Python version the code object comes from, as a float
        (e.g. 2.6, 2.7, 3.2, 3.3, 3.4, 3.5).
    :param co: The code object to parse.
    :param out: File-like object output is written to.
    :param showasm: When true, the disassembled and ingested code is written
        to sys.stdout.
    :param parser_debug: dict of debug flags for the spark parser.
    :param is_pypy: Whether the code object comes from PyPy.
    :return: Abstract syntax tree representation of the code object.
    """
    assert iscode(co)
    scanner = get_scanner(version, is_pypy)
    tokens, customize = scanner.ingest(co)
    maybe_show_asm(showasm, tokens)

    # For heavy grammar debugging:
    # parser_debug = {'rules': True, 'transition': True, 'reduce': True,
    #                 'showstack': 'full'}
    grammar = get_python_parser(version, parser_debug)
    return parse(grammar, tokens, customize)
import sys
import ast
import configparser
def __parse_options(config_file, section, options):
    """ Parse the section options

    :type config_file: ConfigParser object
    :param config_file: The config file object to use
    :type section: str
    :param section: Which section to read in the configuration file
    :type options: list of dicts
    :param options:
        A list of options to parse. Example list::

            [{
                'key': 'aws_access_key_id',
                'option': 'aws-access-key-id',
                'required': False,
                'type': 'str'
            }]

    :returns: dict mapping each option's 'key' to its parsed value; options
        that are missing and not required are simply omitted.  Exits the
        process (status 1) on malformed values or missing required options.
    """
    # Typed getters that raise ValueError on malformed values; keeps the
    # previously triplicated try/except blocks in one place.
    typed_getters = {
        'int': (config_file.getint, 'integer'),
        'float': (config_file.getfloat, 'float'),
        'bool': (config_file.getboolean, 'boolean'),
    }
    configuration = {}
    for option in options:
        key = option.get('key')
        name = option.get('option')
        opt_type = option.get('type')
        try:
            if opt_type in typed_getters:
                getter, type_label = typed_getters[opt_type]
                try:
                    configuration[key] = getter(section, name)
                except ValueError:
                    # Message wording (including "an float"/"an boolean")
                    # kept identical to the original output.
                    print('Error: Expected an {0} value for {1}'.format(
                        type_label, name))
                    sys.exit(1)
            elif opt_type == 'dict':
                # Dict options are stored as Python literals in the file.
                configuration[key] = ast.literal_eval(
                    config_file.get(section, name))
            else:
                # 'str' and any unknown type fall back to the raw string.
                configuration[key] = config_file.get(section, name)
        except configparser.NoOptionError:
            if option.get('required'):
                print('Missing [{0}] option "{1}" in configuration'.format(
                    section, name))
                sys.exit(1)
    return configuration
from typing import Optional
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse sin of a vertex, Arcsin(vertex)

    :param input_vertex: the vertex
    :param label: optional label for the resulting vertex
    """
    casted = cast_to_vertex(input_vertex)
    return Vertex(context.jvm_view().ArcSinVertex, label, casted)
def LSFIR(H,N,tau,f,Fs,Wt=None):
""" Least-squares fit of a digital FIR filter to the reciprocal of a given frequency response.
Parameters
----------
H: np.ndarray
frequency response values
N: int
FIR filter order
tau: float
delay of filter
f: np.ndarray
frequencies
Fs: float
sampling frequency of digital filter
Wt: np.ndarray, optional
vector of weights
Returns
-------
bFIR: np.ndarray
filter coefficients
References
----------
* Elster and Link [Elster2008]_
.. see_also ::mod::`PyDynamic.uncertainty.propagate_filter.FIRuncFilter`
"""
print("\nLeast-squares fit of an order %d digital FIR filter to the" % N)
print("reciprocal of a frequency response given by %d values.\n" % len(H))
H = H[:,np.newaxis]
w = 2*np.pi*f/Fs
w = w[:,np.newaxis]
ords = np.arange(N+1)[:,np.newaxis]
ords = ords.T
E = np.exp(-1j*np.dot(w,ords))
if not Wt == None:
if len(np.shape(Wt))==2: # is matrix
weights = np.diag(Wt)
else:
weights = np.eye(len(f))*Wt
X = np.vstack([np.real(np.dot(weights,E)), np.imag(np.dot(weights,E))])
else:
X = np.vstack([np.real(E), np.imag(E)])
Hs = H*np.exp(1j*w*tau)
iRI = np.vstack([np.real(1.0/Hs), np.imag(1.0/Hs)])
bFIR, res = np.linalg.lstsq(X,iRI)[:2]
if (not isinstance(res,np.ndarray)) or (len(res)==1):
print("Calculation of FIR filter coefficients finished with residual norm %e" % res)
Hd = dsp.freqz(bFIR,1,2*np.pi*f/Fs)[1]
Hd = Hd*np.exp(1j*2*np.pi*f/Fs*tau)
res= np.hstack((np.real(Hd) - np.real(H), np.imag(Hd) - np.imag(H)))
rms= np.sqrt( np.sum( res**2 )/len(f))
print("Final rms error = %e \n\n" % rms)
return bFIR.flatten() | 025d3327b985eaf708a61f808e913256c91a911d | 3,630,405 |
import math
def DrawTextBar(value, max_value, max_width=53):
    """Return a simple ASCII bar graph, making sure it fits within max_width.

    Args:
        value: integer or float representing the value of this bar.
        max_value: integer or float representing the largest bar.
        max_width: How many characters this graph can use (int)

    Returns:
        string
    """
    if max_value <= 0:
        # Nothing to scale against; avoid dividing by zero below.
        return ''
    hash_width = max_value / max_width
    # Clamp so a value larger than max_value can never overflow the width
    # budget promised in the docstring.
    return min(max_width, int(math.ceil(value / hash_width))) * '#'
def dpAdvisorTime():
    """
    Runs dpAdvisor and measures the time required to compute an answer.

    Returns:
        The result of dpAdvisor(subjects, 24).
    """
    length = len(subjects)
    start_time = time.time()
    res = dpAdvisor(subjects, 24)
    end_time = time.time()
    total_time = end_time - start_time
    # print() as a function so this also runs on Python 3 (the original
    # Python 2 print statement is a SyntaxError there); the comma-separated
    # arguments produce the same space-joined output on both versions.
    print('It took', total_time, 'to compute an answer. The length of the list was ', length, '.')
    return res
import os
import json
from datetime import datetime
def expire_batch(
        client,
        batch_dir):
    """Expire all the (unanswered) HITs in the batch.

    Parameters
    ----------
    client : MTurk.Client
        a boto3 client for MTurk.
    batch_dir : str
        the path to the directory for the batch.

    Returns
    -------
    Dict[str, int]
        A dictionary mapping strings to integers. The dictionary will
        have the following form::

            {
                'batch_id': batch_id,
            }

        where ``batch_id`` is the UUID for the batch.
    """
    # construct important paths
    batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
    batchid_file_name, _ = batch_dir_subpaths['batchid']
    incomplete_file_name = settings.INCOMPLETE_FILE_NAME

    batchid_file_path = os.path.join(
        batch_dir, batchid_file_name)
    incomplete_file_path = os.path.join(
        batch_dir, incomplete_file_name)

    with open(batchid_file_path) as batchid_file:
        batch_id = batchid_file.read().strip()

    if not os.path.isfile(incomplete_file_path):
        raise ValueError(
            f'No {incomplete_file_name} file was found in {batch_dir}.'
            f' Please make sure that the directory is a batch that has'
            f' open HITs to be expired.')
    with open(incomplete_file_path) as incomplete_file:
        hit_ids = json.load(incomplete_file)['hit_ids']

    logger.info(f'Expiring HITs in batch {batch_id}.')
    for hit_id in hit_ids:
        # The module imports the class via ``from datetime import datetime``,
        # so ``datetime.datetime.now()`` would raise AttributeError here.
        client.update_expiration_for_hit(
            HITId=hit_id,
            ExpireAt=datetime.now())

    logger.info(f'All HITs in batch {batch_id} are now expired.')

    return {
        'batch_id': batch_id
    }
import os
def googlenet(path=""):
    """
    Returns info of each layer of the googlenet model with layer image path.

    :param path: directory that holds one image per layer (<layer name>.png)
    :return: list of [image_path, [layer name, kernel size, '-']] entries for
        layers whose image exists in `path`
    """
    model = create_googlenet(48, 0.5)
    # Build the set of existing image paths once instead of rebuilding a
    # list for every layer (the original was O(layers * files)).
    existing = {path + "/" + f for f in os.listdir(path)}
    layer_name = []
    for layer in model.layers:
        check = path + "/" + layer.name + ".png"
        if check in existing:
            if 'conv' in layer.name and ('bn' not in layer.name) and ('pad' not in layer.name) and ('relu' not in layer.name):
                layer_name.append([check, [layer.name, str(layer.kernel_size[0]) + ' x ' + str(layer.kernel_size[1]), '-']])
            elif layer.name[:2] != "tf":
                layer_name.append([check, [layer.name, '-', '-']])
            elif layer.name[:2] == "tf":
                # "tf_"-prefixed layers point at the image of the wrapped layer
                layer_name.append([path + "/" + layer.name[3:] + ".png", [layer.name, '-', '-']])
    return layer_name
import ctypes
def get_size(ctype, num, il_code):
    """Return ILValue representing total size of `num` objects of given ctype.

    ctype - CType of object to count
    num - Integral ILValue representing number of these objects
    il_code - IL code object that commands are appended to
    """
    count = set_type(num, ctypes.longint, il_code)
    size_literal = ILValue(ctypes.longint)
    il_code.register_literal_var(size_literal, str(ctype.size))
    total = ILValue(ctypes.longint)
    # total = count * size_literal
    il_code.add(math_cmds.Mult(total, count, size_literal))
    return total
def pe_17():
    """Sum the number of characters in the UK words representing the
    integers from 1 to 1,000. Exclude dashes and spaces.
    """
    total = sum(
        len(lpe.number_to_word(n).replace('-', '').replace(' ', ''))
        for n in range(1, 1001)
    )
    return ('The sum of characters in the number'
            f' words from 1 to 1,000 is {total:,}.')
def metrics_detection(scores, labels, pos_label=1, max_fpr=FPR_MAX_PAUC, verbose=True):
    """
    Wrapper function that calculates a bunch of performance metrics for anomaly detection.

    :param scores: numpy array with the anomaly scores. Larger values correspond to higher probability of a
                   point being anomalous.
    :param labels: numpy array of labels indicating whether a point is nominal (value 0) or anomalous (value 1).
    :param pos_label: value corresponding to the anomalous class in `labels`.
    :param max_fpr: float or an iterable of float values in `(0, 1)`. The partial area under the ROC curve is
                    calculated for each FPR value in `max_fpr`.
    :param verbose: Set to True to print the performance metrics.
    :return: tuple ``(au_roc, au_roc_partial, avg_prec, tpr, fpr)``.  ``au_roc_partial`` is an
             ndarray when ``max_fpr`` is iterable, otherwise a float; ``tpr``/``fpr`` hold the
             ROC operating points closest to each target in the module-level ``FPR_THRESH``.
    """
    # Area under the full ROC curve and average precision.
    au_roc = roc_auc_score(labels, scores)
    avg_prec = average_precision_score(labels, scores)
    # Partial AUC: one value per FPR bound when `max_fpr` is iterable.
    if hasattr(max_fpr, '__iter__'):
        au_roc_partial = np.array([roc_auc_score(labels, scores, max_fpr=v) for v in max_fpr])
    else:
        au_roc_partial = roc_auc_score(labels, scores, max_fpr=max_fpr)
    if verbose:
        print("Area under the ROC curve = {:.6f}".format(au_roc))
        print("Average precision = {:.6f}".format(avg_prec))
        print("Partial area under the ROC curve (pauc):")
        if hasattr(au_roc_partial, '__iter__'):
            for a, b in zip(max_fpr, au_roc_partial):
                print("pauc below fpr {:.4f} = {:.6f}".format(a, b))
        else:
            print("pauc below fpr {:.4f} = {:.6f}".format(max_fpr, au_roc_partial))

    # ROC curve and TPR at a few low FPR values
    fpr_arr, tpr_arr, thresh = roc_curve(labels, scores, pos_label=pos_label)
    tpr = np.zeros(len(FPR_THRESH))
    fpr = np.zeros_like(tpr)
    if verbose:
        print("\nTPR, FPR")
    for i, a in enumerate(FPR_THRESH):
        # First operating point whose FPR reaches the target `a`.
        # NOTE(review): assumes the mask is never empty, i.e. every target in
        # FPR_THRESH is <= max(fpr_arr) -- confirm FPR_THRESH values <= 1.
        mask = fpr_arr >= a
        tpr[i] = tpr_arr[mask][0]
        fpr[i] = fpr_arr[mask][0]
        if verbose:
            print("{:.6f}, {:.6f}".format(tpr[i], fpr[i]))
    return au_roc, au_roc_partial, avg_prec, tpr, fpr
def bias_init(shape, name=None, constant=0.0):
    """Bias Initialization

    Args:
        shape : Shape of the variable
        name : Name of the variable; defaults to 'b' when omitted
        constant: Value of constant to initialize

    Returns:
        Initialized bias tensor
    """
    var_name = 'b' if name is None else name
    return tf.get_variable(name=var_name, shape=shape,
                           initializer=tf.constant_initializer(constant))
def _FindBinmanNode(dtb):
    """Find the 'binman' node in the device tree

    Args:
        dtb: Fdt object to scan

    Returns:
        Node object of /binman node, or None if not found
    """
    return next((node for node in dtb.GetRoot().subnodes
                 if node.name == 'binman'), None)
def replace_escape_chars(text, which_ones=('\n', '\t', '\r'), replace_by=u'',
                         encoding=None):
    """Remove escape characters.

    `which_ones` is a tuple of which escape characters we want to remove.
    By default removes ``\\n``, ``\\t``, ``\\r``.

    `replace_by` is the string to replace the escape characters by.
    It defaults to ``''``, meaning the escape characters are removed.
    """
    result = str_to_unicode(text, encoding)
    replacement = str_to_unicode(replace_by, encoding)
    for escape_char in which_ones:
        result = result.replace(escape_char, replacement)
    return result
def purge_report_from_mobile_ucr(report_config):
    """
    Called when a report is deleted, this will remove any references to it in
    mobile UCR modules.
    """
    if not toggles.MOBILE_UCR.enabled(report_config.domain):
        return False

    did_purge_something = False
    for app in get_apps_in_domain(report_config.domain):
        app_changed = False
        for module in app.modules:
            if module.module_type != 'report':
                continue
            remaining = [
                app_config for app_config in module.report_configs
                if app_config.report_id != report_config._id
            ]
            # Only rewrite the module when a reference was actually dropped.
            if len(remaining) != len(module.report_configs):
                module.report_configs = remaining
                app_changed = True
        if app_changed:
            app.save()
            did_purge_something = True
    return did_purge_something
def image_color_cluster(image, k=5):
    """
    Cluster the colors of an image with k-means.

    :param image (numpy array): the extracted clothing image (BGR, as read by OpenCV)
    :param k (int): number of color clusters
    :return c_list (list): list with the RGB values of each color
    :return p_list (list): list with the proportion (distribution) of each color
    """
    # OpenCV loads images as BGR; convert so the cluster centers come out RGB.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Flatten (H, W, 3) into a (H*W, 3) list of pixels for KMeans.
    image = image.reshape((image.shape[0] * image.shape[1], 3))
    clt = KMeans(n_clusters=k)
    clt.fit(image)
    # Histogram of pixels per cluster, then the color bar / lists from it.
    hist = centroid_histogram(clt)
    bar, p_list, c_list = plot_colors(hist, clt.cluster_centers_)
    return c_list, p_list
def _get_auth_service(app):
    """Returns an _AuthService instance for an App.

    If the App already has an _AuthService associated with it, simply returns
    it. Otherwise creates a new _AuthService, and adds it to the App before
    returning it.

    Args:
        app: A Firebase App instance (or None to use the default App).

    Returns:
        _AuthService: An _AuthService for the specified App instance.

    Raises:
        ValueError: If the app argument is invalid.
    """
    service = _utils.get_app_service(app, _AUTH_ATTRIBUTE, _AuthService)
    return service
def winner_distance(r1, r2, reverse=False):
    """ Asymmetrical winner distance.

    This distance is the rank of the winner of r1 in r2, normalized by the number of candidates.
    (rank(r1 winner)) - 1 / (n - 1)
    Assuming no ties.

    Args:
        r1: 1D vector representing a judge.
        r2: 1D vector representing a judge.
        reverse: If True, lower is better.
    """
    r1, r2 = np.array(r1), np.array(r2)
    # The r1 winner is the extreme entry; which extreme depends on `reverse`.
    pick_winner = np.argmin if reverse else np.argmax
    w1 = pick_winner(r1)
    return (rk.rank(r2)[w1] - 1) / (len(r2) - 1)
def make_list_accos():
    """Return the acco numbers as list

    Returns:
        list: List with acco numbers
    """
    acco_ranges = [
        range(1, 23),     # saharas
        range(637, 656),  # kalaharis
        range(621, 627),  # balis
        range(627, 637),  # waikikis
        range(659, 668),  # serengeti
    ]
    return [number for acco_range in acco_ranges for number in acco_range]
def load_manifest(url, version, manifest_name):
    """Download and parse manifest.

    :param url: base URL of the update server
    :param version: version directory to fetch the manifest from
    :param manifest_name: name suffix of the manifest file
    :return: dict with the parsed header fields, an 'includes' list and a
        'files' dict keyed by file name
    :raises Exception: when the manifest cannot be downloaded, parsed, or
        contains no file section
    """
    manifest_raw = do_curl(f"{url}/{version}/Manifest.{manifest_name}")
    manifest = {}
    if not manifest_raw:
        raise Exception(f"Unable to load manifest {manifest_name}")
    # Header lines are "key:<TAB>value"; file entries have four columns.
    # NOTE: the minversion value used to be stored under the typo key
    # 'minversin' -- fixed here.
    header_keys = {
        "version:": "version",
        "previous:": "previous",
        "minversion:": "minversion",
        "filecount:": "filecount",
        "timestamp:": "timestamp",
        "contentsize:": "contentsize",
    }
    # Pre-bind so the error message below is well-defined even if parsing
    # fails before the loop body runs.
    idx = 0
    line = ""
    try:
        lines = manifest_raw.splitlines()
        for idx, line in enumerate(lines):
            content = line.split('\t')
            if content[0] == "MANIFEST":
                manifest['format'] = content[1]
            elif content[0] in header_keys:
                manifest[header_keys[content[0]]] = content[1]
            elif content[0] == "includes":
                manifest.setdefault('includes', []).append(content[1])
            elif len(content) == 4:
                manifest.setdefault('files', {})[content[3]] = content
    except Exception as err:
        # Chain the original error so the root cause stays visible.
        raise Exception(f"Unable to parse manifest {manifest_name} at line {idx+1}: {line}") from err
    if not manifest.get('includes'):
        manifest['includes'] = []
    if not manifest.get('files'):
        raise Exception(f"Invalid manifest {manifest_name}, missing file section")
    return manifest
from typing import List
def create_intrusion_set_from_name(
    name: str,
    author: Identity,
    external_references: List[ExternalReference],
    object_marking_refs: List[MarkingDefinition],
) -> IntrusionSet:
    """Create intrusion set with given name."""
    # Use the whitespace-stripped name as an alias when it differs.
    collapsed = name.replace(" ", "")
    aliases: List[str] = [collapsed] if collapsed != name else []

    primary_motivation = None
    secondary_motivations: List[str] = []

    return create_intrusion_set(
        name,
        aliases,
        author,
        primary_motivation,
        secondary_motivations,
        external_references,
        object_marking_refs,
    )
from typing import Any
def get_clients(
    *,
    db: Session = Depends(deps.get_db),
    current_user: models.User = Security(
        deps.get_current_active_user,
        scopes=[Role.ADMIN["name"], Role.SUPER_ADMIN["name"]],
    ),
) -> Any:
    """
    Retrieve all clients.

    Access is restricted via the `Security` dependency to users with the
    ADMIN or SUPER_ADMIN scope.

    Raises:
        The exception built by `exceptions.get_user_exception` when no user
        was resolved (defensive; presumably the Security dependency already
        guarantees one -- confirm).
    """
    # TODO redundant check
    if current_user is None:
        raise exceptions.get_user_exception("user not found")
    return crud.client.get_multi(db)
import os
import re
def find_files(directory='.', pattern='.*', recursive=True):
    """Search for files whose names match a regex pattern, optionally recursing."""
    if recursive:
        matches = (os.path.join(root, name)
                   for root, _subdirs, names in os.walk(directory)
                   for name in names if re.match(pattern, name))
    else:
        matches = (os.path.join(directory, name)
                   for name in os.listdir(directory)
                   if re.match(pattern, name))
    return matches
def _host_is_same(host1: str, host2: str) -> bool:
    """Check if host1 and host2 are the same, ignoring any :port suffix."""
    bare1, _, _ = host1.partition(":")
    bare2, _, _ = host2.partition(":")
    return bare1 == bare2
from typing import List
def check_chan_xt_three_bi(kline, bi_list: List[ChanObject]):
    """
    Classify the pattern formed by the given three "bi" strokes.

    :param kline: ctaLineBar object (not used by this function; kept for
        interface parity with the sibling pattern checkers)
    :param bi_list: list of exactly three strokes (ChanObject)
    :return: a ChanSignals value string describing the 3-stroke shape, or
        ChanSignals.Other.value when no pattern matches
    """
    v = ChanSignals.Other.value
    if len(bi_list) != 3:
        return v
    bi_1, bi_2, bi_3 = bi_list

    # The last stroke goes down
    if bi_3.direction == -1:
        # X3LA0 ~ downward, no overlap
        #  ^
        # / \
        #    \
        #     \
        #      \   /
        #       \ /
        #        v
        if bi_3.low > bi_1.high:
            v = ChanSignals.X3LA0.value

        # X3LB0 ~ downward "running" pattern
        #  ^
        # / \
        #    \
        # \   \
        #  \ /
        #   v
        if bi_2.low < bi_3.low < bi_1.high < bi_2.high:
            v = ChanSignals.X3LB0.value

        # X3LC0 ~ downward converging
        # \
        #  \    ^
        #   \  / \
        #    \/   \
        #          \
        if bi_1.high > bi_3.high and bi_1.low < bi_3.low:
            v = ChanSignals.X3LC0.value

        # X3LD0 ~ downward expanding
        #     ^
        #    / \
        # \ /   \
        #  v     \
        #         \
        if bi_1.high < bi_3.high and bi_1.low > bi_3.low:
            v = ChanSignals.X3LD0.value

        # X3LE0 ~ downward with (consolidation) divergence,
        # X3LF0 ~ downward without divergence
        if bi_3.low < bi_1.low and bi_3.high < bi_1.high:
            if bi_3.height < bi_1.height:
                # X3LE0 ~ downward, last stroke shorter than the first
                # (divergence)
                # \
                #  \   ^
                #   \ / \
                #    v   \
                #         \
                v = ChanSignals.X3LE0.value
            else:
                # X3LF0 ~ downward, last stroke at least as long as the
                # first (no divergence / pivot)
                # \
                #  \  ^
                #   \/ \
                #       \
                #        \
                v = ChanSignals.X3LF0.value

    # The last stroke goes up
    elif bi_3.direction == 1:
        # X3SA0 ~ upward, no overlap
        #        ^
        #       /
        # \    /
        #  \  /
        #   \/
        if bi_3.high < bi_1.low:
            v = ChanSignals.X3SA0.value

        # X3SB0 ~ upward "running" pattern
        #   ^
        #  / \   /
        # /   \ /
        #      v
        if bi_2.low < bi_1.low < bi_3.high < bi_2.high:
            v = ChanSignals.X3SB0.value

        # X3SC0 ~ upward converging
        #   ^
        #  / \  /
        # /   \/
        #      v
        if bi_1.high > bi_3.high and bi_1.low < bi_3.low:
            v = ChanSignals.X3SC0.value

        # X3SD0 ~ upward expanding
        #        /
        #  ^    /
        # / \  /
        #    \/
        #     v
        if bi_1.high < bi_3.high and bi_1.low > bi_3.low:
            v = ChanSignals.X3SD0.value

        # X3SE0 ~ upward with (consolidation) divergence,
        # X3SF0 ~ upward without divergence
        if bi_3.low > bi_1.low and bi_3.high > bi_1.high:
            if bi_3.height < bi_1.height:
                # X3SE0 ~ upward, last stroke shorter than the first
                # (divergence)
                #       /
                #  ^   /
                # / \ /
                #    v
                v = ChanSignals.X3SE0.value
            else:
                # X3SF0 ~ upward, last stroke at least as long as the
                # first (no divergence)
                #        /
                #       /
                #  ^   /
                # / \ /
                #    v
                v = ChanSignals.X3SF0.value
    return v
def get_filter_type_choices():
    """
    Get a tuple of filter types

    :return: tuple with filter types
    """
    filter_types = [('storlet', 'Storlet'), ('native', 'Native')]
    return ('', 'Select one'), ('Filter Types', filter_types)
from typing import Optional
def get_connected_devices() -> ConnectedDevices:
    """Returns Mbed Devices connected to host computer.

    Connected devices which have been identified as Mbed Boards and also connected devices which are potentially
    Mbed Boards (but not could not be identified in the database) are returned.

    Raises:
        DeviceLookupFailed: If there is a problem with the process of identifying Mbed Boards from connected devices.
    """
    devices = ConnectedDevices()
    for candidate in detect_candidate_devices():
        board: Optional["Board"] = None
        try:
            board = resolve_board(candidate)
        except NoBoardForCandidate:
            # Candidate stays in the result with no board attached.
            pass
        except MbedTargetsError as err:
            raise DeviceLookupFailed("A problem occurred when looking up board data for connected devices.") from err
        devices.add_device(candidate, board)
    return devices
from typing import Union
from typing import Optional
def my_sqrt_with_local_types(x: Union[int, float]) -> float:
    """Computes the square root of x, using the Newton-Raphson method.

    Args:
        x: a non-negative number.

    Returns:
        The square root of x as a float.

    Raises:
        ValueError: if x is negative.
    """
    if x < 0:
        raise ValueError(f"cannot take the square root of a negative number: {x}")
    if x == 0:
        # The Newton update divides by the current approximation, so zero
        # must be handled separately to avoid a ZeroDivisionError.
        return 0.0
    approx: Optional[float] = None
    guess: float = x / 2
    while approx != guess:
        approx = guess
        guess = (approx + x / approx) / 2
    return approx
def get_tempo(h5, songidx=0):
    """
    Get the tempo from an HDF5 song file, by default for the first song in it.
    """
    songs_table = h5.root.musicbrainz.songs
    return songs_table.cols.tempo[songidx]
def boolstr(value):
    """Value to bool handling True/False strings.

    'false' (any case) maps to False; other strings are converted to float
    when possible (so '0' is False) and then evaluated for truthiness.
    """
    # ``basestring`` only exists on Python 2; ``str`` is the Python 3
    # equivalent for this check.
    if isinstance(value, str):
        if value.lower() == 'false':
            return False
        try:
            value = float(value)
        except ValueError:
            pass
    return bool(value)
import math
def round_half_up(n: float, decimals: int = 0) -> float:
    """This function rounds to the nearest number (e.g 2.4 becomes 2.0 and 2.6 becomes 3);
    in case of tie, it rounds up (e.g. 1.5 becomes 2.0 and not 1.0), unlike the
    builtin round(), which uses banker's rounding.

    Args:
        n (float): number to round
        decimals (int): number of decimal figures that we want to keep; defaults to zero

    Returns:
        rounded_number (float): input number rounded with the desired decimals
    """
    # Annotation fixed: `decimals` is an exponent / count, an int per the
    # docstring (it was previously annotated as float).
    multiplier = 10 ** decimals
    rounded_number = math.floor(n * multiplier + 0.5) / multiplier
    return rounded_number
from typing import Set
def _possible_edges(n1: Set, n2: Set, directed: bool, self_loops: bool = False):
    """Compute the number of possible edges between two sets."""
    shared = len(n1.intersection(n2))
    edges = (len(n1) - shared) * (len(n2) - shared)
    if directed:
        edges *= 2
    if self_loops:
        # one loop per node in the union of the two sets
        edges += len(n1) + len(n2) - shared
    return edges
import math
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    if callable(config.BACKBONE):
        # NOTE(review): the check reads BACKBONE but the call uses
        # COMPUTE_BACKBONE_SHAPE, as in the original -- verify intended.
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    stage_shapes = [
        [int(math.ceil(image_shape[0] / stride)),
         int(math.ceil(image_shape[1] / stride))]
        for stride in config.BACKBONE_STRIDES
    ]
    return np.array(stage_shapes)
def coherency_phase_delay_bavg(time_series, lb=0, ub=None, csd_method=None):
    """
    Band-averaged phase delay between time-series

    Parameters
    ----------
    time_series: float array
        The time-series data
    lb,ub : float, optional
        Lower and upper bounds on the frequency range over which the phase delay
        is averaged
    csd_method : dict, optional
        Spec for the cross-spectral density estimator; defaults to Welch.

    Returns
    -------
    p : float array
        The pairwise band-averaged phase-delays between the time-series.
    """
    if csd_method is None:
        csd_method = {'this_method': 'welch'}  # The default

    f, fxy = get_spectra(time_series, csd_method)

    lb_idx, ub_idx = ut.get_bounds(f, lb, ub)
    # Skip the first frequency bin, presumably because a phase delay is not
    # meaningful at zero frequency.
    if lb_idx == 0:
        lb_idx = 1

    p = np.zeros((time_series.shape[0], time_series.shape[0],
                  f[lb_idx:ub_idx].shape[-1]))

    # ``xrange`` was Python 2 only; ``range`` keeps this working on Python 3.
    for i in range(time_series.shape[0]):
        for j in range(i, time_series.shape[0]):
            p[i][j] = coherency_phase_delay_bavg_calculate(f[lb_idx:ub_idx],
                                                           fxy[i][j][lb_idx:ub_idx])
            # The (j, i) entry uses the conjugate spectrum (reversed direction).
            p[j][i] = coherency_phase_delay_bavg_calculate(f[lb_idx:ub_idx],
                                                           fxy[i][j][lb_idx:ub_idx].conjugate())
    return p
def write_sale_table(
    df: DataFrame, output_path: str, filename: str = "sale.csv",
) -> DataFrame:
    """
    Extract a sale transaction (fact) table from the staging data and save it in the csv format.

    Args:
        df: Staging dataframe containing source data.
        output_path: Path to where the resulting csv files are saved.
        filename: Optional filename of the resulting csv directory in the output path.

    Returns: A sale transaction dataframe.
    """
    sale_table = (
        df.where(f.col("transaction_unique_identifier").isNotNull())
        .withColumn(
            "id",
            # Strip the surrounding {} from the transaction GUID.
            f.udf(lambda x: x.strip("}").strip("{"), t.StringType())(
                f.col("transaction_unique_identifier")
            ),
        )
        .withColumn("price", df["price"].cast(t.IntegerType()))
        .withColumn("year", f.year(df["date"]))
        .withColumn("month", f.month(df["date"]))
    )
    sale_table = _normalise_postcode(sale_table)
    sale_table = sale_table.select(
        ["id", "price", "date", "postcode", "property_address", "year", "month"]
    )
    if output_path:
        # Write under the requested filename; the parameter was previously
        # ignored, making it impossible to direct the output location.
        sale_table.write.partitionBy(["year", "month"]).csv(f"{output_path}/{filename}")
        logger.info("Saved sale table")
    return sale_table
def is_equal_tf(x: tf.Tensor, y: tf.Tensor) -> bool:
    """return true if two tf tensors are nearly equal (max abs diff < EPS)"""
    diff = tf.cast(x, dtype=tf.float32) - tf.cast(y, dtype=tf.float32)
    max_abs_diff = tf.reduce_max(tf.abs(diff)).numpy()
    return max_abs_diff < EPS
from typing import List
def align_dtw_scale(
    reference: Trace, *traces: Trace, radius: int = 1, fast: bool = True
) -> List[Trace]:
    """
    Align :paramref:`~.align_correlation.traces` to the :paramref:`~.align_correlation.reference` trace.

    Use fastdtw (Dynamic Time Warping) with scaling as per:

    Jasper G. J. van Woudenberg, Marc F. Witteman, Bram Bakker:
    **Improving Differential Power Analysis by Elastic Alignment**

    https://pdfs.semanticscholar.org/aceb/7c307098a414d7c384d6189226e4375cf02d.pdf

    :param reference: Trace to align to.
    :param traces: Traces to align.
    :param radius: Search radius passed to ``fastdtw`` (only used when ``fast``).
    :param fast: Use approximate ``fastdtw`` instead of exact ``dtw``.
    :return: List of the aligned traces (with the reference).
    """
    result = [deepcopy(reference)]
    reference_samples = reference.samples
    for trace in traces:
        if fast:
            _, path = fastdtw(reference_samples, trace.samples, radius=radius)
        else:
            _, path = dtw(reference_samples, trace.samples)
        # Accumulate, per reference index, the sum of matched samples and
        # the number of matches, then average ("scaling" step).
        result_samples = np.zeros(len(reference_samples), dtype=trace.samples.dtype)
        scale = np.zeros(len(reference_samples), dtype=trace.samples.dtype)
        for x, y in path:
            result_samples[x] += trace.samples[y]
            scale[x] += 1
        # NOTE(review): in-place true division requires a float sample dtype;
        # presumably traces hold floats -- confirm. A DTW path covers every
        # reference index, so `scale` should contain no zeros.
        result_samples /= scale
        # Free the potentially large intermediates before the next trace.
        del path
        del scale
        result.append(trace.with_samples(result_samples))
    return result
import tensorflow as tf # wanted to circumvent this, but parsing the serialized data cleanly was difficult
import sqlite3
import collections
def load_stackoverflow_tff(cache_dir="~/data", user_idx=0, split="train"):
    """Load the tensorflow federated stackoverflow dataset into pytorch.

    Args:
        cache_dir: Directory the sqlite database is cached in.
        user_idx: Index of the client whose examples should be loaded.
        split: One of "train", "test" or "validation" (mapped to the
            database split name "heldout").

    Returns:
        List of raw token strings, one per example of the selected client.

    Raises:
        ValueError: If `split` is not a known split name.
    """
    if split == "validation":
        split_name = "heldout"
    elif split in ["train", "test"]:
        split_name = split
    else:
        raise ValueError(f"Split name {split} does not correspond to entries in this dataset.")

    db_name = _load_sql_database(TFF_URLS["stackoverflow"], cache_dir=cache_dir)
    client_id = _fetch_client_id(db_name, user_idx, split_name=split_name)

    query = (
        f"SELECT serialized_example_proto FROM examples WHERE client_id='{client_id}' and split_name='{split_name}';"
    )
    # Close the connection deterministically; the original leaked it (and
    # misleadingly named the Connection object "cursor").
    connection = sqlite3.connect(db_name)
    try:
        data = list(connection.execute(query))
    finally:
        connection.close()
    log.info(f"Now processing user {client_id} from tff database.")

    def parse_proto(tensor_proto):
        """Deserialize one example proto and return its "tokens" feature as text."""
        parse_spec = collections.OrderedDict(
            creation_date=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
            score=tf.io.FixedLenFeature(dtype=tf.int64, shape=()),
            tags=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
            title=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
            tokens=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
            type=tf.io.FixedLenFeature(dtype=tf.string, shape=()),
        )
        parsed_features = tf.io.parse_example(tensor_proto, parse_spec)
        return parsed_features["tokens"].numpy().decode("ascii")

    raw_texts = [parse_proto(proto_entry[0]) for proto_entry in data]
    return raw_texts
def format_string(current_size, total_length, elapsed_time):
    """
    Consistent format to be displayed on the screen.

    :param current_size: Number of finished object size
    :param total_length: Total object size
    :param elapsed_time: number of seconds passed since start
    """
    done_mb = current_size / _KILOBYTE / _KILOBYTE
    elapsed_str = seconds_to_time(elapsed_time)

    rate = _RATE_FORMAT % (done_mb / elapsed_time) if elapsed_time else _UNKNOWN_SIZE

    fraction = float(current_size) / total_length
    filled = int(fraction * _BAR_SIZE)
    bar = _FINISHED_BAR * filled + _REMAINING_BAR * (_BAR_SIZE - filled)
    percentage = _PERCENTAGE_FORMAT % (fraction * 100)

    if current_size:
        # Estimate remaining time by extrapolating the observed rate.
        left_str = seconds_to_time(
            elapsed_time / current_size * (total_length - current_size))
    else:
        left_str = _UNKNOWN_SIZE

    humanized_total = _HUMANINZED_FORMAT % (total_length / _KILOBYTE / _KILOBYTE) + _STR_MEGABYTE
    humanized_done = _HUMANINZED_FORMAT % done_mb + _STR_MEGABYTE
    return _DISPLAY_FORMAT % (
        bar, humanized_done, humanized_total, percentage, elapsed_str, left_str, rate)
import os
import time
def get_file (queue):
    """Get file from queue after making sure it arrived completely; None
    is returned if the file is not a fits file or still having trouble
    reading the fits file even after waiting up to [wait_max] seconds;
    otherwise the filename is returned.
    """

    # get event from queue; blocks until an entry is available
    event = queue.get(True)
    try:
        # get name of new file
        filename = str(event.src_path)
        filetype = 'new'
    except AttributeError:
        # instead of event, queue entry is a filename added in
        # [run_blackbox]
        filename = event
        filetype = 'pre-existing'

    log.info ('detected a {} file: {}'.format(filetype, filename))

    # only continue if a fits file
    if 'fits' not in filename:
        log.info ('{} is not a fits file; skipping it'.format(filename))
        filename = None

    else:

        # if filename is a temporary rsync copy (default
        # behaviour of rsync is to create a temporary file
        # starting with .[filename].[randomstr]; can be
        # changed with option "--inplace"), then let filename
        # refer to the eventual file created by rsync
        fn_head, fn_tail = os.path.split(filename)
        if fn_tail[0] == '.':
            filename = '{}/{}'.format(fn_head, '.'
                                      .join(fn_tail.split('.')[1:-1]))
            log.info ('changed filename from rsync temporary file {} to {}'
                      .format(event.src_path, filename))

        # this while loop below replaces the old [copying]
        # function; it times out after wait_max is reached
        wait_max = 180
        t0 = time.time()
        nsleep = 0
        # initialised here so the check after the loop can never hit an
        # unbound local, even if the loop body does not run
        process = False
        while time.time()-t0 < wait_max:
            try:
                # read the file
                data = read_hdulist(filename)
            except Exception:
                # narrow from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate; any read failure means the
                # file has not fully arrived yet
                process = False
                if nsleep==0:
                    log.warning ('file {} has not completely arrived yet; '
                                 'will keep trying to read it in for {}s'
                                 .format(filename, wait_max))
                # give file a bit of time to arrive before next read attempt
                time.sleep(5)
                nsleep += 1

            else:
                # if fits file was read fine, set process flag to True
                process = True
                log.info ('successfully read file {} within {:.1f}s'
                          .format(filename, time.time()-t0))
                # and break out of while loop
                break

        if not process:
            log.info ('{}s limit for reading file reached, not processing {}'
                      .format(wait_max, filename))
            filename = None

    return filename
import logging
def upload_file(src_local_path, dest_s3_path):
    """Upload a local file to the configured S3 bucket.

    :param src_local_path: path of the local file to upload
    :param dest_s3_path: destination key inside the bucket
    :return: True on success, False on any failure
    """
    try:
        with open(src_local_path, 'rb') as source:
            s3.upload_fileobj(source, BUCKET_NAME, dest_s3_path)
    except Exception as e:
        logging.error(f'Upload data failed. | src: {src_local_path} | dest: {dest_s3_path} | Exception: {e}')
        return False
    logging.info(f'Uploading file successful. | src: {src_local_path} | dest: {dest_s3_path}')
    return True
import torch
def labels_from(distribution):
    """Expand a per-class count tensor into a flat 1-D label tensor.

    Class index ``i`` appears ``distribution[i]`` times, in order.
    """
    nclasses = distribution.shape[0]
    flat = []
    for class_idx in range(nclasses):
        # int() accepts both plain ints and 0-dim integer tensors
        flat.extend([class_idx] * int(distribution[class_idx]))
    return torch.tensor(flat, requires_grad=False)
import argparse
def get_raygen_argparser():
    """Build and return the command-line argument parser for `raygen`."""
    parser = argparse.ArgumentParser(
        description='A simple static site generator, for those who want to fully the generation of blog.'
    )
    parser.add_argument('--server',
                        action="store_true",
                        help='run the server.')
    parser.add_argument('-p', '--port',
                        type=int,
                        default=8080,
                        help='Port number to serve files on.')
    return parser
def import_ham_dataset(dataset_root, outf, training=True):
    """Build a HAMDataset instance for use with a DataLoader.

    Downloads the dataset if not present in dataset_root.

    Args:
        dataset_root (str): root directory of dataset.
        outf (str): path to working directory.
        training (bool): return training or testing samples.
    """
    preprocessing = transforms.Compose([
        transforms.Resize(299),  # required size
        transforms.CenterCrop(299),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # required normalisation
    ])
    return HAMDataset(
        csv_file='HAM10000_metadata.csv',
        root_dir=dataset_root,
        outf=outf,
        training=training,
        transform=preprocessing,
    )
def truncatechars(value, arg):
    """
    Truncate ``value`` after a certain number of letters, appending "...".

    Argument: Number of letters to truncate after.
    Fails silently (returns ``value`` unchanged) when ``arg`` is not a
    valid integer literal. Non-string values are converted with str().
    """
    try:
        length = int(arg)
    except ValueError:  # invalid literal for int()
        return value  # Fail silently
    # `basestring` is Python 2 only and raises NameError on Python 3;
    # `str` is the correct check here
    if not isinstance(value, str):
        value = str(value)
    if len(value) <= length:
        return value
    truncated = value[:length]
    # avoid a double ellipsis when the cut already ends with "..."
    if not truncated.endswith('...'):
        truncated += '...'
    return truncated
def default_category_orders() -> dict:
    """Return the default mapping of category name to ordered category values."""
    months = [
        "January", "February", "March", "April",
        "May", "June", "July", "August",
        "September", "October", "November", "December",
    ]
    return {
        "dayofweek": ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"],
        "weekend": ["Weekday", "Weekend"],
        "season": ["Spring", "Summer", "Autumn", "Winter"],
        "month": months,
    }
def argsort(seq, key=None, cmp=None, reverse=False):
    """Returns the indices corresponding to a sort of the given `seq`.

    Can optionally pass in `key`, `cmp` and `reverse` just as you would to
    :func:`sorted()`; `cmp` compares the key-transformed values, emulating
    the Python 2 behaviour. Ties are broken by the original index (stable).

    Note: the previous implementation passed a two-argument lambda as the
    ``key`` of :func:`sorted` and used the Python-2-only ``cmp`` keyword,
    both of which raise ``TypeError`` on Python 3; it also crashed when
    ``key`` was left as ``None``.
    """
    import functools

    if not seq:
        return ()
    keyfn = (lambda v: v) if key is None else key
    keyed = [keyfn(v) for v in seq]
    if cmp is not None:
        # wrap the comparator so it orders the key-transformed values
        wrap = functools.cmp_to_key(cmp)
        order = sorted(range(len(seq)), key=lambda i: wrap(keyed[i]),
                       reverse=reverse)
    else:
        order = sorted(range(len(seq)), key=keyed.__getitem__, reverse=reverse)
    return tuple(order)
def new_category():
    """ Add new category """
    # Only logged-in users may create categories
    if 'username' not in login_session:
        return redirect('/login')
    if request.method != 'POST':
        # GET: show the creation form
        return render_template('categories/new_category.html')
    # POST: read the submitted form fields
    name = request.form['name']
    description = request.form['description']
    category = Category(name=name, user_id=login_session['user_id'])
    # description is optional; only store it when provided
    if description:
        category.description = description
    session.add(category)
    session.commit()
    return redirect(url_for('categories'))
def create_densenet(hidden_units, idx_to_cat):
    """Create a flowernet model based on the Densenet-121 architecture.

    Args:
        hidden_units: The number of hidden units in the flowernet classifier.
        idx_to_cat: A dictionary mapping the internal index numbers provided by
            the classifier to the actual flower category labels.

    Returns:
        FlowerNetModule: A new flowernet model.
    """
    base = models.densenet121(pretrained=True)
    # Freeze the pretrained feature extractor so only the new classifier trains.
    for parameter in base.parameters():
        parameter.requires_grad = False
    head = create_classifier(ARCH_DENSNET_SIZE, hidden_units)
    base.classifier = head
    return FlowerNetModule(ARCH_DENSNET, base, head, hidden_units, idx_to_cat)
import functools
import uuid
def log_event(func):
    """Decorator that logs request/response details of the wrapped call.

    The wrapped function must return a dict with ``request`` and
    ``response`` sub-dicts (``method``/``url``/``headers``/``body`` and
    ``code``/``headers``/``message`` respectively); the dict is returned
    unchanged.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger = get_logger()
        result = func(*args, **kwargs)
        # one id per call so request and response lines can be correlated
        _uuid = uuid.uuid4()
        # lazy %-style logging args: messages are only formatted when the
        # log level is enabled (the old code formatted eagerly with `%`)
        logger.info("[%s] Request: %s %s", _uuid, result['request']['method'], result['request']['url'])
        logger.debug("[%s] Request HTTP headers: %s", _uuid, result['request']['headers'])
        logger.debug("[%s] Request payload: %s", _uuid, result['request']['body'])
        logger.info("[%s] Response: HTTP %s", _uuid, result['response']['code'])
        logger.debug("[%s] Response HTTP headers: %s", _uuid, result['response']['headers'])
        logger.debug("[%s] Response message: %s", _uuid, result['response']['message'])
        return result
    return wrapper
import tokenize
def cleanup_string(string, already_lowercase=False):
    """ Do the following cleanup steps on the provided string:
        1. Case Folding
        2. Tokenization
        3. Give it to cleanup_list()
    """
    # Case folding only when the caller has not lowercased already
    lowered = string if already_lowercase else string.lower()
    tokens = tokenize(lowered)
    return cleanup_list(tokens, already_lowercase=True)
def fscore(mesh1,
           mesh2,
           sample_count=100000,
           tau=1e-04,
           points1=None,
           points2=None):
    """Computes the F-Score at tau between two meshes."""
    points1, points2 = get_points(mesh1, mesh2, points1, points2, sample_count)
    # nearest-neighbor distances in both directions (precision and recall sides)
    forward_dist, _ = pointcloud_neighbor_distances_indices(points1, points2)
    backward_dist, _ = pointcloud_neighbor_distances_indices(points2, points1)
    return f_score(forward_dist, backward_dist, tau)
def jsonp_ize(dep):
    """Parse dep as :term:`jsonp` (unless it has been modified with ``jsonp=False``).

    Only string values containing "/" are passed to ``modify``; everything
    else is returned unchanged. The str check now runs *before* the
    membership test, so non-container inputs (e.g. ints) no longer raise
    TypeError on ``"/" in dep``.
    """
    if isinstance(dep, str) and "/" in dep:
        return modify(dep)
    return dep
import re
def is_guid(techfin_tenant):
    """Validate guid arg

    Args:
        techfin_tenant (str): techfin tenant id

    Returns:
        bool: True if the value is a valid guid
    """
    pattern = re.compile(
        '[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}',
        re.I)
    # fullmatch (rather than match) rejects trailing garbage after a valid
    # guid; bool() returns the documented type instead of Match/None
    return bool(pattern.fullmatch(techfin_tenant))
from typing import Callable
def register(name: ServiceKey, version: ServiceVersion) -> Callable:
    """Maps a service with a callable

    Basically "glues" the implementation to a function node.

    Raises:
        ValueError: if (name, version) is not declared in the catalog,
            or if an implementation was already registered for it.
    """
    key: _ServiceKeyVersionPair = (name, version)
    # membership tests go straight against the dicts (no needless .keys())
    if key not in FUNCTION_SERVICES_CATALOG:
        raise ValueError(
            f"No definition of {key} found in the {len(FUNCTION_SERVICES_CATALOG)=}"
        )

    if key in FUNCTION_SERVICE_TO_CALLABLE:
        raise ValueError(f"{key} is already registered")

    def _decorator(func: Callable):
        # TODO: ensure inputs/outputs map function signature
        FUNCTION_SERVICE_TO_CALLABLE[key] = func

        # TODO: wrapper(args,kwargs): extract schemas for inputs and use them to validate inputs
        # before running
        return func

    return _decorator
import re
def get_pd_metrics(data):
    """Function for create physical disks metrics files."""
    create_dir(LLD_METRICS_PATH)
    for ctrl, ctrl_value in data.items():
        if not isinstance(ctrl_value, dict):
            continue
        for ar_key, ar_value in ctrl_value.items():
            ar_match = re.search(ARRAY_NAME_PATT, ar_key)
            if not ar_match:
                continue
            ar_name = ar_match.groupdict()['array_name']
            for pd_key, pd_value in ar_value.items():
                pd_match = re.search(PD_NAME_PATT, pd_key)
                if not pd_match:
                    continue
                pd_name = pd_match.groupdict()['pd_name']
                # one metrics file per controller/array/disk triple
                file_name = clean_name(f"{ctrl}__{ar_name}__{pd_name}")
                full_file_name = join(LLD_METRICS_PATH, file_name)
                with open(full_file_name, 'w') as fl:
                    if isinstance(pd_value, dict):
                        for metric, value in pd_value.items():
                            if isinstance(value, str):
                                print(f"{metric}={value}", file=fl)
    return data
def svn_stream_read(*args):
    """svn_stream_read(svn_stream_t stream, char buffer) -> svn_error_t"""
    # thin pass-through to the native SWIG binding
    result = _core.svn_stream_read(*args)
    return result
import random
def create_pipes(pipes, pipe_assets):
    """
    Creates the pipes. Generates them randomly and appends them to the pipe list.

    Args:
        pipes(list): A list containing the pipe rects
        pipe_assets(list): A list containing the pipe images images (rotated and non-rotated)
    Returns:
        pipes(list): A list containing the pipe rects
    """
    pipe_image = pipe_assets[0]  # non-rotated pipe image
    gap_top = random.randint(400, SIZE[1] - 100)  # random vertical placement
    pipe_x = SIZE[0] + 50  # spawn just off the right edge of the screen
    bottom_rect = pipe_image.get_rect()
    bottom_rect.midtop = (pipe_x, gap_top)
    top_rect = pipe_image.get_rect()
    top_rect.midbottom = (pipe_x, gap_top - 250)  # 250 px gap between the pair
    # score flag starts False until the bird passes this pipe pair
    pipes.append([[bottom_rect, top_rect], False])
    return pipes
def shell_short(unordered, ordered):
    """Return the names that must be pulled off the stack to sort it.

    Working from the bottom of the stack up: a name already in its final
    position stays; any other name is pulled out (shifting the rest down).
    The pulled names, sorted into their target order, are the result.
    """
    bottom_up = unordered[::-1]
    target = ordered[::-1]
    # map each name to its position in the (reversed) target ordering
    rank = {name: pos for pos, name in enumerate(target)}
    # the stack expressed as target positions instead of names
    stack = [rank[name] for name in bottom_up]
    removed = []
    for pos, entry in enumerate(stack):
        # account for entries already removed below this one
        if entry != pos - len(removed):
            removed.append(entry)
    return [target[idx] for idx in sorted(removed)]
def render_latest_entries_links(context, request=None):
    """
    Renders the links to the 5 latest entries.
    """
    latest = NewsEntry.objects.published()[:5]
    context.update({'entries': latest})
    return context
def __standard_cand_fun(candidates):
    """
    Convert candidates from the forms accepted by :py:fun:`recommend` into
    a standard form, a function that takes a user and returns a candidate
    list.
    """
    if candidates is None:
        # no candidates supplied: every user maps to None
        return lambda u: None
    if isinstance(candidates, dict):
        # per-user candidate lists: dict lookup (missing users -> None)
        return candidates.get
    # already a callable: pass through unchanged
    return candidates
import subprocess
def run_pwsh(code):
    """Run the given powershell code and return its decoded combined output.

    :param code: powershell code to run
    //TODO 16: add creation flags to make hidden, but still get stdout.
    """
    completed = subprocess.run(
        ['powershell', code],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True,
    )
    return completed.stdout.decode()
def diff(obj1, obj2):
    """Prints a simple human-readable difference between two Python objects."""
    lines = []
    # diffAny appends the per-item differences into `lines`
    diffAny(obj1, obj2, lines)
    return '\n'.join(lines)
import torch
def get_static_features(
    inputs,
    num_windows,
    stream_sizes=None,
    has_dynamic_features=None,
    streams=None,
):
    """Get static features from static+dynamic features.

    Args:
        inputs: (B, T, D) tensor of concatenated per-stream features.
        num_windows: number of delta windows; a dynamic stream of size S
            carries S // num_windows static coefficients at its front.
        stream_sizes: per-stream feature sizes (default: [180, 3, 1, 15]).
        has_dynamic_features: per-stream dynamic flags
            (default: [True, True, False, True]).
        streams: per-stream enable flags (default: all enabled).

    Returns:
        (B, T, D') tensor containing only the static coefficients of the
        enabled streams, concatenated on the last axis.
    """
    if stream_sizes is None:
        stream_sizes = [180, 3, 1, 15]
    if has_dynamic_features is None:
        has_dynamic_features = [True, True, False, True]
    if streams is None:
        streams = [True] * len(stream_sizes)
    _, _, D = inputs.shape

    # Single-stream shortcuts. (The old code re-checked `stream_sizes is
    # None` here, which is dead after the defaulting above.)
    if len(stream_sizes) == 1:
        if has_dynamic_features[0]:
            return inputs[:, :, : D // num_windows]
        return inputs

    # Multi stream case: slice each enabled stream's static part.
    ret = []
    start_indices = np.hstack(([0], np.cumsum(stream_sizes)[:-1]))
    for start_idx, size, is_dynamic, enabled in zip(
        start_indices, stream_sizes, has_dynamic_features, streams
    ):
        if not enabled:
            continue
        if is_dynamic:
            # dynamic stream: static coefficients occupy the first
            # size // num_windows channels of the stream
            ret.append(inputs[:, :, start_idx : start_idx + size // num_windows])
        else:
            # static-only stream: take the whole stream
            ret.append(inputs[:, :, start_idx : start_idx + size])
    return torch.cat(ret, dim=-1)
def _l4_parameterize_(l4, tree, xvar, yvar, zvar, uvar,
                      cut='', first=0, last=_large):
    """Parameterize a 4D unbinned distribution from a TTree as a Legendre sum.

    >>> l = LegendreSum4 ( 5 , 3 , 2 , 2 , -1.0 , 1.0 , 0.0 , 1.0 , 0.0 , 5.0 , 0.0 , 1.0 )
    >>> tree = ...
    >>> l.parameterize ( tree , 'X' , 'Y' , 'Z' , 'U' , 'q>0' )
    """
    # delegate to the C++ helper; the cut is stringified for the native call
    return Ostap.DataParam.parameterize(tree, l4,
                                        xvar, yvar, zvar, uvar,
                                        str(cut), first, last)
import textwrap
def welcome():
    """
    Welcomes the player, brief intro to the game, and returns hand size as integer
    ranging from 4 to 8 (the old docstring said 4 to 9, contradicting the
    validation below)
    """
    version = '0.1'

    m01 = "Welcome to the MIT 6.00.1x Wordgame (etherwar's mod)"
    m02 = "Build " + version
    m03 = "The Game: "
    m04 = "First, you must select a word length. Word of 4 characters to 8 characters long are supported."
    m05 = "A word of that length will then be scrambled and presented for you to guess. You may guess any " + \
          "word that you can make with any number of the letters given to you. Words are scored using Scrabble(tm)" + \
          "letter values. You get a bonus of 50 points for using all the letters remaining in your hand"
    m06 = "We give you the option to play the game yourself, or to let the computer play the hand."
    m07 = "We recommend that you play the game as yourself first, then enter 'r' to see how your " + \
          "play stacks up against the computer!"
    m08 = "Let's get started!"
    nL = "\n\n"

    defaultWidth = 70
    wrapper = textwrap.TextWrapper(width=defaultWidth)

    print("{:^80}".format(textwrap.fill(m01, defaultWidth)), end=nL)
    print("{:>80}".format(textwrap.fill(m02, defaultWidth)), end=nL)
    print(wrapper.fill(m03), end=nL)
    print(wrapper.fill(m04), end=nL)
    print(wrapper.fill(m05), end=nL)
    print(wrapper.fill(m06), end=nL)
    print(wrapper.fill(m07), end=nL)
    print(wrapper.fill(m08), end=nL)

    try:
        # seed with an out-of-range value so the prompt loop runs at least once
        userInput = 3
        while int(userInput) < 4 or int(userInput) > 8:
            print()
            userInput = input("How many letters would you like to be dealt? (4-8): ")
            if 4 <= int(userInput) <= 8:
                HAND_SIZE = int(userInput)
            else:
                print("Invalid number. Please select a number from 4 to 8.")
    except (ValueError, EOFError):
        # narrowed from a bare `except:` so KeyboardInterrupt still exits;
        # non-numeric input (int() -> ValueError) or closed stdin falls
        # back to the default hand size
        HAND_SIZE = 6
        print("Invalid input. Default hand size selected (6).")
    return HAND_SIZE
def look_up_annotation_set(p_load_list, p_type=''):
    """
    Looks up a set of annotations in the database and finds the Ids of nodes containing SWISSPROT
    proteins linked to by annotations

    :param p_load_list: list of payloads (annotation tags) to resolve
    :param p_type: expected type of payloads
    :return: list of tags for which nodes were not found, list of tags to node property values,
     tags to db ids of the first match (preferentially uniprots)
    """
    def db_id_mapping_helper(mapped_db_id_list):
        # first payload wins; transform_annotation_nodes below inserts UNIPROT
        # entries at the front, so uniprot matches are preferred. Index 2 of
        # the payload tuple is the node's db id; '' marks "no match".
        if mapped_db_id_list:
            return mapped_db_id_list[0][2]
        else:
            return ''
    def transform_annotation_nodes(neo4j_native_nodes):
        # flatten neo4j nodes into (type, display name, db id, legacy id)
        # tuples, keeping UNIPROT nodes at the head of the list
        retlist = []
        for node in neo4j_native_nodes:
            node_bulbs_id = node.id
            node_legacy_id = node['legacyID']
            # NOTE(review): assumes each node carries exactly one label — confirm
            node_type = list(node.labels)[0]
            node_display_name = node['displayName']
            payload = (node_type, node_display_name, node_bulbs_id, node_legacy_id)
            if node_type == 'UNIPROT':
                retlist.insert(0, payload)
            else:
                retlist.append(payload)
        return retlist
    # resolve all tags in a single batch call and pair each tag with its matches
    load_2_name_list = [(p_load, transform_annotation_nodes(p_nodes))
                        for (p_load, p_nodes) in
                        zip(p_load_list,
                            DatabaseGraph.batch_retrieve_from_annotation_tags(p_load_list,
                                                                              p_type))]
    db_id_list = [db_id_mapping_helper(value) for key, value in load_2_name_list]
    not_found_list = [key for key, value in load_2_name_list if value == []]
    log.debug('%s IDs out of %s have not been found', len(not_found_list), len(p_load_list))
    log.debug('IDs of missing proteins: %s', not_found_list)
    return not_found_list, load_2_name_list, db_id_list
def check():
    """Return the (datname, age) row of the database with the oldest frozen xid."""
    # `icursor` is a module-level database cursor
    icursor.execute("""
    SELECT datname, age(datfrozenxid) FROM pg_database
    ORDER by age DESC LIMIT 1
    """)
    return icursor.fetchone()
import random
def sample_multi_ellipsoid(key, mu, radii, rotation, unit_cube_constraint=True):
    """
    Sample from a set of overlapping ellipsoids by rejection: a candidate
    drawn from one ellipsoid is accepted with probability 1/n_intersect,
    where n_intersect is the number of ellipsoids containing it, so points
    in overlap regions are not oversampled.
    When unit_cube_constraint=True then during the sampling when a random radius is chosen, the radius is constrained.

    u(t) = R @ (x + t * n) + c
    u(t) == 1
    1-c = R@x + t * R@n
    t = ((1 - c) - R@x)/R@n
    Args:
        key: PRNG key for the sampler (presumably a jax.random key — TODO confirm)
        mu: [K, D] ellipsoid centers
        radii: [K, D] per-axis radii
        rotation: [K,D,D] rotation matrices
    Returns: tuple (k, point): index of the ellipsoid the accepted point was
        drawn from, and the point uniformly sampled from the set of ellipsoids [D]
    """
    K, D = radii.shape
    # selection weights: log-volume of each ellipsoid (unnormalised;
    # categorical() only needs relative log-probabilities)
    log_VE = vmap(log_ellipsoid_volume)(radii)
    log_p = log_VE  # - logsumexp(log_VE)
    if unit_cube_constraint:
        # exclude ellipsoids whose center lies outside the unit cube
        center_in_unit_cube = vmap(lambda mu: jnp.all(mu < 1.) & jnp.all(mu > 0.))(mu)
        log_p = jnp.where(center_in_unit_cube, log_p, -jnp.inf)
    # print(log_p)
    def body(state):
        # one rejection-sampling attempt; state = (iteration, k, key, done, point)
        (i, _, key, done, _) = state
        key, accept_key, sample_key, select_key = random.split(key, 4)
        # pick an ellipsoid with probability proportional to its volume
        k = random.categorical(select_key, log_p)
        mu_k = mu[k, :]
        radii_k = radii[k, :]
        rotation_k = rotation[k, :, :]
        u_test = sample_ellipsoid(sample_key, mu_k, radii_k, rotation_k, unit_cube_constraint=unit_cube_constraint)
        # count how many ellipsoids contain the candidate point
        inside = vmap(lambda mu, radii, rotation: point_in_ellipsoid(u_test, mu, radii, rotation))(mu, radii, rotation)
        n_intersect = jnp.sum(inside)
        # accept with probability 1/n_intersect to correct for overlaps
        done = (random.uniform(accept_key) < jnp.reciprocal(n_intersect))
        return (i + 1, k, key, done, u_test)
    # loop until an acceptance (state[3] is the `done` flag)
    _, k, _, _, u_accept = while_loop(lambda state: ~state[3],
                                      body,
                                      (jnp.array(0), jnp.array(0), key, jnp.array(False), jnp.zeros(D)))
    return k, u_accept
def auto_dataset(dataset, label) -> pd.DataFrame:
    """Prepares a dataset object."""
    semantics = infer_semantic_from_dataframe(dataset)

    def columns_with(semantic):
        # all columns of the given semantic, excluding the label column
        return [col for col, sem in semantics.items()
                if sem == semantic and col != label]

    categorical_features = columns_with(Semantic.CATEGORICAL)
    numerical_features = columns_with(Semantic.NUMERICAL)

    # fill missing categorical values with the empty string, then keep
    # only the feature columns plus the label
    for key in categorical_features:
        dataset[key] = dataset[key].fillna("")
    return dataset[categorical_features + numerical_features + [label]]
def chuz_top(input):
    """Return the index of the maximum value along axis 1.

    :type input: ndarray
    """
    top_indices = K.argmax(input, 1)
    return top_indices
import limix_legacy.modules.varianceDecomposition as VAR
import time
def _estimateKronCovariances(
    phenos,
    K1r=None,
    K1c=None,
    K2r=None,
    K2c=None,
    covs=None,
    Acovs=None,
    trait_covar_type="freeform",
    rank=1,
    lambd=None,
    verbose=True,
    init_method="random",
    old_opt=True,
):
    """
    estimates the background covariance model before testing

    Args:
        phenos: [N x P] np.array of P phenotypes for N individuals
        K1r:    [N x N] np.array of LMM-covariance/kinship koefficients (optional)
                If not provided, then linear regression analysis is performed
        K1c:    [P x P] np.array of LMM-covariance/kinship koefficients (optional)
                If not provided, then linear regression analysis is performed
        K2r:    [N x N] np.array of LMM-covariance/kinship koefficients (optional)
                If not provided, then linear regression analysis is performed
        K2c:    [P x P] np.array of LMM-covariance/kinship koefficients (optional)
                If not provided, then linear regression analysis is performed
        covs:           list of np.arrays holding covariates. Each covs[i] has one corresponding Acovs[i]
        Acovs:          list of np.arrays holding the phenotype design matrices for covariates.
                        Each covs[i] has one corresponding Acovs[i].
        trait_covar_type:     type of covaraince to use. Default 'freeform'. possible values are
                        'freeform':  free form optimization,
                        'fixed': use a fixed matrix specified in covar_K0,
                        'diag': optimize a diagonal matrix,
                        'lowrank': optimize a low rank matrix. The rank of the lowrank part is specified in the variable rank,
                        'lowrank_id': optimize a low rank matrix plus the weight of a constant diagonal matrix. The rank of the lowrank part is specified in the variable rank,
                        'lowrank_diag': optimize a low rank matrix plus a free diagonal matrix. The rank of the lowrank part is specified in the variable rank,
                        'block': optimize the weight of a constant P x P block matrix of ones,
                        'block_id': optimize the weight of a constant P x P block matrix of ones plus the weight of a constant diagonal matrix,
                        'block_diag': optimize the weight of a constant P x P block matrix of ones plus a free diagonal matrix,
        rank:           rank of a possible lowrank component (default 1)

    Returns:
        VarianceDecomposition object
    """
    # from . import varianceDecomposition as VAR
    if verbose:
        print(".. Estimating trait covariances")
    # container for the random- and fixed-effect model
    vc = VAR.VarianceDecomposition(phenos)
    # NOTE(review): the trait_covar_type argument is documented above, but the
    # calls below hard-code "freeform" — confirm whether this is intended
    if K1r is not None:
        vc.addRandomEffect(K1r, trait_covar_type="freeform", rank=rank)
    if K2r is not None:
        # TODO: fix this; forces second term to be the noise covariance
        vc.addRandomEffect(is_noise=True, K=K2r, trait_covar_type="freeform", rank=rank)
    # one fixed effect per covariate/design-matrix pair
    for ic in range(len(Acovs)):
        vc.addFixedEffect(covs[ic], Acovs[ic])
    # time the optimization so it can be reported below
    start = time.time()
    if old_opt:
        # legacy fast optimizer path
        conv = vc.optimize(fast=True)
    elif lambd is not None:
        conv = vc.optimize(init_method=init_method, verbose=verbose, lambd=lambd)
    else:
        conv = vc.optimize(init_method=init_method, verbose=verbose)
    assert conv, "Variance Decomposition has not converged"
    time_el = time.time() - start
    if verbose:
        print(("Done in %.2f s" % time_el))
    return vc
def dBrickId(brickId):
    """Return box id if valid, raise an exception in other case"""
    # valid brick ids live in the inclusive range 0..15
    if not 0 <= brickId <= 15:
        raise ValueError(
            '{} is not a valid Brick Id, Brick Id must be between 0-15'.format(
                brickId))
    return brickId
def vibrational_density_state(path_to_mass_weighted_hessian: str, eps_o: float = 3e12, nq: int = 2e4):
    """Compute the vibrational density of states from a Hessian matrix file.

    :arg
        path_to_mass_weighted_hessian: str
            Path to the mass-weighted Hessian file (text, np.loadtxt format)
        eps_o: float
            Gaussian bandwidth — small eps gives a noisy vDoS, large eps an unrealistic one
        nq: int
            Sampling grid size
    :returns
        hessian_matrix: np.ndarray
            The symmetrized Hessian matrix
        frq: np.ndarray
            Sorted frequencies [rad/sec], shape (1, N)
        density_state: np.ndarray
            Vibrational density of states, shape (1, nq)
        omg: np.ndarray
            Frequency sampling points of "density_state", shape (1, nq)
    """
    raw = np.loadtxt(path_to_mass_weighted_hessian, delimiter=None)
    # symmetrize: average the two triangles, then mirror the upper triangle
    half = (np.triu(raw) + np.tril(raw).T) / 2
    hessian_matrix = half + np.triu(half, 1).T
    eig_vals, _ = np.linalg.eigh(hessian_matrix)  # eigenvalues are negative for stable modes
    eig_vals = np.where(eig_vals < 0, eig_vals, 0.0)  # drop unstable (positive) modes
    frq = np.sort(np.sqrt(-1 * eig_vals))[np.newaxis]  # mode frequencies
    omg = np.linspace(np.min(frq), np.max(frq), int(nq))[np.newaxis]  # frequency grid
    # Gaussian-broadened density of states on the grid
    gaussians = np.exp(-1 * np.power(omg.T - frq, 2) / eps_o / eps_o)
    density_state = 1 / nq * np.sum(1 / np.sqrt(np.pi) / eps_o * gaussians,
                                    axis=1)[np.newaxis]
    return hessian_matrix, frq, density_state, omg
import hashlib
def hash(text, digest_alg = 'md5'):
    """
    Generates hash with the given text using the specified
    digest hashing algorithm.

    ``digest_alg`` may be an algorithm name (passed to ``hashlib.new``)
    or a hash constructor such as ``hashlib.sha256``.
    """
    if isinstance(text, str):
        # hashlib only accepts bytes on Python 3; encode str input
        text = text.encode('utf-8')
    if not isinstance(digest_alg, str):
        h = digest_alg(text)
    else:
        h = hashlib.new(digest_alg)
        h.update(text)
    return h.hexdigest()
def box2d_iou(box1, box2):
    """Compute 2D bounding box IoU.

    Input:
        box1: tuple of (xmin,ymin,xmax,ymax)
        box2: tuple of (xmin,ymin,xmax,ymax)
    Output:
        iou: 2D IoU scalar
    """
    # get_iou expects corner-keyed dicts
    keys = ("x1", "y1", "x2", "y2")
    return get_iou(dict(zip(keys, box1)), dict(zip(keys, box2)))
from lxml import etree
from . import mavgen_python
from . import mavgen_c
from . import mavgen_wlua
from . import mavgen_cs
from . import mavgen_javascript
from . import mavgen_objc
from . import mavgen_swift
from . import mavgen_java
from . import mavgen_cpp11
import sys
import re
import os
def mavgen(opts, args):
    """Generate mavlink message formatters and parsers (C and Python ) using options
    and args where args are a list of xml files. This function allows python
    scripts under Windows to control mavgen using the same interface as
    shell scripts under Unix.

    Returns True on success (including the "unsupported language" case,
    which only prints a warning); exits the process on duplicate messages.
    """
    xml = []

    # Enable validation by default, disabling it if explicitly requested
    if opts.validate:
        try:
            with open(schemaFile, 'r') as f:
                xmlschema_root = etree.parse(f)
                xmlschema = etree.XMLSchema(xmlschema_root)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any schema-loading failure disables validation
            print("WARNING: Unable to load XML validator libraries. XML validation will not be performed", file=sys.stderr)
            opts.validate = False

    def mavgen_validate(xmlfile):
        """Uses lxml to validate an XML file. We define mavgen_validate
           here because it relies on the XML libs that were loaded in mavgen(), so it can't be called standalone"""
        xmlvalid = True
        try:
            with open(xmlfile, 'r') as f:
                xmldocument = etree.parse(f)
                xmlschema.assertValid(xmldocument)
                forbidden_names_re = re.compile("^(break$|case$|class$|catch$|const$|continue$|debugger$|default$|delete$|do$|else$|\
export$|extends$|finally$|for$|function$|if$|import$|in$|instanceof$|let$|new$|\
return$|super$|switch$|this$|throw$|try$|typeof$|var$|void$|while$|with$|yield$|\
enum$|await$|implements$|package$|protected$|static$|interface$|private$|public$|\
abstract$|boolean$|byte$|char$|double$|final$|float$|goto$|int$|long$|native$|\
short$|synchronized$|transient$|volatile$).*", re.IGNORECASE)
                for element in xmldocument.iter('enum', 'entry', 'message', 'field'):
                    if forbidden_names_re.search(element.get('name')):
                        print("Validation error:", file=sys.stderr)
                        print("Element : %s at line : %s contains forbidden word" % (element.tag, element.sourceline), file=sys.stderr)
                        xmlvalid = False

                return xmlvalid
        except etree.XMLSchemaError:
            return False

    # Process all XML files, validating them as necessary.
    for fname in args:
        if opts.validate:
            print("Validating %s" % fname)
            if not mavgen_validate(fname):
                return False
        else:
            print("Validation skipped for %s." % fname)

        print("Parsing %s" % fname)
        xml.append(mavparse.MAVXML(fname, opts.wire_protocol))

    # expand includes
    for x in xml[:]:
        for i in x.include:
            fname = os.path.join(os.path.dirname(x.filename), i)

            # Validate XML file with XSD file if possible.
            if opts.validate:
                print("Validating %s" % fname)
                if not mavgen_validate(fname):
                    return False
            else:
                print("Validation skipped for %s." % fname)

            # Parsing
            print("Parsing %s" % fname)
            xml.append(mavparse.MAVXML(fname, opts.wire_protocol))

            # include message lengths and CRCs too
            x.message_crcs.update(xml[-1].message_crcs)
            x.message_lengths.update(xml[-1].message_lengths)
            x.message_min_lengths.update(xml[-1].message_min_lengths)
            x.message_flags.update(xml[-1].message_flags)
            x.message_target_system_ofs.update(xml[-1].message_target_system_ofs)
            x.message_target_component_ofs.update(xml[-1].message_target_component_ofs)
            x.message_names.update(xml[-1].message_names)
            x.largest_payload = max(x.largest_payload, xml[-1].largest_payload)

    # work out max payload size across all includes
    largest_payload = max(x.largest_payload for x in xml) if xml else 0
    for x in xml:
        x.largest_payload = largest_payload

    if mavparse.check_duplicates(xml):
        sys.exit(1)

    print("Found %u MAVLink message types in %u XML files" % (
        mavparse.total_msgs(xml), len(xml)))

    # Convert language option to lowercase and validate
    opts.language = opts.language.lower()
    # dispatch table replaces the old if/elif chain over the same languages
    generators = {
        'python': mavgen_python.generate,
        'c': mavgen_c.generate,
        'wlua': mavgen_wlua.generate,
        'cs': mavgen_cs.generate,
        'javascript': mavgen_javascript.generate,
        'objc': mavgen_objc.generate,
        'swift': mavgen_swift.generate,
        'java': mavgen_java.generate,
        'c++11': mavgen_cpp11.generate,
    }
    if opts.language in generators:
        generators[opts.language](opts.output, xml)
    else:
        print("Unsupported language %s" % opts.language)

    return True
def merge_params(params, config):
    """Merge CLI params with configuration file params. Configuration params
    will overwrite the CLI params.
    """
    merged = dict(params)
    merged.update(config)
    return merged
def init_rate():
    """ This rate indicates the recorded positions' intervals. """
    return 0.1  # (Hz)
import time
import json
import base64
import requests
def doge_response(event, context):
    """Background Cloud Function triggered by Pub/Sub.

    Takes the image URL passed in a Slack request and dogeifies it by
    overlaying text generated by Cloud Vision.

    Args:
        event (dict): The data associated with the Pub/Sub event.
        context (google.cloud.functions.Context): The metadata for the Cloud Function.

    Returns:
        A response message to the Slack channel containing the dogeified image,
        or ``None`` when dogeification failed (the error is reported to Slack).
    """
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    slack_request = json.loads(base64.b64decode(event["data"]).decode("utf-8"))
    try:
        dogeify_image(get_labels(slack_request), slack_request)
    except DogeException as exc:
        # Surface the failure in the Slack channel instead of crashing silently.
        requests.post(
            slack_request["response_url"],
            json={"text": "❌ " + str(exc) + " ❌"},
            headers={"Content-type": "application/json"},
        )
        return None
    upload_blob(timestamp)
    return send_message(timestamp, slack_request["response_url"])
def get_gamma(y1, y2, gamma1, gamma2):
    """Shading-effect coefficient (-) for general parts and for door-like
    openings that are not mostly made of glass.

    Args:
        y1 (float): Vertical distance (mm) from the bottom edge of the shade
            to the top of the opening.
        y2 (float): Height (mm) of the opening.
        gamma1 (float): Value computed from the "shading effect coefficient" data.
        gamma2 (float): Value computed from the "shading effect coefficient" data.

    Returns:
        float: Shading-effect coefficient of the opening.
    """
    # Difference of the two coefficients, weighted over the opening height.
    return (gamma2 * (y1 + y2) - gamma1 * y1) / y2
import io
# NOTE(review): this appears to be a project-local pprint module (it exposes
# sep/msg/item/blank), not the stdlib pprint — confirm.
import pprint
def show(files, repo):
    """Show the commit dialog.

    Writes a commit-message template file, opens it in the user's editor,
    and returns the message the user entered.

    Args:
      files: files for pre-populating the dialog.
      repo: the repository.

    Returns:
      The commit msg.
    """
    if IS_PY2:
        # wb because we use pprint to write
        cf = io.open(_commit_file(repo), mode='wb')
    else:
        cf = io.open(_commit_file(repo), mode='w', encoding=ENCODING)
    curr_b = repo.current_branch
    # When a merge/fuse is underway, seed the dialog with the stored merge
    # message so the user can edit rather than retype it.
    if curr_b.merge_in_progress or curr_b.fuse_in_progress:
        merge_msg = io.open(
            _merge_msg_file(repo), mode='r', encoding=ENCODING).read()
        cf.write(merge_msg)
        cf.write('\n')
    # Instructional footer: separator, usage hint, then the staged file list.
    pprint.sep(stream=cf.write)
    pprint.msg(
        'Please enter the commit message for your changes above, an empty '
        'message aborts', stream=cf.write)
    pprint.msg('the commit.', stream=cf.write)
    pprint.blank(stream=cf.write)
    pprint.msg(
        'These are the files whose changes will be committed:', stream=cf.write)
    for f in files:
        pprint.item(f, stream=cf.write)
    pprint.sep(stream=cf.write)
    cf.close()
    # Block until the user closes the editor, then parse the message back out.
    _launch_editor(cf.name, repo)
    return _extract_msg(repo)
def download_clip_wrapper(row, output_filename):
    """Download a single clip; thin wrapper for parallel processing.

    Args:
        row: mapping with 'video-id', 'start-time' and 'end-time' keys.
        output_filename: path where the downloaded clip is written.

    Returns:
        tuple: ``(str(downloaded), output_filename, log)`` describing the outcome.
    """
    downloaded, log = download_clip(row['video-id'], output_filename,
                                    row['start-time'], row['end-time'])
    # Return a plain tuple literal; no need for tuple([...]) around a list.
    return (str(downloaded), output_filename, log)
def staff_user_id():
    """Create a staff user via the factory and return its primary key."""
    return factories.UserStaff().pk
def shift_combine(images, offsets, stat='mean', extend=False):
    """
    Statistics on image stack each being offset by some xy-distance

    Parameters
    ----------
    images : array-like
        Stack of 2-d frames, first axis indexing the frame. May be a masked
        array, in which case masked pixels are excluded from the statistic.
    offsets : array-like
        Per-frame (y, x) shifts. May be masked (e.g. no stars detected in
        that frame); frames with masked offsets are dropped from the stack.
    stat : str or callable
        Statistic forwarded to `binned_statistic_2d` (default 'mean').
    extend : bool
        If True, the output covers the maximal area spanned by all shifted
        frames (may be larger than the input frames); otherwise the output
        matches the input frame size.

    Returns
    -------
    numpy.ma.MaskedArray
        Combined image; pixels (bins) that received no samples are masked.
    """
    # convert to (masked) array
    images = np.asanyarray(images)
    offsets = np.asanyarray(offsets)
    # it can happen that `offsets` is masked (no stars in that frame)
    if np.ma.is_masked(offsets):
        # ignore images for which xy offsets are masked
        bad = offsets.mask.any(1)
        good = ~bad
        logger.info(f'Removing {bad.sum()} images from stack due to null '
                    f'detection')
        images = images[good]
        offsets = offsets[good]
    # get pixel grid ignoring masked elements
    # shifted coordinate grid: one (y, x) grid per frame, offset applied
    shape = sy, sx = images.shape[1:]
    grid = np.indices(shape)
    gg = grid[:, None] - offsets[None, None].T
    if np.ma.is_masked(images):
        # keep only coordinates/values of unmasked pixels
        y, x = gg[:, ~images.mask]
        sample = images.compressed()
    else:
        y, x = gg.reshape(2, -1)
        sample = images.ravel()
    # use maximal area coverage. returned image may be larger than input images
    if extend:
        y0, x0 = np.floor(offsets.min(0))
        y1, x1 = np.ceil(offsets.max(0)) + shape + 1
    else:
        # returned image same size as original
        y0 = x0 = 0
        y1, x1 = np.add(shape, 1)
    # compute statistic
    # bin edges at half-integer positions so each bin is centred on a pixel
    yb, xb = np.ogrid[y0:y1, x0:x1]
    bin_edges = (yb.ravel() - 0.5, xb.ravel() - 0.5)
    results = binned_statistic_2d(y, x, sample, stat, bin_edges)
    image = results.statistic
    # mask nans (empty bins (pixels))
    # note: avoid downstream warnings by replacing np.nan with zeros and masking
    # todo: make optional
    nans = np.isnan(image)
    image[nans] = 0
    return np.ma.MaskedArray(image, nans)
from .wrapper import tf2onnx, tf2onnx_builtin_conversion
import logging
def convert_tensorflow(frozen_graph_def,
                       name=None, input_names=None, output_names=None,
                       doc_string='',
                       target_opset=None,
                       channel_first_inputs=None,
                       debug_mode=False, custom_op_conversions=None):
    """
    convert a tensorflow graph def into a ONNX model proto, just like how keras does.
    :param frozen_graph_def: the frozen tensorflow graph
    :param name: the converted onnx model internal name
    :param input_names: the inputs name list of the model
    :param output_names: the output name list of the model
    :param doc_string: doc string
    :param target_opset: the targeted onnx model opset
    :param channel_first_inputs: A list of channel first input (not supported yet)
    :param debug_mode: will enable the log and try to convert as much as possible on conversion
    :param custom_op_conversions: extra custom-op conversion handlers, merged over the built-in ones
    :return an ONNX ModelProto
    """
    set_logger_level(logging.DEBUG if debug_mode else logging.INFO)
    if target_opset is None:
        target_opset = get_opset_number_from_onnx()
    if not doc_string:
        doc_string = "converted from {}".format(name)
    # Run tf2onnx's graph-level optimizations before import.
    tf_graph_def = tf2onnx.tfonnx.tf_optimize(input_names, output_names, frozen_graph_def, True)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(tf_graph_def, name='')
        # Caller-supplied handlers override the built-in conversions on key clash.
        custom_op_handlers = tf2onnx_builtin_conversion(target_opset)
        if custom_op_conversions:
            custom_op_handlers.update(custom_op_conversions)
        with tf.Session(graph=tf_graph):
            if not input_names:
                # No explicit inputs given: infer them from the graph's output nodes.
                input_nodes = list(_collect_input_nodes(tf_graph, output_names)[0])
                input_names = [nd_.outputs[0].name for nd_ in input_nodes]
            g = tf2onnx.tfonnx.process_tf_graph(tf_graph,
                                                continue_on_error=debug_mode,
                                                opset=target_opset,
                                                custom_op_handlers=custom_op_handlers,
                                                inputs_as_nchw=channel_first_inputs,
                                                output_names=output_names,
                                                input_names=input_names)
            onnx_graph = tf2onnx.optimizer.optimize_graph(g)
            model_proto = onnx_graph.make_model(doc_string)
    return model_proto
def isNumber(s):
    """Return True if *s* can be cast to a float, False otherwise.

    Also returns False (instead of raising TypeError) for values that
    float() cannot accept at all, such as None or a list.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
def tasks(tmpdir):
    """
    Set up a project with some tasks that we can test displaying
    """
    todo_lines = [
        "",
        " ^ this is the first task (released)",
        "   and it has a second line",
        " > this is the second task (committed)",
        " . this is the third task (changed, not yet committed)",
        " - this is the fourth task (not yet made)",
        "   and this one has a second line also",
        " + fifth task -- completed",
        " < sixth task -- moved elsewhere",
        " x seventh task -- abandoned",
        "",
    ]
    # project directory containing an empty .project marker file
    project_dir = tmpdir.join("myproj")
    project_dir.join(".project").ensure()
    # DODO file pre-populated with the task list, one task state per line
    dodo_file = project_dir.join("DODO")
    dodo_file.write("\n".join(todo_lines) + "\n")
    return {
        'tmpdir': tmpdir,
        'prj': project_dir,
        'dodo': dodo_file,
    }
def user_login(username, password):
    """
    User login API (login via mobile phone number).

    Args:
        username: user account (mobile phone number)
        password: plain-text password (MD5-hashed before sending)
    Returns:
        result: a json obj of user data, or -1 on failure
    """
    base_url = 'https://music.163.com/weapi/login/cellphone'
    # NOTE(review): login_url is defined but never used — confirm whether it
    # can be removed or was meant for a different login path.
    login_url = 'https://music.163.com/weapi/login/'
    # The API expects the MD5 digest of the password, not the plain text.
    password = hashlib.md5(password).hexdigest()
    text = {
        'phone': username,
        'password': password,
        'rememberLogin': 'true'
    }
    # Encrypt the payload the way the web client does before POSTing.
    data = encrypted_request(text)
    # s = requests.session()
    # s.headers = header
    try:
        # Route through a random proxy from the pool to spread requests.
        res = requests.post(base_url, data=data, headers=header, proxies=random.choice(proxylist)).content
        result = json.loads(res)
        return result
    # NOTE(review): Python 2 `except ..., e` syntax — this function is Python 2 only.
    except Exception, e:
        data_process_logger.error('%s login failed, reason = %s' % (username, e))
        return -1
import os
def get_input_source_directory(config):
    """Return the directory containing the input file(s).

    Args:
        config: configuration object whose ``commandline`` attribute carries
            the parsed options (``multilocus`` flag and ``input`` path).

    Returns:
        str: absolute directory path of the input source.
    """
    options = config.commandline
    input_path = os.path.abspath(options.input)
    if options.multilocus:
        # multilocus dataset: the input source is itself a directory
        return input_path
    # single locus dataset: strip the filename, keep its directory
    return os.path.dirname(input_path)
def validate_search_string(cls, value):
    """Escape characters in the search field that could enable XSS/SQL injection.

    ``None`` is passed through unchanged.
    """
    if value is None:
        return value
    return escape(value, quote=True)
import torch
def get_topo(logbook):
    """Return a network for the experiment and the loss function for training.

    Looks up the network class named in the logbook config inside the
    problem's module, optionally restores checkpoint/pretrained weights,
    moves the network to the available device (parallelising across GPUs
    when more than one is present), and builds the configured loss function.

    Args:
        logbook: experiment logbook carrying ``config``, ``module``,
            ``problem`` and an optional ``ckpt`` checkpoint dict.

    Returns:
        tuple: (net, net_maybe_par, device, loss_fn) where ``net_maybe_par``
        is ``net`` wrapped in ``nn.DataParallel`` when multiple GPUs exist.
    """
    # create the network
    net_config = logbook.config['indiv']['net']
    if net_config['class'] not in logbook.module.__dict__:
        raise ValueError('Network topology {} is not defined for problem {}'.format(net_config['class'], logbook.problem))
    net = getattr(logbook.module, net_config['class'])(**net_config['params'])
    # load checkpoint state or pretrained network
    if logbook.ckpt:
        net.load_state_dict(logbook.ckpt['indiv']['net'])
    elif net_config['pretrained']:
        load_pretrained(logbook, net)
    # move to proper device and, if possible, parallelize
    device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')
    net = net.to(device)
    if torch.cuda.device_count() > 1:
        net_maybe_par = nn.DataParallel(net)
    else:
        net_maybe_par = net
    # create the loss function
    # loss classes may come from torch.nn or the problem module; the problem
    # module wins on name clashes (later dict unpacking overrides).
    loss_fn_config = logbook.config['indiv']['loss_function']
    loss_fn_dict = {**nn.__dict__, **logbook.module.__dict__}
    if loss_fn_config['class'] not in loss_fn_dict:
        raise ValueError('Loss function {} is not defined.'.format(loss_fn_config['class']))
    loss_fn = loss_fn_dict[loss_fn_config['class']]
    # loss functions whose constructor takes a `net` argument get the network injected
    if 'net' in loss_fn.__init__.__code__.co_varnames:
        loss_fn = loss_fn(net, **loss_fn_config['params'])
    else:
        loss_fn = loss_fn(**loss_fn_config['params'])
    return net, net_maybe_par, device, loss_fn
def contact_length(roll_pass: RollPass, roll: Roll):
    """Contact length between rolls and stock, after Siebel's approach.

    Computed from the draught (height reduction across the pass) and the
    roll's minimum radius.
    """
    draught = roll_pass.in_profile.height - roll_pass.height
    return np.sqrt(roll.min_radius * draught - draught ** 2 / 4)
def swagger_url(self):
    """Patch for HTTPS: build the specs URL with an explicit https scheme.

    Forces ``_scheme='https'`` on the external URL — presumably so links
    stay https when the app sits behind a TLS-terminating proxy (confirm).
    """
    return url_for(self.endpoint('specs'), _external=True, _scheme='https')
def read_matrix_as_image(path):
    """Read every channel of a fusion npy matrix.

    Args
        path: Path to the .npy file on disk.

    Returns:
        The loaded array with its last (channel) axis reversed, as a
        contiguous copy.
    """
    matrix = np.load(path)
    # Reverse the channel ordering (e.g. BGR <-> RGB) and detach from the
    # memory-mapped/loaded buffer with an explicit copy.
    return matrix[:, :, ::-1].copy()
def home(request):
    """Mock home view: always responds with the plain body 'home'."""
    return HttpResponse('home')
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the bpost component."""
    # Reserve the integration's slot in hass.data without clobbering
    # anything already stored there by a config entry.
    hass.data.setdefault(DOMAIN, {})
    return True
from typing import List
def merge_subgroups(subgroup_list: List[Subgroup]) -> List[Subgroup]:
    """Post-process clusters by folding long names into their short sub-names.

    For example, ``male canadian lynx`` is merged into ``canadian lynx``.
    Subgroups whose name equals, or ends with a space plus, a shorter
    subgroup's name are merged into that shorter subgroup.
    """
    if len(subgroup_list) <= 1:
        return subgroup_list
    # Process shortest names first so they act as merge targets.
    ordered = sorted(list(subgroup_list), key=lambda sg: len(sg.name))
    absorbed = set()
    for idx, keeper in enumerate(ordered):
        if idx in absorbed:
            continue
        short_name = keeper.name
        for jdx in range(idx + 1, len(ordered)):
            if jdx in absorbed:
                continue
            candidate_name = ordered[jdx].name
            if candidate_name == short_name or candidate_name.endswith(" " + short_name):
                keeper.merge(ordered[jdx])
                absorbed.add(jdx)
    return [sg for pos, sg in enumerate(ordered) if pos not in absorbed]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.