content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from datetime import datetime
def count_time(time_logon, time_logoff):
    """Return the session duration in hours between logon and logoff.

    Args:
        time_logon: logon time as 'HH:MM:SS', e.g. '07:20:00'.
        time_logoff: logoff time as 'HH:MM:SS', e.g. '15:20:00'.

    Returns:
        float: hours online, rounded to two decimal places.
    """
    fmt = '%H:%M:%S'
    logon_dt = datetime.strptime(time_logon, fmt)
    logoff_dt = datetime.strptime(time_logoff, fmt)
    # timedelta -> fractional hours
    hours = (logoff_dt - logon_dt).total_seconds() / 3600
    return round(hours, 2)
from contextlib import suppress
def _purge_headers_cb(headers):
"""
Remove headers from the response.
Args:
headers (list): headers to remove from the response
Returns:
callable: for been used in before_record_response VCR constructor.
"""
header_list = []
for item in headers:
if not isinstance(item, tuple):
item = (item, None)
header_list.append(item[0:2]) # ensure the tuple is a pair
def before_record_response_cb(response):
"""
Purge headers from response.
Args:
response (dict): a VCR response
Returns:
dict: a VCR response
"""
for (header, value) in header_list:
with suppress(KeyError):
if value:
response['headers'][header] = value
else:
del response['headers'][header]
return response
return before_record_response_cb | 9d0c0cc04ee407d6f4f60c2c65c39869d58333c0 | 42,872 |
import shlex
def tokenize(string):
    """Split a shell-style command line into its tokens.

    For example ``'ls -l /home/shiyanlou'`` becomes
    ``['ls', '-l', '/home/shiyanlou']``.

    :param string: command line to split
    :return: list of tokens
    """
    return shlex.split(string)
import re
def get_label_and_caption(lines, i):
    """Capture any label and caption immediately after a code environment's end.

    Applies regexes to the two lines following index *i* (e.g. after ``!ec``)
    to extract any label or caption. NB! Any ``label{}`` and ``caption{}``
    commands found on those two lines are removed in place.

    :param list lines: lines of code
    :param int i: current index
    :return: label and caption
    :rtype: (str, str)
    """
    window = "".join(lines[i + 1:i + 3])
    label_re = re.compile(r"[\\]*label\{(.*?)\}")
    caption_re = re.compile(r"[\\]*caption\{(.*?)\}")
    label_hit = label_re.search(window)
    label = label_hit.group(1) if label_hit else None
    caption_hit = caption_re.search(window)
    caption = caption_hit.group(1) if caption_hit else None
    # Strip the matched commands from the two inspected lines.
    for offset in (1, 2):
        if len(lines) > i + offset:
            lines[i + offset] = label_re.sub("", lines[i + offset])
            lines[i + offset] = caption_re.sub("", lines[i + offset])
    return label, caption
def post_compute(e, q, d):
    """
    Extract a single scalar result (e.g. a sum or count) from a MongoDB query.

    ``d`` is unused here but kept for the dispatcher's call signature.
    """
    field_name = e.dshape[0].names[0]
    docs = q.coll.aggregate(list(q.query))['result']
    return docs[0][field_name]
def compute_yield(x):
    """
    Return the UMI-measured yield for droplet *x* (a mapping of counts).
    """
    return 10.0 * x["az_total"] * x["nb_hp"] / x["hp_total"]
def compute_multiples(origin_shape, broadcast_shape):
    """Per-axis tiling multiples that expand origin_shape into broadcast_shape."""
    gap = len(broadcast_shape) - len(origin_shape)
    # Leading (new) axes repeat the full broadcast extent; trailing axes
    # repeat by the integer ratio of the two extents.
    trailing = tuple(b // o for b, o in zip(broadcast_shape[gap:], origin_shape))
    return broadcast_shape[:gap] + trailing
def area_is_at_diagonal(i, j):
    """
    Whether the extracted area lies on the diagonal.

    Parameters:
        i -- First interval index.
        j -- Second interval index.
    Output:
        True when both interval indices coincide.
    """
    return i == j
import pkgutil
import encodings
def encoding_exists(encoding):
    """Check whether *encoding* names a codec shipped with Python.

    Hyphens are also tried as underscores ('utf-8' matches 'utf_8').
    """
    false_positives = {"aliases"}
    available = {
        name
        for _imp, name, ispkg in pkgutil.iter_modules(encodings.__path__)
        if not ispkg
    } - false_positives
    if not encoding:
        return False
    return encoding in available or encoding.replace('-', '_') in available
def assign_worksessions(df, threshold=1, milliseconds=True):
    """Label events with a ``workSessionId`` column.

    A new session starts whenever the gap between consecutive ``time``
    values exceeds *threshold* hours (timestamps in ms by default).
    Adds ``newSession`` and ``workSessionId`` columns in place.
    """
    gap_limit = threshold * 3600
    if milliseconds:
        gap_limit = gap_limit * 1000
    # A gap larger than the limit marks the start of a new session.
    df['newSession'] = df['time'].diff() > gap_limit
    df['workSessionId'] = df['newSession'].cumsum().astype('int')
    return df
import pickle
def deserialize(src):
    """
    Default deserialization: unpickle *src* (the format version is embedded
    in the stream, so none needs to be specified).

    NOTE: pickle must never be used on untrusted input -- it can execute
    arbitrary code during loading.

    Parameters
    ----------
    src : bytes

    Returns
    -------
    The deserialized object.
    """
    return pickle.loads(src)
def get_descriptions(nzo, match, name):
    """Derive episode-name variants from an nzb name.

    A description, if present, follows the matched item, separated like
    ``' - Description'`` or ``'_-_Description'``. Returns the cleaned name
    plus dotted and underscored variants.
    """
    ep_name = nzo.nzo_info.get("episodename") if nzo else ""
    if not ep_name:
        # Need to improve for multi-ep support
        ep_name = name[match.end():] if match else name
    ep_name = ep_name.strip(" _.")
    if ep_name.startswith("-"):
        ep_name = ep_name.strip("- _.")
    if "." in ep_name and " " not in ep_name:
        ep_name = ep_name.replace(".", " ")
    ep_name = ep_name.replace("_", " ")
    dotted = ep_name.replace(" - ", "-").replace(" ", ".")
    underscored = ep_name.replace(" ", "_")
    return ep_name, dotted, underscored
def terminate(library, session, degree, job_id):
    """Request a VISA session to terminate normal execution of an operation.

    Corresponds to the viTerminate function of the VISA library.

    Parameters
    ----------
    library : ctypes.WinDLL or ctypes.CDLL
        ctypes wrapped library.
    session : VISASession
        Unique logical identifier to a session.
    degree : None
        Not used in this version of the VISA specification.
    job_id : VISAJobId
        Operation identifier. Passing None aborts any call in the current
        process executing on the given session; such calls return
        VI_ERROR_ABORT.

    Returns
    -------
    constants.StatusCode
        Return value of the library call.
    """
    status = library.viTerminate(session, degree, job_id)
    return status
import time
def select_timestamp(entry):
    """
    Return the time `entry` was updated, published or created (in that
    order of preference) as a time-tuple, falling back to the current
    GMT time when none of those keys is present.
    """
    for attr in ('updated', 'published', 'created'):
        try:
            # BUG FIX: the key must be formatted *before* indexing; the
            # original wrote entry['%s_parsed'] % attr, which looked up the
            # literal key '%s_parsed', always raised KeyError and therefore
            # always fell through to time.gmtime().
            return entry['%s_parsed' % attr]
        except KeyError:
            pass
    return time.gmtime()
import datetime
def create_table_schema(df, table_name):
    """Generate a SQL CREATE TABLE statement from a DataFrame's columns.

    Per-column type inference: datetime objects -> DATETIME, strings that
    contain '-<digit>' -> DATE, other alphabetic strings -> VARCHAR(255),
    remaining numeric columns -> INT or FLOAT.
    """
    sql_obj_mapping = {'string': 'VARCHAR (255)',
                       'integer': 'INT',
                       'float': 'FLOAT',
                       'datetime': 'DATETIME',
                       # BUG FIX: 'date' was inferred below but missing from
                       # this mapping, so date-like string columns raised
                       # KeyError instead of producing a DATE column.
                       'date': 'DATE'
                       }
    create_table_string = """CREATE TABLE %s (""" % table_name
    mapping_dict = {}
    for col_name in df:
        # check if col is a datetime obj
        if df[col_name].apply(lambda x: isinstance(x, datetime.datetime)).any():
            py_obj_type = 'datetime'
        # check if col looks like a date string (raw string avoids the
        # invalid '\d' escape of the original)
        elif df[col_name].str.contains(r'-\d').dropna().all():
            py_obj_type = 'date'
        elif df[col_name].str.contains('[a-z]').dropna().any():
            py_obj_type = 'string'
        # check if col is a float or integer
        else:
            try:
                df[col_name].dropna().apply(lambda x: int(x))
                py_obj_type = 'integer'
            except ValueError:
                py_obj_type = 'float'
        sql_object = sql_obj_mapping[py_obj_type]
        mapping_dict.update({col_name: sql_object})
        create_table_string += col_name + ' '
        create_table_string += sql_object
        create_table_string += ','
    # Drop the trailing comma and close the statement.
    create_table_string = create_table_string[:-1] + ');'
    return create_table_string
def calculate_dynamic_pressure(rho, TAS):
    """Return the dynamic pressure q_inf = rho * TAS**2 / 2.

    Parameters
    ----------
    rho : float
        Air density (kg/m³).
    TAS : float
        True Air Speed (m/s).

    Returns
    -------
    float
        Dynamic pressure (Pa).
    """
    return rho * TAS ** 2 / 2.0
from pathlib import Path
def test_directory():
    """Return the path of the top-level directory containing the tests."""
    here = Path(__file__)
    return here.parent
def reindex_elements_in_tree(result):
    """
    Renumber element levels from parent down to child (the input dict is
    ordered child-to-parent). The last entry -- the common parent window,
    i.e. the desktop -- is dropped; the special windows are kept.

    :param result: dict of element specifications
    :return: list of ``{'level <n>': spec}`` dicts, parent first
    """
    ordered = list(result.values())
    # Reverse to parent-first order, then drop the desktop/root entry.
    parent_first = ordered[::-1][1:]
    return [{'level %d' % idx: spec} for idx, spec in enumerate(parent_first)]
def parse_hostnames_from_ingresses(ingress_list):
    """
    Map hostname -> load-balancer address across a list of Ingress objects.

    Ingresses whose load balancer reports no address yet are skipped.
    """
    hostnames = {}
    for ing in ingress_list:
        lb_entries = ing.status.load_balancer.ingress
        if lb_entries is None:
            continue
        address = lb_entries[0].ip
        for rule in ing.spec.rules:
            hostnames[rule.host] = address
    return hostnames
from pathlib import Path
import re
def get_min_ver(dependency: str) -> str:
    """Retrieve the version of `dependency` from setup.py; raise if not found."""
    setup_py = Path(__file__).parent.joinpath("../../../setup.py")
    pattern = fr'"{dependency}~=([0-9]+(\.[0-9]+){{,2}})"'
    with open(setup_py, "r") as setup_file:
        for line in setup_file.readlines():
            found = re.findall(pattern, line)
            if found:
                return found[0][0]
    raise RuntimeError(f"Cannot find {dependency} dependency in setup.py")
def init(module, weight_init, bias_init, gain=1):
    """
    Initialize a module's weights and biases in place.

    Parameters
    ----------
    module : nn.Module
        nn.Module to initialize.
    weight_init : func
        Function applied to ``module.weight.data`` (receives ``gain``).
    bias_init : func
        Function applied to ``module.bias.data``.
    gain : float, optional
        Scaling gain forwarded to ``weight_init``.

    Returns
    -------
    module : nn.Module
        Initialized module
    """
    # BUG FIX: the original called weight_init a second time without the
    # gain, silently overwriting the gained initialization.
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module
def convert_ftp_url(url):
    """Rewrite the first ``ftp://`` scheme occurrence as ``https://``."""
    return url.replace('ftp://', 'https://', 1)
import warnings
def _fix_auth(auth, username=None, password=None, verify=None, cert=None):
"""Updates auth from deprecated parameters username, password, verify and cert."""
if any(p is not None for p in (username, password, verify, cert)):
message = 'The use of "username", "password", "verify", and "cert" is deprecated. ' + \
'Please use the "auth" keyword during class instantiation. ' + \
'These keywords will be removed in a future release.'
warnings.warn(message, DeprecationWarning)
if username is not None:
auth.username = username
if password is not None:
auth.password = password
if verify is not None:
auth.verify = verify
if cert is not None:
auth.cert = cert
return auth | ecd28033279973482ec981ad23041929325ba2f6 | 42,905 |
import torch
import random
def addGaussianNoise2Image(images, labels, std=0.1, shuffle=True, device=None):
    """Add noise to a batch of images and build a real-vs-noisy dataset.

    The original images are labeled 1 (positive samples) and the
    noise-added copies are labeled 0 (negative samples); both halves are
    concatenated along the batch dimension and optionally shuffled.

    NOTE(review): despite the name, the noise is drawn with
    torch.rand_like (uniform in [0, 1)) scaled by std**2 -- not Gaussian.
    Confirm whether torch.randn_like was intended.
    """
    images = images.float().to(device)
    labels = labels.float().to(device)
    batch_size = labels.size(0)
    labels = torch.unsqueeze(labels, dim=0).view(batch_size, 1)
    # Noisy copies all get label 0.
    noiseLabel = torch.zeros((batch_size, 1)).to(device)
    noise = torch.zeros(images.shape).to(device)
    noise = noise + (std ** 2) * torch.rand_like(images)
    noise = torch.add(noise, images)
    # Stack clean images first, noisy copies second.
    data = torch.cat((images, noise), dim=0)
    labels = torch.cat((labels, noiseLabel), dim=0)
    if shuffle:
        # Pair each sample with its label so they shuffle together.
        tmp = []
        for index, tensor in enumerate(data):
            tmp.append([tensor, labels[index]])
        random.shuffle(tmp)
        data = [torch.unsqueeze(i[0], dim=0) for i in tmp]
        labels = [i[1] for i in tmp]
        labels = torch.cat(labels, dim=0)
        data = torch.cat(data, dim=0)
    if labels.dim() >= 2:
        labels = torch.squeeze(labels, dim=1)
    labels = labels.long()
    data = data.float()
    return data, labels
def postproc(maps):
    """Generate PD, R1, R2* (and MTsat) volumes from log-parameters.

    Exponentiates the log-encoded maps in place and attaches display
    names/units to each map.

    Parameters
    ----------
    maps : ParameterMaps

    Returns
    -------
    pd : ParameterMap
    r1 : ParameterMap
    r2s : ParameterMap
    mt : ParameterMap, optional
    """
    maps.r1.volume = maps.r1.fdata().exp_()
    maps.r1.name = 'R1'
    maps.r1.unit = '1/s'
    maps.r2s.volume = maps.r2s.fdata().exp_()
    maps.r2s.name = 'R2*'
    maps.r2s.unit = '1/s'
    maps.pd.volume = maps.pd.fdata().exp_()
    # BUG FIX: the PD metadata was written onto maps.r2s, clobbering the
    # R2* name/unit and leaving the PD map unlabeled.
    maps.pd.name = 'PD'
    maps.pd.unit = 'a.u.'
    if hasattr(maps, 'mt'):
        # MTsat in percent units: 100 / (1 + exp(-mt))
        maps.mt.volume = maps.mt.fdata().neg_().exp_()
        maps.mt.volume += 1
        maps.mt.volume = maps.mt.fdata().reciprocal_()
        maps.mt.volume *= 100
        maps.mt.name = 'MTsat'
        maps.mt.unit = 'p.u.'
        return maps.pd, maps.r1, maps.r2s, maps.mt
    return maps.pd, maps.r1, maps.r2s
def join_dict(keys, values):
    """
    Zip equal-length key and value lists into a dictionary.

    Prints an error and returns -1 when the lengths differ.
    """
    if len(keys) != len(values):
        print('Error: Attempting to create a dictionary from '
              'a key and value list of unequal length')
        return -1
    return dict(zip(keys, values))
from typing import List
def get_whitespace_operations(from_sequence: str, to_sequence: str) -> List[int]:
    """
    Get the repair sequence that turns from_sequence into to_sequence (after applying the repair_whitespace function)
    :param from_sequence: sequence that the returned repair tokens should be applied to to get the to_sequence
    :param to_sequence: sequence that should result from applying the whitespace operations to the from_sequence
    :return: list of repair tokens (0 = keep, 1 = insert space after, 2 = delete space)
    """
    assert from_sequence.replace(" ", "") == to_sequence.replace(" ", ""), \
        f"make sure from_sequence and to_sequence only differ in whitespaces:\n{from_sequence}\n{to_sequence}"
    src = 0
    dst = 0
    ops = []
    while src < len(from_sequence):
        src_char = from_sequence[src]
        dst_char = to_sequence[dst] if dst < len(to_sequence) else ""
        if src_char == dst_char:
            ops.append(0)   # characters agree: keep
            src += 1
            dst += 1
        elif dst_char == " ":
            ops.append(1)   # target has an extra space here: insert one
            src += 1
            dst += 2
        elif src_char == " ":
            ops.append(2)   # source has a surplus space: delete it
            src += 1
        else:
            raise ValueError("should not happen")
    assert len(ops) == len(from_sequence), \
        f"{''.join(str(r) for r in ops)}\n'{from_sequence}'\n'{to_sequence}'"
    return ops
import json
def read_values(file):
    """
    Deserialize the JSON document stored in *file* via json.load.

    Returns None (after printing a message) when the file cannot be read.
    """
    try:
        with open(file) as handle:
            return json.load(handle)
    except IOError:
        print('An IOError has occured!')
def ticker_price_history(data, ticker):
    """Extract the close-price time series of *ticker* as a flat DataFrame."""
    # Select the single ("Close", ticker) column from the MultiIndex frame.
    history = data[[("Close", ticker)]]
    history = history.reset_index()
    history.columns = history.columns.droplevel()
    history.columns = ["time_ts", "close_price"]
    return history
def batch_norm(inputs,
               activation_fn=None,
               normalizer_fn=None,
               normalizer_params=None):
    """Batch-normalization layer compatible with the classic conv API.

    Applies ``normalizer_fn`` (with ``normalizer_params``) and then
    ``activation_fn`` when provided; simpler to use with arg scopes.
    """
    outputs = inputs
    # Normalize first, activate second -- matching the conv layer order.
    if normalizer_fn is not None:
        outputs = normalizer_fn(outputs, **(normalizer_params or {}))
    if activation_fn is not None:
        outputs = activation_fn(outputs)
    return outputs
import torch
def squash(input_tensors, dim=2):
    """
    Capsule-network squashing nonlinearity.

    Parameters
    ----------
    input_tensors : a tensor
    dim : dimension along which to apply squashing

    Returns
    -------
    torch.FloatTensor of the same shape, with vector norms squashed
    towards the [0, 1) range.
    """
    l2 = torch.norm(input_tensors, 2, dim=dim, keepdim=True)
    sq = l2 ** 2
    scale = sq / (1.0 + sq)
    # 1e-8 guards against division by zero for all-zero vectors.
    return scale * input_tensors / torch.sqrt(sq + 1e-8)
def sanitize(address):
    """
    Lowercase *address* and strip 'block of ' / 'block ' markers.

    :param str address: address to sanitize
    """
    cleaned = address.lower()
    # Remove the longer marker first so 'block of ' never leaves 'of ' behind.
    for marker in ('block of ', 'block '):
        cleaned = cleaned.replace(marker, '')
    return cleaned
import sys
import io
def capture_output(func):
    """Decorate a function with @capture_output to define a CapturedFunction()
    wrapper around it.
    Doesn't currently capture non-python output but could with dup2.
    Decorate any function to wrap it in a CapturedFunction() wrapper:
    >>> @capture_output
    ... def f(x,y):
    ...     print("hi")
    ...     return x + y
    >>> f
    CapturedFunction('f')
    Calling a captured function suppresses its output:
    >>> f(1, 2)
    3
    To call the original undecorated function:
    >>> f.uncaptured(1, 2)
    hi
    3
    If you don't care about the return value, but want the output:
    >>> f.outputs(1, 2) == 'hi\\n'
    True
    If you need both the return value and captured output:
    >>> f.returns_outputs(1, 2) == (3, 'hi\\n')
    True
    """
    class CapturedFunction:
        """Closure on `func` which supports various forms of output capture."""
        def __repr__(self):
            return "CapturedFunction('%s')" % func.__name__
        def returns_outputs(self, *args, **keys):
            """Call the wrapped function, capture output, return (f(), output_from_f)."""
            # Flush first so previously-buffered output is not captured.
            sys.stdout.flush()
            sys.stderr.flush()
            oldout, olderr = sys.stdout, sys.stderr
            # Python 2 prints bytes; Python 3 needs a text wrapper over the
            # byte buffer so print() can write str.
            if sys.version_info < (3, 0, 0):
                out = io.BytesIO()
            else:
                out = io.TextIOWrapper(io.BytesIO())
            sys.stdout, sys.stderr = out, out
            try:
                result = func(*args, **keys)
                out.flush()
                out.seek(0)
                return result, out.read()
            finally:
                # Always restore the real streams, even if func raised.
                sys.stdout, sys.stderr = oldout, olderr
        def suppressed(self, *args, **keys):
            """Call the wrapped function, suppress output, return f() normally."""
            return self.returns_outputs(*args, **keys)[0]
        def outputs(self, *args, **keys):
            """Call the wrapped function, capture output, return output_from_f."""
            return self.returns_outputs(*args, **keys)[1]
        def __call__(self, *args, **keys):
            """Call the undecorated function, capturing and discarding it's output, returning the result."""
            return self.suppressed(*args, **keys)
        def uncaptured(self, *args, **keys):
            """Call the undecorated function and return the result."""
            return func(*args, **keys)
    return CapturedFunction()
def bool_to_str(val):
    """Render a bool as '是'/'否'; any non-bool value passes through unchanged."""
    if isinstance(val, bool):
        return '是' if val else '否'
    return val
def paint(width, height, performance):
    """Calculate how much paint is needed for a width x height area.

    @param width: area's width
    @param height: area's height
    @param performance: coverage per unit of paint (m^2)
    """
    return width * height / performance
import torch
def decode(box_p, priors):
    """
    Decode predicted bbox offsets against priors using the Yolov2-style
    scheme (https://arxiv.org/pdf/1612.08242.pdf):
        b_x = (sigmoid(pred_x) - .5) / conv_w + prior_x
        b_y = (sigmoid(pred_y) - .5) / conv_h + prior_y
        b_w = prior_w * exp(loc_w)
        b_h = prior_h * exp(loc_h)
    loc arrives as [(s(x)-.5)/conv_w, (s(y)-.5)/conv_h, w, h] and priors as
    [center_x, center_y, w, h], all relative to image size (the division by
    the 'cell size' happens in the network). Since prior_x/prior_y are
    center coordinates, the result is converted to [x1, y1, x2, y2]
    corner form at the end.
    """
    variances = [0.1, 0.2]
    centers = priors[:, :2] + box_p[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * torch.exp(box_p[:, 2:] * variances[1])
    boxes = torch.cat((centers, sizes), 1)
    # center/size -> corner form
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
import pandas
def flatten_results_dataset(dataset, row_levels=frozenset({'output'})):
    """Given an xarray.Dataset with results, flatten it into a pandas.DataFrame.
    This function individually unstacks arrays, which results in a much more
    compact DataFrame than calling `dataset.to_dataframe()`.
    Args:
      dataset: xarray.Dataset, e.g., as produced by `Inferer.run_on_files`.
      row_levels: optional set giving names of dimensions to keep in the rows.
    Returns:
      pandas.DataFrame with concatenated data variables from `dataset` as
      columns. Dimensions not in `row_levels` are combined with variable
      names into column names.
    """
    frames = []
    for array in dataset.data_vars.values():
        frame = array.to_dataframe()
        # Dimensions not kept as rows get pivoted into the columns.
        levels = [dim for dim in array.dims if dim not in row_levels]
        if levels:
            frame = frame.unstack(levels)
            # flatten columns, adding level names into the flattened column names
            new_keys = []
            for keys in frame.columns:
                pieces = [keys[0]]
                pieces.extend('%s_%s' % (lev, k) for lev, k in zip(levels, keys[1:]))
                new_keys.append('/'.join(pieces))
            frame.columns = new_keys
        frames.append(frame)
    # Concatenate all per-variable frames side by side.
    return pandas.concat(frames, axis=1)
from functools import reduce
def test_reduce():
    """
    Test reduce
    reduce is very handy when a list must be folded into a single result;
    here it multiplies all items together.
    """
    return reduce(lambda acc, item: acc * item, [1, 2, 3, 4])
def cross(environment, book, row, sheet_source, column_source, column_key):
    """
    Look up a single value in a different sheet, matching rows on *column_key*.
    """
    source_sheet = book.sheets[sheet_source]
    matched = source_sheet.get(**{column_key: row[column_key]})
    return environment.copy(matched[column_source])
from typing import Dict
from typing import Iterable
def build_pattern_registrar(d: Dict):
    """
    :param d: a dictionary-like object that patterns will be registered to
    :return: a registrar function mapping first nibbles to a pattern
    """
    def registrar(pattern: int, first_nibbles: Iterable) -> None:
        """
        Store *pattern* (a combination of pattern flags describing variable
        storage) under every nibble in *first_nibbles*.
        """
        for nibble in first_nibbles:
            d[nibble] = pattern
    return registrar
def example_smiles_n_features():
    """
    Sample data for testing.

    Returns
    -------
    dictionary
        format {'smiles': required feature vector : List}; each vector is a
        list of per-atom feature rows (one row for 'C', two rows for 'N#N').
    """
    # Feature row for a single carbon atom.
    feature_vector_C = [[
        0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
        0, 0, 1, 0, 0, 0, 0, 0.12011
    ]]
    # Two feature rows, one per nitrogen atom of N#N.
    feature_vector_NN = [[
        0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
        1, 0, 0, 0, 0, 0, 0, 0.14007
    ],
        [
        0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1,
        0, 0, 0, 0, 0, 0, 0.14007
    ]]
    return {'C': feature_vector_C, 'N#N': feature_vector_NN}
from typing import Dict
from pathlib import Path
def _certificate_check(path) -> Dict[str, Path]:
"""
Check if the right certificates are at the given path.
"""
certi_path = {
"ca": "ca.crt",
"crt": "client.crt",
"key": "client.key",
}
r_paths: Dict[str, Path] = {}
for k, f in certi_path.items():
r_paths[k] = path / f
if not r_paths[k].exists():
raise FileNotFoundError(f"'{f}' was not found in at: {path}")
return r_paths | b38dc126715feca3ac982a6ea34314ef5fc10a25 | 42,934 |
def map_unconstrained_range(
    x: float, in_min: float, in_max: float, out_min: float, out_max: float
) -> float:
    """
    Map *x* from one numeric range onto another without clamping.

    Somewhat similar to the Arduino ``map()`` function, but returns a
    floating point result and does not constrain the output to lie between
    :attr:`out_min` and :attr:`out_max`. Either range may be reversed
    (min > max), allowing e.g. 0-10 to be mapped onto 50-0.
    See also :py:func:`map_range`.

    .. code-block::

        from adafruit_simplemath import map_unconstrained_range
        celsius = -20
        fahrenheit = map_unconstrained_range(celsius, 0, 100, 32, 212)
        print(celsius, "degress Celsius =", fahrenheit, "degrees Fahrenheit")

    :param float x: Value to convert
    :param float in_min: Start value of input range.
    :param float in_max: End value of input range.
    :param float out_min: Start value of output range.
    :param float out_max: End value of output range.
    :return: Returns value mapped to new range.
    :rtype: float
    """
    span = in_max - in_min
    offset = x - in_min
    if span != 0:
        ratio = offset / span
    elif offset != 0:
        # Degenerate input range: fall back to the raw offset.
        ratio = offset
    else:
        ratio = 0.5
    return ratio * (out_max - out_min) + out_min
def get_add_dict(add_default_dict, request):
    """
    Build the insert dict by calling each default factory with *request*.

    :param add_default_dict: mapping of field -> callable(request), may be falsy
    :param request: current request, passed to every factory
    :return: dict of computed field values
    """
    if not add_default_dict:
        return {}
    return {field: factory(request) for field, factory in add_default_dict.items()}
def flatten_series():
    """
    Tell MIPPY to pass all image datasets as one flat list rather than the
    default 2D list divided by series.
    """
    return True
def find_unique_value_error(exc_detail):
    """Return the first error with code 'unique' in exception details, else None."""
    for errors in exc_detail.values():
        for error in errors:
            if error.code == 'unique':
                return error
    return None
def split_and_strip(string, delimiter):
    """
    Split *string* on *delimiter* and strip whitespace from every piece.

    Parameters
    ----------
    string : str
        The string to split and strip.
    delimiter : str
        The string to split by.

    Returns
    -------
    list[str]
        Stripped pieces in their original order.
    """
    pieces = string.split(delimiter)
    return list(map(str.strip, pieces))
import requests
def get_word_count(title, base_url="https://klexikon.zum.de/api.php") -> int:
    """
    Return the word count of a Klexikon article.

    Queries the MediaWiki search API and uses the top-ranked hit for
    *title*; returns 0 when the search yields no results.

    :param title: article title to search for
    :param base_url: MediaWiki API endpoint
    :return: word count of the best-matching article, or 0
    """
    params = {
        "action": "query",
        "format": "json",
        "list": "search",
        "srlimit": 5,
        "srsearch": title
    }
    try:
        res = requests.get(url=base_url, params=params)
        # Take the first (best-ranked) search hit.
        article = res.json()['query']['search'][0]
        return article['wordcount']
    except IndexError:  # Likely due to incorrect search results.
        return 0
import math
def f0(system):
    """Return the natural frequency of the (c1, c2, r1, r2) system."""
    c1, c2, r1, r2 = system
    # fn = 1 / (2*pi*sqrt(R1*C1*R2*C2))
    return 1 / (2 * math.pi * math.sqrt(r1 * c1 * r2 * c2))
def component():
    """
    Return a minimal test-component fixture.
    """
    return {
        "name": "foo",
        "display_name": "Foo",
        "attributes": [],
    }
import os
def reduce_2_basename(files):
    """
    Convert full file paths to their base names.

    Parameters
    ----------
    files : list of str
        List of filepaths.

    Returns
    -------
    list of str
        Base file names in the same order.
    """
    return list(map(os.path.basename, files))
import time
def wait_for_translation(client, id):
    """Poll the translation status endpoint until it leaves the ACTIVE state.

    :param client: API client exposing ``get_translation_status``
    :param id: translation request id
    :return: the final status payload (dict parsed from the JSON response)
    """
    while True:
        res = client.get_translation_status(id)
        res_data = res.json()
        state = res_data['requestState']
        if state == 'ACTIVE':
            # Still running -- back off briefly before polling again.
            time.sleep(2)
            continue
        else:
            return res_data
from math import log, sqrt
def calculate_d1(underlying, strike, domestic_short_rate, foreign_short_rate, sigma, days_to_maturity):
    """Black-Scholes d1 term for an FX option.

    d1 = (ln(S/K) + (r_d - r_f + sigma^2/2) * T) / (sigma * sqrt(T))

    :param underlying: spot price (must be > 0)
    :param strike: strike price (must be > 0)
    :param domestic_short_rate: domestic risk-free rate
    :param foreign_short_rate: foreign risk-free rate
    :param sigma: volatility
    :param days_to_maturity: days until expiry (must be > 0)
    :raises ValueError: for non-positive maturity, underlying or strike
    :return: the d1 value
    """
    # BUG FIX: maturity and underlying equal to exactly 0 previously slipped
    # past the '< 0' checks and crashed later with ZeroDivisionError /
    # math domain error; validate with '<= 0' like the strike check.
    if days_to_maturity <= 0:
        raise ValueError("Invalid days to maturity")
    year_fraction = float(days_to_maturity) / 365
    if underlying <= 0:
        raise ValueError("Invalid underlying")
    if strike <= 0:
        raise ValueError("Invalid Strike")
    log_moneyness = log(float(underlying) / strike)
    carry = domestic_short_rate - foreign_short_rate
    numerator = log_moneyness + (carry + .5 * sigma * sigma) * year_fraction
    denominator = sigma * sqrt(year_fraction)
    return float(numerator) / denominator
def get_last_data_idx(productions):
    """
    Find the index of the newest production entry.

    :param productions: list of 24 production dict objects
    :return: (int) index of the newest data, or -1 if no data (empty day)
    """
    for idx, prod in enumerate(productions):
        # A tiny total marks the first slot without real data.
        if prod['total'] < 1000:
            return idx - 1
    return len(productions) - 1
import re
def split_keyword(keyword):
    """Split a keyword into multiple ones on any non-alphanumeric character.

    :param string keyword: keyword
    :return: keywords
    :rtype: set
    """
    return set(re.findall(r'\w+', keyword))
import subprocess
import sys
import re
def get_sub_track_id(file, num):
    """Return (track id, codec) of the *num*-th subtitle track in *file*.

    Shells out to ``mkvmerge -i`` and exits the process when that fails.
    *num* is 1-indexed. Returns (None, None) when no such track exists.
    """
    # could also use ffprobe to json as it turns out
    try:
        raw_info = subprocess.check_output(["mkvmerge", "-i", file],
                                           stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        print(ex)
        sys.exit(1)
    # BUG FIX: raw string avoids the invalid '\d' / '\(' escape sequences
    # (DeprecationWarning now, a SyntaxError in future Python versions).
    pattern = re.compile(r'(\d+): subtitles \((.*?)\)')
    mat = pattern.findall(str(raw_info))
    # num is 1 indexed, get only the num track in file
    mat = mat[num - 1]
    if mat:
        # (track number, subtitle codec)
        return mat[0], mat[1]
    else:
        return None, None
def valid_history(history, expected_count, expected_messages=None):
    """Return True when *history* has *expected_count* entries and its
    leading entries match *expected_messages* by "type"."""
    wanted = expected_messages or []
    if len(history) != expected_count:
        return False
    for idx, wanted_type in enumerate(wanted):
        if history[idx]["type"] != wanted_type:
            return False
    return True
import math
def select_dim_over_m(maxM, maxN, coef_nm, coef_n, coef_m, tot, rest=0):
    """
    solves the problem, max n*m such that n <= maxN, m <= maxM and
    coef_nm*nm + coef_n*n + coef_m*m <= tot

    :param maxM: upper bound for m
    :param maxN: upper bound for n
    :param coef_nm: cost coefficient of the n*m term
    :param coef_n: cost coefficient of the n term
    :param coef_m: cost coefficient of the m term
    :param tot: total budget (the error message suggests bytes of memory)
    :param rest: fixed cost subtracted from the budget up front
    :return: (n, m) integer pair fitting the budget
    :raises RuntimeError: if no positive (n, m) fits the budget
    """
    tot = tot - rest
    # We consider x = m = n and solve the quadratic equation
    # coef_nm*x^2 + (coef_n + coef_m)*x - tot = 0, taking the positive root.
    b = coef_n + coef_m
    x = (-b + math.sqrt(b**2 + 4 * coef_nm * tot)) / (2 * coef_nm)
    m = math.floor(min(maxM, x))
    n = math.floor(min(maxN, x))
    # If one of the two n, m was capped at it's limit we want to
    # recalculate the value of the other variable by solving the
    # corresponding linear equation.
    if m == maxM and n < maxN:
        n = (tot - coef_m * m) / (coef_nm * m + coef_n)
        n = min(maxN, n)
    if n == maxN and m < maxM:
        m = (tot - coef_n * n) / (coef_nm * n + coef_m)
        m = min(maxM, m)
    n, m = int(n), int(m)
    if n <= 0 or m <= 0:
        raise RuntimeError("Available memory %.2fMB is not enough." % (tot / 2**20))
    return n, m
def _get_main_opset_version(model):
    """
    Returns the main opset version.

    The default ONNX domain may be spelled '' or 'ai.onnx'; the first
    matching entry wins.  Returns None if neither appears.
    """
    for opset in model.opset_import:
        if opset.domain in ('', 'ai.onnx'):
            return opset.version
    return None
def compute_ref_area(body_chord, body_span, tran_chord, tran_span, wing_chord,
                     wing_span, wing_tip_chord):
    """
    Reference areas of the three planform sections.

    Each section is a trapezoid: area = (chord_a + chord_b) / 2 * span.

    :return: (body_area, tran_area, wing_area)
    """
    def trapezoid(chord_a, chord_b, span):
        # Average chord times span.
        return 0.5 * (chord_a + chord_b) * span

    return (trapezoid(body_chord, tran_chord, body_span),
            trapezoid(tran_chord, wing_chord, tran_span),
            trapezoid(wing_chord, wing_tip_chord, wing_span))
import os
def getAndAppend(name, append):
    """Retrieves the given environment variable and appends the given string to
    its value and returns the new value. The environment variable is not
    modified. Returns an empty string if the environment variable does not
    exist."""
    value = os.environ.get(name)
    if value is None:
        return ""
    return value + append
import pandas
def load_csv_as_dataframe_from_web(url, dtype=None):
    """Loads csv data from the provided url to a DataFrame.
    Expects the data to be in csv format, with the first row as the column
    names.
    url: url to download the csv file from
    dtype: optional column -> dtype mapping forwarded to pandas.read_csv
    """
    # pandas fetches the URL itself; no separate download step is needed.
    return pandas.read_csv(url, dtype=dtype)
def get_package_data():
    """
    Returns the packages with static files of Wapyce.
    :return: The packages with static files of Wapyce.
    :rtype: dict(str, list(str))
    """
    # The root package ('') only ships its requirements file.
    return {'': ['requirements.txt']}
def introduction():
    """Handle a request for the "Introduction" page.

    The template at views/introduction.tpl carries the actual page content;
    this handler only supplies an empty error slot to the template context.

    Returns:
        (string) A web page (via the @view decorator) introducing the
        project.
    """
    context = {'error': None}
    return context
def create_parser(subparsers):
    """Register the 'train' subcommand with its arguments.

    Returns
    -------
    parser : ArgumentParser
    """
    parser = subparsers.add_parser('train', help='Train MARL policies ')
    parser.add_argument(
        'configuration',
        type=str,
        help='Path to python config file. Include the .py extension.',
    )
    return parser
import math
def _tree_depth(num_leaves: int, arity: int):
    """Returns the depth of the tree given the number of leaf nodes and arity."""
    # Levels needed below the root to cover num_leaves, plus the root level.
    levels = math.log(num_leaves) / math.log(arity)
    return math.ceil(levels) + 1
from typing import Any
def _rebase_entity_dict(
    entity_dict: str, group: dict[str, Any], base_channel_no: int
) -> dict[int, Any]:
    """Rebase entity_dict with base_channel_no."""
    fields = group.get(entity_dict)
    if not fields:
        # Missing or empty mapping: nothing to shift.
        return {}
    # Shift every channel number by the base offset.
    return {
        channel_no + base_channel_no: field
        for channel_no, field in fields.items()
    }
from pathlib import Path
import subprocess
def current_git_hash() -> str:
    """ Retrieve the current git hash number of the git repo (first 6 digit). """
    repo_dir = Path(__file__).parent
    proc = subprocess.run(
        'git rev-parse --short=6 HEAD',
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        cwd=repo_dir,
        universal_newlines=True,
        encoding='utf8',
    )
    # First stdout line is the abbreviated hash; empty means not a git repo.
    hash_number = proc.stdout.partition('\n')[0]
    return hash_number if hash_number else 'unknown'
import io
def _text_reader(text_file):
    """Returns list of lines from the text file.
    Performs lowercasing and white-space tokenization on each line before
    returning. Slightly different than the one in totto_parent_eval.py.
    Args:
      text_file: String filename.
    """
    with io.open(text_file, encoding="utf-8") as handle:
        # Strip surrounding whitespace and lowercase every line.
        return [line.strip().lower() for line in handle]
def obtainDifferences(redundantDict, transformationContext):
    """
    analize what is different between similar rules (same rc, rate and actions) throw unions
    and intersections of their context

    :param redundantDict: {reactionCenter: {rate: [rule ids]}} grouping of
        rules considered redundant with each other
    :param transformationContext: rule id -> sequence of per-transformation
        context sets  # NOTE(review): shape inferred from the zip/&=/|= usage
        below — confirm with the caller
    :return: {reactionCenter: {rate: set}} holding the context elements that
        differ consistently across all grouped rules
    """
    redundantListDict = {}
    for center in redundantDict:
        for rate in redundantDict[center]:
            tmp = []
            # Collect the context of every rule in this (center, rate) group.
            tmp = [transformationContext[x] for x in redundantDict[center][rate]]
            # if center == (('Prot(iMod~U)', ), ('Prot(egfr)', 'EGFR(prot)')):
            #    print '--', tmp
            # pair together by traqnsformation the context of all those rules whose reaction center, rate
            # and transformations are the same
            tmp = zip(*tmp)
            # if center == (('Prot(iMod~U)', ), ('Prot(egfr)', 'EGFR(prot)')):
            #    print '--------------------------------'
            #    print '--', tmp
            # print '@@@', tmp
            intersections = []
            unions = []
            # Per transformation: intersect and union the context sets of all
            # rules in the group.
            for ttmp in tmp:
                tint = ttmp[0]
                tun = ttmp[0]
                for idx in range(1, len(ttmp)):
                    tint &= ttmp[idx]
                    tun |= ttmp[idx]
                intersections.append(tint)
                unions.append(tun)
            differences = []
            # obtain the union -intersection per transformation
            for x, y in zip(intersections, unions):
                differences.append(y - x)
            # obtain those elements that were found to be constantly different across all intersections
            constantDifferences = differences[0]
            for idx in range(1, len(differences)):
                constantDifferences &= differences[idx]
            if center not in redundantListDict:
                redundantListDict[center] = {}
            redundantListDict[center][rate] = constantDifferences
    return redundantListDict
import copy
def flatten_dict(dictionary, flatten_key_method='tuple'):
    """
    This function recursively flattens an arbitrarily nested dictionary.

    The keys into the flattened dictionary are tuples of the form
    (key0, key1, key2..) when ``flatten_key_method`` is ``'tuple'``, or
    strings joined with the given separator when it is one of
    ``'/'``, ``'-'``, ``'_'``.

    Raises:
        ValueError: if ``flatten_key_method`` is unsupported (the original
            silently dropped every key in that case).
    """
    if flatten_key_method != 'tuple' and flatten_key_method not in ('/', '-', '_'):
        raise ValueError(
            "flatten_key_method must be 'tuple', '/', '-' or '_', got %r"
            % (flatten_key_method,)
        )
    # Deep copy so nested values stored in the result never alias the input.
    dictionary = copy.deepcopy(dictionary)
    to_flatten = {}
    for key, value in dictionary.items():
        if isinstance(value, dict):
            flattened = flatten_dict(value, flatten_key_method=flatten_key_method)
            for sub_key, sub_value in flattened.items():
                if flatten_key_method == 'tuple':
                    to_flatten[(key,) + sub_key] = sub_value
                else:
                    to_flatten[key + flatten_key_method + sub_key] = sub_value
        else:
            if flatten_key_method == 'tuple':
                to_flatten[(key,)] = value
            else:
                to_flatten[key] = value
    return to_flatten
def __top_frond_right(dfs_data):
    """Returns the frond at the top of the RF stack."""
    rf_stack = dfs_data['RF']
    return rf_stack[-1]
def evaluate_sampler(sampler_fn, obs, action):
    """
    Resolve a sampler specification into a sample value.

    :param sampler_fn:
        fn(o,k)     Function returning sample value
        [p1,p2..]   List of values per-arm
        p1          Scalar value for all arms
    :param obs: observation forwarded to a callable sampler
    :param action: arm index used by callable and list samplers
    :return: the sampled value
    """
    if callable(sampler_fn):
        return sampler_fn(obs, action)
    # isinstance instead of type() == list so list subclasses also work.
    if isinstance(sampler_fn, list):
        return sampler_fn[action]
    return sampler_fn  # assume float or int
def yx_to_xy(yx_grid):
    """Turns a y/x grid (row, column) into an x/y grid.

    Each output column x reads the input column x bottom-up, producing a
    right-hand-rule cartesian layout.

    :param yx_grid: int[][], a typical ingested y/x grid
    :return: int[][], a RHR cartesian x,y grid
    ------------------------------------------------
    yx_style_grid:
      0 1 2
    0 a b c  == [[a,b,c],
    1 d e f      [d,e,f]]
    xy_style_grid:
    1 a b c  == [[d, a],
    0 d e f      [e, b],
      0 1 2      [f, c]]
    """
    num_rows = len(yx_grid)
    num_cols = len(yx_grid[0])  # rows are assumed to be equally long
    return [
        [yx_grid[num_rows - y - 1][x] for y in range(num_rows)]
        for x in range(num_cols)
    ]
def hash_2():
    """ Test hash 2 """
    # Fixed fixture digest used by the tests.
    fixture_digest = "9afab28587926ce230e2e4430becc599"
    return fixture_digest
import copy
def unwind(data, max_depth=1000, stop_term=''):
    """ Unwind nested dictionaries by repeating higher level fields.

    Rows are produced depth-first: each leaf (or node where unwinding stops)
    yields one flat dict carrying every ancestor's non-'items' fields.

    Args:
        data: nested dict whose children live under the 'items' key.
        max_depth: (int), maximum depth to unwind.
        stop_term: (str), stop unwinding once this term appears as a key in dict.
    Returns:
        Unwound rows as a list of dicts.
    """
    result_list = []

    def unwinder(node, row=None, depth=0):
        # Copy the inherited row so sibling branches never share mutations.
        row = {} if row is None else copy.deepcopy(row)
        for key in node:
            if key != 'items':
                row[key] = node[key]
        if 'items' not in node:
            result_list.append(row)
        elif depth < max_depth and stop_term not in node:
            for item in node['items']:
                unwinder(item, row, depth=depth + 1)
        else:
            # Depth/stop limit reached: keep the children unexpanded.
            # (The original also had a dead `row = {}` after appending.)
            row['items'] = node['items']
            result_list.append(row)

    unwinder(data)
    return result_list
def generate_header_file_main_type_recursive(parent, command, h):
    """
    Generates a struct with callbacks, one callback for each subcommand.
    These callbacks are called when user issues a subcommand.
    """
    is_root = parent is None
    if is_root:
        h += "struct CliParamsAll {\n"
    command_name = command.get("name", None)
    struct_name = command.get("cppStructName", None)
    # Only commands that declare both a name and a struct get a member.
    if command_name is not None and struct_name is not None:
        h += f"{struct_name} {command_name};\n"
    for subcommand in command.get("subcommands", []):
        h = generate_header_file_main_type_recursive("<not root>", subcommand, h)
    if is_root:
        h += "};\n\n"
    return h
def fetch_data_fake():
    """
    Creating fake function to deliver the result
    """
    print("fetching fake data")
    fake_record = {"name": "Valee", "age": 40}
    return fake_record
import math
def delta_angle_degrees(from_angle_degrees, to_angle_degrees):
    """Calculates the shortest signed delta angle."""
    raw_delta = to_angle_degrees - from_angle_degrees
    # Wrap into the half-open interval [-180, 180).
    wraps = math.floor((raw_delta + 180.0) / 360.0)
    return raw_delta - 360.0 * wraps
def slowComplete(prefix, list_of_words, top):
    """
    For a given prefix, provide top suggestions from this list of words.

    Parameters
    ----------
    prefix: Signal word used here
    list_of_words: a file that has the example format
        (header line first, then "<count>\\t<word>" lines)
    top: top many suggestions as output

    Return
    ------
    the top k (word, count) recommendations with given prefix and list,
    ordered by descending count
    """
    # Context manager closes the file (the original leaked the handle).
    with open(list_of_words, 'r') as handle:
        lines = handle.readlines()
    candidates = []
    # Skip the header line; ignore blank lines.
    for line in lines[1:]:
        if line == '\n':
            continue
        entry = line.split('\t')
        count = int(entry[0])
        word = entry[1][:-1]  # drop the trailing newline
        if word.startswith(prefix):
            candidates.append((word, count))
    # The original called sorted() and discarded the result, so "top" was
    # never sorted; sort in place, highest count first.
    candidates.sort(key=lambda item: item[1], reverse=True)
    return candidates[0:top]
def train_step(loss):
    """
    Defines the ops to conduct an optimization step. You can set a learning
    rate scheduler or pick your favorite optimizer here. This set of operations
    should be applicable to both ConvNet() and Siamese() objects.

    Args:
      loss: scalar float Tensor, full loss = cross_entropy + reg_loss
    Returns:
      train_op: Ops for optimization.
    Raises:
      NotImplementedError: always — this is a stub to be filled in.
    """
    # The original had an unreachable `return train_op` after the raise,
    # referencing an undefined name; it has been removed.
    raise NotImplementedError
def get_folder_items(bucket_items, folder_prefix):
    """
    Returns items in bucket which belong to a folder
    :param bucket_items: items in the bucket
    :param folder_prefix: prefix containing the folder name
    :return: list of items in the folder without the folder prefix
    """
    prefix_len = len(folder_prefix)
    matches = []
    for item in bucket_items:
        name = item['name']
        if name.startswith(folder_prefix):
            # Strip the folder prefix from the stored object name.
            matches.append(name[prefix_len:])
    return matches
def list_books(args):
    """List books in this room."""
    # Listing is not implemented yet; only echo the requested type.
    message = 'List books of type %s' % args.type
    print(message)
    return 0
def to_palindrome(seq):
    """
    Generates two possible palindromic sequence from the input sequence
    """
    original_type = type(seq)
    # Exact type check on purpose: subclasses such as bool are rejected.
    if original_type is not str and original_type is not int:
        raise TypeError("Input a string or integer only")
    text = str(seq)
    forward = text + text[::-1]
    backward = text[::-1] + text
    if original_type is str:
        return forward, backward
    return int(forward), int(backward)
from typing import Tuple
def bbox_center(p: Tuple[float, float],
                q: Tuple[float, float]) -> Tuple[float, float]:
    """
    Return middle point between two points p and q.
    """
    (min_lon, min_lat), (max_lon, max_lat) = p, q
    mid_lon = min_lon + (max_lon - min_lon) / 2
    mid_lat = min_lat + (max_lat - min_lat) / 2
    return mid_lon, mid_lat
import math
def generate_timecode(ms: int) -> str:
    """
    Convert a duration in milliseconds to ISO8601 hh:mm:ss.sss format
    """
    # Successive divmods peel off hours, minutes, seconds and milliseconds.
    hours, remainder = divmod(ms, 60 * 60 * 1000)
    minutes, remainder = divmod(remainder, 60 * 1000)
    seconds, milliseconds = divmod(remainder, 1000)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}"
def map_args(tree, args):
    """
    Given a tree and a list of arguments, produce the tree with the arguments
    instead of integers at the leaves of the tree.
    E.g. for tree = [[1, 2], [3, 4]] and args = [a, b, c, d] we get
    [[a, b], [c, d]].
    """
    # Nodes are (s, t, a) triples; `t` holds the two subtrees and `a` carries
    # leaf counts: a[0] for this node, a[1]/a[2] for the children.
    # NOTE(review): inferred from the indexing below — confirm against the
    # tree-construction code.
    (s, t, a) = tree
    if a[0] == 1:
        # Single leaf left in this subtree: substitute the one argument.
        return args[0]
    # The left subtree consumes the first a[1][0] arguments, the right
    # subtree the rest.
    return [map_args((1, t[0], a[1]), args[:a[1][0]]),
            map_args((1, t[1], a[2]), args[a[1][0]:])]
def _TransformOperationName(resource):
    """Get operation name without project prefix."""
    # operation name is in the format of:
    # operations/projects/{}/instances/{}/.../locations/{}/operations/{}
    full_name = resource.get('name')
    parts = full_name.split('/')
    # Drop the leading operations/projects/{project} segments.
    return '/'.join(parts[3:])
def questionnaire_single():
    """
    What to ask if only one lookup is needed.

    Prompts until a valid OS code (3 letters + 3 digits) is entered and
    returns (branch, floor, ceil) with floor == ceil for a single lookup.
    """
    while True:
        scanos = input("OS (ex. AAD250): ")
        branch = scanos[:3]
        floor_text = scanos[3:6]
        # Valid codes are exactly three letters followed by three digits.
        if len(scanos) == 6 and branch.isalpha() and floor_text.isdigit():
            floor = int(floor_text)
            return branch, floor, floor
        print("OS MUST BE 3 LETTERS AND 3 NUMBERS, TRY AGAIN")
def expected_status(status_csv_path):
    """Loads pre-rendered, uncolored status table layouts.

    :param status_csv_path: callable mapping a fixture file name to its path
    :return: loader callable returning the fixture file's full text
    """
    def load_expected_status(expected_status_file):
        # readlines + per-line join was a no-op round trip; read directly.
        with open(status_csv_path(expected_status_file), "r") as es_file:
            return es_file.read()
    return load_expected_status
def relative_date_to_str(relative_date: int) -> str:
    """Convert a relative date (offset in days) to human readable German text.

    Offsets -2..2 map to dedicated words; anything else becomes
    "vor N Tagen" (N days ago) or "In N Tagen" (in N days).
    """
    mapping = {
        -2: "vorgestern",
        -1: "gestern",
        0: "heute",
        1: "morgen",
        2: "übermorgen"
    }
    if relative_date in mapping:
        return mapping[relative_date]
    # Use the magnitude for past dates; the original rendered "vor -5 Tagen".
    if relative_date < 0:
        return f"vor {-relative_date} Tagen"
    return f"In {relative_date} Tagen"
def shorten(name):
    """
    Shortens full name to use to compare and group
    """
    parts = name.split(" ")
    # First initial, a dot, then the remaining name parts run together.
    return parts[0][0] + "." + "".join(parts[1:])
def _get_queue_choices(queues):
    """Return list of `choices` array for html form for given queues
    idea is to return only one choice if there is only one queue or add empty
    choice at the beginning of the list, if there are more queues
    """
    choices = [(queue.id, queue.title) for queue in queues]
    if len(queues) > 1:
        # Blank option forces the user to pick a queue explicitly.
        choices.insert(0, ('', '--------'))
    return choices
import re
def find_col_index(header, col_name):
    """ Extracts the column index of the given variable in the data.

    Given a list of headers, returns the index of the first header equal to
    col_name (case-insensitively), allowing surrounding double quotes.

    Parameters
    ----------
    header : list of strs
        List of headers in the dataset.
    col_name : str
        Name of column to be indexed.

    Returns
    -------
    int
        If a match is found, returns the index. If no match, it raises an
        Exception.
    """
    # re.escape guards column names containing regex metacharacters
    # (e.g. "price (usd)"); the original interpolated the name verbatim.
    pat = re.compile('^(")*%s(")*$' % re.escape(col_name.lower()))
    for i, col in enumerate(header):
        if re.match(pat, col.lower()):
            return i
    raise Exception("Column name not found: %s" % col_name)
import os
def load_image(logo_image):
    """Load Bank Image"""
    # Resolve relative to this module so the current directory doesn't matter.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_dir, 'media', logo_image)
import numpy
def get_data_tuple(molecules, indices):
    """
    Take list of molecules and indices, return (x, y) for training machine
    learning models.
    :param molecules: List of molecules.
    :param indices: Indices of molecules that should be in (x, y).
    :return: (x, y) numpy arrays of representations and atomization energies.
    """
    selected = [molecules[i] for i in indices]
    reps = [mol['rep'] for mol in selected]
    energies = [mol['atomization_energy'] for mol in selected]
    return (numpy.array(reps), numpy.array(energies))
def formatMultiplier(stat: float) -> str:
    """Format a module effect attribute into a string, including a sign symbol and percentage symbol.

    :param stat: The statistic to format into a string
    :type stat: float
    :return: A sign symbol, followed by stat, followed by a percentage sign.
    """
    sign = "+" if stat >= 1 else "-"
    # Only multipliers above 1 are rebased before converting to a percentage.
    offset = 1 if stat > 1 else 0
    percent = round((stat - offset) * 100)
    return f"{sign}{percent}%"
def camel2snake(text):
    """Convert camel case to snake case. This assumes the input is valid camel
    case (if you have some weird hybrid of camel and snake case, for instance,
    you'd want to do some preprocessing first).

    Parameters
    ----------
    text: str
        Camel case string, e.g. vaderSentimentScore.

    Returns
    -------
    str: `text` converted to snake case, e.g. vader_sentiment_score.
    """
    pieces = []
    for ch in text:
        if ch.islower():
            pieces.append(ch)
        else:
            # Uppercase boundary: insert an underscore and lowercase it.
            pieces.append('_')
            pieces.append(ch.lower())
    return ''.join(pieces)
def sensor_cfg(sensor):
    """
    parses sensor configuration file and select right bias for sensor

    input:
        sensor: sensor number as a string (first column of the cfg file)
    returns:
        (x_bias, y_bias, z_bias) strings for the requested sensor
    raises:
        KeyError: if the sensor is not listed in the configuration file
    """
    cfg_path = '/home/james/catkin_ws/src/cp_simulator/cfg/sensor.cfg'
    # `with` guarantees the handle is closed (the original leaked it).
    with open(cfg_path, 'r') as cfg_file:
        lines = cfg_file.read().split('\n')
    sensor_bias = {}
    for line in lines:
        items = line.split(' ')
        # Skip blank/malformed lines that have no bias columns.
        if len(items) <= 1:
            continue
        sensor_bias[items[0]] = {
            'x': items[1],
            'y': items[2],
            'z': items[3],
        }
    bias = sensor_bias[sensor]
    return bias['x'], bias['y'], bias['z']
def get_color(color):
    """ Can convert from integer to (r, g, b) """
    if not color:
        # None, 0 and empty sequences all mean "no color".
        return None
    if isinstance(color, int):
        # Peel off blue, then green, then red (little-endian byte order).
        remainder, blue = divmod(color, 256)
        remainder, green = divmod(remainder, 256)
        red = remainder % 256
        return (red, green, blue)
    if not len(color) == 3:
        raise Exception('Invalid color {0}'.format(color))
    return color
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.