| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def svn_diff_fns2_invoke_token_discard(*args):
"""svn_diff_fns2_invoke_token_discard(svn_diff_fns2_t _obj, void diff_baton, void token)"""
return _diff.svn_diff_fns2_invoke_token_discard(*args)
|
cdfd25973cf87190a6f82b07c82e741233c65fcd
| 3,647,700
|
import numpy as np
import cv2
import torch
from torchvision.transforms import Compose, Normalize, ToTensor
def process_bb(model, I, bounding_boxes, image_size=(412, 412)):
    """
    :param model: A binary model to classify the bounding boxes
    :param I: PIL image
    :param bounding_boxes: Bounding boxes containing regions of interest
    :param image_size: Size to which each patch is resized
    :return: Model outputs for the ROIs, stacked along the first axis
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    patches = []
    normalization = Compose([ToTensor(),
                             Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    for (x, y, w, h) in bounding_boxes:
        patch = np.array(I.crop((x, y, x + w, y + h)))
        patch = cv2.resize(patch, image_size)
        patch = normalization(patch).unsqueeze(0).to(device)
        patch = model(patch).cpu().detach().numpy()
        patches.append(patch)
    # np.concatenate expects a sequence of arrays; collecting the outputs in
    # a list avoids concatenating onto an empty, shapeless array
    return np.concatenate(patches)
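# Usage sketch (hypothetical names: assumes a trained binary `model` and a
# PIL image `img` already exist; boxes are (x, y, w, h) tuples):
#
#   boxes = [(10, 20, 100, 150), (50, 60, 80, 80)]
#   preds = process_bb(model, img, boxes)
#   # `preds` stacks one model output per bounding box along axis 0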
|
2029fa67fb85ce3e15913ce9c1684cbe762ea3b7
| 3,647,701
|
def roc(
observations,
forecasts,
bin_edges="continuous",
dim=None,
drop_intermediate=False,
return_results="area",
):
"""Computes the relative operating characteristic for a range of thresholds.
Parameters
----------
observations : xarray.Dataset or xarray.DataArray
Labeled array(s) over which to apply the function.
If ``bin_edges=='continuous'``, observations are binary.
forecasts : xarray.Dataset or xarray.DataArray
Labeled array(s) over which to apply the function.
If ``bin_edges=='continuous'``, forecasts are probabilities.
bin_edges : array_like, str, default='continuous'
Bin edges for categorising observations and forecasts. Similar to np.histogram, \
all but the last (righthand-most) bin include the left edge and exclude the \
right edge. The last bin includes both edges. ``bin_edges`` will be sorted in \
ascending order. If ``bin_edges=='continuous'``, calculate ``bin_edges`` from \
forecasts, equal to ``sklearn.metrics.roc_curve(f_boolean, o_prob)``.
dim : str, list
The dimension(s) over which to compute the contingency table
drop_intermediate : bool, default=False
Whether to drop some suboptimal thresholds which would not appear on a plotted
ROC curve. This is useful in order to create lighter ROC curves.
Defaults to ``True`` in ``sklearn.metrics.roc_curve``.
    return_results: str, default='area'
        Specify how the return value is structured:
            - 'area': return only the ``area under curve`` of ROC
            - 'all_as_tuple': return ``true positive rate`` and ``false positive rate``
              at each bin and area under the curve of ROC as tuple
            - 'all_as_metric_dim': return ``true positive rate`` and
              ``false positive rate`` at each bin and ``area under curve`` of ROC
              concatenated into new ``metric`` dimension
Returns
-------
xarray.Dataset or xarray.DataArray :
reduced by dimensions ``dim``, see ``return_results`` parameter.
``true positive rate`` and ``false positive rate`` contain
``probability_bin`` dimension with ascending ``bin_edges`` as coordinates.
Examples
--------
>>> f = xr.DataArray(np.random.normal(size=(1000)),
... coords=[('time', np.arange(1000))])
>>> o = f.copy()
>>> category_edges = np.linspace(-2, 2, 5)
>>> xs.roc(o, f, category_edges, dim=['time'])
<xarray.DataArray 'histogram_observations_forecasts' ()>
array(1.)
See also
--------
xskillscore.Contingency
sklearn.metrics.roc_curve
References
----------
http://www.cawcr.gov.au/projects/verification/
"""
if dim is None:
dim = list(forecasts.dims)
if isinstance(dim, str):
dim = [dim]
continuous = False
if isinstance(bin_edges, str):
if bin_edges == "continuous":
continuous = True
# check that o binary
if isinstance(observations, xr.Dataset):
o_check = observations.to_array()
else:
o_check = observations
if str(o_check.dtype) != "bool":
if not ((o_check == 0) | (o_check == 1)).all():
raise ValueError(
'Input "observations" must represent logical (True/False) outcomes',
o_check,
)
# works only for 1var
if isinstance(forecasts, xr.Dataset):
varlist = list(forecasts.data_vars)
if len(varlist) == 1:
v = varlist[0]
else:
                raise ValueError(
                    "Only works for `xr.Dataset` with one variable, found "
                    f"{forecasts.data_vars}. Consider looping over `data_vars` "
                    "or `.to_array()`."
                )
f_bin = forecasts[v]
else:
f_bin = forecasts
f_bin = f_bin.stack(ndim=forecasts.dims)
f_bin = f_bin.sortby(-f_bin)
bin_edges = np.append(f_bin[0] + 1, f_bin)
bin_edges = np.unique(bin_edges) # ensure that in ascending order
else:
raise ValueError("If bin_edges is str, it can only be continuous.")
else:
bin_edges = np.sort(bin_edges) # ensure that in ascending order
# loop over each bin_edge and get true positive rate and false positive rate
# from contingency
tpr, fpr = [], []
for i in bin_edges:
dichotomous_category_edges = np.array(
[-np.inf, i, np.inf]
) # "dichotomous" means two-category
dichotomous_contingency = Contingency(
observations,
forecasts,
dichotomous_category_edges,
dichotomous_category_edges,
dim=dim,
)
fpr.append(dichotomous_contingency.false_alarm_rate())
tpr.append(dichotomous_contingency.hit_rate())
tpr = xr.concat(tpr, "probability_bin")
fpr = xr.concat(fpr, "probability_bin")
tpr["probability_bin"] = bin_edges
fpr["probability_bin"] = bin_edges
fpr = fpr.fillna(1.0)
tpr = tpr.fillna(0.0)
# pad (0,0) and (1,1)
fpr_pad = xr.concat(
[
xr.ones_like(fpr.isel(probability_bin=0, drop=False)),
fpr,
xr.zeros_like(fpr.isel(probability_bin=-1, drop=False)),
],
"probability_bin",
)
tpr_pad = xr.concat(
[
xr.ones_like(tpr.isel(probability_bin=0, drop=False)),
tpr,
xr.zeros_like(tpr.isel(probability_bin=-1, drop=False)),
],
"probability_bin",
)
if drop_intermediate and fpr.probability_bin.size > 2:
fpr, tpr = _drop_intermediate(fpr, tpr)
fpr_pad, tpr_pad = _drop_intermediate(fpr_pad, tpr_pad)
area = _auc(fpr_pad, tpr_pad)
if continuous:
# sklearn returns in reversed order
fpr = fpr.sortby(-fpr.probability_bin)
        tpr = tpr.sortby(-tpr.probability_bin)
# mask always nan
def _keep_masked(new, ori, dim):
"""Keep mask from `ori` deprived of dimensions from `dim` in input `new`."""
isel_dim = {d: 0 for d in forecasts.dims if d in dim}
mask = ori.isel(isel_dim, drop=True)
new_masked = new.where(mask.notnull())
return new_masked
fpr = _keep_masked(fpr, forecasts, dim=dim)
tpr = _keep_masked(tpr, forecasts, dim=dim)
area = _keep_masked(area, forecasts, dim=dim)
if return_results == "area":
return area
elif return_results == "all_as_metric_dim":
results = xr.concat([fpr, tpr, area], "metric", coords="minimal")
results["metric"] = [
"false positive rate",
"true positive rate",
"area under curve",
]
return results
elif return_results == "all_as_tuple":
return fpr, tpr, area
else:
raise NotImplementedError(
"expect `return_results` from [all_as_tuple, area, all_as_metric_dim], "
f"found {return_results}"
)
|
328e00060c758ddf3c12cecdec1961561bb2d3f3
| 3,647,702
|
from pyparsing import Literal, OneOrMore, Regex
def make_grammar():
"""Creates the grammar to be used by a spec matcher."""
# This is apparently how pyparsing recommends to be used,
# as http://pyparsing.wikispaces.com/share/view/644825 states that
# it is not thread-safe to use a parser across threads.
unary_ops = (
# Order matters here (so that '=' doesn't match before '==')
Literal("==") | Literal("=") |
Literal("!=") | Literal("<in>") |
Literal(">=") | Literal("<=") |
Literal(">") | Literal("<") |
Literal("s==") | Literal("s!=") |
# Order matters here (so that '<' doesn't match before '<=')
Literal("s<=") | Literal("s<") |
# Order matters here (so that '>' doesn't match before '>=')
Literal("s>=") | Literal("s>"))
all_in_nary_op = Literal("<all-in>")
or_ = Literal("<or>")
    # An atom is anything that is not a keyword, followed by anything but whitespace
atom = ~(unary_ops | all_in_nary_op | or_) + Regex(r"\S+")
unary = unary_ops + atom
nary = all_in_nary_op + OneOrMore(atom)
disjunction = OneOrMore(or_ + atom)
# Even-numbered tokens will be '<or>', so we drop them
disjunction.setParseAction(lambda _s, _l, t: ["<or>"] + t[1::2])
expr = disjunction | nary | unary | atom
return expr
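# Usage sketch (assumes pyparsing is installed; results shown as plain lists):
#
#   expr = make_grammar()
#   expr.parseString(">= 4.5").asList()         # ['>=', '4.5']
#   expr.parseString("<or> x <or> y").asList()  # ['<or>', 'x', 'y']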
|
aef2a3fc897c42e61ebd81c9d43cb42f342b1fb6
| 3,647,703
|
def _pull(keys):
"""helper method for implementing `client.pull` via `client.apply`"""
    if isinstance(keys, (list, tuple, set)):
return [eval(key, globals()) for key in keys]
else:
return eval(keys, globals())
|
779fcec45c3693bdd8316c14138a88c57f0c318c
| 3,647,704
|
def position(df):
    """
    Compute the daily position from the trading signals.
    :param df:
    :return:
    """
    # Derive the actual daily stock position from the signal
    df['pos'] = df['signal'].shift(1)
    df['pos'].fillna(method='ffill', inplace=True)
    # Account for limit-up/limit-down days on which the stock cannot be traded
    # Find the days that open at the limit-up price
    cond_cannot_buy = df['开盘价'] > df['收盘价'].shift(1) * 1.097  # today's open is 9.7% above yesterday's close
    # On limit-up opens where the day's position is 1, set 'pos' to NaN
    # ?? Question: why 1?
    df.loc[cond_cannot_buy & (df['pos'] == 1), 'pos'] = None
    # Find the days that open at the limit-down price
    cond_cannot_buy = df['开盘价'] < df['收盘价'].shift(1) * 0.903  # today's open is 9.7% below yesterday's close
    # On limit-down opens where the day's position is 0, set 'pos' to NaN
    # ?? Question: why 0?
    df.loc[cond_cannot_buy & (df['pos'] == 0), 'pos'] = None
    # On days where the position is NaN no trade is possible, so the position
    # must stay the same as on the previous trading day.
    df['pos'].fillna(method='ffill', inplace=True)
    # Fill any remaining NaN positions with 0
    df['pos'].fillna(value=0, inplace=True)
    return df
|
15666e26cf8a9d6ae98ff1746aecab759de9139b
| 3,647,705
|
def prepare_data(data, preprocessed_data, args):
    """Split raw and preprocessed data into train and test sets."""
    data = data.to_numpy()
    train_size = int(len(data) * args.train_split)
    train_X = preprocessed_data[:train_size]
    train_Y = data[:train_size]
    test_X = preprocessed_data[train_size:]
    test_Y = data[train_size:]
    return train_X, train_Y, test_X, test_Y
|
b5e120eebd6060656d71f8f76755afd0d8eccce5
| 3,647,706
|
def svn_client_conflict_tree_get_victim_node_kind(conflict):
"""svn_client_conflict_tree_get_victim_node_kind(svn_client_conflict_t * conflict) -> svn_node_kind_t"""
return _client.svn_client_conflict_tree_get_victim_node_kind(conflict)
|
6258c011eb947ddedb1e060cd036ddcf9cbc1758
| 3,647,707
|
import importlib
def load_module(script_path: str, module_name: str):
"""
return a module
spec.loader.exec_module(foo)
foo.A()
"""
spec = importlib.util.spec_from_file_location(module_name, script_path)
module = importlib.util.module_from_spec(spec)
return spec, module
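# Usage sketch (hypothetical path and name): the module is not executed
# until the caller runs the loader explicitly.
#
#   spec, module = load_module("/tmp/plugin.py", "plugin")
#   spec.loader.exec_module(module)  # run the module's top-level code
#   module.A()                       # attributes are available afterwards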
|
61ebc105d0c7a168b37210452445e8e24e16f87a
| 3,647,708
|
import os
def readfile(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
|
fde9a2866b189cea40ecc3e48695d8b3e98f6228
| 3,647,709
|
from os import getpid
from psutil import Process
def memory_func(func):
    """
    Decorator that measures the resident memory used by a function call.
    """
    def wrapper(*args, **kwargs):
        proc = Process(getpid())  # handle for the current process
        start_memory = proc.memory_info().rss  # resident memory before the call
        result = func(*args, **kwargs)  # run the wrapped function
        end_memory = proc.memory_info().rss  # resident memory after the call
        print(f"Resident memory used by {func.__name__}: {end_memory-start_memory} bytes")
        return result
    return wrapper
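# Usage sketch: decorate any function to print its resident-memory delta.
#
#   @memory_func
#   def build_list(n):
#       return list(range(n))
#
#   build_list(1_000_000)  # prints the RSS difference in bytes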
|
a8c634b3415925b65fe35df584328705eb0d171e
| 3,647,710
|
def read_data(datafile='sampling_data_2015.txt'):
"""Imports data from an ordered txt file and creates a list of samples."""
sample_list = []
with open(datafile, 'r') as file:
for line in file:
method, date, block, site, orders = line.split('|')
new_sample = sample(method, date, block, site)
new_sample.import_orders(orders)
sample_list.append(new_sample)
return sample_list
|
4aee4b2ef0cd9d31eefbd9f394714f0ea789b49d
| 3,647,711
|
async def get_https(method: str = "all"):
"""Get https proxies from get_proxies_func() function."""
return await get_proxies_func("https", method)
|
575eccf2149724c29062d866fcc420fe3b34be78
| 3,647,712
|
import logging
import numpy as np
from tqdm import tqdm
def iam_analysis(obs_spec, model1_pars, model2_pars, rvs=None, gammas=None,
verbose=False, norm=False, save_only=True, chip=None,
prefix=None, errors=None, area_scale=False, wav_scale=True, norm_method="scalar", fudge=None):
"""Run two component model over all model combinations."""
rvs = check_inputs(rvs)
gammas = check_inputs(gammas)
if isinstance(model1_pars, list):
logging.debug(__("Number of close model_pars returned {0}", len(model1_pars)))
if isinstance(model2_pars, list):
logging.debug(__("Number of close model_pars returned {0}", len(model2_pars)))
# Solution Grids to return
iam_grid_chisqr_vals = np.empty((len(model1_pars), len(model2_pars)))
args = [model2_pars, rvs, gammas, obs_spec]
kwargs = {"norm": norm, "save_only": save_only, "chip": chip,
"prefix": prefix, "verbose": verbose, "errors": errors,
"area_scale": area_scale, "wav_scale": wav_scale,
"norm_method": norm_method, "fudge": fudge,
}
for ii, params1 in enumerate(tqdm(model1_pars)):
iam_grid_chisqr_vals[ii] = iam_wrapper(ii, params1, *args, **kwargs)
if save_only:
return None
else:
return iam_grid_chisqr_vals
|
28a5291fe5dd62c2a3a988d994cc98a81beeaac8
| 3,647,713
|
import os
def _parse_archive_name(pathname):
"""Return the name of the project given the pathname of a project
archive file.
"""
return os.path.basename(pathname).split('.')[0]
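# Example: everything after the first dot is dropped, so multi-part
# extensions such as ".tar.gz" are handled too:
#
#   _parse_archive_name("/tmp/projects/myproj.tar.gz")  # -> 'myproj'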
|
90e6bcf019ac48b73c16f4db605e1d92c3d32595
| 3,647,714
|
import imaplib
def is_authenticated(user, password):
"""Check if ``user``/``password`` couple is valid."""
global IMAP_WARNED_UNENCRYPTED
if not user or not password:
return False
log.LOGGER.debug(
"Connecting to IMAP server %s:%s." % (IMAP_SERVER, IMAP_SERVER_PORT,))
connection_is_secure = False
if IMAP_USE_SSL:
connection = imaplib.IMAP4_SSL(host=IMAP_SERVER, port=IMAP_SERVER_PORT)
connection_is_secure = True
else:
connection = imaplib.IMAP4(host=IMAP_SERVER, port=IMAP_SERVER_PORT)
server_is_local = (IMAP_SERVER == "localhost")
if not connection_is_secure:
try:
connection.starttls()
log.LOGGER.debug("IMAP server connection changed to TLS.")
connection_is_secure = True
except AttributeError:
if not server_is_local:
log.LOGGER.error(
"Python 3.2 or newer is required for IMAP + TLS.")
except (imaplib.IMAP4.error, imaplib.IMAP4.abort) as exception:
log.LOGGER.warning(
"IMAP server at %s failed to accept TLS connection "
"because of: %s" % (IMAP_SERVER, exception))
if server_is_local and not connection_is_secure and not IMAP_WARNED_UNENCRYPTED:
IMAP_WARNED_UNENCRYPTED = True
log.LOGGER.warning(
"IMAP server is local. "
"Will allow transmitting unencrypted credentials.")
if connection_is_secure or server_is_local:
try:
connection.login(user, password)
connection.logout()
log.LOGGER.debug(
"Authenticated IMAP user %s "
"via %s." % (user, IMAP_SERVER))
return True
except (imaplib.IMAP4.error, imaplib.IMAP4.abort) as exception:
log.LOGGER.error(
"IMAP server could not authenticate user %s "
"because of: %s" % (user, exception))
else:
log.LOGGER.critical(
"IMAP server did not support TLS and is not ``localhost``. "
"Refusing to transmit passwords under these conditions. "
"Authentication attempt aborted.")
return False
|
af7518aec858b2c1e1c6e4e71120f95af5787ce6
| 3,647,715
|
import cv2
def apply_mask(input, mask):
"""Filter out an area of an image using a binary mask.
Args:
input: A three channel numpy.ndarray.
mask: A black and white numpy.ndarray.
Returns:
A three channel numpy.ndarray.
"""
return cv2.bitwise_and(input, input, mask=mask)
|
34451c71b9f18a64f5b27e3ff9269a9c4e3b803d
| 3,647,716
|
import sys
import time
from urllib.request import urlopen  # `urllib2.urlopen` on legacy Python 2
def runUrllib2(urls, num):
"""Running benchmark for urllib2.
Args:
urls: List of URLs.
num: Number of requests.
"""
results = []
for i in range(num):
sys.stderr.write('.')
start = time.time()
for url in urls:
urlopen(url)
end = time.time()
results.append(round(end-start, 3))
return results
|
ecd883c44d68eb88b711dd9cbf22bc58fa93cd7b
| 3,647,717
|
import requests
import json
def fetch_track_lyrics(artist, title):
    """ Returns lyrics when found, None when not found """
    MUSIXMATCH_KEY = get_musixmatch_key()
    api_url = 'https://api.musixmatch.com/ws/1.1/matcher.lyrics.get'
    # Let requests URL-encode the query parameters; artist and title may
    # contain spaces or special characters
    params = {'q_track': title, 'q_artist': artist, 'apikey': MUSIXMATCH_KEY}
    response = requests.get(api_url, params=params)
    if response.status_code != 200:
        raise Exception("Musixmatch API not accessible")
res_body = json.loads(response.text)
message = res_body['message']
if message['header']['status_code'] != 200:
return None
body = message['body']
if 'lyrics' not in body:
return None
lyrics = body['lyrics']
return {
'lyrics': lyrics['lyrics_body'],
'lang': lyrics['lyrics_language']
}
|
f8e049578bb8c6b52636fd1e1789a81af30b28e6
| 3,647,718
|
from copy import deepcopy
def string_avg(strings, binary=True):
"""
Takes a list of strings of equal length and returns a string containing
the most common value from each index in the string.
Optional argument: binary - a boolean indicating whether or not to treat
strings as binary numbers (fill in leading zeros if lengths differ).
"""
if binary: # Assume this is a binary number and fill leading zeros
strings = deepcopy(strings)
longest = len(max(strings, key=len))
for i in range(len(strings)):
while len(strings[i]) < longest:
split_string = strings[i].split("b")
strings[i] = "0b0" + split_string[1]
avg = ""
for i in (range(len(strings[0]))):
opts = []
for s in strings:
opts.append(s[i])
avg += max(set(opts), key=opts.count)
return avg
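# Quick self-check of the binary padding and per-index majority vote
# ('0b11' is padded to '0b011' before voting):
if __name__ == "__main__":
    assert string_avg(["0b101", "0b111", "0b11"]) == "0b111"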
|
3d515cbeedc93b95c5f38de62000629002e41166
| 3,647,719
|
def refresh():
    """Pull fresh data from Open AQ and replace existing data."""
    DB.drop_all()
    DB.create_all()
    api = openaq.OpenAQ()
    status, body = api.measurements(city='Los Angeles', parameter='pm25')
    for reading in body['results']:
        record = Record(datetime=reading['date']['utc'], value=reading['value'])
        DB.session.add(record)
    # Commit once after all rows are added instead of once per row
    DB.session.commit()
    return 'Data refreshed!'
|
9fcf71ffe0e5a46119e98f17c12aae29721285c8
| 3,647,720
|
import json
def update_organization(current_user):
""" Обновление информации об организации. """
try:
if CmsUsers.can(current_user.id, "put", "contacts"):
organization = CmsOrganization.query.first()
update_data = request.get_json()
for key in list(update_data.keys()):
if key not in ['company_name', 'full_company_name',
'requisites']:
del update_data[key]
if not organization_update_validator.is_valid(update_data):
errors = []
for error in sorted(
organization_update_validator.iter_errors(
update_data), key=str):
errors.append(error.message)
separator = '; '
error_text = separator.join(errors)
response = Response(
response=json.dumps({'type': 'danger',
'text': error_text}),
status=422,
mimetype='application/json'
)
else:
organization_name_old = organization.company_name
organization.company_name = update_data['company_name']
organization.full_company_name = update_data[
'full_company_name']
if 'requisites' in update_data:
organization.requisites = update_data['requisites']
db.session.add(organization)
db.session.commit()
response = Response(
response=json.dumps(
{'type': 'success',
'text': 'Отредактирована основная '
'информация организации '
+ str(organization_name_old) + '!',
'link': url_for('.get_organization',
_external=True)}),
status=200,
mimetype='application/json'
)
else:
response = Response(
response=json.dumps({'type': 'danger',
'text': 'Доступ запрещен (403)'}),
status=403,
mimetype='application/json'
)
except Exception:
response = server_error(request.args.get("dbg"))
return response
|
ff7826b1b4537eb0b793b426a6b6aa097936bfa9
| 3,647,721
|
def config_section_data():
"""Produce the default configuration section for app.config,
when called by `resilient-circuits config [-c|-u]`
"""
config_data = u"""[fn_sep]
sep_base_path=/sepm/api/v1
sep_auth_path=/sepm/api/v1/identity/authenticate
sep_host=<SEPM server dns name or ip address>
sep_port=8446
sep_username=<username>
sep_password=<password>
sep_domain=<SEP domain name>
# Optional settings for access to SEPM via a proxy.
#http_proxy=http://proxy:80
#https_proxy=http://proxy:80
# Limit result sent to Resilient, add full result as an attachment.
sep_results_limit=200
# Period of time (seconds) to wait for all endpoints to return a scan result.
sep_scan_timeout=1800
"""
return config_data
|
fcad9aa412d66b4a48dfb753d64e0f84979df617
| 3,647,722
|
def read_eieio_command_message(data, offset):
""" Reads the content of an EIEIO command message and returns an object\
identifying the command which was contained in the packet, including\
any parameter, if required by the command
:param data: data received from the network
:type data: bytestring
:param offset: offset at which the parsing operation should start
:type offset: int
:return: an object which inherits from EIEIOCommandMessage which contains\
parsed data received from the network
:rtype: \
:py:class:`spinnman.messages.eieio.command_messages.eieio_command_message.EIEIOCommandMessage`
"""
command_header = EIEIOCommandHeader.from_bytestring(data, offset)
command_number = command_header.command
if (command_number ==
constants.EIEIO_COMMAND_IDS.DATABASE_CONFIRMATION.value):
return DatabaseConfirmation.from_bytestring(
command_header, data, offset + 2)
# Fill in buffer area with padding
elif (command_number ==
constants.EIEIO_COMMAND_IDS.EVENT_PADDING.value):
return PaddingRequest()
# End of all buffers, stop execution
elif (command_number ==
constants.EIEIO_COMMAND_IDS.EVENT_STOP.value):
return EventStopRequest()
# Stop complaining that there is sdram free space for buffers
elif (command_number ==
constants.EIEIO_COMMAND_IDS.STOP_SENDING_REQUESTS.value):
return StopRequests()
# Start complaining that there is sdram free space for buffers
elif (command_number ==
constants.EIEIO_COMMAND_IDS.START_SENDING_REQUESTS.value):
return StartRequests()
# Spinnaker requesting new buffers for spike source population
elif (command_number ==
constants.EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_BUFFERS.value):
return SpinnakerRequestBuffers.from_bytestring(
command_header, data, offset + 2)
# Buffers being sent from host to SpiNNaker
elif (command_number ==
constants.EIEIO_COMMAND_IDS.HOST_SEND_SEQUENCED_DATA.value):
return HostSendSequencedData.from_bytestring(
command_header, data, offset + 2)
# Buffers available to be read from a buffered out vertex
elif (command_number ==
constants.EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_READ_DATA.value):
return SpinnakerRequestReadData.from_bytestring(
command_header, data, offset + 2)
    # Host confirming data being read from SpiNNaker memory
elif (command_number ==
constants.EIEIO_COMMAND_IDS.HOST_DATA_READ.value):
return HostDataRead.from_bytestring(
command_header, data, offset + 2)
return EIEIOCommandMessage(command_header, data, offset + 2)
|
05abce201acf3b706e4b476c15eb2af5a0102cc8
| 3,647,723
|
def quote():
    """Get stock quote."""
    if request.method == "POST":
        # Get values
        get_symbol = request.form.get("symbol")
        # Ensure symbol was submitted before looking it up
        if not get_symbol:
            return apology("must provide symbol")
        stock = lookup(get_symbol)
        # Ensure symbol exists
        if not stock:
            return apology("stock not found")
        # Display stocks
        return render_template("quoted.html", stock=stock)
    else:
        return render_template("quote.html", stock=None)
|
b26c04c01e5ddb26c19e555a45e3fac6f58c0fef
| 3,647,724
|
import pandas as pd
def counts_to_df(value_counts, colnames, n_points):
"""DO NOT USE IT!
"""
pdf = pd.DataFrame(value_counts
.to_frame('count')
.reset_index()
.apply(lambda row: dict({'count': row['count']},
**dict(zip(colnames, row['index'].toArray()))),
axis=1)
.values
.tolist())
pdf['count'] /= pdf['count'].sum()
proportions = pdf['count'] / pdf['count'].min()
factor = int(n_points / proportions.sum())
pdf = pd.concat([pdf[colnames], (proportions * factor).astype(int)], axis=1)
combinations = pdf.apply(lambda row: row.to_dict(), axis=1).values.tolist()
return pd.DataFrame([dict(v) for c in combinations for v in int(c.pop('count')) * [list(c.items())]])
|
85d5283f2d53dcf3ec33d7a1f3f52d9acc0affde
| 3,647,725
|
import math
import matplotlib.pyplot as plt
import seaborn as sns
def make_pair_plot(samples, param_names=None,
pair_plot_params=PairPlotParams()):
"""
    Make a pair plot for the parameters from the posterior distribution.
    Parameters
    ----------
    samples : pandas.DataFrame
        Each column contains samples from the posterior distribution.
param_names : list of str
Names of the parameters for plotting. If None, all will be plotted.
Returns
-------
Seaborn's PairGrid
"""
param_names = filter_param_names(samples.columns, param_names)
if len(param_names) > pair_plot_params.max_params:
        print((
            f'Showing only first {pair_plot_params.max_params} '
            f'parameters out of {len(param_names)} in pair plot. '
            'Consider limiting the parameters with "param_names".'))
param_names = param_names[:pair_plot_params.max_params]
samples = samples[param_names]
# Show no more than `max_samples` markers
keep_nth = math.ceil(samples.shape[0] / pair_plot_params.max_samples)
samples = samples[::keep_nth]
g = sns.PairGrid(samples)
g = g.map_upper(sns.scatterplot, s=pair_plot_params.marker_size,
color=pair_plot_params.color,
edgecolor=pair_plot_params.edgecolor,
alpha=pair_plot_params.alpha)
g = g.map_lower(sns.kdeplot, color=pair_plot_params.color)
g = g.map_diag(plt.hist, color=pair_plot_params.color,
edgecolor=pair_plot_params.diag_edge_color)
return g
|
73f5d8fc7dee8b3179cb8c1513eb2989c788e7cf
| 3,647,726
|
def read_meta_soe(metafile):
"""read soe metadata.csv to get filename to meta mapping"""
wavfiles = csv2dict(metafile)
return {f['fid']:{k:v for (k,v) in f.items() if k!='fid'} for f in wavfiles}
|
51f82a45d12b332d9edbe7b027dc7ee2582af35b
| 3,647,727
|
def send_message(message, string, dm=False, user=None, format_content=True):
"""send_message
Sends a message with string supplied by [lang]_STRING.txt files.
:param message: MessageWrapper object with data for formatting.
:param string: Name of the string to read.
:param dm: Whether the message should be sent to dm. Requires user to not be None
    :param user: User for dm usage.
    :param format_content: Passed through to ``send_custom_message``.
"""
msg = get_string(string, users.get_language(message))
if not msg or msg == MessageCode.UNKNOWN_STRING:
return MessageCode.NO_STRING
return send_custom_message(message, msg, dm=dm, user=user, format_content=format_content)
|
c8396108126fcaea735a94be3dcd4ed954f43d70
| 3,647,728
|
import numpy as np
def calc_torque(beam, fforb, index=False):
""" Calculates torque from a neutral beam (or beam component)
torque = F * r_tan = (P/v) * r_tan = (P/sqrt(2E/m)) * r_tan = P * sqrt(m/(2E)) * r_tan
:param fforb:
:param index:
:param beam: beam object with attributes z, m, a, en, pwr, rtan
:return: torque
"""
if index is not False:
power = beam.P.W[index]
energy = beam.E.J[index]
mass = beam.m
rtan = beam.rtang[index]
torque = power * np.sqrt(0.5 * mass / energy) * rtan * (1.0 - fforb) # Piper Changes: Included fast ion losses.
return torque
else:
power = beam.P.W
energy = beam.E.J
mass = beam.m
rtan = beam.rtang
torque = power * np.sqrt(0.5 * mass / energy) * rtan * (1.0-fforb) # Piper Changes: Included fast ion losses.
return torque
|
55cb8172f874a1d25c6dcf36c693f818d11d59c4
| 3,647,729
|
def cli(ctx, user_id):
"""Create a new API key for a given user.
Output:
the API key for the user
"""
return ctx.gi.users.create_user_apikey(user_id)
|
d7dafd77ef983286184b6f5aa2362bb734389696
| 3,647,730
|
import re
def whitespace_tokenizer(text):
"""Tokenize on whitespace, keeping whitespace.
Args:
text: The text to tokenize.
Returns:
list: A list of pseudo-word tokens.
"""
return re.findall(r"\S+\s*", text)
|
e79234b15912fdc225e2571788844732296f93d7
| 3,647,731
|
def _u2i(number):
"""
Converts a 32 bit unsigned number to signed. If the number
is negative it indicates an error. On error a pigpio
exception will be raised if exceptions is True.
"""
v = u2i(number)
if v < 0:
if exceptions:
raise error(error_text(v))
return v
|
920a2dcbf68df34141c482c2318917ccff248501
| 3,647,732
|
import numpy as np
def apply_once(func, arr, axes, keepdims=True):
"""
Similar to `numpy.apply_over_axes`, except this performs the operation over
a flattened version of all the axes, meaning that the function will only be
called once. This only makes a difference for non-linear functions.
Parameters
----------
    func : callable
Function that operates well on Numpy arrays and returns a single value
of compatible dtype.
arr : ndarray
Array to do operation over.
axes : int or iterable
Specifies the axes to perform the operation. Only one call will be made
to `func`, with all values flattened.
keepdims : bool
By default, this is True, so the collapsed dimensions remain with
        length 1. This is similar to `numpy.apply_over_axes` in that regard. If
this is set to False, the dimensions are removed, just like when using
for instance `numpy.sum` over a single axis. Note that this is safer
than subsequently calling squeeze, since this option will preserve
length-1 dimensions that were not operated on.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
>>> rs = np.random.RandomState(0)
>>> x = rs.uniform(size=(10, 3, 3))
    Imagine that you have ten 3x3 images and you want to calculate each image's
intensity standard deviation:
>>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
array([ 0.06056838, 0.08230712, 0.08135083, 0.09938963, 0.08533604,
0.07830725, 0.066148 , 0.07983019, 0.08134123, 0.01839635])
This is the same as ``x.std(1).std(1)``, which is not the standard
deviation of all 9 pixels together. To fix this we can flatten the pixels
and try again:
>>> x.reshape(10, 9).std(axis=1)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717])
This is exactly what this function does for you:
>>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717])
"""
all_axes = np.arange(arr.ndim)
if isinstance(axes, int):
axes = {axes}
else:
axes = set(axis % arr.ndim for axis in axes)
principal_axis = min(axes)
for i, axis in enumerate(axes):
axis0 = principal_axis + i
if axis != axis0:
all_axes[axis0], all_axes[axis] = all_axes[axis], all_axes[axis0]
transposed_arr = arr.transpose(all_axes)
new_shape = []
new_shape_keepdims = []
for axis, dim in enumerate(arr.shape):
if axis == principal_axis:
new_shape.append(-1)
elif axis not in axes:
new_shape.append(dim)
if axis in axes:
new_shape_keepdims.append(1)
else:
new_shape_keepdims.append(dim)
collapsed = np.apply_along_axis(func,
principal_axis,
transposed_arr.reshape(new_shape))
if keepdims:
return collapsed.reshape(new_shape_keepdims)
else:
return collapsed
|
939eea81d4443a4ef144105b1cc9335000b20f49
| 3,647,733
|
from io import BytesIO
def bytes_to_bytesio(bytestream):
"""Convert a bytestring to a BytesIO ready to be decoded."""
fp = BytesIO()
fp.write(bytestream)
fp.seek(0)
return fp
|
d59e4f5ccc581898da20bf5d3f6e70f8e8712aa6
| 3,647,734
|
from typing import List
def image_scatter_channels(im: Image, subimages=None) -> List[Image]:
"""Scatter an image into a list of subimages using the channels
:param im: Image
:param subimages: Number of channels
:return: list of subimages
"""
image_list = list()
if subimages is None:
subimages = im.shape[0]
for slab in image_channel_iter(im, subimages=subimages):
image_list.append(slab)
assert len(image_list) == subimages, "Too many subimages scattered"
return image_list
|
f87cb88ef060a6d093dacabdaab0ebc94861b734
| 3,647,735
|
def unauthorized_handler():
    """
    Redirect unauthorized requests to the sign-in page.
    :return: Redirect to the sign-in page
    """
current_app.logger.info("Unauthorized user need to sign-in")
return redirect(url_for('userView.signin'))
|
72b505ae13023aea23e0353b6571da64d9f647b8
| 3,647,736
|
import posixpath
def pre_order_next(path, children):
"""Returns the next dir for pre-order traversal."""
assert path.startswith('/'), path
# First subdir is next
for subdir in children(path):
return posixpath.join(path, subdir)
while path != '/':
# Next sibling is next
name = posixpath.basename(path)
parent = posixpath.dirname(path)
siblings = list(children(parent))
assert name in siblings
if name != siblings[-1]:
return posixpath.join(parent, siblings[siblings.index(name) + 1])
# Go up, find a sibling of the parent.
path = parent
# This was the last one
return None
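# Self-contained demo on a toy tree; `children` maps a path to its sub-dirs:
if __name__ == "__main__":
    tree = {'/': ['a', 'b'], '/a': ['c'], '/a/c': [], '/b': []}
    def children(path):
        return tree[path]
    assert pre_order_next('/', children) == '/a'
    assert pre_order_next('/a', children) == '/a/c'
    assert pre_order_next('/a/c', children) == '/b'
    assert pre_order_next('/b', children) is None  # traversal finished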
|
fcbe2b17b29396ac978f4a931a454c988e6fe05b
| 3,647,737
|
def gettiming(process_list, typetiming):
"""
Used to get a sort set for different duration needed to conver to
morse code.
"""
timing = []
for x in process_list:
if(x[0] == typetiming):
timing.append(x[3])
timing = set(timing)
return sorted(timing)
|
8e71449eacaee086f9f9147e1c3b8602ce8e553f
| 3,647,738
|
import click
def init():
"""Top level command handler."""
@click.command()
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
@click.argument('eventfile', type=click.Path(exists=True))
def configure(approot, eventfile):
"""Configure local manifest and schedule app to run."""
tm_env = appenv.AppEnvironment(root=approot)
container_dir = app_cfg.configure(tm_env, eventfile)
_LOGGER.info('Configured %r', container_dir)
return configure
|
2c24fe8dc2225b7f2f848ac3d2ef09275829c754
| 3,647,739
|
import subprocess
def launch_subprocess(command):
"""
Process launch helper
:param command Command to execute
:type command list[str]|str
:return Popen object
"""
is_shell = not isinstance(command, (list, tuple))
return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=is_shell, close_fds=True)
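# Usage sketch: list commands bypass the shell, plain strings go through it.
#
#   proc = launch_subprocess(["echo", "hello"])
#   out, err = proc.communicate()
#   # out == b'hello\n'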
|
f5ab217ffdec69147bc79061b0ba029225c7b8a0
| 3,647,740
|
import pandas as pd
def tobooks(f: '(toks, int) -> DataFrame', bks=bktksall) -> DataFrame:
"""Apply a function `f` to all the tokens in each book,
putting the results into a DataFrame column, and adding
a column to indicate each book.
"""
return pd.concat([f(v, i) for i, v in bks.items()])
|
e081e1c01d68b7b84cb6395f32f217360657636f
| 3,647,741
|
def _identity_error_message(msg_type, message, status_code, request):
"""
    Set the response code on the request, and return a JSON blob representing
    an Identity error body, in the format Identity returns error messages.
:param str msg_type: What type of error this is - something like
"badRequest" or "itemNotFound" for Identity.
:param str message: The message to include in the body.
:param int status_code: The status code to set
:param request: the request to set the status code on
:return: dictionary representing the error body
"""
request.setResponseCode(status_code)
return {
msg_type: {
"message": message,
"code": status_code
}
}
|
d73e182fc794f01c3415069ffeb37e76a01df7af
| 3,647,742
|
def _string_to_list(s, dtype='str'):
    """ converts string to list
    Args:
        s: input
        dtype: specifies the type of elements in the list
            can be one of `str` or `int`
    """
    if ' <SENT/> ' in s:
        return s.split(' <SENT/> ')
    if dtype == 'int':
        return [int(e) for e in s.split(LIST_SEPARATOR) if e]
    # default: treat elements as strings
    return s.split(LIST_SEPARATOR)
|
9d2950afcd9f47e1fef7856af117953dbf99410a
| 3,647,743
|
import warnings
import numpy as np
def internal_solve_pounders(
criterion,
x0,
lower_bounds,
upper_bounds,
gtol_abs,
gtol_rel,
gtol_scaled,
maxinterp,
maxiter,
delta,
delta_min,
delta_max,
gamma0,
gamma1,
theta1,
theta2,
eta0,
eta1,
c1,
c2,
solver_sub,
maxiter_sub,
maxiter_gradient_descent_sub,
gtol_abs_sub,
gtol_rel_sub,
gtol_scaled_sub,
gtol_abs_conjugate_gradient_sub,
gtol_rel_conjugate_gradient_sub,
k_easy_sub,
k_hard_sub,
batch_evaluator,
n_cores,
):
"""Find the local minimum to a non-linear least-squares problem using POUNDERS.
Args:
        criterion (callable): Function that returns the residuals of the
            least-squares problem.
x0 (np.ndarray): Initial guess for the parameter vector (starting points).
lower_bounds (np.ndarray): Lower bounds.
Must have same length as the initial guess of the
parameter vector. Equal to -1 if not provided by the user.
upper_bounds (np.ndarray): Upper bounds.
Must have same length as the initial guess of the
parameter vector. Equal to 1 if not provided by the user.
gtol_abs (float): Convergence tolerance for the absolute gradient norm.
Stop if norm of the gradient is less than this.
gtol_rel (float): Convergence tolerance for the relative gradient norm.
Stop if norm of the gradient relative to the criterion value is less
than this.
gtol_scaled (float): Convergence tolerance for the scaled gradient norm.
Stop if norm of the gradient divided by norm of the gradient at the
initial parameters is less than this.
maxinterp (int): Maximum number of interpolation points.
Default is `2 * n + 1`, where `n` is the length of the parameter vector.
maxiter (int): Maximum number of iterations. If reached, terminate.
delta (float): Delta, initial trust-region radius.
delta_min (float): Minimal trust-region radius.
delta_max (float): Maximal trust-region radius.
        gamma0 (float): Shrinking factor of the trust-region radius in case the
            solution vector of the subproblem is not accepted, but the model is fully
            linear (i.e. "valid").
        gamma1 (float): Expansion factor of the trust-region radius in case the
            solution vector of the subproblem is accepted.
theta1 (float): Threshold for adding the current candidate vector
to the model. Function argument to find_affine_points().
theta2 (float): Threshold for adding the current candidate vector
to the model. Argument to get_interpolation_matrices_residual_model().
eta0 (float): Threshold for accepting the solution vector of the trust-region
subproblem as the best candidate.
eta1 (float): Threshold for successfully accepting the solution vector of the
trust-region subproblem as the best candidate.
        c1 (float): Threshold for accepting the norm of our current x candidate.
            Equal to sqrt(n) by default. Argument to find_affine_points() in case
            the input array *model_improving_points* is zero.
        c2 (int): Threshold for accepting the norm of our current candidate vector.
            Equal to 10 by default. Argument to find_affine_points() in case
            the input array *model_improving_points* is not zero.
solver_sub (str): Solver to use for the trust-region subproblem.
Two internal solvers are supported:
- "bntr": Bounded Newton Trust-Region (default, supports bound constraints)
- "gqtpar": (does not support bound constraints)
maxiter_sub (int): Maximum number of iterations in the trust-region subproblem.
maxiter_gradient_descent_sub (int): Maximum number of gradient descent
iterations to perform when the trust-region subsolver BNTR is used.
gtol_abs_sub (float): Convergence tolerance for the absolute gradient norm
in the trust-region subproblem ("BNTR").
gtol_rel_sub (float): Convergence tolerance for the relative gradient norm
in the trust-region subproblem ("BNTR").
gtol_scaled_sub (float): Convergence tolerance for the scaled gradient norm
in the trust-region subproblem ("BNTR").
gtol_abs_conjugate_gradient_sub (float): Convergence tolerance for the
absolute gradient norm in the conjugate gradient step of the trust-region
subproblem ("BNTR").
gtol_rel_conjugate_gradient_sub (float): Convergence tolerance for the
relative gradient norm in the conjugate gradient step of the trust-region
subproblem ("BNTR").
        k_easy_sub (float): Stopping criterion for the "easy" case in the trust-region
            subproblem ("GQTPAR").
k_hard_sub (float): Stopping criterion for the "hard" case in the trust-region
subproblem ("GQTPAR").
batch_evaluator (str or callable): Name of a pre-implemented batch evaluator
(currently 'joblib' and 'pathos_mp') or callable with the same interface
as the estimagic batch_evaluators.
n_cores (int): Number of processes used to parallelize the function
evaluations. Default is 1.
Returns:
(dict) Result dictionary containing:
- solution_x (np.ndarray): Solution vector of shape (n,).
- solution_criterion (np.ndarray): Values of the criterion function at the
solution vector. Shape (n_obs,).
- history_x (np.ndarray): Entire history of x. Shape (history.get_n_fun(), n).
- history_criterion (np.ndarray): Entire history of the criterion function
evaluations. Shape (history.get_n_fun(), n_obs)
- n_iterations (int): Number of iterations the algorithm ran before finding a
solution vector or reaching maxiter.
- "success" (bool): Boolean indicating whether a solution has been found
before reaching maxiter.
"""
history = LeastSquaresHistory()
n = len(x0)
model_indices = np.zeros(maxinterp, dtype=int)
n_last_modelpoints = 0
if lower_bounds is not None and upper_bounds is not None:
if np.max(x0 + delta - upper_bounds) > 1e-10:
raise ValueError("Starting points + delta > upper bounds.")
xs = [x0]
for i in range(n):
x1 = x0.copy()
x1[i] += delta
xs.append(x1)
residuals = batch_evaluator(criterion, arguments=xs, n_cores=n_cores)
history.add_entries(xs, residuals)
accepted_index = history.get_best_index()
residual_model = create_initial_residual_model(
history=history, accepted_index=accepted_index, delta=delta
)
main_model = create_main_from_residual_model(
residual_model=residual_model, multiply_square_terms_with_residuals=False
)
x_accepted = history.get_best_x()
gradient_norm_initial = np.linalg.norm(main_model.linear_terms)
gradient_norm_initial *= delta
valid = True
n_modelpoints = n + 1
last_model_indices = np.zeros(maxinterp, dtype=int)
converged = False
convergence_reason = "Continue iterating."
for niter in range(maxiter + 1):
result_sub = solve_subproblem(
x_accepted=x_accepted,
main_model=main_model,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
delta=delta,
solver=solver_sub,
maxiter=maxiter_sub,
maxiter_gradient_descent=maxiter_gradient_descent_sub,
gtol_abs=gtol_abs_sub,
gtol_rel=gtol_rel_sub,
gtol_scaled=gtol_scaled_sub,
gtol_abs_conjugate_gradient=gtol_abs_conjugate_gradient_sub,
gtol_rel_conjugate_gradient=gtol_rel_conjugate_gradient_sub,
k_easy=k_easy_sub,
k_hard=k_hard_sub,
)
x_candidate = x_accepted + result_sub["x"] * delta
residuals_candidate = criterion(x_candidate)
history.add_entries(x_candidate, residuals_candidate)
predicted_reduction = history.get_critvals(
accepted_index
) - history.get_critvals(-1)
actual_reduction = -result_sub["criterion"]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
rho = np.divide(predicted_reduction, actual_reduction)
if (rho >= eta1) or (rho > eta0 and valid is True):
residual_model = residual_model._replace(
intercepts=history.get_residuals(index=accepted_index)
)
center_info = {"x": history.get_best_x(), "radius": delta}
x_candidate = history.get_centered_xs(center_info, index=-1)
residual_model = update_residual_model_with_new_accepted_x(
residual_model=residual_model, x_candidate=x_candidate
)
main_model = update_main_model_with_new_accepted_x(
main_model=main_model, x_candidate=x_candidate
)
x_accepted = history.get_best_x()
accepted_index = history.get_best_index()
critval_accepted = history.get_critvals(index=accepted_index)
        # The model is deemed "not valid" if it has less than n model points.
# Otherwise, if the model has n points, it is considered "valid" or
# "fully linear".
# Note: valid is True in the first iteration
if not valid:
(
model_improving_points,
model_indices,
n_modelpoints,
project_x_onto_null,
) = find_affine_points(
history=history,
x_accepted=x_accepted,
model_improving_points=np.zeros((n, n)),
project_x_onto_null=False,
delta=delta,
theta1=theta1,
c=c1,
model_indices=model_indices,
n_modelpoints=0,
)
if n_modelpoints < n:
(
history,
model_indices,
) = add_geomtery_points_to_make_main_model_fully_linear(
history=history,
main_model=main_model,
model_improving_points=model_improving_points,
model_indices=model_indices,
x_accepted=x_accepted,
n_modelpoints=n_modelpoints,
delta=delta,
criterion=criterion,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
batch_evaluator=batch_evaluator,
n_cores=n_cores,
)
n_modelpoints = n
delta_old = delta
delta = update_trustregion_radius(
result_subproblem=result_sub,
rho=rho,
model_is_valid=valid,
delta=delta,
delta_min=delta_min,
delta_max=delta_max,
eta1=eta1,
gamma0=gamma0,
gamma1=gamma1,
)
(
model_improving_points,
model_indices,
n_modelpoints,
project_x_onto_null,
) = find_affine_points(
history=history,
x_accepted=x_accepted,
model_improving_points=np.zeros((n, n)),
project_x_onto_null=False,
delta=delta,
theta1=theta1,
c=c1,
model_indices=model_indices,
n_modelpoints=0,
)
if n_modelpoints == n:
valid = True
else:
valid = False
(
model_improving_points,
model_indices,
n_modelpoints,
project_x_onto_null,
) = find_affine_points(
history=history,
x_accepted=x_accepted,
model_improving_points=model_improving_points,
project_x_onto_null=project_x_onto_null,
delta=delta,
theta1=theta1,
c=c2,
model_indices=model_indices,
n_modelpoints=n_modelpoints,
)
if n_modelpoints < n:
(
history,
model_indices,
) = add_geomtery_points_to_make_main_model_fully_linear(
history=history,
main_model=main_model,
model_improving_points=model_improving_points,
model_indices=model_indices,
x_accepted=x_accepted,
n_modelpoints=n_modelpoints,
delta=delta,
criterion=criterion,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
batch_evaluator=batch_evaluator,
n_cores=n_cores,
)
model_indices, n_model_points = update_model_indices_residual_model(
model_indices, accepted_index, n_modelpoints
)
(
x_sample_monomial_basis,
monomial_basis,
basis_null_space,
lower_triangular,
n_modelpoints,
) = get_interpolation_matrices_residual_model(
history=history,
x_accepted=x_accepted,
model_indices=model_indices,
delta=delta,
c2=c2,
theta2=theta2,
n_maxinterp=maxinterp,
n_modelpoints=n_modelpoints,
)
center_info = {"x": x_accepted, "radius": delta_old}
interpolation_set = history.get_centered_xs(
center_info, index=model_indices[:n_modelpoints]
)
residual_model_interpolated = interpolate_residual_model(
history=history,
interpolation_set=interpolation_set,
residual_model=residual_model,
model_indices=model_indices,
n_modelpoints=n_modelpoints,
n_maxinterp=maxinterp,
)
coefficients_residual_model = get_coefficients_residual_model(
x_sample_monomial_basis=x_sample_monomial_basis,
monomial_basis=monomial_basis,
basis_null_space=basis_null_space,
lower_triangular=lower_triangular,
residual_model_interpolated=residual_model_interpolated,
n_modelpoints=n_modelpoints,
)
residual_model = residual_model._replace(
intercepts=history.get_residuals(index=accepted_index)
)
residual_model = update_residual_model(
residual_model=residual_model,
coefficients_to_add=coefficients_residual_model,
delta=delta,
delta_old=delta_old,
)
main_model = create_main_from_residual_model(residual_model)
gradient_norm = np.linalg.norm(main_model.linear_terms)
gradient_norm *= delta
(
last_model_indices,
n_last_modelpoints,
same_model_used,
) = get_last_model_indices_and_check_for_repeated_model(
model_indices=model_indices,
last_model_indices=last_model_indices,
n_modelpoints=n_modelpoints,
n_last_modelpoints=n_last_modelpoints,
)
converged, convergence_reason = _check_for_convergence(
gradient_norm=gradient_norm,
gradient_norm_initial=gradient_norm_initial,
critval=critval_accepted,
delta=delta,
delta_old=delta_old,
same_model_used=same_model_used,
converged=converged,
reason=convergence_reason,
niter=niter,
gtol_abs=gtol_abs,
gtol_rel=gtol_rel,
gtol_scaled=gtol_scaled,
maxiter=maxiter,
)
if converged:
break
result_dict = {
"solution_x": history.get_xs(index=accepted_index),
"solution_criterion": history.get_best_residuals(),
"history_x": history.get_xs(),
"history_criterion": history.get_residuals(),
"n_iterations": niter,
"success": converged,
"message": convergence_reason,
}
return result_dict
|
c3f602af6f78a1cb57c15a6488e4aeadcd081951
| 3,647,744
|
import torch
from torch.autograd import Variable
def get_overlap_info(bbox):
"""
input:
box_priors: [batch_size, number_obj, 4]
output: [number_object, 6]
number of overlapped obj (self not included)
sum of all intersection area (self not included)
sum of IoU (Intersection over Union)
average of all intersection area (self not included)
average of IoU (Intersection over Union)
roi area
"""
batch_size, num_obj, bsize = bbox.shape
# generate input feat
overlap_info = Variable(torch.FloatTensor(batch_size, num_obj, 6).zero_().cuda()) # each obj has how many overlaped objects
reverse_eye = Variable(1.0 - torch.eye(num_obj).float().cuda()) # removed diagonal elements
for i in range(batch_size):
sliced_bbox = bbox[i].view(num_obj, bsize)
sliced_intersection = bbox_intersections(sliced_bbox, sliced_bbox)
sliced_overlap = bbox_overlaps(sliced_bbox, sliced_bbox, sliced_intersection)
sliced_area = bbox_area(sliced_bbox)
# removed diagonal elements
sliced_intersection = sliced_intersection * reverse_eye
sliced_overlap = sliced_overlap * reverse_eye
# assign value
overlap_info[i, :, 0] = (sliced_intersection > 0.0).float().sum(1)
overlap_info[i, :, 1] = sliced_intersection.sum(1)
overlap_info[i, :, 2] = sliced_overlap.sum(1)
overlap_info[i, :, 3] = overlap_info[i, :, 1] / (overlap_info[i, :, 0] + 1e-9)
overlap_info[i, :, 4] = overlap_info[i, :, 2] / (overlap_info[i, :, 0] + 1e-9)
overlap_info[i, :, 5] = sliced_area
return overlap_info.view(batch_size * num_obj, 6)
|
451507a49fca589bc1102b085eab551ebe32bcc7
| 3,647,745
|
def get_current_language(request, set_default=True, default_id=1):
"""
Description:
Returns the current active language. Will set a default language if none is found.
Args:
request (HttpRequest): HttpRequest from Django
        set_default (Boolean): Indicates if a default language must be activated (if none currently is). Defaults to True.
        default_id (Integer): The PK for the default Language instance. Defaults to 1.
Returns:
Language: The currently used language from our app's Language model
"""
# Base variables
language = None
language_name = request.session.get(LANGUAGE_SESSION_KEY, False)
# Get the language
if language_name:
try:
language = Language.objects.get(django_language_name=language_name)
except Language.DoesNotExist:
pass
# Set a default language if necessary
if language is None and set_default:
language = set_default_language(request, default_id)
# Always return the active language
return language
|
98bc2a25201dc87afcee24d8ff5d10fcab7849bb
| 3,647,746
|
def is_leap_year(year):
"""
returns True for leap year and False otherwise
:param int year: calendar year
:return bool:
"""
# return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
return year % 100 != 0 or year % 400 == 0 if year % 4 == 0 else False
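# Quick checks of the century rules:
if __name__ == "__main__":
    assert is_leap_year(2000)      # divisible by 400
    assert not is_leap_year(1900)  # divisible by 100 but not by 400
    assert is_leap_year(2024)      # ordinary multiple of 4
    assert not is_leap_year(2023)  # not a multiple of 4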
|
5bd0bb7a44dc7004b9198cb3d8ed244dc02417c2
| 3,647,747
|
def update_search_params(context, **kwargs):
"""Update the set parameters of the current request"""
params = context["request"].GET.copy()
for k, v in kwargs.items():
params[k] = v
return params.urlencode()
|
e3ce5a5a1dadc90bb544a761e154214d7a538f30
| 3,647,748
|
def dynamic2message(dynamic_dict: dict) -> Message:
    """
    Convert a raw dynamic (feed post) fetched from the API into a message.
    """
    author_name = dynamic_dict['desc']['user_profile']['info']['uname']
    dynamic_id = dynamic_dict['desc']['dynamic_id']
    if dynamic_dict['desc']['type'] == 1:  # repost or vote
        text = f"用户[{author_name}]转发了动态:\n" + dynamic_dict['card']['item']['content'] + "\n---------------------\n"
        origin_dynamic = dynamic.get_info(dynamic_dict['card']['item']['orig_dy_id'])
        ori_message = dynamic2message(origin_dynamic)
        msg = MessageSegment.text(text) + ori_message + MessageSegment.text('\n---------------------')
    elif dynamic_dict['desc']['type'] == 2:  # post with images
        text = f"用户[{author_name}]发布了动态:\n" + dynamic_dict['card']['item']['description']
        msg = MessageSegment.text(text)
        for i in range(dynamic_dict['card']['item']['pictures_count']):
            msg = msg + MessageSegment.image(dynamic_dict['card']['item']['pictures'][i]['img_src'])
    elif dynamic_dict['desc']['type'] == 4:  # text-only post
        msg = MessageSegment.text(f"用户[{author_name}]发布了动态:\n" + dynamic_dict['card']['item']['content'])
    elif dynamic_dict['desc']['type'] == 8:  # video upload
        msg = MessageSegment.text(
            f"用户[{author_name}]发布了视频:\n" + dynamic_dict['card']['dynamic'] + "\n视频标题:" + dynamic_dict['card'][
                'title'] + "\n视频链接:" + dynamic_dict['card']['short_link'])
    elif dynamic_dict['desc']['type'] == 64:  # article (column) post
        msg = MessageSegment.text(f"用户[{author_name}]发布了专栏:\n" + dynamic_dict['card']['title'])
    else:
        msg = MessageSegment.text(f'用户[{author_name}]发布了动态,但无法判断类型')
    msg = msg + MessageSegment.text(f'\n\n原动态链接:https://t.bilibili.com/{dynamic_id}')
    return msg
|
b5330876cb58bf71c73ef9f4d3cbcdd0e583aba6
| 3,647,749
|
from osgeo import gdal
def dem_to_roughness(src_raster, band=0):
"""Calculate the roughness for the DEM.
Parameters
----------
src_raster : Raster
The dem used to calculate the roughness.
band : int, optional, default: 0
source band number to use.
Returns
-------
dst_raster: Raster
roughness calculated from the DEM.
"""
options = dict(band=band+1, format='MEM')
ds_src = src_raster.to_gdal_ds()
ds = gdal.DEMProcessing('', ds_src, 'Roughness', **options)
dst_raster = tgp.read_gdal_ds(ds)
return dst_raster
|
f8ba073560aaf18ab9101befc5a3d1727a7cb93e
| 3,647,750
|
import importlib
import sys
def import_file(path, name=None):
"""Import modules from file."""
spec = importlib.util.spec_from_file_location(name or '', path)
module = importlib.util.module_from_spec(spec)
if name:
sys.modules[name] = module
spec.loader.exec_module(module)
return module
|
3a2fc184c3bca4e48d0c725b9370e2a9e8bcadf3
| 3,647,751
|
from typing import Any
from typing import List
def batch_matmul_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if dense is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode() and len(args[0].checked_type.shape) != len(
args[1].checked_type.shape
):
        logger.info(f"{op_name}: requires use_implicit_batch=False.")
return False
return True
|
6c83ce1116a88e864bd577ee2dd7d669d43c43a3
| 3,647,752
|
def func_run_dynamic(input_file, dynamic_dic, exclude, pprint):
"""
Execute one dynamic template
:param input_file: (string) The template file name
:param dynamic_dic: (dict) The dictionary of the dynamic variables
    :param exclude: (bool) Passed as ``exclude_none`` to the template's pprint
    :param pprint: (bool) Whether to pretty-print the template report
    :return: (Template) The executed template object
"""
new_template_filename = create_dynamic_template(input_file, dynamic_dic)
t = Template.Template()
t.file_path = new_template_filename
t.load_sections()
t.set_execute_order()
t.start_driver()
report = t.run()
if pprint:
t.pprint(exclude_none=exclude)
return t
|
9432eadb4e3735a06f35aedd8fd9bb175ab2ba55
| 3,647,753
|
import os
def find(store_config, shardid): # FIXME require config instead
"""Find the path of a shard.
Args:
store_config: Dict of storage paths to optional attributes.
limit: The dir size limit in bytes, 0 for no limit.
use_folder_tree: Files organized in a folder tree
(always on for fat partitions).
shardid: Id of the shard to find.
Returns:
Path to the shard or None if not found.
Raises:
AssertionError: If input not valid.
Example:
import storjlib
id = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
store_config = {"path/alpha": None, "path/beta": None}
        shard_path = storjlib.store.manager.find(store_config, id)
print("shard located at %s" % shard_path)
"""
assert(storjlib.store.shard.valid_id(shardid))
store_config = setup(store_config) # setup if needed
for store_path, attributes in store_config.items():
use_folder_tree = attributes["use_folder_tree"]
shard_path = _get_shard_path(store_path, shardid, use_folder_tree)
if os.path.isfile(shard_path):
return shard_path
return None
|
eddfc0f15deb833337f6312cb96b460b66ca16d7
| 3,647,754
|
import ctypes
def xclGetDeviceInfo2 (handle, info):
"""
xclGetDeviceInfo2() - Obtain various bits of information from the device
:param handle: (xclDeviceHandle) device handle
:param info: (xclDeviceInfo pointer) Information record
:return: 0 on success or appropriate error number
"""
libc.xclGetDeviceInfo2.restype = ctypes.c_int
libc.xclGetDeviceInfo2.argtypes = [xclDeviceHandle, ctypes.POINTER(xclDeviceInfo2)]
return libc.xclGetDeviceInfo2(handle, info)
|
794b6208c19a4f982a9fffb9270a3485299b62eb
| 3,647,755
|
def template_failure(request, status=403, **kwargs):
""" Renders a SAML-specific template with general authentication error description. """
return render(request, 'djangosaml2/login_error.html', status=status)
|
fbcc8ad756213b4ba7f44d799c67b67beaad18f8
| 3,647,756
|
import numpy as np
def zflatten2xyz(z, x=None, y=None):
    """ flatten an nxm 2D array to [x, y, z] of shape=(n*m, 3)"""
    if x is None:
        x = np.arange(0, z.shape[0], step=1)
    if y is None:
        y = np.arange(0, z.shape[1], step=1)
xlen = len(x)
ylen = len(y)
assert z.shape[0] == xlen and z.shape[1] == ylen, 'check dimensions!!!'
xx, yy = np.meshgrid(x, y)
xx = xx.T
yy = yy.T # meshgrid take the second dimension as x
xylen = xlen*ylen
return np.concatenate((xx.reshape((xylen, 1)),
yy.reshape((xylen, 1)),
z.reshape((xylen, 1))), axis=1)
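# A 2x3 grid flattens to a (6, 3) array whose rows are [x, y, z]:
if __name__ == "__main__":
    z = np.arange(6).reshape(2, 3)
    xyz = zflatten2xyz(z)
    assert xyz.shape == (6, 3)
    assert (xyz[0] == [0, 0, 0]).all() and (xyz[-1] == [1, 2, 5]).all()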
|
96fcc9755660a85f5501958cf3f7d8c7a0e35b69
| 3,647,757
|
def penalized_log_likelihood(curve,t,pairwise_contact_matrix,a,b,term_weights,square_root_speed=None,pairwise_distance_matrix=None):
"""
penalized log likelihood
"""
    if pairwise_distance_matrix is None:  # if we do not already have the pairwise distance matrix computed, compute it
pairwise_distance_matrix=compute_pairwise_distance(curve)
L=0 # initialize log likelihood term
R1=0 # initialize first order term
R2=0 # initialize second order term
Q=0 # initialize parametrization penalty term
S=0 # initialize shape prior term
if term_weights[0]!=0:
L=term_weights[0]*loglikelihood_Varoquaux_with_missing(pairwise_distance_matrix,a,b,pairwise_contact_matrix)
if (term_weights[1]!=0)&(term_weights[2]==0):
R1=term_weights[1]*srvf.length(curve,t)
if (term_weights[2]!=0):
R1,R2=srvf.roughness2(curve,t)
R1=term_weights[1]*R1
R2=term_weights[2]*R2
if (term_weights[3]!=0):
Q=term_weights[3]*parametrization_error(curve,square_root_speed,t)
if (term_weights[4]!=0):
S=term_weights[4]*0 # not implemented yet
return L-R1-R2-Q-S
|
f26b5148b5f56be958d99714e5207417fd40a15d
| 3,647,758
|
import os
PROC_TCP = '/proc/net/tcp'  # standard Linux TCP socket table, per the docstring below
def listening_ports():
""" Reads listening ports from /proc/net/tcp """
ports = []
if not os.path.exists(PROC_TCP):
return ports
with open(PROC_TCP) as fh:
for line in fh:
if '00000000:0000' not in line:
continue
parts = line.lstrip(' ').split(' ')
if parts[2] != '00000000:0000':
continue
local_port = parts[1].split(':')[1]
local_port = int('0x' + local_port, base=16)
ports.append(local_port)
return ports
|
1d92f9acf0330882048594b7a12976286f12ecc4
| 3,647,759
|
import numpy as np
def dict2array(X):
"""
Returns a Numpy array from dictionary
Parameters
----------
X: dict
"""
all_var = []
for k in X.keys():
all_var.append(X[k])
return np.array(all_var)
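# Usage sketch (hypothetical values; relies on dict insertion order, Python 3.7+):
# dict2array({'a': 1.0, 'b': 2.0})  # -> array([1., 2.])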
|
e3d1ecabe9897af7c60a8e4be1e92603619d130a
| 3,647,760
|
import subprocess
def preprocess_field_data(subdelimiter, field_value, path_to_script):
"""Executes a field preprocessor script and returns its output and exit status code. The script
is passed the field subdelimiter as defined in the config YAML and the field's value, and
    prints a modified version of the value (result) back to this function.
"""
cmd = subprocess.Popen([path_to_script, subdelimiter, field_value], stdout=subprocess.PIPE)
result, stderrdata = cmd.communicate()
return result, cmd.returncode
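# Usage sketch (hypothetical script path and field value):
# result, code = preprocess_field_data('|', 'value1|value2', '/path/to/preprocessor.py')
# result holds the script's stdout (bytes); code is its exit status.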
|
9cf0261c98652d0811868c91fbb3ab15e6c07af3
| 3,647,761
|
import requests
from urllib.parse import urljoin  # assumed import for urljoin below
from flask import jsonify, request, url_for  # assumed imports for the Flask helpers used below
def dfs_level_details():
"""This function traverses all levels in a DFS style. It gets the child directories and
recursively calls the same function on child directories to extract its level details
Returns:
Dictionary: Key is the level name, value is a list with first element as url
and the second element as the bounding box of that url
"""
level_details = {}
local_server_name = app.config['HOST_NAME'] if 'HOST_NAME' in app.config else "Unknown"
try:
bounding_box_level = GeographyHelper.GetCoordinatesForLevel(local_server_name)
except:
print("An error has occured while retrieveing bounding box")
bounding_box_level = None
level_details[local_server_name] = [request.url_root, bounding_box_level]
locations_to_urls = DirectoryNameToURL.objects(relationship='child').all()
    if locations_to_urls is None:
        return None
for location_to_url in locations_to_urls:
request_url = urljoin(location_to_url.url, url_for('api.dfs_level_details'))
try:
response = requests.get(request_url)
if response.status_code != 200:
return jsonify(response.json()), response.status_code
results = response.json()
            if results is None or len(results) == 0:
continue
for result in results:
level_details[result] = results[result]
except:
return jsonify(ERROR_JSON), 400
return jsonify(level_details), 200
|
31cf9fdf49620798e9411dda1eda99b95411858b
| 3,647,762
|
from copy import copy
def makeNonParameterized(p):
"""Return a new Pointset stripped of its parameterization.
"""
if isinstance(p, Pointset) and p._isparameterized:
return Pointset({'coordarray': copy(p.coordarray),
'coordnames': copy(p.coordnames),
'norm': p._normord,
'labels': copy(p.labels)})
else:
raise TypeError("Must provide a parameterized Pointset")
|
778eed55d3da10dcfb4681484cb31d6469009ae8
| 3,647,763
|
def interface_getattr(*v):
"""Behaves like `getattr` but for zope Interface objects which
hide the attributes.
.. note:: Originally I simply tried to
override :meth:`InterfaceDocumenter.special_attrgetter` to deal with the
special access needs of :class:`Interface` objects, but found that this
is not intended to be overwritten. Instead one should register the
special accessor using :func:`app.add_autodoc_attrgetter`.
"""
obj, name = v[:2]
if "__dict__" == name:
# Interface objects do not list their members through
# __dict__.
return dict((n, obj.get(n)) for n in obj.names())
if name in obj.names(all=True):
return obj.get(name)
else:
return getattr(*v)
|
f981d9cdea352f206b087455f1d8b33846a4d5df
| 3,647,764
|
from datetime import timedelta
def round_time(t, to=timedelta(seconds=1)):
    """ cftime introduces noise when decoding values into date objects.
    This rounds the time in the date object to the nearest second, assuming the init
    time is at most 1 sec away from a round minute. This is used when merging datasets
    so their time dims match up.
Args:
t: datetime or cftime object
to: size of increment to round off to. By default round to closest integer
second.
Returns:
datetime or cftime object rounded to nearest minute
"""
midnight = t.replace(hour=0, minute=0, second=0, microsecond=0)
time_since_midnight = exact_cftime_datetime_difference(midnight, t)
remainder = time_since_midnight % to
quotient = time_since_midnight // to
if remainder <= to / 2:
closest_multiple_of_to = quotient
else:
closest_multiple_of_to = quotient + 1
rounded_time_since_midnight = closest_multiple_of_to * to
return midnight + rounded_time_since_midnight
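# Usage sketch (assumes exact_cftime_datetime_difference returns a timedelta for plain datetimes):
# from datetime import datetime
# round_time(datetime(2020, 1, 1, 12, 30, 29, 700000))
# -> datetime(2020, 1, 1, 12, 30, 30)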
|
dcc7d0caa4e4787f710a386968d8967661e662ca
| 3,647,765
|
import numpy as np
import pandas as pd
def decide_end(match_list, return_whole_match_object=False):
    """
    Among all the match objects, return the matched string closest to the end of the text
    Return : a string. If return_whole_match_object is True, return a match object
    """
if len(match_list) == 0:
return pd.NA
ends = np.array(list(map(lambda match_object : match_object.span()[1], match_list)))
closest_index = np.argmax(ends)
if return_whole_match_object:
return match_list[closest_index]
else:
return match_list[closest_index].group()
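# Usage sketch (hypothetical pattern and text):
# import re
# matches = list(re.finditer(r"\d+", "a1 b22 c333"))
# decide_end(matches)        # -> '333' (the match ending closest to the end)
# decide_end(matches, True)  # -> the match object for '333'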
|
72e9a4f63c9c7b95e5728b798bc1cd508d1911e6
| 3,647,766
|
def get_level_refactorings_count(level: int, dataset: str = "") -> str:
"""
Get the count of all refactorings for the given level
Parameter:
level (int): get the refactoring instances for this level
dataset (str) (optional): filter for these specific projects
"""
return f"SELECT refactoring, count(*) FROM (" + \
get_instance_fields(refactoringCommits, [(refactoringCommits, ["refactoring"])],
f"{refactoringCommits}.level = {str(level)}", dataset) + \
f" AND {valid_refactorings_filter(refactoringCommits)} AND {file_type_filter(refactoringCommits)}) t group by refactoring order by count(*) desc"
|
8150537a35161541d7eb4b483d06ef8096611d37
| 3,647,767
|
def repeat_batch(t, K, dim=0):
"""Repeat a tensor while keeping the concept of a batch.
:param t: `torch.Tensor`: The tensor to repeat.
:param K: `int`: The number of times to repeat the tensor.
:param dim: `int`: The dimension to repeat in. This should be the
batch dimension.
:returns: `torch.Tensor`: The repeated tensor. The new shape will be
batch size * K at dim, the rest of the shapes will be the same.
Example::
>>> a = torch.arange(10).view(2, -1)
>>> a
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> a.repeat(2, 1)
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> repeat_batch(a, 2)
tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[5, 6, 7, 8, 9]])
"""
shape = t.shape
tiling = [1] * (len(shape) + 1)
tiling[dim + 1] = K
tiled = t.unsqueeze(dim + 1).repeat(tiling)
old_bsz = shape[dim]
new_bsz = old_bsz * K
new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1 :])
return tiled.view(new_shape)
|
31ae6e02bd23c56049a4f8e5ea9f36e5b6186678
| 3,647,768
|
def ifte(s, g_cond, g_true, g_false):
"""goal that succeeds if g_cond and g_true succeed or g_cond fails and g_false succeeds"""
def loop(s_inf=g_cond(s)):
try:
first_cond = next(s_inf)
except StopIteration:
yield from g_false(s)
return
except SuspendIteration as suspension:
raise SuspendIteration(loop(suspension.stream))
yield from append_inf(g_true(first_cond),
append_map_inf(g_true, s_inf))
return loop()
|
899ed78b53e056804e9515e2f01125831ae0dfba
| 3,647,769
|
import numpy as np
def filter_by_continue_threshold_variance_threshold(peak_info, acc, cont_win_size=3, cont_thres=4, var_thres=0.001):
    """
    Calculate the continuity over a given window length, then calculate the variance and filter the data by
    a given threshold
    :param peak_info: a 5D matrix
    :param acc: accelerometer data
    :param cont_win_size: continue window len
    :param cont_thres: continue threshold
    :param var_thres: variance threshold
    :return: peak_info: indices of the peaks that pass the variance filter
    """
end_for = len(peak_info[:,2])-1
for i in np.arange(cont_thres-1, end_for):
v_count = 0
for x in np.arange(1, cont_thres+1):
if np.var(acc[int(peak_info[i-x+1, 0]):int(peak_info[i-x+2, 0]+1)], ddof=1) > var_thres:
v_count = v_count + 1
if v_count >= cont_win_size:
peak_info[i, 4] = 1
else:
peak_info[i, 4] = 0
peak_info = peak_info[peak_info[:, 4] == 1, 0]
return peak_info
|
7cdbe81b8c0931d315a9d928b6a32105e6da56fb
| 3,647,770
|
from datetime import datetime
def send_update(*args: str) -> bool:
""" Updates the path endpoint to contain the current UTC timestamp """
assert args, "Firebase path cannot be empty"
endpoint = args[-1]
value = {endpoint: datetime.utcnow().isoformat()}
return send_message(value, *args[:-1])
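# Usage sketch (hypothetical path; assumes send_message is this module's Firebase sender):
# send_update('users', 'alice', 'last_seen')
# -> writes {'last_seen': '<UTC ISO timestamp>'} under the path users/alice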
|
b9b9b7a277bc2a0ffd9ae0c4d658eb5f3d017d20
| 3,647,771
|
def execute_custom(datatype, runtype, driver, data_repository, step_list):
"""
Execute a custom testcase
"""
print_info("{0} {1}".format(datatype, runtype))
tc_status = False
    if "suite_exectype" in data_repository and \
            data_repository["suite_exectype"].upper() == "ITERATIVE":
        print_info("Testsuite execute type=iterative but the testcase datatype=custom. "
                   "All testcases in an iterative testsuite should have datatype=iterative, "
                   "hence this testcase will be marked as failure.")
elif runtype.upper() == 'SEQUENTIAL_KEYWORDS' or runtype.upper() == 'PARALLEL_KEYWORDS':
tc_status = driver.main(step_list, data_repository, tc_status, system_name=None)
else:
print_error("Unsuppored runtype found, please check testcase file")
return tc_status
|
884ab4ff7f66f1ad969b03ec406513b301739169
| 3,647,772
|
def parseSolFile(filename):
"""Parses SOL file and extract soil profiles."""
data = {}
profile = None
lat = None
lon = None
with open(filename) as fin:
for line in fin:
if line.startswith("*"):
if profile is not None:
data[(lat, lon)] = "{0}\r\n".format(profile)
profile = line[1:].strip()
elif not line.startswith("@") and len(line.strip()) > 0:
toks = line.split()
if len(toks) == 5:
lat = float(toks[2])
lon = float(toks[3])
profile += "\r\n{0}".format(line.strip())
else:
try:
float(toks[0])
line = line.replace(toks[1], "".join([" "]*len(toks[1])))
profile += "\r\n{0}".format(line.rstrip())
except:
profile += "\r\n {0}".format(line.rstrip())
    # Store the final profile parsed from the file (otherwise it would be dropped).
    if profile is not None:
        data[(lat, lon)] = "{0}\r\n".format(profile)
    return data
|
7c3876f1e4899eff5b0036045df4348903a11306
| 3,647,773
|
import collections
def get_deps_info(projects, configs):
"""Calculates dependency information (forward and backwards) given configs."""
deps = {p: configs[p].get('deps', {}) for p in projects}
# Figure out the backwards version of the deps graph. This allows us to figure
# out which projects we need to test given a project. So, given
#
# A
# / \
# B C
#
# We want to test B and C, if A changes. Recipe projects only know about the
# B-> A and C-> A dependencies, so we have to reverse this to get the
# information we want.
downstream_projects = collections.defaultdict(set)
for proj, targets in deps.items():
for target in targets:
downstream_projects[target].add(proj)
return deps, downstream_projects
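# Usage sketch with hypothetical configs mirroring the diagram above:
# projects = ['A', 'B', 'C']
# configs = {'A': {}, 'B': {'deps': {'A': {}}}, 'C': {'deps': {'A': {}}}}
# deps, downstream = get_deps_info(projects, configs)
# downstream['A']  # -> {'B', 'C'}: changing A requires testing B and C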
|
10215dfb623b8ebaaabdb2d1bcffd876d37f9f66
| 3,647,774
|
def write_cflags():
"""Adds C-Flags. C++ version is defined at the beginning of this file"""
text = f"""CFLAGS = ${{TF_CFLAGS}} ${{OMP_CFLAGS}} -fPIC -O2 -std={CPPVERSION}
LDFLAGS = -shared ${{TF_LFLAGS}}
"""
text += write_cflags_cuda()
return text
|
1348c70b5bdbe168760dba677f9bcc4507957510
| 3,647,775
|
from itertools import islice
import numpy as np
def ins_to_sem_compatibility(sem_labels_for_instances, num_sem_labels, stuff_sem_cls_ids, stuff_penalisation=1.0):
"""
Returns the compatibility matrix for the instance_labels -> semantic_labels bipartite potentials in BCRF.
Args:
sem_labels_for_instances: Semantic labels of the instances. 0th instance must be 'no_instance' with label -1
num_sem_labels: The total number of semantic labels used
stuff_sem_cls_ids: List of semantic labels for the 'stuff' classes (no instances for these classes)
stuff_penalisation: Relative strength of the association between 'stuff' classes and 'no_instance'
object instance (instance label 0)
Returns:
A matrix of shape (num_instances, num_sem_labels), where the entry (i, j) contains the connection strength
between the i th instance and the j the semantic label.
"""
mat = np.zeros((len(sem_labels_for_instances), num_sem_labels), dtype=np.float32)
assert sem_labels_for_instances[0] == -1 # First instance must be 'no_instance'
# Attraction between an instance and its semantic class
for inst_lbl, sem_lbl in islice(enumerate(sem_labels_for_instances), 1, None):
mat[inst_lbl, sem_lbl] = 1.0 # TODO(sadeep) Learn this as a vector of size len(thing_sem_cls_ids)
# Attraction between `no_instance` and stuff classes
if stuff_sem_cls_ids is not None:
for stuff_id in stuff_sem_cls_ids:
mat[0, stuff_id] = stuff_penalisation # TODO(sadeep) Learn this as a vector of size len(stuff_sem_cls_ids)
return mat
|
3c96d3746a7e419848f9b2ddef4c636cf8a822d1
| 3,647,776
|
def get_coverage(inputs):
"""Get edge coverage.
Returns:
A dictionary of inputs and corresponding coverage
"""
    cov_dict = dict()
    for test_input in inputs:
        # Get coverage by running the program
        cov = coverage(test_input)
        # Update coverage dictionary of test input
        cov_dict[test_input] = cov
    return cov_dict
|
5a80399b7877d968654e8c6fc069ff0f70d10a62
| 3,647,777
|
import os
import zipfile
import shutil
def compress_as(filename, fmt, target=None, keep=True):
"""Compress an existing file.
Supported compression formats are: gzip, bzip2, zip, and lzma (Python
3.3 or newer only).
Args:
filename: The path and name of the uncompressed file.
fmt: Decides to which format the file will be compressed.
* *zip*: Uses the standard zip library.
* *bz2*: Uses the bz2 library.
* *gz*: Uses the GNU zip library.
* *xz*: Uses the lzma format.
target: The default output filename is *filename.fmt*.
If you do not like it, you can set another filename here.
keep: If true, keep the original file after compressing. Otherwise it
will be deleted. Default is keeping.
Returns:
The filename of the newly created file.
"""
if not is_compression_format(fmt):
raise ValueError("Unknown compression format '%s'!" % fmt)
if target is None:
target = ".".join([filename, fmt])
# The filename inside the zipped archive
target_filename = os.path.basename(target)
target_basename, extension = os.path.splitext(target_filename)
if extension.endswith(fmt):
target_filename = target_basename
# Read datafile in 100 MiB chunks for good performance/memory usage
chunksize = 100 * 1024 * 1024
compfile = get_compressor(fmt)
try:
if fmt == "zip":
with compfile(target, 'w') as f_out:
f_out.write(
filename, arcname=target_filename,
compress_type=zipfile.ZIP_DEFLATED
)
else:
with open(filename, 'rb') as f_in:
if fmt == "gz":
# Coming from https://stackoverflow.com/a/38020236
with open(target, 'wb') as f_out:
with compfile(filename, 'wb', fileobj=f_out) as f_out:
shutil.copyfileobj(f_in, f_out, length=chunksize)
elif fmt == "bz2" or fmt == "xz":
with compfile(target, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out, length=chunksize)
    except Exception:
        raise  # re-raise unchanged; the else-clause below only runs on success
else:
if not keep:
os.unlink(filename)
return target
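# Usage sketch (hypothetical file; assumes get_compressor/is_compression_format are defined in this module):
# compress_as('data.csv', 'gz')               # -> 'data.csv.gz', original kept
# compress_as('data.csv', 'zip', keep=False)  # -> 'data.csv.zip', original deleted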
|
4f4c4862e314945ff7360d48cdb3cc10448b14e9
| 3,647,778
|
from operator import add
def average(arr, mode = "mixed"):
"""
average(arr, mode) takes the average of a given array
Once again, the modes of add() can be used here to denote what the type of the array is
The function below, determine_mode(arr) can be used to determine the correct mode for your array
"""
if len(arr) == 0: return 0.0
return add(arr, mode)/len(arr)
|
74d0b836e6877d1f7d23b69a191e653bcffd6f00
| 3,647,779
|
def non_halting(p):
"""Return a non-halting part of parser `p` or `None`."""
return left_recursive(p) or non_halting_many(p)
|
d9d8b87cad15c5416041c40396bd3e51b0c28051
| 3,647,780
|
from nltk.corpus import wordnet  # assumed import: wordnet.synsets is used below
def _isValidWord(word):
    """Determine whether a word is valid. A valid word is a valid English
    non-stop word."""
if word in _englishStopWords:
return False
elif word in _englishWords:
return True
elif wordnet.synsets(word):
return True
else:
return False
|
aa0dd1ceecc807b3aa6ecf740d5ec547bf748e7c
| 3,647,781
|
def compare_floats(value1: float, value2: float):
    """Compares two floats for approximate equality (tolerance 1e-6)."""
    return abs(value1 - value2) <= 10**-6
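# Usage sketch:
# compare_floats(0.1 + 0.2, 0.3)  # -> True despite binary rounding error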
|
225a8fd4d472fe630efe32c506cb1ac3f7ff4b5f
| 3,647,782
|
from typing import Tuple
import os
import numpy as np
def carrega_dataset(caminho_diretorio: str, divisao: Tuple[int, int], embaralhar=True) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Specify the directory path where the `noisy.npy` and `original.npy` files are located.
    Args:
        caminho_diretorio (str): directory path.
        divisao (Tuple[int, int]): how the dataset is split between training and test.
        embaralhar (bool, optional): whether to shuffle the dataset. Defaults to True.
    Returns:
        Tuple: returns (x_train, y_train, x_test, y_test)
    """
if caminho_diretorio != '':
x = np.load(os.path.join(caminho_diretorio, 'noisy.npy'))
y = np.load(os.path.join(caminho_diretorio, 'original.npy'))
else:
x = np.load('noisy.npy')
y = np.load('original.npy')
    if embaralhar:
        # Reset the seed before each shuffle so x and y receive the same permutation.
        np.random.seed(42)
        np.random.shuffle(x)
        np.random.seed(42)
        np.random.shuffle(y)
x_train, x_test = dividir_dataset_em_treinamento_e_teste(x, divisao=divisao)
y_train, y_test = dividir_dataset_em_treinamento_e_teste(y, divisao=divisao)
return (x_train, y_train, x_test, y_test)
|
30eb94954f012eb444f7cbcad24a5c943bafd4f4
| 3,647,783
|
import os
import tempfile
from cuml.utils.import_utils import has_treelite, has_xgboost
def _build_treelite_classifier(m, data, arg={}):
    """Setup function for treelite classification benchmarking"""
    if has_treelite():
        import treelite
        import treelite.runtime
    else:
        raise ImportError("No treelite package found")
    if has_xgboost():
        import xgboost as xgb
    else:
        raise ImportError("No XGBoost package found")
# use maximum 1e5 rows to train the model
train_size = min(data[0].shape[0], 100000)
dtrain = xgb.DMatrix(data[0][:train_size, :], label=data[1][:train_size])
params = {
"silent": 1, "eval_metric": "error", "objective": "binary:logistic"
}
params.update(arg)
max_depth = arg["max_depth"]
num_rounds = arg["num_rounds"]
n_feature = data[0].shape[1]
tmpdir = tempfile.mkdtemp()
model_name = f"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model"
model_path = os.path.join(tmpdir, model_name)
bst = xgb.train(params, dtrain, num_rounds)
tl_model = treelite.Model.from_xgboost(bst)
tl_model.export_lib(
toolchain="gcc", libpath=model_path+"treelite.so",
params={'parallel_comp': 40}, verbose=False
)
return treelite.runtime.Predictor(model_path+"treelite.so", verbose=False)
|
095d9748988d55d2b578c0cb74fc4a662aa660c3
| 3,647,784
|
def _pkq(pk):
"""
Returns a query based on pk.
Note that these are designed to integrate with cells and how they are saved in the database
:Parameters:
----------------
pk : list
list of primary keys
:Returns:
-------
dict
mongo query filtering for table
:Examples:
----------
>>> import datetime
>>> assert _pkq(None) == {}
>>> assert dict(_pkq(['world', 'hello'])) == {"_pk": {"$eq": ["hello", "world"]}}
"""
    if pk is None or len(pk) == 0:
        return {}
    else:
        return q[_pk] == sorted(pk)  # _pk lists are stored sorted, per the doctest above
|
d17527132c26c7e3504471f8456baccea295c71e
| 3,647,785
|
import torch
import numpy as np
from PIL import Image
def inspect_decode_labels(pred, num_images=1, num_classes=NUM_CLASSES,
                          inspect_split=[0.9, 0.8, 0.7, 0.5, 0.0], inspect_ratio=[1.0, 0.8, 0.6, 0.3]):
    """Decode batch of segmentation masks according to the prediction probability.
Args:
pred: result of inference.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
inspect_split: probability between different split has different brightness.
Returns:
A batch with num_images RGB images of the same size as the input.
"""
if isinstance(pred, torch.Tensor):
pred = pred.data.cpu().numpy()
n, c, h, w = pred.shape
pred = pred.transpose([0, 2, 3, 1])
if n < num_images:
num_images = n
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (w, h))
pixels = img.load()
for j_, j in enumerate(pred[i, :, :, :]):
for k_, k in enumerate(j):
assert k.shape[0] == num_classes
k_value = np.max(softmax(k))
k_class = np.argmax(k)
for it, iv in enumerate(inspect_split):
if k_value > iv: break
if iv > 0:
pixels[k_,j_] = tuple(map(lambda x: int(inspect_ratio[it]*x), label_colours[k_class]))
outputs[i] = np.array(img)
return torch.from_numpy(outputs.transpose([0, 3, 1, 2]).astype('float32')).div_(255.0)
|
d8ee386e2088428b7bfe5579cc5558cf4d6890f1
| 3,647,786
|
from typing import Dict
from typing import Union
def set_default_values(
**attributes: Dict[str, Union[float, int, str]],
) -> Dict[str, Union[float, int, str]]:
"""Set the default value of various parameters.
:param attributes: the attribute dict for the electronic filter being calculated.
:return: attributes; the updated attribute dict.
:rtype: dict
"""
if attributes["quality_id"] <= 0:
attributes["quality_id"] = 1
return attributes
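# Usage sketch (hypothetical attributes):
# set_default_values(quality_id=0, style_id=2)  # -> {'quality_id': 1, 'style_id': 2}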
|
3c8871706446b2bd0aec1879b06e443a57898a96
| 3,647,787
|
import inspect
def validate_function(fn: FunctionType, config: Configuration, module_type: ModuleType) -> FunctionValidationResult:
"""Validates the docstring of a function against its signature.
Args:
fn (FunctionType): The function to validate.
config (Configuration): The configuration to use while validating.
module_type (ModuleType): The module from which the function was extracted.
Returns:
FunctionValidationResult: The result of validating this function.
"""
log(f"Validating function: {fn}")
result = FunctionValidationResult(fn)
doc = inspect.getdoc(fn)
if not doc:
if config.fail_on_missing_docstring:
result.result = ResultType.FAILED
result.fail_reason = f"Function does not have a docstring"
_, line_number = inspect.getsourcelines(fn)
result.range = Range(line_number, line_number, 0, 0)
else:
result.result = ResultType.NO_DOC
return result
parser = config.get_parser()
summary = parser.get_summary(doc, module_type)
if not summary and config.fail_on_missing_summary:
result.result = ResultType.FAILED
result.fail_reason = f"Function does not have a summary"
result.range = __get_docstring_range(fn, module_type, doc)
return result
sig = inspect.signature(fn)
sig_parameters = [Parameter(name, proxy.annotation) for name, proxy in sig.parameters.items() if name != "self"]
sig_return_type = type(None) if sig.return_annotation is None else sig.return_annotation
try:
doc_parameters = parser.get_parameters(doc, module_type)
doc_return_type = parser.get_return_type(doc, module_type)
except ParseException as e:
result.result = ResultType.FAILED
result.fail_reason = f"Unable to parse docstring: {str(e)}"
result.range = __get_docstring_range(fn, module_type, doc)
return result
# Validate return type
if sig_return_type != doc_return_type:
result.result = ResultType.FAILED
result.fail_reason = f"Return type differ. Expected (from signature) {sig_return_type}, but got (in docs) {doc_return_type}."
result.range = __get_docstring_range(fn, module_type, doc)
return result
# Validate equal number of parameters
if len(sig_parameters) != len(doc_parameters):
result.result = ResultType.FAILED
result.fail_reason = f"Number of arguments differ. Expected (from signature) {len(sig_parameters)} arguments, but found (in docs) {len(doc_parameters)}."
result.range = __get_docstring_range(fn, module_type, doc)
return result
# Validate name and type of function parameters
for sigparam, docparam in zip(sig_parameters, doc_parameters):
if sigparam.name != docparam.name:
result.result = ResultType.FAILED
result.fail_reason = f"Argument name differ. Expected (from signature) '{sigparam.name}', but got (in docs) '{docparam.name}'"
result.range = __get_docstring_range(fn, module_type, doc)
return result
# NOTE: Optional[str] == Union[str, None] # True
if sigparam.type != docparam.type:
result.result = ResultType.FAILED
result.fail_reason = f"Argument type differ. Argument '{sigparam.name}' was expected (from signature) to have type '{sigparam.type}', but has (in docs) type '{docparam.type}'"
result.range = __get_docstring_range(fn, module_type, doc)
return result
# Validate exceptions raised
if config.fail_on_raises_section:
sig_exceptions = get_exceptions_raised(fn, module_type)
doc_exceptions = parser.get_exceptions_raised(doc)
if len(sig_exceptions) != len(doc_exceptions):
result.result = ResultType.FAILED
result.fail_reason = f"Number of listed raised exceptions does not match actual. Doc: {doc_exceptions}, expected: {sig_exceptions}"
result.range = __get_docstring_range(fn, module_type, doc)
return result
        undocumented = set(sig_exceptions) - set(doc_exceptions)  # raised but not documented
        if len(undocumented) > 0:
result.result = ResultType.FAILED
result.fail_reason = f"Listed raised exceptions does not match actual. Docstring: {doc_exceptions}, expected: {sig_exceptions}"
result.range = __get_docstring_range(fn, module_type, doc)
return result
result.result = ResultType.OK
return result
|
cc9c858f8ade844b89d944dc149c0233ed5741e7
| 3,647,788
|
import registry
import os
def find_existing_installation(package_name: str, display_name: str, test=True):
"""
Finds an existing installation of a package in the windows registry given the package name and display name
#### Arguments
package_name (str): Name of the package
display_name (str): Display name of the package
test (bool, optional): If the command is being run to test successful installation / uninstallation. Defaults to True.
    Returns:
        bool: True if an existing installation of the package was found, False otherwise
"""
key = registry.get_uninstall_key(package_name, display_name)
installed_packages = [''.join(f.replace('.json', '').split(
'@')[:1]) for f in os.listdir(PathManager.get_appdata_directory() + r'\Current')]
if key:
if not test:
return package_name in installed_packages
return True
return False
|
70f6b8536693be2541c0365a743231eea95cc542
| 3,647,789
|
def say(l, b, i):
"""
!d Repeat a word or phrase
!a <message...>
!r moderator
"""
try:
        print('Saying the phrase:', ' '.join(i.args))
b.l_say(' '.join(i.args), i, 1)
return True
except TypeError:
return False
|
260867612cd468babd42654c6d823649cbc73d41
| 3,647,790
|
import re
from tabulate import tabulate  # assumed import: tabulate() is used in the interactive branch
def rSanderSelect(dbItem,index=0,interactive=False):
"""
rSanderSelect(dbItem,index=0,interactive=False)
select which rSander henry data to use in dbItem
Parameters:
dbItem, db[key] dictionary object with keys = ['hbpSIP','hbpSIPL',
'hbpSI_index']
index, positive integer index for list item in hbpSIPL to move into
hbpSIP. Use interactive=True to display choices and ask for
user input for an index.
interactive, True to display choices and ask user input for an index,
False to make the change silently
Returns:
        Nothing on success (dbItem is successfully changed) or error messages
if there is an issue
"""
keys = ['hbpSIP','hbpSIPL','hbpSI_index']
for key in keys: #test if dbItem has valid dictionary keys
if key not in dbItem.keys():
return print("Henry data (%s) not found in dbItem[%s]\n" %
(key,dbItem['name']))
    nHbpSIPL = len(dbItem['hbpSIPL'])
if not interactive:
invalIndex = "Invalid index: %s\n0 <= index <= %s\n" % (index,nHbpSIPL-1)
if re.match(r'^[0-9]+$',str(index)): #make sure index is positive integer
if index > nHbpSIPL-1: #check for valid index
return print(invalIndex)
dbItem['hbpSI_index'] = index
dbItem['hbpSIP'] = [float(dbItem['hbpSIPL'][index][0]),
float(dbItem['hbpSIPL'][index][1])]
else:
return print(invalIndex)
else:
header = ['Index','Ho /mol/kg/Pa','dln(H)/d(1/T) /K','Code','Ref.']
inStr = "Select an index (%s to %s) or e(x)it: " % (0,nHbpSIPL-1)
choice = ''
while choice != 'x':
table = []
for idx in range(nHbpSIPL):
table.append([idx])
table[idx].extend(dbItem['hbpSIPL'][idx])
print('\n'+tabulate(table,headers=header,numalign='center',
stralign='center')+'\n')
choice = input(inStr)
inStr = "Select an index (%s to %s) or e(x)it: " % (0,nHbpSIPL-1)
invalStr = "Invalid input: %s\n0 <= index <= %s or \'x\' to exit\n" % (choice,nHbpSIPL-1)
if re.match(r'^[0-9]+$',choice):
index = int(choice)
if index > nHbpSIPL-1: #check for valid index
inStr = invalStr + inStr
else:
dbItem['hbpSI_index'] = index
dbItem['hbpSIP'] = [float(dbItem['hbpSIPL'][index][0]),
float(dbItem['hbpSIPL'][index][1])]
else:
inStr = invalStr + inStr
|
54e6a79a2095810e10032c2da59972e89ca186eb
| 3,647,791
|
def dataset_w_pedigree_field():
"""
:return: Return model Dataset example with `pedigree_field` defined.
"""
search_pattern = SearchPattern(left="*/*/*_R1.fastq.gz", right="*/*/*_R2.fastq.gz")
dataset = DataSet(
sheet_file="sheet.tsv",
sheet_type="germline_variants",
search_paths=("/path",),
search_patterns=(search_pattern,),
naming_scheme="only_secondary_id",
sodar_uuid="99999999-aaaa-bbbb-cccc-999999999999",
pedigree_field="familyId",
)
return dataset
|
2fce0d1391e234a7bb4f2a0bcab5ba24fc27abe0
| 3,647,792
|
import requests
def get_new_access_token(client_id, client_secret, refresh_token):
"""Use long-lived refresh token to get short-lived access token."""
response = requests.post(
'https://www.googleapis.com/oauth2/v4/token',
data={
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
},
timeout=TIMEOUT,
)
response.raise_for_status()
access_token = response.json()['access_token']
return access_token
|
a8f79511f8f0078121cf291752c2b315023df6de
| 3,647,793
|
def prettify_seconds(seconds):
"""
Prettifies seconds.
Takes number of seconds (int) as input and returns a prettified string.
Example:
>>> prettify_seconds(342543)
'3 days, 23 hours, 9 minutes and 3 seconds'
"""
if seconds < 0:
raise ValueError("negative input not allowed")
signs = {"s": {"singular": "second", "plural": "seconds", },
"h": {"singular": "hour", "plural": "hours"},
"min": {"singular": "minute", "plural": "minutes"},
"d": {"singular": "day", "plural": "days"}
}
    separator = ", "
    last_separator = " and "
def get_sign(unit, value):
if value == 1 or value == -1:
return signs[unit]["singular"]
else:
return signs[unit]["plural"]
days, remainder = divmod(seconds, 86400)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
daystext = "{} {}".format(days, get_sign("d", days)) if days else ""
hourstext = "{} {}".format(hours, get_sign("h", hours)) if hours else ""
minutestext = "{} {}".format(minutes, get_sign("min", minutes)) if minutes else ""
if (not seconds) and (days or hours or minutes):
secondstext = ""
else:
secondstext = "{} {}".format(seconds, get_sign("s", seconds))
output_list = [daystext, hourstext, minutestext, secondstext]
filtered = [item for item in output_list if item]
    if len(filtered) <= 2:
        output = last_separator.join(filtered)
    else:
        output = separator.join(filtered[:-1]) + last_separator + filtered[-1]
return output
|
4b77f9ed3d2085895ef15c6be30b7bfe83d1f49d
| 3,647,794
|
import re
def get_regions_prodigal(fn):
"""Parse prodigal output"""
regions = {}
with open(fn, 'r') as f:
for line in f:
if line[:12] == '# Model Data':
continue
if line[:15] == '# Sequence Data':
                m = re.search(r'seqhdr="(\S+)"', line)
if m:
id = m.group(1)
regions[id] = {}
regions[id]['+'] = []
regions[id]['-'] = []
            else:
                r = line[1:].rstrip().split('_')
                # Also store the index of the fragment - prodigal uses these
                # (rather than coords) to identify sequences in the fasta output.
                n = int(r[0])
                s = int(r[1])
                e = int(r[2])
                regions[id][r[3]].append(NumberedRegion(s, e, n))
return regions
|
d69f7b6d9dfc6802ad4dab3472f90a2d68b95bdd
| 3,647,795
|
from typing import Optional
def get_transform(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
transform_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTransformResult:
"""
Use this data source to access information about an existing resource.
:param str account_name: The Media Services account name.
:param str resource_group_name: The name of the resource group within the Azure subscription.
:param str transform_name: The Transform name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['transformName'] = transform_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:media/v20180701:getTransform', __args__, opts=opts, typ=GetTransformResult).value
return AwaitableGetTransformResult(
created=__ret__.created,
description=__ret__.description,
last_modified=__ret__.last_modified,
name=__ret__.name,
outputs=__ret__.outputs,
type=__ret__.type)
|
533ff2c95303c25b0a9741c36b34a755e18948e5
| 3,647,796
|
def default_preprocessing(df):
"""Perform the same preprocessing as the original analysis:
https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
"""
return df[(df.days_b_screening_arrest <= 30)
& (df.days_b_screening_arrest >= -30)
& (df.is_recid != -1)
& (df.c_charge_degree != 'O')
& (df.score_text != 'N/A')]
|
e6f4d8ceaa09fe71657e7936db886c3eabfb7aa0
| 3,647,797
|
def get_step_type_udfs(
step_type: str,
workflow: str,
adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
"""Get available artifact udfs for a step type"""
artifact_udfs = find_step_type_artifact_udfs(
adapter=adapter, step_type=step_type, workflow=workflow
)
process_udfs = find_step_type_process_udfs(
adapter=adapter, step_type=step_type, workflow=workflow
)
return artifact_udfs + process_udfs
|
f3ad3ad96d3f33e343afbb2ffcfa176fd4c6e654
| 3,647,798
|
def decode_base58(s: str) -> bytes:
"""
Decode base58.
:param s: base58 encoded string
:return: decoded data
"""
num = 0
for c in s:
if c not in BASE58_ALPHABET:
raise ValueError(
"character {} is not valid base58 character".format(c)
)
num *= 58
num += BASE58_ALPHABET.index(c)
h = hex(num)[2:]
h = '0' + h if len(h) % 2 else h
res = bytes.fromhex(h)
# Add padding back.
pad = 0
for c in s[:-1]:
if c == BASE58_ALPHABET[0]:
pad += 1
else:
break
return b'\x00' * pad + res
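# Usage sketch (assumes BASE58_ALPHABET is the Bitcoin alphabet
# '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'):
# decode_base58('StV1DL6CwTryKyV')  # -> b'hello world'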
|
ee56c73e4fd22f25cd0caf63651abc13a4ba147d
| 3,647,799
|