content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import platform
def get_dataset_mrnet_args(parser, args=None):
    """
    Register all MRNet dataset-related command line arguments on ``parser``.

    Args:
        parser (argparse.ArgumentParser): Parser to extend in place.
        args: Unused; kept for backward compatibility with existing callers
            (changed from a mutable ``[]`` default).

    Returns:
        argparse.ArgumentParser: The same parser, with MRNet options added.
    """
    # determine path: default data location depends on the operating system
    if platform.system() == "Linux":
        path = "/home/biomech/Documents/OsteoData/MRNet-v1.0/"
    else:
        path = "C:/Users/Niko/Documents/data/MRNet-v1.0/MRNet-v1.0"
    # path = "C:/Users/ga46yeg/data/MRNet-v1.0"
    # Dataset MRNet:
    # ------------------------------------------------------------------------
    parser.add_argument(
        "--root_dir_mrnet", type=str, default=path, help="Directory of the dataset"
    )
    # BUG FIX: ``type=list`` would split a command-line string into single
    # characters; ``nargs="+"`` with ``type=str`` is the correct way to
    # accept a list of strings while keeping the same defaults.
    parser.add_argument(
        "--perspectives",
        nargs="+",
        type=str,
        default=["axial", "coronal", "sagittal"],
        help="Perspectives of the Mr Scans",
    )
    parser.add_argument(
        "--classes",
        nargs="+",
        type=str,
        default=["abn", "acl", "men"],
        help="Classify for these classes",
    )
    # ------------------------------------------------------------------------
    return parser
import base64
def convertImageToBase64(image):
    """ Convert image to base64 for transmission
    Args:
        image (obj): opencv image object
    Returns:
        (str): image encoded as base64
    """
    # NOTE(review): relies on ``cv2`` being imported at module level -- confirm.
    # JPEG-encode the image; im_arr: image in Numpy one-dim array format.
    _, im_arr = cv2.imencode('.jpg', image)
    # Raw JPEG bytes, then base64-encode and decode into an ASCII-safe str.
    im_bytes = im_arr.tobytes()
    return base64.b64encode(im_bytes).decode('utf-8') | 25f4ce7e9dce20ebb50fc55d31c52c77b0b7aa4b | 24,301 |
def anchor_inside_flags(flat_anchors, valid_flags, tsize, allowed_border=0):
    """Check whether the anchors are inside the border.
    Args:
        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 2).
        valid_flags (torch.Tensor): An existing valid flags of anchors.
        tsize (int): Temporal size of current video.
        allowed_border (int, optional): The border to allow the valid anchor.
            Defaults to 0.
    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a
            valid range.
    """
    # A negative border disables the inside-range check entirely.
    if allowed_border < 0:
        return valid_flags
    # Anchor start must not fall before -border; anchor end must stay
    # strictly below tsize + border.
    starts_ok = flat_anchors[:, 0] >= -allowed_border
    ends_ok = flat_anchors[:, 1] < tsize + allowed_border
    return valid_flags & starts_ok & ends_ok
def args2command(*args):
    """Convert positional arguments to a list of stripped strings.

    Args:
        *args: Values to stringify; ``None`` and empty strings are rejected.

    Returns:
        list[str]: ``str(arg).strip()`` for each argument, in order.

    Raises:
        ValueError: If any argument is ``None`` or the empty string.
    """
    # Explicit validation instead of ``assert`` (asserts are stripped under
    # ``python -O``) wrapped in a bare except that masked the real failure.
    if None in args or "" in args:
        print("args:", args)
        raise ValueError("None values not allowed in args!")
    return [str(arg).strip() for arg in args]
from typing import Tuple
from typing import Optional
from typing import List
def parse_one(line: str) -> Tuple[Optional[str], List[str]]:
    """
    Returns (first corruption char, remaining stack)
    """
    open_brackets: List[str] = []
    for char in line:
        if char in BRACKET_MAP:
            # Opening bracket: remember it and keep scanning.
            open_brackets.append(char)
        elif char != BRACKET_MAP[open_brackets[-1]]:
            # Closing bracket that does not match the innermost opener:
            # the line is corrupted at this character.
            return char, open_brackets
        else:
            # Matching closer: discard the innermost opener.
            open_brackets.pop()
    return None, open_brackets
def load_callable_dotted_path(dotted_path, raise_=True, reload=False):
    """
    Like load_dotted_path but verifies the loaded object is a callable
    """
    loaded_object = load_dotted_path(dotted_path=dotted_path,
                                     raise_=raise_,
                                     reload=reload)
    # Guard clause: hand back the object as soon as we know it is callable.
    if callable(loaded_object):
        return loaded_object
    raise TypeError(f'Error loading dotted path {dotted_path!r}. '
                    'Expected a callable object (i.e., some kind '
                    f'of function). Got {loaded_object!r} '
                    f'(an object of type: {type(loaded_object).__name__})')
def get_engine():
    """Helper method to grab the engine from the lazily-created facade."""
    return _create_facade_lazily().get_engine()
def generate_diagonals():
    """
    Build, for each diagonal a knight can reach, the list of path counts.

    Returns:
        dict: Maps the diagonal number (as a string) to the list of the
        number of distinct ways to reach each cell of that diagonal.
        Successive diagonals form the rows of Pascal's triangle.
    """
    diagonals_dict: dict[str, list[int]] = {'2': [1]}
    for diagonal_number in range(5, 50, 3):
        # Counts of the previous diagonal.  BUG FIX: this is a list of ints;
        # the original annotation wrongly declared it ``dict[str, list[int]]``.
        prev_list: list[int] = diagonals_dict[str(diagonal_number - 3)]
        # Each interior cell is reachable from the two adjacent cells of the
        # previous diagonal; the edge cells always have exactly one path.
        new_list: list[int] = [a + b for a, b in zip(prev_list, prev_list[1:])]
        diagonals_dict[str(diagonal_number)] = [1] + new_list + [1]
    return diagonals_dict
from typing import Union
def physical_rad_to_pix(im_prod: Union[Image, RateMap, ExpMap], physical_rad: Quantity,
                        coord: Quantity, z: Union[float, int] = None, cosmo=None) -> Quantity:
    """
    Another convenience function, this time to convert physical radii to pixels. It can deal with both angular and
    proper radii, so long as redshift and cosmology information is provided for the conversion from proper radii
    to pixels.
    :param Image/RateMap/ExpMap im_prod: The image product whose WCS defines the pixel scale.
    :param Quantity physical_rad: The physical radius to be converted to pixels.
    :param Quantity coord: The position of the object being analysed.
    :param float/int z: The redshift of the object (only required for input proper distance units like kpc).
    :param cosmo: The chosen cosmology for the analysis (only required for input proper distance units like kpc).
    :return: The converted radii, in an astropy Quantity with pix units.
    :rtype: Quantity
    """
    # Proper-distance input (kpc-like) with redshift + cosmology: convert to an angle first.
    if physical_rad.unit.is_equivalent("kpc") and z is not None and cosmo is not None:
        conv_rads = rad_to_ang(physical_rad, z, cosmo).to('deg')
    # Proper-distance input without the information needed for the conversion is an error.
    elif physical_rad.unit.is_equivalent("kpc") and (z is None or cosmo is None):
        raise ValueError("If you wish to convert to convert from proper distance units such as kpc, you must supply "
                         "a redshift and cosmology")
    # Already angular: just normalise to degrees.
    elif physical_rad.unit.is_equivalent("deg"):
        conv_rads = physical_rad.to('deg')
    elif physical_rad.unit == pix:
        raise UnitConversionError("You are trying to convert from pixel units to pixel units.")
    else:
        conv_rads = None
        raise UnitConversionError("cen_rad_units doesn't appear to be a distance or angular unit.")
    # Degrees-per-pixel scale at the given sky position yields the final conversion factor.
    phys_to_pix = 1 / pix_deg_scale(coord, im_prod.radec_wcs).value
    conv_rads = Quantity(conv_rads.value * phys_to_pix, 'pix')
    return conv_rads | 4a26079610c882e40a31c7ba2ca64f7a0ccdd901 | 24,308 |
from functools import reduce
def conversation_type_frequency_distribution(convo):
    """
    Returns the type frequency (unigram) distribution for the convo.
    Parameters
    ----------
    convo : Conversation
    Returns
    -------
    collections.Counter
    """
    # Sum the per-post frequency counters into one overall distribution.
    per_post_counts = (post_freq(post) for post in convo.posts.values())
    return reduce(lambda acc, counts: acc + counts, per_post_counts)
def _cleanup_legacy_namespace(input_string):
    """
    At some point in time, the ttml namespace was TTML_NAMESPACE_URI_LEGACY,
    then it got changed to TTML_NAMESPACE_URI. There are tons of those floating
    around, including our pre-dmr dfxps and ttmls files. The backend (this lib)
    can deal with both namespaces, but the amara front end cannot. We therefore
    convert all namespaces to the correct one (else a lot of namespace xml magic
    has to be done on the front end, and trust me, you don't want to do it).
    This function 'converts' all ...ttfa... to ...ttml... with a regex. To be a
    bit less reckless, we're checking that it's quoted, as in an attribute. (that
    of course doesn't guarantee the safety of this, just makes it a bit less
    likely that the legacy url is being used inside a text node. All of this
    because lxml cannot change namespace attribute values:
    https://bugs.launchpad.net/lxml/+bug/555602
    """
    # Build both replacement templates up front, then apply the two
    # substitutions in sequence (unanchored first, anchored second).
    no_anchor_repl = r'"%s\3' % TTML_NAMESPACE_URI
    anchored_repl = r'"%s\3\4' % TTML_NAMESPACE_URI
    cleaned = TTML_NAMESPACE_URI_LEGACY_NO_ANCHOR_RE.sub(no_anchor_repl, input_string)
    return TTML_NAMESPACE_URI_LEGACY_RE.sub(anchored_repl, cleaned)
import json
def _err_to_json(key, *args):
    """Translate an error key to the full JSON error response.

    Args:
        key: Key into the module-level ``errors`` table.
        *args: Format arguments interpolated into the detail template.

    Returns:
        str: JSON document with ``message`` and a single ``errors`` entry.

    Raises:
        KeyError: If ``key`` is not a known error.
    """
    # Look the entry up once; the previous ``assert key in errors`` guard is
    # stripped under ``python -O``, so rely on the explicit KeyError instead.
    entry = errors[key]
    code = entry[0]
    title = entry[1]
    detail = entry[2].format(*args)
    return json.dumps({
        'message':
        title,
        'errors': [{
            'title': title,
            'detail': detail,
            'code': code
        }]
    })
def multiref_represent(opts, tablename, represent_string = "%(name)s"):
    """
        Represent a list of references
        @param opt: the current value or list of values
        @param tablename: the referenced table
        @param represent_string: format string to represent the records
    """
    # Nothing selected -> generic "none" message.
    if not opts:
        return current.messages.NONE
    s3db = current.s3db
    table = s3db.table(tablename, None)
    # Referenced table may not be defined in this deployment.
    if table is None:
        return current.messages.NONE
    # Normalise a single value to a list so one code path handles both.
    if not isinstance(opts, (list, tuple)):
        opts = [opts]
    rows = current.db(table.id.belongs(opts)).select()
    # Map record id (as str) -> row for quick lookup.
    rstr = Storage([(str(row.id), row) for row in rows])
    keys = rstr.keys()
    # and/or short-circuit trick: ids not found fall back to UNKNOWN_OPT.
    represent = lambda o: str(o) in keys and \
                          represent_string % rstr[str(o)] or \
                          current.messages.UNKNOWN_OPT
    vals = [represent(o) for o in opts]
    # Multiple values are joined; a single value is unwrapped (or NONE).
    if len(opts) > 1:
        vals = ", ".join(vals)
    else:
        vals = len(vals) and vals[0] or current.messages.NONE
    return vals | 86cb90e04073ddb4ec5676de3d9c87417bed5740 | 24,312 |
def selected_cells(self):
    """Get the selected cells. Synchronous, so returns a list.
    Returns:
        A list of Cells.
    """
    # Drain the async generator chunk by chunk into one flat list.
    return [
        cell
        for chunk in self.selected_cells_async()
        for cell in chunk.cells
    ]
import warnings
def calc_annual_capital_addts_ferc1(steam_df, window=3):
    """
    Calculate annual capital additions for FERC1 steam records.
    Convert the capex_total column into annual capital additions: the
    `capex_total` column is the cumulative capital poured into the plant over
    time. This function takes the annual difference to generate the annual
    capital additions. It also generates a rolling average, to smooth out
    the big annual fluctuations.
    Args:
        steam_df (pandas.DataFrame): result of `prep_plants_ferc()`
        window (int): rolling-average window in years (defaults to 3).
    Returns:
        pandas.DataFrame: augmented version of steam_df with two additional
        columns: `capex_annual_addt` and `capex_annual_addt_rolling`.
    """
    # we need to sort the df so it lines up w/ the groupby
    steam_df = steam_df.sort_values(IDX_STEAM)
    # we group on everything but the year so the groups are multi-year unique
    # plants the shift happens within these multi-year plant groups
    steam_df['capex_total_shifted'] = steam_df.groupby(
        [x for x in IDX_STEAM if x != 'report_year'])[['capex_total']].shift()
    # Annual addition = this year's cumulative total minus last year's.
    steam_df = steam_df.assign(
        capex_annual_addt=lambda x: x.capex_total - x.capex_total_shifted
    )
    # Smooth the annual additions with a rolling average per plant group.
    addts = pudl.helpers.generate_rolling_avg(
        steam_df,
        group_cols=[x for x in IDX_STEAM if x != 'report_year'],
        data_col='capex_annual_addt',
        window=window
    )
    # Merge the rolling column back and derive per-MWh / per-MW / per-kW rates.
    steam_df_w_addts = (
        pd.merge(
            steam_df,
            addts[IDX_STEAM + ['capex_total', 'capex_annual_addt_rolling']],
            on=IDX_STEAM + ['capex_total'],
            how='left',
        )
        .assign(
            capex_annual_per_mwh=lambda x:
                x.capex_annual_addt / x.net_generation_mwh,
            capex_annual_per_mw=lambda x:
                x.capex_annual_addt / x.capacity_mw,
            capex_annual_per_kw=lambda x:
                x.capex_annual_addt / x.capacity_mw / 1000,
            capex_annual_per_mwh_rolling=lambda x:
                x.capex_annual_addt_rolling / x.net_generation_mwh,
            capex_annual_per_mw_rolling=lambda x:
                x.capex_annual_addt_rolling / x.capacity_mw,
        )
    )
    steam_df_w_addts = add_mean_cap_addts(steam_df_w_addts)
    # bb tests for volumne of negative annual capex
    neg_cap_addts = len(
        steam_df_w_addts[steam_df_w_addts.capex_annual_addt_rolling < 0]) \
        / len(steam_df_w_addts)
    neg_cap_addts_mw = (
        steam_df_w_addts[
            steam_df_w_addts.capex_annual_addt_rolling < 0]
        .net_generation_mwh.sum()
        / steam_df_w_addts.net_generation_mwh.sum())
    message = (f'{neg_cap_addts:.02%} records have negative capitial additions'
               f': {neg_cap_addts_mw:.02%} of capacity')
    # More than 10% negative additions is considered alarming; warn loudly.
    if neg_cap_addts > .1:
        warnings.warn(message)
    else:
        logger.info(message)
    return steam_df_w_addts | 3d1c07182f590f39f394a2e6ef78105b9ad2b745 | 24,314 |
import time
import json
async def async_upload_file(serialUID, filepath, upload_blockinfo):
    """Asynchronously upload one block of a file.

    :param serialUID: identifier appended to the upload API path
    :param filepath: path of the local file being uploaded
    :param upload_blockinfo: dict with at least ``startOffset``/``endOffset``;
        ``dataCRC32`` is filled in here before the request is signed
    :return: the tornado HTTP response object
    """
    ts = int(time.time() * 1000)
    # Compute the CRC32 of this block.
    data, crc32 = get_block_crc32(filepath, upload_blockinfo["startOffset"], upload_blockinfo["endOffset"])
    upload_blockinfo['dataCRC32'] = crc32
    # Encrypt the payload and produce the signature.
    request_data, sign_sha256 = encry_and_sign(upload_blockinfo, ts)
    uri_path = settings.get("APICONF").get("API_FILE_UPLOAD") + serialUID
    url = parse.urljoin(settings.get("URL").get("AI_UPLOAD_SERVER_URL"), uri_path)
    # build auth
    authinfo = build_authinfo(uri=uri_path, verb='POST', sign=sign_sha256, timestamp=ts)
    headers = {"Authorization": authinfo, "uploadInfo": json.dumps(request_data),
               "Content-Type": settings.Content_Type}
    client = tornado.httpclient.AsyncHTTPClient()
    # raise_error=False: the caller inspects the response instead of catching.
    request = tornado.httpclient.HTTPRequest(url, method="POST", body=data, headers=headers, validate_cert=False)
    res = await client.fetch(request, raise_error=False)
    return res | 08da476e3ce4b680b60124777972332a807137ce | 24,315 |
def parse(parser_name=None, file_key=None, **kwargs):
    """Call the given parser and return parsed data
    It is possible to give file key instead of parser name. In that case the name of the parser will be read from the
    file list.
    TODO: This is the old style of running parsers, can be deleted when all parsers are new style.
    Args:
        parser_name (String): Name of parser
        file_key (String): Used to look up parser in the Where file list.
        kwargs: Input arguments to the parser
    Returns:
        Parser: The parsed data
    """
    # Resolve the parser first, then run it.
    parser = setup_parser(parser_name=parser_name, file_key=file_key, **kwargs)
    return parser.parse()
def create_cv_split(file_train, file_test, col_label='label', col_group=None, n_folds=5, splitter='skf', random_state=33):
    """
    Parameters:
    splitter : str
        "kf", "skf", "gkf"
    Example:
    train_df, test_df = create_cv_split(os.path.join(args.data_dir, 'Train.csv'),
                                        os.path.join(args.data_dir, 'Test.csv'),
                                        col_label='Label',
                                        col_group=None,
                                        n_folds=5,
                                        splitter='skf',
                                        random_state=33)
    """
    #
    # In KFold and StratifiedKFold "groups" are always ignored
    # so we just make substitute to unify split call
    if col_group is None:
        col_group = col_label
    # NOTE(review): LabelEncoder / KFold / pd / np are assumed to be imported
    # at module level (sklearn, pandas, numpy) -- confirm.
    train_df = pd.read_csv(file_train)
    test_df = pd.read_csv(file_test)
    #
    # Label encoded label
    le = LabelEncoder()
    train_df[col_label + '_le'] = le.fit_transform(train_df[col_label])
    # Fake label for test (just for compatibility)
    test_df[col_label] = 0
    test_df[col_label + '_le'] = 0
    # Template column for fold_id
    train_df['fold_id'] = 0
    test_df['fold_id'] = 0 # (just for compatibility)
    # Check train/test columns
    assert list(train_df.columns) == list(test_df.columns), 'Different set or order of columns in train/test'
    if splitter == 'kf':
        kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)
    elif splitter == 'skf':
        kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state)
    elif splitter == 'gkf':
        kf = GroupKFold(n_splits=n_folds)
    else:
        raise ValueError('Posible values for splitter are: "kf", "skf", and "gkf"')
    # NOTE(review): ``val_index`` holds positional indices; ``index.isin``
    # compares against index labels -- correct only for a default RangeIndex.
    for fold_id, (train_index, val_index) in enumerate(kf.split(X=train_df, y=train_df[col_label].values, groups=train_df[col_group].values)):
        train_df.loc[train_df.index.isin(val_index), 'fold_id'] = fold_id
    # Check fold_id: must have corresponding number of folds
    assert len(train_df['fold_id'].unique()) == n_folds, 'Inconsistent number of folds'
    # Check fold_id: must be consequtive and start from 0
    lst = list(train_df['fold_id'])
    assert list(np.sort(np.unique(lst))) == list(range(0, max(lst)+1)), 'Non-consequtive, or starts not from 0'
    # Check groups: must not intersect
    if splitter == 'gkf':
        for i in range(n_folds):
            assert train_df[train_df['fold_id'] == i][col_group].isin(train_df[train_df['fold_id'] != i][col_group]).sum() == 0, 'Groups are intersected'
    # Shuffle
    # We use random_state+1 because 'df.sample' with the same seed after 'KFold.split' will re-create initial order
    train_df = train_df.sample(frac=1.0, random_state=random_state+1)
    #
    return train_df, test_df | 95a0ceb9c63c68a2cf322ccd72050cdf2708a59c | 24,317 |
def get_scanner(hass, config):
    """Validate the configuration and return a Bbox scanner."""
    scanner = BboxDeviceScanner(config[DOMAIN])
    # Only hand the scanner back when it initialised successfully.
    if scanner.success_init:
        return scanner
    return None
def _trim_name(image):
"""Remove the slash at the end of the filename."""
return image[:-1] if image[-1] == '/' else image | 823dd63920673352a18d73f83190853d5a234483 | 24,319 |
import os
def check_for_pyd_so(file_path):
    """Return True if a compiled extension (.pyd or .so) exists for *file_path*.

    Args:
        file_path (str): Path without extension.

    Returns:
        bool: Whether ``file_path + '.pyd'`` or ``file_path + '.so'`` is a file.
    """
    # ``or`` already yields a bool here; no need for ``True if ... else False``.
    return os.path.isfile(file_path + '.pyd') or os.path.isfile(file_path + '.so')
from sys import path
def load_data(data_file):
    """Loads data CSV into input and target ndarrays of shape (n_samples,
    features).
    Args:
        data_file (str): Local or remote CSV file containing data to load.
    Returns:
        ndarray: Input data of shape (n_samples, in_features).
        ndarray: Target data of shape (n_samples, out_features).
    """
    # BUG FIX: the module imported ``path`` from ``sys`` (the list of import
    # directories), but this function needs ``os.path`` for basename/join/
    # dirname.  Import the correct module locally so the fix is self-contained.
    from os import path
    # NOTE(review): pd, mkdtemp, rmtree and copy_from_gcs are assumed to be
    # imported at module level (pandas, tempfile, shutil) -- confirm.
    # Copy remote file from GCS
    filename = path.basename(data_file)
    if data_file.startswith('gs://'):
        dirname = mkdtemp()
        local_file = path.join(dirname, filename)
        remote_file = data_file
        copy_from_gcs(remote_file, local_file)
    else:
        dirname = path.dirname(data_file)
    # Two header rows: top level distinguishes 'Input' vs 'Target' columns.
    df = pd.read_csv(path.join(dirname, filename), header=[0, 1])
    X = df['Input'].values
    Y = df['Target'].values
    # Remove temporary directory if remote
    if data_file.startswith('gs://'):
        rmtree(dirname)
    return X, Y
from typing import Callable
def sines_sum(parameters: ndarray) -> Callable:
    """
    Construct a sum of sines for given parameters.
    Parameters
    ----------
    parameters : ndarray
        y0, amplitude1, frequency1, phase1, amplitude2, frequency2, phase2, ...
    Returns
    -------
    function
        f(x) = amplitude1*sin(2*pi*frequency1*x + phase1) +
        amplitude2*sin(2*pi*frequency2*x + phase2) + ... + y0
    """
    par = parameters

    def _sines_sum(x):
        # Each sine occupies a (amplitude, frequency, phase) triple after
        # the leading offset y0 = par[0].
        total = 0
        for term in range(len(par) // 3):
            base = 3 * term
            amplitude = par[base + 1]
            frequency = par[base + 2]
            phase = par[base + 3]
            total += amplitude * np.sin(2 * np.pi * frequency * x + phase)
        return total + par[0]

    return _sines_sum
def _divide_and_conquer_convex_hull(points):
    """
    Notes:
        O(n * log(n))
    Args:
        points:
    Returns:
    """
    # Small point clouds are handled directly with the Jarvis march.
    if len(points) < 6:
        return Hull(_jarvis_convex_hull(points))
    # Otherwise split the cloud in half, recurse, and merge the two hulls.
    midpoint = len(points) // 2
    min_hull = _divide_and_conquer_convex_hull(points[:midpoint])
    max_hull = _divide_and_conquer_convex_hull(points[midpoint:])
    return __merge_convex_hulls(min_hull, max_hull)
from typing import Union
def _parsed_method_to_method(
    parsed: Union[parse.UnderstoodMethod, parse.ImplementationSpecificMethod]
) -> Union[UnderstoodMethod, ImplementationSpecificMethod]:
    """Translate the parsed method into an intermediate representation.

    Both target classes take the same keyword arguments except that
    :class:`UnderstoodMethod` additionally carries the parsed ``body``;
    the shared arguments are therefore built once instead of being
    duplicated across the two branches.
    """
    # Reject unexpected subtypes up front, preserving the original error path.
    if not isinstance(
        parsed, (parse.ImplementationSpecificMethod, parse.UnderstoodMethod)
    ):
        assert_never(parsed)
        raise AssertionError("Should have never gotten here")
    common = dict(
        name=parsed.name,
        arguments=_parsed_arguments_to_arguments(parsed=parsed.arguments),
        returns=(
            None
            if parsed.returns is None
            else _parsed_type_annotation_to_type_annotation(parsed.returns)
        ),
        description=(
            _parsed_description_to_description(parsed.description)
            if parsed.description is not None
            else None
        ),
        contracts=_parsed_contracts_to_contracts(parsed.contracts),
        parsed=parsed,
    )
    # Keep the original branch order: implementation-specific first.
    if isinstance(parsed, parse.ImplementationSpecificMethod):
        return ImplementationSpecificMethod(**common)
    return UnderstoodMethod(body=parsed.body, **common)
def NS(namespace, tag):
    """
    Generate a namespaced tag for use in creation of an XML file
    """
    # Clark notation: the namespace URI in braces, followed by the tag.
    return '{%s}%s' % (XML_NS[namespace], tag)
import sys
from sys import version
def dump_requirements(nodes, strict=False):
    """Dump packages and their versions to a string.
    Format of the string is like a "requirements.txt"::
        # created with python-X.X
        package-1==1.2.3
        package-2==2.3.4
    :param nodes: List of ast nodes in a module.
    :param strict: If *True* throw an exception if a package is not found
    :returns: String containing requirements.
    """
    # BUG FIX: the module-level ``from sys import version`` binds a plain
    # string, so ``version(dist)`` below would raise TypeError.  Shadow it
    # locally with the distribution-version lookup from importlib.metadata.
    from importlib.metadata import version
    result = f'# created with python-{".".join([str(x) for x in sys.version_info[:3]])}\n'
    for package in get_packages(nodes):
        # Standard-library modules never appear in requirements.txt.
        if package in STDLIBNAMES:
            continue
        try:
            dist = get_distribution_name(package)
        except RequirementNotFound as exc:
            # Unknown packages abort only in strict mode (unless ignored).
            if strict and package not in _ignore_requirements:
                raise exc
            warn(f'The "{package}" requirement was not found.')
            continue
        result += f'{dist}=={version(dist)}\n'
    return result
def non_contradiction_instance_2(person_list,
                                 place_list,
                                 n,
                                 vi_function=vi,
                                 not_vi_function=not_vi,
                                 Everyone_str="Everyone",
                                 every_place_str="every place"):
    """
    T = {every x every P v(x,P)}
    new = not v(xi, xj) ----------- 0
    """
    # Pair a universal claim with a negated instance about two distinct
    # people; the final 0 is the (non-contradiction) label.
    person_a, person_b = get_n_different_items(person_list, 2)
    universal = vi_function(Everyone_str, every_place_str)
    negated_instance = not_vi_function(person_a, person_b)
    return universal, negated_instance, 0
def load_w2v_model(w2v_path):
    """
    Load a pretrained word2vec model from disk.
    :param w2v_path: path to a saved gensim Word2Vec model
    :return: the loaded gensim Word2Vec model
    """
    model = gensim.models.Word2Vec.load(w2v_path)
    return model
import hmac
def derive_keys(token, secret, strategy):
    """Derives keys for MAC and ENCRYPTION from the user-provided
    secret. The resulting keys should be passed to the protect and
    unprotect functions.
    As suggested by NIST Special Publication 800-108, this uses the
    first 128 bits from the sha384 KDF for the obscured cache key
    value, the second 128 bits for the message authentication key and
    the remaining 128 bits for the encryption key.
    This approach is faster than computing a separate hmac as the KDF
    for each desired key.
    """
    digest = hmac.new(secret, token + strategy, HASH_FUNCTION).digest()
    # Slice the single digest into the three keys described above.
    cache_key = digest[:DIGEST_SPLIT]
    mac_key = digest[DIGEST_SPLIT: 2 * DIGEST_SPLIT]
    encryption_key = digest[2 * DIGEST_SPLIT:]
    return {'CACHE_KEY': cache_key,
            'MAC': mac_key,
            'ENCRYPTION': encryption_key,
            'strategy': strategy}
from typing import Optional
def bitinfo_holding_ts(
    track_addr: Optional[str] = None,
    track_coin: Optional[str] = None,
    timeframe: Optional[str] = "4h",
    sma: Optional[int] = 20,
):
    """Scrap the data from bitinfo and calculate the balance based on the resample frequency.
    track_addr (str): The address to track.
    track_coin (str): The coin to track.
    timeframe (str): The resample frequency.
    sma (int): The moving average window.
    For example, if the website url is
    https://bitinfocharts.com/dogecoin/address/DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k-full/
    track_coin value would be `dogecoin` and track_addr would be `DRSqEwcnJX3GZWH9Twtwk8D5ewqdJzi13k`.
    For timeframe, we support frequency that listed on pandas doc, common value would be '4h', '1h', '1d'
    Full list of timeframe available: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
    """
    # BUG FIX: apply the defaults *before* logging, so the log line reports
    # the values actually used (previously it could report ``None``).
    track_addr = TRACK_ADDRESS if track_addr is None else track_addr
    track_coin = TRACK_COIN if track_coin is None else track_coin
    LOGGER.info(f"Scrapping data for {track_coin}, wallet address: {track_addr}")
    df_holding_ts = get_wallet_holding_data(
        coin=track_coin,
        address=track_addr,
    )
    balance_ts = produce_time_series(df_holding_ts, timeframe, sma)
    return balance_ts
import logging
def sharpe(p):
    """Annualized Sharpe ratio of the returns.

    Args:
        p: Periodic returns supporting ``.mean()`` and ``.std()``
           (e.g. a numpy array or pandas Series).

    Returns:
        float: ``mean / std * sqrt(252)``, or ``np.inf`` when volatility
        is zero.
    """
    # BUG FIX: numpy/pandas division by a zero scalar does not raise
    # ZeroDivisionError (it warns and yields inf/nan), so the original
    # ``except`` never fired for array inputs.  Check the zero-volatility
    # case explicitly instead.
    vol = p.std()
    if vol == 0:
        logging.error("Zero volatility, divide by zero in Sharpe ratio.")
        return np.inf
    return p.mean() / vol * np.sqrt(252)
from ase.lattice.cubic import FaceCenteredCubic
from ase.lattice.cubic import BodyCenteredCubic
import six
def create_manual_slab_ase(lattice='fcc', miller=None, host_symbol='Fe',
                           latticeconstant=4.0, size=(1, 1, 5), replacements=None, decimals=10,
                           pop_last_layers=0):
    """
    Wraps ase.lattice lattices generators to create a slab having given lattice vectors directions.
    :param lattice: 'fcc' and 'bcc' are supported. Set the host lattice of a slab.
    :param miller: a list of directions of lattice vectors
    :param symbol: a string specifying the atom type
    :param latticeconstant: the lattice constant of a structure
    :param size: a 3-element tuple that sets supercell size. For instance, use (1,1,5) to set
                 5 layers of a slab.
    :param replacements: dict mapping a layer number (int or str) to the replacement atom type
    :param decimals: sets the rounding of atom positions. See numpy.around.
    :param pop_last_layers: specifies how many bottom layers to remove. Sometimes one does not want
                            to use the integer number of unit cells along z, extra layers can be
                            removed.
    :return structure: an ase-lattice representing a slab with replaced atoms
    """
    # Default to the identity orientation when no miller directions are given.
    if miller is None:
        miller = [[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]]
    if lattice == 'fcc':
        structure_factory = FaceCenteredCubic
    elif lattice == 'bcc':
        structure_factory = BodyCenteredCubic
    else:
        raise ValueError(
            'The given lattice {} is not supported'.format(lattice))
    # pbc=(1, 1, 0): periodic in-plane, open along z (slab geometry).
    structure = structure_factory(miller=miller, symbol=host_symbol, pbc=(1, 1, 0),
                                  latticeconstant=latticeconstant, size=size)
    # Only the per-layer atom counts are needed here; other returns discarded.
    * _, layer_occupancies = get_layer_by_number(structure, 0)
    if replacements is not None:
        keys = six.viewkeys(replacements)
        if max((abs(int(x)) for x in keys)) >= len(layer_occupancies):
            raise ValueError('"replacements" has to contain numbers less than number of layers')
    else:
        replacements = {}
    layer_occupancies.append(0) # technical append
    # Remove atoms of the requested number of bottom layers, counting from the end.
    atoms_to_pop = np.cumsum(np.array(layer_occupancies[-1::-1]))
    for i in range(atoms_to_pop[pop_last_layers]):
        structure.pop()
    current_symbols = structure.get_chemical_symbols()
    # Replace the chemical symbols of all atoms in each requested layer.
    for i, at_type in six.iteritems(replacements):
        if isinstance(i, str):
            i = int(i)
        layer, layer_z, layer_occupancies = get_layer_by_number(structure, i)
        layer_occupancies.insert(0, 0)
        # Negative indices count layers from the top; shift for the cumsum lookup.
        if i < 0:
            i = i - 1
        atoms_to_skip = np.cumsum(np.array(layer_occupancies))[i]
        for k in range(layer_occupancies[i + 1]):
            current_symbols[k+atoms_to_skip] = at_type
    structure.set_chemical_symbols(current_symbols)
    # Round positions to avoid floating-point noise in layer detection downstream.
    structure.positions = np.around(structure.positions, decimals=decimals)
    return structure | 47447a6f34b48865ab0bc4824f05c98e976b92be | 24,332 |
def telephone():
    """Generates random 10 digit phone numbers and returns them as a dictionary entry"""
    # Ten random digits, hyphenated as DDD-DDD-DDDD.
    digits = [str(rand.randint(0, 9)) for _ in range(10)]
    number = "".join(digits[0:3]) + "-" + "".join(digits[3:6]) + "-" + "".join(digits[6:10])
    return {"telephone": number}
from datetime import datetime
from typing import List
import warnings
import time
def get_kline(symbol: str, end_date: [datetime, str], freq: str,
start_date: [datetime, str] = None, count=None, fq: bool = False) -> List[RawBar]:
"""获取K线数据
:param symbol: 币安期货的交易对 BTCUSDT/ETHUSDT
:param start_date: 开始日期
:param end_date: 截止日期
:param freq: K线级别,可选值 ['1min', '5min', '30min', '60min', 'D', 'W', 'M']
:param count: K线数量,最大值为 5000
:param fq: 是否进行复权
:return: pd.DataFrame
>>> start_date = datetime.strptime("20200101", "%Y%m%d")
>>> end_date = datetime.strptime("20210701", "%Y%m%d")
>>> df1 = get_kline(symbol="BTCUSDT", start_date=start_date, end_date=end_date, freq="1min")
>>> df2 = get_kline(symbol="000001.XSHG", end_date=end_date, freq="1min", count=1000)
>>> df3 = get_kline(symbol="000001.XSHG", start_date='20200701', end_date='20200719', freq="1min", fq=True)
>>> df4 = get_kline(symbol="000001.XSHG", end_date='20200719', freq="1min", count=1000)
"""
# 从币安获取k线数据
if count and count > 1300:
warnings.warn(f"count={count}, 超过5000的最大值限制,仅返回最后5000条记录")
end_date = datetime.now()
result = []
if start_date:
start_date = pd.to_datetime(start_date)
while len(result) == 0:
try:
result = request_client.get_candlestick_data(symbol=symbol,
interval=freq_convert[freq],
startTime=start_date.timestamp() * 1000,
endTime=end_date.timestamp() * 1000)
except:
print("重连了")
time.sleep(2)
elif count:
while len(result) == 0:
try:
result = request_client.get_candlestick_data(symbol=symbol,
interval=freq_convert[freq],
endTime=end_date.timestamp() * 1000,
limit=count)
except:
print("重连了")
time.sleep(2)
else:
raise ValueError("start_date 和 count 不能同时为空")
bars = []
for kline in result:
bars.append(RawBar(symbol=symbol, dt=datetime.fromtimestamp(kline.openTime / 1000),
open=round(float(kline.open), 2),
close=round(float(kline.close), 2),
high=round(float(kline.high), 2),
low=round(float(kline.low), 2),
vol=int(float(kline.volume))))
return bars | 5f6d9cdd82adf1a79dc9ed054de139170249ac13 | 24,334 |
def get_terms(properties, out_log, classname):
    """Return the validated list of energy terms from *properties*."""
    terms = properties.get('terms', dict())
    # Terms must be a non-empty list.
    if not terms or not isinstance(terms, list):
        message = classname + ': No terms provided or incorrect format'
        fu.log(message + ', exiting', out_log)
        raise SystemExit(message)
    # Each individual term must also be recognised.
    if not is_valid_term(terms):
        message = classname + ': Incorrect terms provided'
        fu.log(message + ', exiting', out_log)
        raise SystemExit(message)
    return terms
from pathlib import Path
import os
def get_prefix(allow_base=False):
    """Get $CONDA_PREFIX as pathlib.Path object."""
    confirm_active()
    prefix = Path(os.environ.get("CONDA_PREFIX"))
    # The base environment is only acceptable when explicitly allowed.
    if allow_base or not is_base_env(prefix):
        return prefix
    raise ImportError(
        "Base conda env detected, activate an environment before running this command..."
    )
import struct
def _decomp_MAMFile(srcfile, destfile=''):
    """Decompress the MAM format of a SuperFetch or Prefetch file.

    Args:
        srcfile (str): Path of the compressed input file.
        destfile (str): Optional output path; when empty, the decompressed
            bytes are returned instead of written to disk.

    Returns:
        The decompressed data when ``destfile`` is empty, otherwise ``True``.
    """
    # Use context managers so file handles are closed even on error
    # (the original left handles open if an exception occurred).
    with open(srcfile, 'rb') as f:
        data = f.read()
    # Validate the signature before decompressing:
    #   MAM\x84 : SuperFetch files (Windows 8 and later)
    #   MAM\x04 : Prefetch files (Windows 10)
    sig = data[0:3].decode('utf8')  # expected 'MAM'
    flag = ord(data[3:4])           # b'\x84' or b'\x04'
    if (sig != 'MAM') or (flag not in (0x84, 0x04)):
        # NOTE(review): printing and calling exit() from a library helper is
        # drastic; consider raising ValueError instead.  Kept for
        # backward-compatible behaviour.
        print('[Error] Unknown format.')
        exit()
    decomp_size = struct.unpack('<i', data[4:8])[0]  # uncompressed data size
    header_len = 8  # signature + total uncompressed data size
    if flag == 0x84:  # SuperFetch carries an extra 4-byte field (checksum?)
        header_len += 4
    payload = data[header_len:]  # the compressed payload
    # Pre-size the output buffer and decompress in place.
    dest_data = bytearray(decomp_size)
    dest_data = comp.XpressHuffman['OpenSrc'].Decompress(payload, dest_data)
    if destfile == '':
        return dest_data
    with open(destfile, 'wb') as o:
        o.write(dest_data)
    return True
def cg(A, b, x=None, tol=1e-10, verbose=0, f=10, max_steps=None):
    """
    Conjugate-gradient solve of ``A x = b``.

    Parameters
    ----------
    A: A matrix, or a function capable of carrying out matrix-vector products.
    b: Right-hand side vector.
    x: Optional starting point (defaults to the zero vector).
    tol: Stop once the search direction's energy norm drops below this.
    verbose: Print per-step diagnostics when nonzero.
    f: Recompute the residual from scratch every ``f`` steps to fight drift.
    max_steps: Iteration cap (defaults to the problem size).
    """
    n = b.size
    b = b.reshape(n)
    x = np.zeros(n) if x is None else x.reshape(n)
    if isinstance(A, np.ndarray):
        A = MatrixVectorProduct(A)
    steps = min(n, max_steps or n)
    alpha = None
    residual = b - A(x)
    direction = residual.copy()
    A_dir = A(direction)
    rr = residual.dot(residual)
    for step in range(steps):
        if step != 0:
            # Either refresh the residual exactly or update it incrementally.
            if f > 0 and step % f == 0:
                residual = b - A(x)
            else:
                residual -= alpha * A_dir
            rr_prev = rr
            rr = residual.dot(residual)
            beta = rr / rr_prev
            direction = residual + beta * direction
            A_dir = A(direction)
        if verbose:
            print("Step {}".format(step))
            print("Drift: {}.".format(np.linalg.norm(residual - b + A(x))))
            print("R norm: {}.".format(np.linalg.norm(residual)))
        energy = direction.dot(A_dir)
        if energy < tol:
            break
        alpha = rr / energy
        x += alpha * direction
    if verbose:
        residual = b - A(x)
        print("Final residual norm: {}.".format(np.linalg.norm(residual)))
    return x
def from_string(spec):
    """Construct a Device from a string.
    Args:
        spec: a string of the form
            /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
        or
            /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
        as cpu and gpu are mutually exclusive.
        All entries are optional.
    Returns:
        A Device.
    """
    # Start from an empty Device and let it populate itself from the spec.
    device = Device()
    return device.parse_from_string(spec)
from typing import OrderedDict
def load_HDFS_data_timestamp_approach(input_path, time_delta_sec, timestamp_format, cached_workflow_path='data_df.csv', sep=',', encoding='utf-8', cache_workflow=True):
    """
    Load a structured HDFS log and group event ids into fixed-width time windows.

    Args:
        input_path: path to the structured log csv (must contain the columns
            'Timestamp', 'LineId', 'Pid' and 'EventId')
        time_delta_sec: analyzed period of time in seconds (window width)
        timestamp_format: strftime-style timestamp format used in the logs
        cached_workflow_path: path where the grouped workflow csv is written
        sep: column separator of the input csv
        encoding: encoding of the input csv
        cache_workflow: if True, write the grouped workflow to
            ``cached_workflow_path``
    Returns:
        x_data: array of lists of event id's np.array(['E21', 'E22', ...], [...],...)
    """
    print('====== Input data summary ======')
    struct_log = pd.read_csv(input_path, sep=sep, encoding=encoding, header=0)
    freq_val = str(time_delta_sec) + 'S'
    struct_log['Timestamp'] = pd.to_datetime(struct_log['Timestamp'], format=timestamp_format, errors='ignore')
    struct_log = struct_log.drop(['LineId', 'Pid'], axis=1)
    struct_log.set_index('Timestamp', inplace=True)
    # Concatenate the event ids of every time window into one comma-separated string.
    struct_log = struct_log.groupby(pd.Grouper(freq=freq_val)).apply(lambda x: (x + ',').sum())
    struct_log = pd.DataFrame(struct_log['EventId'])
    # drop rows of NaT values in struct_log.index
    struct_log = struct_log[pd.notnull(struct_log.index)]
    data_dict = OrderedDict()
    for idx, row in struct_log.iterrows():
        # Split the concatenated string back into a list, dropping empty tokens.
        # (The original guarded with a redundant `= None` initialisation; the
        # assignment below is unconditional, so the guard was dead code.)
        data_dict[str(idx)] = list(filter(None, str(row['EventId']).split(',')))
    data_df = pd.DataFrame(list(data_dict.items()), columns=['group_id', 'event_sequence'])
    data_df['number_of_events'] = data_df['event_sequence'].apply(len)
    data_df = data_df[['group_id', 'number_of_events', 'event_sequence']]
    if cache_workflow:
        data_df.to_csv(cached_workflow_path, index=False)
    x_data = data_df['event_sequence'].values
    print('Total: {} instances'.format(x_data.shape[0]))
    return x_data
def pairwise_to_multiple(pwise, ref_seq, moltype, info=None):
    """
    turns pairwise alignments to a reference into a multiple alignment

    Parameters
    ----------
    pwise
        Series of pairwise alignments to ref_seq as
        [(non-refseq name, aligned pair), ...]
    ref_seq
        The sequence common in all pairwise alignments
    moltype
        molecular type for the returned alignment
    info
        info object

    Returns
    -------
    ArrayAlign
    """
    if not hasattr(ref_seq, "name"):
        raise TypeError(f"ref_seq must be a cogent3 sequence, not {type(ref_seq)}")
    # Union of the reference's gaps across every pairwise alignment.
    ref_copies = [seq for _, aln in pwise for seq in aln.seqs if seq.name == ref_seq.name]
    union_gaps = _gap_union(ref_copies)
    union_map = gap_coords_to_map(union_gaps, len(ref_seq))
    aligned = [Aligned(union_map, ref_seq)]
    for other_name, aln in pwise:
        curr_ref = aln.named_seqs[ref_seq.name]
        curr_ref_gaps = dict(curr_ref.map.get_gap_coordinates())
        other_seq = aln.named_seqs[other_name]
        other_gaps = dict(other_seq.map.get_gap_coordinates())
        # Gaps in the union that this pairwise reference lacks must be
        # injected into the non-reference sequence.
        diff_gaps = _combined_refseq_gaps(curr_ref_gaps, union_gaps)
        inject = _gaps_for_injection(other_gaps, diff_gaps, len(other_seq.data))
        if inject:
            inject_map = gap_coords_to_map(inject, len(other_seq.data))
            other_seq = Aligned(inject_map, other_seq.data)
        aligned.append(other_seq)
    # default to ArrayAlign
    return Alignment(aligned, moltype=moltype, info=info).to_type(
        array_align=True, moltype=moltype
    )
def get_dashboard(title: str):
    """Get a dashboard by title"""
    matches = sdk.search_dashboards(title=title)
    if matches:
        return matches[0]
    print(f"dashboard {title} was not found")
    return None
import torch
def translate_tensor(tensor, input_size=32, nt=2):
    """
    Data augmentation function to enforce periodic boundary conditions.

    Each sample in the batch is independently rolled (circularly shifted)
    by a random offset along every spatial dimension.

    Args:
        tensor: batch of shape (N, 1, H, W) or (N, 1, D, H, W).
        input_size: spatial extent of each sample.
        nt: number of allowed translation offsets per axis in the 2-D case
            (offsets are multiples of ``input_size // nt``).

    Returns:
        Tensor of the same shape with every sample randomly translated.

    Raises:
        ValueError: if the spatial part of the input is not 2- or 3-dimensional.
    """
    ndim = len(tensor[0, 0, :].shape)
    t = input_size // nt
    t_vec = np.linspace(0, (nt - 1) * t, nt).astype(int)
    rolled = []
    for i in range(len(tensor)):
        if ndim == 2:
            # translate by a random multiple of t along each of the two axes
            shifted = torch.roll(tensor[i, 0, :],
                                 (np.random.choice(t_vec),
                                  np.random.choice(t_vec)),
                                 (0, 1))
        elif ndim == 3:
            # NOTE(review): the 3-D case draws offsets from the full
            # 0..input_size range instead of t_vec — confirm this asymmetry
            # with the original author.
            shifted = torch.roll(tensor[i, 0, :], (
                np.random.choice(input_size), np.random.choice(input_size),
                np.random.choice(input_size)), (0, 1, 2))
        else:
            # BUG FIX: the original used a bare `raise` with no active exception.
            raise ValueError(
                "translate_tensor expects 2- or 3-dimensional samples, got {}".format(ndim))
        rolled.append(shifted)
    # Stack once instead of repeated torch.cat (avoids quadratic copying),
    # then restore the channel dimension.
    return torch.stack(rolled, dim=0).unsqueeze(1)
def _get_job_resources(args):
    """Extract job-global resources requirements from input args.

    Args:
      args: parsed command-line arguments

    Returns:
      Resources object containing the requested resources for the job
    """
    logging = param_util.build_logging_param(
        args.logging) if args.logging else None
    timeout = param_util.timeout_in_seconds(args.timeout)
    log_interval = param_util.log_interval_in_seconds(args.log_interval)
    # Gather all constructor arguments in one mapping, then expand it.
    resource_args = dict(
        min_cores=args.min_cores,
        min_ram=args.min_ram,
        machine_type=args.machine_type,
        disk_size=args.disk_size,
        disk_type=args.disk_type,
        boot_disk_size=args.boot_disk_size,
        image=args.image,
        regions=args.regions,
        zones=args.zones,
        logging=logging,
        logging_path=None,
        service_account=args.service_account,
        scopes=args.scopes,
        cpu_platform=args.cpu_platform,
        network=args.network,
        subnetwork=args.subnetwork,
        use_private_address=args.use_private_address,
        accelerator_type=args.accelerator_type,
        accelerator_count=args.accelerator_count,
        nvidia_driver_version=None,
        timeout=timeout,
        log_interval=log_interval,
        ssh=args.ssh,
        enable_stackdriver_monitoring=args.enable_stackdriver_monitoring,
        max_retries=args.retries,
        max_preemptible_attempts=args.preemptible,
        block_external_network=args.block_external_network)
    return job_model.Resources(**resource_args)
import os
import requests
def am_api_post_json(api_path, data):
    """
    POST json to the Archivematica API

    :param api_path: URL path to request (without hostname, e.g. /api/v2/location/)
    :param data: Dict of data to post
    :returns: dict of json data returned by request
    """
    base_url = os.environ["ARCHIVEMATICA_URL"]
    user = os.environ["ARCHIVEMATICA_USERNAME"]
    api_key = os.environ["ARCHIVEMATICA_API_KEY"]
    auth_headers = {"Authorization": f"ApiKey {user}:{api_key}"}
    url = f"{base_url}{api_path}"
    print(f"URL: {url}; Data: {data}")
    response = requests.post(url, json=data, headers=auth_headers)
    print(f"Response: {response}")
    response_json = response.json()
    print(f"Response JSON: {response_json}")
    return response_json
import os
def collect_checkpoint_paths(checkpoint_dir):
    """
    Generates a list of paths to each checkpoint file found in a folder.

    Note:
        - This function assumes, that checkpoint paths were written in relative.

    Arguments:
        checkpoint_dir (string):
            Path to the models checkpoint directory from which to collect checkpoints.

    Returns:
        paths (:obj:`list` of :obj:`string`):
            List of paths to each checkpoint file.
    """
    listing_file = os.path.join(checkpoint_dir, 'checkpoint')
    # BUG FIX: use a context manager so the listing file handle is closed
    # (the original `for line in open(...)` leaked it).
    with open(listing_file, 'r') as listing:
        lines = [line.strip() for line in listing]
    # Discard the first line since it only points to the latest checkpoint.
    lines = lines[1:]
    # Each remaining line looks like:
    #   all_model_checkpoint_paths: "model.ckpt-<global-step>"
    # Strip the key prefix and the surrounding quotation marks.
    lines = [line.replace('all_model_checkpoint_paths: ', '').replace('"', '')
             for line in lines]
    # Build absolute paths to each checkpoint file (paths in the file are relative).
    return [os.path.join(checkpoint_dir, line) for line in lines]
import torch
def cosine_distance(input1, input2):
    """Computes cosine distance.

    Args:
        input1 (torch.Tensor): 2-D feature matrix.
        input2 (torch.Tensor): 2-D feature matrix.

    Returns:
        torch.Tensor: distance matrix (1 - cosine similarity).
    """
    normed1 = F.normalize(input1, p=2, dim=1)
    normed2 = F.normalize(input2, p=2, dim=1)
    similarity = torch.mm(normed1, normed2.t())
    return 1 - similarity
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post',
                  truncating='post', value=0.):
    """ pad_sequences.

    Pad each sequence to the same length: the length of the longest sequence.
    If maxlen is provided, any sequence longer than maxlen is truncated to
    maxlen. Truncation happens off either the beginning or the end (default)
    of the sequence. Supports pre-padding and post-padding (default).

    Arguments:
        sequences: list of lists where each element is a sequence.
        maxlen: int, maximum length.
        dtype: type to cast the resulting sequence.
        padding: 'pre' or 'post', pad either before or after each sequence.
        truncating: 'pre' or 'post', remove values from sequences larger than
            maxlen either in the beginning or in the end of the sequence
        value: float, value to pad the sequences to the desired value.

    Returns:
        x: `numpy array` with dimensions (number_of_sequences, maxlen)

    Raises:
        ValueError: if `padding` or `truncating` is not 'pre' or 'post'.

    Credits: From Keras `pad_sequences` function.
    """
    lengths = [len(s) for s in sequences]
    nb_samples = len(sequences)
    if maxlen is None:
        maxlen = np.max(lengths)
    x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue  # empty list was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            # BUG FIX: the original interpolated `padding` into this message.
            raise ValueError("Truncating type '%s' not understood" % truncating)
        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError("Padding type '%s' not understood" % padding)
    return x
def get_regions(service_name, region_cls=None, connection_cls=None):
    """
    Given a service name (like ``ec2``), returns a list of ``RegionInfo``
    objects for that service.

    This leverages the ``endpoints.json`` file (+ optional user overrides) to
    configure/construct all the objects.

    :param service_name: The name of the service to construct the ``RegionInfo``
        objects for. Ex: ``ec2``, ``s3``, ``sns``, etc.
    :type service_name: string

    :param region_cls: (Optional) The class to use when constructing. By
        default, this is ``RegionInfo``.
    :type region_cls: class

    :param connection_cls: (Optional) The connection class for the
        ``RegionInfo`` object. Providing this allows the ``connect`` method on
        the ``RegionInfo`` to work. Default is ``None`` (no connection).
    :type connection_cls: class

    :returns: A list of configured ``RegionInfo`` objects
    :rtype: list
    """
    endpoints = load_regions()
    if service_name not in endpoints:
        raise BotoClientError(
            "Service '%s' not found in endpoints." % service_name
        )
    if region_cls is None:
        region_cls = RegionInfo
    # One RegionInfo per (region, endpoint) pair known for this service.
    return [
        region_cls(name=region_name, endpoint=endpoint,
                   connection_cls=connection_cls)
        for region_name, endpoint in endpoints.get(service_name, {}).items()
    ]
import math
def sol_dec(day_of_year):
    """
    Calculate solar declination from day of the year.

    Based on FAO equation 24 in Allen et al (1998).

    :param day_of_year: Day of year integer between 1 and 365 or 366).
    :return: solar declination [radians]
    :rtype: float
    """
    _check_doy(day_of_year)
    # Annual sinusoid with phase offset 1.39 rad and amplitude 0.409 rad.
    angle = (2.0 * math.pi / 365.0) * day_of_year - 1.39
    return 0.409 * math.sin(angle)
import unittest
def makeTestSuiteV201111():
    """Set up test suite using v201111.

    Returns:
      TestSuite test suite using v201111.
    """
    tests = unittest.makeSuite(NetworkServiceTestV201111)
    suite = unittest.TestSuite()
    suite.addTests(tests)
    return suite
def build_series(df):
    """
    Return a series tuple where:
    the first element is a list of dates,
    the second element is the series of the daily-type variables,
    the third element is the series of the current-type variables,
    the fourth element is the series of the cum-type variables.
    :param df: pd.DataFrame
    :return: tuple
    """
    # Chart x-axis labels: one formatted date string per row.
    dates = df[DATE_KEY].apply(lambda x: x.strftime(CHART_DATE_FMT)).tolist()
    # NOTE(review): every sort key below evaluates x[DATE_KEY] on dicts whose
    # only keys are 'id', 'name' and 'data'. This works only if DATE_KEY
    # happens to equal 'data' (i.e. series are ordered by their maximum
    # value); otherwise it raises KeyError. Verify against the constants.
    series_daily = sorted([
        {
            "id": col,
            "name": VARS[col]["title"],
            "data": df[col].tolist()
        }
        for col in DAILY_QUANTITIES
    ], key=lambda x: max(x[DATE_KEY]), reverse=True)
    series_cum = sorted([
        {
            "id": col,
            "name": VARS[col]["title"],
            "data": df[col].tolist()
        }
        for col in CUM_QUANTITIES
    ], key=lambda x: max(x[DATE_KEY]), reverse=True)
    series_current = sorted([
        {
            "id": col,
            "name": VARS[col]["title"],
            "data": df[col].tolist()
        }
        for col in NON_CUM_QUANTITIES
    ], key=lambda x: max(x[DATE_KEY]), reverse=True)
    # Order of the tuple: dates, daily, current, cumulative.
    series = (dates, series_daily, series_current, series_cum)
    return series
def lnZ(df_mcmc):
    """
    Compute log Z(1) from PTMCMC traces stored in DataFrame.

    Parameters
    ----------
    df_mcmc : pandas DataFrame, as outputted from run_ptmcmc.
        Only needs to contain the columns pertinent to computing ln Z,
        which are 'beta_ind', 'lnlike', and 'beta'.

    Returns
    -------
    output : float
        ln Z as computed by thermodynamic integration. This is
        equivalent to what is obtained by calling
        `sampler.thermodynamic_integration_log_evidence(fburnin=0)`
        where `sampler` is an emcee.PTSampler instance.

    Notes
    -----
    .. This is useful when the DataFrame from a PTSampler is too
       large to store in RAM.
    """
    # Mean log-likelihood per inverse temperature, in order of appearance.
    beta_inds = df_mcmc['beta_ind'].unique()
    log_mean = np.zeros(len(beta_inds))
    for i, b in enumerate(beta_inds):
        log_mean[i] = df_mcmc['lnlike'][df_mcmc['beta_ind'] == b].mean()
    # Ladder of inverse temperatures, extended with beta = 0.
    betas = np.concatenate((np.array(df_mcmc['beta'].unique()), (0,)))
    # Quadrature of <lnlike> d(beta) over the ladder.
    return np.dot(log_mean, -np.diff(betas))
def p2h(p, T=293., P0=1000., m=28.966, unit_p='mbar'):
    """ Returns an elevation from barometric pressure

    Parameters
    ----------
    p: {float, array}
        barometric pressure in mbar or torr specified with unit_p
    T: float, optional
        Temperature in K
    P0: float, optional
        Pressure at reference altitude in hPa (default = 1000.)
    m: float, optional
        average mass of gas molecules in u (default = 28.966)
    unit_p: {[mbar], torr}, optional

    Source
    ------
    http://en.wikipedia.org/wiki/Barometric_formula
    """
    if unit_p == 'torr':
        p = unit_conversion.torr2mbar(p)
    boltzmann = const.physical_constants['Boltzmann constant'][0]
    gravity = const.physical_constants['standard acceleration of gravity'][0]
    # Convert the molecular mass from u to kg.
    m *= 1 / const.physical_constants['Avogadro constant'][0] / 1000.
    # Inverted barometric formula: h = ln(P0/p) * kT / (m g)
    return (np.log(P0) - np.log(p)) * ((boltzmann * T) / (m * gravity))
from typing import Callable
def upon_teardown(f: Callable):
    """
    Use this decorator to mark a ploogin function as a handler invoked upon
    teardown.
    """
    handler = PlooginEventHandler(event=PlooginEvents.TEARDOWN, f=f)
    return handler
def get_train_test_indices_drone(df, frac, seed=None):
    """ Split indices of a DataFrame with binary and balanced labels into balanced subindices

    Args:
        df (pd.DataFrame): {0,1}-labeled data (label column named 'label')
        frac (float): fraction of indices in the first (train) subset
        seed (int): random seed used as np.random state and sampling random_state

    Returns:
        train_indices (list): balanced subset of indices corresponding to rows in the DataFrame
        test_indices (list): balanced subset of indices corresponding to rows in the DataFrame
    """
    # Rows to draw per class for the train split (half of the train fraction each).
    split_idx = int(len(df) * frac / 2)
    df_with = df[df['label'] == 1]
    df_without = df[df['label'] == 0]
    np.random.seed(seed)
    df_with_train = df_with.sample(n=split_idx, random_state=seed)
    df_with_test = df_with.drop(df_with_train.index)
    df_without_train = df_without.sample(n=split_idx, random_state=seed)
    df_without_test = df_without.drop(df_without_train.index)
    train_indices = list(df_without_train.index) + list(df_with_train.index)
    test_indices = list(df_without_test.index) + list(df_with_test.index)
    # (A dead debug string literal with a wrong denominator was removed here.)
    return train_indices, test_indices
import requests
from bs4 import BeautifulSoup
import re
def get_subs(choice, chatid, obj):
    """Return subtitle download links.

    Scrapes the yts-subs listing page for the chosen movie and collects up to
    56 entries: a .zip download URL and a "<language>: <title>" display string
    for each.

    Args:
        choice: index of the movie chosen by the user (string or int).
        chatid: chat identifier used to look up per-chat search state.
        obj: search-state object exposing ``get_url(chatid, index)``.

    Returns:
        Tuple ``(href, message)``: parallel lists of download URLs and titles.

    Raises:
        Exception: if the listing page cannot be fetched.
    """
    url = "https://yts-subs.com" + obj.get_url(chatid, int(choice))
    try:
        reponse = requests.get(url, headers=headers)
    except Exception as e:
        print(e)
        # BUG FIX: chain the original error so the root cause is not lost.
        raise Exception("Invalid url") from e
    soup = BeautifulSoup(reponse.content, 'html5lib')
    table = soup.find('tbody')
    results = table.findAll('tr')
    href = []
    message = []
    for i, result in enumerate(results):
        link = result.find('a')['href']
        link = link.replace('subtitles', 'subtitle')
        language = result.findAll('td', {'class': 'flag-cell'})[0].text.strip()
        title = result.find('a').text.strip()
        title = re.findall("subtitle (.*)", title)[0]
        title = re.sub(r'(\[.*\])', '', title)
        title = f"{language}: {title}"
        link = f"https://yifysubtitles.org{link}.zip"
        href.append(link)
        message.append(title)
        if(i == 55):  # cap the result list at 56 entries
            break
    return href, message
def get_shape(rhoa_range):
    """
    Find anomaly `shape` from apparent resistivity values framed to
    the best points.

    :param rhoa_range: The apparent resistivity from selected anomaly bounds
        :attr:`~core.erp.ERP.anom_boundaries`
    :type rhoa_range: array_like or list

    :returns: one of the anomaly shape codes
        - V
        - W
        - K
        - C
        - M
        - U

    :Example:
        >>> from watex.core.erp import get_shape
        >>> x = [60, 70, 65, 40, 30, 31, 34, 40, 38, 50, 61, 90]
        >>> shape = get_shape (rhoa_range= np.array(x))
        ...U
    """
    # Default shape when no other rule matches.
    shape ='V'
    # argrelextrema returns a tuple of index arrays; the try/except handles
    # both the unpackable and the raw-tuple form.
    try:
        minlocals_ix, = argrelextrema(rhoa_range, np.less)
    except :
        minlocals_ix = argrelextrema(rhoa_range, np.less)
    try :
        maxlocals_ix, = argrelextrema(rhoa_range, np.greater)
    except : maxlocals_ix = argrelextrema(rhoa_range, np.greater)
    value_of_median = np.median(rhoa_range)
    coef_UH = 1.2
    # First and last resistivity values (the anomaly's shoulders).
    c_=[rhoa_range[0] , rhoa_range[-1] ]
    if len(minlocals_ix)==0 :
        # NOTE(review): `(max(c_) and min(c_)) > value_of_median` compares
        # only min(c_) (Python `and` returns an operand) — confirm intent.
        if len(maxlocals_ix)==0 and\
            (max(c_) and min(c_)) > value_of_median :
            return 'U'
        return 'C'
    if len(minlocals_ix) ==1 :
        if max(c_) > np.median(rhoa_range) and min(c_) < value_of_median/2:
            return 'C'
        elif rhoa_range[minlocals_ix] > value_of_median or \
            rhoa_range[minlocals_ix] > max(c_):
            return 'M'
    if len(minlocals_ix)>1 :
        # NOTE(review): `(max(c_) or min(c_))` evaluates only max(c_) unless
        # it is zero — likely meant `max(c_) > ... or min(c_) > ...`; verify.
        if (max(c_) or min(c_))> value_of_median :
            shape ='W'
        if max(c_) > value_of_median and\
            min(c_) > value_of_median:
            if rhoa_range[maxlocals_ix].mean()> value_of_median :
                # NOTE(review): this truthiness test on a float is True for any
                # nonzero mean — a comparison operand appears to be missing.
                if coef_UH * rhoa_range[minlocals_ix].mean():
                    shape ='H'
                    coef_UH = 1.
                    if rhoa_range[minlocals_ix].mean() <= coef_UH * \
                        rhoa_range[maxlocals_ix].mean():
                        shape = 'U'
                else : shape ='K'
        elif (rhoa_range[0] and rhoa_range[-1]) < np.median(rhoa_range):
            shape = 'M'
        return shape
    return shape
def elslib_D2(*args):
    """
    * For elementary surfaces from the gp package (cones, cylinders, spheres and tori), computes: - the point P of parameters (U, V), and - the first derivative vectors Vu and Vv at this point in the u and v parametric directions respectively, and - the second derivative vectors Vuu, Vvv and Vuv at this point.

    Overloaded signatures (SWIG dispatches on the surface type):
        elslib_D2(U, V, C: gp_Cone,     P, Vu, Vv, Vuu, Vvv, Vuv)
        elslib_D2(U, V, C: gp_Cylinder, P, Vu, Vv, Vuu, Vvv, Vuv)
        elslib_D2(U, V, S: gp_Sphere,   P, Vu, Vv, Vuu, Vvv, Vuv)
        elslib_D2(U, V, T: gp_Torus,    P, Vu, Vv, Vuu, Vvv, Vuv)

    :param U: first surface parameter
    :type U: float
    :param V: second surface parameter
    :type V: float
    :param P: output point, filled in place
    :type P: gp_Pnt
    :param Vu: first derivative in u, filled in place
    :type Vu: gp_Vec
    :param Vv: first derivative in v, filled in place
    :type Vv: gp_Vec
    :param Vuu: second derivative in u, filled in place
    :type Vuu: gp_Vec
    :param Vvv: second derivative in v, filled in place
    :type Vvv: gp_Vec
    :param Vuv: mixed second derivative, filled in place
    :type Vuv: gp_Vec
    :rtype: void
    """
    # Delegate to the SWIG-generated native extension.
    return _ElSLib.elslib_D2(*args)
def readiness():
    """Handle GET requests that are sent to /api/v1/readiness REST API endpoint."""
    empty_payload = flask.jsonify({})
    return empty_payload, 200
def camino_minimo(origen,dest,grafo,aeropuertos_por_ciudad,pesado=True):
    """Compute the minimum-cost path between two cities of the graph.

    Tries every pair of airports (one from each city) and keeps the cheapest
    route found.

    Args:
        origen: origin city name (key of aeropuertos_por_ciudad).
        dest: destination city name.
        grafo: graph whose vertices are airports.
        aeropuertos_por_ciudad: mapping city -> list of its airports.
        pesado: if True use Dijkstra (weighted cost); otherwise BFS (hop count).

    Returns:
        (costo, camino): total cost and list of airports of the best path.
    """
    camino=[]
    costo=float("inf")
    for aeropuerto_i in aeropuertos_por_ciudad[origen]:
        for aeropuerto_j in aeropuertos_por_ciudad[dest]:
            if pesado:
                distancia, predecesores= utils.dijkstra(grafo,aeropuerto_i,aeropuerto_j)
            else:
                predecesores, distancia= utils.bfs(grafo,aeropuerto_i,aeropuerto_j)
            if distancia[aeropuerto_j]< costo:
                # Cheaper airport pair found: rebuild the stored path.
                costo=distancia[aeropuerto_j]
                camino.clear()
                utils.armar_camino(distancia,predecesores,camino,aeropuerto_i,aeropuerto_j)
            # Free the per-pair search structures before the next iteration.
            distancia.clear()
            predecesores.clear()
    return costo,camino
def extract_name_from_uri_or_curie(item, schema=None):
    """Extract name from uri or curie

    :arg str item: an URI or curie
    :arg dict schema: a JSON-LD representation of schema
    """
    if schema:
        # Prefer the rdfs:label recorded for this id in the schema graph;
        # fall back to plain URI/CURIE parsing when the id is absent.
        labels = [record["rdfs:label"] for record in schema["@graph"]
                  if record['@id'] == item]
        if labels:
            return labels[0]
        return extract_name_from_uri_or_curie(item)
    if 'http' not in item and len(item.split(":")) == 2:
        # CURIE: keep the part after the colon.
        return item.split(":")[-1]
    if len(item.split("//")[-1].split('/')) > 1:
        # URI: keep the last path segment.
        return item.split("//")[-1].split('/')[-1]
    raise ValueError('{} should be converted to either URI or curie'.format(item))
import os
def get_gcc_timeseries(site, roilist_id, nday=3):
    """
    Read in CSV version of summary timeseries and return
    GCCTimeSeries object.
    """
    # Canonical directory for this site's ROI lists.
    roi_dir = os.path.join(config.archive_dir, site, "ROI")
    # Canonical filename: <site>_<roilist>_<n>day.csv
    csv_name = "{}_{}_{}day.csv".format(site, roilist_id, nday)
    gccts = GCCTimeSeries(site=site, ROIListID=roilist_id)
    gccts.readCSV(os.path.join(roi_dir, csv_name))
    return gccts
import tqdm
def download_graph(coordinates, distances):
    """
    Build the OSM street graph from the requested coordinates.

    Downloads a walking network around each coordinate (radius =
    max(distances) + 100 m) and merges everything into one graph.
    """
    max_distance = max(distances)
    G = False
    print('Fetching street network')
    for coordinate in tqdm(coordinates, desc='Downloading'):
        if G:  # merge with the existing graph (deepcopy so the accumulated graph is not lost between iterations)
            G = nx.compose(deepcopy(G), ox.graph_from_point(coordinate,
                                                            distance=max_distance+100,
                                                            network_type='walk'))
        else:  # initialise the graph from the first point
            G = ox.graph_from_point(coordinate, distance=max_distance+100,
                                    network_type='walk')
    return G
def external_forces(cod_obj):
    """Interpolate the drift and diffusion fields at the object's current position.

    Returns:
        (drift, tensor): the drift vector components [x, y, z] and the six
        independent structure-tensor components [xx, xy, xz, yy, yz, zz].
    """
    # actual cone position (latest entry in the trajectory)
    x_pos, y_pos, z_pos = cod_obj.pos_list[-1]
    point = [x_pos, y_pos, z_pos]
    # Drift vector components: drift signal in all directions.
    drift = [forcep[name](point)[0] for name in ('divxp', 'divyp', 'divzp')]
    # Structure tensor components: diffusion metric / diffusive tensor.
    tensor = [forcep[name](point)[0]
              for name in ('stpxx', 'stpxy', 'stpxz', 'stpyy', 'stpyz', 'stpzz')]
    return drift, tensor
def charis_font_spec_css():
    """Font spec for using CharisSIL with Pisa (xhtml2pdf)."""
    # Resolve the fonts directory once, then splice it into every @font-face.
    fonts_dir = static_path('fonts')
    return """
    @font-face {{
        font-family: 'charissil';
        src: url('{0}/CharisSIL-R.ttf');
    }}
    @font-face {{
        font-family: 'charissil';
        font-style: italic;
        src: url('{0}/CharisSIL-I.ttf');
    }}
    @font-face {{
        font-family: 'charissil';
        font-weight: bold;
        src: url('{0}/CharisSIL-B.ttf');
    }}
    @font-face {{
        font-family: 'charissil';
        font-weight: bold;
        font-style: italic;
        src: url('{0}/CharisSIL-BI.ttf');
    }}
    """.format(fonts_dir)
def set_symbols(pcontracts,
                dt_start="1980-1-1",
                dt_end="2100-1-1",
                n=None,
                spec_date=None):  # 'symbol':[,]
    """
    Create the global simulation ExecuteUnit for the given contracts.

    Args:
        pcontracts (list): list of pcontracts(string)
        dt_start (datetime/str): start time of all pcontracts
        dt_end (datetime/str): end time of all pcontracts
        n (int): last n bars
        spec_date (dict): time range for specific pcontracts,
            e.g. {'symbol': [start, end]}; defaults to an empty mapping.
    """
    global _simulator
    # BUG FIX: the original used a mutable default argument (spec_date={}),
    # which is shared across calls; normalise None to a fresh dict instead.
    if spec_date is None:
        spec_date = {}
    _simulator = ExecuteUnit(pcontracts, dt_start, dt_end, n, spec_date)
    return _simulator
def estimate_vol_gBM(data1, data2, time_incr=0.1):
    """Estimate vols and correlation of two geometric Brownian motion samples.

    Time samples are assumed to lie on a grid with mesh size ``time_incr``.
    The estimation is delegated to ``estimate_vol_2d_rv_incr`` applied to the
    log of the data; drift and mean-reversion parameters are fixed to 0.

    args:
        data1       data array for X1
        data2       data array for X2
        time_incr   time increment

    output:
        [0, 0, sigma_1], [0, 0, sigma_2], rho -- format to be used directly
        in a LOBLinear model object
    """
    sigma_bid, sigma_ask, rho = estimate_vol_2d_rv_incr(data1, data2, time_incr, log=True)
    zero = float(0)
    return [zero, zero, sigma_bid], [zero, zero, sigma_ask], rho
import logging
import sys
def check_quarantine(av_quarentine_file):
    """Check if the quarantine is over.

    Reads "<days>:<iso-date>" from the file; returns False once today is past
    the quarantine end date, True otherwise. Exits the process on any error.
    """
    try:
        with open(av_quarentine_file, 'r', encoding="utf-8") as ff_av:
            first_line = ff_av.readline()
        days_str, av_run_str = first_line.split(':')
        quarantine_days = int(days_str)
        last_run = DT.date.fromisoformat(av_run_str.strip())
        quarantine_end = last_run + DT.timedelta(days=quarantine_days)
        # Still in quarantine unless today is strictly past the end date.
        return not DT.date.today() > quarantine_end
    except Exception as ee:
        logging.error(f"\nError {ee} while reading quarantine file {av_quarentine_file}.")
        sys.exit(1)
import requests
def get_coin_total(credentials_file: str, coin: str) -> float:
    """
    Get the current total amount of your coin

    Args:
        credentials_file: A JSON file containing Coinbase Pro credentials
        coin: The coin requested

    Returns:
        coin_total: The total amount of the coin you hold in your account
            (0 if no matching account is found)
    """
    creds = get_cbpro_creds_from_file(credentials_file)
    auth = CoinbaseProAuth(creds[0], creds[1], creds[2])
    accounts = requests.get(API_URL + "accounts", auth=auth).json()
    total = 0
    for account in accounts:
        if account['currency'] == coin:
            total = float(account['balance'])
    return total
from datetime import datetime
def datetime_to_serial(dt):
    """
    Converts the given datetime to the Excel serial format
    """
    if dt.tzinfo:
        raise ValueError("Doesn't support datetimes with timezones")
    # Excel's day zero (accounts for the historical 1900 leap-year bug).
    epoch = datetime(1899, 12, 30)
    delta = dt - epoch
    day_fraction = (float(delta.seconds) + float(delta.microseconds) / 1E6) / (60 * 60 * 24)
    return delta.days + day_fraction
from copy import deepcopy
def compute_transitive_closure(graph):
    """Compute the transitive closure of a directed graph using Warshall's
    algorithm.

    :arg graph: A :class:`collections.abc.Mapping` from each node to a
        :class:`collections.abc.MutableSet` of its direct successors. The
        graph may contain cycles; every node must appear as a key. The input
        is not modified.
    :returns: The transitive closure in the same representation.

    .. versionadded:: 2020.2
    """
    closure = deepcopy(graph)
    nodes = graph.keys()
    # Warshall: successively allow paths that pass through `via`.
    for via in nodes:
        for src in nodes:
            if via not in closure[src]:
                continue
            for dst in nodes:
                if dst in closure[via]:
                    closure[src].add(dst)
    return closure
def A_intermediate(f1, f2, f3, v1, v2, v3, d1, d3):
    """Solves system of equations for intermediate amplitude matching.

    Finds quartic coefficients [a0..a4] such that the polynomial passes
    through (f1, v1), (f2, v2), (f3, v3) and has slope d1 at f1 and d3 at f3.
    """
    def value_row(f):
        # Row enforcing a function-value constraint at frequency f.
        return [1.0, f, f ** 2, f ** 3, f ** 4]

    def slope_row(f):
        # Row enforcing a first-derivative constraint at frequency f.
        return [0.0, 1.0, 2 * f, 3 * f ** 2, 4 * f ** 3]

    system = np.array(
        [value_row(f1), value_row(f2), value_row(f3),
         slope_row(f1), slope_row(f3)],
        dtype="float",
    )
    rhs = np.array([v1, v2, v3, d1, d3], dtype="float")
    return np.linalg.solve(system, rhs)
def _evaluate_criterion(criterion, params, criterion_kwargs):
"""Evaluate the criterion function for the first time.
The comparison_plot_data output is needed to initialize the database.
The criterion value is stored in the general options for the tao pounders algorithm.
Args:
criterion (callable): Python function that takes a pandas DataFrame with
parameters as the first argument and returns a value or array to be
minimized and data for the comparison plot.
params (pd.DataFrame): See :ref:`params`.
criterion_kwargs (dict): Additional keyword arguments for criterion.
Returns:
fitness_eval (float): The scalar criterion value.
comparison_plot_data (np.array or pd.DataFrame): Data for the comparison_plot.
"""
criterion_out, comparison_plot_data = criterion(params, **criterion_kwargs)
if np.any(np.isnan(criterion_out)):
raise ValueError(
"The criterion function evaluated at the start parameters returns NaNs."
)
elif np.isscalar(criterion_out):
fitness_eval = criterion_out
else:
fitness_eval = np.mean(np.square(criterion_out))
return fitness_eval, comparison_plot_data | bbb0b2cdb4fb4e12d18b6e34c1878a3eaa40059a | 24,374 |
def group_by(collection, callback=None):
    """Creates an object composed of keys generated from the results of running
    each element of a `collection` through the callback.

    Args:
        collection (list|dict): Collection to iterate over.
        callback (mixed, optional): Callback applied per iteration.

    Returns:
        dict: Results of grouping by `callback`.

    Example:

        >>> results = group_by([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], 'a')
        >>> assert results == {1: [{'a': 1, 'b': 2}], 3: [{'a': 3, 'b': 4}]}
        >>> results = group_by([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], {'a': 1})
        >>> assert results == {False: [{'a': 3, 'b': 4}],\
                               True: [{'a': 1, 'b': 2}]}

    .. versionadded:: 1.0.0
    """
    grouped = {}
    keyfunc = pyd.iteratee(callback)
    for item in collection:
        grouped.setdefault(keyfunc(item), []).append(item)
    return grouped
def lemmatize_verbs(words):
    """lemmatize verbs in tokenized word list"""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(word, pos='v') for word in words]
def loadfromensembl(homology, kingdom='fungi', sequence='cdna',
additional='type=orthologues', saveonfiles=False, normalized=False,
setnans=False, number=0, by="entropy", using="normal", getCAI=None):
"""
Load from ensembl the datas required in parameters ( look at PyCUB.get_data for more information)
returns a fully populated homology object.
Args:
homology: str the homology code
additional: str additional information on the retrieved sequence
kingdom: str flags the relevant kingdom of you current session [fungi,plants,bacteria, animals]
sequence: str flags the type of sequence you consider the full genome is (coding or non coding or full) [cds, all, cda]
by: str flags what type of computation should be done [entropy,frequency, entropylocation]
normalized: bool to true if should we normalize the entorpy by length
saveonfiles: bool to true if the retrieved data should be saved on a file
setnans: bool to true if nans should be set to NaN instead of an avg value
using: the method to compute the partition function if using entropy location
getCAI: wether or not to compute CAI !! need to have called the corresponding function on
Pycub before hand !!
Returns:
a populated PyCUB.homology of the homology object by [names, taxons, full, lenmat, homocode, nans,
KaKs_Scores, similarity_scores, proteinids, GCcount, geneids, refs, ecai, refgene, refprot,
tot_volume, mean_hydrophobicity, glucose_cost, synthesis_steps, isoelectricpoint,cai,
conservation, uncounted]
OR None if the homology is empty
Raises:
ConnectionError: "tried 50 times but still not able to connect"
"""
server = "http://rest.ensemblgenomes.org"
print 'homology: ' + homology + ' : ' + str(number)
ext = "/homology/id/" + homology + '?'
if sequence is not None:
# dna cdna cds ncrna Protein EMBL GENBANK MySQL TSV GTF GFF3
ext += 'sequence=' + sequence
if kingdom is not None:
ext += ';compara=' + kingdom
if additional is not None:
ext += ';' + additional
try:
r = requests.get(server + ext, headers={"Content-Type": "application/json"})
except ConnectionError:
print "problem at " + homology
if number > 50:
raise IOError("tried 50 times but still not able to connect")
return loadfromensembl(homology, kingdom=kingdom, sequence=sequence,
additional=additional, saveonfiles=saveonfiles, normalized=normalized,
setnans=setnans, number=number + 1, by=by, using=using)
if not r.ok:
r.raise_for_status()
data = r.json()['data']
if not data:
return None
data = data[0]['homologies']
if not data:
return None
if saveonfiles:
with open('utils/data/' + homology + '.json', "wb") as code:
code.write(json.dump(data))
species, GCcount, lenmat, H, nans, similarities, KaKs_Scores, taxons, proteinids,\
geneid, ref, ecai, cai, refgene, refprot, vol, cost, hydrophob, synthcost, isoepoint, conservation, others = process(
data, normalized=normalized, setnans=setnans, by=by, getCAI=getCAI)
if by == 'entropyLocation':
H = getloc(H, np.array(lenmat), using=using)
# here we add two things into names but only as a temporary saving measures removed by the
# application fo preprocessing in homoset.
homo = h.homology(names=[species, taxons], full=H, lenmat=lenmat, homocode=homology,
nans=nans, KaKs_Scores=KaKs_Scores, similarity_scores=similarities,
proteinids=proteinids, GCcount=GCcount, geneids=geneid, ref=ref, ecai=ecai, cai=cai, refgene=refgene,
refprot=refprot, tot_volume=vol, mean_hydrophobicity=hydrophob, glucose_cost=cost,
synthesis_steps=synthcost, isoelectricpoint=isoepoint, conservation=conservation, othercods=others)
homo.order(withtaxons=True) # a first ordering of the data, usefull afterward in the preprocessing
return homo | b7c3adee4ba4b61c0828b830b5f53578da75211c | 24,377 |
def dereference(reference_buffer, groups):
    """Resolve a buffered group reference (e.g. ['1','2'] -> group 12)
    against `groups`, wrapping the index modulo the number of groups.
    Returns the matched group followed by a space, or '' for an empty buffer.
    """
    if not reference_buffer:
        return ''
    # References are 1-based in the source text; convert to 0-based.
    index = int(''.join(reference_buffer)) - 1
    return groups[index % len(groups)] + ' '
def get_main_corpora_info():
    """Create a list of rows with the main corpora info saved in CORPORA_SOURCES.

    :return: List of dicts, one per corpus, with the info to be shown
        (the previous docstring said "dict", but a list has always been returned)
    :rtype: list
    """
    table = []
    # FIX: enumerate replaces CORPORA_SOURCES.index(corpus_info), which was an
    # O(n) scan per iteration (O(n^2) overall) and returns the wrong id when
    # two corpora entries compare equal.
    for corpus_id, corpus_info in enumerate(CORPORA_SOURCES, start=1):
        props = corpus_info["properties"]
        corpus_name = pretty_string(
            f"{corpus_info['name']} ({props['slug']})", 2
        )
        table.append({
            "id": corpus_id,
            "name": corpus_name,
            "lang": props["language"],
            "size": props["size"],
            "docs": props["doc_quantity"],
            "words": props["word_quantity"],
            "granularity": pretty_string('\n'.join(props["granularity"]), 1),
            "license": pretty_string(props["license"], 1),
        })
    return table
def start_compare_analysis(api_token, project_id, kind, url, username, password, target_branch, target_revision):
    """
    Start a compare analysis via the GraphQL API and return its identifier.

    :param api_token: the access token to the GraphQL API
    :param project_id: identifier of the project to use as source
    :param kind: kind of the target repositiory (Github, Gitlab, Git)
    :param url: URL of the target repository
    :param username: username of the target repository
    :param password: password of the target repository
    :param target_branch: optional branch of the target repository
    :param target_revision: optional revision of the target repository
    :return: the compare-analysis identifier or None on error.
    """
    try:
        # Mandatory arguments first; optional ones appended only when set.
        args = [
            "projectId: " + str(project_id),
            "targetKind: " + kind,
            "targetUrl: \"" + url + "\"",
        ]
        if target_revision:
            args.append("targetRevision: \"" + target_revision + "\"")
        if target_branch:
            args.append("targetBranch: \"" + target_branch + "\"")
        query = """
        mutation { createCompareAnalysis(""" + ",".join(args) + """){id}}
        """
        response_json = do_graphql_query(api_token, {"query": query})
        return response_json["createCompareAnalysis"]["id"]
    except KeyError:
        log.error("Error while starting new analysis")
        return None
def get_matrix_in_format(original_matrix, matrix_format):
    """Convert a matrix to the requested sparse format.

    Parameters
    ----------
    original_matrix : np.matrix or scipy matrix or np.array of np. arrays
        matrix to convert
    matrix_format : string
        target sparse format

    Returns
    -------
    matrix : scipy matrix
        matrix in the given format (the input object itself when it is a
        sparse matrix already in that format)
    """
    if isinstance(original_matrix, np.ndarray):
        # Dense input: build a sparse matrix with the matching constructor.
        constructor = SPARSE_FORMAT_TO_CONSTRUCTOR[matrix_format]
        return constructor(original_matrix)

    if original_matrix.getformat() != matrix_format:
        return original_matrix.asformat(matrix_format)

    # Already sparse and already in the right format: return as-is.
    return original_matrix
def get_haystack_response(res, debug=False, chatbot='QA'):
    """
    Extract the highest-scoring non-null answer from a haystack response.
    NOTE: The need to filter null answers suggests that Deepset's
    no_ans_boost default of 0 may not be functioning for FARMReader.

    :param res: raw haystack response dict
    :param debug: include the underlying error in the fallback response
    :param chatbot: Type of chatbot to get response for
    :return: (response text, probability, url)
    """
    answer = None
    try:
        candidates = res['results'][0]['answers']
        # Flatten, drop rows containing nulls, and keep a fresh integer index.
        frame = pd.json_normalize(candidates).dropna().reset_index()
        answer = frame.iloc[frame['score'].idxmax()]
        response = answer['answer']
        probability = answer['probability']
    except Exception as e:
        if debug:
            response = "So sorry, but there was an error extracting the response from the {} chatbot: '{}'. "\
                .format(chatbot, e)
        else:
            response = NO_ANSWER_RESPONSE
        probability = 1
    try:
        url = answer['meta']['link']
    except Exception:
        url = ''
    return response, probability, url
def _convert_name(name, recurse=True, subs=None):
    """
    From an absolute path returns the variable name and its owner component in a dict.
    Names are also formatted.

    Parameters
    ----------
    name : str
        Connection absolute path and name
    recurse : bool
        If False, treat the top level of each name as the source/target component.
    subs: tuple or None
        Character pairs with old and substitute characters

    Returns
    -------
    dict(str, str)
    """
    def convert(n):
        sep = '.'
        n = n.replace('@', sep)
        parts = n.split(sep)
        if recurse:
            if len(parts) <= 1:
                msg = ('The name "{}" cannot be processed. The separator character is "{}", '
                       'which does not occur in the name.')
                raise ValueError(msg.format(n, sep))
            # -1 is the variable name, so -2 is the owning component.
            comp = parts[-2]
            path = n.rsplit(sep, 1)[0]
        else:
            comp = parts[0]
            path = comp
        var = _replace_chars(parts[-1], substitutes=subs)
        return {'comp': comp, 'var': var,
                'abs_name': _format_name(n), 'path': _format_name(path)}

    if isinstance(name, list):  # a source with multiple targets
        return [convert(n) for n in name]
    return convert(name)
def _orthogonalize(constraints, X):
"""
Orthogonalize spline terms with respect to non spline terms.
Parameters
----------
constraints: numpy array
constraint matrix, non spline terms
X: numpy array
spline terms
Returns
-------
constrained_X: numpy array
orthogonalized spline terms
"""
Q, _ = np.linalg.qr(constraints) # compute Q
Projection_Matrix = np.matmul(Q,Q.T)
constrained_X = X - np.matmul(Projection_Matrix,X)
return constrained_X | 01eb69ffa30d48c84c76915e33be39b201fda73e | 24,384 |
import time
import subprocess
def get_shell_output(cmd, verbose=None):
    """Run a shell command and return its return code, stdout and stderr.

    Currently (pyrpipe v 0.0.4) this function is called in
    getReturnStatus(), getProgramVersion(), find_files()

    Parameters
    ----------
    cmd: list
        command to run
    verbose: bool
        print the command and a start timestamp; when None, falls back to
        the module-level _verbose flag

    :return: (returncode, stdout and stderr)
    :rtype: tuple: (int,str,str)
    """
    # FIX: only fall back to the module default when the caller did not
    # decide; the previous `if not verbose:` also overrode an explicit
    # verbose=False.
    if verbose is None:
        verbose = _verbose
    # not logging these commands
    cmd = parse_cmd(cmd)
    log_message = cmd
    starttime_str = time.strftime("%y-%m-%d %H:%M:%S", time.localtime(time.time()))
    if verbose:
        pu.print_notification("Start:" + starttime_str)
        pu.print_blue("$ " + log_message)
    try:
        result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
        stdout, stderr = result.communicate()
        stdout = stdout.decode("utf-8") if stdout else ''
        stderr = stderr.decode("utf-8") if stderr else ''
        return (result.returncode, stdout, stderr)
    # FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        return (-1, "", "Command failed to execute")
def tril(input, diagonal=0, name=None):
    """
    This op returns the lower triangular part of a matrix (2-D tensor) or batch
    of matrices :attr:`input`, the other elements of the result tensor are set
    to 0. The lower triangular part of the matrix is defined as the elements
    on and below the diagonal.

    Args:
        input (Variable): The input variable which is a Tensor.
            Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
        diagonal (int, optional): The diagonal to consider, default value is 0.
            If :attr:`diagonal` = 0, all elements on and below the main diagonal are
            retained. A positive value includes just as many diagonals above the main
            diagonal, and similarly a negative value excludes just as many diagonals below
            the main diagonal. The main diagonal are the set of indices
            :math:`\{(i, i)\}` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
            :math:`d_{1}, d_{2}` are the dimensions of the matrix.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: Tensor, results of lower triangular operation by the specified diagonal of input tensor,
        it's data type is the same as input's Tensor.

    Raises:
        TypeError: diagonal is not a int type.
        ValueError: dimension of :attr:`input` is less than 2.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            # array([[ 1, 2, 3, 4],
            #        [ 5, 6, 7, 8],
            #        [ 9, 10, 11, 12]])
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            # example 1, default diagonal
            tril = fluid.layers.tril(x)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 1, 0, 0, 0],
            #        [ 5, 6, 0, 0],
            #        [ 9, 10, 11, 0]])

        .. code-block:: python

            # example 2, positive diagonal value
            import paddle.fluid as fluid
            import numpy as np
            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())
            tril = fluid.layers.tril(x, diagonal=2)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 1, 2, 3, 0],
            #        [ 5, 6, 7, 8],
            #        [ 9, 10, 11, 12]])

        .. code-block:: python

            # example 3, negative diagonal value
            import paddle.fluid as fluid
            import numpy as np
            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())
            tril = fluid.layers.tril(x, diagonal=-1)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 0, 0, 0, 0],
            #        [ 5, 0, 0, 0],
            #        [ 9, 10, 0, 0]])
    """
    # At this point the only locals are the documented arguments (input,
    # diagonal, name), so **locals() forwards exactly those as keyword
    # arguments to the shared tril/triu implementation. Do not introduce any
    # other local variable before this call.
    return _tril_triu_op(LayerHelper('tril', **locals())) | 62eb0c83cc633db655859160ea5df1fc0158086a | 24386
import csv
def csv_to_objects(csvfile_path):
    """
    Read a CSV file and split its rows into show/episode and movie entries.

    Parameters
    ----------
    csvfile_path : string
        The absolute or relative path to a valid CSV file.

    Returns
    -------
    dict
        A dict containing a list of show/episode entries and a list of movie entries
        (both empty when the file could not be read).
    """
    logger.info("Attempting to read from {file}".format(file=csvfile_path))
    # FIX: initialise before the try block so the final return cannot raise
    # NameError when opening/reading the file fails.
    shows = []
    movies = []
    try:
        with open(csvfile_path) as csvfile:
            csvreader = csv.reader(csvfile, delimiter=',')
            # First row holds the column names used as dict keys below.
            headers = next(csvreader)
            for row in csvreader:
                item = {}
                for header_num, header in enumerate(headers):
                    item[header] = row[header_num]
                logger.debug(item)
                if item['type'] == "episode":
                    shows.append(item)
                elif item['type'] == "movie":
                    movies.append(item)
                else:
                    # FIX: "Unknow" typo in the log message.
                    logger.error("Unknown item type {type}".format(type=item['type']))
                    exit(1)
            logger.info("{shows} shows and {movies} movies read from CSV".format(shows=len(shows), movies=len(movies)))
    except (EnvironmentError, EOFError):
        # FIX: the message had no placeholder, so the formatted filename was
        # silently discarded.
        logger.error("Error whilst loading CSV file {filename}".format(filename=csvfile_path))
    return {'shows': shows, 'movies': movies}
def rename_and_merge_columns_on_dict(data_encoded, rename_encoded_columns_dict, **kwargs):
    """
    Rename columns of a numerically encoded DataFrame and merge columns that
    end up sharing a name by summing them.

    Parameters
    ----------
    data_encoded: pandas.DataFrame with numerical columns
    rename_encoded_columns_dict: dict of columns to rename in data_encoded
    **kwargs
        inplace:bool, default=False
            decides if data_encoded is edited inplace or if a copy is returned

    Returns
    -------
    pandas.DataFrame with columns renamed according to rename_encoded_columns_dict, columns that
    share the same name after renaming are merged by adding the columns up;
    None when inplace=True (data_encoded itself is modified).

    Example
    -------
    data_encoded:
     x y z
     0 0 1
     1 0 1
     0 1 0
    rename_encoded_columns_dict:
     {'y': 'x'}
    return:
     x z
     0 1
     1 1
     1 0
    """
    inplace = kwargs.get('inplace', False)
    data_copy = data_encoded if inplace else data_encoded.copy()
    data_copy.rename(columns=rename_encoded_columns_dict, inplace=True)
    # Iterate over a snapshot of the labels; columns are dropped/re-added in
    # the loop body, and the second occurrence of a merged label is skipped
    # because by then data_copy[label] is a Series again.
    for col in data_copy.columns:
        df_col = data_copy[col]
        # If col appears more than once, data_copy[col] is a DataFrame
        # (else it is a Series) and the duplicates must be merged.
        if isinstance(df_col, pd.DataFrame):
            data_copy.drop(columns=col, inplace=True)
            # FIX: align the accumulator on the frame's own index; the old
            # RangeIndex-based zeros produced NaNs for non-default indexes.
            col_merged = pd.Series(0, index=data_copy.index)
            for _, duplicate in df_col.items():
                col_merged += duplicate
            data_copy[col] = col_merged
    # FIX: sort the columns in place. The old code rebound the *local* name
    # to a reindexed copy, so the inplace=True caller never saw sorted columns.
    data_copy.sort_index(axis=1, inplace=True)
    if inplace:
        return
    return data_copy
from typing import List
def _get_public_props(obj) -> List[str]:
"""Return the list of public props from an object."""
return [prop for prop in dir(obj) if not prop.startswith('_')] | 7b3be3e186bc009329ed417c6685fb2503a7c993 | 24,389 |
from typing import List
def get_vd_html(
    voronoi_diagram: FortunesAlgorithm,
    limit_sites: List[SiteToUse],
    xlim: Limit,
    ylim: Limit,
) -> None:
    """Build the voronoi-diagram figure and return it rendered as HTML."""
    # The diagram's site class is forwarded so the figure draws the right kind
    # of sites.
    figure = get_vd_figure(
        voronoi_diagram, limit_sites, xlim, ylim, voronoi_diagram.SITE_CLASS
    )
    return get_html(figure)
def sample_normal_mean_jeffreys(s1, ndata, prec):
    """Draw one sample of a normal distribution's mean given the data sum
    `s1`, the number of observations and the precision."""
    ##
    posterior_mean = s1 / ndata
    posterior_std = 1 / np.sqrt(prec * ndata)
    return rn.normal(posterior_mean, posterior_std)
from distutils.dir_util import mkpath
from distutils.dep_util import newer
from distutils.errors import DistutilsFileError
from distutils import log
import os
def copy_tree(
        src, dst, preserve_mode=1, preserve_times=1,
        preserve_symlinks=0, update=0, verbose=0, dry_run=0,
        condition=None):
    """
    Copy an entire directory tree 'src' to a new location 'dst'. Both
    'src' and 'dst' must be directory names. If 'src' is not a
    directory, raise DistutilsFileError. If 'dst' does not exist, it is
    created with 'mkpath()'. The end result of the copy is that every
    file in 'src' is copied to 'dst', and directories under 'src' are
    recursively copied to 'dst'. Return the list of files that were
    copied or might have been copied, using their output name. The
    return value is unaffected by 'update' or 'dry_run': it is simply
    the list of all files under 'src', with the names changed to be
    under 'dst'.
    'preserve_mode' and 'preserve_times' are the same as for
    'copy_file'; note that they only apply to regular files, not to
    directories. If 'preserve_symlinks' is true, symlinks will be
    copied as symlinks (on platforms that support them!); otherwise
    (the default), the destination of the symlink will be copied.
    'update' and 'verbose' are the same as for 'copy_file'.
    'condition' is an optional predicate called with each source path;
    entries for which it returns false are skipped (defaults to skipscm).
    """
    # Python 2 code: `unicode` is the py2 text type.
    assert isinstance(src, (str, unicode)), repr(src)
    assert isinstance(dst, (str, unicode)), repr(dst)
    src = fsencoding(src)
    dst = fsencoding(dst)
    if condition is None:
        condition = skipscm
    if not dry_run and not zipio.isdir(src):
        raise DistutilsFileError(
            "cannot copy tree '%s': not a directory" % src)
    try:
        names = zipio.listdir(src)
    except os.error as exc:
        (errno, errstr) = exc.args
        if dry_run:
            # In dry-run mode pretend the unreadable directory is empty.
            names = []
        else:
            raise DistutilsFileError(
                "error listing files in '%s': %s" % (src, errstr))
    if not dry_run:
        mkpath(dst)
    outputs = []
    for n in names:
        src_name = os.path.join(src, n)
        dst_name = os.path.join(dst, n)
        # Skip entries rejected by the condition predicate.
        if (condition is not None) and (not condition(src_name)):
            continue
        # Note: using zipio's internal _locate function throws an IOError on
        # dead symlinks, so handle it here.
        if os.path.islink(src_name) \
                and not os.path.exists(os.readlink(src_name)):
            continue
        if preserve_symlinks and zipio.islink(src_name):
            link_dest = zipio.readlink(src_name)
            log.info("linking %s -> %s", dst_name, link_dest)
            if not dry_run:
                # Only recreate the link when it is out of date (or update
                # checking is disabled).
                if update and not newer(src, dst_name):
                    pass
                else:
                    make_symlink(link_dest, dst_name)
            outputs.append(dst_name)
        elif zipio.isdir(src_name) and not os.path.isfile(src_name):
            # ^^^ this odd tests ensures that resource files that
            # happen to be a zipfile won't get extracted.
            # XXX: need API in zipio to clean up this code
            outputs.extend(
                copy_tree(src_name, dst_name, preserve_mode,
                          preserve_times, preserve_symlinks, update,
                          dry_run=dry_run, condition=condition))
        else:
            copy_file(src_name, dst_name, preserve_mode,
                      preserve_times, update, dry_run=dry_run)
            outputs.append(dst_name)
    return outputs | ae786a6e207da24e060517a32e0dee99aa159f69 | 24392
def pipeline_report_build(submission: Submission, stdout: str, passed: bool, **_):
    """
    Record a build report for a submission.

    POSTed json should be of the shape:

    {
      "stdout": "build logs...",
      "passed": True
    }

    :param submission: submission the build belongs to
    :param stdout: captured build output
    :param passed: whether the build succeeded
    :return: success response
    """
    # Truncate logs that would overflow the TEXT column.
    stdout = stdout[:MYSQL_TEXT_MAX_LENGTH]

    # Log the build being reported.
    logger.info(
        "submission build reported",
        extra={
            "type": "build_report",
            "submission_id": submission.id,
            "assignment_id": submission.assignment_id,
            "owner_id": submission.owner_id,
            "passed": passed,
            "stdout": stdout,
        },
    )

    # Update the submission's build record.
    submission.build.stdout = stdout
    submission.build.passed = passed

    # A failed build ends the submission pipeline.
    if passed is False:
        submission.processed = True
        submission.state = "Build did not succeed"

    # Persist both objects.
    for obj in (submission, submission.build):
        db.session.add(obj)
    db.session.commit()

    return success_response("Build successfully reported.")
import random
def flatten(episode, context_length, include_labels=True, delimiter='\n'):
    """
    Flatten the data into single example episodes.

    This is used to make conditional training easier and for a fair comparison of
    methods.
    """
    # Rolling window over the most recent texts (unbounded when
    # context_length <= 0).
    window = deque(maxlen=context_length if context_length > 0 else None)
    flattened = []
    for ex in episode:
        window.append(ex.get('text', ''))
        if len(window) > 1:
            # Prepend the accumulated context to this example's text.
            ex.force_set('text', delimiter.join(window))
        # Every flattened example becomes its own episode.
        ex.force_set('episode_done', True)
        labels = ex.get('labels', ex.get('eval_labels', None))
        if include_labels and labels is not None:
            window.append(random.choice(labels))
        flattened.append(ex)
    return flattened
def calc_binsize(num_bins, t_start, t_stop):
    """
    Calculate the bin size from the bin count and the start/stop times.

    Parameters
    ----------
    num_bins: int
        Number of bins
    t_start: quantities.Quantity
        Start time
    t_stop
        Stop time

    Returns
    -------
    binsize : quantities.Quantity
        Size of bins calculated from the given parameters, or None when any
        parameter is missing.

    Raises
    ------
    ValueError :
        Raised when :attr:`t_stop` is smaller than :attr:`t_start`".
    """
    # Legacy behavior: silently yield None when any input is missing.
    if num_bins is None or t_start is None or t_stop is None:
        return None
    if t_stop < t_start:
        raise ValueError("t_stop (%s) is smaller than t_start (%s)"
                         % (t_stop, t_start))
    return (t_stop - t_start) / num_bins
def d_within(geom, gdf, distance):
    """Find the subset of a GeoDataFrame within some distance of a shapely geometry"""
    # Delegates to the shared _intersects helper; presumably the distance
    # buffers `geom` before the intersection test — confirm in _intersects.
    return _intersects(geom, gdf, distance) | 463be3ff9c3eb7f002dc652047b96fbc15ba05b4 | 24396
def make_params(args, nmax=None):
    """Format GET parameters for the API endpoint.

    In particular, the endpoint requires that parameters be sorted
    alphabetically by name, and that filtering is done only on one
    parameter when multiple filters are offered.
    """
    # A falsy nmax (None or 0) disables the limit check entirely.
    if nmax and len(args) > nmax:
        raise ValueError("Too many parameters supplied")
    return [(key, stringify(args[key])) for key in sorted(args)]
def remap(value, oldMin, oldMax, newMin, newMax):
    """
    Linearly remap a value from the old range onto the new range.

    Args:
        value: value to remap
        oldMin: old min of range
        oldMax: old max of range
        newMin: new min of range
        newMax: new max of range

    Returns:
        The remapped value in the new range
    """
    # Position of value within the old range, as a fraction in [0, 1]
    # (for in-range inputs), then scaled onto the new range.
    fraction = (value - oldMin) / (oldMax - oldMin)
    return newMin + fraction * (newMax - newMin)
async def construct_unit_passport(unit: Unit) -> str:
    """Construct the unit's own passport, dump it as a .yaml file and return the file path."""
    passport_data = _get_passport_dict(unit)
    passport_path = f"unit-passports/unit-passport-{unit.uuid}.yaml"
    _save_passport(unit, passport_data, passport_path)
    return passport_path
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.