| content (string, 35 to 416k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
def is_config_or_test(example, scan_width=5, coeff=0.05):
"""Check if file is a configuration file or a unit test by :
1- looking for keywords in the first few lines of the file.
2- counting number of occurence of the words 'config' and 'test' with respect to number of lines.
"""
keywords = ["unit tests", "test file", "configuration file"]
lines = example["content"].splitlines()
count_config = 0
count_test = 0
# first test
for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
nlines = example["content"].count("\n")
threshold = int(coeff * nlines)
for line in lines:
count_config += line.lower().count("config")
count_test += line.lower().count("test")
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
|
0e2823897b72a916afd9672beed904190bb2c1c2
| 701,404
|
import json
def prepare_clean_listing_record(listing_serializer_record):
"""
Clean Record
Sample Record (record_json) after clean
{
"id": 316,
"title": "JotSpot 28",
"description": "Jot things down",
"unique_name": "ozp.test.jotspot.28",
"description_short": "Jot stuff down",
"approval_status": "APPROVED",
"is_enabled": true,
"is_featured": true,
"is_deleted": false,
"avg_rate": 4,
"total_votes": 1,
"total_rate5": 0,
"total_rate4": 1,
"total_rate3": 0,
"total_rate2": 0,
"total_rate1": 0,
"total_reviews": 1,
"security_marking": "UNCLASSIFIED",
"is_private": false,
"is_exportable": false,
"agency": {
"id": 1,
"title": "Ministry of Truth",
"short_name": "Minitrue"
},
"listing_type": {
"id": 1,
"title": "web application",
"description": "web applications"
},
"categories": [
{
"id": 4,
"title": "Education",
"description": "Educational in nature"
},
{
"id": 14,
"title": "Tools",
"description": "Tools and Utilities"
}
],
"tags": [
{
"id": 1,
"name": "demo"
}
]
}
Args:
listing_serializer_record: One record of ReadOnlyListingSerializer
"""
keys_to_remove = ['contacts',
'last_activity',
'required_listings',
'owners',
'current_rejection',
'what_is_new',
'iframe_compatible',
'edited_date',
'featured_date',
'version_name',
'feedback_score',
'usage_requirements',
'system_requirements',
'intents',
'custom_fields'
]
# Clean Record
for key in keys_to_remove:
if key in listing_serializer_record:
del listing_serializer_record[key]
image_keys_to_clean = ['large_icon',
'small_icon',
'banner_icon',
'large_banner_icon']
# Clean Large_icon
for image_key in image_keys_to_clean:
if listing_serializer_record.get(image_key):
del listing_serializer_record[image_key]['image_type']
del listing_serializer_record[image_key]['uuid']
del listing_serializer_record['agency']['icon']
record_clean_obj = json.loads(json.dumps(listing_serializer_record))
# title_suggest = {"input": [ record_clean_obj['title'] ] }
# record_clean_obj['title_suggest'] =title_suggest
# Flatten Agency Obj - Makes the search query easier
record_clean_obj['agency_id'] = record_clean_obj['agency']['id']
record_clean_obj['agency_short_name'] = record_clean_obj['agency']['short_name']
record_clean_obj['agency_title'] = record_clean_obj['agency']['title']
del record_clean_obj['agency']
# Flatten listing_type Obj - - Makes the search query easier
record_clean_obj['listing_type_id'] = record_clean_obj['listing_type']['id']
record_clean_obj['listing_type_description'] = record_clean_obj['listing_type']['description']
record_clean_obj['listing_type_title'] = record_clean_obj['listing_type']['title']
del record_clean_obj['listing_type']
tags = []
for tag_entry in record_clean_obj['tags']:
tag_entry['name_string'] = tag_entry['name']
tags.append(tag_entry)
record_clean_obj['tags'] = tags
return record_clean_obj
|
7330b24f90345be14966f28d61e7665b0785a9e6
| 701,405
|
def has_shape(data, shape, allow_empty=False):
"""
Determine if a data object has the provided shape
At any level, the object in `data` and in `shape` must have the same type.
A dict is the same shape if all its keys and values have the same shape as the
key/value in `shape`. The number of keys/values is not relevant.
A list is the same shape if all its items have the same shape as the value
in `shape`
A tuple is the same shape if it has the same length as `shape` and all the
values have the same shape as the corresponding value in `shape`
Any other object simply has to have the same type.
If `allow_empty` is set, lists and dicts in `data` will pass even if they are empty.
"""
if not isinstance(data, type(shape)):
return False
if isinstance(data, dict):
return (allow_empty or len(data) > 0) and\
all(has_shape(k, next(iter(shape.keys()))) for k in data.keys()) and\
all(has_shape(v, next(iter(shape.values()))) for v in data.values())
elif isinstance(data, list):
return (allow_empty or len(data) > 0) and\
all(has_shape(v, shape[0]) for v in data)
elif isinstance(data, tuple):
return len(data) == len(shape) and all(
has_shape(data[i], shape[i]) for i in range(len(data)))
else:
return True
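# Illustrative usage (sample shapes are assumptions):
assert has_shape({"a": 1, "b": 2}, {"key": 0})
assert not has_shape([1, 2], (0, 0))
assert has_shape({}, {"key": 0}, allow_empty=True)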
|
f04add860bb6b886bb693ddc85b3d4877245d749
| 701,406
|
def RR_calc(classes, TOP):
"""
Calculate Global performance index (RR).
:param classes: confusion matrix classes
:type classes: list
:param TOP: number of positives in predict vector per class
:type TOP: dict
:return: RR as float
"""
try:
class_number = len(classes)
result = sum(list(TOP.values()))
return result / class_number
except Exception:
return "None"
|
814a11c339b25dc687d537efd3244ddad9c0f8fd
| 701,408
|
import torch
def view_complex_native(x: torch.FloatTensor) -> torch.Tensor:
"""Convert a PyKEEN complex tensor representation into a torch one using :func:`torch.view_as_complex`."""
return torch.view_as_complex(x.view(*x.shape[:-1], -1, 2))
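# Illustrative usage: the last dimension interleaves (real, imag) pairs (shape assumed):
_x = torch.randn(5, 8)  # 8 = 4 complex numbers * 2
assert view_complex_native(_x).shape == (5, 4)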
|
14e74f1c8b5e6de673c962e4381e74026d3d3db2
| 701,410
|
def cross_corr_norm(patch_0, patch_1):
"""
Returns the normalized cross-correlation between two same-sized image
patches.
Parameters :
patch_0, patch_1 : image patches
"""
n = patch_0.shape[0] * patch_0.shape[1]
# Mean intensities
mu_0, mu_1 = patch_0.mean(), patch_1.mean()
# Standard deviations
sigma_0, sigma_1 = patch_0.std(), patch_1.std()
return (1.0 / (n - 1) * (((patch_0 - mu_0) / sigma_0) *
((patch_1 - mu_1) / sigma_1)).sum())
|
213100b174993baa07ea685b23541d3dfe49ace8
| 701,411
|
def get_flip_set(index):
"""Make flip set"""
n = 1
while n <= index:
n *= 2
def get(n, j):
if j <= 1:
return {b for b in range(j)}
n_half = n // 2
if j < n_half:
return get(n_half, j)
f = {b + n_half for b in get(n_half, j - n_half)}
if j == n - 1:
f.add(n_half - 1)
return f
return get(n, index)
|
3174b9bab59ae67e9f869fddd9c0a6669f742a45
| 701,412
|
def site_stat_stmt(table, site_col, values_col, fun):
"""
Function to produce an SQL statement to make a basic summary grouped by a sites column.
Parameters
----------
table : str
The database table.
site_col : str
The column containing the sites.
values_col : str
The column containing the values to be summarised.
fun : str
The function to apply.
Returns
-------
str
SQL statement.
"""
fun_dict = {'mean': 'avg', 'sum': 'sum', 'count': 'count', 'min': 'min', 'max': 'max'}
cols_str = ', '.join([site_col, fun_dict[fun] + '(' + values_col + ') as ' + values_col])
stmt1 = "SELECT " + cols_str + " FROM " + table + " GROUP BY " + site_col
return stmt1
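# Illustrative usage (table and column names are assumptions):
assert site_stat_stmt('flow_data', 'site', 'flow', 'mean') == \
"SELECT site, avg(flow) as flow FROM flow_data GROUP BY site"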
|
c704d5687effd3c12abb3feecde9041eb88aae7a
| 701,413
|
def _format_source_error(filename, lineno, block):
""" A helper function which generates an error string.
This function handles the work of reading the lines of the file
which bracket the error, and formatting a string which points to
the offending line. The output is similar to:
File "foo.py", line 42, in bar()
41 def bar():
----> 42 a = a + 1
43 return a
Parameters
----------
filename : str
The full path to the offending file.
lineno : int
The line number of the offending line.
block : str
The name of the block scope in which the error occurred. In the
sample above, the block scope is 'bar'.
Returns
-------
result : str
A nicely formatted string for including in an exception. If the
file cannot be opened, the source lines will not be included.
"""
text = 'File "%s", line %d, in %s()' % (filename, lineno, block)
start_lineno = max(0, lineno - 1)
end_lineno = start_lineno + 2
lines = []
try:
with open(filename, 'r') as f:
for idx, line in enumerate(f, 1):
if idx >= start_lineno and idx <= end_lineno:
lines.append((idx, line))
elif idx > end_lineno:
break
except IOError:
pass
if len(lines) > 0:
digits = str(len(str(end_lineno)))
line_templ = '\n----> %' + digits + 'd %s'
other_templ = '\n %' + digits + 'd %s'
for lno, line in lines:
line = line.rstrip()
if lno == lineno:
text += line_templ % (lno, line)
else:
text += other_templ % (lno, line)
return text
|
32d093e53811415338877349ca8e64b0e9261b1d
| 701,414
|
def calc_exposure(k, src_rate, bgd_rate, read_noise, neff):
"""
Compute the time to get to a given significance (k) given the source rate,
the background rate, the read noise, and the number
of effective background pixels
-----
time = calc_exposure(k, src_rate, bgd_rate, read_noise, neff)
"""
denom = 2 * src_rate**2
nom1 = (k**2) * (src_rate + neff*bgd_rate)
nom2 = ( k**4 *(src_rate + neff*bgd_rate)**2 +
4 * k**2 * src_rate**2 * neff * read_noise**2)**(0.5)
exposure = (nom1 + nom2)/ denom
return exposure
|
993853d244cfa5c6619300def02294a2497d78df
| 701,415
|
def type_or_null(names):
"""Return the list of types `names` + the name-or-null list for every type in `names`."""
return [[name, 'null'] for name in names]
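# Illustrative usage:
assert type_or_null(["string", "integer"]) == [["string", "null"], ["integer", "null"]]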
|
72cbefcbba08c98d3c4c11a126e22b6f83f4175b
| 701,416
|
def variables_pool(variable, question='Variable to analyze'):
"""
:param variable: the variable chosen from the variable pool in your dataframe
:param question: prompt text (default: "Variable to analyze")
:return: the selected variable
"""
def guide(diz):
"""
Guides the user to choose the right variable. Returns nothing.
:param diz: dictionary mapping an int key to the associated variable
"""
print("[i] Choose one from this list:")
for a_key in diz:
print("\t%s\t→ %s" % (str(a_key), str(diz[a_key])))
dictionary = {}
try:
variable = variable.tolist()
except AttributeError:
if type(variable) == type(list()):
pass
elif type(variable) == type(str()):
variable = variable.strip().split()
for i in range(0,len(variable)):
variable[i] = variable[i].strip()
else:
raise AttributeError("I can't handle "+str(type(variable))+" object")
for i in range(1, len(variable)+1):
dictionary[i] = variable[i-1]
done = False
while not done:
print("\n"+73 * "-")
guide(dictionary)
key = input("%s [1-%d]: " % (question,len(variable)))
try:
key = int(key)
if key in dictionary.keys():
done = True
return dictionary[key]
else:
print("Unknown variable...")
except ValueError:
if key in dictionary.values() and key != 0:
return key
else:
print("Unknown variable...")
|
fcff9eaa1467d96251ba08eaa433aa05b7b769f2
| 701,417
|
import torch
def quadratic_matmul(x: torch.Tensor, A: torch.Tensor) -> torch.Tensor:
"""Matrix quadratic multiplication.
Parameters
----------
x : torch.Tensor, shape=(..., X)
A batch of vectors.
A : torch.Tensor, shape=(..., X, X)
A batch of square matrices.
Returns
-------
torch.Tensor, shape=(...,)
Batched scalar result of quadratic multiplication.
"""
assert x.shape[-1] == A.shape[-1] == A.shape[-2]
x_T = x.unsqueeze(-2) # shape=(..., 1, X)
x_ = x.unsqueeze(-1) # shape=(..., X, 1)
quadratic = x_T @ A @ x_ # shape=(..., 1, 1)
return quadratic.squeeze(-1).squeeze(-1)
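# Illustrative usage: batched x^T A x (shapes and values are assumptions):
_x = torch.ones(10, 3)
_A = torch.eye(3).expand(10, 3, 3)
assert torch.allclose(quadratic_matmul(_x, _A), torch.full((10,), 3.0))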
|
78335f6a57f34701f3f1fe9b8dd74e9b8be686a3
| 701,418
|
def get_global_step(estimator):
"""Return estimator's last checkpoint."""
return int(estimator.latest_checkpoint().split("-")[-1])
|
11b4a96f74d029f9d9cc5a0fcc93da7504729eb7
| 701,419
|
import torch
def box_iou(boxes1, boxes2):
"""Compute pairwise IoU across two lists of anchor or bounding boxes.
Defined in :numref:`sec_anchor`"""
def box_area(boxes): return ((boxes[:, 2] - boxes[:, 0]) *
(boxes[:, 3] - boxes[:, 1]))
# Shape of `boxes1`, `boxes2`, `areas1`, `areas2`: (no. of boxes1, 4),
# (no. of boxes2, 4), (no. of boxes1,), (no. of boxes2,)
areas1 = box_area(boxes1)
areas2 = box_area(boxes2)
# Shape of `inter_upperlefts`, `inter_lowerrights`, `inters`: (no. of
# boxes1, no. of boxes2, 2)
inter_upperlefts = torch.max(boxes1[:, None, :2], boxes2[:, :2])
inter_lowerrights = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
inters = (inter_lowerrights - inter_upperlefts).clamp(min=0)
# Shape of `inter_areas` and `union_areas`: (no. of boxes1, no. of boxes2)
inter_areas = inters[:, :, 0] * inters[:, :, 1]
union_areas = areas1[:, None] + areas2 - inter_areas
return inter_areas / union_areas
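# Illustrative usage with corner-format (x1, y1, x2, y2) boxes; values are assumptions:
_b1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
_b2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
assert abs(box_iou(_b1, _b2).item() - 1.0 / 7.0) < 1e-6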
|
c358c15b99d0e742487a92630ff927606ad6d896
| 701,420
|
import os
def getBranchPath(path):
"""Get a path rooted in the current branch.
@param path: A path relative to the current branch.
@return: A fully-qualified path.
"""
currentPath = os.path.dirname(__file__)
fullyQualifiedPath = os.path.join(currentPath, '..', path)
return os.path.abspath(fullyQualifiedPath)
|
57897e58b57c704cea63549d437f22f96830d226
| 701,421
|
import uuid
def rand_uuid():
"""Generate a random UUID string
:return: a random UUID (e.g. '1dc12c7d-60eb-4b61-a7a2-17cf210155b6')
:rtype: string
"""
return str(uuid.uuid4())
|
fc35e154eeab62988bcd96799ce0f688f4ec427a
| 701,423
|
def filter_linksearchtotals(queryset, filter_dict):
"""
Adds filter conditions to a LinkSearchTotal queryset based on form results.
queryset -- a LinkSearchTotal queryset
filter_dict -- a dictionary of data from the user filter form
Returns a queryset
"""
if "start_date" in filter_dict:
start_date = filter_dict["start_date"]
if start_date:
queryset = queryset.filter(date__gte=start_date)
if "end_date" in filter_dict:
end_date = filter_dict["end_date"]
if end_date:
queryset = queryset.filter(date__lte=end_date)
return queryset
|
96a7e816e7e2d6632db6e6fb20dc50a56a273be9
| 701,424
|
import csv
def get_column(path, c=0, r=1, sep='\t'):
""" extracts column specified by column index
assumes that first row as a header
"""
try:
reader = csv.reader(open(path, "r"), delimiter=sep)
return [row[c] for row in reader][r:]
except IOError:
print('get_column: file "' + path + '" does not exist')
return 'get_column failed'
|
036a1630417224474e8bfe7a9c038a04bd3ea0d5
| 701,425
|
def find_in_list(list_one, list_two):
"""Find and return an element from list_one that is in list_two, or None otherwise."""
for element in list_one:
if element in list_two:
return element
return None
|
9376b38a06cadbb3e06c19cc895eff46fd09f5c1
| 701,427
|
import sys
import pickle
def load_pickle(fname):
"""Loads a pickle file to memory.
Parameters
----------
fname : str
File name + path.
Returns
-------
dict/list
Data structure of the input file.
"""
assert fname, 'Must input a valid file name.'
if sys.version_info[0] == 2:
return pickle.load(open(fname, mode='rb'))
else:
return pickle.load(open(fname, mode='rb'), encoding='latin1')
|
d5890aec8fd491b89b81e9e6c01ee658b1a843bd
| 701,429
|
def _real_freq_filter(rfft_signal, filters):
"""Helper function to apply a full filterbank to a rfft signal
"""
nr = rfft_signal.shape[0]
subbands = filters[:, :nr] * rfft_signal
return subbands
|
0bee4822ac1d6b5672e4ad89bb59f03d72828244
| 701,430
|
def _strip_extension(name, ext):
"""
Remove trailing extension from name.
"""
ext_len = len(ext)
if name[-ext_len:] == ext:
name = name[:-ext_len]
return name
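# Illustrative usage:
assert _strip_extension("archive.tar.gz", ".gz") == "archive.tar"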
|
aa1e6f8c68e09597e2566ecd96c70d2c748ac600
| 701,431
|
def get_uuid_from_url(url: str) -> str:
""" Strip the URL from the string. Returns the UUID. """
return url.split('/')[-1]
|
d9e0ea9ed186d1ba19c40ead9d08108c45dbf850
| 701,433
|
def texFrac(frac):
""" Tex render for Fractions"""
return ["\\frac{" , str(frac._num) , "}{" , str(frac._denom) , "}"]
|
fd0ed6af8b50f8a4b89e0d83d7cb3e3c3a5f3a90
| 701,434
|
from typing import List
from typing import Optional
def span_to_label(tokens: List[str],
labeled_spans: dict,
scheme: Optional[str] = 'BIO') -> List[str]:
"""
Convert labeled spans to token-level labels.
:param tokens: a list of tokens
:param labeled_spans: a dict mapping (start_idx, end_idx) tuples to labels
:param scheme: labeling scheme, in ['BIO', 'BILOU'].
:return: a list of string labels
"""
assert scheme in ['BIO', 'BILOU'], ValueError("unknown labeling scheme")
if labeled_spans:
assert list(labeled_spans.keys())[-1][1] <= len(tokens), ValueError("label spans out of scope!")
labels = ['O'] * len(tokens)
for (start, end), label in labeled_spans.items():
if scheme == 'BIO':
labels[start] = 'B-' + label
if end - start > 1:
labels[start + 1: end] = ['I-' + label] * (end - start - 1)
elif scheme == 'BILOU':
if end - start == 1:
labels[start] = 'U-' + label
else:
labels[start] = 'B-' + label
labels[end - 1] = 'L-' + label
if end - start > 2:
labels[start + 1: end - 1] = ['I-' + label] * (end - start - 2)
return labels
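# Illustrative usage (tokens and spans are assumptions):
assert span_to_label(["John", "lives", "in", "New", "York"],
{(0, 1): "PER", (3, 5): "LOC"}) == ["B-PER", "O", "O", "B-LOC", "I-LOC"]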
|
dbd572d4c306f31202c93b5983f5dd4cdd237074
| 701,435
|
import argparse
import os
def build_args():
"""
Constructs command line arguments for the vulndb tool
"""
parser = argparse.ArgumentParser(
description="Fully open-source security audit for project dependencies based on known vulnerabilities and advisories."
)
parser.add_argument(
"--no-banner",
action="store_true",
default=False,
dest="no_banner",
help="Do not display banner",
)
parser.add_argument(
"--cache",
action="store_true",
default=False,
dest="cache",
help="Cache vulnerability information in platform specific user_data_dir",
)
parser.add_argument(
"--sync",
action="store_true",
default=False,
dest="sync",
help="Sync to receive the latest vulnerability data. Should have invoked cache first.",
)
parser.add_argument(
"--suggest",
action="store_true",
default=False,
dest="suggest",
help="Suggest appropriate fix version for each identified vulnerability.",
)
parser.add_argument(
"--risk-audit",
action="store_true",
default=True if os.getenv("ENABLE_OSS_RISK", "") in ["true", "1"] else False,
dest="risk_audit",
help="Perform package risk audit (slow operation). Npm only.",
)
parser.add_argument(
"--private-ns",
dest="private_ns",
default=os.getenv("PKG_PRIVATE_NAMESPACE"),
help="Private namespace to use while performing oss risk audit. Private packages should not be available in public registries by default. Comma separated values accepted.",
)
parser.add_argument(
"-t",
"--type",
dest="project_type",
help="Override project type if auto-detection is incorrect",
)
parser.add_argument(
"--bom",
dest="bom",
help="UNUSED: Examine using the given Software Bill-of-Materials (SBoM) file in CycloneDX format. Use cdxgen command to produce one.",
)
parser.add_argument(
"-i", "--src", dest="src_dir", help="Source directory", required=True
)
parser.add_argument(
"-o",
"--report_file",
dest="report_file",
help="Report filename with directory",
)
parser.add_argument(
"--no-error",
action="store_true",
default=False,
dest="noerror",
help="Continue on error to prevent build from breaking",
)
parser.add_argument(
"--no-license-scan",
action="store_true",
default=False,
dest="no_license_scan",
help="Do not perform a scan for license limitations",
)
return parser.parse_args()
|
ee24006780a225803cd503fb612264d49e37c7b2
| 701,436
|
def unimodal_converter(data):
"""
Returns ground truth labels when data is split modally to text and image
data: dataframe object
"""
for column in ["string", "numeric"]:
unimodal_image, unimodal_text = [], []
for i in range(len(data)):
temp_val = data.loc[i, "Modality_{}_gt".format(column)]
if temp_val in ["text-unimodal", "image-unimodal", 1, 2]: #prevelance of image-only hate low enough to treat image and text unimodal equally because of image text
if column == "string":
unimodal_image.append("none")
if column == "numeric":
unimodal_image.append(0)
unimodal_text.append(data.loc[i, "Primary_{}_gt".format(column)])
if temp_val in ["multimodal", 3]:
unimodal_image.append(data.loc[i, "Primary_{}_gt".format(column)])
unimodal_text.append(data.loc[i, "Primary_{}_gt".format(column)])
if temp_val in ["none", 0]:
if column == "string":
unimodal_image.append("none")
unimodal_text.append("none")
if column == "numeric":
unimodal_image.append(0)
unimodal_text.append(0)
data["Unimodal_text_{}".format(column)] = unimodal_text
data["Unimodal_image_{}".format(column)] = unimodal_image
return data
|
623208e7b8ee9e4f1e494c95d7ec0c16558f85b9
| 701,437
|
def convert_ids_to_tokens(inv_vocab, ids):
"""Converts a sequence of ids into tokens using the vocab."""
output = []
for item in ids:
output.append(inv_vocab[item])
return output
|
da1aa84d271fe46cedf530c2871ee54c57e676e2
| 701,438
|
import aiohttp
async def remove_device(
ws_client: aiohttp.ClientWebSocketResponse, device_id: str, config_entry_id: str
) -> bool:
"""Remove config entry from a device."""
await ws_client.send_json(
{
"id": 1,
"type": "config/device_registry/remove_config_entry",
"config_entry_id": config_entry_id,
"device_id": device_id,
}
)
response = await ws_client.receive_json()
return response["success"]
|
095926990c48a5f61267eb059591a80c48f7e3eb
| 701,439
|
import logging
import pickle
import time
def load_obj(path):
"""
return the python object saved in the given path
:param path: the path to be loaded
:return:
"""
logger = logging.getLogger("load_obj")
retry_count = 3
while retry_count > 0:
try:
with open(path, 'rb') as f:
return pickle.load(f)
except (IOError, EOFError) as e:
logger.warning("failed loading obj with error: {}. Will retry for {} time.".format(retry_count, str(e)))
time.sleep(2)
retry_count -= 1
|
d486846bdf284366a89a48e7d7ad0f86239b9f83
| 701,440
|
import importlib
def _check_import(package_name):
"""Import a package, or give a useful error message if it's not there."""
try:
return importlib.import_module(package_name)
except ImportError:
err_msg = (
f"{package_name} is not installed. "
"It may be an optional powersimdata requirement."
)
raise ImportError(err_msg)
|
c4cb7c5a49071663d23e9530155bdee3304a5f72
| 701,441
|
import getpass
def prompt(identifier) -> tuple:
"""Credential entry helper.
Returns:
Tuple of login_id, key
"""
login_id = input(f"API Login ID for {identifier}: ")
key = getpass.getpass(f"API Transaction Key for {identifier}: ")
return (login_id, key)
|
be0ed9be1a60c2c29753d6a9ca8b3f12294f183b
| 701,442
|
import torch
def rotation_3d_in_axis(points, angles, axis=0):
"""Rotate points by angles according to axis.
Args:
points (torch.Tensor): Points of shape (N, M, 3).
angles (torch.Tensor): Vector of angles in shape (N,)
axis (int, optional): The axis to be rotated. Defaults to 0.
Raises:
ValueError: when the axis is not in range [0, 1, 2], it will \
raise value error.
Returns:
torch.Tensor: Rotated points in shape (N, M, 3)
"""
rot_sin = torch.sin(angles)
rot_cos = torch.cos(angles)
ones = torch.ones_like(rot_cos)
zeros = torch.zeros_like(rot_cos)
if axis == 1:
rot_mat_T = torch.stack([
torch.stack([rot_cos, zeros, -rot_sin]),
torch.stack([zeros, ones, zeros]),
torch.stack([rot_sin, zeros, rot_cos])
])
elif axis == 2 or axis == -1:
rot_mat_T = torch.stack([
torch.stack([rot_cos, -rot_sin, zeros]),
torch.stack([rot_sin, rot_cos, zeros]),
torch.stack([zeros, zeros, ones])
])
elif axis == 0:
rot_mat_T = torch.stack([
torch.stack([zeros, rot_cos, -rot_sin]),
torch.stack([zeros, rot_sin, rot_cos]),
torch.stack([ones, zeros, zeros])
])
else:
raise ValueError(f'axis should in range [0, 1, 2], got {axis}')
return torch.einsum('aij,jka->aik', (points, rot_mat_T))
|
f9ae51e59e8531e25d376267b16746f5e88575e0
| 701,443
|
import pathlib
def get_managed_environment_log_path():
"""Path for charmcraft log when running in managed environment."""
return pathlib.Path("/tmp/charmcraft.log")
|
1d8c66d480094a728820ea80bdf1ad65a8859fe7
| 701,444
|
import time
def generate_timestamp(expire_after: float = 30) -> int:
"""
:param expire_after: expires in seconds.
:return: timestamp in milliseconds
"""
return int(time.time() * 1000 + expire_after * 1000)
|
16f2fcd77de9edb1e167f1288e37a10491469c22
| 701,445
|
import base64
def b64e(s):
"""b64e(s) -> str
Base64 encodes a string
Example:
>>> b64e("test")
'dGVzdA=='
"""
# accept str or bytes and return str, so the doctest above holds on Python 3
return base64.b64encode(s.encode() if isinstance(s, str) else s).decode()
|
2562f5d18ac59bbe4e8a28ee4033eaa0f10fc641
| 701,446
|
import yaml
import random
import string
def tmp_config_file(dict_: dict) -> str:
"""
Dumps a dict into a yaml file saved under a randomly generated name. Used as a config file to create an
ObservatoryConfig instance.
:param dict_: config dict
:return: path of temporary file
"""
content = yaml.safe_dump(dict_).replace("'!", "!").replace("':", ":")
file_name = "".join(random.choices(string.ascii_lowercase, k=10))
with open(file_name, "w") as f:
f.write(content)
return file_name
|
d4ea42a8dc1824757df7f9823f44f7fc181b29aa
| 701,447
|
def computeAlleleFrequency(f_C, f_T):
"""
f_C = minor allele count
f_T = major allele count
minor_allele_frequency = f_C / (f_C + f_T)
"""
minor_allele_frequency = f_C/(f_C+f_T)
return minor_allele_frequency
|
984a7364bfd3ff2c724c885dedd97f61f959b7e6
| 701,448
|
def microcycle_days(weekly_training_days, weeks):
"""generates indexes of training days during the weeks"""
training_day_indexes = []
for w in range(weeks):
for d in weekly_training_days:
training_day_indexes.append(w * 7 + d.value)
return training_day_indexes
|
5c0364b1dc58d2c0205d4e6442d590a23519e5f4
| 701,449
|
import pprint
def check_overlapping(features):
"""Check for elements of `features` with overlapping ranges. In the
case of overlap, raise a ValueError naming the overlapping
features and their positions.
"""
features = features[:]
overlapping = []
for i in range(len(features)-1):
prev_name, prev_start, prev_end = features[i]
name, start, end = features[i+1]
if prev_end >= start:
overlap = ((prev_name, prev_start, prev_end), (name, start, end))
overlapping.append(overlap)
raise ValueError('overlapping features: ' + pprint.pformat(overlap))
return overlapping
|
6a246aca29c01b32091d7890b6a55d66367e8e14
| 701,450
|
def compute_level(id, tree):
"""
compute the level of an id in a tree
"""
topic = tree[id]
level = 0
while (id != 0):
level += 1
id = topic['parent']
topic = tree[id]
return(level)
|
fb7fbc1c1f97e85c03abdd453a3deb3411960e45
| 701,451
|
import torch
import math
def eval_gather_inds(len_, num_samples=7):
""" get the gather indices """
inds = torch.arange(0, num_samples, dtype=torch.long)
mul = math.ceil(len_ / num_samples)
output = inds.repeat(mul)[:len_]
return output
|
516ca54ff5c3b442bc93becd15d1bdabd8c8025f
| 701,452
|
def none_or_valid_float_value_as_string(str_to_check):
"""
Unless a string is "none", tries to convert it to a float and back to check
that it represents a valid float value. Throws ValueError if type
conversion fails. This function is only needed because the MATLAB scripts
take some arguments either as a float value in string form or as "none".
:param str_to_check: string to validate
:return: string which is either "none" or represents a valid float value
"""
return str_to_check if str_to_check == "none" else str(float(str_to_check))
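# Illustrative usage:
assert none_or_valid_float_value_as_string("none") == "none"
assert none_or_valid_float_value_as_string("1e-3") == "0.001"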
|
54f3c63fab0752678cb5a69723aa7790ab11a624
| 701,453
|
def S_moving_average_filter(_data_list, _smoothing=1):
"""
Returns moving average data without data lag.
Use a smoothing factor greater than zero to control the overall amount of smoothing.
"""
ma_data = []
ds = len(_data_list)
s = _smoothing
mas = int((ds * 0.02) * s)
fc = int(mas/2)
fmas = fc * 2
for i in range(ds):
if i < fc:
db = _data_list[:i+i+1]
nfc = len(db)
ma_data.append(sum(db)/nfc)
elif i >= fc:
if i < (ds - fc):
db = _data_list[i-fc:i+fc+1]
nfc = fmas+1
ma_data.append(sum(db)/nfc)
else:
db = _data_list[i-(ds-i-1):]
nfc = len(db)
ma_data.append(sum(db)/nfc)
return ma_data
|
763bff294c6225260cb67e2e38440eb4d514c126
| 701,454
|
def get_expanse_certificate_context(data):
"""
provide custom context information about certificate with data from Expanse API
"""
return {
"SearchTerm": data['search'],
"CommonName": data['commonName'],
"FirstObserved": data['firstObserved'],
"LastObserved": data['lastObserved'],
"DateAdded": data['dateAdded'],
"Provider": data['providers'][0]['name'],
"NotValidBefore": data['certificate']['validNotBefore'],
"NotValidAfter": data['certificate']['validNotAfter'],
"Issuer": {
"Name": data['certificate']['issuerName'],
"Email": data['certificate']['issuerEmail'],
"Country": data['certificate']['issuerCountry'],
"Org": data['certificate']['issuerOrg'],
"Unit": data['certificate']['issuerOrgUnit'],
"AltNames": data['certificate']['issuerAlternativeNames'],
"Raw": data['certificate']['issuer']
},
"Subject": {
"Name": data['certificate']['subjectName'],
"Email": data['certificate']['subjectEmail'],
"Country": data['certificate']['subjectCountry'],
"Org": data['certificate']['subjectOrg'],
"Unit": data['certificate']['subjectOrgUnit'],
"AltNames": data['certificate']['subjectAlternativeNames'],
"Raw": data['certificate']['subject']
},
"Properties": data['properties'][0],
"MD5Hash": data['certificate']['md5Hash'],
"PublicKeyAlgorithm": data['certificate']['publicKeyAlgorithm'],
"PublicKeyBits": data['certificate']['publicKeyBits'],
"BusinessUnits": data['businessUnits'][0]['name'],
"CertificateAdvertisementStatus": data['certificateAdvertisementStatus'][0],
"ServiceStatus": ','.join(data['serviceStatus']),
"RecentIPs": ','.join(data['details']['recentIps']),
"CloudResources": ','.join(data['details']['cloudResources']),
"PemSha1": data['certificate']['pemSha1'],
"PemSha256": data['certificate']['pemSha256']
}
|
cf5c1d38ae7ed3474171ccff7d437d7b622cbcec
| 701,455
|
from datetime import datetime
def convert_time(time_str):
"""Convert iso string to date time object
:param time_str: String time to convert
"""
try:
dt = datetime.strptime(time_str, "%Y-%m-%dT%H:%Mz")
return dt
except Exception:
return time_str
|
4ba3d5b8af4305cc44afb60d02eeb1b1d041fab9
| 701,456
|
def suck_out_formats(reporters):
"""Builds a dictionary mapping edition keys to their cite_format if any.
The dictionary takes the form of:
{
'T.C. Summary Opinion': '{reporter} {volume}-{page}',
'T.C. Memo.': '{reporter} {volume}-{page}'
...
}
In other words, this lets you go from an edition key to its cite_format.
"""
formats_out = {}
for reporter_key, data_list in reporters.items():
# For each reporter key...
for data in data_list:
# Map the cite_format if it exists
for edition_key, edition_value in data["editions"].items():
try:
formats_out[edition_key] = data["cite_format"]
except KeyError:
# The reporter has no cite_format; skip it.
pass
return formats_out
|
a0db907839573ca53f7c96c326afe1eac5491c63
| 701,457
|
def cast_bytes(data, encoding='utf8'):
"""
Cast str, int, float to bytes.
"""
if isinstance(data, str):
return data.encode(encoding)
elif isinstance(data, (int, float)):
return str(data).encode(encoding)
elif isinstance(data, bytes):
return data
elif data is None:
return None
else:
raise TypeError("Expected unicode or bytes, got %r" % data)
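# Illustrative usage:
assert cast_bytes("abc") == b"abc"
assert cast_bytes(4.5) == b"4.5"
assert cast_bytes(None) is None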
|
01ac5d7cd4a728e401075334900808a6a579deef
| 701,458
|
def B(i,j,k):
"""
Tensor B used in constructing ROMs.
Parameters
----------
i : int
j : int
k : int
Indices in the tensor.
Returns
-------
int
Tensor output.
"""
if i == j + k:
return -1
elif j == i + k or k == i + j:
return 1
else:
msg = "Possible Error: Indices ({},{},{})".format(i,j,k)
print(msg)
return 0
|
b4969759fd2f07bd2bd2baed48a2adfd8669987a
| 701,459
|
import io
def format_data(data, indent):
"""Format a bytestring as a C string literal.
Arguments:
data: Bytestring to write
indent: Indentation for each line, a string
Returns:
A multiline string containing the code, with indentation before every line
including the first. There is no final newline.
"""
fp = io.StringIO()
fp.write(indent)
fp.write('"')
rem = 80 - 3 - len(indent)
def advance(n):
nonlocal rem
if rem < n:
fp.write('"\n')
fp.write(indent)
fp.write('"')
rem = 80 - 3 - len(indent)
rem -= n
for n, c in enumerate(data.rstrip(b"\0")):
if 32 <= c <= 126:
if c in b'\\"':
advance(2)
fp.write("\\")
else:
advance(1)
fp.write(chr(c))
elif c == 0 and (n == len(data) - 1
or not b"0" <= data[n + 1:n + 2] <= b"7"):
advance(2)
fp.write("\\0")
else:
advance(4)
fp.write("\\{:03o}".format(c))
fp.write('"')
return fp.getvalue()
|
260e2b296addeb1113d657b086302197a8e365bb
| 701,460
|
def sample(bn, cond=None):
"""
Sample every variable of a Bayesian Network
:param bn: a Bayesian Network
:param cond: dict, given variables
:return:
"""
g = bn.DAG
cond = cond if cond else dict()
if any(nod not in cond for nod in bn.Exo):
raise ValueError('Exogenous nodes are not fully defined')
res = dict(cond)
for nod in bn.Order:
if nod not in res:
res[nod] = g.nodes[nod]['loci'].render(res)
return res
|
81606afea08f1e80b73d115ed6a6a2581c13890c
| 701,461
|
def psf_to_inhg(psf):
"""Convert lb/ft^2 to inches of mercury."""
return psf * 0.014139030952735
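# Illustrative check: standard sea-level pressure, about 2116.22 psf, is roughly 29.92 inHg:
assert abs(psf_to_inhg(2116.22) - 29.92) < 0.01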
|
c1a482c71ad86ae31efece5f1a395fa354db8c3e
| 701,462
|
def get_version(v):
"""
Generate a PEP386 compliant version
Stolen from django.utils.version.get_version
:param v tuple: A five part tuple indicating the version
:returns str: Compliant version
"""
assert isinstance(v, tuple)
assert len(v) == 5
assert v[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if v[2] == 0 else 3
main = '.'.join(str(i) for i in v[:parts])
sub = ''
if v[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[v[3]] + str(v[4])
return str(main + sub)
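# Illustrative usage:
assert get_version((1, 2, 0, 'alpha', 3)) == '1.2a3'
assert get_version((1, 2, 3, 'final', 0)) == '1.2.3'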
|
946c9ea382ac7da0da1c74373cf981df174737c1
| 701,463
|
def get_nuc2prot():
"""
Returns a dict of nucleotide accession numbers as keys and
protein accession numbers as values.
"""
nuc2prot_acc = {}
with open("./data/download/nucleotide2protein", "r") as handle:
line = handle.readline()
while line:
prot, nuc = line.split("|")
nuc2prot_acc[nuc[:-1]] = prot
line = handle.readline()
return nuc2prot_acc
|
8f6e0ab5ad76cfaa63d8c0bf12f84e18b759e750
| 701,464
|
def tadsize_chart(genome_name):
"""
Determine the distance threshold to build coverage tracks.
Args:
genome_name (string): name of the reference genome;
ex: mammals, drosophila, c_elegans, s_pombe, c_crescentus
Returns:
(low, upp, typ) (tuple of int): lower bound, upper bound, and typical resolution in basepairs
"""
low_bound = {
"mammals": 100000,
"drosophila": 10000,
"c_elegans": 1000000,
"s_pombe": 50000,
"c_crescentus": 30000
}
upp_bound = {
"mammals": 2000000,
"drosophila": 100000,
"c_elegans": 2000000,
"s_pombe": 100000,
"c_crescentus": 400000
}
typ_res = {
"mammals": 1000000,
"drosophila": 250000,
"c_elegans": 3000000,
"s_pombe": 300000,
"c_crescentus": 250000
}
return low_bound[genome_name], upp_bound[genome_name], typ_res[genome_name]
|
844744424845a1d240fa93023b9786a7ed2cc12c
| 701,465
|
def convert_results_to_table(results, aggregation="average"):
"""
Convert results to table
Args:
results (dict): results dictionary
aggregation (str): aggregation method, either average or sum
"""
headers = []
columns = []
for target_task, source_tasks in results.items():
headers.append(target_task)
column = []
for _, metrics in source_tasks.items():
if metrics:
aggregate_value = sum(metrics.values()) if aggregation == "sum" else sum(metrics.values())/len(metrics)
aggregate_value = round(aggregate_value, 4)
else:
aggregate_value = "N/A"
column.append(aggregate_value)
columns.append(column)
return columns,headers
|
51d38a52cb5428568c89e518df86624c5f438cf6
| 701,466
|
import mmap
def is_word_in_file(fname, word):
""" Search word in given file. This function skips empty files.
"""
f = open(fname)
try:
s = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
if s.find(word) != -1:
return True
return False
except ValueError:
pass
return False
|
c16b94bb450807fefdab35470535c0752a9ecbbd
| 701,469
|
def emat(st1, yt, t, a=None):
"""
returns exponential moving average for a t-period EMA and incremental value
where st1 is the previous average, yt is the incr value, and t is the size of the avg
a can optionally be overridden with a specific coefficient, else 2/(t+1) is used
"""
# St = a * Yt + (1 - a) * St-1
# where:
# St-1 = last St (i.e. St from t-1)
# Yt = data point for t
# a = alpha factor from 0.0 - 1.0, but 2 / (N + 1) gives 86% weighting with large N's
# see http://en.wikipedia.org/wiki/Moving_average
a = 2.0 / (t + 1.0) if a is None else a
return a * yt + (1.0 - a) * st1
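# Illustrative usage: one step of a 9-period EMA (alpha = 2 / (9 + 1) = 0.2):
assert abs(emat(10.0, 12.0, 9) - 10.4) < 1e-9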
|
484403ca5ba13bd960bcda6220846eef9ac09114
| 701,470
|
import os
def relpath(path, cwd=None):
""" Find relative path from current directory to path.
Example usage:
>>> from Ska.File import relpath
>>> relpath('/a/b/hello/there', cwd='/a/b/c/d')
'../../hello/there'
>>> relpath('/a/b/c/d/e/hello/there', cwd='/a/b/c/d')
'e/hello/there'
>>> # Special case - don't go up to root and back
>>> relpath('/x/y/hello/there', cwd='/a/b/c/d')
'/x/y/hello/there'
:param path: Destination path
:param cwd: Current directory (default: os.getcwd() )
:rtype: Relative path
"""
if cwd is None:
cwd = os.getcwd()
currpath = os.path.abspath(cwd)
destpath = os.path.abspath(os.path.join(cwd, path))
currpaths = currpath.split(os.sep)
destpaths = destpath.split(os.sep)
# Don't go up to root and back. Since we split() on an abs path the
# zero element is always ''
if currpaths[1] != destpaths[1]:
return destpath
# Get rid of common path elements
while currpaths and destpaths and currpaths[0] == destpaths[0]:
currpaths.pop(0)
destpaths.pop(0)
# start with enough '..'s to get to top of common path then get
# the rest of the destpaths. Return '' if the list ends up being empty.
relpaths = [os.pardir] * len(currpaths) + destpaths
return os.path.join(*relpaths) if relpaths else ''
|
a743e02cb51ee352d7bbef047af13152c5834c14
| 701,471
|
def get_service_type(f):
"""Retrieves service type from function."""
return getattr(f, 'service_type', None)
|
fb4d98a4b4db0d10ab97d94d98ccfe21cea05fe9
| 701,472
|
def compute_error(model_data, reference_data):
"""Returns the summ of the squared differences between model and reference data."""
error = ((model_data - reference_data) ** 2).sum()
return error
|
66e80326b85eed67008b517dfeff99cc8352bffd
| 701,474
|
import json
import sys
def read_json_file(json_path):
""" Read inventory as json file """
tf_inv = {}
try:
with open(json_path) as json_handler:
try:
tf_inv = json.load(json_handler)
except json.decoder.JSONDecodeError:
print(
"Provided terraform inventory file (%s) is not a valid json." % json_path)
sys.exit(1)
except OSError:
print("Provided terraform inventory file (%s) does not exist." % json_path)
sys.exit(1)
return tf_inv
|
6758e50c441c10ed3e0c7e68b1ed87abbbeff6b1
| 701,476
|
import json
def dj(_dict):
"""Converts dicts to JSON and safely handles non-serializable items"""
return json.dumps(
_dict,
default=lambda o: 'ERROR: Item not JSON serializable',
sort_keys=True,
indent=3)
|
042fdc731a084e1d74175a1ac22bc5b4204050c6
| 701,477
|
def is_decorated(field_spec):
"""
is this spec a decorated one
:param field_spec: to check
:return: true or false
"""
if 'config' not in field_spec:
return False
config = field_spec.get('config')
return 'prefix' in config or 'suffix' in config or 'quote' in config
|
b44d13fbcadc67ac191d07b1c304f2ec5ce1f081
| 701,478
|
def split_path(path):
"""
Normalise S3 path string into bucket and key.
Parameters
----------
path : string
Input path, like `s3://mybucket/path/to/file`
Examples
--------
>>> split_path("s3://mybucket/path/to/file")
['mybucket', 'path/to/file']
"""
if path.startswith('s3://'):
path = path[5:]
if '/' not in path:
return path, ""
else:
return path.split('/', 1)
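# Illustrative usage (note the no-slash case returns a tuple, not a list):
assert split_path("s3://mybucket/path/to/file") == ["mybucket", "path/to/file"]
assert split_path("s3://mybucket") == ("mybucket", "")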
|
446f7643066864937e11b915d4ff842f21c65dd6
| 701,479
|
def unixtime2mjd(unixtime):
"""
Converts a UNIX time stamp to Modified Julian Day
Input: time in UNIX seconds
Output: time in MJD (fraction of a day)
"""
# unixtime gives seconds passed since "The Epoch": 1.1.1970 00:00
# MJD at that time was 40587.0
result = 40587.0 + unixtime / (24. * 60. * 60.)
return result
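# Illustrative check: the UNIX epoch itself is MJD 40587.0:
assert unixtime2mjd(0) == 40587.0
assert unixtime2mjd(86400) == 40588.0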
|
670e915b7a5de8cd9ced28e6b4d32c51ac916d54
| 701,480
|
def vecdist3(coord1, coord2):
"""Calculate vector between two 3d points."""
#return [i - j for i, j in zip(coord1, coord2)]
# Twice as fast for fixed 3d vectors
vec = [coord2[0] - coord1[0],
coord2[1] - coord1[1],
coord2[2] - coord1[2]]
return (vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2])**0.5
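# Illustrative usage: a (1, 2, 2) displacement has length 3:
assert vecdist3((0, 0, 0), (1, 2, 2)) == 3.0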
|
0315ec921c051eb46da9f073e6bc76b2a0a448bb
| 701,481
|
def num_active_calls(log, ad):
"""Get the count of current active calls.
Args:
log: Log object.
ad: Android Device Object.
Returns:
Count of current active calls.
"""
calls = ad.droid.telecomCallGetCallIds()
return len(calls) if calls else 0
|
a6674df1e8e539478db6ab1a640fbce1cf0b6b4c
| 701,482
|
import os
def create_dir(ctx, param, value):
""" a command option callback to create parent directories if does not exist """
pardir = os.path.dirname(value.name) if hasattr(value, 'name') else None
if pardir:
os.makedirs(pardir, exist_ok=True)
return value
|
6a573ebbc0ddc5c4a8f0f15fa0fc91566610bc28
| 701,484
|
def density_standard(components):
"""
Natural gas density at standard temperature, kg/m3
:param components: (list) List of gas components. Each item is an object of class GasComponent
:return: (float) The density of natural gas at standard conditions, kg/m3
"""
return sum([component.density_standard * component.volume_percentage * 0.01 for component in components])
|
c087ce6ae1a3486dd092341286023c56606380a3
| 701,485
|
import re
def range_address_number(num, include_last=True):
"""
'5-7' -> [5, 6, 7]
'5' -> ['5']
:param num: address number string, possibly a 'min-max' range
:param include_last: whether to include the upper bound of the range
:return: a list of ints for a range, else [num]
"""
range_re = re.search(r'(\d+)-(\d+)', num)
if range_re:
min, max = list(map(
lambda i: int(i),
list(range_re.groups())
))
max = max + 1 if include_last else max
return list(range(min, max))
else:
return [num]
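# Illustrative usage:
assert range_address_number('5-7') == [5, 6, 7]
assert range_address_number('5-7', include_last=False) == [5, 6]
assert range_address_number('5') == ['5']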
|
3c67ddcba2a25915fce89c198d4d4f2a11f5ef60
| 701,486
|
def generate_test_uuid(tail_value=0):
"""Returns a blank uuid with the given value added to the end segment."""
return '00000000-0000-0000-0000-{value:0>{pad}}'.format(value=tail_value,
pad=12)
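# Illustrative usage:
assert generate_test_uuid(5) == '00000000-0000-0000-0000-000000000005'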
|
f113eef54eba9d8d1fb5234c87af3cb6290ea25e
| 701,487
|
def format_revision_list(revisions, use_html=True):
"""Converts component revision list to html."""
result = ''
for revision in revisions:
if revision['component']:
result += '%s: ' % revision['component']
if 'link_url' in revision and revision['link_url'] and use_html:
result += '<a target="_blank" href="{link_url}">{link_text}</a>'.format(
link_url=revision['link_url'], link_text=revision['link_text'])
else:
result += revision['link_text']
if use_html:
result += '<br />'
else:
result += '\n'
return result
|
d49e91069a1f33a7ee32963e81a3e19ea768d3ea
| 701,488
|
from typing import Dict
async def total() -> Dict:
"""
Sum of a list of numbers
---
tags:
- Total
get:
parameters:
- N/A
response:
200:
description: returns a dictionary with a total sum of a list of numbers
"""
return {"total": sum(list(range(10000001)))}
|
67c1d1abf6c76c533d8ea776dbb46a4184b0fca5
| 701,489
|
from bs4 import BeautifulSoup
def bishijie_info_parse(parse_str:str = ''):
"""
Takes a string to be parsed.
"""
# html_info = etree.HTML(parse_str,parser=None)
soup = BeautifulSoup(parse_str,features="lxml")
info_list = soup.find_all('div',class_="content")
result_info_list = []
for info in info_list:
title = info.find('h3').text.replace('\n','').replace(' ','')
description = info.find('div',class_='h63').text.replace('\n','').replace(' ','')
url = 'https://www.bishijie.com' + info.a['href']
look_count_true = info.find('div',class_='bull').text.replace('\n','').replace(' ','')
look_count_false = info.find('div',class_='bear').text.replace('\n','').replace(' ','')
result_info_list.append({"Title":title,
"Description":description,
"look_count_true":look_count_true,
"look_count_false":look_count_flase,
"Url":url})
result_info = {
"status":True,
"info":"bishijie Fetch results",
"info_list":result_info_list
}
return result_info
|
e89a875f9c98b3b9cabeed2eb362ad1196b14275
| 701,490
|
def extractAliasFromContainerName(containerName):
""" Take a compose created container name and extract the alias to which it
will be refered. For example bddtests_vp1_0 will return vp0 """
return containerName.split("_")[1]
|
a5ab9487ae31ee1a4b2ed9b67062817488107983
| 701,491
|
def HexToByte( hexStr ):
"""
Convert a string of hex byte values into a byte string. The hex byte values may
or may not be space separated.
"""
# The list comprehension implementation is fractionally slower in this case
#
# hexStr = ''.join( hexStr.split(" ") )
# return ''.join( ["%c" % chr( int ( hexStr[i:i+2],16 ) ) \
# for i in range(0, len( hexStr ), 2) ] )
bytes = []
hexStr = ''.join( hexStr.split(" ") )
for i in range(0, len(hexStr), 2):
bytes.append( chr( int (hexStr[i:i+2], 16 ) ) )
return ''.join( bytes )
|
eab4fd7ecaae10add8411cf51c03d1bf5b902700
| 701,492
|
def get_string(request, key):
"""Returns the first value in the request args for a given key."""
if not request.args:
return None
if type(key) is not bytes:
key = key.encode()
if key not in request.args:
return None
val = request.args[key][0]
if val is not None and type(val) is bytes:
val = val.decode()
return val
|
ae43bb3e11cf21deb8f726ed6a2321c51099e4f3
| 701,494
|
def map_serial_number(facilities) -> str:
"""Map serial number."""
facility = facilities.get("body", {}).get("facilitiesList", [])[0]
return str(facility.get("serialNumber", None))
|
81491de02a2583d30ee31833a427b4ffdebe6a88
| 701,496
|
def _get_maxmem(profile_df):
"""
Get current peak memory
:param pandas.core.frame.DataFrame profile_df: a data frame representing the current profile.tsv for a sample
:return str: max memory
"""
return "{} GB".format(str(max(profile_df['mem']) if not profile_df['mem'].empty else 0))
|
2e628d48f7b4e0e3c1465f09da7aa795d2954a06
| 701,497
|
import torch
def MaskedNLL(target, probs, balance_weights=None):
# adapted from https://gist.github.com/jihunchoi/f1434a77df9db1bb337417854b398df1
"""
Args:
target: A Variable containing a LongTensor of size
(batch, ) which contains the index of the true
class for each corresponding step.
probs: A Variable containing a FloatTensor of size
(batch, num_classes) which contains the
softmax probability for each class.
balance_weights: An optional FloatTensor of size (num_classes,)
containing per-class weights to apply to the log-probabilities.
Returns:
losses: Per-example negative log-likelihood with weights applied.
"""
log_probs = torch.log(probs)
if balance_weights is not None:
balance_weights = balance_weights.cuda()
log_probs = torch.mul(log_probs, balance_weights)
losses = -torch.gather(log_probs, dim=1, index=target.unsqueeze(1))
return losses.squeeze()
|
17132ad088b00ae096f16946f5026ed2133c8eeb
| 701,499
|
async def process_headers(headers):
"""Filter out unwanted headers and return as a dictionary."""
headers = dict(headers)
header_keys = (
"user-agent",
"referer",
"accept-encoding",
"accept-language",
"x-real-ip",
"x-forwarded-for",
)
return {k: headers.get(k) for k in header_keys}
|
32feeb40c12c4b69d65da1c178e396e85fc9e557
| 701,500
|
def by_circ(x, y):
"""
Sort circRNAs by the start and end position
"""
return x.end - y.end if x.start == y.start else x.start - y.start
|
5d8205389960b92f10c450fdb6385678a279406b
| 701,503
|
import yaml
def get_rest_of_manifest_values():
""" If an existing manifest is present then we do not want to overwrite any fields
the user may have filled out. So we want to read in everything but the resources:
section and use that when generating the file. """
with open('hardening_manifest/hardening_manifest.yaml', 'r') as stream:
existing_manifest = yaml.safe_load(stream)
del existing_manifest['resources']
return existing_manifest
|
03cc8afbcdf26a91596d189bafecce08c9cf2895
| 701,504
|
import sys
import gc
def nogc(func):
"""disable garbage collector
Python's garbage collector triggers a GC each time a certain number of
container objects (the number being defined by gc.get_threshold()) are
allocated even when marked not to be tracked by the collector. Tracking has
no effect on when GCs are triggered, only on what objects the GC looks
into. As a workaround, disable GC while building complex (huge)
containers.
This garbage collector issue has been fixed in Python 2.7.
"""
if sys.version_info >= (2, 7):
return func
def wrapper(*args, **kwargs):
gcenabled = gc.isenabled()
gc.disable()
try:
return func(*args, **kwargs)
finally:
if gcenabled:
gc.enable()
return wrapper
|
cdc9a1f48608d84b8a3e568bb0b50a6f12ffa34a
| 701,505
|
def _normalize_longitude(lon: float) -> float:
"""Normalize longitudes between [-180, 180]"""
return ((lon + 180.0) % 360.0) - 180.0
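# Illustrative usage:
assert _normalize_longitude(370.0) == 10.0
assert _normalize_longitude(-190.0) == 170.0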
|
e50dc8fee9a0499a2e32f3ccf8b2e9a634581bba
| 701,507
|
def get_tf_tensor_shape(tensor):
"""Get tensor shape, if there is unkown tensor, set it as None"""
shape = []
try:
shape = tensor.get_shape().as_list()
if any(s is None for s in shape):
return None
return shape
except Exception: # pylint: disable=broad-except
shape = None
return shape
|
33c7e17102ad2f7d407c1f86b13c7cdfa61ca677
| 701,508
|
def _update_selected_experiment_table_rows(
last_select_click, last_clear_click, experiment_table_indices
):
"""The callback to select or deselect all rows in the experiment table.
Triggered when the select all or clear all button is clicked.
"""
last_select_click = last_select_click if last_select_click else 0
last_clear_click = last_clear_click if last_clear_click else 0
# "select all" is clicked: return all row indicies
if int(last_select_click) > int(last_clear_click):
return experiment_table_indices
# "clear all" or nothing yet is clicked: return no row indicies
return []
|
7a527272c780750ea9cbc076f0d947fe9b68a460
| 701,509
|
def rc4Decrypt(data, key):
"""RC4 algorithm"""
x = 0
box = list(range(256))
for i in range(256):
x = (x + int(box[i]) + int(key[i % len(key)])) % 256
box[i], box[x] = box[x], box[i]
x = y = 0
out = []
for char in data:
x = (x + 1) % 256
y = (y + box[x]) % 256
box[x], box[y] = box[y], box[x]
out.append(chr(int(char) ^ box[(box[x] + box[y]) % 256]))
return ''.join(out)
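# Illustrative round trip (RC4 is symmetric; key and data values are assumptions).
# The function expects byte values in and returns a str, so re-encode with latin-1:
_ct = rc4Decrypt(b"secret", b"key")
assert rc4Decrypt(_ct.encode("latin-1"), b"key") == "secret"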
|
91c959cf03410626378647ab6d85391e5b0970d2
| 701,510
|
def _move_tutor_version_groups(table):
"""Tutored moves are never the same between version groups, so the column
collapsing ignores tutors entirely. This means that we might end up
wanting to show several versions as having a tutor within a single column.
So that "E, FRLG" lines up with "FRLG", there has to be a blank space for
"E", which requires finding all the version groups that contain tutors.
"""
move_tutor_version_groups = set()
for method, method_list in table:
if method.name != 'Tutor':
continue
for move, version_group_data in method_list:
move_tutor_version_groups.update(version_group_data.keys())
return move_tutor_version_groups
|
5b9d43a11d5e5d92351ac5b93a7ada5b8d5daa36
| 701,511
|
import textwrap
def make_code_format(light_theme: bool = False) -> str:
"""Create code format template for rich."""
theme = "light" if light_theme else "dark"
code_format = textwrap.dedent(
f"""\
<div class="terminal-container">
<div class="terminal {theme}-terminal">
<div class="top">
<div class="buttons">
<span class="circle {theme}-red"></span>
<span class="circle {theme}-yellow"></span>
<span class="circle {theme}-green"></span>
</div>
</div>
<code>
<pre class="terminal-content">{{code}}</pre>
</code>
</div>
</div>
"""
)
return code_format
|
deb5d97f3bce85c1ef91d4c9e88b68474d70c173
| 701,512
|
def _wrapped_value_and_num(value):
"""Returns a list containing value plus the list's length."""
if isinstance(value, (list, tuple)):
return value, len(value)
else:
return [value], 1
|
811521a18dffd9ee046751c74d4d8a097662c8cd
| 701,514
|
def perform_fit(cfmclient, fabric_uuid, name, description):
"""
Request a full fit across managed Composable Fabrics.
:param cfmclient: CFM Client object
:param fabric_uuid: Valid Fabric UUID of an existing fabric
:param name: Simple name of the fit
:param description: Longer Description of the fitting request
:return:
"""
data = {
'fabric_uuid': '{}'.format(fabric_uuid),
'name': '{}'.format(name),
'description': '{}'.format(description)
}
path = 'v1/fits'
return cfmclient.post(path, data=data)
|
66d6462c97b1354ef11b6378b82912030ed40a94
| 701,515
|
def make_task_hashable(task):
"""
Makes a task dict hashable.
Parameters
----------
task : dict
task that shall be made hashable.
Returns
-------
tuple or original type
hashable version of the task.
"""
if isinstance(task, (tuple, list)):
return tuple((make_task_hashable(e) for e in task))
if isinstance(task, dict):
return tuple(sorted((k,make_task_hashable(v)) for k,v in task.items()))
if isinstance(task, (set, frozenset)):
return tuple(sorted(make_task_hashable(e) for e in task))
return task
|
4e27fe4c27c4ae220ed8b15ce701f2d87796b715
| 701,516
|
from pathlib import Path
def parent(path: str):
"""Returns the parent `Path` of the given path."""
return Path(path).parent.resolve()
|
d86b37bc8310b024eb0a78c1b1de404cf6c2c85a
| 701,517
|
def get_phone_number(phone_number):
"""
Following the RFC 3966 format suggested by OpenID.
Expects: +111-1111-111111 format
"""
if '-' in phone_number:
phone_split = phone_number.split('-')
if len(phone_split) > 2:
#if had country code
return phone_split[2]
return phone_split[1]
return phone_number
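# Illustrative usage (numbers are assumptions):
assert get_phone_number('+111-1111-111111') == '111111'
assert get_phone_number('1111-111111') == '111111'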
|
287d3dde0cabc3c7730ac48bf94b2c4fc809f123
| 701,518
|
def mes_com_acentos(mes_a_mudar):
"""Retorna Mês com Maiúsculas e Acentos."""
meses_a_exibir = {
'janeiro': 'Janeiro',
'fevereiro': 'Fevereiro',
'marco': 'Março',
'abril': 'Abril',
'maio': 'Maio',
'junho': 'Junho',
'julho': 'Julho',
'agosto': 'Agosto',
'setembro': 'Setembro',
'outubro': 'Outubro',
'novembro': 'Novembro',
'dezembro': 'Dezembro',
}
return meses_a_exibir[mes_a_mudar]
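# Illustrative usage:
assert mes_com_acentos('marco') == 'Março'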
|
8361d7e747d524242eeb572b839305d58021b35d
| 701,519
|
import base64
def b64encode(value):
"""
Encode a value in base64
"""
return base64.b64encode(value)
|
988abf5a9d2c0c1f38f16fbf8f80fd43aa115223
| 701,520
|
import os
def get_audio_embedding_model_path(input_repr, content_type):
"""
Returns the local path to the model weights file for the model
with the given characteristics
Parameters
----------
input_repr : "linear", "mel128", or "mel256"
Spectrogram representation used for model.
content_type : "music" or "env"
Type of content used to train embedding.
Returns
-------
output_path : str
Path to given model object
"""
return os.path.join(os.path.dirname(__file__),
'openl3_audio_{}_{}.h5'.format(input_repr, content_type))
|
8a3d0a5d09896467b672e8dde47b1a6a500cdce8
| 701,521
|