content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import argparse
def make_argument_parser():
    """
    Build the ArgumentParser that reads this script's options from sys.argv.

    :return: a configured :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser(
        description="Find matches between the real and simulated tiles")
    parser.add_argument('--size', '-S', type=int, default=1000)
    parser.add_argument('--njobs', '-J', type=int, default=10)
    parser.add_argument('--model', '-M', default='1000',
                        choices=['1000', 'rybski', 'marco'])
    return parser
|
b3c644f0f3860722e9019955e8bc41718952b18b
| 26,932
|
def public(*args):
    """
    This decorator identifies which methods should be included in the abi file.

    It is a marker only: the decorated body is never meant to be executed by
    Python itself (the source is parsed for the marker), so the returned
    replacement is a do-nothing callable.
    """
    def _noop():
        return None
    return _noop
|
b62ff44fbaa365c0f275c1c71ca723c97ec7ebe1
| 26,933
|
import re
def as_string(node):
    """
    Flatten *node* to a simple string by joining its flattened string
    children, then heuristically re-attach punctuation to the previous
    token with regexes, so e.g.
        "Is this a question ?"
    becomes
        "Is this a question?"
    Also restores PTB bracket tokens (-LRB-, -RSB-, ...) and `` '' quotes.
    """
    text = ' '.join(node.flatten())
    # Order matters: quotes and paired brackets first, then the generic
    # punctuation re-attachment pass at the end.
    passes = [
        (r'``\s+', '"'),
        (r"\s+''", '"'),
        (r'\s+-LRB-\s+\s+-RRB-', '()'),
        (r'-LRB-\s+', '('),
        (r'-RRB-', ')'),
        (r'-LSB-\s+', '['),
        (r'-RSB-', ']'),
        (r"\s+([-?.,';)])", r'\1'),
    ]
    for pattern, replacement in passes:
        text = re.sub(pattern, replacement, text)
    return text
|
245db63b04c2707ac7411af9908644dcbed9f740
| 26,935
|
def normalise_period(val):
    """Return an integer from 1 to 12.

    Parameters
    ----------
    val : str or int
        Variant for period. Should at least contain numeric characters.

    Returns
    -------
    int
        Number corresponding to financial period (last two digits found).

    Examples
    --------
    >>> normalise_period('P6')
    6
    >>> normalise_period(202106)
    6
    """
    digits = [ch for ch in str(val) if ch.isdigit()]
    # Only the final two digits carry the period; earlier ones are year etc.
    return int(''.join(digits[-2:]))
|
bf1d5e334d49d98404193fbbc2047bf41f7b24e5
| 26,937
|
def select_in_between_user_login_and_create_profile():
    """
    Prompt the user repeatedly until a valid option (1 or 2) is entered.

    :return: int, 1 or 2 depending on the user's choice.
    :raises ValueError: if the typed input is not an integer at all.
    """
    option = int(input("select your option (1)/(2): "))
    if option == 1:
        return 1
    if option == 2:
        return 2
    print("Please select valid option")
    # Bug fix: the recursive retry's result was previously not returned, so
    # an eventually-valid choice was discarded and None came back.
    return select_in_between_user_login_and_create_profile()
|
d8d5708982dbb2f14ee3227177d78989db16d5be
| 26,938
|
from datetime import datetime
def humanize_timestamp(utc_timestamp, verbose=False):
    """
    Convert a UTC epoch timestamp into a human-readable relative time,
    e.g. '3 hours ago' (verbose) or '3hr' (terse).
    """
    elapsed = datetime.utcnow() - datetime.utcfromtimestamp(utc_timestamp)
    total_seconds = int(elapsed.total_seconds())
    if total_seconds < 60:
        return 'moments ago' if verbose else '0min'
    total_minutes = total_seconds // 60
    if total_minutes < 60:
        return ('%d minutes ago' % total_minutes) if verbose else ('%dmin' % total_minutes)
    total_hours = total_minutes // 60
    if total_hours < 24:
        return ('%d hours ago' % total_hours) if verbose else ('%dhr' % total_hours)
    total_days = total_hours // 24
    if total_days < 30:
        return ('%d days ago' % total_days) if verbose else ('%dday' % total_days)
    # 30.4 approximates the mean month length; floor-division yields a float.
    total_months = total_days // 30.4
    if total_months < 12:
        return ('%d months ago' % total_months) if verbose else ('%dmonth' % total_months)
    total_years = total_months // 12
    return ('%d years ago' % total_years) if verbose else ('%dyr' % total_years)
|
62a402607f8c96775606892771850d07795a160b
| 26,939
|
import glob
def task_pot():
    """Re-create the .pot message catalog (doit task definition)."""
    pot_target = 'localization/telbot.pot'
    return {
        'actions': ['pybabel extract -o localization/telbot.pot src'],
        'file_dep': glob.glob('src/*.py'),
        'targets': [pot_target],
    }
|
99ef42549e94950ecbf37a51bb09769a6fb484f4
| 26,940
|
def get_marker(label, chunk):
    """
    Return the marker instance from *chunk* whose label equals *label*.

    Prints a diagnostic and returns False when no marker matches.
    """
    found = next((m for m in chunk.markers if m.label == label), None)
    if found is not None:
        return found
    print("Marker not found! " + label)
    return False
|
e479568f5b6bc96be0bcdfbf7a428820ab953cce
| 26,941
|
import os
def get_counts():
    """Return test counts that pyt exports via environment variables.

    :returns: dict with keys 'classes', 'tests' and 'modules'; each value is
        the corresponding count (0 when the variable is unset).
    """
    env_to_key = {
        'PYT_TEST_CLASS_COUNT': 'classes',
        'PYT_TEST_COUNT': 'tests',
        'PYT_TEST_MODULE_COUNT': 'modules',
    }
    return {name: int(os.environ.get(var, 0)) for var, name in env_to_key.items()}
|
c32b167917e5922659438a5b04a16bb661bb2ba7
| 26,942
|
def check_default_empty(default):
    """Helper to decide whether a default parameter is "empty" for its type.

    Returns True/False for str, list, dict and int; any other type falls
    through and yields an implicit None (preserved deliberately).
    """
    if isinstance(default, (str, list)):
        return not default
    if isinstance(default, dict):
        return not default
    if isinstance(default, int):
        # ints (including bools) are never considered "empty"
        return False
|
19b94f3b25450d39c5c2b3ddc0af2758e1fc1b5f
| 26,943
|
def knight_moves(start):
    """
    Return the list of board indices a knight can legally reach from *start*
    on an 8x8 chessboard represented as a flat list(64), index = row*8 + col.

    Bug fix: the original wrap test compared ``pos % 8 != x + start % 8``,
    which mishandled edge squares, and ``pos > 0`` wrongly excluded square 0.
    The check below validates destination row and column directly.
    """
    offsets = [(-1, -2), (1, -2), (-2, -1), (2, -1),
               (-2, 1), (2, 1), (-1, 2), (1, 2)]
    col, row = start % 8, start // 8
    res = []
    for dx, dy in offsets:
        new_col, new_row = col + dx, row + dy
        # Both coordinates must stay on the board — no wrap-around.
        if 0 <= new_col < 8 and 0 <= new_row < 8:
            res.append(new_row * 8 + new_col)
    return res
|
92f75aac5090b9f105a0fbc1be4a39a2d542dbdb
| 26,944
|
import itertools
def flatten(iterable):
    """Reduce the dimensionality of an iterable of iterables by one level.

    Args:
        iterable: A multi-dimensional iterable.
    Returns:
        A lazy one-dimensional iterable yielding every element of every
        inner iterable, in order.
    """
    for inner in iterable:
        yield from inner
|
3869bbc5f2e1377d5c893ce7e35c3b91988916a7
| 26,945
|
def get_max(current_max, input_score):
    """
    Compare two input numbers and return the running maximum.

    :param current_max: int, the current max score (0 means "none yet").
    :param input_score: int, the score just input.
    :return: int, the larger of the two.

    NOTE: a current_max of exactly 0 is treated as "no score recorded yet",
    so the new score always wins in that case.
    """
    if current_max == 0 or input_score >= current_max:
        return input_score
    return current_max
|
ad6c195219e4fb522e4a5802ad931b064da3dcc4
| 26,946
|
def check_lead_zero(to_check):
    """Check if a number has a leading 0.

    Args:
        to_check: The number to be checked (str or anything str()-able).
    Returns:
        True if a leading 0 is found or the value is None/empty,
        False otherwise.

    Bug fix: the original tested ``str(to_check) in (None, '')`` — str()
    never returns None — and then indexed ``to_check[0]`` directly, which
    raised TypeError for None or int inputs.
    """
    if to_check is None:
        return True
    text = str(to_check)
    if text == '':
        return True
    return text[0] == '0'
|
39302f3f57ac2d9714d3057a744130cd3a5e5aa9
| 26,948
|
def dec2bin(x: int, n: int = 2) -> str:
    """Convert a non-negative decimal integer to its binary representation,
    returned as a string zero-padded to at least length ``n``.

    Bug/idiom fix: the original round-tripped through ``int(bin(x)[2:])``,
    a no-op for non-negative x that raised ValueError for negatives.
    """
    return format(x, 'b').zfill(n)
|
cf5713c5f8855c5f737e4533f780ac75ee3abfa1
| 26,950
|
import math
def convert_bytes_to_string(number: int) -> str:
    """Return the conversion from bytes to the correct
    unit (1024 bytes = 1 KB) as a string, truncated to 2 decimals.

    :param number: number of bytes to convert to a readable string
    :return: the specified number converted to a readable string

    Refactor: the original used six nested if/else levels; this walks the
    suffix ladder in a loop with identical thresholds and rounding.
    """
    num = float(number)
    factor = 1024
    for suffix in (" Bytes", " KB", " MB", " GB", " TB"):
        if num < factor:
            break
        num /= factor
    else:
        # survived all five divisions: petabyte range (like the original,
        # values >= 1024 PB are still reported in PB)
        suffix = " PB"
    # truncate (not round) to two decimal places, as before
    rounding_factor = 10 ** 2
    rounded = math.floor(num * rounding_factor) / rounding_factor
    return f"{rounded:.2f}" + suffix
|
ca8f5d3417ad989bc12b2bf9cb76a561c735c548
| 26,954
|
def _get_location(location_text):
"""Used to preprocess the input location_text for URL encoding.
Doesn't do much right now. But provides a place to add such steps in future.
"""
return location_text.strip().lower()
|
979471af5d40f0a58a2c4e4e7e26af18779ca890
| 26,955
|
import fnmatch
def _match_in_cache(cache_tree, list_of_names):
"""
:type cache_tree: defaultdict
:description cache_tree: a defaultdict initialized with the tree() function. Contains names
of entries in the kairosdb, separated by "." per the graphite convention.
:type list_of_names: list
:description list_of_names: list of strings, in order, that will be sought after in the cache tree.
:rtype: list
:return: A list of matches, possibly empty.
Given a cache_tree, and a prefix, returns all of the values associated with that prefix,
that is, the keys that reside under the prefix.
"""
head_item = list_of_names[0]
# print "head_item is {0}".format(head_item)
head_item_matches = [ m for m in cache_tree.keys() if fnmatch.fnmatch(m, head_item) ]
if head_item not in cache_tree.keys():
# print "A"
return [] # Empty List to signify we're done here
elif len(list_of_names) == 1:
# print "B"
return cache_tree[head_item].keys()
else:
# print "C"
tail_list = list_of_names[1:]
return _match_in_cache(cache_tree[head_item], tail_list)
|
e559125f4b62a87956e0d30f5d8c3e4b2c7abef8
| 26,956
|
import requests
def ping(url: str) -> requests.Response:
    """
    Ping the server at the base ``url`` with an HTTP GET to make sure it is
    available.

    :param url: base URL of the server to check
    :return: the ``requests.Response`` from the GET; no error handling is
        done here, so connection errors and timeouts propagate to the caller.
    """
    return requests.get(url)
|
a01cef7755a51ac8e235d5b22930b066cc83a08e
| 26,957
|
def range_intersect(a, b, extend=0):
    """
    Return the intersection between two ranges as [lo, hi], or None when the
    (optionally extended) ranges do not overlap.  Endpoint order within each
    input pair does not matter.

    >>> range_intersect((30, 45), (55, 65))
    >>> range_intersect((48, 65), (45, 55))
    [48, 55]
    """
    lo_a, hi_a = min(a), max(a)
    lo_b, hi_b = min(b), max(b)
    # disjoint even after allowing `extend` of slack on each side
    if hi_a + extend < lo_b or hi_b + extend < lo_a:
        return None
    lo_i = max(lo_a, lo_b)
    hi_i = min(hi_a, hi_b)
    if lo_i > hi_i + extend:
        return None
    return [lo_i, hi_i]
|
9184172de3921cfb74bccaeee9b15405045a1327
| 26,958
|
import codecs
def rot13(s: str) -> str:
    """Apply the ROT-13 substitution cipher to *s*.

    Not cryptographically useful — serves as an example transformation.
    ROT-13 is its own inverse, so decode and encode are interchangeable.
    """
    return codecs.decode(s, "rot_13")
|
a2413c3bd835dc09b445eb030aa6179e14411e17
| 26,960
|
import sys
def bbox_from_polygon(polygon):
    """Construct a (minx, miny, maxx, maxy) bounding box from polygon
    coordinates given as a sequence of (x, y)-indexable points.

    An empty polygon yields the degenerate sentinel box
    (maxsize, maxsize, -maxsize, -maxsize), as before.
    """
    minx = miny = sys.maxsize
    maxx = maxy = -sys.maxsize
    for point in polygon:
        minx = min(minx, point[0])
        maxx = max(maxx, point[0])
        miny = min(miny, point[1])
        maxy = max(maxy, point[1])
    return minx, miny, maxx, maxy
|
0518faeb45ea99834528072930829a5860576b03
| 26,962
|
from typing import Any
def is_numeric_scalar(x: Any) -> bool:
    """
    Return whether the given item is numeric: an int/float, or a string
    that parses as a float.

    >>> is_numeric_scalar("hello")
    False
    >>> is_numeric_scalar("234")
    True
    >>> is_numeric_scalar("1e-5")
    True
    >>> is_numeric_scalar(2.5)
    True
    """
    if isinstance(x, (int, float)):
        return True
    if not isinstance(x, str):
        return False
    try:
        float(x)
    except ValueError:
        return False
    return True
|
7a0b8bd35be91b4ad6bda49d6a3543c1ca8f5ac7
| 26,963
|
def is_file_image(filename):
    """Return whether a file's extension is an image extension.

    Args:
        filename (str): file path.
    Returns:
        bool: True if the extension is a known image type and the name is
        not a hidden/dot file.
    """
    image_extensions = ('jpg', 'png', 'bmp', 'jpeg', 'tiff')
    if filename.startswith('.') or '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[-1]
    return extension.lower() in image_extensions
|
68df1997c4874cd990ca24739c91555378fc73fd
| 26,964
|
def get_coords(rect):
    """Take an np.array of rectangle corner coordinates (N x 2) and return
    (min width, max width, min height, max height), i.e. the extremes of
    column 0 and column 1 respectively.
    """
    widths = rect[:, 0]
    heights = rect[:, 1]
    return (min(widths), max(widths), min(heights), max(heights))
|
021b718fbfc0c1c0cdd46e116972b647c780caff
| 26,966
|
def _get_jgroup(grouped_data):
"""Get the JVM object that backs this grouped data, taking into account the different
spark versions."""
d = dir(grouped_data)
if '_jdf' in d:
return grouped_data._jdf
if '_jgd' in d:
return grouped_data._jgd
raise ValueError('Could not find a dataframe for {}. All methods: {}'.format(grouped_data, d))
|
99b2ac71cd6d0ccf98976df43966ccabf14bcb98
| 26,967
|
def parse_fasta (lines):
    """Return 2 lists: descriptions and sequences, parsed from FASTA lines.

    Bug fixes vs. the original:
      * empty input returned ([], ['']) — now returns ([], []);
      * two consecutive '>' lines (an empty record) desynchronised the
        descs/seqs pairing because empty sequences were skipped — now every
        description gets a (possibly empty) sequence.
    Sequence lines accumulate with line endings stripped; description text
    is taken verbatim after the '>' (as before, no newline stripping).
    """
    descs, seqs, data = [], [], ''
    for line in lines:
        if line.startswith('>'):
            if descs:
                # close the previous record, even if its sequence is empty
                seqs.append(data)
            data = ''
            descs.append(line[1:])  # trim '>' from beginning
        else:
            data += line.rstrip('\r\n')
    if descs:
        # push the final record's sequence
        seqs.append(data)
    return descs, seqs
|
4dea579720f43f348389e53751c0308f1c493296
| 26,968
|
def format_response(event):
    """Determine what response text to provide based upon event data.

    Args:
        event: A dictionary with the event data (Hangouts Chat style).
    Returns:
        dict with a single 'text' key; empty text for unrecognised events.
    """
    event_type = event['type']
    text = ""
    if event_type == 'ADDED_TO_SPACE':
        space = event['space']
        if space['type'] == 'ROOM':
            # bot added to a room
            text = 'Thanks for adding me to "%s"!' % space['displayName']
        elif space['type'] == 'DM':
            # bot added to a direct message
            text = 'Thanks for adding me to a DM, %s!' % event['user']['displayName']
    elif event_type == 'MESSAGE':
        text = 'Your message: "%s"' % event['message']['text']
    return {'text': text}
|
b0ee2412ac35398fb3481e06ecd1c044742f0c50
| 26,969
|
def look_for_obj_by_att_val(my_obj_list, my_att, my_value):
    """Search *my_obj_list* for the first object whose attribute *my_att*
    equals *my_value*; return it, or None if no object matches."""
    for candidate in my_obj_list:
        if my_att in dir(candidate) and getattr(candidate, my_att) == my_value:
            return candidate
    return None
|
60ec811148260b490a36976234c6c3163d0d12b3
| 26,970
|
def weighted_soft_dice_loss(probs, labels, weights):
    """
    Soft Dice loss with per-pixel (squared) weights, averaged over the batch.

    Args:
        probs: [B, 1, H, W] predicted probabilities
        labels: [B, 1, H, W] ground-truth masks
        weights: [B, 1, H, W] per-pixel weights
    Returns:
        scalar tensor: 1 - mean weighted dice score
    """
    batch = labels.size(0)
    smooth = 1.0
    flat_w = weights.view(batch, -1)
    sq_w = flat_w * flat_w
    flat_p = probs.view(batch, -1)
    flat_l = labels.view(batch, -1)
    overlap = flat_p * flat_l
    dice = 2.0 * ((sq_w * overlap).sum(1) + smooth) / (
        (sq_w * flat_p).sum(1) + (sq_w * flat_l).sum(1) + smooth)
    return 1 - dice.sum() / batch
|
064b1ea07adcd11b39a0687d738b3e62d6c1f500
| 26,972
|
def is_bool(obj):
    """Return ``True`` if `obj` is a ``bool`` (i.e. exactly True or False;
    bool cannot be subclassed, so identity covers all instances)."""
    return obj is True or obj is False
|
6b63b9479c8a388a056c80cc62be74c548ac3504
| 26,974
|
def files_are_identical(pathA, pathB):
    """
    Compare two files (pathlib.Path-like, must support read_bytes), ignoring
    carriage returns plus leading and trailing whitespace on each line.
    """
    def normalised_lines(path):
        # byte-wise compare; strip() removes \r and surrounding whitespace
        return [line.strip() for line in path.read_bytes().splitlines()]
    return normalised_lines(pathA) == normalised_lines(pathB)
|
60eeba55aa443577e98c45a46c10c1dc585e6c9f
| 26,975
|
import uuid
def _maybe_convert(value):
    """Possibly convert the value to a :py:class:`uuid.UUID`; otherwise just
    return the value unchanged.

    NOTE(review): the original docstring also promised
    :py:class:`datetime.datetime` conversion, but no datetime parsing is
    implemented here — confirm whether that was intended before relying on it.

    :param str value: The value to convert
    :rtype: uuid.UUID|str
    """
    try:
        return uuid.UUID(value)
    except ValueError:
        # not a valid UUID string: pass it through untouched
        return value
|
66e70d33bd9b09c19c762ca66d4f50638a225821
| 26,976
|
from typing import List
def _poker_convert_shape(data: List[str]) -> list:
"""Converts card icons into shapes"""
return [row.replace("â£", " Clubs").replace("â¦", " Diamonds").replace("â¥", " Hearts").replace("â", " Spades") for row in data]
|
4942fbf1551f2fa2dfdcefddf6977c706393b0ee
| 26,977
|
import pickle
def load(name: str) -> object:
    """Load an object from a file using pickle.

    Args:
        name: path of the file to load.
    Returns:
        The unpickled object.

    NOTE: pickle is unsafe on untrusted input; only load trusted files.
    """
    with open(name, "rb") as fh:
        return pickle.load(fh)
|
b8e11b703bea907386e2248c313c390429453a99
| 26,978
|
def decodeAny(string):
    """Try to decode the string from UTF-8, falling back to latin9.

    str input is returned unchanged; bytes are decoded as UTF-8 first and,
    on failure, as latin9 — which maps every byte to a character and
    therefore never fails.
    """
    if isinstance(string, str):
        return string
    for encoding in ('utf-8', 'latin9'):
        try:
            return string.decode(encoding)
        except UnicodeError:
            continue
|
4acd087710118e72d8e142a230b36c57a5b70253
| 26,979
|
def clean_text(review, model):
    """Preprocess a string with spaCy (drop stopwords/non-alpha, lowercase
    lemmas) and return the cleaned result.

    params: review - a string
            model - a spacy model
    returns: str - the kept lemmas, each prefixed with a single space
             (note: the result starts with a space when non-empty)
    """
    doc = model(review)
    kept = [tok.lemma_.lower() for tok in doc if not tok.is_stop and tok.is_alpha]
    # each token is prefixed with a space, matching the original accumulator
    return ''.join(' ' + lemma for lemma in kept)
|
a315f4d5bc581dce915f509023c65bba46f718a7
| 26,980
|
def regularize(np_arr):
    """
    Project pixel values from [0, 255] into [0, 1].

    :type np_arr: numpy array
    :rtype: numpy array of the same shape, divided by 255
    """
    max_pixel_value = 255
    return np_arr / max_pixel_value
|
ba9f3f64f4d1d02b357d296299ee0635b2c93196
| 26,981
|
import pkg_resources
def python_package_version(package: str) -> str:
    """
    Determine the version of an installed Python package.

    :param package: package (distribution) name
    :return: version number string if installed, else ''
    """
    # NOTE(review): pkg_resources is deprecated in modern setuptools;
    # importlib.metadata.version() is the stdlib replacement — confirm the
    # minimum supported Python version before migrating.
    try:
        return pkg_resources.get_distribution(package).version
    except pkg_resources.DistributionNotFound:
        return ''
|
53f8de290418b1d64355f44efde162bed3a36b45
| 26,982
|
def set_extension(path, ext):
    """Set the extension of a file.

    Given the path to a file and the desired extension, replace the file's
    current extension with the given one, or append it if the file has none.

    Args:
        path: path to a file
        ext: desired extension (leading dots are ignored)
    Return:
        path with the correct extension

    Bug fixes: the original used ``str.replace`` which substituted EVERY
    occurrence of the old extension anywhere in the path, and only handled
    names with exactly one dot (``a.tar.gz`` got the new extension appended
    instead of substituted).
    """
    import os.path
    ext = ext.replace(".", "")
    root, old_ext = os.path.splitext(path)
    if old_ext:
        # swap only the final extension
        return root + "." + ext
    return path + "." + ext
|
5764e085a204aa209f085dbeb8d043c710734e11
| 26,983
|
import re
def get_select_cols_and_rest(query):
    """
    Separate the list of selected columns from the rest of the query.

    Returns:
        1. a list of the selected column names (stripped)
        2. the remainder of the query starting at FROM
    """
    from_loc = query.lower().find("from")
    select_clause = query[:from_loc].rstrip()
    rest_of_query = query[from_loc:]
    # drop the SELECT keyword (case-insensitive) from the clause
    select_clause = re.compile("select", re.IGNORECASE).sub('', select_clause)
    selected_columns = [col.strip() for col in select_clause.split(',')]
    return selected_columns, rest_of_query
|
33b397e7894792c0b354c41f219c609dc4df5927
| 26,984
|
import os
def check_env_variable_values(environment_variable):
    """
    Return the value of an environment variable, exiting if unset or empty.

    Parameters
    ----------
    environment_variable: str
        Name of the environment variable to read.

    Returns
    -------
    str
        The non-empty value.

    Raises
    ------
    SystemExit
        When the variable is unset or its value is the empty string.
    """
    value = os.getenv(environment_variable)
    if not value:
        print("\n---- Environment Variable Value Error -----",
              f"'{environment_variable}' has value '{value}'",
              sep="\n")
        raise SystemExit
    return value
|
3122ef5b763baa18094349bdc145dfcd65bdaeea
| 26,987
|
def extract_ner(labels, masks=None):
    """
    Extract entities from a sentence's tag sequence (BIOES/BMES-style labels)
    and return them as a list of strings of the form "TYPE[start-end]".

    labels: list of tag strings such as "B-PER", "I-PER", "E-PER", "S-LOC",
        "O", "[CLS]", "[SEP]", "[PAD]"
    masks: optional list of 0/1; processing stops at the first 0
    return: entities list, e.g. ["PER[1-3]", "LOC[5-5]"]
    """
    if masks is None:
        # no mask supplied: treat every position as valid
        masks = [1] * len(labels)
    entities = []
    cur_entity = ""
    for i, (label, mask) in enumerate(zip(labels, masks)):
        if mask == 0:
            # padding reached: nothing meaningful beyond this point
            break
        if (label == "[CLS]") or (label == "[SEP]"):
            # special tokens always terminate any entity in progress
            cur_entity = ""
        elif label.startswith("B-"):
            # begin a new entity: remember its type and start index
            label = label[2:]
            cur_entity = label + "[" + str(i)
        elif label.startswith("I-"):
            label = label[2:]
            # inside-tag of a different type than the open entity: discard
            if not cur_entity.startswith(label):
                cur_entity = ""
        elif label.startswith("M-"):
            # BMES "middle" tag — treated exactly like "I-"
            label = label[2:]
            if not cur_entity.startswith(label):
                cur_entity = ""
        elif label.startswith("E-"):
            # end tag closes the entity only when the type matches
            label = label[2:]
            if cur_entity.startswith(label):
                cur_entity = cur_entity + '-' + str(i) + ']'
                entities.append(cur_entity)
                cur_entity = ""
        elif label.startswith("O"):
            cur_entity = ""
        elif label.startswith("S-"):
            # single-token entity: emit immediately with start == end
            label = label[2:]
            cur_entity = label + "[" + str(i) + '-' + str(i) + "]"
            entities.append(cur_entity)
            cur_entity = ""
        elif label == "[PAD]":
            cur_entity = ""
        else:
            # unknown tag: surface it for debugging before failing
            print(label, mask)
            raise Exception
    return entities
|
bafdfd35f7714a4d5ad9e34fd3d5d7b5a2747b7b
| 26,988
|
import logging
import argparse
def logging_level(string):
    """Convert a string (numeric or level name) to a logging level int.

    Intended for use as an argparse ``type=``; raises ArgumentTypeError on
    unrecognised names so argparse reports a clean error.
    """
    if string.isdigit():
        return int(string)
    candidate = getattr(logging, string.upper(), None)
    if isinstance(candidate, int):
        return candidate
    raise argparse.ArgumentTypeError("invalid log level {}".format(string))
|
daf2aa631508f13d1684e76939e052e8faf42486
| 26,989
|
def getTableHTML(rows):
    """ Binds rows items into HTML table code.

    :param list rows: an array of row items
    :return: HTML table code with items at rows, or '' for no rows
    :rtype: str

    Bug fix: the original closed the table with a second opening "<table>"
    tag instead of "</table>" (its doctest had captured the buggy output).

    >>> getTableHTML([["Name", "Radek"], ["Street", "Chvalinska"]])
    '<table><tr><td>Name</td><td>Radek</td></tr><tr><td>Street</td><td>Chvalinska</td></tr></table>'
    """
    if not rows:
        return ""
    pieces = []
    for row in rows:
        pieces.append("<tr>")
        pieces.extend("<td>%s</td>" % item for item in row)
        pieces.append("</tr>")
    return "<table>" + "".join(pieces) + "</table>"
|
931c8af65052c6a4455feae7f2f3a0f65d33a347
| 26,990
|
def build_complement(c):
    """
    Build the DNA complement of the input strand (A<->T, C<->G), uppercased.

    :param c: str consisting of 'A','T','G','C' in either case
    :return: the complementary strand as a string (non-ATCG characters
        contribute nothing, as in the original)
    """
    c = c.upper()
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    total = ''.join(pairs.get(base, '') for base in c)
    print('The complement of', c, 'is', total)
    return total
|
6fece325432ac061d5f7577c85cb59c19918ed83
| 26,992
|
import argparse
import logging
def arg_parse_params():
    """ Parse this script's command-line parameters.

    :return dict: parsed parameters ('path_out' and 'nb_runs')
    """
    # SEE: https://docs.python.org/3/library/argparse.html
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--path_out', type=str, required=False,
                        default='', help='path to the output folder')
    parser.add_argument('-n', '--nb_runs', type=int, required=False,
                        default=5, help='number of run experiments')
    params = vars(parser.parse_args())
    logging.info('ARGUMENTS: \n%r' % params)
    return params
|
4e65925f5d788011a42fdee646217509b2626e6d
| 26,993
|
def normalise_reward(reward, prev_lives, curr_lives):
    """
    Identity pass-through for Pong: the raw reward already encodes the game
    state (-1 opponent scored, 0 in play, 1 player scored), so no
    normalisation is required.  prev_lives/curr_lives are accepted for
    interface compatibility with games that do need them.
    """
    return reward
|
ff3c537ac6090692a05c7ee8c6f5bde890ed4d40
| 26,996
|
import torch
def _make_input(t, requires_grad=False, device=torch.device('cpu')):
"""Make zero inputs for AE loss.
Args:
t (torch.Tensor): input
requires_grad (bool): Option to use requires_grad.
device: torch device
Returns:
torch.Tensor: zero input.
"""
inp = torch.autograd.Variable(t, requires_grad=requires_grad)
inp = inp.sum()
inp = inp.to(device)
return inp
|
c65c4ba243d786471b810d48af2251fff94c5d5f
| 26,997
|
import os
def envor(envkey: str, default: str) -> str:
    """Return an environment variable's value, or a default.

    Parameters:
        envkey (str): An environment variable name.
        default (str): Returned when the variable is unset or empty.

    Returns:
        str: The environment variable's value if defined and non-empty,
        else the default.
    """
    value = os.environ.get(envkey, '')
    return value if value != '' else default
|
35d5f74a2417ff134d331eebedc63d121ceffdfc
| 26,998
|
import torch
def bce_loss(pred, target, false_pos_weight=1.0, false_neg_weight=1.0):
    """Binary cross-entropy with independent penalties for false positives
    and false negatives.

    The target*log(pred) term penalises false negatives (target=1,
    pred small); the (1-target)*log(1-pred) term penalises false positives.
    Predictions are clipped at the dtype's tiny value to avoid log(0).
    """
    scale = torch.tensor(1.0 / float(len(pred)))
    eps = torch.finfo(pred.dtype).tiny
    fn_term = torch.sum(target * torch.log(
        torch.clip(pred, min=eps)  # prevent -inf
    ))
    fp_term = torch.sum(
        (torch.tensor(1.0) - target)
        * torch.log(torch.clip(torch.tensor(1.0) - pred, min=eps))
    )
    return -scale * (false_pos_weight * fp_term
                     + false_neg_weight * fn_term)
|
b9ed60d886212bc821fed3f21c4e58e6953b1e02
| 26,999
|
def bernoulli_kl(q_probs, p_probs):
    """
    computes the KL divergence per batch between two Bernoulli distributions
    parametrized by q_probs and p_probs

    KL(q||p) = q1*log(q1/p1) + q0*log(q0/p0), computed elementwise.

    Note: EPS is added for numerical stability, see
    https://github.com/pytorch/pytorch/issues/15288

    Args:
        q_probs (torch tensor): mean of q [batch_size, *latent_dims]
        p_probs (torch tensor): mean of p [batch_size, *latent_dims]
    Returns:
        kl_div (torch tensor): kl divergence [batch_size, *latent_dims]
    """
    EPS = 1e-32
    p1 = p_probs
    p0 = 1 - p1
    q1 = q_probs
    q0 = 1 - q1
    # EPS guards only the q-side logs; a p_probs of exactly 0 or 1 still
    # produces -inf below — NOTE(review): confirm callers never pass
    # saturated p, or whether the asymmetry is intentional.
    logq1 = (q1 + EPS).log()
    logq0 = (q0 + EPS).log()
    logp1 = (p1).log()
    logp0 = (p0).log()
    kl_div_1 = q1 * (logq1 - logp1)
    kl_div_0 = q0 * (logq0 - logp0)
    return kl_div_1 + kl_div_0
|
52253d4cd07b6ceb5081236ea5167b87b754c2fc
| 27,001
|
def formatGpuCount(gpuCount):
    """
    Convert the GPU count string from the Slurm DB to an int.

    Slurm stores a string of the form "gpu:1" when a GPU was requested and
    an empty string otherwise; the "0" prefix keeps int() safe when the part
    after the colon is empty.
    """
    if not gpuCount:
        return 0
    return int("0" + gpuCount.split(":")[1])
|
bb15f87da94d5994c9a08ff0614f5381eb3265de
| 27,002
|
import argparse
def default_pseudo_arg_parser():
    """
    Create the default ArgumentParser for the pseudo device.

    :rtype: argparse.ArgumentParser
    :return: parser accepting -p/--port (server port, default 9090) and
        -d/--dataport (emulation data port, default 24242).
    """
    parser = argparse.ArgumentParser(description="pseudo device main.")
    parser.add_argument("-p", "--port", type=int, default=9090,
                        help="server port number")
    parser.add_argument("-d", "--dataport", type=int, default=24242,
                        help="emulation data port number")
    return parser
|
98df8a7092d86c464636ffebdc5b1f233d9872b8
| 27,003
|
from typing import MutableMapping
from typing import Any
def to_nested_dict(d: dict):
    """
    Expand a flat dictionary with '.'-joined keys back into a nested
    dictionary, e.g. {"a.b": 1} -> {"a": {"b": 1}}.
    """
    nested: MutableMapping[str, Any] = {}
    for key, value in d.items():
        *parents, leaf = key.split(".")
        target = nested
        # walk/create intermediate dicts for all parent components
        for part in parents:
            target = target.setdefault(part, {})
        target[leaf] = value
    return nested
|
18230ecf0798ee5f2c9b74dccfb90b28547354e8
| 27,004
|
import os
def name(path: str) -> str:
    """
    Return the base file name from *path*, without its final extension.
    A dotfile like '.bashrc' yields '' (everything after the last dot is
    treated as the extension, as before).
    """
    base = os.path.basename(path)
    if "." in base:
        base = base[:base.rindex(".")]
    return base
|
07e067b984de39df3996bf2d403c3435af806137
| 27,005
|
from typing import Any
def validate_boolean(var: Any) -> bool:
    """Evaluate whether an argument is boolean True/False.

    Args:
        var: the input argument to validate
    Returns:
        var: the value if it passes validation
    Raises:
        AssertionError: `var` was not boolean

    Robustness fix: the original used a bare ``assert``, which is stripped
    when Python runs with -O; raising explicitly keeps the same exception
    type and message under all interpreter modes.
    """
    if not isinstance(var, bool):
        raise AssertionError("Argument must be True/False")
    return var
|
67d8751a60fa6621f0fb366ab44cd017dfe3bd8c
| 27,008
|
def force_slash(x):
    """Convert every backslash in *x* to a forward slash."""
    return '/'.join(x.split('\\'))
|
a7d9faafa61b5eaa09171e960c8f2498e03bce2e
| 27,009
|
def simplify_int_labels(int_labels, threshold=5):
    """
    Given a sequence of integer labels, find the unique non-zero labels that
    repeat consecutively at least *threshold* times, in order of occurrence.
    For example:
        [0,10,10,10,10,10,10,0,12,12,12,12,12] -> [10, 12]

    Args:
        int_labels: sequence of int labels (0 acts as a separator).
        threshold: number of consecutive occurrences before a label counts.
    Returns:
        list of selected labels.

    Bug fix: a qualifying run that extended to the very end of the sequence
    was never emitted (the docstring example used to return [10] only);
    the post-loop flush below handles the trailing run.
    """
    out = []
    num_occur = 0
    for idx in range(len(int_labels) - 1):
        if (int_labels[idx] == int_labels[idx + 1]) and (int_labels[idx] != 0):
            num_occur += 1
        elif (int_labels[idx] != int_labels[idx + 1]) and (num_occur >= threshold - 1):
            out.append(int_labels[idx])
            num_occur = 0
        else:
            num_occur = 0
    # flush a qualifying run that reaches the end of the sequence
    if int_labels and num_occur >= threshold - 1:
        out.append(int_labels[-1])
    return out
|
0888c632e443e51e4a697450622c0c8237b6ba84
| 27,010
|
import re
import sys
def get_region_tag(sample_file_path):
    """Extract the single region tag from the given sample file.

    Exits with an error when the number of (non *_core) region tags that
    have both a [START ...] and matching [END ...] marker is not exactly one.
    """
    start_pattern = r'\[START ([a-zA-Z0-9_]*)\]'
    end_pattern = r'\[END ([a-zA-Z0-9_]*)\]'
    with open(sample_file_path) as sample:
        sample_text = sample.read()
    start_tags = re.findall(start_pattern, sample_text)
    end_tags = re.findall(end_pattern, sample_text)
    # ignore tags containing 'core'; require a matching END for each START
    region_tags = [tag for tag in start_tags
                   if 'core' not in tag and tag in end_tags]
    if not region_tags:
        sys.exit("Found no region tags.")
    if len(region_tags) > 1:
        sys.exit("Found too many region tags.")
    return region_tags[0]
|
3bcd70474b3499c070731d39689fe90ccedf4946
| 27,011
|
import functools
def or_none(fn):
    """
    Wraps `fn` to return `None` if its first argument is `None`.

    >>> @or_none
    ... def myfunc(x):
    ...     return 2 * x + 3
    >>> myfunc(4)
    11
    >>> myfunc(None)
    """
    @functools.wraps(fn)
    def wrapped(first, *rest, **kw):
        if first is None:
            return None
        return fn(first, *rest, **kw)
    return wrapped
|
54346619090c1aab12498297a31bb017d85377f5
| 27,012
|
import os
def load_dataset(filename, data_path="data"):
    """ Load the full dataset (all tasks).

    Sentences are separated by blank lines; each non-blank line is split on
    whitespace into a token row.  NOTE: a final sentence with no trailing
    blank line is dropped, matching the original behaviour.
    """
    sentences = []
    sentence = []
    with open(os.path.join(data_path, filename), 'r') as fh:
        for line in fh:
            if not line.strip():
                # blank line: end of the current sentence
                sentences.append(sentence)
                sentence = []
            else:
                sentence.append(line.split())
    return sentences
|
1ed50c971f1b50f67fbbdae97df9fa99f8f85493
| 27,014
|
def start_connection(puzzle, length):
    """Make connection between words.

    Builds a (length**2) x (length**2) boolean adjacency matrix for the flat
    *puzzle* string (length x length grid): connection[i][j] is True when
    cell j is one of the up-to-8 neighbours of cell i and neither cell holds
    the character '0'.

    NOTE(review): ``col, row = i // length, i % length`` names i//length
    "col" — in a row-major layout that is normally the row; the index
    arithmetic is self-consistent either way, but confirm the intended
    orientation before modifying.
    """
    # start with a fully-disconnected matrix
    connection = [0] * (length ** 2)
    for i in range(length ** 2):
        connection[i] = [False] * (length ** 2)
    for i, j in enumerate(puzzle):
        # '0' cells are holes: no edges in or out
        if j == '0':
            continue
        col, row = i // length, i % length
        # each guarded assignment links one of the 8 neighbours, with the
        # boundary checks preventing wrap-around at the grid edges
        if row != 0:
            connection[i][i - 1] = True and puzzle[i - 1] != '0'
        if col != 0:
            connection[i][i - length -
                          1] = True and puzzle[i - length - 1] != '0'
        if col != length - 1:
            connection[i][i + length -
                          1] = True and puzzle[i + length - 1] != '0'
        if row != length - 1:
            connection[i][i + 1] = True and puzzle[i + 1] != '0'
        if col != 0:
            connection[i][i - length +
                          1] = True and puzzle[i - length + 1] != '0'
        if col != length - 1:
            connection[i][i + length +
                          1] = True and puzzle[i + length + 1] != '0'
        if col != 0:
            connection[i][i - length] = True and puzzle[i - length] != '0'
        if col != length - 1:
            connection[i][i + length] = True and puzzle[i + length] != '0'
    return connection
|
c3b42d341f1f4eb08bc83fb2dabd88d16837ee8e
| 27,016
|
import os
def needbinarypatch():
    """return True if patches should be applied in binary mode by default.

    True only on Windows (os.name == 'nt'), where text-mode line-ending
    translation would corrupt patched files.
    """
    return os.name == 'nt'
|
b7c3d8f9455620c3a14b51fe77b51db9e655bf29
| 27,017
|
def get_segment_output_path(output_path, muxing_output_path):
    """
    Return the output path for a stream segment: the muxing output path
    relative to *output_path* when it is a prefix, else the muxing output
    path unchanged.
    """
    if muxing_output_path.startswith(output_path):
        # strip the common prefix to get the segment-relative path
        return muxing_output_path[len(output_path):]
    return muxing_output_path
|
7c35c5becf6b6eb4f20399f6fa5b3367bce5af79
| 27,018
|
def read_flat_parameters(fname):
    """Read flat channel rejection parameters from a .cov or .ave config file.

    Recognised entries (gradFlat, magFlat, eegFlat, eogFlat, ecgFlat) are
    returned as a dict mapping the channel type ('grad', 'mag', 'eeg',
    'eog', 'ecg') to the threshold as a float.

    Raises ValueError when the file cannot be read.

    Fixes vs. the original: the bare ``except:`` (which swallowed even
    KeyboardInterrupt) is narrowed to OSError, and blank lines no longer
    crash on ``words[0]``.
    """
    try:
        with open(fname, 'r') as f:
            lines = f.readlines()
    except OSError:
        raise ValueError("Error while reading %s" % fname)
    reject_names = ['gradFlat', 'magFlat', 'eegFlat', 'eogFlat', 'ecgFlat']
    reject_pynames = ['grad', 'mag', 'eeg', 'eog', 'ecg']
    flat = dict()
    for line in lines:
        words = line.split()
        if not words:
            # skip blank lines (the original raised IndexError here)
            continue
        if words[0] in reject_names:
            flat[reject_pynames[reject_names.index(words[0])]] = \
                float(words[1])
    return flat
|
34ca5d9f4946d8ee087126b7932596949081b0c3
| 27,019
|
import re
import sys
def expand_rows( string_in, element_coordinates = [ None, None ] ):
"""
Expand a P7 row specification to a list of rows.
Acceptable row specifications include
o single row
o B
o row range
o E-G
o single and/or ranges of rows separated by commas
o E-F,H
"""
string_in = re.sub( r'\s', '', string_in )
row_list = []
for row_range in string_in.split( ',' ):
mobj = re.match( r'([a-hA-H])([-:]([a-hA-H]))?$', row_range )
if( not mobj ):
print( 'Error: spreadsheet cell: %s%s: bad row or row range \'%s\'' % ( element_coordinates[0], element_coordinates[1], row_range ), file=sys.stderr )
sys.exit( -1 )
icode_row1 = ord(mobj.group( 1 ))
icode_row2 = icode_row1
if( mobj.group( 2 ) ):
icode_row2 = ord(mobj.group( 3 ))
if( icode_row2 < icode_row1 ):
print( 'Error: spreadsheet cell: %s%s: bad row range: \'%\'' % ( element_coordinates[0], element_coordinates[1], string_in ), file=sys.stderr )
sys.exit( -1 )
for icode in range(icode_row1, icode_row2+1):
row_list.append(chr(icode))
return(row_list)
|
d2325300ecc5889226bfe7e80cca8d2b789a99dd
| 27,020
|
def get_rectangle_coordinates(win):
    """
    Wait for two mouse clicks in the window and return the clicked points,
    which define two opposite corners of a rectangle.
    """
    first_corner = win.getMouse()
    second_corner = win.getMouse()
    return first_corner, second_corner
|
75a605ea628c8d9c6817bf727de3542894ac555f
| 27,021
|
import calendar
def get_days_of_month(year, month):
    """Return the number of days in the given month of the given year."""
    _, day_count = calendar.monthrange(year, month)
    return day_count
|
2606f3b93b1d94ba3668d3a979f6d84a96e6ac00
| 27,022
|
def identifyL23(addition):
    """Check if it is an L2 or L3 delta request.

    A delta carrying a 'routes' key is treated as L3; anything else as L2.
    (Membership test done directly on the mapping instead of materializing
    ``list(addition.keys())``.)
    """
    return 'L3' if 'routes' in addition else 'L2'
|
63c47a0de8142ddae8559d2e4cc236e2f5e972fd
| 27,024
|
def find_border_crossing(subset, path, final_state):
    """
    Find the transition that steps outside the safe L{subset}.
    @param subset: Set of states that are safe.
    @type  subset: C{set} of L{State}
    @param path: Path from starting state to L{final_state}, sequence of state
        and its outgoing edge.
    @type  path: C{list} of pairs (L{State}, L{Event})
    @param final_state: Final state of the path.
    @type  final_state: L{State}
    @return: C{None} if the path stays within the safe subset, else a triplet
        (from-state, event, dest-state) for the first crossing edge.
    @rtype:  Either C{None} or (L{State}, L{Event}, L{State})
    @precond: Initial state of the path must be in L{subset}
    """
    if len(path) == 0:
        # No path => final_state is also the first state => must be safe
        assert final_state in subset
        return None
    assert path[0][0] in subset
    pred_state, pred_evt = None, None
    for state, evt in path:
        # pred_state is None only for the first entry, which the
        # precondition guarantees to be inside the subset, so it can
        # never be the destination of a crossing.
        if pred_state is not None and state not in subset:
            # Tests may hold for the second or further entry of the path
            return (pred_state, pred_evt, state)
        pred_state, pred_evt = state, evt
    # The loop only inspects states stored inside `path`; final_state is
    # the destination of the last edge and must be checked separately.
    if final_state in subset:
        return None
    return (pred_state, pred_evt, final_state)
|
8cf7953897066af2453c4bb428fb01326a4aa25f
| 27,026
|
def randomize(df, seed):
    """Return *df* with its rows shuffled deterministically by *seed*."""
    shuffled = df.sample(len(df), random_state=seed)
    return shuffled.reset_index(drop=True)
|
deb6f1327b993a5a39255bdf2a2b22bc61cdd0a3
| 27,028
|
def clean(df, verbose=False):
    """Clean the data table: blank out URLs that are not PDFs hosted on the
    city (marseille.fr) website.

    :param df: DataFrame with a string ``url`` column (modified in place)
    :param verbose: if True, print the URLs being removed
    :return: the same DataFrame, for chaining
    """
    # Drop URLs that do not point to the city website.
    # NOTE(review): str.contains treats the pattern as a regex, so the '.'
    # in "marseille.fr" matches any character — confirm this looseness is
    # intended before tightening it.
    not_city = ~df["url"].str.contains("marseille.fr")
    if verbose:
        print("Suppression des URLs : pas sur le site de la ville")
        print(df.loc[not_city, "url"])
    df.loc[not_city, "url"] = ""
    # Drop URLs that are not PDFs (mask computed once instead of twice;
    # already-blanked entries do not end with ".pdf", so re-blanking them
    # is a no-op, same as the original behavior).
    not_pdf = ~df["url"].str.endswith(".pdf")
    if verbose:
        print("Suppression des URLs : pas des PDF")
        print(df.loc[not_pdf, "url"])
    df.loc[not_pdf, "url"] = ""
    return df
|
92182cdd84424c11ca318131ef92eac03fe2b242
| 27,030
|
import re
def count_expr_arity(str_desc):
    """ Determine the arity of an expression directly from its str
        descriptor.

        Extracts all words from the string and counts the unique
        occurrences of the variable names "x", "y", "z" or "t".
        (Raw string literals fix the invalid '\\w' escape warning of the
        original.)
    """
    return len(set(var for var in re.findall(r"\w+", str_desc)
                   if re.fullmatch(r"[xyzt]", var)))
|
6e94b6da17b90de4b6b4734add65311842af467d
| 27,031
|
from numpy import asarray, mean
def mean_reciprocal_rank(rs):
    """Mean of the reciprocal ranks of the first relevant item.

    The first element is 'rank 1'. Relevance is binary (nonzero is
    relevant); a row with no relevant item contributes 0.
    Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank

    >>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
    >>> mean_reciprocal_rank(rs)
    0.61111111111111105
    >>> rs = asarray([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
    >>> mean_reciprocal_rank(rs)
    0.5
    >>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
    >>> mean_reciprocal_rank(rs)
    0.75

    Args:
        rs: Iterator of relevance scores (list or numpy) in rank order
            (first element is the first item)
    Returns:
        Mean reciprocal rank
    """
    reciprocal_ranks = []
    for scores in rs:
        hits = asarray(scores).nonzero()[0]
        reciprocal_ranks.append(1. / (hits[0] + 1) if hits.size else 0.)
    return mean(reciprocal_ranks)
|
60cdba9ff4c75aa96931f3368cc7fcfcb33a785a
| 27,032
|
def attrsetter(attr, value):
    """ Return a function that sets ``attr`` on its argument and returns it. """
    def decorate(method):
        setattr(method, attr, value)
        return method
    return decorate
|
355bf2e63051fbcdbc6a881c49ddd9fc404f4b75
| 27,033
|
def get_definition_end_position(self):
    """
    The (row, column) of the end of the definition range. Rows start with
    1, columns start with 0.

    Returns ``None`` for names without a source position (e.g. synthetic
    or compiled names).

    :rtype: Optional[Tuple[int, int]]
    """
    if self._name.tree_name is None:
        # No syntax-tree node backing this name -> no source range.
        return None
    definition = self._name.tree_name.get_definition()
    if definition is None:
        # Fall back to the end of the name token itself.
        return self._name.tree_name.end_pos
    if self.type in ("function", "class"):
        last_leaf = definition.get_last_leaf()
        # Exclude a trailing newline token so the range ends on real code.
        if last_leaf.type == "newline":
            return last_leaf.get_previous_leaf().end_pos
        return last_leaf.end_pos
    return definition.end_pos
|
d7ceff7a03d63f5e96ce0727196f82ba4cadb980
| 27,034
|
import math
def sq_sign(x):
    """
    Square the magnitude of ``x`` while keeping its sign.

    :param x: numeric value
    :return: ``x*x`` carrying the sign of ``x`` (as a float)
    """
    magnitude_squared = x * x
    return math.copysign(magnitude_squared, x)
|
c7ecbd665345be1fa30b0cf84e3692bfe7b139e5
| 27,035
|
from typing import Callable
def call(func: Callable, **kwargs):
    """Configurable version of `func(**kwargs)`: invoke *func* with the
    given keyword arguments and return its result."""
    result = func(**kwargs)
    return result
|
7ec9f1dc47444bbf9f0ecb6bfbe35fe03b6c5b42
| 27,037
|
import torch
def rmspe_loss(targets: torch.Tensor, predictions: torch.Tensor) -> torch.Tensor:
    """Root mean square percentage error between targets and predictions."""
    relative_error = (targets - predictions).float() / targets
    return torch.sqrt(torch.mean(relative_error ** 2))
|
76f94f6b1e21cad495108b1550e9d4afef3aafa6
| 27,039
|
from pkg_resources import EntryPoint
def resolveDotted(dotted_or_ep):
    """ Resolve a dotted name or setuptools entry point to a callable.

    ``dotted_or_ep`` may be a plain dotted path (``pkg.mod:attr`` or
    whatever ``EntryPoint.parse`` accepts after the ``x=`` prefix); the
    referenced object is imported and returned.

    NOTE(review): ``pkg_resources`` is deprecated in modern setuptools —
    consider migrating to ``importlib.metadata`` / ``importlib``.
    """
    return EntryPoint.parse('x=%s' % dotted_or_ep).resolve()
|
504b73a7a3735753db57ae8b86361594b580a3cd
| 27,040
|
def get_request_header(headers):
    """
    Get all headers that should be included in the pre-signed S3 URL. We do not add headers that will be
    applied after transformation, such as Range.
    :param headers: Headers from the GetObject request
    :return: Headers to be sent with pre-signed-url
    """
    # Allow-list of headers that must be part of the signature. A set gives
    # O(1) membership tests; matching stays exact and case-sensitive, as in
    # the original.
    headers_to_be_presigned = {
        'x-amz-checksum-mode', 'x-amz-request-payer',
        'x-amz-expected-bucket-owner', 'If-Match', 'If-Modified-Since',
        'If-None-Match', 'If-Unmodified-Since',
    }
    return {key: value for key, value in headers.items()
            if key in headers_to_be_presigned}
|
97af84f361bc4265d934dbd2cf5398ad4f744e1e
| 27,043
|
def check_game(board, last_move):
    """
    Check if any player has managed to get 3 marks in a row, column or
    diagonally.

    :param {String[]} board : current board with marker locations of both players
    :param {int} last_move : between 1-9 where the most recent marker was put.
    :return {Boolean} : True if player who played "last_move" has won. False otherwise
    """
    win = ["012", "345", "678", "036", "147", "258", "048", "246"]
    # Only lines through the cell that was just played can have been won.
    candidate_lines = [line for line in win if str(last_move) in line]
    mark = board[last_move]
    return any(all(board[int(cell)] == mark for cell in line)
               for line in candidate_lines)
|
59bc4f5e38b469cd49b59b342ca402b50d591948
| 27,045
|
import re
def __fix_tz(text: str) -> str:
    """Overrides certain timezones with more relevant ones"""
    overrides = (
        ("BST", "+0100"),  # British Summer Time
        ("IST", "+0530"),  # Indian Standard Time
    )
    for name, utc_offset in overrides:
        text = re.sub(fr"\b{name}\b", utc_offset, text, flags=re.IGNORECASE)
    return text
|
d31c3ac3899cecc7030583f83b06883ca00222bb
| 27,047
|
def grant_url(focus_area, month):
    """Use the focus area and donation date information to get the original grant URL."""
    # Map focus-area slug to the site's numeric query-parameter code.
    fa_code = {
        "community-development": 13,
        "education-youth": 28,
        "religion": 11,
    }[focus_area]
    # The site expects the date without the century digits (YYMM).
    date_param = month[2:]
    return f"https://lillyendowment.org/for-current-grantees/recent-grants/?fa={fa_code}&date={date_param}"
|
6ae83240afbec368038aba07b3f37d44ea0d1afe
| 27,049
|
def parse_srg(srg_filename):
    """Read a Searge RG (.srg) file and return a dictionary of lists for
    packages ('PK'), classes ('CL'), methods ('MD') and fields ('FD').
    Blank lines and '#' comment lines are skipped."""
    # Field names for each record keyword.
    field_names = {'PK:': ['obf_name', 'deobf_name'],
                   'CL:': ['obf_name', 'deobf_name'],
                   'FD:': ['obf_name', 'deobf_name'],
                   'MD:': ['obf_name', 'obf_desc', 'deobf_name', 'deobf_desc']}
    parsed = {'PK': [], 'CL': [], 'FD': [], 'MD': []}
    with open(srg_filename, 'r') as srg_file:
        for raw_line in srg_file:
            stripped = raw_line.strip()
            if not stripped or stripped.startswith('#'):
                continue
            keyword, *values = stripped.split()
            entry = dict(zip(field_names[keyword], [v.strip() for v in values]))
            parsed[keyword[:2]].append(entry)
    return parsed
|
ac33b56fc52831a80f1e38261b38c47407fd1bfb
| 27,050
|
import random
def choose(bot, trigger):
    """.choice option1|option2|option3 - Makes a difficult choice easy."""
    raw = trigger.group(2)
    if not raw:
        return bot.reply('I\'d choose an option, but you didn\'t give me any.')
    # Try each candidate delimiter in priority order; the first one that
    # actually splits the input wins.
    options = [raw]
    for separator in '|\\/, ':
        options = raw.split(separator)
        if len(options) > 1:
            break
    options = [option.strip() for option in options]
    selection = random.choice(options)
    # Always use a comma in the output; quote options that contain commas.
    displayed = ', '.join(
        '"%s"' % option if ',' in option else option
        for option in options
    )
    return bot.reply('Your options: %s. My choice: %s' % (displayed, selection))
|
ad4fd246e9db86d45a7a6cb06ef5b9813c0a0d5f
| 27,051
|
def slkBlocking(rec_dict, fam_name_attr_ind, giv_name_attr_ind,
                dob_attr_ind, gender_attr_ind):
    """Build the blocking index data structure (dictionary) to store blocking
     key values (BKV) as keys and the corresponding list of record identifiers.

     This function implements the statistical linkage key (SLK-581)
     blocking approach as used in real-world linkage applications:
     http://www.aihw.gov.au/WorkArea/DownloadAsset.aspx?id=60129551915

     A SLK-581 blocking key is based on the concatenation of:
     - 3 letters of family name (2nd, 3rd and 5th, padded with '2's)
     - 2 letters of given name (2nd and 3rd, padded with '2')
     - Date of birth (ddmmyyyy)
     - Sex ('1' male, '2' female, '9' other/unknown)

     Parameter Description:
       rec_dict          : Dictionary that holds the record identifiers as
                           keys and corresponding list of record values
       fam_name_attr_ind : The number (index) of the attribute that contains
                           family name (last name)
       giv_name_attr_ind : The number (index) of the attribute that contains
                           given name (first name)
       dob_attr_ind      : The number (index) of the attribute that contains
                           date of birth (expected format: dd/mm/yyyy)
       gender_attr_ind   : The number (index) of the attribute that contains
                           gender (sex)

     This method returns a dictionary with blocking key values as its keys
     and list of record identifiers as its values (one list for each block).

     NOTE: as in the original, names that are non-empty but shorter than two
     letters after cleaning contribute nothing to the key.
  """
    block_dict = {}  # The dictionary with blocks to be generated and returned
    print('Run SLK-581 blocking:')
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')
    for (rec_id, rec_values) in rec_dict.items():
        rec_bkv = ''  # Initialise the blocking key value for this record
        # Family name component: '999' if missing, else 2nd, 3rd and 5th
        # letters, padded with '2's.
        #
        fam_name = rec_values[fam_name_attr_ind]
        if (fam_name == ''):
            rec_bkv += '999'
        else:
            # Remove non-letter characters BEFORE selecting letter positions.
            # (Bug fix: previously each replace() restarted from the raw
            # value and the cleaned result was assigned to an unused
            # variable, so '-', ',' and '_' were counted as letters.)
            #
            fam_name = fam_name.replace('-', '').replace(',', '').replace('_', '')
            if (len(fam_name) >= 5):
                rec_bkv += (fam_name[1]+fam_name[2]+fam_name[4])
            elif (len(fam_name) >= 3):
                rec_bkv += (fam_name[1]+fam_name[2]+'2')
            elif (len(fam_name) >= 2):
                rec_bkv += (fam_name[1]+'22')
        # Given name component: '99' if missing, else 2nd and 3rd letters,
        # padded with '2'.
        #
        giv_name = rec_values[giv_name_attr_ind]
        if (giv_name == ''):
            rec_bkv += '99'
        else:
            # Same cleaning (and same bug fix) as for the family name.
            #
            giv_name = giv_name.replace('-', '').replace(',', '').replace('_', '')
            if (len(giv_name) >= 3):
                rec_bkv += (giv_name[1]+giv_name[2])
            elif (len(giv_name) >= 2):
                rec_bkv += (giv_name[1]+'2')
        # Date of birth component, normalised from dd/mm/yyyy to ddmmyyyy.
        #
        dob_list = rec_values[dob_attr_ind].split('/')
        if (len(dob_list[0]) < 2):
            dob_list[0] = '0' + dob_list[0]  # Add leading zero for days < 10
        if (len(dob_list[1]) < 2):
            dob_list[1] = '0' + dob_list[1]  # Add leading zero for months < 10
        dob = ''.join(dob_list)  # Create: ddmmyyyy
        assert len(dob) == 8, dob
        rec_bkv += dob
        # Gender component.
        #
        gender = rec_values[gender_attr_ind].lower()
        if (gender == 'm'):
            rec_bkv += '1'
        elif (gender == 'f'):
            rec_bkv += '2'
        else:
            rec_bkv += '9'
        # Insert the record identifier into its block (creating the block
        # on first use).
        #
        block_dict.setdefault(rec_bkv, []).append(rec_id)
    return block_dict
|
bceb7765e05f2d242d282c40688e18d110b5f32e
| 27,053
|
def caption_from_metadata(metadata):
    """
    Convert metadata list-of-lists to a caption string, one antinode per
    line, each formatted as "cx,cy,a,b,angle,rings".

    :param metadata: iterable of 6-element rows [cx, cy, a, b, angle, rings]
    :return: newline-joined caption string ('' for empty metadata)
    """
    lines = []
    # Unpacking keeps the original's implicit check that every row has
    # exactly six elements.
    for cx, cy, a, b, angle, rings in metadata:
        lines.append("{0},{1},{2},{3},{4},{5}".format(cx, cy, a, b, angle, rings))
    return "\n".join(lines)
|
49f7172601db54c855780243797a6635cfc10dfa
| 27,054
|
from typing import List
def primes(max: int) -> List[int]:
    """
    Return a list of all prime numbers up to max (inclusive), using a
    sieve of Eratosthenes.

    (The parameter name shadows the ``max`` builtin but is kept for
    backward compatibility. The inner loop now starts at ``i*i`` — all
    smaller multiples were already marked by smaller primes.)

    >>> primes(10)
    [2, 3, 5, 7]
    >>> primes(11)
    [2, 3, 5, 7, 11]
    >>> primes(25)
    [2, 3, 5, 7, 11, 13, 17, 19, 23]
    >>> primes(1_000_000)[-1]
    999983
    """
    limit = max + 1
    is_composite = [False] * limit
    result = []
    for i in range(2, limit):
        if is_composite[i]:
            continue
        result.append(i)
        for j in range(i * i, limit, i):
            is_composite[j] = True
    return result
|
1fbdda28414f9a846d2ea1dc945af619df6aeea7
| 27,055
|
def current_user(self):
    """Get current user.

    Delegates to ``self.handler.current_user`` and returns whatever the
    underlying handler exposes (presumably the authenticated user object,
    or ``None`` when unauthenticated — confirm against the handler class).
    """
    return self.handler.current_user
|
790a3478390dbb5439380ef1a35b6bc67f89721a
| 27,056
|
def _get_notebook_outputs(nb_node):
    """Return a dictionary mapping papermill output names to the cell
    outputs that carry them."""
    named_outputs = {}
    for cell in nb_node.cells:
        for cell_output in cell.get('outputs', []):
            # Only outputs tagged with papermill metadata can be named.
            if 'papermill' not in cell_output.get('metadata', {}):
                continue
            name = cell_output.metadata.papermill.get('name')
            if name:
                named_outputs[name] = cell_output
    return named_outputs
|
8eda7268f36839703dbc94f31903e0c5d37f5e5b
| 27,058
|
def count_jobs(api, name, all=False):
    """Count how many zapps have already been submitted.

    With ``all`` (name kept for interface compatibility, despite shadowing
    the builtin) the scheduler's global running count is returned; otherwise
    the executions matching *name* in any non-finished state are counted.
    """
    if all:
        sched = api.statistics.scheduler()
        return sched['running_length']
    pending = []
    for status in ('submitted', 'queued', 'starting', 'running'):
        pending += api.executions.list(status=status, name=name)
    return len(pending)
|
ed5ca077769b656b101fc9dd71a85878fe9ae8ef
| 27,059
|
import torch
from typing import Tuple
def _split_crossval(xy: torch.Tensor, crossval_count: int, crossval_index: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Generates a split of the given dataset along the first dimension for cross-validation.

    :param xy: The data that should be split. The split will be generated across dimension 0.
    :param crossval_count: The number of splits in total
    :param crossval_index: The index of the split that should be generated (0 <= crossval_index < crossval_count)
    :return: A tuple of (training data, validation data)
    """
    n = xy.shape[0]
    split_size = n // crossval_count
    val_start = crossval_index * split_size
    val_end = (crossval_index + 1) * split_size
    val = xy[val_start:val_end]
    # Training data is everything outside the validation slice.
    # (Bug fix: the original only took the single chunk immediately before
    # the validation slice — for crossval_index >= 2 it silently dropped
    # all earlier chunks, and for the last fold it dropped the entire
    # prefix. Any tail rows beyond crossval_count * split_size now go to
    # the training set instead of being discarded.)
    train = torch.cat([xy[:val_start], xy[val_end:]])
    return (train, val)
|
211146bdac3396da475b6c6b74d990031a537af7
| 27,061
|
from typing import Optional
from typing import Any
from typing import List
from typing import Iterable
def as_list(value: Optional[Any]) -> List[Any]:
    """Normalizes the value input as a list.

    ``None`` becomes ``[]``; strings and non-iterables are wrapped as a
    single-element list; any other iterable is materialized.

    >>> as_list(None)
    []
    >>> as_list("foo")
    ['foo']
    >>> as_list(123)
    [123]
    >>> as_list(["foo", "bar", 123])
    ['foo', 'bar', 123]
    >>> as_list(("foo", "bar", 123))
    ['foo', 'bar', 123]
    >>> as_list(range(5))
    [0, 1, 2, 3, 4]
    >>> def gen():
    ...     yield 1
    ...     yield 2
    >>> as_list(gen())
    [1, 2]
    """
    if value is None:
        return []
    # Strings are iterable but must be treated as scalars.
    if isinstance(value, str) or not isinstance(value, Iterable):
        return [value]
    return list(value)
|
54f13890437dfafd779583a3bbdc42ae769312f4
| 27,062
|
from typing import List
def get_cell_density(lstfile: str, n_phases: int) -> List[dict]:
    """
    Parse .lst file to get new cell values with errors and density from a refinement.

    Parameters
    ----------
    lstfile : str
        Path to GSASII .lst file.
    n_phases : int
        Number of phases in the .lst file

    Returns
    -------
    List[dict]
        Returns a List of dictionaries for each phase containing the crystal structure information (a,b,c,V) and
        associated errors. A phase that cannot be parsed yields ``{"name": "error"}``.
    """
    results = []
    with open(lstfile, "r") as file:
        data = file.read()
    # Split on the column-header line GSAS-II prints before each cell table.
    k2 = data.split("names : a b c alpha beta gamma Volume")
    for i in range(1, n_phases + 1):
        try:
            phasename = k2[i-1].split("Result for phase:")[1].split("\n")[0].replace(" ", "")
            abc_angle_vol = k2[i].split("\n")[1].split("values:")[1]
            err = k2[i].split("\n")[2].split("esds : ")[1]
            # Density is printed as "Density:<value>g/cm**3".
            density = k2[i][k2[i].find("Density:"):k2[i].find("g/cm**3")].split("Density:")[-1]
            new_cell = [val for val in abc_angle_vol.split(" ") if len(val) > 0]  # strip white space
            new_cell = new_cell[:3] + [new_cell[-1]]  # keep a, b, c, Volume; drop the angles
            err = [val for val in err.split(" ") if len(val) > 0]  # strip white space
            # NOTE(review): eval() is used on file content; a GSAS-II .lst
            # file is normally trusted, but this is unsafe if the file can
            # be attacker-controlled — consider float() instead.
            # NOTE(review): err[3] is taken as the Volume esd; if the esds
            # line lists all seven columns this would actually be alpha's
            # esd — confirm against a real .lst file.
            final_results = dict(name=phasename,
                                 a=eval(new_cell[0]),
                                 err_a=eval(err[0]),
                                 b=eval(new_cell[1]),
                                 err_b=eval(err[1]),
                                 c=eval(new_cell[2]),
                                 err_c=eval(err[2]),
                                 V=eval(new_cell[3]),
                                 err_V=eval(err[3]),
                                 density=eval(density)
                                 )
        except Exception:
            # Bug fix: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; malformed phases still degrade gracefully.
            final_results = {"name": "error"}
        results.append(final_results)
    return results
|
00460f9d3c0628710617ecdb270c56b7be153f0a
| 27,063
|
import re
def smooth(text):
    """Get rid of the noise in the wikipedia corpus.

    Removes headers, links/images/templates and all ASCII characters,
    turns list items into sentences, and puts one sentence per line.
    (Raw string literals fix the invalid '\\*', '\\[' and '\\g' escape
    warnings of the original; the replacement text is byte-identical.)
    """
    text = re.sub(r"=+(.*)=+", "", text)  # remove headers
    # if it is lists or items, regard their contents as sentences
    # (append an extra "。" to each)
    text = re.sub(r"(\*|\#|:|;)+(.*)\n", "\\g<2>。\n", text)
    text = re.sub(r"\[(.*)\]", "", text)  # remove links, images, templates
    # remove all strings except multibyte characters
    text = re.sub(r"[\x01-\x7E]", "", text)
    text = re.sub(r"(。)+", "。\n", text)  # one sentence per one line
    return text
|
392852a80f45b0f15e9f3ae9e80da68197fe8d95
| 27,064
|
def mcs(G):
    """Maximum cardinality search.

    Returns an ordering of the vertices as described in [TY84Simple].
    The ordering is not reversed to make sure that G[V[0:i]] is connected.
    """
    n = G.number_of_nodes()
    sets = []          # sets[i]: queue of unordered vertices with i ordered neighbours
    size = {}          # size[v]: count of ordered neighbours of v; -1 once v is ordered
    ordering = []
    orderingDict = {}  # v -> position number assigned to v (counts down from n-1)
    for v in G.nodes():
        sets.append([])
        size[v] = 0
        sets[0].append(v)
    number = n-1
    j = 0              # index of the highest non-empty bucket in `sets`
    while number >= 0:
        # Pick a vertex with the maximum number of already-ordered neighbours.
        v = sets[j].pop(0)
        ordering.append(v)
        orderingDict[v] = number
        size[v] = -1   # mark v as ordered
        for w in G.neighbors(v):
            # Move each unordered neighbour up one bucket.
            if size[w] >= 0:
                sets[size[w]].remove(w)
                size[w] = size[w]+1
                sets[size[w]].append(w)
        # Neighbours may have moved into bucket j+1; then fall back to the
        # highest non-empty bucket.
        j += 1
        if j == len(sets):
            j -= 1
        while j>=0 and not sets[j]:
            j -= 1
        number -= 1
    # ordering.reverse()
    return ordering
|
c1b59f45e5592ef31387124e65768b11cbd6ac2d
| 27,066
|
import time
def now():
    """ return current time. """
    # NOTE(review): adding time.timezone to the epoch looks inverted for a
    # local-time conversion — confirm the intended timezone semantics.
    offset = int(time.timezone)
    if time.daylight:
        offset += 3600
    return time.ctime(time.time() + offset)
|
97fd12e2737b49e6a6f30ef67db5092d4f927c9e
| 27,068
|
from unittest.mock import Mock
def functions():
    """Test for regular function name completion."""
    stub = Mock()
    names = (
        "domain.func_1",
        "domain.func_2",
        "helpers.get_today",
        "helpers.entity_id",
    )
    # Every entry shares the same mock callable.
    return {name: stub for name in names}
|
0281b5fa3fba5b2dd7b56c8291af1629d13c9b84
| 27,071
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.