content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_text_fingerprint(text, hash_meth, encoding="utf-8"): # pragma: no cover
    """Return the hex digest of *text* hashed with *hash_meth*.

    The text is encoded (default 'utf-8') before hashing.

    :param text: string to fingerprint
    :param hash_meth: hash constructor, e.g. ``hashlib.md5``
    :param encoding: codec used to turn *text* into bytes
    :return: hexadecimal digest string
    """
    hasher = hash_meth()
    hasher.update(text.encode(encoding))
    return hasher.hexdigest()
import json
def get_swiftlm_uptime_mon_data(uptime_stats_file, logger, openr=open):
    """Load a swiftlm uptime mon cache file and return its JSON content.

    :param uptime_stats_file: path to the cache file to read
    :param logger: logger used to report read/parse failures
    :param openr: open-like callable to use [for unittests]
    :return: dict of cache items and their values, or None on error
    """
    try:
        with openr(uptime_stats_file, 'r') as cache:
            return json.load(cache)
    except IOError:
        logger.exception('Error reading swiftlm uptime mon cache file')
    except ValueError:
        logger.exception('Error parsing swiftlm uptime mon cache file')
def format_args(arg_type, text):
    """Strip *text*; additionally split it on whitespace when arg_type is 'list'."""
    stripped = text.strip()
    return stripped.split() if arg_type == 'list' else stripped
from typing import Union
def array_copy(src: Union[bytes, bytearray], src_pos: int, dest: bytearray, dest_pos: int, length: int):
    """Copy ``length`` elements from ``src[src_pos:]`` into ``dest[dest_pos:]``.

    Mirrors Java's ``System.arraycopy``: a sub-sequence of components is
    copied from the source array into the destination array, and the two
    arrays may be the same object (overlap is handled safely).

    :param src: the source array.
    :param src_pos: starting position in the source array.
    :param dest: the destination array.
    :param dest_pos: starting position in the destination data.
    :param length: the number of array elements to be copied.
    :return: False when src and dest are the same buffer at the same
        position (nothing to do), True after a copy was performed.
    """
    if src is dest and src_pos == dest_pos:
        # Same buffer, same window: copying would be a no-op.
        return False
    if src is dest:
        src_end = src_pos + length
        # When the destination window starts inside the source window, a
        # plain forward copy would re-read elements it has already
        # overwritten; snapshot the source range into a temporary buffer.
        # (dest before src, or disjoint windows, are safe for forward copy.)
        if src_pos < dest_pos < src_end:
            src = src[src_pos:src_end]
            src_pos = 0
    for offset in range(length):
        dest[dest_pos + offset] = src[src_pos + offset]
    return True
import base64
def b64decode(data: str, urlsafe: bool = False) -> str:
    """Decode a Base64 string and return the decoded text.

    :param data: ASCII Base64 input
    :param urlsafe: when True, use the URL-safe alphabet (``-`` and ``_``)
    :return: decoded string (ASCII)
    """
    decoder = base64.urlsafe_b64decode if urlsafe else base64.b64decode
    return decoder(data.encode("ascii")).decode("ascii")
def func(a: int, b: int) -> int:
    """Return the sum of two integers (demonstrates type annotations)."""
    total = a + b
    return total
def max_depth(root):
    """Return the maximum depth of the binary tree rooted at *root* (0 for None)."""
    if root is None:
        return 0
    left_depth = max_depth(root.left)
    right_depth = max_depth(root.right)
    return 1 + max(left_depth, right_depth)
from typing import Counter
def palindrome_permutation(string: str) -> bool:
    """Return True if some permutation of *string* is a palindrome.

    Case-insensitive; spaces are ignored. Naive multiset implementation:
    a palindrome permutation exists iff at most one character (and only
    when the cleaned length is odd) occurs an odd number of times.
    """
    if not string:
        return True
    cleaned = string.lower().replace(' ', '')
    odd_counts = sum(1 for count in Counter(cleaned).values() if count % 2)
    # An odd-length string is allowed exactly one odd-count character.
    return odd_counts <= len(cleaned) % 2
import os
def custom_docstring_func(_, kwargs):
    """Render parametrization arguments, one ``key: value`` pair per line.

    :param _: unused (kept for the expected callback signature)
    :param kwargs: mapping of parametrization argument names to values
    :return: the pairs joined with the platform line separator
    """
    return os.linesep.join(
        "{}: {}".format(name, value) for name, value in kwargs.items()
    )
import random
def _split_a_and_b(data, sent_ids, begin_idx, tot_len, extend_target=False):
    """Split two segments from `data` starting from the index `begin_idx`.

    :param data: 1-D array-like of token ids (only ``shape[0]`` and slicing
        are used here — presumably a numpy array; confirm against callers).
    :param sent_ids: per-token sentence ids aligned with ``data``; a value
        change between neighbouring positions marks a sentence boundary.
    :param begin_idx: index where segment A starts.
    :param tot_len: combined target length for segments A and B.
    :param extend_target: when True, also return next-token targets for
        both segments.
    :return: ``[seg_a, seg_b, label, new_begin]`` (plus ``a_target,
        b_target`` when ``extend_target``), or None when the requested
        split does not fit in ``data``. ``label`` is 1 when B actually
        follows A ("isNext"), 0 otherwise.
    """
    data_len = data.shape[0]
    if begin_idx + tot_len >= data_len:
        # NOTE(review): print is given logging-style %-placeholders, so the
        # values are printed after the literal string, not substituted.
        print("[_split_a_and_b] returns None: "
              "begin_idx %d + tot_len %d >= data_len %d",
              begin_idx, tot_len, data_len)
        return None
    end_idx = begin_idx + 1
    cut_points = []
    # Record sentence boundaries until at least tot_len tokens are spanned.
    while end_idx < data_len:
        if sent_ids[end_idx] != sent_ids[end_idx - 1]:
            if end_idx - begin_idx >= tot_len: break
            cut_points.append(end_idx)
        end_idx += 1
    a_begin = begin_idx
    if len(cut_points) == 0 or random.random() < 0.5:
        # NotNext: segment B is drawn from a random location.
        label = 0
        if len(cut_points) == 0:
            a_end = end_idx
        else:
            a_end = random.choice(cut_points)
        b_len = max(1, tot_len - (a_end - a_begin))
        # (zihang): `data_len - 1` to account for extend_target
        b_begin = random.randint(0, data_len - 1 - b_len)
        b_end = b_begin + b_len
        # Expand B outwards to full sentence boundaries on both sides.
        while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]:
            b_begin -= 1
        # (zihang): `data_len - 1` to account for extend_target
        while b_end < data_len - 1 and sent_ids[b_end - 1] == sent_ids[b_end]:
            b_end += 1
        new_begin = a_end
    else:
        # isNext: B directly continues from A at a sentence boundary.
        label = 1
        a_end = random.choice(cut_points)
        b_begin = a_end
        b_end = end_idx
        new_begin = b_end
    # Trim the longer segment until the pair fits within tot_len.
    while a_end - a_begin + b_end - b_begin > tot_len:
        if a_end - a_begin > b_end - b_begin:
            # delete the right side only for the LM objective
            a_end -= 1
        else:
            b_end -= 1
    ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin]
    if extend_target:
        if a_end >= data_len or b_end >= data_len:
            print("[_split_a_and_b] returns None: "
                  "a_end %d or b_end %d >= data_len %d",
                  a_end, b_end, data_len)
            return None
        a_target = data[a_begin + 1: a_end + 1]
        b_target = data[b_begin: b_end + 1]
        ret.extend([a_target, b_target])
    return ret
def get_bit_positions(bit_mask):
    """Return the positions of all enabled (set) bits in *bit_mask*, ascending.

    Generalized from the original fixed 16-bit scan: masks of any width are
    handled, and the loop stops as soon as no set bits remain.

    :param bit_mask: non-negative integer bitmask
    :return: list of 0-based positions whose bit is 1
    """
    bit_positions = []
    position = 0
    remaining = bit_mask
    while remaining:
        if remaining & 1:
            bit_positions.append(position)
        remaining >>= 1
        position += 1
    return bit_positions
def add_custom_decoder_arguments(group):
    """Define arguments for Custom decoder.

    :param group: argparse argument group (or parser) to extend
    :return: the same group, with the custom-decoder options registered
    """
    group.add_argument(
        "--dec-block-arch",
        # NOTE(review): type=eval executes the argument string as Python.
        # Acceptable only because this is a trusted local CLI — confirm.
        type=eval,
        action="append",
        default=None,
        help="Custom decoder blocks definition",
    )
    group.add_argument(
        "--dec-block-repeat",
        default=1,
        type=int,
        help="Repeat N times the provided decoder blocks if N > 1",
    )
    group.add_argument(
        "--custom-dec-input-layer",
        type=str,
        default="embed",
        choices=["linear", "embed"],
        help="Custom decoder input layer type",
    )
    group.add_argument(
        "--custom-dec-pw-activation-type",
        type=str,
        default="relu",
        choices=["relu", "hardtanh", "selu", "swish"],
        help="Custom decoder pointwise activation type",
    )
    return group
import numpy
def drop_duplicates(df, subset=None, keep='first'):
    """ Drops duplicates from a DataFrame df.
    Params :
        df : DataFrame
            A dataFrame duplicates should be removed from
        subset : [obj]
            Column keys considered when deciding whether two rows are
            duplicates. Defaults to all columns.
        keep : 'first' or 'last' or False
            Which duplicate entry survives: 'first'/'last' keeps that
            occurrence; False drops every duplicated entry.
    Returns: DataFrame
    """
    keep_mask = numpy.invert(df.duplicated(subset=subset, keep=keep))
    deduped = df.loc[keep_mask]
    if deduped.shape != df.shape:
        print('Dropped duplicates : Original - {0!s} : New - {1!s}'.format(
            df.shape, deduped.shape))
    return deduped
def getCacheThumbName():
    """Return a thumb cache filename.

    NOTE(review): stub — always returns the empty string; presumably the
    real, platform-specific implementation lives elsewhere. Confirm.
    """
    return ""
def clean_sents(tenses_df, o_sents):
    """
    Remove empty and lengthy sentences from the dataframe
    ----
    PARAMETERS
    ----
    tenses_df: pd.DataFrame
        dataframe of the tenses, sentences, sentence lengths and location of verb tags
    o_sents: bool
        whether the dataframe carries an "O_sentence" (original sentence)
        column that must be kept in the column ordering
    ----
    RETURN
    ----
    tenses_df: pd.DataFrame
        The same as the input but with empty and lengthy sentences removed
    """
    ### Reorder the columns
    nC = len(tenses_df.columns)
    nV = ((nC-3)/3) # number of verbs
    # BUG FIX(review): the original unconditionally reset o_sents = False
    # here, making the parameter dead so "O_sentence" was always dropped
    # from the column ordering. The parameter is now honoured.
    ## Order column names
    if o_sents:
        col_order = ["sentence", "O_sentence", "sentence_length", "num_verb_tags"]
    else:
        col_order = ["sentence", "sentence_length", "num_verb_tags"]
    for j in range(1, int(nV)+1):
        col_order.append("verb_%s" % (j))
        col_order.append("verb_%s_tag" % (j))
        col_order.append("verb_%s_position" % (j))
    # set column order
    tenses_df = tenses_df.reindex(columns=col_order)
    # Remove empty sentences
    tenses_df.dropna(subset=["sentence"], inplace=True)
    ##########################
    # Remove lengthy sentences
    ##########################
    ### Remove sentences with more than 60 words or with less than 3 words
    tenses_df = tenses_df[(tenses_df["sentence_length"] < 61) & (tenses_df["sentence_length"] > 2)]
    ### Remove empty columns
    tenses_df.dropna(how='all', axis=1, inplace=True)
    return tenses_df
import random
def _number_of_children(xi, norm_const=1):
    """
    Randomly selects the number of children using the discrete density function xi.
    :param xi: List or lambda function such that xi[i] is the probability of i children, i>=0.
    :param norm_const: Normalizing constant, sum of all positive values in xi.
    :return: Randomly selected integer following the discrete density function xi.
    """
    # Instead of dividing each probability by the normalizing constant,
    # scale the sampling interval by it ('cheating' in the same way).
    remaining = random.uniform(0, norm_const)
    xi_is_callable = callable(xi)
    index = 0
    while remaining > 0:
        if xi_is_callable:
            prob = xi(index)
        elif index >= len(xi):
            # Ran off the end of the list density: fall back to 0 children.
            return 0
        else:
            prob = xi[index]
        if remaining <= prob:
            return index
        remaining -= prob
        index += 1
    # Unreachable while sum(xi) <= norm_const holds; kept as a safeguard.
    return 0
def tonick(user_name):
    """Convert a Stack user name into an IRC nick name by stripping whitespace.

    The name is also round-tripped through raw_unicode_escape to handle
    nicks from the user list (which embed \\uXXXX sequences for non-ASCII
    characters); nicks from MessagePosted events already carry real utf-8
    characters and pass through unharmed.
    """
    unescaped = user_name.encode('raw_unicode_escape').decode('raw_unicode_escape')
    return unescaped.replace(' ', '')
def verifier_liste(liste):
    """Check that the integer list contains only 0s and 1s.

    :param liste: list of integers to validate
    :return: the same list, unchanged
    :raises Exception: if any element is neither 0 nor 1
    """
    if any(nombre not in (0, 1) for nombre in liste):
        raise Exception("La liste contient un caractère autre que 0 ou 1.")
    return liste
import os
def isdir(path: str):
    """Check if a directory exists, or if the path given is a directory.
    Original isdir does follow symlinks, this implementation does not.

    Works by chdir-ing into each path component in turn (restoring the
    original working directory afterwards); a component that cannot be
    entered means the path is not a directory.

    :param path: absolute or relative path to test
    :return: True when every component can be entered as a directory
    """
    cwd = os.getcwd()
    slash_i = path.find("/")
    if slash_i == -1:
        # No separator at all: a single path component.
        fir_part, sec_part = path, ""
    else:
        if slash_i == 0:
            # Leading "/": keep it attached to the first component.
            slash_i = path.find("/", slash_i+1)
            fir_part, sec_part = (path, "") if slash_i == -1 else (path[:slash_i], path[slash_i+1:])
        else:
            fir_part, sec_part = path[:slash_i], path[slash_i+1:]
    # The code above is written to handle state when user tries to create dir in /.
    # It is impossible to do this. But there will be no exception if you try to os.chdir("/") and then os.chdir("anyD")
    # But if if you run os.chdir("/anyD") the exception will occur.
    try:
        os.chdir(fir_part)
    except OSError:
        # Not enterable: restore the working directory and report failure.
        os.chdir(cwd)
        return False
    # Recurse into the remainder of the path from the new working directory.
    res = isdir(sec_part) if sec_part != "" else True
    os.chdir(cwd)
    return res
def get_tpu_cluster_resolver_fn():
    """Returns the fn required for runnning custom container on cloud TPUs.

    This function is added to the user code in the custom container before
    running it on the cloud. With this function, we wait for the TPU to be
    provisioned before calling TpuClusterResolver.
    https://cloud.devsite.corp.google.com/ai-platform/training/docs/
    using-tpus#custom-containers

    :return: list of source-code lines (each ending in ``\\n``) defining
        ``wait_for_tpu_cluster_resolver_ready()``, intended to be written
        verbatim into the generated training script. Note the generated
        code also relies on ``os`` and ``tf`` being imported by the
        surrounding script.
    """
    return [
        "import json\n",
        "import logging\n",
        "import time\n",
        "logger = logging.getLogger(__name__)\n",
        "logging.basicConfig(level=logging.INFO)\n",
        "def wait_for_tpu_cluster_resolver_ready():\n",
        "  tpu_config_env = os.environ.get('TPU_CONFIG')\n",
        "  if not tpu_config_env:\n",
        "    logging.info('Missing TPU_CONFIG, use CPU/GPU for training.')\n",
        "    return None\n",
        "  tpu_node = json.loads(tpu_config_env)\n",
        "  logging.info('Waiting for TPU to be ready: %s.', tpu_node)\n",
        "  num_retries = 40\n",
        "  for i in range(num_retries):\n",
        "    try:\n",
        "      tpu_cluster_resolver = (\n",
        "          tf.distribute.cluster_resolver.TPUClusterResolver(\n",
        "              tpu=[tpu_node['tpu_node_name']],\n",
        "              zone=tpu_node['zone'],\n",
        "              project=tpu_node['project'],\n",
        "              job_name='worker'))\n",
        "      tpu_cluster_resolver_dict = "
        "tpu_cluster_resolver.cluster_spec().as_dict()\n",
        "      if 'worker' in tpu_cluster_resolver_dict:\n",
        ("        logging.info('Found TPU worker: %s', "
         "tpu_cluster_resolver_dict)\n"),
        "        return tpu_cluster_resolver\n",
        "    except Exception as e:\n",
        "      if i < num_retries - 1:\n",
        ("        logging.info('Still waiting for provisioning of TPU VM "
         "instance.')\n"),
        "      else:\n",
        "        # Preserves the traceback.\n",
        "        raise RuntimeError('Failed to schedule TPU: {}'.format(e))\n",
        "    time.sleep(10)\n",
        "  raise RuntimeError('Failed to schedule TPU.')\n",
    ]
def convert_rag_text(dca_rating: str) -> str:
    """Convert a RAG rating name into its acronym ("" for unknown ratings)."""
    acronyms = {
        "Green": "G",
        "Amber/Green": "A/G",
        "Amber": "A",
        "Amber/Red": "A/R",
        "Red": "R",
    }
    return acronyms.get(dca_rating, "")
def bookkeep_reactant(mol, candidate_pairs):
    """Bookkeep reaction-related information of reactants.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance for reactants.
    candidate_pairs : list of 2-tuples
        Pairs of atoms that ranked high by a model for reaction center prediction.
        By assumption, the two atoms are different and the first atom has a smaller
        index than the second.

    Returns
    -------
    info : dict
        Reaction-related information of reactants: per-atom free valence
        and element flags, a map from atom-index pairs to bond order, and
        the set of bonds that sit inside rings.
    """
    num_atoms = mol.GetNumAtoms()
    info = {
        # free valence of atoms
        'free_val': [0 for _ in range(num_atoms)],
        # Whether it is a carbon atom
        'is_c': [False for _ in range(num_atoms)],
        # Whether it is a carbon atom connected to a nitrogen atom in pyridine
        'is_c2_of_pyridine': [False for _ in range(num_atoms)],
        # Whether it is a phosphorous atom
        'is_p': [False for _ in range(num_atoms)],
        # Whether it is a sulfur atom
        'is_s': [False for _ in range(num_atoms)],
        # Whether it is an oxygen atom
        'is_o': [False for _ in range(num_atoms)],
        # Whether it is a nitrogen atom
        'is_n': [False for _ in range(num_atoms)],
        # (min_idx, max_idx) atom pair -> bond order (as double)
        'pair_to_bond_val': dict(),
        # atom pairs whose bond is part of a ring
        'ring_bonds': set()
    }
    # bookkeep atoms
    for j, atom in enumerate(mol.GetAtoms()):
        info['free_val'][j] += atom.GetTotalNumHs() + abs(atom.GetFormalCharge())
        # An aromatic carbon atom next to an aromatic nitrogen atom can get a
        # carbonyl b/c of bookkeeping of hydroxypyridines
        if atom.GetSymbol() == 'C':
            info['is_c'][j] = True
            if atom.GetIsAromatic():
                for nbr in atom.GetNeighbors():
                    if nbr.GetSymbol() == 'N' and nbr.GetDegree() == 2:
                        info['is_c2_of_pyridine'][j] = True
                        break
        # A nitrogen atom should be allowed to become positively charged
        elif atom.GetSymbol() == 'N':
            info['free_val'][j] += 1 - atom.GetFormalCharge()
            info['is_n'][j] = True
        # Phosphorous atoms can form a phosphonium
        elif atom.GetSymbol() == 'P':
            info['free_val'][j] += 1 - atom.GetFormalCharge()
            info['is_p'][j] = True
        elif atom.GetSymbol() == 'O':
            info['is_o'][j] = True
        elif atom.GetSymbol() == 'S':
            info['is_s'][j] = True
    # bookkeep bonds
    for bond in mol.GetBonds():
        atom1, atom2 = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        # Normalize so the smaller index always comes first.
        atom1, atom2 = min(atom1, atom2), max(atom1, atom2)
        type_val = bond.GetBondTypeAsDouble()
        info['pair_to_bond_val'][(atom1, atom2)] = type_val
        # Atoms in candidate reaction centers get the bond order credited
        # back to their free valence (the bond may be broken).
        if (atom1, atom2) in candidate_pairs:
            info['free_val'][atom1] += type_val
            info['free_val'][atom2] += type_val
        if bond.IsInRing():
            info['ring_bonds'].add((atom1, atom2))
    return info
def process_max_frames_arg(max_frames_arg):
    """Handle maxFrames arg in vidstab.__main__

    Non-positive values are converted to inf (i.e. "no limit").
    :param max_frames_arg: maxFrames arg in vidstab.__main__
    :return: max_frames as is or inf

    >>> process_max_frames_arg(-1)
    inf
    >>> process_max_frames_arg(1)
    1
    """
    return max_frames_arg if max_frames_arg > 0 else float('inf')
def E2L(energy):
    """Convert photon energy (eV) to wavelength in meter !!!"""
    wavelength_angstrom = 12398.0 / energy
    return wavelength_angstrom * 1e-10
def queens_fitness(genome):
    """Calculate the fitness of an organization of queens on the chessboard.

    Arguments:
    o genome -- A MutableSeq object specifying an organism genome; position
      i holds the row (as a digit) of the queen in column i.

    The number returned is the number of unattacked queens on the board.
    Two queens attack each other when they share a row or a diagonal
    (columns are unique by construction).
    """
    rows = [int(gene) for gene in genome]
    board_size = len(rows)
    fitness = 0
    for col in range(board_size):
        attacked = False
        for other_col in range(board_size):
            if other_col == col:
                continue  # never compare a queen with itself
            same_row = rows[col] == rows[other_col]
            same_diag = abs(rows[col] - rows[other_col]) == abs(col - other_col)
            if same_row or same_diag:
                attacked = True
                break
        if not attacked:
            fitness += 1
    return fitness
import argparse
def _parse_args() -> argparse.Namespace:
    """Parses arguments.

    Reads ``sys.argv``: three positional paths (desired label spec JSON,
    classifier label spec JSON, taxonomy CSV), a required ``-o/--output``
    JSON path, and an optional ``-i/--classifier-label-index`` path.

    :return: parsed :class:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Create mapping from target categories to classifier '
                    'labels.')
    parser.add_argument(
        'desired_label_spec_json',
        help='path to JSON file containing desired label specification')
    parser.add_argument(
        'classifier_label_spec_json',
        help='path to JSON file containing label specification of a trained '
             'classifier')
    parser.add_argument(
        'taxonomy_csv',
        help='path to taxonomy CSV file')
    parser.add_argument(
        '-o', '--output', required=True,
        help='path to output JSON')
    parser.add_argument(
        '-i', '--classifier-label-index',
        help='(optional) path to label index JSON file for trained classifier, '
             'needed if not all labels from <classifier_label_spec_json> were '
             'actually used (e.g., if some labels were filtered out by the '
             '--min-locs argument for create_classification_dataset.py)')
    return parser.parse_args()
from typing import List
def compute_bootstrapped_returns(rewards: List, values: List, gamma: float = 0.95) -> List:
    """
    Compute bootstrapped rewards-to-go. It's assumed the last state is the terminal state,
    so V(s_T) = 0.
    q(s_t, a_t) = r(s_t, a_t) + V(s_t+1) * (1 - done)
    :param rewards: per-step rewards
    :param values: per-step value estimates, aligned with rewards
    :param gamma: discount factor
    :return: list of bootstrapped one-step returns
    """
    returns = [
        reward + gamma * values[step + 1]
        for step, reward in enumerate(rewards[:-1])
    ]
    returns.append(rewards[-1])  # terminal state -> V(s_T) = 0
    return returns
def dirac(x, y, tol=2e-2, bounds=None):
    """Create an indicator function approximating a Dirac at (x, y).

    Parameters
    ----------
    x : int, float
        Horizontal position of the Dirac.
    y : int, float
        Vertical position of the Dirac.
    tol : float, optional
        Small offset around the position to avoid completely null Dirac if no
        vertex falls on the precise position.
    bounds : list[int or float, int or float, int or float, int or float] or None, optional
        Clamping bounds [xmin, xmax, ymin, ymax] for the Dirac, if necessary.

    Returns
    -------
    callable (px, py) -> 1.0 inside the tolerance box, 0.0 outside.
    """
    x_lo, x_hi = x - tol, x + tol
    y_lo, y_hi = y - tol, y + tol
    if bounds is not None:
        # Clamp the tolerance box to the provided bounds.
        x_lo = max(x_lo, bounds[0])
        x_hi = min(x_hi, bounds[1])
        y_lo = max(y_lo, bounds[2])
        y_hi = min(y_hi, bounds[3])
    def indicator(px, py):
        inside = x_lo <= px <= x_hi and y_lo <= py <= y_hi
        return 1. if inside else 0.
    return indicator
def chunks_in_region_file(filename):
    """Return the number of chunks generated in a region file.

    Scans the 1024-entry header table; an entry whose last byte is
    non-zero marks a generated chunk.
    """
    with open(filename, 'rb') as region:
        entries = (region.read(4) for _ in range(1024))
        return sum(1 for entry in entries if entry[-1] != 0)
def my_subfunc(arg_a, arg_b):
    """Join two strings with ' and '.

    Function 'my_subfunc' is a subfunction of ModuleTemplate.

    Source:
        * Reference paper or book, with author and date

    Args:
        arg_a (str): Argument 1
        arg_b (str): Argument 2

    Returns:
        new_arg (str): "<arg_a> and <arg_b>"

    .. warning::
        Example of warning
    """
    new_arg = ' and '.join((arg_a, arg_b))
    return new_arg
import re
def get_image_name(url: str) -> str:
    """Return the file-name component (trailing path segment) of an image URL.

    :param url: image URL
    :return: image file name

    >>> get_image_name('http://fm.shiyunjj.com/2018/1562/5idk.jpg')
    '5idk.jpg'
    """
    # str.rsplit on the last '/' replaces the regex machinery of the
    # original re.split call; a URL with no '/' returns unchanged, as before.
    return url.rsplit('/', 1)[-1]
def no_coords_same(coords):
    """Return True when every coordinate in *coords* is distinct.

    BUG FIX(review): the original compared the symmetric difference of the
    singleton sets with their union, which misses coordinates repeated an
    odd number of times (XOR-ing a value an odd number of times leaves it
    present — e.g. a triple was reported as "all different"). Comparing
    the list length with its set size handles every multiplicity.

    input: list of hashable coords
    output: bool
    """
    return len(set(coords)) == len(coords)
import math
def find_circle_radius():
    """
    031
    Ask the user to enter the radius of a circle (measurement from the
    centre point to the edge). Work out the area of the circle (π*radius2).

    Reads the radius from stdin and returns the area rounded to 4 decimal
    places. (Despite the name, the return value is the area, not the radius.)
    """
    radius = float(input("Enter the radius of a circle: "))
    circle_area = round(math.pi * (radius ** 2), 4)
    return circle_area
def second_sub_method():
    """Second sub-subpackage method."""
    result = 2
    return result
import requests
async def get_url_async(url, **kwargs):
    """
    Async variant of above get_url() method

    NOTE(review): requests.get is a blocking call, so despite ``async def``
    this coroutine blocks the event loop for the duration of the request;
    consider an async HTTP client or running the call in an executor.

    :param url: target URL, passed straight to requests.get
    :param kwargs: forwarded to requests.get (headers, timeout, ...)
    :return: the requests.Response object
    """
    return requests.get(url, **kwargs)
def set_fin_year_axis(
    ax, rotation=0, ha="center", end_of_fin_year=True, every_year=True
):
    """Relabel a matplotlib x-axis of calendar years as financial years.

    Parameters:
    -----------
    ax: matplotlib Axes whose x tick labels are 4-digit years
    rotation: tick label rotation in degrees
    ha: tick label horizontal alignment
    end_of_fin_year: Boolean, if True, convert 1996 to 1995-96, else 1996-97
    every_year: when False, return a label list with every second entry
        blanked (labels are not re-applied to the axis, matching the
        original behaviour)

    Returns the blanked label list when every_year is False, else None.
    """
    # Draw the canvas to force labels to be written out
    fig = ax.get_figure()
    fig.canvas.draw()
    labels_old = [tick.get_text() for tick in ax.xaxis.get_ticklabels()]
    if end_of_fin_year:
        labels_new = [f"{int(t[:4]) - 1}-{t[2:4]}" for t in labels_old]
    else:
        labels_new = [f"{t[:4]}-{int(t[2:4]) + 1}" for t in labels_old]
    # BUG FIX(review): rotation/ha were passed positionally, where
    # set_ticklabels's next positional parameter is fontdict — pass them
    # as keyword arguments instead.
    ax.xaxis.set_ticklabels(labels_new, rotation=rotation, ha=ha)
    if not every_year:
        # blank out every second year
        labels_old = [tick.get_text() for tick in ax.xaxis.get_ticklabels()]
        # BUG FIX(review): labels_old already holds strings (get_text was
        # applied above), so calling t.get_text() here raised AttributeError.
        labels_new = [t if i % 2 else "" for i, t in enumerate(labels_old)]
        return labels_new
from typing import Dict
def invert_val_mapping(val_mapping: Dict) -> Dict:
    """Return *val_mapping* with keys and values swapped (values must be hashable)."""
    return dict(zip(val_mapping.values(), val_mapping.keys()))
def _iterable_to_varargs_method(func):
    """decorator to convert a method taking a iterable to a *args one"""
    def wrapped(self, *args, **kwargs):
        # Positional arguments are collected into a tuple and handed to the
        # wrapped method as its single iterable argument.
        result = func(self, args, **kwargs)
        return result
    return wrapped
def get_bit(vector, offset):
    """Return whether bit *offset* of the bitmap *vector* is set.

    :param vector: bitmap as a bytes-like sequence (bit 0 is the least
        significant bit of byte 0)
    :param offset: bit offset
    :return: True when the corresponding bit is 1
    """
    byte_value = vector[offset >> 3]
    bit_mask = 1 << (offset & 7)
    return byte_value & bit_mask != 0
def lowersorted(xs):
    """Sort case-insensitively by each item's first element."""
    def casefolded_first(item):
        return item[0].lower()
    return sorted(xs, key=casefolded_first)
def do_digest_auth(id):
    """
    A function that performs Digest authentication by comparing the
    requested user name and password name with its own DB.

    Returns the stored secret for a known id, or None for unknown users.
    """
    id_list = {"admin": "admin", "testUser01": "testUser01"}
    return id_list.get(id)
from typing import Mapping
import click
def format_verbose_output(filename: str, print_statements: Mapping[int, str]) -> str:
    """Return the formatted output used when the `--verbose` flag is provided.

    Args:
        filename: Name of the file currently being checked.
        print_statements: Mapping of line number where the print statement is
            present to the code representation of that print statement.

    Returns:
        Formatted output or an empty string if there are no print statements.
    """
    if not print_statements:
        return ""
    lines = [click.style(filename, fg="blue")]
    for start, statement in print_statements.items():
        for lineno, line in enumerate(statement.splitlines(), start=start):
            styled_number = click.style(lineno, dim=True)
            styled_code = click.style(line, bold=True)
            lines.append(f"  {styled_number}  {styled_code}")
    return "\n".join(lines)
import os
def get_project_path():
    """ Returns (absolute) path to project's root folder """
    path = os.path.abspath(__file__)
    while True:
        path, component = os.path.split(path)
        # assume the 'src' folder sits directly in the root folder (!!)
        if component == 'src':
            return path
        if component == '':
            raise RuntimeError("Could not find project's root directory")
import re
def demultiplex(row, nperhit):
    """Demultiplexes this dict and returns a list of dicts.

    'Input.*'/'Answer.*' keys with an '_<i>' suffix are routed to the i-th
    sub-record (with prefix and suffix stripped); un-suffixed Input/Answer
    keys and all other keys are copied into every sub-record.
    """
    suffix_pattern = re.compile(r'_\d+$')
    records = []
    for idx in range(nperhit):
        # start from a full copy of the row
        record = dict(**row)
        for key, value in sorted(record.items()):
            # Only Input./Answer. fields are de-multiplexed; remove them
            # first, then re-add the ones that belong to this sub-record.
            if not key.startswith('Input.') and not key.startswith('Answer.'): continue
            del record[key]
            simple_key = key.replace('Input.', '').replace('Answer.', '')
            if suffix_pattern.search(simple_key):
                if not simple_key.endswith('_%d' % idx):
                    continue  # belongs to a different sub-record
                simple_key = simple_key.rsplit('_', 1)[0]
            record[simple_key] = value
        records.append(record)
    return records
def element_dict_from_obj(element, type_dict, expand=None):
    """
    Resolve the element to the type and return a dict
    with the values of defined attributes
    :param Element element: element to serialise
    :param dict type_dict: mapping of element.typeof -> {'attr': [...]}
        naming the attributes to extract for each known type
    :param list expand: optional expansion flags; when it contains 'group',
        group members are serialised recursively instead of referenced
    :return dict representation of the element
    """
    expand = expand if expand else []
    known = type_dict.get(element.typeof)
    if known:
        elem = {'type': element.typeof}
        for attribute in known.get('attr', []):
            # Group members are expanded recursively only when requested.
            if 'group' in element.typeof and 'group' in expand:
                if attribute == 'members':
                    elem[attribute] = []
                    for member in element.obtain_members():
                        # Nested groups keep expanding; leaf members do not.
                        m_expand = ['group'] if 'group' in member.typeof else None
                        elem[attribute].append(
                            element_dict_from_obj(member, type_dict, m_expand))
                else:
                    elem[attribute] = getattr(element, attribute, None)
            else:
                elem[attribute] = getattr(element, attribute, None)
        return elem
    else:
        # Unknown type: fall back to a minimal name/type reference.
        return dict(name=element.name, type=element.typeof)
import socket
def get_ip_address():
    """
    Get current ip address by resolving this device's host name.
    :return: ip address of this device
    """
    return socket.gethostbyname(socket.gethostname())
def integrityCheck(variables, envVariables):
    """
    Checks each variable and its values with the environment variables
    Args:
        variables: new environment variables
        envVariables: existing environment variables
    Returns:
        Bool: True -> every variable and each of its values already exists
        in envVariables; False -> integrity check unsuccessful
    """
    return all(
        var in envVariables
        and all(value in envVariables[var] for value in variables[var])
        for var in variables
    )
import pickle
def read_model(pickle_file_name):
    """Reads model from Pickle file.

    NOTE(review): unpickling executes arbitrary code — only call this on
    trusted files.

    :param pickle_file_name: Path to input file.
    :return: model_object: Instance of `xgboost.XGBClassifier`.
    """
    # A context manager guarantees the handle is closed even when
    # pickle.load raises (the original leaked the handle on error).
    with open(pickle_file_name, 'rb') as pickle_file_handle:
        return pickle.load(pickle_file_handle)
def _build_arguments(keyword_args):
    """
    Builds a dictionary of function arguments appropriate to the index to be computed.
    :param dict keyword_args:
    :return: dictionary of arguments keyed with names expected by the corresponding
        index computation function
    :raises ValueError: for an unsupported index name
    """
    index = keyword_args["index"]
    function_arguments = {"data_start_year": keyword_args["data_start_year"]}
    if index in ("spi", "spei"):
        function_arguments.update(
            scale=keyword_args["scale"],
            distribution=keyword_args["distribution"],
            calibration_year_initial=keyword_args["calibration_start_year"],
            calibration_year_final=keyword_args["calibration_end_year"],
            periodicity=keyword_args["periodicity"],
        )
    elif index == "pnp":
        function_arguments.update(
            scale=keyword_args["scale"],
            calibration_start_year=keyword_args["calibration_start_year"],
            calibration_end_year=keyword_args["calibration_end_year"],
            periodicity=keyword_args["periodicity"],
        )
    elif index == "palmers":
        function_arguments.update(
            calibration_start_year=keyword_args["calibration_start_year"],
            calibration_end_year=keyword_args["calibration_end_year"],
        )
    elif index != "pet":
        # "pet" needs only data_start_year; anything else is unsupported.
        raise ValueError(
            "Index {index} not yet supported.".format(index=index)
        )
    return function_arguments
import math
def entropy_term(x):
    """Helper function for entropy_single: calculates one term in the sum.

    Returns 0.0 for x == 0 (the limit of -x*log2(x) as x -> 0).
    """
    return 0.0 if x == 0 else -x * math.log2(x)
from datetime import datetime
def create_output_folder_name(suffix=None):
    """
    Creates the name of the output folder. The name is a combination of the current date, time, and an optional suffix.
    :param suffix: str, folder name suffix
    :return: str, name of the output directory
    """
    # Record start execution date and time ('YYYY-MM-DD_HH-MM')
    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M')
    # Append the suffix (when given) to form the subdirectory name
    return '_'.join([timestamp, suffix]) if suffix else timestamp
def derive_single_object_url_pattern(slug_url_kwarg, path, action):
    """
    Utility function called by class methods for single object views.
    Builds a slug-based pattern when slug_url_kwarg is given, otherwise a
    numeric pk-based pattern.
    """
    if slug_url_kwarg:
        pattern = r'^%s/%s/(?P<%s>[^/]+)/$' % (path, action, slug_url_kwarg)
    else:
        pattern = r'^%s/%s/(?P<pk>\d+)/$' % (path, action)
    return pattern
def _update_passed_filter(image):
    """Flag an image that fails the platform's minimum requirements.

    Sets image.passed_filter to False when latitude, longitude or format is
    missing/falsy, or when either pixel dimension is 100 or less; otherwise
    the flag is left untouched.  Returns the (possibly mutated) image.
    """
    has_metadata = image.latitude and image.longitude and image.format
    big_enough = image.width_pixels > 100 and image.height_pixels > 100
    if not (has_metadata and big_enough):
        image.passed_filter = False
    return image
import unicodedata
def find_unicodedata_name(data: str) -> list:
    """Look up the official Unicode name of every character in *data*.

    e.g. '♠' -> ['BLACK SPADE SUIT'] and '\\N{BLACK SPADE SUIT}' == '♠'

    :param data: input string
    :return: list of Unicode character names, one per character
    """
    return [unicodedata.name(char) for char in data]
def _calculate_bwa_score(cigar, edit_dist, weights):
    """
    Derive the BWA alignment score from the CIGAR and the edit distance,
    without walking the two sequences again.
    Returns alignment score.
    """
    last = len(cigar) - 1
    gap_penalties = 0
    total_gaps = 0
    for idx, (length, op) in enumerate(cigar):
        # Only interior operations that do not consume both sequences count
        # as gaps (inner insertions or deletions).
        if 0 < idx < last and not op.consumes_both():
            gap_penalties += weights.gap_penalty(length)
            total_gaps += length
    # Whatever part of the edit distance is not explained by gaps is mismatches.
    mismatches = edit_dist - total_gaps
    return cigar.aligned_len - weights.mismatch * mismatches - gap_penalties
from numpy import amax
from copy import deepcopy
def normalize(img):
    """
    Normalize each 2-dimensional slice of an N-dimensional array to values
    between 0.0 ~ 1.0, dividing every slice by its own maximum value.

    Parameters
    ----------
    img : N-dimensional ndarray image containing gray-scale pixel values.
        Ndarray must be either 3 || 4 dimensional.
        if 3-dimensional, the 2-dim slices are indexed by the first axis:
        img[idx][:, :]
        if 4-dimensional, the 2-dim slices are indexed by the last two axes:
        img[:, :][time][zidx]

    Returns
    -------
    ret : Normalized n-dimensional ndarray (same shape as img)

    Raises
    ------
    AttributeError
        If img is not 3- or 4-dimensional.
    """
    ndim = len(img.shape)
    if ndim not in (3, 4):
        raise AttributeError("ndarray must be [3/4] dimensional")
    ret = deepcopy(img)
    if ndim == 3:
        # One 2-dim slice per leading index.  The whole-slice division
        # replaces the original per-pixel Python loops (same values, O(n)
        # numpy work instead of nested interpreter loops).
        for i in range(ret.shape[0]):
            ret[i, :, :] = ret[i, :, :] / amax(ret[i, :, :])
    else:
        # One 2-dim slice per (time, z) pair.
        for t in range(ret.shape[2]):
            for z in range(ret.shape[3]):
                ret[:, :, t, z] = ret[:, :, t, z] / amax(ret[:, :, t, z])
    return ret
def q3(series):
    """
    Extract the 75% quantile (third quartile) from a pandas series.

    Input
    ---
    series : pandas series

    Returns
    ---
    75% quantile
    """
    return series.quantile(0.75)
def target_help():
    """Returns help string.

    Returns a string containing detailed documentation on the current deployment
    target, to be displayed when users invoke the
    ``mlflow deployments help -t <target-name>`` CLI command.
    """
    # NOTE: the literal below is printed verbatim by the CLI; keep its
    # indentation intact so the example code renders correctly.
    return """
    MLflow deployment plugin to deploy MLflow models to Google Cloud Vertex AI.
    Example::
        from mlflow import deployments
        client = deployments.get_deploy_client("google_cloud")
        deployment = client.create_deployment(
            name="deployment name",
            model_uri=...,
            # Config is optional
            config=dict(
                # Deployed model config
                machine_type="n1-standard-2",
                min_replica_count=None,
                max_replica_count=None,
                accelerator_type=None,
                accelerator_count=None,
                service_account=None,
                explanation_metadata=None, # JSON string
                explanation_parameters=None, # JSON string
                # Model container image building config
                destination_image_uri=None,
                timeout=None,
                # Model deployment config
                sync="true",
                # Endpoint config
                description=None,
                # Vertex AI config
                project=None,
                location=None,
                experiment=None,
                experiment_description=None,
                staging_bucket=None,
            )
        )
    """
def grabOverlappingKmer(seq, sitei, pos=0, k=9):
    """Grab the kmer from seq for which it is in the pos position at sitei
    Return the gapped and non-gapped kmer
    This is a generalization of grabKmer for pos = 0
    If seq[sitei] is a gap then the non-gapped kmer is None.
    If there are not enough non-gap AA to return before/after sitei then it returns None
    Parameters
    ----------
    seq : str
        Sequence from which peptide will be grabbed.
    sitei : int
        Key position of the kmer (zero-based indexing)
    pos : int
        The position of the key sitei in the kmer.
    k : int
        Length of the peptide to return.
    Returns
    -------
    gapped : str
        A k-length peptide that overlaps sitei
    nonGapped : str
        A k-length peptide that overlaps sitei
        If seq[sitei] is a gap then returns None.
        If not then all gaps are removed before taking the k-length peptide
        (if there aren't k AAs then return is None)"""
    # Residues needed to the right of sitei (inclusive of sitei) and to the left.
    aaRight = k - pos
    aaLeft = pos
    # The anchor position itself must not be a gap.
    if seq[sitei] == '-':
        return None, None
    # Proceed only when a full k-mer fits within the sequence bounds.
    if (sitei + aaRight) <= len(seq) and (sitei - aaLeft) >= 0:
        if pos<k:
            # Right-hand side: sitei plus the following aaRight-1 residues.
            rh = seq[sitei:]
            fullRH = rh[:aaRight]
            if '-' in fullRH:
                # Gapped on the right: drop all gaps, then re-take aaRight
                # residues if enough remain; otherwise the non-gapped side fails.
                ngRH = rh.replace('-', '')
                if len(ngRH) >= aaRight:
                    ngRH = ngRH[:aaRight]
                else:
                    ngRH = None
            else:
                ngRH = fullRH
        else:
            fullRH = ''
            ngRH = ''
        if pos>0:
            # Left-hand side: the aaLeft residues immediately before sitei.
            lh = seq[:sitei]
            fullLH = lh[-aaLeft:]
            if '-' in fullLH:
                # Gapped on the left: same recovery strategy as the right side.
                ngLH = lh.replace('-', '')
                if len(ngLH) >= aaLeft:
                    ngLH = ngLH[-aaLeft:]
                else:
                    ngLH = None
            else:
                ngLH = fullLH
        else:
            fullLH = ''
            ngLH = ''
        full = fullLH + fullRH
        # The non-gapped kmer exists only when both halves could be completed.
        if ngLH is None or ngRH is None:
            ng = None
        else:
            ng = ngLH + ngRH
        return full, ng
    else:
        return None, None
else:
return None, None | 378866da9dc9af0898a3a07a0aa3ac3e6988bb36 | 31,049 |
def mult2_op(array_a, array_b):
    """Return the product of the two operands (element-wise for arrays)."""
    product = array_a * array_b
    return product
import argparse
def parse_args():
    """Parse input arguments.

    Builds the CLI for the neural-network log parser and returns the parsed
    argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='Parse logs for Neural Networks')
    # Database-generation mode: when set, all other flags except
    # --out_database are ignored.
    parser.add_argument('--gen_database', dest='gen_data',
                        help="If this flag is passed the other flags will have no "
                             "effects, despite out_database. --gen_data <path where the parser must search for ALL LOGs FILES>",
                        default='')
    parser.add_argument('--out_database', dest='out_data', help="The output database name",
                        default='./error_log_database')
    parser.add_argument('--database', dest='error_database',
                        help='Where database is located', default="errors_log_database")
    parser.add_argument('--benchmarks', dest='benchmarks',
                        help='A list separated by \',\' (commas with no sapace) where each item will be the benchmarks that parser will process.'
                             '\nAvailiable parsers: Darknet, Hotspot, GEMM, HOG, lavamd'
                             '\nnw, quicksort, accl, PyFasterRCNN, Lulesh, LUD, mergesort.'
                             ' Darknet, Darknetv2 and Lenet benchmarks need --parse_layers parameter, which is False if no layer will be parsed, and True otherwise.'
                             ' Darknet, Darknetv2, HOG, and PyFasterRCNN need a Precision and Recall threshold value.'
                             'If you want a more correct radiation test result pass --check_csv flag')
    # Boolean flags below use store_true, so passing the flag enables the feature.
    parser.add_argument('--parse_layers', dest='parse_layers',
                        help='If you want parse Darknet layers, set it True, default values is False',
                        default=False, action='store_true')
    parser.add_argument('--pr_threshold', dest='pr_threshold',
                        help='Precision and Recall threshold value,0 - 1, defautl value is 0.5',
                        default=0.5)
    parser.add_argument('--check_csv', dest='check_csv',
                        help='This parameter will open a csv file which contains all radiation test runs, then it will check '
                             'if every SDC is on a valid run, default is false',
                        default=False, action='store_true')
    parser.add_argument('--ecc', dest='ecc',
                        help='If the boards have ecc this is passed, otherwise nothing must be passed', default=False,
                        action='store_true')
    parser.add_argument('--is_fi', dest='is_fi', help='if it is a fault injection log processing', action='store_true',
                        default=False)
    parser.add_argument('--err_hist', dest='parse_err_histogram',
                        help='This parameter will generate an histogram for a serie of error threshold,'
                             ' these error threshold are calculated using ERROR_RELATIVE_HISTOGRAM dict values',
                        default=False,
                        action='store_true')
    parser.add_argument('--multithread', dest='multithread', help='If multithread is activated each '
                                                                  'benchmark will be parsed in a thread',
                        default=False, action='store_true')
    args = parser.parse_args()
    return args
def ask_for_file_type():
    """Prompt until the user enters a supported storage format.

    Accepts 'json' (text file in JSON format) or 'bin' (binary file)."""
    while True:
        file_type = input('Type of the file to store data (json/bin): ')
        if file_type in ('json', 'bin'):
            return file_type
def pssm_smooth(pssm_original, pssm_smooth_new, w_smooth, seq_len):
    """Accumulate a sliding-window sum of pssm_original into pssm_smooth_new.

    Each position i receives the sum of the rows within half a window
    ((w_smooth - 1) // 2) on either side, truncated at the sequence ends.
    Returns pssm_smooth_new (mutated in place).
    """
    half = (w_smooth - 1) // 2
    for i in range(seq_len):
        # NOTE: the left-edge test deliberately uses true division, matching
        # the original semantics for even window widths.
        if i < (w_smooth - 1) / 2:
            window = range(i + half + 1)          # truncated on the left edge
        elif i >= seq_len - half:
            window = range(i - half, seq_len)     # truncated on the right edge
        else:
            window = range(i - half, i + half + 1)  # full window centred on i
        for j in window:
            pssm_smooth_new[i] += pssm_original[j]
    return pssm_smooth_new
def fcf(lastebit, atr, dep, amor, cwc, capex):
    """Free cash flow: after-tax EBIT plus depreciation and amortisation,
    minus the change in working capital and capital expenditure."""
    after_tax_ebit = lastebit * (1 - atr)
    return after_tax_ebit + dep + amor - cwc - capex
def generate_alias(tbl):
    """Generate a table alias, consisting of all upper-case letters in
    the table name, or, if there are no upper-case letters, the first letter +
    all letters preceded by _

    param tbl - unescaped name of the table to alias
    """
    uppers = [ch for ch in tbl if ch.isupper()]
    if uppers:
        return "".join(uppers)
    # Pair every character with its predecessor (a virtual '_' before the
    # first character), keeping characters that follow an underscore.
    initials = [ch for ch, before in zip(tbl, "_" + tbl) if before == "_" and ch != "_"]
    return "".join(initials)
import time
def format_timestamp(timestamp=None, fmt='%Y-%m-%d-%H-%M-%S'):
    """Convert a UNIX timestamp into formatted local time; defaults to now.

    :param timestamp: seconds since the epoch; None (the default) means the
        current time.  (The previous default, ``time.time()`` in the
        signature, was evaluated once at import time, silently freezing
        "now" — hence the None sentinel.)
    :param fmt: strftime format string, default '%Y-%m-%d-%H-%M-%S';
        any other strftime pattern (e.g. '%Y.%m.%d.%H.%M.%S') works too.
    :return: formatted time string
    """
    if timestamp is None:
        timestamp = time.time()
    return time.strftime(fmt, time.localtime(timestamp))
def get_one_element(singleton):
    """Return an arbitrary element of *singleton*, or None when it is empty."""
    return next(iter(singleton), None)
def inserir_site(conn, site):
    """
    Insert a new site row into the web_site table.

    :param conn: open database connection
    :param site: tuple (name[string], data_inclusao[date])
    :return: id of the newly inserted row
    """
    statement = ''' INSERT INTO web_site(name,data_inclusao)
              VALUES(?,?) '''
    cursor = conn.cursor()
    cursor.execute(statement, site)
    return cursor.lastrowid
def face_plane(point):
    """
    Bitmask of the six face-plane(s) of the unit cube that point P is outside of.

    @type point: numpy.ndarray | (float, float, float)
    """
    code = 0
    # (positive-side bit, negative-side bit) for the x, y and z axes.
    bits = ((0x01, 0x02), (0x04, 0x08), (0x10, 0x20))
    for axis, (pos_bit, neg_bit) in enumerate(bits):
        if point[axis] >= .5:
            code |= pos_bit
        if point[axis] < -.5:
            code |= neg_bit
    return code
import re
def getFilename(resp,url):
""" tries to figure out the filename by either looking at the response
header for content-disposition, or by extracting the last segment of the URL
"""
filename = ''
if "Content-Disposition" in resp.headers.keys():
if 'filename' in resp.headers["Content-Disposition"]:
filename = re.findall("filename=(.+)", resp.headers["Content-Disposition"])[0]
else:
filename = url.split("/")[-1]
else:
filename = url.split("/")[-1]
return filename | 77529d910ff4585cd755f8cce872a40683e1bde1 | 31,065 |
import socket
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Return a port number that is currently unused and should be bindable.

    A throwaway socket of the requested family/type (defaults: AF_INET,
    SOCK_STREAM) is bound to 0.0.0.0 with port 0, so the OS hands out an
    unused ephemeral port; the socket is then closed and that port number
    is returned.
    """
    with socket.socket(family, socktype) as probe:
        probe.bind(('', 0))
        return probe.getsockname()[1]
def prep_reviews(df):
    """Convert a Letterboxd reviews dataframe to a list of lower-cased reviews.

    The original loop did ``i = i.lower()``, which only rebound the loop
    variable and never changed the list; the comprehension below actually
    applies the lower-casing.
    """
    return [review.lower() for review in df['Review'].tolist()]
import json
def edit_label(old_name, new_name, new_color, repos, session, origin):
    """
    Edit a label on every repository except the one it originated from.

    :param old_name: previous name of the label, or None when it is unchanged
    :param new_name: new name of the label
    :param new_color: new color of the label
    :param repos: repositories in which the label is edited
    :param session: session for communication
    :param origin: repository where the label came from
    :return: message code "200" (str)
    """
    # The payload is identical for every repository, so build it once.
    payload = json.dumps({"name": new_name, "color": new_color})
    for repo in repos:
        if repo == origin:
            continue
        # PATCH the label under its current name; when no rename is
        # requested (old_name is None) the label is addressed by new_name.
        label = new_name if old_name is None else old_name
        session.patch("https://api.github.com/repos/" + repo + "/labels/" + label, payload)
    return "200"
def called(before=None, error=None):
    """Run the decorated function immediately, feeding it before()'s result.

    When *before* is given, it runs first and its return value is passed to
    the decorated function; otherwise the function is called with no
    arguments.  Exceptions from either call go to *error* when provided,
    and are re-raised otherwise.

    WARNING: this is not for the regular decorating of a function — it is
    syntactic sugar for a callback, i.e.::

        @called(
            lambda: º.ajax('https://www.google.com'),
            lambda err: print('error:', err))
        def success(data=None):
            print("sweet!")
            print(data)
    """
    def decorator(function):
        try:
            if before is None:
                return function()
            return function(before())
        except Exception as exc:
            if error is None:
                raise exc
            error(exc)
    return decorator
def beautify_message(message):
    """Strip SHACL-specific formatting so the message reads more naturally."""
    if message.startswith("Less than 1 values on"):
        # Keep only the part after the '->' path separator.
        tail = message[message.find("->") + 2:]
        message = "Less than 1 value on " + tail
    return message
def dep_dir(tree, node_idx):
    """Return 'R' if the node at the given position in the given tree is
    its parent's right child, 'L' otherwise. Return None for the technical root.
    @rtype: str"""
    parent_idx = tree.parents[node_idx]
    if parent_idx < 0:
        return None
    return 'R' if node_idx > parent_idx else 'L'
import subprocess
def ExtractSDKInfo(info, sdk):
    """Extract information about the SDK via xcrun (e.g. info='path' or 'version')."""
    command = ['xcrun', '--sdk', sdk, '--show-sdk-' + info]
    return subprocess.check_output(command).strip()
def get_sentinel_urls(search_scenes, bands=["red", "nir"]):
    """
    Collect download URLs for the requested bands from an element84 search.

    inputs:
        - search_scenes: satsearch query whose .items() yields scene items
        - bands: band names that correspond to the Sentinel datasets
    :return: dict with one '<band>_band_info' list of hrefs per band, plus
        the scene dates under 'dates'
    :raises Exception: wrapping any failure while reading the search result
    """
    try:
        items = search_scenes.items()
        items_dates = items.dates()
        band_info = {}
        for band in bands:
            key = band + "_band_info"
            # One href per scene, in scene order.
            band_info[key] = [item.asset(band)["href"] for item in items]
            print(
                "{0} references found for band {1}".format(
                    len(band_info[key]), band
                )
            )
        band_info["dates"] = items_dates
        return band_info
    except Exception as e:
        # NOTE: the original had an unreachable `exit()` after this raise;
        # it has been removed.
        raise Exception("error in search scenes: ", e)
def _trimStruct2D(s):
    """
    Return a copy of the structured 2D transform with its highest-frequency
    level removed: drops the f<N-1>_D/H/V and i<N-1>_D/H/V entries and
    decrements 'N'.  The input dict is left unmodified.
    """
    trimmed = dict(s)
    level = trimmed['N'] - 1
    for component in ('D', 'H', 'V'):
        del trimmed['f%i_%s' % (level, component)]
        del trimmed['i%i_%s' % (level, component)]
    trimmed['N'] = level
    return trimmed
import functools
import threading
def thread_funcrun(func):
    """ run function in thread

    The wrapped function is started on a new daemonless thread with the
    original positional and keyword arguments.  (The old code passed
    ``args=(args, kwargs)``, so the target received the packed tuple and
    dict as two positional parameters instead of the real arguments; this
    now forwards them correctly via ``args=...`` and ``kwargs=...``.)

    Examples:
        .. example_code::
            >>> from apu.mp.thread_funcrun import thread_funcrun
            >>> @thread_funcrun
            ... def test(*args, **kwargs):
            ...    for i in range(5):
            ...        print(f'elem: {i}')
            elem: 0
            elem: 1
            elem: 2
            elem: 3
            elem: 4
            Thread started for function "test"
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        threading.Thread(target=func, args=args, kwargs=kwargs).start()
        print(f"Thread started for function \"{func.__name__}\"")
    return wrapper
def question2_1(input_list):
    """Remove duplicates from an unsorted list, keeping first occurrences.

    Uses dict.fromkeys, which preserves insertion order and runs in O(n)
    instead of the original quadratic list-membership scan.  Elements must
    be hashable (true for the letters/strings this was written for).
    """
    return list(dict.fromkeys(input_list))
def different_utr5_boundary(transcript1, transcript2):
    """Check if two transcripts have different UTR5 exon boundaries.

    Transcripts with a different number of UTR5 exons are reported as
    different.  (The original indexed transcript2 with transcript1's
    indices and could raise IndexError when transcript2 had fewer exons.)
    """
    if len(transcript1.utr5_exons) != len(transcript2.utr5_exons):
        return True
    for exon1, exon2 in zip(transcript1.utr5_exons, transcript2.utr5_exons):
        if exon1[0] != exon2[0] or exon1[1] != exon2[1]:
            return True
    return False
def fix_conf(jgame):
    """Fixture for a default configuration."""
    configuration = jgame["configuration"]
    return configuration
from typing import List
from typing import Dict
def data_splits_from_folds(folds: List[str]) -> List[Dict[str, List[str]]]:
    """
    Build Leave-One-Out-style train/valid/test splits from fold names.

    The folds are sorted; each fold in turn becomes the test split, the
    following fold (cyclically) the validation split, and all remaining
    folds the training split.  With 5 folds ["fold00", ..., "fold04"]:
    test=fold00, val=fold01, train=fold02..04; then test=fold01,
    val=fold02, ...; finally test=fold04, val=fold00, train=fold01..03.
    """
    ordered = tuple(sorted(folds))
    assert len(ordered) == len(set(ordered)), "Folds are not unique"
    count = len(ordered)
    splits: List[Dict[str, List[str]]] = []
    for idx, test_fold in enumerate(ordered):
        valid_fold = ordered[(idx + 1) % count]
        train_folds = [fold for fold in ordered if fold not in (test_fold, valid_fold)]
        assert not set(train_folds).intersection(
            {test_fold, valid_fold}
        ), "Train folds are not distinct from the dev and the test folds"
        splits.append(
            {
                "train": train_folds,
                "valid": [valid_fold],
                "test": [test_fold],
            }
        )
    return splits
def TRIN(advances, declines, advVol, decVol):
    """Arms Index (TRIN).  Takes four equal-length numpy arrays: daily
    advances, declines, advancing volume and declining volume.  Returns an
    array of the same length, or None when the lengths differ."""
    sizes = {advances.size, declines.size, advVol.size, decVol.size}
    if len(sizes) != 1:
        return None
    issue_ratio = advances.astype(float) / declines.astype(float)
    volume_ratio = advVol.astype(float) / decVol.astype(float)
    return issue_ratio / volume_ratio
def host_dict():
    """Return a Host dictionary."""
    return dict(
        target="1.2.3.4",
        board_id="abcd1234",
        eid="ZaAvk46j",
        flagged=True,
        hostnames="host",
        id="No3e25l6",
        label="label",
        notes="Note",
        os="OS",
        os_type="Linux",
        out_of_scope=True,
        owned=False,
        reviewed=True,
        shell=True,
        thumbs_down=True,
        thumbs_up=True,
        type="Unknown",
    )
from pathlib import Path
from typing import Dict
from typing import Optional
import toml
def read_pyproject_toml(working_dir: Path) -> Dict[str, Optional[str]]:
    """
    Read the ``[tool.pytestdocgen]`` table of the project's ``pyproject.toml``.

    Args:
        working_dir: CWD. Usually a root of source code

    Returns:
        Mapping of ``--<key>`` CLI option names to configured values
        (empty when no ``pyproject.toml`` exists)

    Raises:
        toml.TomlDecodeError: Failed to decode
        OSError: Failed to read a file
    """
    toml_path: Path = working_dir / "pyproject.toml"
    if not toml_path.is_file():
        return dict()
    parsed = toml.load(toml_path)
    section = parsed.get("tool", {}).get("pytestdocgen", {})
    return {f"--{str(key)}": section[key] for key in section}
def _unpickle_cached_list(cls, *args, **kwargs):
    """
    Rebuild a cached list during unpickling, marking it for lazy unpacking.

    The ``_unpack`` attribute tells the list to unpack its values on first
    access.  This is deferred because the cache backend breaks if the cache
    is touched while an object is still being unpickled.
    """
    instance = cls(*args, **kwargs)
    instance._unpack = True
    return instance
def get_target_key(target_dict: dict, key: str) -> str:
    """
    Case-insensitively resolve *key* against the keys of *target_dict*,
    returning the actual key name or None when there is no match.
    """
    # Build a lower-case index; on case-insensitive duplicates the last
    # dict key wins, matching the original behaviour.
    lookup = {name.lower(): name for name in target_dict}
    return lookup.get(key.lower())
def tag_contents_xpath(tag, content):
    """Build an xpath matching *tag* elements whose contents contain
    *content*, case-insensitively (via an A-Z -> a-z translate())."""
    lowered = content.lower()
    return (
        '//' + tag + '[contains(translate(*,"ABCDEFGHIJKLMNOPQRSTUVWXYZ",'
        '"abcdefghijklmnopqrstuvwxyz"),"' + lowered + '")]'
    )
import random
def sample_minor_versions(versions, sample):
    """
    Pick randomly a sample from the given versions, excluding plain
    "X.0.0" releases.

    :param versions: A list of valid semver strings.
    :param sample: The number of versions to choose from the available
        versions.  A sample of 0 now yields an empty list — the old
        ``minors[-0:]`` slice accidentally returned *every* version.
    """
    minors = []
    for version in versions:
        major, minor, patch = version.split('.')
        if minor != '0' or patch != '0':
            minors.append(version)
    random.shuffle(minors)
    if sample <= 0:
        return []
    return minors[-sample:]
def make_title() -> str:
    """Title string kept stable so the sniffer doesn't break."""
    title = "Components"
    return title
import subprocess
def user_info(name: str) -> list:
    """Return the Active Directory attributes of *name* as a list of
    decoded PowerShell output lines (CP866, split on CRLF)."""
    command = ["powershell", "-Command",
               f'Get-AdUser -Identity {name} -Property *']
    raw = subprocess.check_output(command)
    return raw.decode("CP866").split('\r\n')
def intersect(pieceA, pieceB):
    """ Check if two 2-len tuples have at least one element in common
    """
    first, second = pieceA[0], pieceA[1]
    return first in pieceB or second in pieceB
from functools import reduce
def _extract_name_from_tags(tag_list):
    """
    Extract the value of the 'Name' tag of an EC2 instance.

    Scans the tag dictionaries for Key == 'Name' and returns the first
    matching value — the original funnelled a filtered list through
    ``reduce(lambda a, b: a, ...)``, which is just an obfuscated "take the
    first item".  Returns '' when no 'Name' tag exists.

    :param tag_list: List of tags for an EC2 instance
    :return: Name value
    """
    for tag in tag_list:
        if tag['Key'] == 'Name':
            return tag['Value']
    return ''
def gasca_intre_noduri(tabla_de_joc, nr_nod_initial, nr_nod_scop, nr_nod_gasca):
    """
    Check whether a goose (at node nr_nod_gasca) sits on a straight line
    exactly halfway between the start node and the goal node.

    :param tabla_de_joc: TablaDeJoc instance (game board)
    :param nr_nod_initial: index of the start node
    :param nr_nod_scop: index of the goal node
    :param nr_nod_gasca: index of the node holding the goose
    :return: True/False
    """
    neighbours = tabla_de_joc.muchii[nr_nod_gasca]
    if nr_nod_initial not in neighbours or nr_nod_scop not in neighbours:
        return False
    start = tabla_de_joc.noduri[nr_nod_initial].punct
    goal = tabla_de_joc.noduri[nr_nod_scop].punct
    goose = tabla_de_joc.noduri[nr_nod_gasca].punct
    # The goose is "between" when start->goose and goose->goal are the same
    # displacement vector (x1-x2, y1-y2 on each axis).
    step_in = (goose[0] - start[0], goose[1] - start[1])
    step_out = (goal[0] - goose[0], goal[1] - goose[1])
    return step_in == step_out
def is_mobile(request):
    """ See if it's mobile mode
    """
    return {'is_mobile': request.MOBILE == 1}
import os
def move(source: str, dest: str, make_dest: bool = False) -> str:
"""
Move source to dest
Return path to new version
Params
source
path to original file/folder
dest
path to new containing directory.
This will assume that the directory is on the same disk as os.getcwd()
make_dest
create destination if it doesn't already exist
"""
dest = (os.path.realpath, str)[os.path.isabs(dest)](dest)
if not os.path.isdir(dest):
if not make_dest:
raise ValueError(f"Destination's path doesn't point to a directory")
os.makedirs(dest, exist_ok=True)
root, name = os.path.split(source)
new = os.path.join(dest, name)
os.rename(source, new)
return new | ebc169abffba8ec04b04fdc562ff55bac13ecec0 | 31,104 |
import json
def save_json(data, path):
"""
Save a JSON file to the specified path.
"""
with open(path, "w") as file:
return json.dump(data, file) | d93a212dc97a5f3a6a059868aedc92f67f146599 | 31,105 |
import subprocess
def __run(command):
"""
Execute shell command
:param command: list, required
:return: string
"""
output = subprocess.run(command, capture_output=True, text=True)
if output.returncode != 0: # An error occurred
raise ValueError(output.stderr)
else:
return output.stdout | ba02cc57dc4246df5307d3f4516828b553f86437 | 31,106 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.