content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def query(db):
    """Build the join condition linking organisations to their type.

    Returns the query expression
    ``org_organisation.organisation_type_id == org_organisation_type.id``.
    """
    org_table = db.org_organisation
    type_table = db.org_organisation_type
    return org_table.organisation_type_id == type_table.id
import binascii
def _wrap_base64(data, wrap=64):
"""Break a Base64 value into multiple lines."""
data = binascii.b2a_base64(data)[:-1]
return b'\n'.join(data[i:i+wrap]
for i in range(0, len(data), wrap)) + b'\n' | 9d07d6c757d10212c0f11be67452f1779b07e19e | 44,076 |
def create_query_token_func(session, model_class):
    """Build a ``query_token`` callable usable by a resource protector.

    :param session: SQLAlchemy session
    :param model_class: TokenCredential class
    """
    def query_token(client_id, oauth_token):
        # Fetch the single credential matching both identifiers (or None).
        return (
            session.query(model_class)
            .filter_by(client_id=client_id, oauth_token=oauth_token)
            .first()
        )
    return query_token
def check_parsed_args_compatible(imp, modules, contact, cc, parser):
    """
    Check that the combination of arguments is compatible.

    Args:
        imp(str): CSV import specifier
        modules(str): Modules argument
        contact(str): Contact argument
        cc(str): CC argument
        parser(:class:`argparse.ArgumentParser`): Parser instance

    Raises:
        :class:`argparse.ArgumentParser` error:
            * --import cannot be used with --contact or --cc
            * a contact/cc requires a specific module, not a whole area
    """
    has_contact_info = bool(contact or cc)
    if imp and has_contact_info:
        parser.error("--import cannot be used with --contact or --cc")
    # A contact/cc must target a specific module, not a whole area.
    if has_contact_info and not modules:
        parser.error("You cannot set all modules in an area to one contact/cc,"
                     " enter a specific module.")
    # Just in case parser.error doesn't stop the script
    return 1
def mod(p):
    """
    Return the Euclidean norm of a 3D vector.

    p: indexable of Cartesian coordinates; only the first three are used.
    """
    x, y, z = p[0], p[1], p[2]
    return (x * x + y * y + z * z) ** 0.5
def sum_diagonals(side_length):
    """Sum the diagonal values of a number spiral with the given side length."""
    total = 25    # diagonal sum of the 3x3 spiral
    corner = 3    # bottom-right corner of the previous ring
    for size in range(5, side_length + 2, 2):
        corner += 4 * size - 10
        # Each ring contributes four equally spaced corner values.
        total += sum(corner + (size - 1) * step for step in range(4))
    return total
import numpy
def construct_VHS_incore(system, xshifted, sqrt_dt):
    """Construct the one body potential from the HS transformation.

    Parameters
    ----------
    system :
        system class providing ``iA``, ``iB``, ``nchol`` and ``nbasis``.
    xshifted : numpy array
        shifted auxiliary field; the first ``nchol`` entries multiply
        ``iA`` and the rest multiply ``iB``.
    sqrt_dt : float or complex
        square root of the timestep, scaling the potential.

    Returns
    -------
    VHS : numpy array
        the HS potential, shape (nbasis, nbasis).
    """
    # The previous version pre-allocated a zeros array that was immediately
    # overwritten by the expression below; the dead allocation is removed.
    VHS = (system.iA * xshifted[:system.nchol] +
           system.iB * xshifted[system.nchol:])
    VHS = VHS.reshape(system.nbasis, system.nbasis)
    return sqrt_dt * VHS
def get_weight_of(level: int) -> int:
    """Return the weight of a given `level`. The ratio is 1:3:5 for
    modules of L4:L5:L6 respectively; any other input weighs 0."""
    weights = {4: 1, 5: 3, 6: 5}
    # The isinstance guard keeps non-int (including unhashable) inputs at 0;
    # dict.get replaces the redundant membership test + lookup.
    return weights.get(level, 0) if isinstance(level, int) else 0
def formatter(ms):
    """
    Format a millisecond count as '<n>ms' or '<s>.<mmm>s'.

    :param ms: the number of ms
    :return: a string in milliseconds when under one second, otherwise
             seconds with the millisecond remainder zero-padded to 3 digits.
    """
    sec, remainder = divmod(int(ms), 1000)
    if sec == 0:
        return '{0}ms'.format(remainder)
    # Zero-pad the remainder: previously 1005 ms rendered as '1.5s',
    # which reads as 1.5 seconds instead of 1.005 seconds.
    return '{0}.{1:03d}s'.format(sec, remainder)
def form_binary_patterns(k: int) -> list:
    """
    Return all binary numbers of length 'k' as strings (with leading
    zeroes), in ascending numeric order.
    """
    template = '{{:0{}b}}'.format(k)
    return [template.format(value) for value in range(2 ** k)]
def containsZero(n):
    """
    n: an int or a str
    output: True if the string form of n contains '0'
    """
    # Use membership instead of the original `letter is '0'`, which compares
    # object identity and only worked by accident of small-string interning.
    return '0' in str(n)
import calendar
import time
def timestamp():
    """Return the Unix timestamp (whole seconds) of the current UTC time."""
    utc_now = time.gmtime()
    return calendar.timegm(utc_now)
def produit(a, b):
    """Return the product of the numbers a and b."""
    resultat = a * b
    return resultat
def extract_hairpin_name_and_sequence(file, sampleName):
    """
    Read one MIRNA cluster file and map its cluster name to its hairpin
    sequence.

    Line 1 holds the cluster name (first whitespace-separated token) and
    line 3 the hairpin sequence.  ``sampleName`` is accepted for interface
    compatibility but is not used here.

    Returns a one-entry dict {cluster name: hairpin sequence}.
    """
    with open(file, "r") as handle:
        lines = handle.readlines()
    cluster_name = lines[0].split(" ")[0].strip()
    hairpin_sequence = lines[2].strip()
    return {cluster_name: hairpin_sequence}
def _max_len(choices):
"""Given a list of char field choices, return the field max length"""
lengths = [len(choice) for choice, _ in choices]
return max(lengths) | 2042ff1466554abc2cbfdb6fc0faff664759ac55 | 44,094 |
def load_keywords(kw_file, lex):
    """Load keywords from ``kw_file`` and map them through ``lex``.

    Each line is either a single column (the keyword itself) or two columns,
    in which case the second column is the keyword.

    Returns (mapped keywords, original keyword strings).
    """
    kwords = []
    orig_kwords = []
    with open(kw_file, "r", encoding="utf-8") as fpr:
        for line in fpr:
            parts = line.strip().split()
            # Two-column format keeps the second column; otherwise the first.
            # This collapses the previously duplicated if/else branches.
            key = parts[-1] if len(parts) == 2 else parts[0]
            kwords.append(lex[key])
            orig_kwords.append(key)
    return kwords, orig_kwords
def CommandLine(command, args):
    """Convert an executable path and a sequence of arguments into a command
    line that can be passed to CreateProcess."""
    def quote(text):
        # Double embedded quotes, then wrap the whole token in quotes.
        return '"' + text.replace('"', '""') + '"'
    return ' '.join([quote(command)] + [quote(arg) for arg in args])
import torch
def reflect(v: torch.Tensor, axis: torch.Tensor):
    """Reflect vector `v` with respect to `axis`.

    Args:
        `v`: tensor of shape `[...,3]`.
        `axis`: tensor broadcastable to the shape of `v`.
    Returns:
        the reflected vector.
    """
    axis = torch.broadcast_to(axis, v.shape)
    # Project v onto the axis, then mirror: r = 2*(axis . v)*axis - v.
    projection = torch.sum(axis * v, dim=-1, keepdim=True)
    return 2 * axis * projection - v
def findPermutation(permMat):
    """
    In so far as permMat is a permutation matrix, returns the permutation
    as (index, value) pairs: per row, the entry of largest magnitude
    (sign-sensitive, so a dominant negative entry is kept as-is).
    """
    result = []
    for vec in permMat:
        row = vec.tolist()
        hi, lo = max(row), min(row)
        # Keep whichever extreme has the larger magnitude.
        if hi >= -lo:
            result.append((row.index(hi), hi))
        else:
            result.append((row.index(lo), lo))
    return result
def _delete_enhancement_git_data(enhancement):
"""Deletes the base GitHub data fields form an enhancement dictionary.
Parameters:
enhancement - The enhancement dictionary from which the GitHub data
fields should be deleted. This dictionary must contain
the base GitHub fields.
Returns the passed enhancement dictionary with all of base GitHub fields
deleted from it.
"""
del enhancement['git_id']
del enhancement['git_url']
del enhancement['git_status']
del enhancement['git_datetime']
return enhancement | 16745d6cb1d001278feb7b72609d843b73319166 | 44,099 |
def comp_surface_magnet(self):
    """Compute the magnet surface of the Hole via the shoelace formula.

    Parameters
    ----------
    self : HoleMLSRPM
        A HoleMLSRPM object

    Returns
    -------
    S: float
        Surface of the Magnet. [m**2]
    """
    point_dict = self._comp_point_coordinate()
    # Quadrilateral corners Z3-Z4-Z7-Z8, given as complex coordinates.
    corners = [point_dict[label] for label in ("Z3", "Z4", "Z7", "Z8")]
    xs = [z.real for z in corners]
    ys = [z.imag for z in corners]
    # Shoelace formula over the closed polygon.
    cross = 0.0
    for idx in range(4):
        nxt = (idx + 1) % 4
        cross += xs[idx] * ys[nxt] - xs[nxt] * ys[idx]
    return 0.5 * abs(cross)
def to_nested_dict(d, delim='.', copy=True):
    """Expand delimited keys into nested dictionaries.

    {"a.b.c": 0} becomes {"a": {"b": {"c": 0}}}.

    Keys without the delimiter are left untouched.  When ``copy`` is True
    (the default) the input mapping itself is not modified.
    """
    nested = dict(d) if copy else d
    # Snapshot the keys because we mutate the dict while walking it.
    for key in list(d):
        if delim not in key:
            continue
        value = nested.pop(key)
        # 'a.b.c' -> parents ['a', 'b'], leaf 'c'
        *parents, leaf = key.split(delim)
        cursor = nested
        for parent in parents:
            cursor = cursor.setdefault(parent, {})
        cursor[leaf] = value
    return nested
import random
import string
def get_random_string(length=10):
    """Return a string of ``length`` random uppercase ASCII letters."""
    letters = [random.choice(string.ascii_uppercase) for _ in range(length)]
    return ''.join(letters)
def _convert_line_to_tab_from_orifile(line):
"""
:param line:
:return:
>>> _convert_line_to_tab_from_orifile('''IMG_1468832894.185000000.jpg -75.622522 -40.654833 -172.350586 \
657739.197431 6860690.284637 53.534337''')
['IMG_1468832894.185000000.jpg', '-75.622522', '-40.654833', '-172.350586', '657739.197431', '6860690.284637', '53.534337']
"""
return line.split() | 2e5343da7673c9897d97ae29003a8f4fa29c78a4 | 44,104 |
import torch
def get_ja(arr):
    """
    Jaw aperture: Euclidean distance between the upper lip (UL) and jaw (J/LI).

    Column layout: Jx = 6, ULx = 10, Jy = 7, ULy = 11.
    """
    dx = arr[:, 6] - arr[:, 10]
    dy = arr[:, 7] - arr[:, 11]
    return torch.sqrt(dx ** 2 + dy ** 2)
def is_ranged_value(value, min_value=None, max_value=None,
                    min_inclusive: bool=True, max_inclusive: bool=True) -> bool:
    """
    Check whether ``value`` lies within an optionally open interval.

    Parameters
    ----------
    value : float
        float : the value as a float
    min_value / max_value : float / None
        float : the constraint is active
        None : the constraint is inactive
    min_inclusive / max_inclusive; bool; default=True
        flips [min_value, max_value] to:
         - (min_value, max_value)
         - [min_value, max_value)
         - (min_value, max_value]

    Returns
    -------
    is_ranged : bool
        is the value in range

    Note: the original carried large amounts of commented-out debug printing
    and inline comments with the inclusive/exclusive labels swapped; the
    logic itself was correct and is preserved here.
    """
    if min_value is not None:
        # inclusive: value == min_value passes; exclusive: it does not.
        if value < min_value or (not min_inclusive and value == min_value):
            return False
    if max_value is not None:
        if value > max_value or (not max_inclusive and value == max_value):
            return False
    return True
def get_dynamic_edgelist(data):
    """Make a directed edge list of the sequential site visits per user per
    day.

    A dummy start node marks the transition from being home to the first
    site visited that day.  Self-loops (no movement) are dropped.

    Returns a DataFrame with columns [from, to, total_people, date_time].
    """
    # Work on a copy: the original version added a 'total_people' column to
    # the caller's DataFrame as a side effect.
    data = data.copy()
    data['total_people'] = 1
    edges = data.groupby(["user_id", "date_time", "date",
                          "cell_id"]).sum()["total_people"].to_frame()
    edges.reset_index(inplace=True)
    # 'dummy_start_node' marks edges from home to the first location visited.
    edges["from"] = 'dummy_start_node'
    edges["to"] = edges["cell_id"]
    # Consecutive rows for the same user on the same day form a chain.
    make_link = (edges["user_id"].shift(1) == edges["user_id"]) & \
                (edges["date"].shift(1) == edges["date"])
    # .loc avoids pandas chained assignment, which can silently fail to write.
    edges.loc[make_link, "from"] = edges["cell_id"].shift(1)[make_link]
    dynamic_edgelist = edges[["from", "to", "total_people", "date_time"]]
    return dynamic_edgelist[dynamic_edgelist['from'] != dynamic_edgelist['to']]
def linear_segment(x0, x1, y0, y1, t):
    """Evaluate at ``t`` the line interpolating (x0, y0) and (x1, y1)."""
    fraction = (t - x0) / (x1 - x0)
    return y0 + fraction * (y1 - y0)
from datetime import datetime
def now() -> str:
    """Return string timestamp for current UTC time, to the second."""
    from datetime import timezone
    # datetime.utcnow() is deprecated since Python 3.12; build an aware UTC
    # time and drop the tzinfo so the string keeps its offset-free format.
    return datetime.now(timezone.utc).replace(tzinfo=None).isoformat(timespec='seconds')
import os
import subprocess
def _command_exists(command_name: str) -> bool:
"""Checks if a command exists in the environment."""
check_cli = os.name == 'nt' and 'where' or 'which'
result = subprocess.run(
[check_cli, command_name],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return result.returncode != 0 | 8dec4ba2b99844cac191a71f3b5bba2fc1fd97a1 | 44,111 |
from typing import Any
from typing import Union
from typing import List
from typing import Sequence
def _list_convert(x: Any) -> Union[Any, List[Any]]:
"""Converts argument to list if not already a sequence."""
return [x] if not isinstance(x, Sequence) else x | 79a913305a931378e2cb2b8f46a74f1381850ac4 | 44,112 |
def p_value_stars(p_value):
    """
    Return the significance-star string for a p-value.

    :param p_value: p-value to classify
    :type p_value: float
    :return: '****' down to '*' for increasingly weak significance,
             '-' when not significant at the 0.05 level
    """
    thresholds = ((0.0001, "****"), (0.001, "***"), (0.01, "**"), (0.05, "*"))
    for cutoff, stars in thresholds:
        if p_value < cutoff:
            return stars
    return "-"
def title(value):
    """Title-case a string, treating hyphens as word separators."""
    spaced = value.replace('-', ' ')
    return spaced.title()
from typing import Iterable
from typing import List
def make_error_col_names(qcols: Iterable) -> List:
    """helper func to make error column names of the form
    <col_name>_low ... <col_name>_high

    Args:
        qcols (iterable): an iterable of column names used for matching
    Returns:
        list: list of error col names in non-interleaved order
    """
    lows = [f"{name}_low" for name in qcols]
    highs = [f"{name}_high" for name in qcols]
    return lows + highs
import os
def os_parityPath(path):
    """
    Converts unix paths to correct windows equivalents.
    Unix native paths remain unchanged (no effect)
    """
    normalized = os.path.normpath(os.path.expanduser(path))
    # A path that normalizes to a leading backslash needs a drive letter.
    return 'C:' + normalized if normalized.startswith('\\') else normalized
import copy
def add_node_to_path(node, path):
    """
    Return a safe copy of ``path`` with the node's name appended to its
    list of node names; the distance and building totals are unchanged.

    Parameters:
        path: list composed of [[list of strings], int, int]
            Represents the current path of nodes being traversed. Contains
            a list of node names, total distance traveled, and total
            number of buildings.
        node: Node
            Representing a building being added to the path

    Returns:
        A safely copied version of path with the node name added to the end
        of the first element.
    """
    # deepcopy so the inner list of names is not shared with the input
    updated = copy.deepcopy(path)
    updated[0] = updated[0] + [node.get_name()]
    return updated
def create_label_cleaner(metadata_raw):
    """Use metadata to create a dict mapping default column names to
    human-readable column names.

    Parses the Stata ``label variable`` lines 278-546 of the metadata dump.
    """
    labels = {}
    for line in metadata_raw.split("\n")[278:547]:
        variable = line.replace("label variable ", "").split(" ")[0]
        # The readable label is the first double-quoted segment on the line.
        readable = line.split('"')[1::2][0]
        labels[variable] = readable
    return labels
def get_div(value, start):
    """Return the largest divisor of ``value`` not exceeding ``start``.

    Falls back to 1 when no divisor is found in [1, start].
    """
    for candidate in range(start, 0, -1):
        if value % candidate == 0:
            return candidate
    return 1
def pack_str(var):
    """Encode the string form of ``var`` as UTF-8 bytes."""
    text = str(var)
    return text.encode("utf-8")
import os
def running_on_ci():  # no cov
    # type: () -> bool
    """If the system is running on a CI platform."""
    ci_markers = ('CI', 'TRAVIS', 'GITHUB_ACTIONS')
    return any(os.environ.get(name) for name in ci_markers)
def userInputToNumber():
    """Prompt for an integer and a float; return their product (a float)."""
    whole = int(input("please input an integer: "))
    fraction = float(input("please input a float: "))
    return whole * fraction
def parse_rc_ns(rrset):
    """Extract the domain name and its sorted set of NS names from an rrset."""
    ns_names = []
    domain = ""
    for record in str(rrset.to_text()).split('\n'):
        fields = record.split(' ')
        if fields[3] == 'NS':
            # Strip the trailing dot from both the NS target and the owner.
            ns_names.append(fields[4][:-1].lower())
            domain = fields[0][:-1]
    ns_names.sort()
    return domain, ns_names
import operator
def hamming_distance(s1, s2, equality_function=operator.eq):
"""
Returns the hamming distance between two strings.
"""
if not len(s1) == len(s2):
raise ValueError("String lengths are not equal")
# Number of non-matching characters:
return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2)) | f5fd74d3eb6c33f0a51dc5b61158d4f6e51e6b9e | 44,128 |
import os
import json
def get_format_dict(cogs_dir):
    """Get a dict of numerical format ID -> the format dict.

    Returns an empty dict when ``formats.json`` is missing or empty.
    """
    path = f"{cogs_dir}/formats.json"
    if not os.path.exists(path) or os.stat(path).st_size == 0:
        return {}
    with open(path, "r") as handle:
        raw = json.loads(handle.read())
    return {int(key): value for key, value in raw.items()}
import six
def iterable(obj, strok=False):
    """
    Checks if the input implements the iterator interface. An exception is made
    for strings, which return False unless `strok` is True

    Args:
        obj (object): a scalar or iterable input
        strok (bool): if True allow strings to be interpreted as iterable

    Returns:
        bool: True if the input is iterable

    Example:
        >>> obj_list = [3, [3], '3', (3,), [3, 4, 5], {}]
        >>> result = [iterable(obj) for obj in obj_list]
        >>> assert result == [False, True, False, True, True, True]
        >>> result = [iterable(obj, strok=True) for obj in obj_list]
        >>> assert result == [False, True, True, True, True, True]
    """
    try:
        iter(obj)
    except Exception:
        return False
    # On Python 3, six.string_types is simply (str,); the six dependency
    # is unnecessary and has been dropped.
    return strok or not isinstance(obj, str)
import re
def remove_repeated_strings(text):
    """Collapse identical consecutive strings (repeated more than 3 times)
    into a single occurrence.

    Args:
        text (string): markdown text that is going to be processed.
    Returns:
        string: text once it is processed.
    """
    # 1-4 char unit (excluding '#IVX0' and newlines) repeated 4+ times.
    pattern = re.compile(r'([^#IVX0\n]{1,4}?)(\1){3,}', re.UNICODE)
    return pattern.sub(r'\1', text)
def annotation2rgb(i, palette, arr):
    """Go from annotation of patch to color: fill every pixel of ``arr``
    with the palette color of annotation ``i``.

    Parameters
    ----------
    i:int
        Annotation index.
    palette:palette
        Index to color mapping; color components are floats in [0, 1].
    arr:array
        Image array whose last axis holds the RGB channels.

    Returns
    -------
    array
        Resulting image.
    """
    col = palette[i]
    # The original loop reused ``i`` as the channel index, shadowing the
    # annotation-index parameter; use a distinct name instead.
    for channel in range(3):
        arr[..., channel] = int(col[channel] * 255)
    return arr
def fig_path(path, *specs, suffix='.png'):
    """Get output path for figure: the stem joined with the specs by '-',
    with the suffix replaced."""
    name = '-'.join((path.stem,) + specs)
    target = path.parent / name
    return target.with_suffix(suffix)
def copy_candidates(from_alias, to_alias, alias2qids, max_candidates=30, qids_to_add=None):
    """Copy the candidate list of ``from_alias`` to the new alias ``to_alias``.

    ``from_alias`` must exist and ``to_alias`` must not.  ``qids_to_add``
    are prepended with the current top score so they stay within the top
    ``max_candidates``.
    """
    if qids_to_add is None:
        qids_to_add = []
    assert from_alias in alias2qids, f"The from_alias {from_alias} must be in the alias2qids mapping. Use add_new_alias command from a new alias"
    assert to_alias not in alias2qids, f"The to_alias {to_alias} must not be in alias2qids."
    candidates = alias2qids[from_alias]
    if len(qids_to_add) > 0:
        # Prepend the requested QIDs at the current best score.
        best_score = candidates[0][1]
        candidates = [[qid, best_score] for qid in qids_to_add] + candidates
    if len(candidates) > max_candidates:
        print(f"Filtering candidates down to top {max_candidates}")
        candidates = candidates[:max_candidates]
    alias2qids[to_alias] = candidates
    return alias2qids
import random
def _rnd():
"""
"""
return random.randint(0, (2**32)) | 4041329e9739844e9da04a06341edb9d7dd2e4e5 | 44,138 |
from typing import Callable
from typing import Optional
from typing import Tuple
def then_parser(
    first_parser: Callable[[str], Optional[Tuple[str, str]]],
    second_parser: Callable[[str], Optional[Tuple[str, str]]],
    text: str,
) -> Optional[Tuple[str, str]]:
    """
    Run ``first_parser`` on the text, then ``second_parser`` on the
    remainder; concatenate the parsed pieces.  Returns None as soon as
    either parser fails.
    """
    first_result = first_parser(text)
    if first_result is None:
        return None
    parsed_1, remainder = first_result
    second_result = second_parser(remainder)
    if second_result is None:
        return None
    parsed_2, remainder_2 = second_result
    return parsed_1 + parsed_2, remainder_2
import pickle
def load_investment_results(iteration):
    """Unpickle and return the investment results for ``iteration``."""
    path = f'output/investment_plan/investment-results_{iteration}.pickle'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def _translate_backupjobrun_summary_view(context, backupjobrun):
"""Maps keys for backupjobruns summary view."""
d = {}
d['id'] = backupjobrun['id']
d['created_at'] = backupjobrun['created_at']
d['status'] = backupjobrun['status']
return d | 3cf0394aff3191ab2db69f29f4052411f90c572b | 44,142 |
def inbreeding_as_dispersion(inbreeding, unique_haplotypes):
    """Calculate dispersion parameter of a Dirichlet-multinomial
    distribution assuming equal population frequency of each haplotype.

    Parameters
    ----------
    inbreeding : float
        Expected inbreeding coefficient of the sample (must be non-zero).
    unique_haplotypes : int
        Number of possible haplotype alleles at this locus.

    Returns
    -------
    dispersion : float
        Dispersion parameter for all haplotypes.
    """
    uniform_frequency = 1 / unique_haplotypes
    odds = (1 - inbreeding) / inbreeding
    return uniform_frequency * odds
def add_startxref(article, pos_index):
    """Render an object's startxref section as an HTML <div> block.

    A non-zero xref is rendered as a link to the object at that position.
    """
    o_num, o_gen, o_ver = article['o_num'], article['o_gen'], article['o_ver']
    xref = article['content']
    parts = [f'<div id="obj{o_num}.{o_gen}.{o_ver}">\n<pre>\n', 'startxref\n']
    if xref == 0:
        parts.append('0\n')
    else:
        parts.append(f'<a class="obj-link" href="#obj{pos_index[xref]}">{xref}</a>\n')
    parts.append('\n</pre>\n</div>\n')
    return ''.join(parts)
def josephus(n, k):
    """ Function that calculates the last person location
    of the josephus problem

    Args:
        n(int): number of people in circle
        k(int): step rate
    Returns:
        int: index value of the winner
    """
    # special case, k = 1: each count kills the next person in order,
    # so the last person standing is index n - 1
    if k == 1:
        return n - 1
    # base case, if only one person left, they win
    elif n <= 1:
        return 0
    # Number of people killed in one pass around the current circle.
    # Integer division is essential: under Python 3, '/' yields a float
    # and breaks every index computation below (the original bug).
    if k <= n:
        num_dead = n // k
    else:
        num_dead = 1
    # first and last people to die in the current round
    # (mod n handles the case k > n)
    first_index = (k - 1) % n
    last_index = first_index + k * (num_dead - 1)
    # which person the next round starts counting from
    next_round_start = last_index + 1
    # recursion to find out who the winner is in the next round
    winner_next_round = josephus(n - num_dead, k)
    # translate that over to the current round's numbering:
    # the people in [next_round_start, n) are all alive
    if next_round_start + winner_next_round < n:
        return next_round_start + winner_next_round
    # look at [0, next_round_start); every k-th person there is dead
    winner_next_round -= (n - next_round_start)
    block_num = winner_next_round // (k - 1)
    index_in_block = winner_next_round % (k - 1)
    return block_num * k + index_in_block
import six
import re
from datetime import datetime
def convert_to_python(data):
    """Convert raw data (e.g. from json conversion) into the appropriate
    Python objects.

    Strings of the form YYYY-MM-DD become ``datetime`` objects; dicts,
    tuples and lists are converted recursively; anything else is returned
    unchanged.
    """
    # six.string_types is just (str,) on Python 3; the dependency is dropped.
    if isinstance(data, str):
        # Dates
        if re.match(r'^\d{4}-\d{2}-\d{2}$', data):
            return datetime.strptime(data, '%Y-%m-%d')
    if isinstance(data, dict):
        return {key: convert_to_python(value) for key, value in data.items()}
    if isinstance(data, tuple):
        return tuple(map(convert_to_python, data))
    if isinstance(data, list):
        return list(map(convert_to_python, data))
    return data
def _pitch2m(res):
"""Convert pitch string to meters.
Something like -600- is assumed to mean "six-hundreths of an inch".
>>> _pitch2m("-600-")
4.233333333333333e-05
>>> _pitch2m("-1200-")
2.1166666666666665e-05
"""
res = int(res[1:-1])
return 0.0254 / res | a7fa2acaf8bbbf647bda3a35acfd5aad370df7f3 | 44,147 |
async def qr():
    """
    words: QR-code content, a URL or a sentence
    version: QR-code size, in the range [1, 40]
    level: QR-code error-correction level, one of {L, M, Q, H}; H is the
        highest and the default.
    picture: custom QR-code background image; supported formats are
        .jpg, .png, .bmp, .gif; defaults to black and white
    colorized: whether the QR-code background is colorized; defaults to
        False, i.e. black and white
    contrast: contrast; higher values give more contrast; defaults to 1.0
    brightness: brightness; higher values are brighter; defaults to 1.0,
        usually kept equal to the contrast value
    save_name: QR-code file name; defaults to qrcode.png
    save_dir: QR-code output directory; defaults to the working directory

    NOTE(review): currently a stub returning an empty dict; the documented
    parameters are not yet accepted by the signature.
    """
    return {}
def tmp_bdb_root(mocker, tmp_path):
    """Set a temporary root directory for the BerkeleyDB minter hierarchy.

    By default, a BDB path resolved by the minter references a location in
    EZID's minter hierarchy, as configured in the EZID settings (currently
    `ezid/db/minters`).  This fixture patches both import paths of
    ``_get_bdb_root`` so BDB paths resolve to an empty tree under /tmp; any
    minters created by the test are deleted when the test exits.

    Returns a pathlib2.Path referencing the root of the tree.  The slash
    operator can be used for creating paths below the root, e.g.
    ``tmp_bdb_root / 'b2345' / 'x1'``.
    """
    minters_root = (tmp_path / 'minters').resolve()
    patch_targets = ('nog.bdb._get_bdb_root', 'impl.nog.bdb._get_bdb_root',)
    for target in patch_targets:
        mocker.patch(target, return_value=minters_root)
    return tmp_path
def parse_csv(columns, line):
    """
    Parse a CSV line that has ',' as a separator.

    Columns is a list of the column names, must match the number of
    comma-separated values in the input line.
    """
    values = line.split(',')
    return {name: values[idx] for idx, name in enumerate(columns)}
import os
def git_pip_link_parse(repo):
    """Return a tuple containing the parts of a git repository.

    Example parsing a standard git repo:

    >>> git_pip_link_parse('git+https://github.com/username/repo@tag')
    ('repo',
     'tag',
     None,
     'https://github.com/username/repo',
     'git+https://github.com/username/repo@tag')

    :param repo: git repo string to parse.
    :type repo: ``str``
    :returns: ``tuple`` of (name, branch, plugin subdirectory or None,
              url, original repo string)
    """
    # Drop the VCS prefix ("git+") when present.
    segments = repo.split('+')
    url_part = segments[1] if len(segments) >= 2 else segments[0]
    # Separate "<url>@<branch>"; the branch defaults to master.
    at_parts = url_part.split('@')
    if len(at_parts) > 1:
        url, branch = at_parts
    else:
        url = at_parts[0]
        branch = 'master'
    name = os.path.basename(url.rstrip('/'))
    fragment_parts = branch.split('#')
    branch = fragment_parts[0]
    plugin_path = None
    # "#egg=...&subdirectory=<path>" marks an installable subdirectory.
    if len(fragment_parts) > 1 and 'subdirectory' in fragment_parts[-1]:
        plugin_path = fragment_parts[1].split('subdirectory=')[1].split('&')[0]
    return name.lower(), branch, plugin_path, url, repo
import torch
def remove_zeros_from_traj(traj):
    """
    Remove the start of the trajectory, if it only contains zeroes, while
    always keeping the last instant. This is especially useful for when the
    trajectory consists of relative displacements (or velocities), because
    it removes 'redundant' instants where the agents are not moving.

    :param traj: Tensor of shape (traj_len, batch, 2). The observed
        trajectory (may be absolute coordinates, or velocities)
    :return: trajectory with starting zeroes removed
    """
    t_start = 0
    for t in range(0, traj.shape[0] - 1):
        # Bug fix: the original read ``.shape[t]`` instead of ``.shape[0]``,
        # so the scan for all-zero timesteps broke after the first step.
        if torch.nonzero(traj[t, :, :]).shape[0] == 0:
            t_start = t + 1
        else:
            break
    return traj[t_start:, :, :]
def armstrong(some_int: int) -> bool:
    """
    Return whether ``some_int`` is an Armstrong (narcissistic) number:
    equal to the sum of its digits each raised to the number of digits.

    :param some_int: non-negative integer to test
    :return: True when the number is an Armstrong number
    """
    digits = str(some_int)
    # Use the digit count as the exponent; the original hard-coded a cube,
    # which is only correct for three-digit inputs (e.g. 9474 and 1634 were
    # misclassified).
    power = len(digits)
    return some_int == sum(int(digit) ** power for digit in digits)
def _get_slash_or_null_from_proto(proto):
"""Potentially returns empty (if the file is in the root directory)"""
return proto.path.rpartition("/")[1] | d7a9cac8aa6cb6a3a109f5579b88d5c4f77644a8 | 44,155 |
def prepare_auth_params_json(bot, manifest):
    """Build the dict serialized into the auth-params JSON file for task_runner.

    This JSON file contains the tokens and configuration parameters that
    allow task_runner to make HTTP calls authenticated by the bot's own
    credentials.  The file is managed by bot_main.py (main Swarming bot
    process) and consumed by task_runner.py; it lives in the task work
    directory.

    Args:
        bot: instance of bot.Bot.
        manifest: dict with the task manifest, as generated by the backend in /poll.
    """
    # 'bot_authenticated_as' is "<kind>:<id>", e.g. "user:abc@example.com" or
    # "bot:abc.example.com"; service accounts appear as "user:<email>".
    bot_ident = manifest.get('bot_authenticated_as', '')
    prefix = 'user:'
    if bot_ident.startswith(prefix):
        bot_service_account = bot_ident[len(prefix):]
    else:
        bot_service_account = 'none'

    def account(acc_id):
        accounts = manifest.get('service_accounts') or {}
        entry = accounts.get(acc_id) or {}
        return entry.get('service_account') or 'none'

    return {
        'bot_id': bot.id,
        'task_id': manifest['task_id'],
        'swarming_http_headers': bot.remote.get_authentication_headers(),
        'swarming_http_headers_exp': bot.remote.authentication_headers_expiration,
        'bot_service_account': bot_service_account,
        'system_service_account': account('system'),
        'task_service_account': account('task'),
    }
import ast
def get_input(arg, valid_keys):
    """Parse ``arg`` as a Python-literal dict and require all ``valid_keys``.

    Returns the dict, or None when parsing fails or any key is missing.
    """
    literal = arg.replace("\\n", "\n")
    try:
        parsed = ast.literal_eval(literal)
        if not all(key in parsed for key in valid_keys):
            return None
    except Exception:
        return None
    return parsed
import os
def partially_configured(build_directory: str) -> bool:
    """
    Determine if project is not configured enough to begin the build process
    @param build_directory: The full path to the build directory
    @return: Whether or not this needs to be configured from scratch
    """
    cache_path = os.path.join(build_directory, "CMakeCache.txt")
    return not os.path.exists(cache_path)
def convert_length(length_from, length_to):
    """
    Determine the length scale factor between two unit systems.

    Returns (xyz_scale, gravity_scale_length).  ``gravity_scale_length``
    adjusts gravity for any non-base length unit; the gravity base units
    are ft/s^2 for english units and m/s^2 for SI.

    Raises NotImplementedError for any unit outside [in, ft, m, cm, mm].
    """
    # length unit -> meters
    to_meters = {'in': 0.0254, 'ft': 0.3048, 'm': 1.0, 'cm': 0.01, 'mm': 0.001}
    # length unit -> its system's gravity base length (ft for english, m for SI)
    to_gravity_base = {'in': 1. / 12., 'ft': 1.0, 'm': 1.0, 'cm': 0.01, 'mm': 0.001}
    if length_from not in to_meters:
        raise NotImplementedError('length from unit=%r; expected=[in, ft, m, cm, mm]' % length_from)
    if length_to not in to_meters:
        raise NotImplementedError('length to unit=%r; expected=[in, ft, m, cm, mm]' % length_to)
    # NOTE(review): the original if/elif chains scaled 'cm' in the wrong
    # direction (cm -> m multiplied coordinates by 100, contradicting the
    # 'mm' handling); the table-driven form fixes that inversion.
    xyz_scale = to_meters[length_from] / to_meters[length_to]
    gravity_scale_length = to_gravity_base[length_from] / to_gravity_base[length_to]
    return xyz_scale, gravity_scale_length
def get_pip_link(page_name):
    """Build the PyPI "files" page URL for a package.

    Any pinned version suffix (``==x.y.z``) is stripped first, so the link
    can be inspected manually when a download fails.

    :param page_name: package name, optionally with an ``==version`` pin
    :return: URL of the package's files page on pypi.org
    """
    if '==' in page_name:
        # Split on the last '==' to mirror the original rfind() behaviour.
        page_name = page_name.rsplit('==', 1)[0]
    return 'https://pypi.org/project/{0}/#files'.format(page_name)
import requests
import json
import logging
def authenticate_with_ome(ip_address: str, user_name: str, password: str) -> tuple:
    """
    Authenticates a session against an OME server.

    Args:
        ip_address: IP address of the OME server
        user_name: Username for OME
        password: Password for the OME user

    Returns: Returns a tuple of auth_success (bool), headers (dict). On
        success the headers include the ``X-Auth-Token`` for the session.
        On failure the error is logged and the process exits with a
        non-zero status.
    """
    auth_success = False
    session_url = "https://%s/api/SessionService/Sessions" % ip_address
    user_details = {'UserName': user_name,
                    'Password': password,
                    'SessionType': 'API'}
    headers = {'content-type': 'application/json'}
    # NOTE(review): verify=False disables TLS certificate validation —
    # acceptable only for appliances with self-signed certs; confirm.
    session_info = requests.post(session_url, verify=False,
                                 data=json.dumps(user_details),
                                 headers=headers)
    if session_info.status_code == 201:
        headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
        auth_success = True
    else:
        error_msg = "Failed create of session with {0} - Status code = {1} - Error: " + str(json.loads(session_info.content))
        logging.error(error_msg.format(ip_address, session_info.status_code))
        # BUG FIX: the original called exit(0), signalling success to the
        # calling shell even though authentication failed.
        exit(1)
    return auth_success, headers
def searchForInsert(sortedList: list, value: float) -> int:
    """Search for where to insert the value for the list to remain sorted.

    Scans from the right for the first element <= value; the insertion
    point is just after it.  Equal elements therefore insert to the right
    of existing duplicates (same as bisect.bisect_right).

    Args:
        sortedList (list): a sorted list
        value (float): the value to insert into the sorted list
    Returns:
        int: the index where to insert the value (0 for an empty list or
        when value precedes every element)
    """
    # BUG FIX: the original had `else: return 0` attached to the in-loop
    # `if`, so only the LAST element was ever examined and any value below
    # it incorrectly returned 0.  The fallback belongs after the loop.
    for i in range(len(sortedList) - 1, -1, -1):
        if sortedList[i] <= value:
            return i + 1
    return 0
def get_flash(tree):
    """Get the memory and control base addresses of the SPI flash to program.

    Follows the devicetree ``metal,entry`` chosen-property reference; returns
    a dict with ``mem_base`` and ``control_base`` when the referenced node is
    a ``sifive,spi`` controller, otherwise None.
    """
    metal_entry = tree.chosen("metal,entry")
    if not metal_entry:
        return None
    node = tree.get_by_reference(metal_entry[0])
    if not node:
        return None
    compatible = node.get_field("compatible")
    if not compatible or "sifive,spi" not in compatible:
        return None
    reg = node.get_reg()
    return {
        "mem_base": reg.get_by_name("mem")[0],
        "control_base": reg.get_by_name("control")[0],
    }
def date_formatter(raw_string):
    """Return a well formatted date as string.

    Expects a compact timestamp like ``YYYYMMDD?HHMMSS[fff...]`` (one
    separator character at index 8); inputs longer than 15 characters are
    assumed to carry fractional seconds after the HHMMSS digits.
    """
    raw_date = raw_string[:8]
    pretty_date = '-'.join((raw_date[:4], raw_date[4:6], raw_date[6:8]))
    raw_time = raw_string[9:]
    pretty_time = ':'.join((raw_time[0:2], raw_time[2:4], raw_time[4:6]))
    if len(raw_string) > 15:
        pretty_time = f'{pretty_time}.{raw_time[6:]}'
    return f'{pretty_date} {pretty_time}'
def deep_to(batch, device, dtype):
    """Recursively move/cast a mini-batch with :func:`to`.

    Lists and tuples are rebuilt (originals are not mutated), dicts are
    updated in place, and leaf tensors are moved to ``device``; only
    floating-point tensors are additionally cast to ``dtype``.

    Args:
        batch (tuple / list / dict / :class:`torch.Tensor`): mini-batch
        device (:class:`torch.device`): desired device
        dtype (:class:`torch.dtype`): desired floating-point datatype

    Returns:
        The moved (and possibly casted) batch, same container shape.
    """
    if isinstance(batch, (list, tuple)):
        moved = [deep_to(element, device, dtype) for element in batch]
        return tuple(moved) if isinstance(batch, tuple) else moved
    if isinstance(batch, dict):
        for key in batch:
            batch[key] = deep_to(batch[key], device, dtype)
        return batch
    # Leaf: assumed to be a tensor-like object exposing .dtype and .to().
    if batch.dtype.is_floating_point:
        return batch.to(device, dtype)
    return batch.to(device)
def prune_with_bound(decision_tree, bound):
    """
    Prunes the tree as described by Algorithm 3 in Appendix E of the paper.
    Only the outside 'while' loop of the algorithm is implemented here; the
    'for' loop is implemented by the 'prune_tree' method of the decision
    tree classifier object (which is called here).

    Returns the best (smallest accepted) bound value reached.
    """
    # Initialise the bound from the leftmost leaf of the tree.
    node = decision_tree.tree
    while not node.is_leaf():
        node = node.left_subtree
    best_bound = bound(node)
    # Repeatedly prune while the smallest pruning coefficient stays under
    # the best bound seen so far.
    coefficients = decision_tree.compute_pruning_coefficients(bound)
    while coefficients and coefficients[0] <= best_bound:
        best_bound = coefficients[0]
        decision_tree.prune_tree(best_bound)
        coefficients = decision_tree.compute_pruning_coefficients(bound)
    return best_bound
def parse_listItems(lis):
    """Get data from lists.

    For every item of every list element, collect the portion of its
    ``.text`` before the first comma.  Raises ValueError if an item's text
    contains no comma (same as the original).
    """
    collected = []
    for entry in lis:
        for element in entry:
            text = element.text
            collected.append(text[:text.index(',')])
    return collected
def sum_two_smallest_numbers(numbers):
    """Find two lowest positive integers and add them.

    :param numbers: sequence with at least two numeric elements
    :return: sum of the two smallest values
    """
    # Sort once — the original sorted the whole list twice, once per lookup.
    smallest, second_smallest = sorted(numbers)[:2]
    return smallest + second_smallest
import re
def parse_log(line):
    """Parse logs with regex.

    Extracts the request URL (text between a PUT/GET/POST/HEAD verb and the
    trailing " HTTP" protocol marker) and the request time (the final number
    on the line) from a raw access-log line.

    Args:
        line: Decoded line
    Returns:
        request_time_data: string or None if the log can't be parsed
        url_data: string or None if the log can't be parsed
    """
    url_pattern = re.compile(r"""((?:(?<=PUT )|(?<=GET )|(?<=POST )|(?<=HEAD ))(.*)(?=\ http))""", re.IGNORECASE)
    time_pattern = re.compile(r"""(([0-9]*[.])?[0-9]+(?!.*\d))$""", re.IGNORECASE)
    url_match = url_pattern.search(line)
    time_match = time_pattern.search(line)
    url_data = url_match.group() if url_match else None
    request_time_data = time_match.group() if time_match else None
    return request_time_data, url_data
import random
def somebells():
    """Returns some random amount of bells (uniform integer in [100, 500])."""
    low, high = 100, 500
    amount = random.randint(low, high)
    return amount
def gipsy_dashboard_widget(context, widget, index=None):
    """
    Template tag that renders a given dashboard module by injecting the
    widget, its template name, and its position into the template context.
    Returns the (mutated) context.
    """
    extra = {
        'template': widget.template,
        'widget': widget,
        'index': index,
    }
    context.update(extra)
    return context
def deregister_sub(topics_to_subs, sub):
    """Deregister a Subscription from a mapping topics->subscriptions.

    Args:
        topics_to_subs (dict): dictionary topic -> list of subscriptions
        sub (Subscription): subscription to deregister

    Returns:
        None when the subscription was removed (list.remove's result), or
        the mapping itself when the topic key is absent — mirroring the
        original's asymmetric return.
    """
    key = str(sub.topics)
    if key not in topics_to_subs:
        return topics_to_subs
    return topics_to_subs[key].remove(sub)
def server_status_field(log, server_status, field_name):
    """
    Return (0, result) for a field of BarreleServerStatusCache by
    delegating to the cache object's bssc_field_result method.
    """
    # pylint: disable=unused-argument
    result = server_status.bssc_field_result(log, field_name)
    return result
def binaryPlusOne(operand):
    """
    Adds 1 to the binary string operand, wrapping on overflow
    (e.g. "111" -> "000").
    No safety, i.e.: operand is assumed to be a string with only binary
    content (i.e.: 0s or 1s); however a string may be returned only if it
    has the same length as operand.
    """
    # Work on a character list: the original rebuilt the string by slicing
    # and concatenation at every step (O(n^2)) through duplicated branches;
    # a list plus one join is linear and identical in behaviour.
    bits = list(operand)
    # Ripple the carry from the least-significant (rightmost) bit: flip
    # trailing '1's to '0' until the first '0', which absorbs the carry.
    for k in range(len(bits) - 1, -1, -1):
        if bits[k] == '0':
            bits[k] = '1'
            break
        bits[k] = '0'
    newSymbol = ''.join(bits)
    assert(len(newSymbol) == len(operand))
    return newSymbol
import warnings
def normalize_df(
        df,
        method,
        drop_degenerate_cols=True,
        replace_zero_denom=False):
    """For a given dataframe with columns including numerical values,
    it generates a function which can be applied to original data as well as
    any future data to normalize using two possible methods.
    The `"statistical"` method removes the "mean" and divides by "std".
    The `"min_max"` method removes the "minimum" and divides by the
    "maximum - minimum".
    If desired, the function also drops the columns which have only one
    possible value and can cause issues not only during normalization
    (returning a column with all NAs) but also potentially during fitting
    as an example.
    Parameters
    ----------
    df : `pandas.DataFrame`
        Input dataframe which (only) includes numerical values in all columns.
    method : `str`
        The method to be used for normalization.
        The "statistical" method removes the "mean" and divides by "std" for each column.
        The "min_max" method removes the "min" and divides by the "max - min"
        for each column.
    drop_degenerate_cols : `bool`, default True
        A boolean to determine if columns with only one possible value should be
        dropped in the normalized dataframe.
    replace_zero_denom : `bool`, default False
        A boolean to decide if zero denominator (e.g. standard deviation for
        ``method="statistical"``) for normalization should be replaced by 1.0.
    Returns
    -------
    normalize_info : `dict`
        A dictionary with the main item being a normalization function.
        The items are as follows:
        ``"normalize_df_func"`` : callable (pd.DataFrame -> pd.DataFrame)
            A function which normalizes the input dataframe (``df``)
        ``"normalized_df"`` : normalized dataframe version of ``df``
        ``"keep_cols"`` : `list` [`str`]
            The list of kept columns after normalization.
        ``"drop_cols"`` : `list` [`str`]
            The list of dropped columns after normalization.
        ``"subtracted_series"`` : `pandas.Series`
            The series to be subtracted which has one value for each column of ``df``.
        ``"denominator_series"`` : `pandas.Series`
            The denominator series for normalization which has one value for each
            column of ``df``.
    """
    # Per-column offset and denominator; both are Series indexed by column.
    if method == "statistical":
        subtracted_series = df.mean()
        denominator_series = df.std()
    elif method == "min_max":
        subtracted_series = df.min()
        denominator_series = (df.max() - df.min())
    else:
        raise NotImplementedError(f"Method {method} is not implemented")
    # Replaces 0.0 in denominator series with 1.0 to avoid dividing by zero
    # when the variable has zero variance
    if replace_zero_denom:
        denominator_series.replace(to_replace=0.0, value=1.0, inplace=True)
    drop_cols = []
    keep_cols = list(df.columns)
    # Broadcasts per column: each column gets its own offset/denominator.
    normalized_df = (df - subtracted_series) / denominator_series
    if drop_degenerate_cols:
        # NOTE(review): a zero denominator makes the whole column NaN, which
        # is what this detects — but NaNs already present in the input ``df``
        # would also trigger the drop with the "degenerate" warning message;
        # confirm inputs are NaN-free.
        for col in df.columns:
            if normalized_df[col].isnull().any():
                drop_cols.append(col)
                warnings.warn(
                    f"{col} was dropped during normalization as it had only one "
                    "possible value (degenerate)")
        keep_cols = [col for col in list(df.columns) if col not in drop_cols]
        if len(keep_cols) == 0:
            raise ValueError(
                "All columns were degenerate (only one possible value per column).")
        # Restrict the parameters to surviving columns so the closure below
        # stays consistent with ``keep_cols``.
        subtracted_series = subtracted_series[keep_cols]
        denominator_series = denominator_series[keep_cols]
    def normalize_df_func(new_df):
        """A function which applies to a potentially new data frame (``new_df``)
        with the same columns as ``df`` (different values or row number is allowed)
        and returns a normalized dataframe with the same normalization parameters
        applied to ``df``.
        This function uses the series `subtracted_series` and
        ``denominator_series`` generated in its outer scope for normalization,
        and in this way ensures the same mapping for new data.
        Parameters
        ----------
        new_df : `pandas.DataFrame`
            Input dataframe which (only) includes numerical values in all columns.
            The columns of ``new_df`` must be the same as ``df`` which is passed to
            the outer function (``normalize_df``) to construct this function.
        Returns
        -------
        normalized_df : `pandas.dataframe`
            Normalized dataframe version of ``new_df``.
        """
        normalized_df = new_df.copy()
        if drop_degenerate_cols:
            normalized_df = normalized_df[keep_cols]
        return (normalized_df - subtracted_series) / denominator_series
    return {
        "normalize_df_func": normalize_df_func,
        "normalized_df": normalized_df,
        "keep_cols": keep_cols,
        "drop_cols": drop_cols,
        "subtracted_series": subtracted_series,
        "denominator_series": denominator_series
} | 277353068155226894b0cdba9fcf56b24e58a891 | 44,186 |
def is_interesting_file(file_name):
    # type: (str) -> bool
    """Return true if this file should be checked.

    Interesting files are ``.py`` files under buildscripts/ or pytests/,
    excluding the vendored cpplint copy and anything under src/third_party.
    """
    excluded_files = ("buildscripts/cpplint.py",)
    excluded_dirs = ("src/third_party",)
    if file_name in excluded_files or file_name.startswith(excluded_dirs):
        return False
    included_dirs = ("buildscripts", "pytests")
    return file_name.endswith(".py") and file_name.startswith(included_dirs)
def _check_handle(handle):
"""Checks if provided file handle is valid."""
return handle is not None and handle.fileno() >= 0 | 7bce4bc10f4a7ee9c393a4e6a79eb3cdc22f4a12 | 44,189 |
def clear_data_for_origin(origin: str, storageTypes: str) -> dict:
    """Clears storage for origin.

    Builds the CDP command payload; it does not perform any I/O itself.

    Parameters
    ----------
    origin: str
        Security origin.
    storageTypes: str
        Comma separated list of StorageType to clear.
    """
    params = {"origin": origin, "storageTypes": storageTypes}
    return {"method": "Storage.clearDataForOrigin", "params": params}
def get_one_item(CURSOR, BARCODE):
    """
    Returns the specified item from the database, determined by the
    primary key - in this case the barcode number.

    Args:
        CURSOR (object): database cursor used to run the query
        BARCODE (str): the barcode number
    Returns:
        ITEM: the matching row, or None when no row has that barcode
    """
    sql = """
    SELECT
        *
    FROM
        pantry
    WHERE
        barcode_number = ?
    ;"""
    row = CURSOR.execute(sql, [BARCODE]).fetchone()
    return row
def linear_search(L, v):
    """ (list, object) -> int
    Return the index of the first occurrence of v in L, or
    return -1 if v is not in L.
    >>> linear_search([2, 3, 5, 3], 2)
    0
    >>> linear_search([2, 3, 5, 3], 5)
    2
    >>> linear_search([2, 3, 5, 3], 8)
    -1
    """
    for index, element in enumerate(L):
        if element == v:
            return index
    return -1
def closure(s):
    """
    ε-closure(s): the ε-closure of the state set ``s`` — every state in
    ``s`` plus all states reachable from those states via ε-transitions
    alone.

    :param s: collection of NFA states
    :return: currently returns ``s`` unchanged (see NOTE below)
    """
    # NOTE(review): the real closure computation below is commented out, so
    # this function is presently a stub that returns its input unchanged —
    # confirm whether callers rely on that before finishing the implementation.
    # ret_list = []
    # for state in s:
    # ret_list.append(state)
    #
    # if isinstance(state.value, EBNFNode):
    # if state.value.node_type != NodeType.RANGE:
    # pass
    # else: # any '*' or '+' is treated as a reachable state
    # # ret_list.append(state)
    # pass
    # elif state.value == "begin": # begin
    # ret_list.extend(state.next_state_list)
    # else:
    # pass
    #
return s | 7253c90215e01370086b01cd5330daac9429f3d3 | 44,198 |
def matrix_identity(n: int) -> list:
    """
    Generate the n-sized identity matrix as a list of lists
    (1 on the diagonal, 0 elsewhere).
    """
    # NOTE(review): n == 0 yields [0] — a flat list rather than an empty
    # matrix. Preserved for compatibility; confirm callers depend on it.
    if n == 0:
        return [0]
    return [[1 if row == col else 0 for row in range(n)] for col in range(n)]
def get_gcd(x, y):
    """Calculate the greatest common divisor of two numbers.

    Iterative Euclidean algorithm; expects positive integers (a zero
    argument raises ZeroDivisionError, as in the recursive original).
    """
    if y > x:
        x, y = y, x
    while x % y != 0:
        x, y = y, x % y
    return y
import base64
def img_to_base64(img):
    """Encode raw image bytes as base64.

    :param img: raw image content (bytes-like)
    :return: the base64-encoded bytes
    """
    encoded = base64.b64encode(img)
    return encoded
import requests
from bs4 import BeautifulSoup
def getPDFlink(extension_list):
    """
    Scrapes each ICO profile page and collects the Whitepaper download
    links if available.  The positions (indexes into ``extension_list``)
    of profiles that yielded a link are recorded so ICOs without a
    download link can be identified.

    :param extension_list: URL path extensions of the ICO profile pages
    :return: tuple of (indexes with links, collected link URLs)
    """
    base_url = "http://ico.coincheckup.com"
    found_indexes = []
    pdf_links = []
    for position, extension in enumerate(extension_list):
        response = requests.get(base_url + extension)
        if response.status_code != 200:
            continue
        parsed = BeautifulSoup(response.content, 'html.parser')
        buttons = parsed.select("a.button")
        # The whitepaper download is the 4th button on the profile page.
        if len(buttons) > 3:
            pdf_links.append(buttons[3]['href'])
            found_indexes.append(position)
    return found_indexes, pdf_links
def raise_failure_factory(message):
    """
    Create a function that raises a failure message.

    :param str message: Message to raise in combination with the process's
        standard error
    :return func raise_failure: callable taking a 3-tuple of process output
        and raising RuntimeError
    """
    def raise_failure(process_output):
        # Only the stderr component (third element) is surfaced; the tuple
        # must have exactly three elements.
        _stdout, _status, stderr_text = process_output
        raise RuntimeError("{}\n{}".format(message, stderr_text))
    return raise_failure
import requests
def get_from_github_with_auth_raw(url, apikeys):
    """Perform a basic-auth GET against the GitHub API.

    :param url: full URL to fetch
    :param apikeys: mapping providing "github-user" and "github-key"
    :return: the raw ``requests.Response``
    """
    headers = {}
    username = apikeys.get("github-user")
    key = apikeys.get("github-key")
    # BUG FIX: requests.get's second positional parameter is ``params``,
    # not ``headers`` — the original call sent the dict as query params.
    return requests.get(url, headers=headers, auth=(username, key))
def clean(text):
    """
    Clean text for creating a folder: every non-alphanumeric character
    is replaced with an underscore.
    """
    sanitized = []
    for ch in text:
        sanitized.append(ch if ch.isalnum() else "_")
    return "".join(sanitized)
import math
def repeat_data(preload, num_simulation_steps):
    """Repeats the transformer preload data until there are as many values
    as there are simulation steps.

    Args:
        preload: (list): Containing the data (floats) to be repeated.
        num_simulation_steps: (int): Number of simulation steps and expected
            length of the returned list.

    Returns:
        list: Repeated values; len() == num_simulation_steps.
    """
    full_repeats, remainder = divmod(num_simulation_steps, len(preload))
    return preload * full_repeats + preload[:remainder]
def get_node_and_attribute(attribute):
    """
    Split a name between its node and its attribute.

    Args:
        attribute (str): attribute name, node.attribute.
    Returns:
        list: [node_name, attribute] — attribute is '' when there is no dot.
    """
    parts = attribute.split('.')
    if not parts:
        return None, None
    node_name = parts[0]
    attr_name = '.'.join(parts[1:])
    return node_name, attr_name
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.