content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import random
def id_gen(bits=32):
    """Return a randomly generated non-negative int of at most `bits` bits."""
    value = random.getrandbits(bits)
    return int(value)
|
aad302c45e2ec8f1541f015aae30fe1cd341c7e5
| 83,701
|
def _latch_name(node, syntax):
"""Return latch name for `node`."""
# terminal ?
if node.var is None:
latch = syntax['TRUE']
else:
# avoid identifiers containing `-`
node_id = str(int(node)).replace('-', 'n')
latch = 'latch_{i}'.format(i=node_id)
return latch
|
534146656d0c2eface4e1fc9e7e4566a61827e44
| 83,703
|
import typing
def voiceless_variants(base_phone) -> typing.Set[str]:
    """Return `base_phone` together with its common voiceless IPA variants.

    Parameters
    ----------
    base_phone: str
        Voiceless IPA phone
    Returns
    -------
    set[str]
        base_phone itself plus base_phone suffixed with each diacritic.
    """
    suffixes = ["", "ʱ", "ʼ", "ʰ", "ʲ", "ʷ", "ˠ", "ˀ", "̚", "͈"]
    variants = set()
    for suffix in suffixes:
        variants.add(base_phone + suffix)
    return variants
|
806f1d37c210a0743852e76fb746ebaf245dcb9c
| 83,704
|
def ensure_str(val):
    """Return `val` as a str, decoding bytes/bytearray (UTF-8 default).

    Raises ValueError for any other type.
    """
    if isinstance(val, (bytes, bytearray)):
        return val.decode()
    if isinstance(val, str):
        return val
    raise ValueError('Expected bytes or string')
|
2b40c5c3fa2f466a084f8575916a1aa5e0e60d09
| 83,709
|
def get_typing_type(plotly_type, array_ok=False):
    """
    Get the Python type string for a valType from the plotly schema.

    Parameters
    ----------
    plotly_type : str
        a plotly datatype string
    array_ok : bool
        Whether lists/arrays are permitted
    Returns
    -------
    str
        Python type string
    """
    # Direct one-to-one mappings.
    simple = {
        "data_array": "numpy.ndarray",
        "info_array": "list",
        "colorlist": "list",
        "integer": "int",
        "boolean": "bool",
    }
    # Groups of valTypes sharing one Python type.
    grouped = [
        (("string", "color", "colorscale", "subplotid"), "str"),
        (("enumerated", "flaglist", "any"), "Any"),
        (("number", "angle"), "int|float"),
    ]
    if plotly_type in simple:
        pytype = simple[plotly_type]
    else:
        for members, name in grouped:
            if plotly_type in members:
                pytype = name
                break
        else:
            raise ValueError("Unknown plotly type: %s" % plotly_type)
    return f"{pytype}|numpy.ndarray" if array_ok else pytype
|
79fb7cbed7d87e33bd67356b1e220a9f0317e809
| 83,710
|
def greatest_prod(digits, n):
    """Find the greatest product of `n` adjacent digits in the string `digits`.

    Uses a sliding window: instead of re-multiplying each window, the digit
    leaving the window is divided out and the digit entering is multiplied
    in. Zeros (and ones) are never folded into the running product; a zero
    counter tracks whether the current window contains any zero, in which
    case the window's true product is 0 and it is skipped.

    Parameters
    ----------
    digits : str
        String of decimal digits.
    n : int
        Window size, 0 < n <= len(digits).

    Returns
    -------
    int
        Greatest product over all windows of `n` adjacent digits.
    """
    # Product of the first window, excluding 0s and 1s (1s don't change the
    # product; excluding 0s lets us divide factors out exactly while sliding).
    prod = 1
    zero_cnt = 0
    for c in digits[:n]:
        d = int(c)
        if d > 1:
            prod *= d
        elif d == 0:
            zero_cnt += 1
    # BUG FIX: the first window only counts when it has no zeros; previously
    # a zero-containing first window contributed its nonzero factors.
    max_prod = prod if zero_cnt == 0 else 0
    # Slide the window across the remaining digits.
    for i in range(1, len(digits) - n + 1):
        # Divide out the digit that just left the window on the left.
        left = int(digits[i - 1])
        if left > 1:
            # BUG FIX: exact integer division (`left` is a factor of `prod`);
            # `/=` produced floats that could drift for long windows.
            prod //= left
        elif left == 0:
            zero_cnt -= 1
        # Multiply in the new right-most digit.
        right = int(digits[i + n - 1])
        if right > 1:
            prod *= right
        elif right == 0:
            zero_cnt += 1
        # A window containing any zero has product 0 and is ignored.
        if zero_cnt == 0:
            max_prod = max(max_prod, prod)
    return max_prod
|
23a19accb50acc0a3767e5b0a6c10a5b474d8b00
| 83,711
|
def _extract_feature_values(eopatches, feature):
""" A helper function that extracts a feature values from those EOPatches where a feature exists.
"""
feature_type, feature_name = feature
return [eopatch[feature] for eopatch in eopatches if feature_name in eopatch[feature_type]]
|
e60790267cc979632fd67ae6923efcdfac344720
| 83,712
|
def format_pydantic_error_message(msg):
    """Normalize pydantic's error message field for display.

    Currently expands the shorthand "str" to "string".
    """
    return msg.replace("str type expected", "string type expected")
|
3f49c13ae409f7cbb263cb0255062de86f3be1a2
| 83,715
|
def create_event_from_class(constructor, klass, parameter_names, name_name, event_name):
    """
    Creates an event by collecting attributes off `klass` and passing them through a constructor.
    Parameters
    ----------
    constructor : `callable`
        Called with the collected attribute values, in `parameter_names` order.
    klass : `type`
        The type to work with.
    parameter_names : `tuple` of `str`
        The parameters names to pass to the constructor.
    name_name : `str` or `None`
        The event's name's name; when the attribute is missing, the class's own
        name is used instead.
    event_name : `str`
        The event's name. If event is not found, then defaults to `name_name`'s found value if any.
    Returns
    -------
    instance : `Any`
        The created instance.
    Raises
    ------
    BaseException
        Any occurred exception.
    """
    if not isinstance(klass, type):
        raise TypeError(f'Expected `type` instance, got {klass.__class__.__name__}.')
    # Collect each requested attribute off the class, remembering whether it
    # was actually present (found flag) so fallbacks below can fill gaps.
    parameters_by_name = {}
    for parameter_name in parameter_names:
        try:
            parameter = getattr(klass, parameter_name)
        except AttributeError:
            found = False
            parameter = None
        else:
            found = True
        parameters_by_name[parameter_name] = (parameter, found)
    name = klass.__name__
    # Fallback 1: the name parameter defaults to the class's own name.
    if (name_name is not None) and (not parameters_by_name[name_name][1]):
        parameters_by_name[name_name] = (name, True)
    # Fallback 2: fill the event parameter when it was not found on the class.
    if not parameters_by_name[event_name][1]:
        try:
            # NOTE(review): this looks up an attribute named after the class
            # itself (`name`), not `event_name` -- looks suspicious given the
            # docstring; confirm against callers before relying on it.
            parameter = getattr(klass, name)
        except AttributeError:
            pass
        else:
            parameters_by_name[event_name] = (parameter, True)
    # Invoke the constructor with values in the declared parameter order.
    return constructor(*(parameters_by_name[parameter_name][0] for parameter_name in parameter_names))
|
be3e11cc76be7c1725967e1bbe231a6729c2e0d9
| 83,723
|
import re
def _get_repo_param(pattern, repo_params_raw, param):
"""Parse a string with all the repo params to get the value of a single repo param."""
repo_param = re.search(pattern, repo_params_raw, re.MULTILINE | re.DOTALL)
if repo_param:
return repo_param.group(1)
raise ValueError(param, repo_params_raw)
|
9075dea90862c42b0465e959f6976551fbf0344a
| 83,727
|
def _equals(value):
"""
Equality test.
"""
return lambda v: value == v
|
45741860c3caae23fc0cc76360eae5dbaec4a63e
| 83,728
|
import importlib
import logging
def load_plugin(entry_point, callback=None):
    """Resolve and return a plugin from an entry-point specifier.

    Parameters
    ----------
    entry_point : str
        Specifier of the form ``"dotted.module:identifier"``.
    callback : func
        Optional transform applied to the resolved plugin before it is
        returned.

    Returns
    -------
    object
        The resolved (and possibly transformed) plugin, or None when the
        module or the identifier could not be found.
    """
    module_name, cls_name = entry_point.split(":")
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        logging.info("Unable to import {module}.".format(module=module_name))
        return None
    # Look the object up in the module's own namespace (not inherited names).
    if cls_name in module.__dict__:
        plugin = module.__dict__[cls_name]
        if callback:
            plugin = callback(plugin)
        return plugin
    logging.info(
        "{plugin} not contained in {module}.".format(
            plugin=cls_name, module=module_name
        )
    )
    return None
|
0c27cfff4af9c9d99c59622b4ba5f93b2a9912d4
| 83,729
|
def unpack_key(key):
    """Split a key specifier of the form "name as alias" into its parts.

    "key as key1" yields ['key', 'key1']; a bare "key" aliases to itself,
    yielding ['key', 'key'].
    """
    parts = key.split(' as ')
    if len(parts) == 1:
        return parts + parts
    return parts
|
b4aa6ea289892c6aaa288101b7cc98cd0a538b35
| 83,730
|
def parse_cloudflare_trace_ip(res: str):
    """Extract the IP address from a cloudflare trace response body.

    The body is a sequence of key=value lines, e.g.::

        fl=114f30
        ip=188.6.90.5
        ts=1567700692.298

    Returns the value of the first "ip=" line, or None when absent.
    """
    prefix = "ip="
    for line in res.splitlines():
        if line.startswith(prefix):
            return line[len(prefix):]
|
fd5dba51df6e2bf3965021197e95fbac8af129ea
| 83,732
|
import pkg_resources
def resource_to_data(path_to_data):
    """
    Read a static data file bundled with investpy into a DataFrame.

    Returns:
        :obj:`pandas.DataFrame` - data:
            All the static file's data retrieved from investpy.
    Raises:
        FileNotFoundError: raised if the static data file was not found.
        IOError: raised if the data file is empty or errored.
    """
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', path_to_data))
    if pkg_resources.resource_exists(resource_package, resource_path):
        # BUG FIX: `data` was previously left as None, so this function
        # always raised IOError even when the resource existed. Actually
        # load the CSV resource into a DataFrame.
        import pandas as pd
        data = pd.read_csv(
            pkg_resources.resource_filename(resource_package, resource_path),
            keep_default_na=False,
        )
    else:
        raise FileNotFoundError("ERR#0115: data file not found or errored.")
    if data is None:
        raise IOError("ERR#0115: data file was empty or errored.")
    return data
|
7539be5fced1f2c69f9f6c1c4b39d201e8935222
| 83,734
|
def guess_decimals(
        val,
        n_max=16,
        base=10,
        fp=16):
    """
    Guess the number of decimals in a given float number.
    Args:
        val (): The number whose decimals are guessed.
        n_max (int): Maximum number of guessed decimals.
        base (int): The base used for the number representation.
        fp (int): The floating point maximum precision.
            A number with precision is approximated by the underlying platform.
            The default value corresponds to the limit of the IEEE-754 floating
            point arithmetic, i.e. 53 bits of precision: log10(2 ** 53) = 16
            approximately. This value should not be changed unless the
            underlying platform follows a different floating point arithmetic.
    Returns:
        prec (int): the guessed number of decimals.
    Examples:
        >>> guess_decimals(10)
        0
        >>> guess_decimals(1)
        0
        >>> guess_decimals(0.1)
        1
        >>> guess_decimals(0.01)
        2
        >>> guess_decimals(0.000001)
        6
        >>> guess_decimals(-0.72)
        2
        >>> guess_decimals(0.9567)
        4
        >>> guess_decimals(0.12345678)
        8
        >>> guess_decimals(0.9999999999999)
        13
        >>> guess_decimals(0.1234567890123456)
        16
        >>> guess_decimals(0.9999999999999999)
        16
        >>> guess_decimals(0.1234567890123456, 6)
        6
        >>> guess_decimals(0.54235, 10)
        5
        >>> guess_decimals(0x654321 / 0x10000, 16, 16)
        4
    """
    # Extra slack applied to the tolerance as digits are consumed, to absorb
    # accumulated floating-point rounding error.
    offset = 2
    prec = 0
    # Start with the platform's tightest representable tolerance.
    tol = 10 ** -fp
    # Work only on the fractional part, shifted left one digit at a time.
    x = (val - int(val)) * base
    # Keep consuming digits while the remaining fraction is significant
    # (above tolerance) and we have not hit the precision caps.
    while base - abs(x) > tol and abs(x % tol) < tol < abs(x) and prec < n_max:
        x = (x - int(x)) * base
        # Loosen the tolerance for each digit consumed (see `offset` above).
        tol = 10 ** -(fp - prec - offset)
        prec += 1
    return prec
|
78a28f687ccd4272deae1601832dccee5e5b6c3c
| 83,737
|
def top1accuracy(pred, target):
    """Compute precision@1 as a percentage of correct predictions."""
    n = target.size(0)
    hits = pred.eq(target).float().sum(0)
    return hits.mul_(100.0 / n)
|
e0362040062c5dcf3b69a69d2612c6a54670ef9d
| 83,738
|
def retry_on_value_error(exc):
    """Decide whether an exception should trigger a retry.

    Only a ValueError whose first argument is the literal string 'do_retry'
    triggers one.

    Args:
        exc (Exception): The exception object to check against.
    Returns:
        (bool): Whether this should trigger a retry.
    """
    return isinstance(exc, ValueError) and exc.args[0] == 'do_retry'
|
d7e54e34974b7dc2328d46a16cdf05a13072be3d
| 83,739
|
def is_external(url):
    """Return True when `url` starts with an external-link prefix."""
    external_prefixes = ("http", "www", "mailto:", "tel:", "skype:", "ftp:")
    return url.startswith(external_prefixes)
|
65b66fbf7c5fa72573f6bc0e7ab727890d6b87b7
| 83,740
|
def get_metacols(df):
    """Return the dataframe's metadata column names ("Metadata_" prefix)."""
    prefix = "Metadata_"
    return [col for col in df.columns if col.startswith(prefix)]
|
5621830cc0ddd7e946c7c2dfd54c02a3fcdd4f48
| 83,741
|
def get_mapping(conn, table):
    """
    Obtain a mapping between app ids and their titles.
    :param conn: The SQLite connection.
    :param table: The table to obtain a mapping for (should be apps, widgets or downloading_apps)
    :return: A tuple with two items. The first value is a dict mapping each
        title to (id, uuid, flags). The second is the maximum item id seen
        (0 when the table is empty).
    """
    cursor = conn.execute(f'''
        SELECT {table}.item_id, {table}.title, items.uuid, items.flags
        FROM {table}
        JOIN items ON items.rowid = {table}.item_id
    ''')
    mapping = {}
    max_id = 0
    # Cursors are iterable; consume rows directly instead of fetchone-looping.
    for item_id, title, uuid, flags in cursor:
        mapping[title] = (item_id, uuid, flags)
        max_id = max(max_id, item_id)
    return mapping, max_id
|
3cb524e2aa11ea377ba8f926788e08643b8fc103
| 83,742
|
import bz2
import pickle
def load_index(filename="inverted_index"):
    """Load a pickled index from `<filename>.pbz2` (bz2-compressed)."""
    path = filename + ".pbz2"
    with bz2.BZ2File(path, "rb") as handle:
        return pickle.load(handle)
|
293604ccfe9777a3471f82731a3830cc26ed8fa6
| 83,748
|
import random
import string
def rndstr(N):
    """Return a random string of N lowercase ASCII letters and digits."""
    pool = string.ascii_lowercase + string.digits
    picks = random.choices(pool, k=N)
    return ''.join(picks)
|
0a31c9b9f6268f9abe8ac62413baf7e4e01ed9ff
| 83,749
|
import requests
def get_record(
    airtable_key: str,
    base_id: str,
    table_name: str = "submissions",
    record_id: str = "",
):
    """Fetch a single Airtable record by `record_id`.

    Returns the `requests` response object for the record endpoint, or
    None when `record_id` is empty.
    """
    if record_id == "":
        return None
    request_url = f"https://api.airtable.com/v0/{base_id}/{table_name}/{record_id}"
    headers = {
        "Authorization": f"Bearer {airtable_key}",
    }
    return requests.get(request_url, headers=headers)
|
7b0312b7fe6759deb9d9492c15cace6d8d031387
| 83,750
|
from math import sqrt
def run_prime_factorization(max_number: int) -> dict:
    """Factor `max_number` into primes by trial division.

    Args:
        max_number: Int of number (greater than 1).
    Returns:
        Dict mapping each prime base to its exponent.
    Landau notation: O(sqrt(n))
    """
    factors = {}
    remainder = max_number
    for candidate in range(2, int(sqrt(max_number)) + 1):
        exponent = 0
        while remainder % candidate == 0:
            exponent += 1
            remainder //= candidate
        if exponent:
            factors[candidate] = exponent
    # Anything left over is itself prime.
    if remainder != 1:
        factors[remainder] = 1
    return factors
|
721a518e4b192e6dbd5e96026b1b0a5c15468731
| 83,761
|
def colors_players() -> dict:
    """
    Age of Empires II player colors for minimap.
    Credit for a list of Age of Empires II terrain and player colors goes to:
    https://github.com/goto-bus-stop/recanalyst.
    :rtype: dict
    """
    blue = (0, 0, 255)
    red = (255, 0, 0)
    yellow = (255, 255, 0)
    black = (0, 0, 0)
    return {
        0: blue,
        1: red,
        2: (0, 255, 0),       # green
        3: yellow,
        4: (0, 255, 255),     # cyan
        5: (255, 0, 255),     # pink
        6: (67, 67, 67),      # gray
        7: (255, 130, 1),     # orange
        8: black,
        9: black,
        10: black,
        11: blue,
        12: yellow,
        13: (255, 255, 255),  # white
        14: red,
    }
|
0dd677544888c4361bfa49df8756393df0f25fe3
| 83,769
|
def _update_col_names(x, i):
"""Internal helper function to convert the names of the initial dataset headers
Keyword Arguments:
x {string} -- name of the column (can be None)
i {integer} -- integer representing the number of the column
Returns:
string - returns simplified string version of the column.
If the column didn't have a name, it return "col_{number of the column}"
"""
if x != "":
x = x.replace(" ", "_")
x = x.replace("-", "_")
x = x.replace("?", "")
else:
x = f"col_{i}"
return x.lower()
|
63e6260c4755cdaaced8cfb4c06e1027bc69fd01
| 83,771
|
def kn(dp, gas):
    """
    Calculate the Knudsen number of a particle: Kn = 2*l/dp.

    The Knudsen number gauges the continuum assumption; if Kn >~ 1 the
    continuum assumption is not appropriate for the problem solution.

    Parameters
    ----------
    dp: float
        particle diameter in nm
    gas: gas object
        Gas object providing the gas mean free path attribute `l`.

    Returns
    -------
    float
        Knudsen number
    """
    mean_free_path = gas.l
    return 2 * mean_free_path / dp
|
3ac53349b3ff84f42bab75914d5948d50bc1d95e
| 83,776
|
def print_board_number(board_number: tuple) -> str:
    """Format a (value, found) bingo cell; found numbers get an asterisk."""
    value, found = board_number
    marker = "*" if found else " "
    return f"{value:2}{marker}"
|
84f8fa67e63264aec4f1d6f19d518ae91a1cb03c
| 83,778
|
def compare_binary_images(image1, image2):
    """
    Return the fraction of pixels identical between two same-size images,
    or 0.0 when their sizes differ.
    """
    if image1.size != image2.size:
        return 0.0
    width, height = image1.size
    pixels1 = image1.load()
    pixels2 = image2.load()
    matches = 0
    for x in range(width):
        for y in range(height):
            if pixels1[x, y] == pixels2[x, y]:
                matches += 1
    return matches / (width * height)
|
8a9abb6d6ec5a76b796e6385ea087dabdc9592ce
| 83,784
|
def fill_padded_part(xs, ilens, fill_value):
    """Replace the padded tail of each sequence in a batch with `fill_value`.

    :param torch.Tensor xs: padded batch tensor (B, Tmax, ...)
    :param torch.Tensor ilens: true lengths per batch entry (B)
    :param float fill_value: value written into the padded positions
    :return: new tensor equal to xs with positions beyond each length filled
    """
    assert xs.size(0) == len(ilens)
    filled = xs.new(*xs.size()).fill_(fill_value)
    for batch_idx, length in enumerate(ilens):
        filled[batch_idx, :length] = xs[batch_idx, :length]
    return filled
|
3448f26e516715391e04cc3f019ba5867708aced
| 83,791
|
from typing import Callable
def make_unknown_function(func_name: str) -> Callable:
    """
    Build a stub standing in for a function that could not be reimported.

    Calling the returned stub always raises ValueError.
    """
    def unknown_function(*args, **kwargs):
        raise ValueError(f"Function '{func_name}' cannot be found.")

    # Tag the stub so callers can recognize it as a placeholder.
    unknown_function.unknown_function = True  # type: ignore
    return unknown_function
|
e5a8deb4cd4effb1277005bbf9f424658b3297c9
| 83,792
|
def clocal(self, kcn="", kcs="", xl="", yl="", zl="", thxy="", thyz="",
           thzx="", par1="", par2="", **kwargs):
    """Defines a local coordinate system relative to the active coordinate
    system.

    APDL Command: CLOCAL

    Parameters
    ----------
    kcn
        Arbitrary reference number assigned to this coordinate system.
        Must be greater than 10. A coordinate system previously defined
        with this number will be redefined.
    kcs
        Coordinate system type:
        0 or CART - Cartesian
        1 or CYLIN - Cylindrical (circular or elliptical)
        2 or SPHE - Spherical (or spheroidal)
        3 or TORO - Toroidal
    xl, yl, zl
        Location (in the active coordinate system) of the origin of the new
        coordinate system (R, θ, Z for cylindrical, R, θ,Φ for spherical or
        toroidal).
    thxy
        First rotation about local Z (positive X toward Y).
    thyz
        Second rotation about local X (positive Y toward Z).
    thzx
        Third rotation about local Y (positive Z toward X).
    par1
        Used for elliptical, spheroidal, or toroidal systems. If KCS = 1
        or 2, PAR1 is the ratio of the ellipse Y-axis radius to X-axis
        radius (defaults to 1.0 (circle)). If KCS = 3, PAR1 is the major
        radius of the torus.
    par2
        Used for spheroidal systems. If KCS = 2, PAR2 = ratio of ellipse
        Z-axis radius to X-axis radius (defaults to 1.0 (circle)).

    Notes
    -----
    Defines and activates a local coordinate system by origin location and
    orientation angles relative to the active coordinate system. This
    local system becomes the active coordinate system, and is automatically
    aligned with the active system (i.e., x is radial if a cylindrical
    system is active, etc.). Nonzero rotation angles (degrees) are
    relative to this automatic rotation. See the CS, CSKP, CSWPLA, and
    LOCAL commands for alternate definitions. Local coordinate systems may
    be displayed with the /PSYMB command.
    This command is valid in any processor.
    """
    # Empty arguments render as empty fields in the comma-separated command.
    command = "CLOCAL,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" % (str(kcn), str(kcs), str(
        xl), str(yl), str(zl), str(thxy), str(thyz), str(thzx), str(par1), str(par2))
    return self.run(command, **kwargs)
|
afd89aafb2a78998fddd20f2ddd0b9387719354a
| 83,794
|
def eval_if_symbolic(obj, context, **options):
    """Evaluate `obj` if it is a symbolic expression, otherwise return it
    unchanged.

    Args:
        obj: A symbolic expression (anything exposing `_eval`) or any
            plain value.
        context: Passed as an argument to `obj._eval` if `obj` is symbolic.
        `**options`: Passed as arguments to `obj._eval` if `obj` is symbolic.
    Returns:
        The evaluation result, or `obj` itself.
    Examples:
        >>> eval_if_symbolic(Symbol('x'), {'x': 10})
        10
        >>> eval_if_symbolic(7, {'x': 10})
        7
    """
    if hasattr(obj, '_eval'):
        return obj._eval(context, **options)
    return obj
|
8b73f541add15d3cb3beec153e6179ddc92e5a2c
| 83,797
|
def find_member(message, nickname):
    """Return the first guild member whose display name contains `nickname`.

    Parameters
    ----------
    message : discord.Message
        Message that triggered the event; its guild's members are searched.
    nickname : str
        Substring looked for in member display names.

    Returns
    -------
    member : discord.Member or None
        First matching member, or None when nobody matches.
    """
    for candidate in message.guild.members:
        if nickname in candidate.display_name:
            return candidate
    return None
|
036ec983a34522f15293ab25246f7b89fc83dade
| 83,808
|
def _disk(x, y, r, c, t=0):
"""Return a SVG disc."""
return ('<circle cx="%.5f%%" cy="%.5f%%" '
'r="%d" fill="%s" '
'transform="translate(%d, 0)"'
' />') % (x, y, r, c, t)
|
feefebba230c3c50865a80811ff43bd5e5595bd4
| 83,810
|
import secrets
import base64
def generate_key() -> str:
    """
    Generate a random value for the Sec-WebSocket-Key header:
    16 random bytes, base64-encoded.
    """
    raw = secrets.token_bytes(16)
    encoded = base64.b64encode(raw)
    return encoded.decode()
|
ac3d373422e1666a5789c0e73268b6f54b619eab
| 83,816
|
import random
def generate_doors(n):
    """
    Returns: a list of n doors, "G" (goat) everywhere except a single
    randomly placed "C" (car).
    """
    doors = ["G"] * n
    car_position = random.randint(0, n - 1)
    doors[car_position] = "C"
    return doors
|
01f6c7e878516b08204f66be7488443d8cda3416
| 83,820
|
def version_string(version_info):
    """
    Render a 5-tuple version_info as a dotted version string:
    "1.2.3" when releaselevel is 'final', otherwise
    "1.2.3.<releaselevel>.<serial>" (e.g. "1.2.3.alpha.42").
    """
    major, minor, micro, releaselevel, serial = version_info
    if releaselevel == 'final':
        return f'{major}.{minor}.{micro}'
    return f'{major}.{minor}.{micro}.{releaselevel}.{serial}'
|
4d691fdf9b8eda2722e2439b426d1eb1007e17f9
| 83,821
|
def append(df, entry):
    """
    Add `entry` to `df` unless a row with `entry`'s timestamp already exists.

    Parameters
    ----------
    df: pandas.DataFrame
        Dataframe to record into.
    entry: pandas.DataFrame
        Entry dataframe; its first index value is the timestamp key.

    Returns
    -------
    pandas.DataFrame
        A new dataframe with the entry appended, or `df` itself (unchanged)
        when the timestamp already exists.
    """
    import pandas as pd  # local import: keeps this snippet self-contained

    if entry.index[0] in df.index:
        return df
    # FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat with default ignore_index=False preserves the old behavior.
    return pd.concat([df, entry])
|
bdfcf7f7741eec8fccc0680f82e3d516e297c1ed
| 83,823
|
def convert_to_title(name: str) -> str:
    """Title-case a string and strip surrounding whitespace.

    Args:
        name: Any string.
    Returns:
        str: Capitalized and whitespace-stripped copy of `name`.
    """
    titled = name.title()
    return titled.strip()
|
7b6d244272a859fd50b14f0a838760c7743f84ff
| 83,830
|
import types
def has_variable(object, name):
    """Check whether `object` exposes a non-method attribute called `name`.

    Plain values and functions count as variables; bound methods (and
    missing or None attributes) do not.
    """
    attribute = getattr(object, name, None)
    if attribute is None:
        return False
    if not callable(attribute):
        return True
    # Callable attributes still count unless they are bound methods: a
    # variable may hold an object or a function, but never a method.
    return not isinstance(attribute, types.MethodType)
|
30d1be657e08138cd4a2821df979190813c41dd9
| 83,837
|
def insertion_sort(m):
    """
    Sort list `m` in place with the insertion sort algorithm and return it.

    Examples
        insertion_sort([4,7,6,3,2,5,1])
        # => [1,2,3,4,5,6,7]
    Complexity: O(n^2)
    """
    for end in range(1, len(m)):
        current = m[end]
        pos = end - 1
        # Shift larger elements one slot right to open a gap for `current`.
        while pos >= 0 and m[pos] > current:
            m[pos + 1] = m[pos]
            pos -= 1
        m[pos + 1] = current
    return m
|
8603ef9c83f1ed1631287abd5a166bca6619d04a
| 83,838
|
def perp_comp(vec, along):
    """Return the component of `vec` perpendicular to `along`.

    (Assumes `along` is a unit vector, since the dot product is not
    normalized -- TODO confirm with callers.)
    """
    projection = vec.dot(along) * along
    return vec - projection
|
7f3ba3e37e1a7b7037eeba53a3ec60d48d3cc916
| 83,840
|
def unquote(s: str):
    """Undo git's quoting of file names; always return a str.

    Git can return names like `"with\\ttwo\\ttabs"` -- a double-quoted,
    backslash-escaped representation of the file name. Names without the
    surrounding quotes pass through untouched.
    """
    if s[0] != '"' or s[-1] != '"':
        # Unquoted strings are always safe, no need to mess with them.
        return s
    # Drop the quotes; they never belong in the output.
    inner = s[1:-1]
    # Decode the backslash escapes. latin1 maps code points 0-255 one-to-one
    # onto bytes 0x00-0xff, so it round-trips the raw escape bytes.
    unescaped = inner.encode("latin1").decode("unicode-escape")
    # Reinterpret the resulting bytes as UTF-8 where possible.
    return unescaped.encode("latin1").decode("utf8", errors="backslashreplace")
|
63343fb8e1bad282ffb2827ebe311bd4397f7d04
| 83,849
|
def lines(file_path):
    """Return every line of the file at `file_path` (newlines kept).

    file_path -- path of the file to read; a FileNotFoundError with a
    helpful message is raised when it does not exist.
    """
    try:
        with open(file_path) as handle:
            return handle.readlines()
    except FileNotFoundError:
        raise FileNotFoundError(str(file_path) + " could not be found. Check the file path is correct.")
|
5dd6cf3f1c8ca44668936bb0ba716848a34b9701
| 83,855
|
def options_dictionary_to_string(options_dictionary):
    """Serialize a dict of options to a NUL-delimited string.

    Produces "KEY1\x00VALUE1\x00KEY2\x00VALUE2\x00...\x00" (note the
    trailing separator), sorted by key; an empty dict yields "".
    """
    ops = []
    # FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() is the correct API.
    for (key, value) in sorted(options_dictionary.items()):
        ops.append(key)
        ops.append(value)
    ops_string = "\x00".join(ops)
    ops_string += "\x00" if ops else ""
    return ops_string
|
607c474f76f7dd922b6235d63544638055afdbc2
| 83,856
|
def REPLACE(text, position, length, new_text):
    """
    Replaces part of a text string with a different text string. Position is counted from 1.
    >>> REPLACE("abcdefghijk", 6, 5, "*")
    'abcde*k'
    >>> REPLACE("2009", 3, 2, "10")
    '2010'
    >>> REPLACE('123456', 1, 3, '@')
    '@456'
    >>> REPLACE('foo', 1, 0, 'bar')
    'barfoo'
    >>> REPLACE('foo', 0, 1, 'bar')
    Traceback (most recent call last):
    ...
    ValueError: position invalid
    """
    if position < 1:
        raise ValueError("position invalid")
    # Convert the 1-based position to a 0-based slice start.
    start = position - 1
    return text[:start] + new_text + text[start + length:]
|
cbd2e5aedfc8f2bbf7dc17eeb9e693f8653f0a3b
| 83,866
|
import cgi
def parse_content_type(headers):
    """
    Find content-type and encoding of the response.

    Args:
        headers: Response headers (a mapping supporting .get)
    Returns:
        :py:class:`tuple` (content-type, encoding); the content-type is
        None when the header is absent, and encoding defaults to "utf-8"
        when no charset parameter is present.
    """
    content_type = headers.get("content-type")
    if not content_type:
        return None, "utf-8"
    # FIX: the `cgi` module (cgi.parse_header) is deprecated by PEP 594 and
    # removed in Python 3.13; parse the header value directly instead.
    type_, _, param_str = content_type.partition(";")
    type_ = type_.strip()
    encoding = "utf-8"
    for param in param_str.split(";"):
        key, _, value = param.partition("=")
        if key.strip().lower() == "charset" and value:
            # Strip optional quoting around the charset value.
            encoding = value.strip().strip("\"'")
    return type_, encoding
|
3b33d1686271228a94ea7946595c37766a3935c8
| 83,868
|
def part1(rules, messages):
    """Return the number of messages that rule "0" matches."""
    rule_zero = rules["0"]
    total = 0
    for message in messages:
        total += rule_zero.match(message)
    return total
|
b2735f4fe1704058bde94222c1a36492ff04a42b
| 83,871
|
def python_version(hver):
    """Convert a packed version string like "308" into "3.8".

    The final two characters are the minor version; everything before them
    is the major version.
    """
    major = int(hver[:-2])
    minor = int(hver[-2:])
    return f'{major}.{minor}'
|
ae11bbf25d013ff6e0e683fa9eed83f4ff872391
| 83,872
|
import torch
def split_leading_dim(x, shape):
    """Reshape the leading dimension of `x` into the given `shape`."""
    target = torch.Size(shape) + x.shape[1:]
    return x.reshape(target)
|
27c8a6ac9696b90d018bd734482e2d856a15342f
| 83,877
|
def split32(long_int, n):
    """Split a non-negative int into n 32-bit words, least-significant
    first, each formatted as a C hex literal ("0x........UL")."""
    assert long_int >= 0
    words = []
    remaining = long_int
    for _ in range(n):
        words.append("0x%08xUL" % (remaining & 0xFFFFFFFF))
        remaining >>= 32
    return words
|
8496abc02b5c6d5699cddfc982011c4c1ff2a102
| 83,878
|
def scale(A):
    """
    Identity data-scaling transformation: returns `A` unchanged.
    """
    return A
|
bee9f37080b1f5bfaf878f08b7765cc33b96f164
| 83,880
|
def archive_to_fits(ar_file, extension="fits", container="/pawsey/mwa/singularity/dspsr/dspsr.sif"):
    """Build the bash command that converts an archive file to PSRFITS."""
    launch = f"singularity exec -e {container}"
    return f"{launch} pam -a PSRFITS -e {extension} {ar_file}"
|
018607c4c412e90ceb72f44cbbe36f5a8947f120
| 83,883
|
def add_args(parser):
    """
    Register this tool's command line arguments on `parser` and return it.

    :meta private:
    """
    required = [
        ("--model", "Trained prediction model"),
        ("--test", "Test Data"),
        ("--embedding", "h5 file with embedded sequences"),
    ]
    for flag, description in required:
        parser.add_argument(flag, help=description, required=True)
    parser.add_argument("-o", "--outfile", help="Output file to write results")
    parser.add_argument("-d", "--device", type=int, default=-1, help="Compute device to use")
    return parser
|
1b9d9bb2cbe5d2d0431dc529fc8b4826852c5f11
| 83,885
|
def gradient(f,h,X):
    """
    Compute gradient of f at location specified with vector X. Values in X
    must be in same order as args of f so we can call it with f(*X). h is
    the step size to use in forward finite difference computation. For 1D X,
    the finite difference is:
    f(x + h)-f(x)
    -------------
    h
    But, generally we need to tweak each of X_i and recompute f(X) to get
    the gradient.

    NOTE(review): X is temporarily mutated in place (each X[i] is nudged by
    h and then restored); with floats, `X[i] -= h` may not restore the exact
    original value. Callers must pass a mutable sequence they don't mind
    being touched.
    """
    # X = list(X)# flip to a list from tuple so we can modify elements
    fx = f(*X) # only need this once; reused for every dimension's difference
    dX = []
    for i in range(len(X)):
        # Make a vector of Value(X_i, [0 ... 1 ... 0]) with 1 in ith position
        X[i] += h # tweak in dimension i
        y = f(*X)
        X[i] -=h # undo the tweak for next round
        dx = (y - fx)/h # forward-difference slope along dimension i
        dX.append(dx)
    return dX
|
d96110206800641279a2a7f6d0efae35414aa6a9
| 83,889
|
def point_box_relation(u, vbox):
    """
    Check in which region point `u` is located relative to box `vbox`.
    :param u: point to check (y, x)
    :param vbox: box to check point with (y0, x0, y1, x1)
    :return: code with the location of the point
    0 3 8
    ---
    2 | 4 | 7
    ---
    1 6 9
    Code 4 means inside; None is returned when no branch matches.
    Note: y grows downward (image-style coordinates), so "above" means
    uy <= vy0 and "below" means uy >= vy1. Corner points on the box edge
    are folded into the diagonal regions by the explicit == clauses.
    """
    uy, ux = u
    vy0, vx0, vy1, vx1 = vbox
    if (ux < vx0 and uy <= vy0) or (ux == vx0 and uy == vy0):
        relation = 0  # 'left-above'
    elif vx0 <= ux < vx1 and uy <= vy0:
        relation = 3  # 'above'
    elif (vx1 <= ux and uy < vy0) or (ux == vx1 and uy == vy0):
        relation = 8  # 'right-above'
    elif vx1 <= ux and vy0 <= uy < vy1:
        relation = 7  # 'right-of'
    elif (vx1 < ux and vy1 <= uy) or (ux == vx1 and uy == vy1):
        relation = 9  # 'right-below'
    elif vx0 < ux <= vx1 and vy1 <= uy:
        relation = 6  # 'below'
    elif (ux <= vx0 and vy1 < uy) or (ux == vx0 and uy == vy1):
        relation = 1  # 'left-below'
    elif ux <= vx0 and vy0 < uy <= vy1:
        relation = 2  # 'left-of'
    elif vx0 < ux < vx1 and vy0 < uy < vy1:
        relation = 4  # 'inside'
    else:
        relation = None
    return relation
|
fb305ce83a142247b573f7055237d5fbff5a2219
| 83,893
|
def read_list(f, name):
    """Read group `name` from an open hdf5 file and return its members as a
    list, ordered by their integer-string keys "0" .. "len-1"."""
    group = f[name]
    return [group[str(index)] for index in range(len(group))]
|
93d8db117d7704af5170f2f5ffaf4d861f8f6800
| 83,895
|
def readStringAtRva(emu, rva, maxsize=None, charsize=1):
    """
    Borrowed from vivisect/PE/__init__.py
    Read a NUL-terminated string from emulator memory, one byte at a time.
    :param emu: emulator
    :param rva: virtual address of string
    :param maxsize: maximum number of bytes to read (None = unbounded)
    :param charsize: address stride between characters (2 for wide strings;
        note only the first byte of each character is read)
    :return: the bytes read, without the terminator

    NOTE(review): assumes readMemory returns None or a NUL byte at the end
    of valid memory; an empty-bytes return would loop forever -- confirm
    the emulator's contract.
    """
    result = bytearray()
    # A maxsize of 0 would otherwise never satisfy the break condition.
    if maxsize == 0:
        return bytes()
    while True:
        if maxsize and maxsize <= len(result):
            break
        byte = emu.readMemory(rva, 1)
        if byte is None or byte == b"\x00":
            break
        result += byte
        rva += charsize
    return bytes(result)
|
4e15af911ba096bbe0ee5f9088670c5f68d94844
| 83,902
|
def _is_cglc(fname):
"""Check if a filename can be a CGLC raster."""
if len(fname.split("_")) != 8:
return False
if not fname.lower().endswith(".tif") or "_ProbaV_LC100_" not in fname:
return False
return True
|
a1e5daa5e8459d8f05e1fdbf59bc92bc434f7e0a
| 83,911
|
from typing import List
def make_ngrams(text: str, n: int) -> List[str]:
    """Turn a term string into its character ngrams of size n.

    The text is wrapped in '#' boundary markers before extraction.

    :param text: a text string
    :type text: str
    :param n: the ngram size (a positive integer)
    :type n: int
    :return: the ngrams, empty when n exceeds the unwrapped text length
    :rtype: List[str]
    """
    if not isinstance(text, str):
        raise TypeError('text must be a string')
    if not isinstance(n, int):
        raise TypeError('n must be a positive integer')
    if n < 1:
        raise ValueError('n must be a positive integer')
    if n > len(text):
        return []
    wrapped = "#{t}#".format(t=text)
    return [wrapped[i:i + n] for i in range(len(wrapped) - n + 1)]
|
a574e78a9873a6f2dbbc3643197a9f23d72d84ab
| 83,913
|
def loadmeta_altafsir(madhab_path, tafsir_path):
    """Load metadata text files for altafsir.

    Both files are '|'-delimited; blank lines and lines starting with '#'
    are skipped, and every field is whitespace-stripped.

    Args:
        madhab_path (str): path for madhab file (id|name per line).
        tafsir_path (str): path for tafsir file (id|name|author|date per line).
    returns:
        dict, dict: madhab metadata and tafsir metadata.
    """
    madhab_mapping = {}
    with open(madhab_path) as fp:
        for raw in fp:
            line = raw.strip()
            if not line or line.startswith('#'):
                continue
            _id, name = line.split('|')
            madhab_mapping[_id.strip()] = name.strip()
    tafsir_mapping = {}
    with open(tafsir_path) as fp:
        for raw in fp:
            line = raw.strip()
            if not line or line.startswith('#'):
                continue
            _id, name, author, date = map(str.strip, line.split('|'))
            tafsir_mapping[_id] = {'name': name, 'author': author, 'date': date}
    return madhab_mapping, tafsir_mapping
|
7a9393622a5420c8536a4f5891a84e54c188cec5
| 83,916
|
import keyword
def esc_kw(token: str) -> str:
    """Escape Python keywords by suffixing '_'.

    :param token: token
    :return: token, with '_' appended when it is a Python keyword
    """
    if keyword.iskeyword(token):
        return token + '_'
    return token
|
68e2c9bec2857fa282bf8b7b49ef6bf1c1274edd
| 83,917
|
from typing import Union
from pathlib import Path
from typing import Dict
from typing import Set
import logging
import json
def load_forces_from_json(json_path: Union[str, Path]) -> Dict[str, Dict[str, Union[Set, Dict]]]:
    """
    Load forced labels, changes, and non-changes from a json file and return them
    in a dictionary that can be passed through to JointModel.get_harmony(...) as kwargs.

    Parameters
    ----------
    json_path : Union[str, Path]
        A reference to a json file containing forced labels, changes, and non-changes,
        to be used for JointModel.get_harmony(...). It may have the following keys:
            - "forced_chord_changes": A list of integers at which the chord must change.
            - "forced_chord_non_changes": A list of integers at which the chord cannot change.
            - "forced_key_changes": A list of integers at which the key must change.
            - "forced_key_non_changes": A list of integers at which the key cannot change.
            - "forced_chords": A dictionary mapping the string form of a tuple in the form
                               (start, end) to a chord_id, saying that the input indexes on
                               the range start (inclusive) to end (exclusive) must be output
                               as the given chord_id.
            - "forced_keys": Same as forced_chords, but for keys.

    Returns
    -------
    forces_kwargs: Dict[str, Dict[str, Union[Set, Dict]]]
        A nested dictionary containing the loaded keyword arguments for each input.
        The outer-most keys should reference a specific input by string name,
        or be the keyword "default", in which case the loaded kwargs will be used for all
        input pieces not otherwise matched by string name.
        In the inner dictionaries, keyword arguments have been loaded (with the correct types)
        from the json file that can be passed directly as kwargs to JointModel.get_harmony(...)
        for that particular piece.
    """
    # Single source of truth for the recognized keys. The original duplicated
    # these lists in three places, which made them easy to get out of sync.
    set_keys = [
        "forced_chord_changes",
        "forced_chord_non_changes",
        "forced_key_changes",
        "forced_key_non_changes",
    ]
    dict_keys = ["forced_chords", "forced_keys"]

    def load_forces_from_nested_json(raw_data: Dict) -> Dict[str, Union[Set, Dict]]:
        """
        Load an inner forces_kwargs dict from a nested json forces_kwargs dict data.

        Parameters
        ----------
        raw_data : Dict
            The inner nested dictionary from which we will load the kwargs.
            See load_forces_from_json for details.

        Returns
        -------
        Dict[str, Union[Set, Dict]]
            The kwargs for a single piece, unnested.
        """
        forces_kwargs = dict()
        # List-valued forces become sets of indexes.
        for key in set_keys:
            if key in raw_data:
                forces_kwargs[key] = set(raw_data[key])
        # Dict-valued forces: parse each "(start, end)" string key into an int tuple.
        for key in dict_keys:
            if key in raw_data:
                forces_kwargs[key] = {
                    tuple(map(int, range_tuple_str[1:-1].split(","))): label_id
                    for range_tuple_str, label_id in raw_data[key].items()
                }
        # Warn about (and ignore) any unrecognized keys.
        for key in raw_data:
            if key not in set_keys + dict_keys:
                logging.warning(
                    "--forces-json inner key not recognized: %s. Ignoring that key.", key
                )
        logging.info("Forces:" if len(forces_kwargs) > 0 else "Forces: None")
        for key, item in sorted(forces_kwargs.items()):
            # isinstance (not `type(item) == dict`) is the correct type check here.
            if isinstance(item, dict):
                logging.info(" %s:", key)
                for inner_key, inner_item in sorted(item.items()):
                    logging.info(" %s = %s", inner_key, inner_item)
            else:
                logging.info(" %s = %s", key, item)
        return forces_kwargs

    with open(json_path, "r") as json_file:
        raw_data = json.load(json_file)

    # A flat mapping (force keys at the top level) applies to all input pieces.
    if any(key in raw_data for key in set_keys + dict_keys):
        logging.info(
            "Given --json-forces not a nested, piece-specific mapping. Treating as default for "
            "all inputs."
        )
        raw_data = {"default": raw_data}

    all_forces_kwargs = {}
    for key, nested_raw_data in raw_data.items():
        logging.info("Loading forces for %s", key)
        all_forces_kwargs[key] = load_forces_from_nested_json(nested_raw_data)

    return all_forces_kwargs
|
ad328af4842cf880889d3fbc5ed465af4feb713e
| 83,921
|
def get_class_name(type_):
    """
    Get just the class name (w/o module(s) from the type.

    Args:
        type_ (type): Class as a type.
    Returns:
        (str|None): Just the name of the class, or None when the type's
        string form contains no dotted module prefix (e.g. builtins).
    """
    parts = str(type_).rsplit('.', 1)
    if len(parts) < 2:
        return None
    return parts[1].rstrip("'>")
|
1c3a94d8f1fb5f9f28bbd3cd4b531f335c948a08
| 83,922
|
def get_lemmas(synset):
    """ Look up and return all lemma names recorded for the given synset. """
    return synset.lemma_names()
|
e24decdd2af6b65f5c495d9949ef8ff9aaa5c8da
| 83,928
|
def _deb_kernel_package(kernel, dist, arch, name):
"""
Return kernel package name.
Args:
kernel (str): Kernel version.
dist (str): Distribution.
arch (str): Architecture.
name (str): Package name.
Returns:
str: kernel package.
"""
# Define package suffix
if dist == 'Ubuntu':
suffix = 'generic'
elif name == 'linux-image':
suffix = arch.replace('x86_64', 'amd64')
else:
suffix = 'common'
return '-'.join((name, kernel, suffix))
|
583b5c96ba7dd71a4d5888913fa043287a02ed52
| 83,934
|
from typing import Union
from typing import List
from typing import Tuple
def get_match_patterns(filters: Union[str, List[str]]) -> Tuple[List[str], List[str]]:
    """The function to parse 'filters' defined as a single string into the lists
    of 'include' and 'exclude' patterns.
    >>> get_match_patterns("")
    ([], [])
    >>> get_match_patterns(":")
    ([], [])
    >>> get_match_patterns(":!")
    ([], [])
    >>> get_match_patterns("f:")
    (['f'], [])
    >>> get_match_patterns("f:!f 1")
    (['f'], ['f 1'])
    >>> get_match_patterns("f:!f 1:f 2:!f 3")
    (['f', 'f 2'], ['f 1', 'f 3'])
    >>> get_match_patterns(["f", "!f 1"])
    (['f'], ['f 1'])
    """
    # A list input is joined into the single-string form first.
    if not isinstance(filters, str):
        filters = ":".join(filters) if filters else ""
    include: List[str] = []
    exclude: List[str] = []
    for part in filters.split(':'):
        if part.startswith('!'):
            # A bare '!' carries no pattern and is dropped.
            if len(part) > 1:
                exclude.append(part[1:])
        elif part:
            include.append(part)
    return include, exclude
|
1c4f8874dbb2abab315265b0ba0e371c352774df
| 83,936
|
def clip_string(s, limit=1000, sep=None):
    """
    Clip a string at a given character and add "..." if the string was clipped.

    If a separator is specified, the string is not clipped at the given limit
    but after the last occurence of the separator below the limit (falling back
    to a plain clip when the separator does not occur in the clipped part).

    Fixes over the previous version: "..." is now appended on *every* clipped
    result (the sep=None and separator-not-found paths used to omit it, contrary
    to this docstring), and a string of exactly `limit` characters is returned
    unchanged instead of being clipped.

    :param s: string to clip
    :type s: str
    :param limit: number of characters to retain (including "...")
    :type limit: int
    :param sep: separator
    :type sep: str
    :rtype: str
    """
    if len(s) <= limit:
        return s
    s = s[: limit - 3]
    if sep is not None:
        sep_pos = s.rfind(sep)
        if sep_pos != -1:
            s = s[: sep_pos + len(sep)]
    return s + "..."
|
fe6a8f6868010391d056cff8a809d8f4aff63722
| 83,942
|
import re
def remove_tags(input_str):
    """Removes formatting tags, double spaces from text string"""
    # Opening tags are removed before closing tags, in two separate passes,
    # matching the original pass order.
    stripped = re.sub(r'<(i|sub|sup)[^>]*?>', '', input_str)
    stripped = re.sub(r'</(i|sub|sup)[^>]*?>', '', stripped)
    # Collapse double spaces (single left-to-right pass, not recursive).
    return stripped.replace('  ', ' ')
|
a7baf944178b84d649a9a7a8ea9382fb0409cd9b
| 83,948
|
import math
def Critical(n1, n2):
    """Calculate critical angle in degrees.

    Args:
        n1: refractive index of the denser medium.
        n2: refractive index of the rarer medium.

    Returns:
        float: critical angle in degrees.

    Raises:
        ValueError: if n1 <= n2 (the critical angle is undefined).
        Previously this was an `assert`, which is silently stripped when
        Python runs with -O; a real exception is always enforced.
    """
    if n1 <= n2:
        raise ValueError("Critical angle is not defined, since n1 <= n2!")
    return math.degrees(math.asin(n2 / n1))
|
674c3588285b73b6a1be13810cc0bc2e924a1baa
| 83,954
|
def attribution() -> str:
    """Returns data attribution string"""
    # '\u0026' is '&', so the result starts with the HTML entity '&copy;'.
    link = (
        '<a href="https://github.com/CSSEGISandData/COVID-19">'
        'Johns Hopkins University</a>. '
    )
    return '\u0026copy; ' + link
|
dfd40478699f17f6ada1989c1ee5dd460fb6964a
| 83,955
|
def get_claim_request_dto(requirements: dict, block_id: str, transaction_count: int) -> dict:
    """Get the claim request DTO for matchmaking

    Args:
        requirements: matchmaking requirements for this block
        block_id: relevant block_id
        transaction_count: number of transactions in this block

    Returns:
        DTO as a dict
    """
    # One "numL<n>s" entry per node level, e.g. requirements["l2"] -> "numL2s".
    dto = {
        f"num{level.upper()}s": requirements[level].get("nodesRequired")
        for level in ("l2", "l3", "l4", "l5")
    }
    dto["blockId"] = str(block_id)
    dto["transactionCount"] = transaction_count
    return dto
|
196ff535d05b4f1cacab5574b8d828b5f725aa06
| 83,958
|
from typing import Any
def couple_combinaison_from(elements: list[Any]) -> list[tuple[Any, Any]]:
"""create all possible combinaison of two elements from the input list"""
zip_size = len(elements)
return sum(
(list(zip([element] * zip_size, elements)) for element in elements),
[],
)
|
8c299a86fe3f2faf7f27b7363f9c4c6e2188a5db
| 83,969
|
def animate(env, axesimage, i):
    """Animates an environment by letting the environment proceed a timestep,
    then setting a copy of its values into the image.

    The unused `i` parameter is required by the animation callback signature.
    """
    env.proceed(1.0)
    axesimage.set_array(env.value.copy())
    return [axesimage]
|
77982bceccd858085431533d948c1b6835f76c2c
| 83,974
|
def recursive_replace_line (input_line: str, triggers: list, source: dict) -> str:
    """
    Recursive replacer. Recursively calls itself as long as there is at least one "$" present in the `input_line`.

    Parameters:
        input_line (str) : input line
        triggers (list) : list of triggers (templated variables to be replaced)
        source (dict) : dictionary of variables with substitution details (usually either datasets or numbers)

    Returns:
        output_line (str) : processed (replaced) line

    Raises:
        ValueError : when the line still contains '$' but none of the
        triggers (lower- or upper-case) matches.
    """
    # No placeholder marker left: nothing to substitute.
    if '$' not in input_line:
        return input_line
    for case in triggers:
        placeholder = f'${case}$'
        if placeholder in input_line:
            replaced = input_line.replace(placeholder, source[case])
            return recursive_replace_line(replaced, triggers, source)
        # Upper-case variant substitutes the upper-cased source value.
        if placeholder.upper() in input_line:
            replaced = input_line.replace(placeholder.upper(), source[case].upper())
            return recursive_replace_line(replaced, triggers, source)
    # '$' present but no trigger matched: surface the offending line and fail.
    print(input_line)
    raise ValueError('Recursion went wrong, not all cases considered')
|
79c8a488437aefb1c6f7e539c75e8c9919ad7fba
| 83,980
|
from typing import Sequence
from typing import List
def insertion_sort_iter(seq: Sequence) -> List:
    """
    Sort a sequence with the iterative insertion sort algorithm.

    The input sequence is not modified; a sorted list copy is returned.

    Parameters
    ----------
    seq : Sequence

    Returns
    -------
    List
    """
    result: List = list(seq)
    for idx in range(1, len(result)):
        current = result[idx]
        pos = idx
        # Shift larger elements one slot right until current's spot is found.
        while pos > 0 and result[pos - 1] > current:
            result[pos] = result[pos - 1]
            pos -= 1
        result[pos] = current
    return result
|
bb0ffd7dd90d13f7bdf5e64b90aa37ae6021b3cd
| 83,984
|
def pytest_make_parametrize_id(config, val, argname=None):
    """Return the canonical string representation of the value."""
    # config and argname are required by the pytest hook signature but unused.
    del config, argname
    return repr(val)
|
e56f195ca05101ab0e3f4d4c902fe6caebd6f0ad
| 83,988
|
import json
def create_json_file(filename, data, encoding = "utf-8-sig") -> dict:
"""
Grava um dicionário dentro de um arquivo JSON.
"""
with open(filename, "w", encoding = encoding) as file:
content = json.dumps(data, indent = 4, ensure_ascii = False)
file.write(content)
return data
|
19cfcc5fe75e65d5992447dca4162b237fcbe281
| 83,992
|
import re
def read_timespan(ts):
""" Read sleep-like timespan, return it as seconds """
if not re.fullmatch("((\d+)([dwms]?))+", ts):
raise ValueError("Invalid timespan: '{}'".format(ts))
seconds = 0
for amount, multiplier in re.findall("(\d+)([dwms]?)", ts):
if multiplier == "w":
seconds += int(amount) * 60 * 60 * 24 * 7
elif multiplier == "d":
seconds += int(amount) * 60 * 60 * 24
elif multiplier == "h":
seconds += int(amount) * 60 * 60
elif multiplier == "m":
seconds += int(amount) * 60
else:
seconds += int(amount)
return seconds
|
901fe27469b9a49de8882de7cf085b1ffe97932f
| 83,993
|
import random
def get_words(part, num):
    """
    For a given part of speech, return a list of num unique words.

    Fix: the previous rejection-sampling loop (draw with replacement until
    the draw happened to contain num distinct values) would spin forever
    when `part` held fewer than num *unique* words, since the guard only
    checked len(part). Sampling without replacement from the de-duplicated
    words is both correct and O(len(part)).

    :param part: part of speech (list of candidate words, duplicates allowed)
    :param num: number of unique words to return
    :return: list of num unique words
    :raises ValueError: if fewer than num unique words are available
    """
    # dict.fromkeys de-duplicates while preserving first-seen order.
    unique_words = list(dict.fromkeys(part))
    if num > len(unique_words):
        raise ValueError('Too many words asked than available for part {}'.format(part))
    return random.sample(unique_words, num)
|
fb58893b6d0ac70381ff24118e943e4912977666
| 83,996
|
import math
def lb_kim_sequence(candidate_seq, query_sequence):
"""
Calculate lb kim lower bound between candidate and query sequence
:param candidate_seq:
:param query_sequence:
:return: lb kim lower bound distance between query and sequence
"""
lb_kim_sim = math.sqrt((candidate_seq[0] - query_sequence[0]) ** 2 + (candidate_seq[-1] - query_sequence[-1]) ** 2)
return lb_kim_sim / 2.0
|
cf6d4b2ff5d2aa5837bfbe053465a59aa36c6a47
| 83,997
|
from typing import Counter
def extract_binary_tally(class_name, tally):
"""Extract single-class TP, FP, FN, TN from multi-class confusion tally.
Reduces the mutli-class expectation/prediction to binary - did they
include the class of interest, or not?
Returns a 4-tuple of values, True Positives (TP), False Positives (FP),
False Negatives (FN), True Negatives (TN), which sum to the tally total.
"""
bt = Counter()
for (expt, pred), count in tally.items():
bt[class_name in expt.split(";"), class_name in pred.split(";")] += count
return bt[True, True], bt[False, True], bt[True, False], bt[False, False]
|
a06f21c6338e098e8b31450d4196eb964d721a3e
| 84,004
|
from datetime import datetime
def format_datetime(value: datetime, dt_format: str = r"%d %b %Y %I:%M %p") -> str:
"""Formats a datetime object into a datetime string"""
return value.strftime(dt_format)
|
88e0680a595635ea867bd57afc812f98ee802872
| 84,005
|
def isExportName(name):
    """Tell if named object wants to be exported (name has the 'g_' prefix)."""
    return name[:2] == "g_"
|
3528234f9745c7a47f88cce58ec7e454de601021
| 84,011
|
def kinem_allowed(q2, par, B, V, lep):
    """True if q2 is in the kinematically allowed region
    [m_lep**2, (m_B - m_V)**2] (inclusive bounds)."""
    q2_min = par['m_' + lep] ** 2
    q2_max = (par['m_' + B] - par['m_' + V]) ** 2
    return q2_min <= q2 <= q2_max
|
9b73d603d12fd0e57b378726ebc4081002ac064b
| 84,012
|
def get_flanks(snp_region, flank_size):
    """return the flanking regions for a location in a chromosome

    The region sequence is laid out as:
    [flank_size bases][reference base][flank_size bases].
    """
    seq = snp_region.seq
    five_prime = seq[:flank_size]
    ref_base = seq[flank_size:flank_size + 1]
    three_prime = seq[flank_size + 1:2 * flank_size + 1]
    return five_prime, ref_base, three_prime
|
f9de27b24300b8db91a65780cd79d54d0476229a
| 84,014
|
def get_snr_from_mix_path(mix_path):
    """ Retrieves mixing SNR from mixture filename.

    Args:
        mix_path (str): Path to the mixture. Something like :
            book_11346_chp_0012_reader_08537_8_kFu2mH7D77k-5YOmLILWHyg-\
            gWMWteRIgiw_snr6_tl-35_fileid_3614.wav

    Returns:
        int or None: the SNR value if we could parse it.
    """
    # Text between the last "snr" marker and the following underscore.
    candidate = mix_path.split("snr")[-1].split("_")[0]
    try:
        return int(candidate)
    except ValueError:
        return None
|
d9a0ce74c9e4792c2893aed5ea80aad0f87f881b
| 84,016
|
import requests
def request_json(rest_url, ext):
"""Make a REST request and return as json."""
if rest_url.endswith("/") and ext.startswith("/"):
ext = ext[1:]
r = requests.get(rest_url + ext, headers={"Content-Type": "application/json"})
if not r.ok:
r.raise_for_status()
return r.json()
|
5fbd28d0755b317dc2006f9fafaf1c0b778abe47
| 84,019
|
def add_element(dic, key, val):
    """
    Given a dictionary containing lists and a key for that dictionary,
    appends val to the list at that key if it already exists,
    or else creates the list at that key with the given element.

    The manual key-present check is exactly what dict.setdefault does.

    :param dic: dictionary mapping keys to lists (mutated in place)
    :param key: key to append under
    :param val: value to append
    :return: the same (mutated) dict
    """
    dic.setdefault(key, []).append(val)
    return dic
|
ee31afb23fba8f528625b7e1ab9e965f56603125
| 84,020
|
import random
def jitter(interval):
    """Apply some 'jitter' to an interval. This is a random +/- 10% change in order to
    smooth out patterns and prevent everything from retrying at the same time.
    """
    # Uniform factor in [0.9, 1.1).
    factor = 0.9 + 0.2 * random.random()
    return interval * factor
|
f490ddea6feb65310b96a9af9be350b7d212ec08
| 84,022
|
def shift_window_inside_image(start, end, image_axis_size, input_axis_size):
    """
    If the window goes outside the bound of the image, then shifts it to fit
    inside the image (clamping to the left or right edge as needed).
    """
    if start < 0:
        # Snap to the left edge, keeping the window width.
        return 0, input_axis_size
    if end > image_axis_size:
        # Snap to the right edge, keeping the window width.
        return image_axis_size - input_axis_size, image_axis_size
    return start, end
|
fcca8fa479c0e3ee26aa47feb9ed646185b8f3fe
| 84,023
|
def q_to_r_Qh(params, substep, state_history, prev_state, policy_input):
    """
    Calculate and return Q after a trade where delta_Q is the amount being
    sold into the pool. The trade is skipped (Q unchanged) when nothing is
    sold or when the sale would drive the reserve negative.
    """
    amount_sold = policy_input['q_sold']  # amount of Q being sold by the user
    reserve = prev_state['Q']
    # Reserve-availability threshold check (JS July 9, 2021).
    if amount_sold == 0 or reserve + amount_sold < 0:
        return ('Q', reserve)
    return ('Q', reserve + amount_sold)
|
83f9119d366ec8630ccd04e8076a021207c4a8c3
| 84,024
|
def _create_filter(names):
"""
Create a filter function matching names.
"""
if names == "*":
return lambda k: False
names = names.split("|")
return lambda k: k not in names
|
883500ffb906fd29882deffc17513ee0bec42177
| 84,026
|
def extract_entity_span_start(**args):
    """
    Example entity feature that gets the start span for each entity

    Returns:
        (function) A feature extraction function that returns the start span of the entity
    """
    def _extractor(example, resources):
        # example is a (query, entities, entity_index) triple; only the
        # indexed entity's token span start is extracted.
        _query, entities, entity_index = example
        entity = entities[entity_index]
        return {"entity_span_start": entity.token_span.start}
    return _extractor
|
cd78888f7186e808b7998ba9883fbf6c20c94b77
| 84,028
|
def jp_large_contents_manager(request, tmp_path):
    """Returns a LargeFileManager instance rooted at tmp_path.

    The manager class comes from the (parametrized) request.param.
    """
    manager_cls = request.param
    return manager_cls(root_dir=str(tmp_path))
|
f1eb7eb09139fb5434e75d7064e67af50e7c86b0
| 84,031
|
import requests
def api_query(api_key, series_id):
    """Execute an EIA API query and extract the data returned

    Execute a query of the EIA API and extract the data from the
    structure returned by the API.

    Args:
        api_key (str): EIA API key.
        series_id (str): Identifying string for a specific data series.

    Returns:
        A nested list of data with inner lists structured as
        [year string, data value]; on an invalid series_id the raw
        response dict is returned unchanged after printing a notice.
    """
    url = ('https://api.eia.gov/series/?series_id=' + series_id +
           '&api_key=' + api_key)
    data = requests.get(url).json()
    try:
        data = data['series'][0]['data']
    except KeyError:
        # If an invalid series_id is used, the 'series' key will not be present
        print('\nSeries ID not available from API: ' + series_id)
    return data
|
eefc1cfb5432ec63f8b75447491ce3553dbb3661
| 84,033
|
def safeget(dct, keys):
    """
    Get a nested value in a json-style dict, returning None instead of
    raising when the lookup fails. Only nested dicts are supported, no lists.

    Fix: previously only KeyError was caught, so an intermediate value that
    was not a dict (e.g. an int, or a list indexed with a string key)
    crashed with TypeError despite the "safe" contract; TypeError and
    IndexError are now treated as a missing value too.

    :param dct: (possibly nested) dict to look in
    :param keys: iterable of keys to follow, outermost first
    :return: the nested value, or None if any step of the lookup fails
    """
    for key in keys:
        try:
            dct = dct[key]
        except (KeyError, TypeError, IndexError):
            return None
    return dct
|
fce6accd6725411ec3e6e53e90c8907d402bf1f9
| 84,036
|
def clamp(value, min_value, max_value):
    """
    Clamp the given value between a min and a max value.

    Fix: the upper branch previously executed `return max`, returning the
    *builtin max function* instead of max_value.

    :param value: Value to be clamped
    :param min_value: min value to clamp against
    :param max_value: max value to clamp against
    :return: clamped value
    """
    if value < min_value:
        return min_value
    if value > max_value:
        return max_value
    return value
|
965c6380d9777927d219b52c46b53c4036edc910
| 84,037
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.