content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def emojify(s):
    """Map a Russian weather-forecast description onto a weather emoji.

    Matching is case-insensitive and substring based; an empty string is
    returned when no known condition is mentioned.
    """
    text = s.lower()
    rainy = "дождь" in text
    stormy = "гроз" in text
    overcast = "пасмурно" in text or "сплошная облачность" in text
    # Order matters: the most specific combinations are tested first.
    if "ясно" in text:
        return "☀️"
    if rainy and stormy:
        return "⛈"
    if stormy and not rainy:
        return "🌩"
    if overcast and rainy:
        return "🌧"
    if "облач" in text and rainy:
        return "🌦"
    if "малооблачно" in text or "небольшая облачность" in text:
        return "🌤"
    if overcast:
        return "☁️"
    if "облач" in text:
        return "🌥"
    return ""
import re
def parse_bytes(bytestr):
    """Parse a byte-quantity string into an integer number of bytes.

    Examples of accepted input: '536.71KiB', '31.5 mb', '100b', '42'.
    Integers are returned unchanged; anything unparseable yields 0.
    Modified from the original in youtube-dl.

    :param bytestr: int or string quantity (unit suffix optional)
    :return: number of bytes as an int (0 on failure)
    """
    # If the input value is already an int, return it as-is.
    if isinstance(bytestr, int):
        return bytestr
    try:
        # Remove spaces so '31.5 mb' parses like '31.5mb'.
        cleaned = bytestr.replace(' ', '').lower()
        # Bug fix: the class now includes 'b' so plain byte counts such
        # as '100b' match (the multiplier table below always supported
        # 'b', but the old regex rejected it and returned 0).
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([bkMGTPEZY]\S*)?$', cleaned)
        if matchobj is None:
            return 0
        number = float(matchobj.group(1))
        # Only the first letter of the suffix selects the multiplier
        # ('kib', 'kb' and 'k' are all kilo).
        unit = matchobj.group(2).lower()[0:1] if matchobj.group(2) else ''
        multiplier = 1024.0 ** 'bkmgtpezy'.index(unit)
        return int(round(number * multiplier))
    except (AttributeError, TypeError, ValueError):
        # Non-string input or an unknown unit letter: treat as unparseable
        # (narrowed from the original bare `except`).
        return 0
import errno
def is_perm_error(e):
    """Return True when the exception represents a file-permission failure.

    :param e: exception to inspect; any object without an ``errno``
        attribute simply yields False
    :return: True for EACCES/EPERM errno values, False otherwise
    :rtype: bool
    """
    permission_codes = (errno.EACCES, errno.EPERM)
    try:
        code = e.errno
    except AttributeError:
        return False
    return code in permission_codes
def build_args_from_supported_features( features ):
    """Translate a {feature-name: bool} mapping into configure-style flags.

    Ex: {'music-wave': True} yields ['--enable-music-wave'].
    """
    return [
        ('--enable-' if enabled else '--disable-') + name
        for name, enabled in features.items()
    ]
def t_name_to_flan_pattern_name(t_name: str) -> str:
    """Convert `t_name` to its flan `PATTERNS` key.

    Several seqio tasks share one flan pattern; unmapped names pass
    through unchanged.

    Args:
      t_name: Task config name.
    Returns:
      a key for `PATTERNS`.
    """
    # Substring-based families first.
    if 'para_crawl' in t_name:
        return 'para_crawl'
    if 'wmt16_translate' in t_name:
        return 'wmt16_translate'
    # Exact-name groupings that collapse onto one pattern key.
    shared_patterns = {
        'arc_challenge': 'arc',
        'arc_easy': 'arc',
        'anli_r1': 'anli',
        'anli_r2': 'anli',
        'anli_r3': 'anli',
        'mnli_matched': 'mnli',
        'mnli_mismatched': 'mnli',
    }
    return shared_patterns.get(t_name, t_name)
def read_distance_file(dist_file, threshold):
    """Load a tab-separated distance file into a symmetric nested dict.

    Each line is ``rxn1<TAB>rxn2<TAB>distance``; pairs whose distance
    exceeds `threshold` are skipped entirely.

    :param dist_file: path of the file to read
    :type dist_file: str
    :param threshold: maximum distance to keep
    :type threshold: float
    :return: mapping rxn1 -> rxn2 -> distance (stored in both directions)
    :rtype: dict
    """
    distances = {}
    with open(dist_file, 'r') as handle:
        for line in handle:
            fields = line.strip().split("\t")
            dist = float(fields[2])
            if dist > threshold:
                continue
            first, second = fields[0], fields[1]
            distances.setdefault(first, {})[second] = dist
            distances.setdefault(second, {})[first] = dist
    return distances
def str_to_bool(value):
    """
    Interpret a string as a boolean, case-insensitively.

    Parameters
    ----------
    value : str
        String to convert; 'true', '1', 'up' and 'on' (any case)
        evaluate to True.

    Returns
    -------
    bool
        True for the recognised strings, False for everything else.
    """
    truthy_strings = ('true', '1', 'up', 'on')
    return value.lower() in truthy_strings
def training_non_prepare(X, y):
    """
    No-op stand-in for the generator's ``prepare`` hook.

    Returns the inputs unchanged; plug it in when no preprocessing of
    (X, y) is wanted. (Contrary to the old docstring, nothing is printed.)
    """
    return X, y
def format_time(elapsed):
    """Format elapsed seconds as a compact human-readable string.

    Examples: 42 -> '42s', 90 -> '1m30s', 3661 -> '1h1m1s'.

    :param elapsed: duration in seconds (fractions are truncated)
    :return: formatted string
    """
    hours = int(elapsed / (60 * 60))
    minutes = int((elapsed % (60 * 60)) / 60)
    seconds = int(elapsed % 60)
    rval = ""
    if hours:
        rval += "{0}h".format(hours)
    # Bug fix: gate on the computed fields, not `elapsed > 60` — the old
    # test rendered exactly 60 seconds as "0s" (minutes were skipped).
    if hours or minutes:
        rval += "{0}m".format(minutes)
    rval += "{0}s".format(seconds)
    return rval
def _edge_func(G):
    """Return a callable that yields G's edges, including edge keys for
    multigraphs (so parallel edges stay distinguishable).
    """
    # is_multigraph() is evaluated once, when the accessor is built.
    multigraph = G.is_multigraph()

    def get_edges(nbunch=None):
        if multigraph:
            return G.edges(nbunch, keys=True)
        return G.edges(nbunch)

    return get_edges
from typing import Counter
def confusion_count(prediction, truth):
    """Count every (predicted, actual) combination across the two
    aligned sequences, returned as a Counter keyed by the pair."""
    paired = zip(prediction, truth)
    return Counter(paired)
import torch
def smooth_L1(ti, ti_star):
    """
    Elementwise smooth-L1 (Huber) loss:
        0.5 * x^2       if |x| < 1
        |x| - 0.5       otherwise
    Params:
        ti: shape([N])
        ti_star: shape([N])
    Return: score: shape([N])
    """
    diff = torch.abs(ti - ti_star)
    # Single branchless select instead of two index assignments.
    return torch.where(diff < 1, 0.5 * diff.pow(2), diff - 0.5)
def encode_timeseries_window(source, lag_size, lead_size, input_fields, predict_fields):
    """
    Encode raw rows into sliding time-series training windows.

    :param source: 2D array of raw rows to encode.
    :param lag_size: Number of rows used as the prediction input.
    :param lead_size: Number of rows to be predicted.
    :param input_fields: Booleans selecting the input columns.
    :param predict_fields: Booleans selecting the predicted columns.
    :return: (x, y) lists for training; each x/y row is laid out
        column-major (all lags/leads of one field, then the next field).
    """
    result_x, result_y = [], []
    window_count = len(source) - (lag_size + lead_size) + 1
    for start in range(window_count):
        # Input: the lag_size rows preceding the prediction window.
        result_x.append([
            source[start + offset][col]
            for col, selected in enumerate(input_fields) if selected
            for offset in range(lag_size)
        ])
        # Expected output: the lead_size rows following the lag window.
        result_y.append([
            source[lag_size + start + offset][col]
            for col, selected in enumerate(predict_fields) if selected
            for offset in range(lead_size)
        ])
    return result_x, result_y
def make_margins(width, height, left=0, right=0, top=0, bottom=0, margins=None):
    """Compute Matplotlib subplot margins as fractions of the figure.

    The tuple can be unpacked straight into subplots_adjust with *.

    Args:
        width, height: figure size
        margins: when truthy, one value applied to all four sides
            (overriding the individual settings)
        left, right, top, bottom: individual margins
    Returns:
        (left, bottom, right, top) fractions.
    """
    if margins:
        left = right = top = bottom = margins
    return (left / width,
            bottom / height,
            1 - right / width,
            1 - top / height)
def commit_exists(cursor, sha):
    """Return True when at least one commit with the given SHA exists.

    :param cursor: DB-API cursor to query with
    :param sha: commit hash to look for
    """
    # Bug fix: DB-API execute() takes parameters as a sequence; the bare
    # string was previously treated as a sequence of characters.
    cursor.execute('SELECT COUNT(*) FROM commit WHERE sha = %s', (sha,))
    return cursor.fetchone()[0] > 0
def get_size(tensor):
    """Return the element count of `tensor`, treating unknown (-1)
    dimensions as size 1."""
    total = 1
    for dim in tensor.shape.as_list():
        total *= 1 if dim == -1 else dim
    return total
def __splitTime(sec):
    """Split a duration in seconds into hours, minutes and seconds.

    @rtype: tuple(int, int, int)
    @return: (hours, minutes, seconds) for the given duration
    """
    total_minutes, seconds = divmod(sec, 60)
    hours, minutes = divmod(total_minutes, 60)
    return (hours, minutes, seconds)
def field_subset(f, inds, rank=0):
    """Pick the values of a (possibly tensor-valued) field at index vectors.

    Parameters
    ----------
    f: array, shape (a1, ..., ad, r1, ..., rrank)
        Rank-`rank` field over a d-dimensional space.
    inds: integer array, shape (n, d) — or (n,) when the field is 1d
        Index vectors.
    rank: integer
        Tensor rank of the field (0: scalar, 1: vector, ...).

    Returns
    -------
    f_sub: array, shape (n, ...) — the selected field values.
    """
    spatial_dims = f.ndim - rank
    if inds.ndim > 2:
        raise Exception('Too many dimensions in indices array')
    if inds.ndim == 1:
        if spatial_dims != 1:
            raise Exception('Indices array is 1d but field is not')
        return f[inds]
    if inds.shape[1] != spatial_dims:
        raise Exception('Indices and field dimensions do not match')
    # Advanced indexing: one index array per spatial axis.
    return f[tuple(inds[:, axis] for axis in range(inds.shape[1]))]
def has_person(doc):
    """
    Doc-level spaCy attribute getter: True when the Doc contains at
    least one named entity labelled PERSON.
    """
    for ent in doc.ents:
        if ent.label_ == "PERSON":
            return True
    return False
import random
import string
def random_string(chars=10):
    """
    Build a random string of uppercase letters and digits.

    :param chars: desired length
    :return: string of `chars` randomly chosen characters
    """
    alphabet = string.ascii_uppercase + string.digits
    picks = [random.choice(alphabet) for _ in range(chars)]
    return ''.join(picks)
def increment_data(data):
    """Shift every character of `data` one Unicode code point forward."""
    shifted = [chr(ord(ch) + 1) for ch in data]
    return ''.join(shifted)
def getKeyFromKV(inputtext='',keylist=[],valuelist=[]):
    """Prompt the user to pick a key from a key:value listing.

    Repeatedly displays one ``key : value`` line per entry and reads a
    response until the user types one of the keys exactly; that key
    string is returned.

    *inputtext defines the text preceding the user interaction
    *keylist defines the values acceptable as a user response
    *valuelist defines the values that correspond to the keys
    ## *note that keys, values and the returned value should all be
    text strings; keylist and valuelist are assumed to be equal length
    """
    # NOTE(review): the [] defaults are shared across calls (classic
    # mutable-default pitfall); harmless here since they are never mutated.
    while 1:
        # One "key <TAB>:<TAB> value" display line per entry.
        displaylist=[keylist[i]+'\t:\t'+valuelist[i]
                     for i in range(len(keylist))]
        outputtext=input(inputtext + '\n\t' + '\n\t'.join(displaylist) +
                         '\nPlease Select From The Listed Options:\n')
        if outputtext in keylist:
            return outputtext
        # Anything not in keylist re-prompts after a warning.
        else: print('INVALID SELECTION')
def all_validator(value):  # pylint: disable=unused-argument
    """Permissive validator: accepts any inserted value.

    Always returns True regardless of `value`.
    """
    return True
def find_sort_symbol(list_names):
    """
    :param list_names: list of name strings
    :return: the first character of a name that occurs least often among
        the names' initials (earliest-seen initial wins ties)
    """
    # Tally how many names start with each character.
    initial_counts = {}
    for name in list_names:
        initial = name[0]
        initial_counts[initial] = initial_counts.get(initial, 0) + 1
    # Stable sort by count; the first entry is the rarest initial
    # (insertion order breaks ties, matching dict iteration order).
    ranked = sorted(initial_counts.items(), key=lambda pair: pair[1])
    return ranked[0][0]
def _allele_from_index(variant, allele_index):
    """Return the allele string for `allele_index`: 0 selects the
    reference bases, i > 0 selects alternate_bases[i - 1]."""
    if allele_index:
        return variant.alternate_bases[allele_index - 1]
    return variant.reference_bases
import re
def add_selections_to_tweet(tweet_text, selections):
    """Replace placeholder tokens with the chosen day segments.

    Each "NOUN" is filled one-by-one from `selections` (in preference
    order), then every "A(N)" article placeholder becomes "an" when the
    first selection is afternoon/evening and "a" otherwise.

    Args:
        tweet_text (str): A tweet template sentence from tweet_content.yaml
        selections (list): Names of the segments of the day, in
            preference order (must be non-empty)
    Returns:
        str: A grammatically correct sentence with the selections
        substituted in
    """
    text_with_selection = tweet_text
    for selection in selections:
        text_with_selection = re.sub("NOUN", selection, text_with_selection, count=1)
    # Bug fix: the pattern is now a raw string — "A\(N\)" in a normal
    # string is an invalid escape sequence (DeprecationWarning today,
    # a SyntaxError in future Python versions).
    article = "an" if selections[0] in ("afternoon", "evening") else "a"
    text_with_selection = re.sub(r"A\(N\)", article, text_with_selection)
    return text_with_selection
def get_intersection(polygon1, polygon2):
    """Return the area of the overlap between two polygons, or 0 when
    they do not intersect (empty intersection)."""
    overlap = polygon1 & polygon2
    if len(overlap):
        return overlap.area()
    return 0
def closest_color(rgb, colors):
    """
    Find the entry of `colors` nearest to `rgb` in Euclidean RGB distance.

    WARNING: this function is *destructive* — the winning color is
    removed from the input `colors` list so repeated calls cannot assign
    the same color twice. Pass a copy if the original must survive.

    Parameters
    ----------
    rgb : tuple[int]
        Target RGB color.
    colors : list[tuple[int]]
        Candidate palette (mutated in place).

    Returns
    -------
    tuple[int]
        The closest RGB color, now removed from `colors`.
    """
    target_r, target_g, target_b = rgb
    ranked = []
    for candidate in colors:
        cand_r, cand_g, cand_b = candidate
        distance = ((target_r - cand_r) ** 2 +
                    (target_g - cand_g) ** 2 +
                    (target_b - cand_b) ** 2) ** 0.5
        ranked.append((distance, candidate))
    # Tuple comparison keeps the original tie-breaking (lowest color wins).
    best = min(ranked)[1]
    colors.remove(best)
    return best
def get_rnx_band_from_freq(frequency):
    """
    Obtain the RINEX frequency band for a carrier frequency in Hz.

    Raises ValueError when the frequency maps to no known band.

    >>> get_rnx_band_from_freq(1575420030.0)
    1
    >>> get_rnx_band_from_freq(1600875010.0)
    1
    >>> get_rnx_band_from_freq(1176450050.0)
    5
    >>> get_rnx_band_from_freq(1561097980.0)
    2
    """
    # Backwards compatibility with empty fields (assume GPS L1)
    ifreq = 154 if frequency == '' else round(frequency / 10.23e6)
    # QZSS L1 (154), GPS L1 (154), GAL E1 (154), and GLO L1 (156)
    if ifreq >= 154:
        return 1
    # QZSS L5 (115), GPS L5 (115), GAL E5 (115)
    elif ifreq == 115:
        return 5
    # BDS B1I (153)
    elif ifreq == 153:
        return 2
    # Unknown multiplier: fail loudly. (An unreachable `return ifreq`
    # that followed this raise has been removed.)
    raise ValueError("Cannot get Rinex frequency band from frequency [ {0} ]. "
                     "Got the following integer frequency multiplier [ {1:.2f} ]\n".format(frequency, ifreq))
import torch
def complex_to_mag_phase(data):
    """
    Convert a stacked real/imaginary tensor to magnitude and phase.

    :param data (torch.Tensor): complex-valued tensor whose third-last
        dimension has size 2 (index 0 = real, 1 = imaginary); indexing
        below additionally assumes a 4-D (N, 2, H, W) layout
    :return (torch.Tensor): tensor of the same size with (mag, phase)
        stacked along dim -3
    """
    assert data.size(-3) == 2
    real_part = data[:, 0, :, :]
    imag_part = data[:, 1, :, :]
    magnitude = (data ** 2).sum(dim=-3).sqrt()
    phase = torch.atan2(imag_part, real_part)
    return torch.stack((magnitude, phase), dim=-3)
def _xpubs_equal_ignoring_version(xpub1: bytes, xpub2: bytes) -> bool:
    """
    Compare two 78-byte xpubs while ignoring their 4-byte version prefix.

    The version is unimportant and ignoring it keeps compatibility with
    Electrum, which exports PSBTs using Electrum-style xpub versions.
    """
    version_len = 4
    return xpub1[version_len:] == xpub2[version_len:]
def drop_rows_by_column_values(df, column_name,
                               values):
    """Drop the rows whose `column_name` value appears in `values`.

    Args:
        df: A pandas dataframe.
        column_name: Column to filter the rows by.
        values: Values whose rows should be removed.
    Returns:
        Filtered dataframe (original is untouched).
    """
    keep_mask = ~df[column_name].isin(values)
    return df[keep_mask]
def get_raw_xy(data):
    """
    Split price data into prediction features and closing-price targets.

    Drops the Date and Adj Close columns (the data is assumed dividend
    free, so Close equals Adj Close) and returns same-day metric rows
    alongside the Close column.
    """
    trimmed = data.drop(columns=['Date', 'Adj Close'])
    values = trimmed.values
    # Column layout after the drop: Open=0, High=1, Low=2, Close=3, Volume=4
    features = values[:, [0, 1, 2, 3, 4]]
    closes = values[:, 3]
    return features, closes
def compute_theta_phi_range(phys_t, phys_p):
    """Compute the min/max angle range about the initial offset.

    Based on the "full_range" defined in plate_control/petal/posmodel.py.

    Args:
        phys_t (float): PHYSICAL_RANGE_T in degrees.
        phys_p (float): PHYSICAL_RANGE_P in degrees.
    Returns:
        (tuple): The (theta_min, theta_max, phi_min, phi_max) angles;
        theta is symmetric about 0, phi ends at 185 degrees.
    """
    half_theta = 0.5 * phys_t
    return (-half_theta, half_theta, 185.0 - phys_p, 185.0)
import random
def generate_random(power):
    """
    Generate 5 random integer lists, each of length 2**power.

    :param power: power of 2 giving each list's length
    :return: list of 5 lists of random ints in [0, 10000]
    """
    experiments = []
    for _ in range(5):
        trial = [random.randint(0, 10000) for _ in range(2 ** power)]
        experiments.append(trial)
    return experiments
def parse_sensor_values(data):
    """
    Decode a sensor reading line into a metric dictionary.

    Input line (bytes): <humidity>,<temperature>,<sound>
    Output: {'humidity': ..., 'temperature': ..., 'sound': ...}
    with the raw string values.
    """
    keys = ('humidity', 'temperature', 'sound')
    fields = data.decode('utf-8').strip().split(',')
    return {name: value for name, value in zip(keys, fields)}
def exclude_val_from_dict(dictionary, key):
    """
    Return a copy of `dictionary` without `key`, leaving the original
    untouched.

    :param dictionary: original dictionary
    :param key: key to remove (KeyError when absent, as before)
    :return: new dictionary missing `key`
    """
    pruned = dict(dictionary)
    del pruned[key]
    return pruned
def convert_mixing_names_to_positions(mixing_names, ind_var_names):
    """
    Locate each mixing variable's position within the index variables.

    Parameters
    ----------
    mixing_names : list of str
        Names of index variables treated as random variables.
    ind_var_names : list of str
        Names of the index variables, in order.

    Returns
    -------
    list of int
        Position of each mixing name inside `ind_var_names`.
    """
    positions = []
    for name in mixing_names:
        positions.append(ind_var_names.index(name))
    return positions
def get_name(dictionary):
    """Return the `family` name from a UA or OS dictionary.

    As bizarre UA or OS strings can be parsed like so:
    {'major': None, 'minor': None, 'family': 'Other', 'patch': None}
    we return "Unknown" rather than "Other". A missing or None family
    (previously an AttributeError on `.lower()`) also maps to "Unknown".
    """
    name = dictionary.get('family')
    if not name or name.lower() == "other":
        return "Unknown"
    return name
def as_integer(value):
    """Return `value` as an int when int(value) compares equal to it
    (e.g. 5.0 -> 5); otherwise return the value unchanged.

    Note that numeric strings like "5" compare unequal to 5 and are
    therefore returned as-is, and non-numeric input never raises.
    """
    try:
        converted = int(value)
        if value == converted:
            return converted
    except Exception:
        # Not numeric at all — fall through and hand back the original.
        pass
    return value
def rstmac(self, file1="", lstep1="", sbstep1="", file2="", lstep2="",
           sbstep2="", maclim="", cname="", keyprint="", **kwargs):
    """APDL Command: RSTMAC

    Calculate the modal assurance criterion (MAC) and match nodal
    solutions from two results files, or from one results file and one
    universal format file.

    Parameters
    ----------
    file1
        First results file name (.rst or .rstp, 32 characters max);
        a missing extension defaults to .rst.
    lstep1
        Load step number to read in File1 (N; defaults to 1).
    sbstep1
        Substep to read in File1 (N, or All for every substep — the
        default).
    file2
        Second file name (.rst, .rstp or .unv, 32 characters max);
        a missing extension defaults to .rst.
    lstep2
        Load step number to read in File2 (N; defaults to 1).
    sbstep2
        Substep to read in File2 (N, or All for every substep — the
        default).
    maclim
        Smallest acceptable MAC value, between 0 and 1 (default 0.90).
    cname
        Node-based component name from File1. When unspecified all
        nodes are matched and used for the MAC calculation; otherwise
        only the component's nodes are used. Not applicable to node
        mapping (TolerN=-1).
    keyprint
        Printout option: 0 matched solutions table (default), 1 adds
        the full MAC table, 2 additionally adds the matched nodes
        table.

    Notes
    -----
    The meshes on the two files may differ: nodes are matched
    (TolerN>0, the default) or mapped with interpolation from File1
    (TolerN=-1, which requires a solid/shell mesh and the resumed .db,
    and interpolates UX/UY/UZ). Units and coordinate systems must
    agree; with a universal file the coordinates can be scaled via
    UNVscale. Results may be real or complex — when the types differ
    only the real parts enter the MAC calculation — and only
    structural degrees of freedom are considered, with at least one
    common DOF required. Solutions read this way are not written to
    the database, so issue a SET command afterwards to post-process
    each solution. Cyclic symmetry comparisons need equal sector
    counts on both files and cannot mix cyclic and full 360-degree
    results. MAC values can be retrieved with *GET (Entity = RSTMAC).
    See "Comparing Nodal Solutions From Two Models (RSTMAC)" in the
    Basic Analysis Guide for an example.
    """
    # The empty slot between sbstep2 and maclim is an unused ANSYS field.
    fields = ("RSTMAC", file1, lstep1, sbstep1, file2, lstep2,
              sbstep2, "", maclim, cname, keyprint)
    command = ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
import zipfile
def extract_zip(filepath, destination):
    """
    Extract a zip archive and return the names of its members.

    :param filepath: path of the zip file to be unzipped
    :param destination: directory the archive is extracted into
    :type filepath: string
    :type destination: string
    :return: names of the files extracted to the destination
    :rtype: list of strings

    Note: the debug ``printdir()`` call of the original was removed.
    """
    # Context manager guarantees the archive handle is closed even when
    # extraction raises (the original leaked it on error).
    with zipfile.ZipFile(filepath, "r") as zfile:
        zfile.extractall(destination)
        return zfile.namelist()
def query_encode(query):
    # type: (str) -> str
    """Substitute '+' for every space in `query`."""
    return "+".join(query.split(" "))
def is_probably_packed( pe ):
    """Heuristically decide whether a PE file is packed/compressed.

    Sections whose byte entropy exceeds 7.4 bits are treated as
    compressed; when their combined size exceeds 20% of the trimmed
    file (overlay data excluded) the file is flagged as likely packed
    or an installer.
    """
    # Data length up to the end of the last section; overlay data past
    # that point is deliberately ignored.
    trimmed_length = len( pe.trim() )
    looks_packed = False
    compressed_bytes = 0
    for section in pe.sections:
        # 7.4 is an empirical threshold taken from files produced by a
        # handful of different packers.
        if section.get_entropy() > 7.4:
            compressed_bytes += len( section.get_data() )
        if (1.0 * compressed_bytes) / trimmed_length > .2:
            looks_packed = True
    return looks_packed
from datetime import datetime
def time_to_date(time):
    """Convert a JavaScript millisecond timestamp into a Python datetime
    (interpreted in local time, like datetime.fromtimestamp)."""
    seconds = time / 1000
    return datetime.fromtimestamp(seconds)
import math
def softmax(y_pred):
    """Return a new dict of softmax-normalized probabilities.

    Contrary to the old docstring, `y_pred` is NOT modified in place:
    a fresh {class: probability} mapping (summing to 1) is returned.
    """
    exp = {c: math.exp(p) for c, p in y_pred.items()}
    total = sum(exp.values())
    return {c: exp[c] / total for c in y_pred}
def make_first_upper(in_string):
    """
    Change the first character of a string to upper case.

    :param in_string: original string
    :return: string with its first character upper-cased; the empty
        string is returned unchanged (previously an IndexError)
    """
    if not in_string:
        return in_string
    return in_string[0].upper() + in_string[1:]
def loadString(dset):
    """!
    Read a scalar string from an HDF5 dataset as a Python `str`.

    Since version 3.0, h5py loads UTF-8 strings as `bytes`; decoding
    those here gives uniform `str` behavior across h5py 2.x and 3.x.
    """
    raw = dset[()]
    return raw if isinstance(raw, str) else raw.decode("utf-8")
from typing import List
from typing import Dict
from typing import Any
def _sort_keywords_for_gpc_by_descending_weight(
    keywords_for_gpc: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
  """Sorts keywords_for_gpc by their 'weight' values, largest first."""
  def weight_of(entry):
    # Sort key: the entry's numeric weight.
    return entry['weight']
  return sorted(keywords_for_gpc, key=weight_of, reverse=True)
def getvalueor(row, name, mapping=None, default=None):
    """Look up `name` in `row`, translating the key through `mapping`
    first when a translation exists.

    :param row: dict-like object with a .get method
    :param name: logical field name to fetch
    :param mapping: optional {logical name: actual key} translation
        table (default None — replaces the shared mutable ``{}``
        default of the original, a classic Python pitfall)
    :param default: value returned when the key is absent
    """
    if mapping and name in mapping:
        return row.get(mapping[name], default)
    return row.get(name, default)
def train(params, step, iters=1):
    """Apply `step` to `params` `iters` times and return the result."""
    for _ in range(iters):
        params = step(params)
    return params
from typing import Any
import pickle
def dump_binary(o: Any) -> bytes:
    """Serialize `o` into a pickle byte string."""
    payload = pickle.dumps(o)
    return payload
def mean(data):
    """
    Arithmetic mean of ``data`` as a float; None for an empty sequence.

    Examples
    --------
    >>> mean([1, 2, 3, 4, 4])
    2.8
    """
    count = len(data)
    if count < 1:
        return None
    return float(sum(data)) / count
def chunkIt(seq, num):
    """
    Split `seq` into `num` chunks of roughly equal length.

    From https://stackoverflow.com/questions/2130016/splitting-a-list-into-n-parts-of-approximately-equal-length
    (author: Max Shawabkeh); used to build lists for sequential clustering.

    Args:
        seq: the sequence to chunk
        num: the number of chunks to produce
    Return:
        list of slices with roughly equal element counts
    """
    chunk_size = len(seq) / float(num)
    chunks = []
    cursor = 0.0
    # Floating-point stepping spreads the remainder across the chunks.
    while cursor < len(seq):
        start, stop = int(cursor), int(cursor + chunk_size)
        chunks.append(seq[start:stop])
        cursor += chunk_size
    return chunks
from typing import Any
def identity(x: Any) -> Any:
    """Default no-op callable: hands back exactly what it was given.

    :param x: any input, preferably tensors
    :type x: Any
    :return: the input itself, unmodified
    :rtype: Any
    """
    return x
import re
def alpha_num(text):
    """Strip every character outside [a-zA-Z0-9] from the text."""
    # isascii()+isalnum() keeps exactly the ASCII alphanumerics, the
    # same set the original regex [^a-zA-Z0-9] preserved.
    return ''.join(ch for ch in text if ch.isascii() and ch.isalnum())
def get_sample_count(profileDict):
    """
    Number of samples recorded in an Arm MAP profile dictionary.

    Args:
        profileDict (dict): parsed MAP data containing a
            ``samples -> count`` entry
    Returns:
        The non-negative sample count.
    """
    assert isinstance(profileDict, dict)
    samples_section = profileDict["samples"]
    return samples_section["count"]
def merge_dicts(*args):
    """Merge any number of dictionaries left-to-right; later values win.

    At least one dictionary is required; none of the inputs is mutated.
    """
    merged = dict(args[0])
    for extra in args[1:]:
        merged.update(extra)
    return merged
from typing import Optional
def find_occurrence(string: str, sub: str, position: int, start: Optional[int] = None, end: Optional[int] = None) -> int:
    """
    Similar to `str.find`, but finds the offset of the `position`-th
    (0-based) occurrence of `sub` in `string[start:end]`.
    Returns -1 when there are not enough occurrences.
    """
    # Mirror str.find: the empty substring matches immediately.
    if len(sub) == 0:
        return 0
    marker: int = 0
    curpos: int = start if start is not None else 0
    if end is None:
        end = len(string)
    while marker <= position and curpos <= len(string):
        # Bug fix: `end` was computed but never passed, so occurrences
        # past the requested window were wrongly counted.
        j = string.find(sub, curpos, end)
        if j < 0 or marker == position:
            return j
        curpos = j + len(sub)
        marker += 1
    # Theoretically we should never get here, but if we do ...
    raise RuntimeError("invalid state")
from typing import List
from typing import Set
def extract_n_grams(tokenized_documents: List[List[str]], n: int) -> Set[str]:
    """
    Extract the set of space-joined n-grams from tokenized documents.

    :param tokenized_documents: List of tokenized documents.
    :param n: The n-gram size.
    :return: Set of n-gram strings.
    """
    # Leftover debug print() calls were removed, and the intermediate
    # list replaced by a direct set comprehension.
    return {
        " ".join(document[i: i + n])
        for document in tokenized_documents
        for i in range(len(document) - n + 1)
    }
def stringBits(words):
    """
    Return every other character of the string, starting with the first.

    Given a string, a new string of the even-indexed characters is
    returned, so "Hello" yields "Hlo".

    Args:
        words (String): string provided by the user
    Return:
        result (String): the string with every other character skipped
    """
    # Even-indexed characters via extended slicing.
    return words[::2]
import hmac
import base64
import hashlib
def gen_signature(url, signed_nonce, nonce, data):
    """Build the base64 HMAC-SHA256 request signature over
    url, signed_nonce, nonce and the data payload."""
    message = '&'.join([url, signed_nonce, nonce, 'data=' + data]).encode()
    mac = hmac.new(key=base64.b64decode(signed_nonce),
                   msg=message,
                   digestmod=hashlib.sha256)
    return base64.b64encode(mac.digest()).decode()
import ipaddress
def ip_is_loopback(ip):
    """Return whether an IP address is on the loopback interface.

    IPv4-mapped IPv6 addresses (::ffff:127.0.0.1) are unwrapped to
    their IPv4 form before the check.
    """
    addr = ipaddress.ip_address(ip)
    if addr.version == 6:
        unwrapped = addr.ipv4_mapped
        if unwrapped is not None:
            addr = unwrapped
    return addr.is_loopback
def get_default_data_sub_id(ad):
    """ Get default data subscription id
    """
    # Prefer a value cached on the device object; fall back to querying
    # the droid interface when it is absent.
    try:
        return ad.default_data_sub_id
    except AttributeError:
        return ad.droid.subscriptionGetDefaultDataSubId()
def dec2hex(x):
    """
    Convert decimal number to hexadecimal string.
    For instance: 26 -> '1a'
    """
    # hex() yields a '0x'-prefixed string; strip the two-character prefix.
    prefixed = hex(x)
    return prefixed[2:]
def partition_array(nums, k):
    """
    Partition given array in place (like quick sort): after the call every
    element < k precedes every element >= k.

    :param nums: given array (mutated in place)
    :type nums: list[int]
    :param k: partition value
    :type k: int
    :return: the partitioning index (first position holding a value >= k)
    :rtype: int
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        # Advance lo past elements already on the correct (left) side.
        while lo <= hi and nums[lo] < k:
            lo += 1
        # Retreat hi past elements already on the correct (right) side.
        while lo <= hi and nums[hi] >= k:
            hi -= 1
        if lo <= hi:
            # Both pointers stopped on misplaced elements -- swap them.
            nums[lo], nums[hi] = nums[hi], nums[lo]
            lo += 1
            hi -= 1
    return lo
def get_long_description(path, encoding='utf-8'):
    """
    Prepare long description for setup.py from *readme* file.

    The file contents are returned prefixed with a single newline.
    """
    with open(path, encoding=encoding) as readme:
        return "\n" + readme.read()
def a_an(word):
    """Prefix *word* with the indefinite article 'a' or 'an'.

    The article is chosen from the first letter, case-insensitively --
    the original only matched lowercase vowels, so 'Apple' incorrectly
    became 'a Apple'. Using word[:1] also avoids an IndexError on an
    empty string (which now yields 'a ').
    """
    if word[:1].lower() in ('a', 'e', 'i', 'o', 'u'):
        return 'an ' + word
    return 'a ' + word
import time
def is_after(now, flag='2016-01-01 00:00:00'):
"""
判断输入的时间戳是否在规定时间之后
:param now: 时间戳
:param flag: 规定的时间
:return:
"""
# 将其转换为时间数组
timeArray = time.strptime(flag, "%Y-%m-%d %H:%M:%S")
# 转换为时间戳
timeStamp = int(time.mktime(timeArray))
return timeStamp <= int(now) / 1000 | cf2880112eca2aa93f041305cc029d64fe256ba6 | 111,871 |
import pickle
def ReadPickledData(filename):
    """
    Read pickled data from disk and return the unpickled object.

    @param filename: the location of the saved pickled data
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def read_fits(fr, record, verbose=False):
    """Read (fits) record and return data and header information.

    Parameters
    ----------
    fr: FITS hdu record
        hdu
    record: int
        hdu number (only echoed in verbose output)
    verbose: bool, optional, default=False

    Returns
    -------
    dat: FITS_rec
        data
    header: FITS header
        header
    names: list of strings
        column names
    """
    def report(message):
        # Progress messages are printed only when verbose was requested.
        if verbose is True:
            print(message)

    report('read_fits: Using HDU number (record) ' + str(record))
    report("Reading 'columns.names'")
    names = fr.columns.names
    report("Reading 'data'")
    data = fr.data
    report("Reading 'header'")
    header = fr.header
    return data, header, names
def resolve_url_endpoint(base_url: str, endpoint_url: str) -> str:
    """Add `endpoint_url` to the `base_url` and return the whole URL"""
    # Drop a single leading slash so it is not doubled when appending.
    endpoint = endpoint_url[1:] if endpoint_url.startswith("/") else endpoint_url
    # A fully-qualified endpoint is accepted only when it already lives
    # under the base url; anything else is rejected.
    if endpoint.startswith(("https://", "http://")):
        if endpoint.startswith(base_url):
            return endpoint
        raise ValueError("`endpoint` referrs to unknown full url, that doesn't belong to the base url")
    return base_url + endpoint
import math
def param_to_angle(ratio: float, param: float) -> float:
    """Returns circle angle from ellipse parameter for argument `angle`.

    Args:
        ratio: minor axis to major axis ratio as stored in the ELLIPSE entity
            (always <= 1).
        param: ellipse parameter between major axis and point on the ellipse
            curve

    Returns:
        the circle angle as produced by :func:`math.atan2`, i.e. in the
        range (-pi, pi] -- NOT [0, 2*pi) as previously documented.

    """
    return math.atan2(math.sin(param) * ratio, math.cos(param))
import multiprocessing
import unittest
def requires_multiprocessing(test_item):
    """Decorator that skips test if run on single-core CPU.

    Args:
        test_item (callable): function or class to be decorated.

    Returns:
        callable: the decorated function.
    """
    single_core = multiprocessing.cpu_count() <= 1
    return unittest.skipIf(
        single_core, 'Multicore CPU not available, skipping test'
    )(test_item)
def format_timestamp(timestamp, weekday=False):
    """Converts a timestamp into a readable string."""
    # %-m / %-d / %-I suppress zero-padding (glibc strftime extension,
    # not portable to Windows).
    if weekday:
        return timestamp.strftime("%-m/%-d/%Y %-I:%M %p (%A)")
    return timestamp.strftime("%-m/%-d/%Y %-I:%M %p")
import logging
import requests
def start(API_KEY, INPUT_SAS, CALLBACK_DESTINATION):
    """
    Creates a diagnose job on the Dolby.io server.

    Inputs:
        API_KEY: The Dolby.io media API key required for authenticating jobs.
        INPUT_SAS: The SAS URL that points to the file for diagnosis.
        CALLBACK_DESTINATION: The HTTP location the callback should point to.

    Returns:
        Post response
    """
    logging.info("Diagnosing Azure Stored Media")
    # Tag the callback with the job type and the bare input file name
    # (SAS query string stripped from the URL first).
    file_name = INPUT_SAS.split("?")[0].split("/")[-1]
    callback_destination = (
        CALLBACK_DESTINATION + "?job=diagnose_success" + "&input_file=" + file_name
    )
    # Submit a diagnose job
    url = "https://api.dolby.com/media/diagnose"
    body = {
        "input": INPUT_SAS,
        'on_complete': {'url': callback_destination, "headers": ["x-job-id"]},
    }
    headers = {
        "x-api-key": API_KEY,
        "Content-Type": "application/json",
        "Accept": "application/json",
        "x-job-id": "True",
    }
    response = requests.post(url, json=body, headers=headers)
    # Surface HTTP errors to the caller instead of returning silently.
    response.raise_for_status()
    return response
import re
def preprocess_data(data, lowercase=False, clean=False, remove_continuations=True):
    """
    Preprocess the data according to parameters.

    :param data: Pandas dataframe imported by `import_data`; must provide
        'Text' and 'DamslActTag' columns
    Optional:
    :param lowercase: Convert all text to lowercase
    :param clean: Remove punctuation marks and non-verbal utterances
    :param remove_continuations: Remove utterances with act tag "+"
    :return: tuple (X, y) of Pandas series: utterance texts and act tags
    """
    if lowercase:
        data['Text'] = data['Text'].str.lower()
    if clean:
        # Remove punctuation
        # NOTE(review): inside a character class '|' is literal, so this
        # pattern removes '(', ')', '#', '.', AND '|' -- confirm intended.
        data['Text'] = [re.sub('[(|)|#|.]', '', line) for line in data['Text']]
        # Remove dashes and words in angle brackets (e.g. "<Laughter>")
        data['Text'] = [re.sub('\W-+\W|<\w+>', ' ', line) for line in data['Text']]
    if remove_continuations:
        # '+' marks a continuation of a previous utterance in this corpus.
        data = data[~(data.DamslActTag == '+')]
    # Remove extra spaces (collapse all whitespace runs to one space)
    data['Text'] = [re.sub('\s+', ' ', line) for line in data['Text']]
    data = data[~(data.Text == ' ')] # Remove data rows that end up empty after cleaning
    X, y = data.Text, data.DamslActTag
    return X, y
def read_ip_config(filename):
    """Read networking configuration from file.

    Format:

        [server_id] [ip] [port]

        0 172.31.40.143 50050
        1 172.31.36.140 50050

    Parameters
    ----------
    filename : str
        name of target configure file.

    Returns
    -------
    dict
        server namebook mapping id -> 'ip:port', e.g.,
        {0:'172.31.40.143:50050', 1:'172.31.36.140:50050'}
        (empty when the file is missing or malformed)
    """
    assert len(filename) > 0, 'filename cannot be empty.'
    server_namebook = {}
    try:
        # `with` guarantees the handle is closed (the original leaked it),
        # and the narrowed except no longer swallows unrelated errors
        # such as KeyboardInterrupt.
        with open(filename) as config_file:
            for line in config_file:
                line = line.strip()
                if not line:
                    continue  # tolerate blank lines
                # split() handles repeated spaces/tabs, not just one space
                ID, ip, port = line.split()
                server_namebook[int(ID)] = ip + ':' + port
    except (OSError, ValueError):
        print("Incorrect format IP configure file, the data format on each line should be: [server_id] [ip] [port]")
    return server_namebook
def absorp(t, d):
    """ computes the absorption coefficient for given temperature T and density d
    """
    # Coefficient scales linearly with density and as t^-3.5; the
    # evaluation order matches the original exactly.
    coefficient_scale = 1.984e24
    return coefficient_scale * d / t ** 3.5
def load_capcodes_dict(filename):
    """Load capcodes CSV into a dictionary keyed by capcode.

    The first row is a header; its first column is ignored and the
    remaining column names key each record's value dict. Returns an
    empty dict when the file is missing or malformed.
    """
    capcodes = {}
    try:
        print("Loading data from '{}'".format(filename))
        with open(filename, "r") as csv_file:
            csv_list = [
                [val.strip() for val in r.split(",")] for r in csv_file.readlines()
            ]
        # Unpacking raises ValueError on an empty/short file; the
        # original caught KeyError here, which never fires.
        (_, *header), *data = csv_list
        for row in data:
            key, *values = row
            capcodes[key] = {col: value for col, value in zip(header, values)}
        print("{} records loaded".format(len(capcodes)))
    except ValueError:
        # Fixed: the f-strings below had no placeholder and printed the
        # literal "(unknown)" instead of the filename.
        print(f"Could not parse file contents of: {filename}")
    except OSError:
        print(f"Could not open/read file: {filename}, ignore filter")
    return capcodes
from typing import List
def simple_split_tokenizer(value: str) -> List[str]:
    """Tokenize a string by splitting on single space characters.

    Note: consecutive spaces yield empty tokens (split(" ") semantics).
    """
    separator = " "
    return value.split(separator)
def get_extension(path, format):
    """
    Return the extension of the path, if any.

    The `format` parameter is accepted for interface compatibility but
    is not used.
    """
    parts = path.split(".")
    # A single part means there is no '.' at all -- no extension.
    if len(parts) == 1:
        return ""
    return parts[-1]
def add_pyflow_args(parser):
    """
    Given an argument parser from the argparse library,
    adds a set of common pyflow parameters.

    Parameters added:
    run_mode
    nCores
    memMb
    mailTo
    dry
    isContinue
    pyflow_dir
    schedulerArgList

    Returns the same parser object, allowing call chaining.
    """
    # Directory where pyflow writes its state/log files.
    parser.add_argument('--pyflow_dir', type=str, default='.', required=False)
    parser.add_argument('--nCores', type=int, default=128)
    parser.add_argument('--memMb', type=int, default=4096)
    # Execution mode, e.g. 'sge' (cluster) vs local execution.
    parser.add_argument('--run_mode', type=str, default='sge')
    parser.add_argument('--dry', default=False, action="store_true")
    # NOTE(review): store_true with a non-bool default means the value is
    # the string 'Auto' unless the flag is passed -- presumably a
    # deliberate tri-state ('Auto'/True); confirm pyflow expects this.
    parser.add_argument('--isContinue', default='Auto', action="store_true")
    parser.add_argument('--forceContinue', default=False, action="store_true")
    parser.add_argument('--mailTo', type=str, default=None)
    parser.add_argument('--startFromTasks', type=str, default=None)
    parser.add_argument('--ignoreTasksAfter', type=str, default=None)
    parser.add_argument('--resetTasks', type=str, default=None)
    parser.add_argument('--schedulerArgList', type=str, default=None)
    return parser
def _parse_planet_record_r1(row):
    """Parse a planet record in the table of `Borucki+ 2011a
    <http://adsabs.harvard.edu/abs/2011ApJ...728..117B>`_.

    Args:
        row (str): The row in Borucki et al. 2011a (fixed-width columns).

    Returns:
        dict: keys 'koi', 'planet_id', 'r', 'P', 'rR'.
    """
    # Fixed column slices of the published table.
    koi_number = int(row[0:3])
    candidate_id = float(row[0:6])
    # Table radius scaled by 11.209 (presumably Jupiter-to-Earth radii).
    radius = float(row[21:25]) * 11.209
    period = float(row[34:41])
    stellar_radius = float(row[53:58])
    # Radius ratio: planet radius in km (6371 km per Earth radius) over
    # stellar radius in km (695700 km per solar radius).
    ratio = radius * 6371 / stellar_radius / 695700.
    return {
        'koi': koi_number,
        'planet_id': candidate_id,
        'r': radius,
        'P': period,
        'rR': ratio,
    }
def stringifyOptionValue(value):
    """
    Convert option value from in-memory representation to a suitable string.
    In particular, boolean values are converted to '0' or '1'.
    """
    # Identity checks (`is`) are deliberate: only the True/False
    # singletons get the '1'/'0' treatment, not 1/0 or 1.0.
    if value is True:
        return '1'
    if value is False:
        return '0'
    return str(value)
def get_host_cl_datatype(datatype):
    """Get corresponding OpenCL datatype: float -> cl_float"""
    prefix = "cl_"
    return prefix + datatype
def check_for_ticket_name_error(ticket_name):
    """
    Validate a ticket name: alphanumeric (spaces allowed, but not leading
    or trailing) and at most 60 characters.

    :param ticket_name: a string for the ticket's name
    :return: False when valid, otherwise the error message string
    """
    without_spaces = ticket_name.replace(' ', '')
    if not without_spaces.isalnum():
        return "The name of the ticket has to be alphanumeric only"
    if ticket_name.startswith(' ') or ticket_name.endswith(' '):
        return "The name of the ticket is only allowed spaces if it is not the first or last character"
    if len(ticket_name) > 60:
        return "The name of the ticket should be no longer than 60 characters"
    return False
def check_username(db, username):
    """
    Checks if given username exists in the database.

    :param db: database object (queried via db(db.users.username == ...))
    :param username: username to check
    :return: True if username is in the database, False otherwise
    """
    rows = db(db.users.username == username).select()
    # A non-empty result set means the username exists.
    return bool(rows)
from typing import Callable
import click
def dcos_checkout_dir_argument(command: Callable[..., None],
                               ) -> Callable[..., None]:
    """
    An argument decorator for choosing a DC/OS checkout directory.
    """
    # The argument may also be supplied via $DCOS_CHECKOUT_DIR and
    # defaults to the current directory; the path must exist.
    decorator = click.argument(
        'dcos_checkout_dir',
        type=click.Path(exists=True),
        envvar='DCOS_CHECKOUT_DIR',
        default='.',
    )
    function = decorator(command)  # type: Callable[..., None]
    return function
from typing import List
def snake_case_split(string: str) -> List[str]:
    """Splits a snake_case string into its underscore-separated parts."""
    delimiter = '_'
    return string.split(delimiter)
def constants(input_x, rtn_dict=False):
    """
    Dictionary storing commonly used constants

    Parameters
    ----------
    input_x (str): name of constant of interest
    rtn_dict (boolean): return complete dictionary instead of single value

    Returns
    -------
    value of constant (float) or dictionary of constants and values (dict)
    """
    con_dict = {
        # Relative atomic mass of air (Just considering N2+O2)
        'RMM_air': (.78*(2.*14.)+.22*(2.*16.)),
        # Avogadro constant (Mol^-1)
        'AVG': 6.0221413E23,
        # Dobson unit - (molecules per meter squared)
        'mol2DU': 2.69E20,
        # Specific gas constant for dry air (J/(kg·K))
        'Rdry': 287.058,
    }
    return con_dict if rtn_dict else con_dict[input_x]
def show_busy(*_):
    """Callback stub to indicate busy; ignores all positional arguments."""
    # Nothing to do -- implicitly yields None to the caller.
    return
def AptGetServiceName(vm):
    """Returns the name of the mongodb service.

    The `vm` argument is accepted for interface compatibility but unused;
    the apt service name is fixed.
    """
    service_name = 'mongodb'
    return service_name
def index_of(val, in_list):
    """Return the index of *val* in *in_list*, or -1 when absent."""
    if val in in_list:
        return in_list.index(val)
    return -1
def binary_to_decimal(binarybits):
    """Convert a string of binary bits to its decimal integer value."""
    # int() with an explicit base of 2 does the whole conversion.
    return int(binarybits, 2)
def get_formatted_file_list(commit):
    """ Formats and returns the file list in a given commit """
    # Convert each forward-slash path to backslash form with a leading '\'.
    formatted = []
    for path in commit.stats.files.keys():
        formatted.append('\\' + path.replace('/', '\\'))
    return formatted
def zero(l):
    """Return the scalar 0.0.

    Despite the original "all-zero vector" wording, this always returns
    the float 0.0 regardless of *l* -- presumably callers treat the
    scalar as a degenerate zero vector. *l* is accepted for interface
    compatibility and unused.
    """
    return 0.0
import base64
def encode_basic_auth(username, password):
    """Returns the content of an Auth header given a username and password"""
    credentials = username + ":" + password
    encoded = base64.b64encode(credentials.encode("utf-8"))
    return b"Basic " + encoded
import time
def _Await(fn, timeout_secs):
    """Waits up to timeout_secs for fn() to return True."""
    give_up_at = time.time() + timeout_secs
    while time.time() < give_up_at:
        if fn():
            return True
        # Poll at roughly 5 Hz until the deadline passes.
        time.sleep(0.2)
    return False
import torch
import math
def _taylor_approx(z: torch.Tensor) -> torch.Tensor:
    """Compute an approximation of the lambertw function at z.

    Based on the polynomial expansion in https://arxiv.org/pdf/1003.1628.pdf. An empirical comparison of this polynomial
    expansion against the winitzki approximation found that this one is better when z < -0.2649.

    Args:
        z: The input to the lambertw function.

    Returns:
        An estimated value of lambertw(z).
    """
    # shifted = 2*(1 + e*z); its square root drives the expansion terms.
    shifted = 2 * (1. + math.e * z)
    root = torch.sqrt(shifted)
    return -1. + root - shifted / 3. + 0.1527777777 * shifted * root
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.