content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def predecessor_inorder(bst, node):
    """Return the in-order predecessor of *node* in a BST, or None.

    Two cases:
      1. node has a left subtree -> predecessor is the maximum of that subtree.
      2. no left subtree -> climb ancestors while the current node is a left
         child; the first ancestor reached from a right child is the
         predecessor (None if we climb past the root).
    """
    if node.left is not None:
        return bst.maximum(node.left)
    current = node
    ancestor = node.parent
    while ancestor is not None and ancestor.left == current:
        current = ancestor
        ancestor = ancestor.parent
    return ancestor
def find_join(df, id, downstream_col="downstream", upstream_col="upstream"):
    """Find the joins for a given segment id in a joins table.

    Parameters
    ----------
    df : DataFrame
        data frame containing the joins
    id : any
        id to look up in the upstream or downstream columns
    downstream_col : str, optional (default "downstream")
        name of downstream column
    upstream_col : str, optional (default "upstream")
        name of upstream column

    Returns
    -------
    DataFrame of rows where *id* appears as an upstream or downstream value.
    """
    mask = (df[upstream_col] == id) | (df[downstream_col] == id)
    return df.loc[mask]
def _get_docker_build_fuzzers_args_not_container(host_repo_path):
"""Returns arguments to the docker build arguments that are needed to use
|host_repo_path| when the host of the OSS-Fuzz builder container is not
another container."""
return ['-v', f'{host_repo_path}:{host_repo_path}'] | fd166cf8fef9a7c4b8c9f51a631f3a0cd5b1bb52 | 92,750 |
import re
def longest_matching(lines, regex):
    """Return the highest index of the first ':' among lines matching *regex*.

    :param lines: list of strings to scan
    :param regex: pattern (tested with re.match) selecting lines to compare
    :returns: the largest index of the first ':' over all matching lines,
        or None if no line matches (or no matching line contains ':')
    """
    longest = None
    for line in lines:
        if re.match(regex, line):
            # Bug fixes vs. the original: `index > longest` raised TypeError
            # on the first match (int vs None), and `.index(":")` raised
            # ValueError for a matching line with no colon. Use find() and
            # compare against None explicitly.
            index = line.find(":")
            if index >= 0 and (longest is None or index > longest):
                longest = index
    return longest
import typing
def split_slot_args(
    slot_name: str,
) -> typing.Tuple[str, typing.Optional[typing.List[str]]]:
    """Split "slot,arg1,arg2,..." into the slot name and its argument list.

    Returns (name, args) where args is None when no ',' is present.
    """
    if "," not in slot_name:
        return slot_name, None
    name, *args = slot_name.split(",")
    return name, args
import re
def StripHTML(input: str) -> str:
    """
    Strip HTML formatting from a string.

    A single regex pass removes tags plus named, decimal and hex character
    entities (e.g. &nbsp; &#160; &#xa0;) — faster than pulling in an
    external HTML library for this purpose.

    Parameters
    ----------
    input : str
        HTML formatted string.

    Returns
    -------
    str
        Input string without the HTML formatting.
    """
    pattern: re.Pattern[str] = re.compile(
        "<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});"
    )
    return pattern.sub("", input)
import math
def calc_percent_four_pi(omega):
    """
    Return the fraction of a full sphere (4*pi sr) covered by *omega*.

    :param omega: The solid angle coverage in sr.
    :return: omega / (4*pi) — the covered fraction of a sphere (not scaled
        to 0-100).
    """
    return omega / (4.0 * math.pi)
def nc_variables_with_dimension(nc1, dimension):
    """Names of the variables in *nc1* that use the given dimension.

    Parameters
    ----------
    nc1 : netCDF4.Dataset (anything exposing a .variables mapping)
    dimension : str

    Returns
    -------
    list of str
    """
    return [
        name
        for name, var in nc1.variables.items()
        if dimension in var.dimensions
    ]
def parse_shot(s):
    """
    Extract the (Y, Z) shot position from a string shaped like
    '<name>_Y<y>Z<z>'; raises ValueError when a marker is missing.
    """
    sep = s.find("_")
    if sep < 0:
        raise ValueError("No shot info in input")
    tail = s[sep + 1:]
    y_pos = tail.find("Y")
    if y_pos < 0:
        raise ValueError("No Y shot position in input")
    z_pos = tail.find("Z")
    if z_pos < 0:
        raise ValueError("No Z shot position in input")
    return (float(tail[y_pos + 1:z_pos]), float(tail[z_pos + 1:]))
def recall(TP, FN):
    """Recall / sensitivity / hit rate / true-positive rate: TP / (TP + FN)."""
    actual_positives = TP + FN
    return TP / actual_positives
def evaluate_cards_to_take(laid_card, cards_to_take=0):
    """
    Evaluate how many cards must be taken as a punishment.

    Kings of hearts/pikes add 5; a '2' or '3' adds its face value.

    :param laid_card: tuple (color, value) of the last played card
    :param cards_to_take: punishment accumulated earlier
    :return: accumulated punishment after the card is played
    """
    if laid_card in (('hearts', 'K'), ('pikes', 'K')):
        cards_to_take += 5
    face_value = laid_card[1]
    if face_value in ('2', '3'):
        cards_to_take += int(face_value)
    return cards_to_take
def within(x, y, size, px, py):
    """Return True if point (px, py) lies inside the square
    [x, x+size] x [y, y+size] (borders inclusive)."""
    return x <= px <= x + size and y <= py <= y + size
def make_filename(parts_list):
    """
    Assemble a list of path components into a full '/'-rooted path.

    Inverse of ``full_path.split('/')`` for absolute paths.

    @param parts_list : list of str
    @return: str (empty string for an empty list)
    """
    return ''.join('/' + part for part in parts_list)
def grow_slice(slc, size):
    """
    Widen a slice by one index on each side without leaving [0, size].

    Parameters
    ----------
    slc : slice
        slice object to grow
    size : int
        list length (upper clamp for the stop)

    Returns
    -------
    slice
        the extended slice
    """
    new_start = max(0, slc.start - 1)
    new_stop = min(size, slc.stop + 1)
    return slice(new_start, new_stop)
from typing import Dict
def stations() -> Dict:
    """Provide example configuration of radio stations."""
    example_fm = {
        "url": "http://example.org/stream.mp3",
        "name": "Example FM",
    }
    return {"example_fm": example_fm}
def make_parameter_grid(estimator_name=None, named_ranges=None):
    """
    Build an sklearn.pipeline-compatible dict of named parameter ranges.

    Each (param_name, param_values) tuple in *named_ranges* becomes an entry
    '<estimator_name>__<param_name>' -> param_values.

    Parameters
    ----------
    estimator_name : str
        Valid python identifier naming the current pipeline step,
        e.g. 'random_forest_clf'.
    named_ranges : list of tuples
        (param_name, param_values) pairs, e.g.
        [('n_estimators', [50, 100, 500]), ('max_features', [10, 20])].

    Returns
    -------
    dict or None
        The parameter grid, or None when either argument is missing/empty.
    """
    if named_ranges is None or estimator_name in [None, '']:
        return None
    return {
        f'{estimator_name}__{param_name}': param_values
        for param_name, param_values in named_ranges
    }
def generate_partitions(model, n_players):
    """
    Group every state of the model by each player's observation.

    Runs ``model.run`` on each state in ``model.statespace`` and, for each
    player ``i``, maps observation -> list of states that produce it.

    :param model: object exposing ``statespace`` and ``run(state)``
    :param n_players: number of players (length of each observation tuple)
    :return: list of ``n_players`` dictionaries
    """
    partitions = [{} for _ in range(n_players)]
    for state in model.statespace:
        observations = model.run(state)
        for i in range(n_players):
            partitions[i].setdefault(observations[i], []).append(state)
    return partitions
def isMissionary(info, iReligion):
    """
    Truthy when <info> is the Missionary for <iReligion> (returns the raw
    spread flag from getReligionSpreads).
    """
    spread_flag = info.getReligionSpreads(iReligion)
    return spread_flag
def ravel_index(ix, shape):
    """
    Flatten a 2D index into a row-major flat index, similar to
    np.ravel_multi_index.

    Parameters
    ----------
    ix : array or list of ints of shape (2,)
        The 2D input index.
    shape : list or tuple of ints of length 2
        The shape of the corresponding matrix.

    Returns
    -------
    int between 0 and shape[0]*shape[1]-1
        The flattened-matrix index corresponding to *ix*.
    """
    row, col = ix[0], ix[1]
    n_cols = shape[1]
    return row * n_cols + col
def candidate_matches(o, sm_current, list_matches):
    """
    Check whether article *o* shares authors with every article in a list.

    :param o: examined article
    :param sm_current: list of articles that share authors
    :param list_matches: all known (unordered) pairs of articles sharing
        authors; a pair may be stored in either order
    :return: True iff o shares authors with all articles in sm_current
    """
    return all(
        [elem, o] in list_matches or [o, elem] in list_matches
        for elem in sm_current
    )
def test_classifier(classifier, test_data, test_labels=None):
""" test_classifier function
Tests a given classifier on the given test data and outputs the results.
If test_labels is given, will calculate and print accuracy.
Args
----
classifier : LogisticRegression
classifier to be tested
test_data : np.array
matrix of size N x p where N is the number of data points and p
is the number of features
test_labels : np.array (default None)
the list of labels for the associated data set
Returns
-------
LogisticRegression
"""
guesses = classifier.predict(test_data)
if test_labels is not None:
right = 0.0
total = 0.0
for i in range(len(guesses)):
guess = guesses[i]
actual = test_labels[i]
if guess == actual:
right += 1.0
total += 1.0
print("Accuracy: " + str(right/total))
return guesses | 384e98e19167ce8d0cc36e26841c49d822de8a30 | 92,804 |
def find_peak_linearly(ls: list) -> int:
    """Return the index of the first interior peak of *ls*, or -1.

    An index i (0 < i < len(ls)-1) is a peak when ls[i-1] <= ls[i] >= ls[i+1].
    Returns -1 for an empty list or when no interior peak exists.
    Time complexity: O(n), where len(ls) == n.
    """
    for i in range(1, len(ls) - 1):
        left, mid, right = ls[i - 1], ls[i], ls[i + 1]
        if left <= mid and mid >= right:
            return i
    return -1
import linecache
import traceback
def _failureOldStyle(fail):
"""Pre-Failure manhole representation of exceptions.
For compatibility with manhole clients without the \"Failure\"
capability.
A dictionary with two members:
- \'traceback\' -- traceback.extract_tb output; a list of tuples
(filename, line number, function name, text) suitable for
feeding to traceback.format_list.
- \'exception\' -- a list of one or more strings, each
ending in a newline. (traceback.format_exception_only output)
"""
tb = []
for f in fail.frames:
# (filename, line number, function name, text)
tb.append((f[1], f[2], f[0], linecache.getline(f[1], f[2])))
return {
'traceback': tb,
'exception': traceback.format_exception_only(fail.type, fail.value)
} | 2046c5f61c2eb39ffddeeb5ec5a24374de69e197 | 92,809 |
def httpbin_secure_untrusted(monkeypatch, httpbin_secure):
    """
    Like the `httpbin_secure` fixture, but without the
    make-CA-trusted-by-default behaviour.

    Removes REQUESTS_CA_BUNDLE from the environment (if present) so the
    client falls back to its default trust store.
    """
    # raising=False: don't fail the fixture when the variable was never set.
    monkeypatch.delenv('REQUESTS_CA_BUNDLE', raising=False)
    return httpbin_secure
def join_keys(keys, sep=','):
    """Join *keys* with *sep*; an empty list yields 'DEFAULT'.

    If any key contains '.' or ',', every key is single-quoted and the
    separator becomes "','" regardless of *sep*.

    >>> join_keys(['key1', 'key2'])
    'key1,key2'
    >>> join_keys(['key1', 'key2'], '.')
    'key1.key2'
    >>> join_keys(['key.1', 'key2'], '.')
    "'key.1','key2'"
    >>> join_keys([])
    'DEFAULT'
    """
    if not keys:
        return 'DEFAULT'
    needs_quoting = any('.' in key or ',' in key for key in keys)
    if needs_quoting:
        return "'" + "','".join(keys) + "'"
    return sep.join(keys)
def get_overlapping_miRNA_mRNA(conn, location: dict = {}):
    """
    Takes a Chado database connection and a location, and returns a dictionary
    of all miRNA / mRNA features that overlap the given location.

    :param conn: The psycopg2 connection object for the Chado database.
    :param location: Dictionary containing the featureloc fields
        (srcfeature_id, scaffold, fmin, and fmax). Only read, never mutated,
        so the shared mutable default is harmless here.
    :return: Dictionary keyed by FlyBase ID, with a tuple of
        (FlyBase ID, symbol, feature type) as each value.
    """
    # SQL query to look for overlapping transcript features.
    # Raw string: the regex contains '\d', which is an invalid escape
    # sequence in a normal str literal (SyntaxWarning on modern Python).
    miRNA_mRNA_query = r"""
    select f.uniquename,
           flybase.current_symbol(f.uniquename),
           cvt.name
    from featureloc_slice(%s, %s, %s) as fl join feature f on fl.feature_id=f.feature_id
                                            join cvterm cvt on f.type_id=cvt.cvterm_id
    where f.uniquename ~ '^FBtr\d+$'
      and f.is_obsolete = false
      and f.is_analysis = false
      and cvt.name in ('miRNA','mRNA')
    ;
    """
    cur = conn.cursor()
    cur.execute(miRNA_mRNA_query, (location['srcfeature_id'], location['fmin'], location['fmax']))
    # Key is the FBtr ID; value is the full (FBtr ID, symbol, type) row.
    return {r[0]: r for r in cur}
import re
def camelToSnake(s):
    """
    Convert a camelCase (or PascalCase) string to snake_case.

    Based on https://gist.github.com/jaytaylor/3660565. Is it ironic that
    this function is written in camel case, yet it converts to snake case?
    """
    step_one = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', s)
    step_two = re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one)
    return step_two.lower()
def float_list_input(prompt="", stop_words=("stop", "end", "halt")):
"""Uses `input(prompt)` to request a list of float values from the user, retrying if the user
does not enter a valid value. The user can terminate the entry loop by entering one of the `stop_words`,
which are "stop", "end" and "halt" by default.
@param str prompt: The prompt to display.
@return float: The entered value.
"""
floats = []
while True:
try:
string = input(prompt).strip()
if not len(string):
continue
if string.lower() in stop_words:
return floats
floats.append(float(string))
except ValueError:
print("Invalid input. Please enter a float value.") | 2db0e4b8e4cdf62cc027eee20ea8ff89d0d02c81 | 92,821 |
def calc_no_within_ts(levels, lvl, varname):
    """Position of (lvl, varname) inside the current timestep's flat layout.

    Counts all variables of the levels preceding *lvl*, plus the offset of
    *varname* within *lvl*'s own variable list.

    Args:
        levels: all levels (each a dict with 'level' and 'vars' entries)
        lvl: the level we are interested in
        varname: the variable we are interested in
    Returns:
        number of steps
    """
    steps = 0
    for level in levels:
        if level['level'] == lvl:
            var_names = [entry[0] for entry in level['vars']]
            steps += var_names.index(varname)
            break
        steps += len(level['vars'])
    return steps
import math
def test1(x):
"""
>>> test1(3)
0.1411200080598672
>>> import dis
>>> dis.dis(test1)
23 0 LOAD_GLOBAL 0 (math)
3 LOAD_ATTR 1 (sin)
6 LOAD_FAST 0 (x)
9 CALL_FUNCTION 1 (1 positional, 0 keyword pair)
12 RETURN_VALUE
"""
return math.sin(x) | 5fd40e6657c8fb91ef462e6be49caa17325f35bf | 92,823 |
import re
def get_ip_and_type(line):
    """
    Extract an IP address or CIDR block from the start of a line from an
    open source feed.

    :param line: text beginning with an IPv4 address or CIDR address class.
    :type line: str
    :returns: (value, type) where type is 'Address - cidr' or
        'Address - ipv4-addr'; (None, None) when nothing matches.
    """
    match = re.match(r"^[0-9.]+([/0-9]+)?", line)
    if match is None:
        return (None, None)
    ip = match.group(0)
    kind = "Address - cidr" if "/" in ip else "Address - ipv4-addr"
    return (ip, kind)
def rev_comp(seq):
    """Return the reverse complement of a DNA sequence.

    (Docstring fix: the original consistently misspelled "complement" as
    "compliment".)

    Parameters
    ----------
    seq : str
        The original sequence (characters A, C, G, T).

    Returns
    -------
    new_seq : str
        The reverse complement.

    Raises
    ------
    KeyError
        If *seq* contains a character other than A/C/G/T (same strict
        behaviour as the original).
    """
    complement = {"A": "T", "C": "G", "G": "C", "T": "A"}
    return "".join(complement[base] for base in reversed(seq))
def parse_specific_gate_opts(strategy, fit_opts):
    """Extract the options from ``fit_opts`` relevant to ``strategy``.

    Common fitting options are always copied; 'als'/'autodiff' strategies
    additionally pull in their strategy-specific settings.
    """
    common_keys = ('tol', 'steps', 'init_simple_guess',
                   'condition_tensors', 'condition_maintain_norms')
    gate_opts = {key: fit_opts[key] for key in common_keys}
    if 'als' in strategy:
        gate_opts['solver'] = fit_opts['als_solver']
        gate_opts['dense'] = fit_opts['als_dense']
        gate_opts['enforce_pos'] = fit_opts['als_enforce_pos']
        gate_opts['pos_smudge'] = fit_opts['als_enforce_pos_smudge']
    elif 'autodiff' in strategy:
        gate_opts['autodiff_backend'] = fit_opts['autodiff_backend']
        gate_opts['autodiff_optimizer'] = fit_opts['autodiff_optimizer']
    return gate_opts
def parse_actors(movie):
    """
    Convert casting information to a dictionary for a dataframe row,
    keeping only the 3 actors with the most facebook likes.

    Slots beyond the available cast are filled with None so the row always
    carries actor_1..actor_3 columns.

    :param movie: movie dictionary with 'cast_info' and 'director_info'
    :return: well-formatted dictionary with casting information
    """
    sorted_actors = sorted(movie['cast_info'], key=lambda x: x['actor_fb_likes'], reverse=True)
    top_k = 3
    parsed_actors = {}
    parsed_actors['total_cast_fb_likes'] = sum(
        actor['actor_fb_likes'] for actor in movie['cast_info']
    ) + movie['director_info']['director_fb_links']
    # Bug fix: iterate over every slot (range(top_k)), not just the actors
    # that exist — the original looped over sorted_actors[:top_k], which made
    # the None-padding branch unreachable when the cast had fewer than 3.
    for k in range(top_k):
        if k < len(sorted_actors):
            actor = sorted_actors[k]
            parsed_actors['actor_{}_name'.format(k+1)] = actor['actor_name']
            parsed_actors['actor_{}_fb_likes'.format(k+1)] = actor['actor_fb_likes']
        else:
            parsed_actors['actor_{}_name'.format(k+1)] = None
            parsed_actors['actor_{}_fb_likes'.format(k+1)] = None
    return parsed_actors
def existeValor(matriz, valor):
    """Return True if *valor* occurs anywhere in the 2-D matrix, else False.

    (Docstring translated from Spanish.) Unlike the original, this returns
    as soon as the value is found instead of always scanning every cell.
    """
    return any(valor in fila for fila in matriz)
from typing import List
import re
def find_variables_to_interpolate(string) -> List[str]:
    """
    Return the names of all $(inputs.params.NAME) variables in *string*
    that need to be interpolated.
    """
    pattern = r"\$\(inputs\.params\.(?P<var>\w+)\)"
    return re.findall(pattern, string, re.UNICODE)
import torch
def _channelwise_sum(x: torch.Tensor):
"""Sum-reduce all dimensions of a tensor except dimension 1 (C)"""
reduce_dims = tuple([0] + list(range(x.dim()))[2:]) # = (0, 2, 3, ...)
return x.sum(dim=reduce_dims) | 49552e3acb0730a591fcb7b5548daf1e94bf05b5 | 92,845 |
def isAscii(b):
    """
    Check whether a byte value is printable ASCII, LF or CR.

    Argument : the byte value (int)
    Returns : Boolean
    """
    return b in (0x0a, 0x0d) or 0x20 <= b <= 0x7e
def get_bbox_corners(center_bbox_repr):
    """
    Convert bounding boxes from (center_x, center_y, height, width) to
    (x1, y1, x2, y2) corner representation.

    Args:
        center_bbox_repr (tensor): shape (..., 4)
    Returns:
        tensor: shape (..., 4), same device and dtype as the input
    Raises:
        ValueError: when the last dimension is not 4
    """
    if center_bbox_repr.shape[-1] != 4:
        raise ValueError("The center representation of bounding box"
                         f"must have 4 values. Received {center_bbox_repr.shape[-1]}")
    cx = center_bbox_repr[..., 0]
    cy = center_bbox_repr[..., 1]
    half_h = center_bbox_repr[..., 2] / 2
    half_w = center_bbox_repr[..., 3] / 2
    # NOTE(review): height offsets are applied along x and width along y,
    # mirroring the original implementation — confirm against callers.
    corners = center_bbox_repr.new_empty(center_bbox_repr.shape)
    corners[..., 0] = cx - half_h
    corners[..., 1] = cy - half_w
    corners[..., 2] = cx + half_h
    corners[..., 3] = cy + half_w
    return corners
def kendallTauDistance(order1, order2):
    """Return the normalised Kendall's tau distance between two orders.

    :param order1: The first order.
    :type order1: tuple
    :param order2: The second order.
    :type order2: tuple
    :return: The fraction of discordant pairs, between 0 and 1 (the
        original docstring wrongly claimed an int).
    :rtype: float
    :raises ValueError: if the orders differ in length.
    """
    if len(order1) != len(order2):
        raise ValueError("Rankings must have the same length to compute their Kendall's tau distance")
    # Precompute each item's position in order2: the original called
    # list.index inside the double loop, making the whole thing O(n^3).
    # Assumes both orders are permutations of the same distinct items.
    position = {item: pos for pos, item in enumerate(order2)}
    discordant = 0
    total_pairs = 0
    for j1 in range(len(order1)):
        for j2 in range(j1 + 1, len(order1)):
            if position[order1[j1]] > position[order1[j2]]:
                discordant += 1
            total_pairs += 1
    return discordant / total_pairs
import torch
def offsets_from_counts(counts):
    """Create the offsets tensor of a ragged array from its counts.

    Parameters
    ----------
    counts : torch.Tensor or sequence of int
        1-d tensor representing the counts in a ragged array.

    Returns
    -------
    torch.Tensor
        1-d offsets tensor of length ``len(counts) + 1``, on the same
        device as a tensor input.
    """
    device = counts.device if isinstance(counts, torch.Tensor) else None
    counts = torch.as_tensor(counts, dtype=torch.int64, device=device)
    offsets = counts.new_zeros(counts.shape[0] + 1)
    torch.cumsum(counts, 0, out=offsets[1:])
    return offsets
def z_step(step):
    """Redshift of the simulation at the given step number.

    Assumes 500 steps from z=200 to z=0, spaced evenly in scale factor.
    """
    a_start = 1. / (1. + 200)
    a_end = 1. / (1.)
    delta = (a_end - a_start) / 500.
    scale_factor = a_start + delta * (step + 1.)
    return 1. / scale_factor - 1
import gzip
def bin_open(fname: str):
    """
    Open a plain text or gzipped file in binary read mode (for subprocess
    interaction); gzip is detected by a ``.gz`` suffix.

    :param fname: The filename to open.
    :return: File descriptor in binary read mode.
    """
    opener = gzip.open if fname.endswith(".gz") else open
    return opener(fname, "rb")
import socket
import fcntl
import struct
def get_ip_address(ifname):
    """
    Return the IPv4 address of the requested interface (Linux-specific;
    thanks Martin Konecny, https://stackoverflow.com/a/24196955).

    :param string ifname: The interface to query, e.g. "eth0".
    :returns: The interface's IPv4 address as a dotted-quad string.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # 0x8915 is the SIOCGIFADDR ioctl; the interface name is truncated to
    # 15 chars and packed into a 256-byte buffer.
    packed_ifname = struct.pack("256s", bytes(ifname[:15], "utf-8"))
    ifreq = fcntl.ioctl(sock.fileno(), 0x8915, packed_ifname)
    # The IPv4 address lives at bytes 20:24 of the returned ifreq struct.
    return socket.inet_ntoa(ifreq[20:24])
def associate_successors(graph, node=""):
    """
    Describe the outgoing edges of *node* as a 'successors' record list.

    Returns a dictionary with outer key 'successors' whose value is a list
    of dicts associating source, target and edge_attribute (Vertex.name
    strings) for each successor of the passed node.
    """
    successors = []
    for target in graph.succ[node]:
        successors.append({
            "source": node,
            "target": target,
            "edge_attribute": graph.succ[node][target]["edge_attribute"],
        })
    return {"successors": successors}
def _append_project(issue_ids, project_name):
"""Append project name to convert <id> to <project>:<id> format."""
result = []
id_list = issue_ids.split()
for id_str in id_list:
if ':' in id_str:
result.append(id_str)
# '-' means this issue is being removed
elif id_str.startswith('-'):
result.append('-%s:%s' % (project_name, id_str[1:]))
else:
result.append('%s:%s' % (project_name, id_str))
return result | 97c0def059b2db53b02d8fdb09240866a9112a17 | 92,867 |
def clean_boolean(item=None):
    """Coerce a string or bool into a bool; anything else becomes False.

    Strings are truthy only when they spell 'true' (case-insensitive).
    """
    if isinstance(item, bool):
        return item
    if isinstance(item, str):
        return item.lower() == 'true'
    return False
def et_adjustment(ws):
    """
    Given a list of weather stations, return 12 values (one per month),
    each equal to the average `etadj` across all stations.
    """
    total = sum(w.etadj for w in ws)
    avg_etadj = float(total) / len(ws)
    return [avg_etadj] * 12
def compute_knee_frequency(knee, exponent):
    """Frequency (Hz) at which the knee occurs, from aperiodic parameters.

    Parameters
    ----------
    knee : float
        Knee parameter value.
    exponent : float
        Exponent parameter value.

    Returns
    -------
    float
        Knee frequency in Hz: knee ** (1 / exponent).
    """
    inverse_exponent = 1. / exponent
    return knee ** inverse_exponent
def speed_change(sound, speed=0.0):
    """Return *sound* with its playback speed altered.

    Parameters
    ----------
    sound : AudioSegment object
    speed : int or float
        Speed tweak; 0.0 keeps the original speed, each unit adds 10%.

    Returns
    -------
    AudioSegment object resampled back to the original frame rate.
    """
    # Map the user-friendly value onto a multiplier (e.g. 10 -> 2.0x).
    multiplier = 1.0 + speed / 10
    # Overriding frame_rate tells playback how many samples per second.
    altered = sound._spawn(
        sound.raw_data,
        overrides={"frame_rate": int(sound.frame_rate * multiplier)})
    # Convert back to the original frame rate so regular playback programs
    # (which expect standard rates like 44.1k) handle the result correctly.
    return altered.set_frame_rate(sound.frame_rate)
def _should_update_date(verified_mode):
""" Returns whether or not the verified mode should be updated. """
return not(verified_mode is None or verified_mode.expiration_datetime_is_explicit) | c2ad192fb5bb8da27fd754466660cb819ac141a6 | 92,876 |
def get_column_names(fy: int, q: int) -> list[str]:
    """Get the report's column names for fiscal year *fy*, quarter *q*.

    YTD columns are included except for Q4 reports after FY2010; full-year
    columns always appear. Empty strings are spacer columns.
    """
    names = [f"{fy-1}-Actual-Full Year"]
    include_ytd = q != 4 or fy <= 2010
    if include_ytd:
        names.append(f"{fy}-Target Budget-YTD")
        names.append(f"{fy}-Actual-YTD")
        names.append("")
    names.extend([
        f"{fy}-Adopted Budget-Full Year",
        f"{fy}-Target Budget-Full Year",
        f"{fy}-Current Projection-Full Year",
        "",
        "",
    ])
    return names
def glyphicon(icon):
    """Shorthand for bootstrap glyphicon markup.

    :param icon: the icon to present, gets appended to glyphicon-{{ icon }}
    :return: the template context dict
    """
    context = {'icon': icon}
    return context
import mpmath
def mean(loc=0, scale=1):
    """
    Mean of the logistic distribution (equal to its location parameter;
    *scale* does not affect the mean but is kept for a uniform signature).
    """
    with mpmath.extradps(5):
        result = mpmath.mpf(loc)
    return result
def vec_psd_dim(dim):
    """Compute the vectorized dimension of a dim x dim symmetric matrix.

    Uses floor division (``//``) so the result stays an exact int even for
    very large ``dim``; the original ``int(dim * (dim + 1) / 2)`` went
    through a float and could lose precision beyond 2**53.
    """
    return dim * (dim + 1) // 2
def bytes_to_num(bytes_, signed_=True, big_=True):
    """To get the int format of a given bytes (used for data_pro).

    :param bytes_: A given bytes
    :type bytes_: bytes
    :param signed_: True for signed input
    :type signed_: bool
    :param big_: Same as the "high_head" in the function 'num_to_bytes';
        True for big-endian, False for little-endian
    :type big_: bool
    :returns: Int for bytes
    :rtype: int
    """
    byteorder = 'big' if big_ else 'little'
    return int.from_bytes(bytes_, byteorder=byteorder, signed=signed_)
def make_xerr(star):
    """
    Return the error vector xerr = [parallax_error, pmra_error, pmdec_error]
    of *star* as a float32 array.
    """
    error_columns = ['parallax_error', 'pmra_error', 'pmdec_error']
    return star.loc[error_columns].values.astype('f')
def uint8array_to_number(uint8array):
    """Combine a little-endian array of uint8_t values into one Python int."""
    total = 0
    shift = 0
    for byte in uint8array:
        total += byte << shift
        shift += 8
    return total
def check_if_two_hsp_ranges_overlap(lists):
    """Return True when two HSP coordinate ranges overlap.

    Takes a pair of [start, stop] lists. Coordinates follow Python string
    slicing conventions (end-exclusive), as SearchIO suggests
    (https://biopython.org/DIST/docs/api/Bio.SearchIO-module.html).

    Uses direct interval arithmetic — two half-open intervals intersect iff
    max(starts) < min(stops) — instead of materialising sets of every
    coordinate, so it runs in O(1) regardless of range length.
    """
    (start_a, stop_a), (start_b, stop_b) = lists[0], lists[1]
    return max(start_a, start_b) < min(stop_a, stop_b)
def escape_special_characters(s):
    """Add a backslash before each backslash in *s*.

    Python does not handle backslashes in regular expressions or
    substitution text, so they must be escaped before processing.
    (Despite the name, only the backslash itself is escaped — the
    original's special_chars set contained nothing else.)
    """
    return s.replace('\\', '\\\\')
def generate_huffman_codes(node, prefix):
    """
    Generate Huffman codes for each character by traversing the tree,
    appending '0' for a step to the left child and '1' for a step to the
    right child.

    Parameters:
        node (class: Node): root node of Huffman Tree
        prefix (str): starting prefix prepended to every code

    Returns:
        huffman_codes (dictionary): map of characters to generated Huffman codes
    """
    huffman_codes = {}

    def generate_codes(node, prefix):
        if node is None:
            return
        if node.right_child is None and node.left_child is None:
            huffman_codes[node.char] = prefix
        generate_codes(node.left_child, prefix + '0')
        generate_codes(node.right_child, prefix + '1')

    # Bug fix: the caller-supplied prefix was previously ignored — the inner
    # helper was invoked without it and always started from "".
    generate_codes(node, prefix)
    return huffman_codes
def _is_junk(line, t_strs):
"""Ignore empty line, line with blast info, or whitespace line"""
# empty or white space
if not line or not line.strip():
return True
# blast info line
for t_str in t_strs:
if line.startswith("# %s" % t_str):
return True
return False | 639e4a4cb45430f0980fd19ccb6a544eddb628ea | 92,904 |
import torch
def pdist(A, B, sqrt=True):
    """Pairwise Euclidean distances between rows of A and rows of B.

    Returns a (len(A), len(B)) tensor; pass sqrt=False for squared
    distances. Squared distances are clamped to >= 1e-16 because an exact
    zero produces NaN gradients under sqrt.

    >>> import torch; A = torch.randn(2,4); B = torch.randn(15,4); pdist(A, B).size()
    torch.Size([2, 15])
    """
    a_sq = A.pow(2).sum(1).unsqueeze(1)
    b_sq = B.pow(2).sum(1).unsqueeze(0)
    cross = torch.mm(A, B.t())
    dist_sq = (a_sq + b_sq - 2 * cross).clamp_(min=1e-16)
    return dist_sq.sqrt() if sqrt else dist_sq
from typing import Any
from typing import List
def rec_load(obj: Any, mod: List[str], count: int=0) -> Any:
    """
    Walk the attribute names in *mod* (from index *count* on) down from
    *obj*. Typically used to reach JavaPackages and JavaClasses residing
    inside the JVM: python world -> gateway -> JVM -> my_scala_packages.
    There is no guarantee your package exists though!

    Parameters
    ----------
    obj : SparkContext instance or Any
        Starting object for the initial call; each step replaces it with
        ``getattr(obj, name)`` (gateway, JVM view, JavaPackage, ...).
    mod : List[str]
        List of attribute names from the starting object to your class in
        the JVM, e.g. ``"_gateway.jvm.com.example".split(".")``.
    count : int, optional
        Index into *mod* to start from. Must be 0 at the initial call.

    Returns
    -------
    obj : Any
        The object reached after applying every remaining name in *mod*.
    """
    for name in mod[count:]:
        obj = getattr(obj, name)
    return obj
def split_str_chunks(s, n):
    """Produce `n`-character chunks from `s` (the last may be shorter)."""
    return [s[start:start + n] for start in range(0, len(s), n)]
import json
def _load_deploy_event_result(line):
"""Helper to extract a data dictionary from parsed command output line.
Shub tool prints the lines as python dicts, so quotes should be properly
replaced with double quotes to extract data from json dict.
"""
return json.loads(line.replace("'", '"')) | 2aad7d0a5dd250ccaa0de1c03b8b92b7f0cb1355 | 92,931 |
def tokens_from_module(module):
    """
    Return the token classes exported by *module*.

    Looks up every name listed in ``module.__all__`` and collects the
    corresponding attributes; useful when custom tokens are defined in a
    single module.
    """
    tokens = []
    for exported_name in module.__all__:
        tokens.append(getattr(module, exported_name))
    return tokens
def select_dict(subset, superset):
    """
    Select from *superset* the entries whose keys appear in *subset*.

    Keys listed in *subset* but missing from *superset* are silently skipped.

    :return: the selected entries as a dict
    """
    return {key: superset[key] for key in subset if key in superset}
def prepare_opts(opts):
    """Return a copy of *opts* with '--model' mirroring '<module>'.

    >>> opts = {'<module>': 'Blog'}
    >>> prepare_opts(opts)
    {'<module>': 'Blog', '--model': 'Blog'}
    """
    prepared = dict(opts)
    prepared['--model'] = opts.get('<module>')
    return prepared
def forcedEccentricity(binary_sys, r):
    """
    Forced (pumped) eccentricity in a circumbinary disk.

    Implements Moriwaki et al. 2004 eqn 9 to first order, with an extra
    factor of 2 so that e_pumped rather than e_forced is returned. Only
    applies when e_binary != 0 and m2/(m1 + m2) != 0.5 (i.e. an eccentric,
    non-equal-mass binary).

    Parameters
    ----------
    binary_sys : binary.Binary class object
    r : array
        array of radii in AU

    Returns
    -------
    e_forced : array
        array of len(r)
    """
    mass_ratio = binary_sys.m2 / (binary_sys.m1 + binary_sys.m2)
    # Same left-to-right grouping as the original expression, so the
    # floating-point result is bit-identical.
    prefactor = (5. / 2.) * (1.0 - 2.0 * mass_ratio) * binary_sys.e
    return prefactor * binary_sys.a / r
def convert_specific_keys(string: str):
    """
    Convert specific key names to the Demisto output standard.

    Args:
        string: the key name to transform

    Returns:
        The Demisto-standard key name, or the input unchanged when no
        special-case mapping applies.
    """
    # Dispatch table replaces the original 11-branch if-chain: one O(1)
    # lookup, and new special cases are a single line to add.
    replacements = {
        'OsName': 'OSName',
        'OsNumber': 'OSNumber',
        'Ram total': 'RamTotal',
        'AssetDataId': 'AssetDataID',
        'AssetClassId': 'AssetClassID',
        'AssetStatusId': 'AssetStatusID',
        'AssetTypeId': 'AssetTypeID',
        'MappedId': 'MappedID',
        'OwnerId': 'OwnerID',
        'HdQueueId': 'HdQueueID',
        'Ip': 'IP',
    }
    return replacements.get(string, string)
def prompt(question):
    """
    Ask a question (e.g. 'Are you sure you want to do this (Y or N)? >')
    on stdout and return the answer read from stdin.

    Returns None when reading fails (e.g. EOFError on a closed stdin).
    A KeyboardInterrupt is re-raised so Ctrl-C still aborts the caller.
    """
    try:
        print(question)
        return input()
    except KeyboardInterrupt:
        raise
    except Exception:
        # Narrowed from the original bare `except:` so SystemExit and
        # GeneratorExit are no longer swallowed; read errors still yield None.
        return None
def swift_module_name(label):
    """Derive a Swift module name from a Bazel label.

    The package path (minus its leading slashes) and the target name are
    joined with "_", with '/' and '-' normalized to '_'.
    """
    # lstrip("/") strips all leading slash characters, exactly like the
    # original lstrip("//") (lstrip takes a character set, not a prefix).
    package_part = label.package.lstrip("/").replace("/", "_").replace("-", "_")
    name_part = label.name.replace("-", "_")
    if not package_part:
        return name_part
    return package_part + "_" + name_part
def get_largest_logo(logos):
    """
    Return the 'value' of the largest logo, sized by height + width.

    Missing 'height'/'width' entries count as 0. Returns None for an empty
    list, or when the winning logo has no 'value' key.

    :param logos: List of logo dicts
    :return: Largest logo's 'value' or None
    """
    if not logos:
        return None

    def _size(logo):
        # Dimensions arrive as strings, hence the int() conversions.
        return int(logo.get('height', 0)) + int(logo.get('width', 0))

    return max(logos, key=_size).get('value')
def add_wgi(image):
    """
    Append a Water Adjusted Green Index (WGI) band to *image*.

    WGI = NDWI * GCVI, computed from the image's existing 'NDWI' and 'GCVI'
    bands and attached as a new band named 'WGI'.
    """
    band_inputs = {
        'NDWI': image.select('NDWI'),
        'GI': image.select('GCVI'),
    }
    wgi_band = image.expression('NDWI * GI', band_inputs).rename('WGI')
    return image.addBands(wgi_band)
def multiply(a, b):
    """
    Compute the dot product of two equal-length numeric sequences.

    Args:
        a: a list, like [a1, a2]
        b: a list, like [b1, b2]

    Returns:
        a1*b1 + a2*b2 + ... as a float (0.0 for empty input).
    """
    # Indexing over range(len(a)) rather than zip() preserves the original
    # IndexError when b is shorter than a; the 0.0 start keeps the float
    # result the original accumulator produced.
    return sum((a[i] * b[i] for i in range(len(a))), 0.0)
def calc_num_correlations(num_timeseries):
    """Return the number of distinct pairs among *num_timeseries* series.

    This is "n choose 2". Uses integer division so the count is an int;
    the previous float division returned e.g. 10.0 instead of 10, which is
    numerically equal but the wrong type for a count.
    """
    return (num_timeseries * (num_timeseries - 1)) // 2
import secrets
def create_random_key(nbytes):
    """Create a URL-safe secret token backed by *nbytes* bytes of randomness."""
    token = secrets.token_urlsafe(nbytes)
    return token
def check_tag(s, l):
    """Return True if *s* occurs as a substring of any element of *l*.

    NOTE(review): the original docstring claimed the opposite direction
    ("any string in the list is contained in s"), but the code tests
    ``s in element`` — substring-of-element semantics are preserved here.
    """
    return any(s in element for element in l)
def style_red_green_fg(value) -> str:
    """Return a red CSS font color for negative values, green otherwise."""
    if value < 0:
        return "color: #ff0000aa;"
    return "color: #00ff00aa;"
def generate_memory_list_view(expected_memory) -> str:
    """
    Convert expected memory's bytestring to a list of Cells (in string form).

    :param expected_memory: "A00023"
    :return: '[Cell("A0"),Cell("00"),Cell("23"),]' — note the trailing comma
        before ']', matching the original output format exactly.
    """
    # Explicit [i] + [i + 1] indexing (not a slice) preserves the original
    # IndexError on odd-length input.
    cells = [
        f"Cell(\"{expected_memory[i] + expected_memory[i + 1]}\"),"
        for i in range(0, len(expected_memory), 2)
    ]
    return "[" + "".join(cells) + "]"
def impedance_miki_model(f, sigma):
    """
    impedance_miki_model(f, sigma)
    Calculate the surface impedance according to the Miki model.

    Parameters
    ----------
    f : 1-D array of frequencies in [Hz].
    sigma : double flow resistivity in [Ns/m^4].

    Returns
    -------
    Zs : 1-D array of normalized ground impedance, Zs/rhoc.
    """
    # Common dimensionless factor (1000 f / sigma)^-0.632, computed once;
    # the real and imaginary contributions are then formed from it.
    term = (1000 * f / sigma) ** (-0.632)
    return 1 + 5.51 * term - 8.42j * term
def multiples_of_3_or_5(limit: int) -> int:
    """Compute the sum of all the multiples of 3 or 5 below the given limit.

    :param limit: Limit of the values to sum (exclusive).
    :return: Sum of all the multiples of 3 or 5 below the given limit.

    The original tail-recursive helper recursed once per integer and hit
    CPython's recursion limit (RecursionError) for limits beyond ~1000,
    since CPython performs no tail-call elimination; this iterates instead.
    """
    return sum(n for n in range(limit) if n % 3 == 0 or n % 5 == 0)
def get_orig_function(f):
    """Follow the ``__wrapped__`` chain to find the original function
    underneath a (stack of) decorated wrappers."""
    while hasattr(f, "__wrapped__"):
        f = f.__wrapped__
    return f
def draft_app(app):
    """Drafts app fixture.

    Pass-through body: returns the ``app`` argument unchanged so it can be
    depended on under the ``draft_app`` name. Presumably registered as a
    pytest fixture elsewhere — no decorator is visible here; verify.
    """
    return app
import re
def extract_params(text):
    """
    Function used to extract parameters from a function docstring.

    Scans numpydoc-style entries of the form ``name : type`` followed by an
    indented description, recording each parameter's name, dtype and
    description, plus whether its type annotation contains "optional".

    Parameters
    ----------
    text : str
        Parameters section of a function docstring. For example what you are
        reading right now is within the parameters section of the extract_params
        docstring.

    Returns
    -------
    params : dict
        Dictionary containing all parameters found and their descriptions,
        datatypes, and whether they are optional or not.
    """
    # this will match a parameter name, type, and description; the lookahead
    # stops the non-greedy match just before the next "name :" header
    param_re = re.compile(r"(?s)\w+ : .*?(?=\w+ :)")
    # the above will not find the final parameter (no following header for
    # the lookahead to anchor on), this will
    param_re2 = re.compile(r"(?s)\w+ : .*")
    # initialise parameter dictionary
    params = {}
    while True:
        new_param = param_re.search(text)
        # print(new_param)
        if new_param is None:
            # if no parameters have been found, try with modified regex
            new_param = param_re2.search(text)
            if new_param is None:
                # if we still cannot find any new parameters, break
                break
        # extract text
        new_param = new_param.group()
        # split by newline character
        new_param = new_param.split("\n")
        # first line is param name and description, get both
        # NOTE(review): assumes the header line contains exactly one ":";
        # a ":" inside the type annotation would break this unpacking.
        name, dtype = new_param[0].split(":")
        # formatting description: collapse every whitespace run to one space
        desc = "\n".join(new_param[1:])
        desc = re.sub(r"\s+", " ", desc)
        # check if a parameter is optional
        if "optional" in dtype:
            dtype = dtype.split(",")[0]
            optional = True
        else:
            optional = False
        # add parameter details to params dictionary
        params[name.strip()] = {"dtype": dtype.strip(), "description": desc.strip(), "optional": optional}
        # consume the matched entry so the next search finds the following
        # parameter; assumes the matched text occurs once in `text`
        text = text.replace("\n".join(new_param), "")
    return params
def get_sizes(input_size, hidden_size, output_size, layer_idx, nlayers):
    """Return (in_size, out_size) for layer *layer_idx* of an *nlayers* stack.

    A one-layer network maps input directly to output; otherwise the first
    layer maps input->hidden, the last hidden->output, and every middle
    layer hidden->hidden.
    """
    if nlayers == 1:
        return input_size, output_size
    in_size = input_size if layer_idx == 0 else hidden_size
    out_size = output_size if layer_idx == nlayers - 1 else hidden_size
    return in_size, out_size
def gen_allowed_obj(allowed):
    """Generate a dict in compliance with the ActionAllowed JSON Schema.

    The input's truthiness is normalized to a strict bool, exactly as the
    original True/False branches did.
    """
    return {'allowed': bool(allowed)}
def _color(val):
"""
Parse string as hex color string. The scim CSV does some dumb things like
truncate, convert to float, and remove #, so fix all that crap.
>>> _color('#ff0000')
'#ff0000'
>>> _color('ff0000')
'#ff0000'
>>> _color('668800.00')
'#668800'
>>> _color('345')
'#000345'
"""
if val.startswith('#'):
return val # already ok
if val.endswith('.00'):
val = val[:-3]
while len(val) < 6:
val = '0' + val
return '#' + val | 1172c4d8bc740cf547fc4c1fcaeed82bf0edcdad | 93,004 |
def pgdrive_heading(heading: float) -> float:
    """
    Transform a Panda3D heading into a PGDrive heading.

    The two conventions differ only in sign, so the heading (in degrees)
    is simply negated.

    :param heading: float, heading in Panda3D (degree)
    :return: heading (degree)
    """
    return -1 * heading
def check_editable(role, authz):
    """Check if a role can be edited by the current user.

    Admins may edit any role; other users may only edit their own role.
    """
    return True if authz.is_admin else role.id == authz.id
def find_equivalent(elem, find_list):
    """Return the element from *find_list* that equals *elem*, or None.

    For remove operations and the like, the exact stored object may need to
    be provided; this locates the functionally equivalent element (based on
    __eq__) rather than requiring identity.

    :param elem: The original element.
    :param find_list: The list to search through.
    :returns: The first equal element, or None when no match exists.
    """
    return next((candidate for candidate in find_list if candidate == elem), None)
def user_controls_client(user, client):
    """
    Checks whether the given user can control the given client instance.
    @param user: The user to check against.
    @param client: The client to be checked.
    @return: True if the given user controls the given client, False otherwise.
    """
    # Admins control every client; everyone else only their own.
    if user.is_admin:
        return user.is_admin
    return user.id == client.user_id
def _is_nth_child_of_kind(stack, allowed_nums, kind):
"""
Checks if the stack contains a cursor which is of the given kind and the
stack also has a child of this element which number is in the allowed_nums
list.
:param stack: The stack holding a tuple holding the parent cursors
and the child number.
:param allowed_nums: List/iterator of child numbers allowed.
:param kind: The kind of the parent element.
:return: Number of matches.
"""
is_kind_child = False
count = 0
for elem, child_num in stack:
if is_kind_child and child_num in allowed_nums:
count += 1
if elem.kind == kind:
is_kind_child = True
else:
is_kind_child = False
return count | b03d28f7a0dde19d8bb585186944ceb5a3a9da7a | 93,021 |
def any_true(seq):
    """Return the first truthy element of *seq*; False if there is none."""
    for item in seq:
        if item:
            return item
    return False
def extract_station_name(filepath: str) -> str:
    """Return the 12-character station token (e.g. "station_3500") in *filepath*.

    Raises ValueError when no "station_" substring is present, exactly as
    str.index does.
    """
    start = filepath.index("station_")
    return filepath[start:start + 12]
import math
def noise_from_coordinates(point_x, point_y, random_seed=6000):
    """
    Deterministic sine-based pseudo-noise for a given 2-D point.

    Parameters:
        point_x (float): X coordinate of a point
        point_y (float): Y coordinate of a point
        random_seed (float): seed used to generate the values. Default 6000

    Returns:
        random_value (float): fractional part of the scaled sine value for
        the given coordinates (in the open interval (-1, 1)).
    """
    # Scale the sine so the fractional part varies quickly with the input.
    scaled = math.sin(point_x * 100 + point_y * random_seed) * random_seed
    # math.modf keeps the sign of its input, unlike `% 1.0`.
    fractional, _integral = math.modf(scaled)
    return fractional
import functools
import operator
def sequence(grammar, n):
    """
    Creates a grammar element that matches exactly N of the input grammar,
    by folding ``+`` over *n* copies of it (grammar + grammar + ... n times).
    """
    repeated = [grammar] * n
    # reduce without an initial value preserves the original TypeError
    # when n == 0 (empty sequence).
    return functools.reduce(operator.add, repeated)
def changenonetostr(s, text='None'):
    """Replace a falsy value with *text* (default 'None') for grouping.

    Note: any falsy value (None, '', 0, empty containers, ...) is replaced,
    not just None — this matches the original truthiness test.
    """
    return s if s else text
def clean_word(word: str) -> str:
    """
    Normalize a word for counting: strip surrounding punctuation
    (",.:()&-") from both ends, then lowercase.
    """
    stripped = word.strip(",.:()&-")
    return stripped.lower()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.