content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def watch_pyramid_from_above(characters):
    """Render the top view of a string pyramid.

    The first character forms the outermost ring, each following
    character fills the next centered square ring inward, and the last
    character sits at the center of a (2n-1) x (2n-1) grid.

    :param characters: the pyramid's layers, outermost first
    :return: grid as newline-separated rows; falsy input is returned
        unchanged.  (The old version also print()ed single-character
        input -- a stray debug statement, now removed.)
    """
    if not characters:
        return characters
    size = 2 * len(characters) - 1
    # Start with the whole grid filled by the outermost character.
    grid = [[characters[0]] * size for _ in range(size)]
    # Overwrite progressively smaller centered squares.
    for ring, ch in enumerate(characters[1:], start=1):
        width = size - 2 * ring
        for row in grid[ring:size - ring]:
            row[ring:size - ring] = [ch] * width
    return '\n'.join(''.join(row) for row in grid)
def emft(self, **kwargs):
    """Summarizes electromagnetic forces and torques.

    APDL Command: EMFT

    Notes
    -----
    Summarizes electromagnetic force and torque in both static electric
    and magnetic problems.  Select the nodes in the region of interest
    and make sure all elements are selected before issuing this command.
    With RSYS = 0 the force is reported in the global Cartesian system;
    with a nonzero RSYS it follows the specified system, while torque
    accounts for the RSYS shift and rotation but reports only the
    Cartesian components.

    Forces are stored as items _FXSUM, _FYSUM, _FZSUM, and _FSSUM.
    Torque is stored as items _TXSUM, _TYSUM, _TZSUM, and _TSSUM.

    Valid only with PLANE121, SOLID122, SOLID123, PLANE233, SOLID236
    and SOLID237 elements.  For any other elements, use FMAGSUM.
    """
    return self.run("EMFT,", **kwargs)
def encontrar_repetidos(jogadas):
    """Count the runs of equal adjacent values in a list of die rolls.

    Each maximal stretch of two or more equal neighbours counts as one
    run.

    list -> int
    """
    runs = 0
    run_length = 1
    for idx in range(1, len(jogadas)):
        if jogadas[idx] == jogadas[idx - 1]:
            run_length += 1
            # A run is counted exactly once, when it reaches length 2.
            if run_length == 2:
                runs += 1
        else:
            run_length = 1
    return runs
def _sec_to_min(seconds):
"""converts seconds to minutes,
assuming that input is a number representing seconds"""
return seconds/60.0 | 82365fa70735592f21deea11183f65aac620fad6 | 33,996 |
def lines_for_reconstruction(unicode_text):
    """Split unicode_text via str.splitlines(), appending an empty
    string when the text ends with a line break so that
    LINE_BREAK.join(splitted_lines) can reproduce the trailing break.

    Line break characters taken from
    <https://docs.python.org/3/library/stdtypes.html#str.splitlines>
    """
    if not isinstance(unicode_text, str):
        raise TypeError('This function requires a unicode argument.')
    result = unicode_text.splitlines()
    trailing_breaks = '\n\r\v\f\x1c\x1d\x1e\x85\u2028\u2029'
    if unicode_text and unicode_text[-1] in trailing_breaks:
        result.append('')
    return result
def normalize(entries):
    """Normalize the input by stripping special characters or capitals.

    Intended to be an in-place normalization of the entries held in
    memory.

    Args:
        entries {list of Entry}: The entries to normalize.
    Returns:
        {list of Entry} The entries after normalization.
    """
    # No transformation is implemented yet; entries pass through as-is.
    return entries
def bool_to_text(b):
    """Return 'Yes' when *b* is truthy, 'No' otherwise.

    No type check is performed on *b*; plain truthiness decides.

    Parameters
    ----------
    b: any
        Object evaluated for its truthiness.
    Returns
    -------
    text: :obj:`str`
        'Yes' if *b* evaluates to `True`, 'No' otherwise.
    """
    return "Yes" if b else "No"
def PyObject_Type(space, w_obj):
    """Return a type object corresponding to the object type of *w_obj*.

    Equivalent of the Python expression ``type(o)``; delegates to the
    object space's ``type`` operation.
    """
    w_type = space.type(w_obj)
    return w_type
def make_desc_dict(ecfp):
    """
    Unzip fingerprint (feature, value) pairs into two parallel tuples.

    NOTE(review): despite the name and the previous docstring, this
    returns a (features, values) tuple pair, not a dictionary.

    :param ecfp: iterable of (feature, value) fingerprint pairs
    :return: (ecfp_feat, ecfp_val) -- tuple of features, tuple of values
    """
    ecfp_feat, ecfp_val = zip(*ecfp)
return ecfp_feat, ecfp_val | 5925cda8d428fad5c9bfd46a7a26276e34256569 | 34,002 |
def checkRules(puzzle):
    """Check a 9x9 Sudoku grid against the rules of Sudoku.

    Verifies that every non-zero value is unique within
    (i) its row, (ii) its column, and (iii) its 3x3 cell.
    Zeros denote empty squares and are ignored.

    :param puzzle: 9x9 list of lists of ints
    :return: True when no rule is violated, False otherwise
    """
    # (i) Rows: each filled value must appear exactly once in its row.
    for row in puzzle:
        for value in row:
            if value != 0 and row.count(value) != 1:
                return False
    # (ii) Columns.
    for j in range(9):
        column = [row[j] for row in puzzle]
        for value in column:
            if value != 0 and column.count(value) != 1:
                return False
    # (iii) 3x3 cells.  The column slice must use j: the original code
    # sliced both axes with i, so only the diagonal cells were checked.
    for i in range(3):
        for j in range(3):
            cell = [value
                    for row in puzzle[3 * i:3 * (i + 1)]
                    for value in row[3 * j:3 * (j + 1)]]
            for value in cell:
                if value != 0 and cell.count(value) != 1:
                    return False
    return True
def extract_tags(tags):
    """Return the 'display_name' of every tag dict in *tags*."""
    names = []
    for tag in tags:
        names.append(tag['display_name'])
    return names
def get_orders(value):
    """Return a list of orders for context tiers.

    Parameters
    ----------
    value : int or string
        The maximum context length, or one of the strings
        "bigram" -> [1], "trigram" -> [1, 2], "fourgram" -> [1, 2, 3].
        Any other type yields an empty list; an unknown string raises
        KeyError.
    """
    # Orders always start from 1: distance 0 would be the alignment
    # site itself, so no zero-length context is produced.
    if isinstance(value, int):
        return list(range(1, value + 1))
    if isinstance(value, str):
        return {
            "bigram": [1],
            "trigram": [1, 2],
            "fourgram": [1, 2, 3],
        }[value]
    return []
def serialize_string(input_string):
    """Append or increment a serial counter at the end of a string.

    If the last whitespace-separated token is a number it is
    incremented; otherwise " 1" is appended.

    :param input_string: string to serialize
    :return: the string with an updated trailing counter
    """
    stripped = input_string.strip()
    last_token = stripped.split()[-1]
    if last_token.isdigit():
        # Only rewrite the trailing token.  The previous str.replace()
        # approach removed *every* occurrence of that token, mangling
        # inputs like 'a 1 b 1'.
        prefix = stripped[:len(stripped) - len(last_token)]
        return '%s%s' % (prefix, int(last_token) + 1)
    return '%s 1' % input_string
def clean_env(env):
    """Return a shallow copy of *env* without its "game" entry.

    Raises KeyError when "game" is absent, matching the original del.
    """
    without_game = dict(env)
    del without_game["game"]
    return without_game
def get_nodes_keys(nodes: list) -> set:
    """Collect the set of node keys from a list of nodes."""
    return {node.key for node in nodes}
def _num_clips(
duration_sec: float,
fps: float,
stride_frames: int,
window_size_frames: int,
backpad_last: bool = True,
) -> int:
"""
Utility to calculate the number of clips for a given duration, fps, stride & window_size
"""
num_frames = round(duration_sec * fps)
N = num_frames - window_size_frames
if N < 0:
return 1
result = N // stride_frames + 1
# handle padded frame
if backpad_last and N % stride_frames != 0:
result += 1
return result | f13cec1dd9ced5a4a446b24524646a2e6db3479e | 34,010 |
import csv
def load_review_data(path_data):
    """
    Return a list of dicts with keys:
    * sentiment: +1 or -1 if the review was positive or negative, respectively
    * text: the text of the review
    Any other columns in the tab-separated input are dropped.
    """
    wanted_fields = {'sentiment', 'text'}
    data = []
    with open(path_data) as f_data:
        for row in csv.DictReader(f_data, delimiter='\t'):
            entry = {k: v for k, v in row.items() if k in wanted_fields}
            if entry['sentiment']:
                entry['sentiment'] = int(entry['sentiment'])
            data.append(entry)
    return data
def swap(a: int, b: int) -> tuple[int, int]:
    """
    Return a tuple (b, a) when given two integers a and b
    >>> swap(2,3)
    (3, 2)
    >>> swap(3,4)
    (4, 3)
    >>> swap(67, 12)
    (12, 67)
    """
    # Tuple packing replaces the original XOR-swap dance; the result
    # is identical for all ints.
    return b, a
def days_per_month(leap=False):
    """Return array with number of days per month."""
    february = 29 if leap else 28
    return [31, february, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def generate_discrete_distribution(random_state, N, distribution_type,
                                   distribution_args=None):
    """
    Generate N discrete random variables for a degree distribution.

    Supported types (with their args):
        poisson: lambda
        uniform: low, high
        powerlaw / zipf: exponent

    :param random_state: a np.random.RandomState
    :param N: number of samples to draw
    :param distribution_type: one of 'poisson', 'uniform', 'powerlaw', 'zipf'
    :param distribution_args: tuple of distribution parameters
    :return: np.ndarray of N samples
    :raises ValueError: for an unknown distribution_type (the previous
        code fell through and silently returned None)
    """
    if distribution_args is None:
        distribution_args = ()
    if distribution_type == 'poisson':
        return random_state.poisson(*distribution_args, size=N)
    if distribution_type == 'uniform':
        return random_state.randint(*distribution_args, size=N)
    if distribution_type in ('powerlaw', 'zipf'):
        return random_state.zipf(*distribution_args, size=N)
    raise ValueError('unknown distribution_type: %r' % (distribution_type,))
def parserun(runpath, maxrank=20):
    """Parse a TREC run file into per-query ranked lists.

    Each line is whitespace-separated: query Q0 doc rank score name.

    :param runpath: path to a TREC run file
    :param maxrank: keep at most this many documents per query
    :return: (run name, dict mapping query -> list of docs sorted by
        descending score, ties broken by doc id descending)
    """
    rl = dict()
    name = None
    # raw scores before sorting: rawlist[query][doc] = score
    rawlist = dict()
    # Context manager guarantees the file is closed; iterating the file
    # object directly avoids materializing the whole run in memory.
    with open(runpath) as run_file:
        for line in run_file:
            row = line.strip().split()
            # skip blank lines
            if len(row) == 0:
                continue
            # the run name is the last column of the first data line
            if name is None:
                name = row[-1]
            query = row[0]
            # guard against NaN scores emitted by some systems
            if row[-2] == 'NaN':
                score = 0.0
            else:
                score = float(row[-2])
            if query not in rawlist:
                rawlist[query] = dict()
            # a doc repeated within a query keeps its last score
            doc = row[2]
            rawlist[query][doc] = score
    for query in sorted(rawlist):
        # sort by score then by doc name, both descending
        rl[query] = sorted(
            rawlist[query],
            key=lambda d: (rawlist[query][d], d),
            reverse=True,
        )[:maxrank]
    return name, rl
def linear_annuity_mapping_func(underlying, alpha0, alpha1):
    """linear_annuity_mapping_func
    Evaluate the linear annuity mapping function, a model of
    $P(t, T) / A(t)$, whose value must therefore be positive:

    .. math::
        \\alpha(S) := S \\alpha_{0} + \\alpha_{1}.

    where :math:`S` is underlying, :math:`\\alpha_{0}` is alpha0 and
    :math:`\\alpha_{1}` is alpha1.

    :param float underlying:
    :param float alpha0:
    :param float alpha1:
    :return: value of linear annuity mapping function.
    :rtype: float.
    """
    value = underlying * alpha0 + alpha1
    assert(value > 0)
    return value
def buffer_polygons(polys_in, buff):
    """Buffer every polygon in *polys_in* by *buff*; return the new list."""
    return [poly.buffer(buff) for poly in polys_in]
def formatted_search_results(result):
    """
    Create a curated list of the results we want to see on the search page.
    """
    entry = result[0]
    first_file = entry['files'][0]
    return [
        {'title': 'Publisher', 'value': entry['dc']['publisher']},
        {'title': 'Format', 'value': first_file['mime_type']},
        {'title': 'Size (bytes)', 'value': first_file['length']},
    ]
def not_square(patients_dcm):
    """
    Report which scans contain non-square slices.

    Parameters:
        patients_dcm (list): list of MRI scans; each scan is a list of
            the slices that make up the scan
    Returns:
        scans_not_square (list): indices of scans with non-square slices
    """
    scans_not_square = []
    for index, scan in enumerate(patients_dcm):
        # All slices in one scan share the same dimensions, so the
        # first slice is representative.
        shape = scan[0].pixel_array.shape
        if shape[0] != shape[1]:
            scans_not_square.append(index)
            print("Not all images are squares")
    return scans_not_square
def xy_to_wcs(xy, _w):
    """
    Convert pixel coordinates (xy) to astronomical coordinates (RA, DEC).
    """
    radec = _w.wcs_pix2world(xy, 1)
    ra = radec[:, 0]
    dec = radec[:, 1]
    return ra, dec
import re
def is_senior_area_chair_from(role):
    """
    Decide whether *role* describes a senior area chair (SAC) and
    return the track(s) they manage.

    SACs are distinguishable from everyone else via the "(manager"
    suffix, e.g. committee:Speech:Speech (manager 1).

    :return: (True, [tracks]) for an SAC, (False, []) otherwise
    """
    if '(manager' not in role:
        return (False, [])
    no_colon_role = re.sub(r': ', '- ', role)
    tracks = re.findall('([^:]+) \(manager', no_colon_role)
    if tracks:
        tracks = [track.strip() for track in tracks]
    else:
        tracks = ['']
    return (True, tracks)
import io
import sys
def capture_stdout(function, *args, **kwargs):
    """Capture and return the standard output produced by *function*.

    stdout is restored even when the function raises; the previous
    version left sys.stdout redirected in that case.

    :param function: callable to invoke
    :return: everything the call wrote to stdout, as a string
    """
    io_stdout = io.StringIO()
    sys.stdout = io_stdout
    try:
        function(*args, **kwargs)
    finally:
        sys.stdout = sys.__stdout__
    return io_stdout.getvalue()
def format_month_day(dt, month_fmt="%b"):
    """
    Format the month and day of a datetime without a zero-padded day.

    Args:
        dt (datetime.datetime): The datetime to be formatted
        month_fmt (Optional[str]): The strftime-compatible month format
    Returns:
        str: The formatted month and day
    """
    # dt.day is used instead of strftime's '%-d' directive because the
    # latter (no-leading-zero day) is platform-dependent.
    month = dt.strftime(month_fmt)
    return f"{month} {dt.day}"
def capitalize(text):
    """Capitalize a word, for use in rendering a template.

    Args:
        text (str): word to capitalize
    Returns:
        str: the word with its first character upper-cased; an empty
        string is returned unchanged (the original raised IndexError)
    """
    if not text:
        return text
    return text[0].upper() + text[1:]
import sys
import json
from typing import OrderedDict
def read_in():
    """
    Parse JSON from the first line of standard input, preserving key order.
    """
    first_line = sys.stdin.readlines()[0]
    # json.loads with object_pairs_hook is equivalent to building a
    # JSONDecoder with the same hook.
    return json.loads(first_line, object_pairs_hook=OrderedDict)
def string_to_maya_value(string):
    """Convert a string to a numerical type understood by Maya.

    Digit-only strings become int; decimal numbers (with '.' or a
    European ',' separator) become float; anything else is returned
    unchanged.
    """
    if string.isdigit():
        return int(string)
    if '.' in string or ',' in string:
        if string.replace(',', '').replace('.', '').isdigit():
            # float() rejects ',' separators, so normalise them first;
            # the original called float() on the raw string and crashed
            # on values like '1,5'.
            try:
                return float(string.replace(',', '.'))
            except ValueError:
                return string
    return string
def get_library_name():
    """
    Get the name for the pyLabLib library (the one containing the
    current module), by trimming the last three components of this
    module's dotted name.
    """
    parts = __name__.split(".")
    return ".".join(parts[:-3])
import re
def split_sentences(text):
    """
    Break *text* into a list of sentence fragments, dropping fragments
    of 150 characters or more.
    """
    print('starting tokenization')
    delimiters = re.compile(u'[\\[\\]\n.!?,;:\t\\-\\"\\(\\)\\\'\u2019\u2013]')
    fragments = delimiters.split(text)
    # Filter out too long sentences.
    return [fragment for fragment in fragments if len(fragment) < 150]
import itertools
def _get_columns_(term_list, required, dont_cares):
    """
    Partition prime-implicant coverage columns (Quine-McCluskey style).

    term_list -- full list of terms
    required -- integers for terms that are essential prime implicants . . .
        each required int will appear in the source list for only 1 item in needed
    dont_cares -- minterm numbers whose coverage does not matter

    Returns the first-generation minterm numbers still needing cover.

    NOTE(review): each term is assumed to expose .used, .dontcare,
    .source, and ._replace (namedtuple-like) -- confirm against caller.
    """
    ignore = []
    keep = []
    # Only consider live terms: not yet used and not don't-cares.
    for index, term in [(i, v) for i, v in enumerate(term_list)
                        if v.used is False and not v.dontcare]:
        # Find Terms in "needed" that exist in required, add them to the final result,
        # and add that Term's sources to the "columns" we can now ignore (already covered
        # terms)
        if len((set(required) & set(term.source))) >= 1:
            term_list[index] = term._replace(final="Required")
            ignore += itertools.chain(term.source)
        # Otherwise add the sources to our list of "columns" we need to keep
        else:
            keep += itertools.chain(term.source)
    ignore = ignore + dont_cares
    # create a list of the remaining 1st gen terms that we still need to find minterms for
    keep = list(set(keep) - set(ignore))
return keep | 6f244243d54f8b83e9300c681187765c79721af7 | 34,030 |
def get_outgoing_edges(graph, node):
    """
    Get all outgoing edges from a given node in a graph.

    Includes logic for choosing the ordering of the edges.

    :param graph: mapping node -> {neighbor: iterable of edge keys}
    :param node: source node
    :return: list of (node, neighbor, edge_key) triples
    """
    # dict.iteritems() was Python 2 only and raises AttributeError on
    # Python 3; .items() is the equivalent API.
    return [(node, neighbor, edge_key)
            for neighbor, edge_keys
            in graph[node].items()
            for edge_key in edge_keys]
def get_arg(*args, index: int):
    """Get the positional argument at *index*.

    Returns None when no argument exists at that position.  (The
    original returned None only for an empty *args* but raised
    IndexError for an out-of-range index.)
    """
    try:
        return args[index]
    except IndexError:
        return None
import torch
def sqrt(x):
    """Element-wise square root of a tensor."""
    result = torch.sqrt(x)
    return result
def dec2hex(value, delim=''):
    """ 12648430 -> 'c0ffee' ; (255, 255) -> 'ffff' ; (256 * 256 - 1, 10) -> 'ffff0a' """
    if type(value) == int:
        digits = hex(value)[2:]
        # Pad to an even number of hex digits.
        if len(digits) % 2:
            digits = '0' + digits
        return digits
    return delim.join(dec2hex(item, delim) for item in value)
import functools
def operate(type_id, operands):
    """ Apply the operation specified in the type_id to the operands, returning the result
    """
    if type_id == 0:  # sum
        return sum(operands)
    if type_id == 1:  # product
        return functools.reduce(lambda x, y: x * y, operands, 1)
    if type_id == 2:  # minimum
        return min(operands)
    if type_id == 3:  # maximum
        return max(operands)
    # There is no type 4 here because that encodes a literal value.
    if type_id == 5:  # greater than
        assert(len(operands) == 2)
        return 1 if operands[0] > operands[1] else 0
    if type_id == 6:  # less than
        assert(len(operands) == 2)
        return 1 if operands[0] < operands[1] else 0
    if type_id == 7:  # equal
        assert(len(operands) == 2)
        return 1 if operands[0] == operands[1] else 0
    raise Exception(f'Unrecognized type_id {type_id}')
def vector_sequence_to_words(sequence, w2v_model):
    """
    Reconstruct a sentence from an array of word vectors.

    :param sequence: np array of word vectors (matrix)
    :param w2v_model: word embedding model
    :return: str
    """
    words = []
    for vect in sequence:
        # nearest neighbour in the embedding space
        best_match = w2v_model.most_similar(positive=[vect], topn=1)[0][0]
        words.append(best_match)
    return " ".join(words)
def tile_in_roi(conn, g, roi, tile_pkey):
    """
    Check whether the tile *tile_pkey* lies within the region of interest.

    conn -- sqlite3-style connection exposing tile / phy_tile tables
    g -- helper exposing loc_of_tilename(name) -> grid location
    roi -- ROI object exposing tile_in_roi(loc) -> bool
    tile_pkey -- primary key of the tile to test
    """
    c = conn.cursor()
    # Resolve the logical tile to its physical tile name in one nested query.
    c.execute(
        """
        SELECT name FROM phy_tile WHERE pkey =
        (SELECT phy_tile_pkey FROM tile WHERE pkey = ?)
        """, (tile_pkey, )
    )
    tile, = c.fetchone()
    loc = g.loc_of_tilename(tile)
return roi.tile_in_roi(loc) | 136e63a9b1fa2d426350607998a064cd5d363a85 | 34,038 |
def check_api_key(tmdb) -> bool:
    """
    Check for the presence of a non-empty TMDB API key.

    :param tmdb: object exposing an ``api_key`` attribute
    :return: True when the key is set and non-empty
    """
    # The original expression `False if not tmdb.api_key or "" else True`
    # contained a dead `or ""`; this is the same truthiness test.
    return bool(tmdb.api_key)
def read_fastq(filename):
    """Read a FASTQ file into a dict mapping read ID to its sequence.

    The '+' separator and quality-score lines are ignored.

    Key Arguments:
    filename -- name of FASTQ input file.
    """
    reads_dict = dict()
    with open(filename, 'r') as fastq_file:
        # FASTQ records are strictly four lines; consuming them in
        # groups avoids misreading quality lines that contain '@'
        # (which the previous substring test treated as headers).
        for header in fastq_file:
            sequence = next(fastq_file)
            next(fastq_file)  # '+' separator line
            next(fastq_file)  # quality-score line
            reads_dict[header[1:].replace('\n', '')] = sequence.replace('\n', '')
    return reads_dict
def f_score(p, r, beta=1):
    """
    F-score: weighted harmonic mean of precision *p* and recall *r*.
    """
    beta_sq = beta ** 2
    numerator = p * r
    denominator = beta_sq * p + r
    return (1 + beta_sq) * (numerator / denominator)
def represent_dictionary_order(self, dict_data):
    """Represent a dict as a YAML mapping, keeping item order."""
    items = dict_data.items()
    return self.represent_mapping('tag:yaml.org,2002:map', items)
def getQuadratic(A, b, c=0):
    """
    Given A, b and c, this returns a quadratic, Q
    .. math::
        \mathbf{Q( x ) = 0.5 x A x + b x} + c
    """
    def Quadratic(x, return_g=True, return_H=True):
        # value, optionally followed by gradient and Hessian
        value = 0.5 * x.dot(A.dot(x)) + b.dot(x) + c
        pieces = (value,)
        if return_g:
            pieces += (A.dot(x) + b,)
        if return_H:
            pieces += (A,)
        return pieces if len(pieces) > 1 else pieces[0]
    return Quadratic
from datetime import datetime
def start_and_end_of_the_month(dt: datetime):
    """Return (first of month, first of next month) for *dt*, at midnight."""
    start = dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    if start.month < 12:
        end = start.replace(month=start.month + 1)
    else:
        # December rolls over into January of the next year.
        end = start.replace(year=start.year + 1, month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
    return start, end
def dict_scale(d, scl):
    """Return a new dict with every value of *d* multiplied by *scl*."""
    return {key: value * scl for key, value in d.items()}
def has_any_permissions(user, permissions):
    """Return True if *user* holds at least one of *permissions*."""
    return any(user.has_perm(perm) for perm in permissions)
def weighing_function(orig_length, cur_length):
    """
    Weight for a word-bbox given expected vs predicted text length.

    The farther the predicted length is from the expected length, the
    smaller the weight assigned to that word-bbox.

    :param orig_length: Length of the expected word bounding box
    :param cur_length: Length of the predicted word bounding box
    :return: weight in [0, 1]
    """
    if orig_length == 0:
        # Degenerate expected length: exact match or nothing.
        return 1 if cur_length == 0 else 0
    deviation = min(orig_length, abs(orig_length - cur_length))
    return (orig_length - deviation) / orig_length
import torch
def sinkhorn_unrolled(c, a, b, num_sink, lambd_sink):
    """
    Sinkhorn layer implemented with Automatic Differentiation (AD).

    Same interface as the 'Sinkhorn' module: cost matrix *c*, marginals
    *a* (rows) and *b* (columns), *num_sink* iterations, entropic
    regularization *lambd_sink*.  Returns the transport plan p.
    """
    log_plan = -c / lambd_sink
    log_row = torch.log(a).unsqueeze(dim=-1)
    log_col = torch.log(b).unsqueeze(dim=-2)
    for _ in range(num_sink):
        # Alternate column and row normalizations in log space.
        log_plan = log_plan - (torch.logsumexp(log_plan, dim=-2, keepdim=True) - log_col)
        log_plan = log_plan - (torch.logsumexp(log_plan, dim=-1, keepdim=True) - log_row)
    return torch.exp(log_plan)
def _parse_text_with_command(text: str):
    """Parse the string passed when a bot command is invoked.

    Splits off everything after the final '</at>' (bot-mention) tag and
    tokenizes it on whitespace.

    NOTE(review): the two branches return different shapes -- a list of
    tokens when text follows the mention, but a ('', []) tuple when it
    does not; confirm callers handle both.
    """
    if after_bot_name_text := text.split('</at>')[-1].rstrip().strip():
        return after_bot_name_text.split()
    else:
return '', [] | 03559b81e696064fee90c8818cb2825a93cbe719 | 34,057 |
import os
def replay_test_dir():
    """Fixture: path to the 'test-replay' directory under the CWD."""
    base = os.path.abspath(os.path.curdir)
    return os.path.join(base, 'test-replay')
def top_xticks(ax, minor=None, major=None, labels=None):
    """
    Add a twin top x-axis to *ax* with custom major / minor ticks.

    args:
        minor # list() or np.ndarray() minor xtick locations
        major # None, list() or np.ndarray() major xtick locations
        labels # None, list() or np.ndarray() major xtick labels
    returns:
        ax_top
    example:
        import numpy as np
        import matplotlib.pyplot as plt
        from positronium import Bohr
        # minor ticks
        nvals = np.arange(10, 200)
        minor = Bohr.energy(2, nvals, unit="nm")
        # major ticks
        nvals = [10, 12, 15, 19, 24, 32, np.inf]
        labels = map(lambda x: r"$\\infty$" if x == np.inf else f"{x:d}", nvals)
        major = Bohr.energy(2, nvals, unit="nm")
        # plot
        fig, ax = plt.subplots()
        ax_top = top_xticks(ax, minor, major, labels)
        ax_top.grid(which="both", axis="x", zorder=-20, alpha=0.2)
        ax.set_xlim(728, 762)
        plt.show()
    """
    ax_top = ax.twiny()
    # NOTE(review): get_shared_x_axes().join() is deprecated in recent
    # matplotlib releases -- confirm the target version supports it.
    ax.get_shared_x_axes().join(ax, ax_top)
    # major ticks, with optional custom labels
    if major is not None:
        ax_top.set_xticks(major)
        if labels is not None:
            ax_top.xaxis.set_ticklabels(labels)
    # minor ticks
    if minor is not None:
        ax_top.set_xticks(minor, minor=True)
    # keep the top axis aligned with the bottom axis' current bounds
    ax_top.set_xbound(ax.get_xbound())
return ax_top | 8244af4c50ac5a66601e79d312c6cbca5119498d | 34,059 |
from typing import Mapping
from typing import Optional
def _parse_obsolete(obsolete_file_path: str) -> Mapping[str, Optional[str]]:
"""Parses the data file from PDB that lists which pdb_ids are obsolete."""
with open(obsolete_file_path) as f:
result = {}
for line in f:
line = line.strip()
# Format: Date From To
# 'OBSLTE 06-NOV-19 6G9Y' - Removed, rare
# 'OBSLTE 31-JUL-94 116L 216L' - Replaced, common
# 'OBSLTE 26-SEP-06 2H33 2JM5 2OWI' - Replaced by multiple, rare
if line.startswith('OBSLTE'):
if len(line) > 30:
# Replaced by at least one structure.
from_id = line[20:24].lower()
to_id = line[29:33].lower()
result[from_id] = to_id
elif len(line) == 24:
# Removed.
from_id = line[20:24].lower()
result[from_id] = None
return result | c4e3f3a04d3349d6752d133d079a1bb3aa34e13f | 34,060 |
def compact_date(date):
    """
    Convert an ISO 8601 format date string into a compact date.

    Parameters
    ----------
    date: a string date in iso format.

    Returns
    ----------
    The string date without hyphens.
    """
    return ''.join(date.split('-'))
from typing import Sequence
def recursive_flatten(seq: Sequence) -> Sequence:
    """Recursively flatten nested sequences into a flat tuple.

    str/bytes elements are treated as atoms: the previous version
    recursed into them character by character and hit the recursion
    limit (a one-character string contains itself as its first item).
    """
    if not seq:  # empty sequence
        return seq
    head = seq[0]
    if isinstance(head, Sequence) and not isinstance(head, (str, bytes)):
        return (*recursive_flatten(head), *recursive_flatten(seq[1:]))
    return (*seq[:1], *recursive_flatten(seq[1:]))
import torch
def calculate_accuracy(inputs: torch.Tensor, targets: torch.Tensor) -> float:
    """
    Batch classification accuracy, returned as a Python float.

    Args:
        inputs (torch.Tensor): shape == [N, n_class]
        targets (torch.Tensor): shape == [N]
    Returns: accuracy (float)
    """
    with torch.no_grad():
        _, predictions = torch.max(inputs, 1)
        num_correct = (predictions == targets).cpu().sum().float().item()
        return num_correct / targets.shape[0]
from datetime import datetime
def epoch_to_utc_timestamp(epoch):
    """Convert a POSIX epoch (seconds, number or numeric string) to a
    naive UTC datetime."""
    from datetime import timezone
    epoch = float(epoch)
    # fromtimestamp(..., tz=utc) replaces the deprecated
    # datetime.utcfromtimestamp(); dropping tzinfo preserves the
    # original naive-datetime return type.
    return datetime.fromtimestamp(epoch, tz=timezone.utc).replace(tzinfo=None)
import base64
def base64_untoken(base64_bytes):
    """Decode a base64 'user:pass' token into its two parts."""
    decoded = base64.b64decode(base64_bytes).decode('ascii')
    # split only on the first ':' so passwords may contain colons
    parts = decoded.split(":", 1)
    return parts[0], parts[1]
import hashlib
def hash100(s: str):
    """
    Hash a string into 1~100.
    Useful when you split a dataset into subsets.
    """
    digest = hashlib.md5(s.encode()).hexdigest()
    return int(digest, base=16) % 100 + 1
def precond_grad_scaw(Ql, qr, Grad):
    """
    Apply the scaling-and-whitening preconditioner to a gradient.
    """
    whitening = Ql.t().mm(Ql)
    scaled_grad = Grad * (qr * qr)
    return whitening.mm(scaled_grad)
def _get_union_type_name(type_names_to_union):
"""Construct a unique union type name based on the type names being unioned."""
if not type_names_to_union:
raise AssertionError(
"Expected a non-empty list of type names to union, received: "
"{}".format(type_names_to_union)
)
return "Union__" + "__".join(sorted(type_names_to_union)) | f72f6a5212aa97eba32a3da7249087c97ce471b3 | 34,071 |
def is_clinical_in_cases(clinical_obj, cases):
    """Check whether a parsed clinical XML object belongs to *cases*.

    :param clinical_obj: dict representation of a clinical XML file;
        expects ['kirp:patient']['shared:bcr_patient_uuid']['#text']
    :param cases: set of lower-cased patient UUID strings to assess
    :return: True when the patient's UUID (stripped, lower-cased) is in
        *cases*
    """
    # The unused get_days_to_birth/get_days_to_death stubs that lived
    # here were dead code and have been removed.
    patient_uuid = clinical_obj['kirp:patient']['shared:bcr_patient_uuid']['#text']
    return patient_uuid.strip().lower() in cases
import os
import pathlib
def autoversion(filename):
    """Append a cache-busting version query param to a static asset path.

    The version is the file's last-modification time; useful for
    aggressive caching of static assets like app JavaScript and CSS.

    Args:
        filename (str): root-relative asset path (leading '/')
    Returns:
        str: the filename plus a '?v=<mtime>' query param, or the bare
        filename when the file cannot be stat'ed
    """
    local_path = os.path.join(pathlib.Path(__file__).parent, filename[1:])
    try:
        version = str(os.path.getmtime(local_path))
    except OSError:
        return filename
    return "{0}?v={1}".format(filename, version)
def get_blanks(nrows, ncols, plot_set):
    """Return a list of plot locations that should remain blank."""
    assert type(plot_set) == set
    all_plots = set(range(1, nrows * ncols + 1))
    return list(all_plots - plot_set)
def _qualified_names(modname):
"""Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = modname.split(".")
return [".".join(names[0 : i + 1]) for i in range(len(names))] | 97c614bbcc0997ab95a8535bf02489291d9d1c11 | 34,075 |
import numpy
def overall_mean(pollruns, data, default=0, round_to=1):
    """Return the mean of data values for each pollrun."""
    # missing pollruns contribute the default value
    values = [data.get(run.pk, default) for run in pollruns]
    return round(numpy.mean(values), round_to)
import logging
def select_common_data(original_df, loading_df, df):
    """
    Select the measurements common to the original data, the loadings,
    and the data to project.

    :param pd.DataFrame original_df
    :param pd.DataFrame loading_df
    :param pd.DataFrame df
    :rtype Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]
    :raises ValueError: when no field is shared by all three inputs
    """
    logging.info('Selecting common data')
    common_fields = (original_df.columns
                     .intersection(loading_df.index)
                     .intersection(df.columns))
    if common_fields.size < 1:
        raise ValueError('no common fields to use')
    logging.info('Selecting fields: {!r}'.format(common_fields.tolist()))
    original_result = original_df[common_fields]
    loading_result = loading_df.loc[common_fields]
    result = df[common_fields]
    logging.info('Resulting original data is a table with shape {}'.format(
        original_result.shape))
    logging.info('Resulting loadings is a table with shape {}'.format(
        loading_result.shape))
    logging.info('Resulting data is a table with shape {}'.format(
        result.shape))
    return original_result, loading_result, result
def __is_allow_module(scope, endpoint):
    """Return True when the scope grants access to the endpoint's whole module."""
    # endpoint -> 'blue_name.red_name+func_name'
    module, sep, remainder = endpoint.partition('+')
    # Exactly one '+' is required, i.e. the remainder must not contain another.
    if not sep or '+' in remainder:
        return False
    return module in scope.allow_module
import re
def tokenize_sentences(text):
    """Split raw text into sentences with a lightweight regex heuristic."""
    print('starting tokenization')
    flattened = re.sub('\n', ' ', text)
    pieces = re.split('(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', flattened)
    # Discard overly long candidate sentences.
    return [piece for piece in pieces if len(piece) < 150]
def find_index(x, y):
    """
    find the index of x in y, if x not in y, return -1
    """
    return next((pos for pos, value in enumerate(y) if value == x), -1)
def fixed_cation_oxidation_states():
    """Map cations with essentially fixed oxidation states to that state.

    Returns:
        dict: {element symbol (str): oxidation state (int)} for cations whose
        oxidation state is reliably +1, +2, or +3.
    """
    plus_one = ['H', 'Li', 'Na', 'K', 'Rb', 'Cs', 'Fr', 'Ag']
    plus_two = ['Be', 'Mg', 'Ca', 'Sr', 'Ba', 'Ra']
    plus_three = ['Sc', 'Y', 'La', 'Al', 'Ga', 'In',
                  'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb',
                  'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu']
    # Build the mapping directly instead of re-scanning every group
    # for each element as the original loop did.
    data = {}
    for state, group in ((1, plus_one), (2, plus_two), (3, plus_three)):
        for element in group:
            data[element] = state
    return data
def grad_refactor_1(a, b):
    """ if_test """
    def _multiply(x, y):
        return x * y

    return _multiply(a, b)
def get_user_action(message, options):
    """
    Prompt the user with ``message`` and map the (lower-cased) reply
    through ``options``.

    Unknown replies fall back to ``options["default"]``; when no default
    is configured either, ``None`` is returned.

    :param message: The message for user.
    :type message: string
    :param options: Options mapping.
    :type options: dict
    :return: Value according to the user selection.
    """
    reply = input(message).lower()
    if reply in options:
        return options[reply]
    return options.get("default")
def vforms_weights(vform_choice, repeats, nqubits):
    """
    Returns the number of weights a certain circuit has with 1 repeat and
    without any trailing gates.
    @vform_choice :: String of the choice of variational form.
    """
    weight_formulas = {
        "two_local": lambda: nqubits * (repeats + 1),
    }
    try:
        compute = weight_formulas[vform_choice]
    except KeyError:
        raise TypeError("Specified variational form is not implemented!")
    return compute()
def use_proxy(browser, proxy, url):
    """
    Open ``url`` in ``browser`` routed through an HTTP proxy.

    :param browser: Browser object exposing a mutable ``profile``.
    :param proxy: (host, port) pair for the HTTP proxy.
    :param url: Address to load after the proxy is configured.
    :return: The same browser, configured and pointed at ``url``.
    """
    profile = browser.profile
    # network.proxy.type 1 selects manual proxy configuration;
    # permissions.default.image 2 blocks image loading.
    preferences = [
        ('network.proxy.type', 1),
        ('network.proxy.http', proxy[0]),
        ('network.proxy.http_port', int(proxy[1])),
        ('permissions.default.image', 2),
    ]
    for key, value in preferences:
        profile.set_preference(key, value)
    profile.update_preferences()
    browser.profile = profile
    browser.get(url)
    browser.implicitly_wait(30)
    return browser
def getTrainTimes(t1,
                  testTimes):
    """SNIPPET 7.1 PURGING OBSERVATION IN THE TRAINING SET
    Given testTimes, find the times of the training observations.
    —t1.index: Time when the observation started.
    —t1.value: Time when the observation ended.
    —testTimes: Times of testing observations.
    """
    trn = t1.copy(deep=True)
    # Series.iteritems was deprecated and removed in pandas 2.0;
    # items() is the supported equivalent.
    for i, j in testTimes.items():
        df0 = trn[(i <= trn.index) & (trn.index <= j)
                  ].index  # train starts within test
        df1 = trn[(i <= trn) & (trn <= j)].index  # train ends within test
        df2 = trn[(trn.index <= i) & (j <= trn)].index  # train envelops test
        trn = trn.drop(df0.union(df1).union(df2))
    return trn
import re
def clean_output(gold, predictions):
    """
    Utility function to clean generated output from BART
    """
    stripped = gold.replace("<eos>", "").strip()
    labels = [int(token[2:-1]) for token in stripped.split()]
    # handle cases when output is empty
    if not predictions:
        return labels, []
    preds = []
    for token in predictions[0].split():
        digits = re.findall('\\d+', token)
        if len(digits) == 1:
            preds.append(int(digits[0]))
    return labels, preds
def byte_pos(text, line, col):
    """ Return position index of (line, col)
    line is line index, col is column index
    The returning byte position __includes__ all '\n's.
    Text is unicode.
    """
    # isinstance is the idiomatic type check; type(...) != list would also
    # send list subclasses down the .splitlines() branch and crash.
    if isinstance(text, list):
        lines = text[:line + 1]
    else:
        lines = text.splitlines(True)[:line + 1]
    # Full length of all preceding lines plus the column offset in the target line.
    b = len(''.join(lines[:line])) + len(lines[line][:col])
    return b
def format_client_name(dataframe, client_name_column='client'):
    """Add a normalized ``clientName`` column derived from the DWH name.

    The DWH name is normalized by stripping spaces, dropping anything from
    the first '(' onward, and truncating to 12 characters. The original
    column (which must *not* already be called ``clientName``) is renamed
    to ``clientNameDWH``. The dataframe is modified in place and returned.
    """
    def _normalize(raw_name):
        return raw_name.replace(' ', '').split('(')[0][:12]

    dataframe['clientName'] = dataframe[client_name_column].apply(_normalize)
    dataframe.rename(columns={client_name_column: 'clientNameDWH'},
                     inplace=True)
    return dataframe
from typing import Optional
import os
import logging
def get_artifact_data(artifact_folder, artifact_relative_path: str) -> Optional[str]:
    """
    Read the contents of an artifact file below the artifacts root folder.

    Args:
        artifact_folder (str): Full path of the artifact root folder.
        artifact_relative_path (str): Relative path of an artifact file.

    Returns:
        (Optional[str]): data of the artifact as str if exists, None otherwise.
    """
    try:
        artifact_path = os.path.join(artifact_folder, artifact_relative_path)
        if not os.path.isfile(artifact_path):
            logging.info(f'Did not find {artifact_relative_path} file')
            return None
        logging.info(f'Extracting {artifact_relative_path}')
        with open(artifact_path, 'r') as artifact_file:
            return artifact_file.read()
    except Exception:
        # Best-effort read: log the failure and fall through to None.
        logging.exception(f'Error getting {artifact_relative_path} file')
        return None
import re
import copy
def _select_allowed_items(item_dict, allow_patterns, disallow_patterns):
    """
    Build a dictionary of items from `item_dict` whose names match at least
    one re pattern in `allow_patterns` and none in `disallow_patterns`.
    The input dictionary is not modified; selected values are deep copies
    of the originals.

    Parameters
    ----------
    item_dict: dict
        Dictionary of items.
    allow_patterns: list(str)
        Selected item should match at least one of the re patterns. If the value is ``[None]``
        then all items are selected. If ``[]``, then no items are selected.
    disallow_patterns: list(str)
        Selected item should not match any of the re patterns. If the value is ``[None]``
        or ``[]`` then no items are deselected.

    Returns
    -------
    dict
        Dictionary of the selected items.
    """
    def _allowed(name):
        if not allow_patterns:
            return False
        if allow_patterns[0] is None:
            return True
        return any(re.search(pattern, name) for pattern in allow_patterns)

    def _disallowed(name):
        if not disallow_patterns or disallow_patterns[0] is None:
            return False
        return any(re.search(pattern, name) for pattern in disallow_patterns)

    items_selected = {}
    for name, value in item_dict.items():
        if _allowed(name) and not _disallowed(name):
            items_selected[name] = copy.deepcopy(value)
    return items_selected
def scale_lr_and_momentum(args, cifar=False, skip=False):
    """
    Rescale ``args.lr`` and ``args.momentum`` for the actual batch size.

    The reference batch size is 128 for CIFAR and 256 otherwise; momentum
    is raised to the batch-size ratio and the learning rate is scaled to
    keep the effective update magnitude consistent.

    Arguments:
        args: holds the script arguments
        cifar: boolean if we are training imagenet or cifar
        skip: boolean skipping the hyperparameter scaling.
    """
    if skip:
        return args
    print('=> adjusting learning rate and momentum. '
          f'Original lr: {args.lr}, Original momentum: {args.momentum}')
    reference_batch = 128 if cifar else 256
    previous_momentum = args.momentum
    args.momentum = previous_momentum ** (args.batch_size / reference_batch)
    args.lr = args.lr * (args.batch_size / reference_batch *
                         (1 - args.momentum) / (1 - previous_momentum))
    print(f'lr adjusted to: {args.lr}, momentum adjusted to: {args.momentum}')
    return args
def _py_lazy_or(cond, b):
    """Lazy-eval equivalent of "or" in Python."""
    # b is only called when cond is falsy, mirroring short-circuit `or`.
    if cond:
        return cond
    return b()
def get_printable_table(class_name: str, class_info: dict) -> str:
    """
    Creates and returns a string displaying the class info in a
    format that is easily read in a table.

    :param class_name: The name of a class owning the data.
    :type class_name: str
    :param class_info: The data in the class to display.
    :type class_info: dict{str: variant}
    :return: The class info in a readable format.
    :rtype: str
    """
    # default=0 keeps an empty class_info from raising ValueError in max().
    max_key = max((len(k) for k in class_info.keys()), default=0)
    max_value = max((len(str(v)) for v in class_info.values()), default=0)
    header_separator = f"+{'=' * (max_key + max_value + 5)}+"
    row_separator = header_separator.replace("=", "-")
    rows = [header_separator,
            f"| {class_name}"
            f"{' ' * (max_key + max_value - len(class_name) + 4)}|",
            header_separator]
    for key, value in class_info.items():
        # Pad key and value so the columns line up across rows.
        row = f"| {key}{' ' * (max_key - len(key))} | " \
              f"{value}{' ' * (max_value - len(str(value)))} |"
        rows.append(row)
        rows.append(row_separator)
    return "\n".join(rows)
def get_geometry_type(gi):
    """ Return the geometry type from a __geo_interface__ dictionary """
    kind = gi["type"]
    if kind == "Feature":
        return get_geometry_type(gi["geometry"])
    if kind in ("FeatureCollection", "GeometryCollection"):
        # Recurse into the first member geometry.
        return get_geometry_type(gi["geometries"][0])
    return kind
def allLongestStrings(inputArray):
    """Return all strings of maximal length, preserving input order.

    For inputArray = ["aba", "aa", "ad", "vcd", "aba"], the output should be
    allLongestStrings(inputArray) = ["aba", "vcd", "aba"].

    https://codefights.com/arcade/intro/level-3/fzsCQGYbxaEcTr2bL
    """
    # One pass for the maximum, one to collect, instead of repeatedly
    # discarding and rebuilding the result list; default=0 keeps an
    # empty input returning [] without raising.
    longest = max((len(s) for s in inputArray), default=0)
    return [s for s in inputArray if len(s) == longest]
def check_pattern(patterns, pattern_dic):
    """
    desc    check if your input patterns exist in the pattern file
    Input   patterns, pattern_dic
    output  True or False
    """
    for pattern in patterns:
        # Explicit membership test instead of a bare `except:` that would
        # also swallow unrelated errors (KeyboardInterrupt, typos, ...).
        if pattern not in pattern_dic:
            print("the list of the pattern in pattern file")
            for pd in pattern_dic:
                print(pd, pattern_dic[pd]['Name'], pattern_dic[pd]['Pattern'])
            print(
                "\nSome of your input",
                ','.join(patterns),
                "Not in pattern file. Ref. to the above, please try again"
            )
            return False
    return True
def ListToString(alist, useAssert=False):
    """Concatenate a list of strings into a single string.

    alist is the list to be converted into a string.
    If useAssert is True, the function first asserts that every element
    of alist is a string.
    """
    if useAssert:
        assert all(isinstance(item, str) for item in alist), \
            "All elements of input list must be strings"
    return ''.join(alist)
def is_number_offset(c_offset):
    """
    Is the offset a number
    """
    # Offsets 0x66..0x6f (inclusive) are the numeric range.
    return 0x66 <= c_offset and c_offset <= 0x6f
def filter_stems_prob(stem_dic, CUTOFF_PROB):
    """ Function: filter_stems_prob()
        Purpose: Create new dictionary which stores stems with high probability.
        Input: A dictionary of stems; index 1 of each value holds the probability.
        Return: An new dictionary of stems whose probability meets CUTOFF_PROB.
    """
    # The original deleted entries while iterating the dict's items() view,
    # which raises RuntimeError on Python 3; build a new dict instead.
    return {stem: values for stem, values in stem_dic.items()
            if values[1] >= CUTOFF_PROB}
def group_songs(songs):
    """Groups a sequence of songs per dirname.

    The value order is the same as with the passed in songs.
    """
    grouped = {}
    for song in songs:
        dirname = song("~dirname")
        if dirname not in grouped:
            grouped[dirname] = []
        grouped[dirname].append(song)
    return grouped
def inbound_degrees(adj_list: dict) -> dict:
    """Calculate the inbound degree of each node in a graph.

    Args:
        adj_list (dict): An adjacency list. Can be from an undirected or directed graph.

    Returns:
        dict: A dictionary where the key is a graph node \
            and the value is the number of inbound edges.
    """
    indegrees = dict.fromkeys(adj_list, 0)
    for neighbors in adj_list.values():
        for neighbor in neighbors:
            indegrees[neighbor] += 1
    return indegrees
def is_present(actual, expected):
    """Return the elements of ``expected`` that are missing from ``actual``.

    When ``expected`` is None, ``actual`` itself is returned.  The result
    is materialized as a list: the original returned a lazy ``filter``
    object, which is always truthy and therefore inconsistent with the
    list returned on the None branch.
    """
    if expected is None:
        notfound = actual
    else:
        notfound = [item for item in expected if item not in actual]
    return notfound
def sizes(im):
    """Return the dimensions of an image via its ``shape`` attribute
    (a tuple for numpy arrays).

    Arguments:
        im  the image whose dimensions are to be returned
    """
    return im.shape
def _range_to_number(bucket_string):
    """Converts "X-Y" -> "X"."""
    prefix, _, _ = bucket_string.partition('-')
    return int(prefix)
import torch
def compute_input_lengths(padded_sequences: torch.Tensor) -> torch.Tensor:
    """
    Compute the non-padded length of each row in a batch of padded sequences.

    Parameters
    ----------
    padded_sequences
        (N, S) tensor where elements that equal 0 correspond to padding

    Returns
    -------
    torch.Tensor
        (N,) tensor where each element corresponds to the non-padded length of each sequence

    Examples
    --------
    >>> X = torch.tensor([[1, 2, 0, 0, 0], [1, 2, 3, 0, 0], [1, 2, 3, 0, 5]])
    >>> compute_input_lengths(X)
    tensor([2, 3, 5])
    """
    positions = torch.arange(padded_sequences.shape[1]).type_as(padded_sequences)
    # Since positions strictly increase, the last non-padded column wins the argmax.
    masked = (padded_sequences > 0) * positions
    return masked.argmax(1) + 1
import re
def train(text, model):
    """generate or update a word model (dictionary of word:frequency)"""
    # Lower-case runs of letters are the vocabulary; everything else is a separator.
    for word in re.findall('[a-z]+', text.lower()):
        model[word] += 1
    return model
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.