content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def GetPlural(count):
    """Return the plural suffix 's' for *count*, or '' when count is exactly 1."""
    if count == 1:
        return ''
    return 's'
|
3ad7c44c27d15e02d5f6a3de2a08341da8778b01
| 53,912
|
def in_docker() -> bool:
    """
    Detect whether this process is running inside a Docker container.

    Reads PID 1's cgroup file and looks for the substring "docker".
    Any failure to read the file (missing path, permissions, non-Linux
    host) is treated as "not in Docker".

    Returns:
        bool: True if inside a docker
    """
    try:
        with open("/proc/1/cgroup", "rt") as cgroup_file:
            # pylint: disable=W0703
            return "docker" in cgroup_file.read()
    except Exception:
        # Best-effort probe: unreadable cgroup info means "not dockerized".
        return False
|
24e976fc0d29d482a0fb8cb83a62ea4a4825654e
| 53,916
|
import re
def extract_img_and_tag(entry_main, entry_tag):
    """
    Extract main-image features (url, caption, credit) and a keyword-tag
    string from the API dictionary entries.

    Args:
        entry_main: HTML snippet for 'result->field->main' (image markup).
        entry_tag: list of tag dicts from 'result->tags'.

    Returns:
        Tuple of (img_url, img_caption, img_credit, tags) where each image
        field falls back to the string "NULL" and tags is a comma-joined
        lowercase string of keyword titles, or None when there are none.
    """
    def first_match(pattern):
        # First capture group of the pattern, or the "NULL" placeholder.
        hits = re.findall(pattern, entry_main)
        return hits[0] if hits else "NULL"

    img_url = first_match(r"<img src=\"(.+?)\"")
    img_capt = first_match(r"<span class=\"element-image__caption\">(.+?)<")
    img_cred = first_match(r"<span class=\"element-image__credit\">(.+?)<")

    keywords = [el["webTitle"].lower() for el in entry_tag if el["type"] == "keyword"]
    tags = ",".join(keywords) if keywords else None
    return img_url, img_capt, img_cred, tags
|
d51c6d77c18ea85d03b0bc460c67abf483e610f3
| 53,924
|
import pytz
async def get_user_timezone(ctx, user):
    """
    Returns a pytz.timezone for a user if set, returns None otherwise

    Args:
        ctx: command context; must expose an asyncpg-style pool at
            ``ctx.bot.pool`` (uses ``$1`` placeholder syntax).
        user: object with an ``id`` attribute used as the lookup key.

    Returns:
        A pytz timezone object built from the stored zone name, or None
        when the user has no row in the ``timezones`` table.
    """
    query = '''SELECT tz
    FROM timezones
    WHERE "user" = $1;'''
    # fetchrow yields None when no row matches the user's id.
    record = await ctx.bot.pool.fetchrow(query, user.id)
    if record is None:
        return None
    else:
        # record is mapping-like; 'tz' presumably holds an IANA zone name
        # (pytz.timezone raises UnknownTimeZoneError otherwise — TODO confirm).
        return pytz.timezone(record.get('tz'))
|
dec6f058902ffb097b5dadcc17f6debde45e4c20
| 53,927
|
def shift_series(s):
    """Return a copy of series *s* shifted forward one slot, with 0.0 at position 0.

    The input series is left untouched; `shift` already allocates a new
    series, so no explicit copy is needed.
    """
    shifted = s.shift(1)
    shifted.iloc[0] = 0.0
    return shifted
|
5c8937de05da5087095600721f4ca9230a0e4296
| 53,929
|
import unicodedata
def slugify(value, replace_spaces=False):
    """
    Normalize a string for use in file and folder names.

    Replaces forbidden/special characters with underscores, optionally
    converts spaces to underscores, then NFKD-normalizes and encodes to
    ASCII, dropping accents and any remaining non-ASCII characters.

    Args:
        value (str): string to normalize.
        replace_spaces (bool): when True, spaces also become underscores.

    Returns:
        bytes: the ASCII-encoded result, stripped of surrounding whitespace.
        Note: contrary to the original docstring, the value is NOT lowercased.
    """
    # Characters that are unsafe in file/folder names ('~' was listed twice
    # in the original tuple; the duplicate was a no-op and is removed).
    forbidden = ('/', '\\', ':', '$', '&', '!', '*', '~', '`', '"', '+', '>', '<', '?', '|', '¿', '¡')
    for ch in forbidden:
        value = value.replace(ch, '_')
    if replace_spaces:
        value = value.replace(' ', '_')
    # NFKD separates base characters from combining accents so the ASCII
    # encode can silently drop the accents ('café' -> b'cafe').
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    return value.strip()
|
73b55e3cb6d6e00d3f43380af91f4190a0743c27
| 53,930
|
def findstr_in_file(file_path, line_str):
    """Report whether *line_str* occurs as a full line of *file_path*.

    Trailing whitespace on file lines is ignored. Any I/O error (missing
    file, permissions, ...) is deliberately swallowed and reported as
    "not found".
    """
    try:
        with open(file_path, 'r') as handle:
            for candidate in handle:
                if candidate.rstrip() == line_str:
                    return True
    except Exception:
        # best-effort: an unreadable file simply means "not found"
        pass
    return False
|
8cd741fd13bd99f1532951c8b887a04e6bdaffa0
| 53,931
|
def get_particle_time_steps(particle_h5_file):
    """List the time-step keys stored under 'Index' in an HDF5 particle file.

    Args :
        particle_h5_file : mapping-like HDF5 file from particle tracking
    Returns :
        list of step-number keys, in stored order, with the summary
        'Final' entry excluded
    Raises :
        None
    """
    index_group = particle_h5_file['Index']
    # Every key except the aggregate 'Final' entry is a step number.
    return [key for key in index_group.keys() if key != 'Final']
|
3cdfdb4e4241e29db8f78cad5228641bc3e73488
| 53,938
|
def shape_and_validate(y, log_pred_prob):
    """Check that labels *y* and log-probabilities *log_pred_prob* agree in
    shape and type, and return the shape information.

    Parameters
    ----------
    y : ndarray of type int or bool, shape (n_samples,)
        True label per data point.
    log_pred_prob : ndarray, shape (n_samples, n_labels)
        Rows are categorical distributions in log scale. Normalization is
        *not* verified here.

    Returns
    -------
    n_samples : int
        Number of data points.
    n_labels : int
        Number of labels, inferred from `log_pred_prob` (not from `y`).
    """
    n_samples, n_labels = log_pred_prob.shape
    # Degenerate shapes would make the min/max label checks meaningless.
    assert n_samples >= 1
    assert n_labels >= 1
    assert y.shape == (n_samples,)
    assert y.dtype.kind in ("b", "i")
    # Labels must index valid columns of log_pred_prob.
    assert 0 <= y.min()
    assert y.max() < n_labels
    return n_samples, n_labels
|
ae71d0c60f4bd9dc132080c10b2c3c1ef80f1fa9
| 53,944
|
from typing import Callable
from typing import Any
from typing import Dict
import inspect
def _get_default_args(func: Callable[..., Any]) -> Dict[str, Any]:
"""
Get default arguments of the given function.
"""
return {
k: v.default
for k, v in inspect.signature(func).parameters.items()
if v.default is not inspect.Parameter.empty
}
|
d2ffa3ac2babc1aa21ef8737ac8ed1d11c3af034
| 53,946
|
def schedule_parser(time_list: str) -> tuple:
    """Translate a '; '-separated list of 'HH:MM' stamps into seconds.

    :arg
        string like "00:01; 03:51".
        ';' separates entries; ':' separates hours from minutes.
    :return
        tuple of second counts, e.g. (60, 13860)
    """
    def to_seconds(stamp: str) -> int:
        fields = stamp.split(':')
        return int(fields[0]) * 3600 + int(fields[1]) * 60

    return tuple(to_seconds(stamp) for stamp in time_list.split('; '))
|
c57199a22ab84892cd321c2a89131478edbd75a7
| 53,947
|
def validate_passphrases(passphrases, validator):
    """Keep only the passphrases that *validator* accepts (returns truthy for)."""
    return list(filter(validator, passphrases))
|
c262da8c243c7661ac4e5c5a84eed454d0b77e4f
| 53,951
|
def dq(s):
    """Wrap string *s* in double quotes."""
    return '"{}"'.format(s)
|
2f76a375768a4bc4832f35fde837ccbe11493cef
| 53,956
|
def tot_power(Power, N, T):
    """
    Calculate the area of the power spectrum, i.e. the total power in V^2.

    Sums bins 1 .. N//2 - 1 (the DC bin at index 0 and the upper half of
    the spectrum are excluded) and divides by T.

    input:
        - Power: complex, N-dimensional power spectrum of the signal
        - N: integer number of sampling points
        - T: sampling duration divisor
    output:
        - total power (same numeric type as the spectrum entries / T)
    """
    return sum(Power[i] for i in range(1, int(N / 2))) / T
|
14153a9263a294f3a2ded859aff4d77df6815258
| 53,959
|
def scrapeSite(soup):
    """Scrape the ICO website URL from an ICODrops listing page (parsed soup)."""
    right_col = soup.find("div", attrs={"class": "ico-right-col"})
    return right_col.find("a")["href"]
|
33950d306ea9bd66dd5af3aba2d28406de533491
| 53,962
|
def sysadmin_check(user):
    """
    True when *user* has an active account and carries a 'sysadmin' attribute.
    """
    if not user.is_active:
        return False
    return hasattr(user, 'sysadmin')
|
9dc13240c71f01c052eacb233fa2dbabebb16cba
| 53,968
|
def hash_code(key, HASH_SIZE):
    """
    Polynomial (base-33) string hash reduced modulo the table size.

    :param key: given key
    :type key: str
    :param HASH_SIZE: size of hash table
    :type HASH_SIZE: int
    :return: hash value in [0, HASH_SIZE)
    :rtype: int
    """
    result = 0
    for ch in key:
        # hash = (33 * hash + ord(ch)) % M, applied incrementally so the
        # intermediate value never grows beyond 33 * HASH_SIZE + 255.
        result = (33 * result + ord(ch)) % HASH_SIZE
    return result
|
af890f853c4774551526518dcc0018fe46b1d266
| 53,969
|
import functools
def func_if_not_scalar(func):
    """Wrap *func* so that 0-d (scalar) arrays pass through unchanged."""
    @functools.wraps(func)
    def wrapper(array, axis=0):
        return array if array.ndim == 0 else func(array, axis=axis)
    return wrapper
|
19802ebb16f7d5e63991b1ff2e6d87b71ca3bfd9
| 53,970
|
def split_transactions(transactions):
    """
    Split transactions 80/20 into (learn, test) slices.

    :param transactions: the whole transactions list
    :return: (transactions to learn from, transactions to test with)
    """
    cut = int(len(transactions) * 0.8)
    learn_part, test_part = transactions[:cut], transactions[cut:]
    print(f"{len(learn_part)} transactions will be used to learn.")
    print(f"{len(test_part)} transactions will be used to test.")
    return learn_part, test_part
|
4e1b0a2b82005fa040da5f46fdac831621d1a0ec
| 53,972
|
from typing import List
def get_config_indexes(
    config_lines: List[str],
    search_string: str
) -> List[int]:
    """
    Find the index of every config line containing *search_string*.

    Arguments:
        config_lines (List[str]):
            Keepalived config split into lines.
        search_string (str):
            The term to search the (stripped) lines for.
    Return:
        Indexes, in order, of every line where the term appears.
    Example:
        get_config_indexes(["Test", "Value", "Test", "Stop"], "Test") -> [0, 2]
    Useful for locating where each group (e.g. vrrp_instance) starts in
    the config.
    """
    matches: List[int] = []
    for index, line in enumerate(config_lines):
        # Strip so leading/trailing whitespace never hides a match at an edge.
        if search_string in line.strip():
            matches.append(index)
    return matches
|
d76e93ec93ea6f9605e54726f3564f7577313732
| 53,976
|
import re
import textwrap
def dedent_string(s):
    """
    Remove blank/whitespace-only lines at both ends of *s*, then dedent
    the remaining lines (common leading whitespace stripped).
    """
    # [^\S\n] matches whitespace that is not a newline (i.e. line-internal
    # blanks), so these patterns only eat empty/whitespace-only lines.
    without_tail = re.sub(r'(\n[^\S\n]*)*$', '', s)
    without_edges = re.sub(r'^([^\S\n]*\n)*', '', without_tail)
    return textwrap.dedent(without_edges)
|
1599d2187b27b3cfc28ff371940ba6250965317b
| 53,977
|
def format_coord(x, y, X, extent=None):
    """Format hover coordinates (and the underlying array value) for a plot.

    When *extent* is given as (x0, x1, y1, y0), data coordinates are mapped
    to array indices; otherwise coordinates are rounded directly. Out-of-
    range positions render the z value as 'E!'.
    """
    numrows, numcols = X.shape
    if extent is None:
        col = int(x + 0.5)
        row = int(y + 0.5)
    else:
        col = int((x - extent[0]) / (extent[1] - extent[0]) * numcols + 0.5)
        row = int((y - extent[3]) / (extent[2] - extent[3]) * numrows + 0.5)
    if 0 <= col < numcols and 0 <= row < numrows:
        return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, X[row, col])
    return 'x=%1.4f, y=%1.4f, z=E!' % (x, y)
|
dd28987d5c1d4d8ac69bf790be38129984961347
| 53,980
|
def get_rowIndex(uid):
    """
    Expose the ``rowIndex`` attribute of *uid* as the integer row index.
    """
    row_index = uid.rowIndex
    return row_index
|
77c10637b7bcb9736beaccf38148bcdc9f0b61df
| 53,984
|
from typing import List
import shlex
def split_line_elements(text: str) -> List[str]:
    """Break *text* into whitespace-separated fields using shell-like rules.

    Args:
        text (str): Text to break into separate fields.
    Returns:
        List[str]: Non-empty tokens, in order.
    """
    tokens = shlex.split(text.strip())
    # shlex can in principle yield empty tokens (e.g. quoted ""), drop them.
    return [token for token in tokens if token != ""]
|
b4ac18f7b60faf29f042c4f7ff02fb5d85d8749e
| 53,986
|
def func_lineno(func):
    """Best-effort first line number of *func*.

    Checks ``compat_co_firstlineno`` (set by some test tools) first, then
    the legacy Python 2 ``func_code.co_firstlineno``; returns -1 when
    neither attribute exists.
    """
    try:
        return func.compat_co_firstlineno
    except AttributeError:
        pass
    try:
        return func.func_code.co_firstlineno
    except AttributeError:
        return -1
|
27e1a1c141aac4563ba085fdfe6184a7c31c3e8b
| 53,987
|
def remap(minfm, maxfm, minto, maxto, v):
    """Linearly map *v* from the range [minfm, maxfm] onto [minto, maxto]."""
    scaled = (v - minfm) * (maxto - minto) / (maxfm - minfm)
    return scaled + minto
|
35b3f840d18efe5393bf80e8c6f7e89dbeabf860
| 53,993
|
def get_frequency_graph_min_max(latencies):
    """Overall (earliest start, latest end) time across latency frequency records.

    Each record contributes its start time and an end computed as
    start + number_of_bins * bin_step.
    """
    starts = [lat.time_freq_start_sec for lat in latencies]
    ends = [
        lat.time_freq_start_sec + len(lat.time_freq_sec) * lat.time_freq_step_sec
        for lat in latencies
    ]
    return min(starts), max(ends)
|
7326cfe1ab13aa22067becc9d87b7e536508fd4d
| 53,998
|
def _parse_istag(istag):
"""Append "latest" tag onto istag if it has no tag."""
if ":" not in istag:
return f"{istag}:latest"
return istag
|
3477750a744a556e57462da14858b6b1d27ec571
| 53,999
|
def HTMLColorToRGB(colorString):
    """
    Convert a '#RRGGBB' hex color string to a [R, G, B] list of ints (0..255).

    :param: colorString: color in #RRGGBB form (leading '#' optional,
        surrounding whitespace ignored).
    :return: [red, green, blue] integer components.
    :raises ValueError: when the remaining string is not 6 hex digits long.
    """
    text = colorString.strip()
    if text[0] == '#':
        text = text[1:]
    if len(text) != 6:
        raise ValueError("Input #%s is not in #RRGGBB format" % text)
    # Two hex digits per channel.
    return [int(text[i:i + 2], 16) for i in (0, 2, 4)]
|
ffd4009b26c6c87e407887db6f992500493d1faf
| 54,003
|
def binaryForm(n, nbDigs=0):
    """Generate the bits of *n*, most significant first, left-padded with
    zeros to at least *nbDigs* digits.
    """
    # Collect bits least-significant first...
    bits = []
    remaining = n
    while remaining:
        bits.append(remaining & 1)
        remaining >>= 1
    # ...append the padding zeros, then reverse: the zeros end up on the
    # left and the bits come out MSB-first.
    padding = [0] * (nbDigs - len(bits))
    return reversed(bits + padding)
|
78f3fbda7de846a9380d42696a5cead087031d84
| 54,006
|
def map_node_id_to_coordinates(aux_structures, node):
    """
    Look up a node's coordinates in the auxiliary structures.

    Returns the (lat, lon) tuple for *node*.
    """
    # aux_structures is a 3-tuple; only the node table is needed here.
    node_table, _ways, _max_speeds = aux_structures
    entry = node_table[node]
    return (entry['lat'], entry['lon'])
|
926e247d48163e245d75fc7787b8a644d4a464fd
| 54,008
|
def remove_redun_hits_from_within_pos_hit_list(l):
    """Deduplicate (sequence ID, E-value) hit tuples by ID, keeping the
    occurrence with the lowest E-value for each ID.
    """
    # Sanity-check that E-values really are floats before sorting on them.
    assert isinstance(l[0][1], float)
    deduped = []
    seen_ids = set()
    # Ascending E-value order means the first occurrence of an ID is its best.
    for hit in sorted(l, key=lambda hit: hit[1]):
        if hit[0] not in seen_ids:
            seen_ids.add(hit[0])
            deduped.append(hit)
    return deduped
|
9f4ef222fac41460d93c88d6758c11e508b01d8f
| 54,010
|
from typing import List
import re
def split_authors(authors: str) -> List:
    """
    Split author string into authors entity lists.
    Take an author line as a string and return a reference to a list of the
    different name and affiliation blocks. While this does normalize spacing
    and 'and', it is a key feature that the set of strings returned can be
    concatenated to reproduce the original authors line. This code thus
    provides a very graceful degredation for badly formatted authors lines, as
    the text at least shows up.
    """
    # split authors field into blocks with boundaries of ( and )
    if not authors:
        return []
    # Capturing split keeps the parentheses themselves as list items.
    aus = re.split(r'(\(|\))', authors)
    aus = list(filter(lambda x: x != '', aus))
    blocks = []
    if len(aus) == 1:
        blocks.append(authors)
    else:
        # c accumulates the current block; depth tracks paren nesting so
        # only TOP-LEVEL parenthesized comments become their own blocks.
        c = ''
        depth = 0
        for bit in aus:
            if bit == '':
                continue
            if bit == '(':  # track open parentheses
                depth += 1
                if depth == 1:
                    # entering a top-level comment: flush pending name text
                    blocks.append(c)
                    c = '('
                else:
                    c = c + bit
            elif bit == ')':  # track close parentheses
                depth -= 1
                c = c + bit
                if depth == 0:
                    # top-level comment closed: emit it as its own block
                    blocks.append(c)
                    c = ''
                else:  # haven't closed, so keep accumulating
                    continue
            else:
                c = c + bit
        if c:
            blocks.append(c)
    listx = []
    for block in blocks:
        # collapse runs of whitespace to single spaces
        block = re.sub(r'\s+', ' ', block)
        if re.match(r'^\(', block):  # it is a comment
            listx.append(block)
        else:  # it is a name
            # normalize ' and ' / ' & ' separators to commas before splitting
            block = re.sub(r',?\s+(and|\&)\s', ',', block)
            # capturing split keeps the ',' / ':' separators as items, which
            # the suffix-recombination pass below relies on (parts[-1] == ',')
            names = re.split(r'(,|:)\s*', block)
            for name in names:
                if not name:
                    continue
                name = name.rstrip().lstrip()
                if name:
                    listx.append(name)
    # Recombine suffixes that were separated with a comma
    # NOTE(review): the alternation 'Sr\.?\[IV]{2,}' escapes the '[', so it
    # matches a literal backslash-bracket sequence rather than a roman-numeral
    # character class — presumably meant to catch II/III/IV suffixes; in
    # practice only 'Jr'/'Jr.' matches reliably. Verify before relying on it.
    parts: List[str] = []
    for p in listx:
        if re.match(r'^(Jr\.?|Sr\.?\[IV]{2,})$', p) \
            and len(parts) >= 2 \
            and parts[-1] == ',' \
            and not re.match(r'\)$', parts[-2]):
            separator = parts.pop()
            last = parts.pop()
            recomb = "{}{} {}".format(last, separator, p)
            parts.append(recomb)
        else:
            parts.append(p)
    return parts
|
f6c3952f2b8a2a06411d4acfcda97f55adcd1ba9
| 54,012
|
def is_seq(obj):
    """
    True when *obj* is sized (len() works) and is not a string.
    """
    try:
        len(obj)
    except (TypeError, ValueError):
        return False
    return not isinstance(obj, str)
|
ad58f0afdcfd49869eb3bcafe4b82dd878d8b945
| 54,013
|
def _get_stackframe_filename(frame):
"""Return the filename component of a traceback frame."""
# Python 3 has a frame.filename variable, but frames in
# Python 2 are just tuples. This code works in both versions.
return frame[0]
|
68f6b91f587df88df366691ef558533e49a18988
| 54,016
|
def assign(var_name, value, current_params, current_variables):
    """
    Build a single-entry assignment dict for *var_name*.

    Args:
        var_name (str): name of the variable to assign the value to
        value (any): value to assign to that variable
        current_params, current_variables: accepted for interface
            compatibility; not consulted here.
    Returns:
        A dictionary with { var_name: value }
    """
    assignment = {}
    assignment[var_name] = value
    return assignment
|
acc84a5383a63a35b5012c7b15996606f9251b1d
| 54,018
|
def normPath(path: str) -> str:
    """Normalize Windows-style path strings to unix-style forward slashes.

    Handles both escaped (double backslash, windows-python style) and plain
    (single backslash) separators.

    :param path: str, path to normalize.
    :return: the path with all backslash separators replaced by '/'.
    """
    # Collapse literal double backslashes to single ones first, then turn
    # every remaining backslash into a forward slash (also valid on Windows).
    collapsed = path.replace('\\\\', '\\')
    return collapsed.replace('\\', '/')
|
1a858b2ed2ab7a2ea2d8e6e7dd33417aea09cae8
| 54,020
|
def split_stack(stack):
    """
    Partition *stack* frames into (before, after) around the framework.

    Frames whose filename contains 'flow/core' mark the boundary and are
    themselves excluded from both halves.
    """
    before, after = [], []
    past_framework = False
    for frame in stack:
        if 'flow/core' in frame[0]:
            past_framework = True
            continue
        (after if past_framework else before).append(frame)
    return before, after
|
8d4d143f72357ffd6740d6b08412044edc128307
| 54,021
|
import base64
import hashlib
def generate_short_url(original_url: str, timestamp: float):
    """Derive a deterministic 8-character token representing *original_url*.

    The url and timestamp are concatenated, SHA-256 hashed, URL-safe
    base64 encoded, and truncated to 8 characters. The same (url,
    timestamp) pair always yields the same token, which is later used to
    "unshorten" back to the original url.
    """
    digest = hashlib.sha256(f'{original_url}{timestamp}'.encode()).digest()
    token = base64.urlsafe_b64encode(digest).decode()
    return token[:8]
|
4704111df15e9eb0f71204732fc5cf2897904cf1
| 54,022
|
def multiply_matrices(m_1: list, m_2: list) -> list:
    """
    Matrix product of two lists-of-lists (m_1 @ m_2).

    Parameters
    ----------
    m_1, m_2 : list of lists of numbers; the column count of m_1 must
        equal the row count of m_2.

    Returns
    -------
    list of lists holding the product, e.g.
    [[1,2,3],[4,5,6],[7,8,9]] @ [[10,11,12],[13,14,15],[17,18,19]]
    -> [[87, 93, 99], [207, 222, 237], [327, 351, 375]]

    Raises
    ------
    ValueError on a dimension mismatch.
    """
    if len(m_1[0]) != len(m_2):
        raise ValueError("Size mismatch: m_1 columns do not match m_2 rows")
    # Transpose m_2 once so each output cell is a simple dot product.
    columns = list(zip(*m_2))
    return [
        [sum(a * b for a, b in zip(row, col)) for col in columns]
        for row in m_1
    ]
|
4f849eb2c8d9216dcebb7d6a3ae10a4ff61ea80e
| 54,028
|
def response_to_dictionary(response):
    """
    Parse UniProt mapping-tool output into a dictionary.

    The raw bytes look like 'From\\tTo\\nuniprot1\\tpdb1\\n...'. The header
    row is skipped; a UniProt ID may map to several PDB ids.

    Output: dict with UniProt IDs as keys and lists of PDB ids as values.
    """
    lines = response.decode('utf-8').split(sep="\n")
    mapping = {}
    # lines[0] is the 'From\tTo' header; malformed rows (not exactly two
    # tab-separated fields) are skipped.
    for line in lines[1:]:
        pair = line.split('\t')
        if len(pair) == 2:
            mapping.setdefault(pair[0], []).append(pair[1])
    return mapping
|
efb5c952ab4487a9d2fd5c5759f22272a08e1257
| 54,030
|
def fields_for_mapping(mapping):
    """Summarize a table mapping as a list of {'sf': ..., 'db': ...} pairs.

    Plain fields contribute their mapped column directly; lookups
    contribute their lookup key field.
    """
    pairs = [
        {"sf": sf_field, "db": db_field}
        for sf_field, db_field in mapping.get("fields", {}).items()
    ]
    pairs.extend(
        {"sf": sf_field, "db": lookup.get_lookup_key_field()}
        for sf_field, lookup in mapping.get("lookups", {}).items()
    )
    return pairs
|
4a61c8f43a8a88e4c7ba35570eb54383925f0392
| 54,031
|
def get_state(coq):
    """Snapshot coq's state as (root_state, state_id, shallow copy of states)."""
    states_copy = coq.states[:]
    return coq.root_state, coq.state_id, states_copy
|
d04c9fac975bacb2c616b2429b871e159256222e
| 54,032
|
def check_flags(peek):
    """True when every byte of *peek* is a flag byte (0 or 1)."""
    return all(byte in (0, 1) for byte in peek)
|
6e33e5a91b9f0cea146aa0772d139a9da8baddc9
| 54,034
|
def _all_ones(x):
"""Utility function to check if all (3) elements all equal 1"""
return all(n == 1 for n in x)
|
e7d82b7d564ab1c9f94fc875c6652c44108a14b8
| 54,036
|
def get_sentid_from_offset(doc_start_char, doc_end_char, offset2sentid):
    """Find the sentence index whose character span fully contains an entity.

    Args:
        doc_start_char: starting character offset of the entity in the document
        doc_end_char: ending character offset of the entity in the document
        offset2sentid: mapping from (start, end) character spans to sentence
            indexes

    Returns:
        The sentence index of the first span containing the entity, or None.
    """
    for (span_start, span_end), sent_id in offset2sentid.items():
        if span_start <= doc_start_char < doc_end_char <= span_end:
            return sent_id
    return None
|
61ba6729574cd73af0a3508a2964ef6576385156
| 54,037
|
def kmers(dna, k):
    """
    Given a dna sequence return a hash with all kmers of length k and their frequency.
    This method does NOT use the reverse complement, it only checks the strand you supply.
    :param dna: the dna sequence
    :param k: the length of the kmer
    :return: a hash of kmers and abundance
    """
    counts = {}
    # A sequence of length L has L - k + 1 full-length kmers; the original
    # range(len(dna) - k) dropped the final window (off-by-one).
    for i in range(len(dna) - k + 1):
        subseq = dna[i:i + k]
        counts[subseq] = counts.get(subseq, 0) + 1
    return counts
|
464386ebc14b31f54d676e6eecf6d9b469d8dff6
| 54,039
|
def request_vars(*valid_param_fields):
    """
    Decorator maker that lifts the named keyword arguments into the
    ``params`` dict passed to the wrapped method.

    :param valid_param_fields: names of keyword arguments to copy into params.
    :return: a decorator whose wrapper merges those keywords into the
        ``params`` keyword (the originals are still forwarded as well).
    """
    def decorator(func):
        def wrapped(self, *args, **all_args):
            params = all_args.pop('params', {})
            # Copy (not move) the whitelisted keywords into params.
            for key in valid_param_fields:
                if key in all_args:
                    params[key] = all_args[key]
            return func(self, *args, params=params, **all_args)
        return wrapped
    return decorator
|
07d7a673a86358f4fb7033bf9f6fd3683b8f00f7
| 54,040
|
def names_from_file(filename):
    """Read whitespace-separated names from *filename* into a list.

    str.split() with no argument never yields empty strings, so blank
    lines and repeated spaces are handled naturally. No comment syntax
    is supported.
    """
    with open(filename) as handle:
        return handle.read().split()
|
3931fed7f6d657f12062f4dcb0213c193ab394f4
| 54,043
|
def unprocess_image(image):
    """Invert image preprocessing: map values from [-1, 1] back to [0, 255]."""
    rescaled = image / 2 + 0.5   # [-1, 1] -> [0, 1]
    return rescaled * 255.0      # [0, 1] -> [0, 255]
|
68b8bddfa0d33530687e753bc0f2a07c9be4e425
| 54,045
|
import math
def _Scale(quantity, unit, multiplier, prefixes=None, min_scale=None):
"""Returns the formatted quantity and unit into a tuple.
Args:
quantity: A number.
unit: A string
multiplier: An integer, the ratio between prefixes.
prefixes: A sequence of strings.
If empty or None, no scaling is done.
min_scale: minimum power of multiplier corresponding to the first prefix.
If None assumes prefixes are for positive powers only.
Returns:
A tuple containing the raw scaled quantity (float) and the prefixed unit.
"""
if (not prefixes or not quantity or math.isnan(quantity) or
quantity in [float('inf'), float('-inf')]):
return float(quantity), unit
if min_scale is None:
min_scale = 0
prefixes = ('',) + tuple(prefixes)
value, prefix = quantity, ''
for power, prefix in enumerate(prefixes, min_scale):
# This is more numerically accurate than '/ multiplier ** power'.
value = float(quantity) * multiplier ** -power
if abs(value) < multiplier:
break
return value, prefix + unit
|
224d302c5d2f3a9ffc72b34e5d6614eacb33c89f
| 54,046
|
def _convert_value(value, conversions, default):
"""Converts a value using a set of value mappings.
Args:
value: The value to convert.
conversions: A dict giving the desired output for each of a set of possible
input values.
default: The value to return if the input value is not one of the ones
listed in "conversions".
Returns:
The converted value.
"""
return conversions.get(value, default)
|
e673e950889e63081e343bc41921f16de101be61
| 54,056
|
import asyncio
def create_task(coro, *, name=None):
    """asyncio.create_task shim that also works on Python 3.6.

    Falls back to ensure_future when create_task is unavailable; the name
    is applied only where Task.set_name exists (3.8+).
    """
    if not hasattr(asyncio, "create_task"):
        return asyncio.ensure_future(coro)
    task = asyncio.create_task(coro)
    if name is not None and hasattr(task, "set_name"):
        task.set_name(name)
    return task
|
ad0757beca469e6df83923918efa76851d98ab81
| 54,057
|
def normalize(X, mean, std):
    """Standardize *X*: subtract the mean and divide by the standard deviation.

    Args:
        X (np.array): Dataset of shape (N, D)
        mean (np.array): Mean of shape (D, )
        std: Standard deviation of shape (D, )
    """
    centered = X - mean
    return centered / std
|
abeeb7b17a1e8f0079f80992cbf42e330ffe1ce7
| 54,058
|
import requests
def get_video_info(video_id=""):
    """Fetch a video's info dict from the Tournesol API by its video ID.

    Returns the first matching result dict, or 0 when the API reports no
    match for the given ID.
    """
    print("Call API with video_id: ", video_id)
    payload = requests.get(f"https://tournesol.app/api/v2/videos/?video_id={video_id}").json()
    if not payload["count"]:
        print("The video has not been found on Tournesol!")
        return 0
    print("The video has been found on Tournesol.")
    return payload["results"][0]
|
0e7373cb7bdcc7cbc8f2c5885fa183e573541b0b
| 54,059
|
def _make_ssa_name(name):
"""Converts a symbol name (string) into an SSA name, by prepending '%'.
Only used for pretty printing the graph.
"""
return "%" + name
|
3a46e300d5880f9ea482f1df00e983de38b2d51c
| 54,060
|
import re
import json
def get_bam_library_info(bam):
    """Collect library_info dicts from a BAM header's comment (CO) lines.

    Args:
        bam (pysam.AlignmentFile): BAM file whose header carries
            'library_info:<json>' comment lines.
    Returns:
        list of dicts parsed from the JSON payloads, in header order.
    """
    pattern = re.compile(r'^library_info:(.+)$')
    libraries = []
    for comment in bam.header['CO']:
        match = pattern.match(comment)
        if match:
            libraries.append(json.loads(match.group(1)))
    return libraries
|
cb41eeecf6545c3d1961a4275417e85e4a624d2c
| 54,062
|
def get_href_id(prop):
    """Pick the key property info (url and ID) out of a trademe search element."""
    return {"href": prop["href"], "id": prop["id"]}
|
858ef1b116a08595b03c3b906b3d636d0072a67c
| 54,064
|
def minus(a, b):
    """
    Asymmetric set difference: the items of 'a' that are not in 'b'.

    Thin, more legibly named wrapper over set.difference — note that,
    unlike the '-' operator, difference() also accepts any iterable for
    'b', not just a set. Order of arguments matters.
    """
    remainder = a.difference(b)
    return remainder
|
e4f92e57fbe12d9aa5167fe1c0f031ede0d6fba9
| 54,065
|
def checkEqual(vec, val):
    """
    True when every entry of *vec* equals *val* (vacuously true when empty).
    """
    return all(entry == val for entry in vec)
|
c9fcf5489d2cac5ea21181c3833b35dc073a3674
| 54,066
|
def xor_strings(b1, b2):
    """XOR two equal-length byte strings; raises ValueError on a length mismatch."""
    if len(b1) != len(b2):
        raise ValueError('Two strings not of equal length')
    return bytes(x ^ y for x, y in zip(b1, b2))
|
969c0a934099a17aa49bbebea0a8a811c8b0b37a
| 54,069
|
from collections import defaultdict
def res_contacts(contacts):
    """
    Collapse atomic contacts into unique per-frame residue contacts.

    The interaction type and any third/fourth atoms (e.g. water bridges)
    are discarded; within each pair the residues are ordered so the
    lexicographically smaller comes first ('A:ARG' before 'A:CYS').

    Example
    -------
    res_contacts([
        [0, 'hbbb', 'A:ASN:108:O', 'A:ARG:110:N'],
        [0, 'vdw', 'A:ARG:110:N', 'A:ASN:108:CB']
    ])
    # => [[0, 'A:ARG:110', 'A:ASN:108']]

    Parameters
    ----------
    contacts: list of lists, each [frame-num, i-type, atom1, atom2, ...]

    Returns
    -------
    List of [frame, residue1, residue2] entries, sorted by frame.
    """
    # Deduplicate pairs per frame via a set keyed on frame number.
    frame_pairs = defaultdict(set)
    for contact in contacts:
        frame = contact[0]
        # 'chain:resname:resid:atom' -> 'chain:resname:resid'
        first = ":".join(contact[2].split(":")[0:3])
        second = ":".join(contact[3].split(":")[0:3])
        pair = (second, first) if second < first else (first, second)
        frame_pairs[frame].add(pair)
    result = []
    for frame in sorted(frame_pairs):
        result.extend([frame, res1, res2] for res1, res2 in frame_pairs[frame])
    return result
|
42b4736675afa0d76ecc14d663ea261a870430b6
| 54,073
|
def pupil_to_int(pupil):
    """Truncate pupil parameters ((cx, cy), (ax, ay), angle) to ints for drawing."""
    center = (int(pupil[0][0]), int(pupil[0][1]))
    axes = (int(pupil[1][0]), int(pupil[1][1]))
    return (center, axes, int(pupil[2]))
|
2b289dc43c7c65c25b1ad901aa31055f62d67a59
| 54,076
|
def check_key(dictionary, key, default_value):
    """
    Returns the value assigned to 'key' in the ini dictionary.

    Parameters:
        dictionary : [dict]
        key : [string|int]
        default_value : [string]
    Output: dictionary[key], or default_value when the key is absent.
    """
    return dictionary.get(key, default_value)
|
058b3e78503f7427ce39d48afde6c9fb80c8d0d7
| 54,078
|
def get_list_duplicates(in_list):
    """List the items occurring more than once, ignoring the 'N/A' placeholder."""
    seen, repeated = set(), set()
    for value in in_list:
        # 'N/A' is a placeholder and never counts as a duplicate.
        if value != 'N/A' and value in seen:
            repeated.add(value)
        seen.add(value)
    return list(repeated)
|
a9b7fa3f996f0109f266d31f80fd809cef88333a
| 54,079
|
import string
import random
def passwordWithRequirements(lenght=12):
    """Suggest a random password containing at least one uppercase letter,
    one lowercase letter, one digit, and one symbol.

    Args:
        lenght: int >= 4, the desired password length. (Spelled "lenght"
            for backward compatibility with existing callers.)

    Returns:
        str of ASCII characters of exactly *lenght* characters.

    Raises:
        TypeError: when lenght is not an int.
        ValueError: when lenght < 4.
    """
    if not isinstance(lenght, int):
        raise TypeError("lenght must be an int greater than or equal to 4")
    if lenght < 4:
        raise ValueError("lenght must be an int greater than or equal to 4")
    # Base of lenght-4 characters drawn from all valid character classes.
    alphabet = string.ascii_letters + string.digits + string.punctuation
    password = ''.join(random.choice(alphabet) for _ in range(lenght - 4))
    # Insert one character from each required class at a random position.
    # Using randint(0, len(password)) fixes the original randint(0, lenght-5),
    # which raised ValueError for lenght == 4 and could never insert at the
    # very end of the string.
    for pool in (string.ascii_lowercase, string.ascii_uppercase,
                 string.digits, string.punctuation):
        position = random.randint(0, len(password))
        password = password[:position] + random.choice(pool) + password[position:]
    return password
|
d0d69050c39ed577acea0680d54eaf8905d801ce
| 54,080
|
def decodeObjectQualifiersList(objectQualifiers):
    """Decode a G-AIRMET object qualifiers list into text abbreviations.

    The qualifiers arrive as a list of 3 integers (24 bits total); most
    bits are reserved, with the last byte holding most defined values.

    Args:
        objectQualifiers (list): List of 3 integers representing the
            object qualifiers list from the message.

    Returns:
        list: Text abbreviations for each set qualifier bit, in order:
            UNSPCFD (unspecified), ASH, DUST, CLOUDS, BLSNOW (blowing
            snow), SMOKE, HAZE, FOG, MIST, PCPN (precipitation).
    """
    # (byte index, bit mask, abbreviation) for every defined qualifier bit.
    qualifier_bits = (
        (0, 0x80, 'UNSPCFD'),
        (1, 0x01, 'ASH'),
        (2, 0x80, 'DUST'),
        (2, 0x40, 'CLOUDS'),
        (2, 0x20, 'BLSNOW'),
        (2, 0x10, 'SMOKE'),
        (2, 0x08, 'HAZE'),
        (2, 0x04, 'FOG'),
        (2, 0x02, 'MIST'),
        (2, 0x01, 'PCPN'),
    )
    return [name for byte_idx, mask, name in qualifier_bits
            if (objectQualifiers[byte_idx] & mask) != 0]
|
01ab66c594a50389f1993b13e25951eb9ca6eca2
| 54,082
|
import typing
def filesystem_info(path: str) -> typing.Dict[str, typing.Any]:
    """Return fstab-style fields for the filesystem containing *path*.

    Parses /proc/mounts (which has fstab format) and selects the longest
    mount point that is a whole-path-component prefix of *path*.

    Args:
        path: Absolute path whose filesystem should be identified.

    Returns:
        Dict with keys fs_spec, fs_file, fs_vfstype, fs_mntops,
        fs_freq, fs_passno for the best-matching mount entry.

    Raises:
        FileNotFoundError: if no mount point matches *path*.
    """
    with open("/proc/mounts", "r") as f:
        mount_lines = f.read().strip().split("\n")
    # Track the longest mount point that is a prefix of our path.
    best = None
    for mnt in mount_lines:
        fields = mnt.split()
        mountpoint = fields[1]
        # BUG FIX: a bare startswith() check matched "/home2" against the
        # "/home" mount. Match only on whole path components.
        prefix = mountpoint if mountpoint.endswith("/") else mountpoint + "/"
        if path == mountpoint or path.startswith(prefix):
            if best is None or len(mountpoint) > len(best[1]):
                best = fields
    if best is None:
        raise FileNotFoundError(path)
    return {
        "fs_spec": best[0],
        "fs_file": best[1],
        "fs_vfstype": best[2],
        "fs_mntops": best[3],
        "fs_freq": best[4],
        "fs_passno": best[5],
    }
|
3e32d19d2874191a642cd40757a729d6dc3f9743
| 54,086
|
from operator import mod
def is_gregorian_leap_year(g_year):
    """Return True if Gregorian year 'g_year' is leap."""
    # Divisible by 4, excluding the non-leap century years (xx00 where
    # the year mod 400 is 100, 200 or 300).
    divisible_by_four = g_year % 4 == 0
    century_exception = g_year % 400 in (100, 200, 300)
    return divisible_by_four and not century_exception
|
2a7cc6fcba9eee503d007679695b87ee04e0c85f
| 54,087
|
from typing import Optional
def _default_progress_str(progress: int, total: Optional[int], final: bool):
"""
Default progress string
Args:
progress: The progress so far as an integer.
total: The total progress.
final: Whether this is the final call.
Returns:
A formatted string representing the progress so far.
"""
prefix = "completed " if final else ""
if total is not None:
percent = (100.0 * progress) / total
return "%s%d / %d (%3d%%)" % (prefix, progress, total, int(percent))
else:
return "%s%d / %d" % (prefix, progress, progress)
|
0aa2daf0e1750fb534142125af253e036bb65148
| 54,096
|
def concatenate_replacements(text, replacements):
    """Merge an ordered sequence of rewrites into a single replacement.

    Args:
      text: Text to be rewritten.
      replacements: An iterable of (new_text, start of replacement, end)

    Returns:
      A new replacement tuple (new_text, start, end) covering all spans.

    Raises:
      ValueError: if a span is inverted (start > end) or spans overlap.
    """
    pieces = []
    span_start = None
    prev_end = None
    for new_text, start, end in replacements:
        if start > end:
            raise ValueError(
                'Rewrites have invalid spans: start=%r > end=%r' % (start, end))
        if span_start is None:
            span_start = start
        if prev_end is not None:
            # Keep the untouched text between consecutive spans.
            pieces.append(text[prev_end:start])
            if prev_end > start:
                raise ValueError(
                    'Rewrites overlap: end > next start: '
                    '{last_end} > {start}. '
                    '(Was about to write: text[{start}:{end}] (== {old!r}) <- {new!r})'
                    .format(
                        start=start,
                        end=end,
                        last_end=prev_end,
                        old=text[start:end],
                        new=new_text))
        pieces.append(new_text)
        prev_end = end
    return ''.join(pieces), span_start or 0, prev_end or 0
|
48046d59273a542e36efb967b30dc4b82b1c104d
| 54,098
|
def check_availability(lower_bound: int, upper_bound: int, high_lower_bound: int, map_array: list,
                       coordinates: tuple, size: int, scale_factor: int):
    """
    Function checks if there is enough space on map to generate structure of given size
    at given coordinates
    Args:
        lower_bound (int): elevation level above which structure can be generated
        upper_bound (int): elevation level below which structure can be generated
        high_lower_bound (int): elevation level above which structure can be generated
            despite the upper bound
        map_array (`obj`:list: of `obj`:list: of `obj`:int:): a list representing the map
        coordinates (`obj`:tuple: of `obj`:int:): a tuple with coordinates of central point
            of the structure
        size (int): size of the structure meaning the size in every direction from central point
        scale_factor (int): scale factor between structure pixel size and map pixel size, see
            generator.generate_sites()
    Returns
        Bool value indicating if there's enough free space for structure
    """
    # Bounds check: reject structures whose bounding box would leave the map.
    # NOTE(review): the low-side check uses structure coordinates while the
    # high-side check divides by scale_factor — presumably coordinates are in
    # structure-pixel units; confirm against generator.generate_sites().
    if int((coordinates[0] + size)/scale_factor) >= len(map_array) or (coordinates[0] - size) < 0 or \
            int((coordinates[1] + size)/scale_factor) >= len(map_array[0]) or (coordinates[1] - size) < 0:
        return False
    # Every cell under the structure's footprint must be within
    # (lower_bound, upper_bound] OR above high_lower_bound.
    # NOTE(review): range(-size, size) excludes +size itself — the footprint
    # is asymmetric by one pixel; confirm this is intended.
    for xk in range(-size, size):
        for yk in range(-size, size):
            if not (upper_bound >= map_array[int((coordinates[0] + xk) / scale_factor)]
                    [int((coordinates[1] + yk) / scale_factor)] > lower_bound or
                    map_array[int((coordinates[0] + xk) / scale_factor)]
                    [int((coordinates[1] + yk) / scale_factor)] > high_lower_bound):
                return False
    return True
|
b97d77d3ee1533a435694844cc21772e49bb7481
| 54,105
|
from typing import Optional
from typing import List
from typing import Callable
def generate_fake_wait_while(*, status: str, status_info: Optional[List[str]] = None) -> Callable:
    """Generate a wait_while function that mutates a resource with the specified status info.

    Args:
        status: Status string to assign to the passed resource.
        status_info: Status-info list to assign; defaults to [].

    Returns:
        A callable accepting a ``module=`` or ``execution=`` keyword
        argument; it sets ``status`` and ``status_info`` on that resource
        and returns it (None if neither keyword is supplied).
    """
    if status_info is None:
        status_info = []

    def _wait_while_status(**kwargs):
        # We need either a `module` or an `execution` argument; `module`
        # wins if both are given (matching the original precedence).
        resource = kwargs.get("module", kwargs.get("execution"))
        if resource is None:
            return None
        resource.status = status
        # BUG FIX: the execution branch previously assigned `status`
        # (a string) to status_info instead of the status_info list.
        resource.status_info = status_info
        return resource

    return _wait_while_status
|
93381a16c68d74161b112bb47a57f2a745678531
| 54,108
|
def Byte(value):
    """Encode a single integer (0-255) as a one-byte bytes object."""
    return bytes([value])
|
aedfae62826571fe631f62438205f6204a87a04c
| 54,111
|
def create_centroid_ranges(centroids, tol=15):
    """
    Build [low, high] BGR bounds around each legend centroid so that
    slight color variations in the layout still match.

    :param centroids: legend colors identified from get_centroids
    :param tol: int, per-channel tolerance in pixel values; a pixel still
        counts as a centroid if each of B, G, R is within tol. Defaults
        to 15.
    :return: List of [low, high] bounds for each centroid
    """
    return [
        [[b - tol, g - tol, r - tol], [b + tol, g + tol, r + tol]]
        for b, g, r in centroids
    ]
|
56b3922801dfe75837331e868406964d7ac18f43
| 54,112
|
import re
def _parse_log_entry(l):
"""
Returns an re match object with the contents:
First group: timestamp
2: module name
3: logging level
4: message
"""
pattern = re.compile(r'(\d*-\d*-\d* \d*:\d*:\d*,\d*) - ([\w.]+) - (\w*) - (.+)')
return pattern.match(l)
|
1693ccf644e530577d126b9af4f1fa891c4659ef
| 54,114
|
import re
def check_username(username):
    """
    Checks if a username is valid: 3-30 characters drawn from letters,
    digits, underscore, dot and hyphen.

    :param username: username to check
    :return: True if the username is valid, false otherwise
    """
    pattern = r"[a-zA-Z0-9_\.\-]{3,30}"
    return re.fullmatch(pattern, username) is not None
|
ccdfa594f7bf51a47c31ff2d7dc92e9823ee8bf4
| 54,118
|
from typing import Dict
from typing import List
def sort_dict_desc(word_dict: Dict) -> List:
    """Sort the dictionary items by value in descending order.

    :param word_dict: the dictionary of word.
    :return: sorted list of (key, value) pairs, largest value first.
    """
    # Sort ascending by value, then reverse the whole list (this reverses
    # the relative order of equal-valued keys too, matching the original).
    ascending = sorted(word_dict.items(), key=lambda kv: kv[1])
    return ascending[::-1]
|
87ea93145b7d4d7a867845a52ae3ef1c8403075d
| 54,121
|
import hashlib
import six
def md5files(lfp):
    """Returns the MD5 of a list of key:path.

    The MD5 object is updated with: key1, file1, key2, file2, ..., keyN,
    fileN, with the keys sorted alphabetically.

    Args:
        lfp: iterable of (key, path) pairs; keys are latin-1-encodable
            strings, paths point to readable files.

    Returns:
        str: hex digest of the combined content.
    """
    digest = hashlib.md5()
    for key, path in sorted(lfp, key=lambda kp: kp[0]):
        # six.b() was just a Python-2 compatibility shim for latin-1
        # encoding; encode directly and drop the dependency.
        digest.update(key.encode('latin-1'))
        with open(path, 'rb') as f:
            # Stream fixed-size chunks instead of materializing a list of
            # lines; the digest sequence is identical.
            for chunk in iter(lambda: f.read(65536), b''):
                digest.update(chunk)
    return digest.hexdigest()
|
309d2dbec88089e78683f6d5dabbd5c2f7826b8f
| 54,123
|
def _parse_numbered_syllable(unparsed_syllable):
"""Return the syllable and tone of a numbered Pinyin syllable."""
tone_number = unparsed_syllable[-1]
if not tone_number.isdigit():
syllable, tone = unparsed_syllable, '5'
elif tone_number == '0':
syllable, tone = unparsed_syllable[:-1], '5'
elif tone_number in '12345':
syllable, tone = unparsed_syllable[:-1], tone_number
else:
raise ValueError("Invalid syllable: %s" % unparsed_syllable)
return syllable, tone
|
f9829ba0b14e9cdcd7c78f19a5159a1cbfd99716
| 54,125
|
def is_valid_rtws(rtws):
    """
    Given a clock-valuation timedwords with reset-info, determine its
    validation: between resets, clock valuations must not decrease.
    """
    if len(rtws) <= 1:
        return True
    prev_time = rtws[0].time
    prev_reset = rtws[0].reset
    for word in rtws[1:]:
        # Without an intervening reset the clock can only move forward.
        if not prev_reset and word.time < prev_time:
            return False
        prev_reset = word.reset
        prev_time = word.time
    return True
|
726b44af21a083ae64a260e5214204c53416ffe5
| 54,128
|
def _to_bytes(value, encoding='utf-8'):
"""Encodes string to bytes"""
return value.encode(encoding)
|
d7b0bebe488fb9f42b6f08e1dc41cffb9063eb11
| 54,140
|
def convert_genre(genre: str) -> str:
    """Return the HTML snippet for the genre of a word (empty if none)."""
    if not genre:
        return ""
    return f" <i>{genre}.</i>"
|
443ba5217c7ff916e462b3d6f6931e8e60c9b78b
| 54,147
|
import itertools
def _region_interval_to_dimensions(old_project_data):
"""
From
interval_definitions:
- name: annual
description: ''
filename: annual_intervals.csv
region_definitions:
- name: national
description: ''
filename: uk_nations_shp/regions.shp
- name: oxfordshire
description: ''
filename: oxfordshire/regions.geojson
To
dimensions:
- name: annual
description: ''
elements: annual_intervals.csv
- name: national
description: ''
elements: uk_nations_shp/regions.shp
- name: oxfordshire
description: ''
elements: oxfordshire/regions.geojson
"""
dimensions = []
definitions = itertools.chain(
old_project_data['interval_definitions'],
old_project_data['region_definitions']
)
for definition in definitions:
dimensions.append({
'name': definition['name'],
'description': definition['description'],
'elements': definition['filename']
})
old_project_data.pop('interval_definitions')
old_project_data.pop('region_definitions')
old_project_data['dimensions'] = dimensions
return old_project_data
|
cab19b4d2c02766ffa313c8c1960388975751c51
| 54,149
|
def resolve_cardinality(class_property_name, class_property_attributes, class_definition):
    """Resolve class property cardinality from yaml definition.

    Returns a '<min>..<max>' string, e.g. '1..1' or '0..m'.
    """
    is_required = (
        class_property_name in class_definition.get('required', [])
        or class_property_name in class_definition.get('heritable_required', []))
    min_count = '1' if is_required else '0'
    if class_property_attributes.get('type') == 'array':
        max_count = class_property_attributes.get('maxItems', 'm')
        # NOTE(review): for arrays, min_count is overwritten by minItems
        # (default 0) even when the property is required — preserved as-is;
        # confirm this is intended.
        min_count = class_property_attributes.get('minItems', 0)
    else:
        max_count = '1'
    return f'{min_count}..{max_count}'
|
fed27e5c326842f5895f98e3f5e9fb5852444b07
| 54,153
|
def get_col(square: tuple) -> list:
    """
    Gets all the squares in the column this square is in.

    :param square: A tuple (row, column) coordinate of the square
    :return: A list containing all the tuples of squares in the column
    """
    column = square[1]
    return [(row, column) for row in range(9)]
|
4c77cb3cba7b2bc19cdff2eb9763b0c95e9f948f
| 54,162
|
def remove_apostrophes(df, from_columns=None):
    """
    Remove apostrophes from column names and from the data in the given
    columns; column names are also stripped of leading/trailing
    whitespace. Operates in-place on the DataFrame (also returned).

    Parameters
    ----------
    df : pandas.DataFrame
    from_columns : Collection, optional
        Columns whose string values should have apostrophes removed.

    Returns
    -------
    pandas.DataFrame
    """
    df.columns = df.columns.str.replace("'", "").str.strip()
    for column in (from_columns or []):
        df[column] = df[column].str.replace("'", "")
    return df
|
82f41c807afda33f14adca29d8d3e6bb106e7833
| 54,163
|
from typing import List
def define_frontal_chest_direction(robot: str) -> List:
    """Define the robot-specific frontal chest direction in the chest frame."""
    if robot == "iCubV2_5":
        # The z axis of the iCubV2_5 chest frame points forward.
        return [0, 0, 1]
    raise Exception("Frontal chest direction only defined for iCubV2_5.")
|
5bf61e3f1291780dddf6cf3b9bb07ce1fb4b13c1
| 54,164
|
def test_consumer(test_amqp):
"""Return a consumer created by the test AMQP instance."""
consumer = test_amqp.consumer()
return consumer
|
cb9d18952e4a1a7719f35c5813ccee9fb74d32ee
| 54,168
|
def _compare_address_lists(list_one, list_two):
"""
Counts the number of elements in list one that are not in list two.
:param list_one: list of strings to check for existence in list_two
:param list_two: list of strings to use for existence check of
list_one parts
:return: the count of items in list_one that are missing from list_two
"""
diff = 0
for part in list_one:
if part not in list_two:
diff += 1
return diff
|
ac8e910b0ac54c6402ca980b4d238e2c27fe90e5
| 54,172
|
def get_diff(url, client):
    """Fetch the file diff at *url* using *client*."""
    response = client.get(url)
    return response
|
e2fce7d01f4eee2e806393c865e54dd5ffe1a339
| 54,174
|
def sparse_dot_product(A, B):
    """
    Compute the dot product of sparse weighted sets represented by dicts.

    Runs in O(n), n being the size of the smallest set.

    Args:
        A (Counter): First weighted set.
        B (Counter): Second weighted set.

    Returns:
        float: Dot product of A & B.
    """
    # Iterate over the smaller mapping, looking keys up in the larger one.
    small, large = (A, B) if len(A) <= len(B) else (B, A)
    total = 0.0
    for key, weight in small.items():
        other_weight = large.get(key)
        if other_weight is not None:
            total += weight * other_weight
    return total
|
128d221e2e37125364c31d85c8aa5fb5c98d94c3
| 54,175
|
def rect_offset(rect, offset):
    """ Offsets (grows) an (x, y, w, h) rectangle by *offset* in each direction. """
    new_x = rect[0] - offset
    new_y = rect[1] - offset
    # Width/height grow by the offset on both sides.
    return (new_x, new_y, rect[2] + 2 * offset, rect[3] + 2 * offset)
|
ce2bb154acf64e153031c11a889f5eeebc8b61db
| 54,179
|
def _convert_delimiters_to_regex(*delimiters):
"""Converts a list of strings into a regex
Arguments
------------------
*delimiters : str
The delimiters as that should be converted into a regex
Returns
------------------
regex : str
The converted string
"""
return "|".join(delimiters)
|
7d2c26b2fe563907040872c0f33452a3b0ccd072
| 54,182
|
def shrink_sides(image, ts=0, bs=0, ls=0, rs=0):
    """Shrinks/crops the image by trimming a margin from each side.

    params:
        image: A numpy ndarray, which has 2 or 3 dimensions
        ts: An integer, rows to trim from the top
        bs: An integer, rows to trim from the bottom
        ls: An integer, columns to trim from the left
        rs: An integer, columns to trim from the right
    return: A numpy ndarray with the same number of dimensions as image
    """
    height = image.shape[0]
    width = image.shape[1]
    return image[ts:height - bs, ls:width - rs]
|
6858a75516626affb3d65b9c8aad8bd207cfe495
| 54,186
|
from pathlib import Path
def get_files(source, extensions=None):
    """Get all files under *source* matching the given extensions.

    Args:
        source: Root folder to search (recursively).
        extensions (list): List of extensions to match; defaults to
            ['yaml', 'yml', 'json'].

    Returns:
        list: pathlib.Path objects for all matching files relative to
        the source folder.
    """
    # Avoid a mutable default argument (shared list across calls);
    # the effective default is unchanged.
    if extensions is None:
        extensions = ('yaml', 'yml', 'json')
    root = Path(source)
    all_files = []
    for ext in extensions:
        all_files.extend(root.rglob(f'*.{ext}'))
    return all_files
|
44c172d0211f147ced5c5f0cb249686b89cb3593
| 54,188
|
def get_subj_ids(data):
    """
    Return the unique subject ids from the supplied pandas DataFrame
    (expects a 'subj_idx' column).
    """
    subj_column = data["subj_idx"]
    return subj_column.unique()
|
94e577e6a2bcd7d55669eae1b01fa8f47b9f6ad3
| 54,189
|
def clean_table_query(conn, table):
    """
    Query the specified table with SQL to create clean dataset.
    :param conn: the Connection object
    :param table: the name of table
    :return: cleaned rows dataset
    """
    cur = conn.cursor()
    # Only 'question2' is supported; any other table name prints a message
    # and falls through, returning None implicitly.
    if table == 'question2':
        # Cleaning: upper-case the country code, strip the '%' sign from the
        # share column, and keep only rows with a 2-character country value.
        cur.execute('''SELECT upper(country) as alpha_2, hours_bucket, installs, country_total_installs,
                    replace(share_within_country,"%","") as share_within_country
                    FROM question2
                    WHERE length(country) = 2''')
        print("Success - table question2 was queried to produce a clean dataset.")
        rows = cur.fetchall()
        return rows
    else:
        print("Sorry, this is not possible in current implementation.")
063485ec8c43e1e4add883737034c8c1beb27f15
| 54,190
|
def clean_text(df, text):
    """
    Normalize the *text* column of *df* in place:
      * drop characters outside the allowed set
      * spell out '@' signs as the word 'at'
      * collapse whitespace runs into single spaces
      * lowercase everything
    Returns the (mutated) DataFrame.
    """
    col = df[text]
    col = col.str.replace(r'[^A-Za-z0-9()!?@\s\'\`\*\"\_\n\r\t]', '', regex=True)
    col = col.str.replace(r'@', 'at', regex=True)
    col = col.replace(r'\s+|\\n', ' ', regex=True)
    df[text] = col.str.lower()
    return df
|
4c8f4b8dae76ef47314ef57b62f51f72ceaacf34
| 54,193
|
def get_base_encoding(encoding: str) -> str:
    """
    Return the base encoding needed to create the required encoding.
    ! Adapt if new encoding is added !
    :param encoding: required encoding
    :return: base encoding
    """
    known_encodings = {'raw', '012', 'onehot', '101'}
    if encoding not in known_encodings:
        raise Exception('No valid encoding. Can not determine base encoding')
    return 'raw'
|
7ba27b9bfc44445d90d2b3efec2943f8bd3dfddc
| 54,199
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.