content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from typing import AnyStr
import codecs
def auto_encode(string: AnyStr, encoding: str = "utf-8", errors: str = "strict") -> bytes:
    """Encode *string* to bytes using the named codec.

    bytes input is returned unchanged; str input is encoded; any other
    value is converted with str() first and then encoded.

    :param string: The text to encode
    :param encoding: The encoding-type to use; default is `utf-8`
    :param errors: optional; pass `replace` or `namereplace` if you don't want
        the default `strict` for how to process errors
    :return: The encoded text
    """
    # Look up the encoder before the type checks so an unknown encoding
    # raises LookupError even for bytes input (matches prior behaviour).
    encoder = codecs.getencoder(encoding=encoding)
    if isinstance(string, bytes):
        return string
    text = string if isinstance(string, str) else str(string)
    # Bug fix: `errors` was accepted and documented but never forwarded,
    # so e.g. errors="replace" had no effect.
    return encoder(text, errors)[0]
|
ff5854e843f718adaec728ac11083cfa22604e9e
| 73,544
|
def list_transit_times(t0, period, steps_or_num_transits=range(0, 10), return_string=False):
    """List transit times for the supplied ephemeris.

    :param t0: reference transit time
    :param period: orbital period
    :param steps_or_num_transits: iterable of epoch numbers, or an int N
        meaning epochs 0..N-1 (default: epochs 0..9)
    :param return_string: if True, return a comma-joined string instead of a list
    """
    if isinstance(steps_or_num_transits, int):
        epochs = range(steps_or_num_transits)
    else:
        epochs = steps_or_num_transits
    transit_times = [t0 + period * epoch for epoch in epochs]
    if return_string:
        return ",".join(str(t) for t in transit_times)
    return transit_times
|
9cc74861f02f05412ef4ac71202b210862087734
| 73,545
|
import re
def get_version(file_path):
    """
    Return the project version tag.

    Parameters
    ----------
    file_path : str
        The path to the project __init__.py file.

    Returns
    -------
    version : str
        The project version tag.

    Raises
    ------
    RuntimeError
        If no valid version specifier is found in the file.
    """
    with open(file_path, mode='r') as init_file:
        init_cont = init_file.read()
    # PEP 440: [N:]N(.N)*[{a|b|c|rc}N][.postN][.devN]
    # The version itself is wrapped in one capturing group so extraction no
    # longer relies on the fragile fixed-offset slice group(0)[15:-1],
    # which silently corrupted the result if the spacing ever changed.
    ver_spec = (r'__version__ = ["\']((?:\d+:)?\d+(?:\.\d+){1,2}'
                r'(?:(?:a|b|c|rc)\d+)?(?:\.post\d+)?(?:\.dev\d+)?)["\']')
    ver_match = re.search(ver_spec, init_cont)
    if not ver_match:
        raise RuntimeError('Unable to find (valid) version specifier in {0!r}'
                           .format(file_path))
    return ver_match.group(1)
|
4ccfaadf78466f4124aa3fb2c3c6249257c03e93
| 73,546
|
def safe_unicode(obj):
    """Safe conversion to the Unicode string version of the object.

    bytes are decoded as UTF-8.  In Python 3, ``str(b"x")`` returns the
    "b'x'" repr and never raises UnicodeDecodeError, so the old
    try/except around str() was dead code that silently returned the
    repr for bytes input.
    """
    if isinstance(obj, bytes):
        return obj.decode("utf-8")
    return str(obj)
|
f6a592f7ea0de5179f1b8a91e0bd75c9a5d522df
| 73,547
|
import random
def e2e_slot_to_hotel_slot(slot):
    """Map an E2E slot onto a slot in the Hotel domain.

    When the E2E slot corresponds to several Hotel-domain tokens, one of
    them is picked uniformly at random; unknown slots pass through
    unchanged.
    """
    slot_map = {
        'food': ['address', 'phone', 'postcode'],
        'customerrating': ['address', 'phone', 'postcode'],
        'familyfriendly': ['acceptscreditcards', 'dogsallowed', 'hasinternet'],
        'eattype': ['type']
    }
    candidates = slot_map.get(slot)
    if candidates is None:
        return slot
    if len(candidates) == 1:
        return candidates[0]
    return random.choice(candidates)
|
b8fc7f83d8ce678ebb8df94732f2a1f2f49ceae1
| 73,555
|
def prepare_region_data(df, scale_by=1):
    """Prepare regional data so that it can be used in train_model_region.

    For every region in ``df.region_name``, rows with a positive
    ``cases_pos_hospitalized_icu`` count are selected and split into:

    - x_region: the case counts, excluding the last row
    - x_t_region / y_region: the case-count changes, excluding the first
      row.  NOTE(review): these two are computed identically in the
      original code — confirm whether x_t_region was meant to use a
      different column.

    Args:
        df: DataFrame with ``region_name``, ``cases_pos_hospitalized_icu``
            and ``cases_pos_hospitalized_icu_change`` columns.
        scale_by: divisor applied to every series (default 1).

    Returns:
        (x_region, x_t_region, y_region): dicts keyed by region name.
    """
    x_region = {}
    x_t_region = {}
    y_region = {}
    # The unused enumerate() index and the three copies of the same row
    # mask were removed; the selection is now computed once per region.
    for region in df.region_name.unique():
        mask = (df.region_name == region) & (df["cases_pos_hospitalized_icu"] > 0)
        cases = df.loc[mask, "cases_pos_hospitalized_icu"].to_numpy()
        changes = df.loc[mask, "cases_pos_hospitalized_icu_change"].to_numpy()
        x_region[region] = cases[:-1] / scale_by
        x_t_region[region] = changes[1:] / scale_by
        y_region[region] = changes[1:] / scale_by
    return x_region, x_t_region, y_region
|
b9fb87f344378bd1a0b34931ce278f00901d280c
| 73,557
|
import functools
def cached(f):
    """
    Decorator turning a zero-argument method into a cached property.

    The computed value is stored under the method's name in the
    instance's ``_cache`` dict, so it is computed at most once per
    instance.
    """
    name = f.__name__

    @property
    @functools.wraps(f)
    def wrapper(self):
        cache = self._cache
        if name not in cache:
            cache[name] = f(self)
        return cache[name]

    return wrapper
|
15246ae4c07d9f855c6830e5a60cf3cf3597effe
| 73,559
|
import codecs
def to_hex(data):
    """Return the lowercase hex encoding of binary *data* (as bytes)."""
    hex_bytes = codecs.encode(data, 'hex')
    return hex_bytes
|
4685c0ef6f5a5cfe1b0027bac6d1a9421e86dda6
| 73,567
|
def get_unique_list(input_list):
    """
    Return a new list with duplicates removed, preserving first-seen order.

    Membership is tested with ``==`` against the output list, so elements
    need not be hashable.
    """
    unique = []
    for item in input_list:
        if item in unique:
            continue
        unique.append(item)
    return unique
|
593bb0cc12f9c98a22490670d136f1a354d5cec0
| 73,571
|
import json
def parse_advice(json_response) -> str:
    """Extract the advice text from an adviceslip-style JSON response."""
    payload = json.loads(json_response)
    return payload['slip']['advice']
|
d4a8680602917032ecd8a463fafe083851d0446c
| 73,572
|
from typing import List
from typing import Union
from typing import Any
def reduce(values: List) -> Union[List[Any], Any]:
    """ Collapse nested singleton lists down to their sole element. """
    current = values
    while isinstance(current, list) and len(current) == 1:
        current = current[0]
    return current
|
3922a86c462d265fd37736e9c9b56069ce4ab8d0
| 73,573
|
import requests
def get_sequence(UniprotID):
    """Get protein sequence from UniProt Fasta (using REST API).

    :param UniprotID: UniProt accession, e.g. 'P12345'
    :return: amino-acid sequence as a plain string (FASTA header stripped)
    :raises requests.HTTPError: if UniProt returns an error status
    """
    # collect UniProtID data
    fasta_URL = 'https://www.uniprot.org/uniprot/' + UniprotID + '.fasta'
    # Fetching a resource is a GET, not a POST: POST was semantically
    # wrong and defeats HTTP caching (UniProt happened to accept it).
    request = requests.get(fasta_URL)
    request.raise_for_status()
    fasta_lines = request.text.split('\n')
    # First line is the '>' FASTA header; the remainder is the wrapped sequence.
    return ''.join(fasta_lines[1:])
|
ede837154bda738dc8e7e83c97c5f81b284db17c
| 73,574
|
def modulus(x, y):
    """Return the remainder of x divided by y (Python ``%`` semantics)."""
    remainder = x % y
    return remainder
|
ea2694f98133ddaf43da3f3eb702f5ba22dec597
| 73,576
|
def capitalized(piece_name: str) -> str:
    """Returns a capitalized version of a piece name

    Args:
        piece_name: Piece name
    Returns:
        First character upper-cased, remainder lower-cased; an empty
        string is returned unchanged (the previous ``piece_name[0]``
        indexing raised IndexError on empty input).
    """
    return piece_name.capitalize()
|
98838f820e90f6b1540f537ed878324d02aefb87
| 73,579
|
def isDigit(char):
    """assumes char is a single character
    returns a boolean, True if char is one of '0'..'9', else False"""
    return char in "0123456789"
|
f26193c9fff51cdaa87d76ac0e638cac09936b6e
| 73,583
|
def first(predicate_or_None, iterable, default=None):
    """
    Return the first item of *iterable* for which the predicate is true.

    With predicate None, the first truthy item is returned.  If nothing
    matches, *default* is returned instead.
    """
    matches = filter(predicate_or_None, iterable)
    return next(matches, default)
|
e477073ab7c59f3650adc112f9dafd811661b32f
| 73,587
|
from typing import Dict
import click
def prep_secrets(*, environment_mappings: Dict[str, str], secret_values: Dict[str, str]) -> Dict[str, str]:
    """Convert secrets from standardized name map to required environment variable map.

    :param dict environment_mappings: Mapping from secret identifiers to environment variable names
    :param dict secret_values: Mapping from secret identifiers to secret values
    :returns: Mapping from environment variable names to secret values
    :raises click.UsageError: if secrets contains an identifier that is not in environment_mappings
    """
    prepared = {}
    for identifier, secret in secret_values.items():
        try:
            env_name = environment_mappings[identifier]
        except KeyError:
            raise click.UsageError(f'Identifier key "{identifier}" not found in environment variable mapping.')
        prepared[env_name] = secret
    return prepared
|
cc73ac7052ff3b4476e64cee671e8501b7cb248e
| 73,591
|
def percentageCalculator(x, y, case=1):
    """Calculate percentages.

    Case 1: What is x% of y?
    Case 2: x is what percent of y?
    Case 3: What is the percentage increase/decrease from x to y?

    :param x: first operand (meaning depends on case)
    :param y: second operand
    :param case: which question to answer (1, 2 or 3; default 1)
    :return: the computed percentage value
    :raises ValueError: if case is not 1, 2 or 3
    """
    if case == 1:
        # Case 1: What is x% of y?
        return x / 100 * y
    if case == 2:
        # Case 2: x is what percent of y?
        return x / y * 100
    if case == 3:
        # Case 3: percentage change from x to y.
        return (y - x) / x * 100
    # ValueError is the conventional type for a bad argument value; it is
    # an Exception subclass, so existing `except Exception` handlers
    # still catch it.
    raise ValueError("Only case 1,2 and 3 are available!")
|
30443f29282cf06443a4276d3cbbe20b7c792382
| 73,594
|
def name_matches_object(name, *objects, **kwargs):
    """Determine if a resource name could have been created by given objects.

    The object(s) must implement RandomNameGeneratorMixin.  Identical
    name-generation options (format + allowed characters) are
    deduplicated first, so passing many classes at once is more
    efficient than calling this once per class.

    :param name: The resource name to check against the object's
                 RESOURCE_NAME_FORMAT.
    :param *objects: Classes or objects to fetch random name
                     generation parameters from.
    :param **kwargs: Additional keyword args. See the docstring for
                     RandomNameGenerator.name_matches_object() for
                     details on what args are recognized.
    :returns: bool
    """
    deduped = {}
    for candidate in objects:
        rng_key = (candidate.RESOURCE_NAME_FORMAT,
                   candidate.RESOURCE_NAME_ALLOWED_CHARACTERS)
        # setdefault keeps the FIRST object seen for each option pair,
        # matching the original "if key not in" behaviour.
        deduped.setdefault(rng_key, candidate)
    return any(candidate.name_matches_object(name, **kwargs)
               for candidate in deduped.values())
|
f176c6681ea602e5d981a54bf7bb3fe5e2a31a40
| 73,595
|
def getRow(array, index):
    """
    Return a copy of the row of the given 2D array at the selected index.

    The loop was hard-coded to the first 9 columns (Sudoku-sized grids);
    copying the whole row is identical for 9-wide rows and generalizes
    to any row length.

    :param array: 2D sequence (list of rows)
    :param index: index of the row to extract
    :return: list copy of the selected row
    """
    return list(array[index])
|
b3ab69bba27f8fecc9d7c0dc5c686aa508e6a169
| 73,597
|
import random
def getRandomWord(wordList):
    """
    Returns a random string from the passed list of strings.

    Uses random.choice, the standard idiom, instead of manual
    randint-based indexing.  (Note: an empty list now raises IndexError
    rather than randint's ValueError.)
    """
    return random.choice(wordList)
|
1045614d50889c29626a0702a979a056c503b285
| 73,598
|
def create_time_callback(data):
    """Create a callback giving total (service + travel) time between nodes.

    ``data`` must provide "demands", "time_per_demand_unit" and
    "travel_time_matrix" (indexable by a (from, to) pair).  Travel legs
    shorter than 15 are floored at 15.
    """
    def time_callback(from_node, to_node):
        """Return the total time from *from_node* to *to_node*."""
        # Service time at the origin node: demand times per-unit time.
        service = data["demands"][from_node] * data["time_per_demand_unit"]
        # Travel time, floored at the 15-unit minimum leg duration.
        travel = max(data["travel_time_matrix"][from_node, to_node], 15)
        return service + travel
    return time_callback
|
72633675e0f659bfebb5b30a3374818f2c5cedc7
| 73,599
|
def export_report(ss, report_id, export_format, export_path, sheet_name):
    """
    Exports a report, given export filetype and location. Allows export format 'csv' or 'xlsx'.

    :param ss: initialized smartsheet client instance
    :param report_id: int, required; report id
    :param export_format: str, required; 'csv' or 'xlsx'
    :param export_path: str, required; filepath to export sheet to
    :param sheet_name: str, required; name of sheet exported
    :return: str, indicating failure or success, with path, filename, extension
    """
    # Dispatch once; the original re-tested export_format a second time
    # after the API call just to pick the return message.
    if export_format == 'csv':
        ss.Sheets.get_sheet_as_csv(report_id, export_path)
    elif export_format == 'xlsx':
        ss.Sheets.get_sheet_as_excel(report_id, export_path)
    else:
        return 'export_format \'{}\' is not valid. Must be \'csv\' or \'xlsx\''.format(export_format)
    return 'Report exported to {}{}.{}'.format(export_path, sheet_name, export_format)
|
a2ec63fa85a7498043f5848404870a71059ffa9f
| 73,604
|
def build_add_edge_query(source_label: str, target_label: str, edge_type: str, edge_props: str) -> str:
    """Build a standard Cypher edge-insert query from the given labels, type and property map."""
    template = (
        'UNWIND $props AS prop '
        'MERGE (n:Base {{objectid: prop.source}}) ON MATCH SET n:{0} ON CREATE SET n:{0} '
        'MERGE (m:Base {{objectid: prop.target}}) ON MATCH SET m:{1} ON CREATE SET m:{1} '
        'MERGE (n)-[r:{2} {3}]->(m)'
    )
    return template.format(source_label, target_label, edge_type, edge_props)
|
645fe2edef12a2600aa22cdc2228ab7c22e301cc
| 73,606
|
from pathlib import Path
def default_messages_path() -> Path:
    """
    Return the absolute path of the bundled ``locale`` message directory,
    which sits next to this module.
    """
    return (Path(__file__) / ".." / "locale").resolve()
|
ce01a37d9b09000d46720325d79188702001e9c3
| 73,609
|
def get_filename(file_name_parts=None):
    """
    Build the .wav file name used to store a recording.

    Each part is prefixed with '_' and appended after 'record_', so the
    result always contains a double underscore when parts are given
    (e.g. ['a', 'b'] -> 'record__a_b.wav').

    :param file_name_parts: iterable of name fragments, or None
    :return: file name string
    """
    if file_name_parts is None:
        middle = ''
    else:
        middle = ''.join('_' + part for part in file_name_parts)
    return 'record_' + middle + '.wav'
|
d78a7c53b0db618dffdad3157989b81d699e0008
| 73,610
|
def getMetadataSizes(conn, showEmpty = True):
    """Return the size of every InterMine metadata entry.

    conn - open database connection
    showEmpty - if true then 0 sizes are also shown
    Returns a {name: size} dict where size is the larger of the text
    value length and the blob value length."""
    with conn.cursor() as cur:
        cur.execute("select key, length(value), length(blob_value) from intermine_metadata;")
        rows = cur.fetchall()
    results = {}
    for name, textSize, blobSize in rows:
        effectiveSize = max(textSize, blobSize)
        if showEmpty or effectiveSize > 0:
            results[name] = effectiveSize
    return results
|
9ae706eb3d0d45ca27f9551293b33b08b4ea2260
| 73,614
|
def merge_properties(previous_val, next_val):
    """
    Function used in reduce for merging several xml properties to dict.

    Example:
        Source: [
            {"name": "name", "value": "cache_name"},
            {"name": "groupName", "value": "group_name"}
        ]
        Result: {
            "name": "cache_name",
            "groupName": "group_name"
        }
    :param previous_val: reduce previous value (mutated in place and returned)
    :param next_val: next value
    :return: properties dict
    """
    # On the first reduce step the accumulator is still a raw
    # {"name": ..., "value": ...} property whose name is literally 'name';
    # fold its own value under the "name" key before merging into it.
    # NOTE(review): assumes the first property in the sequence is always
    # the one named 'name' — confirm against the XML source.
    if previous_val["name"] == 'name':
        previous_val["name"] = previous_val["value"]
        del previous_val["value"]
    # Properties without a "value" fall back to their parsed "_children"
    # (NOTE(review): assumes every value-less property carries "_children").
    previous_val[next_val["name"]] = next_val.get("value", next_val["_children"])
    return previous_val
|
e91d0abba49e1f9bc1b628e25070d9d6e965b7d5
| 73,615
|
def correlation(x, y):
    """
    Pearson correlation of two equal-length inputs (NumPy arrays or
    Pandas Series): the mean of the products of the standardized values,
    using the population standard deviation (ddof=0).
    """
    x_standardized = (x - x.mean()) / x.std(ddof=0)
    y_standardized = (y - y.mean()) / y.std(ddof=0)
    products = x_standardized * y_standardized
    return products.mean()
|
669f32eda3fa9508cefe0ea7e1b80c04523e6935
| 73,616
|
def dot(x, y):
    """Dot product of two lists of equal length."""
    total = 0
    for a, b in zip(x, y):
        total += a * b
    return total
|
060c88268aa17d74d3544197110da94c89d755f6
| 73,619
|
def get_rank(s, i):
    """
    Return how many times the character s[i] occurs in s before index i.
    """
    return s[:i].count(s[i])
|
3b86e8e1678f98ae3a352238110e188732cd3fe5
| 73,620
|
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of a list of numbers.
    Wiki: https://en.wikipedia.org/wiki/Average_absolute_deviation
    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    >>> average_absolute_deviation([2, 70, 6, 50, 20, 8, 4, 0])
    20.0
    >>> average_absolute_deviation([-20, 0, 30, 15])
    16.25
    >>> average_absolute_deviation([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    mean = sum(nums) / len(nums)
    deviations = [abs(value - mean) for value in nums]
    return sum(deviations) / len(deviations)
|
a95b5e6fcc9c80ee28be367969959b8b977aeda9
| 73,625
|
def users_to_fullnames(users_df):
    """Build 'First Surname' full-name strings from a users DataFrame."""
    firsts = users_df["First name"]
    lasts = users_df["Surname"]
    return [first + " " + last for first, last in zip(firsts, lasts)]
|
08b93e71d90b41723e0b219e86144df77a908bd2
| 73,626
|
def defuzzify(fired_rule_tuples: list) -> float:
    """
    Defuzzify consequent fuzzy values using a weighted average.

    :param fired_rule_tuples: list of (weight, value) tuples from the fired rules
    :return: sum(w * z) / sum(w) over all tuples
    :raises ZeroDivisionError: if the weights sum to zero
    """
    # The leftover debug print() of the weights was removed.
    weighted_sum = sum(w * z for w, z in fired_rule_tuples)
    total_weight = sum(w for w, _ in fired_rule_tuples)
    return weighted_sum / total_weight
|
c401319c6ea5d44ee880b76d28da2dacee24f305
| 73,630
|
def least_residue(a , m):
    """
    Return the least non-negative residue of a (mod m).

    Parameters
    ----------
    a : int
        the integer being reduced, i.e. a in a (mod m)
    m : int
        the modulus, i.e. m in a (mod m)
    return : int
        the integer least residue
    """
    residue = a % m
    return residue
|
a697656664fa11c64c32d8902ebce893b70f9203
| 73,633
|
import torch
def slerp(val, low, high):
    """Spherical linear interpolation between two tensors.

    ``val`` runs from 0 (returns ``low``) to 1 (returns ``high``); values
    outside that range clamp to the endpoints.  When the endpoints are
    numerically identical, ``low`` is returned to avoid dividing by
    sin(0).
    """
    if val <= 0:
        return low
    if val >= 1:
        return high
    if torch.allclose(low, high):
        return low
    unit_low = low / torch.norm(low)
    unit_high = high / torch.norm(high)
    omega = torch.arccos(torch.dot(unit_low, unit_high))
    sin_omega = torch.sin(omega)
    weight_low = torch.sin((1.0 - val) * omega) / sin_omega
    weight_high = torch.sin(val * omega) / sin_omega
    return weight_low * low + weight_high * high
|
fe0bfaab8db2bbff15b5f83077a47b60def1390f
| 73,637
|
import hashlib
def md5sum(filename, blocksize=65536):
    """Return the hex MD5 digest of *filename*, read in *blocksize* chunks."""
    digest = hashlib.md5()
    with open(filename, 'rb') as handle:
        while True:
            chunk = handle.read(blocksize)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
|
b34f2fc913c3132094e04d5f347de8fc704a0dba
| 73,638
|
def in_labelset(xmrs, nodeids, label=None):
    """
    Test if all nodeids share a label.

    Args:
        xmrs: the Xmrs object whose EPs and variable references are consulted.
        nodeids: An iterable of nodeids.
        label: If given, all nodeids must share this label; otherwise the
            label of an arbitrary member (set iteration order) is used.
    Returns:
        True if all nodeids share a label, otherwise False.
    """
    nodeids = set(nodeids)
    if label is None:
        # No target label given: use the label of one member as the label
        # that all the others must share.
        label = xmrs.ep(next(iter(nodeids))).label
    # NOTE(review): relies on the xmrs._vars internal layout — the set of
    # nodeids referencing `label` through their LBL role; confirm against
    # the Xmrs implementation before changing.
    return nodeids.issubset(xmrs._vars[label]['refs']['LBL'])
|
1db07cd4f4a24a16ac5f06d09dbe65f9473da4b6
| 73,643
|
import itertools
def split_to_episodes(iterable, is_splitter):
    """
    Split iterable into sub-lists with func is_splitter.

    :param iterable: The collection to operate on.
        NOTE(review): iterated twice below, so this must be a re-iterable
        sequence — a generator would come back empty on the second pass.
    :param is_splitter: This func is to tell whether the current element is a splitter.
        Splitter itself will be dropped and never show up in sub-lists.
    :return: A zip of pairs, each pair has two elements:
        The first one is the episode information line.
        The second element holds all the quotes mentioned by this episode.
    """
    # Runs of consecutive non-splitter lines, in order (splitters dropped).
    sentences = [list(g) for k, g in itertools.groupby(iterable, is_splitter) if not k]
    # The splitter lines themselves serve as the episode headers.
    episodes = [x for x in iterable if is_splitter(x)]
    # sentences[1:] skips the run that precedes the first splitter.
    # NOTE(review): this assumes the iterable begins with non-splitter
    # content; if it begins with a splitter, headers and runs misalign.
    return zip(episodes, sentences[1:])
|
f5523a660a76af24e854c61635c47d4287667375
| 73,644
|
def get_prefrosh_and_adjacent(prefrosh_id, prefrosh_list):
    """Return [prefrosh, prev_id, next_id] for the entry with prefrosh_id.

    prev_id / next_id are the neighbouring entries' ids, or None at the
    ends of the list.  Raises StopIteration if the id is not present.
    """
    matches = ((pos, pf) for pos, pf in enumerate(prefrosh_list)
               if pf['prefrosh_id'] == prefrosh_id)
    pos, prefrosh = next(matches)
    prev_id = None if pos == 0 else prefrosh_list[pos - 1]['prefrosh_id']
    next_id = None if pos == len(prefrosh_list) - 1 else prefrosh_list[pos + 1]['prefrosh_id']
    return [prefrosh, prev_id, next_id]
|
b56199ad0bdb2ca532bdfcd456864ee59fdb90ba
| 73,645
|
def get_bounding_straight_rectangle(points):
    """Return (x, y, w, h) of the axis-aligned box bounding all *points*."""
    xmin = min(p[0] for p in points)
    xmax = max(p[0] for p in points)
    ymin = min(p[1] for p in points)
    ymax = max(p[1] for p in points)
    return xmin, ymin, xmax - xmin, ymax - ymin
|
d498c96cef4d6d28648349077931e2f32d118564
| 73,649
|
import re
def _testname(filename):
"""Transform the file name into an ingestible test name."""
return re.sub(r'[^a-zA-Z0-9]', '_', filename)
|
af6acb345647eb78d40e3ef1c6b2750c8050b1ac
| 73,651
|
def encode_bool(value):
    """Encode a truthy/falsy value as the XML boolean literal "true"/"false"."""
    return "true" if value else "false"
|
8079fc74cf184485f9cdeb02ea454265d5ed6d38
| 73,653
|
import hashlib
def _hash_key(k):
"""
Returns the input k (a large integer key) as a hashed byte array.
"""
return hashlib.sha256(str(k).encode()).digest()
|
23b986478078a515f7ab711424fba2fad644bbd7
| 73,654
|
def _header2row_numbers(local_header, global_header):
"""Calculate local grid starting and ending rows in global grid
Return:
ind_top: the index of the top row
ind_bottom: the index of the bottom row
"""
y_bottom_gap = local_header['yllcorner']-global_header['yllcorner']
row_gap = round(y_bottom_gap/local_header['cellsize'])
ind_bottom = global_header['nrows']-1-row_gap
ind_top = ind_bottom-local_header['nrows']+1
ind_top = int(ind_top)
ind_bottom = int(ind_bottom)
return ind_top, ind_bottom
|
da198dc59e27cdc4eb862f7a47fe08d702eae856
| 73,656
|
def commission_low(q, p):
    """Korea Investment & Securities trading commission (low tier).

    :param q: (int) number of shares traded; negative means a sell
    :param p: (float) share price
    :return: (float) commission plus transaction tax
    """
    notional = abs(q * p)
    # Transaction tax (0.3%) applies to sells only, truncated to a whole won.
    tax = int(notional * 0.003) if q < 0 else 0
    # Commission at 0.024164%, floored down to the nearest 10 won.
    commission = notional * 0.00024164 // 10 * 10
    return commission + tax
|
ba14e427eef0f2c6e3b04b2bb525086bb3ceaad6
| 73,658
|
from typing import OrderedDict
def parse_re(input, target_re):
    """
    Run re.findall with `target_re` regexp on given `input`.

    Return found targets as an OrderedDict mapping the first capture group
    to the second capture group converted to float, e.g. (with a suitable
    two-group regexp) parse_re("G0 S3 P0.1") = {'S': 3, 'P': 0.1}.
    """
    # Use the real collections.OrderedDict: calling the typing.OrderedDict
    # alias (imported at the top of this file) is deprecated.
    from collections import OrderedDict
    stripped = input.strip()
    matches = target_re.findall(stripped)
    return OrderedDict((m[0], float(m[1])) for m in matches)
|
f853f4355819a2d18e5f773817f32e3e83365dcc
| 73,659
|
def _calc_rates(matrix, thresh):
"""Calculates true positive rate and false positive rate for a given threshold
Arguments:
matrix {np.ndarray} -- true labels and predicted probabilities
thresh {float} -- threshold for a round of ROC construction
Returns:
[float, float] -- true positive rate, false positive rate
"""
tp = 0
fp = 0
tn = 0
fn = 0
n = len(matrix)
for i in matrix:
pred = 1 if i[0] >= thresh else 0
if pred == 1:
if i[1] == 1:
tp += 1
else:
fp += 1
else:
if i[1] == 0:
tn += 1
else:
fn += 1
tpr = tp / (tp+fn)
fpr = fp / (fp + tn)
return tpr, fpr
|
5c94129cb5fb346d945adb76f0247edee75799cd
| 73,660
|
def is_standardized(table, has_time=False, verbose=True):
    """ Check if the table has the correct indices and the minimum required columns.

    Args:
        table: pandas DataFrame
            Annotation table.
        has_time: bool
            Require time information for each annotation, i.e. start and stop times.
        verbose: bool
            If True and the table is not standardized, print a message with an example table in the standard format.

    Returns:
        res: bool
            True if the table has the standardized Ketos format. False otherwise.
    """
    # The standardized (Ketos) format: a two-level index plus a 'label'
    # column, with 'start'/'end' columns added when time info is required.
    required_indices = ['filename', 'annot_id']
    required_cols = ['label']
    if has_time:
        required_cols = required_cols + ['start', 'end']
    # Columns that are required but missing from the table.
    mis_cols = [x for x in required_cols if x not in table.columns.values]
    res = (table.index.names == required_indices) and (len(mis_cols) == 0)
    # Help text printed (not raised) when the check fails and verbose is on.
    message = """ Your table is not in the Ketos format.

            It should have two levels of indices: filename and annot_id.
            It should also contain at least the 'label' column.
            If your annotations have time information, these should appear in the 'start' and 'end' columns

            extra columns are allowed.

            Here is a minimum example:

                                 label
            filename  annot_id
            file1.wav 0          2
                      1          1
                      2          2
            file2.wav 0          2
                      1          2
                      2          1


            And here is a table with time information and a few extra columns ('min_freq', 'max_freq' and 'file_time_stamp')

                                start   end  label  min_freq  max_freq  file_time_stamp
            filename  annot_id
            file1.wav 0          7.0   8.1      2    180.6     294.3    2019-02-24 13:15:00
                      1          8.5  12.5      1    174.2     258.7    2019-02-24 13:15:00
                      2         13.1  14.0      2    183.4     292.3    2019-02-24 13:15:00
            file2.wav 0          2.2   3.1      2    148.8     286.6    2019-02-24 13:30:00
                      1          5.8   6.8      2    156.6     278.3    2019-02-24 13:30:00
                      2          9.0  13.0      1    178.2     304.5    2019-02-24 13:30:00

    """
    if res == False and verbose == True:
        print(message)
    return res
|
33f5ad6e774c93a2cf5f7a805486eba8e8964fdc
| 73,664
|
def find_all_ind2(target, tokens):
    """
    Find all appearances of a (possibly multi-word) target phrase in a token list.

    Matches are non-overlapping: after a hit at index i the scan resumes
    at i + len(phrase).

    :param target: a String of the feature/country. E.g. 'dark brown', 'Italian wine', etc.
    :param tokens: the List of tokens of the sentence to be looked at
    :return: all start indices where a match was found
    """
    feature_tokens = target.split()
    width = len(feature_tokens)
    if width == 0:
        # Empty phrase trivially "matches" at every position (preserves the
        # original behaviour and avoids a zero-advance infinite loop).
        return list(range(len(tokens)))
    indices = []
    i = 0
    while i < len(tokens):
        if tokens[i:i + width] == feature_tokens:
            indices.append(i)
            # Bug fix: resume right after the match. The old code advanced
            # len(phrase) + 1 tokens and skipped adjacent matches.
            i += width
        else:
            i += 1
    return indices
|
6e54a7e5706b0b3584d2ac46d34aa8fc4655e4e3
| 73,666
|
import six
def _to_reddit_list(arg):
"""Return an argument converted to a reddit-formatted list.
The returned format is a comma deliminated list. Each element is a string
representation of an object. Either given as a string or as an object that
is then converted to its string representation.
"""
if (isinstance(arg, six.string_types) or not (
hasattr(arg, "__getitem__") or hasattr(arg, "__iter__"))):
return six.text_type(arg)
else:
return ','.join(six.text_type(a) for a in arg)
|
7bc76a0142a1fb8fdcdb61f5b6f9947e83af9d43
| 73,671
|
def inRange(p,q,r):
    """
    Return True iff q lies between p and r (inclusive, in either order).
    """
    low, high = (p, r) if p <= r else (r, p)
    return low <= q <= high
|
2243a41abab3c27c1c78062c6833296a11b2f9b7
| 73,673
|
import re
def clean_spaces(t):
    """Collapse runs of whitespace in *t* to single spaces.

    :param t: string to clean, or None (returned unchanged)
    :return: cleaned string, or None if t was None
    """
    if t is None:
        return t
    # Raw string for the regex: '\s' in a plain literal is an invalid
    # escape sequence (DeprecationWarning / SyntaxWarning in newer Pythons).
    return re.sub(r'\s+', ' ', t)
|
3d57ea88d8a09ca3ca1afd910a67f6a297137c23
| 73,677
|
def mock_none(self, *args):
    """
    Stand-in for various FTP methods in tests: accepts any arguments and
    always returns None.
    """
    return None
|
8cbedd7396dfb123e2d620d4d8e6b478d22eeaff
| 73,678
|
import itertools
def _powerset(iterable, minsize=0):
"""From the itertools recipes."""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(minsize, len(s) + 1)
)
|
41c5576ae031288957d827d82231eab43f1ce70b
| 73,679
|
def buffer_bbox(bbox_osm):
    """
    Expand an EPSG:4326 bounding box by 0.02 degrees on every side, in
    place, to ensure it covers the whole area of interest.

    :param bbox_osm: mutable array-like of four coordinates: miny, minx, maxy, maxx.
    :return: the same array-like, buffered: miny, minx, maxy, maxx
    """
    pad_lat = 0.02  # degrees
    pad_lon = 0.02  # degrees
    bbox_osm[0] = bbox_osm[0] - pad_lat  # min lat
    bbox_osm[1] = bbox_osm[1] - pad_lon  # min lon
    bbox_osm[2] = bbox_osm[2] + pad_lat  # max lat
    bbox_osm[3] = bbox_osm[3] + pad_lon  # max lon
    return bbox_osm
|
e705351da7531a270ced4c504e4142388f99d59c
| 73,684
|
def remove_chars(seq):
    """Remove all non digit characters from a string, but cleanly passthrough non strs.

    Parameters
    ----------
    seq : Any
        Strings will be modified, any other type is directly returned

    Returns
    -------
    Any
        The original sequence, minus non-digit characters if input was a string
    """
    # Exact-type test (not isinstance) so str subclasses pass through
    # untouched, matching the original behaviour.
    if type(seq) is not str:
        return seq
    return ''.join(ch for ch in seq if ch.isdigit())
|
6257da82d12f9f1b4a446b8ccc93436cbd24278a
| 73,686
|
def time_warp(ts):
    """Collapse consecutive duplicate elements, keeping the first of each run.

    Accepts either a list of tokens or a space-separated string (which is
    split first).

    >>> time_warp('1 2 3 4 5 5 2 3 3'.split(' '))
    ['1', '2', '3', '4', '5', '2', '3']
    """
    if isinstance(ts, str):
        # Only split strings: the doctest above passes an already-split
        # list, which the previous unconditional ts.split(' ') crashed on.
        ts = ts.split(' ')
    result = []
    most_recent_elem = None
    for e in ts:
        if e != most_recent_elem:
            result.append(e)
        most_recent_elem = e
    return result
|
d6d8471d017d13cee9b4e9e0f2bdf70c14bb314e
| 73,687
|
def determinant(a, b, c, d, e, f, g, h, i):
    """Determinant of the 3x3 matrix

        |a b c|
        |d e f|
        |g h i|

    computed by cofactor expansion along the first row.
    """
    minor_a = e * i - f * h
    minor_b = d * i - f * g
    minor_c = d * h - e * g
    return a * minor_a - b * minor_b + c * minor_c
|
2a923d076a2bd027905b1573215e15422ec791e6
| 73,689
|
def strip_prefix(s, prefix):
    """Remove *prefix* from the beginning of *s* if present.

    If s = "some_something" and prefix = "some_", return "something".
    Returns None when s is not a string; returns s unchanged when prefix
    is not a string or s does not start with it.

    :param str s: string to modify
    :param str prefix: prefix to remove
    :rtype: Optional[str]=None
    """
    if not isinstance(s, str):
        return None
    if not isinstance(prefix, str) or not s.startswith(prefix):
        return s
    return s[len(prefix):]
|
747fa22d147cac883dd38b7a7d93a58191b5f2df
| 73,694
|
def compute_finance_labels(df, shift=1):
    """
    Computes binary (0/1) labels of financial data.

    Args:
        df: DataFrame with "return" and "volume_daily_change" columns.
            NOTE(review): mutated in place (label columns added) before
            the final slice is returned.
        shift: number of periods ahead for the "label t+1" column.
    Returns: df with newly added label columns, nr. of labels
    """
    n = len(df.columns)
    # 1.0 when the value is positive, else 0.0 (NaN compares False -> 0.0).
    df["label t"] = df["return"].apply(lambda x: 1. if x > 0 else 0.)
    # Next-period label: the return `shift` rows ahead of each row.
    df["label t+1"] = df["return"].shift(-shift).apply(lambda x: 1. if x > 0 else 0.)
    df["label volume"] = df["volume_daily_change"].apply(lambda x: 1. if x > 0 else 0.)
    # removing last shift rows as there labels can not be computed
    df = df.iloc[:-shift]
    nr_labels = len(df.columns) - n
    return df, nr_labels
|
631e20a329ad91661411e0f0216b830070e0970d
| 73,705
|
import http
def build_status(code):
    """Transform a numeric code (200) into an HTTP status line ('200 OK')."""
    phrase = http.HTTPStatus(code).phrase
    return f'{code:3d} {phrase}'
|
91b4eff1449756aa31d74d1cc63f230b1dcd6ea6
| 73,708
|
def read_config(filename):
    """Read the content of filename and put flags and values in a
    dictionary. Each line in the file is either an empty line, a line starting
    with '#' or an attribute-value pair separated by a '=' sign. Returns the
    dictionary.

    :param filename: path of the configuration file
    :return: dict mapping flag names to their (string) values
    :raises ValueError: for a non-empty, non-comment line with no '='
    """
    config = {}
    # Context manager guarantees the file is closed even if parsing fails
    # (the original left it open on any exception).
    with open(filename, 'r') as file:
        for line in file:
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                continue
            # Split on the FIRST '=' only, so values may contain '='
            # themselves (e.g. url = http://host?a=b).
            (flag, value) = [s.strip() for s in line.split('=', 1)]
            config[flag] = value
    return config
|
4659404f16a7b0d6f8a0f90c11df96c15c3e76d1
| 73,709
|
import logging
def _GetEffectiveVerbosity(verbosity):
"""Returns the effective verbosity for verbosity. Handles None => NOTSET."""
return verbosity or logging.NOTSET
|
63825bc4c3e79412bb4d22c27108b17f0ac3e753
| 73,710
|
def get_value(testvalue):
    """Render the correct value for the given TestValue by delegating to
    its associated test's get_value()."""
    return testvalue.test.get_value(testvalue.value)
|
c6f57f5b468865196ff70c32d6c32cbfd6104ea1
| 73,714
|
import torch
def jitter_node_pos(pos_matrix, scale=1):
    """
    Randomly jitter node positions in the xyz directions.

    Args:
        pos_matrix: matrix with xyz-node positions (N x 3).
        scale: scale factor applied to the Gaussian noise.
    """
    noise = torch.randn(*pos_matrix.shape).numpy() * scale
    return pos_matrix + noise
|
28234011a2f781569fab681d07d4ca6b9bec792b
| 73,715
|
def setElements(data, inds, vals):
    """
    Return a copy of *data* with the positions in *inds* replaced by *vals*.

    Args:
        data (ndarray): data to edit (the input itself is left unmodified)
        inds (ndarray): indexes of data to access
        vals (ndarray): new values to insert into the data
    Returns:
        ndarray: modified copy of data
    """
    # The commented-out dead code from earlier iterations was removed and
    # the index loop replaced by pairwise iteration.
    res = data.copy()
    for index, value in zip(inds, vals):
        res[index] = value
    return res
|
5b8327ac3e95896988bd6626345a4726f36c921d
| 73,717
|
def StrReverse(s):
    """Return the string form of *s* reversed.

    Non-string inputs are converted with str() first, as before; the
    list/reverse/join round-trip is replaced by the slice idiom.
    """
    return str(s)[::-1]
|
ec49f74ed9d526891e7d95cbdee249299972967d
| 73,720
|
def get(obj, path, default=None):
    """Gets the value at path of object.

    If the resolved value is undefined, the default value is returned
    in its place.

    Exampele:
        >>> obj = { 'a': [{ 'b': { 'c': 3 } }] }
        >>> get(obj, 'a.0.b.c')
        3

    Args:
        obj (dict,list): The object to query.
        path (str): The path of the property to get.
        default (any, optional): The value returned for unresolved values.

    Returns:
        any: Returns the resolved value.
    """
    current = obj
    for segment in path.split("."):
        try:
            if isinstance(current, (list, tuple)):
                current = current[int(segment)]
            else:
                current = current[segment]
        except (KeyError, ValueError, TypeError):
            return default
    return current
|
282d9ab669e11e032daebd8284a407aea5701781
| 73,721
|
def average(stats):
    """ Compute the average of all values for the next structure:
    {peer1: {block1: value11, block2: value2}, peer2: {...}, ...}

    :raises ZeroDivisionError: if the structure contains no values at all
    """
    total = 0
    count = 0
    # .values() (Python 3) replaces the Python-2-only .itervalues(),
    # which raises AttributeError on modern dicts.
    for peer_stats in stats.values():
        total += sum(peer_stats.values())
        count += len(peer_stats)
    return total / count
|
23645a0ebf9d61d68516ea88981012a28d18488b
| 73,724
|
def parseInput(input):
    """
    Convert a comma-separated string of integers into a list of ints.
    """
    tokens = input.split(',')
    return list(map(int, tokens))
|
0d29a72c1c19b2703c6f736de5819cd58ab08d4d
| 73,725
|
import re
def include_symbol(tablename, schema=None):
    """Exclude some tables from consideration by alembic's 'autogenerate'.

    Returns False for `*_alembic_version` tables, True for everything else.
    """
    # A plain suffix test replaces the anchored regex r'.*_alembic_version$',
    # which was both slower and harder to read.
    return not tablename.endswith('_alembic_version')
|
cb097a4b6a19c11bccc6dfc780a53431c505f36b
| 73,727
|
def get_cve_context(cve_list):
    """
    Prepare CVE context data as per the Demisto standard.

    :param cve_list: cve list from response.
    :return: List of cves dictionary representing the Demisto standard context.
    """
    context = []
    for entry in cve_list:
        context.append({
            'ID': entry.get('cve', ''),
            'CVSS': entry.get('baseScore', ''),
            'Description': entry.get('summary', ''),
        })
    return context
|
6122a666d42382acf845f819ceb58c1efe189b00
| 73,728
|
def reorder_correlator_inputs(input_map, corr_inputs):
    """Sort a list of correlator inputs into the order given in input map.
    Parameters
    ----------
    input_map : np.ndarray
        Index map of correlator inputs.
    corr_inputs : list
        List of :class:`CorrInput` objects, e.g. the output from
        :func:`get_correlator_inputs`.
    Returns
    -------
    corr_input_list: list
        List of :class:`CorrInput` instances in the new order. Returns `None`
        where the serial number had no matching entry in parameter ``corr_inputs``.
    """
    # Build a serial -> input lookup once instead of scanning corr_inputs
    # for every serial (was O(n*m)).  setdefault keeps the FIRST occurrence
    # of a duplicated serial, matching the original linear-scan behaviour.
    by_serial = {}
    for corr_input in corr_inputs:
        by_serial.setdefault(corr_input.input_sn, corr_input)
    return [by_serial.get(serial) for serial in input_map["correlator_input"]]
|
5d9ccacb60a8843f2a7a66e64ba474c8c9a31c65
| 73,730
|
def calculate_city_state_vol_delta(df):
    """
    Create the market-growth rate for each (city, state) observation using
    volume:
    1. Adds ``city_state_vol_delta_pop`` from ``total_mortgage_volume_pop``
    2. Adds ``city_state_vol_delta_nc`` from ``total_mortgage_volume_nc``
    3. Returns the df with the new features
    """
    # Sort by year so pct_change compares consecutive years per market.
    grouped = df.sort_values(["year"]).groupby(["city", "state"])
    for suffix in ("pop", "nc"):
        source_col = "total_mortgage_volume_" + suffix
        target_col = "city_state_vol_delta_" + suffix
        df[target_col] = grouped[source_col].pct_change()
    return df
|
376bb79eef02fd9debba11cfb77380a11d245676
| 73,731
|
def factorial(n):
    """
    Calculate the factorial n!.

    Iterative instead of recursive, so large ``n`` no longer raises
    RecursionError.  Matches the original for all inputs: any ``n < 2``
    (including negatives) returns 1.

    :param n: integer to take the factorial of.
    :return: n! as an int.
    """
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
|
fa4194ba29d4193af6857caa4286d2ed45b7ba1c
| 73,733
|
def capture_current_size(thread_list, recorded):
    """
    Amount captured so far is the sum of the bytes recorded by warcprox,
    and the bytes pending in our background threads.
    """
    pending = 0
    for worker in thread_list:
        # Threads without a pending_data attribute contribute nothing.
        pending += getattr(worker, 'pending_data', 0)
    return recorded + pending
|
90d988c7bc37fb4cc5a7c357baf28f425e389cfe
| 73,735
|
def join_uri_paths(prefix, suffix):
    """
    Returns the union of two URI path strings without creating a double '/'
    between them.
    \param prefix The left side of the path,
    \param suffix The sub-path,
    \return Combination of the two strings sans '//' in the middle.
    """
    left = prefix[:-1] if prefix.endswith('/') else prefix
    right = suffix[1:] if suffix.startswith('/') else suffix
    return left + '/' + right
|
0be6cbe82b721f997d6a459c50e52238c740f191
| 73,736
|
def _decode_mask(mask):
"""splits a mask into its bottom_any and empty parts"""
empty = []
bottom_any = []
for i in range(32):
if (mask >> i) & 1 == 1:
empty.append(i)
if (mask >> (i + 32)) & 1 == 1:
bottom_any.append(i)
return bottom_any, empty
|
6b93a30881ed526e801b4ad55c4a04af5562d8b6
| 73,741
|
def _get_content_type_header(request):
"""
Get the content type from the request. Return an empty dict if it is not set
Args:
request (HTTPRequest): The HTTP request
Return:
a dict containing the content type
"""
try:
return {'Content-Type':request.META['CONTENT_TYPE']}
except KeyError:
return {}
|
99aae9764a7afcd1b9c7446d8808d2029baa9a43
| 73,743
|
from typing import Iterable
def generate_dropdown_options(values: Iterable[str]):
    """
    Generates Dropdown options from a list
    Parameters
    ----------
    values : Iterable[str]
        Iterable with the option values
    Returns
    -------
    Dict
        Options for the Dropdown
    """
    return [{"label": entry, "value": entry} for entry in values]
|
9675e574c6139635cd97ebae7a50185fc8c47deb
| 73,744
|
def shorten_name(name, max_length = 10):
    """
    Makes a string shorter, leaving only certain quantity of the last characters, preceded by '...'.
    @param name: The string to shorten.
    @param max_length: Maximum length of the resulting string.
    @return: A string with max_lenght characters plus '...'
    """
    if len(name) <= max_length:
        return name
    return "..." + name[-max_length:]
|
77fc27581bf9324797d3ccfbf25edd94f4db0990
| 73,745
|
def decode_uint40(bb):
    """
    Decode 5 bytes as an unsigned 40 bit integer
    Specs:
    * **uint40 len**: 5 bytes
    * **Format string**: 'j'
    """
    # Little-endian fold: byte i contributes value << (8 * i).
    value = 0
    for position, byte in enumerate(bb):
        value |= byte << (8 * position)
    return value
|
8d241b5052751cfa39b1384c747f260583227fa5
| 73,748
|
def sum_right_most(x, ndim):
    """Sum along the right most `ndim` dimensions of `x`,
    Parameters
    ----------
    x : Tensor
        Input tensor.
    ndim : Int
        Number of dimensions to be summed.
    Returns
    -------
    Tensor
    """
    # ndim == 0 means "sum nothing": return the tensor unchanged.
    if ndim == 0:
        return x
    return x.sum(list(range(-ndim, 0)))
|
c6d4eb6a2070e9806ba7e358cc8d08750515f369
| 73,762
|
def fill_dict(new_dict, mean, adj, degree):
    """
    Create dictionary entry to store in .toml
    Parameters
    ----------
    new_dict : Dictionary object
    mean : Mean vector
    adj : Adjacency matrix
    degree : Degree matrix
    Returns
    -------
    new_dict : Filled up dictionary entry (mutated in place and returned)
    """
    new_dict.update({
        "nodes": len(mean),
        "node_means": mean,
        "Adj": adj,
        "Degree": degree,
    })
    return new_dict
|
1b0202d28529ad9401c2059f35a666f5c0427983
| 73,765
|
def get_body(b2_world, index):
    """ get the body in a given position
    :param b2_world: an handler to a b2World object
    :type b2_world: b2World reference
    :param index: the index in the json list of joints
    :type index: integer
    :return: the body in the given position
    :rtype: b2Body
    """
    bodies = b2_world.bodies
    return bodies[index]
|
5663138880efc7dc51e3cc65323eed7c88a03047
| 73,768
|
import re
from typing import Optional
def status_detector(
    phrase: str,
    stop_patt: re.Pattern,
    ne_list: list
) -> Optional[str]:
    """Detects status based on given phrase
    Args:
        phrase: phrase string
        stop_patt: Regex pattern for stop words
        ne_list: list of named entities
    Returns:
        status (suggested-highlight, suggested-stop, None)
    """
    # Stop words take priority over everything else.
    if stop_patt.search(phrase):
        return "suggested-stop"
    # Highlight only when every whitespace-separated token is a known
    # named entity (vacuously true for an empty phrase).
    if all(word in ne_list for word in phrase.split()):
        return "suggested-highlight"
    return None
|
87690f4fe9b88d5eca4d454e0fb13c790a409b6b
| 73,775
|
def sub2ind(shape, row_sub, col_sub):
    """
    Return the linear index equivalents to the row and column subscripts for
    given matrix shape (column-major: index = col * n_rows + row).
    :param shape: Preferred matrix shape for subscripts conversion.
    :type shape: `tuple`
    :param row_sub: Row subscripts.
    :type row_sub: `list`
    :param col_sub: Column subscripts.
    :type col_sub: `list`
    :raises ValueError: if the subscript lists have different lengths.
    """
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would silently truncate to the shorter list via zip().
    if len(row_sub) != len(col_sub):
        raise ValueError("Row and column subscripts do not match.")
    return [col * shape[0] + row for row, col in zip(row_sub, col_sub)]
|
bd3e640171f23f80c21a66c92e4d5e3292cddea7
| 73,776
|
def parse_command_line_parameters(parser):
    """
    @brief Parses the command line parameters provided by the user and makes
           sure that mandatory parameters are present.
    @param[in] parser argparse.ArgumentParser
    @returns an object with the parsed arguments (max_inst and max_tips
             converted to int).
    """
    msg = {
        '--input-dir': 'Path to the input folder.',
        '--output-dir': 'Path to the output folder.',
        '--im-ext': """Extension of the image files inside the input
                       folder. Typically '.jpg'""",
        '--seg-suffix': """Suffix of the segmentation files. For example, if
                           an input image is called image.jpg, and the
                           corresponding segmentation is image_seg.png,
                           then the suffix is '_seg'.""",
        '--seg-ext': """Extension of the segmentation mask files.
                        Typically '.png'""",
        '--max-inst': 'Maximum number of instruments present in the image.',
        # Fixed: this help text was a copy-paste of --max-inst and wrongly
        # described instruments instead of instrument tips.
        '--max-tips': 'Maximum number of instrument tips present in the image.',
    }
    parser.add_argument('--input-dir', required=True, help=msg['--input-dir'])
    parser.add_argument('--output-dir', required=True, help=msg['--output-dir'])
    parser.add_argument('--im-ext', required=False, default='.jpg', help=msg['--im-ext'])
    parser.add_argument('--seg-suffix', required=False, default='_seg',
                        help=msg['--seg-suffix'])
    parser.add_argument('--seg-ext', required=False, default='.png', help=msg['--seg-ext'])
    parser.add_argument('--max-inst', required=True, help=msg['--max-inst'])
    parser.add_argument('--max-tips', required=True, help=msg['--max-tips'])
    args = parser.parse_args()
    # argparse delivers strings; the counts are used as ints downstream.
    args.max_inst = int(args.max_inst)
    args.max_tips = int(args.max_tips)
    return args
|
f79b016118cb818893e484c6902c45f817a3d58a
| 73,777
|
def get_user_choice(message, options):
    """User Interaction: Input string message and list of options, returns user choice.

    Re-prompts until the (lower-cased) input matches one of ``options``.
    Iterative instead of recursive, so a long run of invalid inputs can no
    longer exhaust the recursion limit; the redundant double validation of
    the recursive version is also gone.
    """
    prompt = message + " (" + "/".join(map(str, options)) + ") :"
    choice = input(prompt).lower()
    while choice not in options:
        choice = input(prompt).lower()
    return choice
|
3ac387443ff006b47182bba8a64cd582e6732f77
| 73,785
|
def are_multisets_equal(x, y):
    """Return True if both vectors create equal multisets.
    input:
        x, y -- 1-d numpy arrays (or any sortable sequences)
    output:
        True if multisets are equal, False otherwise -- boolean
    Not vectorized implementation.

    Unlike the previous version, the inputs are NOT sorted in place, so the
    caller's arrays are left untouched; the cheap length check also runs
    before any sorting work.
    """
    if len(x) != len(y):
        return False
    # sorted() copies, avoiding the in-place x.sort()/y.sort() mutation.
    x_sorted = sorted(x)
    y_sorted = sorted(y)
    return all(a == b for a, b in zip(x_sorted, y_sorted))
|
ee911bfacb805ca132d48492d46c12eb558e7f9d
| 73,789
|
def _sort_tag_version(release_data: dict) -> tuple:
"""Sort a list of releases by tag version."""
return tuple(int(x) for x in release_data["tag_name"].lstrip("v").split("."))
|
709466a10071a3513ebc431b8b1906bf84caf791
| 73,791
|
def _parse_header_row(row, **options):
"""Parse the header row of a table.
If a column spans multiple cells, then duplicate values are returned
for each. Duplicated columns are appended with ``suffix`` and index
for each repeat.
Parameters
----------
row : BeautifulSoup Tag object
A <tr> tag from the html, all of whose cells are <th> tags
suffix : string, optional (default='__')
The seperator between column name and index for multi-cols.
Returns
-------
columns : list
The headers as a list.
"""
suffix = options.pop('suffix', '__')
# If exists <td> tags then not a header row
if row.find_all('td'):
raise ValueError("`row` is not a table header.")
columns = []
for x in row.find_all('th'):
colspan = int(x.attrs.pop('colspan', 1))
if colspan > 1:
columns += \
[x.text.strip() + suffix + str(i) for i in range(colspan)]
else:
columns += [x.text.strip()]
return columns
|
7cf011da337443d7501ecb4cf8fee68b4c3dca70
| 73,792
|
def get_api_url(account):
    """construct the tumblr API URL

    Bare account names (no '.') are expanded to '<name>.tumblr.com'.
    NOTE: deliberately keeps the module-level `blog_name` global up to
    date, since other code may read it after this call.
    """
    global blog_name
    blog_name = account if '.' in account else account + '.tumblr.com'
    return 'http://api.tumblr.com/v2/blog/' + blog_name + '/posts'
|
10efd015940e7afee6173a5cd37560b0e1f4d8da
| 73,793
|
def xml_elements_equal(first, second):
    """
    Tests two XML elements for equality.
    Two elements are equal when tag, text, tail, attributes, and child
    count all match, and every pair of children is recursively equal.
    Parameters
    ----------
    first : _Element
        The first element.
    second : _Element
        The second element.
    Returns
    -------
    bool
        Whether the two elements are equal.
    """
    same_shallow = (
        first.tag == second.tag
        and first.text == second.text
        and first.tail == second.tail
        and first.attrib == second.attrib
        and len(first) == len(second)
    )
    if not same_shallow:
        return False
    return all(xml_elements_equal(a, b) for a, b in zip(first, second))
|
7dd500c7d0ff795aa6c7dd2ab0c1078738089235
| 73,794
|
import yaml
def get_config(path: str) -> dict:
    """Load the YAML file at *path* and return its parsed contents."""
    with open(path) as stream:
        return yaml.safe_load(stream)
|
3ba743e2a699aafa972a2dfcde0c9257408cd0fd
| 73,795
|
def find_smallest_positive(items) -> int:
    """
    Returns the smallest positive integer that does not exist in the given int array.
    :param items: array of ints
    :return: the smallest positive int not in the array
    """
    # The answer is always in 1..len(items)+1, so a presence table of
    # len(items) flags suffices; values outside that range are irrelevant.
    n = len(items)
    present = [False] * n
    for value in items:
        if 0 < value <= n:
            present[value - 1] = True
    for candidate, flag in enumerate(present, start=1):
        if not flag:
            return candidate
    # All of 1..n are present, so the answer is the next integer.
    return n + 1
|
3a8fb34314303a4828af170741f8e9ba58deac49
| 73,801
|
from typing import Type
import pydantic
def get_pydantic_base_orm_config() -> Type[pydantic.BaseConfig]:
    """
    Returns empty pydantic Config with orm_mode set to True.
    :return: empty default config with orm_mode set.
    :rtype: pydantic Config
    """
    # Build the subclass dynamically; equivalent to a class statement with
    # a single `orm_mode = True` attribute.
    return type("Config", (pydantic.BaseConfig,), {"orm_mode": True})
|
ef3927339cbccf1ccaa88c9422d1004f0f3ffac3
| 73,803
|
def INK(n):
    """
    Returns control codes to set the ink colour (0-7).
    Use this in a ``PRINT`` or ``SET`` command. Example:
    ``PRINT("normal",INK(1),"blue",INK(2),"red")``
    Args:
    - n - integer - the ink colour (0-7)
    """
    # chr(16) is the INK control code, followed by the colour byte.
    return chr(16) + chr(int(n))
|
6cc3962ec4a6906668c4e2a703c41e99d1e4a175
| 73,805
|
def create_window(name, title, message, options=None, required=None, active=True, window_type="FORM"):
    """
    Returns a window `dict` to be used by lottus
    :param name `str`: name of the window
    :param title `str`: title of the window
    :param message `str`: message of the window
    :param options `list`: list of `dict` options from which the client must choose
    :param required `dict`: the variable that will be created and stored in the session
    :param active `bool`: indicates whether the window will be showed to the client
    :param window_type `str`: indicates whether the will is a FORM or a MESSAGE
    """
    # NOTE(review): `required` is accepted but never placed in the returned
    # dict — presumably consumed elsewhere; confirm before relying on it.
    window = {}
    window['name'] = name
    window['message'] = message
    window['title'] = title
    window['options'] = options
    window['active'] = active
    window['type'] = window_type
    return window
|
3bdb6614541b787c2999ce529433ee207aaa5b87
| 73,808
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.