content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def get_word(word):
    """Return the normalized text content of a word node.

    Concatenates the stripped strings of every child whose tag name is
    None (plain text) or "s"; children with any other tag are skipped.
    """
    parts = [child.string.strip()
             for child in word.contents
             if child.name in (None, "s")]
    return "".join(parts)
|
7625e485b66a95e53cb9d0ccea30190408d7579b
| 652,513
|
def hmean(*args):
"""Calculate the harmonic mean of the given values.
http://en.wikipedia.org/wiki/Harmonic_mean
Args:
*args: List of numbers to take the harmonic mean of.
Returns:
Harmonic mean of args, or NaN if an arg is <= 0.
"""
for val in args:
if val <= 0:
return float('NaN')
return len(args) / sum(1. / val for val in args)
|
2083ce1f9ea321827494fbcdfa45ed3edba56bc5
| 206,547
|
def kmp(sequence, sub):
    """
    Knuth–Morris–Pratt search: return the list of starting indices at
    which `sub` occurs inside the larger `sequence`.  Matches are
    non-overlapping (the scan restarts after each full match).
    Often used for string matching.
    """
    # Failure table: partial[i] is the length of the longest proper
    # prefix of sub[:i+1] that is also a suffix of it.
    partial = [0]
    for idx in range(1, len(sub)):
        k = partial[idx - 1]
        while k > 0 and sub[k] != sub[idx]:
            k = partial[k - 1]
        if sub[k] == sub[idx]:
            partial.append(k + 1)
        else:
            partial.append(k)
    # Scan the sequence, using the table to avoid re-comparing prefixes.
    matches = []
    k = 0
    for idx in range(len(sequence)):
        while k > 0 and sequence[idx] != sub[k]:
            k = partial[k - 1]
        if sequence[idx] == sub[k]:
            k += 1
        if k == len(sub):
            matches.append(idx - (k - 1))
            k = 0  # restart so reported matches never overlap
    return matches
|
dae57355b8da6e7f33662196c78d6f3832a8d009
| 187,004
|
from typing import Union
import torch
def convert_dict_entries_to_tensors(
    loss_params: dict, device: Union[str, torch.device]
) -> dict:
    """Replace scalar floats/ints in a loss dict with torch tensors.

    Args:
        loss_params: mapping of loss name -> dict of parameter values
        device: device the new tensors are placed on
    Returns:
        The same dict, mutated in place, with scalars converted.
    """
    for loss_name, params in loss_params.items():
        for param_key, param_val in params.items():
            # Exact type checks on purpose: bools (an int subclass) and
            # existing tensors are deliberately left untouched.
            if type(param_val) == float:
                params[param_key] = torch.tensor(
                    param_val, dtype=torch.float, device=device
                )
            elif type(param_val) == int:
                params[param_key] = torch.tensor(
                    param_val, dtype=torch.int, device=device
                )
    return loss_params
|
3620a7a9858d14c0bb01319a92a406a6cf525946
| 72,317
|
def typeOf(value):
    """
    Get the type of value.
    :param value: What to get the type of.
    :type value: anything
    :returns: str
    """
    return "{}".format(type(value))
|
081500ca6554f71b0a87439be16bd3fb067bdc2a
| 607,782
|
def get_ages(df, sex):
    """Returns the ages of men or women, whom Embarked from section C.
    Parameters:
        df(pyspark.sql.DataFrame): based on the titanic.csv module
        sex(str): for our dataset, it is a binary classification of 'male' or 'female'
    Returns: List[float]
    """
    # Keep only passengers of the requested sex who embarked at 'C',
    # then pull the Age column back to the driver as a plain list.
    embarked_c = df.filter((df.Embarked == 'C') & (df.Sex == sex))
    return embarked_c.rdd.map(lambda person: person.Age).collect()
|
93d49fc11845b8dc560c7235e441eb7b101d19d1
| 643,327
|
from pathlib import Path
import logging
def evaluate_implementation(callback_file: Path, function_name: str) -> bool:
    """Checks whether a function name is found in a source file or not."""
    source = callback_file.read_text()
    found = function_name in source
    if found:
        logging.info(f"Found '{function_name}' in '{callback_file}'.")
    else:
        logging.error(f"Did not find '{function_name}' in '{callback_file}'.")
    return found
|
3ad087dc2db09aeb78874c9b934a553f7457e511
| 38,088
|
def task_id_in(ids, body, message):
    """Return true if task id is member of set ids'."""
    task_id = body['id']
    return task_id in ids
|
2e9576dab616f70a1181e2f52808e24bb295ca17
| 314,383
|
from typing import Callable
def function_name(func: Callable) -> str:
"""
Return the qualified name of the given function.
Builtins and types from the :mod:`typing` package get special treatment by having the module
name stripped from the generated name.
"""
# For partial functions and objects with __call__ defined, __qualname__ does not exist
module = func.__module__
qualname = getattr(func, '__qualname__', repr(func))
return qualname if module == 'builtins' else '{}.{}'.format(module, qualname)
|
4b992fdcfa4e47a0a289d0eba7a97136625d8e5d
| 430,163
|
def open_symbols_file(index):
    """
    Open the symbols file for the given index and return its symbols.

    Parameters:
    ----------
    index: str, the index symbol.
    Returns:
    -------
    symbols: list of symbols, one per line, stripped of whitespace.
    """
    # `with` guarantees the file handle is closed even if reading raises;
    # the old open()/close() pair leaked the handle on error.
    with open('data/symbols_files/' + index + '_symbols.dat', 'r') as f:
        return [symbol.strip() for symbol in f]
|
bbc303f73a2b0a408c3e9416d6746a376fe12d57
| 442,362
|
def validateDSType(dsType):
    """
    >>> validateDSType('counter')
    'COUNTER'
    >>> validateDSType('ford prefect')
    Traceback (most recent call last):
    ValueError: A data source type must be one of the following: GAUGE COUNTER DERIVE ABSOLUTE COMPUTE
    """
    valid = ['GAUGE', 'COUNTER', 'DERIVE', 'ABSOLUTE', 'COMPUTE']
    candidate = dsType.upper()
    if candidate not in valid:
        raise ValueError(
            'A data source type must be one of the following: %s' % ' '.join(valid))
    return candidate
|
61cbab13382d2cf997a22bc2b993be18d00d6cfb
| 246,700
|
from typing import Any
def _make_arg_str(arg: Any) -> str:
"""Helper function to convert arg to str.
:param arg: argument
:type arg: Any
:return: arg converted to str
:rtype: str
"""
arg = str(arg)
too_big = len(arg) > 15 or "\n" in arg
return "..." if too_big else arg
|
480161c8061df2fdfc14f051fd0c29657fbebc56
| 82,608
|
def next_power_of_two(num):
    """Returns the next power of two >= num.
    Args:
        num: A positive integer.
    Returns:
        The next power of two >= num (1 for num <= 1).
    """
    result = 1
    while result < num:
        result <<= 1
    return result
|
f15a8e303612a8537c65eb33f7a43b88e046393d
| 354,320
|
def state_symbols_match(pattern, value):
    """ returns True if the value matches the pattern, where '.' can be used
    as placeholders that match every state """
    for expected, actual in zip(pattern, value):
        if expected != '.' and expected != actual:
            return False
    return True
|
4398bbd23d9ab526784c6109cc54251f5fb6d9f4
| 167,461
|
def evaluate_g8( mu, kappa, nu, sigma, s8 ):
    """
    Evaluate the eighth constraint equation and also return the jacobian
    :param float mu: The value of the modulus mu
    :param float kappa: The value of the modulus kappa
    :param float nu: The value of the modulus nu
    :param float sigma: The value of the modulus sigma
    :param float s8: The value of the constraint
    """
    residual = 4 * mu * ( kappa + nu - 2 * sigma ) - 2 * sigma - s8**2
    jacobian = {
        'mu': 4 * ( kappa + nu - 2 * sigma ),
        'kappa': 4 * mu,
        'nu': 4 * mu,
        'sigma': -8 * mu - 2,
        's8': -2 * s8,
    }
    return residual, jacobian
|
d1be8deb9f38dae55082a341e2501044d7b2aef7
| 700,981
|
def bb_center(row):
    """ Takes a twitter place bounding box (in lon, lat) and returns the centroid of the quadrilateral (in lat, lon)."""
    corners = row[0]
    # Average the four corner coordinates; note output order flips to lat, lon.
    avg_lon = sum(corners[k][0] for k in range(4)) / 4
    avg_lat = sum(corners[k][1] for k in range(4)) / 4
    return [avg_lat, avg_lon]
|
2c2dd977b92b514c75ebb82de797466ef2d497fa
| 562,972
|
import mmap
def get_num_lines(file_path: str) -> int:
    """Count the number of lines in the log file
    Parameters
    ----------
    file_path: str
        The filepath to the file to be counted
    Returns
    -------
    int
        The number of lines in the file
    """
    # Open read-only: mmap only needs read access here, and the old "r+"
    # mode failed on files the user cannot write.  `with` also guarantees
    # both the file handle and the mapping are released on error.
    with open(file_path, "rb") as fp:
        with mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) as buf:
            lines = 0
            while buf.readline():
                lines += 1
            return lines
|
6ebed4620d57fa1959e4d8c2cb2db841fcb94dc7
| 577,521
|
def _fake_status_for_screenshot(status):
"""Return a fake status line for screenshots."""
return {
'stopped': '0:00:00 / 3:42:37 stopped [|| ]',
'playing': '0:20:85 / 3:42:37 playing [||| ]',
'recording': '0:11:37 / 3:42:37 recording * [|||||||| ]',
}[status.mode]
|
839983a2e68ab2e12bba1a8023333e616d72eeac
| 438,053
|
def constant_function(c):
    """Create a constant function
    Parameters
    ----------
    c : float
        constant to return
    Returns
    -------
    function
        Constant function equal to `c`
    """
    def _constant(x):
        # The argument is ignored; the captured constant is always returned.
        return c
    return _constant
|
6f36f1ef9cb061073e01dc032cc568bc5da85155
| 315,611
|
def clarke_error_zone_detailed(act, pred):
    """
    This function outputs the Clarke Error Grid region (encoded as integer)
    for a combination of actual and predicted value.

    Encoding: 0 = A, 1 = B (lower), 2 = B (upper), 3 = C (lower),
    4 = C (upper), 5 = D (left), 6 = D (right), 7 = E (right lower),
    8 = E (left upper).

    NOTE: branch order matters — each test assumes all earlier regions
    have already been excluded, so the conditions must not be reordered.

    Based on 'Evaluating clinical accuracy of systems for self-monitoring of blood glucose':
    https://care.diabetesjournals.org/content/10/5/622
    """
    # Zone A: both low, or prediction within 20% of the actual value
    if (act < 70 and pred < 70) or abs(act - pred) < 0.2 * act:
        return 0
    # Zone E - left upper
    if act <= 70 and pred >= 180:
        return 8
    # Zone E - right lower
    if act >= 180 and pred <= 70:
        return 7
    # Zone D - right
    if act >= 240 and 70 <= pred <= 180:
        return 6
    # Zone D - left (chained comparison: act <= 70 and 70 <= pred <= 180)
    if act <= 70 <= pred <= 180:
        return 5
    # Zone C - upper
    if 70 <= act <= 290 and pred >= act + 110:
        return 4
    # Zone C - lower
    if 130 <= act <= 180 and pred <= (7/5) * act - 182:
        return 3
    # Zone B - upper: over-prediction not caught by any zone above
    if act < pred:
        return 2
    # Zone B - lower: everything else (under-prediction)
    return 1
|
83ddd6d5df4515c274cc4fa919a8716d6d08d4e6
| 75,278
|
def minRefuelStops_DP(
    target_distance: int, start_fuel: int, stations: list[list[int]]
) -> int:
    """1-D Dynamic Programming
    DP_table[num_stops]
        the furthest distance (== max gas) that we can get
        with num_stops times of refueling.
    So for every station, stations[i],
    if the current distance DP_table[num_stops] >= stations[i][0], we can refuel:
        DP_table[num_stops + 1] = max(
            DP_table[num_stops + 1],
            DP_table[num_stops] + stations[i][1]
        )
    In the end, we'll return
        the first num_stops with DP_table[num_stops] >= target,
        otherwise -1.
    Complexity:
        n = len(stations)
        Time: O(n^2)
        Space: O(n) for the DP table
    Args:
        target_distance:
        start_fuel:
        stations: list of [distance, gallon] pairs, in sorted order of distance
    Returns: LEAST # of stops to destination OR -1 if not possible
    Examples:
        >>> stations = [[10,60],[20,30],[30,30],[60,40]]
        >>> minRefuelStops_DP(target_distance=100, start_fuel=10, stations=stations)
        2
        >>> minRefuelStops_DP(target_distance=100, start_fuel=1, stations=stations)
        -1
        >>> minRefuelStops_DP(target_distance=1, start_fuel=1, stations=[])
        0
        >>> stations = [[10,10],[20,10],[30,30],[60,40]]
        >>> minRefuelStops_DP(target_distance=100, start_fuel=10, stations=stations)
        4
    """
    # DP_table[num_stops] <=> furthest distance (== max gas) reachable after
    # exactly num_stops refuels; |DP_table| == len(stations) + 1.
    DP_table = [start_fuel] + [0] * len(stations)
    # Removed the dead `pass` placeholder the original left behind, and
    # hoisted the loop-invariant station unpack out of the inner loop.
    for station_i, (station_distance, station_fuel) in enumerate(stations):
        # Iterate stop counts backwards (worst case -> best case) so each
        # station is counted at most once per stop count, 0/1-knapsack style.
        for num_stops in range(station_i, -1, -1):
            curr_fuel = DP_table[num_stops]
            if curr_fuel >= station_distance:  # station is reachable
                # Visit the station; keep the better of the two options.
                DP_table[num_stops + 1] = max(
                    DP_table[num_stops + 1], curr_fuel + station_fuel
                )
    # Return the smallest stop count whose reach covers the target.
    for num_stops, travel_distance in enumerate(DP_table):
        if travel_distance >= target_distance:
            return num_stops
    return -1
|
af445b1af66e2758eb5a7f9ac0032a90b60963db
| 657,049
|
def clip(value, min_v, max_v):
    """
    Clip the given value to be within a range
    https://stackoverflow.com/questions/9775731/clamping-floating-numbers-in-python
    """
    # Clamp from above first, then from below (min_v wins if the bounds cross).
    capped = min(value, max_v)
    return max(capped, min_v)
|
cd4a5977832acbdc8522fa0637aa79743e586fe9
| 55,958
|
def count_nines(loss_rate):
    """
    Returns the number of nines after the decimal point before some other digit happens.

    E.g. a loss_rate of 0.05 (availability 0.95) yields 1; a loss_rate of
    exactly 0 yields 0 once the probe underflows to zero.
    """
    nines = 0
    threshold = 0.1
    # Keep dividing the probe by ten until it drops below the loss rate;
    # each division corresponds to one more leading nine of availability.
    while threshold >= loss_rate:
        threshold /= 10.0
        nines += 1
        if threshold == 0.0:
            # Underflow: loss_rate is zero (or denormal-tiny) — report 0.
            return 0
    return nines
|
e95894f03b5d976a12537517cfaea02b41520fee
| 605,657
|
def force_int(s):
    """Forcibly convert to int
    :param s: any python object
    :return: int or None
    """
    try:
        return int(s)
    except (ValueError, TypeError):
        # ValueError: unparsable string; TypeError: objects like None that
        # int() rejects outright.  The old code only caught ValueError, so
        # force_int(None) crashed despite the "any python object" contract.
        return None
|
93723eab77e4f7141a48bd4c0a5e01d825478d34
| 432,747
|
def clean_code(code: str, lengte: int) -> str:
    """
    Strip leading and trailing whitespace from a code and left-pad it with
    zeros up to the requested length.
    :param str code: The code.
    :param int lengte: The desired length of the code.
    :rtype: str
    """
    # The (Dutch) docstring always promised whitespace removal, but the old
    # implementation only zero-filled; strip first so ' 42 ' -> '00042'.
    return code.strip().zfill(lengte)
|
275ac814f8110675c5382b1a88ce68452d4a9f40
| 427,134
|
def get_child_object(obj, child_name):
    """Return the child object
    Arguments:
        obj {object} -- parent object
        child_name {str} -- child name
    Returns:
        object -- child object, or None when no attribute or item matches
    """
    if hasattr(obj, '__getattr__'):
        try:
            return obj.__getattr__(child_name)
        except AttributeError:
            pass
    if hasattr(obj, '__getattribute__'):
        try:
            return obj.__getattribute__(child_name)
        except AttributeError:
            pass
    if hasattr(obj, '__getitem__'):
        try:
            return obj.__getitem__(child_name)
        except (AttributeError, KeyError, IndexError, TypeError):
            # Item lookup failures mean "no such child" just like a missing
            # attribute; the old code only caught AttributeError, so a plain
            # dict with a missing key leaked a KeyError to the caller.
            pass
    # Nothing matched: make the None fall-through explicit.
    return None
|
d9404e09cdeaaf755c75675e6c0dc42f5fc7adf2
| 51,221
|
def parse_logistic_flag(kwargs):
    """ Checks whether y_dist is binomial """
    return kwargs.get("y_dist") == "binomial"
|
2f4add10cebc7b0975212ca33acbbaed5717f13a
| 458,735
|
def test_student(incorrect_msg, max_attempts, description, solution):
    """
    Interactively quiz a student on one problem.

    Parameters:
    - incorrect_msg: message shown to the student after a wrong answer
    - max_attempts: int, maximum number of answers the student may submit
    - description: str, the problem statement shown before each attempt
    - solution: str, the correct answer (compared whitespace-stripped)

    Returns True as soon as the student answers correctly, False once the
    attempt budget is exhausted (after revealing the solution).
    """
    attempts = 0
    while True:
        attempts += 1
        # Show the problem and collect a whitespace-stripped answer.
        print('The problem is {}'.format(description))
        answer = input('Your answer: ').strip()
        if answer == solution.strip():
            print('Great job!')
            return True
        tries_left = max_attempts - attempts
        if tries_left > 0:
            # Wrong, but attempts remain: encourage and show the budget.
            print('{} You have {} tries left.'.format(
                incorrect_msg, tries_left))
        elif tries_left == 0:
            # Budget exhausted: reveal the answer and give up.
            print(
                'Out of tries for this one - moving on. The answer was {}'.format(solution))
            return False
|
d330c8528d695aa8c722dbb9dfe2e6c2ca95e83d
| 283,604
|
def get_list_of(key, ch_groups):
    """
    Return a list of values for key in ch_groups[groups]
    key (str): a key of the ch_groups[groups] dict
    ch_groups (dict): a group-name-indexed dict whose values are
    dictionaries of group information (see expand_ch_groups)
    Example: get_list_of('system', ch_groups)
    If ch_groups is the one specified in the json in the expand_ch_groups
    example, this will return the list [ 'geds', 'spms', 'auxs' ]
    """
    collected = []
    for group_info in ch_groups.values():
        if key not in group_info:
            continue
        value = group_info[key]
        # Preserve first-seen order while de-duplicating.
        if value not in collected:
            collected.append(value)
    return collected
|
f5bbc32c836d0de5e9aa2c055d97bacdfc794d0e
| 14,546
|
import pkg_resources # part of setuptools
def get_pkg_version(pkg_name, parse=False):
"""
Verify and get installed python package version.
:param pkg_name: python package name
:param parse: parse version number with pkg_resourc.parse_version -function
:return: None if pkg is not installed,
otherwise version as a string or parsed version when parse=True
"""
try:
version = pkg_resources.require(pkg_name)[0].version
return pkg_resources.parse_version(version) if parse else version
except pkg_resources.DistributionNotFound:
return None
|
11d8ffb604cb403f461e1eab814b48c5807d95eb
| 438,482
|
def is_rescue_entry(boot_entry):
    """
    Determines whether the given boot entry is rescue.
    :param BootEntry boot_entry: Boot entry to assess
    :return: True is the entry is rescue
    :rtype: bool
    """
    kernel_image = boot_entry.kernel_image
    return 'rescue' in kernel_image.lower()
|
ba456c2724c3ad4e35bef110ed8c4cc08147b42c
| 4,208
|
def rate(hit, num):
    """Return the fraction of `hit`/`num`."""
    # Falsy num (0 or 0.0) falls back to 1.0 to avoid division by zero.
    denominator = num if num else 1.0
    return hit / denominator
|
b055fa6995f85ab223747dd369a464644046f7d7
| 696,493
|
def extract_active_ids(group_status):
    """Extracts all server IDs from a scaling group's status report.
    :param dict group_status: The successful result from
        ``get_scaling_group_state``.
    :result: A list of server IDs known to the scaling group.
    """
    active_servers = group_status['group']['active']
    return [server['id'] for server in active_servers]
|
2247e1aec63058e78b32e4d837e14ef5d978db16
| 95,639
|
import io
import torch
def reload_script_model(module):
    """
    Save a jit module and load it back.
    Similar to the `getExportImportCopy` function in torch/testing/
    """
    # Round-trip through an in-memory buffer instead of touching disk.
    stream = io.BytesIO()
    torch.jit.save(module, stream)
    stream.seek(0)
    return torch.jit.load(stream)
|
d3bb5b9efa7c6a9cd28fd007d9cc18cd65475baf
| 290,473
|
def getColumnsEndingAt(columns, zLevel):
    """Return the columns whose end joint sits at elevation ``zLevel``.

    :param columns: object exposing an ``inventory`` dict of columnID -> column
    :param zLevel: elevation compared against ``column.endJoint.z``
    :return: dict mapping ``column.uniqueName`` to each matching column
    """
    columnGroup = {}
    # `.iteritems()` is Python 2 only and raises AttributeError on the
    # Python 3 dicts used throughout this codebase; `.items()` fixes it.
    for columnID, column in columns.inventory.items():
        # 1 mm-scale tolerance absorbs floating-point elevation noise.
        if abs(zLevel - column.endJoint.z) <= 0.001:
            columnGroup[column.uniqueName] = column
    return columnGroup
|
4f6b7aac922bd5985b6faeb509d26bf6aec98629
| 696,047
|
def find_max(a):
    """
    Find the coordinates (i0, j0) in terms of pixels of the maximum correlation peak from
    the polynomial surface fit parameters
    :param a: List of polynomial surface fit parameters returned by polyfit2d()
    Returns i0, j0
    """
    # Both coordinates share the same denominator from the fit parameters.
    denominator = 4 * a[0] * a[1] - a[2] ** 2
    i0 = (a[2] * a[3] - 2 * a[0] * a[4]) / denominator
    j0 = (a[2] * a[4] - 2 * a[1] * a[3]) / denominator
    return i0, j0
|
8ee0a6ce5866e62ad7708e7696495cebe0c745b4
| 475,293
|
import toml
import logging
def _parse_conf(conf_file: str):
"""Parses the incoming toml config file"""
try:
conf = toml.load(conf_file)
except Exception as e:
logging.warning(f"Failed to decode TOML file: {e}. Using chrome as browser.")
return "chrome", "", "", "", ""
else:
sid = ''
token = ''
to = ''
from_ = ''
if 'twilio' in conf:
if 'auth' in conf['twilio']:
sid = conf['twilio']['auth']['sid']
token = conf['twilio']['auth']['token']
if 'phone' in conf['twilio']:
to = conf['twilio']['phone']['to']
from_ = conf['twilio']['phone']['from']
browser = conf['conf']['browser']
if 'refresh_mins' in conf:
refresh_mins = conf['conf']['refresh_mins']
else:
refresh_mins = 60
return browser, refresh_mins, sid, token, to, from_
|
af5e659305210db6f74a1e99870506034a6bc4d9
| 606,555
|
from typing import Tuple
def to_slice(start: Tuple[int, ...], shape: Tuple[int, ...]):
"""Convert start and shape tuple to slice object"""
return tuple(slice(st, st + sh) for st, sh in zip(start, shape))
|
99bd9ef3ec37e23cabf50c45dbe13fff14c29eec
| 540,376
|
def _cut_series_by_rank(df, ts_settings, n=1, top=True):
"""
Select top-n or bottom-n series by rank
df: pandas df
ts_settings: dict
Parameters for datetime DR projects
n: int
number of series to select
top: bool
Select highest (True) or lowest series (False)
Returns:
--------
pandas df
"""
df_agg = df.groupby(ts_settings['series_id']).mean()
selected_series_names = (
df_agg.sort_values(by=ts_settings['target'], ascending=top).tail(n).index.values
)
return selected_series_names
|
2032b16d67cd38413abd985d40a5084b5c3a11d4
| 165,689
|
def get_type_and_data(h_node):
    """ Helper function to return the py_type and data block for a HDF node """
    py_type = h_node.attrs["type"][0]
    if h_node.shape == ():
        # Scalar dataset: read the single value directly.
        return py_type, h_node.value
    # Array dataset: slice out the whole data block.
    return py_type, h_node[:]
|
49c2478bb02c370a7e916aef7cd256fa1dd92a77
| 358,951
|
def alpha(requestContext, seriesList, alpha):
    """
    Assigns the given alpha transparency setting to the series. Takes a float value between 0 and 1.
    """
    for single_series in seriesList:
        single_series.options['alpha'] = alpha
    return seriesList
|
25ef9bad1b39549585bdb175e487236591d0816f
| 16,194
|
def amortization(loan, r, c, n):
    """Amortization
    Returns: The amount of money that needs to be paid at the end of
    each period to get rid of the total loan.
    Input values:
        loan : Total loan amount
        r    : annual interest rate
        c    : number of compounding periods a year
        n    : total number of compounding periods
    """
    rate_per_period = r / c
    # Standard annuity formula: payment = L*i / (1 - (1+i)^-n)
    discount = 1 - (1 + rate_per_period) ** (-n)
    return loan * rate_per_period / discount
|
9f0a9c4f51c623a7935fcd595604ce9593a9530e
| 406,580
|
from functools import reduce
def process_group(grp):
    """
    Solve a system of modular congruences built from a bus schedule.

    ``grp`` is a two-element list: grp[0] is an (unused) earliest-time
    string, grp[1] a comma-separated list where each entry is either a
    bus period (a number) or 'x' (ignored).  For every non-'x' entry at
    offset i with period m, this computes the smallest t such that
    t ≡ (m - i) (mod m) for all such entries, i.e. bus m departs exactly
    i minutes after t.  The system is solved pairwise with the Chinese
    Remainder Theorem, which requires the periods to be pairwise coprime.

    :param grp: list of two strings as described above
    :return: the smallest non-negative t satisfying every congruence
    """
    # Parsed for validation only; the earliest-time value is not used below.
    e_time = int(grp[0])
    buses = grp[1].split(",")
    size = len(buses)
    # Collect (residue, modulus) pairs for every concrete bus entry.
    mod_pairs = []
    for i in range(size):
        if buses[i] == "x":
            continue
        mod_val = int(buses[i])
        residue_val = (mod_val - i) % mod_val
        if residue_val == 0:
            # Store mod_val instead of 0; congruent mod mod_val, so equivalent.
            mod_pairs.append((mod_val, mod_val))
        else:
            mod_pairs.append((residue_val, mod_val))
    def extended_euclidean(a, b):
        """
        Given two ints a, b
        find a tuple of ints p, q
        such that a*p + b*q = 1.
        :param a: A positive int.
        :param b: A positive int.
        :return: A tuple of ints
        (p, q) such that
        a * p + b * q = 1.
        """
        if not b:
            return (1, 0)
        x, y = extended_euclidean(b, a % b)
        fct = a // b
        return (y, x - fct * y)
    def chinese_remainder_theorem(n1, n2):
        """
        Given a system of modular congruences
        of size two:
        t = x1 mod m1
        t = x2 mod m2
        Compute the smallest positive int t
        that satisifes this system.
        :param n1: A tuple representing (x1, m1)
        :param n2: A tuple representing (x2, m2)
        Notes
        ____
        The Chinese Remainder Theorem requires
        that gcd(m1, m2) = 1. Otherwise, a
        solution is not even guaranteed.
        :return: A tuple (t, m1*m2) where t is
        the smallest positive int that satisfies
        the above system of modular congruences.
        """
        # Extract.
        x1, m1 = n1
        x2, m2 = n2
        # Compute x, y such that m1 * x + m2 * y = 1.
        (x, y) = extended_euclidean(m1, m2)
        # The Chinese Remainder Theorem says the final
        # result is always mod m1*m2.
        # The modular residue is x2 * m1 * x + x1 * m2 * y.
        m_final = m1 * m2
        x_final = x2 * m1 * x + x1 * m2 * y
        # Return the result as a tuple (x_final, m_final)
        return ((x_final % m_final + m_final) % m_final, m_final)
    # To solve a larger system of modular congruences
    # we can reduce the nums array using the Chinese Remainder Theorem.
    # It is required that all mod values in the nums be pairwise
    # relatively prime otherwise the answer is not guaranteed to be correct.
    # It can be verified by pairwise gcd computation for all the
    # mod values in nums that gcd(x, y) = 1 for every distinct pair
    # of mod values x, and y.
    residue, modular = reduce(chinese_remainder_theorem, mod_pairs)
    # Reducing the solution using the two-pair CRT
    # we get the smallest value of positive t
    # that satisfies all the modular congruences in
    # `mod_pairs`.
    return residue
|
9977152cd78cbd1bf5a23b566cefdf7489348ff5
| 510,348
|
from typing import Optional
import re
def email_is_valid(email: Optional[str]) -> bool:
"""
Validate that a valid email was provided.
None is a valid option.
Parameters
----------
email: Optional[str]
The email to validate.
Returns
-------
status: bool
The validation status.
"""
if email is None:
return True
if re.match(r"^[a-zA-Z0-9]+[\.]?[a-zA-Z0-9]+[@]\w+[.]\w{2,3}$", email):
return True
return False
|
58efc79668c4856fc74b7fc470405c5e27256c60
| 42,626
|
def calculate_iou(proposals, label):
    """
    Intersection over union between a region-proposal box and a label box.
    Adapted from https://gist.github.com/meyerjo/dd3533edc97c81258898f60d8978eddc
    :param proposals: A region proposal bounding box (x1, y1, x2, y2)
    :param label: Bounding box of label (x1, y1, x2, y2)
    """
    # Corners of the intersection rectangle.
    left = max(proposals[0], label[0])
    top = max(proposals[1], label[1])
    right = min(proposals[2], label[2])
    bottom = min(proposals[3], label[3])
    # Widths clamp at zero when the boxes do not overlap.
    inter_area = abs(max((right - left, 0)) * max((bottom - top), 0))
    proposal_area = abs((proposals[2] - proposals[0]) * (proposals[3] - proposals[1]))
    label_area = abs((label[2] - label[0]) * (label[3] - label[1]))
    union_area = proposal_area + label_area - inter_area
    return inter_area / float(union_area)
|
fb2e724e2426c47d742b8c55be8e095b0304d13a
| 609,313
|
def has_repetition(page_list):
    """
    Check if a list of page hits contains a page repetition (A >> B >> A) == True.
    Run on journeys with collapsed loops so stuff like A >> A >> B are not captured as a repetition.
    Similar to cycles/triangles, but from a flat perspective.
    :param page_list: list of page hits derived from BQ user journey
    :return: True if there is a repetition
    """
    distinct_pages = set(page_list)
    return len(distinct_pages) != len(page_list)
|
685ac3698cc29fe3bf9980676c4212ef110d12a2
| 395,640
|
def calculate_overshoot(cigar):
    """ The calculate overshoot function calculates the number of basepairs that have not been mapped but are part of
    the read.
    :param cigar: a list containing tuples representing the cigar string.
    :return overshoot: an integer indicating the number of basepairs in the read before it is mapped.
    """
    overshoot = 0
    # Accumulate lengths until the first match operation (op code 0).
    for element in cigar:
        if not element[0]:
            break
        overshoot += element[1]
    return overshoot
|
687ce44bef05376339fbe7892b1c07b84981710f
| 79,018
|
def get_filenames(i):
    """Returns the filepaths for the output MusicXML and .png files.
    Parameters:
    - i: unique identifier for the score
    Returns:
    - (sheet_png_filepath, musicxml_out_filepath)
    """
    prefix = "dataset/"
    sheet_png = f"{prefix}{i}-sheet.png"
    musicxml = f"{prefix}{i}-musicxml.xml"
    return (sheet_png, musicxml)
|
4e26391eff80e08756431221d53c862c810c8071
| 692,048
|
from typing import List
import math
def lcm(nums: List[int]) -> int:
"""
Computes the LCM of the specified numbers
:param nums: Numbers to computer the LCM for
:return: LCM of the numbers
"""
n = nums[0]
for i in nums[1:]:
n = n * i // math.gcd(n, i)
return n
|
d4aef55265989a8cae9966fc59c94c99c52ffbdd
| 361,858
|
import torch
def offsets_from_counts(counts):
    """Creates a tensor of offsets from the given tensor of counts
    Parameters
    ----------
    counts : torch.Tensor
        1-d tensor representing the counts in a ragged array
    Returns
    -------
    torch.Tensor
        A 1-d tensor representing the offsets in a ragged array.
        Its length is one plus the length of the input `counts` array.
    """
    # Preserve the input's device when it is already a tensor.
    device = counts.device if isinstance(counts, torch.Tensor) else None
    counts = torch.as_tensor(counts, dtype=torch.int64, device=device)
    offsets = counts.new_zeros(counts.shape[0] + 1)
    # offsets[0] stays 0; the rest are running totals of the counts.
    offsets[1:] = torch.cumsum(counts, 0)
    return offsets
|
667d277afa76d3b746b5409888d25057f790712f
| 92,852
|
import torch
def tlbr2cthw(boxes):
"""
Convert top/left bottom/right format `boxes` to center/size corners."
:param boxes: bounding boxes
:return: bounding boxes
"""
center = (boxes[..., :2] + boxes[..., 2:])/2
sizes = boxes[..., 2:] - boxes[..., :2]
return torch.cat([center, sizes], dim=-1)
|
15dff8e597e1386e88a8a0417cf5061e830c863f
| 658,127
|
import asyncio
def async_test(test):
"""
Decorator to run async test methods.
"""
def wrapper(*args, **kwargs):
asyncio.run(test(*args, **kwargs))
return wrapper
|
b03c10f6b16fb7af148d21a89f1d3485b4fe4681
| 689,574
|
from typing import List
import re
def substitution(arr: List[str]) -> List[str]:
"""
>>> substitution(['a = 1;', 'b = input();', '',
... 'if a + b > 0 && a - b < 0:',' start()', 'elif a*b > 10 || a/b < 1:',
... ' stop()', 'print set(list(a)) | set(list(b))',
... '#Note do not change &&& or ||| or & or |',
... "#Only change those '&&' which have space on both sides.",
... "#Only change those '|| which have space on both sides."]
... ) #doctest: +NORMALIZE_WHITESPACE
['a = 1;', 'b = input();', '', 'if a + b > 0 and a - b < 0:',
' start()', 'elif a*b > 10 or a/b < 1:',
' stop()', 'print set(list(a)) | set(list(b))',
'#Note do not change &&& or ||| or & or |',
"#Only change those '&&' which have space on both sides.",
"#Only change those '|| which have space on both sides."]
"""
pattern = re.compile(r"(?<= )(&&|\|\|)(?= )")
return [pattern.sub(lambda x: "and" if x.group() == "&&" else "or", row)
for row in arr]
|
66c4ad7a1d8a14c703a71f20c97ecbabd7b484e7
| 379,103
|
def sanitize_sacred_arguments(args):
    """
    Recursively convert Sacred's read-only containers back to native types.

    Lists and dictionaries passed through Sacred become ReadOnlyLists and
    ReadOnlyDicts; this walks `args` (a single token, list, or dict) and
    rebuilds them as native lists/dicts, stringifying dict keys.  Any
    other value is returned unchanged.
    """
    if isinstance(args, dict):  # also captures ReadOnlyDicts
        return {
            str(key): sanitize_sacred_arguments(value)
            for key, value in args.items()
        }
    if isinstance(args, list):  # also captures ReadOnlyLists
        return [sanitize_sacred_arguments(item) for item in args]
    # Single token: pass through as-is.
    return args
|
07849b685bbbd84e9164352c1cf03f69f9ef731d
| 654,443
|
def ft2m(ft):
    """
    Converts feet to meters.

    :param ft: length in feet, or None
    :return: length in meters, or None when the input is None
    """
    # `is None` is the correct identity check; the old `== None` invokes
    # __eq__ and can misfire on objects with custom equality.
    if ft is None:
        return None
    return ft * 0.3048
|
ca2b4649b136c9128b5b3ae57dd00c6cedd0f383
| 5,187
|
def is_string_arg(parameter):
    """Whether the parameter's type is string or bytes."""
    return parameter["grpc_type"] in ("string", "bytes")
|
d0f7a3016bdf20edae9dc8bbcf2736da38b259ce
| 643,710
|
import re
def tokenize(string):
"""Parse query string for ngram into token objects
Parameters
----------
string : str
Query string with each token enclosed in a pair
of square brackets. In each token, the tag ``word``
and ``pos`` could be given as ``[word="他們" pos="N.*"]``.
To search with regex in ``word``, append ``.regex`` to
``word``: ``[word.regex="們$" pos="N.*"]``.
``pos`` by default uses regex search.
Returns
-------
list
A list of token objects (dictionaries), with each dictionary
representing the token in the query string (i.e. token enclosed
in the brackets). Each token has three key-value pairs:
- `tk`: ``str``. The pattern of the word to search for.
- `tk.regex`: ``bool``. Whether to use regex search with word.
- `pos`: ``str``. The pattern of the pos tag to search for.
"""
# Deal with single exact match of token
if string.find("[") == -1:
return [{
'tk': string,
'pos': None,
'tk.regex': False,
}]
# Scan through the string to find matching brackets
tokens = []
openPos =[]
depth = 0
for i, char in enumerate(string):
if char == '[':
openPos.append(i)
depth += 1
if char == ']':
start = openPos.pop()
depth -= 1
tokens.append({
'start': start,
'end': i,
'inside': string[start+1:i],
'depth': depth
})
# Get matching brackets at first depth level
tk_pat = re.compile('''word=['"]([^'"]+)['"]''')
pos_pat = re.compile('''pos=['"]([^'" ]+)['"]''')
tkRegEx_pat = re.compile('''word.regex=['"]([^'"]+)['"]''')
output = []
for tk in tokens:
if tk['depth'] == 0:
token = tk_pat.findall(tk['inside'])
tkRegEx = tkRegEx_pat.findall(tk['inside'])
token = tkRegEx if tkRegEx else token
pos = pos_pat.findall(tk['inside'])
output.append({
'tk': token[0] if len(token) > 0 else None,
'pos': pos[0] if len(pos) > 0 else None,
'tk.regex': True if tkRegEx else False,
})
return output
|
aa6117e41f891cdaecfe72f0d234e33d996e90dd
| 521,612
|
from typing import OrderedDict
def usage_table_format(result):
"""Format usage information as a table."""
table = []
for item in result:
row = OrderedDict()
row['Value'] = item['name']['localizedValue']
row['Usage'] = item['currentValue'] or "0"
row['Limit'] = item['limit'] or "0"
table.append(row)
return table
|
9a15e2c159878a6a4598c21c44ef2f1d17ec8450
| 164,232
|
def not_none_dim(rel):
    """ Returns the index of dimension which is not None (which is 1/-1) in rel
    Index is 0 based: 0 is right/left, 1 is front/back, 2 is above/below.

    Returns None when nothing matches (error case).
    NOTE(review): only +1 is matched even though the docstring mentions
    1/-1 — confirm whether -1 entries should also be reported.
    """
    # Removed the unreachable `continue` that followed the return.
    for index, element in enumerate(rel):
        if element == 1:
            return index
    # Should never reach here: None only on error
    return None
|
b7d5378a7eaa3758e3fbd52c569d252ddeb66abe
| 330,891
|
def exit(*args, **kwargs):
    """ Craft and return (without raising!) a SystemExit exception """
    message = "\n\t".join(args)
    return SystemExit(message)
|
e1b79c7c249233c4fee870af2b146bc791350059
| 136,633
|
from typing import Dict
def split_data(dish_dataset: Dict) -> Dict:
    """Splits dataset.

    Args:
        dish_dataset (Dict): Dataset with a 'name' column
            (DataFrame-like: supports column selection / .values).
    Returns:
        Dict: dish names packed under the 'names' key.
    """
    # Select the single 'name' column and flatten it to a plain list.
    dish_names = dish_dataset[['name']].values.flatten().tolist()
    return {'names': dish_names}
|
75a58f486693683bb02d3716c729a971d3da210f
| 347,911
|
def get_level(level):
    """Returns the level below the level specified, defaults to District."""
    # Map each known level (case-insensitive) to the level beneath it;
    # anything unrecognized falls back to 'District'.
    below = {
        'district': 'Site',
        'ta': 'Household',
        'site': 'Household',
    }
    return below.get(level.lower(), 'District')
|
389f8f3013455cc3607041f523a1da5df5f07055
| 145,418
|
import random
def get_shuffled_range(n):
    """Generate a random ordering of the integers in [0, n).

    :param n: The length of the returned list
    :returns: A list of the numbers from 0 to n-1, inclusive, shuffled
    """
    population = list(range(n))
    # Sampling n items out of n yields a full random permutation.
    return random.sample(population, k=n)
|
cc83d44903df9a587554fdd6f7e2898799a28f23
| 446,158
|
def _fetch_one(cursor, row_callback):
    """Fetch exactly one row and return it or `None`.

    The row is passed through ``row_callback(row, cursor)``; when the
    cursor does not hold exactly one row, nothing is fetched.
    """
    if cursor.rowcount != 1:
        return None
    return row_callback(cursor.fetchone(), cursor)
|
e4cdceba8b656905df3a7a8f47e7b1f575857704
| 236,296
|
def is_valid_project_type(type):
    """Validate supported project types.

    Currently only "maven" is supported; hopefully gradle in the future.
    Falsy values (None, empty string) are invalid.
    """
    # Equality with "maven" is False for every falsy or non-matching value,
    # so a single comparison covers all the original branches.
    return type == "maven"
|
b450423c06d778701c639c3175b87b271ddfb5d9
| 638,075
|
def vectorize(function):
"""
:param function: Function to be vectorized
:return: Vectorized function
Create new function y= vf(x), such that:
y = f(x)
/ map(f, x) if x is list
vf =
\ f(x) if x is not list
Example:
>>> import math
>>>
>>> sqrt = vectorize(math.sqrt)
>>> sqrt(2)
1.4142135623730951
>>>
>>> sqrt([1, 2, 3, 4, 5])
[1.0, 1.4142135623730951, 1.7320508075688772, 2.0, 2.23606797749979]
"""
def vectorized_function(x):
if isinstance(x, list):
return list(map(function, x))
return function(x)
return vectorized_function
|
551daccbb1264da1bae31f0477a65d76d2d29035
| 636,534
|
def __is_now_playing(track):
    """Returns True if the track is now playing.

    Expects a last.fm-style track dict; a playing track carries an
    '@attr' entry whose 'nowplaying' field is the string 'true'.
    """
    if '@attr' not in track:
        return False
    # KeyError propagates if '@attr' lacks 'nowplaying', as before.
    return track['@attr']['nowplaying'] == 'true'
|
c454a7e3158d2c3af29c665ecb5b618e5a90512e
| 124,218
|
def term_info_as_list(term_info):
    """
    Given a dictionary as returned by `parse_term_code`, return a list
    suitable for use as a sort key (year first, then spring flag).
    """
    return [term_info[key] for key in ("year", "spring")]
|
aaf1dd9c5d155d0d2b44779f3a2ae3994fbe9efe
| 293,265
|
def flatten(items):
    """Removes one level of nesting from items.

    items can be any sequence, but flatten always returns a list.
    Iterable elements (including strings, which expand to characters)
    are flattened one level; non-iterable elements are kept as-is.

    Fix: the bare ``except:`` previously swallowed every exception
    (including KeyboardInterrupt/SystemExit); only the TypeError raised
    by extending with a non-iterable is expected here.
    """
    result = []
    for item in items:
        try:
            result.extend(item)
        except TypeError:
            # Not iterable -- keep the element itself.
            result.append(item)
    return result
|
d064409430085dc0ebbb535565c0faa51a9dd510
| 154,914
|
def _getView(syn, view_id, clause=None):
    """
    Based on a user-defined query calls to synapse's tableQuery function and returns the entity-view generator object.
    :param syn: A Synapse object: syn = synapseclient.login(username, password) - Must be logged into synapse
    :param view_id: A Synapse ID of an entity-view (Note: Edit permission on its' files is required)
    :param clause: A SQL clause to allow for sub-setting & row filtering in order to reduce the data-size
    :return: An object of type synapse entity-view
    """
    query = 'select * from ' + view_id
    if clause is not None:
        # Append the user-supplied clause after a separating space.
        query += ' ' + clause
    return syn.tableQuery(query)
|
c61f2b1b674b7e31bbdbbdb9d4cda629400a3e03
| 676,242
|
def subsample_fourier(x, k):
    """
    Subsampling of a vector performed in the Fourier domain
    Subsampling in the temporal domain amounts to periodization
    in the Fourier domain, hence the formula.
    Parameters
    ----------
    x : tensor_like
        input tensor with at least 3 dimensions, the last
        corresponding to time (in the Fourier domain).
        The last dimension should be a power of 2 to avoid errors.
    k : int
        integer such that x is subsampled by 2**k
    Returns
    -------
    res : tensor_like
        tensor such that its fourier transform is the Fourier
        transform of a subsampled version of x, i.e. in
        FFT^{-1}(res)[t] = FFT^{-1}(x)[t * (2**k)]
    Notes
    -----
    NOTE(review): the code folds the frequency axis by a factor of ``k``
    itself (``N // k``), while this docstring describes subsampling by
    ``2**k`` -- confirm which factor callers actually pass.
    Assumes ``x`` is a torch tensor (uses ``.view`` / ``mean(dim=...)``)
    whose second-to-last dim is the frequency axis and whose last dim of
    size 2 holds real/imaginary parts -- TODO confirm against callers.
    """
    # Number of bins along the (second-to-last) frequency axis.
    N = x.shape[-2]
    # Periodize: split the frequency axis into k blocks of N // k bins and
    # average the blocks -- equivalent to temporal subsampling by k.
    res = x.view(x.shape[:-2] + (k, N // k, 2)).mean(dim=-3)
    return res
|
418a28bd9d96f70c338301fd0067811816513b52
| 606,081
|
import inspect
def is_legacy_scheduler(scheduler_class):
    """
    Determines whether a scheduler is a legacy implementation that gets passed task parameters instead of only the
    target throughput.

    A legacy scheduler's ``__init__`` takes at least two parameters
    (including ``self``), one of which is named ``params``.
    """
    params = inspect.signature(scheduler_class.__init__).parameters
    return "params" in params and len(params) >= 2
|
f7bb4291b5dca6faba52eec0062fdc242e072edf
| 520,238
|
def n_missing(s):
    """Get the number of missing feature subsets.

    Parameters
    ----------
    s : pandas.Series
        Records of feature subsets, indexed by a MultiIndex with one
        level per feature.

    Returns
    -------
    int
        Number of non-empty feature subsets (2**n_features - 1) that have
        no record in ``s``.

    See also
    --------
    subrela.records.iterate_missing : Iterate missing feature subsets.

    Examples
    --------
    >>> s = from_arrays([[True, False, False], [True, False, True]],
    ...                 [0.2, 0.8],
    ...                 features=['A', 'B', 'C'])
    >>> n_missing(s)
    5
    """
    # All non-empty subsets of the features, minus the recorded ones.
    total_subsets = 2 ** s.index.nlevels - 1
    return total_subsets - len(s)
|
3a7b642f9e15e4abd251eab364f700b19d5115f4
| 60,273
|
def run(
    *,  # Force kwargs.
    head_branch="master",
    event="push",
    status="completed",
    conclusion="success",
    is_fork=False
) -> dict:
    """Generate Github run like dict."""
    payload = dict(
        head_branch=head_branch,
        event=event,
        status=status,
        conclusion=conclusion,
    )
    # Nest the fork flag the way the GitHub API reports it.
    payload["repository"] = {"fork": is_fork}
    return payload
|
f253365451c4f5847ad2f344540fec0512c2125f
| 591,993
|
def next_nl_or_end(s, n=0):
    """Return the index just past the next run of newlines after ``n``,
    or ``len(s)`` when no further newline exists.

    :param s: text to scan
    :param n: position to start scanning from (default 0)
    """
    # first identify starting newlines, we pass them
    # (note: the `len(s) - 1` bound stops one short of the final
    # character, so a trailing newline at the very end is not skipped
    # here -- kept as-is)
    start = n
    while start < len(s) - 1 and s[start] == '\n':
        start += 1
    # Look for the next newline strictly after the skipped prefix.
    p = s.find('\n', start + 1)
    if p > -1:
        # Another newline found, continue until no more newlines or end of string
        while p < len(s) and s[p] == '\n':
            p += 1
        return p
    else:
        # No (more) newlines found, return length of string as end index
        return len(s)
|
6cfff226259f3df499f20ae3390ba42297f4597a
| 494,676
|
import logging
def saveData(fileName, data):
    """Save data to persistent file. If file does not already exist it will
    be created. The function will return True if successful, else False.

    :param fileName: Name of the file to save to.
    :type fileName: string
    :param data: The data object to save
    :type data: var
    :return: Status. True if successful, False if unsuccessful.
    :rtype: bool
    """
    try:
        with open(fileName, 'w', encoding='utf-8') as f:
            f.write(data)
        return True
    except Exception as e:
        # Best-effort persistence: log the failure and report it via the
        # return value instead of raising.
        logger = logging.getLogger('discord')
        logger.warning(
            'Exception thrown when trying to serialize data. '
            'Error message reads as follows:\n'
            f'{e}\n'
        )
        return False
|
c9bc0e9a223376c8bf5563ad48b66a1cbed5301d
| 189,770
|
def siwsi(b8a, b11):
    """
    Shortwave Infrared Water Stress Index \
    (Fensholt and Sandholt, 2003).

    .. math:: SIWSI = (b8a - b11)/(b8a + b11)

    :param b8a: NIR narrow.
    :type b8a: numpy.ndarray or float
    :param b11: SWIR 1.
    :type b11: numpy.ndarray or float
    :returns SIWSI: Index value

    .. Tip::
        Fensholt, R., Sandholt, I. 2003. Derivation of a shortwave \
        infrared water stress index from MODIS near- and shortwave \
        infrared data in a semiarid environment. Remote Sensing of \
        Environment 87, 111-121. doi:10.1016/j.rse.2003.07.002.
    """
    # Normalized difference of the two bands.
    difference = b8a - b11
    total = b8a + b11
    return difference / total
|
9a718dbce9eeb5963d8117093568d85d183a0bb3
| 194,832
|
import math
def cubehelix(samples=16, start=0.5, rot=-0.9, sat=1.0, gamma=1., alpha=None):
    """
    Create CubeHelix spectrum colourmap with monotonically increasing/descreasing intensity
    Implemented from FORTRAN 77 code from D.A. Green, 2011, BASI, 39, 289.
    "A colour scheme for the display of astronomical intensity images"
    http://adsabs.harvard.edu/abs/2011arXiv1108.5083G
    Parameters
    ----------
    samples : int
        Number of colour samples to produce (the returned list actually
        contains samples+1 entries, covering both endpoints)
    start : float
        Start colour [0,3] 1=red,2=green,3=blue
    rot : float
        Rotations through spectrum, negative to reverse direction
    sat : float
        Colour saturation grayscale to full [0,1], >1 to oversaturate
    gamma : float
        Gamma correction [0,1]
    alpha : list or tuple
        Alpha [min,max] for transparency ramp; a truthy non-list/tuple
        value enables a default [0,1] ramp
    Returns
    -------
    list
        List of (position, 'rgba(...)') tuples ready to be loaded by colourmap()
    """
    colours = []
    if not isinstance(alpha,list) and not isinstance(alpha,tuple):
        #Convert as boolean: any truthy scalar means a full 0..1 alpha ramp
        if alpha: alpha = [0,1]
    for i in range(0,samples+1):
        # Position along the map in [0, 1].
        fract = i / float(samples)
        # Helix angle for this sample.
        angle = 2.0 * math.pi * (start / 3.0 + 1.0 + rot * fract)
        # Deviation amplitude: zero at both ends, max mid-map.
        amp = sat * fract * (1 - fract)
        # Apply gamma to the intensity ramp.
        fract = pow(fract, gamma)
        # Green (2011) eq. 2: intensity plus the rotating colour deviation.
        r = fract + amp * (-0.14861 * math.cos(angle) + 1.78277 * math.sin(angle))
        g = fract + amp * (-0.29227 * math.cos(angle) - 0.90649 * math.sin(angle))
        b = fract + amp * (+1.97294 * math.cos(angle))
        # Clamp channels into [0, 1].
        r = max(min(r, 1.0), 0.0)
        g = max(min(g, 1.0), 0.0)
        b = max(min(b, 1.0), 0.0)
        a = 1.0
        if alpha:
            # Linear alpha ramp between alpha[0] and alpha[1].
            a = alpha[0] + (alpha[1]-alpha[0]) * fract
        # %d truncates the scaled floats toward zero when formatting.
        colours.append((fract, 'rgba(%d,%d,%d,%d)' % (r*0xff, g*0xff, b*0xff, a*0xff)))
    return colours
|
5ee4cb9f62430bd125dd0a8fb618035016c20f00
| 410,943
|
def _bfs_for_reachable_nodes(target_nodes, name_to_input_name):
    """Breadth first search for reachable nodes from target nodes.

    :param target_nodes: list of node names to start from (not mutated,
        unlike the previous implementation which consumed a copy).
    :param name_to_input_name: mapping from node name to the list of its
        input node names.
    :return: set of all node names reachable from ``target_nodes``.
    """
    # Local import keeps the module's dependency surface unchanged.
    from collections import deque

    nodes_to_keep = set()
    # deque gives O(1) popleft; the previous `del list[0]` was O(n) per pop.
    next_to_visit = deque(target_nodes)
    while next_to_visit:
        n = next_to_visit.popleft()
        if n in nodes_to_keep:
            # Already visited this node.
            continue
        nodes_to_keep.add(n)
        next_to_visit.extend(name_to_input_name[n])
    return nodes_to_keep
|
b22464d315415af40bc31ad8d707de19319d0707
| 327,345
|
def can_import(name):
    """Attempt to __import__ the specified package/module, returning
    True when succeeding, otherwise False"""
    try:
        __import__(name)
    except ImportError:
        return False
    return True
|
7b02767184e73935c92b9e0e15769bb73ac24d95
| 666,209
|
def startstrip(string: str, part: str) -> str:
    """
    Remove ``part`` from beginning of ``string`` if ``string`` startswith ``part``.

    Args:
        string (str): source string.
        part (str): removing part.

    Returns:
        str: string without the leading ``part`` (unchanged otherwise).
    """
    return string[len(part):] if string.startswith(part) else string
|
671af88f6f502f98a1da7a421eff92b0631d296f
| 411,504
|
import re
def highlight_query_string(text: str, qstring: str) -> str:
    """
    Highlights query words/phrases in input text by surrounding them with
    <span> tags.

    Args:
        text: Section raw text from db
        qstring: A query string from search result page (could be in
            single/double quotes to force a whole-phrase match)

    Returns:
        str: text with every query-term occurrence wrapped in a highlight
        <span> tag. Matching is case-insensitive; the highlighted term is
        inserted lower-cased (the original code also inserted the matched
        search term rather than the original-cased slice).

    Fixes:
        * query terms are now escaped with ``re.escape`` so regex
          metacharacters (e.g. "c++") cannot crash or distort the search;
        * terms are lower-cased before searching the lower-cased text, so
          mixed-case queries ("World") actually match -- previously the
          upper/title-case variants could never be found in ``text_lower``.
    """
    text_lower = text.lower()
    # if qstring is empty, return text as is
    if qstring == "":
        return text
    # Quoted query (single or double quotes): treat as one exact phrase.
    if (qstring.startswith('"') and qstring.endswith('"')) or \
            (qstring.startswith("'") and qstring.endswith("'")):
        qlist = [qstring[1:-1]]
    else:
        # else highlight each word in the query str, separately
        qlist = qstring.split()
    # Case-insensitive search: compare lower-cased terms to text_lower.
    qlist = [qterm.lower() for qterm in qlist]
    # get (index, "qterm") tuples in the input text
    positions = []
    for qterm in set(qlist):
        positions += [(m.start(), qterm)
                      for m in re.finditer(re.escape(qterm), text_lower)]
    if not positions:
        return text
    positions.sort()
    # NOTE(review): overlapping terms (e.g. query "ab b") can still yield
    # garbled tags, exactly as in the original implementation.
    # iterate through positions and insert <span> tags in text
    output_text = ""
    length = len(positions)
    start = 0
    end = positions[0][0]
    for i in range(length):
        output_text += text[start:end]
        output_text += "<span style='background-color:yellow'>"
        output_text += positions[i][1]
        output_text += "</span>"
        start = end + len(positions[i][1])
        if i < length - 1:
            end = positions[i + 1][0]
    output_text += text[start:len(text)]
    return output_text
|
8d7b67565d61b6b13b142f96c91ff555065aeb17
| 376,391
|
def get_outcome(game_info):
    """
    Get the score of the game based on one team's score and the plus minus.

    Parameters
    ----------
    game_info : list
        A list of the info for this game, coming from LeagueGameLog, from
        the perspective of the home team: points at index -3 and
        plus/minus at index -2.

    Returns
    -------
    game_outcome : tuple
        (home points, away points, 1 if the home team won else 0).
    """
    home_pts, plus_minus = game_info[-3], game_info[-2]
    away_pts = home_pts - plus_minus
    home_win = int(home_pts > away_pts)
    return (home_pts, away_pts, home_win)
|
43c8b4f8aab026c99948825dae89013913251604
| 566,174
|
def check_for_ticket_name_error(ticket_name):
    """
    Validate a ticket name and return an error message, or False when valid.

    A valid name is alphanumeric (spaces allowed, but not as the first or
    last character) and at most 60 characters long.

    :param ticket_name: a string for the ticket's name
    :return: False if no error, else the error as a string message
    """
    # Removing the spaces first lets isalnum() validate the rest; an empty
    # name also fails here because ''.isalnum() is False.
    collapsed = ticket_name.replace(' ', '')
    if not collapsed.isalnum():
        return "The name of the ticket has to be alphanumeric only"
    if ticket_name.startswith(' ') or ticket_name.endswith(' '):
        return "The name of the ticket is only allowed spaces if it is not the first or last character"
    if len(ticket_name) > 60:
        return "The name of the ticket should be no longer than 60 characters"
    return False
|
38163e28e8acb5788b21ee832c037cc707574a83
| 111,923
|
def date2String(date_time):
    """Convert a datetime object to a string of the form
    "<Mon> <day>, <HH:MM AM/PM>" (day without zero padding)."""
    month_day = "{} {}".format(date_time.strftime("%b"), date_time.day)
    clock = date_time.strftime("%I:%M %p")
    return month_day + ", " + clock
|
c5ce14d7bc2603d1068ac4309035b6cf36d660b0
| 73,940
|
def get_valid_init_tree(trees):
    """Returns first NewickTree entry that is not NoTree (None if all are)."""
    for tree in trees:
        if tree != "NoTree":
            return tree
    return None
|
69841abe1e8720c42104a961e8165306b399f710
| 657,084
|
def clip_pt_to_im(pt, im_size):
    """Clips a 2D point to the image frame.

    :param pt: 2D point (x, y).
    :param im_size: Image size (width, height).
    :return: Clipped 2D point (x, y).
    """
    x_max, y_max = im_size[0] - 1, im_size[1] - 1
    clipped_x = min(max(pt[0], 0), x_max)
    clipped_y = min(max(pt[1], 0), y_max)
    return [clipped_x, clipped_y]
|
386b845fd1561976ccbce0e8b098ca6128a938a0
| 99,859
|
def salary_job_stats(df):
    """
    Get the stats (count, mean, std, min, 25%, 50%, 75%, max) for each job
    (Data Scientist, BI, Data Analyst, Developpeur, Data Engineer)
    Parameter:
        df : a dataframe with salary and job title
    Returns:
        salary_df : a dataframe with mean/std salary for each job
    """
    # Stats for each job (even junior, alternance, etc)
    salary_stats = df.groupby('Job')['Salary'].describe().reset_index()
    salary_df = salary_stats.T  # To have job title columns
    # Keep only 5 jobs titles
    # NOTE(review): 'data scienstist' looks like a typo for 'data scientist',
    # and 'data engineer' is absent although the docstring lists it; any
    # column not matching these exact strings is dropped -- verify against
    # the actual 'Job' labels in the data.
    # NOTE(review): after reset_index().T the columns appear to be integer
    # positions rather than job-title strings, in which case this string
    # filter would drop every column -- confirm with a real dataframe.
    col_to_keep = ['data scienstist', 'data analyst', 'business intelligence',
                   'developpeur']
    salary_df.drop(salary_df.columns.difference(col_to_keep), axis=1,
                   inplace=True)
    return salary_df
|
c2291b1f4aeb542f196441a16e6accfad1844955
| 119,148
|
import re
def parse_show_vrrp_statistics(raw_result):
    """
    Parse the 'show vrrp statistics' command raw output.
    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show vrrp statistics command in a \
        dictionary of the form:
     ::
        {
            'invalid_grp_oth_reason': '0',
            'mast_to_backup': '0',
            'backup_to_init': '0',
            'group': '1',
            'address_family': 'IPv4',
            'grp_discard_pkts': '3856',
            'vrrp_v2_adv': 'sent 0 (errors 0) - rcvd 0',
            'ip_addr_own_conflicts': '0',
            'init_to_backup': '2 (Last change Mon Jun 16 11:19:36.316 UTC)',
            'state': 'MASTER',
            'adv_intv_err': '0',
            'init_to_mast': '0',
            'ip_conf_mismatch': '0',
            'adv_recv_in_init': '0',
            'interface': '1',
            'state_dur': '6 mins 55.006 secs',
            'backup_to_mast': '2 (Last change Mon Jun 16 11:19:39.926 UTC)',
            'vrrp_v3_adv': 'sent 4288 (errors 0) - rcvd 0',
            'mast_to_init': '1 (Last change Mon Jun 16 11:17:49.978 UTC)',
            'vrrpv2_incompat': None
        }
    """
    # Fix: the ' Group ...' fragment previously lacked the r prefix; its
    # \d/\S/\s escapes produced identical bytes but raise invalid-escape
    # SyntaxWarnings on modern Python. All fragments are raw strings now.
    # Only the header (interface/group/AF, state, duration) is mandatory;
    # every counter below it is optional and parses to None when absent.
    show_re = (
        r'VRRP Statistics for interface (?P<interface>.*) -' +
        r' Group (?P<group>\d+) - Address-Family (?P<address_family>\S+)\s*'
        r'State is (?P<state>.*)\n+\s*'
        r'State duration (?P<state_dur>.*)\n+\s*'
        r'(VRRPv3 Advertisements: (?P<vrrp_v3_adv>.*))?\s*'
        r'(VRRPv2 Advertisements: (?P<vrrp_v2_adv>.*))?\s*'
        r'(Group Discarded Packets: (?P<grp_discard_pkts>\d+))?\s*'
        r'(\s*VRRPv2 incompatibility: (?P<vrrpv2_incompat>\d+))?\s*'
        r'(\s*IP Address Owner conflicts: (?P<ip_addr_own_conflicts>\d+))?\s*'
        r'(\s*IP address configuration mismatch : (?P<ip_conf_mismatch>\d+))?\s*'
        r'(\s*Advert Interval errors : (?P<adv_intv_err>\d+))?\s*'
        r'(\s*Adverts received in Init state: (?P<adv_recv_in_init>\d+))?\s*'
        r'(\s*Invalid group other reason: (?P<invalid_grp_oth_reason>\d+))?\s*'
        r'(Group State transition:)?\s*'
        r'(\s*Init to master: (?P<init_to_mast>.*))?\s*'
        r'(\s*Init to backup: (?P<init_to_backup>.*))?\s*'
        r'(\s*Backup to master: (?P<backup_to_mast>.*))?\s*'
        r'(\s*Master to backup: (?P<mast_to_backup>.*))?\s*'
        r'(\s*Master to init: (?P<mast_to_init>.*))?\s*'
        r'(\s*Backup to init: (?P<backup_to_init>.*))?\s*'
    )
    re_result = re.search(show_re, raw_result)
    # NOTE: assert is stripped under -O; kept for caller compatibility
    # (callers may rely on AssertionError for unparseable output).
    assert re_result
    result = re_result.groupdict()
    return result
|
8132ad997f186550b94c3607bc919290f6e37eca
| 420,985
|
def get_modelpath_and_name(savedmodel_path):
    """
    Help function which returns the full path to a model, excluding the epoch
    number at the end, e.g. "./mybestmodel-40" returns "./mybestmodel".

    Fix: when the path contains no "-", it is now returned unchanged;
    previously ``rfind`` returned -1 and the slice silently dropped the
    last character.
    """
    cut = savedmodel_path.rfind("-")
    if cut == -1:
        return savedmodel_path
    return savedmodel_path[:cut]
|
3b14a97a07acbbabc7b93c0249418f641bcf8a48
| 562,991
|
def tcllist(l: list) -> str:
    """Translate a Python list to the equivalent Tcl command.

    All elements in the list are quoted using brackets and will not support
    variable substitution. Do that on the Python side.

    Example:
        >>> tcllist([1.0, 2, "sam's the best!"])
        "[list {1.0} {2} {sam's the best!}]"
    """
    # Brace-quote every element, then join with single spaces.
    body = ' '.join('{%s}' % item for item in l)
    return '[list %s]' % body
|
1426d1cc35345ae28b4e1363e361b60d15959006
| 550,194
|
def eight_decimal_places(amount, format="str"):
    """
    Round ``amount`` (a number or a numeric string) to 8 decimal places.

    >>> eight_decimal_places(3.12345678912345)
    '3.12345679'
    >>> eight_decimal_places("3.12345678912345")
    '3.12345679'
    >>> eight_decimal_places(3.12345678912345, format='float')
    3.12345679
    >>> eight_decimal_places("3.12345678912345", format='float')
    3.12345679

    Returns None for an unrecognized ``format`` (preserved behavior).

    Fix: string inputs were previously returned unmodified, contradicting
    the doctests above; they are now parsed and rounded like numbers.
    """
    if isinstance(amount, str):
        amount = float(amount)
    if format == 'str':
        return "%.8f" % amount
    if format == 'float':
        return float("%.8f" % amount)
|
1ebd8eda2fe162984812dd60ab87221714903811
| 500,961
|
def get_all_inds(string, substr):
    """Returns indices of all (possibly overlapping) occurrences of a substring.

    Returns 0 as a sentinel when there is no occurrence (interface kept
    from the original implementation).

    Fix: the previous version accumulated indices against a repeatedly
    truncated copy of the string, which produced wrong indices and looped
    forever whenever ``substr`` occurred at index 0 (e.g.
    get_all_inds("abc", "abc")).
    """
    inds = []
    i = string.find(substr)
    while i != -1:
        inds.append(i)
        # Advance by one to also report overlapping occurrences.
        i = string.find(substr, i + 1)
    # Preserve the original sentinel: 0 when nothing was found.
    return inds if inds else 0
|
c1e4cca7556529029583d47c154bfd7af77dccd6
| 461,326
|
def strip_meta(value: dict) -> dict:
    """Strip the "_meta" node from dict, recursively."""
    return {
        key: strip_meta(val) if isinstance(val, dict) else val
        for key, val in value.items()
        if key != "_meta"
    }
|
88cd5bc6667823043a6231a8c35c9bf022e6d33b
| 82,662
|
import re
def is_url_valid(string):
    """
    Check whether the input string follows URL format.

    Args:
        string: Input string
    Returns:
        True: if string follows URL format
        False: if string doesn't follow URL format

    >>> is_url_valid("C:/users/sample/Desktop/image.jpg")
    False
    >>> is_url_valid("/examples/data/image.jpg")
    False
    >>> is_url_valid("http://google.com")
    True
    >>> is_url_valid("https://images.financialexpress.com/2020/01/660-3.jpg")
    True
    >>> is_url_valid("https://images.financialexpress.com 2020/01/660-3.jpg")
    False
    """
    # Scheme, then domain / localhost / IPv4, optional port, optional path.
    pattern = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return pattern.match(string) is not None
|
a6ff543fa68f1cdaef5f7a4c4be8f6e5979e6a5d
| 63,998
|
def get_empty_tiles_positions(grid):  # Returns the list of positions of empty tiles
    """
    Gets all the empty tiles' coordinates.

    A tile is "empty" when it holds a space character or 0. The row width
    is taken from the first row, matching the original behavior.

    :param grid: The game grid
    :return: List with all the coordinates of empty tiles
    """
    return [(row, col)
            for row in range(len(grid))
            for col in range(len(grid[0]))
            if grid[row][col] == ' ' or grid[row][col] == 0]
|
7cacffd0c54040011276f72c9f7337a1ea8d453c
| 408,405
|
def _has_page_changed(url, driver_url):
    """
    Has the page's URL changed? Exclude everything after a hash.
    """
    def normalize(u):
        # Cut at the '#' only when it is not the leading character
        # (a hash at position 0 keeps the whole string, as before),
        # then drop trailing slashes.
        cut = u.find('#')
        if cut <= 0:
            cut = len(u)
        return u[:cut].rstrip('/')

    return normalize(url) != normalize(driver_url)
|
0920896a3ad4e6373d1af706698323ce1fff725a
| 293,618
|
import math
def num_k_of_n(n: int, k: int) -> int:
    """Return number of combinations of k elements out of n (0 when k > n)."""
    if k > n:
        return 0
    if k == n:
        return 1
    numerator = math.factorial(n)
    denominator = math.factorial(k) * math.factorial(n - k)
    return numerator // denominator
|
de99dd88fc6e747421e36c698a525b7e58b1e4de
| 6,832
|
import timeit
def measure_overhead(timer_factory):
    """Measure the overhead of a timer instance from the given factory.

    :param timer_factory: callable which returns a new timer instance
        (called as ``timer_factory('foo', log_fn=...)``; the instance is
        used as a context manager)
    :return: the average duration of one observation, in seconds
    """
    bench = timeit.Timer(
        stmt='with timer: pass',
        globals={'timer': timer_factory('foo', log_fn=lambda x: x)},
    )
    # autorange picks a loop count; take the best of that first run plus
    # the standard repeats to suppress scheduling noise.
    loops, first_duration = bench.autorange()
    best = min([first_duration] + bench.repeat(number=loops))
    return best / loops
|
3e981430487e444e599de5faa9b9afd89dcec536
| 165,643
|
def decode_bytes(obj):
    """If the argument is bytes, decode it.

    :param Object obj: A string or byte object
    :return: A string representation of obj
    :rtype: str
    :raises ValueError: if obj is neither bytes nor str
    """
    if isinstance(obj, str):
        return obj
    if isinstance(obj, bytes):
        return obj.decode('utf-8')
    raise ValueError("ERROR: {} is not bytes or a string.".format(obj))
|
48c56e899cc83deb478cc665b3f051e1e99a18ae
| 696,809
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.