content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
def get_link_destinations(chunk):
    """Find any target of a link in HTML code.

    Scans for tags carrying an ``id`` or ``name`` attribute (written with
    backslash-escaped quotes), which makes them possible link targets.

    :param str chunk: text string
    :return: destinations, destination_tags
    :rtype: Tuple[list[str], list[str]]
    """
    # html links. label{} has already been converted
    name_chars = r'[\w _\-:]'
    escaped_quote = r'[\\]' + '["\']'
    target_re = (r'<' + name_chars + '+ (id|name)=' + escaped_quote +
                 '(' + name_chars + '+)' + escaped_quote + '[^>]*>')
    hits = [(m.group(), m.group(2)) for m in re.finditer(target_re, chunk)]
    destinations = [full for full, _ in hits]
    destination_tags = [tag for _, tag in hits]
    return destinations, destination_tags
def getRemainingFiles(movedFileList, allFiles):
    """ Make a diff between the moved files and all files.

    Removes (in place) the first occurrence of each moved file from
    allFiles; moved files not present in allFiles are ignored.

    :param movedFileList: files that were moved.
    :param allFiles: list to prune; mutated in place and also returned.
    :return: allFiles with the moved entries removed.
    """
    for file in movedFileList:
        try:
            # list.remove drops the first matching entry -- identical to the
            # old O(n^2) scan-and-delete loop, but clearer and faster.
            allFiles.remove(file)
        except ValueError:
            pass  # moved file not in allFiles: nothing to do
    return allFiles
import os
def grab_predict_label3d_file(defaultdir=""):
    """
    Finds the paths to the training experiment yaml files.

    Returns the alphabetically first *dannce.mat file found in
    ``./defaultdir``; raises if none exists.
    """
    def_ep = os.path.join(".", defaultdir)
    candidates = sorted(
        os.path.join(def_ep, entry)
        for entry in os.listdir(def_ep)
        if "dannce.mat" in entry
    )
    if not candidates:
        raise Exception("Did not find any *dannce.mat file in {}".format(def_ep))
    print("Using the following *dannce.mat files: {}".format(candidates[0]))
    return candidates[0]
import csv
def get_bc_reg_corps_csv():
    """
    Check if all the BC Reg corps are in orgbook (with the same corp type).

    Reads export/bc_reg_corps.csv and returns three dicts keyed by corp_num:
    corp types, corp names, and full per-corp info records.
    """
    info_fields = (
        "corp_num", "corp_type", "corp_name", "recognition_dts", "bn_9",
        "can_jur_typ_cd", "xpro_typ_cd", "othr_juris_desc", "state_typ_cd",
        "op_state_typ_cd", "corp_class",
    )
    bc_reg_corp_types = {}
    bc_reg_corp_names = {}
    bc_reg_corp_infos = {}
    with open('export/bc_reg_corps.csv', mode='r') as corp_file:
        for row in csv.DictReader(corp_file):
            corp_num = row["corp_num"]
            bc_reg_corp_types[corp_num] = row["corp_type"]
            bc_reg_corp_names[corp_num] = row["corp_name"]
            bc_reg_corp_infos[corp_num] = {field: row[field] for field in info_fields}
    return (bc_reg_corp_types, bc_reg_corp_names, bc_reg_corp_infos)
import re
def validateBranch(branch):
    """ Check that branch/label contains only valid characters """
    return bool(re.match(r'\w[\w\._]+$', branch))
import re
def class_name2module_name(class_name):
    """Convert a Python class name to a module name.

    DemoPipeline -> demo_pipeline
    """
    snake = re.sub(r'([A-Z]{1})', r'_\1', class_name).lower()
    # the leading capital produced a spurious first underscore: drop it
    return snake.replace('_', '', 1)
def estimated_sp(vests):
    """Convert VESTS to SP units for display."""
    vests_to_sp_rate = 0.0005034
    return vests * vests_to_sp_rate
def _get_top_values_categorical(series, num_x):
"""Get the most frequent values in a pandas Series. Will exclude null values.
Args:
column (pd.Series): data to use find most frequent values
num_x (int): the number of top values to retrieve
Returns:
top_list (list(dict)): a list of dictionary with keys `value` and `count`.
Output is sorted in descending order based on the value counts.
"""
frequencies = series.value_counts(dropna=True)
df = frequencies.head(num_x).reset_index()
df.columns = ["value", "count"]
df = df.sort_values(["count", "value"], ascending=[False, True])
value_counts = list(df.to_dict(orient="index").values())
return value_counts | d3ea35ba6ee60536a56bbfc07cfae3633f26b138 | 43,810 |
import math
def cosine_rule(v_original: float, v_target: float, angle_dif: int) -> float:
    """Apply the law of cosines to compute the Delta-V needed to transfer
    from one velocity to another with a difference in angle.

    Args:
        v_original: the original velocity.
        v_target: the target velocity.
        angle_dif: the angle at which the 2 velocities differ in degrees.
    Returns:
        the length of the velocity vector connecting the 2 ends of
        v_original and v_target."""
    angle_rad = math.radians(angle_dif)
    squared = ((v_original ** 2) + (v_target ** 2)) - \
        (2 * v_original * v_target * math.cos(angle_rad))
    return math.sqrt(squared)
def generate_json_tokens(valid_tokens):
    """
    input: list of valid question tokens
    output: string rep. of the json store

    Returns "[]" for an empty token list (the original slicing produced a
    corrupt "]" string in that case).
    """
    if not valid_tokens:
        # BUG FIX: "[" sliced with [:-2] collapsed to "", yielding just "]"
        return "[]"
    arr_str = "["
    for tok in valid_tokens:
        arr_str += " \"%s\", " % tok
    # drop the trailing ", " separator before closing the array
    arr_str = arr_str[:-2]
    arr_str += "]"
    return arr_str
import re
def as_fuse_id(shader_name, shader_id):
    """
    Derive an identifier from shader_name.
    Removes whitespace, leading digits and special characters so the result
    can be used as an identifier, in particular as the first parameter to
    `FuRegisterClass()`.
    """
    # Example: "Fork Who cares? nmbr73 321" -> "Who cares?"
    cleaned = re.sub(r'^Fork (.+) ([^ ]+) \d+$', r'\1', shader_name)
    # Replace all invalid characters with a ' '
    cleaned = re.sub(r'[^A-Za-z0-9 ]+', ' ', cleaned)
    # Put 'D' in front if first character is a digit
    cleaned = re.sub(r'^(\d.*)$', r'D\1', cleaned)
    # Title-case the words, then strip every space
    return ''.join(ch for ch in cleaned.title() if not ch.isspace())
def simple_linear_regression(x, y, return_fitted=False):
    """Closed form solution for single variable least square regression.

    Returns (slope, intercept), or the fitted values when return_fitted.
    """
    n = len(x)
    mean_x, mean_y = x.mean(), y.mean()
    slope = ((x * y).sum() - n * mean_x * mean_y) / ((x * x).sum() - n * mean_x * mean_x)
    intercept = mean_y - slope * mean_x
    if return_fitted:
        return slope * x + intercept
    return slope, intercept
def next_frame_prediction(generator, input_tensor):
    """Just one forward pass through the generator.

    :param generator: model exposing an ``inference(input, a, b)`` method;
        the two extra positional arguments are unused here, hence ``None``.
    :param input_tensor: input batch forwarded unchanged to the generator.
    :return: whatever ``generator.inference`` produces for this input.
    """
    output_tensor = generator.inference(input_tensor, None, None)
    return output_tensor
def testfunctionWithReturn(varA, varB, varC):
"""
Some basic description
:param varA: description
:param varB: description
:param varC: description
:type varA: type description
:type varB: type description
:type varC: type description
:return: return description
:rtype: the return type description
"""
return 1 | 6b78dd6edf24f12d6ae57681d24a9626439af5a0 | 43,819 |
def _mark_settled(req, courses):
    """
    Finds and marks all courses in 'courses' that have been settled to
    this requirement.

    :param req: requirement dict; only its "path_to" string is used here.
    :param courses: semesters, each a list of course dicts with keys
        "reqs_satisfied", "settled", "possible_reqs", "external",
        "num_settleable".
    :return: number of courses newly marked as satisfying this requirement.
    """
    num_marked = 0
    for sem in courses:
        for c in sem:
            if len(c["reqs_satisfied"]) > 0: # already used in some subreq
                continue
            if len(c["settled"])>0:
                for p in c["settled"]: # go through the settled paths
                    # NOTE(review): 'p in req["path_to"]' is a substring test,
                    # so any settled path that is a fragment of this
                    # requirement's path matches -- confirm that is intended.
                    if p in req["path_to"] and (c["external"] or req["path_to"] in c["possible_reqs"]): # c was settled into this requirement
                        num_marked += 1
                        c["reqs_satisfied"].append(req["path_to"])
                        break
            elif c["num_settleable"] == 1 and req["path_to"] in c["possible_reqs"]:
                # exactly one requirement can claim this course: settle it here
                num_marked += 1
                c["reqs_satisfied"].append(req["path_to"])
                c["settled"].append(req["path_to"])
    return num_marked
def convertFloat(s):
    """Tells if a string can be converted to float and converts it
    Args:
        s : str
    Returns:
        s : str
        Standardized token 'FLOAT' if s can be turned to an float, s
        otherwise"""
    try:
        float(s)
    except (TypeError, ValueError):
        # narrow except: the old bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit
        return s
    return "FLOAT"
def readInfoFile(info_file):
    """Transform dax info file to dictionary format.

    :param str info_file: path to a ``.inf`` file; a ``.dax`` path is
        transparently redirected to its sibling ``.inf`` file.
    :return: dict mapping each ``key =value`` line's key to a float when the
        value parses as one, otherwise to the raw value string.
    """
    # ensure appropriate extension: .dax metadata lives in the .inf file
    ext = info_file.split('.')[-1]
    if ext == 'dax':
        info_file = info_file.replace('.dax', '.inf')
    dic = {}
    # `with` fixes the old version's leaked file handle
    with open(info_file, 'r') as handle:
        for ln in handle:
            # ln[:-1] chops the trailing newline (kept from the original,
            # even though it also drops the last char of a final
            # newline-less line)
            splits = ln[:-1].split(' =')
            if len(splits) > 1:
                try:
                    dic[splits[0]] = float(splits[1])
                except ValueError:
                    dic[splits[0]] = splits[1]
    return dic
def mirror_path(fn):
    """Simply replicate the source path in the build directory.

    :param fn: source path.
    :return: the same path, unchanged (identity mapping).
    """
    return fn
def argset(name, *args, **kwargs):
    """
    Decorator to add sets of required mutually exclusive args to subcommands.

    Stores (args, kwargs) under ``name`` in the decorated function's
    ``_subcommand_argsets`` registry, creating it on first use.
    """
    def decorate(func):
        registry = getattr(func, '_subcommand_argsets', None)
        if registry is None:
            registry = func._subcommand_argsets = {}
        registry.setdefault(name, []).append((args, kwargs))
        return func
    return decorate
import numpy as np
def deplete_profile(A, B, C, r):
    """
    Profile to dilute the number of pixels in rings.

    Gompertz-style falloff: 1 - A * exp(-B * exp(-C * r)).
    """
    inner = np.exp(-C * r)
    return 1 - A * np.exp(-B * inner)
def census_count(
    df, group2=None, weight="person_weight", normalize=False, total=None, drop=True
):
    """ Counts the total number of the given data in each PUMA /
        Calculates percentage of the given data in each PUMA
        Parameters
        ----------
        df : DataFrame/GeoDataFrame
            must contain a 'geo_id' column and the `weight` column
        group2 : str, optional
            a string keyword that specifies if there is
            a second column to group by in the groupby function
        weight : str, default = 'person_weight'
            change to 'house_weight' for household census data
        normalize : bool, optional
            a boolean keyword that specifies whether to
            divide by the total to calculate the percentage.
            If normalize = True, need keyword total.
        total : series, optional
            a series to divide the count by to return a percentage.
        drop : bool, optional
            a boolean keyword that specifies whether
            to drop the index when resetting
        Returns
        --------
        census_count : series or dataframe
    """
    # Two columns to groupby
    # Returns pivot dataframe with second group as columns
    if group2 is not None:
        group = df.groupby(["geo_id", group2])
        census_count = group[weight].sum().reset_index()
        census_count = census_count.pivot(
            index="geo_id", columns=group2, values=weight
        ).reset_index(drop=drop)
    # Groupby PUMA
    # Returns series
    else:
        group = df.groupby(["geo_id"])
        census_count = group[weight].sum().reset_index(drop=drop)
    # Divide series or dataframe by total to return percentage
    # (row-wise division: `total` must align with census_count's index)
    if normalize:
        census_count = census_count.div(total, axis=0) * 100
    return census_count
def yolo_detection(cv2_img, tfnet):
    """Detect the largest 'person' box via the detector and crop it.

    :param cv2_img: image array of shape (H, W, C); indexed as [y, x].
    :param tfnet: darkflow-style network exposing ``return_predict(img)``
        returning dicts with 'topleft'/'bottomright'/'label' entries.
    :return: (top-left [x, y], bottom-right [x, y], cropped image or None,
        the original image). The winning box is padded (0.75x top-left,
        1.25x bottom-right) and clamped to the image bounds.
    """
    max_area = None
    max_tlxy = None
    max_brxy = None
    cropped_img = None
    results = tfnet.return_predict(cv2_img)
    print('origin', cv2_img.shape)
    for result in results:
        if result['label'] != 'person':
            continue
        # pad the raw detection: shrink the top-left, grow the bottom-right
        tl = [result['topleft']['x'] * 0.75, result['topleft']['y'] * 0.75]
        br = [result['bottomright']['x'] * 1.25, result['bottomright']['y'] * 1.25]
        area = (br[0] - tl[0]) * (br[1] - tl[1])
        # keep only the largest person detection (was `== None` / duplicated
        # assignment branches)
        if max_area is None or max_area < area:
            max_area = area
            max_tlxy = tl
            max_brxy = br
    if max_tlxy is not None and max_brxy is not None:
        # clamp the padded box to the image bounds
        max_brxy[0] = min(max_brxy[0], cv2_img.shape[1])
        max_tlxy[0] = max(max_tlxy[0], 0)
        max_brxy[1] = min(max_brxy[1], cv2_img.shape[0])
        max_tlxy[1] = max(max_tlxy[1], 0)
        cropped_img = cv2_img[int(max_tlxy[1]): int(max_brxy[1]), int(max_tlxy[0]): int(max_brxy[0])]
        print('max_tlxy', max_tlxy)
        print('max_brxy', max_brxy)
        print('cropped_img', cropped_img.shape)
    return max_tlxy, max_brxy, cropped_img, cv2_img
def clip_overflow(textblock, width, side='right'):
    """Clips overflowing text of TextBlock2D with respect to width.
    Parameters
    ----------
    textblock : TextBlock2D
        The textblock object whose text needs to be clipped.
    width : int
        Required width of the clipped text.
    side : str, optional
        Clips the overflowing text according to side.
        It takes values "left" or "right".
    Returns
    -------
    clipped text : str
        Clipped version of the text.
    """
    side = side.lower()
    if side not in ['left', 'right']:
        raise ValueError("side can only take values 'left' or 'right'")
    original_str = textblock.message
    start_ptr = 0
    end_ptr = len(original_str)
    # background disabled while measuring -- presumably so size reflects the
    # text only; restored before every return. TODO confirm.
    prev_bg = textblock.have_bg
    textblock.have_bg = False
    # already fits: nothing to clip
    if textblock.size[0] == width or textblock.size[0] <= width:
        textblock.have_bg = prev_bg
        return original_str
    # left-clipping is implemented as right-clipping of the reversed string
    if side == 'left':
        original_str = original_str[::-1]
    # binary search for the longest prefix that, with "...", fits in `width`
    while start_ptr < end_ptr:
        mid_ptr = (start_ptr + end_ptr)//2
        textblock.message = original_str[:mid_ptr] + "..."
        if textblock.size[0] < width:
            start_ptr = mid_ptr
        elif textblock.size[0] > width:
            end_ptr = mid_ptr
        # stop when the search stops moving or the fit is exact
        if mid_ptr == (start_ptr + end_ptr)//2 or\
            textblock.size[0] == width:
            textblock.have_bg = prev_bg
            if side == 'left':
                textblock.message = textblock.message[::-1]
            return textblock.message
def perc_bounds(percent_filter):
    """
    Convert +/- percentage to decimals to be used to determine bounds.
    Parameters
    ----------
    percent_filter : float or tuple, default None
        Percentage or tuple of percentages used to filter around reporting
        irradiance in the irr_rc_balanced function. Required argument when
        irr_bal is True.
    Returns
    -------
    tuple
        Decimal versions of the percent irradiance filter. 0.8 and 1.2 would be
        returned when passing 20 to the input.
    """
    if isinstance(percent_filter, tuple):
        perc_low = percent_filter[0] / 100
        perc_high = percent_filter[1] / 100
    else:
        perc_low = perc_high = percent_filter / 100
    return (1 - perc_low, 1 + perc_high)
def isSpecialTrueType( glyph ):
    """ Fontforge treats three control characters as the special
        TrueType characters recommended by that standard
    """
    return glyph.encoding in (0, 1, 0xD)
def repair_too_short_phoneme(label, threshold=5) -> None:
    """
    Fix phonemes in a LAB file whose duration is too short (under
    ``threshold`` ms) by shaving the needed time off the preceding phoneme.
    Only a too-short *first* phoneme cannot be repaired (nothing precedes it).

    :param label: sequence of phoneme objects with ``start``/``end``/
        ``duration`` in 100 ns units (mutated in place).
    :param threshold: minimum allowed duration in milliseconds.
    """
    # durations are in 100 ns units: ms -> 100 ns
    threshold_100ns = threshold * 10000
    # nothing to do when no phoneme is too short
    if all(phoneme.duration >= threshold_100ns for phoneme in label):
        return None
    if label[0].duration < threshold_100ns:
        raise ValueError(f'最初の音素が短いです。修正できません。: {label[0]}')
    # iterate backwards so consecutive short phonemes do not interfere
    for i, phoneme in enumerate(reversed(label)):
        # phoneme shorter than the threshold: stretch it
        if phoneme.duration < threshold_100ns:
            print('短い音素を修正します。:', phoneme)
            # shortfall relative to the threshold; shift by this amount
            delta_t = threshold_100ns - phoneme.duration
            # move this phoneme's start earlier to lengthen it
            phoneme.start -= delta_t
            # and pull in the end of the preceding phoneme accordingly
            # label[-(i + 1) - 1]
            label[-i - 2].end -= delta_t
    return None
def linear(x0: float, x1: float, p: float):
    """
    Interpolate linearly between two values such that p=0 yields x0
    and p=1 yields x1.
    """
    weight0 = 1 - p
    return weight0 * x0 + p * x1
def extract_array_column(a, col):
    """Extracts a column from a tabular structure
    @type a: list/tuple
    @param a: an array of equal-sized arrays, representing
        a table as a list of rows.
    @type col: usually int or str
    @param col: the column key of the column to extract
    @rtype: list
    @return: the values of column col from each row in a
    """
    return [row[col] for row in a]
import re
def texifyterm(strterm):
    """Replace a term string with TeX notation equivalent.

    Walks the term character-by-character (letters split out by the regex)
    and emits: the leading multiplicity as a superscript, the L character
    as-is, an 'e'/'o' parity flag as an upright superscript, and a
    bracketed J value as a subscript.
    """
    strtermtex = ''
    passed_term_Lchar = False
    for termpiece in re.split('([_A-Za-z])', strterm):
        if re.match('[0-9]', termpiece) is not None and not passed_term_Lchar:
            # 2S + 1 number
            strtermtex += r'$^{' + termpiece + r'}$'
        elif re.match('[A-Z]', termpiece) is not None:
            # L character - SPDFGH...
            strtermtex += termpiece
            passed_term_Lchar = True
        elif re.match('[eo]', termpiece) is not None and passed_term_Lchar:
            # odd flag, but don't want to confuse it with the energy index (e.g. o4Fo[2])
            strtermtex += r'$^{\rm ' + termpiece + r'}$'
        elif re.match(r'[0-9]?.*\]', termpiece) is not None:
            # J value
            strtermtex += termpiece.split('[')[0] + r'$_{' + termpiece.lstrip('0123456789').strip('[]') + r'}$'
        elif re.match('[0-9]', termpiece) is not None and passed_term_Lchar:
            # extra number after S char
            strtermtex += termpiece
    # merge adjacent math segments produced piecewise above
    strtermtex = strtermtex.replace('$$', '')
    return strtermtex
import re
def preProcessDocs(docList):
    """
    Lower-case document text and strip basic punctuation.

    Adds a ``paper_text_processed`` column derived from ``paper_text``
    and returns the (mutated) input frame.

    Parameters
    ----------
    docList : pandas.DataFrame
        Frame with a ``paper_text`` column.

    Returns
    -------
    pandas.DataFrame
        The same frame, now carrying ``paper_text_processed``.
    """
    # Remove punctuation
    docList['paper_text_processed'] = docList['paper_text'].map(lambda x: re.sub('[,\.!?]', '', x))
    # Convert the titles to lowercase
    docList['paper_text_processed'] = docList['paper_text_processed'].map(lambda x: x.lower())
    # BUG FIX: the original returned `newDocList`, an always-empty list,
    # discarding the processed data; return the mutated frame instead.
    return docList
import math
def schaffer_function(transposed_decimal_pop_input):
    """Schaffer Test Function.

    For each individual x: f(x) = s^0.25 * (sin^2(50 * s^0.1) + 1)
    where s = sum of x_i^2.

    :param transposed_decimal_pop_input: iterable of individuals (iterables
        of numbers).
    :return: list of fitness values, one per individual.
    """
    y = []
    for individual in transposed_decimal_pop_input:
        # removed the dead `shaffer = 0` typo variable from the original
        sum_of_squares = 0
        for xi in individual:
            sum_of_squares = sum_of_squares + xi**2
        term_1 = sum_of_squares**0.25
        term_2 = (((math.sin(50*(sum_of_squares**0.1)))**2)+1)
        y.append(term_1 * term_2)
    return y
def expand_aabb(left, right, top, bottom, delta_pixel):
    """ Increases size of axis aligned bounding box (aabb) by delta_pixel
    on every side.
    """
    return (left - delta_pixel,
            right + delta_pixel,
            top - delta_pixel,
            bottom + delta_pixel)
def check_small_primes(n):
    """
    Returns True if n is divisible by a number in SMALL_PRIMES.
    Based on the MPL licensed
    https://github.com/letsencrypt/boulder/blob/58e27c0964a62772e7864e8a12e565ef8a975035/core/good_key.go
    """
    small_primes = (
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89,
        97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181,
        191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397,
        401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619,
        631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751,
    )
    return any(n % prime == 0 for prime in small_primes)
def getLSBParity(list):
    """Return the XOR-parity of the least-significant bits of the items in the given list."""
    return sum(item % 2 for item in list) % 2
def uncompleted_task_on(line: str) -> bool:
    """Return whether there's an uncompleted task on the line."""
    if not line.strip():
        return False  # blank lines carry no task
    return not line.startswith("x ")
def calculate_costs(billing_info):
    """
    Compute monthly and yearly costs of the cluster and container registry
    required to run a BinderHub, adding them under ``billing_info["costs"]``.

    :param billing_info: dict with "cluster" (cost_pernode_permonth_usd,
        node_number) and "acr" (cost_perday_usd) sections
    :return: updated instance of billing_info (dict)
    """
    cluster = billing_info["cluster"]
    acr_daily_usd = billing_info["acr"]["cost_perday_usd"]
    costs = {
        # total VM cost per month across all nodes
        "cluster_cost_permonth_usd":
            cluster["cost_pernode_permonth_usd"] * cluster["node_number"],
        # ACR cost per (average) year
        "acr_cost_peryear_usd": acr_daily_usd * 365.25,
    }
    # average ACR cost per month
    costs["acr_cost_peravgmonth_usd"] = costs["acr_cost_peryear_usd"] / 12
    billing_info["costs"] = costs
    return billing_info
import os
def get_filename(path):
    """Returns the filename (final path component) for a given path.

    :param path: path string; may be bare (no directories) or end in a
        separator, in which case the result is the empty string.
    """
    # os.path.split always returns a 2-tuple, so the old
    # try/except IndexError was dead code.
    return os.path.split(path)[1]
def retry(times, func, *args, **kwargs):
    """Try to execute multiple times function mitigating exceptions.
    :param times: Amount of attempts to execute function
    :param func: Function that should be executed
    :param args: *args that are passed to func
    :param kwargs: **kwargs that are passed to func
    :raises: Raise any exception that can raise func
    :returns: Result of func(*args, **kwargs)
    """
    last_attempt = times - 1
    for attempt in range(times):
        try:
            return func(*args, **kwargs)
        except Exception:
            # re-raise only once every attempt is exhausted
            if attempt == last_attempt:
                raise
from typing import List
def clean_subpath_id_wp(db_name: str, subpath_id: str, pathway_list: List) -> str:
    """
    Clean subpathway ids for WikiPathway pathways.

    Ids already prefixed with ``db_name:`` pass through; otherwise the id's
    last colon-separated segment is looked up by pathway name and, on a
    match, rewritten to ``db_name:<uid>``. Unknown ids are returned as-is.

    :param db_name: database prefix, e.g. "wikipathways"
    :param subpath_id: raw subpathway identifier
    :param pathway_list: pathway objects exposing ``uid`` and ``name``
    :return: normalized identifier
    """
    if '{}:'.format(db_name) in subpath_id:
        return subpath_id
    bare_name = subpath_id.split(':')[-1]
    for pathway in pathway_list:
        if pathway.name == bare_name:
            return db_name + ':' + pathway.uid
    return subpath_id
def process_problems(problems):
    """Inject styles, update image pathing into raw_content, produce final_content.

    :param problems: dict mapping problem number -> dict whose 'content_raw'
        is a BeautifulSoup-style tree; each entry gains a 'content_final'
        HTML string. Mutates and returns the same dict.
    """
    for pnum, contents in problems.items():
        raw_content = contents['content_raw']
        problem_description = raw_content.find('h2')
        problem_description['style'] = 'color: #6b4e3d;'
        problem_info = raw_content.find('div', id='problem_info')
        problem_info['style'] = 'font-family: Consolas;'
        problem_content = raw_content.find('div', 'problem_content')
        problem_content['style'] = ('background-color: #fff; color: #111; padding: 20px;'
                                    'font-family: "Segoe UI", Arial, sans-serif; font-size: 110%;'
                                    'border: solid 1px #bbb; box-shadow: 5px 5px 5px #bbb;')
        problem_html = '{}\n{}\n{}'.format(problem_description, problem_info, problem_content)
        # NOTE(review): this swaps the partial attribute text 'src="project/images/'
        # for a full '<img src="./images/' opening tag, which would duplicate
        # the '<img ' already present in the markup -- verify the output HTML.
        problem_html = problem_html.replace('src=\"project/images/', '<img src="./images/')
        problems[pnum]['content_final'] = problem_html
    return problems
def stateless_switch(switch_factory):
    """
    Fixture that represents a simple on/off switch as a "state machine" with no initial state.

    :param switch_factory: fixture providing a zero-argument switch constructor.
    :return: a freshly constructed switch instance.
    """
    return switch_factory()
import hashlib
def create(username, password):
    """ Create a hashed string of a password with username as the salt.

    NOTE(security): a single SHA-512 pass is not a suitable password hash;
    prefer a dedicated KDF (e.g. hashlib.scrypt). Kept for interface
    compatibility.
    """
    # Python 3 fix: hashlib requires bytes, so encode the concatenation
    # (the original str + str argument raised TypeError on Python 3).
    return hashlib.sha512((username + password).encode("utf-8")).hexdigest()
def left_semi_join(df_1, df_2, left_on, right_on):
    """
    Perform left semi join b/w tables.

    NOTE(review): ``how="leftsemi"`` is not a pandas merge mode; this
    appears to target a cuDF/Dask-cuDF backend -- confirm. ``df_2`` must
    occupy a single partition (asserted below) so the same delayed chunk
    can be joined against every partition of ``df_1``.
    """
    left_merge = lambda df_1, df_2: df_1.merge(
        df_2, left_on=left_on, right_on=right_on, how="leftsemi"
    )
    ## asserting that number of partitions of the right frame is always 1
    assert df_2.npartitions == 1
    return df_1.map_partitions(left_merge, df_2.to_delayed()[0], meta=df_1._meta)
import os
def isroot():
    """Checks if the script is executed as root

    returns:
        true if root, else false
    """
    effective_uid = os.geteuid()
    return effective_uid == 0
def get_enum_name_from_repr_str(s):
    """
    when read from config, enums are converted to repr(Enum)
    :param s: e.g. "<Color.RED: 1>"
    :return: the member name ("RED"), or None when the name is 'None'
    """
    member = s.split('.')[-1].split(':')[0]
    return None if member == 'None' else member
import torch
import random
def time_mask(align, replace_value=0.0, num_mask=2, T=20, max_ratio=0.2):
    """Time masking
    :param torch.Tensor align: input tensor with shape (B, T, D); masked in place
    :param int replace_value: the value to be replaced with
    :param int T: maximum width of each mask
    :param int num_mask: number of masks
    :param float max_ratio: cap on the total fraction of frames masked
    :return: the (in-place) masked tensor and a bool tensor of shape (B, T)
        that is False at masked positions
    """
    batch_size = align.size(0)
    len_align = align.size(1)
    # total masked width across all masks is capped at max_ratio * length
    max_len = int(max_ratio * len_align)
    current_len = 0
    mask = torch.full([batch_size, len_align], fill_value=True, dtype=torch.bool, device=align.device)
    for i in range(0, num_mask):
        t = random.randrange(0, T)
        t = min(t, max_len - current_len)
        t_zero = random.randrange(0, len_align - t)
        # avoids randrange error if values are equal and range is empty
        # NOTE(review): this fires whenever t == 0 and returns early,
        # skipping any remaining masks -- confirm that is intended.
        if t_zero == t_zero + t:
            return align, mask
        # mask_end = random.randrange(t_zero, t_zero + t)
        mask_end = t_zero + t
        # same span is blanked in every batch element
        align[:, t_zero:mask_end, :] = replace_value
        mask[:, t_zero:mask_end] = False
        current_len += t
    return align, mask
def sort_tuple_list(tuple_list:list):
    """sort_tuple_list Sort a list of tuples in place by their first
    element and return the same list.

    Args:
        tuple_list (list): a list of tuples
    """
    tuple_list.sort(key=lambda pair: pair[0])
    return tuple_list
def get_per_difference(list1, list2):
    """Element-wise absolute percentage difference between two nested lists.

    ``list1`` supplies the base (quotient) values:
    ``result[i][j] = |100 * (list1[i][j] - list2[i][j])| / list1[i][j]``.

    :raises ValueError: if the outer lists have different lengths
        (the original printed a message and called ``exit()``, which is
        hostile in library code).
    """
    if len(list2) != len(list1):
        raise ValueError('Error! lists have different sizes')
    diff = []
    for base_row, other_row in zip(list1, list2):
        # NOTE(review): a zero base value divides by zero, exactly as before
        diff.append([abs(100 * (base_row[j] - other_row[j])) / base_row[j]
                     for j in range(len(base_row))])
    return diff
def is_palindrome_without_typecast(number):
    """Return True when a non-negative integer reads the same backwards,
    reversing only half of its digits (no string conversion).
    """
    # Negatives are never palindromes; neither is a non-zero number that
    # ends in 0 (its mirror would need a leading zero).
    if number < 0 or (number != 0 and number % 10 == 0):
        return False
    reversed_half = 0
    # Peel digits from the right until the reversed part catches up.
    while number > reversed_half:
        number, last_digit = divmod(number, 10)
        reversed_half = reversed_half * 10 + last_digit
    # Odd digit counts: drop the middle digit from reversed_half.
    return number == reversed_half or number == reversed_half // 10
import re
def get_cluster_pool(project_id, region, client, selected_filters=None):
    """Gets the clusters for a project, region, and filters
    Args:
        project_id (str): The project id to use
        region (str): The region to use
        client (dataproc_v1beta2.ClusterControllerClient): The client that provides the
            listing clusters method
        selected_filters (list, optional): extra filter expressions ANDed
            onto the always-on 'status.state=ACTIVE' filter
    Returns:
        tuple: (cluster names with component gateway enabled,
                de-duplicated 'labels.key=value' filter strings)
    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a retryable error and
            retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    cluster_pool = list()
    filter_set = set()
    filters = ['status.state=ACTIVE']
    if selected_filters is not None:
        filters.extend(selected_filters)
    filter_str = ' AND '.join(filters)
    # NOTE(review): this try/except merely re-raises and could be removed.
    try:
        for cluster in client.list_clusters(request={'project_id' : project_id, 'region' : region, 'filter': filter_str}):
            #check component gateway is enabled
            if len(cluster.config.endpoint_config.http_ports.values()) != 0:
                # NOTE(review): action_list is filled but never read -- the
                # livy-action scan currently has no effect on the result.
                action_list = list()
                for action in cluster.config.initialization_actions:
                    # check if livy init action with a region with the regex pattern [a-z0-9-]+
                    is_livy_action = re.search("gs://goog-dataproc-initialization-actions-"\
                        "[a-z0-9-]+/livy/livy.sh", action.executable_file) is not None
                    if is_livy_action:
                        action_list.append(action.executable_file)
                cluster_pool.append(cluster.cluster_name)
                for key, value in cluster.labels.items():
                    filter_set.add('labels.' + key + '=' + value)
        return cluster_pool, list(filter_set)
    except:
        raise
def indent(text: str, amount: int) -> str:
    """Indent every line of *text* by *amount* spaces, skipping the first line."""
    prefix = " " * amount
    lines = text.split("\n")
    return "\n".join([lines[0]] + [prefix + line for line in lines[1:]])
def _set_default_if_empty_str(tst_str, default=None):
"""
Return None if str is an empty string or return str. Used to test for
general options that reset with value of "" and reset to either None
or the default value.
Parameters:
tst_str (:term:1string):
the string to test for value of ""
default (:term:`string`):
Optional default string value to return
Returns:
None or the value of the default parameter
"""
return default if tst_str == "" else tst_str | bac210253bc6ffd685fc3677ccdd062d4483035d | 43,878 |
def is_wgs_accession_format(contig_accession):
    """
    Check if a Genbank contig is part of WGS (Whole Genome Shotgun) sequence
    :param contig_accession: Genbank contig accession (ex: CM003032.1)
    :return: True if the provided contig is in the WGS format
    """
    # WGS format: four-letter prefix followed by a numeric body
    # (version dots removed before the numeric check)
    prefix = contig_accession[:4]
    numeric_body = contig_accession[4:].replace(".", "")
    return prefix.isalpha() and numeric_body.isnumeric()
import sys
def sanity_check_manifest(manifest):
    """ Nurse maid the users.

    Validates a manifest dict, writing warnings to stderr for deprecated,
    dropped or unknown keys.

    :param manifest: mapping of manifest keys to check (or None).
    :return: False when manifest is None or not a dict, True otherwise
        (unknown keys only warn; they do not fail validation).
    """
    manifest_keys = ['ldflags', 'static_args', 'name', 'native_libs', 'binary', 'modules']
    old_manifest_keys = ['modules', 'libs', 'search', 'shared']
    new_manifest_keys = ['main', 'binary', 'dynamic_args', 'lib_spec', 'main_spec']
    dodo_manifest_keys = ['watch']
    replaces = {'modules': 'main', 'libs': 'modules', 'search': 'ldflags'}
    # one-element list: mutable flag shared with the cr() closure below
    warnings = [False]
    def cr(warnings):
        """ I like my warnings to stand out.

        Emits a single leading blank line before the first warning only.
        """
        if not warnings[0]:
            warnings[0] = True
            sys.stderr.write('\n')
    if manifest is None:
        sys.stderr.write('\nManifest is None.\n')
        return False
    if not isinstance(manifest, dict):
        sys.stderr.write('\nManifest is not a dictionary: {0}.\n'.format(type(manifest)))
        return False
    for key in manifest:
        if key in manifest_keys:
            continue
        if key in dodo_manifest_keys:
            cr(warnings)
            sys.stderr.write('Warning: "{0}" is no longer supported; ignoring.\n'.format(key))
            continue
        if key in old_manifest_keys:
            cr(warnings)
            sys.stderr.write('Warning: old style key "{0}" is DEPRECATED, use {1}.\n'.format(key, replaces[key]), )
            continue
        if not key in new_manifest_keys:
            cr(warnings)
            sys.stderr.write('Warning: "{0}" is not a recognized key; ignoring.\n'.format(key))
            continue
    return True
def build_empty_response(search_path, operation_name, service_model):
    """
    Creates an appropriate empty response for the type that is expected,
    based on the service model's shape type. For example, a value that
    is normally a list would then return an empty list. A structure would
    return an empty dict, and a number would return None.
    :type search_path: string
    :param search_path: JMESPath expression to search in the response
    :type operation_name: string
    :param operation_name: Name of the underlying service operation.
    :type service_model: :ref:`ibm_botocore.model.ServiceModel`
    :param service_model: The Botocore service model
    :rtype: dict, list, or None
    :return: An appropriate empty value
    """
    response = None
    operation_model = service_model.operation_model(operation_name)
    shape = operation_model.output_shape
    if search_path:
        # Walk the search path and find the final shape. For example, given
        # a path of ``foo.bar[0].baz``, we first find the shape for ``foo``,
        # then the shape for ``bar`` (ignoring the indexing), and finally
        # the shape for ``baz``.
        for item in search_path.split('.'):
            # strip index/terminal characters from both ends,
            # e.g. 'bar[0]' -> 'bar'
            item = item.strip('[0123456789]$')
            if shape.type_name == 'structure':
                shape = shape.members[item]
            elif shape.type_name == 'list':
                # lists are traversed through their member shape
                shape = shape.member
            else:
                raise NotImplementedError(
                    'Search path hits shape type {0} from {1}'.format(
                        shape.type_name, item))
    # Anything not handled here is set to None
    if shape.type_name == 'structure':
        response = {}
    elif shape.type_name == 'list':
        response = []
    elif shape.type_name == 'map':
        response = {}
    return response
from hashlib import sha224
def create_path_for_genes_db(tissues):
    """Build a near-unique file name for the per-tissue genes database.

    The tissue names are serialized and hashed so that the same list of
    tissues always maps to the same database file.
    """
    digest = sha224(','.join(tissues).encode('utf-8')).hexdigest()
    return 'genes_{hash_code}.db'.format(hash_code=digest)
def merge_config(config, *argv):
    """Merge *config* with any number of override dicts.

    Later dicts win on key collisions; none of the inputs are mutated.
    """
    merged = dict(config)
    for overrides in argv:
        merged.update(overrides)
    return merged
def typed(expected_type, cls=None):
    """Base type-checker decorator.

    Args:
        expected_type: A type (str, dict, list, etc.)
        cls: A python class, subclassed from DescriptorBase
    Returns:
        cls: the decorated class
    """
    if cls is None:
        # Called with only the type: hand back a decorator awaiting the class.
        return lambda klass: typed(expected_type, klass)

    original_set = cls.__set__

    def checked_set(self, instance, val):
        """Validate the value's type before delegating to the original setter.

        Raises:
            TypeError: if val is neither None nor of the expected type.
        """
        # API Gateway sends None in the event of no data, account for it.
        if val is not None and not isinstance(val, expected_type):
            raise TypeError("Expected " + str(expected_type))
        original_set(self, instance, val)

    cls.__set__ = checked_set
    return cls
import six
def _to_text(obj, encoding):
    """Coerce *obj* to a text (unicode) value.

    In Python 3, bytes objects are decoded to str with *encoding*.
    In Python 2, str objects are decoded to unicode with *encoding*,
    and unicode objects are returned unchanged.

    Args:
        obj(unicode|str|bytes): The object to be decoded.
        encoding(str): The encoding format.
    Returns:
        The decoded result of obj (None, bool and float pass through).
    """
    if obj is None:
        return obj
    if isinstance(obj, six.binary_type):
        return obj.decode(encoding)
    if isinstance(obj, six.text_type):
        return obj
    if isinstance(obj, (bool, float)):
        return obj
    return six.u(obj)
def make_wide(df, cols):
    """
    Pivot a long-format ACS table into wide format.

    df: pandas.DataFrame with 'GEOID', 'new_var' and 'num' columns.
    cols: list.
        One or more values from the new_var column. Rows matching those
        values are reshaped and returned as columns of a new df, indexed
        by GEOID.
    """
    subset = df[df.new_var.isin(cols)]
    wide = (
        subset.assign(num=subset.num.astype("Int64"))
        .pivot(index="GEOID", columns="new_var", values="num")
        .reset_index()
        .rename_axis(None, axis=1)
    )
    return wide
def create_literal(val, datatype=None, lang=None):
    """
    Render *val* as a canonical NT literal, escaping special characters.
    From the NT specification at 'https://www.w3.org/TR/n-triples/#canonical-ntriples':
    "Within STRING_LITERAL_QUOTE, only the characters U+0022, U+005C, U+000A, U+000D are
    encoded using ECHAR. ECHAR must not be used for characters that are allowed directly
    in STRING_LITERAL_QUOTE."
    An optional datatype IRI or language tag is appended.
    """
    replacements = ((u'\u005C', r'\\'), (u'\u0022', r'\"'),
                    (u"\u000A", r'\n'), (u"\u000D", r'\r'))
    for original, escaped in replacements:
        val = val.replace(original, escaped)
    literal = u'"{}"'.format(val)
    if datatype is not None:
        return literal + "^^<{}>".format(datatype)
    if lang is not None:
        return literal + "@{}".format(lang)
    return literal
import functools
import threading


def simple_thread(func):
    """Decorator: run the wrapped function in a new thread.

    The wrapper starts a fresh ``threading.Thread`` per call and returns
    it so callers may ``join()`` or inspect it (previously the thread was
    unreachable). ``functools.wraps`` preserves the wrapped function's
    name and docstring. A thread pool should be used if run in large scale.
    """
    @functools.wraps(func)
    def simple_thread_wrapper(*args, **kwargs):
        t = threading.Thread(target=func, args=args, kwargs=kwargs)
        t.start()
        return t  # let the caller join/inspect the thread
    return simple_thread_wrapper
import os
def is_newer(path1, path2):
    """Check if `path1` is newer than `path2` (using mtime).

    Compare modification time of files at path1 and path2.
    Non-existing files are considered oldest: return False if path1 does not
    exist and True if path2 does not exist.
    Return None for equal modification time. (This evaluates to False in a
    Boolean context but allows a test for equality.)
    """
    def _mtime(path):
        # Missing/unreadable files get the oldest possible sentinel.
        try:
            return os.path.getmtime(path)
        except OSError:
            return -1

    mtime1, mtime2 = _mtime(path1), _mtime(path2)
    if mtime1 == mtime2:
        return None
    return mtime1 > mtime2
def get_file_size(file_path):
    """
    Return the size of the file at *file_path* in bytes.

    Opens the file in binary mode and seeks to the end; the context
    manager guarantees the handle is closed even if seeking fails
    (the original leaked the handle on an exception between open and
    close).
    """
    with open(file_path, "rb") as input_file:
        input_file.seek(0, 2)  # 2 == os.SEEK_END
        return input_file.tell()
def sentences(s):
    """Split the string s into a list of sentences.

    A sentence ends at the first '.', '?' or '!'; terminators are not
    included and each piece is stripped of surrounding whitespace. If the
    input yields no sentences (empty string), a one-item list containing
    the original string is returned.
    """
    assert isinstance(s, str)

    def _next_terminator(text, start):
        # Index of the nearest sentence terminator at/after *start*.
        # Falls back to one past the end when none is found (mirrors the
        # previous "len + 1" sentinel) instead of swallowing *all*
        # exceptions with a bare except.
        hits = [i for i in (text.find(c, start) for c in '.?!') if i != -1]
        return min(hits) if hits else len(text) + 1

    sentence_list = []
    pos = 0
    while pos < len(s):
        end = _next_terminator(s, pos)
        sentence_list.append(s[pos:end].strip())
        pos = end + 1
    if sentence_list:
        return sentence_list
    # If no sentences were found, return a one-item list containing the entire input string.
    return [s]
from typing import List
from typing import DefaultDict
import random
def predict_next_word(chain: List[str], transitions: DefaultDict[str, str], smpl_size: int) -> str:
    """Pick the next token by looking up the tail of *chain* in the transition table.

    The last ``smpl_size - 1`` tokens are joined into a state key; a random
    candidate for that state is returned, or "" when none exists.
    """
    state = "".join(chain[-(smpl_size - 1):])
    candidates = transitions[state]
    if not candidates:
        return ""
    return random.choice(candidates)
def moeda(v=0, moeda='R$'):
    """
    -> Format a price as a currency string.
    :param v: the price to format
    :param moeda: the currency symbol prefix (defaults to Brazilian real)
    :return: the formatted price, using ',' as the decimal separator
    """
    formatted = '{}{:.2f}'.format(moeda, v)
    return formatted.replace('.', ',')
def solution(A, summed_value):  # O(N^2)
    """
    Return the triples of numbers from *A* that add up to *summed_value*.

    The input list is sorted in place, then for each element a two-pointer
    sweep over the remainder finds complementary pairs.

    >>> solution([-2, 0, 1, 3], 2)
    [(-2, 1, 3)]
    >>> solution([5, 1, 3, 4, 7], 12)
    [(1, 4, 7), (3, 4, 5)]
    >>> solution([1, 2], 3)
    []
    """
    triples = []
    if not A or len(A) < 3:
        return triples
    A.sort()  # O(NlogN); note: mutates the caller's list
    last = len(A) - 1
    for left, base in enumerate(A):
        lo, hi = left + 1, last
        while lo < hi:
            total = base + A[lo] + A[hi]
            if total == summed_value:
                triples.append((base, A[lo], A[hi]))
                lo += 1
                hi -= 1
            elif total < summed_value:
                lo += 1
            else:
                hi -= 1
    return triples
def find_intersection(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Find the intersection point of two lines, each given by two vertices,
    and return it as a tuple.

    :param x1: x-coordinate of vertex 1 of line 1
    :param y1: y-coordinate of vertex 1 of line 1
    :param x2: x-coordinate of vertex 2 of line 1
    :param y2: y-coordinate of vertex 2 of line 1
    :param x3: x-coordinate of vertex 1 of line 2
    :param y3: y-coordinate of vertex 1 of line 2
    :param x4: x-coordinate of vertex 2 of line 2
    :param y4: y-coordinate of vertex 2 of line 2
    :return: (px, py) intersection point of the two lines
    """
    # Cross products shared by both coordinate formulas.
    det12 = x1 * y2 - y1 * x2
    det34 = x3 * y4 - y3 * x4
    denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    px = (det12 * (x3 - x4) - (x1 - x2) * det34) / denominator
    py = (det12 * (y3 - y4) - (y1 - y2) * det34) / denominator
    return px, py
def _get_assignmentgroup_name(assigmentgroup):
"""
Returns a string containing the group members of the
assignmentgroup separated by '-'.
"""
cands = assigmentgroup.get_candidates()
cands = cands.replace(", ", "-")
return cands | 469ff5e795e7e338d0cacce9fc275602a3c0d9a8 | 43,906 |
def download_blob(client, bucket_name, source_blob_name):
    """Download a blob's contents from the given bucket and return them."""
    blob = client.get_bucket(bucket_name).blob(source_blob_name)
    return blob.download_as_string()
import numpy
def quatArrayTDist(q, qarray):
    """Natural distance (Haar measure) between quaternion q (4,) and each row of qarray (Nx4)."""
    # |dot| can exceed 1 by floating-point error; cap it before arccos.
    dots = numpy.abs(numpy.dot(qarray, q))
    return numpy.arccos(numpy.clip(dots, None, 1.0))
def mipmap_levels(base_width, base_height):
    """Return the max number of mipmap levels for the size.

    Args:
        base_width (int): Source width
        base_height (int): Source height
    Returns:
        int: Number of mipmap levels
    """
    levels = 1
    width, height = base_width, base_height
    # Halve both dimensions (never below 1) until the 1x1 level.
    while width > 1 or height > 1:
        width = max(width // 2, 1)
        height = max(height // 2, 1)
        levels += 1
    return levels
import subprocess
def get_daemon_cids():
    """Return the list of pinned CIDs (each joined with its pin type) from the daemon container."""
    cmd = ['docker-compose', 'exec', '-T', 'daemon', 'ipfs', 'pin', 'ls']
    output = subprocess.check_output(cmd)
    lines = output.decode('utf-8').splitlines()
    # Each line looks like "<cid> <pin-type> ..."; keep the first two fields.
    return ['-'.join(line.split()[0:2]) for line in lines]
def _format_2(raw):
"""
Format data with protocol 2.
:param raw: returned by _load_raw
:return: formatted data
"""
return raw[1:], raw[0] | 6c750a37aecfd36922b765d1c5a1c1e30ea8ca68 | 43,911 |
def _get_span_name(span):
"""Get span name by using instrumentation and kind while backing off to
span.name
"""
instrumentation_name = (
span.instrumentation_info.name if span.instrumentation_info else None
)
span_kind_name = span.kind.name if span.kind else None
name = (
"{}.{}".format(instrumentation_name, span_kind_name)
if instrumentation_name and span_kind_name
else span.name
)
return name | 0dbd6aeb1322ddd9b0ab94078ebdcbbe6d63eb50 | 43,912 |
from typing import Sequence
def _group_dimensions(expr: str) -> Sequence[str]:
"""Splits an expression into its separate grouped dimensions.
An unqualified dimension index is a group by itself.
Parentheses are interpreted as demarcating a sequence of dimension indices
to be grouped into a single dimension.
'1' is an alias for '()', denoting a dimension of size 1 with no indices.
Nested parentheses are not permitted.
Examples:
'ijk' is grouped as ['i', 'j', 'k']
'(mn)hwc' is grouped as ['mn', 'h', 'w', 'c']
'n111' is grouped as ['n', '', '', '']
'n...' is grouped as ['n', '...'], where '...' stands for multiple groups.
Args:
expr: Shape expression to group.
Returns:
List of simple expressions, each consisting solely of dimension
indices, specifying the indices that constitute each grouped dimension.
"""
groups = []
i = 0
while i < len(expr):
if expr[i].isalpha():
# Top-level dimension index is a group by itself.
groups.append(expr[i])
i += 1
elif expr[i] == '1':
# Dimension of size 1 with no indices; equivalent to '()'.
i += 1
groups.append('')
elif expr[i] == '(':
# Sequence of indices to be grouped as a single dimension.
i += 1
group_begin = i
while i < len(expr) and expr[i].isalpha():
i += 1
group_end = i
if not(i < len(expr) and expr[i] == ')'):
raise ValueError('Unclosed parenthesis')
i += 1
groups.append(expr[group_begin:group_end])
elif expr[i:].startswith('...'):
# Wildcard sequence of dimensions.
i += len('...')
if '...' in groups:
raise ValueError('Wildcard "..." may only occur once')
groups.append('...')
else:
raise ValueError(f'Illegal character: {ord(expr[i])}')
return groups | 578a9f990a66c050806260e6dfcf915e916c99d2 | 43,913 |
def add_form_default_values():
    """
    Change default form rendering on page load.
    Default form values look like {'<variable name>': {'value': 10}};
    input fields can be skipped with {'<variable name>': {'action': 'ignore'}}.
    """
    defaults = {'ambient_temp_deg_c': {'action': 'update', 'value': 20}}
    return defaults
def save_working_answers(assignment_id, data, r):
    """
    Save the answered responses to redis.

    data: list of dicts; entries with type 'final_answer' contribute
    their 'value' to a comma-joined string stored under
    '<assignment_id>-working_answers'.
    """
    finals = [entry['value'] for entry in data if entry['type'] == 'final_answer']
    key = assignment_id + '-working_answers'
    r.delete(key)
    r.set(key, ','.join(finals))
    return True
def transform0(self, f):
    """
    Apply *f* to this DataFrame and return the result.

    pyspark only gained DataFrame.transform in version 3, so this backport
    (after https://mungingdata.com/pyspark/chaining-dataframe-transformations/)
    enables chained transformations on older versions.
    """
    transformed = f(self)
    return transformed
def _is_generic(cls) -> bool:
"""Return True if cls is a generic type. For example, List or List[int]."""
if cls.__module__ != "typing":
if not any(c.__module__ == "typing" for c in cls.mro()):
return False
params = getattr(cls, "__parameters__", ())
if params:
return True
return bool(getattr(cls, "__args__", ())) | cfc03585f0f1d1abc18e1f72edc54fb480eeff4e | 43,919 |
from numpy import delete
def monotonic_indices(T, MinTimeStep=1.0e-3):
    """Given an array of times, return the indices that make the array strictly monotonic."""
    # Candidate indices into T; numpy.delete turns this into an ndarray on
    # the first removal.
    indices = range(len(T))
    count = len(indices)
    i = 1
    while i < count:
        if T[indices[i]] <= T[indices[i - 1]] + MinTimeStep:
            # The current sample does not advance past its predecessor by
            # at least MinTimeStep: back up to the first kept index whose
            # time is within MinTimeStep of it.
            j = 0
            while T[indices[j]] + MinTimeStep < T[indices[i]]:
                j += 1
            # Erase indices from j (inclusive) to i (exclusive).
            indices = delete(indices, range(j, i))
            count = len(indices)
            # Resume scanning at position j (after the i += 1 below).
            i = j - 1
        i += 1
    return indices
def is_empty(xs):
    """Returns true if the given value is its type's empty value; false
    otherwise"""
    try:
        size = len(xs)
    except TypeError:
        # Unsized values (ints, None, ...) are never considered empty.
        return False
    return size == 0
import copy
def removeBuff(state, remBuff):
    """Return a deep-copied state with the named buffs removed.

    Both the player's active buff list and any pending 'removeBuff'
    actions on the timeline referencing those buffs (e.g. scheduled
    expiry of a buff with a duration) are dropped.
    """
    newState = copy.deepcopy(state)
    player = newState['player']
    player['buff'] = [
        buff for buff in player['buff'] if buff[0]['name'] not in remBuff
    ]
    # Pending actions that would have removed these buffs later.
    expiry_actions = [{'type': 'removeBuff', 'name': name} for name in remBuff]
    timeline = newState['timeline']
    timeline['nextActions'] = [
        action for action in timeline['nextActions']
        if action[1] not in expiry_actions
    ]
    return newState
def get_video_type(link):
    """Classify a url as 'vimeo' or 'youtube'; return None for unknown hosts."""
    if 'vimeo.com/' in link:
        return 'vimeo'
    if 'youtube.com/' in link or 'youtu.be' in link:
        return 'youtube'
    return None
import os
import re
def batch_rename(movies_path):
    """
    Strip the random-number suffix that was appended to file names while
    crawling and merging ts files (added to prevent name clashes),
    renaming the files in bulk.

    Note: mp4 file names appear to be case-insensitive, so names that
    differ only in case are treated as duplicates.

    :param movies_path: directory containing the files to rename
    :return: the set of files that could not be renamed, or None when the
        directory does not exist
    """
    if not os.path.exists(movies_path):
        print("path doesn't exist: {}".format(movies_path))
        return None
    os.chdir(movies_path)
    files = set(os.listdir())
    success, fail = set(), files.copy()
    for file in files:
        new_name = re.sub(r'_\d+.mp4', '.mp4', file)
        if new_name == file:
            # Nothing to strip; count the file as already done.
            fail.remove(file)
            success.add(new_name)
            continue
        if new_name not in success and new_name not in fail:
            try:
                os.rename(file, new_name)
            except OSError:
                # Rename failed; leave the file in the fail set. (Was a
                # bare except, which also swallowed KeyboardInterrupt.)
                pass
            else:
                fail.remove(file)
                success.add(new_name)
    print("total: {}, success: {}, fail: {}".format(len(files), len(success), len(fail)))
    return fail
def velocities(snapshot):
    """Return the snapshot's velocities, oriented by its direction.

    Snapshot details are loaded lazily when the cached ``_velocities``
    has not been populated yet.
    """
    cached = snapshot._velocities
    if cached is None:
        snapshot.load_details()
        cached = snapshot._velocities
    return snapshot.velocity_direction * cached
def set_recursive(config, recursive):
    """Store the global recursive setting on *config*; always returns True."""
    config.update({'recursive': recursive})
    return True
import os
import fnmatch
def DepotToolsPylint(input_api, output_api):
  """Gather all the pylint logic into one place to make it self-contained."""
  files_to_check = [
      r'^[^/]*\.py$',
      r'^testing_support/[^/]*\.py$',
      r'^tests/[^/]*\.py$',
      r'^recipe_modules/.*\.py$',  # Allow recursive search in recipe modules.
  ]
  files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP)

  def _ignore_patterns(path):
    # Translate the non-empty, non-comment lines of a git exclude file
    # into fnmatch regexes.
    with open(path) as fh:
      stripped = [line.strip() for line in fh.readlines()]
    return [fnmatch.translate(line) for line in stripped
            if line and not line.startswith('#')]

  for exclude_file in ('.gitignore', '.git/info/exclude'):
    if os.path.exists(exclude_file):
      files_to_skip.extend(_ignore_patterns(exclude_file))
  disabled_warnings = [
      'R0401',  # Cyclic import
      'W0613',  # Unused argument
  ]
  return input_api.canned_checks.GetPylint(
      input_api,
      output_api,
      files_to_check=files_to_check,
      files_to_skip=files_to_skip,
      disabled_warnings=disabled_warnings)
def calc_average_precision(precisions, recalls):
    """Calculate average precision as defined in the VOC contest (11-point interpolation)."""
    interpolated = []
    for threshold in range(11):
        # First index whose recall reaches this threshold level.
        index = next(i for i, recall in enumerate(recalls) if recall >= threshold / 10)
        interpolated.append(max(precisions[index:]))
    return sum(interpolated) / 11
import re
def count_vowels(string):
    """Count the number of vowels in the given string.

    Keyword arguments:
    string -- the string whose vowels are counted

    Returns:
    A dictionary mapping each vowel to its count, plus a 'total' key.
    """
    counts = {'a': 0, 'e': 0, 'i': 0, 'o': 0, 'u': 0, 'total': 0}
    # Scan the lowercased string; the pattern only ever yields aeiou.
    for match in re.finditer('[aeiou]', string.lower(), re.IGNORECASE):
        counts[match.group()] += 1
        counts['total'] += 1
    return counts
import argparse
def parse_args():
    """
    Parse the command line arguments.

    :return: An argparse.Namespace with the command line arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--filename",
                        help="path to file with records to delete")
    parser.add_argument("-t", "--table",
                        help="name of the table to delete records from")
    parser.add_argument("-d", "--database",
                        help="database to delete from")
    parser.add_argument("-s", "--schema", default="falcon_default_schema",
                        help="schema to delete from")
    parser.add_argument("-p", "--separator", default="|",
                        help="separator to use in data")
    return parser.parse_args()
def filter_duplicates(l):
    """
    Drop entries whose (recursively hashable) form was already seen;
    later duplicates overwrite the stored entry while the first
    occurrence fixes the output order.

    >>> filter_duplicates([{'a': 1}, {'b': 1}, {'a': 1}])
    [{'a': 1}, {'b': 1}]
    """
    def _freeze(obj):
        # Recursively convert unhashable containers into hashable
        # tuples / frozensets so they can serve as dict keys.
        if isinstance(obj, dict):
            return frozenset((_freeze(k), _freeze(v)) for k, v in obj.items())
        if isinstance(obj, (list, tuple, set)):
            return tuple(_freeze(e) for e in obj)
        return obj

    unique = {}
    for entry in list(l):
        unique[_freeze(entry)] = entry
    return list(unique.values())
import os
def set_config():
    """
    Return the name of the configuration file from the environment variable,
    or the default name when the variable is unset.
    :return: Name of config file (String)
    """
    return os.environ.get('LABELORD_CONFIG', 'config.cfg')
import math
def generate_sine(offset: float = 0,
                  sine_range: float = 1,
                  second: int = 0,
                  second_range: int = 60):
    """Return the point along an optionally-transformed sine wave for a given second."""
    # Map the second onto one full period of the wave.
    phase = (second / second_range) * 2 * math.pi
    return sine_range * math.sin(phase) + offset
def crible_eratosthene(n: int) -> list[int]:
    """
    Return every prime number less than or equal to n,
    using the sieve of Eratosthenes.
    """
    if n <= 1:
        return []
    # Start from "odd numbers are candidates": evens are composite,
    # then patch up the special cases 1 and 2.
    sieve = [k % 2 == 1 for k in range(n + 1)]
    sieve[1] = False
    sieve[2] = True
    for i in range(3, n // 2 + 1, 2):
        for multiple in range(2 * i, n + 1, i):
            sieve[multiple] = False
    return [k for k, is_prime in enumerate(sieve) if is_prime]
import numpy
def calc_powder_iint_1d_ordered(f_nucl, f_m_perp,
        flag_f_nucl: bool = False, flag_f_m_perp: bool = False):
    """Powder-averaged integrated intensity for an ordered sublattice in the
    equatorial plane (alpha = 90 deg.).

    For details see the documentation "Integrated intensity from powder
    diffraction". In the equatorial plane the scattering does not depend on
    the neutron polarization.

    Returns the integrated intensities and (optionally) their derivatives
    with respect to the real/imaginary parts of the structure factors.
    """
    nuclear = numpy.abs(f_nucl) ** 2
    magnetic = (numpy.abs(f_m_perp) ** 2).sum(axis=0)
    iint = nuclear + magnetic
    dder = {}
    if flag_f_nucl:
        dder["f_nucl_real"] = 2 * f_nucl.real
        dder["f_nucl_imag"] = 2 * f_nucl.imag
    if flag_f_m_perp:
        dder["f_m_perp_real"] = 2 * f_m_perp.real
        dder["f_m_perp_imag"] = 2 * f_m_perp.imag
    return iint, dder
import subprocess
def get_commit_id() -> str:
    """Return the short hash of the current git HEAD commit.

    Returns:
        str: git commit hash.
    """
    raw = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
    return raw.strip().decode("utf-8")
import inspect
import importlib
def list_class_names(clz, package):
    """
    Find subclasses of *clz* defined in the module *package* and return their names.

    Args:
        clz: the target superclass
        package: the module to search (dotted path)
    Returns: list of matching class names
    """
    module = importlib.import_module(package)

    def _is_strict_subclass(obj):
        # Classes only, excluding clz itself.
        return inspect.isclass(obj) and issubclass(obj, clz) and not obj == clz

    return [name for name, _ in inspect.getmembers(module, _is_strict_subclass)]
def despine_ax(ax=None):
    """
    Remove spines and ticks from a matplotlib axis

    Parameters
    ----------
    ax : matplotlib.axes.Axes object
        axes from which to remove spines and ticks. if None, do nothing

    Returns
    -------
    ax : matplotlib.axes.Axes object
        despined ax object
    """
    if ax is None:
        # Nothing real passed in.
        return None
    for side in ('right', 'left', 'top', 'bottom'):
        ax.spines[side].set_visible(False)
    ax.set_xticks([])
    ax.set_yticks([])
    return ax
Subsets and Splits
No community queries yet.
The top public SQL queries from the community will appear here once they become available.