content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import os
def parse_labels(label_file_path: str) -> dict:
    """Parse an Audacity-style label file into a dictionary.

    Each annotation consists of a label line ("onset offset class") followed
    by a frequency-range line that starts with a backslash ("\\ low high").

    Input:
        label_file_path - path to audacity-style label file
    Output:
        dict mapping class name -> list of
        (onset, offset, low_freq, high_freq) tuples
    """
    assert os.path.exists(label_file_path), f"Could not find label file: {label_file_path}\n"
    # context manager instead of open/close so the handle cannot leak
    with open(label_file_path) as label_file:
        label_lines = label_file.readlines()
    label_dict = {}
    for line_no, label_line in enumerate(label_lines):
        # a leading backslash marks a frequency-range line; it is consumed
        # together with its preceding label line below, so skip it here
        if label_line[0] == "\\":
            continue
        label_vals = label_line.rstrip("\n").split(" ")
        # third field is the annotation class; drop it before float conversion
        voc_class = label_vals[2]
        del label_vals[2]
        # pull (low, high) from the following "\ low high" line ([2:] strips "\ ")
        label_vals.extend(label_lines[line_no + 1].rstrip("\n")[2:].split(" "))
        label_vals = [float(val) for val in label_vals]
        label_dict.setdefault(voc_class, []).append(tuple(label_vals))
    return label_dict
def list_widgfield(widgfield, widgfield_dict):
    """Expand a widgfield (with no i value) against a dictionary.

    Produces a list of (widgfield, value) tuples where each widgfield has
    its i attribute set to a dictionary key, sorted by key.  If the given
    widgfield already has an i value, it is simply paired with the dict.
    """
    if widgfield.i:
        return [(widgfield, widgfield_dict)]
    pairs = [(widgfield._replace(i=key), value)
             for key, value in widgfield_dict.items()]
    pairs.sort(key=lambda pair: pair[0].i)
    return pairs
def batch_to_numpy_images_and_labels(data):
    """Convert a batch of (images, labels) tensors to numpy arrays.

    Args:
        data (batch): a pair of tensors (images, labels)
    Returns:
        tuple: (numpy images, numpy labels)
    """
    images, labels = data
    return images.numpy(), labels.numpy()
import os
import logging
def convert_spectpath_to_audiofpath(audiofolder: str, specpath: str) -> str:
    """
    Finds the path of the audio file that corresponds to the spectrogram
    found at `specpath`.

    :param audiofolder: directory expected to contain the .wav file
    :param specpath: path to the spectrogram file (e.g. a .png)
    :return: path to the matching .wav file
    :raises FileNotFoundError: when neither candidate .wav exists
    """
    specfname = os.path.basename(specpath)
    wavfname = os.path.splitext(specfname)[0] + ".wav"
    wavfpath = os.path.join(audiofolder, wavfname)
    if os.path.isfile(wavfpath):
        return wavfpath
    else:
        # Fall back to keeping the spectrogram extension (e.g. "x.png.wav").
        tryagainfpath = os.path.join(audiofolder, specfname + ".wav")
        # logging.warn is deprecated; logging.warning is the supported API
        logging.warning("Could not find {}. Trying {}.".format(wavfpath, tryagainfpath))
        if not os.path.isfile(tryagainfpath):
            raise FileNotFoundError("Could not find {} or {}.".format(wavfpath, tryagainfpath))
        return tryagainfpath
def get_byte_array(integer):
    """Return the variable length bytes corresponding to the given int.

    Big endian is used (unlike most of Telegram API) because the MTProto
    auth-key docs specify big-endian encodings for pq and dh_prime.
    Reference: https://core.telegram.org/mtproto/auth_key
    """
    n_bytes = (integer.bit_length() + 7) // 8  # ceil(bits / 8)
    return integer.to_bytes(n_bytes, byteorder='big', signed=False)
def neutronify(name):
    """Adjust the resource name for use with Neutron's API"""
    # Neutron expects dashes where our resource names use underscores
    return '-'.join(name.split('_'))
def calc_current_density(Nt: float, height: float, thickness: float, i_ph: float):
    """
    Average current density of a superconducting winding (constant filling factor).

    :param Nt: number of active turns in the winding [#]
    :param height: height of the winding [mm]
    :param thickness: winding width [mm]
    :param i_ph: phase current [A]
    :return: average current density [A/mm^2]
    """
    winding_cross_section = height * thickness
    return Nt * i_ph / winding_cross_section
def pluralize(num, singular):
    """Return *num* together with the properly pluralised *singular*.

    >>> pluralize(2, "meme")
    '2 memes'
    >>> pluralize(1, "thing")
    '1 thing'
    >>> pluralize(0, "class")
    '0 classes'
    """
    if num == 1:
        return f"{num} {singular}"
    # words ending in "s" pluralise with "es"
    suffix = "es" if singular[-1] == "s" else "s"
    return f"{num} {singular}{suffix}"
def rangeSplit(rangeStr):
    """Return a list of numbers from a specified set of ranges.

    Given a string such as "1 2 4-6 8" return [1, 2, 4, 5, 6, 8].  Numbers
    and ranges may be space separated or comma separated (but not both).

    Keyword arguments:
    rangeStr -- a string containing ranges such as "1 2 4-6 8"
    """
    separator = ',' if ',' in rangeStr else ' '
    numbers = []
    for token in rangeStr.split(separator):
        if '-' in token:
            low, high = (int(bound) for bound in token.split('-'))
            numbers.extend(range(low, high + 1))
        else:
            numbers.append(int(token))
    return numbers
import torch
def compute_accuracy(y_pred, y_true):
    """
    Compute the average classification accuracy.

    Parameters
    ----------
    y_pred : torch.Tensor
        The predicted scores, one row of class scores per sample.
    y_true : torch.Tensor
        The true labels.

    Returns
    -------
    tuple of (confidences, predicted labels, correctness mask, accuracy)
    """
    predictions = torch.squeeze(y_pred)
    targets = torch.squeeze(y_true)
    confidences, winners = predictions.max(dim=1)
    corrects = winners == targets
    n_samples = float(y_true.size(0))
    accuracy = (corrects.sum().float() / n_samples).cpu().detach().numpy()
    return confidences, winners, corrects, accuracy
def funnels_as_ini_section(funnels):
    """Build an RTW-compatible ini-section dict from a mapping of funnels.

    For every funnel, "<name>Pos" holds the rounded position and
    "<name>Oval" holds 1/0 for the oval flag.  Pos keys are inserted
    before Oval keys, as the exported file expects.
    """
    section_content = {}
    for name, funnel in funnels.items():
        section_content[name + "Pos"] = round(funnel.position)
    for name, funnel in funnels.items():
        section_content[name + "Oval"] = 1 if funnel.oval else 0
    return section_content
import os
def abspath(path: str) -> str:
    """Return os.path.abspath(path) for local paths; s3:... paths pass through."""
    return path if path.startswith('s3:') else os.path.abspath(path)
import pickle
def load_sample_illuminations():
    """Loads example illuminations sampled from the test set.

    Returns:
      Numpy arrays of azimuth_npy and lc_npy from the data directory.
    """
    # Use context managers so the file handles are closed promptly;
    # the originals were opened inline and never closed.
    with open("factorize_a_city/data/azimuth.npy", "rb") as f:
        azimuth_npy = pickle.load(f, encoding="bytes")
    with open("factorize_a_city/data/lighting_context.npy", "rb") as f:
        lc_npy = pickle.load(f, encoding="bytes")
    return azimuth_npy, lc_npy
def _filter_folders(folder_list, from_date_obj=None, to_date_obj=None,
                    min_price=0.0, max_price=None, transportation_mean=None,
                    min_segment_nb=1, max_segment_nb=None,
                    bicycle_without_reservation_only=None,
                    bicycle_with_reservation_only=None,
                    bicycle_with_or_without_reservation=None):
    """ Filter a list of folders, based on different attributes, such as
    from_date or min_price. Returns the filtered list.

    A folder is kept only if it passes every enabled filter: price bounds,
    departure-date window, and per-trip constraints (transportation mean,
    segment count, bicycle-carriage options).  Each bicycle filter requires
    ALL segments of ALL trips to match.
    NOTE(review): the bicycle checks compare segment flags with the filter
    value using ``!=`` — this assumes both are booleans; confirm callers
    never pass other truthy values.
    """
    filtered_folder_list = []
    for folder in folder_list:
        # a single flag collects the verdict across all filters
        to_be_filtered = False
        # Price
        if folder.price < min_price:
            to_be_filtered = True
        if max_price is not None:
            if folder.price > max_price:
                to_be_filtered = True
        # Date
        if from_date_obj:
            if folder.departure_date_obj < from_date_obj:
                to_be_filtered = True
        if to_date_obj:
            if folder.departure_date_obj > to_date_obj:
                to_be_filtered = True
        for trip in folder.trips:  # Check every trip
            # Transportation mean: every segment must use the requested mean
            if transportation_mean:
                for segment in trip.segments:
                    if segment.transportation_mean != transportation_mean:
                        to_be_filtered = True
                        break
            # Number of segments
            if min_segment_nb:
                if len(trip.segments) < min_segment_nb:
                    to_be_filtered = True
            if max_segment_nb:
                if len(trip.segments) > max_segment_nb:
                    to_be_filtered = True
            # Bicycle
            # All segments of the trip must respect the bicycle conditions
            if bicycle_with_reservation_only:
                for segment in trip.segments:
                    if segment.bicycle_with_reservation != \
                            bicycle_with_reservation_only:
                        to_be_filtered = True
                        break
            if bicycle_without_reservation_only:
                for segment in trip.segments:
                    if segment.bicycle_without_reservation != \
                            bicycle_without_reservation_only:
                        to_be_filtered = True
                        break
            if bicycle_with_or_without_reservation:
                for segment in trip.segments:
                    # either carriage option satisfies this filter
                    condition = (segment.bicycle_with_reservation or
                                 segment.bicycle_without_reservation)
                    if condition != bicycle_with_or_without_reservation:
                        to_be_filtered = True
                        break
        # Add to list if it has not been filtered
        if not to_be_filtered:
            filtered_folder_list.append(folder)
    return filtered_folder_list
def overlap(v1, v2):
    """Determine whether affected positions of two variants overlap."""
    # each variant affects [pos, pos + max(len(ref), len(alt)))
    end1 = v1.pos + max(len(v1.ref), len(v1.alt))
    end2 = v2.pos + max(len(v2.ref), len(v2.alt))
    overlap_start = max(v1.pos, v2.pos)
    return min(end1, end2) - overlap_start > 0
import ast
import os
import imp
def load_rules(rules, root='./'):
    """
    Load custom post-processing rules.

    Rules should be added to the configuration file under a property called
    "rules", which has key-value pairs mapping a unique rule name to a Python
    file. Each Python file intended to be used as a rules file should have a
    run_rules() function which takes one argument.

    Example config.json:
        { "rules": { "my_rules": "rules/my_rules.py" } }
    Example rules file:
        def run_rules(data):
            pass

    :param rules: string holding a Python-literal dict of {rule: path}
    :param root: directory used to resolve relative rule paths
    :return: dict mapping rule name -> loaded module
    """
    if not rules:
        return {}
    # ``imp`` is deprecated (removed in 3.12); importlib.util is the
    # supported way to load a module from a source file path.
    import importlib.util

    def _load_source(name, path):
        # load and execute a module object from a source file path
        spec = importlib.util.spec_from_file_location(name, path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module

    loaded_rules = {}
    # .iteritems() was Python 2 only; .items() works everywhere
    for (rule, path) in ast.literal_eval(rules).items():
        module = None
        if os.path.exists(path):
            module = _load_source(rule, path)
        elif os.path.exists(os.path.join(root, path)):
            module = _load_source(rule, os.path.join(root, path))
        assert module is not None
        assert module.run_rules is not None
        loaded_rules[rule] = module
    return loaded_rules
def _get_plot_title(target_name: str, last_observation_date: str,
eval_dataset_creation_date: str,
forecast_horizon: int) -> str:
"""Gets the title of the plot."""
return (
f"Comparison of metrics for predicting {target_name}. Forecast date: "
f"{last_observation_date}, forecast horizon: {forecast_horizon} days, "
f"evaluation reporting date: {eval_dataset_creation_date}.") | bdd8a9a0d648342192200d7583cb4ed452ab989c | 51,310 |
from time import sleep
def fatorial(n, show=True):
    """
    Compute the factorial of a number.

    :param n: the number whose factorial is computed.
    :param show: (optional) when True, print the expanded calculation.
    :return: n!
    """
    f = 1
    print(f'\nO fatorial de {n} é: ')
    sleep(0.5)
    for i in range(n, 0, -1):
        f *= i
        if show:
            print(i, end='')
            # "x" between factors, "=" after the last one
            # (original used the tautology ``elif 1 == 1`` here)
            if i > 1:
                print(' x ', end='')
            else:
                print(' = ', end='')
    return f
def list_chunk(target, n):
    """Split *target* into consecutive chunks of at most *n* items.

    **Example**
        simple_utils.array.list_chunk(your_list, 5)

    **Parameters**
    * **target** (list) -- the list to split.
    * **n** (int) -- chunk size.
    """
    chunk_starts = range(0, len(target), n)
    return [target[start:start + n] for start in chunk_starts]
def box_kernel_config(im_shape, block=(2, 4, 32)):
    """
    block = (z=2,y=4,x=32) was hand tested to be very fast
    on the Quadro P2000, might not be the fastest config for other
    cards
    """
    def ceil_div(size, step):
        # smallest grid count covering `size` elements in steps of `step`
        return (size + step - 1) // step

    grid = tuple(ceil_div(dim, b) for dim, b in zip(im_shape, block))
    return block, grid
import os
import sys
def on_slurm_cluster():
    """ On the cluster? (linux platform with the HPC institute env var set) """
    has_cluster_env = 'HPCCLUSTER_INSTITUTE' in os.environ
    is_linux = 'linux' in sys.platform
    return has_cluster_env and is_linux
import csv
def split_categories(cat, csv_path):
    """
    Collect the rows of a category and renumber their class IDs.

    :param cat: category value matched against column 5 of the CSV
    :param csv_path: path to the CSV file (first row is a header)
    :return: (table, tag) where table maps the original class ID (column 0,
        as int) to a new consecutive ID, and tag is the number of matching rows
    """
    with open(csv_path, 'r') as file:
        rows = list(csv.reader(file))[1:]  # drop the header row
    matching = [row for row in rows if row[5] == cat]
    table = {}
    for new_id, row in enumerate(matching):
        table[int(row[0])] = new_id
    return table, len(matching)  # tag counts matching rows
def _quote_if_str(val):
"""
Helper to quote a string.
"""
if isinstance(val, str):
return f"'{val}'"
return val | 5b216d94c1039c6a95947220badf414e2b6b3a93 | 51,316 |
def count_labels_distribution(data_loader):
    """Count background (0) and foreground (1) pixels across a data loader.

    Returns (background pixel count, foreground pixel count).
    """
    background_pixels = 0
    foreground_pixels = 0
    for sample_batch in data_loader:
        labels = sample_batch['label'][:, :, :, :].numpy()
        background_pixels += (labels == 0).sum()
        foreground_pixels += (labels == 1).sum()
    return background_pixels, foreground_pixels
import re
def extract_cast(soup):
    """
    extracts the cast from the given page
    :param soup: BeautifulSoup object of the page
    :return: list of dicts with keys "name" and "screen_name"
    """
    # first <tr> of the cast table is the header row; start from its sibling
    info_row = soup.find(class_="cast_list").find("tr").find_next_sibling("tr")
    cast = []
    while info_row is not None:
        actor = {}
        actor_tag = info_row.find(attrs={"itemprop": "actor"})
        if actor_tag is None:
            # a row without an actor cell ends the listing
            break
        actor["name"] = actor_tag.text.strip()
        screen_name_tag = actor_tag.find_next_sibling(class_="character")
        if screen_name_tag is None:
            break
        screen_name = screen_name_tag.text.strip()
        # strip a trailing parenthesised annotation, e.g. "Bob (voice)" -> "Bob"
        re_match = re.search("^(.+?)(\((\w|\W)+?\))?$", screen_name)
        if re_match is None or len(re_match.groups()) < 2:
            actor["screen_name"] = screen_name
        else:
            actor["screen_name"] = re_match.group(1).strip()
        cast.append(actor)
        info_row = info_row.find_next_sibling("tr")
    return cast
from typing import List
def find_max_cross_subarray(sequence: List[int], low: int, mid: int, high: int):
    """Find the maximum subarray crossing the midpoint.

    Returns (left boundary index, right boundary index, total sum) of the
    best subarray spanning sequence[low..mid] and sequence[mid+1..high].
    """
    # Best suffix of sequence[low..mid]: scan from mid down to low so the
    # candidate always touches the midpoint.
    best_left_sum = float("-inf")
    running = 0
    best_left = None
    for idx in range(mid, low - 1, -1):
        running += sequence[idx]
        if running > best_left_sum:
            best_left_sum = running
            best_left = idx
    # Best prefix of sequence[mid+1..high], so left + right is contiguous.
    best_right_sum = float("-inf")
    running = 0
    best_right = None
    for idx in range(mid + 1, high + 1):
        running += sequence[idx]
        if running > best_right_sum:
            best_right_sum = running
            best_right = idx
    return (best_left, best_right, best_left_sum + best_right_sum)
from typing import Counter
def count_entities(entities):
    """
    Count occurrences of each entity, most common first.

    :param entities: iterable of hashable entities
    :return: list of (entity, count) pairs in descending count order
    """
    tally = Counter(entities)
    return tally.most_common()
from typing import TextIO
from typing import List
from typing import Any
def count_birds(observations_file: TextIO) -> List[List[Any]]:
    """Count the bird species listed in observations_file,
    which has one bird species per line.

    Returns a list of [species, count] pairs in first-seen order.

    >>> infile = StringIO('bird 1\\nbird 2\\nbird 1\\n')
    >>> count_birds(infile)
    [['bird 1', 2], ['bird 2', 1]]
    """
    # A dict gives O(1) lookups (the original scanned the result list per
    # line, O(n^2)) and preserves first-seen insertion order.
    counts: dict = {}
    for line in observations_file:
        bird = line.strip()
        counts[bird] = counts.get(bird, 0) + 1
    return [[bird, count] for bird, count in counts.items()]
import os
def _sort_file_by_oldest(d):
"""
Get files and directories in specified path sorted by last modified time
:param d: directory path, ```str```
:return: a list of files and directories sorted by last modified time (old first), ```list```
"""
files = os.listdir(d)
files.sort(key=lambda f: os.path.getmtime(os.path.join(d, f)))
return files | 487e84cb49d5ea6ad039cc047ff0e5b93b9a47c5 | 51,323 |
def simpleInterest(p, r, t):
    """Simple interest.

    Returns: interest value, i.e. principal * rate * time.
    """
    return p * r * t
import os
import re
def find_files(folder, regex, remove_empty=False):
    """
    Find all files matching the [regex] pattern in [folder] (not recursive).

    folder : string
        folder to search
    regex : string (NOT regex object)
        pattern to match (case-insensitive)
    remove_empty : bool
        when True, drop zero-byte files from the result
    """
    pattern = re.compile(regex, re.IGNORECASE)
    matches = []
    for entry in os.listdir(folder):
        if pattern.search(entry):
            matches.append(os.path.abspath(os.path.join(folder, entry)))
    if remove_empty:
        matches = [path for path in matches if os.path.getsize(path) > 0]
    matches.sort()
    return matches
from typing import Union
import os
def result_file(file_path: str) -> Union[str, None]:
    """
    Return the name of the single file inside directory *file_path*.

    :param file_path: directory expected to hold exactly one file
        (e.g. ./tmp/file_id)
    :return: (str) the file name if the directory exists and contains
        exactly one entry, None otherwise
    """
    if not os.path.isdir(file_path):
        return None
    entries = os.listdir(file_path)
    # it should be just one file per file_id directory
    if len(entries) != 1:
        return None
    return entries[0]
def take_out_npools_from_cmdline(cmdline):
    """
    Wipe out any indication about npools from the cmdline settings.

    :param cmdline: list of strings with the cmdline options (as specified in
        pw input settings)
    :return: the new cmdline without the npools options and their values.
        Note: for index 0, ``cmdline[i - 1]`` wraps to the last element,
        matching the original behaviour.
    """
    pool_flags = ('-npools', '-npool', '-nk')
    kept = []
    for idx, arg in enumerate(cmdline):
        if arg in pool_flags:
            continue
        if cmdline[idx - 1] in pool_flags:
            continue  # this is the value following a pool flag
        kept.append(arg)
    return kept
def actors(hg):
    """Return a list of all actors found in the hypergraph."""
    matches = hg.search('(actor/P/. *)')
    return [edge[1] for edge in matches]
import json
def get_message_content_sents(parsedData):
    """
    :param parsedData: data after being parsed by the en parser
    :return: JSON string mapping sentence index -> sentence text
    """
    sents = {}
    # the "sents" property returns spans
    # spans have indices into the original string
    # where each index value represents a token
    for i, span in enumerate(parsedData.sents):
        # for span in parsedData.sents:
        # go from the start to the end of each span, returning each token in the sentence
        # combine each token using join()
        # NOTE(review): the inner generator reuses the name ``i``, shadowing
        # the enumerate index; the dict key below is therefore the last token
        # index of the span, not the sentence number — confirm this is intended.
        sent = ''.join(parsedData[i].string for i in range(span.start, span.end)).strip()
        sents[i] = sent
    json_data = json.dumps(sents)
    return json_data
import torch
def spknn(x, y, k):
    """ knn serach
    Arguments:
        pos_support - [B,N,3] support points
        pos - [B,M,3] centre of queries
        k - number of neighboors, needs to be > N
    Returns:
        idx - [B,M,k]
        dist2 - [B,M,k] squared distances
    """
    B = x.size(0)
    m, n = x.size(1), y.size(1)
    # squared norms of x summed over the feature axis -> (B, m, 1),
    # then expanded to (B, m, n) so it can combine with the pairwise term
    xx = torch.pow(x, 2).sum(2, keepdim=True).expand(B, m, n)
    # squared norms of y -> (B, n, 1), expanded and transposed to (B, m, n)
    yy = torch.pow(y, 2).sum(2, keepdim=True).expand(B, n, m).transpose(1, 2)
    dist = xx + yy
    # equivalent in-place form of the next line, kept for reference:
    # dist.addmm_(1, -2, x, y.transpose(1, 2))
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y
    distances = dist - 2 * torch.matmul(x, y.transpose(1, 2))
    # NOTE(review): topk defaults to largest=True, so this selects the k
    # LARGEST squared distances; for nearest neighbours largest=False would
    # be expected — confirm.  Also ``dist`` is overwritten here but never
    # returned (only ``idx`` is).
    dist = distances.topk(k=k, dim=-1)[0]
    idx = distances.topk(k=k, dim=-1)[1]
    return idx
def cvsecs(*args):
    """ Convert a time to seconds. Either cvsecs(secs), cvsecs(min, secs)
    or cvsecs(hours, mins, secs).
    """
    if len(args) == 1:
        return float(args[0])
    if len(args) == 2:
        minutes, seconds = args
        return 60 * float(minutes) + float(seconds)
    if len(args) == 3:
        hours, minutes, seconds = args
        return 3600 * float(hours) + 60 * float(minutes) + float(seconds)
def is_small_calcification(bb_width, bb_height, min_diam):
    """True when the blob's greatest bounding-box dimension is below `min_diam`."""
    return max(bb_width, bb_height) < min_diam
def strings_to_services(strings, string_to_service):
    """Convert service strings to SUPPORT_* service bitmask."""
    mask = 0
    # OR together the bit flag of every named service
    for service_name in strings:
        mask |= string_to_service[service_name]
    return mask
def _pkcs1_padding(m, size):
"""Add PKCS padding to the message 'm', so that it's appropriate for
use with a public key of 'size' bytes."""
# I'd rather use OAEP+, but apparently PyCrypto barely supports
# signature verification, and doesn't seem to support signature
# verification with nondeterministic padding. "argh."
s = [ "\x00\x01", "\xff"* (size-3-len(m)), "\x00", m ]
r = "".join(s)
return r | 430a5220b046034ac4caa79de7508c7613028d0b | 51,336 |
def build_interval_data(chanjo_db, sample_id, group_id, interval_data):
    """
    .. versionchanged:: 2.1.2
       Create model without adding it to the session.
    """
    # assemble the model fields, then create the record in one call
    fields = {
        'parent_id': interval_data[3],
        'sample_id': sample_id,
        'group_id': group_id,
        'coverage': float(interval_data[-2]),       # second to last field
        'completeness': float(interval_data[-1]),   # last field
    }
    return chanjo_db.create('interval_data', **fields)
from typing import List
def process_covid_csv_data(covid_csv_data: List[str]):
    """Extract three values from the raw covid CSV lines.

    Returns:
        - last7days_cases - cumulative number of cases over data rows 2..8
          (the first data row is skipped)
        - current_hospital_cases - hospital cases from the most recent row
        - total_deaths - cumulative deaths taken from the 14th data row
    """
    rows = [line.rstrip().split(",") for line in covid_csv_data[1:]]
    current_hospital_cases = rows[0][5]
    total_deaths = rows[13][4]
    # sum the new-cases column (index 6) over rows 2 through 8 inclusive
    last7days_cases = sum(int(rows[i][6]) for i in range(2, 9))
    return last7days_cases, current_hospital_cases, total_deaths
import re
def _verify_format(s, format):
"""
Make sure the input is in the right format
"""
r = re.compile(format)
if r.match(s) is not None:
return True
return False | 0078a16e053d68afb5d170bbe808763361bcaeab | 51,340 |
def hasSheBang(fname):
    """Checks fname is a #! script."""
    with open(fname) as f:
        first_two = f.read(2)
    return first_two.startswith('#!')
def quote_escape(value, lf='&mjf-lf;', quot='&mjf-quot;'):
    """
    Escape a string so that it can safely be quoted. Use this when the
    value *may* contain line feeds or both single and double quotes.

    Every ``\\n`` is replaced with *lf* (default ``&mjf-lf;``).  When the
    value contains single quotes *and* double quotes, all double quotes
    are replaced with *quot* (default ``&mjf-quot;``).
    """
    escaped = value
    if '\n' in escaped:
        escaped = escaped.replace('\n', lf)
    contains_both_quote_kinds = "'" in escaped and '"' in escaped
    if contains_both_quote_kinds:
        escaped = escaped.replace('"', quot)
    return escaped
def load_noise(front_path, end_path):
    """
    Load the front and end noise phrase lists.

    :param front_path: path to the front-noise phrase file
    :param end_path: path to the end-noise phrase file
    :return: (front_list, end_list), one phrase per line
    """
    # Iterate the files directly instead of the original manual readline
    # loops (whose return statement was buried inside the second loop).
    with open(front_path, 'r', encoding='utf-8') as front_f:
        front_list = [line.replace('\n', '') for line in front_f]
    print('Front noise phrase load finished!')
    with open(end_path, 'r', encoding='utf-8') as end_f:
        end_list = [line.replace('\n', '') for line in end_f]
    print('End noise phrase load finished!')
    return front_list, end_list
def str_to_array_of_int(value):
    """Convert a comma-separated string of integers into a list of ints."""
    return list(map(int, value.split(",")))
def is_json_response(req):
    """Returns True when the request wants a JSON response, False otherwise"""
    if "Accept" not in req.headers:
        return False
    return "application/json" in req.accept
def convert_pt_to_in(pts: int) -> float:
    """Converts a length in pts to a length in inches.

    Parameters
    ----------
    pts : int
        A length in pts.

    Returns
    -------
    float
        A length in inches.

    References
    ----------
    - https://www.overleaf.com/learn/latex/Lengths_in_LaTeX
    """
    inches_per_pt = 12.0 * 249.0 / 250.0 / 864.0
    return inches_per_pt * pts
def run(x_train, y_train, x_test, y_test, clf, epochs):
    """Train *clf* one iteration per epoch and record the test error."""
    test_errors = []
    clf.max_iter = 1  # one optimisation step per fit() call
    for _ in range(epochs):
        clf.fit(x_train, y_train)
        error = 1.0 - clf.score(x_test, y_test)
        clf.warm_start = True  # continue from the current weights next epoch
        test_errors.append(error)
    return test_errors
def mean(r):
    """Return the mean (i.e., average) of a sequence of numbers.

    Raises ValueError for an empty collection.

    >>> mean([5, 10])
    7.5
    """
    count = len(r)
    if count == 0:
        raise ValueError("can't calculate mean of empty collection")
    return float(sum(r)) / count
def calc_log_probs(exp, result, batch):
    """
    Calculates log_probs of batch.

    :param exp: experiment object providing .modalities, .rec_weights and
        .flags.batch_size
    :param result: dict holding per-modality reconstruction distributions
        under key 'rec'
    :param batch: sequence whose first element maps modality name -> target
    :return: (dict of per-modality negative log-probs, their weighted sum)
    """
    mods = exp.modalities
    log_probs = {}
    weighted_log_prob = 0.0
    for m_key in mods:
        mod = mods[m_key]
        ba = batch[0][mod.name]
        # negative log-likelihood of the target under the reconstruction
        # distribution, normalised by the batch size
        log_probs[mod.name] = -mod.calc_log_prob(out_dist=result['rec'][mod.name], target=ba,
                                                 norm_value=exp.flags.batch_size)
        weighted_log_prob += exp.rec_weights[mod.name] * log_probs[mod.name]
    return log_probs, weighted_log_prob
def split_list(input_list):
    """Split input_list into three sub-lists.

    Returns (initial_list, success_list, failure_list): the non-empty
    items before any marker, the items after the string 'Success:' and
    the items after the string 'Failure:'.  Empty strings are discarded,
    and the markers themselves are not included.
    """
    initial_list = []
    success_list = []
    failure_list = []
    target = initial_list  # list currently receiving non-empty items
    for item in input_list:
        if item == 'Success:':
            target = success_list
        elif item == 'Failure:':
            target = failure_list
        elif item != '':
            target.append(item)
    return initial_list, success_list, failure_list
def add42(self, a_val):
    """ Add the class constant A42 to a value. """
    return self.A42 + a_val
def synchronize_iterables(iterables):
    """Synchronize the given iterables in item-wise order.

    *iterables* maps field names to zero-argument callables producing an
    iterable.  Returns a list of {field: value} dicts; a field is dropped
    from later dicts once its iterable is exhausted.

    >>> synced = synchronize_iterables(dict(a=lambda: [1, 2], b=lambda: [3, 4]))
    >>> synced == [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]
    True
    """
    iterators = [(field, iter(factory()))
                 for field, factory in sorted(iterables.items())]
    synced = []
    while True:
        entry = {}
        for field, it in iterators:
            try:
                entry[field] = next(it)
            except StopIteration:
                continue  # this field is exhausted; omit it
        if not entry:
            break  # every iterator is spent
        synced.append(entry)
    return synced
def clean_iso_timestamps(tl: dict) -> dict:
    """
    Normalise the 'Z' UTC suffix of tl['timestamp'] to '+00:00'.

    ISO timestamp formats are ambiguous; for testing we want a single
    canonical spelling.  Mutates and returns *tl*.
    """
    normalized = tl["timestamp"].replace("Z", "+00:00")
    tl["timestamp"] = normalized
    return tl
def get_config(configs, requisition):
    """Get the associated config (None when no reference matches)."""
    matches = (
        config for config in configs
        if requisition["reference"]
        == "{}-{}".format(config["enduser_id"], config["aspsp_id"])
    )
    return next(matches, None)
def zipstar(L, lazy=False):
    """
    A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...])
    May return lists or tuples.
    """
    if len(L) == 0:
        return L
    width = len(L[0])
    if width < 100:
        # manual transpose beats zip(*L) for narrow rows
        return [[row[col] for row in L] for col in range(width)]
    transposed = zip(*L)
    if lazy:
        return transposed
    return list(transposed)
import os
import re
def guess_source(filename_or_string):
    """Guess whether the content of the file or string is xml, ttk or text.

    This is a heuristic: only the first 1000 characters are examined, so it
    relies on the first tag appearing early in the input.
    """
    # this needs to be large enough that the first tag is very likely captured
    chars_to_read = 1000
    content = filename_or_string[:chars_to_read]
    if os.path.exists(filename_or_string):
        # plain open (codecs.open raised unicode errors here)
        fh = open(filename_or_string)
        content = fh.read(chars_to_read)
        fh.close()
    content = content.strip()
    match = re.search('<([^> ]+)[^>]*>', content)
    if match is None:
        return 'text'
    first_tag = match.group(1)
    return 'ttk' if first_tag.lower() == 'ttk' else 'xml'
import zlib
def decompress_and_truncate(zipped, truncate=True, max_length=1000):
    """
    Decompress a zlib-compressed string and optionally truncate it.

    :param zipped: zlib-compressed bytes (falsy input yields "")
    :param truncate: whether to cut the result at *max_length*
    :param max_length: maximum length before the "..." marker is appended
    :return: the decoded (and possibly truncated) string
    """
    if not zipped:
        return ""
    text = zlib.decompress(zipped).decode("utf-8")
    if truncate and len(text) > max_length:
        text = text[:max_length] + "..."
    return text
def BuildAdGroupAdOperations(adgroup_operations):
    """Builds the operations adding a TextAd to each AdGroup.

    Args:
      adgroup_operations: a list containing the operations that will add AdGroups.
    Returns:
      a list containing the operations that will create a new TextAd for each
      of the provided AdGroups.
    """
    operations = []
    for adgroup_operation in adgroup_operations:
        # Batch jobs process operations of mixed types, so the xsi_type must
        # be explicit here (a single service could otherwise infer it).
        text_ad = {
            'xsi_type': 'TextAd',
            'headline': 'Luxury Cruise to Mars',
            'description1': 'Visit the Red Planet in style.',
            'description2': 'Low-gravity fun for everyone!',
            'displayUrl': 'www.example.com',
            'finalUrls': ['http://www.example.com/1']
        }
        operations.append({
            'xsi_type': 'AdGroupAdOperation',
            'operand': {
                'adGroupId': adgroup_operation['operand']['id'],
                'ad': text_ad,
            },
            'operator': 'ADD'
        })
    return operations
def remove_values(the_list, val):
    """
    Return a copy of `the_list` without any occurrences of `val`.
    """
    return list(filter(lambda item: item != val, the_list))
def is_valid(x, size):
    """Check feasibility: every coordinate must lie in the open interval (-1, size)."""
    return all(-1 < coordinate < size for coordinate in x)
def flatten(list_of_lists):
    """Flatten one level of nesting into a single list of items."""
    flat = []
    for inner_list in list_of_lists:
        flat.extend(inner_list)
    return flat
def get_state_dict(model):
    """Return the model state dictionary, unwrapping a ``.module`` wrapper
    (used when distributed training wrapped the model)."""
    target = model.module if hasattr(model, "module") else model
    return target.state_dict()
import os
def IsWindows():
  """Report whether the script is executing on a Windows host.

  Returns:
    True when the underlying OS is Windows, False otherwise.
  """
  # os.name reports 'nt' on all modern Windows platforms.
  return os.name == 'nt'
def permutation(l):
    """
    Advance `l` in place to its next lexicographic permutation.

    Unlike itertools.permutations, repeated values are handled so that
    duplicate permutations are never produced.  When `l` is already the
    last (fully descending) permutation, it wraps around to the first
    (fully ascending) one.

    Args:
        l: the list of comparable elements to permute (modified in place).

    Returns:
        The same list object, now holding the next permutation.
    """
    n = len(l)
    # Step 1: Find tail -- the longest non-increasing suffix.
    last = n - 1  # tail is from `last` to end
    while last > 0:
        if l[last - 1] < l[last]:
            break
        last -= 1
    # Step 2: Increase the number just before tail by swapping it with the
    # smallest tail element that is still larger than it.
    if last > 0:
        small = l[last - 1]
        big = n - 1
        while l[big] <= small:
            big -= 1
        l[last - 1], l[big] = l[big], small
    # Step 3: Reverse tail so the suffix becomes the smallest arrangement.
    i = last
    j = n - 1
    while i < j:
        l[i], l[j] = l[j], l[i]
        i += 1
        j -= 1
    return l
def get_total_expenses(context):
    """
    Sum the `amount` attribute over every entry in context['Expenses'].
    Returns the total (0 when there are no expenses).
    """
    return sum(expense.amount for expense in context['Expenses'])
import sys
def builtins_module():
    """
    Return the dotted path to the builtin `open` for the running interpreter.

    Python 3 renamed the __builtin__ module to builtins; pick whichever
    name matches the current Python version.
    """
    if sys.version_info < (3, 0):
        return "__builtin__.open"
    return "builtins.open"
def printed(o, **kwargs):
    """Print an object (forwarding keyword args to print) and return it unchanged."""
    print(o, **kwargs)
    return o
def ele_C(w, C):
    """
    Complex impedance of an ideal capacitor: 1 / (j * w * C).

    :param
        w: Angular frequency [1/s], (s:second)
        C: F
    :return: complex impedance
    """
    impedance = 1 / (1j * w * C)
    return impedance
def onCircle_elements(nelx,nely):
    """
    Return the (x, y) element centroids lying on the border of a circle.

    The circle is centred at the grid centre ((nelx-1)/2, (nely-1)/2)
    with radius (nelx-1)/2; centroids whose distance from the circle is
    under half an element are considered "on" the border.

    Args:
        nelx: grid width in elements (also fixes the circle diameter).
        nely: grid height in elements.

    Returns:
        List of (x, y) integer tuples on the circle border.
    """
    radius = (nelx - 1) / 2.0
    centre_y = (nely - 1) / 2.0
    # Renamed from `list` in the original, which shadowed the builtin.
    border = []
    for x in range(nelx):
        for y in range(nely):
            dist = ((x - radius) ** 2 + (y - centre_y) ** 2) ** .5
            if abs(dist - radius) < .5:
                border.append((x, y))
    return border
from pathlib import Path
def museum_packages_dir(tmpdir, monkeypatch):
    """
    Fixture pointing to a directory containing museum packages
    """
    packages_path = Path(tmpdir).joinpath("MuseumPackages")
    packages_path.mkdir(exist_ok=True)
    return packages_path
import psycopg2
from psycopg2 import sql
import os
def test_data_db(image_name, structure, database_name):
    """Check whether object data for a structure/image pair exists in the DB.

    Args:
        image_name: name of the image to look up.
        structure: subcellular structure; used as the table name.
        database_name: database to connect to (credentials come from the
            POSTGRES_USER / POSTGRES_PASSWORD environment variables;
            host "db", port 5432).

    Returns:
        True if at least one row for that structure/image is present,
        False otherwise.
    """
    dsn = ('postgresql://' + os.environ['POSTGRES_USER'] + ':'
           + os.environ['POSTGRES_PASSWORD'] + '@db:5432/' + database_name)
    conn = psycopg2.connect(dsn)
    cur = conn.cursor()
    # Table names cannot be bound with %s placeholders; sql.Identifier
    # quotes the structure name safely instead.
    query = sql.SQL("SELECT * FROM {table} where name = %s").format(
        table=sql.Identifier(structure))
    cur.execute(query, (image_name,))
    processed_bool = bool(cur.fetchone())
    cur.close()
    conn.close()
    return processed_bool
def is_bool(value):
    """Return True when the string is a recognised boolean word (case-insensitive)."""
    accepted = ('true', 'false', 'yes', 'no', 'on', 'off')
    return value.lower() in accepted
import click
def modify_pileups(ctx, normalize, startpos, endpos, no_coverage, pileups):
    """
    modify_pileups - Performs normalization, truncation and/or selecting the
    range of the pileup, if these options are enabled.
    INPUT:
        [CONTEXT] [ctx]
        [BOOL] [normalize/dont_normalize]
        [INT] [startpos]
        [INT] [endpos]
        [STRING] [no_coverage] [truncate/remove_no_coverage/keep_no_coverage]
            Options to truncate low-coverage regions on the ends of the pileup,
            ignore all low coverage regions, or keep all low coverage regions
        [PILEUP_LIST] [pileups]
            The pileups to be modified
    RETURN:
        [2D ARRAY] [pileups]
            The modified pileups, returned as a two-dimensional array.
    POST:
        The calling function, dist, will use the 2D array that is returned.
    """
    # Positions may arrive as strings from the CLI; coerce to int before use.
    startpos = int(startpos)
    endpos = int(endpos)
    pileups.select_pileup_range(startpos, endpos)
    # converting startpos and endpos back to one-based indexing for click.echo
    click.echo(("The pileup covers %s positions after selecting " +
                "range between original pileup positions %d and %d.")
               % (pileups.get_pileup_length(), startpos + 1, endpos + 1))
    if normalize:
        pileups.normalize_pileups()
    # Remember the pre-truncation length so removals can be reported below.
    old_length = pileups.get_pileup_length()
    if no_coverage != 'keep_no_coverage':
        if no_coverage == 'truncate':
            pileups.truncate_output()
            click.echo("Truncating positions with no coverage that " +
                       "are contiguous with the start or end " +
                       "position of the pileup only.")
        elif no_coverage == 'remove_no_coverage':
            pileups.remove_no_coverage()
            click.echo("Truncating all positions with no coverage.")
        # Report how many positions the chosen truncation mode dropped.
        click.echo("%d positions were truncated on the left." %
                   pileups.get_num_left_positions_truncated())
        click.echo("%d positions were truncated on the right." %
                   pileups.get_num_right_positions_truncated())
        click.echo("%d positions were removed in total from the pileup." %
                   (old_length - pileups.get_pileup_length()))
    # end if
    return pileups.get_pileups_as_numerical_array()
def get_slope(x,t):
    """This function calculates the slope of a peak from exceeding the threshold to the maximum.
    Args:
        x (list): x Values from which the slope is to be determined
        t (list): time section from which the slope is to be determined
    Returns:
        slope (float): slope of the section
    """
    # Find the first index where the signal stops rising (the peak).
    # Fall back to the last sample when x rises monotonically -- the
    # original left `end` at 0 in that case and divided by zero.
    end = len(x) - 1
    for i in range(len(x) - 1):
        if x[i + 1] <= x[i]:
            end = i
            break
    # Rise over run between the first sample and the peak.
    slope = (x[end] - x[0]) / (t[end] - t[0])
    return slope
import os
import time
def get_file_age(filename):
    """
    Returns age of file in seconds
    :param str filename: file to inspect
    .. versionadded:: 1.0.0
    """
    modified_at = os.path.getmtime(filename)
    return int(time.time() - modified_at)
def _unmangle_name(mangled_name, class_name):
"""Transform *mangled_name* (which is assumed to be a
"_ClassName__internal" name) into an "__internal" name.
:arg str mangled_name:
a mangled "_ClassName__internal" member name
:arg str class_name:
name of the class where the (unmangled) name is defined
:return:
the transformed "__internal" name
:rtype:
str
"""
return mangled_name.replace("_%s" % class_name.lstrip('_'), "") | 681e7b0e05b2feda22764149feafd8da89609ebc | 51,388 |
import math
def get_page_numbers(resources, page):
    """
    Build the pagination information for a request.

    Parameters
    ----------
    resources: array
        List of top level resources
    page: int
        Page number passed through the query parameters

    Returns
    -------
    A dictionary of page numbers
    """
    # Page count: one page for up to 10 items, floor(len/10) beyond that.
    total_page = 1
    if len(resources) > 10:
        total_page = math.floor(len(resources) / 10)
    if page == 1:
        # First page: no previous; next only when more pages exist.
        previous_page = None
        next_page = page + 1 if total_page > 1 else None
    elif page == total_page:
        # Last page: no next.
        previous_page = page - 1
        next_page = None
    elif page < total_page:
        # Somewhere in the middle.
        previous_page = page - 1
        next_page = page + 1
    else:
        # Past the end: point back at the last page.
        previous_page = total_page
        next_page = None
    return {
        "first_page": '1',
        "previous_page": previous_page,
        "next_page": next_page,
        "last_page": total_page,
        "total_pages": total_page,
        "per_page": 10
    }
def name(name):
    """Identity helper: positional args are parsed in the order they're added in."""
    result = name
    return result
import sys
def _GetCommandStdout(proc):
"""Extract the stored standard error, print it and return it.
"""
out = proc.stdout.read()
sys.stdout.write(out)
return out | 2dc136ede014276ace2128b757b4ae0b6911c05d | 51,392 |
def find_largest_digit(n):
    """
    :param n: int, a number whose largest digit we want (sign is ignored).
    :return: int, the largest digit in n.
    """
    # Work with the absolute value so the sign does not matter.
    if n < 0:
        n = -n
    # Base case: a single digit is its own largest digit.
    if n < 10:
        return n
    last_digit = n % 10
    # Recurse exactly once on the remaining digits.  The original called
    # find_largest_digit(n) twice per level (in the comparison and in the
    # return), doubling the work at every recursion depth.
    rest_max = find_largest_digit(n // 10)
    return last_digit if last_digit > rest_max else rest_max
def storage_management_feature(cpc_or_partition):
    """
    Return whether the specified CPC (or the CPC of the specified partition)
    has the DPM storage management feature enabled.

    On z13 and earlier the feature is always disabled; on z14 and later it
    is always enabled -- but the feature list is looked up properly rather
    than assumed.
    """
    for feature in cpc_or_partition.prop('available-features-list', []):
        if feature['name'] == 'dpm-storage-management':
            return feature['state']
    return False
import json
def to_json(data):
    """Serialize Python data to UTF-8 encoded JSON bytes."""
    text = json.dumps(data)
    return text.encode('utf8')
def state_derivatives(states):
    """Return the time derivative (via .diff()) of each state, as a list."""
    derivatives = []
    for state in states:
        derivatives.append(state.diff())
    return derivatives
from typing import Callable
from typing import Iterable
def sort_by(key: Callable):
    """Return a function that sorts any iterable ascending by the given key.

    >>> sort_by(len)(["hi!", "my", "name", "is"])
    ['my', 'is', 'hi!', 'name']
    """
    def _sorter(seq: Iterable):
        return sorted(seq, key=key)
    return _sorter
def IntValidate(text):
    """
    Validate a text entry: accept the empty string or any string
    convertible to an integer.

    Args:
        text: the value to validate (normally a string).

    Returns:
        True if text is empty or parses as an int, False otherwise.
    """
    if text == "":
        return True
    try:
        # The original computed int(text) + 1; the conversion alone suffices.
        int(text)
    except (ValueError, TypeError):
        # ValueError: non-numeric string; TypeError: non-string input.
        return False
    return True
def obj_ratio(dets):
    """Calculate the ratio of two-object detections versus one-object detections.

    Args:
        dets (list of lists): how many objects were detected per chunk.

    Returns:
        float: count of 2-detections divided by the count of 1- and
        2-detections combined.
    """
    single_count = 0
    double_count = 0
    for chunks in dets:
        single_count += chunks.count(1)
        double_count += chunks.count(2)
    return double_count / (double_count + single_count)
def timeformatter(x, pos):
    """Tick formatter: the two args are the value and the tick position."""
    hours = int(x / 3600)
    middle = int(x / 24 / 60)
    seconds = int(x % 60)
    return '{}:{}:{:02d}'.format(hours, middle, seconds)
def human_bytes(num, suffix='B', use_binary_prefix=True):
    """Render a byte count in human readable form (like `-h` in CLI tools).

    Args:
        num (int): the number of bytes to represent.
        suffix (str, optional): unit suffix, defaults to 'B'.
        use_binary_prefix (bool, optional): True (default) for KiB/MiB...
            (powers of 1024); False for KB/MB... (powers of 1000).
            https://en.wikipedia.org/wiki/Binary_prefix

    Returns:
        str: the human readable representation.

    Examples:
        >>> print(human_bytes(1024))
        1.0KiB
    """
    if use_binary_prefix:
        step = 1024.0
        prefixes = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi']
    else:
        step = 1000.0
        prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    value = num
    for prefix in prefixes:
        if abs(value) < step:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= step
    # Beyond the largest prefix: undo the final division and clamp to it.
    return "%.1f%s%s" % (value * step, prefixes[-1], suffix)
def read_binary_metadata(metadata_file):
    """ Read the metadata of a binary file.

    Parses `key = value` lines; a value opening with '{' may span several
    lines until the matching '}' and is returned as a list of strings,
    otherwise values are plain stripped strings.  Lines starting with ';'
    inside a braced value are treated as comments and skipped.

    Arguments:
        metadata_file: str
            Metadata filename.
    Returns:
        metadata: dict
            Metadata.
    """
    fid = open(metadata_file, 'r')
    # Translation table that deletes newlines and curly braces from values.
    trans_tab = str.maketrans(dict.fromkeys('\n{}'))
    metadata = dict()
    while 1:
        line = fid.readline()
        if '=' in line:
            key, value = line.split('=', 1)
            # '{' with no matching '}' means the value continues on the
            # following lines; keep reading until the closing brace appears.
            if ('{' in value) and ('}' not in value):
                while '}' not in line:
                    line = fid.readline()
                    # ';' lines are comments -- skip without appending.
                    # NOTE(review): an all-whitespace line here would raise
                    # IndexError on strip()[0] -- confirm inputs never have one.
                    if line.strip()[0] == ';':
                        continue
                    value += line
            key = key.strip()
            # Braced values are comma-separated lists; split into stripped
            # strings.  Plain values stay as a single stripped string.
            if ('{' in value) and ('}' in value):
                value = value.translate(trans_tab).strip()
                value = list(map(str.strip, value.split(',')))
            else:
                value = value.translate(trans_tab).strip()
            metadata[key] = value
        # readline() returns '' only at end of file.
        if line == '':
            break
    fid.close()
    return metadata
def color_tuple_to_hsl(three_tuple):
    """
    Format an (h, s, l) tuple as a CSS hsl() value.
    h: hue
    s: saturation (in %)
    l: lightness (in %)
    """
    hue, saturation, lightness = three_tuple
    return 'hsl({}, {}%, {}%)'.format(hue, saturation, lightness)
def process_group(grp):
    """
    Given a list of ints (as ints or numeric strings),
    where two ints share a directed edge
    u-v with v > u if v = u + i for some
    i in (1, 2, 3), compute the total number
    of branches (or equivalently leaves) in
    this directed tree.
    :param grp: The list of ints (or numeric strings).
    :return: The count of the number of leaves.
    """
    # Sort the values and add a root of 0 plus a sentinel leaf at max + 3
    # (always reachable from the largest value, so it is the sole leaf).
    st = list(sorted(map(int, grp)))
    st = [0] + st + [max(st) + 3]
    exists = set(st)
    def count_leaves(memo, curr_val):
        """
        Given a tree structure with root 0
        count the number of leaves present in it.
        Notes
        _____
        Recursive Step:
        Given a curr_val, we store in memo[curr_val]:
        'The number of leaves in the subtree rooted at curr_val.'
        """
        if curr_val == st[-1]:
            # Reached a leaf.
            # Leaves have exactly one leaf in the subtree
            # rooted at them.
            memo[curr_val] = 1
            return 1
        elif curr_val in memo:
            # If memoized, don't recompute, save time.
            return memo[curr_val]
        else:
            # Subdivide the problem amongst
            # the current nodes children.
            for i in range(1, 4):
                if curr_val + i in exists:
                    count_leaves(memo, curr_val + i)
            # Assume it is solved for children.
            # Then how to use children's solution
            # to produce current node's?
            # The number of leaves in the subtree rooted
            # at curr_val is:
            # The sum of the number of leaves in the
            # subtrees rooted at its children.
            memo[curr_val] = 0
            for i in range(1, 4):
                if curr_val + i in memo:
                    memo[curr_val] += memo[curr_val + i]
            # Populate memo[curr_val] with the result.
            # and trace back to the next node.
            return memo[curr_val]
    mm = dict()
    count_leaves(mm, 0)
    return mm[0]
def fmt_filename(text):
    """File name formatter.

    Strip every character that common file systems forbid in file names
    (backslash, slash, colon, star, question mark, pipe, angle brackets
    and double quote) from *text*.
    """
    forbidden = '\\/:*?|<>"'
    # One C-level pass via str.translate instead of nine chained
    # .replace() calls.
    return text.translate(str.maketrans('', '', forbidden))
import requests
def lint_file(file):
    """Lint a GitLab CI YAML file via the gitlab.com CI lint API.

    Args:
        file: path of the CI configuration file to check.

    Returns:
        True when the API reports the file as valid; False on a non-OK
        HTTP status or when lint errors are reported (errors are printed).
    """
    with open(file) as f:
        # NOTE(review): verify=False disables TLS certificate validation
        # for this HTTPS request -- confirm this is intentional.
        r = requests.post(
            "https://gitlab.com/api/v4/ci/lint",
            json={"content": f.read()},
            verify=False,
        )
    # Anything but HTTP 200 means the lint request itself failed.
    if r.status_code != requests.codes["OK"]:
        print("POST returned status code %d" % r.status_code)
        return False
    data = r.json()
    # The API response carries a 'status' field plus a list of 'errors'.
    if data["status"] != "valid":
        print("File %s returned the following errors:" % file)
        for error in data["errors"]:
            print(error)
        return False
    return True
import os
import json
def load_recipes_from_dir(directory):
    """Recursively load every recipe JSON file found under a directory.

    Only JSON documents containing both a 'name' and a 'modules' key are
    treated as recipes; other JSON files are skipped.
    """
    found = []
    for root, _, filenames in os.walk(directory):
        for filename in filenames:
            if not filename.endswith(".json"):
                continue
            with open(os.path.join(root, filename)) as handle:
                document = json.load(handle)
            if 'name' in document and 'modules' in document:
                found.append(document)
    return found
def escape(x):
    """
    Shell escape the given string.

    Implementation borrowed from the now-deprecated commands.mkarg()
    in the stdlib: single-quote when possible, otherwise double-quote
    with backslash-escaping of \\, $, " and `.
    """
    if "'" not in x:
        return "'" + x + "'"
    pieces = ['"']
    for ch in x:
        if ch in '\\$"`':
            pieces.append('\\')
        pieces.append(ch)
    pieces.append('"')
    return ''.join(pieces)
from typing import Dict
from typing import Optional
from typing import Any
def nested_get(dictionary: Dict, *keys: str) -> Optional[Any]:
    """Get nested object from dict.

    Walks the keys left to right; missing intermediate keys behave like
    empty dicts, and a missing final key yields None.  Raises TypeError
    when no key is supplied.
    """
    if not keys:
        raise TypeError("nested_get expected at least 1 key, got 0")
    if len(keys) == 1:
        return dictionary.get(keys[0])
    return nested_get(dictionary.get(keys[0], {}), *keys[1:])
def div(num1, num2):
    """
    Divide num1 by num2 using true division.

    >>> div(4,2)
    2.0

    A zero divisor propagates the usual error:

    >>> div(4,0)
    Traceback (most recent call last):
    ...
    ZeroDivisionError: division by zero
    """
    quotient = num1 / num2
    return quotient
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.