content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import re
def read_data_names(filepath: str):
    """Read class names, attribute names and attribute values from a spec file.

    :param filepath: relative path to the file describing the attribute values
    :return: tuple (classes, attributes, attribute_values) where
        classes is a flat list of class names,
        attributes is a flat list of attribute names, and
        attribute_values is a list of lists holding the possible values of
        each attribute, in the same order as ``attributes``.
    """
    with open(filepath, "r") as handle:
        remaining = handle.read().splitlines()
    # First line: "class values: a, b, ..." — strip the prefix, split on ", ".
    class_line = re.sub(r'^' + re.escape("class values: "), '', remaining.pop(0))
    classes = class_line.split(", ")
    # Second line: "attributes: x, y, ..."
    attr_line = re.sub(r'^' + re.escape("attributes: "), '', remaining.pop(0))
    attributes = attr_line.split(", ")
    # One following line per attribute: "<attr>: v1, v2, ..."
    attribute_values = []
    for attr in attributes:
        value_line = re.sub(r'^' + re.escape(attr + ": "), '', remaining.pop(0))
        attribute_values.append(value_line.split(", "))
    return classes, attributes, attribute_values
|
1111bd25970e5d41c596eaefa9517992186fba8d
| 65,280
|
def field_is_void(field):
    """Check whether *field* is empty.

    Returns the string "Field not available" for an empty string and
    False for anything else (note: the truthy string signals "void").
    """
    return "Field not available" if field == '' else False
|
c110176f8bc15d20cbda65f7a53ac9068a26c601
| 65,284
|
def fillNoneValues(column):
    """Replace NaN/NaT entries of an object-dtype column with empty strings.

    Args:
        column (pandas.Series): series to clean; mutated in place when its
            dtype is object, returned unchanged otherwise.
    Returns:
        pandas.Series: the (possibly mutated) input series.
    """
    is_text_column = column.dtype == object
    if is_text_column:
        column.fillna('', inplace=True)
    return column
|
7def3345decd08a0f410e1e13ee00ccc864615ef
| 65,287
|
def smooth_n_point(scalar_grid, n=5, passes=1):
    """Filter with normal distribution of weights.

    Parameters
    ----------
    scalar_grid : array-like or `pint.Quantity`
        Some 2D scalar grid to be smoothed.
    n: int
        The number of points to use in smoothing, only valid inputs
        are 5 and 9. Defaults to 5.
    passes : int
        The number of times to apply the filter to the grid. Defaults
        to 1.

    Returns
    -------
    array-like or `pint.Quantity`
        The filtered 2D scalar grid.

    Notes
    -----
    This function is a close replication of the GEMPAK function SM5S
    and SM9S depending on the choice of the number of points to use
    for smoothing. This function can be applied multiple times to
    create a more smoothed field and will only smooth the interior
    points, leaving the end points with their original values. If a
    masked value or NaN values exists in the array, it will propagate
    to any point that uses that particular grid point in the smoothing
    calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain.
    """
    # Center (p), edge (q) and corner (r) weights for the stencil.
    if n == 9:
        p, q, r = 0.25, 0.125, 0.0625
    elif n == 5:
        p, q, r = 0.5, 0.125, 0.0
    else:
        raise ValueError('The number of points to use in the smoothing '
                         'calculation must be either 5 or 9.')
    smooth_grid = scalar_grid[:].copy()
    for _ in range(passes):
        # Fix of original: the corner term contained a stray doubled '+'
        # ("B + + C"); harmless at runtime (unary plus) but misleading.
        smooth_grid[1:-1, 1:-1] = (
            p * smooth_grid[1:-1, 1:-1]
            + q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
                   + smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
            + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
                   + smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
    return smooth_grid
|
41f65a0794ad42bc74abdaf05defdb33a6921cc6
| 65,288
|
def encode_inpt(row, col):
    """Convert zero-based (row, col) grid indices to algebraic chess
    notation such as "a1".

    :param row: int, zero-based rank index
    :param col: int, zero-based file index
    :return: string in form "a1"
    """
    file_letter = chr(ord("a") + col)
    rank_number = str(1 + row)
    return file_letter + rank_number
|
cbc45ba74f5238bb957e477ee8420797d5b45162
| 65,293
|
def execute_cmd(conn, sqlcmd):
    """Run *sqlcmd* on *conn* inside a transaction and return all rows.

    params: conn: DB-API connection object (used as a context manager so
                  the statement is committed/rolled back automatically)
            sqlcmd: an SQL statement string
    returns: list of rows produced by executing the statement
    """
    with conn:
        cursor = conn.execute(sqlcmd)
        return cursor.fetchall()
|
9ea8a9c4529d46f0aeaac75491b11e9ae6ec4dd0
| 65,296
|
import uuid
def generate_job_id(group_uuid):
    """Generate a job ID of the form "<group_uuid>.<random uuid4>"."""
    random_part = uuid.uuid4()
    return f"{group_uuid}.{random_part}"
|
a0aab5e36899930af720e156e0856b43041b5f67
| 65,302
|
def retrieve_data_from_paper(client, paper):
    """Collect csv-row data for one paper from an OpenReview venue.

    Args:
        client (openreview Client): Specifies the base URL and the login
            information used to fetch the paper's forum notes.
        paper: OpenReview note object identifying the paper in the venue
            (must support ``to_json()``).

    Returns:
        A list representing one csv row in the order:
        title, authors, emails, decision, abstract, pdf, replies
        where ``replies`` is a list of ``(field, value)`` item lists, one
        per non-abstract comment in the paper's forum, e.g.::

            [[('title', 'review of Deep Learning'),
              ('review', "This paper ... )')],
             [('title', 'review of Deep Learning')]]
    """
    tmp = paper.to_json()
    forum_id = tmp['forum']
    content_keys = tmp["content"].keys()
    # The decision may be stored under either 'decision' or 'recommendation';
    # fall back to the empty string when neither is present.
    decision = ""
    if 'decision' in content_keys:
        decision = tmp['content']['decision']
    elif 'recommendation' in content_keys:
        decision = tmp['content']['recommendation']
    # Fetch every note in this paper's forum (the paper itself plus replies).
    forum_comments = client.get_notes(forum=str(forum_id))
    row = []
    replies = []
    for comment in forum_comments:
        if 'abstract' in comment.content.keys():
            # This note is the paper itself: extract the main csv fields.
            row.append(comment.content["title"])
            row.append(comment.content['authors'])
            row.append(comment.content['authorids'])
            # Prefer a decision stored on the paper note over the forum-level one.
            if 'decision' in comment.content.keys():
                row.append(comment.content['decision'])
            else:
                row.append(decision)
            row.append(comment.content["abstract"])
            row.append(comment.content["pdf"])
        else:
            # Any other note (review, comment, decision note) becomes a reply.
            replies.append(list(comment.content.items()))
    row.append(replies)
    return row
|
7e45cebcac4c1c8047366350033a5fdcbf4dd583
| 65,305
|
def gcd(a: int, b: int) -> int:
    """Compute the greatest common divisor (GCD) of two numbers.

    Args:
        a, b: Numbers
    Returns:
        GCD of a and b
    Examples:
        >>> gcd(16, 28)
        4
    """
    # Iterative Euclidean algorithm (the original recursed).
    while b:
        a, b = b, a % b
    return a
|
9ed29b10613f679bf168fe884f1e0f4cb2680aea
| 65,308
|
def heading_depth(line):
    """Return the markdown heading level: the count of leading '#' chars.

    '# heading' returns 1, '### heading' returns 3.

    Args:
        line (str): line in a markdown page; must start with '#'
    """
    assert line.startswith('#')
    # Stripping the leading run of '#' and comparing lengths counts it.
    return len(line) - len(line.lstrip('#'))
|
d4e9179ea91faab1450a1d615604c5601dcdd461
| 65,315
|
def create_option(name, ty, docstring, default_factory=lambda: None):
    """Creates a type-checked property.

    Args:
        name: The name to use.
        ty: The type to use. The type of the property will be validated when
            it is set.
        docstring: The docstring to use.
        default_factory: A callable that takes no arguments and returns a
            default value to use if not set.
    Returns:
        A type-checked property backed by the object's ``_options`` dict.
    """
    def _read(option):
        # pylint: disable=protected-access
        options = option._options
        # Lazily materialize the default on first read.
        if name not in options:
            options[name] = default_factory()
        return options.get(name)

    def _write(option, value):
        if not isinstance(value, ty):
            raise TypeError("Property \"%s\" must be of type %s, got: %r (type: %r)" %
                            (name, ty, value, type(value)))
        option._options[name] = value  # pylint: disable=protected-access

    return property(_read, _write, None, docstring)
|
abf646c7b8ddbd71bf7761c8f94989bcbc9f107b
| 65,317
|
def parse_bsub(output):
    """Parse bsub output and return job id.

    :param output: stdout of bsub command
    :type output: str
    :returns: job id extracted from the first line beginning with "Job"
        (e.g. "Job <123> ..." yields "123"); None when no such line exists
    :rtype: str or None
    """
    for line in output.split("\n"):
        if not line.startswith("Job"):
            continue
        # Second token looks like "<123>"; strip the angle brackets.
        token = line.split()[1]
        return token[1:-1]
|
557377cbc28ba9e1bd516a3ce68132ced5b05b7b
| 65,320
|
def numeric_to_binary(data):
    """Convert a string of decimal digits to its binary representation.

    Digits are consumed in groups of three; a full 3-digit group maps to
    10 bits, a 2-digit remainder to 7 bits and a single digit to 4 bits.
    Raises Exception for non-digit input; returns '' for empty input.
    """
    if not data:
        return ''
    if not data.isdigit():
        raise Exception('Numeric mode support only 0..9 characters!')
    # Bit width keyed by the number of digits in the chunk.
    widths = {3: 10, 2: 7, 1: 4}
    pieces = []
    for start in range(0, len(data), 3):
        chunk = data[start:start + 3]
        pieces.append(bin(int(chunk, 10))[2:].zfill(widths[len(chunk)]))
    return ''.join(pieces)
|
9d5f96f79706df00c832e83ff17813cca8c0fa74
| 65,325
|
def split_byte(byte):
    """Return the 8-bit `byte` as two 4-bit unsigned integers.

    Parameters
    ----------
    byte : bytes
        The byte to split; if more than one byte is supplied only the
        first is used.

    Returns
    -------
    2-tuple of int
        (4 most significant bits, 4 least significant bits).
    """
    value = ord(byte[:1])
    high_nibble = value >> 4
    low_nibble = value & 0x0F
    return high_nibble, low_nibble
|
e8f5eeb1dd8dbd60d3797d47871738bd05eb5dd6
| 65,327
|
def remove_punctuation(string: str) -> str:
    """
    Remove every character that is neither a letter nor whitespace.

    :param string: a string
    :return: a string containing only letters and spaces
    """
    keep = lambda ch: ch.isalpha() or ch.isspace()
    return ''.join(filter(keep, string))
|
f414479daa41a700997bc7405ecd7fcc97b5e7e6
| 65,328
|
def human_readable_number(number, suffix=""):
"""
Format the given number into a human-readable string.
Code adapted from http://stackoverflow.com/a/1094933
:param variant number: the number (int or float)
:param string suffix: the unit of the number
:rtype: string
"""
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(number) < 1024.0:
return "%3.1f%s%s" % (number, unit, suffix)
number /= 1024.0
return "%.1f%s%s" % (number, "Y", suffix)
|
83c63a300966794ae5654cee6f6e39bc644856eb
| 65,332
|
def get_account_deploy_count(accounts: int, account_idx: int, deploys: int) -> int:
    """Return the number of deploys assigned to one account.

    Distributes ``deploys`` evenly across ``accounts``; accounts whose
    index is <= the remainder receive one extra. Returns 1 when
    ``accounts`` is 0 (avoids division by zero).
    """
    if accounts == 0:
        return 1
    per_account, leftover = divmod(deploys, accounts)
    extra = 1 if account_idx <= leftover else 0
    return per_account + extra
|
8668fd9619fd8e64e55833dc85ebcf9358e183dc
| 65,340
|
def get_cube_intersection(a, b):
    """
    Find the intersection of two cubes. Note: the result is an invalid cube
    (min > max on some axis) when the inputs do not actually intersect.

    :param a: cube a, tuple of (min coords, max coords)
    :param b: cube b, tuple of (min coords, max coords)
    :return: intersecting volume as (tuple of per-axis maxes of the mins,
             tuple of per-axis mins of the maxes)
    """
    mins = []
    maxs = []
    for a_min, a_max, b_min, b_max in zip(a[0], a[1], b[0], b[1]):
        mins.append(max(a_min, b_min))
        maxs.append(min(a_max, b_max))
    return tuple(mins), tuple(maxs)
|
e359c1675fc92eeb9551d329679b3594d1c42f58
| 65,342
|
def cheating_matrix(seating_chart):
"""
Calculate and return the probabilities of cheating for each position in a rxc grid
:param seating_chart: A nested list representing a rxc grid
:return: A nested list, the same size as seating_chart, with each element representing that
position's cheating probability
"""
# Create matrix of probabilities by location
prob_matrix = [[.025, .3, .025],
[.2, 0, .2],
[.025, .2, .025]]
# Create blank nested list for saving calculated probabilities (same size as seating_chart)
calc_prob = []
for ch_row in range(len(seating_chart)):
new_row = []
for ch_col in range(len(seating_chart[ch_row])):
new_row.append(seating_chart[ch_row][ch_col])
calc_prob.append(new_row)
# calculate probabilities for each spot in seating_chart, store in calc_prob
for row in range(len(seating_chart)):
for col in range(len(seating_chart[row])):
calc_prob[row][col] = 0
for r_adj in range(-1, 2):
for c_adj in range(-1, 2):
if 0 <= row + r_adj < len(seating_chart):
if 0 <= col + c_adj < len(seating_chart[row]):
if seating_chart[row][col] == seating_chart[row + r_adj][col + c_adj]:
calc_prob[row][col] += prob_matrix[1 + r_adj][1 + c_adj]
return calc_prob
|
f9eaa1eb187ceeb0252d208002c739da3f70743c
| 65,347
|
def format_container_output(output: bytes) -> str:
    """
    Format the output of a Docker container for printing or logging.

    Args:
        output: Raw bytes output by the container.
    Returns:
        The bytes decoded as UTF-8 with trailing newlines removed.
    """
    text = output.decode("utf-8")
    return text.rstrip("\n")
|
53024b28dbae4c6ed929233af410385e1ffc1ffa
| 65,350
|
def get_worker_count(cores):
    """
    Return the number of workers to run: two per server core.
    """
    workers_per_core = 2
    return workers_per_core * cores
|
58c8755a0c0409ab41266549fd530f885993077f
| 65,357
|
import torch
def torch_generate_spatial_feature(bounding_box, W, H):
    """
    Generate spatial features from bounding boxes.

    :param bounding_box: tensor of boxes in format [xmin, ymin, xmax, ymax]
    :param W: image width.
    :param H: image height.
    :return: concatenation (along the last dim) of the four normalized
        coordinates and the box area relative to the image area.
    """
    x_min = bounding_box[:, 0]
    y_min = bounding_box[:, 1]
    x_max = bounding_box[:, 2]
    y_max = bounding_box[:, 3]
    norm_x_min = x_min / W
    norm_y_min = y_min / H
    norm_x_max = x_max / W
    norm_y_max = y_max / H
    # Clamp so degenerate (inverted) boxes contribute zero area.
    box_w = torch.clamp(x_max - x_min, min=0)
    box_h = torch.clamp(y_max - y_min, min=0)
    rel_area = (box_w * box_h) / (W * H)
    return torch.cat([norm_x_min, norm_y_min, norm_x_max, norm_y_max, rel_area], dim=-1)
|
896f0c34b07c2142d588349e51cb72f976cd1794
| 65,360
|
import torch
def metric_fixed_point(cost_matrix, gamma=0.99, eps=1e-7):
    """DP for calculating PSM (approximately).

    Repeatedly applies d <- cost + gamma * (index-shifted d) until the total
    absolute change between iterations drops below ``eps``.

    Args:
        cost_matrix: DIST matrix where entries at index (i, j) is DIST(x_i, y_j)
        gamma: Metric discount factor.
        eps: Threshold for stopping the fixed point iteration.

    Returns:
        Tensor of the same shape as ``cost_matrix`` holding the (approximate)
        fixed point.
    """
    d = torch.zeros_like(cost_matrix)
    def operator(d_cur):
        # 1 * cost_matrix allocates a fresh tensor, so the in-place adds
        # below never modify the caller's cost_matrix.
        d_new = 1 * cost_matrix
        discounted_d_cur = gamma * d_cur
        # Interior entries look ahead by one index in both dimensions;
        # the last row/column only receive the partial updates below.
        d_new[:-1, :-1] += discounted_d_cur[1:, 1:]
        d_new[:-1, -1] += discounted_d_cur[1:, -1]
        d_new[-1, :-1] += discounted_d_cur[-1, 1:]
        return d_new
    while True:
        d_new = operator(d)
        # Converged: total absolute change is below the threshold.
        if torch.sum(torch.abs(d - d_new)) < eps:
            break
        else:
            d = d_new[:]
    return d
|
1d84e1cb3232bd2905f005013f81b7ffc77e5f15
| 65,365
|
def normalize_pose_arr(data_arr, data_mean, data_std):
    """
    Normalize a pose array by subtracting the mean and dividing by the
    standard deviation (broadcast over the time dimension).

    Parameters:
        data_arr: ndarray(nTimePoints, nJoints=16, nCoords=(2 or 3)) of float64
        data_mean, data_std: ndarray(nJoints=16, nCoords=(2 or 3)) of float64
    """
    centered = data_arr - data_mean
    return centered / data_std
|
674da0636cb39fb2e2f1c6dca1ea96789e7f42f4
| 65,368
|
from textwrap import dedent
def dedent_sql(command):
    """Dedent a SQL command string, dropping empty and space-only lines."""
    # line.strip(' ') is falsy exactly when the line holds only spaces
    # (or nothing), matching the original subset-of-{' '} test.
    kept = [line for line in command.split('\n') if line.strip(' ')]
    return dedent('\n'.join(kept))
|
c87c3fc4752623762feed368558a4035954df439
| 65,369
|
def get(cluster_id, client):
    """Request information on a cluster identified by ``cluster_id``."""
    cluster_key = str(cluster_id)
    return client.get_cluster(cluster_key)
|
aeef1add213bf66ad3ea55aea938edb9de25c998
| 65,379
|
def binary_encoder(msg: str) -> str:
    """Encode a user message as space-separated binary code points.

    Parameters:
        msg: user input to be encoded (surrounding whitespace is stripped)
    Returns:
        str: binary representation of the input
    """
    encoded_chars = [format(ord(ch), 'b') for ch in msg.strip()]
    return ' '.join(encoded_chars)
|
168babf980bcf6c127cbea1f561f2148a550cae6
| 65,381
|
def _list_bcast_where(F, mask, new_val_l, old_val_l):
"""Broadcast where. Implements out[i] = new_val[i] * mask + old_val[i] * (1 - mask)
Parameters
----------
F : symbol or ndarray
mask : Symbol or NDArray
new_val_l : list of Symbols or list of NDArrays
old_val_l : list of Symbols or list of NDArrays
Returns
-------
out_l : list of Symbols or list of NDArrays
"""
return [F.broadcast_mul(new_val, mask) + F.broadcast_mul(old_val, 1 - mask)
for new_val, old_val in zip(new_val_l, old_val_l)]
|
36415acdf900ad1f255e08906e66484836864efc
| 65,385
|
def parseFasta(file):
    """Return the concatenated sequence string from a FASTA file.

    Header lines (starting with '>') are skipped, and ambiguous IUPAC
    nucleotide codes are replaced with 'N'.

    :param file: path to the FASTA file
    :return: the sequence as a single string
    """
    ref = ''
    # Use a context manager so the handle is closed (the original leaked it).
    with open(file) as handle:
        for line in handle:
            if not line.startswith('>'):
                ref += line.rstrip()
    for code in 'RYLMKSWHBVD':
        ref = ref.replace(code, 'N')
    return ref
|
fae4efb6f3625f2278e427579c8738e429c21638
| 65,387
|
from typing import Optional
from typing import Sequence
from typing import Dict
import dataclasses
def asdict_filtered(obj, remove_keys: Optional[Sequence[str]] = None) -> Dict:
    """Return a dataclass's attributes as a dict with unwanted keys removed.

    Each config group carries a 'name' entry identifying the node chosen in
    the group (e.g. group = optimizers, nodes = adam, sgd); since 'name' is
    not needed to initialize any dataclass, it is dropped by default.

    Args:
        obj: The dataclass (or instance) whose attributes are converted.
        remove_keys: Keys to remove from the dict. Defaults to ['name'].

    Raises:
        ValueError: when ``obj`` is not a dataclass.
    """
    if not dataclasses.is_dataclass(obj):
        raise ValueError(f"Not a dataclass/dataclass instance")
    keys_to_drop = ["name"] if remove_keys is None else remove_keys
    attrs = dataclasses.asdict(obj)
    for key in keys_to_drop:
        # pop with default: missing keys are silently ignored.
        attrs.pop(key, None)
    return attrs
|
d6b11b41a4ce7265b5cba870239dccf73dd6f330
| 65,388
|
def parse_apbs_output(apbs_file):
    """Parse APBS output for SASA information.

    :param file apbs_file: file-like object (iterable of lines) with APBS output
    :returns: list of per-atom SASAs
    :rtype: list(float)
    """
    results = []
    for raw_line in apbs_file:
        stripped = raw_line.strip()
        if not stripped.startswith("SASA for atom "):
            continue
        # The SASA value is the fifth whitespace-separated token.
        results.append(float(stripped.split()[4]))
    return results
|
73dd4fe398cb45bd13ebfcf6e91fb994ee3407ca
| 65,389
|
import base64
def decode(encoded):
    """Decode a urlsafe-base64 value and return it as a string."""
    raw = base64.urlsafe_b64decode(encoded)
    return raw.decode()
|
f374af33c07b0d5eef003da05ae26d0fcbb909f3
| 65,392
|
def raw_hostname(ip):
    """
    Return the address unchanged (no reverse lookup is performed yet).
    """
    # Placeholder implementation: simply echo the IP back.
    return ip
|
36fd431ba4ceb422cf3c2dde8fdbaa59efb18ce6
| 65,395
|
def leftPadItems(alist):
    """Prepend a single space to every string in the given list."""
    return [' ' + entry for entry in alist]
|
8cd74bdf74c021a81532c8209774975fa5b6f9b4
| 65,399
|
def exempt_parameters(src_list, ref_list):
    """Return elements of src_list that are not in ref_list.

    Membership is tested by identity (``is``), not equality, so distinct
    but equal objects are kept.
    """
    return [item for item in src_list
            if not any(item is ref for ref in ref_list)]
|
b22e62c4f5d284d9f5888c7c720c18d36f8d9146
| 65,400
|
def schema_table_exists(schema, table, conn):
    """Determine whether the given table exists in the schema.

    :param schema: schema name to look in
    :param table: table name to look for
    :param conn: DB-API connection whose ``cursor()`` is a context manager
    :return: True when the table exists
    """
    # Parameterized query: the original interpolated schema/table directly
    # into the SQL string with str.format, which is an SQL injection risk.
    query = """
        SELECT EXISTS(
            SELECT *
            FROM information_schema.tables
            WHERE
                table_schema = %s AND
                table_name = %s
        );
    """
    with conn.cursor() as cur:
        cur.execute(query, (schema, table))
        # NOTE(review): comparing against 't' was preserved from the
        # original; it implies the driver returns textual booleans —
        # verify against the actual driver (psycopg2 returns a bool here).
        return cur.fetchone()[0] == 't'
|
e463f26aefaf716b1715baa39eaab93d30bc645e
| 65,405
|
import re
def ordered_char_list_regex(chars):
    """
    Build a regex pattern matching all the given characters in order,
    with anything (non-greedy) allowed between them.
    """
    escaped = [re.escape(ch) for ch in chars]
    return ".*?".join(escaped)
|
eb523775162cb9ac3475138618f3925141dc8f01
| 65,406
|
def multiply(data, scale):
    """
    Multiply DATA by a factor of SCALE and wrap the result in an
    operator-output dict under the 'data' key.
    """
    scaled = scale * data
    return {'data': scaled}
|
64965fa09d0717d77ac5de3d83af6ffb22847c1a
| 65,407
|
def get_body(html):
    """
    Extract the HTML between the first literal '<body>' tag and the last
    '</body>' tag. Raises ValueError when either tag is missing.
    """
    open_tag = '<body>'
    begin = html.index(open_tag) + len(open_tag)
    end = html.rindex('</body>')
    return html[begin:end]
|
f77b3c54708f640b5d02de4cbe0dace965360b20
| 65,410
|
def fixCertStr(certStr):
    """
    Remove the space NGINX inserts after each newline when a certificate
    is forwarded in a header; those spaces break the PEM format.
    """
    return certStr.replace('\n ', '\n')
|
fa276cc1eea3db820858328e15c309305b4faba0
| 65,414
|
def check_polygon(nums: list[float]) -> bool:
    """
    Return True when the given side lengths can form a 2-D polygon.

    A polygon exists iff the longest side is strictly shorter than the sum
    of all the others (generalized triangle inequality). The input list is
    never reordered.
    Wiki: https://en.wikipedia.org/wiki/Triangle_inequality

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False

    Raises ValueError for fewer than two sides or any non-positive side.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(side <= 0 for side in nums):
        raise ValueError("All values must be greater than 0")
    # sorted() returns a new list, leaving the caller's input untouched.
    ordered = sorted(nums)
    return ordered[-1] < sum(ordered[:-1])
|
114ae7120e661e19f32a7424895e37493f530ee6
| 65,415
|
import time
def test_engine(engine):
    """Probe a SQLAlchemy engine by opening a connection and timing it.

    Returns a status dict: on success it holds the engine repr, its
    ``label`` attribute (or '<unknown>'), 'OK' and the elapsed seconds;
    on any failure (or a falsy engine) a dict with 'ERROR' and the reason.
    """
    if not engine:
        return {'status': 'ERROR', 'error': 'No engine defined'}
    try:
        started = time.time()
        conn = engine.connect()
        if not conn.closed:
            conn.close()
        return {
            'engine': str(engine),
            'label': getattr(engine, 'label', '<unknown>'),
            'status': 'OK',
            'time': '{:.3f}'.format(time.time() - started),
        }
    except Exception as err:
        return {'status': 'ERROR', 'error': str(err)}
|
ced48c2d21480124ff4673630a7070772d370335
| 65,416
|
import random
def get_random_hashtags(input, number):
    """Pick ``number`` random hashtags (with replacement) from ``input``.

    Parameters:
        input (list): list of hashtags, e.g. from fileIO.get_hashtags()
        number (int): how many random hashtags to draw
    Returns:
        list of randomly chosen hashtags (duplicates possible)
    """
    chosen = []
    for _ in range(number):
        chosen.append(random.choice(input))
    return chosen
|
218ecbdc681c0eaf9ebae05c485de77e4c70c9d5
| 65,419
|
def SimpleElement(tag, value):
    """
    Render a one-line XML element.

    Args:
        tag: xml tag name
        value: character data
    Returns:
        XML string: <tag>value</tag> followed by a newline
    """
    return f'<{tag}>{value}</{tag}>\n'
|
8684d661f0fbf04c6d6cb4153041201378a258cc
| 65,422
|
def underline_filter(text):
    """Jinja2 filter that underlines a row of text with '=' characters.

    >>> underline_filter("headline")
    'headline\\n========'
    """
    underline = "=" * len(text)
    return "\n".join((text, underline))
|
417340cef3dce0348e197d7af9150b8407a8fa5e
| 65,423
|
import re
def remove_shortwords(text, length=3):
    """Return *text* with all words of ``length`` characters or fewer removed.

    params:
        length: maximum word length to remove; default removes words of
            3 letters or fewer
    """
    min_len = int(length)
    kept = (word for word in re.split(r"\W+", text) if len(word) > min_len)
    return " ".join(kept)
|
2e1d1a2b86505a2e693f38d67915c5fdc2b0af3e
| 65,424
|
def RestMethod(url, http_methods):
    """Decorator factory attaching REST URL metadata to a handler function."""
    def decorate(fn):
        # Tag the function so a router can discover and mount it.
        fn.is_rest_method = True
        fn.rest_url = url
        fn.rest_http_methods = http_methods
        return fn
    return decorate
|
6e317645732373c957288b033340caf01f1ddee2
| 65,425
|
def power(a, p):
    """
    Return ``a`` raised to the power ``p``.
    """
    return pow(a, p)
|
65bda2861ab82da93918a900d668f0c361322b73
| 65,427
|
import functools
import time
def timer(f):
    """Decorator (@timer) that prints the wrapped function's runtime,
    rounded to whole minutes, after it returns."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        started = time.perf_counter()
        result = f(*args, **kwargs)
        minutes = round((time.perf_counter() - started) / 60)
        print(f"{f.__name__} completed in {minutes} minutes.")
        return result
    return wrapper
|
4c2d91bd492caa5ec78ce855d979a4e63f75f199
| 65,431
|
def calc_angular_radius(km):
    """Return the angular radius (in radians) for a distance in kilometers."""
    EARTH_RADIUS_KM = 6371.0
    return km / EARTH_RADIUS_KM
|
d768de974f1ebb2fe5514b54200cb91e68866e30
| 65,433
|
def _get(result, field, mandatory=False, default="", transform=lambda x: x):
"""Retrieve a given field if available, return default or exception otherwise. Result may be manipulated by transformation function"""
if field in result:
return transform(result[field])
else:
if mandatory:
raise KeyError("Field '"+field+"' not found in dictionary")
else:
return default
|
94f4975036a210fa1b2882438e5f05faababfe5a
| 65,435
|
def date_to_str(dt, with_timezone=False):
    """Serialize a datetime to an ISO-8601-like string.

    Parameters
    ----------
    dt : datetime
        datetime to convert
    with_timezone : bool, default False
        When True and ``dt`` is timezone-aware, append the UTC offset
        formatted with a colon (e.g. "+00:00").

    Returns
    -------
    str
        Datetime string with microseconds.
    """
    if not (with_timezone and dt.tzinfo):
        return dt.strftime("%Y-%m-%dT%H:%M:%S.%f")
    raw = dt.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
    # strftime yields "+HHMM"; insert the colon to get "+HH:MM".
    return raw[:-2] + ":" + raw[-2:]
|
fb3d8d10a95feec96e385af13490399bbe2d3172
| 65,450
|
import pathlib
import re
def find_files(pattern, min=None, max=None, check_parent=True, partial_match_is_error=True):
    """Find files matching a pattern with a sequence number.
    The sequence number is represented using {N} in the input pattern,
    which can be repeated.
    Parameters
    ----------
    pattern : str or pathlib.Path
        File pattern using {N} to represent the sequence number.
    min : int or None
        Only values of N >= min will be returned.
        (NOTE: parameter shadows the builtin ``min``; kept for API
        compatibility.)
    max : int or None
        Only values of N <= max will be returned.
        (NOTE: shadows the builtin ``max``.)
    check_parent : bool
        Raise an exception when True and the parent directory does
        not exist.
    partial_match_is_error : bool
        Raise an exception when True for any paths that match the
        first {N} but not all subsequent {N}'s in the input pattern.
    Returns
    -------
    list
        List of filenames matching the pattern and filtered by
        any min/max cuts.
    """
    if not isinstance(pattern, pathlib.Path):
        pattern = pathlib.Path(pattern)
    # Find which parts of the pattern contain {N}.
    parts = pattern.parts
    part_has_N = ['{N}' in part for part in parts]
    if not any(part_has_N):
        # Nothing to match. Return the input if it exists.
        return [str(pattern)] if pattern.exists() else []
    first_N = part_has_N.index(True)
    # Build the parent path to search (everything before the first {N} part).
    parent_path = pathlib.Path(*parts[:first_N])
    if check_parent and not parent_path.exists():
        raise FileNotFoundError(parent_path)
    # Build the suffix pattern (parts after the first {N}) if there is one.
    remaining = first_N + 1
    suffix_pattern = str(pathlib.Path(*parts[remaining:])) if remaining < len(parts) else None
    # Look for paths matching the first {N} in the path using * as a glob pattern.
    first_N_pattern = parts[first_N]
    paths = sorted([str(P) for P in parent_path.glob(first_N_pattern.format(N='*'))])
    # Check for integer matches to N (the glob '*' may have matched non-digits).
    regexp = re.compile(first_N_pattern.format(N='([0-9]+)') + '$')
    selected = []
    suffix = ''
    for path in paths:
        found = regexp.search(path)
        if found:
            N = int(found.group(1))
            # Apply the optional min/max cuts on the sequence number.
            if min is not None and N < min:
                continue
            if max is not None and N > max:
                continue
            if suffix_pattern:
                # Build the full path for this value of N.
                # Use the regexp string match rather than the integer N to preserve formatting.
                suffix = suffix_pattern.format(N=found.group(1))
                full_path = pathlib.Path(path) / suffix
                # Silently ignore paths that match the first {N} but not subsequent ones.
                if not full_path.exists():
                    if partial_match_is_error:
                        raise ValueError(
                            'Partial match error: found {path} but not {full_path}.'
                            .format(path=path, full_path=full_path))
                    else:
                        continue
            path = str(path)
            selected.append(str(pathlib.Path(path) / suffix))
    return selected
|
738e936c022fe38a5f0f8314011ec0a79969a5d0
| 65,452
|
def OpenFileForRead(filename):
    """Exception-safe file open for read.

    Args:
        filename: local file name
    Returns:
        file: if open succeeded
        None: if open failed
    """
    try:
        return open(filename, 'r')
    except OSError:
        # Only swallow I/O failures; the original bare `except` also hid
        # programming errors such as TypeError on a bad argument.
        return None
|
0f2a65e4136f5b91320320cc7eb027d4a7b99ab3
| 65,453
|
import secrets
def random(b: int) -> bytes:
    """Return ``b`` random bytes from a cryptographically secure source."""
    token = secrets.token_bytes(b)
    return token
|
a6498cf3cb45c9d3dc6c364e6b8f8a4830fff6d6
| 65,457
|
def widget_type(field):
    """Determine a field's widget type on the fly.

    Works for both bound fields (``field.field.widget``) and plain fields
    (``field.widget``).

    :rtype: str

    Example:
    .. code::
        {% if field|widget_type == "Textarea" %}
            ...
        {% endif %}
    """
    try:
        widget = field.field.widget
    except AttributeError:
        widget = field.widget
    return type(widget).__name__
|
f53cdef299b2f945c98d30607abaeeab4307e912
| 65,461
|
def _global_toc(cog, text):
""" Returns a single item for an unordered list with link to other file in same folder """
return f' * [{text}]({cog}.md#{text})\n'
|
e2ef80db14af15af59b3f860ce8b85be5574861d
| 65,462
|
def get_update_url(d_included, base_url):
    """Parse a response fragment and return the post URL when present.

    :param d_included: a dict, as returned by res.json().get("included", {})
    :type d_included: dict
    :param base_url: site URL
    :type base_url: str
    :return: post url; "" when the urn keys are missing, "None" when
        ``d_included`` is not subscriptable
    :rtype: str
    """
    try:
        urn = d_included["updateMetadata"]["urn"]
    except KeyError:
        return ""
    except TypeError:
        return "None"
    return f"{base_url}/feed/update/{urn}"
|
e4e2ba9f02b1718b220fe820c7eea95f5c0172c0
| 65,463
|
def is_palindrome(sequence):
    """
    Return True when *sequence* (stringified, case-insensitive) reads the
    same forwards and backwards.
    """
    text = str(sequence).lower()
    return text == text[::-1]
|
b12325d89ecbcb77ef905b58485e39da945265de
| 65,466
|
def invalid_resource(message, response_code=400):
    """
    Return the given message in a bad-request body dict, with the response
    code set to ``response_code``.

    The default is 400 (the original docstring incorrectly said 404).
    """
    return {"message": message, "code": response_code}
|
9cb9ac48d4cfaeeb05b94ffeba0abc68922313a6
| 65,467
|
import heapq
def nlargest_ref_heapq(n, iterable):
    """Reference implementation: the n largest items in ascending order."""
    largest = heapq.nlargest(n, iterable)
    largest.reverse()
    return largest
|
81ad93e4ffd8cc4918c7a833310b09a9bd94eea0
| 65,468
|
def group_locations(obj, ids):
    """
    Return vertex locations for possible group of cells.

    :param obj : geoh5py object containing cells, vertices structure.
    :param ids : list of ids (or possibly single id) that indexes cells array.
    :return locations : generator yielding n location arrays where n is the
        length of the second dimension of the cells array.

    NOTE(review): despite the parentheses, this returns a *generator
    expression*, not a tuple — callers must iterate it or wrap it in
    ``tuple()``. Confirm against callers before changing.
    """
    return (obj.vertices[obj.cells[ids, i]] for i in range(obj.cells.shape[1]))
|
97f9ad1b0a637eaf6b098ad8347ea0b8391ebdc2
| 65,469
|
import re
from typing import List
def parse_input(s) -> List[tuple]:
    """
    Parse the search input query into a list of search-term tuples:
    (column_to_search_in, string_to_search, filter_for_quoted_string).

    :param s: input string to parse
    :return: the list of the search terms to perform and where
    """
    # Implementation for 'AND': each part is parsed independently.
    combined_queries = s.split(' AND ')
    queries_to_perform = []
    # Find content operators (e.g. "title:").
    regex = r"([a-z]+):([a-zA-Z0-9 _]+( |$))"
    for query in combined_queries:
        for match in re.finditer(regex, query, re.MULTILINE):
            groups = match.groups()
            # groups[0] is the column, groups[1] the string to query.
            queries_to_perform.append((groups[0], groups[1], False))
    # Assumption: quoted queries are not combined with search operators.
    if not queries_to_perform:
        if s.startswith('"') and s.endswith('"'):
            # BUG FIX: str.replace returns a new string; the original
            # discarded the result, so the quotes were never stripped.
            s = s.replace('"', '')  # remove quotes
            queries_to_perform.append(('content', s, True))
        else:
            queries_to_perform.append(('content', s, False))
    return queries_to_perform
|
8b7b388e82d69723875a2281a21e625fce34c063
| 65,471
|
def merge_dicts(*dict_args):
    """
    Merge all dicts passed as arguments, skipping None arguments.
    Repeated keys are overridden by later dicts. Returns None when every
    argument is None (or no arguments are given).

    Unlike the original, the caller's dicts are never mutated.
    """
    result = None
    for dictionary in dict_args:
        if dictionary is not None:
            if result is None:
                # Copy so the first non-None dict is not updated in place
                # (the original mutated the caller's dict).
                result = dict(dictionary)
            else:
                result.update(dictionary)
    return result
|
fe66414408aaf945ed45ac6a092e2865e775280c
| 65,475
|
def strip_unexecutable(lines):
    """Remove code lines that only run inside IPython (get_ipython calls)."""
    return [line for line in lines if not line.startswith("get_ipython")]
|
f52fcf9c4afd0262f39bbe51d13860e5826606fb
| 65,476
|
def csvdata(nodelist):
    """
    Return the text content of the element nodes in ``nodelist`` as one
    comma-separated line terminated by a newline.

    Elements without a usable text child contribute an empty field.
    """
    data = ""
    for subnode in nodelist:
        if (subnode.nodeType == subnode.ELEMENT_NODE):
            try:
                data = data + "," + subnode.childNodes[0].data
            except (IndexError, AttributeError):
                # No child node, or the first child has no .data (not a
                # text node): emit an empty field. The original used a
                # bare except, which also hid unrelated errors.
                data = data + ","
    return data[1:] + "\n"
|
c3fc1a220a501691a654c87a74860ab6cdd73fb0
| 65,478
|
def is_active(collection_name, document):
    """Return whether the given node settings document is active, e.g. has
    an external account or is configured.

    The fields consulted depend on the addon collection the document
    belongs to.
    """
    if collection_name == 'addonfigsharenodesettings':
        keys = ('figshare_type', 'figshare_id', 'figshare_title')
        return any(document[k] for k in keys)
    if collection_name == 'googledrivenodesettings':
        keys = ('folder_path', 'user_settings', 'folder_id')
        return any(document[k] for k in keys)
    if collection_name == 'forwardnodesettings':
        return document.get('url', False)
    return bool(document['external_account'])
|
c9afeee58b94cfc86f160db1158e73d658cf8b3f
| 65,483
|
def row_is_simple_header(row):
    """
    Determine whether the row is a header row: exactly the three columns
    "lane", "sample" and "index" (case-insensitive), in that order.

    :type row: list[string]
    :rtype: bool
    """
    if len(row) != 3:
        return False
    return tuple(cell.lower() for cell in row) == ('lane', 'sample', 'index')
|
3efca9963c3be1cdfaf994549f5f32180c7a3612
| 65,484
|
import re
def url_get_scheme( url, clean=False ):
    """
    Given a URL it returns the scheme.
    If clean is 'True', it returns the scheme without '://'.
    If there is no scheme it will return 'None'.
    """
    # Non-greedy '.*?' stops at the FIRST '://'; the original greedy '.*'
    # could swallow everything up to a later '://' inside the URL.
    res = re.search(r'^.*?://', url)
    if res is None:
        return None
    scheme = res.group()
    if clean:
        # Drop the trailing '://'.
        scheme = scheme[:-3]
    return scheme
|
a315bd589aa58b8e948677c5b8d3032e6fad1f76
| 65,487
|
from pathlib import Path
def find_confounds_file(nii_file):
    """Finds the corresponding confounds.tsv file for a bold.nii.gz
    Parameters:
        nii_file: pathlib.Path
    Returns:
        confounds_file: pathlib.Path
    """
    suffix = "desc-confounds_timeseries.tsv"
    # Candidate prefixes: every confounds file in the same directory with
    # the suffix stripped off.
    prefixes = [str(candidate).replace(suffix, "")
                for candidate in nii_file.parent.glob("*confound*tsv")]
    matching = [prefix for prefix in prefixes
                if str(nii_file).startswith(prefix)]
    # Exactly one candidate must match (tuple-unpacking raises otherwise).
    match, = matching
    return Path(match + suffix)
|
12f61251d0ddc9f7c0789922157ea8771fda3cda
| 65,491
|
def simple_accuracy(preds, labels):
    """Returns the accuracy of the prediction (fraction of exact matches)."""
    matches = preds == labels
    return matches.mean()
|
63a64d9c024581492738e3bf4891cff526c5d705
| 65,499
|
def pt_agent_country(country):
    """Clean the country: strip whitespace and map 'unknown' to ''."""
    cleaned = country.strip()
    return '' if cleaned.lower() == 'unknown' else cleaned
|
e87389b3a6713933b1ee4a578c7d6ab048d1ac0f
| 65,501
|
def conjugate_par(par_dict):
    """Given a dictionary of parameter values, return the dictionary where
    all CP-odd parameters have flipped sign.
    This assumes that the only CP-odd parameters are `gamma` or `delta` (the
    CKM phase in the Wolfenstein or standard parametrization)."""
    cp_odd = {'gamma', 'delta'}
    flipped = {}
    for name, value in par_dict.items():
        flipped[name] = -value if name in cp_odd else value
    return flipped
|
f92c73b34d884928119dace81622088c79e30e77
| 65,502
|
def get_individual_positions(individuals):
    """Return a dictionary with individual positions
    Args:
        individuals(list): A list with vcf individuals in correct order
    Returns:
        ind_pos(dict): Map from ind_id -> index position
    """
    if not individuals:
        return {}
    return {ind: index for index, ind in enumerate(individuals)}
|
e3ef0ced7f6fd3bc2c7ee060abc4afd9f09ed06c
| 65,504
|
def splitLine(text):
    """split a line of text on the first space character and return
    two strings, the first word and the remaining string. This is
    used for parsing the incoming messages from left to right since
    the command and its arguments are all delimited by spaces and
    the command may not contain spaces"""
    # str.partition splits on the first space only, leaving the remainder
    # (including any further spaces) intact. The original try/except was
    # dead code: str.split always returns at least one element.
    first, _, rest = text.partition(" ")
    return first, rest
|
82ce7005f18c22de6af438fd810a59760471b3d9
| 65,506
|
def restrict_encode( content, tool ):
    """
    Disable the random interval ENCODE tool.
    Returns False (filtered out) only for the 'random_intervals1' tool.
    """
    return tool.id != 'random_intervals1'
|
d2fa098934aa842986f5a59762ea4d85660779ac
| 65,510
|
def ZFrequencyList_to_TFrequencyList(Z_frequency_list,Z01=complex(50,0),Z02=complex(50,0)):
""" Converts z parameters into T parameters. Z-parameters should be in the form [[f,Z11,Z12,Z21,Z22],...]
the port 1 (Z01) and port 2 (Z01) impedances can be specified, default is 50. Returns data in the form
[[f,T11,T12,T21,T22],...]
"""
t_frequency_list=[]
for row in Z_frequency_list[:]:
[frequency,Z11,Z12,Z21,Z22]=row
denominator=2*Z21*(Z01.real*Z02.real)**(.5)
T11= ((Z11+Z01)*(Z22+Z02)-Z12*Z21)/denominator
T12=((Z11+Z01)*(Z02.conjugate()-Z22)+Z12*Z21)/denominator
T21=((Z11-Z01.conjugate())*(Z22+Z02)-Z12*Z21)/denominator
T22=((Z01.conjugate()-Z11)*(Z22-Z02.conjugate())+Z12*Z21)/denominator
t_frequency_list.append([frequency,T11,T12,T21,T22])
return t_frequency_list
|
410b4cf43600c333f6c0a6f337885c2d3a126ed8
| 65,511
|
def overlaps(mc1, mc2):
    """Compare two motifs and/or clusters to see if their location ranges overlap."""
    # No overlap when one range ends at or before the other begins.
    if mc1.start >= mc2.end:
        return False
    return mc1.end > mc2.start
|
f87622c473d58172448ffa0bbe3d4fab99cc1fb7
| 65,517
|
def transceive(spi,csPin,busyPin,cmd: int,len: int) -> bytearray:
    """
    Method to receive data by an SPI slave after sending a command.
    Parameters
    -----
    spi : spi object from busio.SPI
    csPin : control slave pin from board.D##
    busyPin : busy indicator pin; the transfer waits until it reads low
    cmd : opcode of command
    len : number of bytes to receive
    Returns
    -----
    arr : bytearray of length len with received bytes
    """
    # Wait until the slave is ready (BUSY goes low).
    while busyPin.value:
        pass
    arr = bytearray(len)
    # Drive chip-select low to address the slave. The original code rebound
    # the local name (`csPin = False`) and never actually touched the pin.
    csPin.value = False
    try:
        spi.writeto(bytes([cmd]))
        spi.readinto(arr)
    finally:
        # Always release chip-select, even if the transfer raises.
        csPin.value = True
    return arr
|
45b92a583ec654764abf97c3a12ae051d650ae86
| 65,520
|
def dataset_info(graph):
    """Returns information on the dataset (number of users, links ...)"""
    n_users = len(graph.ids[0])
    n_songs = len(graph.ids[1])
    n_tags = len(graph.ids[2])
    # Per-user song counts and per-song tag counts; the identity of the
    # max/min holder is irrelevant, only the volumes are reported.
    user_song_counts = [len(songs) for songs in graph.graphs[0][1].values()]
    song_tag_counts = [len(tags) for tags in graph.graphs[1][2].values()]
    return {
        'n_users': n_users,
        'n_songs': n_songs,
        'n_tags': n_tags,
        'n_users_song_links': graph.n_links(0, 1),
        'n_song_tag_links': graph.n_links(1, 2),
        'max_user_song_volume': max(user_song_counts),
        'max_song_tag_volume': max(song_tag_counts),
        'min_user_song_volume': min(user_song_counts),
        'min_song_tag_volume': min(song_tag_counts),
        'mean_user_song_volume': sum(user_song_counts) / n_users,
        'mean_song_tag_volume': sum(song_tag_counts) / n_songs,
    }
|
2787b79ac5ad2fe6b0b2f0ec744fda6dec5ab827
| 65,521
|
def decode_erd_bytes(value: str) -> bytes:
    """Decode a raw bytes ERD value sent as a hex encoded string."""
    return bytes(bytearray.fromhex(value))
|
e30acf5a7ad78850f2a822fb8d9d097488baf0db
| 65,525
|
def _improve_segmentation(doc):
"""
Helper function to improve spacy sentence segmentation specifically for product descriptions.
A lot of descriptions contain lists that spacy does not properly handle. To fix that, this function marks every
token that is followed by an empty token (i.e. newline) as the start of a sentence.
This does NOT cause issues when there is a whitespace after a token even though it looks like it might.
:param doc: The loaded spacy model.
:return: The model, but with improved segmentation for sentence splitting.
"""
for token in doc[:-1]:
if not token.text.strip():
# blank, such as \n \n etc.
doc[token.i+1].is_sent_start = True
return doc
|
aff9c3ce57ef02074d69924a0388c0028afc90ca
| 65,528
|
import random
import string
import time
def generate_random_filename(randlen=2, suffix=None):
    """
    Generates a random string with a specified prefix, useful for generating
    random filenames. No two filenames will be the same if executed on the same
    machine.
    *randlen* specifies the length of the filename, while *suffix* allows you to
    append a suffix at the end of the file name, i.e xxx.gif or xxx.html.
    Returns a string.
    """
    # Validate/normalize randlen. The original discarded the int() result,
    # so a numeric string like "3" hit a TypeError in the comparison and was
    # silently reset to 2 by a bare except.
    try:
        randlen = int(randlen)
    except (TypeError, ValueError):
        randlen = 2
    # Clamp to [0, 7]: random.sample draws without replacement, so the
    # length must not exceed the candidate pool (and 7 keeps names short).
    randlen = max(0, min(randlen, 7))
    prefix = "".join(random.sample(string.digits + string.ascii_lowercase, randlen))
    stamp = int(time.time())
    if suffix:
        return "%s%s.%s" % (prefix, stamp, suffix.strip())
    return "%s%s" % (prefix, stamp)
|
4091e1e57d7efa9d148e8dc3ae88c42cb9941a85
| 65,529
|
def uniq_srt(it):
    """Returns the input sequence unified and sorted (according to the values)
    >>> uniq_srt([3, 3, 5, 3, 4, 2, 4])
    [2, 3, 4, 5]
    >>> uniq_srt('abrakadabra')
    ['a', 'b', 'd', 'k', 'r']
    """
    unique_values = set(it)
    return sorted(unique_values)
|
dc5d8e7152a8ec5580f19960043bdb9ab4b5fa42
| 65,533
|
def _IsOutputField(help_text):
"""Determines if the given field is output only based on help text."""
return help_text and help_text.startswith('[Output Only]')
|
256d51383c6493deed5f3c041a8b34d22789bf8f
| 65,538
|
def calc_eaf(delta_T, N_lay, P_i, A_i, v, A_p, D_i, t_p, alpha, E_p, thick=False):
    """Return the effective axial force [N], negative in compression."""
    if thick:
        # Thick-wall formulation: end-cap, Poisson and thermal contributions.
        end_cap_term = P_i * A_i
        poisson_term = 2 * P_i * v * (A_p / 4) * (D_i / t_p - 1)
        thermal_term = alpha * delta_T * E_p * A_p
        return N_lay - end_cap_term + poisson_term - thermal_term
    # Thin-wall formulation.
    pressure_term = P_i * A_i * (1 - 2 * v)
    thermal_term = A_p * E_p * alpha * delta_T
    return N_lay - pressure_term - thermal_term
|
919f50ff03cc2d92c603ec1a4249eec971f414c2
| 65,540
|
import re
def fix_type_pre(value):
    """
    So far this only modifies ellipses.
    >>> fix_type_pre("wait...done")
    'wait <nobr> . . . </nobr> done'
    >>> fix_type_pre("....")
    ' <nobr> . . . . </nobr> '
    """
    # Replace four-dot runs first so the three-dot pattern cannot split them.
    new_value = re.sub(r'(?<!\.)\.{4}(?!\.)', ' <nobr> . . . . </nobr> ', value)
    # Lookarounds (instead of the original '[^.]') match without consuming
    # the adjacent character, which the old pattern silently deleted, and
    # also catch an ellipsis at the end of the string. Runs of 5+ dots are
    # left untouched.
    new_value = re.sub(r'(?<!\.)\.{3}(?!\.)', ' <nobr> . . . </nobr> ', new_value)
    return new_value
|
ecf19286286b56a2e81462625e6c4b3a0102ef46
| 65,541
|
def _remove_nesting(ref):
"""Remove the outer layer of nesting if ref is a nested reference.
Return the original reference if it's not nested"""
return (ref['reference']
if isinstance(ref, dict) and 'reference' in ref
else ref)
|
2f120a5530d4b11778f28dccf85c96ac5df2d798
| 65,543
|
def nt_escape(node_string):
    """Properly escape strings for n-triples and n-quads serialization.

    Follows the W3C N-Triples escaping rules: TAB/LF/CR, quote and
    backslash get single-character escapes, printable ASCII passes
    through, and every other code point is \\uXXXX / \\UXXXXXXXX
    escaped. (The original silently DROPPED control characters below
    U+0020 other than TAB/LF/CR.)
    """
    # Single-character escapes defined by the N-Triples grammar.
    simple_escapes = {
        '\u0009': '\\t',
        '\u000A': '\\n',
        '\u000D': '\\r',
        '\u0022': '\\"',
        '\u005C': '\\\\',
    }
    parts = []
    for char in node_string:
        escaped = simple_escapes.get(char)
        if escaped is not None:
            parts.append(escaped)
        elif '\u0020' <= char <= '\u007E':
            # Printable ASCII (quote and backslash already handled above).
            parts.append(char)
        elif char <= '\uFFFF':
            # BMP code points, including the previously-dropped controls.
            parts.append('\\u%04X' % ord(char))
        else:
            parts.append('\\U%08X' % ord(char))
    # join() is linear; the original += loop was quadratic.
    return ''.join(parts)
|
ff586a4a1cbe3236c67b8d7a276343a8299af525
| 65,544
|
import json
def lambda_handler(event, context):
    """
    This is roughly the exact same handler function that AWS provides.
    """
    body = json.dumps("Hello world")
    return {'statusCode': 200, 'body': body}
|
13e21c253356ff2c23551ef7e9c34d0fb0f85d60
| 65,546
|
def filter_experiments(collection, configurations):
    """Check database collection for already present entries.
    Check the database collection for experiments that have the same configuration.
    Remove the corresponding entries from the input list of configurations to prevent
    re-running the experiments.
    Parameters
    ----------
    collection: pymongo.collection.Collection
        The MongoDB collection containing the experiments.
    configurations: list of dicts
        Contains the individual parameter configurations.
    Returns
    -------
    filtered_configs: list of dicts
        No longer contains configurations that are already in the database collection.
    """
    new_configs = []
    for config in configurations:
        # Look the config up under the nested 'config.<key>' document paths.
        query = {'config.%s' % key: value for key, value in config.items()}
        if collection.find_one(query) is None:
            new_configs.append(config)
    return new_configs
|
20c26a299fb302680520fcb7ae7fe25ae0d53364
| 65,553
|
def filter_jwt(respose, response_json):
    """Replace the refresh and access token with some mock up data."""
    has_both_tokens = "refresh" in response_json and "access" in response_json
    if has_both_tokens:
        # Overwrite the real tokens in place with fixed mock values.
        response_json.update(refresh="mock_refresh", access="mock_access")
    return respose, response_json
|
24c144978f8eecaf83674d7127b5c3b235d7924a
| 65,554
|
def _propagate(P, F, B):
"""Propagate labels by one step
Parameters
----------
P : scipy sparse matrix, shape = [n_samples, n_samples]
Propagation matrix
F : numpy array, shape = [n_samples, n_classes]
Label matrix
B : numpy array, shape = [n_samples, n_classes]
Base matrix
Returns
----------
F_new : array, shape = [n_samples, n_classes]
Label matrix
"""
F_new = P.dot(F) + B
return F_new
|
9946514062fdb454e0d1cdbaf14f2887fb6781de
| 65,555
|
def remove_equal_start_stop(gpx):
    """Removes any silly tracks with either 1 point, or 2 points on the same location.

    Tracks to drop are flagged with ``track.remove = True`` (matching the
    original side effect); the gpx object itself is returned.
    """
    for track in gpx.tracks:
        points = track.segments[0].points
        remove = False
        # The original used `> 1` here, which flagged EVERY multi-point
        # track and made the two-point comparison unreachable.
        if len(points) < 2:
            # A track with zero or one point is not a real track.
            remove = True
        elif len(points) == 2:
            p0, p1 = points
            if p0.lat == p1.lat and p0.lon == p1.lon:
                # Two points at the identical location: degenerate track.
                remove = True
        if remove:
            track.remove = True
    return gpx
|
3cfc2b6c644eedc84fddafa542f2702961f0961a
| 65,556
|
from typing import Callable
from typing import Any
from typing import Sequence
import functools
def apply_middlewares(
    func: Callable[..., Any], middlewares: Sequence[Callable[..., Any]]
) -> Callable[..., Any]:
    """
    Apply a list of middlewares to a source function.
    - Middlewares must be structured as: ``middleware(next, *args, **kwargs)``
    and call the next middleware inline.
    >>> def square(x): return x ** 2
    >>> def double(next, x): return next(x * 2)
    >>> def substract_one(next, x): return next(x - 1)
    >>> final = apply_middlewares(square, [double, substract_one])
    >>> final(2)  # ((2 - 1) * 2) ^ 2
    4
    >>> final(10)  # ((10 - 1) * 2) ^ 2
    324
    """
    wrapped = func
    for middleware in middlewares:
        if not callable(middleware):
            raise TypeError("Middleware should be a callable")
        # Each middleware receives the chain built so far as its `next`.
        wrapped = functools.partial(middleware, wrapped)
    return wrapped
|
04685a34f401eb884e8bc352a8be285fb3b9a53e
| 65,560
|
import re
def namespace(element):
    """
    Gets XML namespace of element
    :param element: element, whose tag carry namespace
    :return: namespace of element, if element does not have one, return empty string
    """
    # ElementTree encodes the namespace as a '{uri}' prefix on the tag.
    match = re.match(r'\{.*\}', element.tag)
    if match:
        return match.group(0)
    return ''
|
316baac2e99b5171f30ee69d5e4cc0274d1a6cc3
| 65,565
|
def expected_headers(auth_token='my-auth-token'):
    """
    Return an expected set of headers, given an auth token
    """
    headers = {
        'content-type': ['application/json'],
        'accept': ['application/json'],
        'User-Agent': ['OtterScale/0.0'],
    }
    headers['x-auth-token'] = [auth_token]
    return headers
|
9a7a926a8da86e0eb4ae9bdf24820b9462539966
| 65,566
|
from typing import List
from typing import Any
def columns(table: List[List[Any]]) -> List[List[Any]]:
    """Returns a list with the columns of a table.
    Args:
        table (List[List[Any]]): The table. Needs to be a n x m matrix.
    Returns:
        List[List[Any]]: The columns of the table. An empty table has no
        columns, so [] is returned (the original raised IndexError here).
    Raises:
        ValueError: if `table` is no n x m matrix.
    """
    if not table:
        return []
    width = len(table[0])
    for row in table:
        if len(row) != width:
            raise ValueError("Table needs to be a n x m matrix.")
    # zip(*table) transposes rows into columns.
    return [list(column) for column in zip(*table)]
|
0e475b1ac4c1910548c80283c22ebb3b1506a10d
| 65,568
|
def isUniqueWithSet (str):
    """Given a string, checks if the string has unique characters."""
    # A set drops duplicates; equal lengths mean nothing was dropped.
    unique_chars = set(str)
    return len(str) == len(unique_chars)
|
299e4eaf617fbec4b5aa55493a64abba51fd8354
| 65,571
|
import time
def wait_for(boolean_predicate, timeout_seconds=None, poll_period=0.25, exceptions_to_swallow=None):
    """
    Waits a specified amount of time for the conditional predicate to be true.
    :param boolean_predicate: A callable to continually evaluate until it returns a truthy value
    :type boolean_predicate: callable
    :param timeout_seconds: The timeout (in seconds)
    :type timeout_seconds: int
    :param poll_period: The frequency at which boolean_predicate should be evaluated
    :type poll_period: float
    :param exceptions_to_swallow: A set of acceptable exceptions that may be thrown by boolean_predicate
    :type exceptions_to_swallow: Exception | list(Exception)
    :return: True if boolean_predicate returned True before the timeout; False otherwise
    :rtype: bool
    """
    exceptions_to_swallow = exceptions_to_swallow or ()
    timeout_seconds = timeout_seconds or float('inf')
    # time.monotonic() is immune to system clock adjustments, which could
    # make a time.time()-based deadline fire early or never.
    end_time = time.monotonic() + timeout_seconds
    while time.monotonic() < end_time:
        try:
            if boolean_predicate():
                return True
        except exceptions_to_swallow:
            # Acceptable failure: keep polling until the deadline.
            pass
        time.sleep(poll_period)
    return False
|
bbcb36e0d7fa7a73b2ac18f02b2e23cbbc354fb8
| 65,574
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.