content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def make_key(obj):
    """Returns a key for this object.

    Uses the object's identity (``id``), so the key is unique and stable
    only for the object's lifetime; two equal-but-distinct objects get
    different keys.
    """
    return id(obj)
|
b3380bfadba5c8e64d087573d5de7e299339e0d6
| 409,393
|
from typing import Tuple
def parse_notification(notification: dict) -> Tuple[str, str]:
    """Validates a notification payload and extracts the object location.
    Args:
        notification(dict): Pub/Sub Storage Notification
            https://cloud.google.com/storage/docs/pubsub-notifications
            Or Cloud Functions direct trigger
            https://cloud.google.com/functions/docs/tutorials/storage
            with notification schema
            https://cloud.google.com/storage/docs/json_api/v1/objects#resource
    Returns:
        tuple of bucketId and objectId attributes
    Raises:
        RuntimeError: if the input notification does not contain the
            expected attributes (the original docstring claimed KeyError,
            but a RuntimeError is what is actually raised).
    """
    if notification.get("kind") == "storage#object":
        # notification is a GCS Object resource from a Cloud Functions trigger
        # https://cloud.google.com/storage/docs/json_api/v1/objects#resource
        return notification["bucket"], notification["name"]
    if notification.get("attributes"):
        # notification is a Pub/Sub message.
        try:
            attributes = notification["attributes"]
            return attributes["bucketId"], attributes["objectId"]
        except KeyError as err:
            # Bug fix: the adjacent string literals lacked a separating
            # space ("expectedattributes"), and the chain cause was the
            # KeyError class rather than the caught exception instance.
            raise RuntimeError(
                "Issue with Pub/Sub message, did not contain expected "
                f"attributes: 'bucketId' and 'objectId': {notification}"
            ) from err
    # Bug fix: several adjacent literals below were also missing spaces
    # ("FunctionsBackground", "notificaitonsas") and had typos.
    raise RuntimeError(
        "Cloud Function received unexpected trigger:\n"
        f"{notification}\n"
        "This function only supports direct Cloud Functions "
        "Background Triggers or Pub/Sub storage notifications "
        "as described in the following links:\n"
        "https://cloud.google.com/storage/docs/pubsub-notifications\n"
        "https://cloud.google.com/functions/docs/tutorials/storage")
|
80e1c6c43047817dcf9e401347ed52609f3588c8
| 386,459
|
def get_at(doc, path, create_anyway=False):
    """Get the value, if any, of the document at the given path, optionally
    mutating the document to create nested dictionaries as necessary.
    """
    node = doc
    last = len(path) - 1
    # Fast path: a single-element path is a plain lookup and never creates
    # anything, regardless of `create_anyway`.
    if last == 0:
        return doc.get(path[0])
    for index, edge in enumerate(path):
        if edge in node:
            node = node[edge]
        elif index == last or not create_anyway:
            # Missing key: either we are at the final edge (no value is
            # ever created for the leaf itself), or we are mid-path and
            # not allowed to create intermediate nodes.
            return None
        else:
            # create_anyway: materialize a missing intermediate dict so a
            # caller can later assign into it.
            node = node[edge] = {}
    return node
|
ff3cf22a878b40e17a41d2ad4ec8b410b130b323
| 550,829
|
import random
def split_dataset(ids, validation_split=0.2):
    """Split dataset for training and validation.

    Args:
        ids: sequence of example identifiers to split.
        validation_split: fraction (0..1) held out for validation.

    Returns:
        dict with shuffled 'train_ids' and 'valid_ids' lists.
    """
    # Bug fix: shuffle a copy so the caller's list is not reordered
    # in place as an undocumented side effect.
    ids = list(ids)
    random.shuffle(ids)
    split_index = int((1 - validation_split) * len(ids))
    train_ids = ids[:split_index]
    valid_ids = ids[split_index:]
    print('Training count: %s' % len(train_ids))
    print('Validation instance count: %s' % len(valid_ids))
    return {'train_ids': train_ids,
            'valid_ids': valid_ids}
|
e2bc9dee16885a442b889ac90cd50d4e64fdcb44
| 436,494
|
def anglicize1to19(n):
    """
    Returns the English equiv of n.
    Parameter: the integer to anglicize
    Precondition: n in 1..19
    """
    # Table lookup replaces the long if/elif ladder; index 0 holds 'one'.
    words = (
        'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
        'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
        'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen',
    )
    return words[n - 1]
|
faa87aa7fa8db485da22715e7c754098babe5af2
| 623,159
|
def pi_using_float(precision):
    """Get value of pi via BBP formula to specified precision using floats.
    See: https://en.wikipedia.org/wiki/Bailey%E2%80%93Borwein%E2%80%93Plouffe_formula
    :param precision: Precision to retrieve.
    :return: Pi value with specified precision.
    """
    def term(k):
        # k-th BBP term; float literals keep the arithmetic in floats.
        # Dividing by 16.**k (an exact power of two) is bit-identical to
        # multiplying by 1./16.**k.
        base = 8. * k
        return (4. / (base + 1.) - 2. / (base + 4.)
                - 1. / (base + 5.) - 1. / (base + 6.)) / 16. ** k
    return sum(term(k) for k in range(precision))
|
a68126129c5fac24ed643af0bd43fcb0a6a65137
| 589,539
|
def join_distributions(a, b):
    """joins two distributions of absolute class counts by adding the values
    of each key"""
    # Both distributions must describe exactly the same classes.
    assert a.keys() == b.keys()
    joined = {}
    for label in a:
        joined[label] = a[label] + b[label]
    return joined
|
0d74e844d13f11cb29c17c71610881e3392c1f37
| 373,633
|
import importlib
def import_func(func):
    """
    Imports a function from the autocnet package.
    Parameters
    ----------
    func : str
        import path. For example, to import the place_points_in_overlap function,
        this func can be called with: 'spatial.overlap.place_points_in_overlap'
    Returns
    -------
    func : obj
        The function object for use.
    """
    # Paths are resolved relative to the autocnet package, so a leading
    # '.' is required; add it on the caller's behalf to avoid a cryptic
    # ModuleImportError.
    if not func[0] == '.':
        func = f'.{func}'
    module_path, attr_name = func.rsplit('.', 1)
    module = importlib.import_module(module_path, package='autocnet')
    return getattr(module, attr_name)
|
f7830c3d26e02349c7f4a8f7ae0b790b98b0e7d4
| 625,777
|
def _deslugify(string):
"""Deslugify string."""
return string.replace("_", " ").title()
|
4238631831d99fc8016ba2f423443802d7cfc131
| 313,373
|
def job_runner(job):
    """
    Run a job. Called in a Process pool.

    Thin module-level wrapper so the pool can pickle a plain function;
    simply delegates to the job's own ``run`` method and returns its
    result.
    """
    return job.run()
|
7c76225cc8d19e08231ba2973bee0ba7ff4827cc
| 589,802
|
def overlaps(mc1, mc2):
    """Compare two motifs and/or clusters to see if their location ranges overlap."""
    # Two ranges are disjoint iff one ends at or before the other starts;
    # overlap is the negation of that.
    disjoint = mc1.end <= mc2.start or mc2.end <= mc1.start
    return not disjoint
|
f87622c473d58172448ffa0bbe3d4fab99cc1fb7
| 65,517
|
def is_ip_in_subnet(ip, subnet):
    """
    Return True if the IP is in the subnet, return False otherwise.
    Uses bitwise arithmetic on IPv4 dotted quads; IPv6 is not handled.
    >>> is_ip_in_subnet('0.0.0.0', '0.0.0.0/8')
    True
    >>> is_ip_in_subnet('0.255.255.255', '0.0.0.0/8')
    True
    >>> is_ip_in_subnet('1.0.0.0', '0.0.0.0/8')
    False
    @type ip: C{string}
    @param ip: The IP address to check for membership in the subnet.
    @type subnet: C{string}
    @param subnet: The subnet (a '*' wildcard, a bare IP, or CIDR form)
        that the given IP address may or may not be in.
    @rtype: C{boolean}
    @return: True if the IP address is in the subnet, false otherwise.
    @see: U{http://www.webopedia.com/TERM/S/subnet_mask.html}
    @see: U{http://wiki.python.org/moin/BitwiseOperators}
    """
    # A wildcard subnet matches every address.
    if subnet == '*':
        return True
    # An exact string match is trivially a member.
    if subnet == ip:
        return True
    # Without a mask length there is nothing more to compare.
    if '/' not in subnet:
        return False

    def dotted_quad_to_int(addr):
        # a.b.c.d -> a*2^24 + b*2^16 + c*2^8 + d
        value = 0
        for octet in addr.split('.'):
            value = (value << 8) + int(octet)
        return value

    base, bits = subnet.split('/')
    bits = int(bits)
    # A /0 mask keeps no bits; otherwise keep the top `bits` bits of a
    # 32-bit all-ones word.
    mask = 0 if bits == 0 else (~0 << (32 - bits))
    network = dotted_quad_to_int(base)
    # The subnet spans [network & mask, network | ~mask].
    lower_bound = network & mask
    upper_bound = network | (~mask & 0xFFFFFFFF)
    return lower_bound <= dotted_quad_to_int(ip) <= upper_bound
|
2dfa9806ba88e2e35235a17a98d32cb8bf5a26fd
| 148,295
|
import asyncio
async def await_other_fixture(loop):
    """Await all other task but the current task.

    Returns the inner ``wait_for_tasks`` coroutine function; callers later
    invoke it with the current task to gather everything else.

    NOTE(review): the ``loop`` parameter is unused here — presumably kept
    for fixture-signature compatibility; confirm before removing.
    """
    async def wait_for_tasks(current_task):
        """Gather every task known to the loop except `current_task`."""
        tasks = asyncio.all_tasks() - {current_task}
        await asyncio.gather(*tasks)
    return wait_for_tasks
|
e124a45585ede8f13c0a191f10ba440ce4a63942
| 363,150
|
def ReleasePowerAssertion(io_lib, assertion_id):
    """Releases a power assertion.
    Assertions are released with IOPMAssertionRelease, however if they are not,
    assertions are automatically released when the process exits, dies or
    crashes, i.e. a crashed process will not prevent idle sleep indefinitely.
    Args:
      io_lib: IOKit library from ConfigureIOKit()
      assertion_id: c_uint, assertion identification number from
        CreatePowerAssertion()
    Returns:
      The IOPMAssertionRelease return code (0 on success), or an error
      message string if the library does not expose the symbol.
    """
    try:
        return io_lib.IOPMAssertionRelease(assertion_id)
    except AttributeError:
        # The loaded library lacks IOPMAssertionRelease; report a readable
        # error string instead of raising.
        return 'IOKit library returned an error.'
|
a96a10785571e28f488350f7068e8c72980a7018
| 193,276
|
def get_branch_condition(branch):
    """
    Extract branchCondition_GuardedBranchTransition from specification, e.g.
    "type.VALUE == "graphical"" -> "#graphical"
    :param branch: "branches_Branch"
    :return: extracted condition string
    """
    condition_node = branch.find("./branchCondition_GuardedBranchTransition")
    specification = condition_node.get("specification")
    # Keep only the right-hand side of '==', stripped of quotes and spaces.
    raw_value = specification.split("==")[-1]
    cleaned = raw_value.replace('"', "").replace(" ", "")
    return '#{condition}#'.format(condition=cleaned)
|
6a8bb3dc05fce391e19a05abc01a3fca370bb33b
| 613,044
|
def format_gpus(connection, node_id: int):
    """
    Returns GPUs formatted in Dash friendly manner.
    :return: list of Dicts
    """
    # One {'label', 'value'} dict per GPU reported for the node.
    return [{'label': f'{gpu}', 'value': gpu}
            for gpu in connection.get_gpus(node_id)]
|
34e646a1fe989d5552a197d45fad49616fcc1d0f
| 314,845
|
def fails_if_called(test, msg="This function must not be called.",
                    arguments=True):
    """
    Return a new function (accepting any arguments)
    that will call test.fail(msg) if it is called.
    :keyword bool arguments: If set to ``False``, then we will
        not accept any arguments. This can avoid
        masking when we would expect a TypeError to be raised by
        calling an instance method against a class.
    """
    if arguments:
        def tripwire(*_args, **_kwargs):
            return test.fail(msg)
    else:
        def tripwire():
            return test.fail(msg)
    return tripwire
|
a234b141a8e24a74a15a98929aee02a181721f5c
| 677,002
|
def _version_string(version):
"""Convert version from bytes to a string.
Args:
version: version in byte format.
Returns:
Three byte version string in hex format: 0x00 0x00 0x00
"""
return ' '.join('0x{:02x}'.format(ord(b)) for b in version)
|
cb17c4efb4d3ccab5b1d13ca5b90471841b1f9a4
| 621,020
|
def add_sets(*args):
    """
    Add sets. The arguments need not be sets. Returns a set of unique
    values. If the arguments include unhashable types, raises a TypeError.
    """
    # Flatten every argument into one deduplicated set.
    return {element for iterable in args for element in iterable}
|
32277f8ac5a7e23f074aeba88335227182ae828d
| 145,265
|
def MakeGray(rgbTuple, factor, maskColour):
    """
    Make a pixel grayed-out. If the pixel matches the `maskColour`, it won't be
    changed.
    :param `rgbTuple`: a tuple representing a pixel colour;
    :param `factor`: a graying-out factor;
    :param `maskColour`: a colour mask.
    :returns: a tuple of the grayed (or unchanged) channel values.
    """
    if rgbTuple == maskColour:
        return rgbTuple
    # Bug fix: on Python 3 the original returned a lazy `map` object for
    # non-masked pixels but a tuple for masked ones; return a tuple in
    # both branches so callers can index or iterate the result repeatedly.
    return tuple(int((230 - channel) * factor) + channel
                 for channel in rgbTuple)
|
949e88735522cadc9d7060b630a6b4781c484782
| 649,618
|
def nCWRk(n, r):
    """Returns nCk(n+r-1, n-1) optimized for large n and small r."""
    # Multiply then divide incrementally; every prefix product of i
    # consecutive binomial factors is divisible by i!, so the integer
    # division stays exact at each step.
    result = 1
    for step in range(1, r + 1):
        result = result * (n + r - step) // step
    return result
|
6ed2307192f8cff91c6fd345c73867170fc5ede5
| 359,102
|
import warnings
def preprocess_metrics(input_metrics, metrics_dict):
    """Preprocess the input metrics so that each one maps to the
    appropriate function in ``metrics_dict``.

    input_metrics can contain str or function entries. A string entry
    must be a key of ``metrics_dict``; unknown names produce a warning
    and are skipped. Callable entries are kept under generated names
    ``custom_1``, ``custom_2``, ...

    Parameters
    ----------
    input_metrics: list
        List of metrics to compute
    metrics_dict: dict
        Dictionary to compare input_metrics with

    Returns
    -------
    dict:
        Dictionary with metric's name as key and metric function as value

    Raises
    ------
    TypeError:
        input_metrics must be a list
    ValueError:
        no valid metrics were found
    """
    # Idiom fix: use isinstance() rather than ``type(...) ==`` comparisons.
    if not isinstance(input_metrics, list):
        raise TypeError('input_metrics must be a list')
    fn_dict = {}
    cnt_custom = 1
    for fn in input_metrics:
        if isinstance(fn, str):
            if fn in metrics_dict:
                fn_dict[fn] = metrics_dict[fn]
            else:
                warnings.warn('%s function not found' % fn)
        else:
            fn_dict['custom_' + str(cnt_custom)] = fn
            cnt_custom += 1
    if len(fn_dict) == 0:
        raise ValueError('No valid metrics found')
    return fn_dict
|
1c455f45d628ff63422bf0b22f14d34d5db4d7d0
| 533,512
|
import torch
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):  # pragma: no cover
    """
    Sort a batch first tensor by some specified lengths.
    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A batch first Pytorch tensor.
    sequence_lengths : torch.LongTensor, required.
        A tensor representing the lengths of some dimension of the tensor which
        we want to sort by.
    Returns
    -------
    sorted_tensor : torch.FloatTensor
        The original tensor sorted along the batch dimension with respect to sequence_lengths.
    sorted_sequence_lengths : torch.LongTensor
        The original sequence_lengths sorted by decreasing size.
    restoration_indices : torch.LongTensor
        Indices into the sorted_tensor such that
        ``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
    permutation_index : torch.LongTensor
        The indices used to sort the tensor. This is useful if you want to sort many
        tensors using the same ordering.
    """
    if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
        raise ValueError("Both the tensor and sequence lengths must be torch.Tensors.")
    # Sort lengths descending, then reorder the batch the same way.
    sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
    sorted_tensor = tensor.index_select(0, permutation_index)
    # Invert the permutation: sorting it ascending gives, for each original
    # row, its position in the sorted batch.
    _, reverse_mapping = permutation_index.sort(0, descending=False)
    index_range = sequence_lengths.new_tensor(torch.arange(0, len(sequence_lengths)))
    restoration_indices = index_range.index_select(0, reverse_mapping)
    return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
|
d91b3c52fffeda87383a6ef9f8748259a6cf9f34
| 320,574
|
def remove_sql_comments(sql):
    """Strip SQL comments starting with --"""
    # Drop everything from '--' to end-of-line on every line; note the
    # join separator (' \n') matches the original output exactly.
    stripped_lines = (line.split('--')[0] for line in sql.split('\n'))
    return ' \n'.join(stripped_lines)
|
e3849a59495485f2a014fbabf53bcad83639961d
| 256,759
|
def build_pwsh_test_command() -> str:
    """ Build command for powershell test
    Returns:
        str: powershell test command
    """
    # -Configuration: make Pester return its exit code and print detailed
    # output.
    pester_config = '\'@{Run=@{Exit=$true}; Output=@{Verbosity="Detailed"}}\''
    parts = ["Invoke-Pester", "-Configuration", pester_config]
    return "pwsh -Command " + " ".join(parts)
|
9a2ee76785891ebbf4c906b4be630e7004a62eac
| 460,973
|
import re
def remove_hanging_parenthesis(sample_string):
    """
    Removes parenthesis at the end of strings.
    Args:
        sample_string (str): Input string
    Returns:
        str
    """
    # Matches a trailing '(' preceded by one character outside the class
    # [.*] and removes both characters, then strips surrounding whitespace.
    # NOTE(review): '[^.*]' excludes only '.' and '*' (not "anything"), and
    # the preceding character is consumed too — confirm both are intended.
    return re.sub(r"[^.*]\($", "", sample_string).strip()
|
23d4deb582973e209c5bb04f9b573c93997c7388
| 630,675
|
def _captalize(arg1):
"""Returns the string with an initial capital"""
return str(arg1).title()
|
eab47f235edc16ae47840d35c15422f45686822a
| 321,624
|
def trim_silence(audio, noise_threshold=150):
    """ Removes the silence at the beginning and end of the passed audio data
    :param audio: numpy array of audio
    :param noise_threshold: the maximum amount of noise that is considered silence
    :return: a trimmed numpy array
    """
    start = None
    end = None
    # Scan forward for the first sample whose magnitude exceeds the threshold.
    for idx, point in enumerate(audio):
        if abs(point) > noise_threshold:
            start = idx
            break
    # Reverse the array for trimming the end
    for idx, point in enumerate(audio[::-1]):
        if abs(point) > noise_threshold:
            end = len(audio) - idx
            break
    # NOTE(review): if no sample exceeds the threshold, start/end stay None
    # and audio[None:None] returns the whole array unchanged — confirm that
    # an all-silent clip should not be trimmed to empty instead.
    return audio[start:end]
|
cee7e4db02c9ed9074e4b023d13f78f47a25fd2a
| 231,731
|
def is_cn_char(ch):
    """ Test if a char is a Chinese character. """
    # CJK Unified Ideographs range U+4E00..U+9FA5, as a chained comparison.
    return u'\u4e00' <= ch <= u'\u9fa5'
|
63d26e6d2ff4b446f2e80cb80588bce214b648a6
| 468,612
|
def _lcs_length(x, y):
"""
Computes the length of the longest common subsequence (lcs) between two
strings. The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
>>> _lcs_length('ABCDE', 'CD')
2
>>> _lcs_length('the police killed the gunman'.split(), 'gunman police killed'.split())
2
:param x: sequence of words
:param y: sequence of words
:return: Length of LCS between x and y
"""
n, m = len(x), len(y)
len_table = {}
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
len_table[i, j] = 0
elif x[i - 1] == y[j - 1]:
len_table[i, j] = len_table[i - 1, j - 1] + 1
else:
len_table[i, j] = max(len_table[i - 1, j], len_table[i, j - 1])
return len_table[n, m]
|
808ba58208b72bef91443e7466d1497b82d545ee
| 323,779
|
def compare_simple(x, y, context):
    """
    Returns a very simple textual difference between the two supplied objects.
    """
    # Equal objects produce no difference (None).
    if x == y:
        return None
    left = context.label('x', repr(x))
    right = context.label('y', repr(y))
    return left + ' != ' + right
|
467009d5f232e9ab8c40bddc750fcba51b80cb00
| 359,440
|
def permute_all_atoms(labels, coords, permutation):
    """
    labels - atom labels
    coords - a set of coordinates
    permuation - a permutation of atoms
    Returns the permuted labels and coordinates
    """
    # Work on copies; atom at index `source` moves to `permutation[source]`.
    new_coords = coords[:]
    new_labels = labels[:]
    for source, target in enumerate(permutation):
        new_coords[target] = coords[source]
        new_labels[target] = labels[source]
    return new_labels, new_coords
|
ba8aa571afd9039725347a0b5a04885dfafb02b3
| 655,836
|
def ts_candle_from_ts(ts, timeframe_int):
    """Return candle timestamp from a timestamp and a given timeframe (integer)"""
    # Floor the timestamp to the start of its candle interval.
    candle_index = ts // timeframe_int
    return candle_index * timeframe_int
|
351636aca4ac03f6733a54811620580d31e18599
| 433,623
|
def validateFitMohrCoulomb(value):
    """
    Validate fit to Mohr-Coulomb yield surface.
    """
    allowed = ("inscribed", "middle", "circumscribed")
    if value not in allowed:
        raise ValueError("Unknown fit to Mohr-Coulomb yield surface.")
    return value
|
af7f59454c35caa33aa98f4457593d82c7186278
| 543,061
|
def get_ls_user_line(user: dict) -> str:
    """Get a string representing a user in the User Ls command.
    :param user: User data.
    :returns: User string.
    """
    username = user["username"]
    # Fit the username into exactly 20 columns: pad short names with
    # spaces, truncate long ones to 17 chars plus an ellipsis.
    if len(username) <= 20:
        username = username.ljust(20)
    else:
        username = f"{username[:17]}..."
    admin = "Yes" if user["admin"] else "No "
    enabled = "Yes" if user["enabled"] else "No "
    return (user["id"] + " | " + username + " | "
            + admin + (" " * 11) + "| " + enabled)
|
cfe34e988fa53182855406473214e4e59a75ffc8
| 209,706
|
import math
def borders_ms_to_frames(borders, rate):
    """
    Function to convert a list of 2-item lists or tuples from milliseconds to
    frames.
    Parameters
    ----------
    borders : list
        a list of 2-item lists or tuples, each item of which is a number of
        milliseconds
    rate : float
        frames per millisecond (fps / 1000)
    Returns
    -------
    frame_borders : list
        a list of (start, stop) tuples expressed in frames
    """
    # Round starts down and stops up so the frame span always covers the
    # whole millisecond interval.
    return [(math.floor(start * rate), math.ceil(stop * rate))
            for start, stop in borders]
|
0d9faf19be3da2ec4b40c71b13826a6eae0cdc02
| 446,683
|
import time
def strftime_utc(epoch):
    """Convert seconds from epoch into UTC time string."""
    # gmtime keeps everything in UTC; the literal "+0000" marks that offset.
    utc_struct = time.gmtime(epoch)
    return time.strftime("%a, %d %b %Y %H:%M:%S+0000", utc_struct)
|
acc40d25c482c6ba3f3ef6fe684e225e8e9754f3
| 152,455
|
def bps_mbps(val: float) -> float:
    """
    Converts bits per second (bps) into megabits per second (mbps).
    Args:
        val (float): The value in bits per second to convert.
    Returns:
        float: Returns val in megabits per second.
    Examples:
        >>> bps_mbps(1000000)
        1.0
        >>> bps_mbps(1129000)
        1.13
    """
    megabits = float(val) / 1000000
    # Two decimal places, matching typical bandwidth reporting.
    return round(megabits, 2)
|
13520e00c393a647ccdc2ba0021445970e169fd2
| 70,921
|
from typing import Callable
import asyncio
def add_async_job(target: Callable, *args):
    """Add a callable to the event loop.

    :param target: a coroutine object, a coroutine function, or a plain
        callable; plain callables run in the default executor.
    :param args: positional arguments forwarded to ``target`` (ignored
        when ``target`` is already a coroutine object).
    :returns: the created Task/Future.

    NOTE(review): ``asyncio.get_event_loop`` is deprecated outside a
    running loop on modern Python — confirm callers invoke this from
    within a running loop.
    """
    loop = asyncio.get_event_loop()
    if asyncio.iscoroutine(target):
        # Already-created coroutine object: schedule it as-is.
        task = loop.create_task(target)
    elif asyncio.iscoroutinefunction(target):
        # Coroutine function: call it with args, then schedule.
        task = loop.create_task(target(*args))
    else:
        # Blocking callable: run it in the default thread executor.
        task = loop.run_in_executor(None, target, *args)
    return task
|
590bce904241c598e742d6c7370ebf2563aba5f1
| 690,451
|
def index_dependent_values(self, chromosome):
    """Test of the GA's ability to improve fitness when the value is index-dependent.
    If a gene is equal to its index in the chromosome + 1, fitness is incremented.
    """
    # Count genes whose value equals their 1-based position.
    return sum(1 for position, gene in enumerate(chromosome)
               if gene.value == position + 1)
|
20df3cc47ef566f34fbdba4eca84a85795a02408
| 662,450
|
def check_type_and_size_of_param_list(param_list, expected_length):
    """
    Ensure that param_list is a list with the expected length. Raises a helpful
    ValueError if this is not the case.
    """
    # Bug fix: the original used ``assert`` for input validation, which is
    # silently stripped under ``python -O``; validate explicitly instead.
    if not isinstance(param_list, list) or len(param_list) != expected_length:
        msg = "param_list must be a list containing {} elements."
        raise ValueError(msg.format(expected_length))
    return None
|
0a4fdb3271dc9fdd0a3258f56e248f0f1a4e439b
| 311,754
|
import torch
def get_obs_y_dict(select_pairs, x_a, x_s):
    """
    Input
    -------
    select_pairs: pairs of (alpha, beta) selected
    x_a: array holding avoidance count for all dogs & all trials, example for 30 dogs & 25 trials, shaped (30, 25)
    x_s: array holding shock count for all dogs & all trials, example for 30 dogs & 25 trials, shaped (30, 25)
    Output
    -------
    Dict keyed by each (alpha, beta) tuple, holding the observed y values
    exp(alpha * x_a + beta * x_s) for that pair.
    """
    # One observed-y tensor per selected (alpha, beta) pair.
    return {(alpha, beta): torch.exp(alpha * x_a + beta * x_s)
            for alpha, beta in select_pairs}
|
a0e7b86cb010ca00ba9dd53befdcd64fa7abad01
| 129,582
|
import multiprocessing
import functools
def in_separate_process(func):
    """Decorator that runs a function in a separate process, to force garbage collection
    collection upon termination of that process, limiting long-term memory usage.
    Parameters
    ----------
    func : function
        Function to run in a separate process. Per multiprocessing spec,
        must be declared in global scope.
    Returns
    -------
    function
        wrapped function
    See Also
    --------
    multiprocessing
        Python multiprocessing module, for writing parallel programs
    """
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        # One-way pipe: the child sends the result back to the parent.
        recv_pipe, send_pipe = multiprocessing.Pipe(False)
        def temp_func(conn, args, kwargs):
            # Runs in the child process; result must be picklable.
            result = func(*args, **kwargs)
            conn.send(result)
            conn.close()
        proc = multiprocessing.Process(target=temp_func, args=(send_pipe, args, kwargs))
        proc.start()
        # NOTE(review): join() before recv() can deadlock if the result
        # exceeds the pipe buffer — confirm results are small.
        proc.join()
        result = recv_pipe.recv()
        recv_pipe.close()
        return result
    return new_func
|
ccdbba74dc6637928d53d5aad19a2a274f8c0ae4
| 139,062
|
from typing import List
def ensure_size(arr: List, default, size: int) -> List:
    """Ensures the size of an array by using the default
    when an element does not exist.
    """
    # Copy existing elements up to `size`; pad the remainder with default.
    result = []
    for index in range(size):
        if index < len(arr):
            result.append(arr[index])
        else:
            result.append(default)
    return result
|
8cc6d4274f156e0ebc8615f37f46200075feba1b
| 382,024
|
import base64
def _dbase64_encode(b):
"""Internal helper to encode bytes using our base64 variant.
This is like urlsafe base64 encode but strips the trailing '='
padding. Also, it returns a string, not a bytes object.
"""
bb = base64.urlsafe_b64encode(b)
ss = str(bb.decode('ascii'))
s = ss.rstrip('=') # Remove padding.
return s
|
b7c35f5708e5607f4edbf126733c608f6969a579
| 354,870
|
import string
def num_Ponctuation(Instr):
    """Return the number of punctuation characters in ``Instr``.

    Note: the original built the complement (non-punctuation) string and
    contained a dead branch — ``if "." in nonpuc`` could never be true,
    since '.' is punctuation and was always filtered out — so counting
    punctuation directly is behavior-equivalent and clearer.
    """
    return sum(1 for ch in Instr if ch in string.punctuation)
|
b2cfea49828d483fb4b0033fb52cdfedfe9e8d7b
| 549,625
|
def create_bool_mask(mask, label, ignore_label):
    """Creates a boolean mask for plant region or individual class.
    Args:
        mask: Array representing a mask with integer labels
        label: String, which label to return. 'plant_region' returns mask for whole plant.
        ignore_label: Integer, pixel value of label to exclude.
    Returns:
        Array of type numpy.bool_
    """
    if label == 'plant_region':
        # Whole plant: every labeled pixel (> 0), minus the ignore label
        # when one is given.
        bool_mask = (mask > 0) if ignore_label is None else (mask > 0) & (mask != ignore_label)
    else:
        # NOTE(review): this branch selects pixels EQUAL to ignore_label
        # rather than anything derived from `label` — confirm this is the
        # intended semantics for the non-plant_region case.
        bool_mask = (mask == ignore_label)
    return bool_mask
|
5b8bafba20364e373b4699bd285d638568c8c4b0
| 498,346
|
def create_paper(paper_id, paper_title, paper_abstract, paper_year, paper_citations):
    """Initialize a paper."""
    # Every new paper starts non-influential, unclustered, and with empty
    # reference/author lists.
    paper = dict(
        id=paper_id,
        title=paper_title,
        abstract=paper_abstract,
        year=paper_year,
        is_influential=False,
        citations=paper_citations,
        cluster=0,
        references=[],
        authors=[],
    )
    return paper
|
d1094c23549dc6d2108c0b08e79b8ad60c8343e6
| 167,014
|
def isnumber(*args):
    """Checks if value is an integer, long integer or float.
    NOTE: Treats booleans as numbers, where True=1 and False=0.
    """
    # bool is a subclass of int, so booleans pass by design.
    return all(isinstance(value, (int, float)) for value in args)
|
6ee42132a517c0d2d7c3b745a2faa2c18693c02f
| 143,340
|
import ssl
import aiohttp
def get_os_session(*, os_cacert, insecure, log):
    """
    Returns a secure - or insecure - HTTP session depending
    on configuration settings.
    Works for both HTTP and HTTPS endpoints.

    :param os_cacert: path to a CA bundle; when set, a TLS-verifying
        session is returned.
    :param insecure: when true (and no cacert is given), certificate
        verification is disabled.
    :param log: logger used for warning/error messages.
    :returns: an aiohttp.ClientSession; NOTE(review): implicitly returns
        ``None`` when neither cacert nor insecure is provided — confirm
        callers handle that case.
    """
    if os_cacert:
        ssl_context = ssl.create_default_context(cafile=os_cacert)
        return aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context))
    if insecure:
        # Deliberate opt-out of certificate verification; loudly logged.
        log.warning('Insecure connection to OpenStack')
        return aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False))
    log.error('No cacert provided and insecure parameter not specified')
|
50cb11785a20d7828fa72f474f8a326064503e90
| 98,195
|
def reverse_word(word):
    """
    description: reverse the characters in a word
    input:
        'word': the word to be reversed
    output:
        the reversed word
    """
    # A negative-step slice walks the string back to front.
    return word[::-1]
|
4bc72fc43db0f07766c035f79bd9815d2d6502db
| 203,074
|
import itertools
def all_segments(N):
    """Return (start, end) pairs of indexes that form segments of tour of length N."""
    # end starts at start+2: adjacent pairs (end == start+1) are excluded,
    # matching the original combinations-with-filter in the same order.
    return ((start, end)
            for start in range(N)
            for end in range(start + 2, N))
|
993c93a182cf2fc0be4d25962144842c0c1db181
| 142,140
|
def shash(s):
    """Get the shallow hash of a statement.

    Delegates to the statement's own ``get_hash(shallow=True)`` method.
    """
    return s.get_hash(shallow=True)
|
3e140937078fac8657d3bd51b12635b07545e1e2
| 246,673
|
def create_element(number, etype):
    """
    Create an element:
    Parameters
    ----------
    number : int
        Number of element
    etype : str
        Element type
    ::
        # Example
        create_element(1, "PLANE182") # -> ET,1,PLANE182
    """
    # :g formatting drops insignificant trailing zeros from the number,
    # matching the original %g behaviour.
    return f"ET,{number:g},{etype}"
|
ea9b1ddcdd8f6a33ea6d2f7a1f3cadb1b29219dd
| 354,007
|
import math
def calculate_zoom(fov, height=1.0):
    """Calculates the zoom (distance) from the camera
    with the specified FOV and height of image.
    :param float fov: The FOV to use.
    :param float height: The height of the image at the
        desired distance.
    :rtype: A float representing the zoom (distance) from the camera for the
        desired height at the specified FOV.
    :raise ZeroDivisionError: Raised if the fov is
        0.0.
    """
    # tan(fov/2) is 0 at fov == 0, preserving the documented
    # ZeroDivisionError.
    half_angle_tan = math.tan(fov / 2.0)
    return float(height) / half_angle_tan
|
ddd3fa00a0855c9900ee3ea33bb65379f182818b
| 572,062
|
def get_nested_value_by_path(nested_dict, path, default=None, mode='mix'):
    """
    Get a nested value of nested dict by path
    :param nested_dict: nested dict object
        {
            "club": [
                {
                    "manager": {
                        "last_name": "Lionel",
                        "first_name": "Messi"
                    }
                }
            ]
        }
    :param path: path to access the nested dict value
        "club/0/manager/first_name"
    :param default: default value
    :param mode: ['json', 'list', 'mix'] — 'json' uses dict .get, 'list'
        uses integer indexing, 'mix' tries an int index then falls back
        to .get
    :return: value of dict
        "Messi"
    """
    current = nested_dict
    try:
        for key in path.split('/'):
            if mode == 'json':
                current = current.get(key)
            elif mode == 'list':
                current = current[int(key)]
            elif mode == 'mix':
                # Bug fix: the bare ``except:`` here also swallowed
                # KeyboardInterrupt/SystemExit; catch only lookup failures.
                try:
                    current = current[int(key)]
                except (ValueError, TypeError, KeyError, IndexError):
                    current = current.get(key)
    except Exception:
        # Any traversal failure (bad path, wrong container type, ...)
        # yields the default.
        current = default
    # Falsy results (None, '', 0, ...) also fall back to the default.
    return current or default
|
ed7f94f516690737188d90d086835de0a914bf7e
| 215,323
|
def decode(s):
    """
    Run length decoding
    (str) -> str
    >>> decode('1B5W1B4W')
    'BWWWWWBWWWW'
    """
    out = []
    count_digits = ''
    for symbol in s:
        if symbol.isalpha():
            # Emit the letter count times; a letter with no preceding
            # digits emits nothing (matching the original behaviour).
            if count_digits:
                out.append(symbol * int(count_digits))
                count_digits = ''
        else:
            count_digits += symbol
    return ''.join(out)
|
6f092db0a6ab5058abe2165a6adb227b7865c097
| 592,840
|
def choose_condition(data, events, condition):
    """Filters out a specific condition from the data.
    :param data: data from which to extract conditions from
    :type data: numpy array
    :param events: event data of shape [trials x 4]
    :type events: numpy array
    :param condition: Condition to be filtered out of the data.
        Conditions that exist are:
        0 <=> Pronounced Speech
        1 <=> Inner Speech
        2 <=> Visualized Condition
    :type condition: string of integer specifying the condition
    :return: eeg data and event data
    :rtype: tuple of two numpy arrays
    """
    if type(condition) == str:
        # Normalize: drop spaces, lowercase, then compare the sorted
        # characters (order/spacing-insensitive, like the original).
        normalized = ''.join(sorted(condition.replace(' ', '').lower()))
        condition_codes = {
            ''.join(sorted('pronouncedspeech')): 0,
            ''.join(sorted('innerspeech')): 1,
            ''.join(sorted('visualizedcondition')): 2,
        }
        if normalized not in condition_codes:
            raise ValueError("The condition-string you provided is wrong!")
        condition = condition_codes[normalized]
    # Keep only trials whose condition column (index 2) matches.
    keep_pos = events[:, 2] == condition
    return data[keep_pos], events[keep_pos]
|
8fa8cc12a11561c3a69a067b1f302dd1ccaf1b31
| 661,747
|
def get_columns(c, table, verbose=False):
    """Get all columns in a specified table."""
    cursor = c.execute("select * from " + table)
    # Each cursor.description entry is a 7-tuple; the name is first.
    names = [description[0] for description in cursor.description]
    if verbose:
        print(names)
    return names
|
cdf0166b1057224cb1ba308b9f5f10b88d688891
| 126,220
|
import socket
def createTestSocket(test, addressFamily, socketType):
    """
    Create a socket for the duration of the given test.
    @param test: the test to add cleanup to.
    @param addressFamily: an C{AF_*} constant
    @param socketType: a C{SOCK_*} constant.
    @return: a socket object.
    """
    sock = socket.socket(addressFamily, socketType)
    # Register close() so the socket is released at test teardown.
    test.addCleanup(sock.close)
    return sock
|
3b8e9d63e29151adb1bd2d2c4e48d07cb1bd4e6a
| 663,796
|
import pickle
def read_pic(dir, file):
    """
    Function that reads a pickle variable file and returns it.
    Arguments:
        dir -- directory that contains the file
        file -- name of the pickle file
    Returns:
        x -- pickle variable contained in the file
    """
    # Fix: use a context manager so the file handle is closed even if
    # pickle.load raises (the original leaked the handle on error).
    with open(dir + '/' + file, 'rb') as f:
        return pickle.load(f)
|
18ab2f6764b86e919258829cace0d693077f9a41
| 215,295
|
def decode_extra_length(bits, length):
    """Decode extra bits for a match length symbol.

    :param bits: bit reader with a ``read(n)`` method; only consulted when
        the symbol carries extra bits.
    :param length: DEFLATE length symbol (257-285).
    :returns: the decoded match length (3-258) as an int.
    """
    if length == 285:
        # Symbol 285 is the fixed maximum length with no extra bits.
        return 258
    # Bug fix: '/' is true division on Python 3, so `extra` became a float,
    # bits.read() received a float count, and the computed length was a
    # float; the DEFLATE tables require integer arithmetic here.
    extra = (length - 257) // 4 - 1
    length = length - 254
    if extra > 0:
        ebits = bits.read(extra)
        length = 2**(extra+2) + 3 + (((length + 1) % 4) * (2**extra)) + ebits
    return length
|
64bc02f4c9ada7eb1fa38b58765832129470b15b
| 496,550
|
import time
def seconds_from_now_to_hhmm(seconds_from_now: int) -> str:
    """
    Takes a time in seconds and returns the time at which
    those seconds will elapse in the form of hh:mm
    """
    # Current wall-clock time as seconds since midnight.
    now_hrs, now_mins = map(int, time.strftime("%H:%M").split(':'))
    # Add the offset and wrap at 24 hours.
    total_secs = (seconds_from_now + now_hrs * 3600 + now_mins * 60) % (24 * 60 * 60)
    total_mins = total_secs // 60
    return "{:02d}:{:02d}".format(total_mins // 60, total_mins % 60)
|
9a3749403f2b46d20015f0ebf4361ee3f1153d5f
| 377,162
|
def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False,
                  default=None):
    """Returns the edge boundary of `nbunch1`.
    The *edge boundary* of a set *S* with respect to a set *T* is the
    set of edges (*u*, *v*) such that *u* is in *S* and *v* is in *T*.
    If *T* is not specified, it is assumed to be the set of all nodes
    not in *S*.
    Parameters
    ----------
    G : NetworkX graph
    nbunch1 : iterable
        Iterable of nodes in the graph representing the set of nodes
        whose edge boundary will be returned. (This is the set *S* from
        the definition above.)
    nbunch2 : iterable
        Iterable of nodes representing the target (or "exterior") set of
        nodes. (This is the set *T* from the definition above.) If not
        specified, this is assumed to be the set of all nodes in `G`
        not in `nbunch1`.
    keys : bool
        This parameter has the same meaning as in
        :meth:`MultiGraph.edges`.
    data : bool or object
        This parameter has the same meaning as in
        :meth:`MultiGraph.edges`.
    default : object
        This parameter has the same meaning as in
        :meth:`MultiGraph.edges`.
    Returns
    -------
    iterator
        An iterator over the edges in the boundary of `nbunch1` with
        respect to `nbunch2`. If `keys`, `data`, or `default`
        are specified and `G` is a multigraph, then edges are returned
        with keys and/or data, as in :meth:`MultiGraph.edges`.
    Notes
    -----
    Any element of `nbunch` that is not in the graph `G` will be
    ignored.
    `nbunch1` and `nbunch2` are usually meant to be disjoint, but in
    the interest of speed and generality, that is not required here.
    """
    # PERF FIX: iterate `nbunch1` rather than every node of `G`, so the
    # cost scales with the size of the node set (and membership tests run
    # against `G`, not against a possibly-list `nbunch1`).
    nset1 = {n for n in nbunch1 if n in G}
    # Here we create an iterator over edges incident to nodes in the set
    # `nset1`. The `Graph.edges()` method does not provide a guarantee
    # on the orientation of the edges, so our algorithm below must
    # handle the case in which exactly one orientation, either (u, v) or
    # (v, u), appears in this iterable.
    if G.is_multigraph():
        edges = G.edges(nset1, data=data, keys=keys, default=default)
    else:
        edges = G.edges(nset1, data=data, default=default)
    # If `nbunch2` is not provided, then it is assumed to be the set
    # complement of `nbunch1`. For the sake of efficiency, this is
    # implemented by using the `not in` operator, instead of by creating
    # an additional set and using the `in` operator.
    if nbunch2 is None:
        return (e for e in edges if (e[0] in nset1) ^ (e[1] in nset1))
    nset2 = set(nbunch2)
    return (e for e in edges
            if (e[0] in nset1 and e[1] in nset2)
            or (e[1] in nset1 and e[0] in nset2))
|
28af02017c9e166089f167a576cca59779da06ce
| 432,919
|
def parse_row(row):
    """Parse a raw result row into a dictionary keyed by field name."""
    field_names = ("year", "eventshort", "complevel", "matchnumber",
                   "red1", "red2", "red3", "blue1", "blue2", "blue3",
                   "redscore", "bluescore")
    # Positional mapping: field i comes from row[i].
    return {name: row[position] for position, name in enumerate(field_names)}
|
9a05599242679c22d5e0743c6851586e7b415a2c
| 477,511
|
def format_topic_code(topic_code: str) -> str:
    """Turn a snake_case topic code into title-cased, space-separated text."""
    spaced = topic_code.replace('_', ' ')
    return spaced.title()
|
30936e9b134f6b3c30b7daaf717ca0a8c732d587
| 257,579
|
def _process_results(results, keep_N, correlation_threshold=0, thresholds=None):
"""
Given the results of the CPA, output an array of arrays sorted by most likely candidate first and an array of
likely incorrect bytes
"""
possible_keys = []
likely_wrong = set()
# plot(results[0])
for index, r in enumerate(results):
r = abs(r)
rMax = r.max(1)
# did not reach the threshold, it's likely that something went wrong
# (for example, the key used in a previous step was incorrect)
threshold = thresholds[index] if thresholds is not None else correlation_threshold
print("\t\tbyte", index, ": correlation max", rMax.max(), "mean", rMax.mean(), "thresh", threshold)
if rMax.max() < threshold:
likely_wrong.add(index)
possible_keys.append(rMax.argsort()[::-1][:keep_N])
return possible_keys, likely_wrong
|
20efa02fddd05995fb395f8a059ebb4849db6c32
| 80,139
|
import logging
def get_mac_from_port(port, neutronclient):
    """Look up the current MAC address of a neutron port.
    :param port: neutron port
    :type port: neutron port
    :param neutronclient: Authenticated neutronclient
    :type neutronclient: neutronclient.Client object
    :returns: mac address
    :rtype: string
    """
    port_id = port['port']['id']
    logging.info("Trying to get mac address from port:"
                 "{}".format(port_id))
    # Re-fetch the port so the MAC reflects the server's current state.
    refreshed = neutronclient.show_port(port_id)
    return refreshed['port']['mac_address']
|
039feeb1a8950a50da2b1ccd9397c17c8f5849b7
| 481,900
|
def is_collection(collection):
    """Return ``True`` if passed object is Collection and ``False`` otherwise."""
    # Compare by class name to avoid importing the Collection class here.
    type_name = type(collection).__name__
    return type_name == 'Collection'
|
6c8c613a48106d1aae81010a536cc5165ebe7b33
| 460,027
|
def _gcd(num1: int, num2: int) -> int:
"""Get the GCD of the given two numbers"""
while num1 % num2 != 0:
old_num1 = num1
old_num2 = num2
num1 = old_num2
num2 = old_num1 % old_num2
return num2
|
1b6df1855fe018e7f7ae2e6638a1d0c69a8970c6
| 203,941
|
def get_agegroups_from_breakpoints(breakpoints):
    """
    Build model age-group strata from a list of age cut-offs.

    Args:
        breakpoints: The age group cut-offs.
    Returns:
        agegroups: Ordered list of strings naming each age group.
        agegroups_dict: Dict mapping each name to [lower, upper] bounds,
            where the last group is open-ended (upper bound infinity).
    """
    if len(breakpoints) == 0:
        # A single empty string is required by methods that iterate strata.
        return [''], {}
    agegroups = []
    agegroups_dict = {}
    lower = 0.
    for point in breakpoints:
        upper = float(point)
        label = '_age{}to{}'.format(int(lower), int(upper))
        agegroups_dict[label] = [lower, upper]
        agegroups.append(label)
        lower = upper
    # Final open-ended group above the last breakpoint.
    final_label = '_age{}up'.format(int(lower))
    agegroups_dict[final_label] = [lower, float('inf')]
    agegroups.append(final_label)
    return agegroups, agegroups_dict
|
c637d9d86582fb06dc9920fa362126b29353804a
| 432,092
|
def _sigma_from_hybrid(psfc, hya, hyb, p0=100000.):
"""Calculate sigma at the hybrid levels."""
# sig(k) = hya(k) * p0 / psfc + hyb(k)
# This will be in Pa
return hya * p0 / psfc + hyb
|
61a7751ba4195d9e3c73369d173dd947785ce26d
| 541,875
|
def _strip_after_pound(string):
"""
Treat "#" as a comment character and remove everything after.
"""
msg = ""
for char in string:
if char != "#":
msg = msg + char
else:
return msg
return msg
|
6a8052ce2e78283b21af9eabb40a2f695a0eb87b
| 285,744
|
import json
def analyse_line_dataturk_format(line):
    """
    Scan a json file line and return the file name and label.
    The format is specifically the one returned by the dataturks labelling
    platform.
    Args:
        line: string read from json file
    Returns:
        file_name: string with the name of the file in the json
        label: identified label value, or None when absent or empty
    """
    line_dict = json.loads(line)
    # The content field is "<prefix>___<file name>"; re-join in case the
    # file name itself contains "___".
    file_name = "___".join(line_dict["content"].split("___")[1:])
    label = None
    annotation = line_dict.get("annotation")
    if annotation is not None:
        if "labels" in annotation:
            labels = annotation["labels"]
            if len(labels) > 1:
                print("more than one label contained in file {}".format(file_name))
                #raise ValueError("more than one label contained in file {}".format(file_name))
            elif len(labels) == 0:
                print("No label contained in file {}".format(file_name))
                #raise ValueError("No label contained in file {}".format(file_name))
            else:
                label = labels[0]
        elif "label" in annotation:
            labels = annotation["label"]
            if len(labels) > 1:
                print("more than one label contained in file {}".format(file_name))
                #raise ValueError("more than one label contained in file {}".format(file_name))
                # this branch historically still takes the first label
                label = labels[0]
            elif len(labels) == 0:
                # BUG FIX: previously labels[0] was read unconditionally here,
                # raising IndexError on an empty label list.
                print("No label contained in file {}".format(file_name))
                #raise ValueError("No label contained in file {}".format(file_name))
            else:
                label = labels[0]
    return (file_name, label)
|
1154bfa98608f2fa8f1e30a7dad781e8e489453c
| 106,774
|
def artifact_version_unsupported(artifact_version, supported_major_version):
    """Build the error message for an unsupported decisioning artifact version."""
    return (f"The decisioning artifact version ({artifact_version}) is not supported. "
            "This library is compatible with this major version: "
            f"{supported_major_version}")
|
12228c330dbee93d8ffb1ab7e594dfce8dc0b83c
| 610,556
|
def basic_variant_dict(request):
    """Return a variant dict carrying the minimal required VCF fields."""
    variant = dict(
        CHROM='1',
        ID='.',
        POS='10',
        REF='A',
        ALT='C',
        QUAL='100',
        FILTER='PASS',
        FORMAT='GT',
        INFO='.',
    )
    # Fresh empty dict per call so callers can mutate it safely.
    variant['info_dict'] = {}
    return variant
|
ff4c15e1c5308176951812f2e259332d59ce5dc8
| 574,309
|
def create_short2element(elements):
    """
    Return a map from case-normalized short name to element description.
    Each description is rendered as "<long name> (<short name>)".
    """
    return {short.casefold(): f"{long} ({short})" for short, long in elements.items()}
|
d9b04f01660e972e398867a5485bddeaef02517b
| 491,594
|
def path_dictionary(path_dict):
    """Populate a dictionary listing all possible types of cells.

    Each key is a 4-tuple of booleans (open/closed in each direction);
    its value is the number of onward directions: a cell with n openings
    has n - 1 ways to continue (one opening is the way in). Fully closed
    cells are not included.
    """
    for north in (False, True):
        for east in (False, True):
            for south in (False, True):
                for west in (False, True):
                    openings = (north, east, south, west)
                    open_count = north + east + south + west
                    if open_count:
                        path_dict[openings] = open_count - 1
    return path_dict
|
1a3b815717e18f4261afc356b7fef8047d50721b
| 497,585
|
import re
def parse_bytes(bytestr):
    """Parse a string indicating a byte quantity into an integer, example format:
    536.71KiB, 31.5 mb, etc...  Unparsable input yields 0; ints pass through.
    Modified from original source at youtube-dl.common.
    """
    # Ints (including bools) need no parsing.
    if isinstance(bytestr, int):
        return bytestr
    try:
        # Remove spaces so "31.5 mb" parses like "31.5mb".
        cleaned = bytestr.replace(' ', '').lower()
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]\S*)?$', cleaned)
        if matchobj is None:
            return 0
        number = float(matchobj.group(1))
        unit = matchobj.group(2).lower()[0:1] if matchobj.group(2) else ''
        # ''.index('') == 0, so a missing unit means multiplier 1024**0 == 1.
        multiplier = 1024.0 ** 'bkmgtpezy'.index(unit)
        return int(round(number * multiplier))
    except (AttributeError, TypeError, ValueError):
        # BUG FIX: was a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit. Non-string input (AttributeError/
        # TypeError) and bad numbers (ValueError) still yield 0.
        return 0
c82752dc58e43e95e31783533690ef3738dd9b70
| 111,658
|
def explicit_no_context(arg):
    """Expected explicit_no_context __doc__"""
    # NOTE: docstring kept verbatim — it looks like an externally checked fixture.
    return f"explicit_no_context - Expected result: {arg}"
|
d9e5abd375768adf8c53a4f9b11f9f2d83fd39de
| 437,550
|
import re
def inline_whitespace(text: str) -> str:
    """Collapse multiple spaces or tabs within a string into one space
    character.
    Args:
        text: The input string.
    Returns:
        Text with collapsed spaces and tabs.
    """
    run_of_blanks = re.compile(r"[ \t]+")
    return run_of_blanks.sub(" ", text)
|
03799aa3a1f2f0b415687223ac87d812e2b7d1d3
| 445,313
|
def removeGame(gtitle: str) -> str:
    """Return a query to remove a given game from the database.

    SECURITY FIX: single quotes in the title are doubled (standard SQL
    string-literal escaping) so a title containing ' can no longer break
    out of the literal and inject SQL. NOTE(review): prefer a
    parameterized query at the execution site if the driver allows it.
    """
    safe_title = gtitle.replace("'", "''")
    return (f"DELETE FROM game "
            f"WHERE title='{safe_title}';"
            )
|
589096681b70232cbd199f7b2f5bd4685418888d
| 101,671
|
def log(rv):
    """
    Return the natural logarithm of a random variable by delegating to
    its own ``log`` method.
    """
    result = rv.log()
    return result
|
ff6db73f2bca02d9d298578f4c23166827a33f07
| 531,553
|
def _process_custom_formatters(formatters, columns):
"""Re-keys a dict of custom formatters to only use column indices.
Args:
formatters: A dict of formatters, keyed by column index or name.
columns: The list of columns names.
Returns:
A dict of formatters keyed only by column index.
"""
if not formatters:
return {}
# Check that all keys provided are valid column names or indices.
# Warn if something doesn't check out.
column_set = set(columns)
for col in formatters:
if isinstance(col, int) and col >= len(columns):
print(('Warning: Custom formatter column index %d exceeds total number '
'of columns (%d)') % (col, len(columns)))
if not isinstance(col, int) and col not in column_set:
print(('Warning: Custom formatter column name %s not present in column '
'list') % col)
# Separate out the custom formatters that use indices.
output_formatters = {
k: v for k, v in formatters.items() if isinstance(k, int)
}
for i, name in enumerate(columns):
# Attempt to find a formatter based on column name.
if name in formatters:
if i in output_formatters:
print(('Warning: Custom formatter for column index %d present, '
'ignoring formatter for column name %s') % (i, name))
else:
output_formatters[i] = formatters[name]
return output_formatters
|
266fc3e8d4f78e6c65d18bdc87591c2e5b0c688b
| 463,405
|
def _format_counters(counters, indent='\t'):
"""Convert a map from group -> counter name -> amount to a message
similar to that printed by the Hadoop binary, with no trailing newline.
"""
num_counters = sum(len(counter_to_amount)
for group, counter_to_amount in counters.items())
message = 'Counters: %d' % num_counters
for group, group_counters in sorted(counters.items()):
if group_counters:
message += '\n%s%s' % (indent, group)
for counter, amount in sorted(group_counters.items()):
message += '\n%s%s%s=%d' % (indent, indent, counter, amount)
return message
|
7d763a6ddd92e86e71628d3fa8a82e117077e51e
| 526,348
|
def pep440_version(date, ref, dirty=False):
    """Build a PEP440-compliant version number from the passed information."""
    suffix = ".dirty" if dirty else ""
    # date.__format__ delegates to strftime, so this matches %Y%m%d.
    return f"{date:%Y%m%d}+g{ref}{suffix}"
|
984c51b60f13ff3b8f86fb0ad5e29458ce81f337
| 426,606
|
def format_notes(section):
    """Format the "Notes" section."""
    assert len(section) == 1
    body = section[0].strip()
    return f'!!! note "Notes"\n    {body}'
|
639fce05e775ad76bb906cba8ba902fc4f6f5ac8
| 98,021
|
from typing import OrderedDict
def _deep_convert_dict(layer):
"""Helper function to convert dictionary back from OrderedDict"""
to_ret = layer
if isinstance(layer, OrderedDict):
to_ret = dict(layer)
try:
for key, value in list(to_ret.items()):
to_ret[key] = _deep_convert_dict(value)
except AttributeError:
pass
return to_ret
|
0404aaf7278fbc8a118602e4bd71022d6ddb467c
| 272,983
|
def distinct_brightness(dictionary):
    """Given the brightness dictionary returns the dictionary that has
    no items with the same brightness."""
    seen_levels = set()
    result = {}
    # Keep only the first character encountered for each brightness value.
    for glyph, level in dictionary.items():
        if level in seen_levels:
            continue
        seen_levels.add(level)
        result[glyph] = level
    return result
|
7f7bb5dba9bab113e15cc4f90ddd4dfda7bb5f01
| 12,826
|
def convert_to_image_file_format(format_str):
    """Converts a legacy file format string to an ImageFileFormat enum value.
    Args:
        format_str: A string describing an image file format that was passed to
            one of the functions in ee.data that takes image file formats.
    Returns:
        A best guess at the corresponding ImageFileFormat enum name.
    """
    if format_str is None:
        return 'AUTO_JPEG_PNG'
    normalized = format_str.upper()
    legacy_aliases = {
        'JPG': 'JPEG',
        'AUTO': 'AUTO_JPEG_PNG',
        'GEOTIFF': 'GEO_TIFF',
        'TFRECORD': 'TF_RECORD_IMAGE',
    }
    # Unrecognized values (probably "JPEG" or "PNG", possibly another
    # supported format) pass through for the server to validate.
    return legacy_aliases.get(normalized, normalized)
|
d42affe700f8bf3639d9a713d24e32548c4ac932
| 501,441
|
def lerp(v0, v1, t):
    """
    Linearly interpolate between v0 and v1 with parameter t, 0 ≤ t ≤ 1.
    """
    # Same expression order as v0 + (v1 - v0) * t to keep float results identical.
    delta = v1 - v0
    return v0 + delta * t
|
dda9cb7532ad59eab4fa67716e0f940452b0cf88
| 613,274
|
import re
def check_email_valid(submission):
    """
    Return True when *submission* matches a minimal e-mail shape
    (something@something.something), else False.
    """
    return bool(re.match(r"[^@]+@[^@]+\.[^@]+", submission))
|
1528df90d59c4e0cedc8c030acec1cfcd6984e64
| 19,760
|
def check_login(session):
    """
    Check whether the given session represents a logged-in user.
    :param session: current flask session
    :return: True iff the session holds both a google_token and a user_id
    """
    has_token = bool(session.get('google_token'))
    has_user = bool(session.get('user_id'))
    return has_token and has_user
|
cd5651ce622ffd108ea7d0b8c1c4f70b1b4947ab
| 32,571
|
import pkgutil
import encodings
def encoding_exists(encoding):
    """Check if an encoding is available in Python."""
    if not encoding:
        return False
    # Enumerate the codec modules shipped in the encodings package;
    # "aliases" is a helper module, not a codec.
    modules = {name
               for _, name, ispkg in pkgutil.iter_modules(encodings.__path__)
               if not ispkg}
    modules.discard("aliases")
    # Accept both "utf_8" and "utf-8" spellings.
    return encoding in modules or encoding.replace('-', '_') in modules
|
2e5d1bb114a15010523a9ed29636375fe2c6e87e
| 42,880
|
def parse_frontmatter(raw_text: str) -> dict:
    """
    Parser for markdown file front matter. This parser has the following features:
    * Simple key-value pairings (`key: value`)
    * Comma-separated lists between brackets (`list: ['value1', 'value2']`)
    * Keys are case insensitive
    Args:
        raw_text (str): String containing frontmatter (excluding fences)
    Returns:
        dict: A dictionary containing all the frontmatter key:value pairs
    """
    front_matter = {}
    for line in raw_text.split("\n"):
        if ":" not in line:
            continue
        # BUG FIX: split on the first ":" only (previously split(": ")
        # crashed with ValueError on "key:value" and on values that
        # themselves contain ": ", e.g. "time: 12:30").
        key, value = (item.strip() for item in line.split(":", 1))
        if value.startswith("[") and value.endswith("]"):
            value = [item.strip().strip("'\"") for item in value[1:-1].split(",")]
        front_matter[key.lower()] = value
    return front_matter
|
0b0411917084fc7ae6abcec028d6f7c15916b3c4
| 369,759
|
def get_zip_filename(book_id):
    """ Get the filename for a zip document with the book identifier. """
    # join (like +) requires str parts, preserving the TypeError on non-str input.
    return "".join((book_id, ".zip"))
|
2919ca946c887a4c51a98af2d7abea1c74816161
| 657,662
|
from typing import Union
def ppu2mpp(ppu: int, units: Union[str, int]) -> float:
    """Convert pixels per unit (ppu) to microns per pixel (mpp)
    Args:
        ppu (int):
            Pixels per unit.
        units (Uniont[str, int]):
            Units of pixels per unit. Valid options are "cm",
            "centimeter", "inch", 2 (inches), 3(cm).
    Returns:
        mpp (float):
            Microns per pixel.
    """
    # Microns contained in one of each supported unit; integer keys are
    # the TIFF ResolutionUnit tag values.
    unit_to_microns = {
        "centimeter": 1e4,  # 10,000
        "cm": 1e4,          # 10,000
        "mm": 1e3,          # 1,000
        "inch": 25400,
        "in": 25400,
        2: 25400,           # inches in TIFF tags
        3: 1e4,             # cm in TIFF tags
    }
    if units not in unit_to_microns:
        raise ValueError(f"Invalid units: {units}")
    return 1 / ppu * unit_to_microns[units]
|
078d3dbdb368174fed7bed7cc31470147dbed20b
| 339,580
|
import six
def split_address(address):
    """Split an e-mail address into local part and domain.

    Arguments:
        address -- the address string; must be a str.
    Returns:
        (local_part, domain) tuple; domain is None when the address
        contains no "@".
    Raises:
        AssertionError -- if address is not a str.
    """
    # MODERNIZATION: six.text_type is simply str on Python 3, so the
    # third-party six dependency is unnecessary for this check.
    assert isinstance(address, str), \
        "address should be of type %s" % str.__name__
    if "@" not in address:
        local_part = address
        domain = None
    else:
        # rsplit so only the final "@" separates the domain.
        local_part, domain = address.rsplit("@", 1)
    return (local_part, domain)
|
2576badcb48fbeb5e95cfb40a29e8f9029e432b7
| 561,724
|
def print_functions(s):
    """
    Define edge and point drawing functions.
    EXAMPLES::
        sage: from sage.graphs.print_graphs import print_functions
        sage: print(print_functions(''))
        /point %% input: x y
        { moveto
        gsave
        currentpoint translate
        0 0 2 0 360 arc
        fill
        grestore
        } def
        /edge %% input: x1 y1 x2 y2
        { moveto
        lineto
        stroke
        } def
    """
    # PostScript prologue emitted verbatim; appended to the caller's string.
    prologue = (
        "/point %% input: x y\n"
        "{ moveto\n"
        " gsave\n"
        " currentpoint translate\n"
        " 0 0 2 0 360 arc\n"
        " fill\n"
        " grestore\n"
        " } def\n\n\n"
        "/edge %% input: x1 y1 x2 y2\n"
        "{ moveto\n"
        " lineto\n"
        " stroke\n"
        " } def\n\n"
    )
    return s + prologue
|
8fb3772d60613cba8141f417976bce1230a8db29
| 333,207
|
def get_seconds_from_minutes(minutes: int) -> int:
    """Convert a duration expressed in minutes into seconds."""
    SECONDS_PER_MINUTE = 60
    return minutes * SECONDS_PER_MINUTE
|
1a4c7a85a500a6d7848537058518114e4a8eb93b
| 390,031
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.