content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def any_isinstance(items, cls):
    """`True` if any item is of type `cls`."""
    for element in items:
        if isinstance(element, cls):
            return True
    return False
def mands(datum):
    """Combines minutes ('m') and seconds ('s') of `datum` into total seconds."""
    seconds = datum['s']
    minutes = datum['m']
    # Minutes contribute only when positive, matching the original contract.
    return seconds + minutes * 60 if minutes > 0 else seconds
import re
def github_sanitize_filename(x):
    """
    Sanitize a filename by GitHub wiki conventions (see
    https://help.github.com/articles/adding-and-editing-wiki-pages-locally/#naming-wiki-files):
    * remove '\\/:*?"<>|'
    * change spaces to hyphens

    Fix: the original passed ``re.U`` positionally, where ``re.sub``
    interpreted it as the ``count`` argument (re.U == 32), silently
    limiting the removal to the first 32 offending characters.
    """
    return re.sub(r'[\/:*?"<>|]', '', x, flags=re.U).replace(' ', '-')
def check_form(row):
    """
    This function checks that the string the user entered is well-formed:
    length 6 or 7, letters at even positions, single spaces at odd positions.
    :param row: string, a string of words
    :return: boolean
    """
    if not 6 <= len(row) <= 7:
        return False
    for index, char in enumerate(row):
        space_expected = index % 2 == 1
        if space_expected and char != ' ':
            return False
        if not space_expected and not char.isalpha():
            return False
    return True
import torch
def norm(input, *args, **kwargs):
    """
    Returns the matrix norm or vector norm of a given tensor.
    Examples::
        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> t1 = torch.randn(3, 4)
        >>> t1
        tensor([[ 0.0363, -1.7385, 1.0669, 2.6967],
        [ 0.0848, 0.2735, 0.3538, 0.2271],
        [-0.1014, 1.1351, -0.5761, -1.2671]])
        >>> ttorch.norm(t1)
        tensor(3.8638)
        >>> tt1 = ttorch.randn({
        ... 'a': (2, 3),
        ... 'b': {'x': (3, 4)},
        ... })
        >>> tt1
        <Tensor 0x7f95f684f4a8>
        ├── a --> tensor([[-0.5012, 2.0900, 0.0151],
        │ [-0.5035, 0.2144, 0.8370]])
        └── b --> <Tensor 0x7f95f684f400>
        └── x --> tensor([[ 0.3911, 0.3557, -2.2156, 0.3653],
        [-0.3503, 1.2182, -0.2364, -0.2854],
        [-1.5770, -0.7349, 0.8391, -0.2845]])
        >>> ttorch.norm(tt1)
        <Tensor 0x7f95f684fa20>
        ├── a --> tensor(2.3706)
        └── b --> <Tensor 0x7f95f684f978>
        └── x --> tensor(3.2982)
    """
    # Thin delegate: all arguments are forwarded unchanged to torch.norm.
    # The tree-structured examples above presumably come from a dispatch
    # wrapper applied elsewhere (treetensor) — not visible in this block.
    return torch.norm(input, *args, **kwargs)
def gen_gamma(params, size, rg):
    """
    Generate bootstrap samples of microtubule catastrophe data for use with
    bebi103.bootstrap.draw_bs_reps_mle().

    parameters
    ----------
    params (array-like) : alpha and beta parametrizing the gamma distribution
    size (int) : number of values to draw from the distribution
    rg (generator) : random number generator (np.random.default_rng())

    returns
    -------
    np.array of the values drawn out of the gamma distribution parametrized by params
    """
    alpha, beta = params
    # rg.gamma takes (shape, scale); scale is the inverse of the rate beta.
    return rg.gamma(alpha, 1 / beta, size=size)
def isoddv1(n):
    """
    Is odd number
    >>> isoddv1(3)
    True
    """
    return n % 2 != 0
def get_lineup_number(players: list[dict], prompt: str) -> int:
    """Prompt repeatedly until the user enters a valid lineup number (1..len(players))."""
    while True:
        raw = input(prompt)
        try:
            number: int = int(raw)
        except ValueError:
            print("Invalid integer. Please try again.")
            continue
        if 1 <= number <= len(players):
            return number
        print("Invalid player number. " +
              "Please try again.")
def divide_java_sources(ctx):
    """Divide sources into plain java, generated java, and srcjars."""
    java_sources = []
    gen_java_sources = []
    srcjars = []
    for src in getattr(ctx.rule.attr, "srcs", []):
        for file in src.files:
            name = file.basename
            if name.endswith(".java"):
                bucket = java_sources if file.is_source else gen_java_sources
                bucket.append(file)
            elif name.endswith(".srcjar"):
                srcjars.append(file)
    return java_sources, gen_java_sources, srcjars
def watts_to_amps(watts: int) -> float:
    """Convert power consumption in watts to electric current in amps (at 220 V)."""
    amps = watts / 220.0
    return round(amps, 1)
def parse_cobalt_step_id(output, step_name):
    """Parse and return the step id from a cobalt qstat command

    :param output: output of qstat
    :type output: str
    :param step_name: the name of the step to query
    :type step_name: str
    :return: the step_id, or None if the step was not found
    :rtype: str
    """
    for line in output.split("\n"):
        fields = line.split()
        # Skip blank/whitespace-only lines; indexing them raised IndexError
        # in the original (qstat output typically ends with a newline).
        if not fields:
            continue
        if fields[0] == step_name:
            return fields[1]
    return None
def clean_code(code: str) -> str:
    """Replaces certain parts of the code, which cannot be enforced via the generator"""
    # Ordered substitutions applied sequentially.
    substitutions = (
        ("...", "default_factory=list"),
        ("type_class", "typeClass"),
        ("type_name", "typeName"),
        ("from core", "from easyDataverse.core"),
    )
    for old, new in substitutions:
        code = code.replace(old, new)
    return code
def remove_prepended_comment(orig_df):
    """
    Remove the '#' prepended to the first column name.
    Note: if there is no comment, this won't do anything (idempotency).
    """
    df = orig_df.copy()
    old_name = df.columns[0]
    new_name = old_name.replace('#', "")
    return df.rename(columns={old_name: new_name})
import re
def extract_seq(fasta, start, length):
    """
    Extract sequence from fasta according to given position

    Parameters
    -----
    fasta : str
        file name of fasta
    start : int
        offset of chromosome
    length : int
        length of chromosome sequence

    Returns
    -----
    str
        sequence from start to start + length, newlines removed
    """
    with open(fasta, 'r') as handle:
        handle.seek(start, 0)
        raw = handle.read(length)
    return raw.replace('\n', '')
def _undo_enumerate(_idx, row):
    """
    A :py:meth:`tf.data.Dataset.map` function for
    reversing the :py:meth:`tf.data.Dataset.enumerate()`
    transformation.
    This function takes a :py:class:`tf.data.Dataset`
    of shape ``(idx, row)`` and returns the ``row``.
    """
    # The enumeration index `_idx` is intentionally discarded.
    return row
import pickle
def load_res_file(meta, metric, version, target):
    """Locate the pickled result for a given metric, load it, and return it.

    Loads from: "data/<meta[version]['date']>/results/<target>/<metric>"

    :param meta: mapping of version -> {"date": ...}
    :param metric: result file name, e.g. "history"
    :param version: key into `meta`, e.g. "v0"
    :param target: results subdirectory, e.g. "mem_bin"
    :return: the unpickled object
    """
    datestring = meta[version]["date"]
    res_file = f"data/{datestring}/results/{target}/{metric}"
    # Use a context manager so the file handle is closed deterministically
    # (the original passed open(...) directly to pickle.load, leaking the
    # handle until garbage collection).
    with open(res_file, "rb") as handle:
        return pickle.load(handle)
def orm_to_selectable(q):
    """Normalize an ORM query into a selectable.

    Simply returns the query's ``selectable`` attribute — presumably a
    SQLAlchemy ``Query``; confirm against callers.
    """
    return q.selectable
def person_to_text(person):
    """Returns the person's information as string."""
    record_id = person.get_record_id()
    entry_date = person.entry_date.isoformat()
    return 'id=%s full_name=%s entry_date=%s' % (
        record_id,
        person.primary_full_name,
        entry_date,
    )
def strip_empty_leading_and_trailing_lines(s):
    """
    Removes all empty leading and trailing lines in the multi-line string `s`.
    """
    lines = s.split('\n')
    start = 0
    end = len(lines)
    # Advance past blank lines from both ends instead of repeatedly deleting
    # the head of the list.
    while start < end and not lines[start].strip():
        start += 1
    while end > start and not lines[end - 1].strip():
        end -= 1
    return '\n'.join(lines[start:end])
def datetime_to_estudent(date):
    """Take a datetime object and return a compatible string.
    (2017, 5, 4) -> 04-May-2017
    """
    return date.strftime('%d-%b-%Y')
def _get_team_membership_csv_headers(course):
"""
Get headers for team membership csv.
['user', 'mode', <teamset_id_1>, ..., ,<teamset_id_n>]
"""
headers = ['user', 'mode']
for teamset in sorted(course.teams_configuration.teamsets, key=lambda ts: ts.teamset_id):
headers.append(teamset.teamset_id)
return headers | 6565202b295cd530933a53209c02ba5da59074cd | 119,700 |
import torch
def sort_batch(seq_len):
    """Sorts torch tensor of integer indices by decreasing order.

    Returns (indices restoring original order, sorting indices,
    sorted lengths as a Python list).
    """
    with torch.no_grad():
        sorted_lens, sort_idx = seq_len.sort(descending=True)
        restore_idx = sort_idx.sort()[1]
    return restore_idx, sort_idx, sorted_lens.tolist()
from typing import Callable
import functools
from typing import Any
def kwargs_parameter_dict(func: Callable) -> Callable:
    """Decorator that passes all received `kwargs` as a keyword dictionary parameter.
    Arguments:
        func:
            The function to be decorated.
    Returns:
        The decorated function, which now has a new dictionary parameter called
        `parameter_dict` with all the original keyword arguments.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs) -> Any:
        """Wrapped function to be returned by the decorator.
        Returns:
            The original function evaluation.
        """
        # NOTE(review): kwargs are forwarded twice — bundled as
        # `parameter_dict` AND spread via **kwargs — so `func` must accept
        # both. A caller passing a kwarg literally named 'parameter_dict'
        # would raise TypeError (duplicate keyword argument).
        return func(*args, parameter_dict=kwargs, **kwargs)
    return wrapper
import zipfile
def _info_name(info):
"""
Returns a normalized file path for an archive info object
:param info:
An info object from _list_archive_members()
:return:
A unicode string with all directory separators normalized to "/"
"""
if isinstance(info, zipfile.ZipInfo):
return info.filename.replace('\\', '/')
return info.name.replace('\\', '/') | d446e2b59449e0e2843b923a98e5c00f9e4e9c90 | 119,711 |
from typing import List
def neteja_diccionari(diccionari: dict, *args: str) -> dict:
    """
    Remove the keys given in *args from the dictionary (in place) and
    return it.

    Args:
        diccionari (dict): Input dictionary (mutated).
        *args: Keys to remove; keys not present are silently ignored.

    Returns:
        dict: The same dictionary without the *args keys.
    """
    for key in args:
        diccionari.pop(key, None)
    return diccionari
def get_perc_99_len(input_length):
    """Get 99 percentile sequence length."""
    ordered = sorted(input_length)
    cutoff_index = len(ordered) * 99 // 100
    return ordered[cutoff_index]
def graph_input_slice(gr, sl):
    """ Produce a slice of the original graph dataset.
    Example: grs = graph_input_slice(gr, slice(500, 1000)) """
    return {key: value[sl] for key, value in gr.items()}
def read_lines(path, header=True):
    """Open the dataset file and return its lines (raw).

    NOTE(review): when ``header`` is False the FIRST line is dropped — the
    flag reads inverted, but the original behaved this way and callers may
    rely on it; preserved as-is.
    """
    with open(path, 'r') as handle:
        all_lines = handle.readlines()
    return all_lines if header else all_lines[1:]
def longest_seq(seqs):
    """
    Find the longest chain in the output of all_uninterrupted_seqs.
    Ties are resolved in favour of the later sequence (>= comparison).
    """
    best = []
    for candidate in seqs:
        if len(candidate) >= len(best):
            best = candidate
    return best
from typing import Dict
from typing import List
def add_toggle_completion(
    toggle_outcomes: Dict[str, Dict[str, bool]],
    subtasks: List[str]):
    """Create toggled outcomes/completions mapping
    for each toggle-able subtask.
    e.g.
    >>> add_toggle_completion({}, ['Click A', 'Click B', 'Click C'])
    >>> {
        'Click A': {'Click B': False, 'Click C': False},
        'Click B': {'Click A': False, 'Click C': False},
        'Click C': {'Click A': False, 'Click B': False}
    }
    """
    for current in subtasks:
        others = {other: False for other in subtasks if other != current}
        toggle_outcomes[current] = others
    return toggle_outcomes
def max_pow_2(number_peers):
    """Return the powers of 2, the last one being >= the number of peers."""
    powers = [1]
    while powers[-1] < number_peers:
        powers.append(powers[-1] * 2)
    return powers
def pairs_to_lists(pairs):
    """Convert a list of pairs into two collections of (rsid, loc) tuples.

    Removes any SNPs with an empty match list.

    Parameters
    ----------
    pairs : list
        From filter_by_distance():
        (rsid, loc, set((rsid, loc)))

    Returns
    -------
    snp_list : set of tuple
    comp_list : set of tuple
        (rsid, loc)
    """
    snp_set = set()
    comp_set = set()
    for rsid, loc, matches in pairs:
        if matches:
            snp_set.add((rsid, loc))
            comp_set.update(matches)
    return snp_set, comp_set
import random
def GetDimRandomVector(proj_matrix, proj_dim, dim):
    """
    Get the random vector for dimension 'dim'. If it's already in 'proj_matrix',
    then just return it. Otherwise, generate a new random vector of length
    'proj_dim' with values between -1 and 1 and cache it under 'dim'.

    @return list of length 'proj_dim' containing random values
    """
    # dict.has_key() was removed in Python 3; use the `in` operator instead.
    if dim in proj_matrix:
        return proj_matrix[dim]
    random.seed()  # Use default source for seed
    vector = [random.uniform(-1, 1) for _ in range(proj_dim)]
    proj_matrix[dim] = vector
    return vector
def calc_get_item(func, in_data, **kwargs):
    """[GetItem](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.get_item.html)
    Extract part of an array. This operation is zero FLOPs.
    Most tensor libraries have a view feature which doesn't actually create
    a new array unless necessary, but this is not considered in
    chainer-computational-cost.
    Memory read runs only for the necessary elements, so both memory
    read and write equal the size of the output tensor.
    | Item | Value |
    |:-------|:------|
    | FLOPs | $$ 0 $$ |
    | mread | $$ \| y \| $$ |
    | mwrite | $$ \| y \| $$ |
    | params | `slices`: list of slices, a slice is an int or a tuple with 3 elements |
    """ # NOQA
    x, = in_data
    y = x[func.slices]
    serialized = []
    for s in func.slices:
        if type(s) is slice:
            serialized.append((s.start, s.stop, s.step))
        else:
            serialized.append(s)
    return (0, y.size, y.size, {'slices': serialized})
def sum_of_squares(n):
    """ Calculate the sum of the squares of the first n natural numbers.

    Uses the closed form n(n+1)(2n+1)/6, which is O(1) instead of the
    original O(n) generator sum. Returns 0 for n <= 0, matching the
    original empty-range behaviour.
    """
    if n <= 0:
        return 0
    return n * (n + 1) * (2 * n + 1) // 6
def get_http_addr(config):
    """
    Get the address of the Proteus server
    Args:
        config (Config): Pytest config object containing the options
    Returns:
        str: Address of the Proteus server
    """
    host = config.getoption("hostname")
    port = config.getoption("http_port")
    return "{}:{}".format(host, port)
import torch
def get_alpha(z):
    """Calculates the waves dissipation profile, :math:`\\alpha(z)` as defined
    in (9) of Holton and Lindzen.

    Parameters
    ----------
    z : tensor
        Height [m]

    Returns
    -------
    tensor
        :math:`\\alpha (z)` [:math:`\mathrm{s^{-1}}`]
    """
    alpha = torch.zeros(z.shape)
    inside = (z >= 17e3) & (z <= 30e3)
    # Linear ramp from 1/21 at 17 km to 3/21 at 30 km; constant 1/7 elsewhere.
    alpha[inside] = (1 / 21) + (2 / 21) * (z[inside] - 17e3) / 13e3
    alpha[~inside] = 1 / 7
    seconds_per_day = 60 * 60 * 24
    return alpha / seconds_per_day
def does_contain_unicode(name: str) -> bool:
    """
    Check if name contains unicode characters.
    >>> does_contain_unicode('hello_world1')
    False
    >>> does_contain_unicode('')
    False
    >>> does_contain_unicode('привет_мир1')
    True
    >>> does_contain_unicode('russian_техт')
    True
    """
    # Equivalent to attempting an ASCII encode and catching the error.
    return not name.isascii()
def replace_strings(s, replacers):
    """Performs a sequence of find-replace operations on the given string.

    Args:
        s: the input string
        replacers: a list of (find, replace) strings

    Returns:
        a copy of the input string with all of the find-and-replacements made
    """
    result = s
    for find_str, replace_str in replacers:
        result = result.replace(find_str, replace_str)
    return result
def increment(x):
    """
    Add 1 to an integer
    """
    return x + 1
def file_from_link(link: str) -> str:
    """
    Given a URL of a hosted file, retrieve just the name of the file
    (the final path component).

    Fix: uses ``str.rsplit`` instead of the original reverse/find/reverse
    dance, which silently dropped the first character of inputs containing
    no '/' (``find`` returned -1).
    """
    return link.rsplit("/", 1)[-1]
def microsecond_to_cm(delta_microseconds: float) -> float:
    """
    Convert ticks_diff(ticks_us(), ticks_us()) -> centimeters
    - Divide by 2 (the sound wave went "out" then "back")
    - Divide by the speed of sound in microseconds
    Speed of sound: 343.2 meters/second = 1cm/29.1 microseconds
    :param float delta_microseconds: Pulse duration (microseconds)
    :returns float:
    """
    one_way_us = delta_microseconds / 2
    return one_way_us / 29.1
import re

# Pre-compiled pattern for "D.DDD" floats; the raw string avoids the
# invalid "\d" escape DeprecationWarning the original literals triggered,
# and hoisting the compile out of the loop avoids repeated cache lookups.
_FLOAT_RE = re.compile(r"\d\.\d+")

def load_log(log_file):
    """ Parses data from a log file instead of the JSON dump.

    Args:
        log_file: The file to load data from.
    Returns:
        List of the testing_loss, training loss, testing accuracy, and
        training accuracy. """
    testing_loss = []
    training_loss = []
    testing_acc = []
    training_acc = []
    for line in log_file.read().split("\n"):
        if "Training loss" in line:
            # This line contains the training loss and accuracy.
            loss, acc = [float(num) for num in _FLOAT_RE.findall(line)]
            training_loss.append(loss)
            training_acc.append(acc)
        elif "Testing loss" in line:
            # This line contains the testing loss and accuracy.
            loss, acc = [float(num) for num in _FLOAT_RE.findall(line)]
            testing_loss.append(loss)
            testing_acc.append(acc)
    return testing_loss, training_loss, testing_acc, training_acc
import itertools
def arrival_curve_steps(event_model, bruteforce=False):
    """Yields the steps of the arrival curve for the given event model. A is a step if f(A) != F(A+1).
    If bruteforce is true, compute the steps by invoking eta_plus. This is mainly useful for testing purposes"""
    def steps_from_delta_min():
        # The n-th step of the arrival curve is delta_min(n).
        for n in itertools.count(1):
            yield event_model.delta_min(n)
    def steps_from_eta_plus():
        "Computes the arrival curve steps by evaluating the arrival curve a lot"
        previous = None
        for n in itertools.count(1):
            current = event_model.eta_plus(n)
            if current != previous:
                yield n - 1
            previous = current
    return steps_from_eta_plus() if bruteforce else steps_from_delta_min()
def find_stream(stdin, argv):
    """Find a stream to use as input for filters.

    :param stdin: Standard in - used if no files are named in argv.
    :param argv: Command line arguments after option parsing. If one file
        is named, that is opened in read only binary mode and returned.
        A missing file will raise an exception, as will multiple file names.
    """
    assert len(argv) < 2, "Too many filenames."
    return open(argv[0], 'rb') if argv else stdin
import collections
def sort_typedef(typedef):
    """Apply special sorting rules to the type definition to improve readability.

    Sorts the fields of a type definition so that important fields such as
    'type' or 'name' come first and don't get buried beneath longer fields
    like 'message_typedef'. Keys of the 'message_typedef' are sorted
    recursively by field number.

    Args:
        typedef - dictionary representing a Blackboxprotobuf type definition
    Returns:
        A new OrderedDict containing the contents of the typedef argument
        sorted for readability.
    """
    PRIORITY_KEYS = [
        "name",
        "type",
        "message_type_name",
        "example_value_ignored",
        "field_order",
    ]
    result = collections.OrderedDict()
    for number, raw_def in sorted(typedef.items(), key=lambda item: int(item[0])):
        remaining = dict(raw_def)
        # Pull the priority keys to the front, in their canonical order.
        ordered_def = collections.OrderedDict(
            (key, remaining.pop(key)) for key in PRIORITY_KEYS if key in remaining
        )
        for key, value in remaining.items():
            ordered_def[key] = sort_typedef(value) if key == "message_typedef" else value
        result[number] = ordered_def
    return result
import math
def has_gotten_higher(values: list, allow_same: bool = True, EPS: float = 1e-6) -> bool:
    """
    Check if any later element is higher than the first element.
    Used for early stopping. None and NaN entries are ignored.
    ```
    if not has_gotten_higher(val_acc[-20:]):
        logger.info("Validation accuracy hasn't increased for 20 epochs. Stopping training..")
        raise Exception("Early stopping triggered.")
    ```
    """
    cleaned = [v for v in values if v is not None and not math.isnan(v)]
    if len(cleaned) <= 1:
        raise ValueError("Can't determine if values got higher with 0 or 1 value.")
    baseline = cleaned[0]
    # allow_same widens the comparison by EPS; otherwise it is tightened.
    tolerance = EPS if allow_same else -EPS
    return any(baseline < v + tolerance for v in cleaned[1:])
from typing import List
def prepare_data_split_list(data, n: int) -> List[int]:
    """
    Create list of sizes for splitting
    :param data: dataset
    :param n: number of equal parts
    :return: list of sizes
    """
    base = len(data) // n
    sizes = [base for _ in range(n)]
    # Any remainder goes onto the last part.
    sizes[-1] += len(data) - base * n
    return sizes
import math
def check_image_size(image_size, stride=32):
    """ Verify img_size is a multiple of stride s

    Args:
        image_size (int): Image length and width.
        stride (int, optional): Divide multiples of the step size. (default: ``32``)

    Returns:
        Return a new image size.
    """
    rounded = math.ceil(image_size / stride) * stride  # ceil gs-multiple
    if rounded != image_size:
        print(f"WARNING: --image-size {image_size} must be multiple of max stride {stride}, updating to {rounded}")
    return rounded
def calc_squashable_walkers_single_method(walker_weights, max_weight):
    """Calculate the maximum number of squashable walkers in a collection of
    walkers, that still satisfies the max weight constraint.

    We don't guarantee or know if this is a completely optimal solver
    for this problem but it is assumed to be good enough in practice
    and no harm comes from underestimating it, only a reduced potential
    performance.

    Fix: the original sorted ``walker_weights`` in place, reordering the
    caller's list as an undocumented side effect; a sorted copy is used now.

    Parameters
    ----------
    walker_weights : list of float
        The weights of the walkers (left unmodified).
    max_weight : float
        The maximum weight a walker can have.

    Returns
    -------
    n_squashable : int
        The maximum number of squashable walkers.
    """
    # To estimate the number of squashable walkers we sum weights starting
    # from the smallest walker. When adding the next walker would exceed
    # max_weight we stop; the answer is the merge-group size minus one
    # (one walker in any merge is kept, not squashed).
    n_squashable = 0
    # At least 2 walkers are needed to do a merge at all.
    if len(walker_weights) < 2:
        return n_squashable
    # Sort a copy so the caller's list is not mutated.
    weights = sorted(walker_weights)
    idx = 0
    sum_weights = weights[0]
    merge_size = 1
    while sum_weights <= max_weight:
        # If the next index would be out of bounds, stop: every walker fits.
        if idx + 1 >= len(weights):
            break
        idx += 1
        sum_weights += weights[idx]
        merge_size += 1
    else:
        # Loop condition failed (sum exceeded max_weight): the last walker
        # added does not fit in the merge group, so remove it from the count.
        merge_size -= 1
    n_squashable = merge_size - 1
    return n_squashable
def preprocess_text8(data):
    """
    Text8 dataset contains a lot of single-letter words.
    We have to remove those letters before working with the dataset
    """
    kept = [token for token in data.split(" ") if len(token) > 1]
    # Re-add the two legitimate one-letter English words at the end.
    kept.extend(["a", "i"])
    return " ".join(kept)
def generate_list_from_file(data_file: str) -> list:
    """
    Generate a list from a given file containing a single line of desired data, intended for IPv4 and passwords.

    Args:
        data_file: A file containing a single IPv4 address per line
    Returns:
        List of "http://<ip>" URLs
    """
    print("Generating data list from: {}".format(data_file))
    urls = []
    with open(data_file, 'r') as handle:
        for line in handle:
            address = line.strip('\n').strip(' ')
            urls.append("http://{}".format(address))
    return urls
def last_index(_list, _value):
    """Finds the index of the last occurrence of _value in _list.

    Args:
        _list (list): List of values
        _value (str): Value to be found

    Returns:
        int: Index of value in list

    Raises:
        ValueError: If value was not found in list
    """
    position_from_end = _list[::-1].index(_value)
    return len(_list) - position_from_end - 1
def findFace(halfEdge):
    """Find the first face on the left of halfEdge that is not degenerated.

    Skips degenerate two-edge loops, then returns the candidate face only
    if it has at least one truthy incident element; otherwise returns None.
    """
    # A two-edge cycle (next.next returns to the same half-edge) marks a
    # degenerate face; hop across it via the twin until a real one appears.
    while halfEdge.next.next is halfEdge:
        halfEdge=halfEdge.next.twin
    # The face counts as valid only if some incidentElements entry is truthy.
    for i in halfEdge.newFace.incidentElements:
        if i: return halfEdge.newFace
    return None
from typing import List
from typing import Dict
from typing import Tuple
def merge_frames(
    frames_per_sent: List[List[Dict]], tokens_per_sent: List[List[Tuple[int, int]]]
):
    """
    Merge the sentence level frame list into one, updating the refs.

    Note: frames are mutated in place — their "refs" lists are rewritten
    with the sentence's token offset added.
    """
    merged = []
    offset = 0
    for sent_frames, sent_tokens in zip(frames_per_sent, tokens_per_sent):
        for frame in sent_frames:
            for span in frame["targets"] + frame["entities"]:
                span["refs"] = [ref + offset for ref in span["refs"]]
            merged.append(frame)
        offset += len(sent_tokens)
    return merged
import re
def slugify(text, length_limit=None):
    """Takes a string and turns it into a slug.
    :Example:
    >>> slugify('Some (???) Title Somewhere')
    some-title-somewhere
    >>> slugify('Sly & the Family Stone')
    sly-and-the-family-stone
    >>> slugify('Happy birthday!', length_limit=4)
    happ
    """
    slug = text.lower()
    slug = re.sub('[.!@#\'$,?\(\)]', '', slug)
    slug = slug.replace('&', ' and ')
    slug = re.sub(' {2,}', ' ', slug)
    slug = slug.replace(' ', '-')
    while '--' in slug:
        slug = slug.replace('--', '-')
    if length_limit:
        slug = slug[:length_limit]
    return str(slug)
def calc_custom_metrics(y_true, y_pred, metrics):
    """
    Takes the true and predicted values and evaluates every metric on them.

    Metrics are passed as (<name>, func_ref) pairs; each function must
    accept the arguments y_true and y_pred. Returns a name -> value dict.
    """
    results = {}
    for name, metric_fn in metrics:
        results[name] = metric_fn(y_true, y_pred)
    return results
def _min(*args):
"""Returns the smallest non-negative argument."""
args = [x for x in args if x > -1]
if args:
return min(args)
else:
return -1 | 597e7a562aa4c1e6538341c762bc07d7b322576c | 119,810 |
from pathlib import Path
import _hashlib  # kept for backward compatibility with any external users
import hashlib

def get_hash(file: Path, first_chunk_only=False, hash_algo=hashlib.sha1):
    """Returns a hash of the provided Path_Obj.

    Can return the full-file hash or a hash of only the first 1024 bytes.

    Fix: the default hash constructor now comes from the public ``hashlib``
    module instead of the private ``_hashlib`` C extension.

    Args:
        file (Path_Obj): File to be hashed.
        first_chunk_only (bool, optional): Hash only the first 1024 bytes.
            Defaults to False.
        hash_algo (Hash Function, optional): Hash constructor to use.
            Defaults to hashlib.sha1.

    Returns: the binary digest (bytes)
    """
    hashobj = hash_algo()
    with open(file, "rb") as f:
        if first_chunk_only:
            hashobj.update(f.read(1024))
        else:
            # Read in fixed-size chunks so large files never sit fully in memory.
            for chunk in iter(lambda: f.read(1024), b""):
                hashobj.update(chunk)
    return hashobj.digest()
def groupMap(key_func, value_func, iterable):
    """
    groupMap(key_func: function, value_func: function, iter: iterable)
    group elements with key_func, transform values with value_func
    args:
        key_func = L x: x%2, value_func = L x: -x, iter = [1,2,3]
    return:
        {0:[-2], 1:[-1,-3]}
    """
    grouped = {}
    for item in iterable:
        grouped.setdefault(key_func(item), []).append(value_func(item))
    return grouped
from typing import Any
def str_capped(object: Any, max_len: int) -> str:
    """Return the string representation of `object` trimmed to `max_len`.

    Trailing ellipsis is added to the returned string if the original had
    to be trimmed. For max_len < 3 the result is just "..." (which may
    exceed the cap), matching the original behaviour.
    """
    text = str(object)
    if len(text) <= max_len:
        return text
    if max_len < 3:
        return "..."
    return text[:max_len - 3] + "..."
from datetime import datetime
def get_pregnancy_week(today, edd):
    """ Calculate how far along the mother's pregnancy is in weeks,
    given today's datetime and the expected due date string (YYYY-MM-DD). """
    due_date = datetime.strptime(edd, "%Y-%m-%d")
    weeks_until_due = (due_date - today).days / 7
    pregnancy_weeks = 40 - weeks_until_due
    # You can't be less than two weeks pregnant
    if pregnancy_weeks <= 1:
        return 2  # changed from JS's 'false' to achieve same result
    return pregnancy_weeks
def _is_chinese_char(char_code: int):
"""Checks whether char_code is the code of a Chinese character."""
# https://en.wikipedia.org/wiki/List_of_CJK_Unified_Ideographs,_part_1_of_4
if (
(char_code >= 0x4E00 and char_code <= 0x9FFF) # CJK Unified Ideographs
or (char_code >= 0x3400 and char_code <= 0x4DBF) # CJK Unified Ideographs Extension A
or (char_code >= 0x20000 and char_code <= 0x2A6DF) # CJK Unified Ideographs Extension B
or (char_code >= 0x2A700 and char_code <= 0x2B73F) # CJK Unified Ideographs Extension C
or (char_code >= 0x2B740 and char_code <= 0x2B81F) # CJK Unified Ideographs Extension D
or (char_code >= 0x2B820 and char_code <= 0x2CEAF) # CJK Unified Ideographs Extension E
or (char_code >= 0xF900 and char_code <= 0xFAFF) # CJK Compatibility Ideographs
or (char_code >= 0x2F800 and char_code <= 0x2FA1F) # CJK Compatibility Ideographs Supplement
):
return True
return False | b36aff17e96fe900300635274d36c594f2f3e24b | 119,824 |
def clean_sequence(seq):
    """Clean up provided sequence by removing space characters."""
    return ''.join(seq.split(' '))
def unquote(str):
    """Remove surrounding double quotes or angle brackets from a string."""
    if len(str) > 1:
        first, last = str[0], str[-1]
        if (first, last) in (('"', '"'), ('<', '>')):
            return str[1:-1]
    return str
def strip_lines(text):
    """
    Given text, collapse it onto a single line: replace every kind of
    line break with a space and trim the result.
    """
    flattened = text
    for newline in ("\r\n", "\r", "\n"):
        flattened = flattened.replace(newline, " ")
    return flattened.strip()
def get_sheets(workbook):
    """
    Return all worksheets of the workbook.

    Args:
        workbook: openpyxl.workbook.Workbook
    Returns:
        list, each item an openpyxl.worksheet.worksheet.Worksheet
    """
    return workbook.worksheets
def findNeighbors(atomId, adjMat):
    """
    Find the IDs of the neighboring atoms.

    ARGUMENTS:
      atomId - atom of interest
      adjMat - adjacency matrix for the compound
    """
    row = adjMat[atomId]
    # Any positive entry in the adjacency row marks a neighbor.
    return [neighbor for neighbor, bond in enumerate(row) if bond >= 1]
from pathlib import Path
def get_image_db(data_folder='data'):
    """Returns a mapping from test sets to ordered image filenames."""
    db = {}
    for listing in Path(data_folder).glob('*.images'):
        set_name = listing.name.replace('.images', '')
        with open(listing) as handle:
            db[set_name] = [set_name + '/' + line.strip() for line in handle]
    return db
import math
def pages_number(no_of_records: int, limit: int) -> int:
    """Return the total number of pages needed to hold all records.

    Args:
        no_of_records: total record count (non-negative).
        limit: page size (positive).

    Returns:
        The smallest page count whose combined capacity covers all records.
    """
    # Integer ceiling division: exact for arbitrarily large ints, unlike
    # math.ceil(a / b), which routes through float and loses precision
    # for values above 2**53.
    return -(-no_of_records // limit)
def flood_risk(station):
    """Return the flood risk category for *station* based on its relative water level.

    Returns one of "severe risk" (>= 2), "high risk" (>= 1),
    "moderate risk" (>= 0.75) or "low risk" (< 0.75).
    Returns None when the station lacks a ``relative_level`` attribute, or
    when the attribute is None (in which case a diagnostic is printed, as
    before).
    """
    if not hasattr(station, 'relative_level'):
        return None
    level = station.relative_level
    if level is None:
        # Preserve the original best-effort diagnostic for missing data.
        print("Station with latest level of None found")
        return None
    # The chain is ordered highest-first, so the redundant upper-bound
    # checks of the original (e.g. "< 2") are unnecessary.
    if level >= 2:
        return "severe risk"
    if level >= 1:
        return "high risk"
    if level >= 0.75:
        return "moderate risk"
    return "low risk"
def cover(X, itemset):  # could be called "rows_covered_by_itemset"
    """
    Select the rows of X that satisfy the itemset.
    An itemset is satisfied when all elements of the itemset are evaluated to 1 (True).

    Input:
        X: pandas DataFrame (all one-hot encoded)
        itemset: an iterable of column names representing an itemset
    Returns:
        X (subset): pandas DataFrame whose rows satisfy all itemset elements (have value 1)
    """
    satisfied = (X[itemset] == 1).all(axis=1)
    return X[satisfied]
from typing import List
def check_for_nones(ls: List) -> bool:
    """Return True if *ls* contains at least one None element.

    :param ls: list (or any iterable) to inspect
    :return: True when a None is present, False otherwise
    """
    # any() with an identity check replaces the manual loop-and-return.
    return any(item is None for item in ls)
def minimize_document(document):
    """
    Takes a document obtained directly from its json from the portal and strips
    it down to a subset of desired fields. The document @id is prepended to the
    attachment href
    """
    wanted_fields = ('document_type', 'urls', 'references')
    minimized = {name: document[name] for name in wanted_fields if name in document}
    if 'attachment' in document:
        minimized['attachment'] = document['@id'] + document['attachment']['href']
    return minimized
def shrink_image(img, bins):
    """YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y

    input:
        img: 2d array,
        bins: integer list, eg. [2,2]
    output:
        imgb: binned img
    """
    rows, cols = img.shape
    bin_x, bin_y = bins
    n_x = rows // bin_x
    n_y = cols // bin_y
    # Trim edge pixels that don't fill a whole bin, then average each bin.
    trimmed = img[: n_x * bin_x, : n_y * bin_y]
    return trimmed.reshape(n_x, bin_x, n_y, bin_y).mean(axis=(1, 3))
import torch
def sample_noise(batch_size, dim):
    """
    Generate a PyTorch Tensor of uniform random noise.

    Input:
    - batch_size: Integer giving the batch size of noise to generate.
    - dim: Integer giving the dimension of noise to generate.

    Output:
    - A PyTorch Tensor of shape (batch_size, dim) containing uniform
      random noise in the range (-1, 1).
    """
    # torch.rand draws from [0, 1); scale and shift into [-1, 1).
    uniform = torch.rand(batch_size, dim)
    return uniform * 2 - 1
def portfolio_value(pf):
    """Given a dictionary of stock values, return the total value.

    :param pf: mapping of ticker -> numeric value
    :return: sum of all values (0 for an empty portfolio)
    """
    # sum() consumes the values view directly; the intermediate list
    # comprehension of the original added nothing.
    return sum(pf.values())
def normalize_device(dev: str) -> str:
    """
    turns /dev/sdb to /dev/xvdb (if needed)
    """
    # Third path component, e.g. "sdb" from "/dev/sdb".
    tail = dev.split("/")[2]
    if not tail.startswith("sd"):
        return dev
    return f"/dev/xvd{tail[2:]}"
def leadingZeros(num):
    """Convert an integer to a 2-digit string, zero-padded on the left."""
    return f"{num:02d}"
def get_checklist(full_name):
    """Generate the names that may contain *full_name*.

    Notes:
        E.g. if full_name looks like "a.b.c", the result is
        ("a.b.c", "a.*", "a.b.*", "a.b.c.*"),
        so either the full name itself may be found, or when
        full_name is included in some *-import.

    Args:
        full_name: The full module name

    Returns:
        Tuple of possible "containers" (an empty list for empty input).
    """
    if not full_name:  # guard against nonsense
        return []
    parts = full_name.split(".")
    # Every dotted prefix of the name: "a", "a.b", "a.b.c", ...
    prefixes = [".".join(parts[: k + 1]) for k in range(len(parts))]
    return tuple([full_name] + [prefix + ".*" for prefix in prefixes])
def sanitize_basisname(name):
    """Function to return *name* in coded form, stripped of
    characters that confuse filenames, characters into lowercase,
    ``+`` into ``p``, ``*`` into ``s``, and ``(``, ``)``, & ``,``
    into ``_``.
    """
    # One C-level pass via str.translate replaces the five chained
    # replace() calls of the original; the mapping is identical.
    return name.lower().translate(str.maketrans('+*(),', 'ps___'))
def text_wrap(text, font, max_width):
    """Wrap text base on specified width.

    This is to enable text of width more than the image width to be display
    nicely.

    @params:
        text: str
            text to wrap
        font: obj
            font of the text
        max_width: int
            width to split the text with
    @return
        lines: list[str]
            list of sub-strings
    """
    lines = []
    # Text that already fits needs no splitting at all.
    if font.getsize(text)[0] <= max_width:
        lines.append(text)
        return lines
    words = text.split(' ')
    cursor = 0
    while cursor < len(words):
        current = ''
        # Greedily take words (each followed by a space) while the line
        # still fits within max_width.
        while cursor < len(words) and font.getsize(current + words[cursor])[0] <= max_width:
            current = current + words[cursor] + " "
            cursor += 1
        # A single word wider than max_width goes on its own line, as-is.
        if not current:
            current = words[cursor]
            cursor += 1
        lines.append(current)
    return lines
import pickle
def load_array(name):
    """
    Load and return the object stored in ``./savedata/<name>.pickle``.
    """
    path = './savedata/' + name + '.pickle'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def gen_batch_index(N_all, batch_size):
    """
    return list of index lists, which can be used to access each batch
    ---------------
    inputs:
        N_all: # of all elements
        batch_size: # of elements in each batch
    outputs:
        batch_index_list[i] is the indexes of batch i.
    ---------------
    notes:
        Python don't have out range check, the simpliest version could be:
            for _i in range(0, N_all, batch_size):
                yield range(_i, _i + batch_size)
    ---------------
    examples:
        >>> gen_batch_index(6,3) == [[0,1,2],[3,4,5]]
        True
        >>> gen_batch_index(7,3) == [[0,1,2],[3,4,5],[6]]
        True
        >>> gen_batch_index(8,3) == [[0,1,2],[3,4,5],[6,7]]
        True
    """
    # Materialize each batch as a list so the doctests above actually hold:
    # range(0, 3) == [0, 1, 2] is False, so the original (which appended
    # range objects) failed every documented example.
    return [
        list(range(start, min(start + batch_size, N_all)))
        for start in range(0, N_all, batch_size)
    ]
def _same_shape(tensor_list, start=0, end=-1):
    """Return whether tensors have same (sub-)shapes.

    Args:
        tensor_list ([torch.Tensor]): List of tensors whose shapes are compared.
        start (int, optional): The dimension to start comparing shapes.
        end (int, optional): The dimension until (including) shapes are compared.

    Raises:
        ValueError: If the tensors don't have identical (sub-)shapes.
    """

    def _subshape(shape):
        """Select the [start, end] portion of *shape* for comparison."""
        if end == -1:
            return shape[start:]
        return shape[start : end + 1]

    unique = {_subshape(tensor.shape) for tensor in tensor_list}
    if len(unique) != 1:
        raise ValueError(
            f"Got non-unique shapes comparing dims {start} to including {end}: {unique}"
        )
def is_string(value):
    """
    Tests the value to determine whether it is a string.

    :param any value:
    :return: True of the value is a string (an instance of the str class)

    >>> is_string( 'Hello' )
    True
    >>> is_string( ['Hello'] )
    False
    """
    value_is_str = isinstance(value, str)
    return value_is_str
def _get_menu_item(tag):
    """
    Convenience function to extract the menu item as a string from the HTML.

    :param bs4.element.Tag tag: the tag that contains the relevant text.
    :return str: The menu item, as a string.
    """
    # The item text is the last child of the tag's first <td>.
    raw = tag.td.contents[-1]
    return str(raw).strip().title()
def data_dir(request):
    """
    Return a :class:`py.path.local` object pointing to the "data" directory
    relative to the test file.
    """
    test_file_path = request.fspath
    return test_file_path.dirpath('data')
def create_new_label_columns(label_columns: list) -> list:
    """
    Extracts names for new label columns for one-hot-encoded data.

    Each input column expands to three stance columns, in order:
    ``<col>_none``, ``<col>_favor``, ``<col>_against``.
    """
    suffixes = ("none", "favor", "against")
    return [f"{column}_{suffix}" for column in label_columns for suffix in suffixes]
def is_number(s):
    """
    Checks if the parameter s is a number.

    :param s: anything
    :return: true if s is a number, false otherwise
    """
    try:
        float(s)
    except (ValueError, TypeError):
        # float() raises TypeError (not ValueError) for non-string,
        # non-numeric inputs such as None or a list; the docstring promises
        # "anything", so treat those as "not a number" instead of crashing.
        return False
    return True
def get_default_nrealizations(weights):
    """Return default number of realizations given input bitwise weights = the number of bits in input weights plus one."""
    total_bytes = sum(w.dtype.itemsize for w in weights)
    return 1 + 8 * total_bytes
import importlib
def convert_to_mate_resource(troposphere_resource, troposphere_mate_template):
    """
    This method converts ``troposphere.AWSObject`` to ``troposphere_mate.AWSObject``.

    :type troposphere_resource: AWSObject
    :rtype: Mixin
    """
    # Locate the troposphere_mate twin of the resource's class by swapping
    # the package prefix in its module path and importing that module.
    mate_module_name = troposphere_resource.__class__.__module__ \
        .replace("troposphere.", "troposphere_mate.")
    mate_module = importlib.import_module(mate_module_name)
    mate_class = getattr(mate_module, troposphere_resource.__class__.__name__)

    # Carry over the generic CloudFormation resource attributes; unset
    # (None) entries are skipped, matching the original's None-filter.
    kwargs = {}
    if troposphere_resource.title is not None:
        kwargs["title"] = troposphere_resource.title
    for attr in ("Metadata", "Condition", "CreationPolicy", "DeletionPolicy",
                 "DependsOn", "UpdatePolicy", "UpdateReplacePolicy"):
        attr_value = troposphere_resource.resource.get(attr)
        if attr_value is not None:
            kwargs[attr] = attr_value

    # Copy every property that is actually set on the original resource.
    properties = troposphere_resource.resource.get("Properties", {})
    for prop in troposphere_resource.props:
        prop_value = properties.get(prop)
        if prop_value is not None:
            kwargs[prop] = prop_value

    return mate_class(**kwargs)
import html
def format_mail(template: str, birthday: dict, ishtml: bool):
    """
    Formats the email template according to lambda triggering event.

    :param template: Email template.
    :param birthday: DynamoDB query result dict.
    :param ishtml: True if template is HTML. Linebreaks are changed accordingly.
    :return: Formatted email template.
    """
    first_name = birthday['FirstName']['S']
    header = "Happy birthday {}, from Kaustubh Khavnekar!".format(first_name)

    subtext = birthday['Subtext']['S']
    if ishtml:
        # Escape user-supplied text for HTML, then render newlines as tags.
        subtext = html.escape(subtext).replace('\n', '<br/>')
    else:
        subtext = subtext.replace('\n', '\r\n')

    # uuid.uuid4().hex
    replacements = {
        '{{header}}': header,
        '{{subtext}}': subtext,
        '{{unsubscribe-key}}': birthday['UnsubscribeKey']['S'],
    }
    for placeholder, value in replacements.items():
        template = template.replace(placeholder, value)
    return template
import math
def rate_to_phred33(rate):
    """Convert an error rate to a phred 33 character"""
    # Rates below 1e-4 are capped at quality 'I' (phred 40).
    if rate < 0.0001:
        return 'I'
    quality = int(-10 * math.log10(rate))
    return chr(quality + 33)
def matchNestedGrouping(lpat, ipat, rpat, fi, s, i):
    """Match nested grouping and return (start index, end index) if a match is found.

    Returns None if match failed.

    lpat -- Opening pattern (compiled regex); assumed to match at position i.
    ipat -- Inner opening pattern (compiled regex).
    rpat -- Closing pattern (compiled regex).
    fi -- Inner matcher: called with the inner opening match object, returns
          the index in s just past the nested group it consumed.
    s -- The subject string being scanned.
    i -- Index in s where the opening pattern begins.
    """
    # NOTE(review): if lpat does not match at i, firstMatch is None and
    # .end() raises AttributeError -- presumably callers guarantee the
    # opener matches here; confirm.
    firstMatch = lpat.match(s, i)
    j = firstMatch.end()
    innerMatch = ipat.search(s, j)
    lastMatch = rpat.search(s, j)
    while innerMatch or lastMatch:
        if innerMatch:
            # Delegate the nested group to fi, then re-scan for both the
            # next inner opener and the closer from the new position j.
            j = fi(innerMatch)
            innerMatch = ipat.search(s, j)
            lastMatch = rpat.search(s, j)
        elif lastMatch:
            # No inner opener remains: close the group at the first closer.
            return (i, lastMatch.end())
    # NOTE(review): an inner opener is consumed even when a closer occurs
    # earlier in s; that appears intentional for nesting semantics --
    # verify against callers.
    return None
def intervals_overlap(i1, i2):
    """Returns True if Interval i1 overlaps Interval i2. The intervals are considered to be closed, so the intervals
    still overlap if i1.end == i2.begin or i2.end == i1.begin.
    """
    # De Morgan of "not (i1 ends before i2 or i2 ends before i1)".
    return i2.begin <= i1.end and i1.begin <= i2.end
from datetime import datetime
def time_to_str(value: datetime) -> str:
    """Convert a ``datetime`` object to a JSON (JavaScript) string

    Formats the provided datetime object to a ``str`` that is compliant with the
    JSON schema. Example: `2020-01-01T10:10:10Z`.

    :param value: The ``datetime`` object to convert
    :type value: datetime
    :raises ValueError: If the provided value is not a valid datetime object
    :returns str: The JSON (JavaScript) compliant version of the date and time
    """
    # None fails the isinstance check too, so one test covers both cases.
    if not isinstance(value, datetime):
        raise ValueError("provided value is not a valid datetime object")
    return value.strftime("%Y-%m-%dT%H:%M:%SZ")
def hex_to_b64(hex_str):
    """Convert Hex string to Base64 string.

    :param hex_str: hexadecimal-encoded string, e.g. '49276d'
    :return: the same bytes re-encoded as a Base64 str
    """
    import base64
    # bytes.fromhex + base64.b64encode replaces the Python-2-only
    # str.encode('base64') codec, which does not exist on Python 3.
    raw = bytes.fromhex(hex_str)
    return base64.b64encode(raw).decode('ascii')
def group_reference(string):
    """
    Group reference number to have space after every fifth number
    e.g. 1 23456

    :param string: input string
    :return: modified string
    """
    compact = ''.join(string.split())
    total = len(compact)
    grouped = []
    for pos, ch in enumerate(compact):
        grouped.append(ch)
        # Insert a space after groups of five, counted from the right end.
        if (total - pos - 1) % 5 == 0:
            grouped.append(' ')
    return ''.join(grouped).strip()
def parse_user_params(default_params=None, user_params=None):
    """
    Parse a comma-separated list of key=value parameters, and populate a
    dictionary with the values.

    Enter: default_params: a dictionary with the default values.  Only
                           parameters listed in the dictionary are set.
                           Other parameters and value-less parameters are
                           returned separately.  Values within the default
                           dictionary are kept the same type as the
                           default.
           user_params: a comma-separated key=value list of parameters.
    Exit:  params: a dictionary with the user parameters combined with the
                   default parameters.
           other_params: a dictionary of parameters that were not present
                         in the default dictionary.
           items: a list of keys without values.
    """
    params = default_params.copy() if default_params else {}
    other_params = {}
    items = []
    if user_params:
        for param in user_params.split(','):
            try:
                key, value = param.split('=', 1)
                if key not in params:
                    # Bug fix: unknown keys were previously merged into
                    # params, leaving other_params always empty despite the
                    # documented contract above.
                    other_params[key] = value
                    continue
                # Coerce to the default's type (int/float); anything else
                # stays a string. A failed coercion falls into the except
                # clause and the raw item is recorded, as before.
                if isinstance(params[key], int):
                    params[key] = int(value)
                elif isinstance(params[key], float):
                    params[key] = float(value)
                else:
                    params[key] = value
            except Exception:
                if param.strip():
                    items.append(param.strip())
    return params, other_params, items
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.