content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from pathlib import Path
def join_legacy_read_path(sample_path: Path, suffix: int) -> Path:
    """
    Build the path to a sample read file using the old file naming
    convention, ``reads_<suffix>.fastq`` (eg. reads_1.fastq).
    :param sample_path: the path to the sample directory
    :param suffix: the read file suffix
    :return: the read path
    """
    filename = f"reads_{suffix}.fastq"
    return sample_path / filename
|
c5efb0da5ace242b916ea0515062190fb55346f5
| 47,164
|
def remove_hidden(names):
    """Remove (in-place) all strings starting with a '.' in the given list."""
    visible = [name for name in names if not name.startswith('.')]
    # Slice-assign so the caller's list object is mutated, as before.
    names[:] = visible
    return names
|
19ed2fc093f96038612c6eb5b943f89f9b91f452
| 47,166
|
import re
import operator
def is_valid(policy: str, password: str) -> bool:
    """
    Given a policy (e.g. `1-3 a`) and a password (e.g. `abcde`),
    determine if the password complies with the policy.

    The policy is satisfied when the policy character appears at exactly
    one of the two 1-based positions, but not both (an XOR).

    :param policy: string of the form "<pos1>-<pos2> <char>"
    :param password: the password to validate
    :return: True if exactly one of the two positions holds the character
    """
    char = policy[-1:]
    # Run the regex once instead of scanning the policy twice.
    positions = re.findall("[0-9]+", policy)
    pos_1 = int(positions[0])
    pos_2 = int(positions[1])
    # `!=` on two bools is XOR; no need for operator.xor.
    return (password[pos_1 - 1] == char) != (password[pos_2 - 1] == char)
|
aa513b29d7575e80ee962055e0933e173c18441c
| 47,168
|
def _convert2dict(file_content_lines):
    """
    Convert the corosync configuration file to a dictionary.

    :param file_content_lines: list of raw lines of the corosync.conf file
        (or a tail of it when called recursively for a nested block)
    :return: tuple ``(corodict, index)`` where ``corodict`` maps option names
        to values (nested ``{ ... }`` blocks become nested dicts) and
        ``index`` tells the recursive caller how many lines were consumed.
    """
    corodict = {}
    index = 0
    for i, line in enumerate(file_content_lines):
        stripped_line = line.strip()
        # Skip blank lines and comment lines.
        if not stripped_line or stripped_line[0] == '#':
            continue
        # Skip lines already consumed by a nested-block parse below.
        if index > i:
            continue
        line_items = stripped_line.split()
        if '{' in stripped_line:
            # Opening brace: parse the nested block recursively and record
            # how far it extends so the outer loop skips those lines.
            corodict[line_items[0]], new_index = _convert2dict(file_content_lines[i+1:])
            index = i + new_index
        elif line_items[0][-1] == ':':
            # Plain "key: value" option line.
            corodict[line_items[0][:-1]] = line_items[-1]
        elif '}' in stripped_line:
            # Closing brace ends this block; i+2 accounts for the one-line
            # offset of the recursive slice plus the brace line itself.
            return corodict, i+2
    return corodict, index
|
31ffb2449e8f4b52478fa79197b10dfc96b76f4a
| 47,170
|
import requests
def get_seed(pulse_url):
    """
    Given a pulse url does a GET request to the Random UChile API to get the seed given by that pulse.
    :param pulse_url: String representing the URL of the pulse.
    :return: A 512-bit random string that can be used as seed by a pseudo random generator.
    """
    payload = requests.get(pulse_url).json()
    return payload['pulse']['outputValue']
|
411746e0768599ea6d3e4fc23a5efc7482a18373
| 47,171
|
def map_zero_one(A):
    """
    Linearly rescale the array ``A`` to the interval [0, 1].

    The minimum of ``A`` maps to 0 and the maximum to 1.
    NOTE: a constant array (max == min) divides by zero, as in the
    original implementation.

    :param A: numeric array supporting elementwise arithmetic and
        ``.min()`` / ``.max()`` (e.g. a numpy array)
    :return: rescaled array of the same shape
    """
    # The original also computed an unused inverse-mapped `retval` and
    # carried several commented-out alternatives; both removed.
    lo, hi = A.min(), A.max()
    return (A - lo) / (hi - lo)
|
222e43212e1e6c1eab04f74db16cee2cd3b203b0
| 47,172
|
import re
def _is_pattern_match(re_pattern, s):
"""Check if a re pattern expression matches an entire string."""
match = re.match(re_pattern, s, re.I)
return match.group() == s if match else False
|
30b65a696c27b2141e50672775333642a1fb2b57
| 47,175
|
import torch
def double_features(f: torch.Tensor) -> torch.Tensor:
    """Duplicate every feature vector along the batch axis,
    (A, B, C, D) --> (A, A, B, B, C, C, D, D).

    Args:
        f: Feature vectors (n_batch, n_features)
    Returns:
        Feature vectors (2*n_batch, n_features)
    """
    return f.repeat_interleave(2, dim=0)
|
d7f1dca83d1933a7a550d3fadfbf2ff96fe79ad8
| 47,176
|
def flatten_dictionary(dictionary):
    """
    Flatten one level of nesting in a request's JSON dictionary.

    Input: a request's JSON dictionary output with nested dictionary
    Output: a flattened dictionary (format: key1.key2 = value2)
    """
    flat = {}
    for key, value in dictionary.items():
        if not isinstance(value, dict):
            flat[key] = value
            continue
        for subkey, subvalue in value.items():
            flat[key + '.' + subkey] = subvalue
    return flat
|
1ca7c9021360bc6c39fb1f3ba07794ac74831272
| 47,177
|
import torch
def _to_real(x: torch.Tensor) -> torch.Tensor:
"""View complex tensor as real."""
x = torch.view_as_real(x)
return x.view(*x.shape[:-2], -1)
|
8bb7b8db208bcfd433976236fa56ce42560b8adf
| 47,181
|
def compute_path_cost(nxobject, path):
    """
    Sum the edge weights along ``path``.

    :param nxobject: networkx-style graph; ``nxobject[u][v]["weight"]`` is the edge weight
    :param path: sequence of node identifiers
    :return: total weight over consecutive node pairs (0 for paths shorter than 2)
    """
    return sum(
        nxobject[src][dst]["weight"]
        for src, dst in zip(path, path[1:])
    )
|
01652396938f2431a0b8e077723e615680856e32
| 47,182
|
def parse_request(event):
    """
    Extract the product id from an API gateway event.

    Expects the event to carry a ``pathParameters`` dict containing the
    ``productId`` key/value pair.

    :param event: api gateway event
    :return: a dict containing the productId key/value
    :raises Exception: if ``pathParameters`` or ``productId`` is missing
    """
    if "pathParameters" not in event:
        raise Exception("Invalid event. Missing 'pathParameters'")
    path_parameters = event["pathParameters"]
    if "productId" not in path_parameters:
        raise Exception("Invalid event. Missing 'productId' in 'pathParameters'")
    return {"product_id": path_parameters["productId"]}
|
1f56867c5a15ea602f92d2c08c3410cc690b392b
| 47,184
|
def listify(l):
    """Encapsulate l with list[] if not."""
    if isinstance(l, list):
        return l
    return [l]
|
f029b8df4f4442ec8e3f16f7c92ec59fe6338cce
| 47,185
|
def getContainerName(ctx, containerFlavor):
    """ Get name of the container for a specific flavor """
    container = getattr(ctx.cf.ocp.containers, containerFlavor)
    return container.name
|
66baf4ab95b8a6a8a43fb70568140d46158b2e41
| 47,188
|
def check_image_in_supercell(site1, site2, supercell_size):
    """
    Checks whether site1 and site2 are periodic images of each other in the super cell structure given the size of the
    super cell
    :param site1: (Site) site in super cell
    :param site2: (Site) site in super cell
    :param supercell_size: (integer) side length of super cell (in unit cells)
    :return: (boolean) whether site1 and site2 are periodic images of each other in the super cell
    """
    # Sites are periodic images when every fractional-coordinate difference,
    # scaled by the supercell size, is a whole number. Round to 5 decimals
    # first to absorb floating-point noise.
    for axis in range(3):
        delta = site1.frac_coords[axis] - site2.frac_coords[axis]
        if not round(delta * supercell_size, 5).is_integer():
            return False
    return True
|
905ab373287586c34a6b19f23c5acdb91bc821d7
| 47,191
|
def formatAbn(abn):
    """Formats a string of numbers (no spaces) into an ABN."""
    if len(abn) != 11:
        # Not a valid ABN length: return unchanged.
        return abn
    groups = (abn[0:2], abn[2:5], abn[5:8], abn[8:11])
    return u' '.join(groups)
|
2746c206ee5156fa7939ed11f04af1865824ef8c
| 47,193
|
def disjoint_bounds(bounds1, bounds2):
    """Returns True if bounds do not overlap
    Parameters
    ----------
    bounds1: rasterio bounds tuple (xmin, ymin, xmax, ymax)
    bounds2: rasterio bounds tuple
    """
    separated_x = bounds1[0] > bounds2[2] or bounds1[2] < bounds2[0]
    separated_y = bounds1[1] > bounds2[3] or bounds1[3] < bounds2[1]
    return separated_x or separated_y
|
866162f11f293609a07179b1b688ddc10157e72a
| 47,196
|
import codecs
def encoding(argument):
    """
    Verifies the encoding argument by lookup.
    (Directive option conversion function.)
    Raises ValueError for unknown encodings.
    """
    try:
        codecs.lookup(argument)
        return argument
    except LookupError:
        raise ValueError('unknown encoding: "%s"' % argument)
|
4563ab18821d22f8613c82227a9b6192080e95bb
| 47,202
|
def has_linear_regression(args):
    """Returns whether some kind of linear regression option is set.

    Keeps the short-circuit `or` chain so the first truthy flag value
    itself is returned (not a coerced bool), as callers may rely on it.
    """
    single = args.linear_regression
    multiple = args.linear_regressions
    tagged = args.linear_regression_tag
    return single or multiple or tagged
|
459679a4489f30ee96496c1086e413441119f686
| 47,208
|
def _is_spark_step_type(step_type):
"""Does the given step type indicate that it uses Spark?"""
return step_type.split('_')[0] == 'spark'
|
31d367c44f1a856e21c25f03e78a4eab48fc3af8
| 47,211
|
def is_message(line, msg_number):
    """Return 'True' if 'line' contains the message identified by 'msg_number'.
    Parameters
    ----------
    line: A single line from the NEST CI build log file.
    msg_number: Message number string.
    Returns
    -------
    True or False
    """
    return msg_number in line
|
d1290a8b2e13955f2dcb6d59576d032c8f499165
| 47,212
|
def _clean_header_str(value: bytes) -> str:
"""Null-terminates, strips, and removes trailing underscores."""
return value.split(b'\x00')[0].decode().strip().rstrip('_')
|
e5776a36eeb36320aec8085f094d6d00f3c3a718
| 47,213
|
def binData(data, new_shape):
    """bin time-series.
    Parameters
    -----------
    data : nd array
        time-series (numTrials * time-points).
    new_shape : 1d array
        [numTrials, numBin]
    Returns
    -------
    binned_data : nd array
        binned time-series (numTrials * numBin).
    """
    n_trials, n_bins = new_shape[0], new_shape[1]
    trial_block = data.shape[0] // n_trials
    time_block = data.shape[1] // n_bins
    # Reshape into (trials, trial-block, bins, time-block) and collapse
    # both block axes by summation.
    grouped = data.reshape((n_trials, trial_block, n_bins, time_block))
    return grouped.sum(-1).sum(1)
|
1f27c77a695404c516aa19dc94d0d60ca6d455cd
| 47,216
|
def compose_batch_command_of_script(
    source, destination, script, particle, wait_jobs, suffix
):
    """
    Creates the slurm command of the 'cmd' script
    Parameters
    ----------
    source: str
        Source directory
    destination: str
        Destination directory
    script: str
        Script to be used in the slurm command. Either 'lstmcpipe_utils_move_dir' or 'lstmcpipe_utils_cp_config'
    particle: str
        Particle type for slurm job-naming
    wait_jobs: str
        Job-id to be used as dependency in the batched slurm command
    suffix: str
        Suffix to indicate the kind of job
    Returns
    -------
    batch_cmd: str
        Full slurm batch command ready to batched the script argument
    """
    job_tag = f"{particle}_{suffix}"
    wrapped_cmd = f"{script} -s {source} -d {destination}"
    return (
        f"sbatch --parsable -p short -J {job_tag} "
        f"-e slurm-{job_tag}.e -o slurm-{job_tag}.o "
        f'--dependency=afterok:{wait_jobs} --wrap="{wrapped_cmd}"'
    )
|
aa5ed37eeb0a75da60ea2c2100ba5bc3f02503f5
| 47,219
|
def check_greenlist_positions(curword: str, grn: list) -> bool:
    """
    Checks the greenlist positions to ensure every word has a green letter in the correct positions
    :param curword: The current word from the word pool
    :param grn: Array representing the correct letters ("0" means no green letter yet)
    :return: Bool - false if the word does not contain green letters, true otherwise
    """
    # A position only constrains the word once a green letter was recorded
    # there (i.e. the entry is not the "0" placeholder).
    return all(
        grn[pos] == "0" or curword[pos] == grn[pos]
        for pos in range(5)
    )
|
aad1e8267fd77134c9408c28724f6077f58ae984
| 47,221
|
def find_klt_for_frame(klt, klt_frames, i):
    """ Finds all KLT tracks appearing in a given frame """
    track_ids = klt_frames.get(i, [])
    return [klt[track_id] for track_id in track_ids]
|
108e0dc926ed175d8e5bde1bdceb7c7ed038571b
| 47,222
|
import math
def translate_point(point, angle, distance):
    """Translate a point a distance in a direction (angle, in radians)."""
    x, y = point
    dx = math.cos(angle) * distance
    dy = math.sin(angle) * distance
    return (x + dx, y + dy)
|
a32c4209cad97fc670c18acb47c27ec7fbc8bc5c
| 47,228
|
import collections
def decode_attribute_idx_data(submissions):
    """Return a list of dicts representing the decoded data.
    Some of the form data returned from MTurk is encoded as
    ``"attribute-idx": value`` mappings, where attribute represents the
    attribute encoded and idx is the index of the problem instance. This
    function takes a list of dictionaries in the attribute-idx style and
    decodes them into the individual problem instances.
    Parameters
    ----------
    submissions : List[Dist[str, str]]
        The data to decode. Each submission must be formatted in the
        attribute-idx style.
    Returns
    -------
    List[Dist[str, str]]
        A list of dictionaries with each instance separated out
        individually.
    """
    decoded_rows = []
    for submission in submissions:
        # Group fields by their instance index, preserving first-seen order.
        per_instance = collections.defaultdict(dict)
        for field_name, field_value in submission.items():
            attribute, idx = field_name.rsplit('-', 1)
            per_instance[idx][attribute] = field_value
        decoded_rows.extend(per_instance.values())
    return decoded_rows
|
3a1853e18e1038e9c0891ad684b39c916e26fade
| 47,232
|
def get_index_with_default(header, column_name, default_value=None):
    """Helper function to extract the index of a column, or a default when absent."""
    try:
        return header.index(column_name)
    except ValueError:
        return default_value
|
0a2fefc8def6e6d91c4852d42da0db5ca4813a8c
| 47,233
|
import torch
def get_optimal_reference_mic(
    bf_mat: torch.Tensor,
    target_scm: torch.Tensor,
    noise_scm: torch.Tensor,
    eps: float = 1e-6,
):
    """Compute the optimal reference mic given the a posteriori SNR, see [1].
    Args:
        bf_mat: (batch, freq, mics, mics)
        target_scm (torch.ComplexTensor): (batch, freqs, mics, mics)
        noise_scm (torch.ComplexTensor): (batch, freqs, mics, mics)
        eps: value to clip the denominator.
    Returns:
        torch.LongTensor of shape (batch,): index of the reference mic with
        the highest a posteriori SNR (argmax over the mic axis).
    References
        Erdogan et al. 2016: "Improved MVDR beamforming using single-channel maskprediction networks"
        https://www.merl.com/publications/docs/TR2016-072.pdf
    """
    # Noise power per candidate reference mic: w^H Phi_nn w, reduced over
    # frequency and channels by the einsum; clamped to avoid division by ~0.
    den = torch.clamp(
        torch.einsum("...flm,...fln,...fnm->...m", bf_mat.conj(), noise_scm, bf_mat).real, min=eps
    )
    # A posteriori SNR per mic: beamformed target power over noise power.
    snr_post = (
        torch.einsum("...flm,...fln,...fnm->...m", bf_mat.conj(), target_scm, bf_mat).real / den
    )
    # Guard against NaN/Inf leaking out of the SCMs; assert is a sanity
    # check only (stripped under -O).
    assert torch.all(torch.isfinite(snr_post)), snr_post
    return torch.argmax(snr_post, dim=-1)
|
16f2c7b5b91987e487c8643b61628c2e8ea5f9d4
| 47,237
|
import torch
def _get_model_analysis_input(cfg, use_train_input):
"""
Return a dummy input for model analysis with batch size 1. The input is
used for analyzing the model (counting flops and activations etc.).
Args:
cfg (Config): the global config object.
use_train_input (bool): if True, return the input for training. Otherwise,
return the input for testing.
Returns:
Args: the input for model analysis.
"""
rgb_dimension = 3
if use_train_input:
input_tensors = torch.rand(
rgb_dimension,
cfg.DATA.NUM_INPUT_FRAMES,
cfg.DATA.TRAIN_CROP_SIZE,
cfg.DATA.TRAIN_CROP_SIZE,
)
else:
input_tensors = torch.rand(
rgb_dimension,
cfg.DATA.NUM_INPUT_FRAMES,
cfg.DATA.TEST_CROP_SIZE,
cfg.DATA.TEST_CROP_SIZE,
)
model_inputs = input_tensors.unsqueeze(0)
if cfg.NUM_GPUS:
model_inputs = model_inputs.cuda(non_blocking=True)
inputs = {"video": model_inputs}
return inputs
|
6a6b85e11182001a2596acda9bfbdbf3616311b8
| 47,242
|
import difflib
def diff(s1, s2):
    """Compute the difference between two strings normalized by the length
    of the longest of the two strings"""
    changed = sum(1 for entry in difflib.ndiff(s1, s2) if entry[0] != ' ')
    return changed / max(len(s1), len(s2))
|
6ce40f4b470f3a1062037ff31269ffd699dd4251
| 47,246
|
from typing import List
from typing import Tuple
def parse_data(file_name: str) -> List[List[Tuple[int, int]]]:
    """
    Read data from a file and save it into a nested list.
    Nested lists consists of two coordinates - 2 tuples, each with 2 digits.
    Eg. line 0,9 -> 5,9 will produce [ [ (0, 9), (5, 9) ] ]
    Args:
        file_name (str): name of file in folder / or absolute path
    Returns:
        List[List[Tuple[int, int]]]: a nested list of coordinates
    """
    coordinates: list = []
    with open(file_name, encoding="utf-8") as handle:
        for raw_line in handle:
            start_txt, end_txt = raw_line.rstrip().split("->")
            start = tuple(int(part) for part in start_txt.split(","))
            end = tuple(int(part) for part in end_txt.split(","))
            coordinates.append([start, end])
    return coordinates
|
a0dfafe9ab0081a85005db7caf31fd460fb0b221
| 47,247
|
import time
def blockingCalculation(a, b):
    """
    Returns a*b, slowly. This is an example of a function that
    blocks. Note that it has no special decorations -- this could
    just as easily be a standard python disk or network function. But
    time.sleep is enough.
    """
    # Simulate an expensive, blocking operation.
    time.sleep(2.0)
    return a * b
|
b385880016583e3a022de48f1bbb0ae32bd5dd76
| 47,250
|
from pathlib import Path
from textwrap import dedent
def _default_config(config_dir=None):
"""Default configuration Python file, with a plugin placeholder."""
if not config_dir: # pragma: no cover
config_dir = Path.home() / '.phy'
path = config_dir / 'plugins'
return dedent("""
# You can also put your plugins in ~/.phy/plugins/.
from phy import IPlugin
# Plugin example:
#
# class MyPlugin(IPlugin):
# def attach_to_cli(self, cli):
# # you can create phy subcommands here with click
# pass
c = get_config()
c.Plugins.dirs = [r'{}']
""".format(path))
|
bfce2eda98682734d9295e5a491c2946062d5f0e
| 47,255
|
import re
def find_failing_lines(exception_message):
    """ Parse which line is failing and return the line numbers.

    Scans a bytes error message for "row:col" locations and collects the
    second number (the column/line index) of the first location per line.
    """
    location_re = re.compile(br'\d+\:\d+', re.IGNORECASE)
    failing_lines = []
    for raw_line in exception_message.splitlines():
        locations = location_re.findall(raw_line)
        if not locations:
            continue
        row_col = re.findall(br'\d+', locations[0])
        failing_lines.append(int(row_col[1]))
    return failing_lines
|
eb865d50ebbbf85a7ab8dd8bf0ff362574842741
| 47,256
|
def _totalWords(dataset, index):
"""
Given a dataset, compute the total number of words at the given index.
GIVEN:
dataset (list) list of lists, where each sublist is a document
index (int) index in dataset to count words
RETURN:
total_words (int) total number of words in the dataset
"""
total_words = 0
for d in dataset:
words = d[index].split(" ")
total_words += len(words)
return total_words
|
10ad7f04da68310a5c0321c62203e6fc1a6e8cc7
| 47,258
|
def GetDeferGroups(env):
    """Returns the dict of defer groups from the root defer environment.
    Args:
      env: Environment context.
    Returns:
      The dict of defer groups from the root defer environment.
    """
    root = env.GetDeferRoot()
    return root['_DEFER_GROUPS']
|
d623ada67c1e49e00a678ce26f97b53f579c4379
| 47,260
|
import functools
def gen_to_list(func):
    """
    Transforms a function that would return a generator into a function that
    returns a list of the generated values, ergo, do not use this decorator
    with infinite generators.
    """
    @functools.wraps(func)
    def materialize(*args, **kwargs):
        generated = func(*args, **kwargs)
        return list(generated)
    return materialize
|
20c606bf73d97ff6a3d9e8a6b139cdca9c2bb8fa
| 47,262
|
def objects_init_params(fake_request):
    """Init parameters for Icinga2Objects."""
    result = {"name": "objectname", "type": "Objecttype", "attrs": {"state": 1, "host_name": "Hostname"}}
    return {
        "results": (result,),
        "request": fake_request,
    }
|
5087f66e1041b1f3dee28dcd09532db58240a484
| 47,268
|
def calcUserMeanRating(userRatingGroup):
    """ Calculate the average rating of a user.

    :param userRatingGroup: pair ``(userID, ratings)`` where ratings is a
        sequence of ``(item, rating)`` pairs
    :return: ``(userID, mean_rating)``; mean is 0.0 for an empty list
    """
    userID = userRatingGroup[0]
    ratings = userRatingGroup[1]
    if not ratings:
        return (userID, 0.0)
    # Start from 0.0 so the sum (and thus the mean) is always a float.
    total = sum((item[1] for item in ratings), 0.0)
    return (userID, total / len(ratings))
|
9a8a292b5a464d23aabfa4a73e631049546f2ff2
| 47,271
|
def subset(ds, X=None, Y=None):
    """Subset model output horizontally using isel, properly accounting for horizontal grids.
    Inputs
    ------
    ds: xarray Dataset
        Dataset of ROMS model output. Assumes that full regular grid setup is
        available and has been read in using xroms so that dimension names
        have been updated.
    X: slice, optional
        Slice in X dimension using form `X=slice(start, stop, step)`. For example,
        >>> X=slice(20,40,2)
        Indices are used for rho grid, and psi grid is reduced accordingly.
        Open-ended slices (``stop=None``) are supported.
    Y: slice, optional
        Slice in Y dimension using form `Y=slice(start, stop, step)`. For example,
        >>> Y=slice(20,40,2)
        Indices are used for rho grid, and psi grid is reduced accordingly.
        Open-ended slices (``stop=None``) are supported.
    Returns
    -------
    Dataset with form as if model had been run at the subsetted size. That is, the outermost
    cells of the rho grid are like ghost cells and the psi grid is one inward from this size
    in each direction.
    Notes
    -----
    X and Y must be slices, not single numbers.
    Example usage
    -------------
    Subset only in Y direction:
    >>> xroms.subset(ds, Y=slice(50,100))
    Subset in X and Y:
    >>> xroms.subset(ds, X=slice(20,40), Y=slice(50,100))
    """
    if X is not None:
        assert isinstance(X, slice), "X must be a slice, e.g., slice(50,100)"
        # The u grid has one fewer point than the rho grid, so trim the stop
        # index by one. An open-ended slice stays open: `None - 1` used to
        # raise a TypeError here.
        xstop = None if X.stop is None else X.stop - 1
        ds = ds.isel(xi_rho=X, xi_u=slice(X.start, xstop))
    if Y is not None:
        assert isinstance(Y, slice), "Y must be a slice, e.g., slice(50,100)"
        ystop = None if Y.stop is None else Y.stop - 1
        ds = ds.isel(eta_rho=Y, eta_v=slice(Y.start, ystop))
    return ds
|
eb454aad6a344ac76defd1a7314e52c1d948422f
| 47,276
|
def contains_sep(name):
    """ Test if name contains a mode name, e.g. TEM, ITG, ETG"""
    for mode in ("TEM", "ITG", "ETG"):
        if mode in name:
            return True
    return False
|
414497454191394783dec87e772fd5f7f0e3b717
| 47,280
|
def get_smallest_divs(soup):
    """Return the smallest (i.e. innermost, un-nested) `div` HTML tags."""
    smallest = []
    for div in soup.find_all("div"):
        # Innermost means no nested div; also skip whitespace-only divs.
        if not div.find("div") and div.text.strip():
            smallest.append(div)
    return smallest
|
f3181c7f3cd5b4c82f060780e23dcf34028316e8
| 47,281
|
def get_dict_values(dicts, keys, return_dict=False):
    """Get values from `dicts` specified by `keys`.
    When `return_dict` is True, return values are in dictionary format.
    Parameters
    ----------
    dicts : dict
    keys : list
    return_dict : bool
    Returns
    -------
    dict or list
    Examples
    --------
    >>> get_dict_values({"a":1,"b":2,"c":3}, ["b"])
    [2]
    >>> get_dict_values({"a":1,"b":2,"c":3}, ["b", "d"], True)
    {'b': 2}
    """
    # Membership test directly on the dict is O(1); the previous version
    # rebuilt `list(dicts.keys())` for every key and scanned it linearly.
    new_dicts = {key: dicts[key] for key in keys if key in dicts}
    if return_dict:
        return new_dicts
    return list(new_dicts.values())
|
f965fc1593381f771ba0a2bebc525bc8b11c3815
| 47,282
|
import json
def load_metadata(metadata_path: str = '/autograder/submission_metadata.json'):
    """Load JSON data from metadata file.

    :param metadata_path: path to the JSON metadata file.
    :return: the parsed JSON content.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the previous version relied on the garbage collector to close it.
    with open(metadata_path) as fp:
        return json.load(fp)
|
f0f55c530bff130c95c5d119a659339795682e13
| 47,288
|
from datetime import datetime
def datetimefstr(dtime_list, dtformat):
    """
    converts a datetime (as one or several string elements of a list) to
    a datetimeobject, removing the "used" elements from the list in place
    :returns: a datetime
    :rtype: datetime.datetime
    """
    # One list element per space-separated component of the format.
    n_parts = dtformat.count(' ') + 1
    dtstring = ' '.join(dtime_list[:n_parts])
    parsed = datetime.strptime(dtstring, dtformat)
    # Only consume the elements after a successful parse.
    del dtime_list[:n_parts]
    return parsed
|
d13ec79fa279a90b31b3a5679c8e2b6854a5c56c
| 47,289
|
def first_non_null(*args):
    """return the first non null value in the arguments supplied,
    or '' when every argument is the empty string (or none are given)"""
    return next((value for value in args if value != ''), '')
|
1169be1c179b7e4b0c753e4202045aa7b9a0d5c4
| 47,292
|
def limit_vector(vector, bottom_limit, upper_limit):
    """
    Cut a vector, keeping only the values between bottom_limit and upper_limit.
    Parameters
    ----------
    vector : list
        The vector that will be cut
    upper_limit : float
        The maximum value of the vector.
    bottom_limit : float
        The minimum value of the vector
    Returns
    -------
    vector : iterable
        The limited vector
    """
    return [value for value in vector if bottom_limit <= value <= upper_limit]
|
5c520f09e6caac08461cd6add911ea69a12afa72
| 47,293
|
import json
def xml_to_json(element, tag, prefix=''):
    """Converts a layer of xml to a json string. Handles multiple instances of the
    specified tag and any schema prefix which they may have.
    Args:
        element (:obj:`xml.etree.ElementTree.Element`): xml element containing the data
        tag (str): target tag
        prefix (str): schema prefix on the name of the tag
    Returns:
        (str): json of the original object with the supplied tag as the key and a list
            of all instances of this tag as the value.
    """
    tag = ''.join([prefix, tag])
    # Element.getiterator() was deprecated and removed in Python 3.9;
    # Element.iter() is the supported equivalent.
    all_data = [{field.tag[len(prefix):]: field.text
                 for field in fields.iter()
                 if field.tag != tag}
                for fields in element.iter(tag)]
    return json.dumps(all_data)
|
ef28c257d2b2f974569b431ab10c8aa7a2e6ab7a
| 47,294
|
def _to_http_uri(s: str) -> str:
"""Prefix the string with 'http://' if there is no schema."""
if not s.startswith(('http://', 'https://')):
return 'http://' + s
return s
|
5ad67d12bbfbca13143dfbacd4ea96f53b9919e9
| 47,297
|
def _is_iterable(val):
"""
Checks if the input if an iterable. This function will return False if a
string is passed due to its use in pmutt.
Parameters
----------
val : iterable or non-iterable
Value to check if iterable
Returns
-------
is_iterable : bool
True if iterable. False if not iterable or string.
"""
if isinstance(val, str):
return False
else:
# If it's not a string, check if it's iterable
try:
iter(val)
except TypeError:
return False
else:
return True
|
34f140c9bc6fce9f06f05c8b3a5aa1aabe3df840
| 47,300
|
def clean_locals(params):
    """
    Clean up locals dict, remove empty and self params.
    :param params: locals dicts from a function.
    :type params: dict
    :returns: cleaned locals dict to use as params for functions
    :rtype: dict
    """
    return {
        name: value
        for name, value in params.items()
        if value is not None and name != 'self'
    }
|
89ecf3376958bb191250397cc285a33028433fad
| 47,301
|
import random
import pathlib
def _random_name() -> str:
    """ Pick (hopefully unique) random name for a machine
    credit for the words_alpha.txt file https://github.com/dwyl/english-words
    """
    words_file = pathlib.Path(__file__).parent / "words_alpha.txt"
    # Read via a context manager so the file handle is closed deterministically
    # (the original left the file open). splitlines() replaces the fragile
    # "[:-1]" newline strip, which would eat a letter of a final line that
    # lacked a trailing newline.
    with open(words_file) as fh:
        words = fh.read().splitlines()
    return random.choice(words)
|
49af36e5443496ce1c017709e38016d9de78a4ae
| 47,309
|
def dict_drop(my_dict, keys):
    """
    You've guessed it right - returns a new dictionary with `keys`
    removed from `my_dict`
    """
    # A single non-list/tuple key is wrapped so the comprehension works.
    drop = keys if isinstance(keys, (list, tuple)) else [keys]
    return {name: value for name, value in my_dict.items() if name not in drop}
|
b51137581b826cf9042e432907d95ff2523742e5
| 47,310
|
def iterate_array(client, url, http_method='GET', limit=100, offset=0, params=None):
    """
    Fetch all objects from a paginated Podio API endpoint.

    Repeatedly requests pages of ``limit`` items, advancing ``offset`` until
    a short or empty page signals the end, and returns everything collected.

    NOTE(review): despite the historical wording ("provide a generator"),
    this function returns a complete list, not a generator.

    e.g. to read all the items of one app use:
        url = 'https://api.podio.com/comment/item/{}/'.format(item_id)
        for item in iterate_array(client, url, 'GET'):
            print(item)

    :param client: HTTP client exposing ``get``/``post`` (requests-like)
    :param url: endpoint URL
    :param http_method: 'GET' or 'POST'; anything else raises
    :param limit: page size requested from the API
    :param offset: starting offset
    :param params: optional extra parameters; ``limit``/``offset`` keys are
        overwritten (and the dict is mutated in place)
    :return: list of all decoded JSON elements
    :raises Exception: on unsupported method or non-200 response
    """
    all_elements = []
    if params is None:
        params = dict(limit=limit, offset=offset)
    else:
        params['limit'] = limit
        params['offset'] = offset
    do_requests = True
    while do_requests == True:
        if http_method == 'POST':
            api_resp = client.post(url, data=params)
        elif http_method == 'GET':
            api_resp = client.get(url, params=params)
        else:
            raise Exception("Method not supported.")
        if api_resp.status_code != 200:
            raise Exception('Podio API response was bad: {}'.format(api_resp.content))
        resp = api_resp.json()
        num_entries = len(resp)
        # A short (or empty) page means the last page was reached; the page
        # content is still appended below before the loop exits.
        if num_entries < limit or num_entries <= 0:
            do_requests = False
        params['offset'] += limit
        all_elements.extend(resp)
    # print(f"array of {len(all_elements)}")
    return all_elements
|
745e3209e5add0b0a8a64aea1878ac5b2897afb9
| 47,311
|
def get_appliance_info(tintri):
    """
    Get Tintri Appliance details
    Args:
        tintri (obj): Tintri object
    Returns:
        appliance: Dict of appliance details (product is None when the
        appliance is neither a VMstore nor a TGC)
    """
    info = tintri.get_appliance_info()
    if tintri.is_vmstore():
        product = 'Tintri VMstore'
    elif tintri.is_tgc():
        product = 'Tintri Global Center'
    else:
        product = None
    return {
        'product': product,
        'model': info.get('modelName'),
    }
|
fd774fa1052ea1c24fbdc5ec86db498fd6d02a10
| 47,313
|
def lowest_pending_jobs(backends):
    """Returns the backend with lowest pending jobs.

    Only backends whose status marks them available are considered;
    ties keep the original ordering (stable sort).
    """
    available = [b for b in backends if b.status.get('available', False)]
    available.sort(key=lambda b: b.status['pending_jobs'])
    return available[0]
|
7c0d6fa2234247edde3657f08ca8dc95d4ebd1a7
| 47,316
|
def key_type(secret):
    """Return string value for keytype depending on passed secret bool.
    Possible values returned are: `secret`, `public`.
    .. Usage::
        >>> key_type(secret=True)
        'secret'
        >>> key_type(secret=False)
        'public'
    """
    if secret:
        return "secret"
    return "public"
|
3f401b0f5ef3c95f6ee2ee6f1f81b9ee6ea0108d
| 47,319
|
def get_pad(shape, *, to):
    """Pad the shape to target size.
    Details
    -------
    Tries to pad symmetrically, but leans towards the origin
    if odd padding is required.
    """
    excess = [t - s for s, t in zip(shape, to)]
    assert all(e >= 0 for e in excess)  # safeguard
    pad = []
    # Last dimension first, to follow `F.pad`'s ordering.
    for extra in reversed(excess):
        before = extra // 2
        # `extra - before` equals `before + (extra % 2)`: the remainder
        # goes on the trailing side.
        pad.extend((before, extra - before))
    return pad
|
f019903d69e2a37ad0949f6eb1c06c3c1b6f0dbb
| 47,326
|
def get_auth_token(cloud, account, username, password, expected_status_code=None):
    """
    Get auth token from user login
    :param cloud: Cloud API object
    :param account: Account id
    :param username: User id
    :param password: Password
    :param expected_status_code: Asserts the result in the function
    :return: Auth token
    """
    response = cloud.iam.authenticate_user(
        account=account,
        username=username,
        password=password,
        expected_status_code=expected_status_code,
    )
    return response.json()['token']
|
a39044b68c8f0d70fc93c17ac2d57023994a4b51
| 47,329
|
import re
def format_text(text, max_len, prefix="", min_indent=None):
    """
    Format a text in the biggest lines possible with the constraint of a maximum length and an indentation.
    Args:
        text (`str`): The text to format
        max_len (`int`): The maximum length per line to use
        prefix (`str`, *optional*, defaults to `""`): A prefix that will be added to the text.
            The prefix doesn't count toward the indent (like a - introducing a list).
        min_indent (`int`, *optional*): The minimum indent of the text.
            If not set, will default to the length of the `prefix`.
    Returns:
        `str`: The formatted text.
    """
    # Collapse all whitespace runs so the text can be re-wrapped from scratch.
    text = re.sub(r"\s+", " ", text).strip()
    if min_indent is not None:
        if len(prefix) < min_indent:
            # Left-pad the prefix so the first line starts at min_indent.
            prefix = " " * (min_indent - len(prefix)) + prefix
    # Continuation lines are indented to align under the (padded) prefix.
    indent = " " * len(prefix)
    new_lines = []
    words = text.split(" ")
    current_line = f"{prefix}{words[0]}"
    for word in words[1:]:
        try_line = f"{current_line} {word}"
        if len(try_line) > max_len:
            # Adding this word would overflow: flush the current line and
            # start a new, indented one with the word.
            new_lines.append(current_line)
            current_line = f"{indent}{word}"
        else:
            current_line = try_line
    new_lines.append(current_line)
    return "\n".join(new_lines)
|
925f1075e5039dd876105c0a2e8e9b20e72af41d
| 47,331
|
import hashlib
import json
import base64
def hash_dict(obj):
    """
    Hashes the json representation of obj using sha-256 to have almost certain uniqueness.
    @param obj: dict to be hashed
    @return: sha256 b64 encoded hash of the dict
    """
    # sort_keys makes the JSON canonical, so equal dicts hash equally.
    canonical = json.dumps(obj, sort_keys=True).encode('utf-8')
    digest = hashlib.sha256(canonical).digest()
    return base64.b64encode(digest).decode('utf-8')
|
45628ec94d01e0aac6696b8d65627968754f7f4b
| 47,332
|
def determinant(tup):
    """Calculates the determinant of a tuple (m11, m12, m13, m21, m22, m23, m31, m32, m33)
    Args:
        tup (tuple of ints)
    Returns:
        det (int)
    """
    m11, m12, m13, m21, m22, m23, m31, m32, m33 = tup
    # Cofactor expansion along the first row (algebraically identical to
    # the rule-of-Sarrus form; exact for integer inputs).
    return (m11 * (m22 * m33 - m23 * m32)
            - m12 * (m21 * m33 - m23 * m31)
            + m13 * (m21 * m32 - m22 * m31))
|
4aadb49d7f9e10ad8d7134535aed29fb6864bcb7
| 47,338
|
def string_strip(lst):
    """
    Apply strip to each string in an iterable container of strings.
    ARGS:
        lst (list): an iterable containing strings
    """
    return [item.strip() for item in lst]
|
68b84f5fed2f903a0019042304da9b34b5e73aaf
| 47,339
|
def execute(opts, data, func, args, kwargs):
    """
    Directly calls the given function with arguments.

    :param opts: execution options (unused by this executor)
    :param data: payload (unused by this executor)
    :param func: callable to invoke
    :param args: positional arguments for ``func``
    :param kwargs: keyword arguments for ``func``
    :return: whatever ``func`` returns
    """
    return func(*args, **kwargs)
|
460d735b0259bf5dfd4b9ea5229b2495540cf2bf
| 47,343
|
def get_region_total_dispatch(m, region_id, trade_type):
    """
    Compute total dispatch in a given region for one trade type.

    NOTE(review): the original docstring claimed the result is rounded to
    two decimal places, but no rounding is performed here.

    :param m: optimisation model exposing ``V_TRADER_TOTAL_OFFER`` variables,
        the ``S_TRADER_OFFERS`` index set, and ``P_TRADER_REGION`` parameters
    :param region_id: region to aggregate over
    :param trade_type: offer type to aggregate over
    :return: sum of the matching traders' total-offer variable values
    """
    total = sum(m.V_TRADER_TOTAL_OFFER[i, j].value
                for i, j in m.S_TRADER_OFFERS
                if (j == trade_type) and (m.P_TRADER_REGION[i] == region_id))
    return total
|
7f98e830eb3a413be82cfd8d1410ceabcfd3b9f8
| 47,345
|
def fib(n):
    """
    Returns the nth Fibonacci number.

    Iterative implementation: O(n) time and O(1) space, replacing the
    naive double recursion which took exponential time.

    :param n: index; values below 2 are returned unchanged, matching the
        original recursive base case (including negative inputs)
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
|
ccbbab202e771d3344cd36b05cbb9a3565f43f78
| 47,346
|
import struct
def _pack(keyparts):
"""
Pack parts into a SSH key blob.
"""
parts = []
for part in keyparts:
parts.append(struct.pack('>I', len(part)))
parts.append(part)
return b''.join(parts)
|
3f9cb19a3ed46dd9204a2b88eeefccfb1eecb7f3
| 47,348
|
def clean_id(id):
    """
    Return id with initial and final whitespace removed, and
    with any internal whitespace sequences replaced by a single
    blank. Also, all nonprintable characters are removed.
    """
    id = id.strip()
    new_id = ""
    for c in id:
        # Normalise every whitespace character (tab, newline, ...) to a blank.
        if c.isspace():
            c = " "
        # Append the character unless it would create a run of blanks;
        # nonprintable characters are dropped entirely (note they do not
        # reset the blank-collapsing state).
        if (c != " " or (len(new_id)>0 and new_id[-1] != " ")) \
           and c.isprintable():
            new_id += c
    return new_id
|
0c9f455033de258879bff5ad305539ff636e5389
| 47,349
|
def actions_side_effect(state):
    """ Side effect for actions, returns the actions available for a given state
    :param state: the current state
    :return: actions available for the given state
    """
    available = {
        "a": ["a1", "a2", "a3"],
        "b": ["b1", "b2", "b3"],
        "c": ["c1", "c2", "c3"],
        "d": ["d1", "d2", "d3"],
    }
    # States e-m are leaves, so they have no possible actions.
    return available.get(state, [])
|
9361f4ac0be1d29f640387f03546fdc401bc1acc
| 47,356
|
import torch
def gpu_device(gpu):
    """
    Returns a device based on the passed parameters.
    Parameters
    ----------
    gpu: bool or int
        If int, the returned device is the GPU with the specified ID. If False, the returned
        device is the CPU. If True, the returned device is GPU 0.
        NOTE(review): the original docstring promised "the GPU with the
        highest amount of free memory" for True, but the code always
        selects device 0.
    Returns
    -------
    torch.device
        A PyTorch device.
    """
    # NOTE: the asserts below are sanity checks, not input validation —
    # they are stripped when running under `python -O`.
    if isinstance(gpu, bool) and gpu:
        assert torch.cuda.is_available()
        return torch.device('cuda', 0)
    if isinstance(gpu, bool):
        return torch.device('cpu')
    assert gpu < torch.cuda.device_count()
    return torch.device('cuda', gpu)
|
bb17d30af82f1d90fbc7bad7d9486947d76468a1
| 47,359
|
import torch
def torch_image_to_numpy(image_torch, inplace=False):
    """Convert a float CHW tensor in [0, 1] to a uint8 HWC numpy array in [0, 255].

    :param image_torch: PyTorch float tensor, channels-first (C, H, W).
    :param inplace: if True, scale the input tensor in place; otherwise work on a clone.
    :returns: numpy uint8 array, channels-last (H, W, C)."""
    work = image_torch if inplace else image_torch.clone()
    # Scale to [0, 255], round via +0.5, clamp, then move channels last.
    scaled = work.mul_(255).add_(0.5).clamp_(0, 255)
    return scaled.permute(1, 2, 0).to('cpu', torch.uint8).numpy()
|
1eb788a01c0052315f8f9adaa688502080470a38
| 47,361
|
def _strip_tweet_hashtags(status_text: str) -> str:
"""Strip out words from tweet that are hashtags (ie. begin with a #)."""
text_split = [word for word in status_text.split() if not word.startswith("#")]
text = " ".join(text_split)
return text
|
f359071115b12b1d8ad54aba39a838d3ee207ae7
| 47,363
|
def apply_to_feature(feature_df, groupby_func_name=None, function=None):
    """
    Aggregate the entries belonging to each feature.

    feature_df ... dataframe indexed by (chrom, feature_name, pos)
                   (such as the output of data_per_feature())
    groupby_func_name ... name of a groupby-object method to apply
                          (faster than passing a function object)
    function ... alternatively, a function object to apply per group

    Raises ValueError when neither argument is supplied.
    """
    # Group rows by the feature name, i.e. the second index level.
    by_feature = feature_df.groupby(lambda idx: idx[1])
    if groupby_func_name is not None:
        return getattr(by_feature, groupby_func_name)()
    if function is not None:
        return by_feature.apply(function)
    raise ValueError("Either groupby_func_name or function have to be given.")
|
4457641597303e2b422f84840c6e6fd2446b9c74
| 47,366
|
def getPrecedence(operator):
    """
    Precedence lookup for toReversePolish(); larger numbers bind tighter.

    :param operator: operator token data
    :return: precedence as an int, or None for an unrecognized token
    """
    precedences = {"^": 3, "*": 2, "/": 2, "+": 1, "-": 1}
    return precedences.get(operator)
|
44532c6bec002aea1596219b78ec029955db0694
| 47,367
|
def import_from_string(path):
    """
    Dynamically load and return the class (or other attribute) named
    by a dotted string, e.g. 'path.to.my.Class'.
    """
    module_path, attr_name = path.rsplit('.', 1)
    # fromlist forces __import__ to return the leaf module, not the root package.
    leaf_module = __import__(module_path, fromlist=[attr_name])
    return getattr(leaf_module, attr_name)
|
234799abfcaebf7cceb679168d436ae7596a2d30
| 47,371
|
import time
def _format_time(epoch_time, format_string="%Y-%m-%d %H:%M:%S"):
"""Return a formatted representation of an epoch timestmap"""
return time.strftime(format_string, time.localtime(epoch_time))
|
e04fa89ac516633282416e14e2327f8fae2c7b36
| 47,376
|
def if_none(value, default):
    """
    Return *value*, substituting *default* when value is None.
    """
    return default if value is None else value
|
7c7e83c2c633ae809a2b1a75a6e16fc7fee3eb21
| 47,377
|
def rtimport(name):
"""Imports a module, even within a package (via the
'package.module' naming convention, and returns a reference
to the module (or object within a module!). Can raise
the ImportError exception."""
# This can raise ImportError
obj = __import__(name)
components = name.split('.')
for comp in components[1:]:
try:
obj = getattr(obj, comp)
except AttributeError:
raise ImportError
return obj
|
a447af189af00d2bcdb8340fab3a0d6cb6e22aa0
| 47,383
|
import typing
import importlib
def import_object(path: str, default=None) -> typing.Any:
    """Import an object addressed as ``module_path:object_path``.

    Returns ``default`` when *path* is ``None`` — a convenience for
    settings that may be unset. The object path may be dotted to reach
    nested attributes.

    Examples::
        >>> import_object('dijkstar.graph:Graph')
        <class 'dijkstar.graph.Graph'>
        >>> import_object('dijkstar.graph:Graph.load')
        <bound method Graph.load of <class 'dijkstar.graph.Graph'>>
    """
    if path is None:
        return default
    module_path, object_path = path.split(":")
    target = importlib.import_module(module_path)
    # Walk the dotted attribute chain starting from the module.
    for attr in object_path.split("."):
        target = getattr(target, attr)
    return target
|
9c53a0616581a5958bad4b94d42cfe363e413cf8
| 47,385
|
from typing import Iterable
def get_entropy(prob_list: Iterable[float], info_list: Iterable[float]):
    """Expected information: sum of probability * information over paired events."""
    total = 0
    for prob, info in zip(prob_list, info_list):
        total += prob * info
    return total
|
6b3feb37d944e6fc3e971a38e4d91cf13c4f6e5d
| 47,390
|
def make_zero_based_midi(defs):
    """
    Shift the 1-based MIDI program numbers in *defs* down to 0-based,
    clamping at 0 so program 0 (piano) stays 0.

    Mutates *defs* in place and also returns it.

    :param defs: mapping of name -> dict with a 'program_numbers' list
    :return: the same mapping, with shifted program numbers
    """
    for name in defs:
        shifted = [max(num - 1, 0) for num in defs[name]['program_numbers']]
        defs[name]['program_numbers'] = shifted
    return defs
|
305b6e6b48b116a8d86ec02036a1218d8a88070d
| 47,392
|
def get_distribution_names(namespace_pairs, rv_base_class):
    """
    Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distributions (instances of rv_base_class).
    distn_gen_names : list of strings
        Names of the generators (subclasses of rv_base_class whose name
        ends in '_gen'). Note these need not be the distribution names
        with a _gen suffix added.
    """
    # Private names never count as distributions or generators.
    public = [(name, value) for name, value in namespace_pairs
              if not name.startswith('_')]
    distn_gen_names = [name for name, value in public
                       if name.endswith('_gen') and issubclass(value, rv_base_class)]
    distn_names = [name for name, value in public
                   if isinstance(value, rv_base_class)]
    return distn_names, distn_gen_names
|
a4b4f09c4629417951a90eb2737121eefeaa44fb
| 47,393
|
def clamp(n, vmin, vmax):
    """Clamp *n* into the closed interval [vmin, vmax].

    :param n: input value
    :param vmin: minimum value
    :param vmax: maximum value
    :returns: the clamped value of n
    """
    # Apply the upper bound first, then the lower bound (the lower
    # bound wins if the two bounds ever cross, matching max(min(...))).
    upper_bounded = min(n, vmax)
    return max(upper_bounded, vmin)
|
e46ba82598b5f5cb8bad1233edee576ead0c3fd8
| 47,394
|
def walsh_iob_curve(t, insulin_action_duration):
    """Returns the fraction of a single insulin dosage remaining at the specified number of minutes
    after delivery; also known as Insulin On Board (IOB).

    This is a Walsh IOB curve, based on an algorithm that first appeared in GlucoDyn
    (https://github.com/kenstack/GlucoDyn). Quartic fits exist only for
    DIA values of 180, 240, 300 and 360 minutes; any other duration
    yields 0 inside the active window.

    :param t: time in minutes since the dose began
    :type t: float
    :param insulin_action_duration: duration of insulin action (DIA), in minutes
    :type insulin_action_duration: int
    :return: fraction of the insulin dosage remaining at time t
    :rtype: float
    """
    # Quartic fit coefficients (a4, a3, a2, a1, a0) keyed by DIA in minutes.
    curve_coefficients = {
        180: (-3.2030e-9, 1.354e-6, -1.759e-4, 9.255e-4, 0.99951),
        240: (-3.310e-10, 2.530e-7, -5.510e-5, -9.086e-4, 0.99950),
        300: (-2.950e-10, 2.320e-7, -5.550e-5, 4.490e-4, 0.99300),
        360: (-1.493e-10, 1.413e-7, -4.095e-5, 6.365e-4, 0.99700),
    }
    if t >= insulin_action_duration:
        return 0.0
    if t <= 0:
        return 1.0
    coeffs = curve_coefficients.get(insulin_action_duration)
    if coeffs is None:
        # Unsupported DIA: original implementation fell through with 0.
        return 0
    a4, a3, a2, a1, a0 = coeffs
    return a4 * (t**4) + a3 * (t**3) + a2 * (t**2) + a1 * t + a0
|
8879243950d60805445518e4caa89edb5954d7d0
| 47,396
|
def estimate_mean(sample, values, weights=None):
    """
    estimate_mean(sample, values, weights=None)

    Based on a sample, estimate and return the average value over all
    existing items (a weighted, normalized estimate).

    Parameters
    ----------
    sample:  a sample of items (iterable)
    values:  function: item -> value
    weights: function: item -> sampling_weight of this item
             (None means uniform sampling weights)
    """
    if weights is None:  # `is None`, not `== None`: identity test for the sentinel
        # Uniform sample: every item carries equal weight.
        weights = lambda x: 1
    up = down = 0.
    for item in sample:
        w = weights(item)  # evaluate the weight once per item
        up += 1. * values(item) / w
        down += 1. / w
    return up / down
|
ca6a18d58b5c4d96cf67364244cdf84a54e96236
| 47,397
|
import base64
def base64_decode(content):
    """
    Decode a base64 string into the UTF-8 text it encodes.

    :param content: base64-encoded text
    :return: the decoded string
    """
    raw_bytes = base64.b64decode(content)
    return raw_bytes.decode('utf8')
|
5712d69af1ac0c18a13135de7c765abde711b6a1
| 47,400
|
def _sortByCreated(a, b):
"""Sort function for object by created date"""
if a.created < b.created:
return 1
elif a.created > b.created:
return -1
else:
return 0
|
e1a33ffaacc51ea3936f2b0824b11b8639718f85
| 47,401
|
def get_iterable(in_dict, in_key):
    """
    Similar to <dict>.get(), but a falsy value (None, False, 0, ...)
    or a missing key yields an empty tuple instead.

    :param in_dict: a dictionary
    :param in_key: the key to look up in in_dict
    :return: in_dict[in_key], or () when missing or falsy
    """
    value = in_dict.get(in_key)
    return value if value else ()
|
95f9dde329ea301e8bd68105d543f0d00e563bcd
| 47,403
|
def get_package_version(package):
    """
    Parse a Python package's ``__version__`` string into a list of
    integers, e.g. '1.7.2' -> [1, 7, 2].
    """
    return list(map(int, package.__version__.split('.')))
|
682eb4ffdba67d189997ceb629b06cb1ccb2a437
| 47,404
|
def getItemNames(fullname):
    """Split a colon-separated fullname into its hierarchy parts.

    Args:
        fullname(str): Fullname

    Returns:
        A 3-tuple of item names; missing trailing levels come back as
        None, and more than three parts yields (None, None, None).
    """
    parts = fullname.split(':')
    if len(parts) > 3:
        return None, None, None
    # Pad so callers can always unpack exactly three values.
    parts += [None] * (3 - len(parts))
    return parts[0], parts[1], parts[2]
|
949701e821eb4659e53131b0b61fa2ea248738a2
| 47,406
|
def train_test_split(windows, labels, size=.2):
    """
    Split parallel sequences of windows and labels into train/test partitions.

    :param windows: sequence of input windows
    :param labels: sequence of labels aligned 1:1 with windows
    :param size: fraction of the data reserved for the test split
    :return: (window_train, window_test, label_train, label_test)
    """
    split_size = int(len(windows) * (1 - size))
    window_train = windows[:split_size]
    window_test = windows[split_size:]
    # Bug fix: labels must be sliced from `labels`, not from `windows`.
    label_train = labels[:split_size]
    label_test = labels[split_size:]
    return window_train, window_test, label_train, label_test
|
0ec24566e09db53860afd1e628ec66385e7ce4a3
| 47,408
|
def options(cdiv=False, inline=False):
    """Decorator factory recording behavioural options that affect the
    generated code.

    :param cdiv: Set ``True`` to match C behaviour when performing integer division.
    :param inline: Set ``True`` to always inline the function.
    """
    def apply_options(decl):
        # Stash the options on the declaration for the code generator.
        decl.options.update(cdiv=cdiv, inline=inline)
        return decl
    return apply_options
|
c5647b3b83d04cea8f41bb9cdba0e3faf34e99dc
| 47,416
|
def is_fits(string):
    """
    Boolean test for whether the provided filename ends with a FITS
    extension (.fits or .fit, upper- or lowercase).

    Parameters
    ----------
    string: str
        (path to) filename to test

    Returns
    -------
    bool
    """
    # endswith accepts a tuple, replacing the explicit `or` chain.
    return string.upper().endswith((".FITS", ".FIT"))
|
2c31363998a7cde559f6702af03bfea68213edca
| 47,417
|
def image_check(name: str):
    """
    Check whether the string ends with a known image file extension.

    Args:
        name (str): the string whose suffix is tested (case-insensitive)

    Returns:
        True if the suffix is an image extension, False otherwise.
    """
    image_suffixes = (".jpg", ".png", ".jpeg", ".gif", ".webp")
    # endswith accepts a tuple of suffixes, replacing the manual loop.
    return name.lower().endswith(image_suffixes)
|
378ab09b17a69dd729b9076d8a3b4882fe86381e
| 47,422
|
def format_version(v):
    """
    Return a PEP 440-compliant "major.minor.micro" version string built
    from the first three components of VERSION.
    """
    major, minor, micro = v[0], v[1], v[2]
    return '.'.join(str(part) for part in (major, minor, micro))
|
27d0d266f1b109ebfffc86d80b78095757030542
| 47,428
|
import colorsys
def class_specific_color(class_id, bright=True):
    """
    Generate a class-specific RGB color by spreading hues around the
    HSV wheel (20 classes per revolution); dim variants use value 0.7.
    """
    value = 1.0 if bright else 0.7
    hue = class_id / 20
    return colorsys.hsv_to_rgb(hue, 1, value)
|
eb7f3c0b9aef6e9256fa08a87b46ec3b762752bf
| 47,430
|
def get_fp_color(n, col_set=1):
    """
    Color for a fixed point, chosen by its number of unstable modes.

    Arguments:
        n (int): number of unstable modes
        col_set (int): 1 for the primary palette, anything else for the alternate
    Returns:
        color (str)
    """
    primary = {0: "seagreen", 1: "salmon", 2: "skyblue"}
    alternate = {0: "lightseagreen", 1: "lightsalmon", 2: "deepskyblue"}
    if col_set == 1:
        return primary.get(n, "magenta")
    return alternate.get(n, "purple")
|
5f32d2a04edaa7a4534da654eb223132d2fec3fb
| 47,432
|
from typing import Any
def is_sdense(x: Any) -> bool:
    """Return True when *x* looks like an `SDense` (a SAX dense S-matrix
    representation): a tuple or list of exactly two elements."""
    if not isinstance(x, (tuple, list)):
        return False
    return len(x) == 2
|
05069ee65d1485fabf246f0bf1c3fc276d6a4d07
| 47,438
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.