content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def make_gym(a, b, c, d):
    """Build a pokemon gym (a plain list) holding the four pokemons a, b, c, d."""
    return list((a, b, c, d))
def parent(index: int) -> int:
    """Gets parent's index.

    NOTE(review): assumes a 1-based binary-heap layout (root at index 1);
    for 0-based heaps the parent would be (index - 1) // 2 — confirm callers.
    """
    return index // 2 | fd00111ac33ff77f28f30d2f5f0bcf375206f705 | 110,153 |
def topdir_file(name):
    """Strip a leading "src/" component from a filename, if present."""
    prefix = "src/"
    return name[len(prefix):] if name.startswith(prefix) else name
def get_bounds(results):
    """Get upper and lower bounds for 2D plot.

    Returns a symmetric [lo, hi] interval centred on zero that covers both
    series found at ``results[1][0]`` and ``results[1][1]``, padded by 20%.

    NOTE(review): assumes ``results[1]`` holds two non-empty numeric
    sequences — confirm against the producer of ``results``.
    """
    max_val = max(max(results[1][0]), max(results[1][1]))
    min_val = min(min(results[1][0]), min(results[1][1]))
    # Largest absolute value seen in either series.
    bound = max(abs(max_val), abs(min_val))
    pad = bound/5
    return [-bound - pad, bound + pad] | 32691575574114116431d83deb909291e2048f46 | 110,157 |
def remove_umis(adj_list, cluster, nodes):
    """
    Removes the specified nodes from the cluster and returns
    the remaining nodes

    NOTE(review): every node in ``nodes`` must be a key of ``adj_list``;
    the neighbours of the removed nodes are removed from the cluster too.
    """
    # list incomprehension: for x in nodes: for node in adj_list[x]: yield node
    nodes_to_remove = set([node for x in nodes for node in adj_list[x]] + nodes)
    return cluster - nodes_to_remove | 44ac9b446b6e50c005a8d0d6a4751f67efc4ada6 | 110,158 |
from pathlib import Path
def requirements_dev_project(tmp_path_factory):
    """
    Returns a temp directory containing an empty requirements_dev.txt
    file.

    NOTE(review): looks like a pytest fixture body (``tmp_path_factory`` is
    pytest's factory fixture), though no ``@pytest.fixture`` decorator is
    visible here — confirm how it is registered.
    """
    folder: Path = tmp_path_factory.mktemp("myrepo")
    req_dev_txt = folder.joinpath("requirements_dev.txt")
    # touch() creates the empty file so consumers can detect its presence.
    req_dev_txt.touch()
    return folder | 0bbd73b364fc502a677fdd3e650c97f99505d796 | 110,159 |
def _get_expression_levels(expr):
"""
:returns: dictionary with the level of depth of each part of the expression. Brackets are ignored in the result.
e.g.: ['A', 'OR', 'B', 'AND', '(', 'A', 'IF', '(', 'NOT', 'C', 'IF', 'D', ')', ')']
=> {0: [0, 1, 2, 3], 1: [5, 6], 2: [8, 9, 10, 11]}
"""
level = 0
levels = dict()
for i, x in enumerate(expr):
if x == '(':
level += 1
elif x == ')':
level -= 1
else:
levels.setdefault(level, []).append(i)
return levels | 64e475cc21ec2cdd5c0afc8a09cae805cabe5fb8 | 110,160 |
def _rst_header(char, text, leading=False, newline=False):
"""Create rST header data from a given character and header text."""
sep = char * len(text)
data = [text, sep]
if leading:
data = [sep] + data
if newline:
data.append('')
return data | 2d1f3da19a4dcc274535ebabea80a92c7dcb9a36 | 110,163 |
def process_muse_vcf(job, muse_vcf, work_dir, univ_options):
    """
    Process the MuSE vcf for accepted calls.
    :param toil.fileStore.FileID muse_vcf: fsID for a MuSE generated chromosome vcf
    :param str work_dir: Working directory
    :param dict univ_options: Dict of universal options used by almost all tools
    :return: Path to the processed vcf
    :rtype: str
    """
    # Materialise the Toil file-store ID as a local file path.
    muse_vcf = job.fileStore.readGlobalFile(muse_vcf)
    with open(muse_vcf, 'r') as infile, open(muse_vcf + 'muse_parsed.tmp', 'w') as outfile:
        for line in infile:
            line = line.strip()
            # Header/meta lines are copied through unchanged.
            if line.startswith('#'):
                print(line, file=outfile)
                continue
            line = line.split('\t')
            # Column 7 (index 6) is the VCF FILTER field; keep only PASS calls.
            if line[6] == 'PASS':
                print('\t'.join(line), file=outfile)
    return outfile.name | 368d6441cd9cd8eba40e0949ca54da8deaac8a1e | 110,164 |
def filterString(text):
    """
    Replace/Remove invalid file characters
    text : The text to search in
    Returns:
        A filtered version of the given string
    """
    # Trim whitespace, then apply each character substitution in turn.
    replacements = ((':', ';'), ('?', ''), ('"', "'"))
    result = text.strip()
    for old, new in replacements:
        result = result.replace(old, new)
    return result
def extract_obj_name(name: str) -> str:
    """
    Generates a shortened name, without the module information. Useful for node-names etc. Only extracts the final
    object information often separated by `.` in the python fully qualified notation
    """
    if name is None:
        return ""
    # rpartition returns ('', '', name) when no '.' is present, so the last
    # element is always the final dotted component.
    return name.rpartition(".")[2]
def has_content(l):
    """
    Returns true if list [l] contains any truthy (non-null) objects.

    ``any`` already applies the truthiness test to every element, so the
    intermediate ``filter(lambda x: x, l)`` in the original was redundant.
    """
    return any(l)
def find_close_packages(packages):
    """
    Given a list of packages, find the two packages who have all but one letter in common in a position
    :param packages: list of packages
    :return: the common letters, or None when no such pair exists
    >>> find_close_packages(['abcde', 'fghij', 'klmno', 'pqrst', 'fguij', 'axcye', 'wvxyz'])
    'fgij'
    """
    # Bug fix: the inner range previously started at ``i``, comparing each
    # package with itself (wasted work; never a match since zero letters differ).
    for i in range(len(packages)):
        for j in range(i + 1, len(packages)):
            p1, p2 = packages[i], packages[j]
            common_letters = [a for a, b in zip(p1, p2) if a == b]
            if len(common_letters) == len(p1) - 1:
                return ''.join(common_letters)
    return None
def short_hamming(ident: int, len1: int, len2: int) -> float:
    """Compute the normalized Hamming distance between two sequences.

    NOTE(review): ``ident`` is presumably the count of identical positions,
    normalised by the shorter of the two lengths — confirm with callers.
    """
    return 1 - ident / min(len1, len2) | 4efa260c0ff14c228d26ea27aa983a692b7fabec | 110,175 |
def findSubstringInList(substr, the_list):
    """Returns a list containing the indices that a substring was found at.
    Uses a generator to quickly find all indices that str appears in.
    Args:
        substr (str): the sub string to search for.
        the_list (List): a list containing the strings to search.
    Returns:
        tuple - containing:
            * a list with the indices that the substring was found in
              (this list can be empty if no matches were found).
            * an integer containing the number of elements it was found in.
    """
    # A single pass collects matching indices; the count is derived from it.
    indices = [i for i, s in enumerate(the_list) if substr in s]
    return indices, len(indices) | c2e800161a2f493bc820839d048086a3459aace2 | 110,176 |
def bstr_to_set(b):
    """
    Convert a byte string to the set of length-1 ``bytes`` objects within it.
    """
    # Iterating a bytes object yields ints, so re-wrap each one as bytes.
    return {bytes((byte,)) for byte in b}
def transform_parameter(
    floor: float, ceiling: float, request: float, current: float
) -> float:
    """Return requested if between floor and ceiling, or else return current."""
    # If the current value is already out of range, clamp the request into
    # range so a valid value is always adopted on the next check.
    if not floor <= current <= ceiling:
        request = min(ceiling, max(floor, request))
    if floor <= request <= ceiling:
        return request
    # Out-of-range request with an in-range current value: keep the current.
    return current | f4b44c6180f53fe5d63dddc7da57e27523c622d2 | 110,181 |
def get_item(obj_list, lookup):
    """
    Returns the first dictionary in ``obj_list`` whose 'item' key equals
    ``lookup``, or None when no entry matches.
    """
    for entry in obj_list:
        if entry['item'] == lookup:
            return entry
    return None
def _resolve(scope, key, context):
"""
Resolve scope and key to a context item
Provides very minimal validation of presence
Parameters
----------
scope: str
singular variant of scope in context
key: str
key to lookup context item in context within scope
context: dict
Returns
-------
context item: str, dict, etc
"""
if scope not in context:
raise ValueError("Scope {0} is not found in context!".format(scope))
if key not in context[scope]:
raise ValueError("Key {0} is not found in context!".format(key))
return context[scope][key] | ac4bb1cc4ba485a34dc1c915949a4c838a64408a | 110,188 |
import re
def package_in_pip_output(package: str, output: str) -> bool:
    """
    Determine if a package is found in the output of packages installed by pip
    :param package: package name, matched at the start of a line
    :param output: multi-line pip output to search
    :return: True if found, False otherwise
    """
    # Bug fixes: re.escape guards against regex metacharacters in the package
    # name, and the raw string avoids the invalid-escape-sequence warning the
    # original non-raw '\s' produced.
    pattern = r'^{}\s'.format(re.escape(package))
    return re.search(pattern, output, re.IGNORECASE | re.MULTILINE) is not None
def _GetHost(cpu, target_os):
"""Returns the host triple for the given OS and CPU."""
if cpu == 'x64':
cpu = 'x86_64'
elif cpu == 'arm64':
cpu = 'aarch64'
if target_os == 'linux':
return cpu + '-unknown-linux'
elif target_os == 'mac':
return cpu + '-apple-darwin'
elif target_os == 'ios':
return cpu + '-ios-darwin'
else:
raise RuntimeError('Unsupported host') | 1431239a7352a3cee23bcc0aec8d1961dcb7296f | 110,191 |
import torch
def cal_center(group_xyz):
    """
    Calculate Global Coordinates of the Center of Triangle
    :param group_xyz: [B, N, K, 3] / [B, N, G, K, 3]; K >= 3
    :return: [B, N, 3] / [B, N, G, 3]
    """
    # Averaging over the K (vertex) axis yields the centroid of each group.
    return group_xyz.mean(dim=-2)
def percentage(nominator: int, denominator: int, precision: int) -> str:
    """Get percentage string
    Arguments:
        nominator {int} -- a nominator (numerator)
        denominator {int} -- a denominator (must be non-zero)
        precision {int} -- number of decimal places
    Returns:
        str -- percentage string, e.g. "50.0 %"
    """
    ratio = round(float(nominator) / denominator * 100, precision)
    # Nested format spec sets the number of decimal places dynamically.
    return "{:.{prec}f} %".format(ratio, prec=precision)
def compute_iou(bboxA, bboxB):
    """compute iou of two bounding boxes
    Args:
        bboxA(list): coordinates of box A (i,j,w,h)
        bboxB(list): coordinates of box B (i,j,w,h)
    Return:
        float: iou score (0.0 when the union is empty)
    """
    # Intersection rectangle corners.
    ix = max(bboxA[0], bboxB[0])
    iy = max(bboxA[1], bboxB[1])
    mx = min(bboxA[0] + bboxA[2], bboxB[0] + bboxB[2])
    my = min(bboxA[1] + bboxA[3], bboxB[1] + bboxB[3])
    area_inter = max(mx - ix, 0) * max(my - iy, 0)
    area_A = bboxA[2] * bboxA[3]
    area_B = bboxB[2] * bboxB[3]
    union = float(area_A + area_B - area_inter)
    # Bug fix: two degenerate (zero-area) boxes previously raised
    # ZeroDivisionError; an empty union now yields IoU 0.
    if union == 0:
        return 0.0
    return float(area_inter) / union
def strip_wrapping(html):
    """
    Removes the wrapping that might have resulted when using get_html_tree().
    """
    open_tag, close_tag = '<div>', '</div>'
    if html.startswith(open_tag) and html.endswith(close_tag):
        html = html[len(open_tag):-len(close_tag)]
    return html.strip()
def argmax_pair(array, key):
    """Find an (unordered) pair of indices that maximize the given function.

    Returns (None, None) for arrays with fewer than two elements.
    """
    n = len(array)
    mi, mj, m = None, None, None
    for i in range(n):
        for j in range(i+1, n):
            k = key(array[i], array[j])
            # Bug fix: the original tested 'not m', which is also true when the
            # best key so far is 0 (or any falsy value), letting worse
            # candidates overwrite it. Compare against None explicitly.
            if m is None or k > m:
                mi, mj, m = i, j, k
    return mi, mj
import json
import logging
def parse_json_to_dict(text_data):
    """Parse JSON-containing text; return the decoded object or {} on failure.

    Note: despite the name, json.loads can return any JSON value (list, str,
    number, ...), not only a dict; callers relying on a dict should validate.
    """
    try:
        return json.loads(text_data)
    except (ValueError, TypeError):
        # Bug fix: the original bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt. json.JSONDecodeError subclasses ValueError;
        # TypeError covers non-str/bytes input.
        logging.exception('unexpected error parsing json result')
        return {}
def default(event, **kwargs):
    """A Hacktoolkit-flavored default event handler for Slack webhook events
    Returns a payload if applicable, or None

    NOTE(review): ``command``/``args`` are read but never used, and
    ``channel`` is only needed by the commented-out webhook call.
    """
    text = kwargs.get('text')
    command = kwargs.get('command')
    args = kwargs.get('args')
    # for example, we could...
    # make another webhook call in response
    channel = event['channel_id']
    slack_text = 'You said:\n>%s\n Roger that.' % text
    #webhook_call(text=slack_text, channel=channel, username=username)
    payload = {
        'text' : slack_text,
    }
    return payload | 4496fd9698dc331cc70abb173b8cff4d8a06d994 | 110,211 |
from datetime import datetime
import time
def convert_debian_time_to_unix(debian_time):
    """Convert Debian time to unix time, i.e. seconds from epoch.
    Args:
        date in the format day-of-week, dd month yyyy hh:mm:ss +zzzz
    Returns:
        str of unix timestamp
    """
    dt_obj = datetime.strptime(debian_time, '%a, %d %b %Y %H:%M:%S %z')
    # Bug fix: time.mktime(dt.timetuple()) interprets the tuple in *local*
    # time and discards the parsed %z UTC offset, shifting the result by the
    # machine's timezone. datetime.timestamp() honours the offset.
    return str(int(dt_obj.timestamp()))
import time
def convert_epoch_ms_to_iso8601(record, timestamp_key):
    """ Convert epoch time ms to iso8601 format (UTC), in place.

    Mutates and returns ``record``; ``record[timestamp_key]`` must be an
    int-parseable epoch time in milliseconds.
    """
    # Split into whole seconds and the leftover milliseconds.
    s, ms = divmod(int(record[timestamp_key]), 1000)
    record[timestamp_key] = "%s.%03d" % (time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(s)), ms)
    return record | 844fc7c8a2df9424812e170c8592b778b9dcb020 | 110,215 |
def rec_matrix_shape(matrix, shape):
    """
    Recursive function to calculate the shape of a Matrix.

    Appends the length of each nesting level of ``matrix`` to ``shape``
    (mutated in place) and returns it as a list of integers.
    Only the first element of each level is inspected, so a ragged
    matrix reports the shape of its leading elements.
    """
    if type(matrix) == list:
        shape.append(len(matrix))
        # Bug fix: recursing into matrix[0] raised IndexError on empty lists.
        if matrix:
            rec_matrix_shape(matrix[0], shape)
    return shape
def cached_property(fn):
    """
    Decorator that turns the given method into a cached property.
    To clear the cache, delete self._cache[fn].
    The preferred way of clearing the cache is by using an
    "@invalidates_cache" decorator on another method.

    NOTE(review): instances must provide a ``self._cache`` dict; the wrapped
    function object itself is used as the cache key.
    """
    def wrapped(self, *args, **kwargs):
        # Compute on first access only; subsequent reads hit the cache.
        if fn not in self._cache:
            self._cache[fn] = fn(self, *args, **kwargs)
        return self._cache[fn]
    return property(wrapped) | 586381e8a36c174af39fd3f219aa54688f39920c | 110,223 |
def _get_graph_consistency(num_R_vertices: int, num_E_vertices: int):
    """
    Calculates the consistency of the given graph as in Definition 3.2.
    :param num_R_vertices: number of R vertices contained in a graph.
    :param num_E_vertices: number of E vertices contained in a graph.
    :return: consistency score of the graph.

    NOTE(review): raises ZeroDivisionError when both counts are 0 — confirm
    callers guarantee at least one vertex.
    """
    # 1 when the counts are balanced, tending to 0 as they diverge.
    return 1 - abs(num_R_vertices - num_E_vertices) / (num_R_vertices + num_E_vertices) | 36b695983cb7a6d83a51892b1bd6617dcfec97fd | 110,225 |
import pathlib
def find(path, fname):
    r"""Given a path, this will recursively search for a file (bob.txt) or
    pattern (\*.txt). It returns a list of the found file paths."""
    return list(pathlib.Path(path).rglob(fname))
def combine_block(block):
    """Combine a block of commands into a single string.

    ``block`` is an iterable of command strings; they are joined with
    newline separators.
    """
    return "\n".join(block) | b7ede21665e690ddf2114700436f29774ddd14d4 | 110,236 |
from typing import Callable
def bisect_search(predicate: Callable[[int], bool], low: int, high: int) -> int:
    """Find the lowest int between low and high where predicate(int) is True.

    ``predicate`` must be monotonic (all False below some threshold, all True
    at and above it) over [low, high); returns ``high`` when the predicate is
    never True in the range.
    """
    while low < high:
        mid = low + (high - low) // 2 # Avoids integer overflow compared to mid = (low + high) // 2
        if predicate(mid):
            # mid satisfies the predicate; the answer is at mid or below.
            high = mid
        else:
            low = mid + 1
    return low | b7bad9297cd76d09045c8d73973add89d3c15954 | 110,238 |
def _get_type_feature_stats(stats, type_):
    """Returns stats of features that have a specified type.
    Args:
        stats: A DatasetFeatureStatistics proto
        type_: A FeatureNameStatistics.Type value
    Returns:
        A list of FeatureNameStatistics for features that have the specified type.
    """
    # Simple filter over the proto's repeated 'features' field.
    return [f for f in stats.features if f.type == type_] | f8bc36ce18ab40e242d0e2cd009e884f056fac10 | 110,245 |
def _is_compliant_shape(a, b):
"""Compares shapes of two arguments.
If size of a dimensions is None, this dimension size is ignored.
Example:
assert _is_compliant_shape((1, 2, 3), (1, 2, 3))
assert _is_compliant_shape((1, 2, 3), (1, None, 3))
assert not _is_compliant_shape((1, 2, 3), (1, 10, 3))
assert not _is_compliant_shape((1, 2), (1,))
:return: True, if the shapes are compliant
"""
if len(a) != len(b):
return False
for i in range(len(a)):
if a[i] and b[i]:
if a[i] != b[i]:
return False
return True | 4ab7f3b979d0faf94880847f769165bd59d05e39 | 110,247 |
def checkAttr(self, attrName):
    """Check that *self* has the attribute ``attrName``.

    Returns True when the attribute exists; raises AttributeError otherwise.
    """
    if not hasattr(self, attrName):
        # Bug fix: the original had an unreachable 'return False' after the raise.
        raise AttributeError('module has no attribute ' + attrName)
    return True
import re
def max_num(x: str) -> int:
    """
    Input: String
    Output: Integer
    Finds the maximum integer in the string (0 when the string has no digits).
    """
    # Bug fixes: the original pattern [1-9]+ split any number containing the
    # digit 0 (e.g. '100' only matched '1'), and the function returned a str
    # despite its declared int return type.
    numbers = re.findall(r"\d+", x)
    return max((int(n) for n in numbers), default=0)
from typing import Any
import torch
def move_to(obj: Any, device: torch.device):
    """Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283
    Recursively move tensors/modules inside nested containers to ``device``.
    Arguments:
        obj {dict, list} -- Object to be moved to device
        device {torch.device} -- Device that object will be moved to
    Raises:
        TypeError: object is of type that is not implemented to process
    Returns:
        type(obj) -- same object but moved to specified device
    """
    # Tensors and modules are moved directly.
    if torch.is_tensor(obj) or isinstance(obj, torch.nn.Module):
        return obj.to(device)
    if isinstance(obj, dict):
        res = {k: move_to(v, device) for k, v in obj.items()}
        return res
    if isinstance(obj, list):
        return [move_to(v, device) for v in obj]
    if isinstance(obj, tuple):
        # NOTE(review): named tuples are rebuilt as plain tuples here.
        return tuple(move_to(list(obj), device))
    # Anything else (ints, strings, None, ...) passes through unchanged; the
    # TypeError mentioned in the docstring is never actually raised.
    return obj | 9ed0ee66d2976aa0b17c0c0c060d6f29cf19f03f | 110,254 |
import re
def firsth1(html):
    """ Returns the content of the first h1 element, or '' if none exists. """
    found = re.search(r'<h1[^>]*>(.*?)</h1>', html, flags=re.DOTALL)
    if found is None:
        return ''
    return found.group(1)
def gillespie (r, *args, **kwargs):
    """
    Run a Gillespie stochastic simulation.
    Examples:
    rr = te.loada ('S1 -> S2; k1*S1; k1 = 0.1; S1 = 40')
    # Simulate from time zero to 40 time units
    result = rr.gillespie (0, 40)
    # Simulate on a grid with 10 points from start 0 to end time 40
    result = rr.gillespie (0, 40, 10)
    # Simulate from time zero to 40 time units using the given selection list
    # This means that the first column will be time and the second column species S1
    result = rr.gillespie (0, 40, ['time', 'S1'])
    # Simulate from time zero to 40 time units, on a grid with 20 points
    # using the give selection list
    result = rr.gillespie (0, 40, 20, ['time', 'S1'])
    """
    if r.integrator is None:
        raise ValueError("model is not loaded")
    prev = r.integrator.getName()
    # '**kwargs' always binds a dict, so the original 'if kwargs is not None'
    # branch was dead code.
    kwargs['integrator'] = 'gillespie'
    try:
        return r.simulate(*args, **kwargs)
    finally:
        # Bug fix: restore the caller's integrator even when simulate() raises.
        r.setIntegrator(prev)
def parse_directions(input_directions):
    """Parse input_directions for single tile from str to list of str."""
    directions = []
    pos = 0
    total = len(input_directions)
    while pos < total:
        # 's' and 'n' always begin a two-character direction (se/sw/ne/nw);
        # everything else ('e'/'w') is a single character.
        step = 2 if input_directions[pos] in ('s', 'n') else 1
        directions.append(input_directions[pos:pos + step])
        pos += step
    return directions
def sumsets(sets):
    """
    Union of sets, returned as a frozenset.
    """
    result = frozenset()
    for members in sets:
        result = result.union(members)
    return result
def mystery_2c_no_if(c1: int, c2: int, c3: int) -> bool:
    """Return the same value as mystery_2c_if, but without using any if statements.

    NOTE(review): mystery_2c_if is not visible in this file, so the claimed
    equivalence cannot be checked from here.
    """
    return c1 != c2 and ((c1 > c2 and c3 > c2) or (c1 <= c2 < c3)) | 108c4675d63a7cd22da3629492f762ea2d758d6c | 110,263 |
def green_channel(image):
    """Return the green channel.

    NOTE(review): assumes a channels-last array of shape (H, W, C) with the
    green channel at index 1 (RGB order) — confirm with the image loader.
    """
    return image[:, :, 1] | 0f857903fb7523c69cb1ebcdd651749826e81b9a | 110,268 |
def usd(value):
    """Formats value as USD, e.g. -1234.5 -> '-$2.00' style '-$1,234.50'."""
    sign = "-" if value < 0 else ""
    return f"{sign}${abs(value):,.2f}"
def _library_identifier(*, architectures, environment, platform):
"""Return a unique identifier for an embedded framework to disambiguate it from others.
Args:
architectures: The architectures of the target that was built. For example, `x86_64` or
`arm64`.
environment: The environment of the target that was built, which corresponds to the
toolchain's target triple values as reported by `apple_support.link_multi_arch_binary`.
for environment. Typically `device` or `simulator`.
platform: The platform of the target that was built, which corresponds to the toolchain's
target triple values as reported by `apple_support.link_multi_arch_binary` for platform.
For example, `ios`, `macos`, `tvos` or `watchos`.
Returns:
A string that can be used to determine the subfolder this embedded framework will be found
in the final XCFramework bundle. This mirrors the formatting for subfolders as given by the
xcodebuild -create-xcframework tool.
"""
library_identifier = "{}-{}".format(platform, "_".join(architectures))
if environment != "device":
library_identifier += "-{}".format(environment)
return library_identifier | a62d114789af64d48d39fa606a4bf719774701a1 | 110,276 |
from functools import reduce
from operator import getitem
def getitems(array, values):
    """
    Equivalent to array[v0][v1]...[vn] for v0..vn in values.
    """
    current = array
    for v in values:
        current = current[v]
    return current
def get_high_and_water_ways(ways):
    """
    Extracts highways and waterways from all ways.
    :param ways: All ways as a dict (way id -> way object exposing ``.tags``)
    :return: highways (list), waterways (list)
    """
    highways = list()
    waterways = list()
    for way_id in ways:
        way = ways[way_id]
        # A way tagged as both counts only as a highway (note the elif).
        if "highway" in way.tags:
            highways.append(way)
        elif "waterway" in way.tags:
            waterways.append(way)
    return highways, waterways | 850a5118540ba07931abeabbde46cf6cafcf387e | 110,284 |
import torch
from typing import Dict
from typing import Tuple
def ngram(
    spikes: torch.Tensor,
    ngram_scores: Dict[Tuple[int, ...], torch.Tensor],
    n_labels: int,
    n: int,
) -> torch.Tensor:
    # language=rst
    """
    Predicts between ``n_labels`` using ``ngram_scores``.
    :param spikes: Spikes of shape ``(n_examples, time, n_neurons)``.
    :param ngram_scores: Previously recorded scores to update.
    :param n_labels: The number of target labels in the data.
    :param n: The max size of n-gram to use.
    :return: Predictions per example.
    """
    predictions = []
    for activity in spikes:
        score = torch.zeros(n_labels, device=spikes.device)
        # Aggregate all of the firing neurons' indices, in firing order.
        fire_order = []
        for t in range(activity.size()[0]):
            ordering = torch.nonzero(activity[t].view(-1))
            if ordering.numel() > 0:
                fire_order += ordering[:, 0].tolist()
        # Consider all n-gram sequences.
        # Bug fix: the original upper bound, len(fire_order) - n, skipped the
        # final n-gram; it must be len(fire_order) - n + 1 to include it.
        for j in range(len(fire_order) - n + 1):
            gram = tuple(fire_order[j:j + n])
            if gram in ngram_scores:
                score += ngram_scores[gram]
        predictions.append(torch.argmax(score))
    return torch.tensor(predictions, device=spikes.device).long()
def diff(a, n=1):
    """
    Calculate the n-th discrete difference of a sequence.
    The first difference is given by ``out[i] = a[i+1] - a[i]``;
    higher differences are calculated by applying `diff` recursively.
    :param a: The sequence to calculate the diff on
    :param n: The order of the difference
    :type a: list | tuple
    :type n: int
    :return: list of nth order differences (``a`` itself when n == 0)
    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError("order must be non-negative but got " + repr(n))
    # Bug fix: the original built a 'map' object, which does not support the
    # slicing used by the recursive call, so any n >= 2 raised TypeError.
    # A list also makes the return value reusable by callers.
    b = [second - first for first, second in zip(a, a[1:])]
    if n > 1:
        return diff(b, n-1)
    return b
def shouldExcludeFile(filename, excludes):
    """
    Determines whether a file is in an excluded directory.
    Arguments:
    - filename: filename being tested
    - excludes: array of excluded directory names
    Returns: True if should exclude, False if not.
    """
    # Substring match, as in the original: any exclude appearing anywhere
    # in the filename triggers exclusion.
    return any(exc in filename for exc in excludes)
import torch
def is_torch_integer(input: torch.Tensor) -> bool:
    """Checks if tensor has an integer dtype or not. \n
    Args:
        - input (Tensor): input tensor to be checked.
    """
    # Membership test over the same dtype set the original looped through.
    int_dtypes = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
    return input.dtype in int_dtypes
import pickle
def load_MT_waveforms_dict_from_file(waveforms_data_filename):
    """Function to read waveforms dict output from full_waveform_inversion.

    Security note: pickle.load on untrusted files can execute arbitrary code;
    only use with files produced by this pipeline.
    """
    # Bug fix: the file handle was opened inline and never closed; the context
    # manager guarantees it is released even if unpickling fails.
    with open(waveforms_data_filename, "rb") as f:
        return pickle.load(f)
def multi_match_query(query, fields, size=10, fuzziness=None):
    """Returns the body of the multi_match query in Elasticsearch with possibility of setting
    the fuzziness on it"""
    match = {"query": query, "fields": fields}
    # A truthy fuzziness value is injected into the multi_match clause.
    if fuzziness:
        match["fuzziness"] = fuzziness
    return {"query": {"multi_match": match}, "size": size}
import re
def new_line_to_br(text: str) -> str:
    """
    Transform new lines to HTML <br /> tags.
    Args:
        text (str): Text from textarea.
    Returns:
        str: Text with <br />.
    """
    # str.replace is equivalent to the original re.sub for this literal pattern.
    return text.replace("\n", "<br />")
from pathlib import Path
def abs_path(path_str: str) -> Path:
    """
    Validate `path_str` and make it absolute.
    Arguments:
        path_str -- A path to check.
    Returns:
        An absolute path.

    NOTE(review): despite the summary, no validation actually occurs, and
    ``absolute()`` does not resolve ``..`` components or symlinks (unlike
    ``resolve()``) — confirm which behaviour callers expect.
    """
    return Path(path_str).absolute() | ad8234c2f8188b27bb04f361818d95f8e1bdff34 | 110,299 |
def read_genome_list(input_file):
    """
    Function used to read the genome list. The output is a dictionary with the genome list
    (fasta_prefix -> new_prefix) and the total number of genomes in the list
    """
    genome_count = 0
    genome_info = {}
    # Bug fix: the file handle was opened inline in the for-statement and
    # never closed; the context manager guarantees it is released.
    with open(input_file, 'r') as fh:
        for line in fh:
            element = line.rstrip().split("\t")
            # Column 0 is the fasta prefix, column 2 the new prefix.
            genome_info[element[0]] = element[2]
            genome_count += 1
    return genome_info, genome_count
def _transform(command, *args):
    """Apply command's transformation function (if any) to given arguments.
    Arguments:
    command -- the command description dict
    *args -- command arguments

    Returns the transformed value(s); without a transform the arguments are
    echoed back, with a lone argument unwrapped from its tuple.
    """
    if "value_transform" in command:
        return command["value_transform"](*args)
    return args if len(args) > 1 else args[0] | 36e338acd53769c75ca744927a4811bd7b7c2a9f | 110,303 |
import time
import calendar
def get_timestamp_from_datetime(dt, is_local_time=True):
    """Get an epoch timestamp from a UTC datetime.
    Ref: https://docs.python.org/2/library/calendar.html#calendar.timegm
    http://stackoverflow.com/a/8778548/257924

    NOTE(review): ``dt`` is treated as local wall-clock time when
    ``is_local_time`` is True, otherwise as UTC; a timezone-aware datetime's
    offset is ignored by both branches (timetuple() drops it).
    """
    if is_local_time:
        timestamp = time.mktime(dt.timetuple()) # DO NOT USE IT WITH UTC DATE
    else:
        timestamp = calendar.timegm(dt.timetuple())
    return timestamp | b9ee43b368e9e1516df5eb71e5a86c56d49e24a7 | 110,312 |
def normalize_edu_string(edu_string):
    """Remove superfluous whitespace from an EDU and return it."""
    # split() with no args already discards leading/trailing whitespace,
    # so the original's extra strip() is unnecessary.
    return ' '.join(edu_string.split())
def read_raw_data(data_fn):
    """
    Reads raw data from a file. Each line contains one example.
    """
    with open(data_fn, 'r') as fin:
        # One stripped example per input line.
        return [line.strip() for line in fin]
def compute_mean_point(particles):
    """
    Compute the mean for all particles that have a reasonably good weight.
    This is not part of the particle filter algorithm but rather an
    addition to show the "best belief" for current position.

    Returns (weighted mean x, confidence in [0, 1]); (0, 1) when all
    weights are zero. Particles must expose ``.x`` and ``.w``.
    """
    # Weighted sum of positions; m_count accumulates the total weight.
    x, m_count = 0, 0
    for p in particles:
        m_count += p.w
        x += p.x * p.w
        # m_y += p.x_right * p.w
    if m_count == 0:
        print("m_count == 0")
        return 0, 1
    x /= m_count
    # m_y /= m_count
    # Now compute how good that mean is -- check how many particles
    # actually are in the immediate vicinity
    m_count = 0
    for p in particles:
        if abs(p.x - x) < 0.5:
            m_count += 1
    return x, (float(m_count) / len(particles)) | bba8c54cbbe8fa4e77f00f94c053a4b3578b4fa3 | 110,322 |
def ignore_case(key):
    """
    A trivial function to just return the key as it is without modifications.

    Presumably used as a drop-in key-normalisation hook where other call
    sites supply e.g. a lowercasing function — confirm with callers.
    """
    return key | f7d6d04e6f66dafaf282e3069adbfaa55fe23291 | 110,323 |
from typing import Dict
import copy
def remove_null_values(d: Dict) -> Dict:
    """
    Removes None values from dictionary `d` recursively.
    Args:
        d (dict)
    Returns:
        d (dict): Deep copy of the dictionary without None values.
    """
    result = copy.deepcopy(d)
    # Collect keys first so the dict is not mutated while iterating.
    for k in [key for key, value in result.items() if value is None]:
        del result[k]
    # Recurse into any nested dictionaries that remain.
    for k in [key for key, value in result.items() if isinstance(value, dict)]:
        result[k] = remove_null_values(result[k])
    return result
def fibonacci_optimal_iteration(n: int) -> int:
    """Return the nth fibonacci number using optimal iteration method
    This function calculate a fibonacci number using iteration loop, not recursion.
    So it has O(n) time complexity which is very fast.
    >>> fibonacci_optimal_iteration(0)
    0
    >>> fibonacci_optimal_iteration(1)
    1
    >>> fibonacci_optimal_iteration(50)
    12586269025
    >>> fibonacci_optimal_iteration(200)
    280571172992510140037611932413038677189525
    >>> fibonacci_optimal_iteration(-2)
    Traceback (most recent call last):
    ...
    ValueError: n must be >= 0
    """
    if n < 0:
        raise ValueError("n must be >= 0")
    # Rolling pair (F(k), F(k+1)); after n steps prev holds F(n).
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
def get_scaling_factors(all_truth, all_pred):
    """
    Compute factors to scale each target prediction
    :param all_truth: ground truth, same shape as all_pred
    :param all_pred: predictions, array of shape (N, L, C)
    :return: scaling factors corresponding to each target (length C)

    NOTE(review): assumes numpy-like arrays; the element-wise division yields
    inf/nan for any target whose predicted sum is 0 — confirm callers handle it.
    """
    N, L, C = all_pred.shape
    flat_pred = all_pred.reshape(N * L, C)
    flat_truth = all_truth.reshape(N * L, C)
    # Per-target totals over all examples and sequence positions.
    truth_per_cell_line_sum = flat_truth.sum(axis=0)
    pred_per_cell_line_sum = flat_pred.sum(axis=0)
    scaling_factors = truth_per_cell_line_sum / pred_per_cell_line_sum
    return scaling_factors | a6f25f5126af6819d18f287ed3e7ecfc0b3baf8c | 110,331 |
def is_openml_benchmark(benchmark: str) -> bool:
    """ Check if 'benchmark' is a valid identifier for an openml task or suite. """
    parts = benchmark.split('/')
    if len(parts) != 3:
        return False
    domain, oml_type, oml_id = parts
    # 's' = suite, 't' = task; the id must be purely decimal.
    return domain == "openml" and oml_type in ('s', 't') and oml_id.isdecimal()
def _kwargs_keys_to_func_name(kwargs_key):
""" Convert from self.kwargs key name to the function/method name
Parameters
----------
kwargs_key : str
Key from self.kwargs dictionary
Returns
-------
func_name : str
Name of method or function associated with the input key
"""
func_name = '_{:s}_rtn'.format(kwargs_key)
return func_name | ee2ee9aed60550bd3db3425fba879e177d6ff6d6 | 110,335 |
def equalscontent(string1, string2):
    """Tests if two strings are equal.
    None is treated like an empty string. Trailing and leading whitespace is
    ignored."""
    # 'or ""' maps None (and any falsy value) to the empty string, matching
    # the original's explicit checks.
    left = (string1 or "").strip()
    right = (string2 or "").strip()
    return left == right
def arn_endpoint_wildcard(arn: str) -> str:
    """
    Take an arn containing a full path of endpoints and
    return the arn with a wildcard for all endpoints
    Example
    -------
    input: arn:aws:execute-api:us-east-1:0000000000:XXXYYY/stage/POST/some/endpoint
    output: arn:aws:execute-api:us-east-1:0000000000:XXXYYY/stage/POST/*
    """
    # Keep the api-id/stage/method prefix (first three '/'-separated parts).
    prefix_parts = arn.split("/")[:3]
    return "/".join(prefix_parts) + "/*"
from typing import Union
import hashlib
def etag(content: Union[str, bytes]) -> str:
    """Calculate a unique ETag (SHA-256 hex digest) for the provided content.
    Parameters
    ----------
    content : Union[str, bytes]
        Content for which the etag should be calculated; strings are
        UTF-8 encoded first.
    Returns
    -------
    str
        Calculated etag.
    """
    data = content.encode("utf-8") if isinstance(content, str) else content
    return hashlib.sha256(data).hexdigest()
def parse_problems(lines):
    """ Given a list of lines, parses them and returns a list of problems.

    NOTE(review): assumes the input alternates a count line with, when the
    count is positive, one space-separated data line — confirm the format.
    """
    problems = []
    i = 0
    while i < len(lines):
        if int(lines[i]) > 0:
            # A positive count is followed by a single data line of items.
            problems.append(lines[i+1].strip().split(" "))
            i += 2
        else:
            # Zero (or negative) count: record an empty problem.
            problems.append([])
            i += 1
    return problems | ad82d12fcbbc36481ea426197c75dd2096b40756 | 110,349 |
def combine_dicts(*args):
    """Return a new dict combining all args; later args override earlier keys.
    :param args: any number of dict-type objects
    :returns: a dict which is the result of combining all the args
    :rtype: dict
    """
    combined = {}
    for mapping in args:
        combined.update(mapping)
    return combined
def wrap_with_callbacks(func):
    """Call callbacks before and/or after a member function
    If this decorator is used on a member function, and if there is also
    a member function defined on the object called
    '{before/after}_{function_name}_callback', that function will be
    called before/after the member function, with the same arguments
    that are given to the member function.

    NOTE(review): the lookup uses ``self.__dict__``, so only callbacks
    assigned directly on the *instance* are found — class-level methods are
    ignored. Confirm whether that is intentional.
    """
    def callback(self, when, *args, **kwargs):
        func_name = func.__name__
        callback_name = f'{when}_{func_name}_callback'
        f = self.__dict__.get(callback_name)
        if f is not None:
            f(*args, **kwargs)
    def wrapper(self, *args, **kwargs):
        callback(self, 'before', *args, **kwargs)
        ret = func(self, *args, **kwargs)
        callback(self, 'after', *args, **kwargs)
        return ret
    return wrapper | e7d654fb7d4993f88fabb7cdeb0f42fa96c17f4b | 110,368 |
def _format_newlines(prefix, formatted_node, options):
"""
Convert newlines into U+23EC characters, followed by an actual newline and
then a tree prefix so as to position the remaining text under the previous
line.
"""
replacement = u''.join([
options.NEWLINE,
u'\n',
prefix])
return formatted_node.replace(u'\n', replacement) | 12d7b4b4bf9d248642ce11645ce992c3324501fc | 110,372 |
def is_transect_survey(sheet):
    """Is this worksheet a secondary "transect survey"?

    NOTE(review): ``sheet`` is expected to expose a ``.name`` string (e.g. an
    xlrd/openpyxl worksheet) — confirm with callers.
    """
    return 'transect' in sheet.name.lower() | 093e94a0e335bc3b98dfcff0c94e4d2e153ecdc4 | 110,375 |
def remove_from_list(list_name, list_to_be_removed_from, content, begin_index):
    """
    Remove an entry from a list.

    :param list_name: The name of the list.
    :param list_to_be_removed_from: The list to remove the element from.
    :param content: The message.
    :param begin_index: The begin index of the entry.
    :return: The return note.
    """
    words = content.split()
    if len(words) < (begin_index + 1):
        return f"Hmm... I think you forgot to enter what you want to remove. Please follow the format 'dogg remove from {list_name} list [...]' "
    # Entries are stored with a trailing space after every word, so keep
    # that convention when rebuilding the entry from the message words.
    entry = "".join(word + " " for word in words[begin_index:])
    try:
        list_to_be_removed_from.remove(entry)
    except ValueError:
        return f"I think the entry you wish to remove isn't there in the list. Are you sure about that? Maybe use " \
               f"`dogg print {list_name} list` command before issuing this command. "
    return f"The element is successfully removed from the {list_name} list."
def find_index(text, pattern, index=0):
    """Return the starting index of the first occurrence of pattern in text,
    or None if not found.

    Searching begins at ``index``; an empty pattern matches immediately at
    the starting index.

    O(N*M) where N is the length of text and M is the length of the pattern.
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    # Bug fix: the message previously formatted `text` instead of `pattern`.
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    if pattern == '':
        return index
    while index < len(text):
        if text[index] == pattern[0]:
            j = index
            for letter in pattern:
                # Stop if we run past the end of text or hit a mismatch.
                if j > len(text) - 1 or text[j] != letter:
                    break
                j += 1
            else:
                # Inner loop completed without a break: full match here.
                return index
        index += 1
    return None
def q2loss_tangent(q):
    """Calculate loss tangent from Q-factor.

    Args:
        q: Q-factor (dimensionless quality factor)

    Returns:
        loss tangent, the reciprocal of the Q-factor
    """
    loss_tangent = 1 / q
    return loss_tangent
def ham_dist(one: str, two: str) -> int:
    """Calculate the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the
    corresponding characters differ.

    :param one: a string
    :type one: str
    :param two: a string to compare to one
    :type two: str
    :returns: the substitution distance between one and two
    :rtype: int
    :raises ValueError: if either string is empty or the lengths differ
    """
    if not one or not two:
        raise ValueError('Cannot find distance between empty strings')
    if len(one) != len(two):
        raise ValueError('Strings must be of the same length')
    return sum(1 for a, b in zip(one, two) if a != b)
def the_row(row, width=3, height=3):
    """Return all coordinates of the fields of the given row number.

    Args:
        row (int): The number of the row.
        width (int): The width of the sudoku.
        height (int): The height of the sudoku.

    Returns:
        list: The coordinates of the row with the given number.

    Raises:
        ValueError: If the row number is invalid.

    Example::

        >>> the_row(1, width=3, height=3)
        [(1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8)]
    """
    size = width * height
    if not 0 <= row < size:
        # Bug fix: `"%d" % width * height` parsed as ("%d" % width) * height,
        # repeating the message instead of formatting width * height; the
        # wording was also inverted ("less equal 0").
        raise ValueError(
            "row must be greater or equal to 0 and less than %d" % size)
    return [(row, c) for c in range(size)]
import torch
def abs_loss(labels, predictions):
    """Mean absolute error between predictions and labels.

    Args:
        labels (array[float]): 1-d array of labels
        predictions (array[float]): 1-d array of predictions

    Returns:
        float: sum of |prediction - label| divided by the number of labels
    """
    deviations = torch.abs(predictions - labels)
    return torch.sum(deviations) / len(labels)
def bmatrix(aaa):
    """Returns a LaTeX bmatrix

    :aaa: numpy array or pg.matrix
    :returns: LaTeX bmatrix as a string
    """
    nrows, ncols = aaa.shape
    pieces = ["\\begin{bmatrix} "]
    for r in range(nrows):
        for c in range(ncols):
            value = aaa[r][c]
            re_part = value.real
            im_part = value.imag
            # Clamp tiny numerical noise to exactly zero before formatting.
            if abs(re_part) < 1e-16:
                re_part = 0.0
            if abs(im_part) < 1e-16:
                im_part = 0.0
            if im_part == 0.0:
                pieces.append("{:.4}".format(re_part))
            elif re_part == 0.0:
                pieces.append("{:.4}j".format(im_part))
            else:
                pieces.append("{:.4}".format(value))
            # Row separator after the last column, column separator otherwise.
            pieces.append(" \\\\ " if c + 1 == ncols else " & ")
    pieces.append("\\end{bmatrix}")
    return "".join(pieces)
from typing import Mapping
from typing import Sequence
def partition_chunks_by_null_result(result_by_chromosome):
    """
    Bin chromosome name by whether processing result was null.

    :param Sequence[(str, object)] | Mapping[str, object] result_by_chromosome:
        pairs of name and result of processing
    :return (list[str], list[str]): sequence of names of chromosomes for which
        result was null, and an analogous sequence for those with a non-null
        result
    """
    # The filtration strategy is fixed to null-ness of the result because
    # cPickle cannot serialize an arbitrary callable argument.
    null_names, valid_names = [], []
    pairs = result_by_chromosome
    if isinstance(result_by_chromosome, Mapping):
        pairs = result_by_chromosome.items()
        if not isinstance(result_by_chromosome, Sequence):
            # Sort Mapping input so results agree across input types,
            # without reordering an already-ordered Mapping.
            pairs = sorted(pairs)
    for name, result in pairs:
        if result is None:
            null_names.append(name)
        else:
            valid_names.append(name)
    return null_names, valid_names
def validate_time_course(simulation):
    """ Validate that BoolNet can execute the desired time course

    Args:
        simulation (:obj:`UniformTimeCourseSimulation`): simulation

    Returns:
        nested :obj:`list` of :obj:`str`: errors
    """
    errors = []

    if simulation.initial_time != 0:
        errors.append(['Initial time must be 0.'])

    # The messages promise *non-negative* integers, so enforce sign as well
    # as integrality (previously only integrality was checked).
    if simulation.output_start_time != int(simulation.output_start_time) or simulation.output_start_time < 0:
        errors.append(['Output start time must be a non-negative integer.'])

    if simulation.output_end_time != int(simulation.output_end_time) or simulation.output_end_time < 0:
        errors.append(['Output end time must be a non-negative integer.'])

    if (simulation.output_end_time - simulation.output_start_time) != simulation.number_of_points:
        errors.append(['Number of points ({}) must be equal to the difference between the output end ({}) and start times ({}).'.format(
            simulation.number_of_points, simulation.output_end_time, simulation.output_start_time)])

    return errors
import csv
def read_all_reviews_seperately(filename1, filename2):
    """
    Read all the reviews and the reviewCount for each restaurant

    Args:
        filename1: path to a CSV file of reviews (one review per cell)
        filename2: path to a CSV file of review counts (one count per cell)

    Returns:
        Tuple(list of reviewCounts for each restaurant, list of all reviews)
    """
    with open(filename2, 'r', encoding='utf-8') as f2:
        reviewCount_list = []
        for row in csv.reader(f2):
            reviewCount_list += row
    # Materialize as a list: the original returned a one-shot `map`
    # iterator that was exhausted after a single traversal.
    reviewCounts = [int(count) for count in reviewCount_list]
    with open(filename1, 'r', encoding='utf-8') as f1:
        reviews = []
        for row in csv.reader(f1):
            reviews += row
    return (reviewCounts, reviews)
def surface_tension_temp(T, a=241.322, b=1.26, c=0.0589, d=0.5, e=0.56917, Tc=647.096):
    """Temperature-dependent surface tension of water.

    Empirical parameterization of the surface tension of water as a
    function of temperature, following Kalova and Mares (2018).

    Parameters
    ----------
    T : float or array-like
        temperature, on the same scale as ``Tc``
    a, b, c, d, e : float, optional
        model parameters of the sigma parameterization
    Tc : float, optional
        reference temperature of the parameterization
        (647.096 -- presumably kelvin; confirm against the source paper)

    Returns
    -------
    sigma : float
        surface tension in SI units (N/m)
    """
    reduced = 1 - T / Tc
    sigma_mN = a * reduced**b * (1 - c * reduced**d - e * reduced)
    # The parameterization yields mN/m; convert to N/m.
    return sigma_mN * 1e-3
import hashlib
def hash_str(f):
    """
    Return the hex-encoded SHA-256 digest of ``str(f)``.
    """
    digest = hashlib.sha256()
    digest.update(str(f).encode())
    return digest.hexdigest()
import mimetypes
def _guess_doc_mimetype(doc):
"""
Guess the MIME type for a Weboob document.
:param doc: A Weboob ``Document`` object.
:return: The guessed MIME type or ``application/octet-stream``.
"""
# Python mimetypes library expects a filename, so we have to build a
# filename from the extension taken from Weboob data.
guessed_mime = mimetypes.guess_type("foobar.%s" % doc.format)
return guessed_mime[0] if guessed_mime[0] else 'application/octet-stream' | b903546fdadb4da832054a20d789d7970167f009 | 110,412 |
def get_linear_formula(start_i, end_i):
    """
    Get Patsy formula string that has the first order terms for the variables
    that range from start_i to end_i (inclusive).

    >>> get_linear_formula(4, 9)
    'x4 + x5 + x6 + x7 + x8 + x9'
    """
    terms = ['x%d' % i for i in range(start_i, end_i + 1)]
    return ' + '.join(terms)
import attr
def get_required_fields(cls):
    """Return the names of the mandatory fields for a resource class.

    A field is mandatory when it has no default value.
    """
    required = set()
    for field in attr.fields(cls):
        if field.default is attr.NOTHING:
            required.add(field.name)
    return required
def normalize(s):
    """
    Remove all of a string's whitespace characters and lowercase it.
    """
    kept = []
    for ch in s.lower():
        if not ch.isspace():
            kept.append(ch)
    return "".join(kept)
def _split(f, s, a_p='', a_s='', b_p='', b_s='', reverse=False):
"""Split string on a symbol and return two string, first possible empty"""
splitted = f.split(s)
if len(splitted) == 1:
a, b = '', splitted[0]
if reverse:
b, a = a, b
else:
a, b = splitted
if a:
a = '%s%s%s' % (a_p, a, a_s)
if b:
b = '%s%s%s' % (b_p, b, b_s)
return a, b | 1e66d7776a77930f2ca1e7d4e6208e0d67131858 | 110,421 |
def get_os2_unicoderange_bitmap(font):
    """Get an integer bitmap representing the UnicodeRange fields in the os/2 table."""
    os2 = font['OS/2']
    # Pack the four 32-bit ulUnicodeRange fields into one 128-bit integer,
    # range 1 in the lowest 32 bits.
    bitmap = os2.ulUnicodeRange4
    bitmap = (bitmap << 32) | os2.ulUnicodeRange3
    bitmap = (bitmap << 32) | os2.ulUnicodeRange2
    bitmap = (bitmap << 32) | os2.ulUnicodeRange1
    return bitmap
def remove_duplicates(seq):
    """Remove duplicates from a sequence while preserving the order.

    The first occurrence of each item is kept.
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def extract_json_values(obj: dict, key: str) -> list:
    """
    Pull all values of specified key from nested JSON.

    Args:
        obj (dict): nested dict
        key (str): name of key to pull out

    Returns:
        list: all values found for ``key``, in traversal order.  Note that
        values which are themselves dicts/lists are recursed into rather
        than collected.
    """
    found = []

    def _walk(node):
        """ Recursively search for values of key in JSON tree. """
        if isinstance(node, dict):
            for k, v in node.items():
                if isinstance(v, (dict, list)):
                    _walk(v)
                elif k == key:
                    found.append(v)
        elif isinstance(node, list):
            for item in node:
                _walk(item)

    _walk(obj)
    return found
def pd6(shellmound_model):
    """Perioddata defined with month-end frequency
    """
    # NOTE(review): this reads like a pytest fixture built on a
    # `shellmound_model` fixture -- confirm against the surrounding module.
    m = shellmound_model
    # month end vs month start freq
    m.cfg['tdis']['perioddata']['freq'] = '6M'
    m.cfg['tdis']['options']['start_date_time'] = '2007-04-01'
    m.cfg['tdis']['perioddata']['end_date_time'] = '2015-10-01'
    m.cfg['tdis']['perioddata']['nstp'] = 15
    # Presumably clearing the cached table forces `m.perioddata` to rebuild
    # from the cfg values set above -- TODO confirm.
    m._perioddata = None
    # Return a copy so callers can mutate it without touching the model.
    pd6 = m.perioddata.copy()
    return pd6
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.