content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def read_file(file_path):
    """Read the file at *file_path* and return its contents as bytes.

    Uses a context manager so the handle is closed even if the read
    raises (the original opened the file and never closed it).

    :param file_path: path of the file to read
    :return: the file contents as a bytes object
    """
    with open(file_path, 'rb') as f:
        return f.read()
def headers_are_same_geogrid(header1, header2):
    """
    Check whether two grid headers describe the same geogrid: identical
    spatial reference (projection), geotransform, overall size and
    fraction sizes.
    """
    # Short-circuit on the projection check first, as the original did.
    if not header1.spatialref.IsSame(header2.spatialref):
        return False
    return (header1.geot == header2.geot
            and header1.width == header2.width
            and header1.height == header2.height
            and header1.frac_width == header2.frac_width
            and header1.frac_height == header2.frac_height)
def socket_recvall(socket, length, bufsize=4096):
    """Read up to *length* bytes from *socket*.

    Repeatedly calls ``socket.recv`` until *length* bytes have been
    collected or the peer closes the connection (``recv`` returns b'').

    Fixes two defects in the original: an infinite loop when the peer
    closed early (empty reads never advanced the count), and over-reading
    past *length* (each recv asked for a full *bufsize* regardless of how
    few bytes were still needed).

    :param socket: a connected socket-like object exposing .recv(n)
    :param length: total number of bytes to read
    :param bufsize: maximum chunk size per recv call
    :return: the bytes read (may be shorter than *length* on early close)
    """
    data = b""
    while len(data) < length:
        chunk = socket.recv(min(bufsize, length - len(data)))
        if not chunk:
            break  # connection closed before *length* bytes arrived
        data += chunk
    return data
def _get_user_id_from_access_token(access_token):
"""
Get user's identifier from the access token claims
"""
token_user_id = access_token.get("sub")
if token_user_id:
try:
token_user_id = int(token_user_id)
except ValueError:
# if we can't cast to an int, that's an issue. fence should
# only issue access tokens with the user's id as the sub field.
token_user_id = None
return token_user_id | 11dce739d09770907ddf316bfb2782c71b627151 | 95,216 |
def slices_union(seq):
    """Merge 2-tuples treated as right-open intervals.

    The tuples are sorted, then any interval whose start is not past the
    previous interval's end is coalesced into it (the merged end is taken
    from the later tuple, matching the original behaviour).
    """
    merged = []
    for start, end in sorted(seq):
        if merged and merged[-1][1] >= start:
            merged[-1] = (merged[-1][0], end)
        else:
            merged.append((start, end))
    return merged
def picasToPoints(picas):
    """Convert a length in picas to points (1 pica == 12 points)."""
    POINTS_PER_PICA = 12.0
    return picas * POINTS_PER_PICA
def indent_block(block, level=1):
    """
    Indent every line of the provided block of text.

    Args:
        block (str): The text to indent.
        level (int): The number of tabs to indent with.

    Returns:
        str: The indented block.
    """
    prefix = "\t" * level
    lines = str(block).splitlines()
    return prefix + ("\n" + prefix).join(lines)
def uuid_convert(value):
    """Strip a leading 'uuid:'-style prefix, returning the part after the last colon."""
    return value.rsplit(":", 1)[-1]
import time
def timestamp(date):
    """Convert a datetime to a Unix timestamp (interpreted in local time).

    :param datetime date:
    :returns int: The timestamp of date.
    """
    seconds = time.mktime(date.timetuple())
    return int(seconds)
def name_of_class(pred):
    """Map a FashionMNIST class index to its human-readable name."""
    class_names = ("T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
                   "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot")
    return class_names[pred]
def clog2(x):
    """Ceiling log 2 of x.

    Returns the smallest i with 2**i >= x (and 0 for x <= 1).  The
    original shift-and-count loop computed exactly the bit length of
    x - 1, one bit at a time; ``int.bit_length`` does it in O(1).

    >>> clog2(0), clog2(1), clog2(2), clog2(3), clog2(4)
    (0, 0, 1, 2, 2)
    >>> clog2(5), clog2(6), clog2(7), clog2(8), clog2(9)
    (3, 3, 3, 3, 4)
    >>> clog2(1 << 31)
    31
    >>> clog2(1 << 63)
    63
    """
    # The original loop also returned 0 for any x <= 1 (x-1 <= 0 breaks
    # immediately), so this guard preserves that behaviour.
    if x <= 1:
        return 0
    return (x - 1).bit_length()
def combine_ip_and_route_domain(ip, route_domain):
    """Return the BIG-IP style address combining IP and route domain.

    Input ip format must be of the form:
        <ipv4_or_ipv6>
    """
    return "{}%{}".format(ip, route_domain)
def _findbits(fp, bitsperint):
"""Find which bits are set in a list/vector.
This function is used by the Fingerprint class.
>>> _findbits([13, 71], 8)
[1, 3, 4, 9, 10, 11, 15]
"""
ans = []
start = 1
for x in fp:
i = start
while x > 0:
if x % 2:
ans.append(i)
x >>= 1
i += 1
start += bitsperint
return ans | 372bb31f13c58b315f8b0c90722c1e346965bced | 95,243 |
def search_dict_of_lists(value, dictionary):
    """
    Search through a dictionary of lists for a given value.

    Parameters
    ----------
    value: Any data-type.
        The value we are searching for in the lists.
    dictionary: Dictionary of lists.
        The dictionary of lists to search through.

    Returns
    ----------
    bool
        True if the value occurs in any of the lists, False otherwise.

    Examples
    ----------
    >>> my_dict = {'People': ['John', 'Mary', 'Joseph'],
    ...            'Age': [21, 8, 87]}
    >>> search_dict_of_lists("John", my_dict)
    True
    >>> search_dict_of_lists("Carol", my_dict)
    False
    """
    return any(value in items for items in dictionary.values())
def text_to_word_list(text):
    """Lower-case *text*, strip punctuation and return its word list.

    Punctuation characters are replaced with spaces, apostrophes hanging
    off word boundaries are dropped (contractions are kept), and
    single-character tokens other than 'i' are filtered out.
    """
    cleaned = text.lower().strip()
    for ch in ',.?!:;*-"(){}[]@$%\n':
        cleaned = cleaned.replace(ch, ' ')
    # Drop apostrophes at word boundaries but keep in-word ones.
    cleaned = cleaned.replace("' ", ' ').replace(" '", ' ')
    return [w for w in cleaned.split() if len(w) > 1 or w in ('i')]
def train_test_split(X, y, test_size = 0.3):
    """Split predictors X and targets y into chronological train/test parts.

    The first (1 - test_size) fraction of rows becomes the training set and
    the remaining tail the test set — no shuffling, which is what you want
    for time-series data where the model must be evaluated on data it has
    never seen.

    Keyword arguments:
    X -- data frame or numpy array containing predictors
    y -- dataframe/series containing predicted values (must expose .values)
    test_size -- fraction of rows assigned to the test sample
    """
    assert len(X) == len(y), "X and y not the same size"
    split_at = int((1 - test_size) * len(X))
    train_y = y[:split_at].values.reshape(-1, 1)
    test_y = y[split_at:].values.reshape(-1, 1)
    return X[:split_at], X[split_at:], train_y, test_y
import hashlib
def get_sha1_digest(data: bytes) -> bytes:
    """Return the 20-byte SHA-1 digest of the given data bytes."""
    return hashlib.sha1(data).digest()
import typing
def find_all(iterable: typing.Iterable, predicate: typing.Callable) -> typing.List[typing.Any]:
    """
    Return every element of *iterable* for which *predicate* is truthy.

    An empty list is returned when nothing matches.

    Parameters
    ----------
    iterable
        An iterable to search through.
    predicate
        A callable taking one element and returning a boolean.

    Returns
    -------
    List[Any]
        The elements that met the predicate.
    """
    return [element for element in iterable if predicate(element)]
def sum_of_columns(dataframe, columns):
    """
    Sum the specified columns of the dataframe element-wise.

    :param dataframe: the dataframe (or any mapping of column -> addable)
    :param columns: the columns of the dataframe to add up
    :return: the sum of the specified columns
    """
    total = 0
    for column in columns:
        total = total + dataframe[column]
    return total
def _binaryExponent(base, exponent):
"""Calculate base^exponent using the binary exponentiation algorithm.
Depends on another function that verifies that all parameters are
properly given.
Arguments:
base: Intiger or floating point number.
exponent: Intiger number.
Returns:
base^exponent
Raises
RuntimeError: When trying to perform 0^0
"""
# For an exponent <= 4 the algorithm makes no sense.
# log_2(4) = 2 and 4 = 0b100, which will result in the exact same
# process as the binary exponentiation algorithm.
#
# Avoiding it, we are actually saving a few operations (the bitwise and the
# comparison).
result = 1
if (exponent == 0 and base == 0):
raise RuntimeError("Magic error happened: 0^0.")
elif (exponent == 0 and base != 0):
return 1
elif (exponent < 5):
for _ in range(exponent):
result *= base
else:
while (exponent > 0):
# Current bit = 1
if (exponent & 1):
result *= base
base *= base
exponent = exponent >> 1
return result | 44ed64fc9666c145567def6f31c7ad5673fd9abf | 95,252 |
def find_index(phrase: str, letter: str) -> int:
    """
    Return the index in 'phrase' of the first occurrence of 'letter'.

    :param phrase: string where the index will be found
    :param letter: substring to locate
    :return: 0-based position of the first match, or -1 when absent
    """
    # str.find already returns -1 on a miss, matching the original's
    # try/index/except-ValueError behaviour for string inputs.
    return phrase.find(letter)
def _swap(f, i):
    """
    Make the variable `x_i` the leading one in a multivariate polynomial `f`.

    Builds a new polynomial in the same ring whose exponent tuples have the
    exponent of `x_i` rotated to the front; remaining exponents keep their
    relative order and coefficients are unchanged.

    NOTE(review): assumes `f` is a sparse polynomial exposing `.ring`,
    `.iterterms()` and item assignment by monomial (e.g. a SymPy
    PolyElement) -- confirm against callers.
    """
    ring = f.ring
    fswap = ring.zero
    for monom, coeff in f.iterterms():
        # Rotate exponent i into the leading position.
        monomswap = (monom[i],) + monom[:i] + monom[i+1:]
        fswap[monomswap] = coeff
    return fswap | 8c162b17a9f898e8fa0ec8793e469ef0c1d87759 | 95,256 |
def dedup(d, preferred):
    """
    Remove duplicate-valued entries from a dict.

    For each distinct value exactly one key survives.  Keys listed in
    *preferred* win over others, and later entries of *preferred* win
    over earlier ones.
    """
    by_value = {}
    for key, val in d.items():
        by_value[val] = key
    for key in preferred:
        by_value[d[key]] = key
    return {key: val for val, key in by_value.items()}
def truncate_message(message, limit=100):
    """Display the first characters of a message.

    Messages longer than *limit* are cut, stripped of blank spaces at the
    cut point, and terminated with an ellipsis so the result still fits
    within *limit*.

    Args:
        message (str): Message to truncate.
        limit (int): Maximum size of the message.

    Returns:
        str: Truncated message.
    """
    assert limit - 3 > 0, "Limit too short"
    if len(message) > limit:
        message = message[: limit - 3].strip() + "..."
    return message
def time_format(t):
    """
    Produce the timestamp format expected by the srt format
    (``HH:MM:SS,mmm`` -- zero-padded fields, comma before milliseconds),
    given a parameter t representing a number of seconds.

    The original emitted unpadded fields with a '.' separator and
    centiseconds (e.g. '1:1:1.50'), which srt parsers reject.
    """
    whole = int(t)
    hours, rem = divmod(whole, 3600)
    minutes, seconds = divmod(rem, 60)
    # Truncate (not round) the fractional part, as the original did.
    millis = int((t % 1) * 1000)
    return "{:02d}:{:02d}:{:02d},{:03d}".format(hours, minutes, seconds, millis)
import functools
def compose(*funcs):
    """
    Compose any number of unary functions into a single unary function.

    ``compose(f, g, h)(x)`` evaluates ``f(g(h(x)))`` -- the rightmost
    (innermost) function is applied first and may take arbitrary
    arguments; every other function receives a single value.
    """
    def _pair(outer, inner):
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))
        return composed
    return functools.reduce(_pair, funcs)
def slicebyn(obj, n, end=None):
    """
    Iterator over n-length slices of obj from index 0 up to *end*.

    *end* defaults to len(obj); the final slice may be shorter than n.
    """
    stop = len(obj) if end is None else end
    return (obj[start:start + n] for start in range(0, stop, n))
def number_of_ballots_in_profile(P):
    """
    Return the total number of ballots in profile P (sum of its counts).
    """
    return sum(P.values())
def count_orbits(orbits, cur_object, cur_orbit):
    """Count direct and indirect orbits starting from cur_object.

    *orbits* maps each object to the list of objects orbiting it;
    *cur_orbit* is the depth of *cur_object* itself, contributed once per
    visited node.
    """
    children = orbits[cur_object]
    if not children:
        return cur_orbit
    total = cur_orbit
    for child in children:
        total += count_orbits(orbits, child, cur_orbit + 1)
    return total
def mention_match(mention1, mention2):
    """
    Check if two mentions match each other.

    Matching condition: one of the mentions is a substring of the other.
    """
    return (mention1 in mention2) or (mention2 in mention1)
def parse_mailboxes(imap_mailboxes):
    """Translate a twisted imap4 mailbox-listing result into a list of names.

    Each entry is a (flags, delimiter, name) triple; the name (index 2)
    is extracted.
    """
    names = []
    for entry in imap_mailboxes:
        names.append(entry[2])
    return names
def get_chart_parameters(prefix='', rconn=None):
    """Return (view, flip, rot) values of the control chart.

    Reads `<prefix>view`, `<prefix>flip` and `<prefix>rot` from the given
    redis-like connection; falls back to the defaults (100.0, False, 0.0)
    when no connection is given or any key is missing/undecodable.

    :param prefix: key prefix prepended to 'view'/'flip'/'rot'
    :param rconn: redis-like connection whose .get() returns bytes, or None
    :return: (view: float, flip: bool, rot: float)
    """
    if rconn is None:
        return (100.0, False, 0.0)
    try:
        view = rconn.get(prefix+'view').decode('utf-8')
        flip = rconn.get(prefix+'flip').decode('utf-8')
        rot = rconn.get(prefix+'rot').decode('utf-8')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; a missing key (.get() -> None -> AttributeError) or
        # a decode failure still yields the defaults.
        return (100.0, False, 0.0)
    # NOTE(review): bool(flip) is True for ANY non-empty string, including
    # "False" and "0" -- confirm the writer stores flip as ''/non-empty.
    return float(view), bool(flip), float(rot)
import torch
from typing import Tuple
from typing import List
def calc_errors(
    output: torch.Tensor, target: torch.Tensor, topk: Tuple[int, ...] = (1,)
) -> List[torch.Tensor]:
    """Calculate top-k error rates in percent.

    Args
        output (torch.Tensor): model scores, shape (batch, classes).
        target (torch.Tensor): ground-truth class indices, shape (batch,).
        topk (Tuple[int, ...]): the k values to report errors for.
    Returns:
        List[torch.Tensor]: one single-element tensor per k holding the
        percentage of samples whose target is NOT among the top-k scores.
    """
    with torch.no_grad():
        batch = target.size(0)
        k_max = max(topk)
        # Indices of the k_max highest scores per sample: (batch, k_max),
        # transposed to (k_max, batch) so rank r is row r.
        top_idx = output.topk(k_max, dim=1)[1].t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        errors = []
        for k in topk:
            n_correct = hits[:k].reshape((-1,)).float().sum(0, keepdim=True)
            errors.append((batch - n_correct).mul_(100.0 / batch))
        return errors
from bs4 import BeautifulSoup
from typing import Union
def _find_wallpaper_urls(
    page_html: BeautifulSoup,
    title: str,
) -> Union[list, None]:
    """
    Parse HTML and search URLs of the wallpaper.
    One wallpaper has two URLs. For wallpapers with and without calendar.
    Args:
        page_html (BeautifulSoup): HTML for parsing.
        title (str): title in 'Wallpaper name - resolution' format.
        Example - 'Beautiful image - 1920x1080'.
    Returns:
        Union[list, None]: URLs of wallpapers or
        'None' if URLs of wallpapers does not exist.
    """
    # Match only <a> tags whose title attribute equals the requested
    # 'Name - resolution' string exactly.
    wallpaper_urls_tags = page_html.find_all(
        lambda tag:
        tag.name == "a" and
        tag.get("title") == title,
    )
    if wallpaper_urls_tags:
        # Collect the href of every matching anchor (typically the
        # with-calendar and without-calendar variants).
        return [url_tag.get("href") for url_tag in wallpaper_urls_tags]
    return None | ea62203f0325dd1d2a8d19c245bbea1739b939d4 | 95,304 |
def addParam(baseLength: int, sqlBase, sqlToAdd, separator: str = ", "):
    """Concatenate *sqlToAdd* onto *sqlBase*, inserting *separator* first
    whenever *sqlBase* is already longer than *baseLength*."""
    prefix = sqlBase + separator if len(sqlBase) > baseLength else sqlBase
    return prefix + sqlToAdd
def percentage(part: int, whole: int) -> float:
    """Calculate what percent *part* is of *whole*, rounded to 2 decimals.

    :param part: part
    :param whole: whole
    :return: percentage of the whole and the part
    """
    scaled = 100 * float(part)
    return round(scaled / float(whole), 2)
def filter_optional(l, filter_list=None):
    """
    Optionally keep only the elements of *l* that appear in *filter_list*.

    :param l: list to potentially filter
    :param filter_list: allowed elements; None disables filtering
    :return: l unchanged when filter_list is None, otherwise the ordered
        intersection of l and filter_list
    """
    if filter_list is None:
        return l
    return [item for item in l if item in filter_list]
def collided_done(py_measurements):
    """Main episode termination criterion: any collision counter positive."""
    m = py_measurements
    any_collision = (m["collision_vehicles"] > 0
                     or m["collision_pedestrians"] > 0
                     or m["collision_other"] > 0)
    return bool(any_collision)
import string
import re
from typing import Counter
def f1_score(gold_answer: str, pred_answer: str) -> float:
    """
    Calculate the token-level F1 score between two answer strings.

    Both answers are normalised (lower-cased, punctuation and the articles
    a/an/the removed, whitespace collapsed) before token overlap is counted.
    If either raw answer is the literal "<unanswerable>", the score is 1
    when they agree exactly and 0 otherwise.

    See also https://github.com/W4ngatang/qags/blob/master/qa_utils.py#L43

    Args:
        gold_answer (str): answer selected based on source document
        pred_answer (str): answer selected based on generated summary
    """
    punct = set(string.punctuation)

    def _clean(raw: str) -> str:
        lowered = raw.lower()
        no_punct = "".join(ch for ch in lowered if ch not in punct)
        no_articles = re.sub(r"\b(a|an|the)\b", " ", no_punct)
        return " ".join(no_articles.split())

    gold_toks = _clean(gold_answer).split()
    pred_toks = _clean(pred_answer).split()
    overlap = Counter(gold_toks) & Counter(pred_toks)
    num_same = sum(overlap.values())
    # Special-case the unanswerable sentinel: exact agreement or nothing.
    if gold_answer == "<unanswerable>" or pred_answer == "<unanswerable>":
        return int(gold_answer == pred_answer)
    if num_same == 0:
        return 0.0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    return (2 * precision * recall) / (precision + recall)
def set_savings (problem, alpha=0.3):
    """
    This method calculate the saving of edges according to the given alpha.
    NOTE: Edges are modified in place.
    :param problem: The instance of the problem to solve.
    :param alpha: The alpha parameter of the PJS; blends distance saving
        (weight 1 - alpha) with node revenue (weight alpha).
    :return: The problem instance modified in place.
    """
    dists, depot = problem.dists, problem.depot
    for edge in problem.edges:
        cost, inode, jnode = edge.cost, edge.inode, edge.jnode
        revenue = inode.revenue + jnode.revenue
        # Per-source saving: distance saved by serving i-j together relative
        # to the depot/source legs, blended with the endpoints' revenue.
        # NOTE(review): assumes dists is indexable by (id, id) pairs and that
        # every node carries a .revenue attribute -- confirm against callers.
        edge.savings = {
            source.id : (1.0 - alpha)*(dists[inode.id, depot.id] + dists[source.id, jnode.id] - cost) + alpha*revenue
            for source in problem.sources}
    return problem | 58ee38f93a885786e4915f8932834b92e3caba18 | 95,337 |
def get_app_module_name_list(modules):
    """
    Collect the 'name' field of each module dict from a Ghost App.

    Modules lacking a 'name' key are skipped.

    >>> get_app_module_name_list([{'name': 'symfony2'}])
    ['symfony2']
    >>> get_app_module_name_list([{'path': '/var/www'}])
    []
    >>> get_app_module_name_list([{'name': 'mod1'}, {'noname': 'x'}, {'name': 'mod3'}])
    ['mod1', 'mod3']
    """
    names = []
    for module in modules:
        if 'name' in module:
            names.append(module['name'])
    return names
from datetime import datetime
def datetime_str_to_int(date_time_str):
    """Convert an ISO-ish datetime string to an integer Unix timestamp.

    Anything from the first '+' onward (a UTC-offset suffix) is discarded,
    so the remainder is parsed as naive local time.

    The original returned a float from ``.timestamp()`` despite the name
    and docstring promising an integer; the result is now truncated to int.

    :param date_time_str: e.g. '2020-01-01T12:30:00.000+00:00'
    :return: POSIX timestamp as an int
    """
    local_part = date_time_str.split('+')[0]  # delete UTC offset suffix
    parsed = datetime.strptime(local_part, '%Y-%m-%dT%H:%M:%S.%f')
    return int(parsed.timestamp())
def get_days(driver):
    """
    Return the first five 'day' web elements (monday - friday) of the week.

    :param driver: webdriver
    :return: list of at most five day webelements (containing the lessons)
    """
    day_elements = driver.find_elements_by_class_name('day')
    return day_elements[:5]
def filter_data(all_data):
    """
    Takes the data from SIMS and removes all students not in
    block 1 of the current academic year.
    This should remove any students on placement (block 1S),
    doing a dissertation (block D1/D2) or repeating something (block 2)

    NOTE(review): the academic year is hard-coded below ('2017/8') and
    must be updated each year -- consider making it a parameter.
    """
    # Remove all students not from this academic year
    YEAR_COLUMN = 'Acad Year'
    THIS_YEAR = '2017/8'
    filtered_data = all_data.loc[all_data[YEAR_COLUMN] == THIS_YEAR]
    # Remove all students not currently in block 1 (so those doing placement or dissertation)
    BLOCK_COLUMN = 'Block'
    filtered_data = filtered_data.loc[filtered_data[BLOCK_COLUMN] == '1']
    return filtered_data | da2b150e48f1252b475f85ba32df46f979f70817 | 95,354 |
def get_court(court_string):
    """Find the court(s) in which the judge presides.

    Returns the first stripped string of the given soup element.
    """
    strings = list(court_string.stripped_strings)
    return strings[0]
def get_all_user_accounts(messenger_blobs):
    """
    Collect every distinct user account from the messenger blobs.

    A user account is the pair (user_id, network); the same user may hold
    e.g. separate mainnet and testnet accounts, which are kept distinct.
    """
    return {
        (blob["user_id"], blob["dydx_config"]["environment"])
        for blob in messenger_blobs
    }
def constraints(connection):
    """SQL test statements for the ChangeMetaConstraintsTests suite.

    Each entry uses the rebuild-and-rename pattern visible in the SQL:
    create a "TEMP_TABLE" with the desired constraints, copy all rows
    over, drop the old table, and rename the new one into place.

    Args:
        connection (django.db.backends.base.BaseDatabaseWrapper):
            The connection being tested.
    Returns:
        dict:
            The dictionary of SQL mappings.
    """
    return {
        # Keep the base check/unique constraints and append new ones.
        'append_list': [
            'CREATE TABLE "TEMP_TABLE" '
            '("id" integer NOT NULL UNIQUE PRIMARY KEY,'
            ' "int_field1" integer NOT NULL,'
            ' "int_field2" integer NOT NULL,'
            ' "char_field1" varchar(20) NOT NULL,'
            ' "char_field2" varchar(40) NOT NULL,'
            ' CONSTRAINT "base_check_constraint"'
            ' CHECK ("char_field1" LIKE \'test%\' ESCAPE \'\\\'),'
            ' CONSTRAINT "base_unique_constraint_plain"'
            ' UNIQUE ("int_field1", "char_field1"),'
            ' CONSTRAINT "new_unique_constraint"'
            ' UNIQUE ("int_field2", "int_field1"),'
            ' CONSTRAINT "new_check_constraint"'
            ' CHECK ("int_field1" >= 100));',
            'INSERT INTO "TEMP_TABLE"'
            ' ("id", "int_field1", "int_field2", "char_field1", "char_field2")'
            ' SELECT "id", "int_field1", "int_field2", "char_field1",'
            ' "char_field2" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'ALTER TABLE "TEMP_TABLE" RENAME TO "tests_testmodel";',
        ],
        # Rebuild with no table-level constraints at all.
        'removing': [
            'CREATE TABLE "TEMP_TABLE" '
            '("id" integer NOT NULL UNIQUE PRIMARY KEY,'
            ' "int_field1" integer NOT NULL,'
            ' "int_field2" integer NOT NULL,'
            ' "char_field1" varchar(20) NOT NULL,'
            ' "char_field2" varchar(40) NOT NULL);',
            'INSERT INTO "TEMP_TABLE"'
            ' ("id", "int_field1", "int_field2", "char_field1", "char_field2")'
            ' SELECT "id", "int_field1", "int_field2", "char_field1",'
            ' "char_field2" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'ALTER TABLE "TEMP_TABLE" RENAME TO "tests_testmodel";',
        ],
        # Replace the base constraints with a fresh set.
        'replace_list': [
            'CREATE TABLE "TEMP_TABLE" '
            '("id" integer NOT NULL UNIQUE PRIMARY KEY,'
            ' "int_field1" integer NOT NULL,'
            ' "int_field2" integer NOT NULL,'
            ' "char_field1" varchar(20) NOT NULL,'
            ' "char_field2" varchar(40) NOT NULL,'
            ' CONSTRAINT "new_check_constraint"'
            ' CHECK ("char_field1" LIKE \'test%\' ESCAPE \'\\\'),'
            ' CONSTRAINT "new_unique_constraint_plain"'
            ' UNIQUE ("int_field1", "char_field1"));',
            'INSERT INTO "TEMP_TABLE"'
            ' ("id", "int_field1", "int_field2", "char_field1", "char_field2")'
            ' SELECT "id", "int_field1", "int_field2", "char_field1",'
            ' "char_field2" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'ALTER TABLE "TEMP_TABLE" RENAME TO "tests_testmodel";',
        ],
        # Add constraints to a model that previously had none.
        'setting_from_empty': [
            'CREATE TABLE "TEMP_TABLE" '
            '("id" integer NOT NULL UNIQUE PRIMARY KEY,'
            ' "int_field1" integer NOT NULL,'
            ' "int_field2" integer NOT NULL,'
            ' "char_field1" varchar(20) NOT NULL,'
            ' "char_field2" varchar(40) NOT NULL,'
            ' CONSTRAINT "new_check_constraint"'
            ' CHECK ("char_field1" LIKE \'test%\' ESCAPE \'\\\'),'
            ' CONSTRAINT "new_unique_constraint_plain"'
            ' UNIQUE ("int_field1", "int_field2"));',
            'INSERT INTO "TEMP_TABLE"'
            ' ("id", "int_field1", "int_field2", "char_field1", "char_field2")'
            ' SELECT "id", "int_field1", "int_field2", "char_field1",'
            ' "char_field2" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'ALTER TABLE "TEMP_TABLE" RENAME TO "tests_testmodel";',
        ],
    } | fcbd973191b532966a400688f84680b43f28306a | 95,363 |
def gini_impurity(counts):
    """Compute the Gini impurity of a set of counts.

    A decision-tree learner aims to minimise this quantity inside each
    leaf.

    Parameters:
        counts (collections.Counter): class -> occurrence count

    Example:
        >>> gini_impurity({'sunny': 4, 'rainy': 2, 'green': 10})
        0.53125

    References:
        1. `A Simple Explanation of Gini Impurity <https://victorzhou.com/blog/gini-impurity/>`_
    """
    total = sum(counts.values())
    impurity = 0
    for count in counts.values():
        p = count / total
        impurity += p * (1 - p)
    return impurity
def get_dim_int(tensor, dim):
    """
    Compatibility function to get an int shape value for dimension dim of
    a tensor.

    TF1 shapes hold Dimension objects (read via ``.value``); TF2 shapes
    hold plain ints already.

    Parameters
    ----------
    tensor : tf.tensor
        The input tensor.
    dim : int
        The dimension whose shape we want to know.

    Returns
    -------
    shape : int
        The shape of dimension dim.
    """
    entry = tensor.get_shape()[dim]
    try:
        return entry.value  # tensorflow 1: Dimension object
    except AttributeError:
        return entry  # tensorflow 2: already an int
def convert_to_ascii(text):
    """Return *text* with all non-ASCII characters silently dropped."""
    return text.encode("ascii", "ignore").decode()
def area(box):
    """ Calculates area of a box
    @param: (x, y, w, h) tuple of a box
    @return: area (w * h)
    """
    (x, y, width, height) = box
    return width * height
def example_transform(v, row, row_n, i_s, i_d, header_s, header_d, scratch, errors, accumulator):
    """An example column transform showing the full set of arguments.

    A real transform may omit any (or all) of these parameters and list
    them in any order; the calling code inspects the signature.  When the
    function is registered as a transform for a column, it is called once
    for every row of data.

    :param v: The current value of the column
    :param row: A RowProxy object for the whole row.
    :param row_n: The current row number.
    :param i_s: The numeric index of the source column
    :param i_d: The numeric index for the destination column
    :param header_s: The name of the source column
    :param header_d: The name of the destination column
    :param scratch: A dict for storing any values. Persists between rows.
    :param errors: A dict of error messages. Persists for all columns in a
        row, but not between rows.
    :param accumulator: A dict for accumulating values, such as aggregates.
    :return: The final value to be supplied for the column.
    """
    return "{}-foo".format(v)
def passport_valid(
    passport: str,
    required_fields: tuple[str, str, str, str, str, str, str] = (
        "byr",
        "iyr",
        "eyr",
        "hgt",
        "hcl",
        "ecl",
        "pid",
    ),
) -> bool:
    """A passport is valid when every required field name occurs in it."""
    return all(field in passport for field in required_fields)
def find_largest_digit_helper(n, greater):
    """
    Helper for find_largest_digit(n): recursively scan the decimal digits
    of *n*, carrying the largest digit seen so far.

    :param n: int, the remaining digits to examine
    :param greater: int, the largest digit found so far
    :return: the largest digit overall
    """
    if n == 0:
        return greater
    last_digit = n % 10
    if last_digit > greater:
        greater = last_digit
    return find_largest_digit_helper(n // 10, greater)
def neuron_cylinder(cur,name):
    """
    Returns the cylindrical locations of neuron segment centroids.

    Each row is [distance, phi, section_number]: the radial distance and
    angle from the radialPharynx table plus the EM section (z) index.

    NOTE(review): the original docstring claimed [cell_name, radius, phi, z];
    the query actually selects distance, phi and IMG_SectionNumber only.

    Parameters:
    ----------
    cur : MySQLdb
    name : str
        Cell name
    """
    # Join radialPharynx to object/contin/image so we can filter by the
    # cell's alternate name and recover the section number.
    # NOTE(review): `name` is interpolated directly into the SQL string;
    # use a parameterized query if it can come from untrusted input.
    sql = ("select radialPharynx.distance,radialPharynx.phi,image.IMG_SectionNumber "
           "from radialPharynx "
           "join object on object.OBJ_Name=radialPharynx.OBJ_Name "
           "join contin on contin.CON_Number=object.CON_Number "
           "join image on image.IMG_Number=object.IMG_Number "
           "where contin.CON_AlternateName = '%s'" %name)
    cur.execute(sql)
    data = [[int(a[0]),float(a[1]),int(a[2])] for a in cur.fetchall()]
    return data | a3766919f5eb2abf5ce8aca4a5f2789b05078f57 | 95,387 |
import math
def round_up(number, ndigits=0):
    """Round a floating point number *upward* to a given precision.

    Unlike the builtin `round`, the return value is always the smallest
    float *greater than or equal to* the given number matching the
    specified precision.

    Parameters
    ----------
    number : float
        Number to be rounded up.
    ndigits : int, optional
        Number of decimal digits in the result. Default is 0.

    Returns
    -------
    float

    Examples
    --------
    >>> round_up(math.pi)
    4.0
    >>> round_up(math.pi, ndigits=2)
    3.15
    """
    scale = 10 ** ndigits
    return math.ceil(number * scale) / scale
import struct
def loadBowtieSa(fh):
    """Load a .sa file from handle *fh* into a list of ints.

    Layout: one native unsigned int holding the count, followed by that
    many native unsigned ints.
    """
    (count,) = struct.unpack('I', fh.read(4))
    values = []
    for _ in range(count):
        values.append(struct.unpack('I', fh.read(4))[0])
    return values
from typing import Any
from typing import Union
from datetime import datetime
def _pre_serialize(value: Any) -> Union[Any, str]:
"""Convert `value` to str if datetime, otherwise do nothing."""
return value if not isinstance(value, datetime) else str(value) | bf7b1169ab6345000af27bb4c1630bd2d633fee5 | 95,398 |
def token_response(token: str):
    """
    Wrap *token* in the response payload dict.
    """
    # NOTE(review): the key deliberately matches the original 'access token'
    # (with a space) -- confirm clients don't expect 'access_token'.
    return {'access token': token}
def p_topic_given_document(document_topic_counts,
                           document_lengths,
                           topic,
                           d,
                           k,
                           alpha=0.1):
    """Fraction of words in document *d* assigned to *topic*, smoothed.

    Applies additive smoothing with parameter *alpha* over the *k* topics.
    """
    numerator = document_topic_counts[d][topic] + alpha
    denominator = document_lengths[d] + k * alpha
    return numerator / denominator
def read_elemnames(filename, delimiter='\n'):
    """
    Read element names from a text file into a list of strings.

    Each chunk between *delimiter* occurrences is stripped of surrounding
    whitespace (which also handles \\r\\n line endings); empty chunks are
    dropped.

    Parameters
    ----------
    filename : str
        input file name
    delimiter : str
        (Optional) which separator to use for elements,
        default is the (unix) linefeed
    """
    with open(filename) as handle:
        raw = handle.read()
    return [part.strip() for part in raw.split(delimiter) if part.strip()]
def reindex_halo_subsamples(halos, subsamp_start_key='subsamp_start', subsamp_len_key='subsamp_len'):
    """
    If we concatenate halos and particles into big files/arrays, the "subsample start" indices
    in the halos table no longer correspond to the concatenated particle array. But we can
    easily reconstruct the correct indices.

    Parameters
    ----------
    halos: ndarray
        The FoF halo catalog from `read_halos_FoF`
        The halo catalog must be in its original order (i.e. the order
        on disk, because the subsamples on disk are in the same order)
    subsamp_start_key: str, optional
        Field name holding each halo's particle-subsample start index.
    subsamp_len_key: str, optional
        Field name holding each halo's particle-subsample length.

    Returns
    -------
    njump: int
        The number of discontinuities that were fixed in the subsample indexing.
        Should be equal to the number of file splits.
    """
    # The number of "discontinuities" in particle indexing should equal the number of files
    # This helps us make sure the halos were not reordered
    njump = (halos[subsamp_start_key][:-1] + halos[subsamp_len_key][:-1] != halos[subsamp_start_key][1:]).sum()
    # Now reindex the halo records
    # A valid catalog begins at particle 0; anything else means reordering.
    assert halos[subsamp_start_key][0] == 0
    # Each start becomes the running total of all preceding subsample lengths.
    halos[subsamp_start_key][1:] = halos[subsamp_len_key].cumsum()[:-1]
    return njump | 59002b381f1107f5a655133906b359fbd8f47069 | 95,410 |
import optparse
def parse_options(args=None, values=None):
    """
    Define and parse `optparse` options for command-line usage.

    Returns a dict with the input toolbox path ('input') and the
    implicit-confirmation flag ('implicit_run').  Raises Exception when
    no toolbox path is supplied.
    """
    parser = optparse.OptionParser(
        usage="""%prog [options] [TOOLBOX_PATH]""",
        description="Generate ArcGIS Metadata from markdown'd toolbox code. ",
    )
    parser.add_option("-y", "--yes", dest="yes", default=None, action='store_true',
                      help="Implicit confirmation to run")
    options, positional = parser.parse_args(args, values)
    if not positional:
        raise Exception("Input toolbox needed")
    return {
        'input': positional[0],
        'implicit_run': options.yes,
    }
import time
def get_current_time() -> int:
    """Return seconds since the epoch, truncated to whole seconds."""
    return int(time.time())
def fq_name(partition, value, sub_path=''):
    """Return a 'Fully Qualified' resource name for a BIG-IP.

    A BIG-IP expects most resource names in fully-qualified (/partition/name)
    form.  Callers may pass either a bare name or an already fully-qualified
    one; this function yields the same result in both cases:

        # Name not FQ
        name: foo
        partition: Common

        # Name FQ
        name: /Common/foo
        partition: Common

    both produce ``/Common/foo``.

    Args:
        partition (string): The partition to attach to the name when the
            name carries no partition of its own.
        value (string): The name to qualify.  Returned unchanged if it is
            already fully qualified (unless it is purely numeric).
        sub_path (string): Optional sub path element.  When given, it is
            inserted between partition and name, including for names that
            are already fully qualified.

    Returns:
        string: The fully qualified name, given the input parameters.
    """
    if value is None:
        return value
    if sub_path == '':
        try:
            # Purely numeric names always receive the partition prefix.
            int(value)
            return f'/{partition}/{value}'
        except (ValueError, TypeError):
            if value.startswith('/'):
                # Already fully qualified.
                return value
            return f'/{partition}/{value}'
    try:
        int(value)
        return f'/{partition}/{sub_path}/{value}'
    except (ValueError, TypeError):
        if value.startswith('/'):
            # Splice the sub path into an existing FQ name, keeping the
            # partition the name itself carries.
            _, fq_partition, name = value.split('/')
            return f'/{fq_partition}/{sub_path}/{name}'
        return f'/{partition}/{sub_path}/{value}'
def map_class_names(src_classes, dst_classes):
    """Computes src to dst index mapping
    src2dst[src_idx] = dst_idx
    # according to class name matching, -1 for non-matched ones
    assert(len(src2dst) == len(src_classes))
    ex)
      src_classes = ['person', 'car', 'tree']
      dst_classes = ['tree', 'person', 'sky', 'ball']
      -> Returns src2dst = [1, -1, 0]
    """
    # Build a name -> index lookup once instead of an O(m) `in` + `.index`
    # scan per source class.  `setdefault` keeps the FIRST occurrence of a
    # duplicate name, matching the original `list.index` semantics.
    dst_index = {}
    for idx, name in enumerate(dst_classes):
        dst_index.setdefault(name, idx)
    return [dst_index.get(name, -1) for name in src_classes]
def rot_encode(data: str, n: int) -> str:
    """Rotate letters by a custom number of places (case preserved).

    Args:
        data (str): data to be encoded
        n (int): custom places

    Returns:
        str: Encoded data

    Example:
        rot_encode("aaa", 25) -> "zzz"
    """
    # Offset into the interleaved upper/lower alphabet (2 chars per letter).
    offset = (26 - (-n % 26)) * 2
    alphabet = "AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz"
    rotated = alphabet[offset:] + alphabet[:offset]
    # translate() maps alphabet chars to their rotated partner and leaves
    # every other character untouched.
    return data.translate(str.maketrans(alphabet, rotated))
def check_xml_for_silence(sbf):
    """
    Determine whether a section marked in an Audacity xml file contains
    silence or sound.

    Parameters
    ----------
    sbf : dictionary
        `<simpleblockfile>` tag parsed from Audacity xml file

    Returns
    -------
    silence : boolean
        True if silence (min, max and rms are all zero), False if sound
    """
    return all(float(sbf[key]) == 0.0 for key in ('min', 'max', 'rms'))
def RGB_TO_HSB(col):
    """
    Convert a 3-tuple with RGB values (in range 0..255) into a 3-tuple
    with HSB color values in range [0..1].
    """
    red, green, blue = col
    high = float(max(red, green, blue))
    low = float(min(red, green, blue))
    spread = high - low
    brightness = high / 255.0
    saturation = (spread / high) if high > 0 else 0
    hue = 0
    if saturation > 0:
        # Per-channel distance from the maximum, normalized by the spread.
        redc = (high - red) / spread
        greenc = (high - green) / spread
        bluec = (high - blue) / spread
        if red == high:
            hue = bluec - greenc
        elif green == high:
            hue = 2.0 + redc - bluec
        else:
            hue = 4.0 + greenc - redc
        hue /= 6.0
        if hue < 0:
            hue += 1.0
    return (hue, saturation, brightness)
def GetStartingGapLength(sequence):
    """
    Count the '-' gap characters that occur before the first 'ATG' start
    codon in `sequence`.

    Non-gap characters before the start codon are ignored; if no 'ATG'
    occurs, gaps over the whole sequence are counted.

    Parameters:
    - sequence: a string along which you wish to count the number of gaps
    """
    gap_count = 0
    for idx in range(len(sequence)):
        if sequence[idx:idx + 3] == 'ATG':
            break
        if sequence[idx] == '-':
            gap_count += 1
    return gap_count
def format_percent(x):
    """
    Format a number as a percentage string with two decimal places.

    :param x: numeric value, already expressed in percent
    :return: e.g. ``format_percent(12.5)`` -> ``'12.50%'``
    """
    return '{0:2.2f}%'.format(x)
def z_coord(cube):
    """
    Return the canonical vertical coordinate.

    Examples
    --------
    >>> import iris
    >>> url = ('http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/'
    ...        'fmrc/us_east/US_East_Forecast_Model_Run_Collection_best.ncd')
    >>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
    >>> zvar = z_coord(cube)
    >>> zvar.name()
    'ocean_s_coordinate_g2'
    >>> cube.coord_dims(zvar)
    (1,)

    Raises
    ------
    ValueError
        If no vertical coordinate, or more than one, is found.
    """
    # http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#dimensionless-v-coord
    dimensionless = [
        "atmosphere_hybrid_height_coordinate",
        "atmosphere_hybrid_sigma_pressure_coordinate",
        "atmosphere_sigma_coordinate",
        "atmosphere_sleve_coordinate",
        "ocean_s_coordinate",
        "ocean_s_coordinate_g1",
        "ocean_s_coordinate_g2",
        "ocean_sigma_coordinate",
        "ocean_sigma_z_coordinate",
    ]
    # Setting `dim_coords=True` to avoid triggering the download
    # of the derived `z`. That usually throws a Memory error as it
    # varies with `t`, `x`, `y`, and `z`.
    zvars = cube.coords(axis="Z", dim_coords=True)
    if not zvars:
        zvars = cube.coords(axis="altitude", dim_coords=True)
    if not zvars:
        # Could not find a dimension coordinate; fall back to the known
        # dimensionless vertical coordinates.
        zvars = [
            coord
            for coord in cube.coords(axis="Z")
            if coord.name() in dimensionless
        ]
    if not zvars:
        # FIX: dropped the needless f-prefix on a placeholder-less string.
        raise ValueError("Could not find the vertical coordinate!")
    if len(zvars) > 1:
        raise ValueError(f"Found more than one vertical coordinate: {zvars}")
    return zvars[0]
import inspect
def _call_location(levels=2):
"""Return caller's location a number of levels up."""
f = inspect.currentframe()
if not f:
return '<unknown location>'
for _ in range(levels):
f = f.f_back or f
return '%s:%s' % (f.f_code.co_filename, f.f_lineno) | ad2c64ef8d45ae3332f8d29450d4b9dc7af6ae55 | 95,452 |
def Bonferroni(P, alpha=0.05):
    """Return the Bonferroni-corrected threshold (alpha / number of tests)
    for a vector of P-values."""
    n_tests = len(P)
    return alpha / n_tests
def get_first_list_prop(lst):
    """
    Return the index of the first element that starts with "list_",
    or -1 if no such element exists.

    Arguments:
        lst {list}
    """
    return next(
        (idx for idx, item in enumerate(lst) if item.startswith("list_")),
        -1,
    )
import re
def is_re(v):
    """
    Check value is valid regular expression
    >>> is_re("1{1,2}")
    True
    >>> is_re("1[")
    False
    """
    try:
        re.compile(v)
    except Exception:
        # Broad on purpose: bad patterns raise re.error, but non-string
        # inputs can raise TypeError and friends.
        return False
    return True
from typing import Mapping
def is_dict(x):
    """Return True when the argument is a mapping (any dict-like object)."""
    return isinstance(x, Mapping)
def mu_na(n: float, a: float) -> float:
    """
    mu = n^2 * a^3

    FIX: the original docstring stated ``mu = n^2 / a^3``, contradicting
    the implementation; the code computes n^2 * a^3 (Kepler's third law
    rearranged for the gravitational parameter).

    :param n: mean motion (stated as degrees by the original author;
        NOTE(review): mu is only the standard gravitational parameter when
        n is in radians per unit time — confirm units with callers)
    :type n: float
    :param a: semi-major axis
    :type a: float
    :return: mu = n^2 * a^3
    :rtype: float
    """
    return n * n * a * a * a
def snp_histogram(reads, reference, templated_ops=None, templated=True):
    """
    Return a histogram of base-change frequency at each reference position.

    Each alignment op contributes 1/len(reads)/len(read.alns) per affected
    position, so a read's weight is split evenly over its possible
    alignments.

    :param reads: a list of reads
    :param reference: the reference object
    :param templated_ops: a list of templated ops; if None (or empty), all
        SNPs are counted irrespective of the `templated` parameter
    :param templated: with `templated_ops` given, True selects the templated
        histogram and False the non-templated one
    :return: a list of floats, one per reference position
    """
    hist = [0] * len(reference.seq)
    n_reads = len(reads)
    for read in reads:
        n_alns = len(read.alns)
        for aln in read.alns:
            for op in aln.transform:
                # Drop ops on the wrong side of the templated split.
                if templated_ops and (op in templated_ops) != templated:
                    continue
                kind = op[1]
                if kind in "S":
                    hist[op[0]] += 1 / n_reads / n_alns
                elif kind == "D":
                    # Deletions span op[2] consecutive positions.
                    for offset in range(op[2]):
                        hist[op[0] + offset] += 1 / n_reads / n_alns
                elif kind == "I":
                    # Insertions count once per inserted base at one position.
                    hist[op[0]] += len(op[2]) / n_reads / n_alns
    return hist
import json
def json_read_write(file, load_var=None, mode='r'):
    """
    Read from or write to a JSON file.

    Args:
        file: address of json file to be loaded
        load_var: variable to be written to, or read from
        mode: 'r' to read from json, 'w' to write to json

    Returns:
        load_var: variable with data that has been read in mode 'r'
                  original variable in case of 'w'

    Raises:
        ValueError: if `mode` is neither 'r' nor 'w'.
        AssertionError: if `load_var` is None in 'w' mode.
    """
    if mode == 'r':
        # `with` closes the file; the original's explicit close() inside
        # the with-block was redundant.
        with open(file, mode) as json_file:
            load_var = json.load(json_file)  # Reading the file
        print(f"{file} json config read successful")
        return load_var
    if mode == 'w':
        assert load_var is not None, "load_var was None"
        with open(file, mode) as json_file:
            json.dump(load_var, json_file)  # Writing to the file
        print(f"{file} json config write successful")
        return load_var
    # BUG FIX: the original `assert mode == 'w' or 'r'` was always true
    # (it tested the truthiness of the literal 'r'), so unsupported modes
    # silently returned None.  Fail loudly instead.
    raise ValueError(f"unsupported mode type: {mode}")
def to_snake_case(field: str) -> str:
    """Return string converted to snake case (e.g. "Given Name" -> "given_name")."""
    # Lower characters one at a time (spaces become underscores); this keeps
    # the original per-character lowering behaviour.
    chars = []
    for ch in field:
        chars.append("_" if ch == " " else ch.lower())
    return "".join(chars)
def strmap(show):
    """Hardcode a particular ast Node to string representation 'show'."""
    def _render(self, node=None):
        # Ignores both the instance and the node; always yields `show`.
        return show
    return _render
from typing import Dict
import logging
def map_logging_level_names_to_values() -> Dict[str, int]:
    """
    Record the name and value of log levels from the logging module.

    The bool exclusion filters out flags like ``logging.raiseExceptions``
    (bool is a subclass of int).
    """
    candidates = ((name, getattr(logging, name)) for name in dir(logging))
    return {
        name: value
        for name, value in candidates
        if isinstance(value, int) and not isinstance(value, bool)
    }
import re
def indv_block(st):
    """
    Split fasta text into the header line and sequence of each construct.

    Args:
        st(str): The text contained in a fasta file, as a string. Consists
            of a header, which is initiated by > and ends with a newline.
            Subsequent lines are sequence data, until another > is found.

    Returns:
        A list of strings, where each string is one construct's header and
        sequence as a single string (beginning with >, trailing whitespace
        stripped). Text without a leading > is returned as a one-element
        list, unchanged.
    """
    if not st.startswith('>'):
        return [st]
    # Splitting on the literal '>' is equivalent to the original re.split.
    return ['>' + piece.rstrip() for piece in st.split('>') if piece]
def cbool(bool):
    """Convert a Python truth value to the string "true" or "false".

    NOTE(review): the parameter name shadows the builtin ``bool``; kept
    unchanged for backward compatibility with keyword callers.
    """
    if bool:
        return "true"
    return "false"
from typing import Union
from pathlib import Path
def is_path_tissuumaps_filename(path: Union[Path, str]) -> bool:
    """Return True if the path corresponds to a valid tissuumap path.

    Whenever the user attempts to save, the plugins are checked iteratively
    until one corresponds to the right file format; this check lets the
    tissuumaps exporter claim paths targeting its format.

    Parameters
    ----------
    path : str
        Path to check

    Returns
    -------
    bool
        True if the path ends with a valid tissuumap file extension.
    """
    joined_suffixes = "".join(Path(path).suffixes)
    return joined_suffixes.endswith(".tmap")
def correct_background(bkgnd_data, track_coords, track_intensities):
    """Subtract frame-matched background intensity from each track.

    Args:
        bkgnd_data (dict): Background intensities, e.g. from
            read_ij_intensity ('intensities' holds one value per frame)
        track_coords (list): numpy arrays of x,y,t spot positions per track
        track_intensities (list): List of float numpy arrays, one per track,
            containing the fluorescence intensity of each tracked spot

    Returns:
        bkgnd_int (list): per-track numpy arrays of the background
            intensities covering that track's frames
        bkgnd_corr_int (list): per-track numpy arrays of background-
            subtracted spot intensities
    """
    raw_background = bkgnd_data['intensities']
    bkgnd_int = []
    bkgnd_corr_int = []
    for idx, coords in enumerate(track_coords):
        # Column 2 holds the frame index; the track starts at that frame.
        first_frame = int(coords[0, 2])
        n_frames = coords.shape[0]
        bkgnd_slice = raw_background[first_frame:(first_frame + n_frames)]
        bkgnd_int.append(bkgnd_slice)
        bkgnd_corr_int.append(track_intensities[idx] - bkgnd_slice)
    return bkgnd_int, bkgnd_corr_int
def _func_destroy_scheduler_session(sessionId, dask_scheduler):
"""
Remove session date from _raft_comm_state, associated with sessionId
Parameters
----------
sessionId : session Id to be destroyed.
dask_scheduler : dask_scheduler object
(Note: this is supplied by DASK, not the client)
"""
if sessionId is not None and sessionId in dask_scheduler._raft_comm_state:
del dask_scheduler._raft_comm_state[sessionId]
else:
return 1
return 0 | b819ea3240ca6dc322546027128c65d38df92967 | 95,501 |
def _single_line(s):
"""Returns s if it is a single line; otherwise raises ValueError."""
if '\n' in s:
raise ValueError('must be single line')
return s | 0716af2594e1b7139bda79673fabafa9fadb4c07 | 95,508 |
from pathlib import Path
import random
from datetime import datetime
import json
def raffle_word() -> str:
    """Raffle one word of the bundled ``words.json`` word set.

    The RNG is seeded with today's ordinal date, so every call on the same
    day yields the same word.
    """
    words_path = Path(__file__).parent / 'words.json'
    with open(words_path) as word_file:
        words = json.load(word_file)
    random.seed(datetime.today().date().toordinal())
    return random.choice(words)
def _handle_response(classes, response):
"""Process response from AI Platfrom service."""
if 'error' in response:
raise RuntimeError(response['error'])
if response:
prediction_class = response.get('predictions')[0].get('classes') - 1
prediction_probabilities = response.get('predictions')[0].get(
'probabilities')
return prediction_class
return None | a7fb3d3a1b632d8b0ab279b3577b733ec56b4b01 | 95,517 |
def check_dataframes(m1, m2):
    """Check if 2 dataframes have the same shape and share the same index
    values.

    Args:
        m1 (DataFrame): first dataframe
        m2 (DataFrame): second dataframe

    Returns:
        bool: Whether both conditions are satisfied
    """
    # Short-circuits: the index sets are only built when the shapes match.
    return m1.shape == m2.shape and set(m1.index) == set(m2.index)
def check_update_contraints(record_id:int,
                            record_ids_map:dict,
                            id_counter:dict,
                            constraints:dict)->bool:
    """Check and update per-id utterance-count constraints for a record.

    Used by downloading/filtering code (primarily on speak data) to cap the
    number of recordings per speaker, line, and/or lesson.  The record
    passes when every tracked count is still below its cap in
    `constraints`; only then are the counts in `id_counter` incremented.

    Args:
        record_id (int): id of the record
        record_ids_map (dict): maps record_ids to speaker, lesson, line ids
        id_counter (dict): counts of speaker, lesson, and line ids
        constraints (dict): max number of utterances per constraint name

    Returns:
        bool: True iff all counts were below their caps (in which case the
        counters have been incremented).
    """
    constraint_names = list(constraints.keys())
    passes = all(
        id_counter[name].get(record_ids_map[record_id][name], 0) < constraints[name]
        for name in constraint_names
    )
    if passes:
        # Only a passing record bumps the counters.
        for name in constraint_names:
            constraint_id = record_ids_map[record_id][name]
            id_counter[name][constraint_id] = id_counter[name].get(constraint_id, 0) + 1
    return passes
from typing import List
def makeMatrix(xDim : int, yDim : int) -> List[List[int]]:
    """Create an (xDim, yDim) matrix of zeros with independent rows.

    :param int xDim: The number of columns to create
    :param int yDim: The number of rows to create
    :return: An (xDim, yDim) matrix of zeros.
    :rtype: List[List[int]]
    """
    return [[0 for _ in range(xDim)] for _ in range(yDim)]
def getColor(x):
    """Select an html color based on 0 <= x <= 100.

    Parameters
    ----------
    x : float
        percent of injection to color visually. Higher the percent the
        darker the color

    Returns
    ----------
    html color name useful for classifying
    """
    # Descending thresholds; first cutoff reached wins.
    thresholds = ((75, "red"), (50, "orange"), (25, "yellow"), (5, "lime"))
    for cutoff, color in thresholds:
        if x >= cutoff:
            return color
    return "white"
def get_nu_H(region, direction):
    """Heating-season orientation coefficient of an envelope part.

    Args:
        region(int): energy-efficiency region number (1-8; the table has
            no value for region 8 and returns None there)
        direction(str): orientation label of the envelope part (Japanese
            keys, e.g. '北' = north, '上面' = top, '下面' = bottom)

    Returns:
        float: heating-season orientation coefficient of the envelope part
    """
    # Table 1: heating-season orientation coefficients, one row per
    # direction, columns indexed by region 1-8.
    coefficients = {
        '上面': (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, None),
        '北': (0.260, 0.263, 0.284, 0.256, 0.238, 0.261, 0.227, None),
        '北東': (0.333, 0.341, 0.348, 0.330, 0.310, 0.325, 0.281, None),
        '東': (0.564, 0.554, 0.540, 0.531, 0.568, 0.579, 0.543, None),
        '南東': (0.823, 0.766, 0.751, 0.724, 0.846, 0.833, 0.843, None),
        '南': (0.935, 0.856, 0.851, 0.815, 0.983, 0.936, 1.023, None),
        '南西': (0.790, 0.753, 0.750, 0.723, 0.815, 0.763, 0.848, None),
        '西': (0.535, 0.544, 0.542, 0.527, 0.538, 0.523, 0.548, None),
        '北西': (0.325, 0.341, 0.351, 0.326, 0.297, 0.317, 0.284, None),
        '下面': (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, None),
    }
    row = coefficients[direction]
    return row[region - 1]
def bmul(vec, mat, axis=0):
    """Expand a vector for batchwise matrix multiplication.

    Parameters
    ----------
    vec : 2dtensor
        vector for multiplication
    mat : 3dtensor
        matrix for multiplication
    axis : int, optional
        batch axis, by default 0

    Returns
    -------
    3dtensor
        Product of matrix multiplication. (bs, n, m)
    """
    # Move the batch axis last so `vec` broadcasts along it, scale, then
    # restore the original layout.
    swapped = mat.transpose(axis, -1)
    scaled = swapped * vec.expand_as(swapped)
    return scaled.transpose(axis, -1)
def parse_str_list(list_str):
    """ Parse a string containing comma separated values and return a list
    of strings.

    All whitespace characters (space, tab, newline, etc.) are removed first
    and empty entries — e.g. from a trailing comma — are dropped.

    :param list_str: a string containing a comma separated list of strings
    :return: a list of string Python builtin object.
    """
    compact = ''.join(list_str.split())
    return [item for item in compact.split(',') if item]
import torch
def index2one_hot(index_tensor, vocabulary_size):
    """
    Transform an index representation into a one-hot representation.

    :param index_tensor: shape: (batch_size, sequence_length, 1) tensor
        containing character indices
    :param vocabulary_size: scalar, size of the vocabulary
    :return: chars_one_hot: shape (batch_size, sequence_length,
        vocabulary_size), on the same device as the input
    """
    device = index_tensor.device
    # `.type(torch.LongTensor)` produces a CPU tensor, so move it back to
    # the input's device afterwards (kept from the original implementation).
    indices = index_tensor.type(torch.LongTensor).to(device)
    batch_size = indices.size()[0]
    seq_len = indices.size()[1]
    one_hot = torch.zeros((batch_size, seq_len, vocabulary_size), device=device)
    one_hot.scatter_(dim=2, index=indices, value=1)
    return one_hot
def is_title(s):
    """Return True if a single token string s is title cased.

    is_title(s) treats strings containing hyphens and/or slashes differently
    than s.istitle() does:
    is_title("Hyphened-word") returns True; "Hyphened-word".istitle() returns False
    is_title("Hyphened-Word") returns False; "Hyphened-Word".istitle() returns True

    s: single token string to be checked for title case

    returns: True if s is title cased or False if s is not title cased.
    """
    for pos, ch in enumerate(s):
        if ch.islower():
            # A lowercase letter before any uppercase -> not title cased.
            return False
        if ch.isupper():
            # One leading capital allowed; any later capital disqualifies.
            return not any(rest.isupper() for rest in s[pos + 1:])
    return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.