content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def split(func, iterable):
    """Partition an iterable into two tuples based on a predicate.

    Arguments
    func -- a callable applied to each element of the iterable
    iterable -- an iterable of elements to partition

    Returns
    falsy, truthy -- two tuples: the first holds elements e where
    func(e) is falsy, the second holds elements where func(e) is truthy
    """
    buckets = {False: [], True: []}
    for element in iterable:
        buckets[bool(func(element))].append(element)
    return tuple(buckets[False]), tuple(buckets[True])
def exclude(users, excluded_user):
    """
    Return a list of ``users`` with every occurrence of
    ``excluded_user`` removed.

    Used to render other Chat users that are not the current user.
    """
    kept = []
    for user in users:
        if user != excluded_user:
            kept.append(user)
    return kept
def getattr_recursive(item, attr_key, *args):
    """Look up a (possibly nested) attribute using dotted member notation.

    Dict instances are traversed by key; everything else via getattr.

    :param item: object or dict to start the lookup from
    :param attr_key: dotted path, e.g. "a.b.c"
    :param args: optional single positional default returned when the
        path is missing
    :return: the value found at the path, or the default (if given)
    :raises KeyError, AttributeError: when the path is missing and no
        default was supplied
    """
    using_default = len(args) >= 1
    default = args[0] if using_default else None
    # NOTE: distinct loop variable -- the original shadowed ``attr_key``
    # itself, which worked but obscured the code.
    for key in attr_key.split('.'):
        try:
            if isinstance(item, dict):
                item = item[key]  # plain indexing instead of __getitem__ call
            else:
                item = getattr(item, key)
        except (KeyError, AttributeError):
            if using_default:
                return default
            raise
    return item
def __evaluate_model(valid_world, batchsize, datatype, display_examples, max_exs=-1):
    """Evaluate on validation/test data.

    - valid_world created before calling this function
    - batchsize obtained from opt['batchsize']
    - datatype is the datatype to use, such as "valid" or "test"
    - display_examples is bool
    - max_exs limits the number of examples if max_exs > 0

    Returns (valid_report, valid_world) so the caller can inspect the
    metrics dict and reuse the world object.
    """
    print('[ running eval: ' + datatype + ' ]')
    valid_world.reset()
    cnt = 0
    for _ in valid_world:
        valid_world.parley()
        if cnt == 0 and display_examples:
            # Show one example (plus current metrics) at the very start.
            print(valid_world.display() + '\n~~')
            print(valid_world.report())
        # NOTE(review): assumes each parley() consumes one batch of
        # ``batchsize`` examples -- confirm against the world implementation.
        cnt += batchsize
        if valid_world.epoch_done() or (max_exs > 0 and cnt >= max_exs):
            # note this max_exs is approximate--some batches won't always be
            # full depending on the structure of the data
            break
    valid_report = valid_world.report()
    print(datatype + ':' + str(valid_report))
    return valid_report, valid_world
import re
def replace_urls(text, replace_with="<URL>"):
    """Replace urls in a sentence with a chosen string.

    >>> replace_urls("I love https://github.com")
    "I love <URL>"

    Args:
        text (str): Input sentence
        replace_with (str, optional): string to replace the url with. Defaults to "<URL>".

    Returns:
        str: Output sentence with replaced url
    """
    # NOTE: the trailing (.*) makes the match swallow the rest of the
    # line after a URL-like token, so everything from the first URL
    # onward is replaced.
    pattern = re.compile(r"((http|ftp|https):\/\/)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b(.*)")
    return pattern.sub(replace_with, text)
import hashlib
def strToHash(inputStr, algorithm='sha512'):
    """Hash a string (or bytes) with the given hashlib algorithm.

    :param inputStr: data to hash; ``str`` input is UTF-8 encoded first
        (the original passed ``str`` straight to ``update()``, which
        raises TypeError on Python 3).
    :param algorithm: any algorithm name accepted by ``hashlib.new``
        (default 'sha512')
    :return: hex digest string
    """
    hasher = hashlib.new(algorithm)
    if isinstance(inputStr, str):
        inputStr = inputStr.encode('utf-8')
    hasher.update(inputStr)
    return hasher.hexdigest()
def get_new_attributes(existing_attributes, changed_attributes):
    """Merge two attribute dicts without mutating either; changed values win.

    >>> existing_attributes = {'a': 1, 'b': 2, 'c': 3}
    >>> changed_attributes = {'a': 6, 'c': 'x,y'}
    >>> get_new_attributes(existing_attributes,changed_attributes) \
    == {'b': 2, 'c': 'x,y', 'a': 6}
    True
    """
    return {**existing_attributes, **changed_attributes}
def HasReplaceAdvertisementFlags(args):
    """Returns whether replace-style flags are specified in arguments.

    Mirrors the original or-chain: a truthy advertisement_mode is
    returned as-is, otherwise a bool covering the two set_* flags.
    """
    if args.advertisement_mode:
        return args.advertisement_mode
    return (args.set_advertisement_groups is not None
            or args.set_advertisement_ranges is not None)
def make_dict_all_atoms_iso_to_idx_dict(mol):
    """
    Map every atom's isotope label to its atom index.

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: an rdkit molecule

    Return
    :returns: dict mol_iso_to_idx_dict: keys are each atom's iso-label,
        values are that atom's idx within the mol object,
        e.g. {1008: 7, 1009: 8, 1003: 4, ...}
    """
    return {atom.GetIsotope(): atom.GetIdx() for atom in mol.GetAtoms()}
def enquote(str):
    """Wrap *str* in double quotes, backslash-escaping any double
    quotes already present, and return the result."""
    escaped = str.replace('"', '\\"')
    return '"{}"'.format(escaped)
async def check_name_and_abbreviation(db, ref_id, name=None, abbreviation=None):
    """
    Check if an otu name and abbreviation are already in use in the reference identified by `ref_id`. Returns a message
    if the ``name`` or ``abbreviation`` are already in use. Returns ``False`` if they are not in use.

    :param db: the application database client
    :type db: :class:`~motor.motor_asyncio.AsyncIOMotorClient`
    :param ref_id: the id of the reference to check in
    :type ref_id: str
    :param name: a otu name
    :type name: str
    :param abbreviation: a otu abbreviation
    :type abbreviation: str
    :return: an error message string when either value is in use,
        otherwise ``False``
    """
    # Count existing OTUs with the same (case-insensitive) name.
    name_count = 0
    if name:
        name_count = await db.otus.count({
            "lower_name": name.lower(),
            "reference.id": ref_id
        })
    # Count existing OTUs with the same abbreviation (exact match).
    abbr_count = 0
    if abbreviation:
        abbr_count = await db.otus.count({
            "abbreviation": abbreviation,
            "reference.id": ref_id
        })
    # A field is "unique" when it was not supplied at all or no match was found.
    unique_name = not name or not name_count
    unique_abbreviation = not abbreviation or not abbr_count
    if not unique_name and not unique_abbreviation:
        return "Name and abbreviation already exist"
    if not unique_name:
        return "Name already exists"
    if not unique_abbreviation:
        return "Abbreviation already exists"
    return False
def search_sub(a, m):
    """
    Return the index of the first occurrence of the sublist ``m`` in
    ``a``, or None when ``m`` does not occur.

    The original rolling comparison reset its match counter on a
    mismatch without re-examining the current element, so matches with
    overlapping prefixes were missed (e.g. m=[1, 2] in a=[1, 1, 2]).
    A sliding-window comparison is used instead.  An empty ``m``
    matches at index 0 (the original raised IndexError).
    """
    width = len(m)
    target = list(m)
    for i in range(len(a) - width + 1):
        if list(a[i:i + width]) == target:
            return i
    return None
def time_range(from_date, to_date):
    """Build the time-range query parameters specified by the user.

    Falsy boundaries are omitted from the returned dict.
    """
    params = {}
    if from_date:
        params['from_'] = from_date
    if to_date:
        params['to'] = to_date
    return params
def read_one_line(filename):
"""Open a file and read one line"""
return open(filename, 'r').readline().rstrip('\n') | 1df6a7b9d437a79277c8ff009657287551a5320f | 120,334 |
from datetime import datetime
import locale
def format_datetime_2822(date_obj: datetime) -> str:
    """
    Return an RFC 2822-formatted string for *date_obj*.

    The strftime call must run under the "C" locale so day/month names
    come out in English; the previous locale is restored afterwards.
    A try/finally guards the restore (the original would leave the
    process stuck in the "C" locale if strftime raised).
    """
    old = locale.setlocale(locale.LC_ALL)
    locale.setlocale(locale.LC_ALL, 'C')
    try:
        return date_obj.strftime('%a, %d %b %Y %H:%M:%S %z')
    finally:
        locale.setlocale(locale.LC_ALL, old)
def revcomp(seq):
    """Returns the reverse complement of a DNA sequence.

    Raises KeyError for bases outside A/C/G/T/N, as before.
    """
    pairs = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G', 'N': 'N'}
    return ''.join(pairs[base] for base in reversed(seq))
def angle_diff(a, b):
    """Return the absolute difference between two angles (degrees),
    folded into the range [0, 180]."""
    wrapped = abs(a - b) % 360
    return min(wrapped, 360 - wrapped)
def k_to_last_iter(head, k):
    """
    Return the data of the k-th last node of a singly linked list,
    found iteratively.

    A fast pointer is advanced k hops ahead of a slow pointer; when the
    fast pointer falls off the end, the slow pointer rests on the k-th
    last node.

    NOTE: k must be greater than 0 -- k == 1 means the last element.
    A string error marker is returned for invalid k (kept for
    backward compatibility with existing callers).
    """
    if k < 1:
        return "ERROR: k has to be > 0"
    fast = head
    for _ in range(k):
        fast = fast.next
    slow = head
    while fast:
        slow = slow.next
        fast = fast.next
    return slow.data
import importlib
def import_module(libname):
    """Thin wrapper around :func:`importlib.import_module`.

    :param libname: dotted module path as a string, e.g. "os.path"
    :return: the imported module object
    :raises ImportError: if the module cannot be found
    """
    return importlib.import_module(libname)
def unpack_pose_msg(msg, stamped=False):
    """Get position and orientation from a Pose(Stamped) message.

    Returns ((x, y, z), (w, x, y, z)).
    """
    pose = msg.pose if stamped else msg
    p, o = pose.position, pose.orientation
    return (p.x, p.y, p.z), (o.w, o.x, o.y, o.z)
import pickle
def work(pickled_task):
    """Unpickle a task object and run its ``execute`` method,
    returning the result."""
    return pickle.loads(pickled_task).execute()
def determine_left_shift(five_prime, indel, three_prime):
    """
    Shift an indel as far 5' as possible, adjusting its flanking
    sequences.

    Args:
        five_prime (string) : Five prime sequence
        indel (string) : Sequence of indel
        three_prime (string) : Three prime sequence
    Returns:
        (string, string) : New 5' and 3' sequences

    Fixes over the original: an empty ``indel`` no longer loops
    forever, and the homopolymer loop stops once the 5' sequence is
    exhausted (previously raised IndexError on ``five_prime[-1]``).
    """
    if not indel:
        return (five_prime, three_prime)
    # Shift by whole copies of the indel first.
    while five_prime.endswith(indel):
        five_prime = five_prime[:-len(indel)]
        three_prime = indel + three_prime
    # The shift may be incomplete for homopolymer runs: keep moving
    # single bases while the indel is a run of the last 5' base.
    while five_prime and len(indel) * five_prime[-1] == indel:
        three_prime = five_prime[-1] + three_prime
        five_prime = five_prime[:-1]
    return (five_prime, three_prime)
def start_word(words: list) -> str:
    """Asks the user to input a word

    Args:
        words (list): the list of words available in wordle

    Returns:
        str: the word guess
    """
    word = input("Please input the attempt in wordle> ")
    # protect against guesses that contain more or less than 5 letters
    while len(word) != 5:
        print("Input is not a 5 letter word!")
        word = input("Please input the attempt in wordle> ")
    # protect against guesses that are not in the word list
    # NOTE(review): words re-entered here skip the length check above;
    # this is safe only if every entry in ``words`` has 5 letters -- confirm.
    while word not in words:
        print("Not in the word list!")
        word = input("Please input the attempt in wordle> ")
    return word
def _getParent(pagename):
""" Return parent of pagename.
"""
pos = pagename.rfind('/')
if pos >= 0:
return pagename[:pos]
return None | cf135b2b57fce38abe76fb7a5a6165863096de32 | 120,359 |
def find_non_numeric(in_col):
    """
    Return the unique entries of *in_col* (in first-seen order) that
    cannot be typecast as float.

    The original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; only the exceptions ``float()``
    actually raises for bad input are caught now.
    """
    non_nums = []
    for item in in_col:
        try:
            float(item)
        except (TypeError, ValueError):
            if item not in non_nums:
                non_nums.append(item)
    return non_nums
import torch
def squeeze_left(const: torch.Tensor):
    """
    Remove size-1 dimensions from the left side of the shape, one at a
    time (PyTorch's squeeze() can't take several dims at once).
    """
    squeezed = const
    while squeezed.dim() > 0 and squeezed.shape[0] == 1:
        squeezed = squeezed.squeeze(dim=0)
    return squeezed
def f_end(version):
    """Gives fortran file ending used in MESA depending on the version used

    Parameters
    ----------
    version : int
        version number of MESA to be checked

    Returns
    -------
    str
        Either 'f90' or 'f', depending on `version`.
    """
    return "f90" if version >= 7380 else "f"
def get_otpauth_dict(otpauth_str):
    """
    Parse an otpauth url's query parameters.

    Args:
        otpauth_str (str): otpauth url as string
    Returns:
        dict: query parameters as key-value pairs

    Each parameter is split on the FIRST '=' only, so values that
    themselves contain '=' (e.g. base32 secret padding) are no longer
    truncated; a key without '=' maps to "" instead of raising.
    """
    params = {}
    for otpvar in otpauth_str.replace("?", "&").split("&")[1:]:
        key, _, value = otpvar.partition("=")
        params[key] = value
    return params
def link_in_embed(text: str, url: str) -> str:
    """Makes a clickable link inside Embed object"""
    return "[" + text + "](" + url + ")"
def _GetDomainAndDisplayId(project_id):
"""Returns tuple (displayed app id, domain)."""
l = project_id.split(':')
if len(l) == 1:
return l[0], None
return l[1], l[0] | 41ecee133d5470553f95e40023ce8f4aab746baf | 120,375 |
import torch
def _onehot_encode(predictions: torch.Tensor,
labels: torch.Tensor,
n_classes: int):
"""
One-hot-encode the predictions and the labels.
:param predictions: BxHxW int64 tensor containing the class predictions
:param labels: BxHxW int64 tensor containing the correct class labels
:param n_classes: number of classes present
:return:
"""
print('n classes: ', n_classes)
pred_tensor = torch.nn.functional.one_hot(predictions, num_classes=n_classes).permute(0, 3, 1, 2)
onehot_gt_tensor = torch.nn.functional.one_hot(labels, num_classes=n_classes).permute(0, 3, 1, 2)
return pred_tensor, onehot_gt_tensor | 7d69c489e0821e93852c4082aa6c887286272a00 | 120,377 |
import re
def process_disk_usage(df_output):
    """
    Extract the file-system and disk usage from 'df -h' output.

    Returns a list of (filesystem, use-percentage) tuples; raises
    ValueError when nothing could be parsed.
    """
    # Drop the header line first, then pull one (fs, use%) pair per row.
    body = re.sub(r"^Filesystem.*Mounted on$", "", df_output, flags=re.M)
    rows = re.findall(r"^(\S+).*?(\d+%) .*", body, flags=re.M)
    if not rows:
        raise ValueError("Failed to parse 'df' output correctly")
    return rows
def read_list(filename):
"""Read a file and returns a list where each element corresponds to a line in the file.
Args:
filename (str): Name of the file.
Returns:
list: A list of elements.
Examples:
>>> read_list("share/data.txt")
['I like to move it, move it', 'I like to move it, move it', 'I like to move it, move it', 'Ya like to move it']
"""
with open(filename, "r") as f:
lines = [line.strip() for line in f]
return lines | 51c86e5ffd4c1bfbc7efd6772777de29254607a7 | 120,379 |
from typing import Tuple
from typing import Optional
def _parse_queryjson(query: dict) -> Tuple[Optional[str], Optional[str]]:
"""
Parse the collection and item ids from a traditional STAC API Item Search body.
The query is a JSON object with relevant keys, "collections" and "ids".
"""
collection_ids = query.get("collections")
item_ids = query.get("ids")
# Collection and ids are List[str] per the spec,
# but the client may allow just a single item
if isinstance(collection_ids, list):
collection_ids = ",".join(collection_ids)
if isinstance(item_ids, list):
item_ids = ",".join(item_ids)
return (collection_ids, item_ids) | a3db113e3187c1f6e05509042315202e641e1e74 | 120,380 |
def remove_extension(filename: str) -> str:
    """
    Remove the final extension from a filename.

    Parameters
    ----------
    filename : String
        the name of a file

    Returns
    -------
    string without its last extension

    Raises ValueError when the name contains no "." (same as the
    original rindex-based lookup).
    """
    last_dot = filename.rindex(".")
    return filename[:last_dot]
def is_query(line: str) -> bool:
    """
    Return True when the line embeds a query, i.e. contains both the
    "@SQLALCHEMY" marker and the "|$" delimiter; else False.
    """
    return all(marker in line for marker in ("@SQLALCHEMY", "|$"))
import re
def link_search(content):
    """Find web links in *content* via regex and return them as a list."""
    # protocol://chars -- the match stops at the first character outside
    # the allowed set (e.g. a closing quote)
    pattern = re.compile(r"\w+://[0-9a-zA-Z./\-_?=]+")
    return pattern.findall(content)
def crc(line):
    """Calculate the XOR checksum (cyclic redundancy check) of a string.

    Parameters
    ----------
    line : str, characters to calculate crc

    Returns
    -------
    crc : str, in hex notation

    NOTE(review): the FINAL character of ``line`` is excluded from the
    XOR (the loop stops at len-1) -- presumably the input carries a
    trailing delimiter/newline; confirm with callers before changing.
    """
    result = ord(line[0:1])
    for ch in line[1:-1]:
        result ^= ord(ch)
    return '%X' % result
import difflib
def diff(string_a, string_b):
    """Return unified diff of strings."""
    lines_a = string_a.splitlines(keepends=True)
    lines_b = string_b.splitlines(keepends=True)
    return ''.join(difflib.unified_diff(lines_a, lines_b))
import torch
def expmap2quat_torch(exp):
    """
    Convert exponential-map rotations to quaternions
    (batch pytorch version ported from the corresponding numpy method).

    :param exp: N*3 tensor of expmap vectors
    :return: N*4 tensor of quaternions (w, x, y, z)
    """
    angle = torch.norm(exp, p=2, dim=1).unsqueeze(1)
    # small epsilon keeps the division finite for zero rotations
    axis = torch.div(exp, angle.repeat(1, 3) + 0.0000001)
    half_sin = torch.sin(angle / 2)
    half_cos = torch.cos(angle / 2)
    xyz = torch.mul(axis, half_sin.repeat(1, 3))
    return torch.cat((half_cos, xyz), dim=1)
def monomial_mul(a, b):
"""Multiplication of tuples representing monomials.
Lets multiply x**3*y**4*z with x*y**2:
>>> monomial_mul((3, 4, 1), (1, 2, 0))
(4, 5, 1)
which gives x**4*y**5*z.
"""
return tuple([ x + y for x, y in zip(a, b) ]) | 5c7ffa62f0634074dffe88c07f92555caaccbcc5 | 120,401 |
from typing import Iterable
from typing import Tuple
def compute_iterable_delta(old: Iterable, new: Iterable) -> Tuple[set, set, set]:
    """Given two iterables, return the entries that's (added, removed, updated).

    Usage:
    >>> old = {"a", "b"}
    >>> new = {"a", "d"}
    >>> compute_iterable_delta(old, new)
    ({"d"}, {"b"}, {"a"})
    """
    previous, current = set(old), set(new)
    return (current - previous,
            previous - current,
            previous & current)
def calc_center_point(point_a, point_b):
    """
    Return the midpoint of two points, using integer floor division on
    each coordinate.

    :param point_a: (x, y) coordinates of point A
    :param point_b: (x, y) coordinates of point B
    :return: (x, y) of the center point
    """
    mid_x = (point_a[0] + point_b[0]) // 2
    mid_y = (point_a[1] + point_b[1]) // 2
    return mid_x, mid_y
def quantileNorm(df):
    """ Quantile normalization across samples

    Parameters
    ----------
    df : pd.DataFrame
        indexed by features and samples are in the column

    Returns
    -------
    pd.DataFrame
        A dataframe that has been quantile normalized
    """
    # Mean of each quantile across samples: rank every column
    # (method='first' breaks ties by order of appearance), then average
    # all values sharing the same rank.
    rank_mean = df.stack().groupby(df.rank(method='first').stack().astype(int)).mean()
    # Map each value's ('min') rank back to the corresponding rank-mean,
    # then unstack to restore the original row/column layout.
    result = df.rank(method='min').stack().astype(int).map(rank_mean).unstack()
    return result
def minimum(a, b):
    """
    Finds the minimum of two numbers.

    >>> minimum(3, 2)
    2
    >>> minimum(2, 3)
    2
    >>> minimum(2, 2)
    2

    :param a: first number
    :param b: second number
    :return: minimum
    """
    if a <= b:
        return a
    return b
def extract_id_type(soup):
""" Takes a BS4 object of a puchem page as input and returns the Pubchem ID type and Pubchem ID Value """
pubhcem_uid_type = soup.find_all('meta', {'name':'pubchem_uid_type'})[0]['content']
pubhcem_uid_value = soup.find_all('meta', {'name':'pubchem_uid_value'})[0]['content']
return pubhcem_uid_type +':'+ pubhcem_uid_value | bfa2025546cd7c0b07c911d7d05c622c6a7f1400 | 120,436 |
def get_input(input_string):
    """Prompt repeatedly until the user supplies a value parseable as float.

    The original recursed on bad input but DISCARDED the recursive
    call's result, so a bad-then-good sequence returned "" instead of
    the number; a loop fixes that.  The bare except is narrowed to
    ValueError (the error float() raises for bad text).

    :param input_string: prompt shown to the user
    :return: the number entered, as a float
    """
    while True:
        try:
            return float(input(input_string))
        except ValueError:
            print("Please provide a valid input")
def return_period_from_string(arg):
    """
    Turn a string such as "days=1,seconds=30" (optionally wrapped in
    double quotes, which are stripped) into a dict of int-valued
    key/value pairs.
    """
    stripped = arg[1:-1] if arg[0] == '"' and arg[-1] == '"' else arg
    period = {}
    for pair in stripped.split(","):
        key, value = pair.split("=")
        period[str(key)] = int(value)
    return period
def extract_status_code_from_topic(topic):
    """
    DPS response topics have the form:
    $dps/registrations/res/<statuscode>/?$<key1>=<value1>&<key2>=<value2>

    Extract and return the status code segment.

    :param topic: The topic string
    """
    _POS_STATUS_CODE_IN_TOPIC = 3
    after_dollar = topic.split("$")[1]
    return after_dollar.split("/")[_POS_STATUS_CODE_IN_TOPIC]
import ast
def create_ast_function_call_with_numeric_values(func_name: str, **kwargs):
    """
    Build an ast.Call node for *func_name* with the passed numeric
    keyword arguments.

    :Notes:
    Will not work if a non-numeric keyword value is passed.

    :Examples:
    >>> import astor
    >>> value = create_ast_function_call_with_numeric_values('ModelInputs', n_phones=100, price_scrap=200)
    >>> astor.to_source(value)
    'ModelInputs(n_phones=100, price_scrap=200)\n'
    """
    keywords = [ast.keyword(arg=name, value=ast.Num(number))
                for name, number in kwargs.items()]
    return ast.Call(func=ast.Name(func_name), args=[], keywords=keywords)
from pathlib import Path
import hashlib
def check_hash(path, notebook, old_hash):
    """Recompute the notebook's SHA1 and compare against *old_hash*.

    The digest covers the lower-cased file name plus, for every code
    cell, its execution count (in brackets) and its source.
    """
    parts = [str(Path(path).name).lower() + ":"]
    for cell in notebook.get('cells', []):
        if cell.get('cell_type', '') != 'code':
            continue
        parts.append('[{}]'.format(cell.get('execution_count', '') or ''))
        parts.append(cell.get('source', '') + ';;;')
    digest = hashlib.sha1(''.join(parts).encode('utf-8')).hexdigest()
    return digest == old_hash
from typing import Iterable
def count_op(it: Iterable, oper, value):
    """Return a count of the number of items in `it` where **oper** **value**
    == True. This allows user-defined objects to be included and is subtly
    different to ``[...].count(...)`` which uses the __eq__ operator."""
    outcomes = [oper(item, value) for item in it]
    return outcomes.count(True)
def get_crab_engineering_value(fuel_needed: int) -> int:
    """Sum the integers 1..fuel_needed via the closed form n(n+1)/2.

    Uses integer floor division instead of float division + int():
    n*(n+1) is always even so // is exact, whereas the float route
    loses precision once n*(n+1) exceeds 2**53.
    """
    return fuel_needed * (fuel_needed + 1) // 2
def get_upload_location(instance, filename):
    """
    Build the storage path for an uploaded image.

    Inputs:
        instance: instance of an Observation class
        filename: string
    returns:
        string: "<jname>/<utc>/<beam>/(unknown)"

    NOTE(review): ``filename`` is unused and the returned path ends in
    the literal "(unknown)" -- this looks like a redacted placeholder
    for the real file name; confirm the intended template.
    """
    psr = instance.pulsar.jname
    beam = instance.beam
    utc = instance.utc.utc_ts.strftime("%Y-%m-%d-%H:%M:%S")
    return f"{psr}/{utc}/{beam}/(unknown)"
def parse_to_float(value):
    """Cast a spreadsheet cell string to float.

    Known bad inputs -- empty values, the Excel "#VALUE!" error marker
    and negative values -- yield None.  A space used as a thousands
    separator is removed before conversion.
    """
    is_parseable = value and value != '#VALUE!' and not value.startswith("-")
    if not is_parseable:
        return None
    return float(value.replace(" ", ""))
from typing import Dict
def process_transcript_chunk(chunk: Dict) -> Dict:
    """
    Convert a transcript chunk's start time from seconds to
    milliseconds and derive its end timestamp from start + duration
    (also in milliseconds).
    """
    start_ms = int(chunk["start"] * 1000)
    end_ms = int((chunk["start"] + chunk["duration"]) * 1000)
    return {"text": chunk["text"], "start": start_ms, "end": end_ms}
def optional_id(record, context):
    """
    Create an optional id for mirbase entries, based on the ``name``
    field (the miRBase gene name).
    """
    has_clean_name = ("description" in record and "name" in record
                      and " " not in record["name"])
    if has_clean_name:
        return record["name"]
    if context.database == "MIRBASE":
        return record["name"]
    return None
from typing import List
from typing import Dict
from typing import Any
def batch_dictionary(products: List[Dict[str, Any]]):
    """Create a batchId -> {'method': ...} mapping for error control.

    Args:
      products: Chunk of products prepared to be uploaded.

    Returns:
      Dictionary with the batchId as key and the method as value.
    """
    return {product['batchId']: {'method': product['method']}
            for product in products}
def get_cell_corners(cell: list, width: float, height: float, rel_loc='mid') -> list:
    """
    Return the four corners of a rectangular cell.

    :param cell: (lat/Y, lon/X) coordinates of the cell
    :param width: Width of the cell
    :param height: Height of the cell
    :param rel_loc: which point of the cell `cell` refers to:
        'mid' (center) or 'ul' (upper-left)
    :returns: corners ordered [[y2, x1], [y2, x2], [y1, x2], [y1, x1]]
    :raises AttributeError: for an unsupported rel_loc
    """
    y, x = cell
    if rel_loc == 'mid':
        half_w, half_h = width / 2, height / 2
        x1, x2 = x - half_w, x + half_w
        y1, y2 = y - half_h, y + half_h
    elif rel_loc == 'ul':
        x1, x2 = x, x + width
        y1, y2 = y - height, y
    else:
        raise AttributeError('Invalid rel_loc. Supported: mid, ul')
    return [[y2, x1], [y2, x2], [y1, x2], [y1, x1]]
def dx_to_wes_state(dx_state):
    """Map a DNAnexus execution state to the corresponding WES state.

    Fixes the original's ``dx_state in "done"`` test, which was a
    SUBSTRING check and wrongly mapped any substring of "done"
    ("d", "on", even "") to COMPLETE; equality is what was intended.
    """
    if dx_state in ("running", "waiting_on_input", "waiting_on_output"):
        return "RUNNING"
    elif dx_state == "runnable":
        return "QUEUED"
    elif dx_state in ("failed", "terminating", "terminated"):
        return "EXECUTOR_ERROR"
    elif dx_state == "done":
        return "COMPLETE"
    return "UNKNOWN"
from typing import Tuple
def string_builder(str_list: Tuple[str]) -> str:
    """
    Concatenate the given strings into one, with NO separator.

    NOTE(review): the original docstring claimed "separate by
    whitespace", but the code joins on "" -- callers receive plain
    concatenation; the docstring was wrong, not the code.
    """
    return "".join(str_list)
def format_time(s):
    """Converts a datetime (or date/time) object to a serializable
    ISO-8601 string via its ``isoformat`` method."""
    return s.isoformat()
def stree2gtree(stree, genes, gene2species):
"""Create a gene tree with the same topology as the species tree"""
tree = stree.copy()
for gene in genes:
tree.rename(gene2species(gene), gene)
return tree | d9db5315ec75cd4ea21f8d6a669bfc87a5e52780 | 120,478 |
def get_nc_attr(nc, name, default=None):
    """Non-error raising netCDF attribute getter.

    :param nc: an open netCDF4 Dataset (anything exposing ``getncattr``)
    :param name: attribute name to fetch
    :param default: value returned when the attribute is absent
    """
    try:
        return nc.getncattr(name)
    except AttributeError:
        return default
import six
import importlib
def import_class(cls_path):
    """
    Import a class from a dotted Python path ("pkg.module.Class").

    Non-string inputs are assumed to already be the class object and
    are returned unchanged.  (The ``six.string_types`` check was
    replaced with a plain ``str`` check -- Python 3 only; also fixes
    the "Rquires" typo in the error message.)

    :raises ImportError: for a bare (dot-less) name, or when the module
        lacks the requested class.
    """
    if not isinstance(cls_path, str):
        return cls_path
    if '.' not in cls_path:
        # A bare class name can't be resolved without an app registry.
        raise ImportError("Requires a Python-style path (<module.module.Class>) "
                          "to load given cls. Only given '%s'." % cls_path)
    module_path, class_name = cls_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    cls = getattr(module, class_name, None)
    if cls is None:
        raise ImportError(
            "Module '{}' does not appear to have a class called '{}'.".format(
                module_path, class_name))
    return cls
import turtle
def make_turtle(color, size):
    """
    Sets up a turtle with the given color and pensize.
    Returns the new turtle.

    NOTE: instantiating a Turtle opens the graphics window as a side
    effect if one is not already open.
    """
    t = turtle.Turtle()
    t.color(color)
    t.pensize(size)
    return t
def filter_dictionary_by_resolution(raw_data, threshold=False):
    """Filter SidechainNet data by removing poor-resolution training entries.

    Args:
        raw_data (dict): SidechainNet dictionary.
        threshold (float, bool): Entries with resolution values greater than this value
            are discarded. Test set entries have no measured resolution and are not
            excluded. Default is 3 Angstroms. If False, nothing is filtered.

    Returns:
        Filtered dictionary (the input dict, with its "train" split replaced).
    """
    if not threshold:
        return raw_data
    if isinstance(threshold, bool) and threshold is True:
        threshold = 3
    # Fields zipped together per training entry; "res" sits at index 6.
    fields = ("seq", "ang", "crd", "msk", "evo", "ids", "res", "sec")
    kept = {
        "seq": [], "ang": [], "ids": [], "evo": [],
        "msk": [], "crd": [], "sec": [], "res": []
    }
    train = raw_data["train"]
    n_filtered_entries = 0
    total_entries = 0.
    for entry in zip(*(train[field] for field in fields)):
        total_entries += 1
        res = entry[6]
        # Entries with no resolution or above-threshold resolution are dropped.
        if not res or res > threshold:
            n_filtered_entries += 1
            continue
        for field, value in zip(fields, entry):
            kept[field].append(value)
    if n_filtered_entries:
        print(f"{n_filtered_entries} ({n_filtered_entries/total_entries:.1%})"
              " training set entries were excluded based on resolution.")
    raw_data["train"] = kept
    return raw_data
import functools
def sync_after(f):
    """Decorator for methods that mutate cell data in the database:
    forces an immediate cell-data sync after the wrapped call and
    passes the call's return value through.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        outcome = f(self, *args, **kwargs)
        self._cell_data_sync(force=True)
        return outcome
    return wrapper
def d_theta(t, alpha):
    """
    theta'(t) = t / (alpha + |t|)

    Also called phi' or psi'.
    Baus et al 2013, table 1, Theta_2.
    Nikolova et al 2013, table 1, f3.
    Nikolova et al 2014, table 1, theta_2.
    """
    assert alpha > 0
    denominator = abs(t) + alpha
    return t / denominator
import secrets
def make_key_5(size=1):
    """
    Creates a 24*size character URL-safe random key
    (18*size random bytes base64url-encode to 24*size characters).
    """
    return secrets.token_urlsafe(18*size)
def henry(molar_weight, solubility, vapor_pressure):
    """
    Return the henry's law constant []. if < 3e-7, can be neglected
    source : ("CHEMMAP technical User's manual 6.10," 2014)

    params
    ------
    molar_weight[kg/mol] (in the ref, the dimension is not defined)
    solubility [kg/m³]
    vapor_pressure [Pa]
    """
    pressure_atm = vapor_pressure / 101325
    solubility_g = solubility * 1000
    molar_weight_g = molar_weight / 1000
    return pressure_atm / (solubility_g / molar_weight_g)
def extractScholarItem(pub, item):
    """Extract a field from a Scholar bib record.

    Parameters
    ----------
    pub : dict
        Scholar bib dictionary (expects a 'bib' sub-dict)
    item : str
        Keyword to obtain value from

    Returns
    -------
    out : str or int or None
        Keyword value, or None when absent.

    The bare ``except`` was narrowed to the lookup errors that can
    actually occur (missing key / non-subscriptable record).
    """
    try:
        return pub['bib'][item]
    except (KeyError, TypeError):
        return None
def get_chunked_dim_size(dim_size, split_size, idx):
    """
    Computes the dim size of the chunk for provided ``idx`` given ``dim_size``
    and ``split_size``.

    Args:
        dim_size(int): Size of the dimension being chunked.
        split_size(int): The chunk size for each chunk of ``dim_size``.
        idx(int): The index of chunk whose dim size is being requested.

    Returns:
        An int indicating the dim size of the chunk.
    """
    chunk_end = min(dim_size, split_size * (idx + 1))
    chunk_start = split_size * idx
    return chunk_end - chunk_start
import socket
def create_socket(host, port):
    """
    Creates TCP socket and starts listening

    host: host to bind to
    port: port to bind to

    Returns the listening socket object.
    """
    addr = (
        host,
        port,
    )
    # Prefer a dual-stack (IPv4+IPv6) server socket when the platform
    # supports it.
    if socket.has_dualstack_ipv6():
        gemsocket = socket.create_server(
            addr, family=socket.AF_INET6, dualstack_ipv6=True
        )
    else:
        gemsocket = socket.create_server(addr)
    # NOTE(review): create_server() already calls listen(); this extra
    # call looks redundant but harmless -- confirm before removing.
    gemsocket.listen()
    return gemsocket
def _flatten(t: list) -> list:
"""
Flatten nested list
"""
return [item for sublist in t for item in sublist] | 287b542f2e8294b32b618556f5eac5e08dd7ff24 | 120,502 |
def get_device_connections_info(device):
    """ Get connection information of a device from testbed file.

    Args:
        device (`obj`): device object
    Returns:
        device.connections (`dict`): the device's connection mapping,
        as parsed from the testbed file
    """
    return device.connections
def get_precision_at_K(relevant, ranking, K):
    """
    Precision at K: (number of cited cases in the top K positions of
    the ranking) / K.

    Parameters
    ----------
    relevant: list of cases that were cited
    ranking: ranking of ancestors (DataFrame with a 'rank' column)

    Output
    ------
    precision at K (float)
    """
    top_k = set(ranking[ranking['rank'] <= K].index)
    hits = [1 for case in relevant if case in top_k]
    return float(len(hits)) / K
import functools
def _get_option(target_obj, key):
"""
Given a target object and option key, get that option from
the target object, either through a get_{key} method or
from an attribute directly.
"""
getter_name = 'get_{key}'.format(**locals())
by_attribute = functools.partial(getattr, target_obj, key)
getter = getattr(target_obj, getter_name, by_attribute)
return getter() | 61babde1b245d36e5ac053667806a71403cb6e01 | 120,513 |
def rename_entity_id(old_name):
    """
    Map a historical entity_id to its current name. Helpful if ids changed
    during the course of history and you want to quickly merge the data.
    Beware that no further adjustment is done, also no checks whether the
    referred sensors are even compatible. Unknown ids pass through unchanged.
    """
    rename_table = {
        "sensor.old_entity_name": "sensor.new_entity_name",
    }
    return rename_table.get(old_name, old_name)
def cycle(permutation, start):
    """
    Compute a cycle of a permutation.

    :param permutation: Permutation in one-line notation (length n tuple of the numbers 0, 1, ..., n-1).
    :param start: Permutation element to start with.
    :return: Tuple of elements we pass until we cycle back to the start element.

    .. rubric:: Examples
    >>> cycle((2, 3, 0, 1), 0)
    (0, 2)
    >>> cycle((2, 3, 0, 1), 1)
    (1, 3)
    """
    elements = [start]
    current = permutation[start]
    # Follow the permutation until it loops back to where we began.
    while current != start:
        elements.append(current)
        current = permutation[current]
    return tuple(elements)
def check(value, condition, string=""):
    """
    Verify that ``condition`` holds and return ``value`` unchanged.

    Useful for conditional assignments, e.g.::

        xs = [x]*n_elements if not isinstance(x, (list, tuple)) else check(x, len(x)==n_elements)

    :param value: value to pass through
    :param condition: truthy value required for success
    :param string: message attached to the AssertionError on failure
    :return: ``value``
    :raises AssertionError: if ``condition`` is falsy (note: skipped under ``python -O``)
    """
    assert condition, string
    return value
def clean_data(df):
    """Return ``df`` with duplicate rows removed, keeping each first occurrence."""
    deduped = df.drop_duplicates(keep='first')
    return deduped
def get_maximally_ambiguous_network(query_gene_expression_dict):
    """Return a maximally ambiguous network: every gene in the query dict
    is assigned the expression value 1.0.
    """
    return dict.fromkeys(query_gene_expression_dict, 1.0)
import json
def read_json(num, filename_to_read="yelp_academic_dataset_review.json"):
    """Read review objects from the Yelp dataset (JSON-lines format).

    The reviews are Yelp review objects; we extract the review text and
    star rating from each one.

    Args:
        num: the number of JSON objects (lines) to read from the start
            of the file.
        filename_to_read: path to the dataset file.

    Returns:
        A list of (review, stars) tuples, one per review read.

    Raises:
        json.JSONDecodeError: if a line is malformed, or if the file has
            fewer than ``num`` lines (an empty readline() result is not
            valid JSON).
    """
    result = []
    # Context manager guarantees the handle is closed even on error;
    # the original opened the file and never closed it.
    with open(filename_to_read, "r") as f:
        for _ in range(num):
            data = json.loads(f.readline())
            result.append((data["text"], data["stars"]))
    return result
def any_in(a, b):
    """Checks if 'a in b' is true for any element of a."""
    for element in a:
        if element in b:
            return True
    return False
def get_pathway_list(gene, pathways_list):
    """
    Search the given pathways for the given gene and return the names of
    the pathways in which it appears (first-occurrence order, no duplicates).

    :param gene: A gene name in HUGO (HGNC) format
    :type gene: string
    :param pathways_list: List of pathways and genes that play role in these pathways
    :type pathways_list: List of dictionaries with 'pathway_name' and 'genes' keys
    :return: A list of pathway names where the given gene plays a role
    """
    contributed = []
    for entry in pathways_list:
        if gene in entry['genes']:
            name = entry['pathway_name']
            if name not in contributed:
                contributed.append(name)
    return contributed
from typing import Counter
def invert_dict_mapping_unique(source_dict):
    """
    Invert keys and values of a dict. Only entries whose value occurs
    exactly once in the source mapping are inverted; duplicated values
    are dropped entirely.
    """
    value_counts = Counter(source_dict.values())
    unique_values = {value for value, count in value_counts.items() if count == 1}
    return {value: key for key, value in source_dict.items() if value in unique_values}
import requests
from bs4 import BeautifulSoup
def netcdf_links(year: str, month: str) -> list[str]:
    """Return the URLs of any netCDF (.nc) files listed in the NOAA OISST
    index page for the selected year and month.

    Args:
        year: four-digit year, e.g. "2021"
        month: zero-padded two-digit month, e.g. "03"

    Returns:
        Absolute URLs of the .nc files found on the index page.
    """
    fetch_url = f"https://www.ncei.noaa.gov/data/sea-surface-temperature-optimum-interpolation/v2.1/access/avhrr/{year}{month}/"
    req = requests.get(fetch_url)
    soup = BeautifulSoup(req.text, "html.parser")
    links = []
    for link in soup.find_all("a"):
        href = link.get("href")
        # Tag.get() returns None for anchors without an href attribute;
        # the original would raise TypeError on `".nc" in None`.
        if href and ".nc" in href:
            links.append(fetch_url + href)
    return links
def solve_captcha(line):
    """
    Sum every digit that equals the digit immediately before it, treating
    the string as circular (index -1 wraps to the last character).

    >>> solve_captcha('1122')
    3
    >>> solve_captcha('1111')
    4
    >>> solve_captcha('1234')
    0
    >>> solve_captcha('91212129')
    9
    """
    total = 0
    for i, c in enumerate(line):
        if line[i - 1] == c:
            total += int(c)
    return total
def flip_list(a, inplace=False):
    """
    Flip (reverse) a list.

    Parameters
    ----------
    a : list
        List to be reversed.
    inplace : bool, optional
        If True, reverse the list "in place" and return None;
        otherwise (default) return a new reversed list.

    Returns
    -------
    flipped : list (or None)
        The flipped list. If `inplace=True`, None is returned.

    >>> flip_list([1, 2, 3])
    [3, 2, 1]
    >>> a = [1, 2, 3]
    >>> flip_list(a, inplace=True)
    >>> a
    [3, 2, 1]
    """
    if inplace is True:
        a.reverse()
        return None
    return list(reversed(a))
import getpass
def query_user(prompt, var_type, default=None, hide=False):
    """
    Query user for a certain value and let them try again, if they fail entering a valid value.

    A default value can be given, that is returned when the user enters an empty string. If no default value is given,
    an empty input may still be valid if the given type allows construction from empty string. The default value – if
    given – is automatically appended to the prompt (in brackets).

    Booleans must be entered with 'y' or 'n' by the user.

    :param prompt: The prompt string to be presented to the user
    :type prompt: str
    :param var_type: The type to convert the value to. If it fails with ValueError the user is queried again.
    :type var_type: type
    :param default: The default value. Must be of the var_type or None.
    :param hide: If True, getpass() is used for the query to hide the input in the terminal (for password inputs etc.)
    :type hide: bool
    :return: The entered value, converted to the given var_type
    :rtype: var_type
    """
    if var_type is bool:
        prompt += " [{}/{}]".format('Y' if default else 'y', 'n' if default or default is None else 'N')
    elif var_type is str:
        # The original had an if/else here with two byte-identical
        # branches; the default is shown in brackets either way.
        prompt += " [{}]".format(default)
    while True:
        result = getpass.getpass(prompt + " ") if hide else input(prompt + " ")
        if not result and default is not None:
            return default
        if var_type is bool:
            # Must test against the tuple ('y', 'n'): the original used
            # `in "yn"` (substring membership), which wrongly accepted
            # '' and 'yn' and silently returned False for them.
            if result.lower() in ('y', 'n'):
                return result.lower() == 'y'
            else:
                print("Invalid input. Must be 'y' or 'n'.")
        else:
            try:
                return var_type(result)
            except ValueError:
                print("Not a valid {}. Please try again.".format(var_type.__name__))
import requests
def get_page_url_by_title(title: str, base_url: str) -> str:
    """
    Resolve the full URL of a page from its title via the Wikipedia
    (MediaWiki) API.

    :param title: Title heading
    :param base_url: Wikipedia API base URL
    :return: Full URL of the page
    """
    query_params = {
        "action": "query",
        "format": "json",
        "prop": "info",
        "inprop": "url",
        "titles": title
    }
    payload = requests.get(url=base_url, params=query_params).json()
    # The API keys the single result by its page id; unpack that sole entry.
    (page_id, page_info), = payload['query']['pages'].items()
    return page_info['fullurl']
def _get_dp_bin(dp):
"""Returns lower bin width for the given depth.
Bin width
- 0..19: 1bp
- 20..49: 5bp
- 50..199: 10bp
- 200..: = 200
"""
if dp < 20:
return dp
elif dp < 50:
return (dp // 2) * 2
elif dp < 200:
return (dp // 5) * 5
else:
return 200 | a522ee79820bf12dc8b812d00d9ffc1bb15fa4da | 120,560 |
def is_json_response(response):
    """
    Returns True if the given response declares a JSON body.

    Args:
        response (requests.models.Response): The requests library response object

    Returns:
        bool: True if the Content-Type header's media type is application/json
    """
    # The header may carry parameters, e.g. "application/json; charset=utf-8",
    # which the original exact-equality check rejected. Compare only the
    # media type, case-insensitively, and tolerate a missing header.
    content_type = response.headers.get("Content-Type", "")
    media_type = content_type.split(";", 1)[0].strip().lower()
    return media_type == "application/json"
def load_source_with_environment(source, component_name, environment=None):
    """
    Execute a source snippet, injecting the variables specified in
    ``environment``, and return the variable named ``component_name``
    that the snippet defined.

    This should be used for source files that need to register ``@app``
    callbacks; in that case, be sure to pass app in the environment.

    NOTE: ``exec`` runs arbitrary code — only call this on trusted source.
    """
    namespace = environment if environment else {}
    exec(source, namespace)
    return namespace[component_name]
import io
def indent(string, spaces=4, tabs=0, notAtStart=False, notAtEnd=False):
    """
    Return a str copy of the input where the indentation sequence —
    `tabs` (default: 0) horizontal tab characters followed by
    `spaces` (default: 4) space characters — has been inserted

    - before the first character of the input, and
    - after every newline in the input.

    If `notAtStart` is True (default: False), do not insert the indentation
    sequence before the start of the input.
    If `notAtEnd` is True (default: False), do not insert the indentation
    sequence after a trailing newline.
    """
    prefix = "\t" * tabs + " " * spaces
    indented_newline = "\n" + prefix
    out = "" if notAtStart else prefix
    if notAtEnd and string.endswith("\n"):
        # Leave the final newline unindented: replace all but the last one.
        out += string.replace("\n", indented_newline, string.count("\n") - 1)
    else:
        out += string.replace("\n", indented_newline)
    return out
def text_equals(text):
    """ Tests whether the evaluated element's text matches ``text``.

    Returns an evaluator callable taking ``(element, _)``; the second
    argument is accepted but ignored.
    """
    return lambda element, _: element.text == text
import struct
def _hkey_to_bytes(hkey):
"""Converts 128 bits integer hkey to binary representation."""
max_int64 = 0xFFFFFFFFFFFFFFFF
return struct.pack('=QQ', (hkey >> 64) & max_int64, hkey & max_int64) | 6a0ce3ff3863cef6873033360689385bedb50f99 | 120,591 |
def vals_to_array(offset, *content):
    """
    Slice all the values at an offset from the content arrays into an array.

    :param offset: The offset into the content arrays to slice.
    :param content: The content arrays.
    :return: An array of all the values of content[*][offset], in argument order.
    """
    # Comprehension form; also avoids shadowing the builtin `slice`.
    return [arr[offset] for arr in content]
def MaskList(inputList, mask):
    """
    Return a list containing elements of the input list where the
    corresponding elements of the input mask are truthy.

    Raises AssertionError if the two sequences differ in length.
    """
    assert(len(inputList) == len(mask))
    return [entry for entry, keep in zip(inputList, mask) if keep]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.