content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
def get_pins(file_names):
"""
    Get pins from USB device names (must be in the format MV1-****)
    Args:
        file_names: list of binary file paths
    Returns:
        pin_keep: list of pin strings (the text after the last '-' and before the first '/')
"""
pin = []
pin_keep = []
for item in file_names:
pin.append(re.sub(r'.*-', '', item))
for items in pin:
pin_keep.append(items.split('/', 1)[0])
return pin_keep | 62f4150182e81afa9b0bd1a1deff06111d3fd998 | 102,371 |
def get_number_aliens_x(ai_settings, alien_width):
"""Ustalenie liczby obcych, którzy zmieszczą się w rzędzie.
:param ai_settings: Ustawienia
:param alien_width: Szerokość statku obcego
:return: Ilość obcych w rzędzie - int
"""
available_space_x = ai_settings.screen_width - 2 * alien_width # Ilość miejsca w poziomie
number_aliens_x = int(available_space_x / (2 * alien_width)) # Ile obcych zmieści się w rzędzie
return number_aliens_x | c6287df0fb439cf1b059ee0520c109541306c78f | 102,373 |
def date_span_intersection(date_span_1, date_span_2):
"""Return a tuple of dates representing the overlap between `date_span_1`
and `date_span_2`. If the date spans do not overlap, return `None`.
"""
intersection_first_date = max(date_span_1[0], date_span_2[0])
intersection_second_date = min(date_span_1[1], date_span_2[1])
if intersection_first_date > intersection_second_date:
# Date spans don't overlap
return None
else:
return (intersection_first_date, intersection_second_date) | 48b34877d863cb2ba9389857f8140728e83950a2 | 102,374 |
def str_to_pairs(poly: str) -> dict:
"""For the parm string, return a dictionary of counts of adjacent paris of characters.
For example 'AAAB, returns {'AA': 2, AB: 1}."""
pairs = {}
for i in range(len(poly) - 1):
pair = poly[i:i+2]
if pair in pairs:
pairs[pair] += 1
else:
pairs[pair] = 1
return pairs | 312687580f36c22aa10affc1b17b5a753582c88f | 102,377 |
def km_to_mile(km):
"""
Converts Kilometers to Miles
"""
try:
return float(km) / 1.609344
except ValueError:
return None | 643a8df55cd3d71639e14b2d221607023d42d6a3 | 102,381 |
def epc_calc_bin_mode(reg_dict):
"""
Get the current binning modes
Parameters
----------
reg_dict : dict
The dictionary that contains all the register information
Returns
----------
bool
Row binning mode
bool
Column binning mode
"""
bin_mode = reg_dict["bin_mode"][2]
col_bin = bool(bin_mode & 0x01)
row_bin = bool(bin_mode & 0x02)
return row_bin, col_bin | 551a36dc130c3c5b4048acadf50d108dc9100390 | 102,383 |
def forward_euler(x,h,f):
"""Forward Euler integration method for an autonomous system f."""
return x + h * f(x) | 3369b6124e66b5c78f4e33d59d3107617bd0cf24 | 102,385 |
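A minimal usage sketch for forward_euler above, assuming a simple exponential-decay system; the step size and step count are illustrative only.
# Assumption: dx/dt = -x (exponential decay), h = 0.1, 10 steps.
def decay(x):
    return -x

x = 1.0
h = 0.1
for _ in range(10):                  # 10 explicit Euler steps of size 0.1
    x = forward_euler(x, h, decay)
print(x)                             # ~0.3487; approaches exp(-1) ~ 0.368 as h -> 0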
def hydrophobic_atom_count(mol2_dict, hphob_types=('C.1', 'C.2', 'C.3', 'C.ar', 'S.3')):
"""
Calculate the number of hydrophobic atoms in a Tripos MOL2 file based on
SYBYL atom types in `hphob_types`.
:param mol2_dict: Tripos MOL2 atom records as returned by `parse_tripos_atom`
:type mol2_dict: :py:dict
    :param hphob_types: hydrophobic atoms as SYBYL atom type
:type hphob_types: :py:tuple, :py:list
:return: hydrophobic atom count
:rtype: :py:int
"""
return sum([1 for atoms in mol2_dict.values() if atoms['atom_type'] in hphob_types]) | 268e8a60de800b526520921bce902290c38fb19d | 102,387 |
def file_length(file_path: str) -> int:
"""
Returns the number of lines in a file
"""
i = -1
with open(file_path) as f:
for i, l in enumerate(f):
pass
return i + 1 | df4c200287b136e299978783cbd05f1c815026b6 | 102,388 |
import re
def scrub_pmid(value):
"""
Minimal cleanup on incoming PMIDs for validation.
http://www.nlm.nih.gov/bsd/mms/medlineelements.html#pmid
"""
if value.startswith("PMC"):
return None
match = re.findall(r'([1-9]{1}\d{2,7})', value)
try:
v = match[0]
except IndexError:
return None
    # Don't allow 0 to be returned.
    if v == "0":
return None
return v | d85e8270e11bed225aed5b69d1855684b4862d69 | 102,392 |
def _table_exists(client, database, table):
"""
Check if a specific Glue table exists
"""
try:
client.get_table(DatabaseName=database, Name=table)
return True
except client.exceptions.EntityNotFoundException:
return False | eb6d107915097ac8fa6fb0841fba8005b55214b8 | 102,394 |
def is_base_pair(s1, s2):
""" (str, str) -> bool
Precondition: s1 and s2 both contain a single character from 'A', 'T', 'C'
or 'G'.
Return True iff s1 and s2 form a base pair.
>>> is_base_pair('A','T')
True
>>> is_base_pair('G','T')
False
"""
cond1 = (s1 == 'A' and s2 == 'T')
cond2 = (s1 == 'T' and s2 == 'A')
cond3 = (s1 == 'G' and s2 == 'C')
cond4 = (s1 == 'C' and s2 == 'G')
if cond1 or cond2 or cond3 or cond4:
return True
else:
return False | feb8d7ac23f610be077f9e898bd796b781a5d5ca | 102,395 |
def num_in_numbits(num, numbits):
"""Does the integer `num` appear in `numbits`?
Returns:
A bool, True if `num` is a member of `numbits`.
"""
nbyte, nbit = divmod(num, 8)
if nbyte >= len(numbits):
return False
return bool(numbits[nbyte] & (1 << nbit)) | b7c6f684a3c91088b09bdd04e513b65dd273eefd | 102,396 |
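A small illustration of num_in_numbits using a hand-built one-byte bitmap; the values are arbitrary.
# One hand-built byte: 0b00001000 has only bit 3 set.
numbits = bytes([0b00001000])
print(num_in_numbits(3, numbits))    # True
print(num_in_numbits(2, numbits))    # False
print(num_in_numbits(99, numbits))   # False: byte index beyond the stored bytes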
def _printable_name(model_obj):
"""Returns a print-frendly name for a model object.
:param model_obj: a model, schema, table, column, key, or foreign key object.
:return: string representation of its name or "catalog" if no name found
"""
if not hasattr(model_obj, 'name'):
return 'catalog'
if hasattr(model_obj, 'constraint_name'):
return "%s:%s" % model_obj.name
if hasattr(model_obj, 'table'):
return "%s:%s:%s" % (model_obj.table.schema.name, model_obj.table.name, model_obj.name)
if hasattr(model_obj, 'schema'):
return "%s:%s" % (model_obj.schema.name, model_obj.name)
    return model_obj.name | 7bdde57ac5ac94067417745eb2b2535635d8e8df | 102,398 |
def get_urls_from_object(tweet_obj):
"""Extract urls from a tweet object
Args:
tweet_obj (dict): A dictionary that is the tweet object, extended_entities or extended_tweet
Returns:
list: list of urls that are extracted from the tweet.
"""
url_list = []
if "entities" in tweet_obj.keys():
if "urls" in tweet_obj["entities"].keys():
for x in tweet_obj["entities"]["urls"]:
try:
url_list.append(x["expanded_url"] if "expanded_url" in x.keys() else x["url"])
except Exception:
pass
return url_list | 5f7318f5edfbcf83a827633131c2346e9625368a | 102,402 |
from collections import OrderedDict
import itertools
def dictProduct(dicts):
"""
    Computes the Cartesian product of a dictionary of lists, yielding
    one OrderedDict per combination (returned as a generator).
Gladly copied this code from:
https://stackoverflow.com/questions/5228158/cartesian-product-of-a-dictionary-of-lists
Example input:
options = {"number": [1,2,3], "color": ["orange","blue"] }
Example output:
[ {"number": 1, "color": "orange"},
{"number": 1, "color": "blue"},
{"number": 2, "color": "orange"},
{"number": 2, "color": "blue"},
{"number": 3, "color": "orange"},
{"number": 3, "color": "blue"}
]
"""
dicts = OrderedDict(dicts)
keys = list(dicts)
return (OrderedDict(zip(keys, x)) for x in itertools.product(*dicts.values())) | ce2297437df3a9f4fd6b53fe64dec60f217b2832 | 102,403 |
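A usage sketch for dictProduct; note that it returns a generator, so the docstring's list-style output is obtained by wrapping it in list().
options = {"number": [1, 2, 3], "color": ["orange", "blue"]}
combos = list(dictProduct(options))
print(len(combos))        # 6 combinations
print(dict(combos[0]))    # {'number': 1, 'color': 'orange'}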
def addTruncations(lexicon, textwords, truncation_label, min_cut_phonemes=2, min_left_phonemes=2, LEXICON_CONSTANT=1):
""" Adds truncated lexicon entries for all textwords into the lexicon inplace.
Returns the added words as a set.
Note: LEXICON_CONSTANT is used here to keep compatibility with both lexiconp.txt and lexicon.txt
formats. It is either 1 or 0, as lexicon[uttid][0] can be a weight or the first phoneme of the
pronunciation.
"""
truncation_words = set()
for word in textwords:
for pronunciation in lexicon[word]:
pronunciation_list = pronunciation.split()
if len(pronunciation_list) - LEXICON_CONSTANT < min_cut_phonemes + min_left_phonemes:
continue
for upto_pos in range(min_left_phonemes + LEXICON_CONSTANT, len(pronunciation_list) - min_cut_phonemes):
truncation = u" ".join(pronunciation_list[:upto_pos])
truncation_word = truncation_label + word
lexicon.setdefault(truncation_word, []).append(truncation)
truncation_words.add(truncation_word)
return truncation_words | d4a9582f78b2761a6da8079a1cdf329a09163465 | 102,405 |
import plistlib
def payload_to_xml(payload):
"""Quick and dirty conversion of dictionaries to XML with plistlib. Returns a converted dictionary with the header and footer removed."""
unprocessed = plistlib.dumps(payload).decode("utf-8")
header_removed = unprocessed.split("\n", 3)[3]
return header_removed.rsplit("\n", 2)[0] | 5be4e4c4d41298bf889490f706e07d9b1c4118d0 | 102,409 |
def get_genome_build(variant_case_obj):
"""Find genom build in `variant_case_obj`. If not found use build #37"""
build = variant_case_obj.get("genome_build")
if build in ["37", "38"]:
return build
return "37" | 75a21b0349dd9cd525b620018ca7257afc580ec7 | 102,412 |
def column_compare(file1, file2):
"""Before and after comparisons of non-na values for column"""
stats1 = file1.notna().sum()
stats2 = file2.notna().sum()
return stats1, stats2 | a38bca82240a425e9bc666a5b4f1a4a699485785 | 102,414 |
def _find_rct_position(rxn, mol):
"""Find the position of a reactant in a reaction"""
react_pos = -1
for pos, rct in enumerate(rxn.GetReactants()):
if mol.HasSubstructMatch(rct):
react_pos = pos
return react_pos | fcc41137ca763de851e7be30fba02a493eaba0b2 | 102,416 |
def is_valid_url(url):
"""
Check if the href URL is valid
"""
return (
url != "#" and
url != "" and
url[0] != "?" and
url[0] != "#" and
not url.startswith("tel:") and
not url.startswith("javascript:") and
not url.startswith("mailto:")
) | dd3692f4ec9cdbce3a253ead3d4f18ea67f47e31 | 102,418 |
def make_response(data, message, status="success", error=False):
"""Return a standardised response object."""
return {
"data": data,
"message": message,
"status": status,
"error": error
} | 2755c9d5c3db7bed9e2e9d7f9e91aab290500a57 | 102,426 |
def get_list_of_tuples(list_given: list) -> list:
"""
:param list_given: List containing List
:return: List containing tuple
"""
list_result = []
for list_item in list_given:
list_result.append(tuple(list_item))
return list_result | bab3edbb523b5dabcef1cdc9754dc88a2fe035d9 | 102,429 |
def calculate_percent(value, total):
"""
Return the rounded value percentage of total.
"""
ratio = (value / total) * 100
return round(ratio, 2) | 4defd88c55689009da872a2e0d85d4f9ceb79d74 | 102,430 |
from typing import Tuple
import re
def check_tvm_version(curr: str, min_req: str) -> bool:
"""Check if the current TVM version satisfies the minimum requirement.
Parameters
----------
curr: str
The current version.
min_req: str
The minimum requirement version.
Returns
-------
check: bool
Return true if the current version satisfies the minimum requirement.
"""
def parse_version(ver_str: str) -> Tuple[Tuple[int, int, int], bool]:
"""Parse TVM version to a tuple-3.
Parameters
----------
ver_str: str
The TVM version string.
Returns
-------
ver: Tuple[Tuple[int, int, int], bool]
(3-way version number, is a release version)
"""
# The release version such as 0.8.0.
tokens = re.search(r"(\d|)\.(\d+)\.(\d+)", ver_str)
if tokens is not None:
return ((int(tokens.group(1)), int(tokens.group(2)), int(tokens.group(3))), True)
# The dev version such as 0.8.dev0 or 0.8.dev94+g0d07a329e.
tokens = re.search(r"(\d|)\.(\d+)\.dev(\d+)\+*.*", ver_str)
if tokens is not None:
return ((int(tokens.group(1)), int(tokens.group(2)), int(tokens.group(3))), False)
raise RuntimeError("Unrecognized TVM version: %s" % ver_str)
curr_ver, curr_rel = parse_version(curr)
req_ver, req_rel = parse_version(min_req)
# Major version.
if curr_ver[0] < req_ver[0]:
return False
if curr_ver[0] > req_ver[0]:
return True
    # Minor version.
if curr_ver[1] < req_ver[1]:
return False
if curr_ver[1] > req_ver[1]:
return True
if curr_rel and not req_rel:
# Current version is "release" but the target version is "dev".
return True
if not curr_rel and req_rel:
# Current version is "dev" but the target version is "release".
return False
# Both are "dev" versions.
return curr_ver[2] >= req_ver[2] | f99d7da7212a5c96b186681ceb4b03dc47f2cb26 | 102,432 |
from collections import Counter
def sagan_ip_path(trace):
"""given an sagan traceroute measurement obj,
return a list of IP hop, each hop contain only one IP address.
Args:
trace (ripe.atlas.sagan.traceroute.TracerouteResult): one single instance of traceroute measurement;
in sagan object, for each hop, have at most three IP address.
Returns:
single_path (list): e.x. ['137.194.164.254', '137.194.4.240', '212.73.200.45', ...]
that is one IP per hop
"""
single_path = []
full_path = trace.ip_path
for hops in full_path:
single_path.append(Counter(hops).most_common(1)[0][0])
return single_path | 4e9db2e0f13d62ee953e76659c87f7084959f775 | 102,435 |
def _extract_bound_violations(
grb_model, variables, negative_slack_prefix, positive_slack_prefix
):
"""
Extracts the lower and upper bound changes that have to applied to
the input model in order to make its solution feasible.
"""
# Extracts the magnitude of the bound violations on the relaxed
# Gurobi model
epsilon = grb_model.Params.FeasibilityTol
violations = []
for var in variables:
lb_change = -grb_model.getVarByName(negative_slack_prefix + var).X
ub_change = grb_model.getVarByName(positive_slack_prefix + var).X
if lb_change < -epsilon:
violations.append((var, lb_change - epsilon, 0))
elif ub_change > epsilon:
violations.append((var, 0, ub_change + epsilon))
return violations | 1663445e3876e560f47a17a8cc35fa79a1da469a | 102,441 |
from typing import List
def _format_sql_query(
query_path: str,
strings_to_insert: List[str] = [],
) -> str:
"""
Loads a sql query from a file and inserts parameter strings
"""
# Get SQL query from file as string
with open(query_path, "r") as f:
sql = f.read()
# Insert format string into {} in query string
sql = sql.format(*strings_to_insert)
# Remove leading and trailing whitespaces
sql = sql.strip()
# Remove carriage return characters
sql = sql.replace("\r", "")
return sql | e486883290271b2ddcf67b5858b9a625f2aacb22 | 102,445 |
def get_processes(data, phases, train=True):
"""Get the processes for which certain phases are present. For train
processes, the provided phases must be a subset of the present phases;
for the test set, the provided and present phases must exactly match.
Parameters
----------
- data: pd.DataFrame
the raw data
- phases: array-like
the phases that (at least) need to be present in the data
    - train: bool
whether or not we must extract processes from the training or
testing data
Returns
-------
- filtered_processes: array-like
"""
filtered_processes = []
phases = set(phases)
processes = set(data['process_id'])
for process in processes:
process_phases = set(data[data['process_id'] == process]['phase'])
if train:
if phases.issubset(process_phases):
filtered_processes.append(process)
else:
if len(phases) == len(process_phases) == len(phases.intersection(process_phases)):
filtered_processes.append(process)
return filtered_processes | 1b4f9ae271d7af8b43034cd66e0abff3566ee3aa | 102,452 |
import re
def create_file_name(document_tracking_id, valid_output):
"""
Returns filename according to standard:
https://docs.oasis-open.org/csaf/csaf/v2.0/csd01/csaf-v2.0-csd01.html#51-filename
    If valid_output is false, `_invalid` is appended to the filename.
"""
if document_tracking_id is not None:
file_name = re.sub(r"([^+\-_a-z0-9]+)", '_', document_tracking_id.lower())
else:
file_name = 'out'
if not valid_output:
file_name = f'{file_name}_invalid'
file_name = f'{file_name}.json'
return file_name | 38d86d2255bd8ad332857f7f58c9210f43a64fba | 102,454 |
def update_labels_and_tags(dataset_id,
existing_labels_or_tags,
new_labels_or_tags,
overwrite_ok=False):
"""
Updates labels or tags in dataset if not set or needing to be updated
or overwrites existing labels or tags in the dataset
:param dataset_id: string name to identify the dataset
:param existing_labels_or_tags: labels already existing on the dataset = Dict[str, str]
tags already existing on the dataset = Dict[str, '']
:param new_labels_or_tags: new labels to add to the dataset = Dict[str, str]
new tags to add to the dataset = Dict[str, '']
    :param overwrite_ok: if True, existing label or tag values may be overwritten;
        if False (default), changing an existing value raises RuntimeError
:raises: RuntimeError if parameters are not specified
:raises: RuntimeError if overwrite_ok is false and new value for label is provided
:return: a dictionary of new labels or tags
"""
if not dataset_id:
raise RuntimeError("Provide a dataset_id")
if not new_labels_or_tags:
raise RuntimeError("Please provide a label or tag")
# excludes duplicate keys
updates = dict(new_labels_or_tags.items() - existing_labels_or_tags.items())
overwrite_keys = updates.keys() & existing_labels_or_tags.keys()
if overwrite_keys:
if not overwrite_ok:
            raise RuntimeError(f'Cannot update labels on dataset {dataset_id} '
                               f'without overwriting keys {overwrite_keys}')
return {**existing_labels_or_tags, **updates} | 31c8a34d059b8a2493910e3ebe8a89d8459201c8 | 102,456 |
def mib_to_gib(size: float) -> float:
"""Convert memory unit from MiB to GiB."""
return round(size / 2 ** 10, 2) | 81021e41e273345f187a2c9e6d69bdedee257442 | 102,457 |
def split_array(array, ratio=0.9):
"""
Split an array according to a given ratio along the first axis. Useful to partition arrays into train/val
:param array: The array to split
:param ratio: The ratio to split with
:return:
"""
assert ratio > 0
assert ratio < 1
return (array[0:int(ratio*len(array))], array[int(ratio*len(array)):]) | f45d8b40ba5a4420e68f519f0fba1d8c7edc3511 | 102,458 |
import requests
def get_call_api(url, payload, headers):
"""Does a GET API call for a given url along with provided payload & headers.
Args:
url (str): Url for GET API call
payload (dict): Payload for GET API call
headers (dict): Headers for GET API call
Returns:
        requests.Response: Response of the GET API call
"""
return requests.request("GET", url, headers=headers, data=payload) | 5ccba0cfb21b1b03da8d83b44e24c669a0434641 | 102,461 |
import html
import re
def clean_text(text):
"""
Removes HTML tags, unescapes HTML entities, and truncates text to 280 characters.
Parameters
----------
text : str
HTML text
"""
cleaned_text = html.unescape(re.sub(re.compile('<.*?>'), '', text))
if len(cleaned_text) > 280:
cleaned_text = f"{cleaned_text[:277]}..."
return cleaned_text | fed63c58384d6cb33cc3944647f73af75c4faec5 | 102,464 |
import binascii
def b2hex(bts):
"""Produce a hex string representation of the given bytes.
:param bts: bytes to convert to hex.
:type bts: bytes
:rtype: str
"""
return binascii.hexlify(bts).decode("ascii") | 2b8e637050ee8ad9897113d66b7af4ca5c58080c | 102,466 |
def lnot(x):
"""Logical inverse"""
return not x | c46b71e38dd9dc020a91e5e439994d886b9eb8f2 | 102,467 |
def _root_sum_of_squares(values):
    """Returns the root of the sum of squares of a given list.
    Args:
        values (list): A list of floats
    Returns:
        [float]: Root sum of squares
    """
    return sum(el ** 2 for el in values) ** 0.5 | c8f6a7ce87f3dd8d0212601140010bfc2c7410ff | 102,471 |
def linear(x1, x2):
"""
linear kernel function
Parameters
------------
x1 : numpy array, (..., n)
the first input feature vector
x2 : numpy array, (..., n)
the second input feature vector
Returns
---------
kernel : numpy array
output kernel
"""
return (x1*x2).sum(axis=-1) | f387d2196f4ea410f94708fabfd63972bb5db936 | 102,472 |
import pathlib
def explore_path(target: pathlib.Path, recursive=False):
"""Explore paths add return a list of files found"""
targets = list()
if target.is_dir():
for sub in target.iterdir():
if sub.name.startswith("."):
continue
if sub.is_dir():
if recursive:
targets += explore_path(sub, recursive)
else:
targets.append(sub)
else:
targets.append(target)
return targets | e62a549a80bd5e3f130f9e3b0930a9ff898a9c8e | 102,473 |
def omega(delta_lambda):
"""Calculate the Buchdahl chromatic coordinate."""
return delta_lambda/(1 + 2.5*delta_lambda) | b8659be24bd94b85bf8d82ee4a7b9991628de1ba | 102,476 |
def grey2float(im, maxval=255):
"""Converts the given greyscale image to a float one"""
assert im.mode in 'IL'
return im.point(lambda p: p/float(maxval), 'F') | fbadd8d5fdbf3e84d1bcf926759f0bff8d67e8ea | 102,479 |
def extract_model_from_path(model_run_path: str) -> str:
""" Take the model run path, and get back the model abbreviation """
name_split = model_run_path.strip('/').split('/')
return name_split[-5] | 83bd3d47ef82beb6de0a009e9689b1903a88a2b0 | 102,484 |
def uniqify(seq): # Dave Kirby
"""
Return only unique items in a sequence, preserving order
:param list seq: List of items to uniqify
:return list[object]: Original list with duplicates removed
"""
# Order preserving
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)] | a1a15b06f2c632c9a95dca7b687177e264e5551e | 102,491 |
def to_ISO8601(dt):
"""Convert from a datetime to a timestamp string."""
if hasattr(dt, "microsecond") and dt.microsecond:
return dt.isoformat(timespec="milliseconds")
return dt.isoformat() | 1d9d6db7a4adb06fc34943c4fe299fe3d1d1f033 | 102,493 |
def no_check(param):
"""doesn't perform any check, always returns true"""
return True | 02bcc16249dd34fcfa9113ee90fb4bef3bc16fca | 102,495 |
def trailing_zeros(number):
"""
Returns the 1-based index of the first bit set to 1 from the right side of a
32bit integer
>>> trailing_zeros(0)
32
>>> trailing_zeros(0b1000)
4
>>> trailing_zeros(0b10000000)
8
"""
if not number:
return 32
index = 0
while (number >> index) & 1 == 0:
index += 1
return index + 1 | 1731213ddf8a2d6c5ba336fb8aab7274a4a9750c | 102,496 |
def class_import_string(o):
"""Returns a fully qualified import path of an object class."""
return f'{o.__class__.__module__}.{o.__class__.__qualname__}' | 14ca6ddf9a542ef5404b5be220611a69b5c0449d | 102,497 |
def _baseline_fun(x, a):
"""A constant function."""
return a | 28b59bfbade03e279755f97a933b94ba43509267 | 102,498 |
import re
def extract_day_of_week(period_row, period_column, worksheet):
"""
Extracts the day of week for a ridership data column be searching
for 'weekday' or 'saturday' or 'sunday' strings under a period header.
Args:
period_row: Row for the period header. The day of week should be under it.
period_column: Column for the period header.
worksheet: The worksheet where the search occurs.
Returns:
A string with the day of week if one present; ``None`` otherwise.
"""
day_of_week = None
day_cell = worksheet.cell((period_row + 1), period_column)
if day_cell.ctype == 1:
day = day_cell.value.lower()
regex_pattern = re.compile(r'(weekday|saturday|sunday)')
result = regex_pattern.match(day)
if result:
return result.group(1)
return day_of_week | f95fab127ff4771df121ca74699780c7c3c9dbb0 | 102,499 |
def checkdms(dms):
"""Verify a sexagesimal string; returns True if valid, False if not
"""
assert isinstance(dms, str)
try:
        d = dms.split(':')
        for i in range(3):
            float(d[i])
        return True
    except (ValueError, IndexError):
return False | 8255b4a983ee3e2e5d633ed3eba89b81a12fb6f5 | 102,500 |
import itertools
def peek(iterator):
"""
Peek at next item from iterator.
>>> item, iterator = peek(iter(range(3)))
>>> assert item == 0
>>> assert list(iterator) == [0, 1, 2]
"""
item = next(iterator)
return item, itertools.chain([item], iterator) | 7c92e4eb26cb464b8ef71e517c361a8add67fec6 | 102,505 |
def e(s):
"""Convert a string in 0.1D+01 FORTRAN notation into 0.1e+10."""
return s.replace('D', 'e') | 220dd070bf6131fbdddbfe8571d08bb1f7a94c7a | 102,506 |
def previously_valid_data(data: list, invalid_data: set):
"""
Return a list of valid data from a input list. When an element is in the
invalid data set, is must be replaced with the previous valid data.
>>> previously_valid_data(['0', '2', None, '1', None, '0', None, '2'], {None})
['0', '2', '2', '1', '1', '0', '0', '2']
>>> previously_valid_data(['0', '2', 'None', '1', None, '0', None, '2'], {None, 'None'})
['0', '2', '2', '1', '1', '0', '0', '2']
"""
last_valid_data = None
result = list()
for elem in data:
if elem not in invalid_data:
last_valid_data = elem
result.append(last_valid_data)
return result | 878b2d2aa85d538c137c6cd6a0272eb2abe076c5 | 102,508 |
def count_collisions(right: int, down: int, aoc_map: list) -> int:
"""
Given the trajectory, count the number of collisions (or close shaves) with trees.
:param right: The number of spaces to the right moved in one time tick.
:param down: The number of spaces down moved in one time tick.
:param aoc_map: The map of the hill
:return: The number of times one will encounter a tree on the way down.
"""
map_length = len(aoc_map[0])
map_height = len(aoc_map)
count = 0
j = right
for i in range(down, map_height, down):
j %= map_length
if aoc_map[i][j] == "#":
count += 1
j += right
return count | 31d99b57283edbe60e80f53d994bd0255024d31c | 102,510 |
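A tiny made-up hill map to exercise count_collisions; the map and the right/down slope are illustrative, not actual puzzle input.
# Hypothetical 4x4 map where '#' marks a tree; slope is right 1, down 1.
aoc_map = ["..#.",
           ".#..",
           "..#.",
           "#..."]
print(count_collisions(1, 1, aoc_map))   # -> 2 for this made-up map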
def get_factors(num):
"""Returns a list
Factors of the number passed as argument
"""
factors = []
inc_value = 1
while inc_value * inc_value <= num:
if num % inc_value == 0:
if num//inc_value == inc_value:
factors.append(inc_value)
else:
factors.append(inc_value)
factors.append(num//inc_value)
inc_value += 1
return factors | 0b5aa973733d3483854f583f1abae753dd509798 | 102,511 |
def validate_workgroup_state(workgroup_state):
"""
Validate State for Workgroup
Property: WorkGroup.State
"""
VALID_WORKGROUP_STATE = ("ENABLED", "DISABLED")
if workgroup_state not in VALID_WORKGROUP_STATE:
raise ValueError(
"Workgroup State must be one of: %s" % ", ".join(VALID_WORKGROUP_STATE)
)
return workgroup_state | f2ae46df59cd8cc232a5fbd03fbf05e9ccc25f9e | 102,513 |
import warnings
def ij2xy(i, j, geotrans, origin="ul"):
"""
Transforms global/world system coordinates to pixel coordinates/indexes.
Parameters
----------
i : int or np.array
Column number(s) in pixels.
j : int or np.array
Row number(s) in pixels.
geotrans : 6-tuple
GDAL geo-transformation parameters/dictionary.
origin : str, optional
Defines the world system origin of the pixel. It can be:
- upper left ("ul", default)
- upper right ("ur")
- lower right ("lr")
- lower left ("ll")
- center ("c")
Returns
-------
x : float or np.array
World system coordinate(s) in X direction.
y : float or np.array
World system coordinate(s) in Y direction.
"""
px_shift_map = {"ul": (0, 0),
"ur": (1, 0),
"lr": (1, 1),
"ll": (0, 1),
"c": (.5, .5)}
px_shift = px_shift_map.get(origin, None)
if px_shift is None:
wrng_msg = "Pixel origin '{}' unknown. Upper left origin 'ul' will be taken instead".format(origin)
warnings.warn(wrng_msg)
px_shift = (0, 0)
    # shift pixel coordinates to the desired pixel origin; avoid in-place +=
    # so that caller-provided integer arrays are not modified or rejected
    i = i + px_shift[0]
    j = j + px_shift[1]
# applying affine model: https://gdal.org/user/raster_data_model.html
x = geotrans[0] + i * geotrans[1] + j * geotrans[2]
y = geotrans[3] + i * geotrans[4] + j * geotrans[5]
return x, y | 5192aaa2d356b35c9dcf0beac4ef3e50d709c769 | 102,516 |
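A short sketch of ij2xy with a hypothetical north-up GDAL geotransform (origin at (100, 200), 10 m pixels); the numbers are for illustration only.
# Hypothetical geotransform: (x_origin, x_res, 0, y_origin, 0, -y_res)
geotrans = (100.0, 10.0, 0.0, 200.0, 0.0, -10.0)
print(ij2xy(3, 2, geotrans))               # (130.0, 180.0): upper-left corner of pixel (col=3, row=2)
print(ij2xy(3, 2, geotrans, origin="c"))   # (135.0, 175.0): pixel centre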
import warnings
def get_relative_tag_xpath(schema_dict, name, root_tag, contains=None, not_contains=None):
"""
DEPRECATED
Tries to find a unique relative path from the schema_dict based on the given name of the tag
name of the root, from which the path should be relative and additional further specifications
:param schema_dict: dict, containing all the path information and more
:param name: str, name of the tag
:param root_tag: str, name of the tag from which the path should be relative
:param contains: str or list of str, this string has to be in the final path
:param not_contains: str or list of str, this string has to NOT be in the final path
:returns: str, xpath for the given tag
:raises ValueError: If no unique path could be found
"""
warnings.warn(
'get_relative_tag_xpath is deprecated. Use the relative_tag_xpath method on the schema dictionary instead',
DeprecationWarning)
return schema_dict.relative_tag_xpath(name, root_tag, contains=contains, not_contains=not_contains) | 6faf0d14bdad568ac3db53f725e652bde0ba15c6 | 102,521 |
import re
def get_year(text):
# type: (str) -> int
"""Attempts to extract the year from text, default 0"""
year = re.search(r"\d{4}", text)
return int(year.group()) if year else 0 | a556f556b3be72bbe2128f3468ed33eed4f8dab8 | 102,526 |
def get_categories(cat_str):
"""
Transform string with categories from cli into list of categories.
"""
if cat_str is None:
cat_list = ['gititized']
    elif len(cat_str) == 0:
cat_list = []
else:
cat_list = cat_str.split(',')
return cat_list | 45c27f4c545b147fb8b16f6be301cd723af93361 | 102,528 |
def _listify(objs):
"""
Listify `objs` :: str
>>> _listify("aaa bbb ccc")
['aaa', 'bbb', 'ccc']
>>> _listify("aaa, bbb, ccc")
['aaa', 'bbb', 'ccc']
>>> _listify("aaa, bbb, c c c")
['aaa', 'bbb', 'c c c']
"""
return [s.strip() for s in objs.split(',' if ',' in objs else None)] | db2bb0dfc05587ef63d2dfd723e88c3fca0d2ff1 | 102,529 |
def get_face_size(bbox, width, height):
"""
Compute the face size in pixel coordinates.
"""
x1 = int(bbox[0] * width)
y1 = int(bbox[1] * height)
x2 = int(bbox[2] * width)
y2 = int(bbox[3] * height)
width = x2 - x1
height = y2 - y1
return width, height | 1c1da35b301ba7f2f08f0ab5e4db1537608d1a53 | 102,530 |
def camelcase_to_snake_case(_input):
""" Convert a camel case string to a snake case string: CamelCase -> camel_case
Args:
_input (str): The string to convert """
# https://codereview.stackexchange.com/a/185974
res = _input[0].lower()
for i, letter in enumerate(_input[1:], 1):
if letter.isupper():
try:
if _input[i - 1].islower() or _input[i + 1].islower():
res += "_"
except IndexError:
pass
res += letter.lower()
return res | 979e53c99efea07e7e8e57d8cf5757aabe329673 | 102,534 |
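A few illustrative conversions with camelcase_to_snake_case; the input strings are arbitrary.
print(camelcase_to_snake_case("CamelCase"))         # camel_case
print(camelcase_to_snake_case("getHTTPResponse"))   # get_http_response
print(camelcase_to_snake_case("snake"))             # snake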
import json
def load_questions(data_dir, data_type):
"""Load questions information from directory.
Args:
data_dir: str, data directory.
data_type: str, one of 'train', 'val', 'test'
Returns:
questions: list of dictionaries. Each dictionary contains information
about a question, its corresponding image and answer.
"""
filename = data_dir + '/questions/CLEVR_' + data_type + '_questions.json'
with open(filename) as data_file:
return json.load(data_file)['questions'] | caf1939258e0637d4d366ce8ef7d66d84248ec71 | 102,535 |
def set_flag(bits, bit, value):
"""
Sets the flag value.
:param bits: The bits
:type bits: int
:param bit: The bit index
:type bit: int
:param value: The bit value
:type value: bool
:returns: The bits
:rtype: int
"""
mask = 1 << bit
return bits | mask if value else bits & ~mask | 36862086bb8ea01e57f19d469d0d5ff9663f685c | 102,537 |
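A quick illustration of setting and clearing individual bits with set_flag.
flags = 0b0101
flags = set_flag(flags, 1, True)     # set bit 1   -> 0b0111
flags = set_flag(flags, 2, False)    # clear bit 2 -> 0b0011
print(bin(flags))                    # 0b11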
def nbits(val):
"""Return number of bits set to 1 in n"""
n = 0
while val:
if val & 1:
n += 1
val >>= 1
return n | eecf7a17aadc4d8d2ce143603e98fc7147099a81 | 102,538 |
def get_payload_bits(payload_bytes):
"""Return a list of bits from a list of bytes
Keyword arguments:
payload_bytes -- a list of bytes"""
payload_bits = list()
#convert each byte to a sequence of bits
for byte in payload_bytes:
temp = list()
for i in range(8):
temp.append(byte >> i & 1)
#temp has the bits in reversed order
payload_bits.extend(temp[::-1])
return payload_bits | 3563893eb84cdec4b557852bc1595d534b4d9dc8 | 102,539 |
def get_expires(cookie):
"""Get expiration time from cookie"""
return cookie.get("expires") | eb17c27d90c414c8427b9d2ee67128d57381ad88 | 102,540 |
def queue_up(process_class, inputs, storage):
"""
This queues up the Process so that it's executed by the daemon when it gets
around to it.
:param process_class: The process class to queue up.
:param inputs: The inputs to the process.
:type inputs: Mapping
:param storage: The storage engine which will be used to save the process (of type plum.persistence)
:return: The pid of the queued process.
"""
# The strategy for queueing up is this:
# 1) Create the process which will set up all the provenance info, pid, etc
proc = process_class.new_instance(inputs)
pid = proc.pid
# 2) Save the instance state of the Process
storage.save(proc)
# 3) Ask it to stop itself
proc.stop()
proc.run_until_complete()
del proc
return pid | 41399d1bda82911bbb4b4c921f714f2b506498a0 | 102,544 |
def lower_list(li):
""" Convert all element of a list to lowercase"""
if li:
return [x.lower() for x in li]
else:
return None | 0fbe2b8390c468d27a0733613121dc544059ae2e | 102,554 |
def safe_zip(*iterables):
"""Like builtin `zip`, but checks that iterables have same length."""
n = len(iterables[0])
for iterable in iterables:
if len(iterable) != n:
raise ValueError("Zipped items must all have same length.")
return zip(*iterables) | a7ad55d7872f7a38541fd5306ecd2d3cf60f307f | 102,555 |
def clean_parses(parses):
"""
Remove the stuff we don't need from a list of parses
Arguments
parses : list of (bis,buffer,route,k)
Returns
list of (bis,route)
"""
return [ (bis,route) for (bis,_,route,_) in parses ] | 6ff160187c7d99d8db9a0b6d909e7cdef066503d | 102,561 |
def treversed(*args, **kwargs):
"""Like reversed, but returns a tuple."""
return tuple(reversed(*args, **kwargs)) | 1b26313e9af2b8e515e81ca932adadf85925351e | 102,562 |
def deletes_single_file(bucket, file_path):
"""Method deletes a single file form a Google Cloud Storage Bucket.
Args:
bucket: A storage Bucket object specifying a Google Cloud Storage bucket.
file_path: A string containing a Google Cloud Storage Bucket path to a specific file.
Returns:
None.
"""
blob = bucket.blob(file_path)
blob.delete()
return None | cd87fe1188411754a9e4b467ffeef8869d9a8912 | 102,566 |
def RGBtoHSB( nRed, nGreen, nBlue ):
"""RGB to HSB color space conversion routine.
nRed, nGreen and nBlue are all numbers from 0 to 255.
This routine returns three floating point numbers, nHue, nSaturation, nBrightness.
nHue, nSaturation and nBrightness are all from 0.0 to 1.0.
"""
nMin = min( nRed, nGreen, nBlue )
nMax = max( nRed, nGreen, nBlue )
if nMin == nMax:
        # Grayscale
        nHue = 0.0
        nSaturation = 0.0
        nBrightness = nMax / 255.0
else:
if nRed == nMin:
            d = nGreen - nBlue
h = 3.0
elif nGreen == nMin:
d = nBlue - nRed
h = 5.0
else:
d = nRed - nGreen
h = 1.0
nHue = ( h - ( float( d ) / (nMax - nMin) ) ) / 6.0
nSaturation = (nMax - nMin) / float( nMax )
nBrightness = nMax / 255.0
return nHue, nSaturation, nBrightness | 5e698f58c0f01fade4bc4c4cea7b7838239c7caf | 102,567 |
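A quick sanity check of RGBtoHSB on a pure colour and a grey, assuming the `d = nGreen - nBlue` and grayscale-normalisation fixes above; a hue of 1.0 is equivalent to 0.0 (red) on the colour wheel.
print(RGBtoHSB(0, 255, 0))       # green -> (~0.333, 1.0, 1.0)
print(RGBtoHSB(128, 128, 128))   # grey  -> (0.0, 0.0, ~0.502)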
def text_to_char_index(text_data, char_dict, bos):
"""
convert characters to char indices. '| ' is added to the beginning of each sentence.
Parameters
----------
    text_data : list of string, usually sentences in a batch
    char_dict : OrderedDict, {character: index}
    bos : str, beginning-of-sentence token prepended to each sentence
Returns
-------
: a list of lists of int, usually sentences in a batch
"""
return [[char_dict.get(c, 1) for c in (bos + ' ' + l.strip())] for l in text_data] | 6bf5d1dba7db48b2eef37d89fadf94b46b76eb7b | 102,573 |
def tag_list_to_dict(tags):
"""
Takes a list of dicts and makes a single dict
"""
new_tags = {}
for tag in tags:
new_tags.update({
tag["Key"]: tag["Value"]
})
return new_tags | df1982941d032e7e870ed97b760db22e0ae1664e | 102,575 |
def bubble_sort(li):
""" Python implementation of bubble sort
args:
li = unordered list of elements in integer
returns:
ordered list of input li in ascending order
BigO:
O(n**2): there are better ways to implemet sorting
i.e)quicksort, mergesort
"""
li_length = len(li)
for i in range(li_length - 1):
for j in range(li_length - 1):
if li[j] > li[j + 1]:
temp = li[j+1]
li[j+1] = li[j]
li[j] = temp
''' better code
reference: https://github.com/minsuk-heo/problemsolving\
/blob/master/sort/BubbleSort.py
alist[j], alist[j+1] = alist[j+1], alist[j]
'''
return li | b94bd1763990e5be1b83e24ce9a54ab6018233ac | 102,577 |
def make_cdec_config_string(cdec_items, cdec_features_items):
"""
Create the string equivalent of a cdec.ini
:param cdec_items: list of cdec.ini entries
:param cdec_features_items: list of [cdec:features] entries (for backward compatibility)
this lines are interpreted as A=B where A is the name of a cdec class that implements the feature function
and B is the string used to construct the feature function object
A=B gets converted to (cdec format): feature_function=A B
:return str: the configuration string
"""
kvs = [line.split('=', 1) for line in cdec_features_items]
return '{0}\n{1}'.format('\n'.join(cdec_items),
'\n'.join('feature_function={0} {1}'.format(k, v) for k, v in kvs)) | 50a939ac0fcc193a8aa20cd360a33922861aa916 | 102,579 |
def resolve(ctx, token):
"""Resolve token (var or plain object) in current context and return it's value.
"""
return token(ctx) if callable(token) else token | e630478d665d5cd3b5c8720882f440426a056c59 | 102,580 |
def id_from_url(url):
"""Get player id from URL."""
return url.strip("/").split("/")[-1] | 58a09e26f720a5f6f02656667dbb566a25288d5e | 102,581 |
def calculate_cost(files):
""" Calculate the cost associated with a possible solution """
offset = cost = 0
for file in files:
cost += (offset + file[0]) * file[1]
offset += file[0]
return cost | beabaa0886bdc7463e10047cae1284f38dc29dac | 102,586 |
def last_modified_times(files: list) -> dict:
"""Check last modified on all files in list.
Args:
        files: list of pathlib.Path file paths
Returns:
dict of files and timestamps
"""
return {filepath: filepath.stat().st_mtime for filepath in files} | fa7bffa143499e694bf74013e75755592c89f15f | 102,587 |
def azero(seq):
"""Return True if all numbers in 'seq' are 0s."""
    # In Python 3, `filter` returns an iterator (always truthy), so the old
    # `not filter(None, seq)` idiom would always be False; use any() instead.
    return not any(seq) | 61d5914b735e95c1df40269cba1110ced630953a | 102,595 |
def get_article_info(response: dict):
"""Create a list of articles containing the:
Source name, title, description, url and author
:param response:
:return: a list of articles
"""
article_info = []
if response:
for article_number in range(response["totalResults"]):
source_name = response["articles"][article_number]["source"]["name"]
title = response["articles"][article_number]["title"]
description = response["articles"][article_number]["description"]
url = response["articles"][article_number]["url"]
article_info.append([title, description, url, source_name])
return article_info
return None | 1d048ccda239610dcda374cec8efbf73fda72082 | 102,596 |
def _clean_sizes(sizes: list) -> list:
"""
Remove all text from sizes.
The parser to capture sizes of various objects includes 'in bytes: ' in the
string which should be stripped, leaving only numbers.
Parameters
----------
sizes : list
A ``list`` of ``strings`` of sizes of various objects.
Returns
-------
list
Returns a ``list`` of ``integers`` of sizes of various objects.
"""
return [int(size.replace('in bytes: ', '')) for size in sizes] | 007320588120f42a43b5a91c4aa1e73edd49f337 | 102,600 |
def is_empty(row):
""" Check if a csv row (represented as a list
of values) is empty.
[] => True
["","","foo"] => True
["foo","bar"] => False
"""
if len(row) == 0:
return True
if row[0] == "":
return True
return False | aef3046959b398b01ee35955dbd2887a91ff290e | 102,604 |
import torch
def get_norm_layer(norm: str, channels: int):
"""
Args:
norm: str
type of normalization: `layernorm`, `batchnorm`, `instancenorm`
channels: int
size of features for normalization
"""
if norm == "layernorm":
return torch.nn.LayerNorm(channels)
elif norm == "batchnorm":
return torch.nn.BatchNorm1d(channels)
elif norm == "instancenorm":
return torch.nn.InstanceNorm1d(channels)
else:
return torch.nn.Identity() | f21663d89c702df28dae7ae25015265c7ecee32c | 102,608 |
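A brief usage sketch of get_norm_layer, assuming PyTorch is installed; the batch and feature sizes are arbitrary.
import torch

norm = get_norm_layer("layernorm", 16)
x = torch.randn(8, 16)                        # batch of 8 feature vectors
print(norm(x).shape)                          # torch.Size([8, 16])
print(type(get_norm_layer("unknown", 16)))    # falls back to torch.nn.Identity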
def list_group_members(client, group_id):
"""Get the members of a group."""
return client.group_member_list(group_id) | 50d9249b0ce58bfd62f5ad5b8dad2e28d52d7977 | 102,609 |
def get_func_name(s):
"""
Gets the function name from a string.
E.g. "void func(type f1)" -> "func"
"""
pre_open_paren_str = s.split('(')[0]
if ' ' not in pre_open_paren_str:
return pre_open_paren_str
return pre_open_paren_str[pre_open_paren_str.rindex(' ') + 1:] | d7fa06079e4c8458cb7d315de4dcc665da66b24f | 102,611 |
def inverse_transform(claims_enc, le_dict):
"""inverse transform value of claims
Parameters
----------
claims_enc: panda.DataFrame
a data frame [source_id, object_id, value] where value is already label encoded.
Returns
-------
claims: pandas.DataFrame
a data frame [source_id, object_id, value]
"""
df = claims_enc.copy()
group_by_source = df.groupby('object_id')
for g, index in group_by_source.groups.items():
df.loc[index, 'value'] = le_dict[g].inverse_transform(
df.loc[index, 'value'])
return df | 57ab46fbd41933f399b8d92fa6a5b8c259fb4a91 | 102,612 |
def index() -> str:
"""Root page of Flask API.
Returns:
A test message.
"""
return "What I think a REST API is supposed to be in flask (probably wrong)!" | bea6676bdbb95b359fd642073002d39ab9f60a9f | 102,614 |
def get_line_count(file_name, chunk_size=8192*1024):
"""
Get the number of lines in a file.
Parameters:
file_name (str): file name
chunk_size (int, optional): chunk size for reading large files
"""
count = 0
with open(file_name, "rb") as fin:
chunk = fin.read(chunk_size)
while chunk:
count += chunk.count(b"\n")
chunk = fin.read(chunk_size)
return count | ee820620c56a94cd7b432d865b49cf1aa52730d1 | 102,616 |
import json
def json_fmt(obj):
"""Format the dict as JSON."""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': ')) | 42c3b5faac3e5022d611371b0fc7d86b9f509cff | 102,617 |
def params(message):
"""
    Decide whether the user's message provides measurements in the requested format.
:param message
:return: True -- if a user inputs at least six items (it asks for: "bust <number> waist <number> hips <number>"),
the 0th item is "bust", the 2nd is "waist", and the 4th is "hips"
False -- otherwise
"""
# split based on spaces
request = message.text.split()
    if len(request) < 6 or request[0].lower() != "bust" or request[2].lower() != "waist" or request[4].lower() != "hips":
        return False
    return True | 8dd24f8ac852f25c8dd0e4886ed8bec89da9e78f | 102,618 |
def get_conversion_rate(df, total, conversions):
"""Return the conversion rate of column.
Args:
:param df: Pandas DataFrame.
:param total: Column containing the total value.
:param conversions: Column containing the conversions value.
Returns:
Conversion rate of conversions / total
Example:
df['cr'] = get_conversion_rate(df, 'sessions', 'orders')
"""
value = (df[conversions] / df[total])
return value | 3dbfd40669e3fc751a1344e4b9d6c467f742e35b | 102,619 |
def complex_impedance(z, XR):
"""
Returns the complex impedance from z (in %) and the X/R ratio.
"""
z = float(abs(z))
XR = float(abs(XR))
real = (z**2/(1+XR**2))**0.5
try:
imag = (z**2/(1+1/XR**2))**0.5
except ZeroDivisionError:
imag = 0.0
return complex(real, imag) | 9647c0dc769612173d83d97b2e50704b3f46170c | 102,620 |
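A worked example of complex_impedance, assuming a hypothetical transformer with 6% impedance and an X/R ratio of 10.
z = complex_impedance(6, 10)
print(z)          # roughly (0.597+5.97j): mostly reactive for X/R = 10
print(abs(z))     # 6.0: the magnitude is preserved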
def get_entity_mention_by_sent_id(graph, sent_id):
"""
Returns all entity mentions in a given sentence.
:param graph: the OKR graph
:param sent_id: the sentence ID
:return all entity mentions in a given sentence
"""
    return {ent_id: mention for ent_id, ent in graph.entities.items()
            for mention_id, mention in ent.mentions.items()
            if mention.sentence_id == sent_id} | c37a14200f6b34a25c4bcbdffbd1eb864b6698b1 | 102,622 |