content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def remove_dul(entitylst):
    """Remove duplicate entities in one sequence.

    Args:
        entitylst: list of entities, each an iterable (e.g. [start, end, tag]).

    Returns:
        list: unique entities as lists, first-occurrence order preserved.
    """
    # Tuples are hashable so they can key a dict; dict.fromkeys keeps
    # insertion order, unlike the previous set() round-trip which returned
    # the entities in arbitrary order.
    unique = dict.fromkeys(tuple(entity) for entity in entitylst)
    return [list(entity) for entity in unique]
import argparse
import ast
def add_dannce_train_args(
    parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
    """Add arguments specific to dannce training.
    Args:
        parser (argparse.ArgumentParser): Command line argument parser.
    Returns:
        argparse.ArgumentParser: Parser with added arguments.
    """
    # (flag, value_type, help) table; value_type None means argparse's
    # default string handling.  Each dest is the flag with dashes
    # replaced by underscores.
    option_table = (
        ("--dannce-train-dir", None,
         "Training directory for dannce network."),
        ("--rotate", ast.literal_eval,
         "If True, use rotation augmentation for dannce training."),
        ("--augment-continuous-rotation", ast.literal_eval,
         "If True, rotate all images in each sample of the training set by a random value between [-5 and 5] degrees during training."),
        ("--drop-landmark", ast.literal_eval,
         "Pass a list of landmark indices to exclude these landmarks from training"),
        ("--use-npy", ast.literal_eval,
         "If True, loads training data from npy files"),
        ("--rand-view-replace", ast.literal_eval,
         "If True, samples n_rand_views with replacement"),
        ("--n-rand-views", ast.literal_eval,
         "Number of views to sample from the full viewset during training"),
        ("--multi-gpu-train", ast.literal_eval,
         "If True, distribute training data across multiple GPUs for each batch"),
        ("--heatmap-reg", ast.literal_eval,
         "If True, use heatmap regularization during training"),
        ("--heatmap-reg-coeff", float,
         "Sets the weight on the heatmap regularization term in the objective function."),
        ("--save-pred-targets", ast.literal_eval,
         "If True, save predictions evaluated at checkpoints during training. Note that for large training datasets, this can cause memory issues."),
    )
    for flag, value_type, help_text in option_table:
        keyword_args = {"dest": flag[2:].replace("-", "_"), "help": help_text}
        if value_type is not None:
            keyword_args["type"] = value_type
        parser.add_argument(flag, **keyword_args)
    return parser
import re
def _is_disabled(name, disabled=[]):
"""Test whether the package is disabled.
"""
for pattern in disabled:
if name == pattern:
return True
if re.compile(pattern).match(name) is not None:
return True
return False | 05f7e82a3c08411c4021f8bd695c2f6b75a636c8 | 31,788 |
def _get_elements_and_boundaries(flows):
"""filter out elements and boundaries not used in this TM"""
elements = {}
boundaries = {}
for e in flows:
elements[e] = True
elements[e.source] = True
elements[e.sink] = True
if e.source.inBoundary is not None:
boundaries[e.source.inBoundary] = True
if e.sink.inBoundary is not None:
boundaries[e.sink.inBoundary] = True
return (elements.keys(), boundaries.keys()) | 79c2cab04b18789b473f5c922d7aa5679a62c967 | 31,789 |
def get_wind(bearing):
    """get wind direction"""
    # Upper bounds (inclusive) of each 45-degree compass sector;
    # anything above 337.5 wraps around to North.
    sectors = ((22.5, u'\u2193 N'),
               (67.5, u'\u2199 NE'),
               (112.5, u'\u2190 E'),
               (157.5, u'\u2196 SE'),
               (202.5, u'\u2191 S'),
               (247.5, u'\u2197 SW'),
               (292.5, u'\u2192 W'),
               (337.5, u'\u2198 NW'))
    for upper_bound, label in sectors:
        if bearing <= upper_bound:
            return label
    return u'\u2193 N'
def response(hey_bob):
    """
    Get the response
    """
    trimmed = hey_bob.strip()
    if not trimmed:
        return "Fine. Be that way!"
    # Shouting means every letter is uppercase (and there is at least one).
    letters = "".join(c for c in hey_bob if c.isalpha())
    shouting = letters.isupper()
    if trimmed[-1] == "?":
        return "Calm down, I know what I'm doing!" if shouting else "Sure."
    return "Whoa, chill out!" if shouting else "Whatever."
def configure(config):
    """
    | [google] | example | purpose |
    | -------- | ------- | ------- |
    | cs_id | 00436473455324133526:ndsakghasd | Custom search ID |
    | api_key | ASkdasfn3k259283askdhSAT5OADOAKjbh | Custom search API key |
    """
    # Prompts only; nothing is appended to the returned config chunk.
    if config.option('Configuring google search module', False):
        for key, prompt in (('cs_id', 'Custom search ID'),
                            ('api_key', 'Custom search API key')):
            config.interactive_add('google', key, prompt, '')
    return ''
def a_source_filename(plugin_ctx, fsm_ctx):
    """Send the source filename over the FSM's control channel."""
    fsm_ctx.ctrl.sendline(plugin_ctx.src_file)
    return True
import binascii
def givenCodeGetGPML(s, code):
    """download gpml files from wikipathways database"""
    # The pathway code arrives as bytes; the service wants text.
    pathway_code = code.decode()
    base64_text = s.getPathwayAs(pathway_code, filetype="gpml")
    # The service responds with base64 text; decode back to raw bytes.
    return binascii.a2b_base64(bytes(base64_text, "ascii"))
def velocity(range, avg_range, volume, avg_volume):
    """
    The average of the average of the range and volume for a period.
    :param range:
    :param avg_range:
    :param volume:
    :param avg_volume:
    :return:
    """
    range_ratio = range / avg_range
    volume_ratio = volume / avg_volume
    return round((range_ratio + volume_ratio) / 2, 2)
import os
import glob
import pyclbr
import importlib
def model_finder(game_mode):
    """ Returns the model class matching *game_mode*.
    :param game_mode: str
        Should be either the name of a module located in the
        models folder or the name of the first class inside one of the modules
        located in models.
        In case there are multiple classes inside a module, the first
        one will be selected.
    """
    # Go up to retro_baseline folder
    path_file = os.path.dirname(os.path.dirname(__file__))
    # Bug fix: "[!base_model]*.py" is a glob *character class*, not a
    # prefix filter -- it excluded every module starting with any of the
    # letters b/a/s/e/_/m/o/d/l.  Filter base_model.py out explicitly.
    models = [
        each for each in glob.glob(os.path.join(path_file, "models", "*.py"))
        if os.path.basename(each) != "base_model.py"
    ]
    # Extract module name (strip the ".py" suffix)
    models_name = [os.path.basename(each)[:-3] for each in models]
    # Locate all classes inside each module
    classes = [pyclbr.readmodule_ex("models." + name) for name in models_name]
    # Select the first class name from each module
    classes_keys = [next(iter(k)) for k in classes]
    # Check for name as module
    if game_mode in models_name:
        to_import_class = classes_keys[models_name.index(game_mode)]
        module = importlib.import_module(f"models.{game_mode}")
        return getattr(module, to_import_class)
    # Check for name as Class Name
    if game_mode in classes_keys:
        to_import_module = models_name[classes_keys.index(game_mode)]
        module = importlib.import_module(f"models.{to_import_module}")
        return getattr(module, game_mode)
    # Unknown: list valid module/class pairs and exit.
    valid = "\n ".join(f'{k.ljust(25)}{v}'
                       for (k, v) in zip(models_name, classes_keys))
    print("\nUnrecognized model, valid inputs were:\n "
          f"{valid}")
    exit(1)
import re
def seperate_data(data, labels):
    """
    Given a data file such as
        | x11 u11 x12 other x13 |
        | x21 u21 x22 other x23 |
        | ..................... |
    And labels for each column such as
        'x' 'u' 'x' '_' 'x'
    Split the data into x' and u'
    """
    # Column indices tagged 'x' are states; those tagged 'u' are inputs.
    state_cols = [i for i, tag in enumerate(labels) if tag == 'x']
    input_cols = [i for i, tag in enumerate(labels) if tag == 'u']
    return data[:, state_cols], data[:, input_cols]
def hr2deg(deg):
    """Convert degrees into hours (24 hours per 360 degrees)."""
    hours_per_degree = 24.0 / 360.0
    return deg * hours_per_degree
import os
def is_model_dir(model_dir):
    """Checks if the given directory contains a model and can be safely removed.
    specifically checks if the directory has no subdirectories and
    if all files have an appropriate ending."""
    allowed_extensions = {".json", ".pkl", ".dat"}
    walked = list(os.walk(model_dir))
    # More than one walk entry means subdirectories exist; zero means the
    # path is not a readable directory at all.
    if len(walked) != 1:
        return False
    _, _, files = walked[0]
    return all(os.path.splitext(name)[1] in allowed_extensions
               for name in files)
import os
def create_dir_and_make_writable(directory):
    """
    Creates a directory if it does not exist, and make sure it is writable by us.
    @rtype: bool
    @return: success
    """
    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError:
            # Could not create (permissions, race, bad path).
            return False
    return os.access(directory, os.W_OK)
def main():
    """To make a "callable" link on a web page, the href must carry the
    ``tel:`` prefix, e.g. <a href="tel:+79012342452">+79012342452</a>.
    This program wraps a phone number read from stdin in such tags.
    (Translated from the original Russian docstring.)
    """
    tel = input()
    anchor = '<a href="tel:%s">%s</a>' % (tel, tel)
    print(anchor)
    return 0
import itertools
def check_all_hats(s0, s1, s2, s3, verbose=False):
    """s0~3 = 3x3 strategy for that player

    Enumerates all 3**4 hat assignments (3 colours, 4 players) and counts
    those where at least one player guesses their own hat correctly.  Each
    strategy is a 3x3 lookup table indexed by the two hats that player sees.

    Returns:
        int: number of successful assignments (0..81).
    """
    successes = 0
    for hats in itertools.product(range(3), repeat=4):
        # NOTE(review): which two hats each player sees is assumed from the
        # puzzle setup (indices below) -- confirm against the statement.
        guesses = [s0[hats[3]][hats[1]],
                   s1[hats[0]][hats[2]],
                   s2[hats[1]][hats[3]],
                   s3[hats[2]][hats[0]]]
        if (hats[0] == guesses[0] or hats[1] == guesses[1] or
            hats[2] == guesses[2] or hats[3] == guesses[3]):
            successes += 1
            if verbose:
                # Bitmask of which players were right: bit i set => player i.
                correct_bits = 0
                for h in range(4):
                    if hats[h] == guesses[h]:
                        print(f'<{hats[h]}>', end='')
                        correct_bits += 2**h
                    else:
                        print(f' {hats[h]} ', end='')
                print(guesses, end='')
                # match/case requires Python 3.10+.
                match correct_bits:
                    case 1:
                        print('Only player 0', hats[3], hats[1])
                    case 2:
                        print('Only player 1', hats[0], hats[2])
                    case 4:
                        print('Only player 2', hats[1], hats[3])
                    case 8:
                        print('Only player 3', hats[2], hats[0])
                    case _:
                        print('Success')
        elif verbose:
            print('***', hats, guesses, "Failure")
    if verbose: print("Total successes:", successes)
    return successes
from typing import Union
import time
def cast_timestamp_seconds_since_epoch_to_text(seconds: Union[int, float, str],
                                               timezone: str = "UTC",
                                               time_in_nanoseconds: bool = False,
                                               date_time_format: str = '%Y-%m-%d %H:%M:%S') -> str:
    """
    >>> assert cast_timestamp_seconds_since_epoch_to_text(0,"UTC") == '1970-01-01 00:00:00'
    >>> assert cast_timestamp_seconds_since_epoch_to_text("0","LOCAL")
    >>> assert cast_timestamp_seconds_since_epoch_to_text(1590674483 ,"UTC") == '2020-05-28 14:01:23'
    >>> assert cast_timestamp_seconds_since_epoch_to_text(1590674574765797619 ,"UTC", time_in_nanoseconds=True) == '2020-05-28 14:02:55'
    """
    total = float(seconds)
    if time_in_nanoseconds:
        total /= 1E9
    whole_seconds = int(round(total))
    # gmtime for UTC; any other timezone string falls back to local time.
    if timezone == "UTC":
        broken_down = time.gmtime(whole_seconds)
    else:
        broken_down = time.localtime(whole_seconds)
    return time.strftime(date_time_format, broken_down)
def to_lower(tokens):
    """Convert all tokens to lower case.
    Args:
        tokens (list): List of tokens generated using a tokenizer.
    Returns:
        list: List of all tokens converted to lowercase.
    """
    lowered = []
    for token in tokens:
        lowered.append(token.lower())
    return lowered
import re
def has_three_consecutive_vowels(word):
    """Returns True if word has at least 3 consecutive vowels"""
    return re.search(r"[aAeEiIoOuU]{3,}", word) is not None
import re
def wrap_parser(namespace, parser):  # pragma: no cover
    """Wraps an argument parser, putting all following options under a
    namespace. """
    leading_dashes = re.compile(r'^(-+)')

    class _Wrapper:
        def __init__(self, _parser):
            self.parser = _parser

        def add_argument(self, *args, **kwargs):
            # Rewrite "--opt" as "--<namespace>.opt" before delegating.
            prefixed = [leading_dashes.sub(r'\1' + namespace + '.', flag)
                        for flag in args]
            self.parser.add_argument(*prefixed, **kwargs)

    return _Wrapper(parser)
def even_control_policy(time):
    """Policy carrying out evenly distributed disease management."""
    # Constant control level across all six compartments/periods.
    return [0.16 for _ in range(6)]
def rotc(ebit, debt, equity):
    """Computes return on total capital.
    Parameters
    ----------
    ebit : int or float
        Earnings before interest and taxes
    debt : int or float
        Short- and long-term debt
    equity : int or float
        Equity
    Returns
    -------
    out : int or float
        Return on total capital
    """
    # Bug fix: total capital is debt PLUS equity.  The previous
    # ``debt - equity`` inverted the denominator (and divided by zero
    # whenever debt == equity).
    return ebit / (debt + equity)
def is_final_option(string):
    """Whether that string means there will be no further options
    >>> is_final_option('--')
    True
    """
    end_of_options_marker = '--'
    return string == end_of_options_marker
def traverse_dict(obj: dict, convert_to_string: bool = True):
    """
    Traversal implementation which recursively visits each node in a dict.
    At the lowest level of the hierarchy each leaf element is converted to
    a string unless ``convert_to_string`` is False.
    From https://nvie.com/posts/modifying-deeply-nested-structures/
    """
    # Bug fixes: (1) the flag is now propagated through recursive calls --
    # it was silently dropped before, so nested levels always converted;
    # (2) leaves are no longer unconditionally stringified by a second
    # str() call that ignored the flag.
    if isinstance(obj, dict):
        return {key: traverse_dict(val, convert_to_string)
                for key, val in obj.items()}
    if isinstance(obj, list):
        return [traverse_dict(elem, convert_to_string) for elem in obj]
    return str(obj) if convert_to_string else obj
def squash_dims(tensor, dims):
    """
    Squashes dimension, given in dims into one, which equals to product of given.
    Args:
        tensor (Tensor): input tensor
        dims: dimensions over which tensor should be squashed
    """
    assert len(dims) >= 2, "Expected two or more dims to be squashed"
    shape = tensor.size()
    # The squashed dims must form a consecutive run.
    for earlier, later in zip(dims, dims[1:]):
        assert later == earlier + 1, "Squashed dims should be consecutive"
    merged = 1
    for d in dims:
        merged *= shape[d]
    new_shape = shape[:dims[0]] + (merged,) + shape[dims[-1] + 1:]
    return tensor.contiguous().view(*new_shape)
import re
def is_a_uri(uri_candidate):
    """
    Validates a string as a URI
    :param uri_candidate: string
    :return: True if the candidate is a valid URI, False otherwise
    """
    # https://gist.github.com/dperini/729294
    # Raw strings fix the invalid-escape-sequence warnings (\S, \d, ...)
    # the plain strings produced; the pattern values are unchanged.
    URL_REGEX = re.compile(
        r"^"
        # protocol identifier
        r"(?:(?:https?|ftp)://)"
        # user:pass authentication
        r"(?:\S+(?::\S*)?@)?"
        r"(?:"
        # IP address exclusion
        # private & local networks
        r"(?!(?:10|127)(?:\.\d{1,3}){3})"
        r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})"
        r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})"
        # IP address dotted notation octets
        # excludes loopback network 0.0.0.0
        # excludes reserved space >= 224.0.0.0
        # excludes network & broadcast addresses
        # (first & last IP address of each class)
        r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])"
        r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}"
        r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))"
        r"|"
        # host name
        r"(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)"
        # domain name
        r"(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*"
        # TLD identifier
        r"(?:\.(?:[a-z\u00a1-\uffff]{2,}))"
        r")"
        # port number
        r"(?::\d{2,5})?"
        # resource path
        r"(?:/\S*)?"
        r"$"
        , re.UNICODE
    )
    # Return a plain bool so the documented "True or False" contract holds
    # (previously a Match object / None was returned).
    return URL_REGEX.match(uri_candidate) is not None
def update_config(config, config_update):
    """Update config with new keys. This only
    does key checking at a single layer of depth,
    but can accommodate dictionary assignment
    :config: Configuration
    :config_update: Updates to configuration
    :returns: config
    """
    # Shallow merge: every updated key overwrites (or adds to) config.
    for name, new_value in config_update.items():
        config[name] = new_value
    return config
def transform_post(post):
    """Transforms post data
    Arguments:
        post {dict} -- Post data
    """
    # Prefer the hand-written excerpt; fall back to the generated one
    # when custom_excerpt is empty/None (both are falsy).
    summary = post['custom_excerpt'] or post['excerpt']
    return {
        'id': post['id'],
        'title': post['title'],
        'url': post['url'],
        'image': post['feature_image'],
        'summary': summary,
    }
def int_to_unknown_bytes(num, byteorder='big'):
    """Converts an int to the least number of bytes as possible."""
    # Zero has bit_length 0 but still needs one byte.
    length = max((num.bit_length() + 7) // 8, 1)
    return num.to_bytes(length, byteorder)
def add_color_esc_codes(text: str, c: int) -> str:
    """Surround string with escape color code and
    """
    cnum = 16
    # First half of the palette maps to normal colors (30-37),
    # the second half to the bright variants (90-97).
    if c < cnum / 2:
        base = 30
    else:
        base = 90
    color_code = f'\033[0;{base + c % 8};40m'
    reset_code = '\033[0m'
    return color_code + text + reset_code
def min_operations(number):
    """
    Return number of steps taken to reach a target number
    number: target number (as an integer)
    :returns: number of steps (as an integer)
    """
    # Solution:
    # 1. The number of steps to reach a target number = number of steps take to make the target number 0
    # 2. We will be greedy, each time if it is possible we will try to make the number half
    steps_count = 0
    while number > 0:
        # check if we can make the number half
        if number % 2 == 0:
            # Bug fix: use floor division -- ``number / 2`` turned the
            # value into a float, which loses precision for large ints.
            number //= 2
        else:
            # number can't halved so we can only decrease it by 1
            number -= 1
        steps_count += 1
    return steps_count
import ipaddress
def validate_ipv4_address(ipv4_address):
    """
    This function will validate if the provided string is a valid IPv4 address
    :param ipv4_address: string with the IPv4 address
    :return: true/false
    """
    # Bug fix: ipaddress.ip_address() also accepts IPv6 strings, so the
    # function validated far more than IPv4.  IPv4Address restricts the
    # check, and the bare ``except`` is narrowed to the actual parse error.
    try:
        ipaddress.IPv4Address(ipv4_address)
        return True
    except ValueError:
        # AddressValueError is a ValueError subclass.
        return False
def flatten(some_list):
    """
    Flatten a list of lists.
    Usage: flatten([[list a], [list b], ...])
    Output: [elements of list a, elements of list b]
    """
    return [element for sub_list in some_list for element in sub_list]
def flood_fill(surface, seed_point, color, pan=(0, 0)):
    """Flood fills a pygame surface, starting off at specific seed point.
    Returns the original surface with that area filled in.
    Thanks to wonderfully concise example of (non-recursive)
    flood-fill algorithm in Python:
    http://stackoverflow.com/a/11747550
    """
    # Translate the seed point by the current pan offset.
    seed_point = (seed_point[0] - pan[0], seed_point[1] - pan[1])
    # NOTE(review): the bounds check uses ">" where ">=" looks intended --
    # a seed at exactly get_width()/get_height() passes the guard and then
    # surface.get_at() would raise; confirm against pygame's coordinates.
    if seed_point[0] > surface.get_width()\
            or seed_point[1] > surface.get_height()\
            or seed_point[0] < 0 or seed_point[1] < 0:
        return surface
    to_fill = set()
    to_fill.add(seed_point)
    # The colour at the seed defines the background being replaced.
    background = surface.get_at(seed_point)
    while len(to_fill) > 0:
        x, y = to_fill.pop()
        surface.set_at((x, y), color)
        # Enqueue the 8-connected neighbours that still show background.
        for i in range(x - 1, x + 2):
            if i < 0 or i > surface.get_width() - 1:
                continue
            for j in range(y - 1, y + 2):
                if j < 0 or j > surface.get_height() - 1:
                    continue
                # The color != background guard prevents an infinite loop
                # when filling with the background colour itself.
                if color != background\
                        and surface.get_at((i, j)) == background:
                    to_fill.add((i, j))
    return surface
def bbox_to_pixel_offsets(gt, bbox):
    """Helper function for zonal_stats(). Modified from:
    https://gist.github.com/perrygeo/5667173
    Original code copyright 2013 Matthew Perry
    """
    # GDAL-style geotransform: origin at (gt[0], gt[3]),
    # pixel size (gt[1], gt[5]).
    origin_x, origin_y = gt[0], gt[3]
    pixel_width, pixel_height = gt[1], gt[5]
    col_start = int((bbox[0] - origin_x) / pixel_width)
    col_end = int((bbox[1] - origin_x) / pixel_width) + 1
    row_start = int((bbox[3] - origin_y) / pixel_height)
    row_end = int((bbox[2] - origin_y) / pixel_height) + 1
    return (col_start, row_start, col_end - col_start, row_end - row_start)
def autocomplete(prefix, structure, algorithm='linear_search'):
    """Return all vocabulary entries that start with the given prefix using the
    given structure and algorithm, specified as linear_search, trie, etc.

    Raises:
        ValueError: if *algorithm* is not a supported strategy.
    """
    if algorithm == 'linear_search':
        # Search the list using linear search
        return [word for word in structure if word.startswith(prefix)]
    if algorithm == 'trie':
        # Search the trie structure for the prefix
        return structure.search(prefix)
    # Bug fix: an unknown algorithm previously fell off the end and
    # silently returned None; fail loudly instead.
    raise ValueError(f"Unsupported algorithm: {algorithm!r}")
from typing import Any
from typing import Set
def _all_names_on_object(obj: Any) -> Set[str]:
"""Gets all names of attributes on `obj` and its classes throughout MRO.
Args:
obj: The object to get names for.
Returns:
A set of names of attributes of `obj` and its classes.
"""
nameset = set(obj.__dict__.keys())
for cls in obj.__class__.__mro__:
nameset = nameset.union(set(cls.__dict__.keys()))
return nameset | ac635b970df640a602656af55eabb94c4d55daae | 31,831 |
import ipaddress
import re
def _validate_cidr_format(cidr):
"""Validate CIDR IP range
:param str cidr:
:return:
:rtype: bool
"""
try:
ipaddress.ip_network(cidr, strict=False)
except (ValueError, ipaddress.AddressValueError,
ipaddress.NetmaskValueError):
return False
if '/' not in cidr:
return False
if re.search('\s', cidr):
return False
return True | 5f2a667c93720909ce7b9ff3019a0c403a499222 | 31,833 |
import requests
def rx_id_from_up_id(up_id):
    """Get the Reactome Stable ID for a given Uniprot ID.

    Queries the Reactome ContentService search endpoint and collects the
    stable IDs (``stId``) of all returned entries.

    Args:
        up_id (str): UniProt accession to search for.

    Returns:
        list[str] | None: collected stable IDs, or None when the HTTP
        request fails (non-200) or the search yields no results.
    """
    react_search_url = 'http://www.reactome.org/ContentService/search/query'
    params = {'query': up_id, 'cluster': 'true', 'species':'Homo sapiens'}
    headers = {'Accept': 'application/json'}
    res = requests.get(react_search_url, headers=headers, params=params)
    if not res.status_code == 200:
        return None
    json = res.json()
    results = json.get('results')
    if not results:
        print('No results for %s' % up_id)
        return None
    stable_ids = []
    for result in results:
        entries = result.get('entries')
        for entry in entries:
            stable_id = entry.get('stId')
            if not stable_id:
                continue
            # NOTE(review): ``name`` is fetched but never used -- candidate
            # for removal.
            name = entry.get('name')
            stable_ids.append(stable_id)
    return stable_ids
from typing import List
from typing import Callable
from typing import Tuple
def evolve_until_stationary(layout: List[str], change_rule: Callable[[str, int], str], floor_is_empty: bool = True) -> List[str]:
    """Evolves *layout* applying *change_rule* to each position, until no more changes are made.
    *floor_is_empty* controls wether to consider the flor as an empty position or ignore it

    The grid is double-buffered: each generation reads *board* and writes
    *new_board*, then the two are swapped, so updates within a generation
    never see each other.
    """
    def count_occupied(board: List[List[str]], pos: Tuple[int, int], floor_is_empty: bool) -> int:
        # Assumes that layout has a border around it, and pos is in (1..len(board)-1)
        count = 0
        for y in (-1, 0, 1):
            for x in (-1, 0, 1):
                aux_pos = [y + pos[0], x + pos[1]]
                # If floor isn't considered empty, search the first non-empty
                # in the direction. Ignore dir (0,0) - shouldn't be necessary but...
                if not floor_is_empty and not (x == 0 and y == 0) :
                    # The 'L' border guarantees this ray-walk terminates.
                    while board[aux_pos[0]][aux_pos[1]] == '.':
                        aux_pos[0] += y
                        aux_pos[1] += x
                count += (board[aux_pos[0]][aux_pos[1]] == '#')
        # We conted our own position, so subtract it here
        return count - (board[pos[0]][pos[1]] == '#')
    # Add a border around the input to ignore index safety later
    board = [['L'] + list(line) + ['L'] for line in layout]
    board.insert(0, ['L'] * (len(layout[0]) + 2))
    board.append(['L'] * (len(layout[0]) + 2))
    new_board = [l[:] for l in board]
    while True:
        changes = 0
        # Loop only over the input, ignoring the border
        for y, line in enumerate(board[1:-1], start=1):
            for x, state in enumerate(line[1:-1], start=1):
                # Floor cells ('.') never change state themselves.
                if state == '.': continue
                occupied = count_occupied(board, (y, x), floor_is_empty)
                state = change_rule(state, occupied)
                changes += (state != new_board[y][x])
                new_board[y][x] = state
        # switch
        board, new_board = new_board, board
        if changes == 0: break
    # Strip the border and rebuild the row strings.
    return [''.join(line[1:-1]) for line in board[1:-1]]
def _convert_path_to_ee_sources(path: str) -> str:
"""Get the remote module path from the 'ee-sources' GCS bucket.
Args:
path: str
Returns:
An ee-sources module url.
"""
if path.startswith("http"):
eempath = path
else:
bpath = path.replace(":", "/")
eempath = f"https://storage.googleapis.com/ee-sources/{bpath}"
return eempath | f650736711fb8909e0e11df2165a89f06210cb53 | 31,838 |
import pytz
def convert_utc_to_localtime(utc_datetime, timezone_str):
    """
    Convert a UTC time to the specified local timezone; if the timezone
    string is invalid, the original UTC datetime is returned unchanged.
    Args:
        utc_datetime(datetime): naive datetime expressed in UTC
        timezone_str(str): target timezone name from the tz database
    Returns
        timezone_dt(datetime): the converted, timezone-aware datetime
        (or the unmodified input when the timezone is unknown)
    """
    # Only accept timezone names listed in pytz.common_timezones.
    if timezone_str in pytz.common_timezones:
        tz = pytz.timezone(timezone_str)
        # Attach UTC tzinfo to the naive datetime, then convert.
        dateime_include_tzinfo = pytz.utc.localize(utc_datetime, is_dst=None)
        timezone_dt = dateime_include_tzinfo.astimezone(tz)
        return timezone_dt
    return utc_datetime
def get_number(char):
    """Count the CJK unified ideographs (Chinese characters) in a string.

    :param char: input string
    :return: number of characters in the U+4E00..U+9FA5 range
    """
    return sum(1 for item in char if 0x4E00 <= ord(item) <= 0x9FA5)
import functools
import operator
def prod(iterable):
    """Product function.
    Parameters
    ----------
    iterable
    """
    # Left fold with multiplicative identity 1 (empty iterable -> 1).
    result = 1
    for factor in iterable:
        result = operator.mul(result, factor)
    return result
import subprocess
def get_git_branch():
    """Get the symbolic name for the current git branch.

    Returns:
        str | None: the branch name (e.g. "main"), or None when git
        reports an error (not a repository, detached HEAD quirks, ...).
    """
    cmd = "git rev-parse --abbrev-ref HEAD".split()
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return None
    # Bug fix: check_output returns bytes with a trailing newline; callers
    # expect a clean branch-name string.
    return output.decode("utf-8").strip()
import os
def screenshot_data():
    """Loads base64-encoded screenshot data.

    Returns the fixture file's contents, or '' when the fixture is absent.
    """
    fixture_path = os.path.join("tests/fixtures/screenshot_base64.txt")
    try:
        with open(fixture_path, 'r') as fixture:
            return fixture.read()
    except FileNotFoundError:
        return ''
def reverse_amount(amount, account):
    """
    get counterparty amount
    :param amount:
    :param account:
    :return: {{value: string, currency: *, issuer: *}}
    """
    # Negate the value and re-stringify; issuer becomes the counterparty.
    negated_value = str(-float(amount['value']))
    return {
        'value': negated_value,
        'currency': amount['currency'],
        'issuer': account
    }
import collections
def list_compare(a: list, b: list) -> bool:
    """Check if two lists contain the same elements (order-insensitive,
    multiplicity-sensitive)."""
    tally_a = collections.Counter(a)
    tally_b = collections.Counter(b)
    return tally_a == tally_b
def time_int_to_decimal(time):
    """Takes a number of the form HHMMSS or +/-DDMMSS (as a string) and
    converts it to a decimal."""
    # Split off an optional leading sign.
    if time[0] == "-":
        sign = -1
        timestr = time[1:]
    elif time[0] == "+":
        sign = 1
        timestr = time[1:]
    else:
        sign = 1
        timestr = time
    # Separate a fractional-seconds suffix, if any.
    index = timestr.find('.')
    if index > -1:
        seconds_fraction = timestr[index:]
        timestr = timestr[:index]
    else:
        seconds_fraction = ".0"
    # We expect six digits but it could be less so work from the back
    hh = int(timestr[-6:-4])
    mm = int(timestr[-4:-2])
    ss = float(timestr[-2:] + seconds_fraction)
    # Bug fix: the sign must apply to the whole sexagesimal value, not just
    # the hours/degrees field (previously e.g. "-003000" came out +0.5).
    return sign * (hh + mm / 60. + ss / 3600.)
def _get_map_key(wire_position):
"""
wire position, tuple of tuple
like: ((0, 0), (0, 1))
"""
if wire_position[0][0] + wire_position[0][1] > \
wire_position[1][0] + wire_position[1][1]:
return str((wire_position[1], wire_position[0]))
return str(wire_position) | 22796f1dcac78d109f078c1de04a7d7fde4348ca | 31,850 |
import collections
def find_best_kmer_diagonal(kmertoposA, kmertoposB, bandwidth, ignore_self_diagonal=False):
    """To approximate the best alignment between sequences A and B, find the
    highest number of basepairs in sequence A involved in kmer matches within a single
    diagonal band in an A-to-B alignment. The size of the diagonal band is
    defined as +/- `bandwidth`. So, bands are `1+2*bandwidth` basepairs wide.
    """
    # Map from diagonal number to positions in A that start a kmer match.
    diagonaltoposA = collections.defaultdict(set)
    for kmer in kmertoposA:
        # NOTE(review): assumes every kmer of A is present in kmertoposB;
        # a missing key raises KeyError -- confirm the callers' contract.
        for posA in kmertoposA[kmer]:
            for posB in kmertoposB[kmer]:
                diagonal = posA - posB
                if diagonal == 0 and ignore_self_diagonal:
                    continue
                # contribute the match to all diagonals that are +/- bandwidth
                for d in range(diagonal-bandwidth, diagonal+bandwidth+1):
                    diagonaltoposA[d].add(posA)
    # Find the number of A basepairs in kmers matches in each diagonal band.
    assert len(kmertoposA) > 0
    # Bug fix: dict.keys() is not subscriptable in Python 3; grab an
    # arbitrary kmer via an iterator to measure k.
    k = len(next(iter(kmertoposA)))  # kmer size
    bestscore = 0  # best score yet
    for diagonal in diagonaltoposA:
        score = 0  # number of A basepairs in kmer matches in this diagonal band
        b, e = 0, 0  # begin,end of current run of basepairs in kmer match
        for posA in sorted(diagonaltoposA[diagonal]):
            if posA > e:  # no overlap with previous run
                score += (e - b)
                b = posA
            e = posA + k
        score += (e - b)
        if score > bestscore:
            bestscore = score
    return bestscore
def copper_heat_capacity_CRC(T):
    """
    Copper specific heat capacity as a function of the temperature from [1].
    References
    ----------
    .. [1] William M. Haynes (Ed.). "CRC handbook of chemistry and physics". CRC Press
           (2014).
    Parameters
    ----------
    T: :class:`pybamm.Symbol`
        Dimensional temperature
    Returns
    -------
    :class:`pybamm.Symbol`
        Specific heat capacity
    """
    # Cubic fit coefficients from the CRC handbook; evaluated with the
    # same operation order as the published polynomial.
    cubic, quadratic, linear, constant = 1.445e-6, 1.946e-3, 0.9633, 236
    return cubic * T ** 3 - quadratic * T ** 2 + linear * T + constant
import re
def n_channels(s):
    """Get the number of channels from filename"""
    # The filename is lower-cased before splitting, so the token to look
    # for must be lower-case too: "C64" could never match, which made the
    # function always return 32.
    tokens = re.split("_|\.", s.lower())
    if "c64" in tokens:
        return 64
    return 32
def getAttributeValue(Data, Attribute, IsDiscrete):
    """Collect every observed value of each discrete attribute.

    Args:
        Data: dataset, one sample per row.
        Attribute: attribute names, aligned with the columns of Data.
        IsDiscrete: flags marking which attributes are discrete.

    Returns:
        dict mapping each discrete attribute name to the set of values it
        takes in Data; continuous attributes are skipped.  For example:
        {'color': {'dark', 'green', 'light'}, 'texture': {'clear', 'blurry'}}
    """
    AttributeValue = {}
    for col, discrete in enumerate(IsDiscrete):
        if not discrete:  # continuous attributes are not enumerated
            continue
        AttributeValue[Attribute[col]] = {sample[col] for sample in Data}
    return AttributeValue
def _compute_all_mask(ds):
    """Computes a mask that is true everywhere"""
    # z < z + 1 is elementwise True, yielding an all-True mask shaped
    # like ds.z.
    everywhere_true = ds.z < ds.z + 1
    everywhere_true.attrs["long_name"] = "All data mask"
    return everywhere_true
def merge_dict(destination, source, path=None):
    """merges source into destination (recursively for nested dicts);
    raises on conflicting leaf values"""
    if path is None:
        path = []
    for key, incoming in source.items():
        if key not in destination:
            destination[key] = incoming
        elif isinstance(destination[key], dict) and isinstance(incoming, dict):
            # Recurse into matching dict values, extending the path for
            # error reporting.
            merge_dict(destination[key], incoming, path + [str(key)])
        elif destination[key] == incoming:
            pass  # same leaf value
        else:
            raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
    return destination
def traverse_for_weight(network, parent, stop_concept, previous_concepts=None, previous_weights=None):
    """Find all the weights between two concepts in a network.
    Returns a list of lists. Each inner list represents a path within the network. The items of the list are the weights
    for the path.

    NOTE(review): when ``parent is stop_concept`` on the initial call the
    return value is the flat list ``[1,]`` rather than a list of lists --
    callers must handle that shape; confirm this is intended.
    """
    if parent is stop_concept:
        # should only get here is if the initial call the parent is the stop concept. In this case, the effective weight is 1
        return [1,]
    results = []
    # Track visited concepts so relationship cycles terminate.
    previous_concepts = (previous_concepts or []) + [parent,]
    previous_weights = previous_weights or []
    for child_rel in network.fromModelObject(parent):
        if child_rel.toModelObject in previous_concepts:
            # In a cycle - skip this child.
            continue
        if child_rel.toModelObject is stop_concept:
            # Complete path found: record its accumulated weights.
            results.append(previous_weights + [child_rel.weight,])
        else:
            next_result = traverse_for_weight(network, child_rel.toModelObject, stop_concept, previous_concepts, previous_weights + [child_rel.weight,])
            if len(next_result) > 0:
                for x in next_result:
                    results.append(x)
    return results
def _convert_hab_op(lulc_array, *hab_conversion_list):
"""Convert lulc to odd value if even value of hab conversion is 1."""
result = lulc_array.copy()
hab_iter = iter(hab_conversion_list)
for mask_array, conversion in zip(hab_iter, hab_iter):
if isinstance(conversion, int):
result[mask_array == 1] = conversion
elif conversion.startswith('+'):
result[mask_array == 1] += int(conversion[1:])
else:
raise ValueError(f'unknown conversion: {conversion}')
return result | 6e5ba79edeb4e4c4a03f5398da12763968377799 | 31,863 |
import logging
def obr_repr(obj):
    """Return ``repr(obj)``, falling back to a placeholder string.

    Any exception raised by the object's ``__repr__`` is logged and the
    literal 'String Representation not found' is returned instead.
    """
    try:
        result = repr(obj)
    # pylint: disable-msg=broad-except
    except Exception as exc:
        logging.error(exc)
        result = 'String Representation not found'
    return result
def generate_playlist_url(playlist_id:str)->str:
    """Build the YouTube playlist URL for ``playlist_id``.

    example https://www.youtube.com/playlist?list=PLGhvWnPsCr59gKqzqmUQrSNwl484NPvQY
    """
    base = 'https://www.youtube.com/playlist?list='
    return base + playlist_id
import time
def getNewId(Tag):
    """Build a new id of the form '<Tag>_<epoch seconds>'."""
    return '{}_{}'.format(Tag, time.time())
import torch
def correct_predictions(predictions, targets):
    """
    Count predictions whose argmax matches the target label.

    :param predictions: input of predicted scores. size should be WxC
    :param targets: input of target class indices. size should be W
    :return: total number of correct predictions as a Python int
    """
    # Vectorized argmax-compare replaces the original per-row Python loop;
    # same result, computed in one tensor op instead of W iterations.
    return (predictions.argmax(dim=1) == targets).sum().item()
from typing import Optional
import base64
import os
def make_gafaelfawr_token(username: Optional[str] = None) -> str:
    """Create a random or user Gafaelfawr token.

    When ``username`` is given, it is embedded (base64) in the key portion of
    the token so that we can extract it later. This means the token no longer
    follows the format of a valid Gafaelfawr token, but it lets the mock
    JupyterHub know what user is being authenticated.
    """
    def _b64(raw: bytes) -> str:
        return base64.urlsafe_b64encode(raw).decode()

    if username:
        key = _b64(username.encode())
    else:
        key = _b64(os.urandom(16)).rstrip("=")
    secret = _b64(os.urandom(16)).rstrip("=")
    return f"gt-{key}.{secret}"
def get_file_list(num_files):
    """
    Format file numbers in the zero-padded four-digit Olympus style.

    :param num_files: number of files
    :return: list of file-number strings, e.g. ['0001', '0002', ...]
    """
    # str.zfill replaces the original's manual '0' * (4 - len) padding
    # arithmetic; numbers of five or more digits pass through unchanged,
    # exactly as before.
    return [str(num).zfill(4) for num in range(1, num_files + 1)]
def _binary_search(f, xmin, xmax, eps=1e-9):
    """Return the largest x such f(x) is True.

    Assumes ``f`` is monotone on [xmin, xmax] (True below some threshold,
    False above). Converges to within ``eps``. Short-circuits: returns xmax
    immediately if f(xmax) is True, and xmin if even f(xmin) is False.
    """
    middle = (xmax + xmin) / 2.
    while xmax - xmin > eps:
        assert xmin < xmax
        middle = (xmax + xmin) / 2.
        # NOTE(review): the two endpoint checks below run on every iteration,
        # not just once; behavior kept as-is since f may be impure/expensive.
        if f(xmax):
            return xmax
        if not f(xmin):
            return xmin
        # Standard bisection step: keep the half where the transition lies.
        if f(middle):
            xmin = middle
        else:
            xmax = middle
    return middle
def int_check(self, value, key):
    """
    Cast a value as an integer
    :param value: The value to cast as int
    :param key: The value name
    :return: The value as an integer, otherwise an error message
    """
    try:
        # Convert once (the original called int(value) twice) and catch only
        # the exceptions int() actually raises, instead of a bare except.
        return int(value), ''
    except (ValueError, TypeError):
        return None, 'Error: %s value must be a integer' % (key)
def get_organizer_emails(form):
    """Get up to 15 organizer emails from an input form."""
    organizers = form.getlist("organizer")
    return organizers[:15]
def find_dependency_in_spec(spec, ref):
    """Utility to return the dict corresponding to the given ref
    in a dependency build spec document fragment.

    Returns None when no item matches.
    """
    return next((entry for entry in spec if entry['ref'] == ref), None)
import json
def get_json_dump(data):
    """
    Serialize ``data`` as a pretty-printed JSON string (4-space indent).
    """
    dump_options = {"indent": 4, "separators": (',', ': ')}
    return json.dumps(data, **dump_options)
def lerp(pos_x, x0, x1, fx0, fx1):
    """ integer linear interpolation between (x0, fx0) and (x1, fx1) at pos_x """
    rise_times_run = (fx1 - fx0) * (pos_x - x0)
    return fx0 + rise_times_run // (x1 - x0)
def encoding(fields, use_original_field_names=False):
    """Convert fields structure to encoding maps for values and field names.

    Expects a fields dictionary structure and returns a tuple of two dicts
    (value maps, field-name map) keyed by the field names — original or
    renamed depending on ``use_original_field_names``. Fields without an
    "encoding" entry, or whose encoding has "expand" set to False, are
    skipped.
    """
    value_maps = {}
    name_map = {}
    for original_name, spec in fields.items():
        if "encoding" not in spec:
            continue
        enc = spec["encoding"]
        if not enc.get("expand", True):
            continue
        if use_original_field_names:
            key = original_name
        else:
            key = spec.get("name", original_name)
        name_map[key] = enc.get("name", f"{key}_")
        value_maps[key] = enc.get("map", {})
    return value_maps, name_map
import torch
import logging
def compute_ious(gt_masks, pred_masks, gt_boxes, pred_boxes):
    """Compute Intersection over Union of ground truth and predicted masks.
    Args:
        gt_masks (torch.IntTensor((img_height, img_width, nb_gt_masks))):
            Ground truth masks.
        pred_masks (torch.FloatTensor((img_height, img_width, nb_pred_masks))):
            Predicted masks.
        gt_boxes, pred_boxes: per-mask bounding boxes, one row of 4 values
            per mask; coords 0/2 and 1/3 are min/max along the two image
            axes (exact x/y order not determinable here — confirm with
            callers). Used only as a cheap overlap pre-test.
    Returns:
        ious (torch.FloatTensor((nb_gt_masks, nb_pred_masks))):
            Intersection over Union.
    """
    # compute IOUs
    gt_masks = gt_masks.to(torch.uint8)
    pred_masks = pred_masks.to(torch.uint8)
    nb_gts, nb_preds = gt_masks.shape[2], pred_masks.shape[2]
    ious = torch.zeros((nb_gts, nb_preds), dtype=torch.float)
    # Per-mask pixel counts, reused for the union term inside the loops.
    gt_areas = gt_masks.sum((0, 1))
    pred_areas = pred_masks.sum((0, 1))
    logging.info(f"{nb_gts} x {nb_preds} (GT x predictions)")
    for gt_idx in range(0, gt_masks.shape[2]):
        gt_mask = gt_masks[:, :, gt_idx]
        for pred_idx in range(0, pred_masks.shape[2]):
            # skip masks whose boxes do not intercept
            if (gt_boxes[gt_idx, 0] > pred_boxes[pred_idx, 2] or
                pred_boxes[pred_idx, 0] > gt_boxes[gt_idx, 2] or
                gt_boxes[gt_idx, 1] > pred_boxes[pred_idx, 3] or
                pred_boxes[pred_idx, 1] > gt_boxes[gt_idx, 3]):
                iou = 0.0
            else:
                # Bitwise AND of the uint8 masks gives the intersection mask;
                # nonzero().shape[0] counts its set pixels.
                intersection = pred_masks[:, :, pred_idx] & gt_mask
                intersection = intersection.nonzero().shape[0]
                union = (pred_areas[pred_idx] + gt_areas[gt_idx]
                         - intersection).item()
                # Guard against empty masks producing a zero union.
                iou = intersection/union if union != 0.0 else 0.0
            ious[gt_idx, pred_idx] = iou
    return ious
def getMatches(file, regex):
    """Return a list of all passages in `file` matching `regex`.

    :param file: path of the file to scan
    :param regex: a compiled regular expression (``re.Pattern``)
    :return: list of matches as returned by ``regex.findall``
    """
    # 'with' guarantees the handle is closed (the original leaked it), and
    # findall already returns a list, so the wrapping comprehension is gone.
    with open(file, "r") as source:
        return regex.findall(source.read())
def qualitytodeg(ql):
    """
    Maps a string to a distance in degree between the outer points of the cylinder
    => Lower distance = more points generated
    @:param ql String which represents choosen quality of the stl file
    @:return int which represents the distance between outer points of the cylinder sides
    (unknown quality names fall back to 30, i.e. "mid")
    """
    mapper = {
        "ultrahigh": 1,
        "high": 10,
        "mid": 30,
        "low": 60,
        "cube": 90,
    }
    # dict.get with a default replaces the original membership test + branch.
    return mapper.get(ql, 30)
from typing import Tuple
def ext_gcd(a: int, b: int) -> Tuple[int, int, int]:
    """Extended Euclidean algorithm
    solve ax + by = gcd(a, b)
    Parameters
    ----------
    a
    b
    Returns
    -------
    (d, x, y) s.t. ax + by = gcd(a, b)
    """
    if b == 0:
        # gcd(a, 0) = a = a*1 + 0*0
        return a, 1, 0
    g, u, v = ext_gcd(b, a % b)
    # Back-substitution: g = b*u + (a % b)*v  =>  g = a*v + b*(u - (a//b)*v)
    return g, v, u - (a // b) * v
def resolve_path(base, path):
    """ Resolve (some) relative path against ``base``.

    An absolute path (leading '/') is returned untouched; anything else is
    appended to ``base`` by plain string concatenation.
    """
    if path[0] != "/":
        # Relative path: glue it onto the base.
        return base + path
    return path
def parse_zone_id(full_zone_id: str) -> str:
    """Parse the returned hosted zone id and returns only the ID itself.

    e.g. '/hostedzone/Z123' -> 'Z123' (the third '/'-separated component).
    """
    parts = full_zone_id.split("/")
    return parts[2]
def _real_name(name, finfo):
    """Given the name of an object, return the full name (including package)
    from where it is actually defined.

    ``finfo`` presumably maps module paths to objects whose ``localnames``
    dict maps a locally-imported name to the name it refers to — TODO
    confirm against callers. The loop follows these aliases until either a
    fixed point is reached or the name can no longer be resolved.
    """
    while True:
        # Split into (module path, attribute); a name with no dot cannot be
        # resolved further and falls through to the final return.
        parts = name.rsplit('.', 1)
        if len(parts) > 1:
            if parts[0] in finfo:
                # Translate the attribute through the module's local-name map;
                # an unknown attribute maps to itself.
                trans = finfo[parts[0]].localnames.get(parts[1], parts[1])
                if trans == name:
                    # Fixed point: the translation equals the full name.
                    return trans
                else:
                    name = trans
                    continue
        return name
import math
def distance(A, B):
    """ Finds the Euclidean distance between two 2-D points
    @parameter A: point #1
    @parameter B: point #2
    @returns: distance between the points A and B"""
    # math.hypot is numerically safer than sqrt of summed squares: it avoids
    # overflow/underflow of the intermediate squared terms.
    return math.hypot(A[0] - B[0], A[1] - B[1])
from collections import Counter
def CheckPermutation(string1, string2):
    """Check whether the two strings are permutations of each other."""
    # Length pre-check keeps the O(1) fast path; Counter equality then does
    # the same count-up/count-down bookkeeping the original wrote by hand.
    if len(string1) != len(string2):
        return False
    return Counter(string1) == Counter(string2)
def decimalToBinaryv1(decimal):
    """assumes decimal is an int, representing a number in base 10
    returns a str such as '0b101', representing the same number in base 2
    (note: bin() returns a string with a '0b' prefix, not an int — the
    original docstring claimed an int return)
    """
    return bin(decimal)
def _create_pred_input_plot(x_pred,
x_pred_error,
axes,
vline_kwargs=None,
vspan_kwargs=None):
"""Create plot for prediction input data (vertical lines)."""
if vline_kwargs is None:
vline_kwargs = {'color': 'k', 'linestyle': ':', 'label': 'Observation'}
if vspan_kwargs is None:
vspan_kwargs = {'color': 'k', 'alpha': 0.1}
axes.axvline(x_pred, **vline_kwargs)
axes.axvspan(x_pred - x_pred_error, x_pred + x_pred_error, **vspan_kwargs)
return axes | 2430d0b181b401cd8188aebc091abd8a9a28b650 | 31,894 |
def get_results(soup):
"""
Gets the results div from the HTML soup
@param {BeautifulSoup} soup Google Scholar BeautifulSoup instance
@return {BeautifulSoup[]} List of Google Scholar results as
BeautifulSoup instances
"""
return soup.find_all('div', class_='gs_r gs_or gs_scl') | efdcc2a5b827a84840868e250894c8f144ae0743 | 31,895 |
def del_pre_line(text, keyword):
    """Delete the line immediately preceding the first line that contains
    ``keyword``.

    :param text: full text to edit
    :param keyword: substring marking the reference line
    :return: str: text with the preceding line removed
    :raises RuntimeError: if ``keyword`` does not occur in ``text``
    """
    hit = text.find(keyword)
    if hit == -1:
        raise RuntimeError('keyword not in text')
    # Walk back to the newline starting the keyword's line, then to the
    # newline starting the line before it; splice the two pieces together.
    line_start = text.rfind('\n', 0, hit)
    prev_line_start = text.rfind('\n', 0, line_start)
    return text[:prev_line_start] + text[line_start:]
import argparse
def parse_arguments():
    """
    Parse user arguments
    Output: tuple (type, input, input_2, input_3) with the parsed arguments
    """
    # All the docstrings are very provisional and some of them are old, they would be changed in further steps!!
    # NOTE(review): parser.parse_args() reads sys.argv, so this function is
    # only callable from a real CLI invocation with -t and -i supplied.
    parser = argparse.ArgumentParser(description="""Script to perform plots. You can choose between different
    type of plots (depending on your input file): 'boxplot_single' if your input is the result of the analysis
    between two structures, for which you have computed the RMSD and the CA distance between them; 'boxplot_differences'
    if you want to show the comparison of RMSD and CA distance in two different files; and 'boxplot_atom_distances'
    if you have calculated atom-atom distances in three structures in order to show the differences.""")
    required_named = parser.add_argument_group('required named arguments')
    # Growing related arguments
    required_named.add_argument("-t", "--type", required=True, choices=['boxplot_single','boxplot_differences',
                                                                       'boxplot_atom_distances'],
                                help="""Plot type that you want to do. Choose between: boxplot_differences,
                                boxplot_single. """)
    required_named.add_argument("-i", "--input", required=True,
                                help="""Path of the input file.""")
    # NOTE(review): the '""""' below makes each help text start with a literal
    # quote character, and the -i3 help still says "input file 2" — both look
    # like copy-paste slips in the (runtime) help strings.
    parser.add_argument("-i2", "--input_2", default=False,
                        help=""""Path of the input file 2, only set if you want to do comparisons.""")
    parser.add_argument("-i3", "--input_3", default=False,
                        help=""""Path of the input file 2, only set if you want to do comparisons.""")
    args = parser.parse_args()
    return args.type, args.input, args.input_2, args.input_3
def has_ordered_sublist(lst, sublist):
    """
    Determines whether the passed list contains, in the specified order, all
    the elements in sublist.

    An empty sublist is trivially contained and returns True.
    """
    if not sublist:
        # Guard: the original indexed sublist[0] here and raised IndexError
        # whenever sublist was empty and lst was not.
        return True
    sub_idx = 0
    last_idx = len(sublist) - 1
    for item in lst:
        if item == sublist[sub_idx]:
            if sub_idx == last_idx:
                return True
            sub_idx += 1
    return False
def mock_time(dt=None, mem=[0.0]):
    """Fake time.perf_counter().

    The mutable default ``mem`` is deliberate: it acts as persistent state
    shared across all calls, implementing a simple accumulator clock.
    Calling with ``dt`` advances the fake clock by that amount; calling with
    no argument just reads the current fake time.
    """
    if dt is not None:
        mem[0] += dt
    return mem[0]
def detecttermination(logfile):
    """
    Check whether the Longbow run recorded in ``logfile`` has finished.

    Returns True when the log's second-to-last line contains the final
    "Good bye from Longbow!" message, False otherwise — including when the
    file is missing or unreadable. (The original docstring incorrectly
    described a process-id liveness check.)
    """
    try:
        # 'with' guarantees the handle is closed (the original leaked it),
        # and the except clause is narrowed from a bare except.
        with open(logfile) as fi:
            lines = fi.readlines()
    except (OSError, UnicodeError):
        return False
    if lines and "Good bye from Longbow!" in lines[len(lines) - 2]:
        return True
    return False
import argparse
import pathlib
def parse_args():
    """Parse command line arguments.
    Returns:
        output (argparse.Namespace): Parsed command line arguments. See executable
        help page for more information.
    """
    # NOTE(review): parse_args() reads sys.argv; --backgrounds,
    # --templates-path, --augmentations, --out-path and --max-images are all
    # required, so this is only callable from a real CLI invocation.
    parser = argparse.ArgumentParser()
    parser.add_argument("--backgrounds",
                        type=pathlib.Path,
                        required=True,
                        help="Path to the directory containing "
                        "background images to be used (or file list).")
    parser.add_argument("--templates-path",
                        required=True,
                        type=pathlib.Path,
                        help="Path (or file list) of templates.")
    parser.add_argument("--augmentations",
                        required=True,
                        type=pathlib.Path,
                        help="Path to augmentation configuration file.")
    parser.add_argument("--distractors-path",
                        type=pathlib.Path,
                        help="Path (or file list) of distractors.")
    parser.add_argument(
        "--random-distractors",
        type=int,
        default=1000,
        help="Generate this many random distractors for each template.")
    parser.add_argument(
        "--out-path",
        type=pathlib.Path,
        required=True,
        help="Path to the directory to save the generated images to.")
    parser.add_argument("--max-images",
                        type=int,
                        required=True,
                        help="Number of images to be generated.")
    # dest="jobs": the value is exposed as args.jobs, not args.n.
    parser.add_argument("--n",
                        dest="jobs",
                        type=int,
                        default=1,
                        help="Maximum number of parallel processes.")
    parser.add_argument("--max-template-size",
                        type=int,
                        default=196,
                        help="Maximum template size.")
    parser.add_argument("--min-template-size",
                        type=int,
                        default=16,
                        help="Minimum template size.")
    # Kept as a raw "height,width" string; parsing is presumably done by the
    # consumer of args.background_size — confirm with callers.
    parser.add_argument(
        "--background-size",
        default="800,1360",
        help="If not None (or empty string), image shape 'height,width'")
    return parser.parse_args()
def filter_quote(environment, text, quote='"', escape_with='\\', newline=None):
"""
Filter to add quotes to text.
This is a naive implementation, as the specifics can vary a lot depending
on where you are writing the text.
:param str text: Text to adds quotes to.
:return: The text quoted out, line by line.
:rtype: str
"""
if not newline:
newline = environment.newline_sequence
def quote_line(line):
return '{0}{1}{0}'.format(
quote,
line.replace(quote, '{}{}'.format(escape_with, quote))
)
return newline.join(
map(quote_line, text.splitlines())
) | 9a930904b78f774a759e02b2915c89bec89ac37d | 31,907 |
import struct
def f2b(f):
    """
    Reinterpret a float's IEEE-754 single-precision bit pattern as an
    unsigned 32-bit int.
    """
    packed = struct.pack('f', f)
    (bits,) = struct.unpack('I', packed)
    return bits
import os
def default_lv2_path(conf):
    """Return the default LV2_PATH for the build target as a list."""
    target = conf.env.DEST_OS
    if target == 'darwin':
        return ['~/Library/Audio/Plug-Ins/LV2',
                '~/.lv2',
                '/usr/local/lib/lv2',
                '/usr/lib/lv2',
                '/Library/Audio/Plug-Ins/LV2']
    if target == 'haiku':
        return ['~/.lv2',
                '/boot/common/add-ons/lv2']
    if target == 'win32':
        return ['%APPDATA%\\\\LV2',
                '%COMMONPROGRAMFILES%\\\\LV2']
    # Generic POSIX fallback: derive the lib dir name (lib, lib64, ...)
    # from the configured LIBDIR.
    lib_name = os.path.basename(conf.env.LIBDIR)
    return ['~/.lv2',
            '/usr/%s/lv2' % lib_name,
            '/usr/local/%s/lv2' % lib_name]
def accept(potList):
    """
    Return True if the current structure meets the acceptance criteria:
    at most one NOE violation, at most one improper (IMPR) violation, and
    no CDIH, bond or angle violations.
    """
    max_allowed = (('noe', 1), ('CDIH', 0), ('BOND', 0), ('ANGL', 0), ('IMPR', 1))
    # all() short-circuits in the same term order as the original if-chain.
    return all(potList[term].violations() <= limit for term, limit in max_allowed)
from typing import List
from typing import Tuple
from typing import Dict
def convert(day_input: List[str]) -> Tuple[Dict[str, List[range]], List[int], List[List[int]]]:
    """Converts the input into a tuple with:
    1. A dictionary with the fields, where for each field the value is a list
    of the valid ranges, each represented as an inclusive ``range``
    2. The values for your ticket, a list of ints
    3. A list with the values for the other tickets"""
    lines = iter(day_input)

    def _parse_ranges(spec: str) -> List[range]:
        # "1-3 or 5-7" -> [range(1, 4), range(5, 8)]; bounds are inclusive.
        ranges = []
        for part in spec.split(' or '):
            bounds = part.split('-')
            ranges.append(range(int(bounds[0]), int(bounds[1]) + 1))
        return ranges

    # Field section runs until the first blank line.
    fields: Dict[str, List[range]] = {}
    for line in lines:
        if line == '':
            break
        field_name, spec = line.split(': ')
        fields[field_name] = _parse_ranges(spec)
    # Skip ahead to our own ticket values.
    while next(lines) != 'your ticket:':
        pass
    our = [int(v) for v in next(lines).split(',')]
    # Skip ahead to the nearby tickets; everything after is ticket data.
    while next(lines) != 'nearby tickets:':
        pass
    tickets = [[int(v) for v in line.split(',')] for line in lines]
    return (fields, our, tickets)
import requests
def refine_urn(urns, metadata=None):
    """Refine a list urns using extra information.

    Injects ``urns`` and default values for 'words', 'next' and 'year' into
    ``metadata``, POSTs it to the nb.no ngram 'refineurn' endpoint, and
    returns the decoded JSON response. Note: the passed ``metadata`` dict is
    mutated in place.
    """
    if metadata is None:
        metadata = {}
    metadata['urns'] = urns
    if not ('words' in metadata):
        metadata['words'] = []
    # NOTE(review): 'neste' is presumably the Norwegian spelling of 'next',
    # accepted as an alternative key — confirm against the API. The meaning
    # of the default value 520 is defined by the service, not visible here.
    if not ('next' in metadata or 'neste' in metadata):
        metadata['next'] = 520
    if not 'year' in metadata:
        metadata['year'] = 1500
    r = requests.post('https://api.nb.no/ngram/refineurn', json=metadata)
    return r.json()
def sample_approx(approx, draws=100, include_transformed=True):
    """Draw samples from variational posterior.
    Parameters
    ----------
    approx: :class:`Approximation`
        Approximation to sample from
    draws: `int`
        Number of random samples.
    include_transformed: `bool`
        If True, transformed variables are also sampled. Default is True.
    Returns
    -------
    trace: class:`pymc.backends.base.MultiTrace`
        Samples drawn from variational posterior.
    """
    sampling_options = {"draws": draws, "include_transformed": include_transformed}
    return approx.sample(**sampling_options)
import os
def load_cksm(sumfile, base_filename):
    """Load the checksum for ``base_filename`` from checksum file ``sumfile``.

    Scans ``sumfile`` line by line for the base name of ``base_filename``
    and returns the checksum column of the first matching line.

    :raises Exception: if no line mentions the file.
    """
    # basename is loop-invariant, so compute it once; 'with' guarantees the
    # file handle is closed (the original never closed it).
    target = os.path.basename(base_filename)
    with open(sumfile, 'r') as fh:
        for line in fh:
            if target in line:
                sum_cksm, name = line.strip('\n').split()
                return sum_cksm
    raise Exception('could not find checksum in file')
def find_hosts(ipa_client, pattern=None):
    """ Returns list of matching hosts from IPA.
    If no pattern is provided, returns all hosts.
    """
    search = '' if pattern is None else pattern
    return ipa_client.get_hosts(pattern=search)
import os
def dir_path(string):
    """
    Return ``string`` unchanged if it names an existing directory;
    otherwise raise NotADirectoryError.
    """
    if not os.path.isdir(string):
        raise NotADirectoryError(string)
    return string
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.