content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def level_marker(level=1, symbol='|', color='red'):
    """Insert 'symbol' at the beginning of the current line.

    ANSI color/cursor-positioning output is currently disabled, so this is
    a no-op that always returns an empty string.
    """
    # Colored terminal output (cursor save/restore escape codes) was
    # disabled here; the function intentionally emits nothing.
    return ""
import time
def _format_time(epoch_time, format_string="%Y-%m-%d %H:%M:%S"):
    """Return a formatted representation of an epoch timestamp.

    The timestamp is rendered in the process's local timezone.
    """
    return time.strftime(format_string, time.localtime(epoch_time))
def if_none(value, default):
    """Return *value*, falling back to *default* when it is None."""
    return default if value is None else value
def rosenbrock(x):
    """Rosenbrock test function: sum over consecutive coordinate pairs."""
    assert len(x) > 1, "Must pass a vector to rosenbrock"
    return sum(
        100 * (x[i + 1] - x[i] ** 2) ** 2 + (1 - x[i]) ** 2
        for i in range(len(x) - 1)
    )
def cst_efield_1freq_cut(cst_efield_1freq_cut_main):
    """Make a function-level cut-down single-frequency efield beam.

    Returns an independent copy of the provided beam object.
    """
    beam = cst_efield_1freq_cut_main
    return beam.copy()
import os
def split(path):
    """
    Parse a full path and return the collection and the resource name.

    :param path: basestring
    :return: tuple of (dirname, basename)
    """
    return (os.path.dirname(path), os.path.basename(path))
def rtimport(name):
    """Import a module, even within a package (via the 'package.module'
    naming convention), and return a reference to the module (or object
    within a module!).

    Raises ImportError (naming the failing path, with the original
    AttributeError chained as the cause) when resolution fails.
    """
    # __import__('a.b') returns the *top-level* package 'a'; walk the
    # remaining dotted components with getattr to reach the target.
    obj = __import__(name)
    for comp in name.split('.')[1:]:
        try:
            obj = getattr(obj, comp)
        except AttributeError as err:
            # Fixed: previously raised a bare ImportError with no message
            # and no cause, hiding which component failed.
            raise ImportError(name) from err
    return obj
import os
import fnmatch
def __findFullBlueprintFiles():
    """Finds all generated blueprint files in pwd that match the ending *_*_*_*.mfpx.
    Returns:
        list: A list containing all filenames of FullBlueprintFiles
    """
    fileNames = []
    # os.scandir() with no argument lists the current working directory.
    files = os.scandir()
    for file in files:
        # Case-sensitive glob: four underscore-separated fields + ".mfpx".
        if fnmatch.fnmatchcase(file.name, "*_*_*_*.mfpx"):
            print(file.name)  # NOTE(review): debug output; consider logging
            fileNames.append(file.name)
    print(fileNames)  # NOTE(review): debug output; consider logging
    return fileNames
import typing
import importlib
def import_object(path: str, default=None) -> typing.Any:
    """Import an object given a ``module_path:object_path`` string.

    The ``default`` value is returned when ``path`` is ``None`` — a
    convenience for passing in settings that may be ``None``.

    Examples::
        >>> import_object('dijkstar.graph:Graph')
        <class 'dijkstar.graph.Graph'>
        >>> import_object('dijkstar.graph:Graph.load')
        <bound method Graph.load of <class 'dijkstar.graph.Graph'>>
    """
    if path is None:
        return default
    module_path, object_path = path.split(":")
    target = importlib.import_module(module_path)
    for attr_name in object_path.split("."):
        target = getattr(target, attr_name)
    return target
def read_img_labels(img_label_path):
    """
    Read a bounding-box label file (csv-style, one object per file).

    The file is expected to hold a header line followed by one line of
    four tab-separated integers: x1, y1, x2, y2.

    :param img_label_path: path to the label file
    :return: dict with integer values under keys "x1", "y1", "x2", "y2"
    """
    # Fixed: the original called file.close() inside the `with` block,
    # closing the file twice; `with` already handles closing.
    with open(img_label_path, "r") as img_label_file:
        data_lines = img_label_file.readlines()
    # Line 0 is assumed to be a header; line 1 holds the coordinates.
    x1, y1, x2, y2 = data_lines[1].split("\t")
    # int() tolerates surrounding whitespace, including the trailing newline.
    return {"x1": int(x1), "y1": int(y1), "x2": int(x2), "y2": int(y2)}
def temp_wien(wl):
    """
    Estimate the spectrum temperature via Wien's displacement law.

    :param wl: peak wavelength in cm
    :return: Wien temperature
    """
    WIEN_CONSTANT = 0.28977721  # Wien displacement constant b [cm·K]
    return WIEN_CONSTANT / wl
import argparse
def _parse_args() -> argparse.Namespace:
    """Parse the command line args for the Orc8r build tool.

    Returns a Namespace of boolean flags; no positional arguments.
    """
    # There are multiple ways to invoke finer-grained control over which
    # images are built.
    #
    # (1) How many images to build
    #
    # all: all images
    # default: images required for minimum functionality
    # - excluding metrics images
    # - including postgres, proxy, etc
    #
    # (2) Of the core orc8r images, which modules to build
    #
    # Defaults to all modules, but can be further specified by targeting a
    # deployment type.
    parser = argparse.ArgumentParser(description='Orc8r build tool')
    # Run something
    parser.add_argument(
        '--tests', '-t',
        action='store_true',
        help='Run unit tests',
    )
    parser.add_argument(
        '--mount', '-m',
        action='store_true',
        help='Mount the source code and create a bash shell',
    )
    parser.add_argument(
        '--precommit', '-c',
        action='store_true',
        help='Mount the source code and run pre-commit checks',
    )
    parser.add_argument(
        '--coverage', '-o',
        action='store_true',
        help='Generate test coverage statistics',
    )
    parser.add_argument(
        '--lint', '-l',
        action='store_true',
        help='Run lint test',
    )
    parser.add_argument(
        '--health', '-e',
        action='store_true',
        help='Run health test',
    )
    # Run something
    parser.add_argument(
        '--git', '-g',
        action='store_true',
        help='Get git info',
    )
    # How to do it
    parser.add_argument(
        '--nocache', '-n',
        action='store_true',
        help='Build the images with no Docker layer caching',
    )
    parser.add_argument(
        '--parallel', '-p',
        action='store_true',
        default=False,
        help='Build containers in parallel',
    )
    # NOTE(review): the help text below reads as the opposite of the flag
    # name '--down' ("Leave containers up") — confirm the intended meaning.
    parser.add_argument(
        '--down', '-down',
        action='store_true',
        default=False,
        help='Leave containers up after running tests',
    )
    return parser.parse_args()
from typing import Iterable
def get_entropy(prob_list: Iterable[float], info_list: Iterable[float]):
    """Compute entropy as the probability-weighted sum of information values."""
    total = 0
    for prob, info in zip(prob_list, info_list):
        total += prob * info
    return total
import os
def get_path(path, suffix):
    """
    Insert *suffix* between a path's stem and its extension.
    """
    root, ext = os.path.splitext(path)
    return f"{root}{suffix}{ext}"
def make_zero_based_midi(defs):
    """
    Shift MIDI program numbers from the 1-based spec to 0-based.

    The official MIDI spec numbers programs from 1, but most software is
    0-based, so each program number is decremented (clamped at 0 so piano
    stays 0).

    :param defs: mapping of name -> {'program_numbers': [int, ...], ...}
    :return: the same mapping, mutated in place
    """
    for entry in defs.values():
        entry['program_numbers'] = [
            max(num - 1, 0) for num in entry['program_numbers']
        ]
    return defs
def get_distribution_names(namespace_pairs, rv_base_class):
    """
    Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distributions.
    distn_gen_names : list of strings
        Names of the generators of the statistical distributions (not
        necessarily the distribution names plus a ``_gen`` suffix).
    """
    distn_names, distn_gen_names = [], []
    for name, value in namespace_pairs:
        # Private names are never part of the public distribution set.
        if name.startswith('_'):
            continue
        # A *_gen class deriving from the base class is a generator class.
        if name.endswith('_gen') and issubclass(value, rv_base_class):
            distn_gen_names.append(name)
        # An *instance* of the base class is a ready-made distribution.
        if isinstance(value, rv_base_class):
            distn_names.append(name)
    return distn_names, distn_gen_names
def clamp(n, vmin, vmax):
    """Clamp *n* into the inclusive range [vmin, vmax].

    :param n: input value
    :param vmin: minimum value
    :param vmax: maximum value
    :returns: the clamped value of n
    """
    # Apply the upper bound first, then the lower bound (same order as
    # max(min(n, vmax), vmin), so vmin wins when the bounds are inverted).
    upper_bounded = min(n, vmax)
    return max(upper_bounded, vmin)
def walsh_iob_curve(t, insulin_action_duration):
    """Return the fraction of a single insulin dosage remaining at the
    specified number of minutes after delivery; also known as Insulin On
    Board (IOB).

    This is a Walsh IOB curve, based on an algorithm that first appeared
    in GlucoDyn.  See: https://github.com/kenstack/GlucoDyn

    :param t: time in minutes since the dose began
    :type t: float
    :param insulin_action_duration: The duration of insulin action (DIA) of the patient, in minutes
    :type insulin_action_duration: int
    :return: The fraction of a insulin dosage remaining at the specified time
    :rtype: float
    """
    # assert insulin_action_duration in (3 * 60, 4 * 60, 5 * 60, 6 * 60)
    if t >= insulin_action_duration:
        return 0.0
    if t <= 0:
        return 1.0
    # Quartic fits, one per supported DIA (3h/4h/5h/6h).
    if insulin_action_duration == 180:
        return -3.2030e-9 * (t**4) + 1.354e-6 * (t**3) - 1.759e-4 * (t**2) + 9.255e-4 * t + 0.99951
    if insulin_action_duration == 240:
        return -3.310e-10 * (t**4) + 2.530e-7 * (t**3) - 5.510e-5 * (t**2) - 9.086e-4 * t + 0.99950
    if insulin_action_duration == 300:
        return -2.950e-10 * (t**4) + 2.320e-7 * (t**3) - 5.550e-5 * (t**2) + 4.490e-4 * t + 0.99300
    if insulin_action_duration == 360:
        return -1.493e-10 * (t**4) + 1.413e-7 * (t**3) - 4.095e-5 * (t**2) + 6.365e-4 * t + 0.99700
    # Unsupported DIA values fall through to the original initializer value.
    return 0
def estimate_mean(sample, values, weights=None):
    """
    estimate_mean(sample, values, weights=None)

    Based on a sample, estimate and return the average value over all
    existing items (a weighted Horvitz–Thompson style estimate).

    Parameters
    ----------
    sample : iterable
        A sample of items.
    values : callable
        Function: item -> value.
    weights : callable, optional
        Function: item -> sampling weight of this item.  When omitted the
        sample is treated as uniform.
    """
    if weights is None:  # fixed: was `weights == None`; uniform sample
        weights = lambda x: 1
    up = down = 0.
    for v in sample:
        w = weights(v)  # fixed: weights() was evaluated twice per item
        up += 1. * values(v) / w
        down += 1. / w
    return up / down
def posdict_is_sane(M1, pos_dict):
    """
    Return a boolean establishing sanity of ``posdict`` wrt matroid ``M``.
    INPUT:
    - ``M1`` -- A matroid.
    - ``posdict`` -- A dictionary mapping ground set elements to (x,y)
      positions.
    OUTPUT:
    A boolean that is ``True`` if posdict indeed has all the required elements
    to plot the geometric elements, otherwise ``False``.
    EXAMPLES::
        sage: from sage.matroids import matroids_plot_helpers
        sage: M1=Matroid(ring=GF(2), matrix=[[1, 0, 0, 0, 1, 1, 1,0,1,0,1],
        ....: [0, 1, 0, 1, 0, 1, 1,0,0,1,0],[0, 0, 1, 1, 1, 0, 1,0,0,0,0]])
        sage: pos_dict= {0: (0, 0), 1: (2, 0), 2: (1, 2), 3: (1.5, 1.0),
        ....: 4: (0.5, 1.0), 5: (1.0, 0.0), 6: (1.0, 0.6666666666666666)}
        sage: matroids_plot_helpers.posdict_is_sane(M1,pos_dict)
        True
        sage: pos_dict= {1: (2, 0), 2: (1, 2), 3: (1.5, 1.0),
        ....: 4: (0.5, 1.0), 5: (1.0, 0.0), 6: (1.0, 0.6666666666666666)}
        sage: matroids_plot_helpers.posdict_is_sane(M1,pos_dict)
        False
    .. NOTE::
        This method does NOT do any checks. ``M1`` is assumed to be a
        matroid and ``posdict`` is assumed to be a dictionary.
    """
    L = set(M1.loops())
    # Elements surviving simplification, plus loops: the non-parallel part.
    nP = L | set(M1.simplify().groundset())
    P = set(M1.groundset())-nP
    # Parallel classes: closures of the removed points, with loops stripped.
    pcls = list(set([frozenset(set(M1.closure([p])) - L) for p in list(P)]))
    for pcl in pcls:
        pcl_list = list(pcl)
        # Each parallel class needs at least one positioned representative.
        if not any(x in pos_dict for x in pcl_list):
            return False
    allP = []
    for pcl in pcls:
        allP.extend(list(pcl))
    # Every remaining (non-loop, non-parallel) element needs its own position.
    return all(x in pos_dict
               for x in list(set(M1.groundset()) - (L | set(allP))))
import base64
def base64_decode(content):
    """
    Decode a base64 string and return the UTF-8 text it encodes.

    :param content: base64-encoded text
    :return: decoded string
    """
    raw = base64.b64decode(content)
    return raw.decode('utf8')
def _sortByCreated(a, b):
"""Sort function for object by created date"""
if a.created < b.created:
return 1
elif a.created > b.created:
return -1
else:
return 0 | e1a33ffaacc51ea3936f2b0824b11b8639718f85 | 47,401 |
import os
def split_pkg(pkg):
    """Split a conda package path into (platform, name, version, build).

    Code due to isuruf and CJ-Wright.
    """
    for ext in (".tar.bz2", ".conda"):
        if pkg.endswith(ext):
            pkg = pkg[:-len(ext)]
            break
    else:
        raise RuntimeError("Can only process packages that end in .tar.bz2 or .conda!")
    plat, pkg_name = pkg.split(os.path.sep)
    name_ver, build = pkg_name.rsplit('-', 1)
    name, ver = name_ver.rsplit('-', 1)
    return plat, name, ver, build
def get_iterable(in_dict, in_key):
    """
    Similar to <dict>.get(), but falsy or missing values become an empty
    tuple instead of None/False/...

    :param in_dict: a dictionary
    :param in_key: the key to look for in in_dict
    :return: in_dict[in_key], or () when missing or falsy
    """
    value = in_dict.get(in_key)
    return value if value else ()
def get_package_version(package):
    """
    Return a Python package's version number as a list of integers,
    e.g. '1.7.2' becomes [1, 7, 2].
    """
    parts = package.__version__.split('.')
    return list(map(int, parts))
def cudaInformation(output):
    """
    Gets the output from the container and returns GPU information.

    Returns (device_name, device_information) where device_information is a
    list of {'unit': ..., 'capacity': ...} dicts.
    """
    device_information = []
    # Each "key: value" line contributes its value; note the split is on the
    # two-character sequence backslash+n, i.e. the container output is
    # assumed to carry *escaped* newlines — TODO confirm against the caller.
    info = [i.split(":")[1] for i in output.split('\\n')[1:-1]]
    # device_information['device-name'] = info[1]
    device_information.append({'unit': 'multiprocessors', 'capacity': info[3]})
    device_information.append({'unit': 'cuda-cores', 'capacity': info[4]})
    # device_information['gpu-clock'] = info[6]
    # device_information['memory-clock'] = info[7]
    # info[8] is assumed to look like "<amount> <unit>" — TODO confirm.
    device_information.append({'unit': info[8].split()[-1], 'capacity': info[8].split()[0]})
    return info[1], device_information
def getItemNames(fullname):
    """Split a fullname into the 3 parts that compose it.

    Args:
        fullname(str): Fullname
    Returns:
        3-tuple with the names of each item in the hierarchy, padded with
        None for missing items; all-None when there are more than three
        parts.
    """
    parts = fullname.split(':')
    if len(parts) > 3:
        return None, None, None
    padded = parts + [None] * (3 - len(parts))
    return padded[0], padded[1], padded[2]
import random
def get_random_move(loc, N, M):
    """
    Function: get_random_move\n
    Parameter: loc -> location of UAV on the Grid, N -> Rows in the Grid, M -> Columns in the Grid\n
    Returns: A tuple (x, y, action, power_factor): a valid random location
    where the UAV will be placed, the action taken, and an independent
    random power factor in [0, 2].\n

    Action codes: 0 up, 1 right, 2 down, 3 left, 4 static.  At grid borders
    and corners, draws that would leave the grid are *remapped* to a legal
    action, which skews the action distribution toward the remapped targets.
    """
    # 0 -> up
    # 1 -> right
    # 2 -> down
    # 3 -> left
    # 4 -> static
    # Row/column deltas indexed by action code.
    dx = [-1, 0, 1, 0, 0]
    dy = [0, 1, 0, -1, 0]
    x, y = loc
    action = 0
    # In the last row
    if x == N - 1:
        # In the first cell of the last row
        if y == 0:
            # Possible move are up and right or static
            action = random.randint(0, 2)
            if action == 2:
                action = 4
        # In the last cell of the last row
        elif y == M - 1:
            # Possible moves are up and left or static
            action = random.randint(0, 2)
            # if move is right change to left
            if action == 1:
                action = 3
            if action == 2:
                action = 4
        # any where in the row except the above two cases
        else:
            # Possible moves are up, left, right
            action = random.randint(0, 3)
            if action == 3:
                action = 4
            # if move is down change to left
            if action == 2:
                action = 3
    # In the first row
    elif x == 0:
        # In the first cell of the first row
        if y == 0:
            # Possible moves are right and down or static
            action = random.randint(1, 3)
            if action == 3:
                action = 4
        # In the last cell of the first row
        elif y == M - 1:
            # Possible moves are left and down or static
            action = random.randint(2, 4)
        # any where in the row except the above two cases
        else:
            # Possible moves are left, right, down, static
            action = random.randint(1, 4)
    # In the last column
    elif y == M - 1:
        # In the first cell of the last column
        if x == 0:
            # Possible moves are left and down or static
            action = random.randint(2, 4)
        # In the last cell of the last column
        elif x == N - 1:
            # Possible moves are up and left or static
            action = random.randint(0, 2)
            if action == 2:
                action = 4
            # if the action is right change to left
            if action == 1:
                action = 3
        # any where in the column except the above two cases
        else:
            # Possible moves are up, left and down or static
            action = random.randint(0, 3)
            if action == 3:
                action = 4
            # if the action is right change to left
            if action == 1:
                action = 3
    # In the first column
    elif y == 0:
        # In the first cell of the first column
        if x == 0:
            # Possible moves are right and down or static
            action = random.randint(1, 3)
            if action == 3:
                action = 4
        # In the last cell of the first column
        elif x == N - 1:
            # Possible moves are right and up or static
            action = random.randint(0, 2)
            if action == 2:
                action = 4
        # any where in the column except the above two cases
        else:
            # Possible moves are up, right, down or static
            action = random.randint(0, 3)
            if action == 3:
                action = 4
    # In any other cell so can move in any direction
    else:
        action = random.randint(0, 4)
    x += dx[action]
    y += dy[action]
    power_factor = random.randint(0, 2)
    return (x, y, action, power_factor)
def train_test_split(windows, labels, size=.2):
    """
    Split parallel window/label sequences into train and test portions.

    :param windows: sequence of samples
    :param labels: sequence of labels, parallel to *windows*
    :param size: fraction of the data reserved for the test split
    :return: (window_train, window_test, label_train, label_test)
    """
    split_size = int(len(windows) * (1 - size))
    window_train = windows[:split_size]
    window_test = windows[split_size:]
    # Bug fix: the label splits previously sliced `windows` instead of
    # `labels`, so the labels were never actually returned.
    label_train = labels[:split_size]
    label_test = labels[split_size:]
    return window_train, window_test, label_train, label_test
def existing_but_not_decorated():
    """Helper used to validate that only decorated methods are added to the
    registry; returns a fixed sentinel value (42 squared)."""
    return 42 ** 2
from typing import Any
async def read_root() -> Any:
    """Handle requests to the root path with a static greeting payload."""
    payload = {"message": "Hello FastAPI!"}
    return payload
def matrix_from_gauss(kernel, gauss_x, gauss_y):
    """Compute matrix for kernel from Gauss rule.

    ``gauss_x``/``gauss_y`` are assumed to be quadrature-rule objects with a
    node array ``x`` (plus ``x_forward``/``x_backward`` on ``gauss_x``) —
    TODO confirm against the rule class.
    """
    # (1 +- x) is problematic around x = -1 and x = 1, where the quadrature
    # nodes are clustered most tightly. Thus we have the need for the
    # matrix method.
    # The [:,None] / [None,:] indexing broadcasts the node vectors into a
    # 2-D evaluation grid.
    return kernel(gauss_x.x[:,None], gauss_y.x[None,:],
                  gauss_x.x_forward[:,None], gauss_x.x_backward[:,None])
from typing import Union
def display_time(seconds: Union[float, int], *, precision: str = "m") -> str:
    """Converts seconds to a human readable form.

    Precision can be "m", "h" or "d".
    m = "X minutes and Y seconds"
    h = "X hours, Y minutes and Z seconds"
    d = "X days, Y hours, Z minutes and N seconds"

    Raises ValueError for negative input or an unknown precision.
    """
    # Fixed: the negative check used to come *after* `seconds < 1`, which
    # made the ValueError unreachable (negatives returned "0 seconds").
    if seconds < 0:
        raise ValueError("can't convert negative seconds")
    if seconds < 1:
        return "0 seconds"
    prec = precision.lower()
    days = hours = 0
    if prec == "d":
        days, remainder = divmod(seconds, 86400)
        hours, remainder = divmod(remainder, 3600)
        minutes, seconds = divmod(remainder, 60)
    elif prec == "h":
        hours, remainder = divmod(seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
    elif prec == "m":
        minutes, seconds = divmod(seconds, 60)
    else:
        raise ValueError(f"invalid precision: expected \"m\", \"h\" or \"d\", got \"{precision}\"")
    values = {
        "days": round(days),
        "hours": round(hours),
        "minutes": round(minutes),
        "seconds": round(seconds)
    }
    output = []
    for unit, value in values.items():
        if value == 0:
            continue
        # Fixed: singular form now triggers on the *value* being 1; the
        # original compared the unit name to 1, so "1 minutes" was emitted.
        output.append(f"{value} {unit[:-1] if value == 1 else unit}")
    # Fixed: a single component used to render as " and X" (empty join).
    if len(output) == 1:
        return output[0]
    return f"{', '.join(output[:-1])} and {output[-1]}"
def biolink_prefix():
    """Prefix used when building curie identifiers for biolink associations.

    Biolink associations do not generally follow curie syntax; in such
    cases the statement curie identifier is set as
    ``biolink_prefix() + ':' + biolink_association_id``.
    """
    return 'biolink'
def options(cdiv=False, inline=False):
    """Set behavioural options which affect the generated code.

    :param cdiv: Set ``True`` to match C behaviour when performing integer division.
    :param inline: Set ``True`` to always inline the function.
    """
    new_options = {"cdiv": cdiv, "inline": inline}

    def wrapper(decl):
        decl.options.update(new_options)
        return decl

    return wrapper
def is_fits(string):
    """
    Boolean function testing whether the filename's extension is .fits or
    .fit (upper- or lowercase).

    Parameters
    ----------
    string: str
        (path to) filename to test

    Returns
    -------
    bool
    """
    # endswith accepts a tuple of suffixes; comparison is case-insensitive
    # via the uppercase fold.
    return string.upper().endswith((".FITS", ".FIT"))
from typing import get_args
def get_database_tables(curs, schema=None, table=None, exclude_schema=None, exclude_table=None):
    """
    Return (schemaname, tablename) rows for tables that carry indexes,
    optionally filtered by schema/table include and exclude lists.

    :param curs: open database cursor (psycopg-style)
    :param schema: schema names to include
    :param table: "schema.table" names to include
    :param exclude_schema: schema names to exclude
    :param exclude_table: "schema.table" names to exclude
    """
    # NOTE(review): `get_args` is imported from `typing` at module level,
    # which extracts generic-alias arguments, not a SQL IN-list string —
    # this looks like it should be a local helper; confirm against the
    # original project.
    extra_conditions = ''
    if schema and len(schema) > 0:
        extra_conditions += " and schemaname in ({schema})\n".format(
            schema=get_args(schema)
        )
    if exclude_schema and len(exclude_schema) > 0:
        extra_conditions += " and schemaname not in ({exclude_schema})\n".format(
            exclude_schema=get_args(exclude_schema)
        )
    if table and len(table) > 0:
        # Bug fix: this previously *assigned* (=) instead of appending (+=),
        # silently discarding any schema filters built above.
        extra_conditions += " and schemaname || '.' || tablename in ({table})\n".format(
            table=get_args(table)
        )
    if exclude_table and len(exclude_table) > 0:
        extra_conditions += " and schemaname || '.' || tablename not in ({exclude_table})\n".format(
            exclude_table=get_args(exclude_table)
        )
    query = """
        select
            sq.schemaname,
            sq.tablename
        from (
            select
                schemaname,
                tablename,
                pg_indexes_size(schemaname||'.'||tablename) as indexes_size
            from pg_catalog.pg_tables
            where
                schemaname !~ 'pg_(temp|toast|catalog).*' and
                schemaname !~ '(information_schema|pg_catalog|kill|tmp|pgq|londiste|londiste_undo)' and
                tablename !~ '(pg_index|kill)'
                {extra_conditions}
        ) sq
        where
            sq.indexes_size > 0
        order by
            sq.schemaname,
            sq.tablename
    """.format(
        extra_conditions=extra_conditions
    )
    curs.execute(query)
    rows = curs.fetchall()
    return rows
import os
def get_num_samples(org_dir, file_names):
    """
    Count the total number of lines (tree samples) across all .mrg files.

    :param org_dir: original directory storing the tree files
    :param file_names: list of file names in the directory
    :return: number of samples
    """
    count = 0
    for filename in file_names:
        # Skip files that are not .mrg
        if not filename.endswith('.mrg'):
            continue
        file_dir = os.path.join(org_dir, filename)
        # Count lines by streaming the file instead of materializing the
        # whole content with readlines() and re-iterating it.
        with open(file_dir, 'r') as reader:
            count += sum(1 for _ in reader)
    return count
def image_check(name: str):
    """
    Check whether a string ends with a known image file suffix.

    Args:
        name (str): the string whose suffix is tested
    Returns:
        True if the suffix is an image extension, False otherwise
    """
    # endswith accepts a tuple of suffixes; lowercase fold makes the
    # comparison case-insensitive.
    return name.lower().endswith((".jpg", ".png", ".jpeg", ".gif", ".webp"))
def save_data(data):
    """
    info: Serialize bytes with a self-describing length prefix.

    Layout: [1 byte: N] [N big-endian bytes: payload length] [payload].

    :param data: bytes
    :return: bytes
    """
    payload_len = len(data)
    # Minimum number of bytes needed to hold the length (0 for empty data).
    width = payload_len.bit_length() // 8 + min(payload_len.bit_length() % 8, 1)
    length_bytes = payload_len.to_bytes(width, byteorder="big")
    return bytes((len(length_bytes), *length_bytes, *data))
import argparse
def _parse_args() -> argparse.Namespace:
    """Parse command-line arguments for running the classifier.

    Returns a Namespace with the model path, crops directory, output CSV
    path, and optional filtering/runtime settings.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Run classifier.')
    parser.add_argument(
        'model',
        help='path to TorchScript compiled model')
    parser.add_argument(
        'crops_dir',
        help='path to directory containing cropped images')
    # The csv module transparently supports a .gz suffix downstream — the
    # help text documents that convention for callers.
    parser.add_argument(
        'output',
        help='path to save CSV file with classifier results (can use .csv.gz '
             'extension for compression)')
    parser.add_argument(
        '-d', '--detections-json',
        help='path to detections JSON file, used to filter paths within '
             'crops_dir')
    parser.add_argument(
        '-c', '--classifier-categories',
        help='path to JSON file for classifier categories. If not given, '
             'classes are numbered "0", "1", "2", ...')
    parser.add_argument(
        '--image-size', type=int, default=224,
        help='size of input image to model, usually 224px, but may be larger '
             'especially for EfficientNet models')
    parser.add_argument(
        '--batch-size', type=int, default=1,
        help='batch size for evaluating model')
    parser.add_argument(
        '--num-workers', type=int, default=8,
        help='# of workers for data loading')
    return parser.parse_args()
def source_site_noop_filter(sources, sites=None):
    """
    Transparent source-site "no-op" filter -- behaves like a real filter
    but never filters anything out and doesn't have any overhead.

    Lazily yields (source, sites) pairs.
    """
    return ((source, sites) for source in sources)
def format_version(v):
    """
    Return a PEP 440-compliant "major.minor.micro" version string built
    from the first three entries of VERSION.
    """
    major, minor, micro = v[0], v[1], v[2]
    return f'{major}.{minor}.{micro}'
import colorsys
def class_specific_color(class_id, bright=True):
    """
    Generate a deterministic per-class RGB color.

    Hue is spread across 20 classes; the value channel is dimmed to 0.7
    when bright is False.
    """
    value = 1.0 if bright else 0.7
    hue = class_id / 20
    return colorsys.hsv_to_rgb(hue, 1, value)
import os
import shutil
def delete_file(file):
    """
    Deletes a given file or directory tree (you can use filecenter too).

    Returns an int status code:
        0 -- deleted successfully
        1 -- path does not exist
        2 -- file existed but could not be removed
        3 -- directory existed but could not be removed
    """
    # Positions of every backslash in the original path string.
    indexes_of_slash = [i for i, ltr in enumerate(file) if ltr == "\\"]
    number_of_iterations = 0
    # Strip backslashes that escape a space or '/'; the running offset
    # compensates for characters already removed.  NOTE(review): indexes
    # are computed once on the original string — confirm this handles
    # multiple adjacent escapes as intended.
    for index in indexes_of_slash:
        character_after_slash = file[index + 1 - number_of_iterations]
        #print(character_after_slash)
        if character_after_slash == ' ' or character_after_slash == '/':
            file = file[:index - number_of_iterations] + file[index + 1 - number_of_iterations:]
            number_of_iterations += 1
    if os.path.isdir(file):
        try:
            shutil.rmtree(file)
            return 0
        except:  # NOTE(review): bare except also swallows KeyboardInterrupt
            return 3
    elif os.path.isfile(file):
        try:
            os.remove(file)
            return 0
        except:  # NOTE(review): bare except also swallows KeyboardInterrupt
            return 2
    else:
        return 1
def get_fp_color(n, col_set=1):
    """
    Get the color of a fixed point given the number of unstable modes.

    Arguments:
        n (int): number of unstable modes
        col_set (int): which colors set to use
    Returns:
        color (str)
    """
    # Any n >= 3 falls through to the dictionary default.
    primary = {0: "seagreen", 1: "salmon", 2: "skyblue"}
    secondary = {0: "lightseagreen", 1: "lightsalmon", 2: "deepskyblue"}
    if col_set == 1:
        return primary.get(n, "magenta")
    return secondary.get(n, "purple")
def get_index_columns(conn, obj):
    """
    Return the column names of a table, ordered by attribute number.

    ``obj`` is expected to expose an ``oid`` attribute identifying the
    relation in ``pg_catalog`` -- TODO confirm against callers.
    """
    cur = conn.cursor()
    # attnum > 0 skips system columns; attisdropped filters dropped columns.
    cur.execute("""
        SELECT a.attname
        FROM pg_catalog.pg_attribute a
        WHERE a.attrelid = %s AND a.attnum > 0 AND NOT a.attisdropped
        ORDER BY a.attnum
    """, [obj.oid])
    # The cursor yields 1-tuples; unpack to plain names.
    return [name for (name,) in cur]
from typing import Any
def is_sdense(x: Any) -> bool:
    """Check if an object is an `SDense` (a SAX dense S-matrix
    representation): a 2-element tuple or list."""
    if not isinstance(x, (tuple, list)):
        return False
    return len(x) == 2
def get_tests_to_run(test_list, test_params, cutoff, src_timings):
    """
    Returns only tests that will not run longer than cutoff.
    Long running tests are returned first to favor running tests in parallel.
    Timings from build directory override those from src directory.
    """
    def get_test_time(test):
        # Return 0 if test is unknown to always run it
        return next(
            (x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
    # Some tests must also be run with additional parameters. Add them to the
    # list.
    tests_with_params = []
    for test_name in test_list:
        # always execute a test without parameters
        tests_with_params.append(test_name)
        params = test_params.get(test_name)
        if params is not None:
            # Each parameter set becomes "<test> <arg1> <arg2> ..." entries.
            tests_with_params.extend(
                [test_name + " " + " ".join(parameter) for parameter in params])
    result = [
        test for test in tests_with_params if get_test_time(test) <= cutoff]
    # Longest-first via negated time; ties break alphabetically by name.
    result.sort(key=lambda x: (-get_test_time(x), x))
    return result
import pathlib
def acquireID( parent_folder ):
    """
    Get the next pair number ID in the given folder.

    Scans immediate children whose names parse as integers and returns
    (largest numeric name) + 1; returns 0 for an empty or non-numeric
    folder.
    """
    curID = int(0)
    path = pathlib.Path(parent_folder)
    for child in path.iterdir():
        curNameAsNum = None
        try:
            curNameAsNum = int(str(child.name))
        except:
            # Non-numeric entry names are ignored.
            continue
        if curNameAsNum >= curID:
            curID = curNameAsNum + 1
    return curID
def concat_to_address(ip, port):
    """
    Join an IP string and a port string into "ip:port".

    ip: str address to concat, like "127.0.0.1"
    port: str port, like "2379"
    return: str like "127.0.0.1:2379", or None if ip or port is None.
    Surrounding whitespace is stripped from both parts.
    """
    if ip is None or port is None:
        return None
    return f"{ip.strip()}:{port.strip()}"
def split_function_name(fn):
"""
Given a method, return a tuple containing its fully-qualified
class name and the method name.
"""
qualname = fn.__qualname__
if '.' in qualname:
class_name, fn_name = qualname.rsplit('.', 1)
class_name = '%s.%s' % (fn.__module__, class_name)
else:
class_name = fn.__module__
fn_name = qualname
return (class_name, fn_name) | 0f525d93afdf72269da303c13b69cc8f29aa0661 | 47,442 |
import array
def toarr( data ):
    """Convert a bytes-like object to an array of unsigned bytes; existing
    byte arrays pass through unchanged."""
    if isinstance( data, array.array ):
        return data
    return array.array( 'B', data )
def discardToChar(stream, char):
    """Read lines from *stream* until the first character of a line is no
    longer *char*, then rewind so that line is the next one read.

    Returns the first line without *char*; returns None when the stream is
    exhausted first.
    """
    while True:
        line_pos = stream.tell()
        line = stream.readline()
        if line == "":
            return None
        if line[0] != char:
            # Rewind so the caller re-reads this line from the top.
            stream.seek(line_pos)
            return line
import errno
def _GetUploadTrackerData(tracker_file_name, logger):
    """Reads tracker data from an upload tracker file if it exists.
    Args:
        tracker_file_name: Tracker file name for this upload.
        logger: for outputting log messages.
    Returns:
        Serialization data if the tracker file already exists (resume existing
        upload), None otherwise.
    """
    tracker_file = None
    # If we already have a matching tracker file, get the serialization data
    # so that we can resume the upload.
    try:
        tracker_file = open(tracker_file_name, 'r')
        tracker_data = tracker_file.read()
        return tracker_data
    except IOError as e:
        # Ignore non-existent file (happens first time a upload is attempted on an
        # object, or when re-starting an upload after a
        # ResumableUploadStartOverException), but warn user for other errors.
        if e.errno != errno.ENOENT:
            # NOTE(review): logger.warn is a deprecated alias of logger.warning.
            logger.warn('Couldn\'t read upload tracker file (%s): %s. Restarting '
                        'upload from scratch.', tracker_file_name, e.strerror)
    # The error path falls through, returning None to mean "start over".
    finally:
        if tracker_file:
            tracker_file.close()
import re
def xkcd_direct(html, url=None):
    """Try to return "title: link" for a direct link to an xkcd comic.

    When *url* is not given, it is scraped from the permanent-link line in
    the page body.  Returns None when the page, the permalink, or the
    title cannot be found.
    """
    if not html:
        return None
    if not url:
        permalink = re.search(r'Permanent link to this comic: ([^\s<>]+)', html)
        if not permalink:
            return None
        url = permalink.group(1)
    title = re.search(r'<title>(.+?)</title>', html)
    if not title:
        return None
    return title.group(1) + ": " + url
import os
def _output_urls(self, workflow, uid, job_name, label):
    """
    Generate presigned output-upload URLs for each job's output files and
    directories.

    Returns a string of space-separated prominenceoutN="<url>" assignments
    suitable for injection into a job description.
    """
    lists = ''
    count = 0
    for job in workflow['jobs']:
        # Use provided storage if necessary: a job declaring its own
        # default storage opts out of the built-in object store.
        use_default_object_storage = True
        if 'storage' in job:
            if 'default' in job['storage']:
                if job['storage']['default']:
                    use_default_object_storage = False
        if 'outputFiles' in job and use_default_object_storage:
            for filename in job['outputFiles']:
                # 604800 seconds = 7 days of URL validity.
                url_put = self.create_presigned_url('put',
                                                    self._config['S3_BUCKET'],
                                                    'scratch/%s/%s/%d/%s' % (uid, job_name, label, os.path.basename(filename)),
                                                    604800)
                lists = lists + ' prominenceout%d="%s" ' % (count, url_put)
                count += 1
        if 'outputDirs' in job and use_default_object_storage:
            for dirname in job['outputDirs']:
                # Directories are uploaded as .tgz archives.
                url_put = self.create_presigned_url('put',
                                                    self._config['S3_BUCKET'],
                                                    'scratch/%s/%s/%d/%s.tgz' % (uid, job_name, label, os.path.basename(dirname)),
                                                    604800)
                lists = lists + ' prominenceout%d="%s" ' % (count, url_put)
                count += 1
    return lists
def names_from_results(response):
    """Extract the "name" field of every entry in a JSON API response."""
    results = response.json()["results"]
    return [entry["name"] for entry in results]
def bs_characteristic_exp(u, p, r=0.0):
    """
    Characteristic exponent of a Gaussian (Black-Scholes) process.

    u - argument, conventionally denoted ksi,
    p - model parameters (only p[0] is used here - the volatility),
    r - risk-free rate
    """
return p[0]*p[0]/2.0*u*u-1j*u*(r-p[0]*p[0]/2.0) | de70744dfb4e1778ef5e04fd9df89d416946acb8 | 47,451 |
import string
def subst(env, str):
    """Substitute ${var} references in *str* using the *env* mapping,
    repeating until the text stops changing (so values that themselves
    contain references are fully expanded)."""
    previous = None
    while previous != str:
        previous = str
        str = string.Template(str).safe_substitute(env)
    return str
def fitness_score_order(id1, id2, to_consider, order):
    """Tie-break value for choosing between two resources based on order.

    Currently a stub that always returns 0, since the distance criterion
    dominates the decision.
    """
    return 0
import configparser
def _get_token(filename='token.cfg', key_ring='openweathermap'):
"""
read in API token
Parameters
----------
filename : str
local file with API token
key_ring : str
dictionary key, appearing within [] in token file
Returns
-------
str
API token
"""
parser = configparser.ConfigParser()
parser.read(filename)
return parser[key_ring]['token'] | b5eabd3d222fa2cffae936e5fe38fa3cf312c30d | 47,454 |
def _fits_indexhdus(hdulist):
"""
Helper function for fits I/O.
Args:
hdulist: a list of hdus
Returns:
dictionary of table names
"""
tablenames = {}
for i in range(len(hdulist)):
try:
tablenames[hdulist[i].header['EXTNAME']] = i
except(KeyError):
continue
return tablenames | 031aed6610eacdaecc9215822746b7e70e083d92 | 47,455 |
def get_colors(color=None, colors=None):
    """
    Concatenate the ``value`` attribute of a single color and/or a list
    of colors into one string.

    Args:
        color (Colors): optional single color
        colors (List[Colors]): optional list of colors

    Returns:
        (str) : concatenated color values
    """
    parts = []
    if color is not None:
        parts.append(color.value)
    if colors is not None:
        # Use a distinct loop variable; the original shadowed `color`.
        parts.extend(c.value for c in colors)
    # join() avoids quadratic string concatenation.
    return "".join(parts)
from typing import List
import struct
def device_number_to_fields(device_number: int) -> List[int]:
    """
    Split the device number (16 bits) into two bytes, least significant first.

    Example with device number 1000:
        Full bits:    0b0000001111101000
        First 8 bits: 0b00000011 == 3
        Last 8 bits:  0b11101000 == 232
    Returns [232, 3] because the byte order is little endian.

    Raises:
        struct.error: if device_number does not fit in an unsigned 16-bit int.
    """
    # Iterating a bytes object already yields ints, so list() suffices
    # (the original identity comprehension was redundant).
    return list(struct.pack('<H', device_number))
from typing import List
def is_using_stdin(paths: List[str]) -> bool:
    """Determine if we're going to read from stdin.

    :param paths:
        The paths that we're going to check.
    :returns:
        True if stdin (-) is in the path, otherwise False
    """
    for path in paths:
        if path == "-":
            return True
    return False
def pass_quality_filter(s, cutoff):
    """
    Check if sequence passes quality filter cutoff

    Arguments:
      s (str): sequence quality scores (PHRED+33)
      cutoff (int): minimum quality value; all
        quality scores must be equal to or greater
        than this value for the filter to pass

    Returns:
      Boolean: True if the quality scores pass the
        filter cutoff, False if not.
    """
    threshold = chr(cutoff + 33)
    # all() short-circuits like the original loop; an empty quality
    # string trivially passes, matching the previous behaviour.
    return all(c >= threshold for c in s)
def best_of_gen(population):
    """
    Syntactic sugar to select the best individual in a population.

    :param population: a non-empty list of individuals
    :return: the individual that compares greatest
    """
    assert len(population) > 0
    return max(population)
import string
import random
def generatePRandomPW(pwlen=16, mix_case=1):
    """Generate a pseudo-random password.

    Generate a pseudo-random password of given length, optionally
    with mixed case. Warning: the randomness is not cryptographically
    very strong (`random`, not `secrets`, is used).
    """
    if mix_case:
        chars = string.ascii_letters + string.digits
    else:
        chars = string.ascii_lowercase + string.digits
    # join() avoids quadratic string concatenation in the loop.
    return ''.join(random.choice(chars) for _ in range(pwlen))
def visualEditTypes():
    """Return the list of type names that support visual editing."""
    return [
        'Config', 'Configs', 'Trajectory', 'Vector3', 'Point',
        'RigidTransform', 'Rotation', 'WorldModel',
    ]
def default_sum_all_losses(dataset_name, batch, loss_terms):
    """
    Default loss: the sum of every term that defines a `loss` entry.

    Terms without a `loss` key are skipped — they are not used during
    optimization (e.g. embedding outputs).
    """
    total = 0.0
    for term in loss_terms.values():
        value = term.get('loss')
        if value is not None:
            total += value
    return total
def _compute_segseg_intersection(segment1, segment2):
"""Algorithm to compute a segment to segment intersection.
Based on this article:
https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
:param segment1: first segment (defined by two endpoints)
:type segment1: list
:param segment2: second segment (defined by two endpoints)
:type segment2: list
:return: intersection point (p_x, p_y), if it exists
:rtype: tuple or None
"""
(x1, y1), (x2, y2) = segment1
(x3, y3), (x4, y4) = segment2
# Check for parallel lines
denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
if denominator == 0:
return None
t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / denominator
u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / denominator
# Check for intersections outside of the segments
if (t < 0 or t > 1) or (u < 0 or u > 1):
return None
p_x = x1 + t * (x2 - x1)
p_y = y1 + t * (y2 - y1)
return (p_x, p_y) | e30f4227f499ce7eb3adab7674a46f2bdb05b0a5 | 47,466 |
import logging
def create_logger():
    """Initialize and return the shared 'BERTopic' logger.

    `logging.getLogger` returns the same logger object on every call, so
    attaching a handler unconditionally would duplicate log lines; the
    handler is only added the first time.
    """
    logger = logging.getLogger('BERTopic')
    logger.setLevel(logging.WARNING)
    if not logger.handlers:
        sh = logging.StreamHandler()
        sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(message)s'))
        logger.addHandler(sh)
    return logger
def print_key_if_true(dictionary, key):
    """
    Return ``key + ', '`` when the key's value in *dictionary* is truthy,
    otherwise an empty string.
    """
    return key + ', ' if dictionary[key] else ''
import os
def urls_from_env(env: str = 'URLS') -> str:
    """Return the urls string defined by an environment variable.

    Raises:
        ValueError: if the variable is not set; the original KeyError is
            chained as ``__cause__`` for easier debugging.
    """
    try:
        return os.environ[env]
    except KeyError as err:
        raise ValueError(
            f"No environment variable {env} set.\nOriginal error: {err}") from err
def sortRemoveDupes(lst):
    """Sort *lst* in place, then return a new list with duplicate
    symbols removed (sorted unique values)."""
    if not lst:
        return lst
    lst.sort()
    deduped = [lst[0]]
    for item in lst[1:]:
        # The list is sorted, so equal symbols are adjacent.
        if item != deduped[-1]:
            deduped.append(item)
    return deduped
def mul_by_num(num):
    """
    Returns a function that takes one argument and returns num
    times that argument.

    >>> x = mul_by_num(5)
    >>> x(3)
    15
    >>> mul_by_num(2)(-4)
    -8
    """
    return lambda value: num * value
import string
def remove_punctuation(input_string):
    """Return a str with punctuation chars stripped out."""
    # str.translate does one C-level pass instead of a .replace()
    # per punctuation character found in the input.
    return input_string.translate(str.maketrans('', '', string.punctuation))
def parse_condor_path(config_dom):
    """
    Get a valid condor path from the factory config dom.

    Can raise KeyError (or IndexError) if the dom has a bad configuration
    or was incorrectly parsed from the config file.
    """
    tarball = config_dom.getElementsByTagName('condor_tarball')[0]
    base_dir = tarball.attributes['base_dir'].value
    return base_dir.encode('utf-8')
def crop_center(img, cropx, cropy):
    """
    Crop the central cropx-by-cropy window from a
    (height, width, channels) image array.

    :return: the cropped view of the image
    """
    height, width, _ = img.shape
    x0 = width // 2 - cropx // 2
    y0 = height // 2 - cropy // 2
    return img[y0:y0 + cropy, x0:x0 + cropx]
def get_subnet_set(conn, namefilter=''):
    """Return the set of subnet names in the cloud whose name contains
    *namefilter* (the empty default matches every subnet)."""
    matching = set()
    for subnet in conn.network.subnets():
        if namefilter in subnet.name:
            matching.add(subnet.name)
    return matching
def get_node_set(g1, g2, method="union"):
    """
    Return the list of nodes to consider when counting transitions of the
    Markov chains: the intersection of both graphs' node sets when
    `method` is "intersection", otherwise their union.
    """
    nodes1 = set(g1.nodes())
    nodes2 = set(g2.nodes())
    combined = nodes1 & nodes2 if method == "intersection" else nodes1 | nodes2
    return list(combined)
def _clean_listlike(string: str) -> list:
"""Removes commas and semicolons from SQL list-like things. i,e id, number --> ['id', 'number'] """
cols = []
for item in string:
# Check if item is in list, or if user adds ; to the end of the query
if item[-1] == ',' or item[-1] == ';' or item[-1] == '\n':
cols.append(item[:-1])
else:
cols.append(item)
return cols | b7c92177982f7656a9d96d03ba6892e2d71056ac | 47,479 |
import os
def getMeterIDs(imageID, templateDir='template/'):
    """Return the ids of the meters belonging to an image.

    Meter templates are stored as ``<imageID>_<n>.jpg`` files inside
    *templateDir* (now a parameter; the default preserves the original
    hard-coded location).
    """
    meterIDs = []
    for entry in os.listdir(templateDir):
        path = os.path.join(templateDir, entry)
        # splitext is safe for names containing extra dots, unlike the
        # previous two-value unpack of name.split(".").
        prefix, suffix = os.path.splitext(entry)
        if os.path.isfile(path) and suffix == '.jpg' and prefix.split('_')[0] == imageID:
            meterIDs.append(prefix)
    return meterIDs
def encode_json_scale(scales):
    """Encode a ScaleDict to JSON.

    See https://github.com/lidatong/dataclasses-json#Overriding for more info.
    """
    entries = []
    for channel, scale in scales.items():
        entries.append({"channelName": channel, "scale": scale})
    return entries
import json
def make_fields_values(data: dict) -> tuple:
    """
    Given a dict, build the SQL SET-clause fragment and the matching
    value tuple for a parameterized query.

    :param data: column-name -> value mapping
    :return: (fields_sql, values) where fields_sql looks like
        " `a`=%s , `b`=%s " and values is a tuple of the same length
    """
    fields = list()
    values = list()
    for key, value in data.items():
        fields.append(' `%s`=%%s ' % (key))
        # Containers are JSON-serialized so they can be stored in a
        # text column; a single isinstance call covers both types.
        if isinstance(value, (dict, list)):
            values.append(json.dumps(value))
        else:
            values.append(value)
    return ','.join(fields), tuple(values)
def deleteColumns(column_pattern, dataframe):
    """
    Drop every column whose name contains the given pattern (or any of the
    given patterns), e.g. to bulk-remove auto-generated columns such as
    'Unnamed: 0'.

    :param column_pattern: substring, or list of substrings, matched
        against column names.
    :param dataframe: DataFrame to remove the columns from; not modified.
    :return: a new DataFrame with the matching columns removed.
    """
    df = dataframe.copy()
    # `column_pattern is list` was always False (identity check against
    # the type object); isinstance actually detects a list of patterns.
    if isinstance(column_pattern, list):
        patterns = column_pattern
    else:
        patterns = [column_pattern]
    deletedColumns = [col for col in df.columns
                      if any(word in col for word in patterns)]
    # Drop on the copy and return the result; the original called
    # dataframe.drop(...) without keeping it, so nothing was removed.
    return df.drop(columns=deletedColumns)
def get_theme_color():
    """Return the UI theme color as a hex string."""
    return '#343a40'
def to_seconds(hours, minutes, seconds):
    """Returns the amount of seconds in the given hours, minutes, and seconds."""
    return (hours * 60 + minutes) * 60 + seconds
import os
def is_file(file_name) -> bool:
    """Check whether *file_name* refers to an existing regular file.

    Later used to check if user has pretrained models on disk.
    """
    return os.path.isfile(file_name)
def format_group(ballchasing_group_list):
    """Filter group's informations down to the basic fields.

    :param ballchasing_group_list: raw groups as returned by the API
    :return: basic groups' informations.
    """
    formatted = []
    for group in ballchasing_group_list:
        formatted.append({
            'id': group['id'],
            'name': group['name'],
            # The API link points at api/groups; rewrite it to the
            # human-browsable group page.
            'link': group['link'].replace('api/groups', 'group'),
            'direct_replays': group['direct_replays'],
            'indirect_replays': group['indirect_replays'],
        })
    return formatted
import time
import functools
def timer(function):
    """Print the duration of a function making use of decorators.

    functools.wraps preserves the wrapped function's __name__ and
    docstring, so stacked decorators and debugging keep working.
    """
    @functools.wraps(function)
    def function_(*args, **kwargs):
        """Timed wrapper around the decorated function."""
        ti = time.time()
        result = function(*args, **kwargs)
        dt = time.time() - ti
        print("[TIMER]: " + str(function.__name__) + " took " + str(dt) + " seconds.")
        return result
    return function_
def check_small_primes(n):
    """
    Returns True if n is divisible by a number in SMALL_PRIMES.

    Based on the MPL licensed
    https://github.com/letsencrypt/boulder/blob/58e27c0964a62772e7864e8a12e565ef8a975035/core/good_key.go
    """
    small_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
        53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107,
        109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167,
        173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
        367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
        433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491,
        499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
        577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641,
        643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
        719, 727, 733, 739, 743, 751
    ]
    # any() short-circuits on the first divisor, like the original loop.
    return any(n % prime == 0 for prime in small_primes)
import copy
def modify_tree_with_weights(tree, weights):
    """
    Return a deep copy of *tree* in which each branch named in *weights*
    has its length (`dist`) multiplied by the corresponding factor.

    :param tree: an ete3.Tree object
    :param weights: Dictionary where keys are names of nodes/tips in the tree, and values are weights by which branch lengths will be multiplied
    :return: A new ete3.Tree where branch lengths have been modified.
    :raises AttributeError: when a named branch is missing or ambiguous.
    """
    newtree = copy.deepcopy(tree)
    for node, factor in weights.items():
        # Exactly one branch must carry this name; anything else means the
        # weights refer to a missing or duplicated branch.
        matches = newtree.get_leaves_by_name(node)
        if len(matches) != 1:
            raise AttributeError('The branch {} either could not be found in your tree or was found more than once. '
                                 'Please verify your tree/weights dictionary and try again.'.format(node))
        matches[0].dist *= factor
    return newtree
def rotate_voxel(xmin, ymin, xmax, ymax):
    """
    Rotate a voxel about the origin, in 90-degree clockwise steps, so that
    its center lands in the first quadrant.

    Parameters
    ----------
    xmin, ymin : float
        low point position, mm
    xmax, ymax : float
        high point position, mm

    Returns
    -------
    tuple of float
        properly rotated voxel in the first quadrant
    """
    cx = 0.5 * (xmin + xmax)
    cy = 0.5 * (ymin + ymax)
    if cx >= 0.0:
        if cy >= 0.0:
            return (xmin, ymin, xmax, ymax)   # already in quadrant I
        return (-ymax, xmin, -ymin, xmax)     # quadrant IV: CW 270 rotation
    if cy >= 0.0:
        return (ymin, -xmax, ymax, -xmin)     # quadrant II: CW 90 rotation
    return (-xmax, -ymax, -xmin, -ymin)       # quadrant III: CW 180 rotation
def class_filter(instances, **filters):
    """
    Search for class instances by their attributes.

    Each keyword is an attribute name; string filter values must match
    exactly, other values are treated as containers of accepted values,
    and falsy filter values are ignored.

    :return: (count, matching_instances)
    """
    def _matches(instance):
        # True when the instance satisfies every non-empty filter.
        for attr_name, wanted in filters.items():
            if not wanted:
                continue
            actual = getattr(instance, attr_name)
            if isinstance(wanted, str):
                if actual != wanted:
                    return False
            elif actual not in wanted:
                return False
        return True

    found_instances = [inst for inst in instances if _matches(inst)]
    return len(found_instances), found_instances
from typing import List
def remove_loops(sequence: List[int]) -> List[int]:
    """
    This function takes in a list of numbers, finds any with duplicates, and all longest sequence between duplicates
    :param sequence:
    :return: list of numbers
    """
    # make list copy
    sequence = [item for item in sequence]
    item_set = set(sequence)
    if len(sequence) != len(item_set):
        for item in item_set:
            if sequence.count(item) > 1:
                # Index of the item's last occurrence, counted from the END
                # of the (possibly already shortened) list.
                last_index = sequence[::-1].index(item)
                first_index = sequence.index(item)
                # Delete everything between the first and last occurrence,
                # keeping the first. NOTE(review): when the duplicate is the
                # final element, last_index is 0 and `last_index * -1` is
                # also 0, so the slice [first_index + 1:0] is empty and
                # nothing is removed — confirm whether that is intended.
                del sequence[first_index + 1:last_index * -1]
return sequence | 547a35729fd32b9f444df1f35d50907bfb6f6335 | 47,498 |
import attr
def attrib(*args, **kwargs):
    """Extend the attr.ib to include our metadata elements.

    ATM we support additional keyword args which are then stored within
    `metadata`:

    - `doc` for documentation to describe the attribute (e.g. in --help)

    Also, when the `default` argument of attr.ib is unspecified, set it to
    None.
    """
    metadata = kwargs.get('metadata', {})
    doc = kwargs.pop('doc', None)
    if doc:
        metadata['doc'] = doc
    if metadata:
        kwargs['metadata'] = metadata
    default = kwargs.pop('default', None)
    return attr.ib(*args, default=default, **kwargs)
def pop(self, i=-1):
    """
    Remove the item at the given position in the list, and return it.

    If no index is specified, a.pop() removes and returns the last item in
    the list (the default of -1 makes the signature match this docstring,
    which previously promised optional-index behaviour the code lacked).
    """
    return self.list_output.pop(i)
import uuid
def new_aid() -> str:
    """Create a new, unique ari entry id (32 lowercase hex characters)."""
    generated = uuid.uuid4()
    return generated.hex
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.