content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def update_sheet_from_df(sheet: gspread.Spreadsheet, df: pd.DataFrame) -> None:
    """Overwrite the Google sheet with *df*: header row first, then the data rows."""
    header = df.columns.values.tolist()
    rows = df.values.tolist()
    sheet.update([header] + rows)
def extract(fname: Path):
    """Extract the main information from each path entry.

    Parses a Kubernetes-style swagger/OpenAPI JSON file and yields one
    ``SpecPath`` per usable API path, skipping deprecated watch endpoints
    and paths that do not match ``RE_PATH``.

    :param fname: path to the swagger JSON file.
    :yields: SpecPath entries describing each API path.
    """
    with fname.open() as f:
        sw = json.load(f)
    for path, defi in sw["paths"].items():
        g = RE_PATH.match(path)
        if g is None:
            continue
        path_match = g.groupdict()
        if path_match["watch"]:  # watch apis are deprecated
            continue
        key = ApiKey((path_match["group"] or "").lstrip("/"), path_match["version"], path_match["plural"])
        if not key.plural:
            #print(key)
            continue
        methods = []
        resource = model_schema = None
        tags = set()
        namespaced = path_match["ns"] is not None
        sub_action = path_match["action"].lstrip("/") if path_match["action"] else None
        for method, mdef in defi.items():
            # "parameters" sits alongside the HTTP verbs in a swagger path item.
            if method != "parameters":
                schema = mdef['responses']['200']['schema']
                if '$ref' in schema:
                    model_schema = schema_name(schema['$ref'])
                else:
                    model_schema = Schema(name=schema['type'])
                action = mdef.get('x-kubernetes-action', method)
                if action != 'connect':  # TODO: add support for connect
                    methods.append(action)
                if resource is None:
                    resource = mdef.get("x-kubernetes-group-version-kind")
                tags.update(set(mdef.get('tags', [])))
                if "parameters" in mdef:
                    for parameter in mdef["parameters"]:
                        if parameter["name"] == "watch":
                            methods.append("watch")
                            break
                else:
                    # NOTE(review): iterating ``mdef`` here yields its keys
                    # (strings), so ``parameter["name"]`` would raise TypeError
                    # if this branch ever ran with a dict mdef -- looks dead or
                    # buggy; confirm intent against the swagger schema.
                    for parameter in mdef:
                        if parameter["name"] == "watch":
                            methods.append("watch")
        if resource:
            resource = Resource(**resource)
        else:
            print(path)
        if methods:  # at least one method
            yield SpecPath(
                path=path,
                group_key=key,
                resource=resource,
                methods=methods,
                module=to_snake_case(tags.pop()),
                model_schema=model_schema,
                namespaced=namespaced,
                sub_action=sub_action
            )
def greedy_decode(input_sentence, model, next_symbol=next_symbol, tokenize=tokenize, detokenize=detokenize):
    """Greedy decode function.

    Repeatedly asks the model for the most likely next token, appending it
    to both the running context and the output, until the EOS token (1)
    is produced. Prints the partial summary after every step.

    Args:
        input_sentence (string): a sentence or article.
        model (trax.layers.combinators.Serial): Transformer model.
    Returns:
        string: summary of the input.
    """
    EOS = 1
    # Tokenized input with a padding 0 appended, as the model expects.
    context_tokens = tokenize(input_sentence) + [0]
    summary_tokens = []
    symbol = None
    while symbol != EOS:
        symbol = next_symbol(context_tokens, model)
        context_tokens.append(symbol)
        summary_tokens.append(symbol)
        print(detokenize(summary_tokens))
    return detokenize(summary_tokens)
def get_browser_errors(driver):
    """
    Return the SEVERE-level entries from the driver's browser log.

    Drivers that cannot provide logs yield an empty list instead of raising.

    :param driver: a WebDriver instance
    :return: list of SEVERE log entries (possibly empty)
    """
    try:
        entries = driver.get_log('browser')
    except (ValueError, WebDriverException) as exc:
        # Not every browser/driver implements log retrieval.
        print(f"Could not get browser logs for driver {driver} due to exception: {exc}")
        return []
    severe = []
    for entry in entries:
        if entry['level'] == 'SEVERE':
            severe.append(entry)
    return severe
def get_user(request, user_id):
    """
    Endpoint for profile given a user id.
    :param request: session request.
    :param user_id: id of user.
    :return: 200 - user profile.
             401 - login required.
             404 - user not found.
    """
    try:
        # Renamed from ``get_user`` -- the local previously shadowed this
        # function's own name.
        user = User.objects.get(id=user_id)
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed SystemExit /
        # KeyboardInterrupt); any lookup failure still maps to 404.
        return JsonResponse(
            "Not Found - User does not exist.", status=404, safe=False
        )
    # A matching share code grants access without authentication.
    valid_sc = False
    if user.share_code:
        if request.GET.get("sharecode") == user.share_code:
            valid_sc = True
    if not valid_sc:
        try:
            verify_user_login(request)
        except PermissionDenied:
            return JsonResponse(
                "Unauthorized - Login required.", status=401, safe=False
            )
    response = user.serialize()
    response["graphs"] = get_graphs(user)
    return JsonResponse(response, status=200)
def get_file_ext(url):
    """Return the extension (text after the last '.') of the url/path's filename."""
    _, _, ext = get_filename(url).rpartition('.')
    return ext
def exists(path):
    """Return True if path exists (value can be ''), False otherwise.

    Abstract placeholder: concrete backends must override this method.
    """
    raise NotImplementedError()
def weights_save(model, filename, config):
    """Persist the weights of a Keras model to the checkpoint directory.

    Args:
        model (Keras model instance): model whose weights are saved.
        filename (str): filename for the `.h5` weights file.
        config (Bunch object): JSON configuration providing ``checkpoint_dir``.
    Returns:
        None; writes a `.h5` file holding the weights as a side effect.
    """
    target = os.path.join(config.checkpoint_dir, filename)
    model.save_weights(filepath=target)
    logging.info("The weights have been saved.")
def check_records(msg: dict) -> int:
    """
    Return the number of records in the SQS message, raising unless it is
    exactly one (a ``None`` message counts as zero records).
    """
    records = 0 if msg is None else len(msg[0])
    if records != 1:
        raise ValueError("Not expected single record")
    return records
def CalculateVariables(default_variables, params):
    """Calculate additional variables for use in the build (called by gyp).

    Fills in OS / shared-library defaults in ``default_variables`` based on
    the target flavor; on mac it additionally copies generator configuration
    from the Xcode generator by mutating this module's ``generator_*`` globals.
    """
    flavor = gyp.common.GetFlavor(params)
    if flavor == 'mac':
        default_variables.setdefault('OS', 'mac')
        default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
        default_variables.setdefault('SHARED_LIB_DIR',
                                     generator_default_variables['PRODUCT_DIR'])
        default_variables.setdefault('LIB_DIR',
                                     generator_default_variables['PRODUCT_DIR'])
        # Copy additional generator configuration data from Xcode, which is shared
        # by the Mac Make generator.
        import gyp.generator.xcode as xcode_generator
        global generator_additional_non_configuration_keys
        generator_additional_non_configuration_keys = getattr(xcode_generator,
            'generator_additional_non_configuration_keys', [])
        global generator_additional_path_sections
        generator_additional_path_sections = getattr(xcode_generator,
            'generator_additional_path_sections', [])
        global generator_extra_sources_for_rules
        generator_extra_sources_for_rules = getattr(xcode_generator,
            'generator_extra_sources_for_rules', [])
        # Objective-C(++) sources become compilable on mac.
        COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
    else:
        operating_system = flavor
        if flavor == 'android':
            operating_system = 'linux'  # Keep this legacy behavior for now.
        default_variables.setdefault('OS', operating_system)
        default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
        default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
        default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def unload_modules(name):
    """Remove from ``sys.modules`` every module whose name begins with *name*."""
    doomed = [key for key in sys.modules if key.startswith(name)]
    for key in doomed:
        del sys.modules[key]
def include(*args, **kwargs):
    """
    Used for including Django project settings from multiple files.
    Usage::
        from split_settings.tools import optional, include
        include(
            'components/base.py',
            'components/database.py',
            optional('local_settings.py'),
            scope=globals()  # optional scope
        )
    Parameters:
        *args: File paths (``glob`` - compatible wildcards can be used)
        **kwargs: The context for the settings,
            may contain ``scope=globals()`` or be empty
    Raises:
        IOError: if a required settings file is not found
    """
    if 'scope' not in kwargs:
        # we are getting globals() from previous frame
        # globals - it is caller's globals()
        scope = inspect.stack()[1][0].f_globals
    else:
        scope = kwargs.pop('scope')
    # Track already-executed files so a file included twice runs only once.
    scope.setdefault('__included_files__', [])
    included_files = scope.get('__included_files__')
    # The file currently being included, or the caller's own file
    # (rstrip('c') maps a .pyc path back to the .py source).
    including_file = scope.get(
        '__included_file__',
        scope['__file__'].rstrip('c'),
    )
    conf_path = os.path.dirname(including_file)
    for conf_file in args:
        # Remember the current inclusion context so it can be restored after
        # this (possibly nested) include finishes.
        saved_included_file = scope.get('__included_file__')
        pattern = os.path.join(conf_path, conf_file)
        # find files per pattern, raise an error if not found (unless file is
        # optional)
        files_to_include = glob.glob(pattern)
        if not files_to_include and not isinstance(conf_file, _Optional):
            raise IOError('No such file: {}'.format(pattern))
        for included_file in files_to_include:
            included_file = os.path.abspath(included_file)
            if included_file in included_files:
                continue
            included_files.append(included_file)
            scope['__included_file__'] = included_file
            # Execute the settings file directly inside the caller's scope.
            with open(included_file, 'rb') as to_compile:
                exec(compile(to_compile.read(), included_file, 'exec'), scope)
            # add dummy modules to sys.modules to make runserver autoreload
            # work with settings components
            module_name = '_split_settings.{}'.format(
                conf_file[:conf_file.rfind('.')].replace('/', '.'),
            )
            module = types.ModuleType(str(module_name))
            module.__file__ = included_file
            sys.modules[module_name] = module
        if saved_included_file:
            scope['__included_file__'] = saved_included_file
        elif '__included_file__' in scope:
            del scope['__included_file__']
def incoming(cred):
    """Open a DB connection with PostgreSQL host='localhost' and yield it.

    Arguments:
        cred {[str]} -- [Credentials]
    Yields:
        [obj] -- [DB connection with PostgreSQL host='localhost']
    """
    infile = None  # pre-bind so the finally-clause is safe if connect() raises
    try:
        infile = psycopg2.connect(user='postgres', password=cred[1], host='localhost', port='5432', database='postgres')
    except ConnectionError as incoming_error:
        # NOTE(review): psycopg2.connect raises psycopg2.OperationalError, not
        # the builtin ConnectionError -- this handler likely never fires;
        # confirm the intended exception type.
        exception(incoming_error)
    else:
        yield infile
    finally:
        # BUG FIX: previously ``infile.close()`` ran unconditionally and raised
        # NameError/AttributeError whenever the connection attempt failed.
        if infile is not None:
            infile.close()
def get_processes_from_tags(test):
    """Extract process slugs from a test's tags (those prefixed ``TAG_PROCESS.``)."""
    prefix = '{}.'.format(TAG_PROCESS)
    return {
        tag_name[len(TAG_PROCESS) + 1:]
        for tag_name in getattr(test, 'tags', set())
        if tag_name.startswith(prefix)
    }
def get_best_score(version):
    """
    Given an existing version, retrieves the alias and score of the best score obtained
    :param version: version to be evaluated (str|unicode)
    :return: alias, score (str, float); (None, inf) when no upload history exists
    """
    logger = logging.getLogger(__name__)
    logger.info("Request best submission evaluation for version {0}".format(version))
    from src.common_paths import get_submissions_version_path, get_project_path
    filepath = os.path.join(get_submissions_version_path(version), "upload_history.jl")
    if not os.path.exists(filepath):
        return None, np.Inf
    # upload_history.jl is JSON-lines: one submission record per line.
    with open(filepath) as f:
        lines = f.read().strip().split("\n")
    upload_history = [json.loads(line) for line in lines]
    best = min(upload_history, key=lambda entry: entry["score"])
    alias, score = best["alias"], best["score"]
    logger.info("Best submission found: {0}, {1}, {2}".format(version, alias, score))
    return alias, score
def eight_ball():
    """ Magic eight ball.

    Picks one canned reply uniformly at random.

    :return: A random answer.
    :rtype: str
    """
    answers = (
        'It is certain', 'It is decidedly so', 'Not a fucking chance!', 'without a doubt', 'Yes definitely',
        'I suppose so', 'Maybe', ' No fucking way!', 'Sure :D', 'hahahaha no you plank! :P ', 'Ohhh yes!',
        'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes', 'Signs point to yes',
        'Try again', 'Ask again later', 'Better not tell you now as you may cry like a little girl',
        'Cannot predict now', 'Fucking dead right!', 'Ohhhh most definitely',
        'Concentrate and ask again', 'Don\'t count on it', 'My reply is no', 'My sources say no',
        'Outlook not so good', 'Very doubtful', 'Possibly, but I think you need to chillout!'
    )
    return random.choice(answers)
def head(line, n: int):
    """Pass lines through until `n` lines have been seen, then stop the stream.

    Relies on a module-level ``counter`` global to count lines across calls
    (the cbox pipeline invokes this once per input line).

    :param line: the current input line.
    :param n: maximum number of lines to emit.
    :return: the line unchanged while under the limit.
    :raises cbox.Stop: once more than `n` lines have been seen.
    """
    global counter
    counter += 1
    if counter > n:
        raise cbox.Stop()  # can also raise StopIteration()
    return line
def reconstruction(datafile, **kwargs):
    """
    Reconstructs the image in datafile according to given parameters. The results: image.npy, support.npy, and
    errors.npy are saved in 'saved_dir' defined in kwargs, or if not defined, in the directory of datafile.

    Parameters
    ----------
    datafile : str
        filename of phasing data. Should be either .tif format or .npy
    kwargs : keyword arguments, options listed below
        save_dir              # directory where results of reconstruction are saved as npy files. If not
                              # present, results are saved next to datafile.
        processing            # library used when running reconstruction: 'auto' (best available, cupy then
                              # numpy), 'cp' (cupy) or 'np' (numpy). Default is auto.
        device                # IDs of the target devices. If not defined, defaults to -1 (OS selects device).
        algorithm_sequence    # Mandatory, example: "3* (20*ER + 180*HIO) + 20*ER"
                              # defines the algorithm applied in each iteration. "*" means repeat and "+"
                              # appends to the sequence; single brackets group a repeated subsequence.
                              # Entries 'ER', 'ERpc', 'HIO', 'HIOpc' select the algorithm per iteration
                              # ('...pc' variants use the partial-coherence modulus and require the pcdi
                              # feature to be active, otherwise plain modulus is used).
        hio_beta              # used in hio algorithm
        twin_trigger          # example: [2]. Defines at which iteration to cut half of the array (i.e. multiply by 0s)
        twin_halves           # defines which half of the array is zeroed out in x and y dimensions
                              # (0 zeroes the first half of the dimension, otherwise the second half).
        shrink_wrap_trigger   # example: [1, 1]. Defines when to update the support array.
        shrink_wrap_type      # supporting "GAUSS" only.
        shrink_wrap_threshold # only points with relative intensity greater than the threshold are selected
        shrink_wrap_gauss_sigma  # used to calculate the Gaussian filter
        initial_support_area  # fractional values are multiplied by the data array dimensions; the support
                              # is set to 1s at these dimensions, centered.
        phase_support_trigger # defines when to update the support array by applying a phase constraint.
        phm_phase_min         # points with phase below this value are removed from the support area
        phm_phase_max         # points with phase over this value are removed from the support area
        pc_interval           # iteration interval for updating coherence.
        pc_type               # partial coherence algorithm. 'LUCY' type is supported.
        pc_LUCY_iterations    # number of iterations used in the Lucy algorithm
        pc_LUCY_kernel        # coherence kernel area.
        resolution_trigger    # defines when to apply the low-resolution filter.
        lowpass_filter_sw_sigma_range  # replacement support sigmas during low-resolution iterations,
                              # linspaced from first to last (last defaults to shrink_wrap_gauss_sigma).
        lowpass_filter_range  # Gaussian data-filter dets during low-resolution iterations, linspaced from
                              # first to last (last defaults to 1).
        average_trigger       # when to apply averaging; a negative start is an offset from the last iteration.
        progress_trigger      # when to print the current iteration and error to the console.

    Example of the simplest kwargs parameters:
        algorithm_sequence='1*(20*ER+180*HIO)+20*ER',
        shrink_wrap_trigger=[1, 1],
        twin_trigger=[2],
        progress_trigger=[0, 20]

    Returns
    -------
    nothing
    """
    # Validate the configuration before doing any work.
    error_msg = ver.verify('config_rec', kwargs)
    if len(error_msg) > 0:
        print(error_msg)
        return
    if not os.path.isfile(datafile):
        print('no file found', datafile)
        return
    if 'processing' in kwargs:
        pkg = kwargs['processing']
    else:
        pkg = 'auto'
    if pkg == 'auto':
        # Prefer cupy (GPU) when importable, otherwise fall back to numpy.
        try:
            import cupy as cp
            pkg = 'cp'
        except Exception:
            # narrowed from a bare ``except:``; the import can fail for many
            # reasons (no CUDA driver, missing library), all meaning "use numpy"
            pkg = 'np'
    if pkg == 'cp':
        devlib = importlib.import_module('cohere.lib.cplib').cplib
    elif pkg == 'np':
        print('np')
        devlib = importlib.import_module('cohere.lib.nplib').nplib
    else:
        print('supporting cp and np processing')
        return
    set_lib(devlib, False)
    worker = Rec(kwargs, datafile)
    if 'device' in kwargs:
        # BUG FIX: previously ``kwargs['device'][0]`` was taken here and then
        # indexed AGAIN below (``device[0]``), crashing for any configured
        # device list; keep the whole list to match the default branch.
        device = kwargs['device']
    else:
        device = [-1]
    if worker.init_dev(device[0]) < 0:
        return
    if 'continue_dir' in kwargs:
        continue_dir = kwargs['continue_dir']
    else:
        continue_dir = None
    worker.init(continue_dir)
    ret_code = worker.iterate()
    if 'save_dir' in kwargs:
        save_dir = kwargs['save_dir']
    else:
        save_dir, filename = os.path.split(datafile)
    if ret_code == 0:
        worker.save_res(save_dir)
def onehot(x, numclasses=None):
    """ Convert integer encoding for class-labels (starting with 0 !)
    to one-hot encoding.

    If numclasses (the number of classes) is not provided, it is assumed
    to be equal to the largest class index occurring in the labels-array + 1.
    The output shape is the input shape plus one trailing dimension that
    holds the one-hot encoded labels (as floats).
    """
    if x.shape == ():
        # Promote a 0-d scalar array to a 1-element vector.
        x = x[np.newaxis]
    if numclasses is None:
        numclasses = x.max() + 1
    result = np.zeros(list(x.shape) + [numclasses])
    for label in range(numclasses):
        # Boolean mask broadcasts into the float slice as 0.0 / 1.0.
        result[..., label] = (x == label)
    return result
def draw_graph(image, graph):
    """
    Draw the graph on the image by traversing the graph structure.

    Args:
        | *image* : the image where the graph needs to be drawn
        | *graph* : the *.txt file containing the graph information
    Returns:
        the image with edges and nodes drawn onto it
    """
    with_edges = draw_edges(image, graph)
    # Node radius scales with the larger image dimension, minimum of 1 px.
    node_size = int(numpy.ceil(max(image.shape) / float(NODESIZESCALING)))
    return draw_nodes(with_edges, graph, max(node_size, 1))
def valid_octet (oct):
    """ Validates a single IP address octet.

    Args:
        oct (int): The octet to validate
    Returns:
        bool: True if the octet is in [0, 255], otherwise False
    """
    return 0 <= oct <= 255
def main():
    """
    Execute the program from the command line: parse arguments and hand
    the query/motif files to ``mf_main``.
    """
    description = ("Main program for motif finding. Provide a query file and "
                   "optional motif file(s) with the -m argument")
    parser = argparse.ArgumentParser(prog='motiffinder', description=description)
    parser.add_argument('queries', help="File containing settings and queries")
    parser.add_argument(
        '-m', '--motifs', action='append',
        help=("File containing proteins used to generate motifs."
              "Can specify multiple"))
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + __version__)
    args = parser.parse_args()
    mf_main(args.queries, args.motifs)
def load_clean_dictionaries():
    """
    Load the combilex data into two lookup dictionaries:
    word -> list of pronunciations and pronunciation -> list of words.
    :return: g2p_dict, p2g_dict
    """
    grapheme_dict = {}
    phonetic_dict = {}
    with open(COMBILEX_PATH, encoding='utf-8') as combilex_file:
        for line in combilex_file:
            # Skip commented lines
            if line[0:3] == ';;;':
                continue
            word, phone = line.strip().split('\t')
            if not should_skip_seq(word):
                grapheme_dict.setdefault(word, []).append(phone)
            if not should_skip_seq(phone):
                phonetic_dict.setdefault(phone, []).append(word)
    return grapheme_dict, phonetic_dict
def build_missing_wheels(
    packages_and_envts,
    build_remotely=False,
    with_deps=False,
    dest_dir=THIRDPARTY_DIR,
):
    """
    Build all wheels in a list of tuple (Package, Environment) and save in
    `dest_dir`. Return a list of tuple (Package, Environment) that could not
    be built, and a list of built wheel filenames.
    """
    not_built = []
    built_filenames = []
    packages_and_envts = itertools.groupby(
        sorted(packages_and_envts), key=operator.itemgetter(0))
    for package, pkg_envts in packages_and_envts:
        # Materialize the group immediately: groupby slices are single-use.
        envts = [envt for _pkg, envt in pkg_envts]
        python_versions = sorted(set(e.python_version for e in envts))
        operating_systems = sorted(set(e.operating_system for e in envts))
        built = None
        try:
            built = build_wheels(
                requirements_specifier=package.specifier,
                with_deps=with_deps,
                build_remotely=build_remotely,
                python_versions=python_versions,
                operating_systems=operating_systems,
                verbose=False,
                dest_dir=dest_dir,
            )
            print('.')
        except Exception:
            import traceback
            print('#############################################################')
            print('############# WHEEL BUILD FAILED ######################')
            traceback.print_exc()
            print()
            print('#############################################################')
        if not built:
            # BUG FIX: previously this iterated ``pkg_envts`` -- the groupby
            # slice already exhausted above -- so failures were never recorded.
            # Iterate the materialized environments instead.
            for envt in envts:
                not_built.append((package, envt))
        else:
            for bfn in built:
                print(f'  --> Built wheel: {bfn}')
                built_filenames.append(bfn)
    return not_built, built_filenames
def load_reads(path, format='bed', paired=False, shift=100, name=None):
    """Read sequencing reads from a file.

    Parameters
    ----------
    path : str
        Path to load the reads.
    format : str, optional
        File format, default='bed'.
    paired : bool, optional
        Whether the reads are paired-end or not, default=False.
    shift : int, optional
        Shift size for single-end reads, default=100.
    name : str, optional
        Sample name. If not specified, the basename of the file will be used.

    Returns
    -------
    reads : `Reads`
        Loaded sequencing reads.
    """
    logger.info(f"Loading reads from {path} [{format}]")
    # Format and pairing mode must agree.
    if paired and format == 'bed':
        raise FormatModeConflictError('bed', 'paired-end')
    if not paired and format == 'bedpe':
        raise FormatModeConflictError('bedpe', 'single-end')
    sample_name = name if name is not None else os.path.splitext(os.path.basename(path))[0]
    reads = Reads(name=sample_name)
    parser = get_read_parser(format)(path)
    for chrom, pos in parser.parse(paired=paired, shift=shift):
        reads.add(chrom, pos)
    reads.sort()
    logger.info(f"Loaded {reads.size:,} reads")
    return reads
def sketch_blocks(modulepaths, pkg_dirs):
    """Creates a graph of all the modules in `modulepaths` that are related to each other by their
    imports. The directories used to resolve an import is `pkg_dirs`

    Args:
        modulepaths (List[str]): list of modules filepaths to analyze.
        pkg_dirs (List[str]): list of directories used to resolve the imports
    Returns:
        networkx.Graph: graph of the modules as nodes with their imports as edges.
    """
    attributes = init(pkg_dirs)
    graph = attributes['graph']
    # Sentinel node representing the Python runtime / stdlib.
    Python = 'python'
    # NOTE(review): passing the attribute dict positionally to add_node works
    # only on older networkx APIs -- confirm against the pinned version.
    graph.add_node(Python, attributes[Python])
    for filepath in modulepaths:
        # bug - if the finder is not reinitialized, the previous modules.values()
        # are kept, thus been useless
        finder = modulefinder.ModuleFinder(path=pkg_dirs)
        print('processing:\t', filepath)
        # Calculate complexity and maintainability indexes
        with open(filepath) as source:
            size, color = scientist.get_size_color(source.read(), initsize=80)
        # Insert current module info
        module_info = {'shape':'square', 'name':filepath, 'size':size, 'color':color}
        graph.add_node(filepath, module_info)
        # Find module imports
        finder.run_script(filepath)
        for edge in scientist.compute_edges(filepath, Python, finder.modules.values(),
                                            finder.badmodules.keys()):
            graph.add_edge(*edge)
    return graph
def str_to_bool(string):
    """
    Parse *string* into a boolean: "true" or "yes" in any case maps to
    True, anything else to False.
    """
    return string.lower() in ("true", "yes")
def rerotateExtremaPoints(minSepPoints_x, minSepPoints_y, maxSepPoints_x, maxSepPoints_y,\
    lminSepPoints_x, lminSepPoints_y, lmaxSepPoints_x, lmaxSepPoints_y,\
    Phi, Op, yrealAllRealInds):
    """ Rotate the extrema points from (the projected ellipse centered at the origin
    and x-axis aligned with semi-major axis) to the original projected ellipse

    Args:
        minSepPoints_x, minSepPoints_y (numpy array):
            first quadrant x/y-coordinates of the minimum separations (length n)
        maxSepPoints_x, maxSepPoints_y (numpy array):
            first quadrant x/y-coordinates of the maximum separations (length n)
        lminSepPoints_x, lminSepPoints_y (numpy array):
            first quadrant x/y-coordinates of the local minimum separations
            (same length as yrealAllRealInds)
        lmaxSepPoints_x, lmaxSepPoints_y (numpy array):
            first quadrant x/y-coordinates of the local maximum separations
            (same length as yrealAllRealInds)
        Phi (numpy array):
            angle from X-axis to semi-minor axis of projected ellipse
        Op (numpy array):
            the geometric center of the projected ellipse
        yrealAllRealInds (numpy array):
            indices of planets which have min, max, local min, local max
    Returns:
        tuple of numpy arrays:
            (minSepPoints_x_dr, minSepPoints_y_dr, maxSepPoints_x_dr,
            maxSepPoints_y_dr, lminSepPoints_x_dr, lminSepPoints_y_dr,
            lmaxSepPoints_x_dr, lmaxSepPoints_y_dr) -- the derotated
            counterparts of the input coordinate arrays
    """
    # NOTE: rerotateEllipsePoints returns fresh arrays, so the zero-filled
    # placeholder arrays the original allocated here were dead code and have
    # been removed.
    minSepPoints_x_dr, minSepPoints_y_dr = rerotateEllipsePoints(minSepPoints_x, minSepPoints_y,Phi,Op[0],Op[1])
    maxSepPoints_x_dr, maxSepPoints_y_dr = rerotateEllipsePoints(maxSepPoints_x, maxSepPoints_y,Phi,Op[0],Op[1])
    # Local extrema exist only for the planets indexed by yrealAllRealInds.
    lminSepPoints_x_dr, lminSepPoints_y_dr = rerotateEllipsePoints(lminSepPoints_x, lminSepPoints_y,Phi[yrealAllRealInds],Op[0][yrealAllRealInds],Op[1][yrealAllRealInds])
    lmaxSepPoints_x_dr, lmaxSepPoints_y_dr = rerotateEllipsePoints(lmaxSepPoints_x, lmaxSepPoints_y,Phi[yrealAllRealInds],Op[0][yrealAllRealInds],Op[1][yrealAllRealInds])
    return minSepPoints_x_dr, minSepPoints_y_dr, maxSepPoints_x_dr, maxSepPoints_y_dr,\
        lminSepPoints_x_dr, lminSepPoints_y_dr, lmaxSepPoints_x_dr, lmaxSepPoints_y_dr
def test_approve_transfer(custom_token: Contract, get_accounts: Callable) -> None:
    """ Use the approve() function of the custom token contract """
    (A, B) = get_accounts(2)
    token = custom_token
    # Give A a balance to work with.
    call_and_transact(token.functions.mint(50), {"from": A})
    initial_balance_A = token.functions.balanceOf(A).call()
    initial_balance_B = token.functions.balanceOf(B).call()
    to_transfer = 20
    # A approves B, then B pulls the approved amount via transferFrom.
    call_and_transact(token.functions.approve(B, to_transfer), {"from": A})
    call_and_transact(token.functions.transferFrom(A, B, to_transfer), {"from": B})
    assert token.functions.balanceOf(B).call() == initial_balance_B + to_transfer
    assert token.functions.balanceOf(A).call() == initial_balance_A - to_transfer
    # The full allowance was consumed by the transfer.
    assert custom_token.functions.allowance(_owner=A, _spender=B).call() == 0
    # A fresh approval replaces the (now zero) allowance.
    assert call_and_transact(custom_token.functions.approve(_spender=B, _value=25), {"from": A})
    assert custom_token.functions.allowance(_owner=A, _spender=B).call() == 25
    # Unrelated spenders (the token contract itself) have no allowance.
    assert custom_token.functions.allowance(_owner=A, _spender=token.address).call() == 0
def does_name_exist(name):
    """Check whether any file named ``./photos/<name>.*`` already exists."""
    matches = glob.glob('./photos/' + name + '.*')
    return len(matches) > 0
def cont_hires(npoints, elecs, start_timestamp=0):
    """
    Retrieve hires data (sampled at 2 kHz).

    Thin wrapper that delegates to ``_cont_base`` with the ``xl_cont_hires``
    C entry point. Parameters and outputs are the same as the `cont_raw`
    function.

    Args:
        npoints: number of datapoints to retrieve
        elecs: list of electrodes to sample
        start_timestamp: NIP timestamp to start data at, or most recent if 0
    Returns:
        whatever ``_cont_base`` returns for this query (see `cont_raw`)
    """
    return _cont_base(_c.xl_cont_hires, npoints, elecs, start_timestamp)
def batch_grid_subsampling_kpconv_gpu(points, batches_len, features=None, labels=None, sampleDl=0.1, max_p=0):
    """
    Same as batch_grid_subsampling, but implemented in GPU. This is a hack by using Minkowski
    engine's sparse quantization functions.

    Note: This function is not deterministic and may return subsampled points
    in a different ordering, which will cause the subsequent steps to differ slightly.

    :param points: concatenated point coordinates for all batch items.
    :param batches_len: number of points per batch item (lengths, not offsets).
    :param features: unsupported here; must be None.
    :param labels: unsupported here; must be None.
    :param sampleDl: grid cell size used for the voxel subsampling.
    :param max_p: unsupported here; must be 0 (consider all points).
    :return: (subsampled points, per-batch lengths of the subsampled points).
    """
    if labels is not None or features is not None:
        raise NotImplementedError('subsampling not implemented for features and labels')
    if max_p != 0:
        raise NotImplementedError('subsampling only implemented by considering all points')
    B = len(batches_len)
    # Prefix offsets delimiting each batch item inside the flat points tensor.
    batch_start_end = torch.nn.functional.pad(torch.cumsum(batches_len, 0), (1, 0))
    device = points[0].device
    # Dividing by sampleDl turns coordinates into voxel-grid indices.
    coord_batched = ME.utils.batched_coordinates(
        [points[batch_start_end[b]:batch_start_end[b + 1]] / sampleDl for b in range(B)], device=device)
    # UNWEIGHTED_AVERAGE averages all original points falling in each voxel.
    sparse_tensor = ME.SparseTensor(
        features=points,
        coordinates=coord_batched,
        quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE
    )
    s_points = sparse_tensor.features
    s_len = torch.tensor([f.shape[0] for f in sparse_tensor.decomposed_features], device=device)
    return s_points, s_len
def eval_wili(result_file: str, config: str) -> None:
    """
    CLI function evaluating the classifier on WiLI.

    Parameters
    ----------
    result_file : str
        Path to a file where the results will be stored
    config : str
        Path to a configuration file for the classifier
    """
    # The loaded config is stashed in module globals so the module-level
    # ``predict`` callable (passed below) can see it.
    globals()["config"] = lidtk.utils.load_cfg(config)
    init_nn(globals()["config"])
    lidtk.classifiers.eval_wili(result_file, predict)
def myPrint(fk, gk_norm, orcl, iters, tmk,
            alphak=0, iterLS=0, iterSolver=0, rel_res=0):
    """
    Print one progress row per iteration, re-emitting the column header
    every ``num_every_print * 10`` iterations.
    """
    if iters % (num_every_print * 10) == 0:
        header = (' iters iterSolver iterLS Time f ||g||'
                  ' alphak Prop Relres')
        print(header)
    columns = [
        '%8g %8g' % (iters, iterSolver),
        '%8s %8.2f' % (iterLS, tmk),
        ' %8.2e %8.2e ' % (fk, gk_norm),
        ' %8.2e %8g ' % (alphak, orcl),
        ' %8.2e ' % (rel_res),
    ]
    print(*columns)
def hot_word_detection(lang='en'):
    """
    Hot word (wake word / background listen) detection.

    What is Hot word detection?
    ANSWER: Hot word listens for specific key words chosen to activate the "OK Google" voice interface. ...
    Voice interfaces use speech recognition technologies to allow user input through spoken commands.
    You can set your custom HOT WORD just by calling setup(). Your bot_name is your Hot word

    :param lang: str
        default 'en'
    :return: Bool, str
        status, command -- (True, command) when the hot word was heard,
        (False, False) when speech was recognized without the hot word,
        (False, None) when listening/recognition failed.
    """
    try:
        # The configured bot name doubles as the wake word.
        config = configparser.ConfigParser()
        config.read('config/config.ini')
        bot_name = config['default']['bot_name']
    except Exception as e:
        # NOTE(review): this also fires on a malformed (not just missing)
        # config, yet always raises DefaultFileNotFound -- confirm intent.
        raise DefaultFileNotFound
    try:
        r = sr.Recognizer()
        with sr.Microphone() as source:
            print("Background listening")
            r.pause_threshold = 1
            r.adjust_for_ambient_noise(source, duration=1)
            audio = r.listen(source)
        command = r.recognize_google(audio, language=lang).lower()
        # Wake only when the bot name appears anywhere in the utterance.
        if re.search(bot_name, command):
            print("Waking up...")
            return True, command
        else:
            return False, False
    except Exception:
        # Microphone or recognition failure: signal "nothing usable heard".
        return False, None
def parse_ph5_length(length):
    """
    Parse a length argument into a positive float.

    :param length: length
    :type: str, numeric, or None
    :returns: length value as a float
    :type: float or None
    """
    return str_to_pos_float(length, "Invalid length value. %s" % (length))
def _generate(payload: ModelSpec, is_udf: bool = True):
    """Construct a UDF to run pytorch model.

    Parameters
    ----------
    payload : ModelSpec
        the model specifications object
    is_udf : bool
        when True, wrap the inference function as a Spark pandas UDF with
        pickled (binary) outputs; when False, return the plain generator
        function with unpickled outputs.

    Returns
    -------
    A Spark Pandas UDF (or the bare inference function when ``is_udf`` is False).
    """
    model = payload.model_type
    default_device = "gpu" if torch.cuda.is_available() else "cpu"
    options = payload.options
    use_gpu = options.get("device", default_device) == "gpu"
    num_workers = int(
        options.get("num_workers", min(os.cpu_count(), DEFAULT_NUM_WORKERS))
    )
    batch_size = int(options.get("batch_size", DEFAULT_BATCH_SIZE))
    return_type = Iterator[pd.Series]

    def torch_inference_udf(
        iter: Iterator[pd.DataFrame],
    ) -> return_type:
        # Model loading happens inside the UDF so it runs on the executor.
        device = torch.device("cuda" if use_gpu else "cpu")
        model.load_model(payload, device=device)
        if isinstance(model, AnonymousModelType):
            # We will remove them after AnonymousModelType deprecation
            model.model.eval()
            model.model.to(device)
        try:
            with torch.no_grad():
                for series in iter:
                    dataset = PandasDataset(
                        series,
                        transform=model.transform(),
                        unpickle=is_udf,
                        use_pil=True,
                    )
                    results = []
                    for batch in DataLoader(
                        dataset,
                        batch_size=batch_size,
                        num_workers=num_workers,
                    ):
                        batch = move_tensor_to_device(batch, device)
                        predictions = model(batch)
                        # Spark UDFs must return binary; pickle per prediction.
                        bin_predictions = [
                            _pickler.dumps(p) if is_udf else p
                            for p in predictions
                        ]
                        results.extend(bin_predictions)
                    yield pd.Series(results)
        finally:
            if use_gpu:
                # Free GPU memory held by the model once the stream ends.
                model.release()

    if is_udf:
        return pandas_udf(torch_inference_udf, returnType=BinaryType())
    else:
        return torch_inference_udf
def test_receipt_revalidate_without_receipt_number():
    """Test revalidation process of an invalid receipt. (Receipt without number)

    A receipt that was never assigned a receipt number cannot be looked up
    for revalidation, so ``revalidate()`` is expected to return ``None``.
    """
    factories.PointOfSalesFactory()
    receipt = factories.ReceiptWithVatAndTaxFactory()
    # Reload from the DB so the in-memory instance reflects persisted state.
    receipt.refresh_from_db()
    validation = receipt.revalidate()
    assert validation is None
def check_proper_torsion(
    torsion: Tuple[int, int, int, int], molecule: "Ligand"
) -> bool:
    """
    Return ``True`` when every consecutive pair of atoms in *torsion* is
    bonded in *molecule*, i.e. the torsion follows the molecular graph.
    """
    for first_atom, second_atom in zip(torsion, torsion[1:]):
        try:
            molecule.get_bond_between(
                atom1_index=first_atom, atom2_index=second_atom
            )
        except TopologyMismatch:
            # A missing bond means the torsion does not follow the graph.
            return False
    return True
def find_all_occurrences_and_indexes(seq):
    """Find every recurring pattern in *seq* and the indexes where each occurs.

    Repeatedly calls ``find_occurrences_and_indexes`` until no further
    recurring pattern is found, then collects the leftover single notes
    (patterns that appear only once).

    :param seq: array-like of pretty_midi Note
    :return: ``(list_patterns, list_indexes)`` where ``list_patterns[i]`` is a
             pattern (list of notes) and ``list_indexes[i]`` lists the
             positions at which it occurs
    """
    list_patterns = []
    list_indexes = []
    seq_x = seq
    while True:
        seq_x, res, indexes = find_occurrences_and_indexes(seq_x)
        if res is None:  # no more recurring patterns left in the sequence
            break
        list_patterns.append(res)
        list_indexes.append(indexes)
    # Special case for non recurring patterns: notes that appear only once.
    for i, note in enumerate(seq_x):
        if note is not None:
            list_patterns.append([note])
            list_indexes.append([i])
    return list_patterns, list_indexes
def get_drn(blocks,
            simplified=False,
            model_name=None,
            pretrained=False,
            root=os.path.join('~', '.chainer', 'models'),
            **kwargs):
    """
    Create DRN-C or DRN-D model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    simplified : bool, default False
        Whether to use simplified scheme (D architecture).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # blocks -> (units per stage, whether the D (simplified) scheme is required)
    block_configs = {
        22: ([1, 1, 2, 2, 2, 2, 1, 1], True),
        26: ([1, 1, 2, 2, 2, 2, 1, 1], False),
        38: ([1, 1, 3, 4, 6, 3, 1, 1], True),
        42: ([1, 1, 3, 4, 6, 3, 1, 1], False),
        54: ([1, 1, 3, 4, 6, 3, 1, 1], True),
        58: ([1, 1, 3, 4, 6, 3, 1, 1], False),
        105: ([1, 1, 3, 4, 23, 3, 1, 1], True),
    }
    if blocks not in block_configs:
        raise ValueError("Unsupported DRN with number of blocks: {}".format(blocks))
    layers, requires_simplified = block_configs[blocks]
    if requires_simplified:
        assert simplified
    # Stage widths and bottleneck usage differ between shallow and deep nets.
    if blocks < 50:
        channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512]
        bottlenecks_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
    else:
        channels_per_layers = [16, 32, 256, 512, 1024, 2048, 512, 512]
        bottlenecks_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    if simplified:
        simplifieds_per_layers = [1, 1, 0, 0, 0, 0, 1, 1]
        residuals_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    else:
        simplifieds_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
        residuals_per_layers = [1, 1, 1, 1, 1, 1, 0, 0]
    dilations_per_layers = [1, 1, 1, 1, 2, 4, 2, 1]
    downsample = [0, 1, 1, 1, 0, 0, 0, 0]
    # Hoisted so the import does not re-execute on every expand() call.
    from functools import reduce

    def expand(property_per_layers):
        # Replicate each per-stage value across that stage's units; a stage
        # without downsampling is merged into the previous stage's group.
        return reduce(
            lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
            zip(property_per_layers, layers, downsample),
            [[]])
    channels = expand(channels_per_layers)
    dilations = expand(dilations_per_layers)
    bottlenecks = expand(bottlenecks_per_layers)
    residuals = expand(residuals_per_layers)
    simplifieds = expand(simplifieds_per_layers)
    init_block_channels = channels_per_layers[0]
    net = DRN(
        channels=channels,
        init_block_channels=init_block_channels,
        dilations=dilations,
        bottlenecks=bottlenecks,
        simplifieds=simplifieds,
        residuals=residuals,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def get_uas_volume_admin(volume_id):
    """Get volume info for volume ID

    Get volume info for volume_id
    :param volume_id: identifier of the volume to look up
    :type volume_id: str
    :rtype: AdminVolume
    """
    if volume_id:
        return UasManager().get_volume(volume_id=volume_id)
    # Empty/falsy id: report the usage error instead of querying.
    return "Must provide volume_id to get."
def draw(canvas):
    """
    Draw a ship image several times.

    Each ``canvas.draw_image`` call takes: the image, the center of the
    source rectangle, the size of the source rectangle, the center of the
    destination rectangle, the destination size, and (optionally) a
    rotation in radians.

    :param canvas: simpleguics2pygame.Canvas or simplegui.Canvas
    """
    # Diagonal guide lines across the whole canvas.
    canvas.draw_line((0, 0), (WIDTH - 1, HEIGHT - 1), 1, 'Blue')
    canvas.draw_line((0, HEIGHT - 1), (WIDTH, 0), 1, 'Blue')
    img = loader.get_image('double_ship')
    # The complete image with ship twice
    canvas.draw_image(img,
                      (img.get_width()/2, img.get_height()/2),
                      (img.get_width(), img.get_height()),
                      (img.get_width()/2, img.get_height()/2),
                      (img.get_width(), img.get_height()))
    # The ship without thrust
    canvas.draw_image(img,
                      (img.get_width()/4, img.get_height()/2),
                      (img.get_width()/2, img.get_height()),
                      (img.get_width()/4, img.get_height()*3/2),
                      (img.get_width()/2, img.get_height()))
    # The ship with thrust
    canvas.draw_image(img,
                      (img.get_width()*3/4, img.get_height()/2),
                      (img.get_width()/2, img.get_height()),
                      (img.get_width()*3/4, img.get_height()*3/2),
                      (img.get_width()/2, img.get_height()))
    # The rotated ship without thrust
    canvas.draw_image(img,
                      (img.get_width()/4, img.get_height()/2),
                      (img.get_width()/2, img.get_height()),
                      (img.get_width()/4, img.get_height()*5/2),
                      (img.get_width()/2, img.get_height()),
                      -math.pi/2)
    # The rotated ship with thrust
    canvas.draw_image(img,
                      (img.get_width()*3/4, img.get_height()/2),
                      (img.get_width()/2, img.get_height()),
                      (img.get_width()*3/4, img.get_height()*5/2),
                      (img.get_width()/2, img.get_height()),
                      -math.pi/2)
    # The big ship with thrust
    canvas.draw_image(img,
                      (img.get_width()*3/4, img.get_height()/2),
                      (img.get_width()/2, img.get_height()),
                      (img.get_width()*6/4, img.get_height()*3/2),
                      (img.get_width(), img.get_height()*4))
    # The little ship with thrust
    canvas.draw_image(img,
                      (img.get_width()*3/4, img.get_height()/2),
                      (img.get_width()/2, img.get_height()),
                      (img.get_width()*15/8, img.get_height()/2),
                      (img.get_width()/4, img.get_height()))
    canvas.draw_image(img,
                      (img.get_width()*3/4, img.get_height()/2),
                      (img.get_width()/2, img.get_height()),
                      (img.get_width()*7/4, img.get_height()*11/4),
                      (img.get_width()/2, img.get_height()/2))
    # Logo centered on the canvas.
    canvas.draw_image(logo,
                      (32, 32), (64, 64),
                      (WIDTH/2, HEIGHT/2), (64, 64))
    # Update and draw FPS (if started)
    fps.draw_fct(canvas)
def cyclic_learning_rate(global_step,
                         learning_rate=0.01,
                         max_lr=0.1,
                         step_size=50000.,
                         gamma=0.99994,
                         max_steps=100000.,
                         scale_rate=0.9,
                         mode='triangular',
                         policy=None,
                         name=None):
    """Cyclic learning rate (CLR).
    This method is revised from [TensorFlow pull request: Add support for Cyclic Learning Rate](https://github.com/tensorflow/tensorflow/pull/20758)
    From the paper:
    Smith, Leslie N. "Cyclical learning
    rates for training neural networks." 2017.
    [https://arxiv.org/pdf/1506.01186.pdf]
    This method lets the learning rate cyclically
    vary between reasonable boundary values
    achieving improved classification accuracy and
    often in fewer iterations.
    This code varies the learning rate linearly between the
    minimum (learning_rate) and the maximum (max_lr).
    It returns the cyclic learning rate. It is computed as:
    ```python
    cycle = floor( 1 + global_step / ( 2 * step_size ) )
    x = abs( global_step / step_size – 2 * cycle + 1 )
    clr = learning_rate + ( max_lr – learning_rate ) * max( 0 , 1 - x )
    ```
    Modes:
    'triangular':
    Default, linearly increasing then linearly decreasing the
    learning rate at each cycle.
    'triangular2':
    The same as the triangular policy except the learning
    rate difference is cut in half at the end of each cycle.
    This means the learning rate difference drops after each cycle.
    'exp_range':
    The learning rate varies between the minimum and maximum
    boundaries and each boundary value declines by an exponential
    factor of: gamma^global_step.
    Args:
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
    Global step to use for the cyclic computation. Must not be negative.
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
    Python number. The initial learning rate which is the lower bound
    of the cycle (default = 0.1).
    max_lr: A scalar. The maximum learning rate boundary.
    step_size: A scalar. The number of iterations in half a cycle.
    The paper suggests step_size = 2-8 x training iterations in epoch.
    gamma: constant in 'exp_range' mode:
    gamma**(global_step)
    max_steps: A scalar. The number of total iterations.
    scale_rate: A scale factor for decreasing the learning rate after the completion of one cycle.
    Must be between 0 and 1.
    mode: one of {triangular, triangular2, exp_range}.
    Default 'triangular'.
    Values correspond to policies detailed above.
    policy: one of {None, one-cycle}.
    Default 'None'.
    name: String. Optional name of the operation. Defaults to
    'CyclicLearningRate'.
    Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The cyclic
    learning rate.
    Raises:
    ValueError: if `global_step` is not supplied.
    """
    if global_step is None:
        raise ValueError("global_step is required for cyclic_learning_rate.")
    with ops.name_scope(name, "CyclicLearningRate",
                        [learning_rate, global_step]) as name:
        learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
        dtype = learning_rate.dtype
        # Cast every scalar to the learning-rate dtype so graph ops agree.
        global_step = math_ops.cast(global_step, dtype)
        step_size = math_ops.cast(step_size, dtype)
        max_steps = math_ops.cast(max_steps, dtype)
        def cyclic_lr():
            """Helper to recompute learning rate; most helpful in eager-mode."""
            # computing: cycle = floor( 1 + global_step / ( 2 * step_size ) )
            double_step = math_ops.multiply(2., step_size)
            global_div_double_step = math_ops.divide(global_step, double_step)
            cycle = math_ops.floor(math_ops.add(1., global_div_double_step))
            # computing: x = abs( global_step / step_size – 2 * cycle + 1 )
            double_cycle = math_ops.multiply(2., cycle)
            global_div_step = math_ops.divide(global_step, step_size)
            tmp = math_ops.subtract(global_div_step, double_cycle)
            x = math_ops.abs(math_ops.add(1., tmp))
            # computing: clr = learning_rate + ( max_lr – learning_rate ) * max( 0, 1 - x )
            a1 = math_ops.maximum(0., math_ops.subtract(1., x))
            a2 = math_ops.subtract(max_lr, learning_rate)
            clr = math_ops.multiply(a1, a2)
            if mode == 'triangular2':
                # Halve the amplitude after every completed cycle.
                clr = math_ops.divide(clr, math_ops.cast(math_ops.pow(2, math_ops.cast(
                    cycle-1, tf.int32)), tf.float32))
            if mode == 'exp_range':
                clr = math_ops.multiply(math_ops.pow(gamma, global_step), clr)
            return math_ops.add(clr, learning_rate, name=name)
        def after_cycle():
            """Post-cycle decay used by the 'one_cycle' policy: linearly
            anneal from learning_rate*scale_rate down over the remaining steps."""
            gap = math_ops.subtract(global_step, math_ops.multiply(2., step_size))
            cur_percent = math_ops.divide(gap, math_ops.subtract(max_steps, math_ops.multiply(2., step_size)))
            temp = math_ops.add(1., math_ops.multiply(cur_percent, -0.99))
            next_lr = math_ops.multiply(learning_rate, math_ops.multiply(temp, scale_rate))
            return next_lr
        # 'one_cycle': run the triangular schedule for one full cycle
        # (2 * step_size steps) and then decay via after_cycle().
        if policy == 'one_cycle':
            cyclic_lr = tf.cond(tf.less(global_step, 2*step_size), cyclic_lr , after_cycle)
        else:
            cyclic_lr = cyclic_lr()
        return cyclic_lr
def getElementsOnFirstLevelExceptTag(parent, element):
    """Return all elements below *parent* except for the ones tagged *element*.

    :param parent: the parent dom object
    :param element: the tag-name of elements **not** to return
    """
    excluded = element.lower()
    # Keep only direct children whose tag differs (case-insensitively).
    return [child for child in getElements(parent)
            if child.parentNode == parent and child.tagName.lower() != excluded]
def run_and_schedule(characters, notifications_job):
    """
    Runs a job immediately to avoid having to wait for the delay to end,
    and schedules the job to be run continuously.

    The interval is the caching window divided evenly across the characters,
    so each character is refreshed once per ``notification_caching_timer``
    minutes.

    NOTE(review): an empty *characters* collection would raise
    ZeroDivisionError below — confirm callers always pass at least one.

    :param characters: collection of characters sharing the caching window.
    :param notifications_job: zero-argument callable to run and schedule.
    """
    notifications_job()
    schedule.every(notification_caching_timer/len(characters)).minutes.do(notifications_job)
def show(ctx: Context, output: str, maphandlers: list[str], query: list[str]):
    """Show matching rengu objects"""
    emit = output_handler(output)
    store = storage_handler(ctx.obj["baseuri"])
    results = store.query(query, with_data=True)
    # "list*" output formats print the query results directly; everything
    # else resolves each result to its stored object first.
    wants_raw = output.startswith("list")
    for match in results:
        emit(match if wants_raw else store.get(match))
    if ctx.obj["verbose"]:
        print(results)
def get_first_child_element(node, tag_name):
    """Get the first child element node with a given tag name.

    :param node: Parent node.
    :type node: xml.dom.Node
    :returns: the first child element node with the given tag name.
    :rtype: xml.dom.Node
    :raises NodeNotFoundError:
        if no child node with the given tag name was found.
    """
    match = next(
        (child for child in node.childNodes
         if child.nodeType == node.ELEMENT_NODE and child.tagName == tag_name),
        None)
    if match is None:
        raise NodeNotFoundError('no child element node with tag %s was found' %
                                (tag_name))
    return match
def parse_iso(filename='iso.log'):
    """ parse the isotropy output file
    Args:
        filename: the isotropy output file name
    Returns:
        lname: list of irreps
        lpt: list of atom coordinate
        lpv: list of distortion vectors, might be multi-dimensional
        (returns None when the file cannot be read)
    """
    #read in the isotropy output
    try:
        with open(filename,'r') as f:
            read_data = f.read()
    # NOTE(review): BaseException also swallows KeyboardInterrupt/SystemExit;
    # a plain OSError is probably what was meant — confirm before narrowing.
    except BaseException:
        print('the output of isotropy is required here')
        return
    #parse the isotropy output
    #pt - atom coordinates (kind of weird definition, pt = original reduced coordinate * supercell matrix)
    #pv - distortion vectors
    #lpt, lpv - list of wy, pt, pv
    #lname - name of modes
    #nmode - number of modes
    nmode = 0
    lname = []
    lpt = []
    lpv = []
    # Mode name at line start; "(...)" groups hold coordinates/vectors.
    pattern_name = re.compile(r"^[A-Z0-9\+\-]+(?=\s)")
    pattern_coor = re.compile(r"(?<=\().*?(?=\))")
    pattern_vec = re.compile(r"(?<=\()[0-9,\.\-]*(?=\))")
    for line in read_data.split('\n'):
        if pattern_name.search(line):
            # A new mode begins: flush the previous mode's accumulators.
            if nmode>0:
                lpt.append(pt)
                lpv.append(pv)
            pt = []
            pv = []
            nmode += 1
            lname.append(pattern_name.search(line).group())
        if nmode==0:
            continue
        if re.search(r"Irrep|Enter", line):
            continue
        find = pattern_coor.findall(line)
        find2 = pattern_vec.findall(line)
        # Unequal counts mean the line starts a new atom entry: the first
        # group is the coordinate, the rest are distortion vectors.
        if (len(find)!=len(find2)):
            npv = 0
            for element in find:
                coor = list(map(float, element.split(',')))
                if npv==0:
                    pt.append(coor)
                if npv==1:
                    pv.append([coor])
                if npv>1:
                    pv[-1].append(coor)
                npv += 1
        else:
            # Continuation line: extra vector components for the same atom.
            # NOTE(review): `npv` here carries over from the previous line's
            # loop; if a mode's first data line hits this branch, npv is
            # unbound and this raises NameError — confirm input format makes
            # that impossible.
            for element in find:
                coor = list(map(float, element.split(',')))
                if npv==1:
                    pv.append([coor])
                if npv>1:
                    pv[-1].append(coor)
                npv += 1
    lpt.append(pt)
    lpv.append(pv)
    return lname, lpt, lpv
def revoke_database(cursor: Cursor, user: str, db: str) -> Result:
    """
    Remove any permissions for the user to create, manage and delete this database.
    """
    # Escape literal percent signs so the driver's %-formatting is not confused.
    escaped_db = db.replace("%", "%%")
    statement = _format("REVOKE ALL ON {}.* FROM %s@'%%'", escaped_db)
    outcome = query(cursor, statement, user)
    return Result(_truthy(outcome))
def write_benchmark_csv(file_out, benchmark_result, benchmark, test_config=None):
    """ Writes benchmark to file as csv

    :param file_out: writable text file-like object receiving the CSV rows
    :param benchmark_result: result object exposing name/group/failures and
        optional ``results`` and ``aggregates`` collections
    :param benchmark: benchmark definition (currently unused, kept for
        interface compatibility)
    :param test_config: test configuration; a fresh ``TestConfig`` is created
        when omitted. (The previous ``test_config=TestConfig()`` default was
        evaluated once at import time and shared across every call.)
    """
    if test_config is None:
        test_config = TestConfig()
    writer = csv.writer(file_out)
    writer.writerow(('Benchmark', benchmark_result.name))
    writer.writerow(('Benchmark Group', benchmark_result.group))
    writer.writerow(('Failures', benchmark_result.failures))
    # Write result arrays
    if benchmark_result.results:
        writer.writerow(('Results', ''))
        writer.writerows(metrics_to_tuples(benchmark_result.results))
    if benchmark_result.aggregates:
        writer.writerow(('Aggregates', ''))
        writer.writerows(benchmark_result.aggregates)
def write_image(folder, image):
    """ Writes image to directory

    Converts the array to a SimpleITK image, casts it to unsigned 16-bit
    pixels, and saves it as ``corrupted_image.dcm`` inside *folder*.

    Args:
        param1: path to image folder
        param2: image
    """
    sitk_image = sitk.GetImageFromArray(image)
    # DICOM output expects 16-bit unsigned pixels.
    cast_filter = sitk.CastImageFilter()
    cast_filter.SetOutputPixelType(sitk.sitkUInt16)
    cast_image = cast_filter.Execute(sitk_image)
    sitk.WriteImage(cast_image, folder+"/corrupted_image.dcm")
def get_and_parse(p_inqueue, p_outqueue, p_process, p_counters, p_log_queue, p_log_level, p_formatter, **kwargs):
    """
    Gets doc from an input queue, applies transformation according to p_process function,
    then pushes the so produced new doc into an output queue
    p_process must take a "doc" as a first parameter

    Worker loop: runs until a ``None`` poison pill is read from the queue.
    Timing counters (whole/real/idle process time) are accumulated in the
    shared ``p_counters`` structure and logged every ``log_every`` items.

    @param p_inqueue In queue containing docs to process
    @param p_outqueue Out queue where processed docs are pushed
    @param p_process function taking a doc as an input and returning a list of docs as a result
    @param p_counters dict of shared multiprocessing counters/values
    @param p_log_queue queue used by the multiprocessing-aware logger
    @param p_log_level log level for the worker logger
    @param p_formatter log formatter for the worker logger
    @param kwargs extra keyword arguments forwarded to p_process
    """
    logger = get_logger_mp(__name__, p_log_queue, p_log_level, p_formatter)
    # NOTE(review): `current` is never used below — confirm it can be removed.
    current = current_process()
    start = time.time()
    start_idle = None
    while True:
        try:
            try:
                # Non-blocking get: raises immediately when the queue is empty.
                in_doc = p_inqueue.get(False)
            except Exception:
                # Idle starts with the first exception (queue empty)
                if not start_idle:
                    start_idle = time.time()
            else:
                if start_idle:
                    elapsed_idle = time.time() - start_idle
                else:
                    elapsed_idle = 0
                # Manage poison pill
                if in_doc is None:
                    p_inqueue.task_done()
                    break
                # Call the proc with the arg list (keeping the * means : unwrap the list when calling the function)
                start_p_process = time.time()
                out_doc = p_process(in_doc, **kwargs)
                elapsed_p_process = time.time() - start_p_process
                for doc in out_doc:
                    p_outqueue.put(doc)
                p_inqueue.task_done()
                with p_counters['nb_items_processed'].get_lock():
                    p_counters['nb_items_processed'].value += 1
                    now = time.time()
                    elapsed = now - start
                    p_counters['whole_process_time'].value += elapsed
                    p_counters['real_process_time'].value += elapsed_p_process
                    p_counters['idle_process_time'].value += elapsed_idle
                    nb_items = p_counters['nb_items_processed'].value
                    # Periodic progress/statistics logging.
                    if p_counters['nb_items_processed'].value % p_counters['log_every'] == 0:
                        logger.info("Process : {0} items".format(nb_items))
                        logger.debug(" -> Avg process time : {0}ms".format(1000*p_counters['whole_process_time'].value / nb_items))
                        logger.debug(" -> Avg real time : {0}ms".format(1000*p_counters['real_process_time'].value / nb_items))
                        logger.debug(" -> Avg idle time : {0}ms".format(1000*p_counters['idle_process_time'].value / nb_items))
                        logger.debug("State of queues :")
                        logger.debug(" -> Read : {0}".format(p_inqueue.qsize()))
                        logger.debug(" -> Write : {0}".format(p_outqueue.qsize()))
                    # Start timers reinit
                    start = time.time()
                    start_idle = None
        except TimeoutError:
            # NOTE(review): logger.warn is a deprecated alias of logger.warning.
            logger.warn('Timeout exception while parsing with %s method', p_process)
            with p_counters['nb_items_error'].get_lock():
                p_counters['nb_items_error'].value += 1
        except KeyboardInterrupt:
            logger.info("user interruption")
            sys.exit(0)
def get_ex1():
    """Loads array A for example 1 and its TruncatedSVD with top 10 components

    Uk, Sk, Vk = argmin || A - Uk*diag(Sk)*Vk||
    Over;
        Uk, Sk, Vk
    Where;
        Uk is a Orthonormal Matrix of size (20000, 10)
        Sk is a 10 dimensional non-negative vector
        Vk is a Orthonormal Matrix of size (10, 8000)

    Returns
    -------
    A : numpy.ndarray
        array of size (20000, 8000)
    Uk : numpy.ndarray
        orthonormal array of size (20000, 10)
        Top 10 Left Singular Vectors of `A`
    Sk : numpy.ndarray
        array of size (10, )
        Top 10 Singular Values of `A`
    Vk : numpy.ndarray
        transposed orthonormal array of size (10, 8000)
        Top 10 Right Singular Vectors of `A`

    Raises
    ------
    FileNotFoundError
        when any of the cached .npy files is missing; the original error is
        chained so the missing file's name remains visible.
    """
    try:
        Uk = load_np_file('ex1_Uk.npy')
        Sk = load_np_file('ex1_Sk.npy')
        Vk = load_np_file('ex1_Vk.npy')
        ex1 = _make_a_ex1()
        return ex1, Uk, Sk, Vk
    except FileNotFoundError as exc:
        # Chain the cause instead of discarding it (previously `raise X(...)`
        # with no `from`, which hid which file was actually missing).
        raise FileNotFoundError("A, Uk, Sk, Vk cannot be loaded. Try make_ex1()") from exc
async def to_code(config):
    """Code generation entry point.

    Builds the tion-lt hub from *config* and registers the recirculation
    switch bound to the hub's ``set_recirculation`` setter.
    """
    var = await tion_lt.setup_tion_lt(config)
    await tion.setup_switch(config, CONF_RECIRCULATION, var.set_recirculation, var)
def render_variable(context: 'Context', raw: Any):
    """
    Render the raw input. Does recursion with dict and list inputs, otherwise renders
    string.

    :param context: rendering context forwarded to ``render_string``.
    :param raw: The value to be rendered.
    :return: The rendered value as literal type. ``None`` and values that are
        not str/dict/list are returned unchanged.
    """
    if raw is None:
        return None
    if isinstance(raw, str):
        # Previously the str branch called render_string and discarded the
        # result, then fell through to a trailing return that rendered the
        # same string a second time; render exactly once.
        return render_string(context, raw)
    if isinstance(raw, dict):
        # Render keys as strings and recurse into values.
        return {
            render_string(context, k): render_variable(context, v)
            for k, v in raw.items()
        }
    if isinstance(raw, list):
        return [render_variable(context, v) for v in raw]
    return raw
def resolve_ssh_config(ssh_config_file: str) -> str:
    """
    Resolve ssh configuration file from provided string

    If provided string is empty (`""`) try to resolve system ssh config files located at
    `~/.ssh/config` or `/etc/ssh/ssh_config`.

    Args:
        ssh_config_file: string representation of ssh config file to try to use

    Returns:
        str: string to path fro ssh config file or an empty string

    Raises:
        N/A

    """
    # Probe the explicit path first, then the user config, then the system one.
    candidates = (
        ssh_config_file,
        os.path.expanduser("~/.ssh/config"),
        "/etc/ssh/ssh_config",
    )
    for candidate in candidates:
        candidate_path = Path(candidate)
        if candidate_path.is_file():
            return str(candidate_path)
    return ""
def check_kafka_rest_ready(host, port, service_timeout):
    """Waits for Kafka REST Proxy to be ready.

    Args:
        host: Hostname where Kafka REST Proxy is hosted.
        port: Kafka REST Proxy port.
        service_timeout: Time in secs to wait for the service to be available.

    Returns:
        False, if the timeout expires and Kafka REST Proxy is unreachable, True otherwise.
    """
    # First make sure the TCP endpoint is reachable at all.
    if not wait_for_service(host, port, service_timeout):
        print("%s cannot be reached on port %s." % (str(host), str(port)), file=sys.stderr)
        return False
    # Check if service is responding as expected to basic request
    # Try to get topic list
    # NOTE: this will only test ZK <> REST Proxy interaction
    url = "http://%s:%s/topics" % (host, port)
    response = requests.get(url)
    if response.status_code // 100 == 2:
        return True
    print("Unexpected response with code: %s and content: %s" % (str(response.status_code), str(response.text)), file=sys.stderr)
    return False
def is_p2wpkh_output(cscript: CScript) -> bool:
    """Checks if the output script is of the form:
    OP_0 <pubkey hash>

    :param cscript: Script to be analyzed.
    :type cscript: CScript
    :return: True if the passed in bitcoin CScript is a p2wpkh output script.
    :rtype: bool
    """
    # A p2wpkh script is exactly 22 bytes: OP_0 followed by a 20-byte push.
    return len(cscript) == 22 and cscript[0] == script.OP_0
def check_update_needed(db_table_object, repository_name, pushed_at):
    """
    Returns True if there is a need to clone the github repository

    True when the repository is unknown to the database, or when its
    ``pushed_at`` timestamp is newer than the recorded ``downloaded_at``.
    """
    logger.info(f"This is the repo name from check_update <<{repository_name}>> and db_table <<{db_table_object}>>")
    record = get_single_repository(db_table_object, repository_name)
    logger.info(record)
    if not record:
        logger.info("result not found")
        return True
    logger.info("result found")
    logger.info(f"This is the result {record}")
    # The pushed_at timestamp available in the repo right now.
    epoch = date_parse(pushed_at).timestamp()
    logger.info(f"Comparing {int(epoch)} and {record['downloaded_at']} for {repository_name}")
    return int(epoch) > int(record["downloaded_at"])
def raise_(error: Type[ErrorImitation], name, text):
    """
    Used to raise an ErrorImitation with more flexibility.

    :param error: the ErrorImitation subclass to instantiate and raise.
    :param name: name forwarded to the error's constructor.
    :param text: message text forwarded to the error's constructor.
    """
    error.__raise__(error(name, text))
def return_one(result):
    """return one statement

    Builds the generated source line ``return <result>`` (note the leading
    indentation baked into the literal).

    :param result: expression text placed after the ``return`` keyword.
    :return: the assembled statement string.
    """
    return " return " + result
def inv(a):
    """Return the inverse rotation of *a*, i.e. its negation."""
    inverse_rotation = -a
    return inverse_rotation
def wrapCopy(object):
    """Wrap a copy of the object.

    Serializes *object* to its textual proxy representation and re-evaluates
    it in the ``App.Proxys`` namespace, yielding an independent copy.

    NOTE(review): ``eval`` on the serialized text is only safe while the
    output of ``serialize`` is fully trusted — confirm it can never contain
    attacker-controlled content.
    """
    from App import Proxys
    return eval( serialize(object), Proxys.__dict__ )
def return_edges(paths, config, bidirectional=False):
    """
    Makes graph edges from osm paths

    :param paths: dictionary {osm_way_id: {osmid: x, nodes:[a,b], osmtags: vals}}
    :param config: genet.inputs_handler.osm_reader.Config object
    :param bidirectional: bool value if True, reads all paths as both ways
    :return: list of (edge, tag-dict) tuples
    """
    def attach_tags(data, edge_list):
        # Collect the useful OSM tags for this path; all edges of the path
        # share one tag dict.
        tags = {}
        for tag in (set(config.USEFUL_TAGS_PATH) | {'osmid', 'modes'}) - {'oneway'}:
            if tag in data:
                tags[tag] = data[tag]
        return [(edge, tags) for edge in edge_list]

    # The values OSM uses in its 'oneway' tag to denote True.
    osm_oneway_values = {'yes', 'true', '1', '-1', 'reverse'}
    edges = []
    for data in paths.values():
        tagged_oneway = 'oneway' in data and data['oneway'] in osm_oneway_values
        is_roundabout = 'junction' in data and data['junction'] == 'roundabout'
        if tagged_oneway and not bidirectional:
            # One-way path (unless we're building a walking network, where
            # bidirectional=True is passed in).
            if data['oneway'] in ('-1', 'reverse'):
                # A one-way value of -1/reverse means the way runs opposite
                # to the nodes' order, see OSM documentation.
                data['nodes'] = list(reversed(data['nodes']))
            new_edges = return_edge(data, one_way=True)
        elif is_roundabout and not bidirectional:
            # Roundabouts are one-way even though they are not tagged as such.
            new_edges = return_edge(data, one_way=True)
        else:
            # Not one-way, or a walking network: add the path in both
            # directions.
            new_edges = return_edge(data, one_way=False)
        edges.extend(attach_tags(data, new_edges))
    return edges
def pi_eq_func(ylag,pilag,v,s,slag,alpha,h,b,phi,gamma):
    """ equilibrium value for inflation

    Args:
        ylag (float): lagged output
        pilag (float): lagged inflation
        v (float): demand disturbance
        s (float): supply disturbance
        slag (float): lagged supply disturbance
        alpha (float): sensitivity of demand to real interest rate
        h (float): coefficient on inflation in Taylor rule
        b (float): coefficient on output in Taylor rule
        phi (float): degree of sticiness in inflation expectations
        gamma (float): effect of output on inflation in SRAS

    Returns:
        (float): equilibrium value for inflation
    """
    alpha_h = alpha*h
    # Denominator of the AD/SRAS intersection term.
    denom = alpha*b + alpha*gamma*h + 1
    # Lagged-state and disturbance contribution.
    shock_term = -pilag*alpha_h + alpha*gamma*h*phi*ylag + alpha_h*phi*slag - alpha_h*s + v
    return 1/alpha_h*(v - 1/denom*(alpha*b + 1)*shock_term)
def set_mode(vehicle, mode):
    """
    Set the vehicle's flight modes. 200ms period state validation.

    Args:
        vehicle(dronekit.Vehicle): the vehicle to be controlled.
        mode(str): flight mode string, supported by the firmware.

    Returns:
        bool: True if success, False if failed.
        Failure will set shared.status['abort'].
    """
    util.log_info("Setting %s." % mode)
    shared.status['manual_mode'] = mode
    vehicle.mode = VehicleMode(mode)
    wait_count = 0
    while True:
        # Poll every 200 ms until the autopilot confirms the mode change.
        time.sleep(.2)
        wait_count = wait_count + 1
        if vehicle.mode.name == mode :
            return True
        elif wait_count >= 45:
            # 45 polls x 200 ms = 9 s without confirmation: assume link lost.
            util.log_warning("Unable to set %s. Assume link lost." % mode)
            shared.status['abort'] = True
            return False
        elif wait_count % 15 == 0 :
            # Re-send the mode request every 3 s while still waiting.
            util.log_warning("Retry setting %s" % mode)
            vehicle.mode = VehicleMode(mode)
def apply_parallel(data_frame, num_procs, func, *args, progress_bar=False, backend='loky'):
    """ This function parallelizes applying a function to the rows of a data frame using the
    joblib library. The function is called on each row individually.

    This function is best used when func does not have much overhead compared to
    the row-specific processing. For example, this function is more appropriate than
    apply_parallel_split when all of the processing in func is dependent only on the
    values in the data rows.

    Args:
        data_frame (pandas.DataFrame): A data frame
        num_procs (int): The number of processors to use
        func (function pointer): The function to apply to each row in the data frame
        args (variable number of arguments): The other arguments to pass to func

    Returns:
        list: the values returned from func for each row (in the order specified by
            joblib.Parallel)

    Imports:
        joblib
        tqdm, if progress_bar is True
    """
    import joblib

    if len(data_frame) == 0:
        return []

    row_iter = data_frame.iterrows()
    if progress_bar:
        import tqdm
        # Wrap the row iterator so progress is reported as rows are consumed.
        row_iter = tqdm.tqdm(row_iter, total=len(data_frame),
                             leave=True, file=sys.stdout)

    parallel = joblib.Parallel(n_jobs=num_procs, backend=backend)
    return parallel(joblib.delayed(func)(row[1], *args) for row in row_iter)
def _setdoc(super): # @ReservedAssignment
"""This inherits the docs on the current class. Not really needed for Python 3.5,
due to new behavior of inspect.getdoc, but still doesn't hurt."""
def deco(func):
func.__doc__ = getattr(getattr(super, func.__name__, None), "__doc__", None)
return func
return deco | 36,468 |
def test_module(client: Client) -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.

    :type client: ``Client``
    :param client: client to use
    :return: 'ok' if test passed.
    :rtype: ``str``
    """
    # This should validate all the inputs given in the integration configuration panel,
    # either manually or by using an API that uses them.
    if client.client_credentials:
        # Previously the two adjacent literals concatenated without a space,
        # producing "...auth-testcommand..." in the user-facing message.
        raise DemistoException("When using a self-deployed configuration, run the "
                               "!microsoft-365-defender-auth-test command in order to test the connection")
    test_connection(client)
    return "ok"
def migrate_v8_to_v9(metadata, data, *args):  # pylint: disable=unused-argument
    """Migration of archive files from v0.8 to v0.9.

    :param metadata: archive metadata dict; its version is verified to be
        0.8 and then bumped in place to 0.9.
    :param data: archive data dict, migrated in place.
    """
    old_version = '0.8'
    new_version = '0.9'
    verify_metadata_version(metadata, old_version)
    update_metadata(metadata, new_version)
    # Apply migrations
    migration_dbgroup_type_string(data)
def warns(message, category=None):
    """Decorator factory that emits a warning on every call of the wrapped
    function.

    :param message: warning text to emit
    :param category: warning class; ``None`` means the default ``UserWarning``
    :return: a decorator that preserves the wrapped function's metadata
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # stacklevel=2 attributes the warning to the caller, not here.
            warnings.warn(message, category, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def handle_exceptions(func) -> object:
    """
    This is needed since pytube current version is
    quite unstable and can raise some unexpected errors.

    Wraps *func* so known pytube/network failures are reported through the
    UI log instead of crashing the caller; the wrapper returns ``None``
    when an error was handled.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyError:
            window.s_append('An error with the cipher has ocurred. '
                            'See documentation in GitHub to resolve: '
                            'https://github.com/f4ll-py/ytdownloader.')
        except pytube.exceptions.RegexMatchError:
            window.s_append('Could not find any YouTube videos with that URL.')
        except urllib.error.HTTPError:
            window.s_append('This video is not available. Try again later.')
        except PermissionError:
            window.s_append('Permission denied for the current path.')
    return wrapper
def templated_sequence_component(location_descriptor_tpm3):
    """Create test fixture for templated sequence component.

    :param location_descriptor_tpm3: fixture providing the TPM3 region
        location descriptor that is serialized into the component.
    :return: a ``TemplatedSequenceComponent`` on the forward (+) strand.
    """
    params = {
        "component_type": "templated_sequence",
        "region": location_descriptor_tpm3.dict(exclude_none=True),
        "strand": "+"
    }
    return TemplatedSequenceComponent(**params)
def _SetupOutputDir():
    """Setup output directory.

    Removes any stale |OUTPUT_DIR| left from a previous run before
    recreating it, so every report starts from a clean tree.
    """
    if os.path.exists(OUTPUT_DIR):
        shutil.rmtree(OUTPUT_DIR)
    # Creates |OUTPUT_DIR| and its platform sub-directory.
    os.makedirs(coverage_utils.GetCoverageReportRootDirPath(OUTPUT_DIR))
def joint_probability(people, one_gene, two_genes, have_trait):
    """
    Compute and return a joint probability.

    The probability returned should be the probability that
        * everyone in set `one_gene` has one copy of the gene, and
        * everyone in set `two_genes` has two copies of the gene, and
        * everyone not in `one_gene` or `two_gene` does not have the gene, and
        * everyone in set `have_trait` has the trait, and
        * everyone not in set` have_trait` does not have the trait.
    """
    joint_p = 1
    # zero_genes = set(people.keys()) - two_genes - one_gene
    for person in people:
        # Calculate probability to have the genes of interest
        this_genes = get_nbr_genes(person, one_gene, two_genes)
        if people[person]['mother'] is None: # Assumes both parents info, or nothing
            # Unconditional (population) gene distribution.
            gene_prob = PROBS['gene'][this_genes]
        else: # If there is parent's info
            # Probability each parent passes the gene to this person.
            prob_mother = get_parent_prob(people[person]['mother'], one_gene, two_genes)
            prob_father = get_parent_prob(people[person]['father'], one_gene, two_genes)
            if this_genes == 0:
                gene_prob = (1 - prob_mother) * (1 - prob_father) # None can transmit
            elif this_genes == 1:
                gene_prob = (1 - prob_mother) * prob_father + prob_mother * (1 - prob_father) # Two possibilities
            else:
                gene_prob = prob_father * prob_mother # Both need to transmit
        # Calculate probability to have trait, given genes of interest
        trait = get_trait(person, have_trait) # Trait for this person
        trait_prob = PROBS['trait'][this_genes][trait]
        joint_p *= gene_prob * trait_prob # Accumulates joint probability of all people
    return joint_p
def _is_child_path(path, parent_path, link_name=None):
    """Return True when ``path`` lies at or below ``parent_path``."""
    b_child = to_bytes(path, errors='surrogate_or_strict')
    if link_name and not os.path.isabs(b_child):
        # ``path`` is the target of a relative symlink; resolve it against
        # the directory that contains the link itself.
        b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
        b_child = os.path.abspath(os.path.join(b_link_dir, b_child))
    b_parent = to_bytes(parent_path, errors='surrogate_or_strict')
    if b_child == b_parent:
        return True
    # Append a separator so "/foo/barbaz" is not treated as inside "/foo/bar".
    return b_child.startswith(b_parent + to_bytes(os.path.sep))
def data_to_mindrecord_byte_image(image_files, image_anno_dict, dst_dir, prefix="cptn_mlt.mindrecord", file_num=1):
    """Serialize images and their annotations into a MindRecord file."""
    writer = FileWriter(os.path.join(dst_dir, prefix), file_num)
    # Each record holds the raw image bytes plus an (n, 5) int32 annotation.
    schema = {
        "image": {"type": "bytes"},
        "annotation": {"type": "int32", "shape": [-1, 5]},
    }
    writer.add_schema(schema, "ctpn_json")
    for image_name in image_files:
        with open(image_name, 'rb') as image_file:
            image_bytes = image_file.read()
        annotation = np.array(image_anno_dict[image_name], dtype=np.int32)
        print("img name is {}, anno is {}".format(image_name, annotation))
        writer.write_raw_data([{"image": image_bytes, "annotation": annotation}])
    writer.commit()
def main():
    """Mirror a gcode file about the Y axis by negating every X coordinate.

    Reads the input filename from the command line and writes the mirrored
    output to ``<name>-mirrored<ext>``.
    """
    parser = argparse.ArgumentParser(
        prog="mirror-gcode.py",
        usage="%(prog)s [options] gcode-file",
        description="Mirrors a gcode file in the X-axis about the origin"
    )
    parser.add_argument(
        "-d", "--debug",
        dest="debug",
        action="store_true",
        help="Turn on debugging",
        default=False
    )
    parser.add_argument(
        "gcode_filename",
        help="Name of the gcode file to mirror",
    )
    args = parser.parse_args(sys.argv[1:])
    if args.debug:
        print('debug =', args.debug)
        print('gcode_filename =', args.gcode_filename)
    gcode_root, gcode_ext = os.path.splitext(args.gcode_filename)
    mirrored_filename = gcode_root + '-mirrored' + gcode_ext
    print('mirrored_filename =', mirrored_filename)
    # BUG FIX: the files must be opened in text mode. The original used
    # 'rb'/'wb', but str.replace() with str arguments and
    # print(..., file=...) operate on str and raise TypeError on bytes.
    with open(args.gcode_filename, 'r') as gcode_file:
        with open(mirrored_filename, 'w') as mirrored_file:
            for line in gcode_file:
                line = line.strip()
                if args.debug:
                    print('Line:', line)
                # Flip the sign of every X coordinate, using '%' as a
                # temporary placeholder: X-n -> Xn and Xn -> X-n.
                line = line.replace('X-', '%')
                line = line.replace('X', 'X-')
                line = line.replace('%', 'X')
                print(line, file=mirrored_file)
def zip(folder_name, files_to_be_zipped):
    """Zip ``files_to_be_zipped`` into ``<folder_name>.zip``.

    Every archived file except ``.xlsm`` workbooks is deleted from disk
    after being added to the archive.

    Args:
        folder_name: Archive name without the ``.zip`` extension.
        files_to_be_zipped: A path, or a list of paths, to archive.
    """
    zip_file_name = folder_name + '.zip'
    # Accept a single bare path as well as a list of paths.
    if not isinstance(files_to_be_zipped, list):
        files_to_be_zipped = [files_to_be_zipped]
    with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zip_folder:
        for item in files_to_be_zipped:
            zip_folder.write(item)
            # Keep .xlsm workbooks on disk; remove everything else.
            # (The original re.findall(".xlsm", ...) left the dot
            # unescaped, so e.g. "axlsm" also matched.)
            if not item.endswith(".xlsm"):
                os.remove(item)
    # NOTE: the explicit ZipFile.close() inside the ``with`` block was
    # removed -- the context manager already closes the archive.
def rf_predict_img_win(win_arr, trained_classifier, prob=True):
    """Run a trained classifier over a single image window.

    Args:
        win_arr (numpy.arr): Window in rasterio band order (channels, y, x).
            The last band is a segment layer and is dropped before prediction.
        trained_classifier (sklearn.model): Fitted sklearn estimator.
        prob (bool, optional): If True, return the probability of the
            positive class; otherwise return hard class labels.
            Defaults to True.

    Returns:
        numpy.arr: (y, x) array of predictions, zeroed on no-data pixels
        (pixels whose retained bands are all zero).
    """
    n_bands, height, width = win_arr.shape
    # Flatten to (pixels, bands) and drop the trailing segment band.
    features = np.transpose(win_arr.reshape(n_bands, -1))
    keep_bands = [band for band in range(n_bands) if band != n_bands - 1]
    features = features[:, keep_bands]
    # Pixels with all-zero bands are treated as no-data.
    valid_mask = np.any(features, axis=1).astype("uint8")
    if prob:
        # Keep only the positive (forest) class probability column.
        predictions = trained_classifier.predict_proba(features)[:, 1:]
    else:
        predictions = trained_classifier.predict(features)
    # Restore image geometry.
    predictions = predictions.reshape(height, width)
    valid_mask = valid_mask.reshape(height, width)
    # Suppress predictions on no-data pixels.
    return predictions * valid_mask
def get_schema(passed_schema: 'Schema' = None, _cached_schema: Dict[str, Schema] = {}) -> 'Schema':
    """Return ``passed_schema`` if provided; otherwise return the default
    schema, loading and caching it on first use.

    ``_cached_schema`` deliberately uses a mutable default argument as a
    per-process cache -- do not pass it explicitly.
    """
    if passed_schema:
        return passed_schema
    if not _cached_schema:
        local_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "reference_files/schema.csv")
        try:
            # Prefer the schema file bundled with the package.
            _cached_schema['schema'] = Schema(schema_file=local_path)
        except IOError:
            try:
                # Fall back to fetching the schema over the network.
                _cached_schema['schema'] = Schema()
            except (HTTPError, URLError):
                raise ValueError("Could not load a BMRB schema from the internet or from the local repository.")
    return _cached_schema['schema']
def print_ops():
    """Print the name of every operation in the frozen model's graph."""
    operations = graph.get_operations()
    for operation in operations:
        print(operation.name)
def run(context, id_, **kwargs):
    """Run test steps."""
    # Create the Run and attach it to the shared context object.
    new_run = tmt.Run(id_, context.obj.tree, context=context)
    context.obj.run = new_run
def ShowIPC(cmd_args=None):
    """ Routine to print data for the given IPC space
        Usage: showipc <address of ipc space>
    """
    # Python 2 debugger-macro code (note the print statements).
    # No address supplied: show usage and bail out.
    if not cmd_args:
        print "No arguments passed"
        print ShowIPC.__doc__
        return False
    # Interpret the first argument as a pointer to an ipc_space struct.
    ipc = kern.GetValueFromAddress(cmd_args[0], 'ipc_space *')
    if not ipc:
        print "unknown arguments:", str(cmd_args)
        return False
    # Print the column header, then the space's summary row with the two
    # optional detail flags disabled.
    print PrintIPCInformation.header
    PrintIPCInformation(ipc, False, False)
def dqn(env, n_episodes=1001, max_t=1000 * FRAME_SKIP, eps_start=1.0,
        eps_end=0.001, eps_decay=0.995, solution_threshold=13.0,
        checkpointfn='checkpoint.pth', load_checkpoint=False,
        reload_every=None):
    """Function that uses Deep Q Networks to learn environments.

    Parameters
    ----------
    env:
        Unity ML-Agents style environment (exposes ``brain_names``,
        ``brains``, ``reset`` and ``step``).
    n_episodes: int
        maximum number of training episodes
    max_t: int
        maximum number of timesteps per episode
    eps_start: float
        starting value of epsilon, for epsilon-greedy action selection
    eps_end: float
        minimum value of epsilon
    eps_decay: float
        multiplicative factor (per episode) for decreasing epsilon
    solution_threshold: float
        100-episode average score at which the environment counts as solved
    checkpointfn: str
        path used to save/load the agent checkpoint
    load_checkpoint: bool
        whether to resume from an existing checkpoint
    reload_every: int or None
        if set, close the environment and restart the process every
        ``reload_every`` episodes

    Returns
    -------
    Agent
        the (partially) trained agent
    """
    # Unity environments expose one or more "brains"; train the first one.
    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]
    env_info = env.reset(train_mode=True)[brain_name]
    action_size = brain.vector_action_space_size
    state = env_info.vector_observations[0]
    state_size = len(state)
    # An empty vector observation means the environment is visual-only;
    # derive a stacked-frame state shape from the first frame instead.
    # NOTE(review): when state_size != 0, ``use_visual`` and
    # ``initial_state`` are never assigned and would raise NameError
    # below -- confirm this function is only called on visual environments.
    if state_size == 0:
        use_visual = True
        initial_state = get_state(env_info, use_visual)
        state_size = list(initial_state.shape)
        state_size.insert(2, STACK_SIZE)
        state_size = tuple(state_size)
    if load_checkpoint:
        try:
            agent = Agent.load(checkpointfn, use_visual)
        except Exception:
            # Fall back to a fresh agent when the checkpoint is unusable.
            logging.exception('Failed to load checkpoint. Ignoring...')
            agent = Agent(state_size, action_size, 0, use_visual)
    else:
        agent = Agent(state_size, action_size, 0, use_visual)
    # Resume the epsilon decay schedule from the checkpointed episode count.
    if agent.episode:
        eps = (eps_start * eps_decay) ** agent.episode
    else:
        eps = eps_start
    for i_episode in range(agent.episode, n_episodes):
        # Each episode starts with a deque filled from the initial frame.
        state_deque = reset_deque(initial_state)
        env_info = env.reset(train_mode=True)[brain_name]
        state = get_state(env_info, use_visual)
        state_deque.append(state)
        score = 0
        for t in range(max_t):
            # Stack recent frames into one channels-first state tensor.
            state = np.stack(state_deque, axis=-1) \
                .squeeze(axis=0).transpose(0, -1, 1, 2)
            action = agent.act(state, eps)
            env_info = env.step(action)[brain_name]
            next_state = get_state(env_info, use_visual)
            state_deque.append(next_state)
            next_state = np.stack(state_deque, axis=-1) \
                .squeeze(axis=0).transpose(0, -1, 1, 2)
            reward = env_info.rewards[0]
            done = env_info.local_done[0]
            # Record the transition (the agent learns from replay inside).
            agent.step(
                state,
                action,
                reward,
                next_state,
                done,
            )
            score += reward
            if done:
                break
        agent.scores.append(score)
        eps = max(eps_end, eps_decay*eps)  # decrease epsilon
        agent.episode += 1
        logging.debug(
            'Episode {}\tAverage Score: {:.2f}\tCurrent Score: {:.2f}\tEpsilon: {:.4f}'
            .format(i_episode, np.mean(agent.scores[-100:]), score, eps)
        )
        # Periodically report progress and checkpoint the agent.
        if (i_episode + 1) % 100 == 0:
            logging.info(
                'Episode {}\tAverage Score: {:.2f}'
                .format(i_episode, np.mean(agent.scores[-100:]))
            )
            logging.info(
                'Saving checkpoint file...'
            )
            agent.save(checkpointfn)
        # Stop once the rolling 100-episode average clears the threshold.
        if np.mean(agent.scores[-100:]) >= solution_threshold:
            logging.info(
                'Environment solved in {:d} episodes!'
                .format(i_episode - 99)
            )
            logging.info(
                'Saving checkpoint file at %s', checkpointfn
            )
            agent.save(checkpointfn)
            break
        # Optionally restart the whole process -- presumably to work around
        # environment instability/leaks; confirm against the caller.
        if reload_every and i_episode and (i_episode + 1) % reload_every == 0:
            env.close()
            reload_process()
    return agent
def get_mesh_faces(edge_array):
    """
    Build the triangle faces of a mesh from its edge array.

    For every edge, each vertex opposite to it closes a triangle; triangles
    are returned as sorted index triples, each reported once.

    Args:
        edge_array: Iterable of (index1, index2) vertex index pairs.

    Returns:
        list: Unique triangles as sorted [index1, index2, index3] lists,
        in first-seen order.
    """
    triangles = []
    seen = set()  # O(1) duplicate check instead of scanning the list
    neighbours = neibs_from_edges(edge_array)
    for edge in edge_array:
        for vert in get_opposite_verts(neighbours, edge):
            triangle = sorted([edge[0], edge[1], vert])
            key = tuple(triangle)
            if key not in seen:
                seen.add(key)
                triangles.append(triangle)
    return triangles
def lc_list(aws_config=None):
    """Print all launch configurations, or exit if none exist.

    @type aws_config: Config
    """
    asg = get_client(client_type='autoscaling', config=aws_config)
    launch_configs = asg.describe_launch_configurations().get('LaunchConfigurations')
    # Guard clause: nothing to show.
    if not launch_configs:
        exit("No launch configurations were found.")
    output_lc_list(lc_list=launch_configs)
def test_getitem(preston):
    """
    Verify that items can be fetched by index instead of by attribute.
    """
    bloodlines = preston.bloodlines()
    first_item = bloodlines.items[0]
    assert first_item.race()
def _make_attribution_from_nodes(mol: Mol, nodes: np.ndarray,
                                 global_vec: np.ndarray) -> GraphsTuple:
    """Makes an attribution from node information."""
    # Edge connectivity comes straight from the molecule's bonds.
    senders, receivers = _get_mol_sender_receivers(mol)
    return graph_nets.utils_np.data_dicts_to_graphs_tuple([{
        'nodes': nodes.astype(np.float32),
        'senders': senders,
        'receivers': receivers,
        'globals': global_vec.astype(np.float32),
    }])
def test_upload_call_error(emitter, store_mock, config, tmp_path):
    """Simple upload but with a response indicating an error."""
    # Store reports a failed upload carrying two errors.
    errors = [
        Error(message="text 1", code="missing-stuff"),
        Error(message="other long error text", code="broken"),
    ]
    store_mock.upload.return_value = Uploaded(
        ok=False, status=400, revision=None, errors=errors)
    # Build a minimal charm file to feed the command.
    test_charm = tmp_path / "mystuff.charm"
    _build_zip_with_yaml(test_charm, "metadata.yaml", content={"name": "mycharm"})
    args = Namespace(filepath=test_charm, release=[], name=None)
    retcode = UploadCommand(config).run(args)
    assert retcode == 1
    assert store_mock.mock_calls == [call.upload("mycharm", test_charm)]
    # Each error is reported on its own line after the status header.
    emitter.assert_messages([
        "Upload failed with status 400:",
        "- missing-stuff: text 1",
        "- broken: other long error text",
    ])
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))
    # Subrepo state changes always show up as changes to .hgsubstate, so
    # only that file needs to be examined per changeset.
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
    def submatches(names):
        # Yield the subrepo names that match the user-supplied pattern.
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name
    def matches(x):
        c = repo[x]
        # Status of .hgsubstate between the changeset and its first parent.
        s = repo.status(c.p1().node(), c.node(), match=m)
        if pat is None:
            # No pattern supplied: any subrepo change qualifies.
            return s.added or s.modified or s.removed
        if s.added:
            return any(submatches(c.substate.keys()))
        if s.modified:
            # .hgsubstate changed; check whether a *matching* subrepo entry
            # actually differs between parent and changeset.
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())
            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True
        if s.removed:
            return any(submatches(c.p1().substate.keys()))
        return False
    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
def format_output(item, show_url=False):
    """ takes a voat post and returns a formatted string """
    # Use the link description as a fallback title; truncate either way.
    if item["Title"]:
        item["Title"] = formatting.truncate(item["Title"], 70)
    else:
        item["Title"] = formatting.truncate(item["Linkdescription"], 70)
    item["link"] = voat_fill_url.format(item["Subverse"], item["Id"])
    raw_time = isodate.parse_date(item['Date'])
    item["timesince"] = timeformat.time_since(raw_time, count=1, simple=True)
    item["comments"] = formatting.pluralize(item["CommentCount"], 'comment')
    item["points"] = formatting.pluralize(item["Likes"], 'point')
    # Flag link submissions (Type 2) with a bold "Link" marker.
    item["warning"] = " \x02Link\x02" if item["Type"] == 2 else ""
    if show_url:
        return "\x02{Title} : {Subverse}\x02 - {comments}, {points}" \
               " - \x02{Name}\x02 {timesince} ago - {link}{warning}".format(**item)
    return "\x02{Title} : {Subverse}\x02 - {comments}, {points}" \
           " - \x02{Name}\x02, {timesince} ago{warning}".format(**item)
def get_compiled_table_name(engine, schema, table_name):
    """Return the table name quoted exactly as SQLAlchemy would when
    querying the table.

    Args:
        engine (sqlalchemy.engine.Engine):
        schema (str, optional): The schema name for the table
        table_name (str): The name of the table
    Returns:
        str: The compiled table name
    Examples:
        >>> from sqlalchemy import create_engine
        >>> get_compiled_table_name(create_engine('greenplum://u:p@s'), 'a_schema', 'a_table') == six.text_type('a_schema.a_table')
        True
        >>> get_compiled_table_name(create_engine('greenplum://u:p@s'), 'a_schema-1', 'a_table-1') == six.text_type('"a_schema-1"."a_table-1"')
        True
        >>> get_compiled_table_name(create_engine('greenplum://u:p@s'), None, 'a_table-1') == six.text_type('"a_table-1"')
        True
        >>> get_compiled_table_name(create_engine('greenplum://u:p@s'), '', 'a_table-1') == six.text_type('"a_table-1"')
        True
    """
    # Build a throwaway Table and let the dialect's own preparer quote it.
    table = sqlalchemy.Table(table_name, sqlalchemy.MetaData(), schema=schema)
    preparer = engine.dialect.identifier_preparer
    return preparer.format_table(table)
def build(filepath):
    """Returns the window with the popup content."""
    message = sg.Text(
        text=MESSAGE_TEXT + filepath,
        font=MESSAGE_FONT,
        text_color=colors.BLACK,
        background_color=colors.BACKGROUND,
        justification='c',
        pad=(10, None)  # adds space between l/r borders.
    )
    # The button key is unused, hence the empty string.
    done_button = button.build(BUTTON_TEXT, '', BUTTON_FONT, BUTTON_SIZE)
    layout = [
        [titlebar.build()],
        [heading.build(HEADING_TITLE)],
        [fillers.horizontal_filler(2, colors.BACKGROUND)],
        [message],
        # Spacer between the message and the button.
        [fillers.horizontal_filler(1, colors.BACKGROUND)],
        [done_button],
        [fillers.horizontal_filler(2, colors.BACKGROUND)],
    ]
    return sg.Window(
        title='',
        no_titlebar=True,
        keep_on_top=True,
        layout=layout,
        element_justification='c'
    )
def format_float(digit=0, is_pct=False):
    """
    Build a float-formatting function for pandas display.

    Args:
        digit: number of digits to keep
            if negative, add one space in front of positive pct
        is_pct: % display
    Returns:
        lambda function to format floats
    Examples:
        >>> format_float(0)(1e5)
        '100,000'
        >>> format_float(1)(1e5)
        '100,000.0'
        >>> format_float(-1, True)(.2)
        ' 20.0%'
        >>> format_float(-1, True)(-.2)
        '-20.0%'
        >>> pd.options.display.float_format = format_float(2)
    """
    if is_pct:
        # A leading space aligns positive values with negative ones.
        space = ' ' if digit < 0 else ''
        pct_fmt = '{:%s.%d%%}' % (space, abs(int(digit)))

        def fmt_pct(value):
            return 'NaN' if np.isnan(value) else pct_fmt.format(value)
        return fmt_pct

    num_fmt = f'{{:,.{digit}f}}'
    zero_repr = '-' + ' ' * abs(digit)  # zeros render as a padded dash

    def fmt_num(value):
        if np.isnan(value):
            return 'NaN'
        return num_fmt.format(value) if value else zero_repr
    return fmt_num
def updateTable(cur,tablename,types=None,mode="results",verbose=True):
    """
    Create a table if it does not exist, or update the table if it does.

    Also maintains a ``__tables__`` registry table (name, description,
    number of entries) and adds any columns from ``types`` that the
    existing table is missing.

    :param cur: open sqlite cursor
    :param tablename: table to create/update
    :param types: dict mapping column name -> SQL type; mutated for
        mode "results" (reserved column names are popped)
    :param mode: "results", "regtest" or "regtest_runs" -- selects the
        CREATE TABLE layout used when the table does not yet exist
    :param verbose: print progress messages when True

    NOTE(review): table and column names are interpolated directly into
    SQL strings; callers must not pass untrusted identifiers.
    """
    # If the table does not exist, create it
    cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
    tables = [r[0] for r in cur.fetchall()]
    if "__tables__" not in tables:
        cur.execute('CREATE TABLE __tables__ (NAME UNIQUE, Description VARCHAR(8000), NumEntries INT);')
    else:
        #
        # Backwards compatibility - update __tables__ in case NumEntries is not a field
        #
        cur.execute('PRAGMA table_info(__tables__);')
        columns = [d[1] for d in cur.fetchall()]
        if "NumEntries" not in columns:
            cur.execute('ALTER TABLE __tables__ ADD NumEntries INT;')
    if tablename not in tables:
        if mode == "results":
            if types:
                # Reserved columns are always created explicitly below, so
                # drop them from the user-supplied type map.
                if 'HASH' in types.keys(): types.pop('HASH')
                if 'DIR' in types.keys(): types.pop('DIR')
                if 'Description' in types.keys(): types.pop('Description')
                if 'Tags' in types.keys(): types.pop('Tags')
                cur.execute('CREATE TABLE "{}" ('.format(tablename) +
                            'HASH VARCHAR(255) UNIQUE, ' +
                            'DIR VARCHAR(255), ' +
                            'Description VARCHAR(8000),' +
                            'Tags VARCHAR(1000)' +
                            (',' if len(types) else '') +
                            ','.join(['"'+key+'" '+types[key] for key in sorted(types)]) +
                            ');')
            else:
                cur.execute('CREATE TABLE "{}" ('.format(tablename) +
                            'HASH VARCHAR(255) UNIQUE, ' +
                            'DIR VARCHAR(255), ' +
                            'Description VARCHAR(8000),' +
                            'Tags VARCHAR(1000));')
        elif mode == "regtest":
            cur.execute('CREATE TABLE regtest ('
                        'HASH VARCHAR(255) UNIQUE ' +
                        (',' if len(types) else '') +
                        ','.join([key+' '+types[key] for key in sorted(types)]) +
                        ');')
        elif mode == "regtest_runs":
            cur.execute('CREATE TABLE regtest_runs ('
                        'RUN VARCHAR(255) UNIQUE ' +
                        (',' if len(types) else '') +
                        ','.join([key+' '+types[key] for key in sorted(types)]) +
                        ');')
        if (verbose): print('\033[1;32mADDED TABLE\033[1;0m: ' + tablename)
    else:
        if (verbose): print('\033[1;34mUSING TABLE\033[1;0m: ' + tablename)
    # Register the table in __tables__ if it is not there yet.
    cur.execute('SELECT * FROM __tables__ WHERE NAME = "{}";'.format(tablename))
    if (len(cur.fetchall()) == 0): cur.execute('INSERT INTO __tables__ (NAME, Description) VALUES ("{}", \"Description\");'.format(tablename))
    # If the table exists, but new columns have been added, modify the table
    # accordingly
    sqlstring = 'PRAGMA table_info("{}")'.format(tablename)
    if verbose: print(sqlstring)
    cur.execute(sqlstring)
    columns=[a[1] for a in cur.fetchall()]
    if types:
        for key in types:
            if key not in columns:
                cur.execute('ALTER TABLE "{}" ADD "{}" {}'.format(tablename,key,types[key]))
                if (verbose): print('\033[1;34mADDED COLUMN\033[1;0m: ' + key + ' to ' + tablename)
    # Finally, update __tables__ with # of records
    cur.execute('SELECT * FROM "{}"'.format(tablename));
    cur.execute('UPDATE __tables__ SET NumEntries = {} WHERE NAME = "{}"'.format(len(cur.fetchall()),tablename))
def get_overexpressed_genes(
        matrix: ExpMatrix, cell_labels: pd.Series,
        exp_thresh: float = 0.05, ignore_outliers: bool = True,
        num_genes: int = 20) -> pd.DataFrame:
    """Determine most over-expressed genes for each cluster.

    Parameters
    ----------
    matrix : ExpMatrix
        Gene-by-cell expression matrix.
    cell_labels : pd.Series
        Cluster label per cell, indexed by cell.
    exp_thresh : float
        Floor applied to cluster mean expression before computing fold
        changes, to avoid blow-ups on barely expressed genes.
    ignore_outliers : bool
        If True, cells labeled 'Outliers' are excluded.
    num_genes : int
        Number of top genes reported per cluster.

    Returns
    -------
    pd.DataFrame
        MultiIndexed by (cluster, gene) with 'Fold change' and
        'Mean expression (TP10K)' columns.
    """
    # make sure matrix and cell_labels are aligned
    matrix = matrix.loc[:, cell_labels.index]
    if ignore_outliers:
        # ignore the cluster named "Outliers", if it exists
        sel = (cell_labels != 'Outliers')
        matrix = matrix.loc[:, sel]
        cell_labels = cell_labels.loc[sel]
    _LOGGER.info('Ignoring mean expression values below %.3f', exp_thresh)
    data = []
    # scale matrix
    matrix = matrix.scale()
    # determine fold-changes for all clusters
    vc = cell_labels.value_counts()
    clusters = vc.index.tolist()
    # Mean expression per cluster (genes x clusters).
    X = np.zeros((len(clusters), matrix.num_genes), dtype=np.float32)
    cluster_mean = ExpMatrix(genes=matrix.genes, cells=clusters, data=X.T)
    for l in clusters:
        sel = (cell_labels == l)
        cluster_mean.loc[:, l] = matrix.loc[:, sel].mean(axis=1)
    # in calculation of fold change,
    # ignore all expression values below exp_thresh
    thresh_cluster_mean = cluster_mean.copy()
    thresh_cluster_mean[thresh_cluster_mean < exp_thresh] = exp_thresh
    # calculate fold change relative to average of other clusters
    X = np.ones((len(clusters), matrix.num_genes), dtype=np.float32)
    fold_change = ExpMatrix(genes=matrix.genes, cells=clusters, data=X.T)
    for l in clusters:
        # Compare this cluster against the mean of all other clusters.
        sel = (thresh_cluster_mean.cells != l)
        fold_change.loc[:, l] = thresh_cluster_mean.loc[:, l] / \
            (thresh_cluster_mean.loc[:, sel].mean(axis=1))
    markers = []
    for l in clusters:
        # Top num_genes genes for this cluster, ranked by fold change.
        change = fold_change.loc[:, l].sort_values(ascending=False)
        change = change[:num_genes]
        # scale mean expression values to 10K transcripts
        mean = cluster_mean.loc[change.index, l]
        mean = (10000 / cluster_mean.loc[:, l].sum()) * mean
        cluster_index = [l] * num_genes
        gene_index = change.index
        index = pd.MultiIndex.from_arrays(
            [cluster_index, gene_index], names=['cluster', 'gene'])
        data = np.c_[change.values, mean.values]
        markers.append(
            pd.DataFrame(
                index=index,
                columns=['Fold change', 'Mean expression (TP10K)'],
                data=data))
    markers = pd.concat(markers, axis=0)
    #markers = markers.swaplevel(0, 1).sort_index(
    #    level=1, sort_remaining=False).swaplevel(0, 1)
    return markers
def printMenu():
    """
    Prints the menu
    """
    # Emit the whole menu in a single write; output is identical to
    # printing each line separately.
    menu_lines = [
        "",
        "Main Menu",
        "*********************",
        "",
        "Please select an option:",
        "",
        "1 - Run search",
        "2 - Export results",
        "3 - Export result archive",
        "4 - View all settings",
        "5 - Update API key",
        "6 - Update Search Engine ID",
        "7 - Change query mode",
        "8 - Change number of runs",
        "9 - Update indicators",
        "10 - Export duplications",
        "",
        "*********************",
        "0 - Exit application",
    ]
    print("\n".join(menu_lines))
def stream_fasta(fastafile):
    """
    Stream a fasta file, one read at a time. Saves memory!
    Originally part of the roblib library.

    :param fastafile: The fasta file to stream (.gz and .lrz are handled)
    :type fastafile: str
    :return: A single read as an (id, sequence) tuple
    :rtype: str, str
    """
    try:
        if fastafile.endswith('.gz'):
            f = gzip.open(fastafile, 'rt')
        elif fastafile.endswith('.lrz'):
            # NOTE(review): this pipe yields bytes, not str -- the parsing
            # below assumes str; confirm .lrz inputs are actually exercised.
            f = subprocess.Popen(['/usr/bin/lrunzip', '-q', '-d', '-f', '-o-', fastafile], stdout=subprocess.PIPE).stdout
        else:
            f = open(fastafile, 'r')
    except IOError as e:
        # BUG FIX: the original also printed ``e.message``, which does not
        # exist on Python 3 exceptions and raised AttributeError here.
        sys.stderr.write(str(e) + "\n")
        sys.exit("Unable to open file " + fastafile)
    # BUG FIX: the original rewound the stream with tell()/seek() after each
    # record, which fails on non-seekable streams (e.g. the lrunzip pipe).
    # A single forward pass over the lines needs no seeking.
    seqid = None
    seq_parts = []
    for line in f:
        line = line.strip()
        if line.startswith('>'):
            if seqid is not None:
                yield seqid, "".join(seq_parts)
            # Strip only the leading '>' marker from the header.
            seqid = line.replace('>', '', 1)
            seq_parts = []
        else:
            if seqid is None:
                sys.exit("Do not have a fasta file at: {}".format(line))
            seq_parts.append(line)
    # Emit the final record.
    if seqid is not None:
        yield seqid, "".join(seq_parts)
    f.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.