import socket
import os
import logging
def inference(cluster_info, qname='input'):
"""
Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.
"""
def _inference(it):
# get shared queue, reconnecting if necessary
mgr = _get_manager(cluster_info, socket.gethostname(), os.getppid())
queue_in = mgr.get_queue(qname)
logging.info("Feeding partition {0} into {1} queue {2}".format(it, qname, queue_in))
count = 0
for item in it:
count += 1
queue_in.put(item, block=True)
# wait for consumers to finish processing all items in queue before "finishing" this iterator
queue_in.join()
logging.info("Processed {0} items in partition".format(count))
# read result queue
results = []
queue_out = mgr.get_queue('output')
while count > 0:
result = queue_out.get(block=True)
results.append(result)
count -= 1
queue_out.task_done()
logging.info("Finished processing partition")
return results
return _inference | b7c207cbd17ecdaf7f141dec749ce95e2b599ac7 | 21,800 |
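A minimal usage sketch, assuming a TensorFlowOnSpark-style setup where `cluster_info` comes from the cluster startup and `data_rdd` is an existing Spark RDD (both names are assumptions, not shown above):

results_rdd = data_rdd.mapPartitions(inference(cluster_info, qname='input'))
results = results_rdd.collect()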
def is_array_of(obj, classinfo):
"""
Check if obj is a list, tuple, or set whose elements are all instances of classinfo.
A bare instance of classinfo (e.g. a single string when classinfo is str) returns False.
:param obj: an object
:param classinfo: type of class (or subclass). See the isinstance() built-in function for more info
:return: flag: True or False
"""
flag = False
if isinstance(obj, classinfo):
pass
elif all(isinstance(item, classinfo) for item in obj):
flag = True
return flag | 5fecce974b5424cff7d5e6a4a9f9bd1482e10e85 | 21,801 |
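A few illustrative calls, showing the guard against bare instances:

assert is_array_of([1, 2, 3], int) is True        # list of ints
assert is_array_of((1.0, 2.0), float) is True     # tuple of floats
assert is_array_of("abc", str) is False           # a bare string is not a collection of strings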
from atom.models import MODELS
def create_acronym(fullname):
"""Create an acronym for an estimator.
The acronym consists of the capital letters in the name, provided
there are at least two and the lowercased result does not clash with
an existing acronym in MODELS. Otherwise, the entire name is used.
Parameters
----------
fullname: str
Estimator's __name__.
Returns
-------
str
Created acronym.
"""
acronym = "".join([c for c in fullname if c.isupper()])
if len(acronym) < 2 or acronym.lower() in MODELS:
return fullname
else:
return acronym | 8343fc670080634b1b9b556122cddb509ee36e72 | 21,802 |
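A quick illustration, assuming "rfc" is not already a registered acronym in MODELS:

create_acronym("RandomForestClassifier")  # -> "RFC" (three capitals, no clash assumed)
create_acronym("Bagging")                 # -> "Bagging" (only one capital letter)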
import uuid
from collections import defaultdict
def transact_update_path(path):
"""Build the TransactWriteItems inputs that update a learning path in DynamoDB."""
# note: transact_write_items is a client API (not a resource API)
def update_path(path):
"""Build the Update input for the learning path item itself."""
input = defaultdict(
dict,
TableName="primary_table",
Key={"PK": {"S": path.PK}, "SK": {"S": path.PK}},
UpdateExpression="SET updatedAt=:date, updatedUser=:user",
ExpressionAttributeValues={
":date": {"S": timestamp_jst()},
":user": {"S": path.user},
},
)
if path.name is not None:
input["UpdateExpression"] += ", #name=:name"
# name is reserved
input["ExpressionAttributeNames"]["#name"] = "name"
input["ExpressionAttributeValues"][":name"] = {"S": path.name}
if path.description is not None:
input["UpdateExpression"] += ", description=:description"
input["ExpressionAttributeValues"][":description"] = {"S": path.description}
if path.note is not None:
input["UpdateExpression"] += ", note=:note"
input["ExpressionAttributeValues"][":note"] = {"S": path.note}
if path.invalid is not None:
input["UpdateExpression"] += ", invalid=:invalid"
input["ExpressionAttributeValues"][":invalid"] = {"BOOL": path.invalid}
return {"Update": input}
def update_path_to_video(appended, path_id, user):
"""Build Update inputs adding this path ID to each appended video's metadata."""
def get_videos(appended):
for uri in appended:
video_id = uri.split("/")[2]
video = get_video_from_db(video_id)
yield video
def generate_input(video, path_id, user):
path_ids = video.get("learningPathIds", [])
# DynamoDB string sets cannot be empty, so an empty marker "" may have been stored;
# the value may also come back as a set, so convert it to a list explicitly
path_ids = list(path_ids)
if path_ids and not path_ids[0]:
path_ids.remove("")
path_ids.append(path_id)
input = dict(
TableName="primary_table",
Key={"PK": {"S": video["PK"]}, "SK": {"S": video["PK"]}},
UpdateExpression="SET updatedAt=:date, updatedUser=:user"
+ ", learningPathIds=:paths",
ExpressionAttributeValues={
":date": {"S": timestamp_jst()},
":user": {"S": user},
":paths": {"SS": path_ids},
},
)
return {"Update": input}
it_videos = get_videos(appended)
it_inputs = (generate_input(video, path_id, user) for video in it_videos)
return it_inputs
def remove_path_from_video(removed, path_id, user):
"""Build Update inputs removing this path ID from each removed video's metadata."""
def get_videos(removed):
for uri in removed:
video_id = uri.split("/")[2]
video = get_video_from_db(video_id)
yield video
def generate_input(video, path_id, user):
path_ids = video.get("learningPathIds", [])
path_ids = list(path_ids)
if path_id in path_ids:
path_ids.remove(path_id)
# escape empty
if len(path_ids) <= 0:
path_ids = [""]
input = dict(
TableName="primary_table",
Key={"PK": {"S": video["PK"]}, "SK": {"S": video["PK"]}},
UpdateExpression="SET updatedAt=:date, updatedUser=:user"
+ ", learningPathIds=:paths",
ExpressionAttributeValues={
":date": {"S": timestamp_jst()},
":user": {"S": user},
":paths": {"SS": path_ids},
},
)
return {"Update": input}
it_videos = get_videos(removed)
it_inputs = (generate_input(video, path_id, user) for video in it_videos)
return it_inputs
def update_video_order(orders, path_id):
"""Build Put/Update inputs that append or update video playback orders."""
def get_orders(orders, path_id):
for order in orders:
req_order = {"PK": path_id, "uri": order.uri}
current_order = get_order(req_order=ReqOrder(**req_order))
if current_order:
yield True, order
else:
yield False, order
def generate_input(order_with_update_or_append, path_id):
is_update, order = order_with_update_or_append
if is_update:
input = dict(
TableName="primary_table",
Key={"PK": {"S": path_id}, "SK": {"S": order.uri}},
UpdateExpression="SET #order=:order",
# order is reserved
ExpressionAttributeNames={"#order": "order"},
ExpressionAttributeValues={":order": {"N": str(order.order)}},
)
return {"Update": input}
else:
input = dict(
PK={"S": path_id},
SK={"S": order.uri},
indexKey={"S": "Video"},
createdAt={"S": str(uuid.uuid1())[:8]},
order={"N": str(order.order)},
)
item = {"TableName": "primary_table", "Item": input}
return {"Put": item}
it_orders = get_orders(orders, path_id)
it_inputs = (generate_input(order, path_id) for order in it_orders)
return it_inputs
def remove_video_order(removed, path_id):
"""Build Delete inputs that remove video playback orders."""
inputs = [
dict(
TableName="primary_table", Key={"PK": {"S": path_id}, "SK": {"S": uri}}
)
for uri in removed
]
orders = [{"Delete": input} for input in inputs]
return orders
transact_items = []
# update the video list (learning path) metadata
transact_items.append(update_path(path))
# when videos were added to the list, add the list ID to each video's metadata
transact_items.extend(
update_path_to_video(appended=path.appended, path_id=path.PK, user=path.user)
)
# when videos were removed from the list, remove the list ID from each video's metadata
transact_items.extend(
remove_path_from_video(removed=path.removed, path_id=path.PK, user=path.user)
)
# delete the playback-order items of videos removed from the list
transact_items.extend(remove_video_order(removed=path.removed, path_id=path.PK))
# update the playback order of the video list
transact_items.extend(update_video_order(orders=path.orders, path_id=path.PK))
return transact_items | 5cecf9ffe8ad4acf83b4ec353abaaa3c964fdb0b | 21,803 |
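A sketch of how the returned items might be submitted with boto3; the client setup and the `path` object here are assumptions, not shown in the original module:

import boto3

dynamodb = boto3.client("dynamodb")  # hypothetical client setup
items = transact_update_path(path)   # `path` is the module's path model object (assumed)
# TransactWriteItems accepts at most 100 items per call (25 before late 2022),
# so very large playlists may need batching.
dynamodb.transact_write_items(TransactItems=items)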
from collections import Counter
def codon_usage(seq, aminoacid):
"""Provides the frequency of each codon encoding a given aminoacid in a DNA sequence"""
tmpList = []
for i in range(0, len(seq) - 2, 3):
if DNA_Codons[seq[i:i + 3]] == aminoacid:
tmpList.append(seq[i:i + 3])
freqDict = dict(Counter(tmpList))
totalWeight = sum(freqDict.values())
for codon in freqDict:
freqDict[codon] = round(freqDict[codon] / totalWeight, 2)
return freqDict | 9e271e9c68ebd1860f3897d5a63919bf5bd5f0bf | 21,804 |
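A worked example with a tiny stand-in codon table; the module's full DNA_Codons dict is assumed, not shown:

DNA_Codons = {"TTT": "F", "TTC": "F", "ATG": "M"}  # minimal stand-in table (assumption)
codon_usage("TTTTTCTTT", "F")  # -> {'TTT': 0.67, 'TTC': 0.33}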
from textwrap import dedent
def make_check_stderr_message(stderr, line, reason):
"""
Create an exception message to use inside check_stderr().
"""
return dedent("""\
{reason}:
Caused by line: {line!r}
Complete stderr: {stderr}
""").format(stderr=stderr, line=line, reason=reason) | a6510e8036ab27e6386e6bc8e6c33727849282c0 | 21,805 |
import numpy
def diffusion_step(matrix, row_damping=0, column_damping=0):
"""
Return the diffusion adjacency matrix produced by the input matrix
with the specified row and column normalization exponents.
Note: the row normalization is performed second, so if a value
of row_damping=1 is used, the output will be a row-stochastic
matrix regardless of choice of column normalization. Matrix will
not be modified in place.
Parameters
==========
matrix : numpy.ndarray
adjacency matrix for a given metaedge, where the source nodes are
rows and the target nodes are columns
row_damping : int or float
exponent to use in scaling each node's row by its in-degree
column_damping : int or float
exponent to use in scaling each node's column by its column-sum
Returns
=======
numpy.ndarray
Normalized matrix with dtype numpy.float64.
"""
# returns a newly allocated array
matrix = copy_array(matrix)
# Perform column normalization
if column_damping != 0:
column_sums = numpy.array(matrix.sum(axis=0)).flatten()
matrix = normalize(matrix, column_sums, 'columns', column_damping)
# Perform row normalization
if row_damping != 0:
row_sums = numpy.array(matrix.sum(axis=1)).flatten()
matrix = normalize(matrix, row_sums, 'rows', row_damping)
return matrix | f6636b0e4557ffad0253284d914f4d662695055e | 21,806 |
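`copy_array` and `normalize` are module helpers not shown here; a minimal sketch of `normalize`, consistent with the call sites above (an assumption, not the original implementation):

def normalize(matrix, vector, axis, damping_exponent):
    # scale rows or columns by vector ** -damping_exponent,
    # mapping zero sums to zero to avoid division by zero
    with numpy.errstate(divide='ignore'):
        vector = vector.astype(numpy.float64) ** -damping_exponent
    vector[numpy.isinf(vector)] = 0
    if axis == 'rows':
        return matrix * vector[:, None]
    return matrix * vector[None, :]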
def verb_context(filtertype, aidcfg, verbose):
""" closure helper """
class VerbosityContext(object):
"""
Printing filter info in a way that avoids polluting the function
namespace. This is a hack.
This is a with-statement context class that expects a variable avail_aids
to be modified inside the context. It prints the state of the variable
before and after filtering. Several static methods can be used
at the start and end of larger filtering functions.
"""
def __init__(self, *keys, **filterextra):
self.prefix = ut.get_var_from_stack('prefix', verbose=False)
if verbose:
dictkw = dict(nl=False, explicit=True, nobraces=True)
infostr = ''
if len(keys) > 0:
subdict = ut.dict_subset(aidcfg, keys, None)
infostr += '' + ut.dict_str(subdict, **dictkw)
print('[%s] * Filter by %s' % (
self.prefix.upper(), infostr.strip()))
if verbose > 1 and len(filterextra) > 0:
infostr2 = ut.dict_str(filterextra, nl=False, explicit=False)
print('[%s] %s' % (
self.prefix.upper(), infostr2))
def __enter__(self):
aids = ut.get_var_from_stack('avail_aids', verbose=False)
self.num_before = len(aids)
def __exit__(self, exc_type, exc_value, exc_traceback):
if verbose:
aids = ut.get_var_from_stack('avail_aids', verbose=False)
num_after = len(aids)
num_removed = self.num_before - num_after
if num_removed > 0 or verbose > 1:
print('[%s] ... removed %d annots. %d remain' %
(self.prefix.upper(), num_removed, num_after))
@staticmethod
def report_annot_stats(ibs, aids, prefix, name_suffix, statskw={}):
if verbose > 1:
with ut.Indenter('[%s] ' % (prefix.upper(),)):
# TODO: helpx on statskw
#statskw = dict(per_name_vpedge=None, per_name=None)
dict_name = prefix + 'aid_stats' + name_suffix
#hashid, per_name, per_qual, per_vp, per_name_vpedge,
#per_image, min_name_hourdist
ibs.print_annot_stats(aids, prefix=prefix, label=dict_name,
**statskw)
#def report_annotconfig_stats(ref_aids, aids):
# with ut.Indenter(' '):
# ibs.print_annotconfig_stats(ref_aids, avail_aids)
@staticmethod
def startfilter(withpre=True):
"""
Args:
withpre (bool): if True reports stats before filtering
"""
if verbose:
prefix = ut.get_var_from_stack('prefix', verbose=False)
print('[%s] * [%s] %sAIDS' % (prefix.upper(), filtertype,
prefix))
if verbose > 1 and withpre:
ibs = ut.get_var_from_stack('ibs', verbose=False)
aids = ut.get_var_from_stack('avail_aids', verbose=False)
VerbosityContext.report_annot_stats(ibs, aids, prefix,
'_pre')
@staticmethod
def endfilter(withpost=True):
if verbose:
ibs = ut.get_var_from_stack('ibs', verbose=False)
aids = ut.get_var_from_stack('avail_aids', verbose=False)
prefix = ut.get_var_from_stack('prefix', verbose=False)
hashid = ibs.get_annot_hashid_semantic_uuid(
aids, prefix=prefix.upper())
if withpost:
if verbose > 1:
VerbosityContext.report_annot_stats(ibs, aids, prefix,
'_post')
print('[%s] * HAHID: %s' % (prefix.upper(), hashid))
print('[%s] * [%s]: len(avail_%saids) = %r\n' % (
prefix.upper(), filtertype, prefix, len(aids)))
return VerbosityContext | 8869cde39ff77b8314806c6ecc30a94ae97ab2dd | 21,807 |
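A hypothetical usage sketch: the context reads `prefix`, `ibs`, and `avail_aids` from the caller's stack frame, so those exact variable names must exist where it is used (`passes_quality` below is a made-up predicate):

def filter_by_quality(ibs, avail_aids, aidcfg, verbose=1):
    prefix = 'q'  # read from the stack by the context -- the name matters
    VerbosityContext = verb_context('filter', aidcfg, verbose)
    VerbosityContext.startfilter()
    with VerbosityContext('minqual'):
        avail_aids = [aid for aid in avail_aids if passes_quality(aid)]
    VerbosityContext.endfilter()
    return avail_aids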
def get_add_many_columns_function(row_function, data_types):
"""Returns a function which adds several columns to a row based on given row function"""
def add_many_columns(row):
result = row_function(row)
data = []
for i, data_type in enumerate(data_types):
try:
value = result[i]
except TypeError as e:
raise RuntimeError("UDF returned non-indexable value. Provided schema indicated an indexable return type") from e
except IndexError as e:
raise RuntimeError("UDF return value did not match the number of items in the provided schema") from e
cast_value = valid_data_types.cast(value, data_type)
data.append(numpy_to_bson_friendly(cast_value))
# return json.dumps(data, cls=NumpyJSONEncoder)
return data
# return bson.binary.Binary(bson.BSON.encode({"array": data}))
return add_many_columns | 72bb0edae6ddd109beae118f691fc387b6bfdce7 | 21,808 |
import numpy as np
def determine_peaks(spectrum, peak='both', amp_threshold=None):
"""Find peaks in a spectrum.
Parameters
----------
spectrum : numpy.ndarray
Array of the data values of the spectrum.
peak : 'both' (default), 'positive', 'negative'
Whether to search for positive peaks, negative peaks, or both.
amp_threshold : float
Required minimum threshold that at least one data point in a peak feature has to exceed.
Returns
-------
consecutive_channels or amp_vals : numpy.ndarray
If the 'amp_threshold' value is supplied an array with the maximum data values of the ranges is returned. Otherwise, the number of spectral channels of the ranges is returned.
ranges : list
List of intervals [(low, upp), ...] determined to contain peaks.
"""
if (peak == 'both') or (peak == 'positive'):
clipped_spectrum = spectrum.clip(max=0)
# Create an array that is 1 where a is 0, and pad each end with an extra 0.
iszero = np.concatenate(
([0], np.equal(clipped_spectrum, 0).view(np.int8), [0]))
absdiff = np.abs(np.diff(iszero))
# Runs start and end where absdiff is 1.
ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
if (peak == 'both') or (peak == 'negative'):
clipped_spectrum = spectrum.clip(min=0)
# Create an array that is 1 where a is 0, and pad each end with an extra 0.
iszero = np.concatenate(
([0], np.equal(clipped_spectrum, 0).view(np.int8), [0]))
absdiff = np.abs(np.diff(iszero))
if peak == 'both':
# Runs start and end where absdiff is 1.
ranges = np.append(
ranges, np.where(absdiff == 1)[0].reshape(-1, 2), axis=0)
else:
ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
if amp_threshold is not None:
if peak == 'positive':
mask = spectrum > abs(amp_threshold)
elif peak == 'negative':
mask = spectrum < -abs(amp_threshold)
else:
mask = np.abs(spectrum) > abs(amp_threshold)
if np.count_nonzero(mask) == 0:
return np.array([]), np.array([])
peak_mask = np.split(mask, ranges[:, 1])
mask_true = np.array([any(array) for array in peak_mask[:-1]])
ranges = ranges[mask_true]
if peak == 'positive':
amp_vals = np.array([max(spectrum[low:upp]) for low, upp in ranges])
elif peak == 'negative':
amp_vals = np.array([min(spectrum[low:upp]) for low, upp in ranges])
else:
amp_vals = np.array(
[np.sign(spectrum[low]) * max(np.abs(spectrum[low:upp]))
for low, upp in ranges])
# TODO: check if sorting really necessary??
sort_indices = np.argsort(amp_vals)[::-1]
return amp_vals[sort_indices], ranges[sort_indices]
else:
sort_indices = np.argsort(ranges[:, 0])
ranges = ranges[sort_indices]
consecutive_channels = ranges[:, 1] - ranges[:, 0]
return consecutive_channels, ranges | 176c0db9ccd00822dd3dc96d00f1276e5bb8c426 | 21,809 |
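A small worked example:

spectrum = np.array([0., 2., 3., 0., -2., 0., 1., 0.])
amps, ranges = determine_peaks(spectrum, peak='positive', amp_threshold=1)
# amps   -> array([3.])      (only the first positive run exceeds the threshold)
# ranges -> array([[0, 4]])  (half-open channel interval of that run)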
import torch
from functools import wraps
def sumlike_wrap(fun_name):
"""Handle torch.sum and torch.mean"""
# Define appropriate torch function, the rest of the logic is the same
assert fun_name in ['sum', 'mean']
torch_fun = getattr(torch, fun_name)
@wraps(torch_fun)
def sumlike_fun(input, dim=None, keepdim=False):
nodim = dim is None
if nodim:
# Remove stable dims, then sum over data
input = move_sdims(input, ())
data_sum = torch_fun(input.data)
scale = input.scale.view(())
output = STensor(data_sum, scale)
else:
# Convert dim to list of non-negative indices to sum over
dim_list = tupleize(dim, input.ndim)
# Make summed indices data dims, then sum over data tensor
new_sdims = tuple(i for i in input.stable_dims
if i not in dim_list)
input = move_sdims(input, new_sdims)
data_sum = torch_fun(input.data, dim, keepdim=keepdim)
scale = input.scale
if not keepdim:
scale = squeeze_dims(scale, dim_list)
output = STensor(data_sum, scale)
output.rescale_()
return output
# Register the new sum-like function
STABLE_FUNCTIONS[torch_fun] = sumlike_fun | 3aa0247b965dbbf5038e4b0f4f00b0ead9855270 | 21,810 |
import numpy as np
def __crossover(n: int, g: np.matrix, m_list: np.array, f_list: np.array) -> np.matrix:
"""
:param n: number of mating pairs (half of g.shape[0])
:param g: binary matrix of genes, one individual per row
:param m_list: row indices of the male parents
:param f_list: row indices of the female parents
:return: crossed-over binary matrix of genes
"""
cros = np.random.randint(low=0, high=g.shape[1], size=n)
g_cros = np.copy(g)
for m, f, c in zip(m_list, f_list, cros):
g_cros[[m, f], :c] = g_cros[[f, m], :c]
return g_cros | 93fbd5138bdf2e293fe0515a0096ab643fbf1953 | 21,811 |
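An illustration; the crossover points are drawn randomly inside the function, so output varies between runs. Note that fancy indexing evaluates the right-hand side first, which is what makes the in-place swap valid:

g = np.array([[1, 1, 1, 1],
              [0, 0, 0, 0]])
g_cros = __crossover(n=1, g=g, m_list=np.array([0]), f_list=np.array([1]))
# e.g. with crossover point c == 2, rows 0 and 1 swap their first two genes:
# [[0, 0, 1, 1],
#  [1, 1, 0, 0]]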
def setup_system():
"""
Galactic center potential and Arches cluster
position from Kruijssen 2014
"""
potential = static_potentials.Galactic_Center_Potential_Kruijssen()
cluster = Particle()
# At time 2.05 in KDL15
cluster.position = [-17.55767, -53.26560, -9.39921] | units.parsec
cluster.velocity = [-187.68008, 80.45276, 33.96556] | units.kms
cluster.position += coordinate_correction
return potential, cluster | 9713feaa51bfb0430394a8e8171bdecd3590d5e2 | 21,812 |
import os
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
def MCM_data_scraper(species_list, get_image: bool = False, display: bool = False,
filename: str = '', savepath: str = ''):
"""Function that takes a list of species and scrapes all the info off the
MCM webpage about each species (e.g. SMILES, InChI, etc.).
Inputs:
-------
species_list - A list of MCM Species you want info for. If you'd like info
about all of the species then pass species_list= ['All']
get_image - (optional) - Boolean of whether you'd like to save the image associated with each
species. Note: this requires saving the images to your computer,
which can take up a considerable amount of space. Default is False.
filename - (optional) - Name of the .excel and .html files generated that
contain the scraped info.
savepath - (optional) Where you'd like to save the output .csv, .html and
MCM images. If none is provided, is saved in current directory with
images in a new subfolder current_path+/"MCM_images/".
display - (optional) Boolean of whether you'd like to display the web scraped
results as a table in your web browser at the end.
Outputs:
--------
(1) Function returns a pandas dataframe with all the saved data.
(2) An Excel workbook file saved at: savepath+filename+'.xlsx',
which contains all of the scraped data. Easily read back into Python
as a pandas dataframe using:
df=pd.read_excel(savepath+filename+'.xlsx', engine="openpyxl", index_col=0)
(3) An HTML document saved at: savepath+filename+'.html', which contains all of
the scraped data. A nice way to display all the data AND the
images that were scraped at the same time in a scrollable / easily visualized way.
(4) (optional) A folder at savepath+'/MCM_Images/' with .pngs of the MCM molecules
scraped from the MCM website.
Author:
-------
Dr. Jessica D. Haskins (jhaskins@alum.mit.edu) GitHub: @jdhask
Change Log:
----------
10/29/2021 JDH Created
1/18/2022 JDH modded function locations to allow use with F0AM_Tools
"""
# Check the file path + file names given.
excel_file= check_filename(filename=filename, default_name='MCM_web_scrape', ext='.xlsx',
savepath=savepath, overwrite=False, return_full=True)
html_file= check_filename(filename=filename, default_name='MCM_web_scrape', ext='.html',
savepath=savepath, overwrite=False, return_full=True)
if species_list[0].lower()=='all': species_list=load_data_files(species=True)
# Create an empty pandas dataframe with column names of all the info we're gonna scrape.
df = pd.DataFrame(columns=['MCM_Name', 'Formula', 'Molecular_Weight',
'InChI', 'SMILES', 'Description', 'Image', 'NIST_url'])
if get_image is False:
# Don't need image column if not gonna grab.
df = df.drop(columns='Image')
else: # We are grabbing images, so make a subfolder in savepath to keep them.
if not os.path.exists(savepath+'/MCM_Images/'):
os.makedirs(savepath+'/MCM_Images/')
# Loop through all species you'd like to scrape data for.
for sps in species_list:
# This is the 2021 base URL for browsing an MCM species on the MCM website.
url = 'http://mcm.york.ac.uk/browse.htt?species='+sps
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find('table', {"class": "infobox"})
tr = table.findAll(['tr'])
# The InChI / SMILES info is all contained within a "table" of the webpage...
# Found by inspecting "soup" manually. If website changes, will need to re-inspect the soup!
mw, smiles, inchi = np.nan, '', ''
syn = 'None'
for cell in tr:
th = cell.find_all('th')
data_hdr = [col.text.strip('\n') for col in th][0]
td = cell.find_all('td')
if len(td) > 0:
row = [i.text.replace('\n', '') for i in td][0]
if data_hdr.lower() == 'molecular weight':
mw = np.float64(row)
if data_hdr.lower() == 'smiles':
smiles = row
# note: no else-branch here -- the original wiped inchi on every non-InChI row
if data_hdr.lower() == 'inchi':
inchi = row
synl = list() # Get the list of synonyms of this compound.
if data_hdr.lower() == 'synonyms':
if len(data_hdr) > 0:
nms = row.split(';')
for item in nms:
item = item.replace(' ', '')
item = item.replace('\t', '')
if (len(item) > 0) and (item not in synl):
synl.append(item)
# Join the synonym list into a single comma-separated string ('None' if empty).
cleaned = [s.replace("'", '') for s in synl]
if cleaned:
syn = ','.join(cleaned)
if len(inchi) > 0:
if get_image is True: # Option to also save the image of the MCM species in question.
image_url = 'http://mcm.york.ac.uk/pics/species/'+sps+'.png'
img_data = requests.get(image_url).content
file_nm = savepath+'/MCM_Images/'+sps+'.png'
with open(file_nm, 'wb') as handler:
handler.write(img_data)
img = '<img src="'+file_nm+'"/>' #Save img source for displaying using IPYTHON later...
# Link to the NIST website for this species using its InChI identifier.
InChI2web = inchi.replace('InChI=', '')
InChI2web = InChI2web.replace(',', '%2C')
nist_url = 'https://webbook.nist.gov/cgi/inchi/InChI%3D'+InChI2web
nist_page = requests.get(nist_url)
nist_soup = BeautifulSoup(nist_page.content, 'html.parser')
nist_main = nist_soup.find('main', attrs={'id': 'main'}).get_text()
if 'invalid' in nist_main.lower() and 'identifier' in nist_main.lower():
# The NIST url leads to a "not found" page, so we can't scrape it.
nist_url = ''
else:
form = ''
p1 = nist_soup.find("main")
if p1 is not None:
p2 = p1.find("ul")
if p2 is not None:
parent = p2.find_all("li")
dat = [i.get_text().split(':') for i in parent]
if 'Formula' in dat[0][0].strip():
form = dat[0][1].strip()
if get_image is True:
df = df.append({'MCM_Name': sps, 'Formula': form, 'Molecular_Weight': mw,'InChI': inchi, 'SMILES': smiles,
'Description': syn, 'Image': img, 'NIST_url': nist_url}, ignore_index=True)
else:
df = df.append({'MCM_Name': sps, 'Formula': form, 'Molecular_Weight': mw, 'InChI': inchi, 'SMILES': smiles,
'Description': syn, 'NIST_url': nist_url}, ignore_index=True)
# Make sure string columns are all strings...
only_strs= ['MCM_Name','Formula','InChI','SMILES','Description', 'NIST_url', 'Image']
for i in df.index:
for col in only_strs:
if type(df.loc[i,col])!=str:
if np.isnan(df.loc[i,col]): df.at[i,col]='None'
elif df.loc[i,col] =='0':
df.at[i,col]='None'
# Convert dataframe to an HTML string (escape=False so the <img> tags render).
df_htm = df.to_html(escape=False)
# Write the HTML string to a file.
with open(html_file, 'w') as f:
f.write(df_htm)
print('HTML file saved as: ' + html_file)
# Option to display the saved info in your webbrowser
if display is True:
display_MCM_table(html_file)
# Also save the data as an excel workbook.
# Read this back in using: df=pd.read_excel(savepath+filename+'.xlsx', engine="openpyxl", index_col=0)
df.to_excel(excel_file,engine="openpyxl")
print('excel file saved as: ' + excel_file)
return df | 649fc26784e90ff8a1c6b565d563c46a0f039302 | 21,813 |
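A usage sketch; 'APINENE' stands in for any valid MCM species name:

df = MCM_data_scraper(['APINENE'], get_image=False, display=False,
                      filename='apinene_scrape', savepath='.')
print(df[['MCM_Name', 'Formula', 'SMILES']])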
from typing import Iterable
from typing import List
def collect_dynamic_libs(name: str, dest: str = ".", dependencies: bool = True,
excludes: Iterable[str] = None) -> List:
"""
Collect DLLs for distribution **name**.
Arguments:
name:
The distribution's project-name.
dest:
Target destination, defaults to ``'.'``.
dependencies:
Recursively collect libs for dependent distributions (recommended).
excludes:
Dependent distributions to skip, defaults to ``None``.
Returns:
List of DLLs in PyInstaller's ``(source, dest)`` format.
This collects libraries only from Conda's shared ``lib`` (Unix) or
``Library/bin`` (Windows) folders. To collect from inside a distribution's
installation use the regular
:func:`PyInstaller.utils.hooks.collect_dynamic_libs`.
"""
_files = []
for file in files(name, dependencies, excludes):
# A file is classified as a DLL if it lives inside the dedicated
# ``lib_dir`` DLL folder.
if file.parent == lib_dir:
_files.append((str(file.locate()), dest))
return _files | 5e4ed9f9d412c6e071d85fcb34091fbed0722258 | 21,814 |
import pickle
import pandas as pd
def make_agreements(file) -> pd.DataFrame:
"""In some of the human conditions, we hold out questions. Each randomly generated agent is
given our test and then asked its opinion on every holdout question.
agreements.pkl is a Dict[Experiment, Tuple(ndarray, ndarray)] where each array element
contains the fraction of holdout questions a single agent answered correctly. The first array
contains agents that passed our test, and the second contains agents that didn't pass our test.
This method massages that data into a DataFrame with experiments as the keys, a column
for predicted alignment, and a column for the fraction of holdout questions answered correctly.
"""
agreements = pd.Series(pickle.load(file)).reset_index()
agreements = agreements.join(
agreements.apply(lambda x: list(x[0]), result_type="expand", axis="columns"), rsuffix="_",
)
del agreements["0"]
agreements.columns = ["epsilon", "delta", "n", "aligned", "misaligned"]
agreements = agreements.set_index(["epsilon", "delta", "n"]).stack().reset_index()
agreements.columns = ["epsilon", "delta", "n", "aligned", "value"]
agreements = agreements.explode("value")
agreements["aligned"] = agreements.aligned == "aligned"
agreements.value = agreements.value.apply(lambda x: float(x))
agreements = agreements.dropna()
return agreements | e9cb9e45aa1c2ff5b694f6712da2892c6f44fd99 | 21,815 |
from html import escape  # assumed source of the escape() used below
def column_as_html(column, table):
"""Return column as an HTML row."""
markup = "<tr>"
markup += "<td class='field'>{0}</td>".format(column.name)
markup += "<td>{0}</td>".format(column.formattedType)
# Check for Primary Key
if table.isPrimaryKeyColumn(column):
markup += "<td class='centered primary'>✔</td>"
else:
markup += "<td class='centered'> </td>"
# Check for Foreign Key
if table.isForeignKeyColumn(column):
markup += "<td class='centered foreign'><a href='#{0}s'>✔</a></td>".format(column.name.replace(table.name, ""))
else:
markup += "<td class='centered'> </td>"
# Check for Not Null attribute
if column.isNotNull == 1:
markup += "<td class='centered notnull'>✔</td>"
else:
markup += "<td class='centered'> </td>"
# Check for Unique attribute
if is_unique(column, table):
markup += "<td class='centered unique'>✔</td>"
else:
markup += "<td class='centered'> </td>"
# Check for Binary, Unsigned and Zero Fill attributes
flags = list(column.flags)
if flags.count("BINARY"):
markup += "<td class='centered binary'>✔</td>"
else:
markup += "<td class='centered'> </td>"
if flags.count("UNSIGNED"):
markup += "<td class='centered unsigned'>✔</td>"
else:
markup += "<td class='centered'> </td>"
if flags.count("ZEROFILL"):
markup += "<td class='centered zerofill'>✔</td>"
else:
markup += "<td class='centered'> </td>"
# Check for Auto Increment attribute
if column.autoIncrement == 1:
markup += "<td class='centered autoincrement'>✔</td>"
else:
markup += "<td class='centered'> </td>"
# Default value
markup += "<td>{0}</td>".format(column.defaultValue)
# Comment
markup += "<td class='comment'>{0}</td>".format(escape(column.comment))
markup += "</tr>"
return markup | 0c6ed56cd686a4359776022407b023d5733198c9 | 21,816 |
def covariation(x, y):
"""
Covariation of X and Y.
:param list or tuple x: 1st array.
:param list or tuple y: 2nd array.
:return: covariation.
:rtype: float
:raise ValueError: when x or y is empty
"""
if x and y:
m_x = mean(x)
m_y = mean(y)
dev_x = [i - m_x for i in x]
dev_y = [i - m_y for i in y]
return dot(dev_x, dev_y) / (len(x) - 1)
else:
raise ValueError('x or y is empty') | dd42467a453978edb5970b79653724f77c07beb7 | 21,817 |
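A worked example; `mean` and `dot` are module helpers not shown here, so minimal stand-ins are sketched as assumptions:

def mean(xs):
    return sum(xs) / len(xs)

def dot(xs, ys):
    return sum(x * y for x, y in zip(xs, ys))

covariation([1, 2, 3, 4], [2, 4, 6, 8])  # -> 3.333... (sample covariance)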
def stripped_spaces_around(converter):
"""Make a converter that strips leading and trailing spaces.
``converter`` is called to further convert non-``None`` values.
"""
def stripped_text_converter(value):
if value is None:
return None
return converter(value.strip())
return stripped_text_converter | b92f38d3eb8d191f615488bbd11503bae56ef6de | 21,818 |
from typing import Union
from typing import Literal
from typing import Any
def ootf_inverse(
value: FloatingOrArrayLike,
function: Union[
Literal["ITU-R BT.2100 HLG", "ITU-R BT.2100 PQ"], str
] = "ITU-R BT.2100 PQ",
**kwargs: Any
) -> FloatingOrNDArray:
"""
Maps relative display linear light to scene linear light using given
inverse opto-optical transfer function (OOTF / OOCF).
Parameters
----------
value
Value.
function
Inverse opto-optical transfer function (OOTF / OOCF).
Other Parameters
----------------
kwargs
{:func:`colour.models.ootf_inverse_HLG_BT2100`,
:func:`colour.models.ootf_inverse_PQ_BT2100`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`numpy.floating` or :class:`numpy.ndarray`
Luminance of scene linear light.
Examples
--------
>>> ootf_inverse(779.988360834115840) # doctest: +ELLIPSIS
0.1000000...
>>> ootf_inverse( # doctest: +ELLIPSIS
... 63.095734448019336, function='ITU-R BT.2100 HLG')
0.1000000...
"""
function = validate_method(
function,
OOTF_INVERSES,
'"{0}" inverse "OOTF" is invalid, it must be one of {1}!',
)
callable_ = OOTF_INVERSES[function]
return callable_(value, **filter_kwargs(callable_, **kwargs)) | 65c7aa374d1daa086828b87a4f30802f63b4a3b7 | 21,819 |
import subprocess
def GetMemoryUsageOfProcess(pid):
"""Queries the system for the current memory usage of a specified process.
This function only works in Linux and ChromeOS.
Args:
pid: The integer process identifier for the process to use.
Returns:
The memory usage of the process in MB, given as a float. If the process
doesn't exist on the machine, then the value 0 is returned.
"""
assert pyauto.PyUITest.IsLinux() or pyauto.PyUITest.IsChromeOS()
process = subprocess.Popen('ps h -o rss -p %s' % pid, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = process.communicate()[0]
if stdout:
return float(stdout.strip()) / 1024
else:
return 0 | 21dcbe25243937f05384d15a9b3ec3caf595ceeb | 21,820 |
def modified_query(benchmark, model_spec, run_index: int, epochs=108, stop_halfway=False):
"""
NOTE:
Copied from https://github.com/google-research/nasbench/blob/b94247037ee470418a3e56dcb83814e9be83f3a8/nasbench/api.py#L204-L263 # noqa
We changed the function in such a way that we now can specified the run index (index of the evaluation) which was
in the original code sampled randomly.
OLD DOCSTRING:
Fetch one of the evaluations for this model spec.
Each call will sample one of the config['num_repeats'] evaluations of the
model. This means that repeated queries of the same model (or isomorphic
models) may return identical metrics.
This function will increment the budget counters for benchmarking purposes.
See self.training_time_spent, and self.total_epochs_spent.
This function also allows querying the evaluation metrics at the halfway
point of training using stop_halfway. Using this option will increment the
budget counters only up to the halfway point.
Args:
model_spec: ModelSpec object.
epochs: number of epochs trained. Must be one of the evaluated number of
epochs, [4, 12, 36, 108] for the full dataset.
stop_halfway: if True, returned dict will only contain the training time
and accuracies at the halfway point of training (num_epochs/2).
Otherwise, returns the time and accuracies at the end of training
(num_epochs).
Returns:
dict containing the evaluated data for this object.
Raises:
OutOfDomainError: if model_spec or num_epochs is outside the search space.
"""
if epochs not in benchmark.dataset.valid_epochs:
raise OutOfDomainError('invalid number of epochs, must be one of %s'
% benchmark.dataset.valid_epochs)
fixed_stat, computed_stat = benchmark.dataset.get_metrics_from_spec(model_spec)
# MODIFICATION: Use the run index instead of the sampled one.
# sampled_index = random.randint(0, self.config['num_repeats'] - 1)
computed_stat = computed_stat[epochs][run_index]
data = {}
data['module_adjacency'] = fixed_stat['module_adjacency']
data['module_operations'] = fixed_stat['module_operations']
data['trainable_parameters'] = fixed_stat['trainable_parameters']
if stop_halfway:
data['training_time'] = computed_stat['halfway_training_time']
data['train_accuracy'] = computed_stat['halfway_train_accuracy']
data['validation_accuracy'] = computed_stat['halfway_validation_accuracy']
data['test_accuracy'] = computed_stat['halfway_test_accuracy']
else:
data['training_time'] = computed_stat['final_training_time']
data['train_accuracy'] = computed_stat['final_train_accuracy']
data['validation_accuracy'] = computed_stat['final_validation_accuracy']
data['test_accuracy'] = computed_stat['final_test_accuracy']
benchmark.dataset.training_time_spent += data['training_time']
if stop_halfway:
benchmark.dataset.total_epochs_spent += epochs // 2
else:
benchmark.dataset.total_epochs_spent += epochs
return data | 21ccbafb230da1d984f53f36f41c6e9ceb0d7f18 | 21,821 |
import numpy as np
def gauss(x, mu=0, sigma=1):
"""
Unnormalized Gaussian distribution.
Parameters
----------
x : float or numpy.ndarray
Point(s) at which to evaluate the Gaussian.
mu : float
Mean (center) of the distribution.
sigma : float
Standard deviation of the distribution.
Returns
-------
y : type(x)
Gaussian evaluated at x.
Notes
-----
Some people use alpha (1/e point)
instead of the sigma (standard deviation)
to define the width of the Gaussian.
They are related through: alpha = sigma * sqrt(2)
"""
return np.exp(-((x - mu)**2) / (2 * sigma**2)) | 24e5c5b9e42cc6b84e6d2c4aa9f2a26b44793112 | 21,822 |
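A few quick evaluations:

gauss(0.0)  # -> 1.0 (unnormalized: the peak value is 1)
gauss(1.0)  # -> exp(-0.5) ~= 0.6065
gauss(np.array([-1., 0., 1.]), mu=0, sigma=1)  # vectorized over ndarray input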
def GetSpd(ea):
"""
Get current delta for the stack pointer
@param ea: end address of the instruction
i.e. the last address of the instruction + 1
@return: The difference between the original SP upon
entering the function and SP for the specified address
"""
func = idaapi.get_func(ea)
if not func:
return None
return idaapi.get_spd(func, ea) | 84c00ac2bb722e51d27813a35f55c8c59fdac579 | 21,823 |
def is_fugashi_ipadic_available():
"""
Check if the library is available.
This function checks if fugashi with ipadic is available in your environment
and returns the result as a bool value.
Returns
-------
_fugashi_ipadic_available : bool
If True, fugashi with ipadic is available in your environment.
Examples
--------
>>> tokenizers.is_fugashi_ipadic_available()
True
"""
return _fugashi_ipadic_available | cc3b80718691b2914f57c950452f2fbb253100d1 | 21,824 |
import numpy as np
import torch
def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
"""Add padding to schematics to sidelength"""
szs = list(schematic.size())
szs = np.add(szs, -sidelength)
pad = []
# this is all backwards bc pytorch pad semantics :(
for s in szs:
if s >= 0:
pad.append(0)
else:
pad.append(-s)
pad.append(0)
schematic = torch.nn.functional.pad(schematic, pad[::-1])
if labels is not None:
labels = torch.nn.functional.pad(labels, pad[::-1], value=nothing_id)
return schematic, labels | 81a7bb8deb2474106715720f79e0d3ee8937557b | 21,825 |
import keras
import tensorflow as tf
from keras import backend as K
def segmentation_gaussian_measurement_batch(
y_true,
y_pred,
gaussian_sigma=3,
measurement=segmentation_losses.binary_crossentropy):
""" Apply metric or loss measurement to a batch of data incorporating a 2D gaussian.
Only works with batch size 1.
Loop and call this function repeatedly over each sample
to use a larger batch size.
# Arguments
y_true: is assumed to be [label, x_img_coord, y_image_coord]
y_pred: is expected to be a 2D array of labels
with shape [1, img_height, img_width, 1].
"""
with K.name_scope(name='segmentation_gaussian_measurement_batch') as scope:
if keras.backend.ndim(y_true) == 4:
# sometimes the dimensions are expanded from 2 to 4
# to meet Keras' expectations.
# In that case reduce them back to 2
y_true = K.squeeze(y_true, axis=-1)
y_true = K.squeeze(y_true, axis=-1)
y_pred_shape = tf.Tensor.get_shape(y_pred)
batch_size = y_pred_shape[0]
y_true = tf.split(y_true, batch_size)
y_pred = tf.split(y_pred, batch_size)
results = []
for y_true_img, y_pred_img in zip(y_true, y_pred):
result = segmentation_gaussian_measurement(
y_true=y_true_img, y_pred=y_pred_img,
gaussian_sigma=gaussian_sigma,
measurement=measurement
)
results = results + [result]
results = tf.concat(results, axis=0)
return results | de88b6ee1175612f7fa8e41c98dc6e1b3287a034 | 21,826 |
from io import BytesIO
from PIL import Image
def save_image(img: Image, img_format=None, quality=85):
""" Save the image from a stream into an in-memory variable for later sending over the network """
if img_format is None:
img_format = img.format
output_stream = BytesIO()
output_stream.name = 'image.jpeg'
# on Ubuntu there is no 'jpg' format for some reason, but there is 'jpeg'
if img.format == 'JPEG':
img.save(output_stream, img_format, quality=quality, optimize=True, progressive=True)
else:
img.convert('RGB').save(output_stream, format=img_format)
output_stream.seek(0)
return output_stream | 5696745ad33a2b1b59718f1c4d4eedf0eda7cd46 | 21,827 |
from flask import abort  # assumed source of abort() (the function raises HTTP 400s)
def check_input(args: dict) -> dict:
"""
Check if the user-supplied latitude and longitude are well formatted. If ok, returns a dict with
lat and lng converted to floats.
- args: dict. request.args
"""
lat = args.get("lat")
lng = args.get("lng")
if lat is None:
abort(400, "Latitude parameter (lat) is missing")
if lng is None:
abort(400, "Longitude parameter (lng) is missing")
return {"lat": check_lat_lng(lat, "latitude"), "lng": check_lat_lng(lng, "longitude")} | 078fc0ae5665562d6849746788b1f7a5a88981eb | 21,828 |
def adjust_learning_rate(epoch, total_epochs, only_ce_epochs, learning_rate, optimizer):
"""Adjust learning rate during training.
Parameters
----------
epoch: Current training epoch.
total_epochs: Total number of epochs for training.
only_ce_epochs: Number of epochs for initial pretraining.
learning_rate: Initial learning rate for training.
optimizer: Optimizer whose learning rate is adjusted in place.
"""
# We don't want to consider the initial cross-entropy-only
# epochs for the lr scheduler
epoch = epoch - only_ce_epochs
drocc_epochs = total_epochs - only_ce_epochs
# lr = learning_rate
if epoch <= drocc_epochs:
lr = learning_rate * 0.001
if epoch <= 0.90 * drocc_epochs:
lr = learning_rate * 0.01
if epoch <= 0.60 * drocc_epochs:
lr = learning_rate * 0.1
if epoch <= 0.30 * drocc_epochs:
lr = learning_rate
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer | 94ddbb9fcc7676799f7e032f4ab6658b1b056b32 | 21,829 |
import numpy as np
def dummy_nullgeod():
"""
Equatorial Geodesic
"""
return Nulllike(
metric="Kerr",
metric_params=(0.5,),
position=[4., np.pi / 2, 0.],
momentum=[0., 0., 2.],
steps=50,
delta=0.5,
return_cartesian=False,
suppress_warnings=True,
) | fd5af27cebd029fbcbcdab07f154ee4f4dff2575 | 21,830 |
import os
def test_find_many_settings_precedence(monkeypatch) -> None:
"""Test that a settings file in the cwd takes precedence over one in the home directory."""
expected = os.path.join(os.getcwd(), "ansible-navigator.yml")
paths = [expected, os.path.join(os.path.expanduser("~"), ".ansible-navigator.json")]
def check_path_exists(arg):
return arg in paths
monkeypatch.setattr(os.path, "exists", check_path_exists)
messages, exit_messages, found = utils.find_settings_file()
assert expected == found | e724615c4c2012b6773b9a6100d3e10ef95dfcb8 | 21,831 |
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order)
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.reshape(C, -1) | 67a0d89ce98e6695a9d58b1f3ab2f403b09c89ce | 21,832 |
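A shape check:

import torch
x = torch.randn(2, 3, 4, 5, 6)  # (N, C, D, H, W)
flatten(x).shape                # torch.Size([3, 240]) == (C, N*D*H*W)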
import numpy as np
from chiesa_correction import align_gvectors
def compare_scalar_grids(gvecs0, nkm0, gvecs1, nkm1, atol=1e-6):
"""Compare two scalar fields sampled on regular grids
Args:
gvecs0 (np.array): first grid, (npt0, ndim)
nkm0 (np.array): values, (npt0,)
gvecs1 (np.array): second grid, (npt1, ndim), expect npt1<=npt0
nkm1 (np.array): values, (npt1,)
Return:
bool: True if same scalar field
"""
comm0, comm1 = align_gvectors(gvecs0, gvecs1)
unique = len(gvecs1[comm1]) == len(gvecs1) # all unique gvecs are unique
xmatch = np.allclose(gvecs0[comm0], gvecs1[comm1],
atol=atol) # gvecs match
ymatch = np.allclose(nkm0[comm0], nkm1[comm1],
atol=atol) # nk match before unfold
return np.array([unique, xmatch, ymatch], dtype=bool) | 0f75d4387e4c8a5f497a85191df342ac33df1c11 | 21,833 |
def a_dot(t):
"""
Time derivative of the scale factor a(t) = ((3/2) * H0 * t)**(2/3)
(a matter-dominated universe).
:param t: cosmic time
:return: da/dt
"""
return H0 * ((3 / 2) * H0 * t) ** (-1 / 3) | b5557176f75ed45f6e5b38eb827d655779311e0a | 21,834 |
def frame(x, frame_length, hop_length, axis=-1, name=None):
"""
Slice the N-dimensional (where N >= 1) input into (overlapping) frames.
Args:
x (Tensor): The input data which is a N-dimensional (where N >= 1) Tensor
with shape `[..., seq_length]` or `[seq_length, ...]`.
frame_length (int): Length of the frame and `0 < frame_length <= x.shape[axis]`.
hop_length (int): Number of steps to advance between adjacent frames
and `0 < hop_length`.
axis (int, optional): Specify the axis to operate on the input Tensors. Its
value should be 0(the first dimension) or -1(the last dimension). If not
specified, the last axis is used by default.
Returns:
The output frames tensor with shape `[..., frame_length, num_frames]` if `axis==-1`,
otherwise `[num_frames, frame_length, ...]` where
`num_frames = 1 + (x.shape[axis] - frame_length) // hop_length`
Examples:
.. code-block:: python
import paddle
from paddle.signal import frame
# 1D
x = paddle.arange(8)
y0 = frame(x, frame_length=4, hop_length=2, axis=-1) # [4, 3]
# [[0, 2, 4],
# [1, 3, 5],
# [2, 4, 6],
# [3, 5, 7]]
y1 = frame(x, frame_length=4, hop_length=2, axis=0) # [3, 4]
# [[0, 1, 2, 3],
# [2, 3, 4, 5],
# [4, 5, 6, 7]]
# 2D
x0 = paddle.arange(16).reshape([2, 8])
y0 = frame(x0, frame_length=4, hop_length=2, axis=-1) # [2, 4, 3]
# [[[0, 2, 4],
# [1, 3, 5],
# [2, 4, 6],
# [3, 5, 7]],
#
# [[8 , 10, 12],
# [9 , 11, 13],
# [10, 12, 14],
# [11, 13, 15]]]
x1 = paddle.arange(16).reshape([8, 2])
y1 = frame(x1, frame_length=4, hop_length=2, axis=0) # [3, 4, 2]
# [[[0 , 1 ],
# [2 , 3 ],
# [4 , 5 ],
# [6 , 7 ]],
#
# [4 , 5 ],
# [6 , 7 ],
# [8 , 9 ],
# [10, 11]],
#
# [8 , 9 ],
# [10, 11],
# [12, 13],
# [14, 15]]]
# > 2D
x0 = paddle.arange(32).reshape([2, 2, 8])
y0 = frame(x0, frame_length=4, hop_length=2, axis=-1) # [2, 2, 4, 3]
x1 = paddle.arange(32).reshape([8, 2, 2])
y1 = frame(x1, frame_length=4, hop_length=2, axis=0) # [3, 4, 2, 2]
"""
if axis not in [0, -1]:
raise ValueError(f'Unexpected axis: {axis}. It should be 0 or -1.')
if not isinstance(frame_length, int) or frame_length <= 0:
raise ValueError(
f'Unexpected frame_length: {frame_length}. It should be an positive integer.'
)
if not isinstance(hop_length, int) or hop_length <= 0:
raise ValueError(
f'Unexpected hop_length: {hop_length}. It should be an positive integer.'
)
if frame_length > x.shape[axis]:
raise ValueError(
f'Attribute frame_length should be less equal than sequence length, '
f'but got ({frame_length}) > ({x.shape[axis]}).')
op_type = 'frame'
if in_dygraph_mode():
attrs = ('frame_length', frame_length, 'hop_length', hop_length, 'axis',
axis)
op = getattr(_C_ops, op_type)
out = op(x, *attrs)
else:
check_variable_and_dtype(
x, 'x', ['int32', 'int64', 'float16', 'float32',
'float64'], op_type)
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type=op_type,
inputs={'X': x},
attrs={
'frame_length': frame_length,
'hop_length': hop_length,
'axis': axis
},
outputs={'Out': out})
return out | f43420ceefa8963579776c5234179274688c83d6 | 21,835 |
import functools
def wrapped_partial(func: callable, *args, **kwargs) -> callable:
"""Wrap a function with partial args and kwargs.
Args:
func (callable): The function to be wrapped.
*args: Positional arguments to bind to ``func``.
**kwargs: Keyword arguments to bind to ``func``.
Returns:
callable: The wrapped function.
"""
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func | d8c3eb53e3c74104aa72acce545269c98585cd83 | 21,836 |
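An example showing why the functools.update_wrapper call matters here; a bare functools.partial has no __name__:

def power(base, exponent):
    """Raise base to exponent."""
    return base ** exponent

square = wrapped_partial(power, exponent=2)
square(5)          # -> 25
square.__name__    # -> 'power' (a bare functools.partial raises AttributeError here)
square.__doc__     # -> 'Raise base to exponent.'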
import typing
def with_sfw_check(
command: typing.Optional[CommandT] = None,
/,
*,
error_message: typing.Optional[str] = "Command can only be used in SFW channels",
halt_execution: bool = False,
) -> CallbackReturnT[CommandT]:
"""Only let a command run in a channel that's marked as sfw.
Parameters
----------
command : typing.Optional[CommandT]
The command to add this check to.
Other Parameters
----------------
error_message : typing.Optional[str]
The error message to send in response as a command error if the check fails.
Defaults to "Command can only be used in SFW channels" and setting this to `None`
will disable the error message allowing the command search to continue.
halt_execution : bool
Whether this check should raise `tanjun.errors.HaltExecution` to
end the execution search when it fails instead of returning `False`.
Defaults to `False`.
Notes
-----
* error_message takes priority over halt_execution.
* For more information on how this is used with other parameters see
`CallbackReturnT`.
Returns
-------
CallbackReturnT[CommandT]
The command this check was added to.
"""
return _wrap_with_kwargs(command, sfw_check, halt_execution=halt_execution, error_message=error_message) | 565d4e0f9e5f473a72692511a1ba3896717c9069 | 21,837 |
def algorithm_id_to_generation_class(algorithm_id):
"""
Returns the Generation class corresponding to the
provided algorithm ID (as defined in settings).
"""
return _algorithm_id_to_class_data(algorithm_id)[1] | 5cf4ede818832a57c1c279d5c78c43c2c214b9b5 | 21,838 |
def search(session, **kwargs):
"""
Searches the Discogs API for a release object
Arguments:
session (requests.Session) - API session object
**kwargs (dict) - All kwargs are added as query parameters in the search call
Returns:
dict - The first result returned in the search
Raises:
Exception if release cannot be found
"""
try:
url = DB_API + '/search?'
for param, value in kwargs.items():
url += f'{param}={value}&'
res = session.get(url)
data = res.json()
if res.status_code != 200 or 'results' not in data.keys():
raise Exception(f'Unexpected error when querying Discogs API ({res.status_code})')
if not data['results']:
raise Exception('No results found')
return data['results'][0]
except Exception as err:
print(f'Failed to find release for search {kwargs} in Discogs database: {err}')
raise | f67646b3060602b743eb4166a4aab5882b8a3c81 | 21,839 |
def _conv_general_precision_config_proto(precision):
"""Convert an integer to an XLA.PrecisionConfig."""
if precision is None:
return None
proto = xla_data_pb2.PrecisionConfig()
proto.operand_precision.append(int(precision))
return proto | 8b43272aadeccd4385ddb74bcf0691f4a779e4c1 | 21,840 |
from typing import OrderedDict
from pathlib import Path
def find_installed_packages_in_environment():
"""
Find packages under the COLCON_PREFIX_PATH.
For each prefix path the package index is being read and the first time a
package is being found its install prefix is being added to the result.
:returns: The mapping from a package name to the prefix path
:rtype: OrderedDict
"""
packages = OrderedDict()
for prefix_path in get_colcon_prefix_path():
prefix_path = Path(prefix_path)
pkgs = find_installed_packages(prefix_path)
if pkgs is None:
logger.debug(
"Ignoring prefix path '{prefix_path}'".format_map(locals()))
continue
for pkg_name in sorted(pkgs.keys()):
# ignore packages with the same name in "lower" prefix path
if pkg_name in packages:
continue
packages[pkg_name] = pkgs[pkg_name]
return packages | 82507e2eaac3e0a0c6061db74794113c764b2ce2 | 21,841 |
def list_in_list(a, l):
"""Checks if a list is in a list and returns its index if it is (otherwise
returns -1).
Parameters
----------
a : list()
List to search for.
l : list()
List to search through.
"""
return next((i for i, elem in enumerate(l) if elem == a), -1) | 494d9a880bcd2084a0f50e292102dc8845cbbb16 | 21,842 |
import sys
def start_host(session=None, load_plugins=True, plugins=None):
"""Promote the current process into python plugin host for Nvim.
Start msgpack-rpc event loop for `session`, listening for Nvim requests
and notifications. It registers Nvim commands for loading/unloading
python plugins.
The sys.stdout and sys.stderr streams are redirected to Nvim through
`session`. That means print statements probably won't work as expected
while this function doesn't return.
This function is normally called at program startup and could have been
defined as a separate executable. It is exposed as a library function for
testing purposes only.
I never noticed until now but it also initializes a logger? Wth?
"""
if load_plugins:
plugins = _goofy_way_of_loading_plugins()
if not session:
session = socket_session()
else:
if isinstance(session, str):
session = _convert_str_to_session(session)
nvim = Nvim.from_session(session)
# nvim = Nvim.from_session(session).with_decode(decode)
if nvim.version.api_level < 1:
sys.stderr.write("This version of pynvim requires nvim 0.1.6 or later")
sys.exit(1)
host = Host(nvim)
if plugins is not None:
host.start(plugins)
return host | 10743a743e19fa0405ec07163e529663fb14b6c7 | 21,843 |
def sent_to_idx(sent, word2idx, sequence_len):
"""
convert sentence to index array
"""
unknown_id = word2idx.get("UNKNOWN", 0)
sent2idx = [word2idx.get(word, unknown_id) for word in sent.split("_")[:sequence_len]]
return sent2idx | ffaa65741d8c24e02d5dfbec4ce84c03058ebeb8 | 21,844 |
import typing as T
def expand_sqs_results(settings: Settings, sqs_results: T.Iterable[SQSResult],
timings: T.Optional[TimingDictionary] = None, include=('configuration',),
inplace: bool = False) -> Settings:
"""
Serializes a list of :py:class:`sqsgenerator.public.SQSResult` into a JSON/YAML serializable string
:param settings: the settings used to compute the {sqs_results}
:type settings: AttrDict
:param sqs_results: the SQSResult objects to serialize
"""
dump_include = list(include)
if 'configuration' not in dump_include:
dump_include += ['configuration']
result_document = make_result_document(settings, sqs_results, fields=dump_include, timings=timings)
if inplace:
settings.update(result_document)
keys_to_remove = {'file_name', 'input_format', 'composition', 'iterations', 'max_output_configurations',
'mode', 'threads_per_rank', 'is_sublattice'}
final_document = {k: v for k, v in settings.items() if k not in keys_to_remove}
if 'sublattice' in final_document:
final_document.update(final_document['sublattice'])
del final_document['sublattice']
else:
final_document = result_document
return Settings(final_document) | 316e6d66617f6715193502058eff173954214a89 | 21,845 |
def test_files_atlas(test_files):
"""ATLAS files"""
# ssbio/test/test_files/atlas
return op.join(test_files, 'atlas') | 9ab8f55582d85e7f51c301b1cc0c80a5b7233b47 | 21,846 |
import numpy as np
from modefit.basics import get_polyfit
def _get_xaxis_polynomial_(xyv, degree=DEGREE, legendre=LEGENDRE,
xmodel=None, clipping=[5, 5]):
"""Fit a sigma-clipped polynomial continuum along x and return its fit values."""
x,y,v = xyv
flagin = ((np.nanmean(y) - clipping[0] * np.nanstd(y)) < y) * (y< (np.nanmean(y) + clipping[1] * np.nanstd(y)))
contmodel = get_polyfit(x[flagin], y[flagin], v[flagin], degree=degree, legendre=legendre)
contmodel.fit(a0_guess=np.nanmedian(y[flagin]))
if xmodel is not None:
return contmodel.fitvalues, contmodel.model.get_model(x=xmodel)#, contmodel
return contmodel.fitvalues | d0b4ebf790339154fe0d979ec720a9960be92da8 | 21,847 |
from cryptography.fernet import Fernet
def generateKey():
"""
Method to generate an encryption key
"""
try:
key = Fernet.generate_key()
updateClipboard(f"export LVMANAGER_PW={str(key)[2:-1]}")
print(f"Key: {key}")
print("Export command copied to clipboard. Save this value!")
return True
except Exception as e:
print(f"Something went wrong\nException: {e}")
return False | a0d197c499d978600c6d95879aab67d595648ffc | 21,848 |
import numpy as np
def _zpkbilinear(z, p, k, fs):
""" Return a digital filter from an analog one using a bilinear transform """
z = np.atleast_1d(z)
p = np.atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2.0 * fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = np.append(z_z, -np.ones(degree))
# Compensate for gain change
k_z = k * np.real(np.prod(fs2 - z) / np.prod(fs2 - p))
return z_z, p_z, k_z | ef7e50ae81023edc599fb56e3c1e20a4579f8389 | 21,849 |
import numpy as np
def so3exp(w):
"""
Maps so(3) --> SO(3) group with closed form expression.
"""
theta = np.linalg.norm(w)
if theta < _EPS * 3:
return np.eye(3)
else:
w_hat = S03_hat_operator(w)
R = np.eye(3) + (np.sin(theta) / theta) * w_hat + ((1 - np.cos(theta)) / theta**2) * np.dot(w_hat, w_hat)
return R | 434168c7652311a850dbcb700343e445ac808c57 | 21,850 |
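`S03_hat_operator` and `_EPS` are module-level names not shown here; a standard sketch consistent with the usage above (an assumption, not the original definitions):

_EPS = np.finfo(np.float64).eps

def S03_hat_operator(w):
    # skew-symmetric matrix such that S03_hat_operator(w) @ v == np.cross(w, v)
    return np.array([[0.0, -w[2], w[1]],
                     [w[2], 0.0, -w[0]],
                     [-w[1], w[0], 0.0]])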
def bin2ppm(nproc_old, model_tags, region, npts, nproc,
old_mesh_dir, old_model_dir, output_dir):
"""
convert the bin files to the ppm model.
"""
result = ""
julia_path = get_julia("specfem_gll.jl/src/program/get_ppm_model.jl")
latnproc, lonnproc = map(int, nproc.split("/"))
nproc_ppm2netcdf = latnproc * lonnproc
# ! note: there is an issue with precompiling the code in a race condition; refer to https://github.com/simonbyrne/PkgLock.jl to solve the problem
# result += "julia --project -e 'push!(LOAD_PATH, \"@pkglock\"); using PkgLock; PkgLock.instantiate_precompile()'\n"
result += "module purge;module load GCC/8.2.0-2.31.1;module load OpenMPI/3.1.3;"
result += f"srun -n {nproc_ppm2netcdf} julia '{julia_path}' --nproc_old {nproc_old} --old_mesh_dir {old_mesh_dir} --old_model_dir {old_model_dir} --model_tags {model_tags} --output_file {output_dir} --region {region} --npts {npts} --nproc {nproc}; \n"
return result | 2e8f8be993ca7d164faf2cfce4e1539a16764ad4 | 21,851 |
def st_sdata(obs, cols):
"""return string data in given observation numbers as a list of lists,
one sub-list for each row; obs should be int or iterable of int;
cols should be a single str or int or iterable of str or int
"""
obs, cols, _ = _parseObsColsVals(obs, cols)
if not all(st_isstrvar(c) for c in cols):
raise TypeError("only string Stata variables allowed")
return [[_st_sdata(i,j) for j in cols] for i in obs] | 7e08168a42043b7de379f7f513f25b6e88a89847 | 21,852 |
def list_data(args, data):
"""List all servers and files associated with this project."""
if len(data["remotes"]) > 0:
print("Servers:")
for server in data["remotes"]:
if server["name"] == server["location"]:
print(server["user"] + "@" + server["location"])
else:
print(
server["user"] + "@" + server["name"] + " ("
+ server["location"] + ")")
else:
print("No servers added")
print("Included files and directories:")
print(data["file"] + ".py")
if len(data["files"]) > 0:
print("\n".join(data["files"]))
return data | 6a005b6e605d81985fca85ca54fd9b29b28128f5 | 21,853 |
import numpy as np
def vecInt(xx, vv, p, interpolation='weighted'):
"""
Interpolates the field around this position.
call signature:
vecInt(xx, vv, p, interpolation = 'weighted')
Keyword arguments:
*xx*:
Position vector around which will be interpolated.
*vv*:
Vector field to be interpolated.
*p*:
Parameter struct.
*interpolation*:
Interpolation of the vector field.
'mean': takes the mean of the adjacent grid point.
'weighted': weights the adjacent grid points according to their distance.
"""
# find the adjacent indices
i = (xx[0]-p.Ox)/p.dx
if (i < 0):
i = 0
if (i > p.nx-1):
i = p.nx-1
ii = np.array([int(np.floor(i)), \
int(np.ceil(i))])
j = (xx[1]-p.Oy)/p.dy
if (j < 0):
j = 0
if (j > p.ny-1):
j = p.ny-1
jj = np.array([int(np.floor(j)), \
int(np.ceil(j))])
k = (xx[2]-p.Oz)/p.dz
if (k < 0):
k = 0
if (k > p.nz-1):
k = p.nz-1
kk = np.array([int(np.floor(k)), \
int(np.ceil(k))])
vv = np.swapaxes(vv, 1, 3)
# interpolate the field
if (interpolation == 'mean'):
return np.mean(vv[:,ii[0]:ii[1]+1,jj[0]:jj[1]+1,kk[0]:kk[1]+1], axis = (1,2,3))
if(interpolation == 'weighted'):
if (ii[0] == ii[1]): w1 = np.array([1,1])
else: w1 = (i-ii[::-1])
if (jj[0] == jj[1]): w2 = np.array([1,1])
else: w2 = (j-jj[::-1])
if (kk[0] == kk[1]): w3 = np.array([1,1])
else: w3 = (k-kk[::-1])
weight = abs(w1.reshape((2,1,1))*w2.reshape((1,2,1))*w3.reshape((1,1,2)))
return np.sum(vv[:,ii[0]:ii[1]+1,jj[0]:jj[1]+1,kk[0]:kk[1]+1]*weight, axis = (1,2,3))/np.sum(weight) | c93572205e1d5a3c00ef21f1780a5184c695d988 | 21,854 |
def anchor_inside_flags(flat_anchors, valid_flags, img_shape,
allowed_border=0, device='cuda'):
"""Anchor inside flags.
:param flat_anchors: flat anchors
:param valid_flags: valid flags
:param img_shape: image meta info
:param allowed_border: if allow border
:return: inside flags
"""
img_h, img_w = img_shape[:2]
if device == 'cuda':
img_h = img_h.cuda()
img_w = img_w.cuda()
img_h = img_h.float()
img_w = img_w.float()
valid_flags = valid_flags.bool()
if allowed_border >= 0:
inside_flags = (valid_flags & (flat_anchors[:, 0] >= -allowed_border) & (
flat_anchors[:, 1] >= -allowed_border) & (
flat_anchors[:, 2] < img_w + allowed_border) & (
flat_anchors[:, 3] < img_h + allowed_border))
else:
inside_flags = valid_flags
return inside_flags | 500fe39f51cbf52bd3417b14e7ab7dcb4ec2f9cc | 21,855 |
def notinLRG_mask(primary=None, rflux=None, zflux=None, w1flux=None,
rflux_snr=None, zflux_snr=None, w1flux_snr=None):
"""See :func:`~desitarget.sv1.sv1_cuts.isLRG` for details.
Returns
-------
:class:`array_like`
``True`` if and only if the object is NOT masked for poor quality.
"""
if primary is None:
primary = np.ones_like(rflux, dtype='?')
lrg = primary.copy()
lrg &= (rflux_snr > 0) & (rflux > 0) # ADM quality in r.
lrg &= (zflux_snr > 0) & (zflux > 0) # ADM quality in z.
lrg &= (w1flux_snr > 4) & (w1flux > 0) # ADM quality in W1.
return lrg | a89a02d017140f1321905695bbbcb34789b2e535 | 21,856 |
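A small usage sketch with synthetic flux arrays; the values are chosen only for illustration:
import numpy as np

rflux = np.array([1.0, 2.0, -1.0])
zflux = np.array([1.0, 2.0, 1.0])
w1flux = np.array([1.0, 2.0, 1.0])
rflux_snr = np.array([5.0, 5.0, 5.0])
zflux_snr = np.array([5.0, 0.0, 5.0])
w1flux_snr = np.array([5.0, 5.0, 5.0])
mask = notinLRG_mask(rflux=rflux, zflux=zflux, w1flux=w1flux,
                     rflux_snr=rflux_snr, zflux_snr=zflux_snr, w1flux_snr=w1flux_snr)
print(mask)  # [ True False False]: second fails z-band quality, third has negative rflux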
def get_theta_def(pos_balle:tuple, cote:str):
"""
Retourne les deux angles theta (voir les explications) pour que le goal soit alignรฉ avec la balle.
Ceux-ci sont calculรฉs en fonction des deux poteaux pour avoir les deux "extrรฉmitรฉs" pour รชtre correctement alignรฉes.
Paramรจtres:
- pos_balle : tuple - contient les positions x et y de la balle
- cote : str - Cรดtรฉ que l'on attaque. d pour droite, g pour gauche
( par rapport au sens de l'axe des abscisses )
"""
angles = []
if cote.lower() == "d":
alphas = get_alpha(pos_balle, goal_droit)
for alpha, poteau in zip(alphas, goal_droit):
if pos_balle[1] > poteau[1]:
angles.append(alpha)
else:
angles.append(-alpha)
elif cote.lower() == "g":
alphas = get_alpha(pos_balle, goal_gauche)
for alpha, poteau in zip(alphas, goal_gauche):
if pos_balle[1] > poteau[1]:
angles.append(pi - alpha)
else:
angles.append(alpha - pi)
return angles | 0fdde277c4c63b593c5c40c595c7181539eb0fd1 | 21,857 |
def public_doc():
"""Documentation for this api."""
return auto.html(groups=['public'], title='Ocean App Web Service Public Documentation') | 37d343ca4159566f4191a9f8608378dea7ce1bb5 | 21,858 |
def getAllTeams():
"""
returns the entire list of teams
"""
return Team.objects.order_by('name').all() | 8e06518e417657d3a24d4261a71d9b0bda31af22 | 21,859 |
def parse_lamp_flags(flags):
"""Parses flags and returns a dict that represents the lamp states."""
# flags: [0123]{8}
values = _swap_key_and_value(_LAMP_STATES) # {value: state}
states = dict([
(color, values[flags[digit]]) for color, digit in _LAMP_DIGITS.items()
])
return {'lamps': states} | 5a2416ebca980fd9d3ae717aaa4da3b008d76e95 | 21,860 |
def user_owns_item(function):
""" Decorator that checks that the item was created by current user. """
@wraps(function)
def wrapper(category_name, item_name, *args, **kwargs):
category = db_session.query(Category
).filter_by(name=category_name).one()
user_id = session['user_id']
item = db_session.query(Item
).filter_by(category=category, name=item_name
).one()
if item.user_id == user_id:
return function(category_name, item_name, *args, **kwargs)
else:
abort(403)
return wrapper | 912c93408b6297c338be6dc48414f3b4bb57aea3 | 21,861 |
def generate_bit_byte_overview(inputstring, number_of_indent_spaces=4, show_reverse_bitnumbering=False):
"""Generate a nice overview of a CAN frame.
Args:
inputstring (str): String that should be printed. Should be 64 characters long.
number_of_indent_spaces (int): Size of indentation
Raises:
ValueError when *inputstring* has wrong length.
Returns:
A multi-line string.
"""
if len(inputstring) != constants.BITS_IN_FULL_DATA:
raise ValueError("The inputstring is wrong length: {}. {!r}".format(len(inputstring), inputstring))
paddedstring = " ".join([inputstring[i:i + 8] for i in range(0, 64, 8)])
indent = " " * number_of_indent_spaces
text = indent + " 111111 22221111 33222222 33333333 44444444 55555544 66665555\n"
text += indent + "76543210 54321098 32109876 10987654 98765432 76543210 54321098 32109876\n"
text += indent + "Byte0 Byte1 Byte2 Byte3 Byte4 Byte5 Byte6 Byte7\n"
text += indent + paddedstring + "\n"
if show_reverse_bitnumbering:
text += indent + "66665555 55555544 44444444 33333333 33222222 22221111 111111\n"
text += indent + "32109876 54321098 76543210 98765432 10987654 32109876 54321098 76543210\n"
return text | 325eafc0ca9a8d91e3774cc6bc8b91052b01d261 | 21,862 |
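A usage sketch, assuming constants.BITS_IN_FULL_DATA equals 64 as the byte layout in the overview implies:
bits = format(0x0102030405060708, '064b')  # 64-character bit string
print(generate_bit_byte_overview(bits, number_of_indent_spaces=2))
print(generate_bit_byte_overview(bits, show_reverse_bitnumbering=True))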
def return_list_of_file_paths(folder_path):
"""Returns a list of file paths
Args:
folder_path: The folder path were the files are in
Returns:
file_info: List of full file paths
"""
    file_info = []
    list_of_file_names = [fileName for fileName in listdir(folder_path) if isfile(join(folder_path, fileName))]
    # build the full paths from the names already collected instead of listing the directory twice
    list_of_file_paths = [join(folder_path, fileName) for fileName in list_of_file_names]
    file_info.append(list_of_file_names)
    file_info.append(list_of_file_paths)
    return file_info | 7bc67a17b028d68d3ef99fc82cd03e21c34ec803 | 21,863 |
import numpy
def artificial_signal( frequencys, sampling_frequency=16000, duration=0.025 ):
"""
Concatonates a sequence of sinusoids of frequency f in frequencies
"""
sins = map( lambda f : sinusoid(f, sampling_frequency, duration), frequencys)
return numpy.concatenate( tuple(sins) ) | ef67e5ca9b66da8c003108e2fe5eb4ba43d7a564 | 21,864 |
def _sources():
"""Return the subdir name and extension of each of the contact prediction types.
:return: Contact prediction types and location.
:rtype: dict [list [str]]
"""
sources = _sourcenames()
confiledir = ["deepmetapsicov", "deepmetapsicov", "deepmetapsicov"]
confilesuffix = ["psicov", "ccmpred", "deepmetapsicov.con"]
conkittype = ["psicov", "ccmpred", "psicov"]
threshold = [0.2, 0.1, 0.1]
outsinfo = {}
for n in range(len(sources)):
outsinfo[sources[n]] = [confiledir[n], confilesuffix[n],
conkittype[n], threshold[n]]
return outsinfo | f03b6059a106a5fe5619b2b673eb88d9b352e70f | 21,865 |
def pdns_forward(hostname):
"""Get the IP addresses to which the given host has resolved."""
response = get(BASE_API_URL + "pdns/forward/{}".format(hostname))
return response | 3022190035bc6acc0ff1d16da7616703ca339c53 | 21,866 |
def make_conv(in_channels, out_channels, conv_type="normal", kernel_size=3, mask_activation=None, version=2, mask_init_bias=0, depth_multiplier=1, **kwargs):
"""Create a convolution layer. Options: deformable, separable, or normal convolution
"""
assert conv_type in ("deformable", "separable", "normal")
padding = (kernel_size-1)//2
if conv_type == "deformable":
conv_layer = nn.Sequential(
DeformableConv2dBlock(
in_channels, out_channels, kernel_size, padding=padding, bias=False,
mask_activation=mask_activation, version=version, mask_init_bias=mask_init_bias
),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
elif conv_type == "separable":
hidden_channels = in_channels * depth_multiplier
conv_layer = nn.Sequential(
# dw
nn.Conv2d(in_channels, hidden_channels, kernel_size, padding=padding, groups=in_channels, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU6(inplace=True),
# pw
nn.Conv2d(hidden_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True)
)
nn.init.kaiming_normal_(conv_layer[0].weight, mode="fan_out", nonlinearity="relu")
nn.init.kaiming_normal_(conv_layer[3].weight, mode="fan_out", nonlinearity="relu")
else: # normal convolution
conv_layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
nn.init.kaiming_normal_(conv_layer[0].weight, mode="fan_out", nonlinearity="relu")
return conv_layer | 515e0544aae6464c966612b3c32e23e878a9260d | 21,867 |
import requests
from selenium import webdriver
from time import sleep
from typing import Callable
def url_to_html_func(kind="requests") -> Callable:
"""Get a url_to_html function of a given kind."""
url_to_html = None
if kind == "requests":
def url_to_html(url):
r = requests.get(url)
if r.status_code != 200:
print(
f"An error occured. Returning the response object for you to analyze: {r}"
)
return r
return r.content
elif kind == "chrome":
def url_to_html(url, wait=2):
b = webdriver.Chrome()
b.get(url)
if isinstance(wait, (int, float)):
sleep(wait)
html = b.page_source
b.close()
return html
else:
raise ValueError(f"Unknown url_to_html value: {url_to_html}")
assert callable(url_to_html), "Couldn't make a url_to_html function"
return url_to_html | f19643f1d49212e643fc0fe50ce69f5f4a444c09 | 21,868 |
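A usage sketch; the "chrome" variant additionally needs a local chromedriver on PATH:
url_to_html = url_to_html_func("requests")
html = url_to_html("https://example.com")

# browser-rendered variant; waits before grabbing the page source
# html = url_to_html_func("chrome")("https://example.com", wait=5)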
import os
def get_payload_command(job):
"""
Return the full command for executing the payload, including the sourcing of all setup files and setting of
environment variables.
:param job: job object.
:raises PilotException: TrfDownloadFailure.
:return: command (string).
"""
show_memory_usage()
# Should the pilot do the setup or does jobPars already contain the information?
preparesetup = should_pilot_prepare_setup(job.noexecstrcnv, job.jobparams)
# Get the platform value
# platform = job.infosys.queuedata.platform
# Is it a user job or not?
userjob = job.is_analysis()
    if userjob:
        logger.info('pilot is running a user analysis job')
    else:
        logger.info('pilot is running a production job')
resource_name = get_resource_name() # 'grid' if no hpc_resource is set
resource = __import__('pilot.user.atlas.resource.%s' % resource_name, globals(), locals(), [resource_name], 0) # Python 3, -1 -> 0
# get the general setup command and then verify it if required
cmd = resource.get_setup_command(job, preparesetup)
if cmd:
ec, diagnostics = resource.verify_setup_command(cmd)
if ec != 0:
job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(ec)
raise PilotException(diagnostics, code=ec)
# make sure that remote file can be opened before executing payload
catchall = job.infosys.queuedata.catchall.lower() if job.infosys.queuedata.catchall else ''
if config.Pilot.remotefileverification_log and 'remoteio_test=false' not in catchall:
ec = 0
diagnostics = ""
not_opened_turls = ""
try:
ec, diagnostics, not_opened_turls = open_remote_files(job.indata, job.workdir)
except Exception as e:
logger.warning('caught exception: %s' % e)
else:
# read back the base trace report
path = os.path.join(job.workdir, config.Pilot.base_trace_report)
if not os.path.exists(path):
logger.warning('base trace report does not exist (%s) - input file traces should already have been sent' % path)
else:
process_remote_file_traces(path, job, not_opened_turls)
# fail the job if the remote files could not be verified
if ec != 0:
job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(ec)
raise PilotException(diagnostics, code=ec)
else:
logger.debug('no remote file open verification')
if is_standard_atlas_job(job.swrelease):
# Normal setup (production and user jobs)
logger.info("preparing normal production/analysis job setup command")
cmd = get_normal_payload_command(cmd, job, preparesetup, userjob)
else: # Generic, non-ATLAS specific jobs, or at least a job with undefined swRelease
logger.info("generic job (non-ATLAS specific or with undefined swRelease)")
cmd = get_generic_payload_command(cmd, job, preparesetup, userjob)
# add any missing trailing ;
if not cmd.endswith(';'):
cmd += '; '
# only if not using a user container
if not job.imagename:
site = os.environ.get('PILOT_SITENAME', '')
variables = get_payload_environment_variables(cmd, job.jobid, job.taskid, job.attemptnr, job.processingtype, site, userjob)
cmd = ''.join(variables) + cmd
# prepend PanDA job id in case it is not there already (e.g. runcontainer jobs)
if 'export PandaID' not in cmd:
cmd = "export PandaID=%s;" % job.jobid + cmd
cmd = cmd.replace(';;', ';')
# For direct access in prod jobs, we need to substitute the input file names with the corresponding TURLs
# get relevant file transfer info
#use_copy_tool, use_direct_access, use_pfc_turl = get_file_transfer_info(job)
#if not userjob and use_direct_access and job.transfertype == 'direct':
if not userjob and not job.is_build_job() and job.has_remoteio(): ## ported from old logic
        ## ported from old logic, but it still looks strange (anisyonk)
        ## the "PoolFileCatalog.xml" should already contain proper TURL values since it is created by create_input_file_metadata()
        ## if the goal is just to patch the `writetofile` file, then the logic should be cleaned up and decoupled
        ## in any case, instead of parsing the file, it is much easier to generate proper `writetofile` content from the beginning with TURL data
lfns = job.get_lfns_and_guids()[0]
cmd = replace_lfns_with_turls(cmd, job.workdir, "PoolFileCatalog.xml", lfns, writetofile=job.writetofile)
# Explicitly add the ATHENA_PROC_NUMBER (or JOB value)
cmd = add_athena_proc_number(cmd)
show_memory_usage()
logger.info('payload run command: %s' % cmd)
return cmd | dfdda096db2710684f7e0863491c5b28fa799d8e | 21,869 |
import sys
def parseExonBounds(start, end, n, sizes, offsets):
"""
Parse the last 2 columns of a BED12 file and return a list of tuples with
(exon start, exon end) entries.
If the line is malformed, issue a warning and return (start, end)
"""
offsets = offsets.strip(",").split(",")
sizes = sizes.strip(",").split(",")
offsets = offsets[0:n]
sizes = sizes[0:n]
try:
starts = [start + int(x) for x in offsets]
ends = [start + int(x) + int(y) for x, y in zip(offsets, sizes)]
    except ValueError:
sys.stderr.write("Warning: Received an invalid exon offset ({0}) or size ({1}), using the entry bounds instead ({2}-{3})\n".format(offsets, sizes, start, end))
return [(start, end)]
if len(offsets) < n or len(sizes) < n:
sys.stderr.write("Warning: There were too few exon start/end offsets ({0}) or sizes ({1}), using the entry bounds instead ({2}-{3})\n".format(offsets, sizes, start, end))
return [(start, end)]
return [(x, y) for x, y in zip(starts, ends)] | 8252fea73d80dc0a78cd28fb05e63eb687fb1f27 | 21,870 |
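A worked example with two exons; the offsets and sizes mirror BED12 columns 12 and 11:
# exon 1: 100-130 (offset 0, size 30); exon 2: 160-200 (offset 60, size 40)
print(parseExonBounds(100, 200, 2, "30,40,", "0,60,"))    # [(100, 130), (160, 200)]
print(parseExonBounds(100, 200, 2, "30,oops,", "0,60,"))  # warns, falls back to [(100, 200)]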
def head(filename, format=None, **kwargs):
"""
Returns the header of a file. Reads the information about the content of the file
without actually loading the data. Returns either an Header class or an Archive
accordingly if the file contains a single object or it is an archive, respectively.
Parameters
----------
filename: str, file-like object
The filename of the data file to read. It can also be a file-like object.
format: str, Format
One of the implemented formats. See documentation for more details.
kwargs: dict
Additional options for performing the reading. The list of options depends
on the format.
"""
filename = find_file(filename)
return formats.get_format(format, filename=filename).head(filename, **kwargs) | f7e0bb98f95b378bd582801b00a26c72aef0b677 | 21,871 |
def uncomment_magic(
source, language="python", global_escape_flag=True, explicitly_code=True
):
"""Unescape Jupyter magics"""
parser = StringParser(language)
next_is_magic = False
for pos, line in enumerate(source):
if not parser.is_quoted() and (
next_is_magic
or is_magic(line, language, global_escape_flag, explicitly_code)
):
source[pos] = unesc(line, language)
next_is_magic = language == "python" and _LINE_CONTINUATION_RE.match(line)
parser.read_line(line)
return source | 4d3444844e51c4821f151a3ce9813c8475fa6bd7 | 21,872 |
from re import T  # NOTE: likely wrong - `T` is used below as a translation function, but re.T is a regex flag
import logging
def clean_nice_ionice_parameters(value):
"""Verify that the passed parameters are not exploits"""
if value:
parser = ErrorCatchingArgumentParser()
# Nice parameters
parser.add_argument("-n", "--adjustment", type=int)
# Ionice parameters, not supporting -p
parser.add_argument("--classdata", type=int)
parser.add_argument("-c", "--class", type=int)
parser.add_argument("-t", "--ignore", action="store_true")
try:
parser.parse_args(value.split())
except ValueError:
# Also log at start-up if invalid parameter was set in the ini
msg = "%s: %s" % (T("Incorrect parameter"), value)
logging.error(msg)
return msg, None
return None, value | 9bbde29a4a8c19441d4c1510c29870c87d928142 | 21,873 |
from random import choice, randint
from string import ascii_letters as letters, digits
def rand_alnum(length=0):
    """
    Create a random alphanumeric string.
    :return: A random string of the given length, or of a random
        length with 10 <= length <= 30 when no length is given.
    """
    jibber = ''.join([letters, digits])
    return ''.join(choice(jibber) for _ in range(length or randint(10, 30))) | 7a095aabcec5428ea991220ae46c252c47b3436a | 21,874 |
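Usage:
print(rand_alnum())    # e.g. 'k3J9xQz2Lm8p'  (random length between 10 and 30)
print(rand_alnum(5))   # e.g. 'aB3x9'         (fixed length 5)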
def _GenerateGstorageLink(c, p, b):
"""Generate Google storage link given channel, platform, and build."""
return 'gs://chromeos-releases/%s-channel/%s/%s/' % (c, p, b) | e5e4a0eb9e27b0f2d74b28289c8f02dc0454f438 | 21,875 |
def parse_decl(inputtype, flags):
"""
Parse type declaration
@param inputtype: file name or C declarations (depending on the flags)
@param flags: combination of PT_... constants or 0
@return: None on failure or (name, type, fields) tuple
"""
if len(inputtype) != 0 and inputtype[-1] != ';':
inputtype = inputtype + ';'
return ida_typeinf.idc_parse_decl(None, inputtype, flags) | a5cf042256a35cface8afd024d5d31ae5eccbe72 | 21,876 |
def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training,
kernel_initializer, residual=True):
"""Post-attention processing."""
monitor_dict = {}
# post-attention projection (back to `d_model`)
proj_o = tf.get_variable("o/kernel", [d_model, n_head, d_head],
dtype=h.dtype, initializer=kernel_initializer)
einsum_prefix = get_einsum_prefix(attn_vec.shape.ndims - 2)
einsum_str = "{0}nd,hnd->{0}h".format(einsum_prefix)
attn_out = tf.einsum(einsum_str, attn_vec, proj_o)
proj_bias = tf.get_variable("o/bias",
[d_model], dtype=h.dtype,
initializer=tf.zeros_initializer())
attn_out += proj_bias
attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
output, res_lnorm_dict = residual_and_layer_norm(
h, attn_out, use_residual=residual)
monitor_dict = update_monitor_dict(monitor_dict, res_lnorm_dict)
return output, monitor_dict | d48157d3759ab273b78a45dd2d150e15f66b44bf | 21,877 |
def get_recursively(in_dict, search_pattern):
"""
    Takes a dict with nested lists and dicts,
    and recursively searches all nested dicts for keys
    matching the provided search pattern.
    """
fields_found = []
for key, value in in_dict.items():
if key == search_pattern:
fields_found.append(value)
elif isinstance(value, dict):
results = get_recursively(value, search_pattern)
for result in results:
fields_found.append(result)
    return fields_found | 3c9011894c24c25a05d24f8b3b5369c9334dc2c7 | 21,878 |
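Usage; note that only nested dicts are traversed (lists are not descended into):
doc = {"a": 1, "nested": {"a": 2, "deeper": {"a": 3, "b": 4}}}
print(get_recursively(doc, "a"))  # [1, 2, 3] (order follows dict traversal)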
def neals_funnel(ndims = 10,
name = 'neals_funnel'):
"""Creates a funnel-shaped distribution.
This distribution was first described in [1]. The distribution is constructed
by transforming a N-D gaussian with scale [3, 1, ...] by scaling all but the
first dimensions by `exp(x0 / 2)` where `x0` is the value of the first
dimension.
This distribution is notable for having a relatively very narrow "neck" region
which is challenging for HMC to explore. This distribution resembles the
posteriors of centrally parameterized hierarchical models.
Args:
ndims: Dimensionality of the distribution. Must be at least 2.
name: Name to prepend to ops created in this function, as well as to the
`code_name` in the returned `TargetDensity`.
Returns:
target: `TargetDensity` specifying the funnel distribution. The
`distribution` attribute is an instance of `TransformedDistribution`.
Raises:
ValueError: If ndims < 2.
#### References
1. Neal, R. M. (2003). Slice sampling. Annals of Statistics, 31(3), 705-767.
"""
if ndims < 2:
raise ValueError(f'ndims must be at least 2, saw: {ndims}')
with tf.name_scope(name):
def bijector_fn(x):
"""Funnel transform."""
batch_shape = tf.shape(x)[:-1]
scale = tf.concat(
[
tf.ones(tf.concat([batch_shape, [1]], axis=0)),
tf.exp(x[Ellipsis, :1] / 2) *
tf.ones(tf.concat([batch_shape, [ndims - 1]], axis=0)),
],
axis=-1,
)
return tfb.Scale(scale)
mg = tfd.MultivariateNormalDiag(
loc=tf.zeros(ndims), scale_diag=[3.] + [1.] * (ndims - 1))
dist = tfd.TransformedDistribution(
mg, bijector=tfb.MaskedAutoregressiveFlow(bijector_fn=bijector_fn))
return target_spec.TargetDensity.from_distribution(
distribution=dist,
constraining_bijectors=tfb.Identity(),
expectations=dict(
params=target_spec.expectation(
fn=tf.identity,
human_name='Parameters',
# The trailing dimensions come from a product distribution of
# independent standard normal and a log-normal with a scale of
# 3 / 2.
# See https://en.wikipedia.org/wiki/Product_distribution for the
# formulas.
# For the mean, the formulas yield zero.
ground_truth_mean=np.zeros(ndims),
# For the standard deviation, all means are zero and standard
# deivations of the normals are 1, so the formula reduces to
# `sqrt((sigma_log_normal + mean_log_normal**2))` which reduces
# to `exp((sigma_log_normal)**2)`.
ground_truth_standard_deviation=np.array([3.] +
[np.exp((3. / 2)**2)] *
(ndims - 1)),
),),
code_name=f'{name}_ndims_{ndims}',
human_name='Neal\'s Funnel',
) | 99719ce2e192034472bf8082c918c39a6ab1a96f | 21,879 |
def _has_desired_permit(permits, acategory, astatus):
"""
    return True if permits contains one whose
    category_code and status_code match the given ones
"""
if permits is None:
return False
for permit in permits:
if permit.category_code == acategory and\
permit.status_code == astatus:
return True
return False | 4cac23303e2b80e855e800a7d55b7826fabd9992 | 21,880 |
def colon(mac):
""" aa:aa:aa:aa:aa:aa """
return _reformat(mac, separator=':', digit_grouping=2) | 7930fdb449f99aa99a2c052be4eee24a8e4605ab | 21,881 |
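A usage sketch, per the docstring format; this assumes the private _reformat helper accepts common MAC notations:
print(colon("aa-bb-cc-dd-ee-ff"))  # 'aa:bb:cc:dd:ee:ff'
print(colon("aabb.ccdd.eeff"))     # 'aa:bb:cc:dd:ee:ff'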
import requests
from bs4 import BeautifulSoup
import re
def create_strings_from_wikipedia(minimum_length, count, lang):
"""
    Create all strings by randomly picking Wikipedia articles and taking sentences from them.
"""
sentences = []
while len(sentences) < count:
# We fetch a random page
page_url = "https://{}.wikipedia.org/wiki/Special:Random".format(lang)
try:
page = requests.get(page_url, timeout=3.0) # take into account timeouts
except:
continue
soup = BeautifulSoup(page.text, "html.parser")
for script in soup(["script", "style"]):
script.extract()
# Only take a certain length
lines = list(
filter(
lambda s: len(s.split(" ")) > minimum_length
and not "Wikipedia" in s
and not "wikipedia" in s,
[
" ".join(re.findall(r"[\w']+", s.strip()))[0:200]
for s in soup.get_text().splitlines()
],
)
)
# Remove the last lines that talks about contributing
sentences.extend(lines[0: max([1, len(lines) - 5])])
return sentences[0:count] | 92cc09a081a257d61530e3ddaf8f8215412e5b0d | 21,882 |
def computeHashCheck(ringInputString, ringSize):
"""Calculate the knot hash check.
Args:
ringInputString (str): The list of ints to be hashed as a comma-separated list.
ringSize (int): The size of the ring to be \"knotted\".
Returns:
int: Value of the hash check.
"""
ringInputList = [int(i) for i in ringInputString.split(',')]
ringContents = [i for i in range(ringSize)]
cursorPosition = 0
skipSize = 0
# Hashing algorithm as defined in AoC Day 10 instructions...
for length in ringInputList:
#
# Duplicate the ring contents to allow for exceeding the length of the original list
#
doubleContents = ringContents + ringContents
# Reverse the order of that length of elements in the list, starting with the element
# at the current position
sublist = doubleContents[cursorPosition:cursorPosition+length]
sublist.reverse()
doubleContents[cursorPosition:cursorPosition+length] = sublist
if cursorPosition + length > ringSize:
ringContents = doubleContents[ringSize:cursorPosition+ringSize] + doubleContents[cursorPosition:ringSize]
else:
ringContents = doubleContents[:ringSize]
# Move the current position forward by that length plus the skip size
        cursorPosition = cursorPosition + length + skipSize
        # Deal with going around the ring; a modulo handles positions that
        # exceed the ring size by more than one full turn (large skip sizes)
        cursorPosition %= ringSize
# Increase the skip size by one
skipSize += 1
# The hash is then the product of the first two elements in the transformed list
check = ringContents[0] * ringContents[1]
#print(ringContents)
return check | 75dce4aacdd4ae03fa34532471a21a43a81fbd13 | 21,883 |
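As a quick sanity check, the sample from Advent of Code 2017 day 10 (lengths 3,4,1,5 on a ring of size 5) yields the expected check value:
print(computeHashCheck("3,4,1,5", 5))  # 12  (final ring is [3, 4, 2, 1, 0])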
from masci_tools.util.xml.xml_setters_basic import xml_delete_tag
from masci_tools.util.xml.common_functions import check_complex_xpath
from typing import Union
from typing import Iterable
from typing import Any
def delete_tag(xmltree: Union[etree._Element, etree._ElementTree],
schema_dict: 'fleur_schema.SchemaDict',
tag_name: str,
complex_xpath: 'etree._xpath' = None,
occurrences: Union[int, Iterable[int]] = None,
**kwargs: Any) -> Union[etree._Element, etree._ElementTree]:
"""
This method deletes a tag with a uniquely identified xpath.
:param xmltree: an xmltree that represents inp.xml
:param schema_dict: InputSchemaDict containing all information about the structure of the input
:param tag: str of the tag to delete
:param complex_xpath: an optional xpath to use instead of the simple xpath for the evaluation
    :param occurrences: int or list of int. Which occurrence of the parent nodes to delete a tag.
By default all nodes are used.
Kwargs:
:param contains: str, this string has to be in the final path
:param not_contains: str, this string has to NOT be in the final path
:returns: xmltree with deleted tags
"""
base_xpath = schema_dict.tag_xpath(tag_name, **kwargs)
if complex_xpath is None:
complex_xpath = base_xpath
check_complex_xpath(xmltree, base_xpath, complex_xpath)
xmltree = xml_delete_tag(xmltree, complex_xpath, occurrences=occurrences)
return xmltree | 2e4d9276ecc8d42c0890e81aa0afa61adf23c178 | 21,884 |
def cart2pol_vectorised(x, y):
"""
A vectorised version of the cartesian to polar conversion.
    :param x: array of x coordinates
    :param y: array of y coordinates
    :return: tuple (r, th) of radii and angles in radians
"""
r = np.sqrt(np.add(np.power(x, 2), np.power(y, 2)))
th = np.arctan2(y, x)
return r, th | dbef7d4663990a9e3c775e53649ab30e0dc8767a | 21,885 |
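Usage:
import numpy as np
r, th = cart2pol_vectorised(np.array([1.0, 0.0, -1.0]), np.array([0.0, 1.0, 0.0]))
print(r)   # [1. 1. 1.]
print(th)  # [0.         1.57079633 3.14159265]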
def encryption(text):
"""
    Encrypt text for saving ideas using AES-CBC with a module-level key and IV
    :param text: plaintext, repeated 16x so its length is a multiple of the AES block size
    :return: ciphertext bytes
"""
return AES.new(cipher_key, AES.MODE_CBC, cipher_IV456).encrypt(text * 16) | c321dd2e0c95f15c9a4b04f1b13471a1f1a7aceb | 21,886 |
import os
def alteryx_job_path_is_file(job_dict: dict) -> bool:
"""
Alteryx job path must point to an existing file.
"""
if not os.path.isfile(job_dict["path"]):
raise InvalidFilePathError("Alteryx path file not exists")
return True | 9324a906184c9046974a055dba9af101aaf4f51a | 21,887 |
def _concat(to_stack):
""" function to stack (or concatentate) depending on dimensions """
if np.asarray(to_stack[0]).ndim >= 2:
return np.concatenate(to_stack)
else:
return np.hstack(to_stack) | 1b4ab755aed3e1823629301e83d070433d918c7c | 21,888 |
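Usage:
import numpy as np
print(_concat([np.ones((2, 3)), np.zeros((1, 3))]).shape)  # (3, 3): 2-D inputs are concatenated
print(_concat([np.ones(2), np.zeros(3)]).shape)            # (5,):   1-D inputs are hstacked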
import math
def make_orthonormal_matrix(n):
"""
Makes a square matrix which is orthonormal by concatenating
random Householder transformations
Note: May not distribute uniformly in the O(n) manifold.
Note: Naively using ortho_group, special_ortho_group in scipy will result in unbearable computing time! Not useful
"""
A = np.identity(n)
d = np.zeros(n)
d[n-1] = np.random.choice([-1.0, 1.0])
for k in range(n-2, -1, -1):
# generate random Householder transformation
x = np.random.randn(n-k)
s = np.sqrt((x**2).sum()) # norm(x)
sign = math.copysign(1.0, x[0])
s *= sign
d[k] = -sign
x[0] += s
beta = s * x[0]
# apply the transformation
y = np.dot(x,A[k:n,:]) / beta
A[k:n, :] -= np.outer(x,y)
# change sign of rows
A *= d.reshape(n,1)
return A | ec7f39eba0d471f377519db86cee85a0b640593b | 21,889 |
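A quick orthonormality check on the result:
import numpy as np
A = make_orthonormal_matrix(5)
print(np.allclose(A @ A.T, np.eye(5)))  # True: rows are orthonormal
print(np.allclose(A.T @ A, np.eye(5)))  # True: columns are orthonormal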
def device_to_target(device: Device):
"""Map a Netbox VirtualMachine to a Prometheus target"""
target = Target(device.name)
target.add_label("type", TargetType.DEVICE.value)
target.add_label("status", device.status)
extract_tenant(device, target)
if hasattr(device, "primary_ip") and device.primary_ip is not None:
target.add_label("ip", str(IPNetwork(device.primary_ip.address).ip))
if hasattr(device, "device_role") and device.device_role is not None:
target.add_label("role", device.device_role.name)
target.add_label("role_slug", device.device_role.slug)
if hasattr(device, "device_type") and device.device_type is not None:
target.add_label("device_type", device.device_type.model)
target.add_label("device_type_slug", device.device_type.slug)
if hasattr(device, "platform") and device.platform is not None:
target.add_label("platform", device.platform.name)
target.add_label("platform_slug", device.platform.slug)
if hasattr(device, "site") and device.site is not None:
target.add_label("site", device.site.name)
target.add_label("site_slug", device.site.slug)
services = []
for service in Service.objects.filter(device__id=device.id).all():
services.append(service.name)
if len(services) > 0:
target.add_label("services", ',{},'.format(','.join(services)))
# todo: Add more fields
# tags
return target | d1b52bc96a27ac1470ce15093ae11a25c18e96f1 | 21,890 |
from typing import Dict
from typing import Tuple
def build_synthetic_dataset_cae(window_size:int, **kwargs:Dict)->Tuple[SingleGapWindowsSequence, SingleGapWindowsSequence]:
"""Return SingleGapWindowsSequence for training and testing.
Parameters
--------------------------
window_size: int,
        Window size to use for rendering the synthetic datasets.
"""
return build_synthetic_dataset(window_size, SingleGapWindowsSequence, **kwargs) | 9ab09aadf9578a3475a2e9bbaa7cfa75a3adacdf | 21,891 |
import random
def montecarlo_2048(game, simulations_per_move, steps, count_zeros=False, print_averages=True, return_scores=False):
"""
Test each possible move, run montecarlo simulations and return a dictionary of average scores,
one score for each possible move
"""
# Retrieve game score at the current state
game_score = game.calculate_score()
# Retrieve list of possible moves
allowed_moves = game.check_allowed_moves()
# Create a dictionary to store average scores per allowable move
average_scores = np.zeros(4)
# Will contain 4 lists of scores, one list for each starting move (LEFT, DOWN, RIGHT, UP)
scores_per_move = [[0]] * 4
for move in allowed_moves:
score_list = []
for simulation in range(simulations_per_move):
# Create a a copy of the game at the current state
game_copy = deepcopy(game)
game_copy.make_move(move)
for i in range(steps):
# Check if there is any move allowed
if len(game_copy.check_allowed_moves()) > 0:
# Pick a random move within the allowed ones
random_move = random.choice(game_copy.check_allowed_moves())
game_copy.make_move(random_move)
                # append simulation result
                # NOTE: both branches were identical, so the count_zeros flag
                # currently has no effect on the score that is recorded
                score_list.append(game_copy.calculate_score(score_type="simple_sum"))
scores_per_move[move-1] = score_list
average_scores[move-1] = np.average(score_list)
if print_averages:
print("[1] LEFT score: ", average_scores[0])
print("[2] DOWN score: ", average_scores[1])
print("[3] RIGHT score: ", average_scores[2])
print("[4] UP score: ", average_scores[3])
print("average_scores: ", average_scores)
choice = np.argmax(average_scores) + 1
steal = 0
for value in average_scores:
if value > 0:
steal = 1
if steal == 0:
random_scores = np.zeros(4)
random_scores[np.random.choice([0,1,2,3])] = 1
return random_scores
if return_scores:
return scores_per_move
else:
return average_scores | 1bfd9beb78e6f832105b61d28af2218b6a86eb1a | 21,892 |
def getAggregation(name, local=False, minOnly=False, maxOnly=False):
"""
Get aggregation.
"""
toReturn = STATISTICS[name].getStatistic()
if local:
return STATISTICS[name].getLocalValue()
elif minOnly and "min" in toReturn:
return toReturn["min"]
elif maxOnly and "max" in toReturn:
return toReturn["max"]
else:
return toReturn | 2f009c4db871fe56a8c26ee75728259e33b53280 | 21,893 |
def get_gene_symbol(row):
"""Extracts gene name from annotation
Args:
row (pandas.Series): annotation info (str) at 'annotation' index
Returns:
gene_symbol (str): gene name(s)
"""
pd.options.mode.chained_assignment = None
lst = row["annotation"].split(",")
genes = [token.split("|")[0] for token in lst]
gene_symbol = ",".join(set(genes))
return gene_symbol | d564266fa6a814b4c7cfce9f7f2fb8d5e1c1024f | 21,894 |
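A usage sketch with a hypothetical annotation string of "gene|detail|score" tokens:
import pandas as pd
row = pd.Series({"annotation": "BRCA1|missense|0.1,BRCA1|synonymous|0.0,TP53|nonsense|0.9"})
print(get_gene_symbol(row))  # e.g. 'BRCA1,TP53' (set iteration, so order may vary)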
import datetime
def session_login():
"""
Session login
:return:
"""
print("Session Login")
# Get the ID token sent by the client
# id_token = request.headers.get('csfToken')
id_token = request.values.get('idToken')
# Set session expiration to 5 days.
expires_in = datetime.timedelta(days=5)
try:
# Create the session cookie. This will also verify the ID token in the process.
# The session cookie will have the same claims as the ID token.
session_cookie = auth.create_session_cookie(id_token, expires_in=expires_in)
response = jsonify({'status': 'success'})
# Set cookie policy for session cookie.
expires = datetime.datetime.now() + expires_in
response.set_cookie('session', session_cookie, expires=expires, httponly=True, secure=True)
return response
except exceptions.FirebaseError:
return abort(401, 'Failed to create a session cookie') | 6c5c27d50c4c62e3f62b67433f4161835f2a6478 | 21,895 |
from django.core.cache import get_cache
def get_cache_factory(cache_type):
"""
Helper to only return a single instance of a cache
As of django 1.7, may not be needed.
"""
if cache_type is None:
cache_type = 'default'
if not cache_type in cache_factory:
cache_factory[cache_type] = get_cache(cache_type)
return cache_factory[cache_type] | 89da971b92395c4e604d51d66148688f2ab4f362 | 21,896 |
import six
import pickle
def ruleset_from_pickle(file):
""" Read a pickled ruleset from disk
This can be either pickled Rules or Ryu Rules.
file: The readable binary file-like object, or the name of the input file
return: A ruleset, a list of Rules
"""
if six.PY3:
ruleset = pickle.load(file, encoding='latin1')
else:
ruleset = pickle.load(file)
# Did we load a list of Rules()?
if isinstance(ruleset, list) and ruleset and isinstance(ruleset[0], Rule):
return ruleset
# Must be Ryu rules
if isinstance(ruleset, dict):
ruleset = ruleset["flow_stats"]
ruleset = [rule_from_ryu(r) for r in ruleset]
return ruleset | f68e005ece3697126dd0528952e1610695054332 | 21,897 |
import os
def gpu_memory_usage():
""" return gpu memory usage for current process in MB """
try:
s = nvidia_smi(robust=False)
except Exception:
return 0
gpu_processes = _nvidia_smi_parse_processes(s)
my_pid = os.getpid()
my_memory_usage_mb = 0
for gpu_idx, pid, type, process_name, memory_usage_mb in gpu_processes:
if pid == my_pid:
my_memory_usage_mb += memory_usage_mb
return my_memory_usage_mb | 58b108ddca8a5b502cc23c97fc147bfe2d73b04d | 21,898 |
def delayed_read_band_data(fpar_dataset_name, qc_dataset_name):
"""Read band data from a HDF4 file.
Assumes the first dimensions have a size 1.
FparLai_QC.
Bit no. 5-7 3-4 2 1 0
Acceptable values:
000 00 0 0 0
001 01 0 0 0
Unacceptable mask:
110 10 1 1 1
"""
with rasterio.open(fpar_dataset_name) as dataset:
fpar_data = dataset.read()[0]
with rasterio.open(qc_dataset_name) as dataset:
qc_data = dataset.read()[0]
assert fpar_data.shape == tile_shape
assert qc_data.shape == tile_shape
# Ignore invalid and poor quality data.
fpar_data[
np.logical_or(fpar_data > max_valid, np.bitwise_and(qc_data, 0b11010111))
] = fill_value
return fpar_data | cda95533b07101d58883c0f5fa32870c48c09e2a | 21,899 |