content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def lpt_prototype(mesh,
                  nc=FLAGS.nc,
                  bs=FLAGS.box_size,
                  batch_size=FLAGS.batch_size,
                  a0=FLAGS.a0,
                  a=FLAGS.af,
                  nsteps=FLAGS.nsteps):
    """
    Prototype of function computing LPT displacement.

    Builds the mesh-tensorflow graph that draws a Gaussian linear field from
    a tabulated power spectrum, applies single-resolution LPT initialisation,
    paints the particles back onto a grid (with halo exchange between spatial
    blocks) and reshapes the painted field to a flat [batch, nx, ny, nz]
    layout.

    Returns output tensorflow and mesh tensorflow tensors: the initial
    conditions and the final painted field.
    """
    # Tabulated linear matter power spectrum at a=1.0, wrapped in a spline so
    # it can be evaluated at arbitrary wavenumbers.
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)
    # Intermediate scale factors; currently unused because the nbody
    # evolution below is commented out.
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    # Define the named dimensions
    # Parameters of the small scales decomposition
    n_block_x = FLAGS.nx
    n_block_y = FLAGS.ny
    n_block_z = 1
    halo_size = FLAGS.hsize
    # The halo exchanged around a block cannot exceed half the block size.
    if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):
        new_size = int(0.5 *
                       min(nc // n_block_x, nc // n_block_y, nc // n_block_z))
        print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))
        halo_size = new_size
    # Parameters of the large scales decomposition
    downsampling_factor = 0
    lnc = nc // 2**downsampling_factor
    # Full-resolution real-space grid dimensions.
    fx_dim = mtf.Dimension("nx", nc)
    fy_dim = mtf.Dimension("ny", nc)
    fz_dim = mtf.Dimension("nz", nc)
    # Fourier-space dimensions.
    tfx_dim = mtf.Dimension("tx", nc)
    tfy_dim = mtf.Dimension("ty", nc)
    tfz_dim = mtf.Dimension("tz", nc)
    # Fourier-space dimensions for the low-resolution grid.
    tx_dim = mtf.Dimension("tx_lr", nc)
    ty_dim = mtf.Dimension("ty_lr", nc)
    tz_dim = mtf.Dimension("tz_lr", nc)
    # Block decomposition: number of blocks per axis, and samples per block.
    nx_dim = mtf.Dimension('nx_block', n_block_x)
    ny_dim = mtf.Dimension('ny_block', n_block_y)
    nz_dim = mtf.Dimension('nz_block', n_block_z)
    sx_dim = mtf.Dimension('sx_block', nc // n_block_x)
    sy_dim = mtf.Dimension('sy_block', nc // n_block_y)
    sz_dim = mtf.Dimension('sz_block', nc // n_block_z)
    k_dims = [tx_dim, ty_dim, tz_dim]
    batch_dim = mtf.Dimension("batch", batch_size)
    pk_dim = mtf.Dimension("npk", len(plin))
    pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])
    # Compute necessary Fourier kernels
    kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
    kx = mtf.import_tf_tensor(mesh,
                              kvec[0].squeeze().astype('float32'),
                              shape=[tfx_dim])
    ky = mtf.import_tf_tensor(mesh,
                              kvec[1].squeeze().astype('float32'),
                              shape=[tfy_dim])
    kz = mtf.import_tf_tensor(mesh,
                              kvec[2].squeeze().astype('float32'),
                              shape=[tfz_dim])
    # NOTE(review): kernels are listed in permuted (y, z, x) order —
    # presumably to match the transposed FFT layout used downstream; confirm
    # against mtfpm before changing.
    kv = [ky, kz, kx]
    # kvec for low resolution grid
    kvec_lr = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
    kx_lr = mtf.import_tf_tensor(mesh,
                                 kvec_lr[0].squeeze().astype('float32'),
                                 shape=[tx_dim])
    ky_lr = mtf.import_tf_tensor(mesh,
                                 kvec_lr[1].squeeze().astype('float32'),
                                 shape=[ty_dim])
    kz_lr = mtf.import_tf_tensor(mesh,
                                 kvec_lr[2].squeeze().astype('float32'),
                                 shape=[tz_dim])
    kv_lr = [ky_lr, kz_lr, kx_lr]
    # Logical shapes used by the different simulation stages.
    shape = [batch_dim, fx_dim, fy_dim, fz_dim]
    lr_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
    hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]
    part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
    # Begin simulation
    initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)
    # # Reshaping array into high resolution mesh
    # field = mtf.slicewise(lambda x:tf.expand_dims(tf.expand_dims(tf.expand_dims(x, axis=1),axis=1),axis=1),
    #                       [initc],
    #                       output_dtype=tf.float32,
    #                       output_shape=hr_shape,
    #                       name='my_reshape',
    #                       splittable_dims=lr_shape[:-1]+hr_shape[1:4]+part_shape[1:3])
    #
    state = mtfpm.lpt_init_single(
        initc,
        a0,
        kv_lr,
        halo_size,
        lr_shape,
        hr_shape,
        part_shape[1:],
        antialias=True,
    )
    # Here we can run our nbody
    final_state = state  # mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)
    # paint the field
    final_field = mtf.zeros(mesh, shape=hr_shape)
    # Pad every spatial block with the halo region before painting.
    for block_size_dim in hr_shape[-3:]:
        final_field = mtf.pad(final_field, [halo_size, halo_size],
                              block_size_dim.name)
    final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)
    # Halo exchange: fold the padded borders back into neighbouring blocks.
    for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):
        final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,
                                      halo_size)
    # Remove borders
    for block_size_dim in hr_shape[-3:]:
        final_field = mtf.slice(final_field, halo_size, block_size_dim.size,
                                block_size_dim.name)
    # final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])
    # Hack using custom reshape because mesh is pretty dumb
    # NOTE(review): selecting [:, 0, 0, 0] assumes a single block per axis at
    # this point — confirm against the decomposition settings.
    final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],
                                output_dtype=tf.float32,
                                output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],
                                name='my_dumb_reshape',
                                splittable_dims=part_shape[:-1] + hr_shape[:4])
    return initc, final_field
## | ab9dfc52ddc26a62f9c9bc0b62dec044d0262d79 | 22,500 |
def in_collision(box1: OrientedBox, box2: OrientedBox) -> bool:
    """
    Check whether two oriented boxes collide.

    A cheap test approximating each box by a circle is run first; the exact
    polygon intersection is only evaluated when the circles overlap.
    :param box1: Oriented box (e.g., of ego)
    :param box2: Oriented box (e.g., of other tracks)
    :return True if there is a collision between the two boxes.
    """
    if not collision_by_radius_check(box1, box2):
        return False
    return bool(box1.geometry.intersects(box2.geometry))
def process_embedded_query_expr(input_string):
    """
    This function scans through the given script and identifies any
    path/metadata expressions of the form ``${...}``. For each expression
    found, a unique python variable name (``PBK_<n>``) is generated and the
    expression is substituted by that name; repeated expressions reuse the
    same name. A ``$`` inside quoted strings or ``#`` comments is passed
    through untouched.

    :param str input_string: The input script
    :return: A 2-element tuple of the substituted string and a dict of
        substitutions, keyed by the original query expression
    :rtype: (str, dict)
    """
    keep = []  # output characters and substituted variable names
    # Scanner state: '', STATE_IDLE, STATE_COMMENT, STATE_EMBEDDED_QUERY, or
    # the active quote character (' or ").
    # NOTE(review): the initial state is '' while the '$' branch compares
    # against STATE_IDLE — this only recognises a leading '$' if
    # STATE_IDLE == ''; confirm the constant definitions.
    state = ''
    idx_char = idx_var = 0
    substitutions = {}  # keyed by query expression
    query_expr = []  # characters of the expression currently being read
    while idx_char < len(input_string):
        c = input_string[idx_char]
        if state == STATE_EMBEDDED_QUERY:
            if c == '}':
                # End of the ${...} expression: emit (or reuse) a variable.
                state = STATE_IDLE
                s = ''.join(query_expr).strip()
                query_expr = []
                if s not in substitutions:
                    varname = 'PBK_{}'.format(idx_var)
                    idx_var += 1
                    substitutions[s] = varname
                else:
                    varname = substitutions[s]
                keep.append(varname)
            else:
                query_expr.append(c)
        elif (c == "'" or c == '"') and state != STATE_EMBEDDED_QUERY:
            if state == c:  # quoting pair found, pop it
                state = STATE_IDLE
            elif state == '':  # new quote begins
                state = c
            keep.append(c)
        elif c == '$' and state == STATE_IDLE:  # an unquoted $
            if idx_char + 1 < len(input_string) and input_string[idx_char + 1] == '{':
                state = STATE_EMBEDDED_QUERY
                # Once it enters the embedded query state, any pound,
                # double/single quotes will be ignored
                idx_char += 1
            else:
                keep.append(c)
        elif c == '#' and state == STATE_IDLE:
            state = STATE_COMMENT
            keep.append(c)
        elif c == '\n' and state == STATE_COMMENT:
            state = STATE_IDLE
            keep.append(c)
        else:
            keep.append(c)
        idx_char += 1
    return ''.join(keep), substitutions
def format_elemwise(vars_):
    """Formats all the elementwise cones for the solver.

    Creates matrices Ai such that 0 <= A0*x0 + ... + An*xn gives the format
    for the elementwise cone constraints: each Ai spaces out the columns of
    its LinOp expression so the stacked terms interleave row-wise.

    Parameters
    ----------
    vars_ : list
        A list of the LinOp expressions in the elementwise cones.

    Returns
    -------
    list
        A list of LinLeqConstr that represent all the elementwise cones.
    """
    spacing = len(vars_)
    rows = spacing * vars_[0].size[0]
    prod_size = (rows, vars_[0].size[1])
    # Each spacing matrix maps a block of rows to interleaved positions.
    mat_size = (rows, vars_[0].size[0])
    terms = [
        lu.mul_expr(get_spacing_matrix(mat_size, spacing, i), var, prod_size)
        for i, var in enumerate(vars_)
    ]
    return [lu.create_geq(lu.sum_expr(terms))]
def extract_stars(image, noise_threshold):
    """
    Extract all stars from the given image.

    Pixels below `noise_threshold` are zeroed, the remainder is binarised,
    and connected bright regions are cut out one ROI at a time.

    Returns an array of [row, col, width, height] ROIs and a list of the
    corresponding rectangular image crops.
    """
    rois = []
    crops = []
    # Work on a copy with the background noise suppressed.
    denoised = image.copy()
    denoised[denoised < noise_threshold] = 0.0
    # Binary mask marking all remaining bright pixels.
    mask = denoised.copy()
    mask[mask > 0] = 1
    row, col = find_next_while_pixel(mask)
    while row is not None and col is not None:
        # Grow a rectangular ROI around the seed pixel.
        row, col, width, height = construct_roi(mask, row, col)
        rois.append([row, col, width, height])
        # Erase the region so the next search finds a new star.
        mask[row:row + height, col:col + width] = 0
        crops.append(np.array(denoised[row:row + height, col:col + width]))
        row, col = find_next_while_pixel(mask)
    return np.array(rois), crops
import torch
def lovasz_hinge(logits, labels):
    """
    Binary Lovasz hinge loss.

    logits: [P] tensor, logits at each prediction (between -inf and +inf)
    labels: [P] tensor, binary ground truth labels (0 or 1)

    Returns a scalar loss tensor: the hinge errors sorted in decreasing
    order, weighted by the gradient of the Lovasz extension of the Jaccard
    index (see `lovasz_grad`).
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    # Map labels {0, 1} -> signs {-1, +1}.
    signs = 2. * labels.float() - 1.
    # Hinge errors; the deprecated torch.autograd.Variable wrappers of the
    # original are no-ops since PyTorch 0.4 and have been removed.
    errors = 1. - logits * signs
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(torch.relu(errors_sorted), grad)
    return loss
from bs4 import BeautifulSoup
def extract_metadata(url: str, body: BeautifulSoup) -> Website:
    """
    Extract metadata from a site and put it into a `Website` object.

    Falls back to the URL for a missing title, to a 400-character text
    excerpt for a missing description, and to /favicon.ico for a missing
    icon link (mirroring browser behaviour).
    """
    try:
        name = body.title.get_text().strip()
    except AttributeError:
        name = url
    try:
        meta = body.find(attrs={"name": "description"})
        description = meta.get("content").strip()
    except AttributeError:
        description = extract_text(body)[:400] + "..."
    try:
        link = body.find("link", rel="icon")
        icon = urljoin(url, link.get("href"))
    except AttributeError:
        # As Browsers do, if the html doesn't specify an icon we will just try
        # the default path
        icon = urljoin(url, "/favicon.ico")
    return Website(url, name, description, icon)
def markContinuing(key, idea, oldest_idea_id, oldest_idea_detect_time, accum):
    """
    Mark IDEA as continuing event.

    :return: marked key, IDEA
    """
    if idea:
        # A differing ID means this event continues an older one: record
        # {key: (ID, DetectTime)} in the accumulator.
        if oldest_idea_id != idea.id:
            accum.add({key: (oldest_idea_id, oldest_idea_detect_time)})
        # Tag the event with the ID of the event it continues.
        idea.aida_continuing = oldest_idea_id
    # Shortened key feeds the next deduplication phase.
    return (key[0:3], idea)
import torch
def box1_in_box2(corners1: torch.Tensor, corners2: torch.Tensor):
    """check if corners of box1 lie in box2

    Convention: if a corner is exactly on the edge of the other box, it's
    also a valid point (a small epsilon tolerance is applied).

    Args:
        corners1 (torch.Tensor): (B, N, 4, 2)
        corners2 (torch.Tensor): (B, N, 4, 2)

    Returns:
        c1_in_2: (B, N, 4) Bool
    """
    # Express box2 by an anchor corner `a` and its two edge vectors ab, ad.
    anchor = corners2[:, :, 0:1, :]                  # (B, N, 1, 2)
    edge_ab = corners2[:, :, 1:2, :] - anchor        # (B, N, 1, 2)
    edge_ad = corners2[:, :, 3:4, :] - anchor        # (B, N, 1, 2)
    rel = corners1 - anchor                          # (B, N, 4, 2)
    # Project each corner of box1 onto both edges.
    proj_ab = torch.sum(edge_ab * rel, dim=-1)       # (B, N, 4)
    proj_ad = torch.sum(edge_ad * rel, dim=-1)       # (B, N, 4)
    norm_ab = torch.sum(edge_ab * edge_ab, dim=-1)   # (B, N, 1)
    norm_ad = torch.sum(edge_ad * edge_ad, dim=-1)   # (B, N, 1)
    # A corner is inside iff both normalized projections lie in [0, 1];
    # the epsilon keeps the test stable for identical boxes and across
    # different bbox scales.
    eps = 1e-6
    frac_ab = proj_ab / norm_ab
    frac_ad = proj_ad / norm_ad
    inside_ab = (frac_ab > -eps) * (frac_ab < 1 + eps)
    inside_ad = (frac_ad > -eps) * (frac_ad < 1 + eps)
    return inside_ab * inside_ad
def in_line_rate(line, container_line):
    """Fraction of `line`'s length covered by its overlap with `container_line`."""
    overlap = intersection_line(line, container_line)
    length = line[1] - line[0]
    return overlap / length
def test_cli_requires():
    """Test to ensure you can add requirements to a CLI"""
    # A requirement hook that always fails, returning the payload that hug
    # should hand back instead of executing the command.
    def requires_fail(**kwargs):
        return {'requirements': 'not met'}

    @hug.cli(output=str, requires=requires_fail)
    def cli_command(name: str, value: int):
        return (name, value)

    # Calling the function directly bypasses the requirement check...
    assert cli_command('Testing', 1) == ('Testing', 1)
    # ...but invoking it through the CLI machinery enforces it.
    assert hug.test.cli(cli_command, 'Testing', 1) == {'requirements': 'not met'}
def comm_for_pid(pid):
    """Retrieve the process name for a given process id.

    Returns None when the process does not exist or /proc is unreadable.
    """
    comm_path = '/proc/%d/comm' % pid
    try:
        return slurp(comm_path)
    except IOError:
        return None
def get_machine_type_from_run_num(run_num):
    """Map a run number to the machine type used for config settings.

    The machine id is the part of `run_num` before the first '-'. Raises
    KeyError (after logging) for unknown machine ids.
    """
    id_to_machine = {
        'MS001': 'miseq',
        'NS001': 'nextseq',
        'HS001': 'hiseq 2500 rapid',
        'HS002': 'hiseq 2500',
        'HS003': 'hiseq 2500',
        'HS004': 'hiseq 2500',
        'HS005': 'macrogen',
        'HS006': 'hiseq 4000',
        'HS007': 'hiseq 4000',
        'HS008': 'hiseq 4000',
        'NG001': 'novogene hiseq x5',
        'NG002': 'novogene hiseq x5',
        'NG003': 'novogene hiseq x5',
        'NG004': 'novogene hiseq x5',
        'NG005': 'novogene hiseq x5',
    }
    machine_id = run_num.split('-')[0]
    if machine_id not in id_to_machine:
        logger.critical("Unknown machine id %s", machine_id)
        raise KeyError(machine_id)
    return id_to_machine[machine_id]
import sys
def crawler(address_list):
    """
    A list of addresses is provided to this method and it goes and downloads
    those articles. E.g an element of the given list can be this:
    https://www.jyi.org/2019-march/2019/3/1/the-implication-of-the-corticotropin-releasing-factor-in-nicotine-dependence-and-significance-for-pharmacotherapy-in-smoking-cessation

    :param address_list: site-relative article paths, joined onto
        https://www.jyi.org before fetching.
    :return: (similarity matrix from cosine_sim, list of article texts)

    NOTE(review): any fetch/parse failure calls sys.exit(), terminating the
    whole process — confirm this all-or-nothing behaviour is intended.
    """
    print("Downloading Articles.. Please wait.")
    # Whole Corpus which will contain every article from the input excel file.
    corpus = []
    # Setting browser options which will be running in the background.
    options = ChromeOptions()
    options.add_argument('headless')
    options.add_argument('--log-level=3')
    # options.add_argument('--disable-extensions')
    browser = webdriver.Chrome('chromedriver', chrome_options=options)
    # Visiting website and Downloading the text article data.
    for address in address_list:
        try:
            # Download the whole webpage.
            site = 'https://www.jyi.org' + address
            browser.get(site)
        except:
            print("Can't connect to URL. Check your internet connection.")
            sys.exit()
        # Parse the webpage as HTML.
        soup = bs(browser.page_source, 'html.parser')
        # soup.prettify()
        # This list will contain the whole article, one paragraph per element.
        document = []
        try:
            # Finding the components which contain textual research data.
            parents = soup.find('div', 'entry-content e-content')
            parents = parents.find_all('div', 'sqs-block html-block sqs-block-html')
        except:
            print("Website structure is changed. OR Internet connection isn't smooth.")
            sys.exit()
        for parent in parents:
            # Since there are many components in a single webpage which contain
            # textual research data, we go through them one by one extracting
            # that text.
            parent = parent.find('div', 'sqs-block-content')
            # Deleting some unwanted components (ordered lists) from the soup.
            disposable = parent.find_all('ol')
            if disposable != None:
                for all in disposable:
                    all.decompose()
            # Each paragraph will be an element in our document list.
            Children = parent.find_all('p')
            for each in Children:
                document.append(each.text)
        # We join all the paragraphs into one single passage.
        doc_str = " ".join(document)
        # Removing unwanted unicode characters (non-breaking spaces).
        doc_str = doc_str.replace(u'\xa0', u' ')
        # A single document is appended to the whole corpus. 1 element of the
        # corpus list is a complete downloaded document.
        corpus.append(doc_str)
    print("All articles downloaded successfully.")
    # Finding cosine similarity between all documents.
    array = cosine_sim(corpus)
    browser.quit()
    return array, corpus
def login():
    """Log user in.

    GET renders the login form; POST validates the submitted credentials
    against the users table and stores the user's id in the session.
    """
    # Forget any user_id
    session.clear()
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # Ensure username was submitted
        if not request.form.get("username"):
            return apology("must provide username", 403)
        # Ensure password was submitted
        elif not request.form.get("password"):
            return apology("must provide password", 403)
        # Query database for username
        rows = db.execute("SELECT * FROM users WHERE username = :username",
                          username=request.form.get("username"))
        # Ensure username exists and password is correct
        if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
            return apology("invalid username and/or password", 403)
        # Remember which user has logged in
        session["user_id"] = rows[0]["id"]
        # Redirect user to home page
        return redirect("/personal")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("login.html")
def twisted_sleep(time):
    """
    Return a deferred that will be triggered after the specified amount of
    time passes.
    """
    def _fire():
        return None

    return task.deferLater(reactor, time, _fire)
import os
def setup():
    """
    extension: setup the archive/experiment directory.
    input: None
    output: None

    notes: creates an empty directory for storing data. If the target
    directory already exists and `defn.overwrite` is False, a unique sibling
    directory name is generated from `defn.extension`; otherwise the old
    archive is emptied. Idempotent: a second call is ignored with a warning.
    """
    global finished_setup
    global archive_path
    # Guard against double initialisation.
    if finished_setup is True:
        logger.warn( 'archive setup called again. Ignoring' )
        return None
    path = os.path.join( defn.archive_path, defn.experiment )
    if os.path.isdir( path ) is True:
        logger.warn( '{} already exists.'.format( path ) )
        if (defn.overwrite is False):
            #need to generate new archive path name
            extn = defn.extension
            # Ensure the template carries both the experiment (<E>) and
            # counter (<I>) placeholders.
            if '<E>' not in extn:
                extn = '<E>' + extn
            if '<I>' not in extn:
                extn = extn + '<I>'
            template = extn.replace( '<E>', defn.experiment )
            i = 1
            unique = False
            # Increment the counter until an unused directory name is found.
            while unique is False:
                path = os.path.join( defn.archive_path, template.replace('<I>',str(i)) )
                unique = not os.path.isdir( path )
                i += 1
            logger.warn( 'moved archive to {}'.format( path ) )
        else:
            logger.warn( 'deleted old archive.' )
    archive_path = path
    file_handle.empty_dir( archive_path )
    if defn.desc_name != '':
        #add description to archive as text file.
        with open( os.path.join( archive_path, defn.desc_name ), 'w' ) as desc_file:
            desc_file.write( defn.description )
    finished_setup = True
import pickle
async def async_load_cache(
    filename: str,
) -> dict[str, str | dict[str, dict[str, dict[str, dict[str, str]]]]]:
    """Load cache from file.

    Reads the whole file asynchronously and unpickles its contents.

    NOTE(review): ``pickle.loads`` executes arbitrary code when fed
    untrusted data — only use this on cache files written by the
    application itself.
    """
    async with aiofiles.open(filename, "rb") as file:
        pickled_foo = await file.read()
    return pickle.loads(pickled_foo)
import six
def make_function(function, name, arity):
    """Make a function node, a representation of a mathematical relationship.

    This factory function creates a function node, one of the core nodes in any
    program. The resulting object is able to be called with NumPy vectorized
    arguments and return a resulting vector based on a mathematical
    relationship.

    Parameters
    ----------
    function : callable
        A function with signature `function(x1, *args)` that returns a Numpy
        array of the same shape as its arguments.

    name : str
        The name for the function as it should be represented in the program
        and its visualizations.

    arity : int
        The number of arguments that the `function` takes.

    Raises
    ------
    ValueError
        If `arity` is not an int, `name` is not a string, the callable's
        argument count disagrees with `arity`, the function does not return
        a numpy array of the input shape, or it is not closed (finite)
        against zero/negative argument vectors.
    """
    if not isinstance(arity, int):
        raise ValueError('arity must be an int, got %s' % type(arity))
    # np.ufuncs don't expose a Python code object, so the argcount check only
    # applies to plain Python callables.
    if not isinstance(function, np.ufunc):
        if six.get_function_code(function).co_argcount != arity:
            raise ValueError('arity %d does not match required number of '
                             'function arguments of %d.'
                             % (arity,
                                six.get_function_code(function).co_argcount))
    if not isinstance(name, six.string_types):
        raise ValueError('name must be a string, got %s' % type(name))
    # Check output shape
    args = [np.ones(10) for _ in range(arity)]
    try:
        function(*args)
    except ValueError:
        raise ValueError('supplied function %s does not support arity of %d.'
                         % (name, arity))
    if not hasattr(function(*args), 'shape'):
        raise ValueError('supplied function %s does not return a numpy array.'
                         % name)
    if function(*args).shape != (10,):
        raise ValueError('supplied function %s does not return same shape as '
                         'input vectors.' % name)
    # Check closure for zero & negative input arguments
    args = [np.zeros(10) for _ in range(arity)]
    if not np.all(np.isfinite(function(*args))):
        raise ValueError('supplied function %s does not have closure against '
                         'zeros in argument vectors.' % name)
    args = [-1 * np.ones(10) for _ in range(arity)]
    if not np.all(np.isfinite(function(*args))):
        raise ValueError('supplied function %s does not have closure against '
                         'negatives in argument vectors.' % name)
    return _Function(function, name, arity)
def data_type_validator(type_name='data type'):
    """
    Makes sure that the field refers to a valid data type, whether complex or primitive.

    Used with the :func:`field_validator` decorator for the ``type`` fields in
    :class:`PropertyDefinition`, :class:`AttributeDefinition`, :class:`ParameterDefinition`,
    and :class:`EntrySchema`.

    Extra behavior beyond validation: generated function returns true if field is a complex data
    type.
    """
    def validator(field, presentation, context):
        # Run the field's standard validation first.
        field.default_validate(presentation, context)
        value = getattr(presentation, field.name)
        if value is not None:
            # Test for circular definitions: a data type must not reference
            # the type that contains it.
            container_data_type = get_container_data_type(presentation)
            if (container_data_type is not None) and (container_data_type._name == value):
                context.validation.report(
                    'type of property "%s" creates a circular value hierarchy: %s'
                    % (presentation._fullname, safe_repr(value)),
                    locator=presentation._get_child_locator('type'), level=Issue.BETWEEN_TYPES)
            # Can be a complex data type
            if get_type_by_full_or_shorthand_name(context, value, 'data_types') is not None:
                return True
            # Can be a primitive data type
            if get_primitive_data_type(value) is None:
                report_issue_for_unknown_type(context, presentation, type_name, field.name)
        # None or primitive/unknown type: not a complex data type.
        return False
    return validator
def list_challenge_topics(account_name, challenge_name):  # noqa: E501
    """List challenge topics

    Looks up the owning account, then the challenge by owner id and name,
    and returns its topics. # noqa: E501

    :param account_name: The name of the account that owns the challenge
    :type account_name: str
    :param challenge_name: The name of the challenge
    :type challenge_name: str

    :rtype: (ArrayOfTopics | Error, int) — payload and HTTP status code
    """
    try:
        account = DbAccount.objects.get(login=account_name)
        account_id = account.to_dict().get("id")
        db_challenge = DbChallenge.objects.get(
            ownerId=account_id, name=challenge_name
        )  # noqa: E501
        res = ArrayOfTopics(topics=db_challenge.to_dict().get("topics"))
        status = 200
    except DoesNotExist:
        # Either the account or the challenge does not exist.
        status = 404
        res = Error("The specified resource was not found", status)
    except Exception as error:
        status = 500
        res = Error("Internal error", status, str(error))
    return res, status
import copy
def ee_reg2(x_des, quat_des, sim, ee_index, kp=None, kv=None, ndof=12):
    """
    same as ee_regulation, but now also accepting quat_des.

    Computes joint torques that drive the end-effector towards the desired
    Cartesian position ``x_des`` and orientation ``quat_des`` using a PD law
    in task space, mapped through the stacked position+rotation Jacobian,
    with gravity/bias compensation added.
    """
    # Default PD gains sized to the end-effector position vector.
    kp = np.eye(len(sim.data.body_xpos[ee_index]))*10 if kp is None else kp
    kv = np.eye(len(sim.data.body_xpos[ee_index]))*1 if kv is None else kv
    jacp,jacr=jac(sim, ee_index, ndof)
    # % compute position error terms as before
    xdot = np.matmul(jacp, sim.data.qvel[:ndof])
    error_vel = xdot
    error_pos = x_des - sim.data.body_xpos[ee_index]
    pos_term = np.matmul(kp,error_pos)
    vel_term = np.matmul(kv,error_vel)
    # % compute orientation error terms
    # MuJoCo quaternions are (w, x, y, z); scipy expects (x, y, z, w).
    current_ee_quat = copy.deepcopy(sim.data.body_xquat[ee_index])
    current_ee_rotmat = R.from_quat([current_ee_quat[1],
                                     current_ee_quat[2],
                                     current_ee_quat[3],
                                     current_ee_quat[0]])
    target_ee_rotmat = R.from_quat([quat_des[1],
                                    quat_des[2],
                                    quat_des[3],
                                    quat_des[0]])
    # NOTE(review): Rotation.as_dcm() was removed in SciPy 1.6 (renamed to
    # as_matrix()); this code requires SciPy < 1.6 — confirm the pinned
    # version.
    ori_error = calculate_orientation_error(target_ee_rotmat.as_dcm(), current_ee_rotmat.as_dcm())
    euler_dot = np.matmul(jacr, sim.data.qvel[:ndof])
    ori_pos_term = np.matmul(kp, ori_error)
    ori_vel_term = np.matmul(kv, euler_dot)
    # % commanding ee pose only
    F_pos = pos_term - vel_term
    F_ori = ori_pos_term - ori_vel_term
    J_full = np.concatenate([jacp, jacr])
    F_full = np.concatenate([F_pos, F_ori])
    # Map the task-space wrench to joint torques and add bias compensation.
    torques = np.matmul(J_full.T, F_full) + sim.data.qfrc_bias[:ndof]
    return torques
def _default_clipping(
    inner_factory: factory.AggregationFactory) -> factory.AggregationFactory:
  """The default adaptive clipping wrapper.

  Wraps `inner_factory` with norm clipping whose clipping norm tracks
  (without added noise) the 0.8 quantile of client update norms.
  """
  # Adapts relatively quickly to a moderately high norm.
  clipping_norm = quantile_estimation.PrivateQuantileEstimationProcess.no_noise(
      initial_estimate=1.0, target_quantile=0.8, learning_rate=0.2)
  return robust.clipping_factory(clipping_norm, inner_factory)
import torch
def make_pred_multilabel(data_transforms, model, PATH_TO_IMAGES, epoch_loss, CHROMOSOME):
    """
    Gives predictions for test fold and calculates AUCs using previously trained model

    Args:
        data_transforms: torchvision transforms to preprocess raw images; same as validation transforms
        model: densenet-121 from torchvision previously fine tuned to training data
        PATH_TO_IMAGES: path at which NIH images can be found
        epoch_loss: unused here — presumably kept for interface compatibility; confirm
        CHROMOSOME: unused here — TODO confirm intended use

    Returns:
        pred_df: dataframe containing individual predictions and ground truth for each test image
        auc_df: dataframe containing aggregate AUCs by train/test tuples

    NOTE(review): the implementation actually returns the constant
    BATCH_SIZE, and the per-batch `probs`/`true_labels` computed in the loop
    are discarded — this function appears truncated relative to its
    docstring; confirm against the original project.
    """
    # calc preds in batches of 16, can reduce if your GPU has less RAM
    BATCH_SIZE = 32
    # set model to eval mode; required for proper predictions given use of batchnorm
    model.train(False)
    # create dataloader
    dataset = CXR.CXRDataset(
        path_to_images=PATH_TO_IMAGES,
        fold="test",
        transform=data_transforms['val'])
    dataloader = torch.utils.data.DataLoader(
        dataset, BATCH_SIZE, shuffle=False, num_workers=0)
    size = len(dataset)
    # create empty dfs
    pred_df = pd.DataFrame(columns=["Image Index"])
    true_df = pd.DataFrame(columns=["Image Index"])
    # iterate over dataloader
    for i, data in enumerate(dataloader):
        inputs, labels, _ = data
        inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
        true_labels = labels.cpu().data.numpy()
        batch_size = true_labels.shape
        outputs = model(inputs)
        probs = outputs.cpu().data.numpy()
    return BATCH_SIZE
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up a config entry for solarlog.

    Schedules the sensor platform setup as a background task and reports
    success immediately.
    """
    forward = hass.config_entries.async_forward_entry_setup(entry, "sensor")
    hass.async_create_task(forward)
    return True
def SqueezeNet_v1(include_top=True,
                  input_tensor=None, input_shape=None,
                  classes=10):
    """Instantiates the SqueezeNet architecture.

    Builds a SqueezeNet variant for small (32x32) inputs: a 3x3 stem
    convolution followed by eight fire modules with two max-pooling stages,
    batch normalization, global average pooling and a softmax classifier.

    # Arguments
        include_top: whether to require a classification-compatible shape
            (passed through to `_obtain_input_shape`).
        input_tensor: optional Keras tensor to use as image input.
        input_shape: optional shape tuple; validated/defaulted to 32x32.
        classes: number of output classes.

    # Returns
        A Keras `Model` instance named 'squeezenet'.
    """
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)
    # Resolve the input: build a new Input layer unless a Keras tensor was
    # supplied directly.
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Stem convolution.
    x = Conv2D(96, (3, 3), padding='same', name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)
    # x = MaxPooling2D(pool_size=(2, 2), name='pool1')(x)
    # Fire modules with increasing squeeze/expand widths.
    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
    x = MaxPooling2D(pool_size=(2, 2), name='pool4')(x)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
    x = fire_module(x, fire_id=6, squeeze=48, expand=192)
    x = fire_module(x, fire_id=7, squeeze=48, expand=192)
    x = fire_module(x, fire_id=8, squeeze=64, expand=256)
    x = MaxPooling2D(pool_size=(2, 2), name='pool8')(x)
    x = fire_module(x, fire_id=9, squeeze=64, expand=256)
    x = BatchNormalization()(x)
    # x = Dropout(0.5, name='drop9')(x)
    # x = Convolution2D(1000, (1, 1), padding='valid', name='conv10')(x)
    x = Activation('relu', name='relu_10')(x)
    # Classifier head: global pooling + dense softmax.
    x = GlobalAveragePooling2D(name="avgpool10")(x)
    x = Dense(classes, activation='softmax', name="softmax-10")(x)
    # x = Activation('softmax', name='softmax')(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = Model(inputs, x, name='squeezenet')
    return model
def xcafdoc_ColorRefGUID(*args):
    """
    * Return GUIDs for TreeNode representing specified types of colors

    :param type:
    :type type: XCAFDoc_ColorType
    :rtype: Standard_GUID
    """
    # Thin SWIG wrapper: delegates directly to the native OpenCASCADE binding.
    return _XCAFDoc.xcafdoc_ColorRefGUID(*args)
import code
def green_on_yellow(string, *funcs, **additional):
    """Text color - green on background color - yellow. (see _combine())."""
    # `additional` is accepted but unused — presumably kept for a uniform
    # signature across the color helpers; verify against the siblings.
    background = (code.BG_YELLOW,)
    return _combine(string, code.GREEN, *funcs, attributes=background)
def _register_models(format_str, cls, forward=True):
    """Registers reward models of type cls under key formatted by format_str.

    Registers the four {Forward, Backward} x {WithCtrl, NoCtrl} combinations
    in the global reward registry as a side effect.

    NOTE(review): ``res`` is returned but never populated, so the return
    value is always an empty dict — callers should rely on the registry side
    effect; confirm whether the return is vestigial.
    """
    forwards = {"Forward": {"forward": forward}, "Backward": {"forward": not forward}}
    control = {"WithCtrl": {}, "NoCtrl": {"ctrl_coef": 0.0}}
    res = {}
    for k1, cfg1 in forwards.items():
        for k2, cfg2 in control.items():
            # Build a loader for this (direction, control) combination and
            # register it under e.g. format_str.format("ForwardWithCtrl").
            fn = registry.build_loader_fn_require_space(cls, **cfg1, **cfg2)
            key = format_str.format(k1 + k2)
            reward_serialize.reward_registry.register(key=key, value=fn)
    return res
def configure_pseudolabeler(pseudolabel: bool, pseudolabeler_builder, pseudolabeler_builder_args):
    """Pass in a class that can build a pseudolabeler (implementing __call__) or a builder function
    that returns a pseudolabeling function.

    Returns None when pseudolabeling is disabled; otherwise resolves the
    builder by name in this module's globals and invokes it with the given
    arguments.
    """
    if not pseudolabel:
        return None
    builder = globals()[pseudolabeler_builder]
    return builder(*pseudolabeler_builder_args)
import os
def get_encoders(filename=None):
    """Get an ordered list of all encoders. If a `filename` is provided,
    encoders supporting that extension will be ordered first in the list.
    """
    preferred = []
    if filename:
        ext = os.path.splitext(filename)[1].lower()
        preferred.extend(_encoder_extensions.get(ext, []))
    # Append the remaining encoders, skipping any already listed.
    remaining = [enc for enc in _encoders if enc not in preferred]
    return preferred + remaining
def flip_dict(dict, unique_items=False, force_list_values=False):
    """Swap keys and values in a dictionary.

    Parameters
    ----------
    dict: dictionary
        dictionary object to flip
    unique_items: bool
        whether to assume that all items in dict are unique, potential speedup
        but repeated items will be lost
    force_list_values: bool
        whether to force all items in the result to be lists or to let unique
        items have unwrapped values. Doesn't apply if unique_items is true.
    """
    if unique_items:
        # Fast path: every value becomes a key, duplicates overwrite.
        return {value: key for key, value in dict.items()}
    if force_list_values:
        # Every result value is a list, even for singletons.
        flipped = {}
        for key, value in dict.items():
            flipped.setdefault(value, []).append(key)
        return flipped
    # Mixed mode: singletons stay unwrapped, duplicates collect into lists.
    flipped = {}
    for key, value in dict.items():
        if value not in flipped:
            flipped[value] = key
        elif isinstance(flipped[value], list):
            flipped[value].append(key)
        else:
            flipped[value] = [flipped[value], key]
    return flipped
def randn(N, R, var=1.0, dtype=tn.float64, device=None):
    """
    A torchtt.TT tensor of shape N = [N1 x ... x Nd] and rank R is returned.
    The entries of the full tensor are almost normally distributed with the
    variance var.

    Args:
        N (list[int]): the shape.
        R (list[int]): the rank.
        var (float, optional): the variance. Defaults to 1.0.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Returns:
        torchtt.TT: the result.
    """
    d = len(N)
    # Split the target variance evenly across the d cores.
    core_var = (var / np.prod(R)) ** (1 / d)
    scale = np.sqrt(core_var)
    cores = []
    for i in range(d):
        if isinstance(N[i], tuple):
            # Matrix (operator) mode: core carries two physical dimensions.
            core_shape = [R[i], N[i][0], N[i][1], R[i + 1]]
        else:
            core_shape = [R[i], N[i], R[i + 1]]
        cores.append(tn.randn(core_shape, dtype=dtype, device=device) * scale)
    return TT(cores)
def flatten_all_dimensions_but_first(a):
    """
    Flattens all dimensions but the first of a multidimensional array.

    Parameters
    ----------
    a : ndarray
        Array to be flattened (at least one-dimensional).

    Returns
    -------
    b : ndarray
        Result of flattening, two-dimensional with shape
        (a.shape[0], prod(a.shape[1:])); 1-D input yields shape (n, 1).
    """
    s = a.shape
    # np.prod(()) is the float 1.0, which NumPy rejects as a shape entry;
    # cast to int so 1-D inputs flatten correctly instead of raising.
    return a.reshape(s[0], int(np.prod(s[1:])))
def getHRLanguages(fname, hrthreshold=0):
    """
    :param fname: the name of the file containing filesizes. Created using wc -l in the wikidata folder
    :param hrthreshold: how big a set of transliteration pairs needs to be considered high resource
    :return: a set of ISO 639-3 language codes whose pair count exceeds the threshold
    """
    hrlangs = set()
    with open(fname) as infile:
        for row in infile:
            # Columns: long name, iso639-3 code, iso639-1 code, pair count.
            long_name, iso639_3, iso639_1, size = row.strip().split()
            if int(size) > hrthreshold:
                hrlangs.add(iso639_3)
    return hrlangs
def _wrap(func, *args, **kwargs):
    """Decode the arguments, call ``func``, then encode whatever comes back."""
    def _safe(converter, value):
        # Best-effort conversion: fall back to the raw value on any failure.
        try:
            return converter(value)
        except BaseException:
            return value
    # First, decode each argument (positional and keyword).
    decoded_args = [_safe(decode, a) for a in args]
    decoded_kwargs = {key: _safe(decode, val) for key, val in kwargs.items()}
    # Execute the function.
    result = func(*decoded_args, **decoded_kwargs)
    if isinstance(result, OpenMaya.MSelectionList):
        result = result.getSelectionStrings()
    # Finally encode the returned object(s).
    if isinstance(result, _STRING_TYPES):
        return _safe(encode, result)
    if isinstance(result, (list, tuple, set)):
        return type(result)(_safe(encode, item) for item in result)
    return result
import itertools
def make_id_graph(xml):
    """
    Make an undirected graph with CPHD identifiers as nodes and edges from correspondence and hierarchy.

    Nodes are named as {xml_path}<{id}, e.g. /Data/Channel/Identifier<Ch1
    There is a single "Data" node formed from the Data branch root that signifies data that can be read from the file

    Args
    ----
    xml: `lxml.etree.ElementTree.Element`
        Root CPHD XML node

    Returns
    -------
    id_graph: `networkx.Graph`
        Undirected graph
            * nodes: Data node, CPHD identifiers
            * edges: Parent identifiers to child identifiers; corresponding identifiers across XML branches
    """
    id_graph = nx.Graph()

    def add_id_nodes_from_path(xml_path):
        # One node per identifier element found at xml_path.
        id_graph.add_nodes_from(["{}<{}".format(xml_path, n.text) for n in xml.findall('.' + xml_path)])

    def add_id_nodes_from_path_with_connected_root(xml_path):
        # Like add_id_nodes_from_path, but each node is also joined to the
        # branch root name (e.g. "Data"), marking data readable from the file.
        root_node = xml_path.split('/')[1]
        id_graph.add_edges_from(zip(itertools.repeat(root_node),
                                    ["{}<{}".format(xml_path, n.text) for n in xml.findall('.' + xml_path)]))

    def get_id_from_node_name(node_name):
        # Node names are "{path}<{id}"; the id is the final "<"-separated field.
        return node_name.split('<')[-1]

    def connect_matching_id_nodes(path_a, path_b):
        # Add an edge between already-existing nodes of the two paths that share an id.
        all_nodes = list(id_graph.nodes)
        all_a = {get_id_from_node_name(x): x for x in all_nodes if x.split('<')[0] == path_a}
        all_b = {get_id_from_node_name(x): x for x in all_nodes if x.split('<')[0] == path_b}
        for k in set(all_a).intersection(all_b):
            id_graph.add_edge(all_a[k], all_b[k])

    def add_and_connect_id_nodes(path_a, path_b):
        # Create nodes for both paths, then link matching ids.
        add_id_nodes_from_path(path_a)
        add_id_nodes_from_path(path_b)
        connect_matching_id_nodes(path_a, path_b)

    def add_and_connect_children(parent_path, parent_id_name, children_paths):
        # Hierarchy edges: each parent identifier to each of its child identifiers.
        for parent in xml.findall('.' + parent_path):
            parent_id = parent.findtext(parent_id_name)
            for child_path in children_paths:
                for child in parent.findall('.' + child_path):
                    id_graph.add_edge('{}/{}<{}'.format(parent_path, parent_id_name, parent_id),
                                      '{}/{}<{}'.format(parent_path, child_path, child.text))

    # Data branch: identifiers attached to the common "Data" root node.
    add_id_nodes_from_path_with_connected_root('/Data/Channel/Identifier')
    add_id_nodes_from_path_with_connected_root('/Data/SupportArray/Identifier')
    # Channel parameters reference dwell, antenna and TxRcv identifiers.
    channel_children = ['DwellTimes/CODId', 'DwellTimes/DwellId']
    channel_children += ['Antenna/'+ident for ident in ('TxAPCId', 'TxAPATId', 'RcvAPCId', 'RcvAPATId')]
    channel_children += ['TxRcv/TxWFId', 'TxRcv/RcvId']
    add_and_connect_children('/Channel/Parameters', 'Identifier', channel_children)
    connect_matching_id_nodes('/Data/Channel/Identifier', '/Channel/Parameters/Identifier')
    # Support arrays referenced from the Data branch.
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/IAZArray/Identifier')
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/AntGainPhase/Identifier')
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/AddedSupportArray/Identifier')
    # Dwell-time correspondences.
    add_and_connect_id_nodes('/Channel/Parameters/DwellTimes/CODId', '/Dwell/CODTime/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/DwellTimes/DwellId', '/Dwell/DwellTime/Identifier')
    # Antenna hierarchy and cross-references.
    add_and_connect_id_nodes('/Antenna/AntCoordFrame/Identifier', '/Antenna/AntPhaseCenter/ACFId')
    add_and_connect_children('/Antenna/AntPattern', 'Identifier',
                             ('GainPhaseArray/ArrayId', 'GainPhaseArray/ElementId'))
    add_and_connect_children('/Antenna/AntPhaseCenter', 'Identifier', ('ACFId',))
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/TxAPCId', '/Antenna/AntPhaseCenter/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/TxAPATId', '/Antenna/AntPattern/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/RcvAPCId', '/Antenna/AntPhaseCenter/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/RcvAPATId', '/Antenna/AntPattern/Identifier')
    connect_matching_id_nodes('/SupportArray/AntGainPhase/Identifier', '/Antenna/AntPattern/GainPhaseArray/ArrayId')
    connect_matching_id_nodes('/SupportArray/AntGainPhase/Identifier', '/Antenna/AntPattern/GainPhaseArray/ElementId')
    # TxRcv correspondences.
    add_and_connect_id_nodes('/Channel/Parameters/TxRcv/TxWFId', '/TxRcv/TxWFParameters/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/TxRcv/RcvId', '/TxRcv/RcvParameters/Identifier')
    return id_graph
import base64
def aes_base64_encrypt(data, key):
    """
    @summary:
        1. pkcs7padding
        2. aes encrypt
        3. base64 encrypt
    @return:
        string
    """
    # The original `AES.new(key)` relied on pycrypto's implicit ECB default;
    # pycryptodome requires the mode argument, so make it explicit.
    # NOTE(review): ECB is kept for backward compatibility but is NOT
    # semantically secure -- identical plaintext blocks yield identical
    # ciphertext blocks. Consider CBC/GCM with an IV when callers can migrate.
    cipher = AES.new(key, AES.MODE_ECB)
    return base64.b64encode(cipher.encrypt(_pkcs7padding(data)))
import torch
def write_results(prediction, confidence, num_classes, nms_thresh = 0.4):
    """
    @prediction output of the neural network
    @confidence objectness threshold
    @nms_conf non-maximum suppression confidence
    @description Based on the prediction confidence and the classes, return
    the network's final prediction after post-processing with non-maximum
    suppression to keep the most precise detections
    """
    # keep only the bounding boxes with confidence above the limit
    conf_mask = (prediction[:,:,4] > confidence).float().unsqueeze(2)
    prediction = prediction*conf_mask # if objectness < confidence, objectness == 0
    # get the x,y coordinates of the bounding-box corners
    # Example
    #   top-left corner, y = center.y - height/2
    #   top-left corner, x = center.x - width/2
    box_corner = prediction.new(prediction.shape)
    box_corner[:,:,0] = (prediction[:,:,0] - (prediction[:,:,2]/2)) # top-left x
    box_corner[:,:,1] = (prediction[:,:,1] - (prediction[:,:,3]/2)) # top-left y
    box_corner[:,:,2] = (prediction[:,:,0] + (prediction[:,:,2]/2)) # top-right x
    box_corner[:,:,3] = (prediction[:,:,1] + (prediction[:,:,3]/2)) # top-right y
    # replace center x, center y, width, height with corner coordinates
    prediction[:,:,:4] = box_corner[:,:,:4]
    batch_size = prediction.size(0) # number of images in the batch
    #print("Batch size is: {}".format(batch_size))
    write = False
    # run Non Maximum Suppression image by image (not per batch)
    for ind in range(batch_size):
        image_pred = prediction[ind] # read image 'i'
        # ****************************
        # @IMPROVEMENT: this could probably be done earlier, not at this
        # point, to gain efficiency
        # ********************************
        # keep only the class with the highest confidence
        max_conf, max_conf_index = torch.max(image_pred[:,5:5+num_classes], 1)
        max_conf = max_conf.float().unsqueeze(1)
        max_conf_index = max_conf_index.float().unsqueeze(1)
        seq = (image_pred[:,:5], max_conf, max_conf_index) # store info in a tuple
        image_pred = torch.cat(seq, 1) # concatenate all values into a single tensor
        # drop the bounding boxes with objectness < confidence
        non_zero_ind = (torch.nonzero(image_pred[:,4]))
        #print("Objectess > Confidence para {} elementos".format(non_zero_ind.size(0)))
        try:
            # select only the rows where objectness > confidence
            image_pred_ = image_pred[non_zero_ind.squeeze(),:].view(-1,7)
        except:
            continue
        #print(">> image_pred all: {}".format(image_pred.size()))
        #print(">> image_pred_ conf< {}: {}\n{}".format(confidence, image_pred_.size(), image_pred_))
        # if there are no detections with objectness > confidence, move on
        # to the next image
        if image_pred_.shape[0]==0:
            continue
        img_classes = unique(image_pred_[:,-1]) # get the detected classes
        #print(">> img_classes: {}".format(img_classes))
        # ************************************ #
        # NON MAXIMUM SUPPRESSION
        # See for reference: https://www.youtube.com/watch?v=VAo84c1hQX8
        # ************************************ #
        for cls in img_classes:
            # get the detections for the current class 'cls'
            cls_mask = image_pred_*(image_pred_[:,-1] == cls).float().unsqueeze(1)
            #print(">> Elementos de la clase {}:\n {}".format(cls, cls_mask))
            class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
            image_pred_class = image_pred_[class_mask_ind].view(-1, 7)
            # sort by objectness in descending order
            conf_sort_index = torch.sort(image_pred_class[:,4], descending =True)[1]
            image_pred_class = image_pred_class[conf_sort_index]
            #print(">> Elementos de la clase {} ordenados descendente: \n {}".format(cls, image_pred_class))
            idx = image_pred_class.size(0)
            #print(">> IDX: {}".format(idx))
            # PERFORM Non Maximum Suppression
            for i in range(idx):
                # compute the IOU between the highest-confidence box and the rest
                try:
                    ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i+1:])
                except ValueError:
                    #print("Value error en elemento {}".format(i))
                    break
                except IndexError:
                    #print("IndexError en elemento {}".format(i))
                    break
                #print("IOUS para NMS: \n{}".format(ious))
                # identify the bounding boxes with IOU > threshold
                iou_mask = (ious < nms_thresh).float().unsqueeze(1)
                image_pred_class[i+1:] *= iou_mask
                # remove the bounding boxes with IOU > threshold
                non_zero_ind = torch.nonzero(image_pred_class[:,4]).squeeze()
                image_pred_class = image_pred_class[non_zero_ind].view(-1, 7)
            #
            batch_ind = image_pred_class.new(image_pred_class.size(0),1).fill_(ind)
            seq = batch_ind, image_pred_class
            if not write:
                output = torch.cat(seq, 1)
                write = True
            else:
                out = torch.cat(seq, 1)
                output = torch.cat((output,out))
    try:
        return output
    except:
        # if there were no detections at all
        return 0
import sqlite3
def es_indexing(builder) -> int:
    """Index all examples in the lsc4 dict into Elasticsearch.

    TODO: performance is poor; the indexing step should happen while the
    mdx file is parsed instead (translated from the original Chinese note).

    :param builder: dict builder

    NOTE(review): returns 0 when index creation fails, otherwise implicitly
    returns None -- at odds with the `-> int` annotation; confirm callers
    only test the result for falsiness.
    """
    # create index
    if not create_index():
        return 0
    print("es is connected and index created succeed, starting indexing the examples...")
    conn = sqlite3.connect(builder.get_mdx_db())
    cursor = conn.execute('SELECT key_text FROM MDX_INDEX')
    keys = [item[0] for item in cursor]
    conn.close()
    examples = []
    for key in keys:
        content = builder.mdx_lookup(key)
        str_content = ""
        if len(content) > 0:
            for c in content:
                str_content += c.replace("\r\n", "").replace("entry:/", "")
            exs = example_parse_lsc4(key, str_content)
            if exs:
                examples.extend(exs)
        # Flush to Elasticsearch in batches of ~2000 examples.
        if len(examples) > 2000:
            ingest("lsc4", examples)
            examples = []
    # Flush the final partial batch.
    ingest("lsc4", examples)
    print("indexing done", len(keys))
def mast_query_darks(instrument, aperture, start_date, end_date):
    """Use ``astroquery`` to search MAST for dark current data

    Parameters
    ----------
    instrument : str
        Instrument name (e.g. ``nircam``)

    aperture : str
        Detector aperture to search for (e.g. ``NRCA1_FULL``)

    start_date : float
        Starting date for the search in MJD

    end_date : float
        Ending date for the search in MJD

    Returns
    -------
    query_results : list
        List of dictionaries containing the query results

    Raises
    ------
    ValueError
        If ``instrument`` is not one of the supported JWST instruments.
    """
    # Make sure instrument is correct case
    if instrument.lower() == 'nircam':
        instrument = 'NIRCam'
        dark_template = ['NRC_DARK']
    elif instrument.lower() == 'niriss':
        instrument = 'NIRISS'
        dark_template = ['NIS_DARK']
    elif instrument.lower() == 'nirspec':
        instrument = 'NIRSpec'
        dark_template = ['NRS_DARK']
    elif instrument.lower() == 'fgs':
        instrument = 'FGS'
        dark_template = ['FGS_DARK']
    elif instrument.lower() == 'miri':
        instrument = 'MIRI'
        dark_template = ['MIR_DARKALL', 'MIR_DARKIMG', 'MIR_DARKMRS']
    else:
        # Fail fast: previously an unrecognized instrument fell through and
        # caused an UnboundLocalError on `dark_template` below.
        raise ValueError('Unknown instrument: {}'.format(instrument))

    # monitor_mast.instrument_inventory does not allow list inputs to
    # the added_filters input (or at least if you do provide a list, then
    # it becomes a nested list when it sends the query to MAST. The
    # nested list is subsequently ignored by MAST.)
    # So query once for each dark template, and combine outputs into a
    # single list.
    query_results = []
    for template_name in dark_template:
        # Create dictionary of parameters to add
        parameters = {"date_obs_mjd": {"min": start_date, "max": end_date},
                      "apername": aperture, "exp_type": template_name}
        query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,
                                                  add_filters=parameters, return_data=True, caom=False)
        if 'data' in query.keys():
            if len(query['data']) > 0:
                query_results.extend(query['data'])
    return query_results
import random
def randomNumGen(choice):
    """Get a random number to simulate a d6, d10, d100, d4, d8, d12 or d20 roll.

    Args:
        choice: die selector -- 1=d6, 2=d10, 3=d100, 4=d4, 5=d8, 6=d12, 7=d20.

    Returns:
        int: the rolled value, or an error string for an unknown choice
        (kept for backward compatibility with existing callers).
    """
    # Dispatch table replaces the original if/elif ladder: choice -> faces.
    faces = {1: 6, 2: 10, 3: 100, 4: 4, 5: 8, 6: 12, 7: 20}
    if choice not in faces:  # simple error message
        return "Shouldn't be here. Invalid choice"
    return random.randint(1, faces[choice])
import json
import sys
def schedule_job_with_distance_matrix(request):
    """
    :param request: HTTP request with following fields:
    - distance_matrix: dictionary where keys correspond to node ids and values to coordinates.
    - first_node: integer - id of the first node
    - tol (optional): solver tolerance, defaults to 1e-2
    - steps (optional): number of solver steps, defaults to 1
    :return: JsonResponse with a status code and the id of the created TSPLog
    """
    request_dict = json.loads(request.read())
    # NOTE(review): debug print left in -- prefer the logging module.
    print(request_dict)
    sys.stdout.flush()
    distance_matrix = request_dict["distance_matrix"]
    first_node = request_dict["first_node"]
    # Solver defaults, overridable per request.
    tol = 1e-2
    steps = 1
    if "tol" in request_dict.keys():
        tol = request_dict["tol"]
    if "steps" in request_dict.keys():
        steps = request_dict["steps"]
    current_log = TSPLog.objects.create(nodes=None, distance_matrix=distance_matrix, first_node=first_node, tol=tol, steps=steps)
    current_log.save()
    # Enqueue the (potentially long-running) solve on a worker; 1 hour timeout.
    q = Queue(connection=conn)
    # NOTE(review): `result` (the rq job handle) is never used.
    result = q.enqueue(
        solve_tsp, distance_matrix, first_node, steps, tol, current_log, timeout=3600)
    return JsonResponse({"status_code": 200, "id": current_log.id})
def is_distinct(coll, key=EMPTY):
    """Checks if all elements in the collection are different."""
    # Without a key, compare raw elements; otherwise compare mapped keys.
    values = coll if key is EMPTY else xmap(key, coll)
    return len(set(values)) == len(coll)
def split_data(df_data, config, test_frac=0.2):
    """
    Split df_data into train and test frames, persist both as CSV files
    (paths taken from config), and return the training frame.
    """
    train_df, test_df = train_test_split(df_data, test_size=test_frac)
    for frame in (train_df, test_df):
        frame.reset_index(inplace=True, drop=True)
    train_df.to_csv(config.path_train_data, index=False)
    test_df.to_csv(config.path_test_data, index=False)
    return train_df
import requests
def query(params, lang='en'):
    """
    Simple Mediawiki API wrapper
    """
    endpoint = 'https://%s.wikipedia.org/w/api.php' % lang
    # Defaults first; caller-supplied params may override them.
    payload = {
        'action': 'query',
        'format': 'json',
    }
    payload.update(params)
    resp = requests.get(endpoint, params=payload)
    if not resp.ok:
        return None
    body = resp.json()
    # None when the response carries no 'query' section.
    return body.get('query')
from datetime import datetime
import time
def reporting_window(year, month):
    """
    Returns the range of time when people are supposed to report

    NOTE(review): the first element is a datetime (midnight on the last
    business day of the previous month) while the second is whatever
    get_business_day_of_month returns (apparently a date, given the
    datetime.combine above) -- confirm callers expect mixed types.
    """
    # NOTE(review): this body calls `time()` and `timedelta` as bare names;
    # the visible `import time` would make `time()` a TypeError, so the real
    # module presumably does `from datetime import datetime, time, timedelta`.
    # TODO confirm actual imports.
    last_of_last_month = datetime(year, month, 1) - timedelta(days=1)
    last_bd_of_last_month = datetime.combine(
        get_business_day_of_month(last_of_last_month.year, last_of_last_month.month, -1),
        time()
    )
    last_bd_of_the_month = get_business_day_of_month(year, month, -1)
    return last_bd_of_last_month, last_bd_of_the_month
from typing import Dict
from typing import Any
import os
import json
def load_json(path: str) -> Dict[str, Any]:
    """Loads a `.json` file from `path`.

    Args:
        path (str): Path to file.

    Returns:
        Dict[str, Any]: Returns the loaded json.

    Raises:
        FileNotFoundError: If `path` is not an existing file.

    Example:
        >>> # Load a json file
        >>> load_json('mlnext.json')
        {'name': 'mlnext'}
    """
    if not os.path.isfile(path):
        raise FileNotFoundError(f'Path {path} invalid.')

    # JSON is UTF-8 by specification (RFC 8259); don't rely on the
    # platform-dependent locale encoding.
    with open(path, 'r', encoding='utf-8') as file:
        return json.load(file)
def fit_cluster_13():
    """Fit a GMM to resolve objects in cluster 13 into C, Q, O.

    Returns
    -------
    sklearn.mixture.GaussianMixture
        The mixture model trained on the latent scores.
    list
        The classes represented in order by the model components.
    """
    data = classy.data.load()
    latent = data.loc[data.cluster == 13, ["z1", "z3"]]
    gmm = GaussianMixture(n_components=3, random_state=17).fit(latent)

    # Components ordered by mean z1 correspond to C, Q, O respectively.
    CLASSES = [""] * 3
    for component, label in zip(np.argsort(gmm.means_[:, 0]), ["C", "Q", "O"]):
        CLASSES[component] = label
    return gmm, CLASSES
def sidebar_left(request):
    """
    Return the left sidebar values in context
    """
    # NOTE(review): `is_authenticated` is invoked as a method; on modern
    # Django it is a property -- confirm the Django version in use.
    if not request.user.is_authenticated():
        return {}
    moderation = {
        'is_visible': False,
        'count_notifs': 0,
    }
    # Staff members see the moderation entry with its pending count.
    if request.user.is_staff:
        moderation['is_visible'] = True
        moderation['count_notifs'] = ModerationHelper.count_unmoderated(request.user)
    return {
        'sidebar_left': {
            'moderation': moderation,
        },
    }
import torch
def get_edge_lengths(vertices, edge_points):
    """
    Compute squared edge lengths using edge_points from get_edge_points(mesh)
    or edge_vertex_indices(faces).

    :params
        vertices (N,3): vertex positions
        edge_points (E,4): per-edge indices; only the first two columns
            (the edge's endpoint vertex indices) are used here
    :returns
        (E,) tensor of squared edge lengths
    """
    # (E,2,3): endpoint coordinates for every edge (indexing the first
    # dimension of `vertices` with an index tensor is fine).
    # The original computed unused N, D, E locals -- removed.
    endpoints = vertices[edge_points[:, :2]]
    deltas = endpoints[:, 0, :] - endpoints[:, 1, :]
    return torch.sum(deltas * deltas, dim=-1)
def compute_pca(nparray):
    """
    :param nparray: nxd array, d is the dimension
    :return: evs eigenvalues, axmat dxn array, each column is an eigenvector
    author: weiwei
    date: 20200701osaka
    """
    # Biased covariance across columns (rows are observations).
    covariance = np.cov(nparray, y=None, rowvar=False, bias=True)
    eigenvalues, eigenvectors = np.linalg.eig(covariance)
    return eigenvalues, eigenvectors
def fac(num):
    """Return num! (the factorial of a non-negative integer).

    Raises:
        ValueError: if num is negative. (Replaces the original `assert`,
        which is silently stripped under `python -O`.)
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    # Iterative form avoids recursion-depth limits for large inputs.
    result = 1
    for i in range(2, num + 1):
        result *= i
    return result
def entropy(logp, p):
    """Compute the entropy of `p` - probability density function approximation.
    We need this in order to compute the entropy-bonus.
    """
    # Per-sample entropy, then average over the batch.
    per_sample = -(logp * p).sum(dim=1)
    return per_sample.mean()
def find(query):
    """Retrieve *exactly* matching tracks."""
    return mpctracks('find', _parse_query(query))
import random
def permuteregulations(graph):
    """Randomly change which regulations are repressions, maintaining activation and repression counts and directions."""
    all_edges = list(graph.edges)
    shuffled = graph.copy()
    # Collect the currently repressing edges and clear their flags.
    repressed = [e for e in all_edges if shuffled.edges[e]['repress']]
    for e in repressed:
        shuffled.edges[e]['repress'] = False
    # Re-assign the same number of repressions to randomly chosen edges.
    for e in random.sample(all_edges, len(repressed)):
        shuffled.edges[e]['repress'] = True
    return shuffled
def editor_command(command):
    """
    Is this an external editor command?
    :param command: string
    """
    # Handles both `\e filename` and `SELECT * FROM \e` forms.
    text = command.strip()
    return text.endswith('\\e') or text.startswith('\\e ')
def blrObjFunction(initialWeights, *args):
    """
    blrObjFunction computes 2-class Logistic Regression error function and
    its gradient.

    Input:
        initialWeights: the weight vector (w_k) of size (D + 1) x 1
        train_data: the data matrix of size N x D
        labeli: the label vector (y_k) of size N x 1 where each entry can be either 0 or 1 representing the label of corresponding feature vector

    Output:
        error: the scalar value of error function of 2-class logistic regression
        error_grad: the vector of size (D+1) x 1 representing the gradient of
                    error function
    """
    train_data, labeli = args

    n_data = train_data.shape[0]
    n_features = train_data.shape[1]

    # Reshape weights into a (D+1, 1) column vector. The original code
    # referenced an undefined name `n_feature` here (NameError unless a
    # same-named global happened to exist).
    w = initialWeights.reshape(n_features + 1, 1)
    # Prepend a bias column of ones to the input data.
    inputWithBias = np.hstack((np.ones((n_data, 1)), train_data))
    out = sigmoid(np.dot(inputWithBias, w))
    # Negative log-likelihood averaged over the data set.
    loglik = np.sum((labeli * np.log(out)) + (1.0 - labeli) * np.log(1.0 - out))
    error = loglik * (-1 / n_data)
    # Gradient of the averaged error w.r.t. the weights.
    error_grad = np.sum(((out - labeli) * inputWithBias), axis=0) / n_data

    return error, error_grad
from datetime import datetime
def edit_battle(battle_id):
    """
    Edit battle form.

    GET renders the pre-filled edit form; POST validates the submitted
    fields, rewrites the battle (including battle-group membership and
    player attendance) and redirects to the clan's battle list.

    :param battle_id: primary key of the battle to edit
    :return: rendered template, or a redirect after a successful update
    """
    battle = Battle.query.get(battle_id) or abort(404)
    # Only members of the battle's clan (or site admins) may edit it.
    if battle.clan != g.player.clan and g.player.name not in config.ADMINS:
        abort(403)
    all_players = Player.query.filter_by(clan=g.player.clan, locked=False).order_by('lower(name)').all()
    sorted_players = sorted(all_players, reverse=True, key=lambda p: p.player_role_value())
    # Pre-fill form values from the existing battle record.
    date = battle.date
    map_name = battle.map_name
    province = battle.map_province
    battle_commander = battle.battle_commander
    enemy_clan = battle.enemy_clan
    battle_groups = BattleGroup.query.filter_by(clan=g.player.clan).order_by('date').all()
    battle_result = battle.outcome_repr()
    battle_group_final = battle.battle_group_final
    players = battle.get_players()
    description = battle.description
    replay = battle.replay.unpickle()
    duration = battle.duration
    if battle.battle_group:
        battle_group_description = battle.battle_group.description
    else:
        battle_group_description = ''
    if request.method == 'POST':
        # Overwrite the pre-filled values with the submitted form data.
        players = map(int, request.form.getlist('players'))
        map_name = request.form.get('map_name', '')
        province = request.form.get('province', '')
        enemy_clan = request.form.get('enemy_clan', '')
        battle_result = request.form.get('battle_result', '')
        battle_commander = Player.query.get(int(request.form['battle_commander']))
        description = request.form.get('description', '')
        battle_group = int(request.form['battle_group'])
        battle_group_title = request.form.get('battle_group_title', '')
        battle_group_description = request.form.get('battle_group_description', '')
        battle_group_final = request.form.get('battle_group_final', '') == 'on'
        duration = request.form.get('duration', 15 * 60)
        errors = False
        date = None
        try:
            # NOTE(review): `datetime.datetime.strptime` requires `import
            # datetime`; a `from datetime import datetime` import would make
            # this an AttributeError -- confirm the module's import style.
            date = datetime.datetime.strptime(request.form.get('date', ''), '%d.%m.%Y %H:%M:%S')
        except ValueError:
            flash(u'Invalid date format', 'error')
            errors = True
        # Field-by-field validation; each failure flashes and flags `errors`.
        if not map_name:
            flash(u'Please enter the name of the map', 'error')
            errors = True
        if not battle_commander:
            flash(u'No battle commander selected', 'error')
            errors = True
        if not players:
            flash(u'No players selected', 'error')
            errors = True
        if not enemy_clan:
            # NOTE(review): flash category 'errors' differs from the 'error'
            # category used above -- possibly a typo; confirm template usage.
            flash(u'Please enter the enemy clan\'s tag', 'errors')
            errors = True
        if not battle_result:
            flash(u'Please select the correct outcome of the battle', 'errors')
            errors = True
        bg = None
        if battle_group == -1:
            # new group
            bg = BattleGroup(battle_group_title, battle_group_description, g.player.clan, date)
        elif battle_group >= 0:
            # existing group
            bg = BattleGroup.query.get(battle_group) or abort(500)
        # A battle group may contain at most one battle marked as final.
        if bg.get_final_battle() is not None and bg.get_final_battle() is not battle and battle_group_final:
            flash(u'Selected battle group already contains a battle marked as final')
            errors = True
        if not errors:
            # Persist the updated battle and rebuild its attendance records.
            battle.date = date
            battle.clan = g.player.clan
            battle.enemy_clan = enemy_clan
            battle.victory = battle_result == 'victory'
            battle.draw = battle_result == 'draw'
            battle.map_name = map_name
            battle.map_province = province
            battle.battle_commander_id = battle_commander.id
            battle.description = description
            battle.duration = duration
            if bg:
                battle.battle_group_final = battle_group_final
                battle.battle_group = bg
                db_session.add(bg)
            else:
                battle.battle_group = None
            # Non-reserve attendances are rebuilt from the submitted roster;
            # reserve attendances are kept untouched.
            for ba in battle.attendances:
                if not ba.reserve:
                    db_session.delete(ba)
            for player_id in players:
                player = Player.query.get(player_id)
                if not player:
                    abort(404)
                ba = BattleAttendance(player, battle, reserve=False)
                db_session.add(ba)
            db_session.add(battle)
            db_session.commit()
            logger.info(g.player.name + " updated the battle " + str(battle.id))
            return redirect(url_for('battles_list', clan=g.player.clan))
    # NOTE(review): `replays` is not defined in this function -- presumably a
    # module-level name; confirm it exists.
    return render_template('battles/edit.html', date=date, map_name=map_name, province=province, battle=battle,
                           battle_groups=battle_groups, duration=duration, battle_group_description=battle_group_description,
                           battle_commander=battle_commander, enemy_clan=enemy_clan, battle_result=battle_result,
                           battle_group_final=battle_group_final, players=players, description=description,
                           replay=replay, replays=replays, all_players=all_players, sorted_players=sorted_players)
def construct_epsilon_heli(epsilon_diag,
                           pitch,
                           divisions,
                           thickness,
                           handness="left"):
    """
    construct the dielectric matrices of all layers

    return a N*3*3 array where N is the number of layers

    We define pitch to be the distance such the rotation is 180 degree e.g. apparant
    period in z direction

    Raises:
        ValueError: if thickness exceeds pitch.
    """
    # NOTE(review): `handness` is accepted but never used -- the rotation
    # sense is fixed by the sign of the angles below. TODO confirm intent.
    if pitch == thickness:
        angles = np.linspace(0, -np.pi, divisions, endpoint=False)
    elif pitch > thickness:
        angles = np.linspace(
            0, -np.pi * thickness / pitch, divisions, endpoint=False)
    else:
        # Was `raise NameError(...)` -- a bad argument value is a ValueError.
        raise ValueError('Need thickness to be smaller than pitch')
    # Rotate the diagonal tensor about z for each layer angle.
    return np.array(
        [rotZ(i).dot(epsilon_diag.dot(rotZ(-i))) for i in angles])
import os
from typing import List
def list_fm_tsv(f_tsv: str, col=0) -> List[int]:
    """Read a whitespace-separated file and return column `col` as ints.

    Originally: 2 cols (pred, out_label_id) -> List[pred:int].
    """
    # `with` guarantees the handle is closed (the original leaked it on
    # non-refcounting interpreters). Annotation fixed: `os.path.abspath`
    # is a function, not a type.
    with open(f_tsv, 'r') as handle:
        return [int(line.split()[col]) for line in handle]
def image_overlay(im_1, im_2, color=True, normalize=True):
    """Overlay two images with the same size.

    Args:
        im_1 (np.ndarray): image arrary
        im_2 (np.ndarray): image arrary
        color (bool): Whether convert intensity image to color image.
        normalize (bool): If both color and normalize are True, will
            normalize the intensity so that it has minimum 0 and maximum 1.

    Returns:
        np.ndarray: an overlay image of im_1*0.5 + im_2*0.5
    """
    if color:
        im_1 = intensity_to_rgb(np.squeeze(im_1), normalize=normalize)
        im_2 = intensity_to_rgb(np.squeeze(im_2), normalize=normalize)
    # Equal-weight blend of the two images.
    return 0.5 * im_1 + 0.5 * im_2
import os
def ansible_hostsfile_filepath(opts):
    """returns the filepath where the ansible hostsfile will be created"""
    # Honour an explicit command-line location first.
    if "hosts_output_file" in opts and opts["hosts_output_file"]:
        return opts["hosts_output_file"]
    # Default: provision_<system>.hosts inside the temp exec directory.
    return os.path.join(temp_exec_dirpath(),
                        "provision_{}.hosts".format(opts['system']))
def get_next_seg(ea):
    """
    Get next segment

    @param ea: linear address

    @return: start of the next segment
             BADADDR - no next segment
    """
    segment = ida_segment.get_next_seg(ea)
    return segment.start_ea if segment else BADADDR
from invenio_app_ils.items.api import ITEM_PID_TYPE
def validate_item_pid(item_pid):
    """Validate item or raise and return an obj to easily distinguish them."""
    pid_type = item_pid["type"]
    if pid_type not in (BORROWING_REQUEST_PID_TYPE, ITEM_PID_TYPE):
        raise UnknownItemPidTypeError(pid_type=pid_type)
    # Anonymous object exposing two boolean class attributes.
    flags = {
        "is_item": pid_type == ITEM_PID_TYPE,
        "is_brw_req": pid_type == BORROWING_REQUEST_PID_TYPE,
    }
    return type("obj", (object,), flags)
async def async_setup(hass, config):
    """Setup pool pump services.

    Copies the configured entity ids into ``hass.data[DOMAIN]`` and registers
    the ``check`` service that decides whether the pool pump should run.
    Always returns True (setup succeeded).
    """
    hass.data[DOMAIN] = {}
    # Copy configuration values for later use.
    hass.data[DOMAIN][ATTR_SWITCH_ENTITY_ID] = config[DOMAIN][ATTR_SWITCH_ENTITY_ID]
    hass.data[DOMAIN][ATTR_POOL_PUMP_MODE_ENTITY_ID] = config[DOMAIN][ATTR_POOL_PUMP_MODE_ENTITY_ID]
    hass.data[DOMAIN][ATTR_VAC_SWITCH_ENTITY_ID] = config[DOMAIN][ATTR_VAC_SWITCH_ENTITY_ID]
    hass.data[DOMAIN][ATTR_POOL_VAC_MODE_ENTITY_ID] = config[DOMAIN][ATTR_POOL_VAC_MODE_ENTITY_ID]
    hass.data[DOMAIN][ATTR_POOL_VAC_CONNECTED_ENTITY_ID] = config[DOMAIN][ATTR_POOL_VAC_CONNECTED_ENTITY_ID]
    hass.data[DOMAIN][ATTR_SWIMMING_SEASON_ENTITY_ID] = config[DOMAIN][ATTR_SWIMMING_SEASON_ENTITY_ID]
    hass.data[DOMAIN][ATTR_RUN_PUMP_IN_SWIMMING_SEASON_ENTITY_ID] = config[DOMAIN][ATTR_RUN_PUMP_IN_SWIMMING_SEASON_ENTITY_ID]
    hass.data[DOMAIN][ATTR_RUN_PUMP_IN_OFF_SEASON_ENTITY_ID] = config[DOMAIN][ATTR_RUN_PUMP_IN_OFF_SEASON_ENTITY_ID]
    hass.data[DOMAIN][ATTR_WATER_LEVEL_CRITICAL_ENTITY_ID] = config[DOMAIN][ATTR_WATER_LEVEL_CRITICAL_ENTITY_ID]
    async def check(call):
        """Check if the pool pump should be running now."""
        # Use a fixed time reference.
        now = dt_util.now()
        mode = hass.states.get(
            hass.data[DOMAIN][ATTR_POOL_PUMP_MODE_ENTITY_ID])
        _LOGGER.debug("Pool pump mode: %s", mode.state)
        # Only check if pool pump is set to 'Auto'.
        if mode.state == POOL_PUMP_MODE_AUTO:
            manager = PoolPumpManager(hass, now)
            _LOGGER.debug("Manager initialised: %s", manager)
            # schedule = "Unknown"
            if await manager.is_water_level_critical():
                schedule = "Water Level Critical"
            else:
                run = manager.next_run()
                _LOGGER.debug("Next run: %s", run)
                if not run:
                    # Try tomorrow
                    tomorrow = now + timedelta(days=1)
                    next_midnight = tomorrow.replace(
                        hour=0, minute=0, second=0)
                    _LOGGER.debug("Next midnight: %s", next_midnight)
                    manager_tomorrow = PoolPumpManager(hass, next_midnight)
                    _LOGGER.debug("Manager initialised: %s", manager_tomorrow)
                    run = manager_tomorrow.next_run()
                    _LOGGER.debug("Next run: %s", run)
                # NOTE(review): if neither today nor tomorrow yields a run,
                # `run` is falsy here and pretty_print() would fail -- confirm
                # next_run() always returns a run for a fresh day.
                schedule = run.pretty_print()
            # Set time range so that this can be displayed in the UI.
            hass.states.async_set("{}.schedule".format(DOMAIN), schedule)
            # And now check if the pool pump should be running.
            await manager.check()
        else:
            hass.states.async_set("{}.schedule".format(DOMAIN), "Manual Mode")
    hass.services.async_register(DOMAIN, 'check', check)
    # Return boolean to indicate that initialization was successfully.
    return True
import _ctypes
def save_as_png(prs: pptx.presentation.Presentation, save_folder: str, overwrite: bool = False) -> bool:
    """
    Export the presentation as PNG images into ``save_folder``.

    Saves a temporary *.pptx first, then drives PowerPoint over COM.
    Needs module comtypes (windows only) and an installed PowerPoint.
    Note: give a full path for save_folder, or PowerPoint might cause
    random exceptions. Returns True on success, False on COM failure.
    """
    with TemporaryPPTXFile() as tmp:
        prs.save(tmp.name)
        try:
            return save_pptx_as_png(save_folder, tmp.name, overwrite)
        except _ctypes.COMError as err:
            print(err)
            print("Couldn't save PNG file due to communication error with PowerPoint.")
            return False
import json
def http_post(request):
    """HTTP Cloud Function.
    Args:
        request (flask.Request): The request object.
        <https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data>
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <https://flask.palletsprojects.com/en/1.1.x/api/#flask.make_response>.
    """
    # Init an empty json response
    response_data = {}

    body = request.get_json(silent=True)
    query_args = request.args

    # Accept the signed message from either the JSON body or the query string.
    if body and 'signed_message' in body:
        signed_message = body['signed_message']
    elif query_args and 'signed_message' in query_args:
        signed_message = query_args['signed_message']
    else:
        response_data['status'] = 'Invalid request parameters'
        return json.dumps(response_data)

    # Render the signed message as a base64-encoded QR-code PNG.
    response_data['qr_code'] = pyqrcode.create(signed_message).png_as_base64_str(scale=2)
    response_data['status'] = 'Message Created'
    return json.dumps(response_data)
from typing import Optional
import distutils
def _get_add_noise(stddev, seed: Optional[int] = None):
  """Utility function to decide which `add_noise` to use according to tf version.

  Args:
    stddev: standard deviation of the Gaussian noise to add.
    seed: optional RNG seed; should only be used for testing.

  Returns:
    A callable `add_noise(v)` that returns `v` plus normal noise with the
    same shape and dtype as `v`.
  """
  # NOTE(review): `distutils` is deprecated since Python 3.10; consider
  # `packaging.version` when dependencies allow.
  if distutils.version.LooseVersion(
      tf.__version__) < distutils.version.LooseVersion('2.0.0'):
    # The seed should be only used for testing purpose.
    if seed is not None:
      tf.random.set_seed(seed)
    def add_noise(v):
      return v + tf.random.normal(
          tf.shape(input=v), stddev=stddev, dtype=v.dtype)
  else:
    # TF >= 2.0: the initializer object carries the seed itself.
    random_normal = tf.random_normal_initializer(stddev=stddev, seed=seed)
    def add_noise(v):
      return v + tf.cast(random_normal(tf.shape(input=v)), dtype=v.dtype)
  return add_noise
def create_incident_field_context(incident):
    """Parses the 'incident_field_values' entry of the incident and returns it
    Args:
        incident (dict): The incident to parse
    Returns:
        dict. Mapping of each field name (spaces replaced by underscores) to
        its value. (The original docstring said "list" but a dict is returned.)
    """
    return {
        field['name'].replace(" ", "_"): field['value']
        for field in incident.get('incident_field_values', [])
    }
def create_profile(body, user_id):  # noqa: E501
    """Create a user profile
    # noqa: E501
    :param body:
    :type body: dict | bytes
    :param user_id: The id of the user to update
    :type user_id: int
    :rtype: None
    """
    # Guard clause: only JSON requests are accepted.
    if not connexion.request.is_json:
        return "Whoops..."
    payload = connexion.request.get_json()
    payload["user_id"] = user_id
    return ProfileService().insert_profile(payload)
def load_global_recovered() -> pd.DataFrame:
    """Load the time-series data for global COVID-19 recovered cases.

    Returns:
        pd.DataFrame: time series of global COVID-19 recovered cases.
    """
    return load_csv(global_recovered_cases_location)
import urllib.parse
def build_url(self, endpoint):
    """
    Builds a URL given an endpoint.

    Note: `import urllib` alone does not guarantee `urllib.parse` is loaded;
    the submodule is now imported explicitly.

    Args:
        endpoint (str): The endpoint to build the URL for
    Returns:
        str: The URL to access the given API endpoint
    """
    # urljoin resolves the endpoint relative to the configured base URL.
    return urllib.parse.urljoin(self.base_url, endpoint)
def neighbors(i, diag=True, inc_self=False):
    """
    Return the set of grid cells adjacent to cell ``i`` as (row, col) tuples.

    If ``inc_self`` is True, ``i`` itself is included in the result.
    If ``diag`` is True, the four diagonal neighbours are included as well.
    """
    row, col = i[0], i[1]
    if diag:
        offsets = {(dr, dc) for dr in (-1, 0, 1) for dc in (-1, 0, 1)}
        if not inc_self:
            offsets.discard((0, 0))
        return {(row + dr, col + dc) for dr, dc in offsets}
    result = {(row, col + 1), (row, col - 1), (row + 1, col), (row - 1, col)}
    if inc_self:
        result.add(i)
    return result
import sys
import traceback
def readPLY(name):
    """Read a PLY mesh file.

    Args:
        name: path of the .ply file to read.

    Returns:
        The vtk mesh output on success, or None if reading failed.
    """
    try:
        reader = vtk.vtkPLYReader()
        reader.SetFileName(name)
        reader.Update()
        print("Input mesh:", name)
        mesh = reader.GetOutput()
        del reader
        # reader = None
        return mesh
    except Exception:
        # Narrowed from BaseException so KeyboardInterrupt / SystemExit
        # still propagate instead of being swallowed here.
        print("PLY Mesh reader failed")
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(
            exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
        return None
import torch
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
    """
    NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
    Sample from the Gumbel-Softmax distribution and optionally discretize.
    Args:
      logits: [batch_size, n_class] unnormalized log-probs
      tau: non-negative scalar temperature
      hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
      [batch_size, n_class] sample from the Gumbel-Softmax distribution.
      If hard=True, then the returned sample will be one-hot, otherwise it will
      be a probability distribution that sums to 1 across classes
    Constraints:
    - this implementation only works on batch_size x num_features tensor for now
    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
    (MIT license)
    """
    y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
    if not hard:
        return y_soft
    shape = logits.size()
    _, k = y_soft.max(-1)
    # One-hot of the argmax. zeros_like keeps y_soft's device and dtype, so
    # the old manual `.cuda()` branch is no longer needed.
    y_hard = torch.zeros_like(y_soft).scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
    # Straight-through estimator:
    # - the value is exactly one-hot (add then subtract y_soft)
    # - the gradient equals the y_soft gradient (detach() strips the rest);
    #   detach() replaces the deprecated `.data` access.
    return y_hard - y_soft.detach() + y_soft
from typing import List
import re
def decompose_f_string(f_string: str) -> (List[str], List[str]):
    """
    Decompose an f-string into the list of variable names and the separators between them.
    An f-string is any string that contains enclosed curly brackets around text.
    A variable is defined as the text expression within the enclosed curly brackets,
    and the separators are the string remnants that surround the variables.
    An example f-string and components would be: 'This is {an} f-string!', with variable 'an'
    and separators 'This is ' and ' f-string!'.
    An instance of this example would be: 'This is definitely a good f-string!' with
    variable value 'definitely a good'.
    Example
    -------
    variable_names, separators = decompose_f_string(f_string="a/{x}b{y}/c{z}")
    # variable_names = ["x", "y", "z"]
    # separators = ["a/", "b", "/c"", ""]
    """
    # {.*?} lazily matches any characters enclosed by curly brackets.
    enclosed = re.findall("{.*?}", f_string)
    variable_names = [item.lstrip("{").rstrip("}") for item in enclosed]
    assert not any(
        (name == "" for name in variable_names)
    ), "Empty variable name detected in f-string! Please ensure there is text between all enclosing '{' and '}'."
    # Matches every expression outside the curly-bracket enclosures:
    #   ^.*?{  text before the first opening bracket
    #   }.*?{  text between a closing and the next opening bracket
    #   }.*?$  text after the last closing bracket
    pattern = "^.*?{|}.*?{|}.*?$"
    separators = [piece.rstrip("{").lstrip("}") for piece in re.findall(pattern=pattern, string=f_string)]
    if any((separator == "" for separator in separators[1:-1])):
        warn(
            "There is an empty separator between two variables in the f-string! "
            "The f-string will not be uniquely invertible."
        )
    return variable_names, separators
def process(register, instructions):
    """Process instructions on a copy of register; the input is not mutated."""
    state = register.copy()
    pointer = 0
    count = len(instructions)
    while pointer < count:
        # Each instruction yields the (possibly negative) offset to the next one.
        pointer += process_instruction(state, instructions[pointer])
    return state
def bearing_radians(lat1, lon1, lat2, lon2):
    """Initial bearing from point 1 to point 2 (angles in radians)."""
    delta_lon = lon2 - lon1
    east = sin(delta_lon) * cos(lat2)
    north = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(delta_lon)
    return atan2(east, north)
import logging
import json
def RunSimulatedStreaming(vm):
  """Spawn fio to simulate streaming and gather the results.

  Runs one fio job per entry in the iodepth list: a sequential 1 MB-block
  write followed by a sequential read over the same test file on the VM's
  scratch disk.

  Args:
    vm: The vm that synthetic_storage_workloads_benchmark will be run upon.
  Returns:
    A list of sample.Sample objects
  """
  # Sizes are in KB: cap the file at ~1 GB, or 1/10 of the VM's memory on
  # small VMs (presumably to limit page-cache effects -- TODO confirm).
  test_size = min(vm.total_memory_kb / 10, 1000000)
  iodepth_list = FLAGS.iodepth_list or DEFAULT_STREAMING_SIMULATION_IODEPTH_LIST
  results = []
  for depth in iodepth_list:
    # Common fio options; direct=1 bypasses the buffer cache.
    cmd = (
        '--filesize=10g '
        '--directory=%s '
        '--ioengine=libaio '
        '--overwrite=0 '
        '--invalidate=1 '
        '--direct=1 '
        '--randrepeat=0 '
        '--iodepth=%s '
        '--blocksize=1m '
        '--size=%dk '
        '--filename=fio_test_file ') % (vm.GetScratchDir(),
                                        depth,
                                        test_size)
    if FLAGS.maxjobs:
      cmd += '--max-jobs=%s ' % FLAGS.maxjobs
    # Two stonewalled jobs: write (with final fsync) then read.
    cmd += (
        '--name=sequential_write '
        '--rw=write '
        '--end_fsync=1 '
        '--name=sequential_read '
        '--stonewall '
        '--rw=read ')
    logging.info('FIO Results for simulated %s', STREAMING)
    res, _ = vm.RemoteCommand('%s %s' % (fio.FIO_CMD_PREFIX, cmd),
                              should_log=True)
    results.extend(
        fio.ParseResults(fio.FioParametersToJob(cmd), json.loads(res)))
  UpdateWorkloadMetadata(results)
  return results
import os
from datetime import datetime
def create_warning_path(paths_=None):
    """Create the dated file names for both warning files (strangers and spoofing).

    Args:
        paths_: optional list of directories; when omitted, the default
            warning directory is used (and created if missing).

    Returns:
        (spoofs_path, strangers_paths): two lists of file paths, one entry
        per directory in paths_.
    """
    if not paths_:
        default_dir = '/opt/arp_guard/arp_warnings/'
        # Bug fix: the original checked os.path.isdir('/opt/arp_warnings/')
        # but created and used '/opt/arp_guard/arp_warnings/'. Check/create
        # the directory actually used, via os.makedirs instead of os.system.
        os.makedirs(default_dir, exist_ok=True)
        paths_ = [default_dir]  # default warning dir
    spoofs_path = []
    strangers_paths = []
    now = datetime.now()
    date_path = str(now.year) + "_" + str(now.month) + "_" + str(now.day)
    for directory in paths_:
        spoofs_path.append(directory + "MacSpoof_warning_" + date_path)
        strangers_paths.append(directory + "strangers_warning_" + date_path)
    return spoofs_path, strangers_paths
def get_all_tutorial_info():
    """
    Tutorial route to get tutorials with steps
    Parameters
    ----------
    None
    Returns
    -------
    Tutorials with steps: a flask JSON response (HTTP 200) holding every
    tutorial with its averaged ratings and its ordered steps.
    """
    sql_query = "SELECT * FROM diyup.tutorials"
    cur = mysql.connection.cursor()
    cur.execute(sql_query)
    tutorials = cur.fetchall()
    output = []
    for tutorial in tutorials:
        tutorial_data = {}
        # NOTE(review): column positions are assumed from this mapping --
        # confirm against the diyup.tutorials schema.
        tutorial_data['uuid'] = tutorial[0]
        tutorial_data['author_username'] = tutorial[1]
        tutorial_data['title'] = tutorial[2]
        tutorial_data['image'] = tutorial[3]
        tutorial_data['category'] = tutorial[4]
        tutorial_data['description'] = tutorial[5]
        tutorial_data['author_difficulty'] = str(tutorial[6])
        # Viewer difficulty and rating are aggregated on the fly per tutorial.
        tutorial_data['viewer_difficulty'] = \
            str(average_rating_type_for_tutorial('difficulty', tutorial[0]))
        tutorial_data['rating'] = \
            str(average_rating_type_for_tutorial('score', tutorial[0]))
        # Parameterized query: tutorial uuid is bound, not interpolated.
        sql_query = "SELECT * FROM diyup.steps WHERE tutorial_uuid=%s"
        cur.execute(sql_query, (tutorial[0],))
        steps = cur.fetchall()
        output_steps = []
        for step in steps:
            step_data = {}
            step_data['index'] = step[1]
            step_data['content'] = step[2]
            step_data['image'] = step[3]
            output_steps.append(step_data)
        tutorial_data['steps'] = output_steps
        output.append(tutorial_data)
    cur.close()
    return jsonify({'tutorials' : output}), 200
from datetime import datetime
def parse_date(datestring, default_timezone=UTC):
    """Parses ISO 8601 dates into datetime objects
    The timezone is parsed from the date string. However it is quite common to
    have dates without a timezone (not strictly correct). In this case the
    default timezone specified in default_timezone is used. This is UTC by
    default.

    Raises:
        ParseError: if `datestring` is not a string or cannot be parsed.
    """
    # `basestring` only exists in Python 2; `str` is the Python 3 equivalent.
    if not isinstance(datestring, str):
        raise ParseError("Expecting a string %r" % datestring)
    m = ISO8601_REGEX.match(datestring)
    if not m:
        raise ParseError("Unable to parse date string %r" % datestring)
    groups = m.groupdict()
    tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
    if groups["fraction"] is None:
        groups["fraction"] = 0
    else:
        # Fractional seconds -> microseconds.
        groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6)
    return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
                    int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
                    int(groups["fraction"]), tz)
def BigSpectrum_to_H2COdict(sp, vrange=None):
    """
    A rather complicated way to make the spdicts above given a spectrum...

    For each formaldehyde line in pyspeckit's central frequency table, keep a
    copy of `sp` referenced to that line (in km/s) if the line falls inside
    the spectrum; optionally crop each copy to `vrange`.

    Parameters
    ----------
    sp : pyspeckit spectrum-like object (with an `xarr` axis)
    vrange : (vmin, vmax) in km/s, optional

    Returns
    -------
    dict mapping line name -> per-line copy of `sp`.
    """
    spdict = {}
    # NOTE(review): `iteritems()` is Python 2 only; use `items()` under Py3.
    for linename,freq in pyspeckit.spectrum.models.formaldehyde.central_freq_dict.iteritems():
        if vrange is not None:
            # Doppler-shift the rest frequency to both edges of the velocity
            # range to test whether the line can fall inside the spectrum.
            freq_test_low = freq - freq * vrange[0]/pyspeckit.units.speedoflight_kms
            freq_test_high = freq - freq * vrange[1]/pyspeckit.units.speedoflight_kms
        else:
            freq_test_low = freq_test_high = freq
        if (sp.xarr.as_unit('Hz').in_range(freq_test_low) or
            sp.xarr.as_unit('Hz').in_range(freq_test_high)):
            # Copy the spectrum and re-reference its axis to this line.
            spdict[linename] = sp.copy()
            spdict[linename].xarr.convert_to_unit('GHz')
            spdict[linename].xarr.refX = freq
            spdict[linename].xarr.refX_units = 'Hz'
            #spdict[linename].baseline = copy.copy(sp.baseline)
            #spdict[linename].baseline.Spectrum = spdict[linename]
            spdict[linename].specfit = sp.specfit.copy(parent=spdict[linename])
            spdict[linename].xarr.convert_to_unit('km/s')
            if vrange is not None:
                try:
                    spdict[linename].crop(*vrange, units='km/s')
                except IndexError:
                    # if the freq in range, but there's no data in range, remove
                    spdict.pop(linename)
    return spdict
def get_thickness_model(model):
    """
    Return a function calculating an adsorbate thickness.
    The ``model`` parameter is a string which names the thickness equation which
    should be used. Alternatively, a user can implement their own thickness model, either
    as an experimental isotherm or a function which describes the adsorbed layer. In that
    case, instead of a string, pass the Isotherm object or the callable function as the
    ``model`` parameter.
    Parameters
    ----------
    model : str or callable
        Name of the thickness model to use.
    Returns
    -------
    callable
        A callable that takes a pressure in and returns a thickness
        at that point.
    Raises
    ------
    ParameterError
        When string is not in the dictionary of models.
    """
    # If the model is a string, get a model from the _THICKNESS_MODELS
    if isinstance(model, str):
        if model not in _THICKNESS_MODELS:
            # Bug fix: the two f-strings were previously separate arguments
            # (stray comma), producing a tuple-valued exception message;
            # they are now concatenated into one message.
            raise ParameterError(
                f"Model {model} not an implemented thickness function. "
                f"Available models are {_THICKNESS_MODELS.keys()}"
            )
        return _THICKNESS_MODELS[model]
    # If the model is a callable, return it instead
    return model
import aiohttp
def http(session: aiohttp.ClientSession) -> Handler:
    """`aiohttp` based request handler.

    :param session: shared client session used to issue every request.
    :return: an async handler mapping a `Request` to a `Response`.
    """
    async def handler(request: Request) -> Response:
        # Empty containers are normalized to None so aiohttp does not send
        # empty query strings, bodies, or header sets.
        async with session.request(
            request.method,
            request.url,
            params=request.params or None,
            data=request.form_data or None,
            json=request.data or None,
            headers=request.headers or None,
        ) as response:
            # NOTE(review): assumes every response body is JSON -- confirm
            # against the endpoints this handler targets.
            return Response(
                status=response.status,
                reason=response.reason,
                headers=response.headers,
                data=await response.json(encoding='utf-8'),
            )
    return handler
def remove_package_repo_and_wait(repo_name, wait_for_package):
    """ Remove a repository from the list of package sources, then wait for the removal to complete
    :param repo_name: name of the repository to remove
    :type repo_name: str
    :param wait_for_package: the package whose version should change after the repo is removed
    :type wait_for_package: str
    :returns: True if successful, False otherwise
    :rtype: bool
    """
    # Thin wrapper: the underlying call already performs the wait.
    succeeded = remove_package_repo(repo_name, wait_for_package)
    return succeeded
from typing import Callable
from typing import List
def lyndon_of_word(word : str, comp: Callable[[List[str]],str] = min ) -> str:
    """
    Return the Lyndon representative among the set of circular shifts,
    that is the minimum for the lexicographic order 'L'<'R'
    :code:`lyndon_of_word('RLR')`.
    Args:
        `word` (str): a word (supposedly binary L&R)
        `comp` ( Callable[List[str],str] ): comparison function, min or max
    Returns:
        str: the selected circular shift
    :Example:
    >>> lyndon_of_word('LRRLRLL')
    'LLLRRLR'
    """
    # The empty word has no shifts; return it unchanged.
    if not word:
        return ''
    return comp(list_of_circular_shifts(word))
def num_of_visited_nodes(driver_matrix):
    """ Calculate the total number of visited nodes for multiple paths.
    Args:
        driver_matrix (list of lists): A list whose members are lists that
        contain paths that are represented by consecutively visited nodes.
    Returns:
        int: Number of visited nodes
    """
    total = 0
    for path in driver_matrix:
        total += len(path)
    return total
def gen_custom_item_windows_file(description, info, value_type, value_data,
                                 regex, expect):
    """Generates a custom item stanza for windows file contents audit
    Args:
        description: string, a description of the audit
        info: string, info about the audit
        value_type: string, "POLICY_TEXT" -- included for parity with other
            gen_* modules.
        value_data: string, location of remote file to check
        regex: string, regular expression to check file for
        expect: string, regular expression to match for a pass
    Returns:
        A list of strings to put in the main body of a Windows file audit file.
    """
    # Newlines in free text would break the one-line stanza fields.
    flat_description = description.replace("\n", " ")
    flat_info = info.replace("\n", " ")
    return [
        '',
        '<custom_item>',
        ' type: FILE_CONTENT_CHECK',
        ' description: "%s"' % flat_description,
        ' info: "%s"' % flat_info,
        ' value_type: %s' % value_type,
        ' value_data: "%s"' % value_data,
        ' regex: "%s"' % regex,
        ' expect: "%s"' % expect,
        '</custom_item>',
        ' ',
    ]
import inspect
def create_signature(args=None, kwargs=None):
    """Create a inspect.Signature object based on args and kwargs.
    Args:
        args (list or None): The names of positional or keyword arguments.
        kwargs (list or None): The keyword only arguments.
    Returns:
        inspect.Signature
    """
    positional = [
        inspect.Parameter(name=name, kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
        for name in (args or [])
    ]
    keyword_only = [
        inspect.Parameter(name=name, kind=inspect.Parameter.KEYWORD_ONLY)
        for name in (kwargs or [])
    ]
    return inspect.Signature(parameters=positional + keyword_only)
import json
def select(type, name, optional):
    """Select data from data.json file

    Args:
        type: top-level key of data.json whose value is iterated.
        name: key whose value is compared against each item under `type`.
        optional: key whose value is returned on the first match.

    Returns:
        The value under `optional` when a match is found; otherwise None
        (implicit).
    """
    # NOTE(review): `type` shadows the builtin, and matching each item of
    # data[type] against data[name] looks unusual -- confirm against the
    # data.json schema that this lookup is intended.
    with open('data.json', 'r') as f:
        data = json.load(f)
    for i in data[type]:
        if i == data[name]:
            return data[optional]
def perform_step(polymer: str, rules: dict) -> str:
    """
    Perform a single polymerization step: insert the rule element between every
    adjacent pair of the template; return the new polymer template string.
    """
    pieces = []
    for left, right in zip(polymer, polymer[1:]):
        pieces.append(left)
        pieces.append(rules[left + right])
    pieces.append(polymer[-1])
    return "".join(pieces)
import json
def load_datasets(json_file):
  """load dataset described in JSON file

  Args:
    json_file: path to a JSON config with a "Path" mapping (alias -> glob)
      and a "Dataset" mapping (dataset name -> split description).

  Returns:
    dict mapping dataset name -> Dataset populated with its train/val/test
    file lists and any extra parameters from the "param" entry.
  """
  datasets = {}
  with open(json_file, 'r') as fd:
    config = json.load(fd)
  all_set_path = config["Path"]
  for name, value in config["Dataset"].items():
    assert isinstance(value, dict)
    datasets[name] = Dataset()
    for i in value:
      # Only the recognised split names are collected here.
      if not i in ('train', 'val', 'test'):
        continue
      sets = []
      for j in to_list(value[i]):
        try:
          # Entry may be an alias into the global "Path" table...
          sets += list(_glob_absolute_pattern(all_set_path[j]))
        except KeyError:
          # ...or a literal glob pattern itself.
          sets += list(_glob_absolute_pattern(j))
      datasets[name].__setitem__(i, sets)
    if 'param' in value:
      # Extra free-form parameters are attached to the dataset as-is.
      for k, v in value['param'].items():
        datasets[name].__setitem__(k, v)
  return datasets
def instantiate_descriptor(**field_data):
    """
    Instantiate descriptor with most properties.

    Builds an HtmlBlock inside the test descriptor system, under a fixed
    course/usage locator, with `field_data` supplied as its field values.
    """
    system = get_test_descriptor_system()
    course_key = CourseLocator('org', 'course', 'run')
    usage_key = course_key.make_usage_key('html', 'SampleHtml')
    return system.construct_xblock_from_class(
        HtmlBlock,
        # NOTE(review): first two ScopeIds entries are left None -- confirm
        # no user/definition scoping is needed in these tests.
        scope_ids=ScopeIds(None, None, usage_key, usage_key),
        field_data=DictFieldData(field_data),
    )
import time
def simple_switch(M_in, P_in, slack=1, animate=True, cont=False, gen_pos=None, verbose=True):
    """
    A simple switch algorithm. When encountering a change in sequence, compare the value
    of the switch to the value of the current state, switch if it's more. The default value
    function sum(exp(length(adjoint sequences))) where length is measured in the input arrays.

    Args:
        M_in, P_in: input sequence arrays; copied, never modified in place.
        slack: maximum window width considered (each width w in 0..slack is
            processed after blurring -- see blurr_slack).
        animate: record every intermediate state and build an animation.
        cont: use the continuity value function instead of the default.
        gen_pos: extra positional data forwarded to the value function.
        verbose: print the wall-clock solving time.

    Returns:
        (M, P, M_track, P_track, ani): switched sequences, their switch
        tracking arrays, and the animation (None when animate is False).
    """
    start_time = time.time()
    M, P = np.copy(M_in), np.copy(P_in)
    M_track, P_track = np.zeros_like(M), np.ones_like(P)
    value_function = exp_len_value if not cont else continuity_value
    if animate:
        history = np.array([M,P])
    for w in range(slack+1):
        M, P = blurr_slack(M,w), blurr_slack(P,w) # if slack w, then sequences of length w don't make any sense
        if animate:
            history = np.dstack([history, [M,P]])
        for i in range(1,len(M)-w):
            if M[i] != M[i-1] or P[i] != P[i-1]:
                # Value of keeping the current assignment around position i,
                # versus the value after swapping the tails of M and P.
                val = value_function(M,P,i-1,i,gen_pos)
                M_temp = np.concatenate([M[:i], [P[i+w]]*w, P[i+w:]])
                P_temp = np.concatenate([P[:i], [M[i+w]]*w, M[i+w:]])
                switch_val = value_function(M_temp,P_temp,i-1,i,gen_pos)
                if switch_val > val and not is_steeling(M,P,i,w):
                    # print(i)
                    M, P = np.copy(M_temp), np.copy(P_temp)
                    M_track, P_track = track_switch(M_track, P_track, i)
                    if animate:
                        history = np.dstack([history, [M,P]])
    ani = None
    if animate:
        # make it stop on the end for a while
        for _ in range(20):
            history = np.dstack([history, [M,P]])
        ani = animate_history(history)
    if verbose:
        print("Solving time:", time.time()-start_time, "seconds")
    return M,P,M_track,P_track,ani
def drop_duplicates_by_type_or_node(n_df, n1, n2, typ):
    """
    Drop the duplicates in the network, by type or by node.
    For each group of consecutive duplicate edges, only the first edge is
    kept (the data is expected to be pre-sorted so that the first row of a
    group is the one to keep).
    By type, the duplicates are where nd1, nd2, and typ are identical; by node,
    the duplicates are where nd1, and nd2 are identical.
    Parameters:
        n_df (list): the data
        n1 (int): the column for the first node
        n2 (int): the column for the second node
        typ (int): the column for the type
    Returns:
        list: the modified data
    """
    prev_nd1_val = None
    prev_nd2_val = None
    prev_type_val = None
    new_n_df = []
    for row in n_df:
        nd1_val = row[n1]
        nd2_val = row[n2]
        type_val = row[typ]
        nodes_differ = nd1_val != prev_nd1_val or nd2_val != prev_nd2_val
        type_differs = type_val != prev_type_val
        # Bug fix: the original condition `(node-mode and nodes_differ) or
        # (nodes_differ or type_differs)` made the 'node' mode unreachable,
        # since the second clause always applied. In 'node' mode a row that
        # differs only in type is still a duplicate and must be dropped.
        if nodes_differ or (DROP_DUPLICATES_METHOD != 'node' and type_differs):
            new_n_df.append(row)
        prev_nd1_val = nd1_val
        prev_nd2_val = nd2_val
        prev_type_val = type_val
    return new_n_df
def voting(labels):
    """ Majority voting. """
    voted = sitk.LabelVoting(labels, 0)
    return voted
def user_city_country(obj):
    """Get the location (city, country) of the user
    Args:
        obj (object): The user profile
    Returns:
        str: The city and country of user (if exist)
    """
    # Keep only the truthy parts, city first.
    parts = [part for part in (obj.city, obj.country) if part]
    if not parts:
        return 'Not available'
    return ", ".join(str(part) for part in parts)
def test_encrypted_parquet_write_kms_error(tempdir, data_table,
                                           basic_encryption_config):
    """Write an encrypted parquet, but raise KeyError in KmsClient.

    The KMS client is built with an empty master-key map, so key wrapping
    cannot find "footer_key" and the write must surface that KeyError.
    """
    path = tempdir / 'encrypted_table_kms_error.in_mem.parquet'
    encryption_config = basic_encryption_config
    # Empty master_keys_map
    kms_connection_config = pe.KmsConnectionConfig()
    def kms_factory(kms_connection_configuration):
        # Empty master keys map will cause KeyError to be raised
        # on wrap/unwrap calls
        return InMemoryKmsClient(kms_connection_configuration)
    crypto_factory = pe.CryptoFactory(kms_factory)
    with pytest.raises(KeyError, match="footer_key"):
        # Write with encryption properties
        write_encrypted_parquet(path, data_table, encryption_config,
                                kms_connection_config, crypto_factory)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.