content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def _select_command_reply(replies: list, command: int, seq: int=None):
"""
Find a valid command reply
returns dict
"""
filtered_replies = list(filter(lambda x: x["cmd"] == command, replies))
if seq is not None:
filtered_replies = list(filter(lambda x: x["seq"] == seq, filtered_replies))
if len(filtered_replies) == 0:
return None
if len(filtered_replies) > 1:
logger.info("Got multiple replies %s for request [%x:%s]", filtered_replies, command, tf.cmd_to_string.get(command, f'UNKNOWN'))
return filtered_replies[0] | 4ee65e604918a9c1fe4e639c524f6f4e4371f775 | 3,632,700 |
def remove_st_less_than(dataframe, column='ST', less_than=0.001):
    """
    Drop rows whose sensitivity index falls at or below a threshold.

    Args:
        dataframe (pandas.DataFrame): sensitivity analysis output.
        column (str): column holding the index values, default 'ST'.
        less_than (float): rows with values not strictly greater than this
            are removed.

    Returns:
        A new dataframe containing only the rows that pass the threshold.
    """
    keep_mask = dataframe[column] > less_than
    return dataframe[keep_mask]
def count_positives_sum_negatives2(arr):
    """
    Return [count of positive values, sum of the non-positive values].

    A falsy input (e.g. empty list or None) is returned unchanged,
    matching the original guard.
    """
    if not arr:
        return arr
    positive_count = sum(1 for value in arr if value > 0)
    non_positive_sum = sum(value for value in arr if value <= 0)
    return [positive_count, non_positive_sum]
import json
def read_json(url, cached=True):
    """Fetch `url` via read() and parse it as JSON; None when nothing came back."""
    content = read(url, cached)
    if not content:
        return None
    return json.loads(content)
def spherical_Lloyd(radius,
num_cells,
dimension=3,
fixed='center',
approximation='monte-carlo',
approx_n=5000,
max_iter=500,
momentum=0.9,
verbose=0):
"""Creation of kernel point via Lloyd algorithm. We use an approximation of
the algorithm, and compute the Voronoi cell centers with discretization of
space. The exact formula is not trivial with part of the sphere as sides.
Args:
radius: Radius of the kernels
num_cells: Number of cell (kernel points) in the Voronoi diagram.
dimension: dimension of the space
fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
approximation: Approximation method for Lloyd's algorithm ('discretization', 'monte-carlo')
approx_n: Number of point used for approximation.
max_iter: Maximum nu;ber of iteration for the algorithm.
momentum: Momentum of the low pass filter smoothing kernel point positions
verbose: display option
Returns:
points [num_kernels, num_points, dimension]
"""
#######################
# Parameters definition
#######################
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1.0
#######################
# Kernel initialization
#######################
# Random kernel points (Uniform distribution in a sphere)
kernel_points = np.zeros((0, dimension))
while kernel_points.shape[0] < num_cells:
new_points = np.random.rand(num_cells,
dimension) * 2 * radius0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[np.logical_and(d2 < radius0**2,
(0.9 *
radius0)**2 < d2), :]
kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[:3, :] *= 0
kernel_points[1, -1] += 2 * radius0 / 3
kernel_points[2, -1] -= 2 * radius0 / 3
##############################
# Approximation initialization
##############################
# Initialize figure
if verbose > 1:
fig = plt.figure()
# Initialize discretization in this method is chosen
if approximation == 'discretization':
side_n = int(np.floor(approx_n**(1. / dimension)))
dl = 2 * radius0 / side_n
coords = np.arange(-radius0 + dl / 2, radius0, dl)
if dimension == 2:
x, y = np.meshgrid(coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y))).T
elif dimension == 3:
x, y, z = np.meshgrid(coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z))).T
elif dimension == 4:
x, y, z, t = np.meshgrid(coords, coords, coords, coords)
X = np.vstack(
(np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T
else:
raise ValueError('Unsupported dimension (max is 4)')
elif approximation == 'monte-carlo':
X = np.zeros((0, dimension))
else:
raise ValueError(
'Wrong approximation method chosen: "{:s}"'.format(approximation))
# Only points inside the sphere are used
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
#####################
# Kernel optimization
#####################
# Warning if at least one kernel point has no cell
warning = False
# moving vectors of kernel points saved to detect convergence
max_moves = np.zeros((0,))
for iter in range(max_iter):
# In the case of monte-carlo, renew the sampled points
if approximation == 'monte-carlo':
X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
# Get the distances matrix [n_approx, K, dim]
differences = np.expand_dims(X, 1) - kernel_points
sq_distances = np.sum(np.square(differences), axis=2)
# Compute cell centers
cell_inds = np.argmin(sq_distances, axis=1)
centers = []
for c in range(num_cells):
bool_c = (cell_inds == c)
num_c = np.sum(bool_c.astype(np.int32))
if num_c > 0:
centers.append(np.sum(X[bool_c, :], axis=0) / num_c)
else:
warning = True
centers.append(kernel_points[c])
# Update kernel points with low pass filter to smooth mote carlo
centers = np.vstack(centers)
moves = (1 - momentum) * (centers - kernel_points)
kernel_points += moves
# Check moves for convergence
max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1)))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[0, :] *= 0
kernel_points[:3, :-1] *= 0
if verbose:
print('iter {:5d} / max move = {:f}'.format(
iter, np.max(np.linalg.norm(moves, axis=1))))
if warning:
print('{:}WARNING: at least one point has no cell{:}'.format(
bcolors.WARNING, bcolors.ENDC))
if verbose > 1:
plt.clf()
plt.scatter(X[:, 0],
X[:, 1],
c=cell_inds,
s=20.0,
marker='.',
cmap=plt.get_cmap('tab20'))
#plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
plt.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
###################
# User verification
###################
# Show the convergence to ask user if this kernel is correct
if verbose:
if dimension == 2:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8])
ax1.plot(max_moves)
ax2.scatter(X[:, 0],
X[:, 1],
c=cell_inds,
s=20.0,
marker='.',
cmap=plt.get_cmap('tab20'))
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
ax2.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
ax2.add_artist(circle)
ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_aspect('equal')
plt.title('Check if kernel is correct.')
plt.draw()
plt.show()
if dimension > 2:
plt.figure()
plt.plot(max_moves)
plt.title('Check if kernel is correct.')
plt.show()
# Rescale kernels with real radius
return kernel_points * radius | cd3f202945be887a0835ce271bc38c3dc192aa33 | 3,632,704 |
import http
import os
def get_online_version():
    """Download update info and parse it.

    Returns a (version, url) pair: on Windows the URL of the msdos-program
    asset (or the configured fallback), elsewhere the source tarball URL.
    """
    page = http.default_session.get(UPDATE_URL).json()
    version = page.get('tag_name', None)
    if os.name != 'nt':
        # Non-Windows platforms download the source tarball.
        return version, page.get('tarball_url', None)
    try:
        candidates = (asset['browser_download_url']
                      for asset in page['assets']
                      if asset['content_type'] == 'application/x-msdos-program')
        url = next(candidates, configuration.Url)
    except KeyError:
        # Missing 'assets'/'content_type' keys: no usable download URL.
        url = None
    return version, url
import traceback
import sys
def log_errors(func):
    """
    Helper to print errors to stderr, since an issue with
    Django Debug Toolbar prevents exceptions
    from threads from being logged to the console.

    Args:
        func: async callable to wrap.
    Returns:
        An async wrapper that prints the traceback to stderr (when
        settings.DEBUG is on) and re-raises the exception.
    """
    import functools  # local import so the decorator stays self-contained

    @functools.wraps(func)  # preserve the wrapped coroutine's name/docstring
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except Exception:
            if settings.DEBUG:
                traceback.print_exc(file=sys.stderr)
            raise  # bare raise keeps the original traceback intact
    return wrapper
def first_primes(n: Integral) -> list:
    """
    Generate a sorted list of the first n primes.

    A cast via int() is applied if the input is not an integer; non-positive
    values yield an empty list.
    """
    count = int(n)
    primes = []
    candidate = 2
    while len(primes) < count:
        # Trial-divide by the primes found so far, up to sqrt(candidate).
        # The original capped divisors at 10, which wrongly admitted
        # composites such as 121 (= 11 * 11) from the 31st "prime" onward.
        if all(candidate % p for p in primes if p * p <= candidate):
            primes.append(candidate)
        candidate += 1
    return primes
def is_ne_atom(phi: Formula):
    """ Return whether the given formula is an inequality atom """
    if not isinstance(phi, Atom):
        return False
    return phi.predicate.symbol == BuiltinPredicateSymbol.NE
def fact(n):
    """Return the factorial of the given number.

    Any value <= 0 yields 1; non-integer inputs are multiplied down
    in steps of one, matching the original loop.
    """
    result = 1
    remaining = n
    while remaining > 0:
        result *= remaining
        remaining -= 1
    return result
def getCoatThickCorr(f, materials, wavelength, dOpt, dTE, dTR):
    """Finite coating thickness correction
    :f: frequency array in Hz
    :materials: gwinc optic materials structure
    :wavelength: laser wavelength
    :dOpt: coating layer thickness array (Nlayer x 1)
    :dTE: thermo-elastic contribution (\\Delta \\bar{\\alpha} d)
    :dTR: thermo-refractive contribution (-\\bar{\\beta} lambda)
    Returns the correction factor gTC (same shape as f).
    Uses correction factor from LIGO-T080101, "Thick Coating
    Correction" (Evans).
    See getCoatThermoOptic for example usage.
    """
    ##############################################
    # For comparison in the bTR = 0 limit, the
    # equation from Fejer (PRD70, 2004)
    # modified so that gFC -> 1 as xi -> 0
    #  gTC = (2 ./ (R * xi.**2)) .* (sh - s + R .* (ch - c)) ./ ...
    #    (ch + c + 2 * R * sh + R**2 * (ch - c));
    # which in the limit of xi << 1 becomes
    #  gTC = 1 - xi * (R - 1 / (3 * R));
    # parameter extraction
    pS = materials.Substrate
    Cs = pS.MassCM * pS.MassDensity
    Ks = pS.MassKappa
    # compute coating average parameters
    dc, Cc, Kc, junk = getCoatAvg(materials, wavelength, dOpt)
    # R and xi (from T080101, Thick Coating Correction)
    # R: ratio of coating to substrate thermal impedance; xi: normalized
    # thermal diffusion length through the coating at angular frequency w.
    w = 2 * pi * f
    R = sqrt(Cc * Kc / (Cs * Ks))
    xi = dc * sqrt(2 * w * Cc / Kc)
    # trig functions of xi
    s = sin(xi)
    c = cos(xi)
    sh = sinh(xi)
    ch = cosh(xi)
    # pR and pE (dTR = -\bar{\beta} lambda, dTE = \Delta \bar{\alpha} d)
    # relative weights of the thermo-refractive and thermo-elastic terms
    pR = dTR / (dTR + dTE)
    pE = dTE / (dTR + dTE)
    # various parts of gTC
    g0 = 2 * (sh - s) + 2 * R * (ch - c)
    g1 = 8 * sin(xi / 2) * (R * cosh(xi / 2) + sinh(xi / 2))
    g2 = (1 + R**2) * sh + (1 - R**2) * s + 2 * R * ch
    gD = (1 + R**2) * ch + (1 - R**2) * c + 2 * R * sh
    # and finally, the correction factor
    gTC = (pE**2 * g0 + pE * pR * xi * g1 + pR**2 * xi**2 * g2) / (R * xi**2 * gD)
    return gTC | 92734aced5c17e44b97e73f57850409fcddc3102 | 3,632,710 |
from datetime import datetime
import os
from shutil import copyfile
def create_logdir(directory, algorithm, env_name, config_path):
    """
    Create a directory inside the specified directory for
    logging experiment results and return its path name.
    Include the environment name (Sharpe, etc.) in the directory name,
    and also copy the config.

    Args:
        directory: parent directory for experiment folders.
        algorithm: algorithm name, used in the folder name.
        env_name: environment name, used in the folder name.
        config_path: path of a config file to copy into the folder, or None.
    Returns:
        Path of the (possibly newly created) experiment directory.
    """
    # NOTE(review): the ':' characters in the timestamp are invalid in Windows
    # paths -- confirm this only ever runs on POSIX systems.
    experiment_dir = f"{directory}/{algorithm}-{env_name}-{datetime.now():%Y-%m-%d_%H:%M:%S}"
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(experiment_dir, exist_ok=True)
    if config_path is not None:
        copyfile(config_path, f"{experiment_dir}/config.yml")
    return experiment_dir
import itertools
def create_purchase(row):
    """Creates Purchase object based on BeautifulSoup table row object"""
    # Each access to row.stripped_strings yields a fresh generator, so the
    # islice offsets below are absolute column positions within the row.
    name = next(itertools.islice(row.stripped_strings, 0, 1))
    # NOTE: `id` shadows the builtin; kept for interface stability.
    id = row.a['data-character-id']
    # Price column may contain whitespace (thousands separators); strip it.
    price = "".join(next(itertools.islice(row.stripped_strings, 3, 4)).split())
    # Level is encoded as the number of star images in the row.
    level = len(row.find_all(src=BASE_LEVEL_STAR_IMAGE_URL))
    purchase_date = next(itertools.islice(row.stripped_strings, 4, 5))
    if "Private" in purchase_date:
        # Private purchases puts the date in the next column
        purchase_date = next(itertools.islice(row.stripped_strings, 5, 6))
    return Purchase(name, id, price, level, purchase_date) | 3676d8e382bd9cc7441118cdd19a6e12fe955821 | 3,632,712 |
def accuracy(predictions, targets):
    """
    Compute the prediction accuracy: the fraction of samples whose
    highest-scoring class matches the one-hot target class.

    Args:
        predictions: 2D float tensor of size [batch_size, n_classes]
        targets: 2D int tensor of size [batch_size, n_classes] with
            one-hot encoding; ground truth labels for each sample.
    Returns:
        accuracy: scalar float tensor, average of correct predictions
            over the whole batch.
    """
    predicted_classes = predictions.max(1)[1]
    true_classes = targets.max(1)[1]
    correct = (predicted_classes == true_classes).sum().float()
    return correct / predicted_classes.shape[0]
import logging
import os
def prepare_intercancer_data(jx_dir):
    """Count, across TCGA cancer types, how many cancers share each junction.

    :param jx_dir: directory containing per-cancer junction files
    :return: (intercan_counts, flag) -- intercan_counts is a DataFrame with
        'num_cancers' and 'neojx_count' columns; flag is the filename tag
        returned by get_jx_prev_filename for the last cancer processed.

    NOTE(review): relies on module-level names (_TCGA_ABBR, out_path, pd and
    the jx helper functions); `flag` stays undefined if no cancer yields a
    file -- confirm _TCGA_ABBR is never empty in practice.
    """
    jx_counts = {}
    count_df_dict = {'jx': [], 'top_x': []}
    for cancer in _TCGA_ABBR:
        logging.info('starting cancer {}'.format(cancer))
        jx_file, flag, prev_glob = get_jx_prev_filename(jx_dir, cancer)
        if not jx_file:
            logging.info('no file, continuing')
            continue
        init_df = jx_df_from_file(
            jx_file, min_per=0, max_per=1.0, chunk_it=True,
            glob_form=prev_glob, sample=False, top_x=False
        )
        jx_list = init_df['jx'].tolist()
        # Tally how many cancers each junction appears in.
        for jx in jx_list:
            try:
                jx_counts[jx] += 1
            except KeyError:
                jx_counts[jx] = 1
    for jx, count in jx_counts.items():
        count_df_dict['jx'].append(jx)
        count_df_dict['top_x'].append(count)
    intercan_df = pd.DataFrame(count_df_dict)
    # Junctions seen in more than one cancer, most widely shared first.
    shared_jxs = intercan_df[intercan_df['top_x'] > 1].sort_values(
        by='top_x', ascending=False
    )
    shared_file = os.path.join(
        out_path, 'intracancer_shared_{}_jxs.csv'.format(flag)
    )
    with open(shared_file, 'w') as output:
        shared_jxs.to_csv(output, index=False)
    # Histogram: for each "shared by k cancers" value, how many junctions.
    tempgroup = intercan_df.groupby(['top_x'])['jx'].count()
    intercan_counts = pd.DataFrame(
        {'num_cancers': tempgroup.index,
         'neojx_count': tempgroup.values}
    )
    return intercan_counts, flag | 6756b50c28f893931601e6d789424ae546ef07a7 | 3,632,714 |
def load_tough_corners(fname):
    """
    Load the mesh of the domain; discarded by default. Use VTK_output = .TRUE.
    to obtain from MeshMaker_V2. fname should be the CORNERS file.

    File format (per line): x y z vert_id [extra fields]. A line with more
    than ndim+1 fields starts a new element, whose name is the second-to-last
    field. Lines beginning with ">>>" are separators and are skipped.

    Returns:
        npverts (ndarray): (n_vertices, ndim) vertex coordinates, densified.
        npcells (ndarray): (n_cells, verts_per_cell) indices into npverts.
        elemnames (list): element name per cell, in file order.
    """
    ndim = 3
    verts = {}       # original vertex id -> coordinates (first occurrence fixes order)
    cells = []       # list of element vertex-id lists
    elemnames = []
    elem = []
    # Context manager ensures the file is closed even if parsing fails.
    with open(fname, "r") as f:
        for line in f:
            if line[0:3] == ">>>":
                continue
            sp = line.split()
            if len(sp) > ndim + 1:
                # Longer line = start of a new element: flush the previous one.
                cells.append(elem)
                elem = []
                elemnames.append(sp[-2])
            verts[int(sp[ndim])] = np.array(sp[:ndim], dtype=np.double)
            elem.append(int(sp[ndim]))
    cells.append(elem)  # push the last element, which has no trailing header
    cells.pop(0)        # drop the empty list flushed by the very first header
    # Densify the vertex dict into an array. Ids follow the original TOTAL
    # BLOCK order (before carving), so build a translation map first.
    npverts = np.empty((len(verts), ndim), dtype=np.double)
    vertskey = {orig_id: row for row, orig_id in enumerate(verts)}
    for orig_id, coord in verts.items():
        npverts[vertskey[orig_id], :] = coord
    # Use cells[0] (not cells[1]) so a single-element mesh does not crash;
    # all elements are assumed to have the same vertex count.
    npcells = np.empty((len(cells), len(cells[0])), dtype=np.intc)
    for row, cell in enumerate(cells):
        npcells[row, :] = [vertskey[j] for j in cell]
    return npverts, npcells, elemnames
import io
def dumps(obj, bytes=False):
    """WARNING: can mutate obj

    Serialize obj via dump() into an in-memory buffer and return its
    contents; `bytes` selects a binary buffer instead of a text one.
    """
    buffer = io.BytesIO() if bytes else io.StringIO()
    dump(obj, buffer)
    return buffer.getvalue()
def evaluate_results(model, contexts, layer_dimension, all_tasks_test_data, superposition, task_index, first_average, use_MLP, batch_size, use_PSP=False):
    """
    Evaluate the results on test data with or without using superposition. Return accuracy, AUROC and AUPRC.

    :param model: torch model instance
    :param contexts: binary context vectors (shape=(num_tasks-1, num of model layers))
    :param layer_dimension: list of 0 (first dimension taken for context size) and 1 (second dimension taken)
    :param all_tasks_test_data: list of all test data [X_test, y_test, mask_test] until the current task index
    :param superposition: boolean - True, if superposition is used
    :param task_index: index of the current task, which is being learned
    :param first_average: string - show results on 'first' task only or the 'average' results until current task index
    :param use_MLP: boolean - if True use MLP, else use Transformer
    :param batch_size: batch size
    :param use_PSP: boolean - if True, PSP method is used, meaning we need set of contexts for each task (including the first)
    :return: accuracy, AUROC, AUPRC
    """
    if superposition:   # superposition used
        if first_average == 'first':    # not implemented for PSP
            # unfold network parameters to the first task
            # (context multiplication is its own inverse for binary contexts,
            # so applying the contexts again below restores the weights)
            for task_i in range(task_index - 1, -1, -1):
                context_multiplication(model, contexts, layer_dimension, task_i)
            # evaluate the model on the first task
            acc, auroc, auprc = evaluate_current_task(model, all_tasks_test_data, 0, use_MLP)
            # restore model parameters to the old ones (before context multiplication)
            for task_i in range(task_index):
                context_multiplication(model, contexts, layer_dimension, task_i)
            return acc, auroc, auprc
        elif first_average == 'average':
            if use_PSP:
                return evaluate_tasks_average_PSP(model, all_tasks_test_data, contexts, layer_dimension, task_index, use_MLP)
            else:
                return evaluate_tasks_average(model, all_tasks_test_data, contexts, layer_dimension,
                                              superposition, task_index, use_MLP, batch_size)
        else:
            raise ValueError('The value of "first_average" has to be string "first" or "average".')
    else:   # superposition not used
        if first_average == 'first':
            return evaluate_current_task(model, all_tasks_test_data, 0, use_MLP)
        elif first_average == 'average':
            return evaluate_tasks_average(model, all_tasks_test_data, contexts, layer_dimension,
                                          superposition, task_index, use_MLP, batch_size)
        else:
            raise ValueError('The value of "first_average" has to be string "first" or "average".') | 3d8bdaae78d53df9edfd767af85229c73a96b12e | 3,632,717 |
def list_column(column, original_name):
    """Get all non nan values from column.

    For 'original_formatted' columns each row is itself a list and its
    nan entries are dropped; otherwise each scalar is wrapped in a list.
    """
    if original_name != 'original_formatted':
        return [[value] for value in column]
    return [[value for value in row if str(value) != 'nan'] for row in column]
def query_audioobject(identifier: str = None, title: str = None, contributor: str = None,
                      creator: str = None, source: str = None, format_: str = None, name: str = None,
                      date: str = None, encodingformat: str = None,
                      embedurl: str = None, url: str = None, contenturl: str = None, language: str = None,
                      inlanguage: str = None, license_: str = None, filter_: dict = None,
                      return_items: list = None):
    """Returns a query for reading an AudioObject from the CE.

    Arguments:
        identifier: return nodes with this identifier
        title: return nodes with this title
        contributor: return nodes with this contributor
        creator: return nodes with this creator
        source: return nodes with this source
        format_: return nodes with this format
        name: return nodes with this name
        date: return nodes with this date
        encodingformat: return nodes with this encodingFormat
        embedurl: return nodes with this embedUrl
        url: return nodes with this url
        contenturl: return nodes with this contentUrl
        language: return nodes with this language
        inlanguage: return nodes with this inLanguage
        license_: return nodes with this license
        filter_: return nodes with this custom filter
        return_items: return these items in the response
    Returns:
        The string for the querying the media object.
    """
    if return_items is None:
        return_items = ["identifier", "creator", "title", "source"]
    # Map CE field names to the supplied values (trailing underscores strip
    # Python keyword clashes); order matters for the rendered query.
    query_args = {
        "identifier": identifier,
        "title": title,
        "contributor": contributor,
        "creator": creator,
        "source": source,
        "format": format_,
        "name": name,
        "date": date,
        "encodingFormat": encodingformat,
        "embedUrl": embedurl,
        "url": url,
        "contentUrl": contenturl,
        "language": language,
        "inLanguage": inlanguage,
        "license": license_,
    }
    if filter_:
        query_args["filter"] = make_filter(filter_)
    return format_query("AudioObject", filter_none_args(query_args), return_items)
import copy
def test_create_new_grant_alt2(cbcsdk_mock):
    """Test creation of a grant and the profile inside it with more options."""
    def respond_to_profile_grant(url, body, **kwargs):
        # Echo the request's profile_uuid back in the canned response so the
        # SDK can match the created profile.
        ret = copy.deepcopy(POST_PROFILE_IN_GRANT_RESP_2)
        ret['profile_uuid'] = body['profile_uuid']
        return ret
    cbcsdk_mock.mock_request('POST', '/access/v2/orgs/test/grants', POST_GRANT_RESP)
    cbcsdk_mock.mock_request('POST', '/access/v2/orgs/test/grants/psc:user:ABC12345:DEF67890/profiles',
                             respond_to_profile_grant)
    api = cbcsdk_mock.api
    # Build a grant with one role and a nested profile with org scoping,
    # disabled flag, and an expiration condition.
    grant_builder = Grant.create(api, org_key='ABC12345', userid='DEF67890')
    grant_builder.set_org('test').add_role("psc:role::SECOPS_ROLE_MANAGER").set_principal_name('Doug Jones')
    profile_builder = grant_builder.create_profile().set_orgs(["test2"]).set_roles(["psc:role::SECOPS_ROLE_MANAGER"])
    profile = profile_builder.set_disabled(True).set_expiration('20211031T12:34:56').build()
    grant = grant_builder.build()
    # Verify the built objects reflect every builder option that was set.
    assert grant.roles == ["psc:role::SECOPS_ROLE_MANAGER"]
    assert profile.orgs['allow'] == ["psc:org:test2"]
    assert profile.conditions['expiration'] == '20211031T12:34:56'
    assert profile.conditions['disabled'] | 75ddaa6d488409e04b4045aed34813804659c0ee | 3,632,720 |
from models.user import User
def stats() -> str:
    """ GET /api/v1/stats
    Return:
      - the number of each objects
    """
    # Local renamed so it no longer shadows the view function itself.
    counts = {'users': User.count()}
    return jsonify(counts)
from optparse import OptionParser
def main(args=None):
    """The main function; parses options and plots traffic-light positions on the network."""
    # ---------- build and read options ----------
    optParser = OptionParser()
    optParser.add_option("-n", "--net", dest="net", metavar="FILE",
                         help="Defines the network to read")
    optParser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                         default=False, help="If set, the script says what it's doing")
    optParser.add_option("-w", "--width", dest="width",
                         type="float", default=20, help="Defines the width of the dots")
    optParser.add_option("-c", "--color", dest="color",
                         default='r', help="Defines the dot color")
    optParser.add_option("--edge-width", dest="defaultWidth",
                         type="float", default=1, help="Defines the edge width")
    optParser.add_option("--edge-color", dest="defaultColor",
                         default='k', help="Defines the edge color")
    # standard plot options
    helpers.addInteractionOptions(optParser)
    helpers.addPlotOptions(optParser)
    # parse
    options, remaining_args = optParser.parse_args(args=args)
    if options.net is None:
        print("Error: a network to load must be given.")
        return 1
    if options.verbose:
        print("Reading network from '%s'" % options.net)
    net = sumolib.net.readNet(options.net)
    # Collect, for each traffic light, the set of junction nodes it controls.
    tlsn = {}
    for tid in net._id2tls:
        t = net._id2tls[tid]
        tlsn[tid] = set()
        for c in t._connections:
            n = c[0].getEdge().getToNode()
            tlsn[tid].add(n)
    # Plot each traffic light at the centroid of its controlled nodes.
    tlspX = []
    tlspY = []
    for tid in tlsn:
        x = 0
        y = 0
        n = 0
        for node in tlsn[tid]:
            x += node._coord[0]
            y += node._coord[1]
            n = n + 1
        x = x / n
        y = y / n
        tlspX.append(x)
        tlspY.append(y)
    fig, ax = helpers.openFigure(options)
    ax.set_aspect("equal", None, 'C')
    helpers.plotNet(net, {}, {}, options)
    plt.plot(tlspX, tlspY, options.color, linestyle='None',
             marker='o', markersize=options.width, label='Traffic light')
    options.nolegend = False
    helpers.closeFigure(fig, ax, options) | f387756ac777f624f5ccdc2e0b70d72ded646b68 | 3,632,722 |
def create_split(
    dataset_builder: tfds.core.DatasetBuilder,
    batch_size: int,
    train: bool,
    dtype: tf.DType = tf.float32,
    image_size: int = IMAGE_SIZE,
    cache: bool = False,
) -> tf.data.Dataset:
  """Creates a split from the ImageNet dataset using TensorFlow Datasets.

  Args:
    dataset_builder: TFDS dataset builder for ImageNet.
    batch_size: the batch size returned by the data pipeline.
    train: whether to load the train or evaluation split.
    dtype: data type of the image.
    image_size: the target size of the images.
    cache: whether to cache the dataset.
  Returns:
    A `tf.data.Dataset`.
  """
  split = 'train' if train else 'validation'
  # Shard the split evenly across JAX processes so each host reads a
  # disjoint contiguous slice of examples.
  num_examples = dataset_builder.info.splits[split].num_examples
  split_size = num_examples // jax.process_count()
  start = jax.process_index() * split_size
  split = f'{split}[{start}:{start + split_size}]'
  def decode_example(example):
    # Random crop + flip for training; deterministic center crop for eval.
    decode_fn = _decode_and_random_crop if train else _decode_and_center_crop
    image = decode_fn(example['image'], image_size)
    image = tf.reshape(image, [image_size, image_size, 3])
    if train:
      image = tf.image.random_flip_left_right(image)
    image = normalize_image(image)
    image = tf.image.convert_image_dtype(image, dtype=dtype)
    return {'image': image, 'label': example['label']}
  # Skip the default image decoding so cropping can decode only the
  # region it needs.
  ds = dataset_builder.as_dataset(
      split=split,
      decoders={'image': tfds.decode.SkipDecoding()},
  )
  # NOTE(review): `experimental_threading` is deprecated in newer TF
  # releases (renamed to `threading`) -- confirm against the pinned version.
  options = tf.data.Options()
  options.experimental_threading.private_threadpool_size = 48
  ds = ds.with_options(options)
  if cache:
    ds = ds.cache()
  if train:
    ds = ds.repeat()
    ds = ds.shuffle(16 * batch_size, seed=0)
  ds = ds.map(decode_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  ds = ds.batch(batch_size, drop_remainder=True)
  # Eval repeats too, so incomplete final batches never reach the model.
  if not train:
    ds = ds.repeat()
  return ds.prefetch(10) | bfb9da0d16f6ef21e0f26a7bcec9b196c1358139 | 3,632,723 |
import random
def randomString(stringLength=16):
    """Generate a random hex string encoding `stringLength` random bytes.

    Each byte becomes two hex digits, so the result has 2 * stringLength
    characters. Uses the (seedable) `random` module, so this is NOT
    suitable for security-sensitive tokens -- use `secrets` for those.
    """
    # '{:02x}' formatting is byte-for-byte equivalent to the original
    # chr(b).encode("ISO-8859-1").hex() round-trip, one byte per iteration.
    return ''.join('{:02x}'.format(random.randrange(256)) for i in range(stringLength))
def get_uri():
    """
    Resolve a (domain, term) pair from the POST body into a full resource URI.

    POST body:
    {
        domain: string representing the domain name
        term: string or number referring to be used on composing the URI for the element
    }
    :return:
    {
        uri: string with the URI for the resource
    }
    """
    payload = request.json
    # NOTE(review): [0] raises IndexError when the domain is unknown --
    # presumably domains are pre-validated; confirm, or use .first().
    uri = DomainURLsMap.query.filter_by(domain=payload['domain'])[0]
    # base_URL is a format template with one placeholder for the term.
    return {'uri': uri.base_URL.format(payload['term'])} | c410eff64e01312a12792c8f9bdd36ef44a1e765 | 3,632,725 |
def split_in_groups(key_func, l, continuous=True, max_group_size=None):
    """
    Split the list `l` into groups according to the `key_func`.

    Go over the list and group the elements with the same key value together.
    If ``continuous==False``, groups all elements with the same key together
    regardless of where they are in the list; otherwise, group only continuous
    sequences of the elements with the same key together (an element with a
    different key in the middle will result in two groups).
    If ``continuous==True`` and `max_group_size` is not ``None``, it determines
    the maximal size of a group; larger groups are split into separate groups.
    """
    if continuous:
        if not l:
            return []
        groups = []
        g = [l[0]]
        key = key_func(l[0])
        for e in l[1:]:
            ek = key_func(e)
            # Start a new group on a key change, or when the current group is full.
            if ek != key or (max_group_size is not None and len(g) >= max_group_size):
                key = ek
                groups.append(g)
                g = []
            g.append(e)
        groups.append(g)
        return groups
    # Non-continuous: bucket by key, preserving first-seen key order.
    # The original used groups.get(key, []).append(e), which never stored the
    # fresh default list, silently discarding every element -- setdefault
    # actually inserts the bucket.
    groups = {}
    for e in l:
        groups.setdefault(key_func(e), []).append(e)
    # list(groups.values()) replaces the undefined `viewvalues` helper.
    return list(groups.values())
def delete_orderitem(request, id=None):
    """
    Delete an OrderItem from the current order CART, then redirect back
    to the order-creation view.

    :param request: Django HttpRequest; only POST performs the deletion.
    :param id: primary key of the OrderItem to delete.

    NOTE(review): non-POST requests fall through and return None (an
    invalid view response), and a missing id raises DoesNotExist --
    presumably the URLconf/forms guarantee both; confirm.
    """
    if request.method == "POST":
        order_to_delete = OrderItem.objects.get(id=id)
        order_to_delete.delete()
        return redirect('create-order') | adbc9a12358bab86c44759136865a2f8a2508c26 | 3,632,727 |
import warnings
def moments_dbs(data, xi_n, t_bootstrap=0.5, r_bootstrap=500, eps_stop=1.0,
                verbose=False, diagn_plots=False, sort=True):
    """Double-bootstrap procedure for moments estimator.

    Parameters
    ----------
    data : (N, ) array_like
        _Numpy_ array for which double-bootstrap is performed.
        Data has to be sorted in decreasing order.
        By default (``sort=True``) it is sorted, but this leads to copying
        the data. To avoid this pass sorted data and set ``sort=False``.
    xi_n : float
        Moments tail index estimate corresponding to
        sqrt(n)-th order statistic.
    t_bootstrap : float
        Parameter controlling the size of the 2nd
        bootstrap. Defined from n2 = n*(t_bootstrap).
    r_bootstrap : int
        Number of bootstrap resamplings for the 1st and 2nd bootstraps.
    eps_stop : float
        Parameter controlling range of AMSE minimization.
        Defined as the fraction of order statistics to consider
        during the AMSE minimization step.
    verbose : int or bool
        Flag controlling bootstrap verbosity.
    diagn_plots : bool
        Flag to switch on/off generation of AMSE diagnostic plots.
    sort : bool
        Should data be copied and sorted.

    Returns
    -------
    k_star : int
        number of order statistics optimal for estimation
        according to the double-bootstrap procedure.
    x1_arr : (N, ) array_like
        Array of fractions of order statistics used for the
        1st bootstrap sample.
    n1_amse : (N, ) array_like
        Array of AMSE values produced by the 1st bootstrap sample.
    k1_min : float
        Value of fraction of order statistics corresponding
        to the minimum of AMSE for the 1st bootstrap sample.
    max_index1 : int
        Index of the 1st bootstrap sample's order statistics
        array corresponding to the minimization boundary set
        by eps_stop parameter.
    x2_arr : (N, ) array_like
        array of fractions of order statistics used for the
        2nd bootstrap sample.
    n2_amse : (N, ) array_like
        array of AMSE values produced by the 2nd bootstrap sample.
    k2_min : float
        Value of fraction of order statistics corresponding
        to the minimum of AMSE for the 2nd bootstrap sample.
    max_index2 : int
        Index of the 2nd bootstrap sample's order statistics
        array corresponding to the minimization boundary set
        by eps_stop parameter.
    """
    # pylint: disable=too-many-locals,too-many-statements
    if sort:
        data = np.sort(data)[::-1]
    if verbose > 0:
        print("Performing moments double-bootstrap ...")
    n = len(data)
    eps_bootstrap = 0.5*(1+np.log(int(t_bootstrap*n))/np.log(n))
    # first bootstrap with n1 sample size
    n1 = int(n**eps_bootstrap)
    samples_n1 = np.zeros(n1-1)
    good_counts1 = np.zeros(n1-1)
    for _ in range(r_bootstrap):
        sample = np.random.choice(data, n1, replace=True)
        sample[::-1].sort()
        # xi_2 / xi_3 are second- and third-order moments estimates per
        # order statistic; their squared difference approximates the AMSE.
        M1, M2, M3 = estimate_moments_3(sample, sort=False)
        xi_2 = M1 + 1. - 0.5*((1. - (M1*M1)/M2))**(-1.)
        xi_3 = np.sqrt(0.5*M2) + 1. - (2./3.)*(1. / (1. - M1*M2/M3))
        samples_n1 += (xi_2 - xi_3)**2
        # NOTE(review): `x != np.nan` is always True (NaN compares unequal
        # to everything, including itself), so this counts every index --
        # presumably ~np.isnan(...) was intended; confirm before relying
        # on the AMSE averages when NaNs occur.
        good_counts1[np.where((xi_2 - xi_3)**2 != np.nan)] += 1
    max_index1 = (np.abs(np.linspace(1./n1, 1.0, n1) - eps_stop)).argmin()
    averaged_delta = samples_n1 / good_counts1
    k1 = np.nanargmin(averaged_delta[:max_index1]) + 1 #take care of indexing
    if diagn_plots:
        n1_amse = averaged_delta
        x1_arr = np.linspace(1./n1, 1.0, n1)
    # r second bootstrap with n2 sample size
    n2 = int(n1*n1/float(n))
    samples_n2 = np.zeros(n2-1)
    good_counts2 = np.zeros(n2-1)
    for _ in range(r_bootstrap):
        sample = np.random.choice(data, n2, replace=True)
        sample[::-1].sort()
        M1, M2, M3 = estimate_moments_3(sample, sort=False)
        xi_2 = M1 + 1. - 0.5*(1. - (M1*M1)/M2)**(-1.)
        xi_3 = np.sqrt(0.5*M2) + 1. - (2./3.)*(1. / (1. - M1*M2/M3))
        samples_n2 += (xi_2 - xi_3)**2
        # NOTE(review): same always-True NaN comparison as above.
        good_counts2[np.where((xi_2 - xi_3)**2 != np.nan)] += 1
    max_index2 = (np.abs(np.linspace(1./n2, 1.0, n2) - eps_stop)).argmin()
    averaged_delta = samples_n2 / good_counts2
    k2 = np.nanargmin(averaged_delta[:max_index2]) + 1 #take care of indexing
    if diagn_plots:
        n2_amse = averaged_delta
        x2_arr = np.linspace(1./n2, 1.0, n2)
    if k2 > k1:
        warnings.warn(
            "estimated k2 is greater than k1! Re-doing bootstrap ...",
            MomentsEstimatorWarning
        )
        return 9*[None]
    #calculate estimated optimal stopping k
    prefactor = moments_dbs_prefactor(xi_n, n1, k1)
    k_star = int((k1*k1/float(k2)) * prefactor)
    if int(k_star) >= len(data):
        warnings.warn(
            "estimated threshold k is larger than the size of data",
            MomentsEstimatorWarning
        )
        k_star = len(data) - 1
    if verbose > 0:
        print("--- Moments double-bootstrap information ---")
        print("Size of the 1st bootstrap sample n1:", n1)
        print("Size of the 2nd bootstrap sample n2:", n2)
        print("Estimated k1:", k1)
        print("Estimated k2:", k2)
        print("Estimated constant:", prefactor)
        print("Estimated optimal k:", k_star)
        print("--------------------------------------------")
    if not diagn_plots:
        x1_arr, x2_arr, n1_amse, n2_amse = None, None, None, None
    return k_star, x1_arr, n1_amse, k1/float(n1), max_index1, x2_arr, n2_amse, k2/float(n2), max_index2 | 1d812561daf3ced06d68ef069dd61ace5ce4ca5a | 3,632,728 |
def http_headers(headers=None):
    """Construct common HTTP headers from the jomiel options.

    Args:
        headers (dict): additional headers to use

    Returns:
        A headers dictionary ready to be used with `requests`

    """
    # Start from the configured user-agent and layer any caller-supplied
    # headers on top (caller values win on key collisions).
    combined = {"user-agent": opts.http_user_agent}
    combined.update(headers or {})
    return combined
import requests
from bs4 import BeautifulSoup
def stock_classify_board() -> dict:
    """
    http://vip.stock.finance.sina.com.cn/mkt/

    :return: mapping of board name to a name/code DataFrame
    :rtype: dict
    """
    url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodes"
    data_json = requests.get(url).json()
    board_nodes = data_json[1][0][1]  # Shanghai & Shenzhen markets
    # Some board labels arrive wrapped in an HTML <font> tag; strip it.
    board_names = []
    for node in board_nodes:
        raw_label = node[0]
        if "font" in raw_label:
            board_names.append(BeautifulSoup(raw_label, "lxml").find("font").text)
        else:
            board_names.append(raw_label)
    result = {}
    for num, board_name in enumerate(board_names):
        frame = pd.DataFrame([item for item in board_nodes[num][1:][0]])
        # Column layout varies per board; keep only the name/code pair.
        if frame.shape[1] == 5:
            frame.columns = ["name", "_", "code", "_", "_"]
            frame = frame[["name", "code"]]
        if frame.shape[1] == 4:
            frame.columns = ["name", "_", "code", "_"]
            frame = frame[["name", "code"]]
        if frame.shape[1] == 3:
            frame.columns = ["name", "_", "code"]
            frame = frame[["name", "code"]]
        result[board_name] = frame
    return result
def compute_rgroup_dataframe(pdb_df: pd.DataFrame) -> pd.DataFrame:
    """Return the atoms that are in R-groups and not the backbone chain.

    :param pdb_df: DataFrame to compute R group dataframe from
    :type pdb_df: pd.DataFrame
    :returns: Dataframe containing R-groups only (backbone atoms removed)
    :rtype: pd.DataFrame
    """
    # Dropping every backbone atom leaves just the side-chain (R-group) atoms.
    rgroup_df = filter_dataframe(pdb_df, "atom_name", BACKBONE_ATOMS, False)
    return rgroup_df
from typing import Dict
def find_requests(spec: Dict) -> Dict:
    """
    Returns a Dict like:
        {
            'pets': {
                'create_a_pet': {'method': 'POST', 'url': 'http://petstore.swagger.io/v1/pets'},
                'info_for_a_pet': {'method': 'GET', 'url': 'http://petstore.swagger.io/v1/pets/:petId'},
                'list_pets': {'method': 'GET', 'url': 'http://petstore.swagger.io/v1/pets?limit=-71686804'},
            },
        }
    """
    base_url, _ = extract_base_url_from_spec(spec)
    folders = {}
    for path, operations in spec["paths"].items():
        # Folder name comes from the last path segment.
        folder_name = normalize_class_name(path.split("/")[-1])
        requests_for_path = {}
        for verb in operations:
            if verb not in METHODS:
                continue
            function_name = normalize_function_name(operations[verb]["summary"])
            requests_for_path[function_name] = {"method": verb.upper(), "url": base_url + path}
        folders[folder_name] = requests_for_path
    return folders
import collections
import collections.abc
from typing import OrderedDict

import six
def flatten_ordered(dd, separator='_', prefix='', is_list_fn=lambda x: isinstance(x, list)):
    """Flatten a nested dictionary/list into a single-level OrderedDict.

    Args:
        dd: nested mapping/list structure to flatten.
        separator: how to separate different hierarchical levels.
        prefix: string prepended to every generated key.
        is_list_fn: function to determine whether to split the list/numpy.array into individual
            keys or to include the element as a value.

    Returns:
        ``OrderedDict`` mapping joined key paths to leaf values; a non-container
        input is returned as ``OrderedDict({prefix: dd})``.
    """
    # NOTE: collections.Mapping was removed in Python 3.10; the abstract base
    # class lives in collections.abc. six.iteritems is unnecessary on Python 3.
    if isinstance(dd, collections.abc.Mapping):
        if not dd:
            return dd
        return collections.OrderedDict(
            (prefix + separator + k if prefix else k, v)
            for kk, vv in dd.items()
            for k, v in flatten_ordered(vv, separator, kk, is_list_fn).items()
        )
    elif is_list_fn(dd):
        if not dd:
            return dd
        return collections.OrderedDict(
            (prefix + separator + k if prefix else k, v)
            for kk, vv in enumerate(dd)
            for k, v in flatten_ordered(vv, separator, str(kk), is_list_fn).items()
        )
    else:
        return collections.OrderedDict([(prefix, dd)])
def error_internal_server(error):
    """ Return default JSON error message for unhandled Internal Server error """
    # `error` (the exception passed in by the framework) is intentionally
    # unused: the response is a generic payload built by the shared handler.
    return APIError.default_handler(APIInternalError)
def apply_functions(lst, functions):
    """
    In-place transform: replace each element of ``lst`` with the result of
    the function at the same position.

    :param lst: list of values (mutated in place)
    :param functions: list of functions, one per value.
        Each function has 2 inputs: index of value and value
    :return: ``lst`` after replacement, i.e. the respective function applied
        to each of the values
    """
    assert len(lst) == len(functions)
    for idx, (transform, value) in enumerate(zip(functions, lst)):
        lst[idx] = transform(idx, value)
    return lst
import torch
def plot_graph(
    codepath,
    filename="graph",
    directory="./",
    code2in=(0, 1, 0, 1, 2, 1, 2, 3, 2, 3),
    code2out=(0, 0, 1, 1, 1, 2, 2, 2, 3, 3),
):
    """ Plot the final searched model.

    Args:
        codepath: path to the saved .pth file, generated from the searching script.
            The file must contain "arch_code_a" (edge activations) and
            "arch_code_c" (cell operation codes), decoded using model.decode.
        filename: filename to save graph.
        directory: directory to save graph.
        code2in, code2out: see definition in monai.networks.nets.dints.py.
            Immutable tuples are used as defaults (never use mutable default
            arguments); any sequence of ints is accepted.
    Return:
        graphviz graph.
    """
    code = torch.load(codepath)
    arch_code_a = code["arch_code_a"]
    arch_code_c = code["arch_code_c"]
    ga = Digraph("G", filename=filename, engine="neato")
    depth = (len(code2in) + 2)//3
    # build an initial block of input nodes, one per resolution level
    inputs = []
    for level in range(depth):
        inputs.append("(in," + str(level) + ")")
    with ga.subgraph(name="cluster_all") as g:
        with g.subgraph(name="cluster_init") as c:
            for idx, node_name in enumerate(inputs):
                c.node(node_name, pos="0,"+str(depth-idx)+"!")
        for blk_idx in range(arch_code_a.shape[0]):
            with g.subgraph(name="cluster"+str(blk_idx)) as c:
                outputs = [str((blk_idx, level)) for level in range(depth)]
                for idx, node_name in enumerate(outputs):
                    # Pin node positions so "neato" draws a regular grid.
                    c.node(node_name, pos=str(2+2*blk_idx)+","+str(depth-idx)+"!")
                # Draw one edge per activated connection, labelled with its
                # cell operation code.
                for res_idx, activation in enumerate(arch_code_a[blk_idx]):
                    if activation:
                        c.edge(inputs[code2in[res_idx]], outputs[code2out[res_idx]],
                               label=str(arch_code_c[blk_idx][res_idx]))
                inputs = outputs
    ga.render(filename=filename, directory=directory, cleanup=True, format="png")
    return ga
def rotate_image(image, rotation):
    """
    Rotate the image given the cv2 rotation code
    (e.g. ``cv2.ROTATE_90_CLOCKWISE``).
    """
    return cv2.rotate(image, rotation)
def location_normalize(location):
    """
    Normalize location name `location`.

    Lower-cases the name, turns ``_``/``+`` into spaces and trims surrounding
    whitespace; unless the location starts with the ``moon@`` prefix, also
    strips a fixed set of punctuation characters.
    """
    location = location.lower().replace('_', ' ').replace('+', ' ').strip()
    if not location.startswith('moon@'):
        # str.translate removes all listed characters in one pass (replaces
        # the previous hand-rolled per-character filter).
        location = location.translate(str.maketrans('', '', '!@#$*;:\\'))
    return location
def remove_car(garage_id, car_id):  # noqa: E501
    """remove car from garage

    Deletes the car row scoped to the given garage; responds 404 when no
    matching row exists.

    :param garage_id: id of garage
    :type garage_id: int
    :param car_id: id of car
    :type car_id: int

    :rtype: None
    """
    with get_db() as con:
        result = con.execute('''
            delete from car where id = ? and garage_id = ?
        ''', (car_id, garage_id))
        if result.rowcount < 1:
            not_found = 'cannot find car {} in garage {}'.format(car_id, garage_id)
            return ApiResponse(code=404, type='error', message=not_found), 404
import sys
import platform
def check_execute(command, timeout=None,
                  sandbox_profile=None, max_retries=1,
                  stdout=sys.stdout, stderr=sys.stderr,
                  **kwargs):
    """Check execute a given command.

    Runs ``command`` (optionally sandboxed), retrying up to ``max_retries``
    times, and raises ``ExecuteCommandFailure`` if it never exits 0.

    >>> check_execute(['echo', 'Hello, World!'])
    0
    """
    timeout = DEFAULT_EXECUTE_TIMEOUT if timeout is None else timeout
    if sandbox_profile:
        system = platform.system()
        if system == 'Darwin':
            command = ['sandbox-exec', '-f', sandbox_profile] + command
        elif system == 'Linux':
            # TODO: remove explicit dns after Firejail bug is resolved
            command = ['firejail', '--quiet', '--profile=%s' % sandbox_profile,
                       '--private=.', '--overlay-tmpfs',
                       '--dns=8.8.8.8'] + command
    # Retry until the command succeeds; surface the last status on failure.
    returncode = -1
    for _ in range(max_retries):
        returncode = execute(command, timeout=timeout,
                             stdout=stdout, stderr=stderr,
                             **kwargs)
        if returncode == 0:
            return returncode
    raise ExecuteCommandFailure(command, returncode)
def review(items, config):
    """Reviews the incoming item and returns a Review for it

    :param items: a list of items that must be `Sentence` instances.
    :param config: a configuration map
    :returns: one or more Review objects for the input items
    :rtype: list of dict
    """
    # We require a list: much faster than individual sents.
    # isinstance (not type ==) so list subclasses are accepted too.
    assert isinstance(items, list), "A list of input sentences is required"
    for item in items:
        # (item,) guards the %-format against tuple-valued items.
        assert content.is_sentence(item), '%s' % (item,)
    rev_worth = config.get('worthiness_review', False)
    if rev_worth:
        factual_items, nfs_items = partition_factual_sentences(items, config)
    else:
        factual_items, nfs_items = items, []  # assume all items are factual
    assert len(factual_items + nfs_items) == len(items), '%s+%s != %s %s' % (
        len(factual_items), len(nfs_items), len(items), items)
    factual_reviews = review_factual_items(factual_items, config)
    nfs_reviews = [as_non_verifiable_reviews(item, config) for item in nfs_items]
    return restore_order(items, factual_reviews + nfs_reviews)
def create(words, vector_length, window_size):
    """Create new word2vec model.

    Trains one single-sentence Word2Vec model per column of ``words``, with a
    fixed seed and single worker so results are reproducible.
    """
    w2vmodel = {}
    # Iterating ``words.columns`` guarantees column membership, so the old
    # ``if col in words`` check (and its dead else branch) is unnecessary.
    for col in words.columns:
        w2vmodel[col] = gs.models.Word2Vec([list(words[col])], min_count=1,
                                           size=vector_length,
                                           window=window_size, seed=42,
                                           workers=1, iter=550, sg=0)
    return w2vmodel
import matplotlib as mpl
from distutils.version import LooseVersion
def aga_graph(
        adata,
        solid_edges='aga_adjacency_tree_confidence',
        dashed_edges='aga_adjacency_full_confidence',
        layout=None,
        root=0,
        groups=None,
        color=None,
        threshold_solid=None,
        threshold_dashed=1e-6,
        fontsize=None,
        node_size_scale=1,
        node_size_power=0.5,
        edge_width_scale=1,
        min_edge_width=None,
        max_edge_width=None,
        title='abstracted graph',
        ext='png',
        left_margin=0.01,
        random_state=0,
        pos=None,
        cmap=None,
        frameon=True,
        rootlevel=None,
        return_pos=False,
        export_to_gexf=False,
        show=None,
        save=None,
        ax=None):
    """Plot the abstracted graph.

    This uses igraph's layout algorithms for most layouts [Csardi06]_.

    Parameters
    ----------
    adata : :class:`~scanpy.api.AnnData`
        Annotated data matrix.
    solid_edges : `str`, optional (default: 'aga_adjacency_tree_confidence')
        Key for `adata.uns` that specifies the matrix that stores the edges
        to be drawn solid black.
    dashed_edges : `str` or `None`, optional (default: 'aga_adjacency_full_confidence')
        Key for `adata.uns` that specifies the matrix that stores the edges
        to be drawn dashed grey. If `None`, no dashed edges are drawn.
    layout : {'fr', 'rt', 'rt_circular', 'eq_tree', ...}, optional (default: 'fr')
        Plotting layout. 'fr' stands for Fruchterman-Reingold, 'rt' stands for
        Reingold Tilford. 'eq_tree' stands for 'eqally spaced tree'. All but
        'eq_tree' use the igraph layout function. All other igraph layouts are
        also permitted. See also parameter `pos`.
    random_state : `int` or `None`, optional (default: 0)
        For layouts with random initialization like 'fr', change this to use
        different intial states for the optimization. If `None`, the initial
        state is not reproducible.
    root : int, str or list of int, optional (default: 0)
        If choosing a tree layout, this is the index of the root node or root
        nodes. If this is a non-empty vector then the supplied node IDs are used
        as the roots of the trees (or a single tree if the graph is
        connected. If this is `None` or an empty list, the root vertices are
        automatically calculated based on topological sorting.
    groups : `str`, `list`, `dict`
        The node (groups) labels.
    color : color string or iterable, {'degree_dashed', 'degree_solid'}, optional (default: None)
        The node colors. Besides cluster colors, lists and uniform colors this
        also accepts {'degree_dashed', 'degree_solid'} which are plotted using
        continuous color map.
    threshold_solid : `float` or `None`, optional (default: `None`)
        Do not draw edges for weights below this threshold. Set to `None` if you
        want all edges.
    threshold_dashed : `float` or `None`, optional (default: 1e-6)
        Do not draw edges for weights below this threshold. Set to `None` if you
        want all edges.
    fontsize : int (default: None)
        Font size for node labels.
    node_size_scale : float (default: 1.0)
        Increase or decrease the size of the nodes.
    node_size_power : float (default: 0.5)
        The power with which groups sizes influence the radius of the nodes.
    edge_width_scale : `float`, optional (default: 1.5)
        Edge with scale in units of `rcParams['lines.linewidth']`.
    min_edge_width : `float`, optional (default: `None`)
        Min width of solid edges.
    max_edge_width : `float`, optional (default: `None`)
        Max width of solid and dashed edges.
    pos : filename of `.gdf` file, array-like, optional (default: `None`)
        Two-column array/list storing the x and y coordinates for drawing.
        Otherwise, path to a `.gdf` file that has been exported from Gephi or
        a similar graph visualization software.
    export_to_gexf : `bool`, optional (default: `None`)
        Export to gexf format to be read by graph visualization programs such as
        Gephi.
    return_pos : `bool`, optional (default: `False`)
        Return the positions.
    title : `str`, optional (default: `None`)
        Provide title for panels either as `['title1', 'title2', ...]` or
        `'title1,title2,...'`.
    frameon : `bool`, optional (default: `True`)
        Draw a frame around the abstracted graph.
    show : `bool`, optional (default: `None`)
        Show the plot, do not return axis.
    save : `bool` or `str`, optional (default: `None`)
        If `True` or a `str`, save the figure. A string is appended to the
        default filename. Infer the filetype if ending on \{'.pdf', '.png', '.svg'\}.
    ax : `matplotlib.Axes`
        A matplotlib axes object.

    Returns
    -------
    If `ax` is `None`, a matplotlib.Axes or an array of matplotlib.Axes.
    If `return_pos` is `True`, in addition, the positions of the nodes.
    """
    if mpl.__version__ > LooseVersion('2.0.0'):
        logg.warn('Currently, `aga_graph` sometimes crashes with matplotlib version > 2.0, you have {}.\n'
                  'Run `pip install matplotlib==2.0` if this hits you.'
                  .format(mpl.__version__))
    # Normalize `color`, `groups` and `title` so that each is a list with one
    # entry per panel.
    # colors is a list that contains no lists
    if isinstance(color, list) and True not in [isinstance(c, list) for c in color]: color = [color]
    if color is None or isinstance(color, str): color = [color]
    # groups is a list that contains no lists
    if isinstance(groups, list) and True not in [isinstance(g, list) for g in groups]: groups = [groups]
    if groups is None or isinstance(groups, dict) or isinstance(groups, str): groups = [groups]
    if title is None or isinstance(title, str): title = [title for name in groups]
    if ax is None:
        axs, _, _, _ = utils.setup_axes(colors=color)
    else:
        axs = ax
    if len(color) == 1 and not isinstance(axs, list): axs = [axs]
    # Draw one panel per color; the layout positions computed for the first
    # panel are reused for the following ones.
    for icolor, c in enumerate(color):
        pos = _aga_graph(
            adata,
            axs[icolor],
            solid_edges=solid_edges,
            dashed_edges=dashed_edges,
            threshold_solid=threshold_solid,
            threshold_dashed=threshold_dashed,
            layout=layout,
            root=root,
            rootlevel=rootlevel,
            color=c,
            groups=groups[icolor],
            fontsize=fontsize,
            node_size_scale=node_size_scale,
            node_size_power=node_size_power,
            edge_width_scale=edge_width_scale,
            min_edge_width=min_edge_width,
            max_edge_width=max_edge_width,
            frameon=frameon,
            cmap=cmap,
            title=title[icolor],
            random_state=0,
            export_to_gexf=export_to_gexf,
            pos=pos)
    if ext == 'pdf':
        logg.warn('Be aware that saving as pdf exagerates thin lines, use "svg" instead.')
    utils.savefig_or_show('aga_graph', show=show, ext=ext, save=save)
    if len(color) == 1 and isinstance(axs, list): axs = axs[0]
    if return_pos:
        # Parenthesised conditional: the previous `axs, pos if ... else pos`
        # always built the tuple, because the conditional expression bound
        # only to the second element.
        return (axs, pos) if ax is None and show == False else pos
    else:
        return axs if ax is None and show == False else None
def rotate(location, direction=CLOCKWISE):
    """
    Find a stronghold 120 degrees clockwise or counter-clockwise from location.

    location is a tuple of x and z. direction can either be CLOCKWISE or
    COUNTERCLOCKWISE.
    """
    point = Point(*location)
    # Standard 2D rotation applied to the (x, z) coordinates.
    rotated_x = simplify(cos(direction) * point.x - sin(direction) * point.z)
    rotated_z = simplify(sin(direction) * point.x + cos(direction) * point.z)
    return Point(rotated_x, rotated_z)
def checkWin():
    """Analyze the scores of the players and check win, return player1 or player2 as a string"""
    # Lower score loses; a tie falls through to player 1.
    return "player 2" if player1.score < player2.score else "player 1"
def credits(request):
    """A view that outputs the result of a credit query."""
    md = request.matchdict
    model = model_from_matchdict(md)
    items = items_from_matchdict(md, model)
    date = date_from_matchdict(md)
    # Split the '+'-separated type list from the URL, dropping empty segments.
    requested_types = [segment for segment in md['types'].split('+') if segment]
    return model.bulk_credits(items, *requested_types, date=date)
def dict_is_test(data):
    """helper function to check whether passed argument is a proper :class:`dict` object describing a test.

    :param dict data: value to check
    :rtype: bool
    """
    if not isinstance(data, dict):
        return False
    # A test dict must declare type == "test", carry an id, and provide a
    # dict of attributes.
    return (
        data.get("type") == "test"
        and "id" in data
        and isinstance(data.get("attributes"), dict)
    )
    # optionally, it can have "links" dict
import string
def _replace_vars(text, param_dict):
    """
    Given a block of text, replace any instances of '{key}' with 'value'
    if param_dict contains 'key':'value' pair.

    This is done safely so that brackets in a file don't cause an error if
    they don't contain a variable we want to replace.
    See http://stackoverflow.com/a/17215533/2680824

    Examples:
        >>> _replace_vars('{last}, {first} {last}', {'first':'James', 'last':'Bond'})
        'Bond, James Bond'
        >>> _replace_vars('{last}, {first} {last}', {'last':'Bond'})
        'Bond, {first} Bond'
    """
    # _Safe_Dict leaves unknown '{key}' placeholders untouched instead of
    # raising KeyError.
    formatter = string.Formatter()
    safe_params = _Safe_Dict(param_dict)
    return formatter.vformat(text, (), safe_params)
from typing import Union
from pathlib import Path
from typing import Optional
def import_key(filepath: Union[str, PathLike[str], Path], passphrase: Optional[str] = None) -> RsaKey:
    """
    Import a secret key from file.

    Parameters
    ----------
    filepath : str | os.PathLike[str] | pathlib.Path
        Filepath of RsaKey.
    passphrase : str | None
        Passphrase for the key.

    Returns
    -------
    remoteweb.crypto.RsaKey
    """
    raw_key = Path(filepath).read_bytes()
    return RSA.import_key(raw_key, passphrase)
from typing import Set
def GetZonesInRegion(region: str) -> Set[str]:
    """Returns a set of zones in the region.

    Args:
        region: the region name, e.g. ``'eastus'``.

    Returns:
        The set of zone names ``'<region>-1'`` .. ``'<region>-3'``.
    """
    # As of 2021 all Azure AZs are numbered 1-3 for eligible regions.
    return {f'{region}-{i}' for i in range(1, 4)}
def compare_3PC_keys(key1, key2) -> int:
    """
    Return >0 if key2 is greater than key1, <0 if lesser, 0 otherwise
    """
    # Compare view numbers first; fall back to sequence numbers on a tie.
    view_delta = key2[0] - key1[0]
    return view_delta if view_delta else key2[1] - key1[1]
import heapq
def merge_two_lists_heap(l1: ListNode, l2: ListNode) -> ListNode:
    """Returns a single sorted, in-place merged linked list of two sorted input linked lists

    The linked list is made by splicing together the nodes of l1 and l2

    Args:
        l1:
        l2:

    Examples:
        >>> l1 = linked_list.convert_list_to_linked_list([1,2,4])
        >>> l2 = linked_list.convert_list_to_linked_list([1,3,4])
        >>> merge_two_lists_heap(l1, l2).as_list()
        [1, 1, 2, 3, 4, 4]
        >>> l1 = linked_list.convert_list_to_linked_list([])
        >>> l2 = linked_list.convert_list_to_linked_list([0])
        >>> merge_two_lists_heap(l1, l2).as_list()
        [0]
        >>> merge_two_lists_heap(l2, l1).as_list()
        [0]
        >>> merge_two_lists_heap(None, None)
    """
    heap = []
    # A unique, monotonically increasing tie-breaker: tying on the list index
    # alone crashed on duplicate values *within* one list, because the tuple
    # comparison then fell through to the ListNode objects, which define no
    # ordering. The counter also preserves l1-before-l2 order for equal values.
    order = 0
    for list_head in (l1, l2):
        curr = list_head
        while curr is not None:
            heapq.heappush(heap, (curr.val, order, curr))
            order += 1
            curr = curr.next
    head_handle = curr = ListNode(val=None)
    while heap:
        _, _, next_node = heapq.heappop(heap)
        curr.next = curr = next_node
    curr.next = None  # sever any stale link left on the final node
    return head_handle.next
import requests
def demo():
    """ Step 1 of the authentication workflow, obtain a temporary
    resource owner key and use it to redirect the user. The user
    will authorize the client (our flask app) to access its resources
    and perform actions in its name (aka get feed and post updates)."""
    # In this step you will need to supply your twitter provided key and secret
    twitter = OAuth1(key, client_secret=secret)

    # We will be using the default method of supplying parameters, which is
    # in the authorization header.
    r = requests.post(request_url, auth=twitter)

    # Extract the temporary resource owner key from the response.
    # Parse the decoded text rather than `r.content`: on Python 3,
    # parse_qs over bytes yields bytes keys and the str lookup fails.
    token = parse_qs(r.text)["oauth_token"][0]

    # Create the redirection url and send the user to twitter
    # This is the start of Step 2
    auth = "{url}?oauth_token={token}".format(url=auth_url, token=token)
    return redirect(auth)
def get_blast(pdb_id, chain_id='A'):
    """
    Return BLAST search results for a given PDB ID

    The key of the output dict() that outputs the full search results is
    'BlastOutput_iterations'

    To get a list of just the results without the metadata of the search use:
    hits = full_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']

    Parameters
    ----------
    pdb_id : string
        A 4 character string giving a pdb entry of interest

    chain_id : string
        A single character designating the chain ID of interest

    Returns
    -------
    out : dict()
        A nested dict() consisting of the BLAST search results and all associated metadata
        If you just want the hits, look under four levels of keys:
        results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']

    Examples
    --------
    >>> blast_results = get_blast('2F5N', chain_id='A')
    >>> just_hits = blast_results['BlastOutput_iterations']['Iteration']['Iteration_hits']['Hit']
    >>> print(just_hits[50]['Hit_hsps']['Hsp']['Hsp_hseq'])
    PELPEVETVRRELEKRIVGQKIISIEATYPRMVL--TGFEQLKKELTGKTIQGISRRGKYLIFEIGDDFRLISHLRMEGKYRLATLDAPREKHDHL
    TMKFADG-QLIYADVRKFGTWELISTDQVLPYFLKKKIGPEPTYEDFDEKLFREKLRKSTKKIKPYLLEQTLVAGLGNIYVDEVLWLAKIHPEKET
    NQLIESSIHLLHDSIIEILQKAIKLGGSSIRTY-SALGSTGKMQNELQVYGKTGEKCSRCGAEIQKIKVAGRGTHFCPVCQQ
    """
    # Fetch the raw XML, parse it, and unwrap the top-level 'BlastOutput' node.
    xml_text = get_raw_blast(pdb_id, output_form='XML', chain_id=chain_id)
    parsed = to_dict(xmltodict.parse(xml_text, process_namespaces=True))
    return parsed['BlastOutput']
from datetime import datetime
import time
def getDayBoundaryTimeStampFromUtcTimeStamp(timeStamp, timeZoneId):
    """
    get to the day boundary given a local time in the UTC timeZone.

    ts is the local timestamp in the UTC timeZone i.e. what you would
    get from time.time() on your computer + the offset between your
    timezone and UTC.
    """
    (ts, tzName) = getLocalTime(timeStamp, timeZoneId)
    timeDiff = timeStamp - ts
    # `datetime` is the class bound by `from datetime import datetime`, so
    # fromtimestamp is called on it directly (the previous
    # `datetime.datetime.fromtimestamp` raised AttributeError under that
    # import).
    dt = datetime.fromtimestamp(float(ts))
    # Truncate to local midnight, then convert back to an epoch timestamp.
    dt1 = dt.replace(hour=0, minute=0, second=0, microsecond=0)
    dbts = time.mktime(dt1.timetuple())
    return dbts + timeDiff
def generate_cylinder_square_array(size, porosity, segmented=True):
    """ Generate a 2D periodic array of circles

    :param size: length of one side of the output domain
    :type size: int
    :param porosity: porosity of the output domain
    :type porosity: float
    :param segmented: return a domain that is already segmented (i.e. each sphere with unique ID) or
        with grayscales 0-255 with threshold=128 for the input diameter
    :type segmented: bool
    :return: array of circles
    :rtype: Workspace

    :Example:
    >>> import pumapy as puma
    >>> ws = puma.generate_cylinder_square_array(100, 0.8, segmented=True)
    >>> puma.render_volume(ws)
    """
    generator = GeneratorSquareArray(size, porosity)
    # Validate inputs and log before/after, mirroring the other generators.
    generator.error_check()
    generator.log_input()
    workspace = generator.generate()
    generator.log_output()
    if segmented:
        workspace.binarize(128)
    return workspace
import csv
def ephemerides(file_path,
                P_orb=2.644,
                T_e=2455959.0039936211,
                e=0.152,
                P_rot=None,
                phase_start=None,
                Rot_phase=False,
                print_stat=True,
                save_results=False,
                save_name=None):

    """
    Calculates the orbital and rotational phases for a star.

    NOTE: The default parameters within this function are for the star GJ 436.

    Parameters:
    -----------

    file_path: str
        List of paths of the .out/.fits file containing the OBS_HJD/OBS_BJD

    P_orb: int, default=2.644
        Planetary orbital period in days.
        Default value for GJ436b taken from http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2018A&A...609A.117T

    T_e: int, default=2455959.0039936211
        Epoch of periastron in HJD since the dates in .out files are HJD. Input in BJD instead if the given file_path is .fits.
        Default value for GJ436b taken from http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2014AcA....64..323M

    e: int, default=0.152
        Orbital eccentricity.
        Default value for GJ436b taken from http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2018A&A...609A.117T

    P_rot: int
        Stellar rotation period in days.
        Used if Rot_phase is True

    phase_start: int
        Starting point for the rotational phase. This ideally should be the first JD of your observation.
        Used if Rot_phase is True

    Rot_phase: bool, default=False
        Calculates the stellar rotational phases and cycle using the given 'P_rot' and 'phase_start' parameters.

    print_stat: bool, default=True
        Prints the status of each process within the function.

    save_results: bool, default=False
        Saves the results as a csv file.

    save_name: str, default=None
        Name with which to save the results file with.

    Returns:
    --------
    HJD/BJD, Number of orbits done since T_e, Mean anomaly, Eccentric anomaly, True anomaly, orbital phase, rotational phase

    All of these are floating points.
    """

    results = [] # Empty list to which the run results will be appended

    # Creating a loop to go through each given file_path in the list of file paths

    # Using the tqdm function 'log_progress' to provide a neat progress bar in Jupyter Notebook which shows the total number of
    # runs, the run time per iteration and the total run time for all files!

    for i in log_progress(range(len(file_path)), desc='Calculating System Ephemerides'):

        if file_path[i][-4:] == '.out':
            file = open(file_path[i]).readlines() # Opening the .out file and reading each line as a string
            string = '  Heliocentric Julian date (UTC) :' # Creating a string variable that matches the string in the .out file
            idx = find_string_idx(file_path[i], string) # Using the 'find_string_idx' function to find the index of the line that contains the above string.
            JD = float(file[idx][-14:-1]) # Using the line index found above, the HJD is extracted by indexing just that from the line.

        elif file_path[i][-4:] == 'fits':
            # Context manager so the FITS file handle is closed promptly
            # (the previous version leaked it).
            with fits.open(file_path[i]) as hdu:
                try:
                    JD = hdu[0].header['HIERARCH ESO DRS BJD']
                except KeyError:
                    # Narrowed from a bare except: a missing header card
                    # raises KeyError; anything else should propagate.
                    JD = hdu[0].header['HIERARCH TNG DRS BJD']

        # Calculating the mean anomaly M

        n = 2*np.pi/P_orb # mean motion in radians

        # Total orbits done since last periastron
        N = int((JD - T_e)/P_orb)

        if print_stat:
            print('Total number of orbits since the given periastron {}: {}'.format(T_e, N))
            print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')

        t = T_e + (N*P_orb) # time of last periastron RIGHT before our HJD!

        mean_an = (JD - t)*n # mean anomaly; (t - T)*n

        # Solving for eccentric anomaly from the mean anomaly as M = E - e*sin(E) = (t - T)*n using pyasl.MarkleyKESolver()

        # Instantiate the solver
        ks = pyasl.MarkleyKESolver()

        # Solves Kepler's Equation for a set
        # of mean anomaly and eccentricity.
        # Uses the algorithm presented by
        # Markley 1995.

        M = np.round(mean_an, 5)

        if print_stat:
            print('Mean Anomaly: {}'.format(M))
            print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')

        E = np.round((ks.getE(M, e)), 5)

        if print_stat:
            print("Eccentric Anomaly: {}".format(E))
            print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')

        f = np.round((2*np.arctan2(1, 1/(np.sqrt((1+e)/(1-e))*np.tan(E/2)))), 5)
        # using np.arctan2 instead of np.arctan to retrive values from the positive quadrant of tan(x) values
        # see https://stackoverflow.com/questions/16613546/using-arctan-arctan2-to-plot-a-from-0-to-2π

        orb_phase = np.round((f/(2*np.pi)), 5) # converting f to orbital phase by dividing it with 2pi radians!

        if Rot_phase:
            rot_cycle = np.round(((JD - phase_start)/P_rot), 5)
            rot_phase = np.round((rot_cycle - int(rot_cycle)), 5)

        if print_stat:
            print('True Anomaly: {}'.format(f))
            print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
            print('Orbital Phase: {}'.format(orb_phase))
            print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
            if Rot_phase:
                print('Rotational Phase: {}'.format(rot_phase))
                print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')

        if Rot_phase:
            res = JD, N, M, E, f, orb_phase, rot_phase, rot_cycle
        else:
            res = JD, N, M, E, f, orb_phase

        results.append(res)

    # Saving the results in a csv file format
    if save_results:

        if print_stat:
            print('Saving results in the working directory in file: {}.csv'.format(save_name))
            print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')

        if Rot_phase:
            header = ['JD', 'Number_of_orbits_since_T_e', 'Mean_Anomaly', 'Eccentric_Anomaly', 'True_Anomaly', 'Orbital_Phase', 'Rotational_Phase', 'Rotational_Cycle']
        else:
            header = ['JD', 'Number_of_orbits_since_T_e', 'Mean_Anomaly', 'Eccentric_Anomaly', 'True_Anomaly', 'Orbital_Phase']

        # newline='' is required by the csv module to avoid blank rows on
        # Windows.
        with open('{}.csv'.format(save_name), 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, dialect='excel')
            writer.writerow(header)
            for row in results:
                writer.writerow(row)

    return results
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {}) | 3a2cb3e9c8910a6901be0937be18aac00d532e2b | 3,632,758 |
def _apply_color(color: str, message: str) -> str:
    """Dye message with color, fall back to default if it fails."""
    # Resolve the default first, then override when the name is recognised.
    code = AvailableColors["DEFAULT"].value
    try:
        code = AvailableColors[color.upper()].value
    except KeyError:
        pass
    return f"\033[1;{code}m{message}\033[0m"
import os
def choice_geometry(elite):
    """ Positions shapes for user selection """
    # NOTE(review): relies on module-level state: `rs`/`sc` (Rhino scripting
    # contexts), `System` (.NET interop), `add_geometry`, `boxArray`, the six
    # checkbox widgets, `gen` and `dir` -- confirm these are initialised
    # before this is called.
    geo = []
    # Generate shape for selection
    for i in range(6):
        # Evaluate the candidate and place it on a 3x2 grid of 100-unit cells.
        elite[i].evaluate()
        print("Adding geometry: " + str(i))
        offset = 100
        offset_x = i % 3 * offset
        offset_y = i % 2 * offset
        geo.append(add_geometry(elite[i], offset_x, offset_y))
        # Capture the geometry
        # Aim the camera at the cell just drawn before taking the snapshot.
        view = rs.CurrentView()
        targetView = [offset_x, offset_y, 0]
        location = [offset_x+30, offset_y+30, 30]
        if location and targetView:
            rs.ViewCameraTarget( view, location, targetView )
        view = sc.doc.Views.ActiveView
        # Half-resolution screenshot of the active viewport.
        size = System.Drawing.Size(view.Bounds.Width*0.5,view.Bounds.Height*0.5)
        bitmap = view.CaptureToBitmap(size)
        # Creates the shapes folder if it does not exist
        if not os.path.exists("shapes"):
            os.mkdir("shapes")
        # Exports the chosen shapes for review in user selection
        bitmap.Save(os.path.join(dir + '/shapes','generation' + str(gen) + 'shape' + str(i) + '.jpg'))
        # Re-encode as PNG in memory and show it in the i-th picture box.
        memoryStream = System.IO.MemoryStream()
        format = System.Drawing.Imaging.ImageFormat.Png
        System.Drawing.Bitmap.Save(bitmap, memoryStream, format)
        if memoryStream.Length != 0:
            boxArray[i].Image = Bitmap(memoryStream)
        memoryStream.Dispose()
    # Form is redrawn so we need to make sure all the checkbox's are reset
    checkbox1.Checked = False
    checkbox2.Checked = False
    checkbox3.Checked = False
    checkbox4.Checked = False
    checkbox5.Checked = False
    checkbox6.Checked = False
    return geo
import scipy
def get_mel_spectrogram(audio, pextract=None):
    """Mel-band energies
    Parameters
    ----------
    audio : numpy.ndarray
        Audio data.
    pextract : dict
        Extraction parameters; must provide 'fs', 'n_fft', 'n_mels',
        'fmin', 'fmax', 'htk', 'mel_basis_unit', 'win_length_samples',
        'hop_length_samples', 'normalize_mel_bands', 'log' and 'eps'.
    Returns
    -------
    feature_matrix : numpy.ndarray
        (log-scaled) mel spectrogram energies per audio channel,
        frames stacked along axis 0.
    """
    # make sure rows are channels and columns the samples
    # NOTE(review): this forces mono (a single channel row) regardless of
    # the input layout -- confirm multi-channel audio is not expected here.
    audio = audio.reshape([1, -1])
    window = scipy.signal.hamming(pextract.get('win_length_samples'), sym=False)
    mel_basis = librosa.filters.mel(sr=pextract.get('fs'),
                                    n_fft=pextract.get('n_fft'),
                                    n_mels=pextract.get('n_mels'),
                                    fmin=pextract.get('fmin'),
                                    fmax=pextract.get('fmax'),
                                    htk=pextract.get('htk'),
                                    norm=pextract.get('mel_basis_unit'))
    if pextract.get('normalize_mel_bands'):
        # Scale each mel filter so its peak is 1.
        mel_basis /= np.max(mel_basis, axis=-1)[:, None]
    # init mel_spectrogram expressed as features: row x col = frames x mel_bands = 0 x mel_bands (to concatenate axis=0)
    feature_matrix = np.empty((0, pextract.get('n_mels')))
    for channel in range(0, audio.shape[0]):
        spectrogram = get_spectrogram(
            y=audio[channel, :],
            n_fft=pextract.get('n_fft'),
            win_length_samples=pextract.get('win_length_samples'),
            hop_length_samples=pextract.get('hop_length_samples'),
            spectrogram_type=pextract.get('spectrogram_type') if 'spectrogram_type' in pextract else 'magnitude',
            center=True,
            # center=False,
            window=window,
            pextract=pextract
        )
        mel_spectrogram = np.dot(mel_basis, spectrogram)
        mel_spectrogram = mel_spectrogram.T
        # at this point we have row x col = time x freq = frames x mel_bands
        if pextract.get('log'):
            # eps avoids log10(0) on silent frames.
            mel_spectrogram = np.log10(mel_spectrogram + pextract.get('eps'))
        feature_matrix = np.append(feature_matrix, mel_spectrogram, axis=0)
    return feature_matrix
from pathlib import Path
def getName(value):
    """
    Finds the name of a :model:`browseNet.path` or :model:`browseNet.Share`.

    Returns "??" for any other type.
    """
    if isinstance(value, Path):
        return value.shortname
    if isinstance(value, Share):
        return value.sharename
    return "??"
def balanced_outward(source: str, pos: int) -> list:
    """
    Returns balanced CSS model: a list of all ranges that could possibly match
    given location when moving in outward direction

    :param source: CSS source text to scan.
    :param pos: caret location inside `source`.
    :return: list of (start, end) ranges, innermost first.
    """
    pool = []
    stack = []
    result = []
    # Single-element list so the nested callback can rebind the current
    # property-name range without `nonlocal`.
    prop = []
    prop.append(None) # Get rid of pyLint rant
    def scan_callback(token_type: str, start: int, end: int, delimiter: int):
        if token_type == TokenType.Selector:
            # Open a section: remember the selector range until its BlockEnd.
            stack.append(alloc_range(pool, start, end, delimiter))
        elif token_type == TokenType.BlockEnd:
            left = stack and stack.pop()
            if left and left[0] < pos < end:
                # Matching section found
                inner = inner_range(source, left[2] + 1, start)
                if inner:
                    push(result, inner)
                push(result, (left[0], end))
            if left:
                release_range(pool, left)
            if not stack:
                # Outermost section closed: stop scanning.
                return False
        elif token_type == TokenType.PropertyName:
            if prop[0]:
                release_range(pool, prop[0])
            prop[0] = alloc_range(pool, start, end, delimiter)
        elif token_type == TokenType.PropertyValue:
            p = prop[0]
            if p and p[0] < pos < max(delimiter, end):
                # Push full token and value range
                push(result, (start, end))
                push(result, (p[0], delimiter + 1 if delimiter != -1 else end))
        # Any token other than a property name invalidates the pending name.
        if token_type != TokenType.PropertyName and prop[0]:
            release_range(pool, prop[0])
            prop[0] = None
    scan(source, scan_callback)
    return result
import struct
def float_2_bytes(f, is_little_endian=False):
    """Pack a float into 4 bytes (IEEE-754 single precision).

    :param f: value to pack
    :param is_little_endian: if True, use little-endian byte order,
        otherwise big-endian
    :return: bytes of length 4
    """
    fmt = '<f' if is_little_endian else '>f'
    return struct.pack(fmt, f)
import pathlib
def setup_pluginbase(extra_policies_path=None):
    """Sets up plugin base with default path and provided path
    Args:
        extra_policies_path (str): Extra path to find plugins in
    Returns:
        PluginSource: PluginBase PluginSource for finding plugins
    Raises:
        InvalidPoliciesDirectory: if extra_policies_path is given but is
            not an existing directory
    """
    # Default policies ship next to this module, under ../policies.
    here = pathlib.Path(__file__).parent.absolute()
    default_path_obj = here / "../policies"
    default_path = str(default_path_obj.resolve())
    all_paths = [default_path]
    if extra_policies_path:
        extra_policies_obj = pathlib.Path(extra_policies_path)
        if extra_policies_obj.is_dir():
            extra_policies = get_directory_path(extra_policies_obj)
            # Prepend so user-supplied policies shadow the defaults.
            all_paths.insert(0, str(extra_policies))
        else:
            raise InvalidPoliciesDirectory
    LOG.info("Searching for policies in %s", str(all_paths))
    plugin_base = PluginBase(package='lavatory.policy_plugins')
    plugin_source = plugin_base.make_plugin_source(searchpath=all_paths)
    LOG.debug("Policies found: %s", str(plugin_source.list_plugins()))
    return plugin_source
from typing import Union
from typing import List
from typing import Dict
def compute_balanced_class_weights(train_y: Union[np.array, pd.Series, List[any]]
                                   ) -> Dict[int, float]:
    """Compute 'balanced' class weights from training targets.

    The key of the returned mapping is the index of each sorted unique
    value occurring in train_y; the value is sklearn's balanced weight.
    """
    classes = sorted(np.unique(train_y))
    balanced = compute_class_weight("balanced", classes=classes, y=train_y)
    return dict(enumerate(balanced))
import cv2
from numpy import zeros, nan
def get_frames_index_range(filename, start, end):
    """
    returns a range of frames as numpy array. The indecies comply with scimage
    We can then supplement the above table as follows:
    Addendum to dimension names and orders in scikit-image
    Image type: 2D color video
    coordinates: (time, row, col, channel)

    :param filename: path to the video file
    :param start: index of the first frame to read (inclusive)
    :param end: index one past the last frame to read (exclusive)
    :return: numpy array of shape (end-start, rows, cols, channels);
        frames that failed to read remain NaN
    """
    vidcap = cv2.VideoCapture(filename)
    # Seek directly to the first requested frame.
    vidcap.set(cv2.CAP_PROP_POS_FRAMES,start)
    i = 0
    # NOTE(review): the success flag of this first read is not checked; if
    # it fails, `image` has no .shape and the next line raises -- confirm
    # callers always pass a valid, in-range `start`.
    success, image = vidcap.read()
    # Pre-fill with NaN so unread frames are detectable.
    arr = zeros((end-start,image.shape[0],image.shape[1],image.shape[2]))*nan
    arr[i] = image
    i+=1
    while start+i < end:
        success, image = vidcap.read()
        if success:
            arr[i] = image
            i+=1
    # NOTE(review): the capture is never released (vidcap.release()).
    return arr
import os
def create_job_env(profiler=False):
    """Capture the submitting process' environment for job execution.

    The returned mapping is stored with the job and loaded when the job's
    command runs.  It records ``PATH``, ``PYTHONPATH``, the JIP tool/module
    search paths (``JIP_PATH``, ``JIP_MODULES``), ``LD_LIBRARY_PATH``, the
    DB log level and the current effective JIP log level, so jobs run with
    the same lookup paths and can be debugged at the same verbosity.

    :param profiler: if True, ``JIP_PROFILER`` is enabled
    :returns: dictionary that contains the job environment
    :rtype: dict
    """
    env = {}
    for name in ("PATH", "PYTHONPATH", "JIP_PATH", "JIP_MODULES",
                 "LD_LIBRARY_PATH", "JIP_DB_LOGLEVEL"):
        env[name] = os.getenv(name, "")
    # Pass the current log level on to the job process.
    env["JIP_LOGLEVEL"] = str(log.getEffectiveLevel())
    if profiler:
        env["JIP_PROFILER"] = True
    return env
import requests
def load_SMI_data_from_Helmholtz():
    """
    Download the SMI topsoil data zip archive from the Helmholtz (UFZ)
    website.

    :return: the raw ``requests.Response``.  NOTE(review): no status check
        is performed here; callers should inspect ``response.status_code``
        or call ``response.raise_for_status()`` themselves.
    """
    url = "https://www.ufz.de/export/data/2/237851_SMI_L02_Oberboden_monatlich_1951_2018_inv.zip" # noqa: E501
    response = requests.get(url)
    return response
import io
import json
from typing import OrderedDict
def get_yaml_test_method(test_file, expected_load_file, expected_dump_file, regen=False):
    """
    Build and return a test function closing on tests arguments and the function
    name.

    :param test_file: YAML input file to load and round-trip.
    :param expected_load_file: JSON file holding the expected parsed data.
    :param expected_dump_file: file holding the expected re-dumped YAML.
    :param regen: if True, (re)write both expectation files from the
        current parse/dump before asserting.
    :return: tuple of (test function, generated test name).
    """
    def closure_test_function(self):
        with io.open(test_file, encoding='utf-8') as inp:
            test_load = saneyaml.load(inp.read())
        test_dump = saneyaml.dump(test_load)
        if regen:
            # Regenerate the expectation files from the current behavior.
            with io.open(expected_load_file, 'w', encoding='utf-8') as out:
                json.dump(test_load, out, indent=2)
            with io.open(expected_dump_file, 'w', encoding='utf-8') as out:
                out.write(test_dump)
        with io.open(expected_load_file, encoding='utf-8') as inp:
            # OrderedDict preserves key order for a faithful comparison.
            expected_load = json.load(inp, object_pairs_hook=OrderedDict)
        with io.open(expected_dump_file, encoding='utf-8') as inp:
            expected_dump = inp.read()
        assert expected_load == test_load
        assert expected_dump == test_dump
    # Derive a unique, importable test name from the file path.
    tfn = test_file.replace(test_data_dir, '').strip('/\\')
    test_name = 'test_{}'.format(tfn)
    test_name = python_safe(test_name)
    closure_test_function.__name__ = test_name
    closure_test_function.funcname = test_name
    return closure_test_function, test_name
import glob
import os
import logging
def read_date_dirs(dpath='.',dir_filter='*',
                   expected_date_format='%Y%m%d',
                   reader=pd.read_csv,
                   ext='csv',
                   verbose=False,
                   **kwargs):
    """Wrapper around pandas read_csv() or data reader function.
    Additional readers:
    - measurements/metmast
    - measurements/radar
    - measurements/lidar
    - measurements/sodar
    Return concatenated dataframe made up of dataframes read from
    text files contained in _subdirectories with the expected date
    format_.
    Extra keyword arguments are passed to the data reader.

    :param dpath: parent directory to search.
    :param dir_filter: glob pattern selecting candidate subdirectories.
    :param expected_date_format: strftime format a directory name must
        parse as to be processed; others are skipped.
    :param reader: callable(path, verbose=..., **kwargs) -> DataFrame.
    :param ext: only files ending with this extension are read.
    :return: concatenated DataFrame, or None if nothing was read.
    """
    dataframes = []
    dpathlist = glob.glob(os.path.join(dpath,dir_filter))
    for fullpath in sorted(dpathlist):
        Nfiles = 0
        dname = os.path.split(fullpath)[-1]
        if os.path.isdir(fullpath):
            try:
                # NOTE(review): collection_date is parsed only to validate
                # the directory name; the value itself is unused.
                collection_date = pd.to_datetime(dname,
                                                 format=expected_date_format)
            except ValueError:
                # Not a date-named directory: skip it.
                if verbose:
                    print('Skipping '+dname)
            else:
                print('Processing '+fullpath)
                for fname in sorted(os.listdir(fullpath)):
                    fpath = os.path.join(fullpath,fname)
                    if not fname.endswith(ext): continue
                    if verbose:
                        print(' reading '+fname)
                    try:
                        df = reader(fpath,verbose=verbose,**kwargs)
                    except reader_exceptions as err:
                        # Log and continue with the remaining files.
                        logging.exception('Error while reading {:s}'.format(fpath))
                    else:
                        dataframes.append(df)
                        Nfiles += 1
                print(' {} dataframes added'.format(Nfiles))
    if len(dataframes) == 0:
        print('No dataframes were read!')
        df = None
    else:
        df = _concat(dataframes)
    return df
import os
def find_wfss_config_filename(pathname, instrument, filtername, mode):
    """Construct the name of the WFSS configuration file given instrument parameters

    Parameters
    ----------
    pathname : str
        Path where the configuration files are located
    instrument : str
        Instrument name (e.g. 'nircam')
    filtername : str
        Name of the crossing filter (e.g. 'f444w')
    mode : str
        String containing dispersion direction ('R' or 'C') as well as
        module name (for NIRCam). e.g. for NIRCam - modA_R and
        for NIRISS GR150R

    Returns
    -------
    str
        Full path and filename of the appropriate configuration file
    """
    filename = "{}_{}_{}.conf".format(instrument.upper(), filtername.upper(), mode)
    return os.path.join(pathname, filename)
def create_mosaic_normal(out_img, maximum):
    """Create grayscale mosaic image.

    For each index i, three orthogonal slices of the volume are flipped
    along their second axis, transposed, and laid out side by side; the
    per-index rows are stacked into a new array.

    Parameters
    ----------
    out_img: numpy array (3-D volume)
    maximum: int
        Number of slices to take along each axis.

    Returns
    -------
    new_img: numpy array
    """
    rows = []
    for idx in range(maximum):
        axial = out_img[idx, :, :][:, ::-1].T
        coronal = out_img[:, maximum - idx - 1, :][:, ::-1].T
        sagittal = out_img[:, :, maximum - idx - 1][:, ::-1].T
        rows.append(np.concatenate((axial, coronal, sagittal), axis=1))
    return np.array(rows)
def notices_for_cfr_part(title, part):
    """Retrieves all final notices for a title-part pair, orders them, and
    returns them as a dict[effective_date_str] -> list(notices)"""
    # Only final (non-proposed) notices are relevant here.
    notices = fetch_notices(title, part, only_final=True)
    # Adjusts effective dates on the notices in place before grouping.
    modify_effective_dates(notices)
    return group_by_eff_date(notices)
import os
def build_markdown_cpp_cell(cell):
    """
    Save the C++ source code and try to build it

    Writes the cell source to a .cpp file, compiles it with g++ (C++14) and,
    if it contains main(), also runs the resulting executable.

    :param cell: notebook cell dict with a 'source' string.
    :return: dict with 'result' (0 on success, nonzero shell status on
        failure) and 'cpp_filename' (path of the written source file).
    """
    # Comment out ```'s
    txt = cell['source'].replace('```', '// ```')
    cpp_file_name = get_filename_in_second_line(txt)
    if not cpp_file_name:
        # obtain temporary file name
        cpp_file_name = get_temp_cpp_filename()
    name, _ = os.path.splitext(cpp_file_name)
    # open the temporary file and write to it
    with open(cpp_file_name, 'wt') as cpp_file:
        cpp_file.write(txt)
    # Build the code
    # Complex literal example needs C++ 14
    # https://www.linuxquestions.org/questions/programming-9/trouble-with-double-complex-numbers-in-c-4175567740/
    # https://stackoverflow.com/questions/31965413/compile-c14-code-with-g
    if has_main_function(txt):
        # if txt includes the main() function, build execution file
        # if the C++ source code seems to have build command, use it
        if "\n// build command" in txt.lower():
            compile_command = get_build_command_in_last_line(txt)
        else:
            compile_command = ""
        if not compile_command:
            compile_command = f"g++ -Wall -g -std=c++14 {cpp_file_name} -o {name}"
        # NOTE(review): the build command comes straight from cell content
        # and is passed to os.system (shell execution) -- only run this on
        # trusted notebooks.
        compile_result = os.system(compile_command)
        run_result = os.system(os.path.join(os.curdir, name))
        # Nonzero if either the compile or the run failed.
        result = (compile_result or run_result)
    else:
        # if txt does not include main() function, just check grammar
        compile_command = f"g++ -Wall -g -std=c++14 {cpp_file_name} -fsyntax-only"
        result = os.system(compile_command)
    # Delete the execution file
    if os.path.exists(name):
        os.remove(name)
    result_dict = {
        'result': result,
        'cpp_filename': cpp_file_name,
    }
    return result_dict
def compute_kdtree_and_dr_tractogram(tractogram, num_prototypes=None):
    """Compute the dissimilarity representation of the target tractogram and
    build the kd-tree.

    Parameters
    ----------
    tractogram : sequence of streamlines
        The target tractogram.
    num_prototypes : int, optional
        Number of prototypes for the dissimilarity representation.
        Defaults to 40, as in Olivetti et al. 2012.

    Returns
    -------
    kdt : KDTree
        KD-tree built on the dissimilarity matrix of the tractogram.
    prototypes : numpy.ndarray
        Object array of the streamlines selected as prototypes.
    """
    # dtype=object, not np.object: the np.object alias was deprecated in
    # NumPy 1.20 and removed in 1.24, where it raises AttributeError.
    tractogram = np.array(tractogram, dtype=object)
    print("Computing dissimilarity matrices...")
    if num_prototypes is None:
        num_prototypes = 40
        print("Using %s prototypes as in Olivetti et al. 2012."
              % num_prototypes)
    print("Using %s prototypes" % num_prototypes)
    dm_tractogram, prototype_idx = compute_dissimilarity(tractogram,
                                                         num_prototypes=num_prototypes,
                                                         distance=bundles_distances_mam,
                                                         prototype_policy='sff',
                                                         n_jobs=-1,
                                                         verbose=False)
    prototypes = tractogram[prototype_idx]
    print("Building the KD-tree of tractogram.")
    kdt = KDTree(dm_tractogram)
    return kdt, prototypes
def substitute(dictionary, variables, model_context, validation_result=None):
    """
    Substitute fields in the specified dictionary with variable values.
    :param dictionary: the dictionary in which to substitute variables
    :param variables: a dictionary of variables for substitution
    :param model_context: used to resolve variables in file paths
    :param validation_result: optional accumulator for validation findings,
        forwarded to the node processor and returned unchanged to the caller
    :return: the validation_result that was passed in
    """
    # _process_node walks and mutates `dictionary` in place.
    _process_node(dictionary, variables, model_context, validation_result)
    return validation_result
import os
def get_status():
    """
    Get status of working copy, return tuple of
    (changed_paths, new_paths, deleted_paths).

    Walks the current directory (skipping .git), compares every file
    against the index: a path is 'changed' when its blob hash differs from
    the indexed SHA-1, 'new' when absent from the index, 'deleted' when
    indexed but missing on disk.  All three lists are sorted.
    """
    paths = set()
    for root, dirs, files in os.walk('.'):
        # Prune .git in place so os.walk does not descend into it.
        dirs[:] = [d for d in dirs if d != '.git']
        for file in files:
            path = os.path.join(root, file)
            # Normalize to forward slashes, relative to the repo root.
            path = path.replace('\\', '/')
            if path.startswith('./'):
                path = path[2:]
            paths.add(path)
    entries_by_path = {e.path: e for e in read_index()}
    entry_paths = set(entries_by_path)
    # Changed = tracked files whose current blob hash differs from the index.
    changed = {p for p in (paths & entry_paths)
               if hash_object(read_file(p), 'blob', write=False) !=
               entries_by_path[p].sha1.hex()}
    new = paths - entry_paths
    deleted = entry_paths - paths
    return (sorted(changed), sorted(new), sorted(deleted))
def batch_query_entrez_from_locus_tag(locus_tag_list):
    """ convert a list of locus tags to list of entrez ids
    Keyword arguments:
    locus_tag_list: a list of locus tags

    :return: dict mapping each queried locus tag to its entrez '_id',
        or back to the tag itself when no conversion was found
    """
    mapping_dict = {}
    # NOTE(review): id_list dedups the input but is never used below --
    # `params` is built from the raw (possibly duplicated) list; confirm
    # whether the query was meant to use the deduplicated ids.
    id_list = list(set(locus_tag_list))
    # initiate the mydisease.info python client
    client = get_client('gene')
    params = ','.join(locus_tag_list)
    res = client.querymany(params, scopes="locus_tag", fields="_id")
    for _doc in res:
        if '_id' not in _doc:
            print('can not convert', _doc)
        # Fall back to the original query string when no _id was returned.
        mapping_dict[_doc['query']] = _doc['_id'] if '_id' in _doc else _doc["query"]
    return mapping_dict
def gather_2d(params, indices):
    """Gathers from `params` with a 2D batched `indices` array.
    Args:
        params: [D0, D1, D2 ... Dn] Tensor
        indices: [D0, D1'] integer Tensor
    Returns:
        result: [D0, D1', D2 ... Dn] Tensor, where
        result[i, j, ...] = params[i, indices[i, j], ...]
    Raises:
        ValueError: if more than one entries in [D2 ... Dn] are not known.
    """
    # Leading dims may be dynamic; read them at graph/run time.
    d0 = tf.shape(params)[0]
    d1 = tf.shape(params)[1]
    # Trailing dims must be static except for at most one, which reshape
    # can infer via -1.
    d2_dn = params.shape.as_list()[2:]
    none_indices = [i for i, s in enumerate(d2_dn) if s is None]
    if none_indices:
        if len(none_indices) > 1:
            raise ValueError(
                'More than one entry in D2 ... Dn not known for Tensor %s.' % params)
        d2_dn[none_indices[0]] = -1
    # Collapse the batch so a single gather indexes rows of [D0*D1, ...].
    flatten_params = tf.reshape(params, [d0 * d1] + d2_dn)
    # Offset each batch row's indices by its start position (i * D1).
    flatten_indices = tf.expand_dims(tf.range(d0) * d1, 1) + tf.cast(
        indices, dtype=tf.int32)
    return tf.gather(flatten_params, flatten_indices)
def updateUserDetails(request, user_id):
    """
    Update user details view
    depending on is_admin flag, admin profile is either created or deleted.

    :param request: DRF request carrying the new user data in request.data
    :param user_id: primary key of the User to update
    :return: 200 with the serialized user on success, 400 with serializer
        errors otherwise; 404 when the user does not exist
    """
    user = get_object_or_404(User, pk = user_id)
    user_serializer = UserSerializer(user, data = request.data)
    if user_serializer.is_valid():
        user_serializer.save()
        # NOTE(review): the post_save handler is (re)connected inside the
        # view, i.e. on every successful update -- confirm it is meant to
        # be attached here rather than at module import time.
        post_save.connect(manipulate_user_profile, sender=User)
        return Response({"data":user_serializer.data}, status=status.HTTP_200_OK)
    return Response(user_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def png_load(pngfile):
    """
    Load a PNG file. Returns a numpy matrix with shape (y, x, 3)
    png = png_load(pngfile)
    pix = png[4, 5]
    r, g, b = pix
    :param pngfile: the path of the png file
    :return: a numpy array of shape (y, x, 3)
    """
    # Try pypng first, then PIL; fall through to an error if neither
    # backend's dependencies are installed.
    for backend in (_pypng_load, _pil_load):
        try:
            return backend(pngfile)
        except ImportError:
            pass
    raise ImportError("needs either scipy or pypng to load a png")
def blackbody_ratio(freq_to, freq_from, temp):
    """ Function to calculate the flux ratio between two frequencies for a
    blackbody at a given temperature.

    Parameters
    ----------
    freq_to: float
        Frequency to which to scale assuming black body SED.
    freq_from: float
        Frequency from which to scale assuming black body SED.
    temp: float
        Temperature of the black body.

    Returns
    -------
    float
        Black body ratio between `freq_to` and `freq_from` at temperature
        `temp`.
    """
    numerator = blackbody_nu(freq_to, temp)
    denominator = blackbody_nu(freq_from, temp)
    return numerator / denominator
def convert_to_date(col):
    """Convert a datetime to a date, dropping the time component."""
    as_date = col.date()
    return as_date
import numpy
def orthogonal_projection(all_data, training_data):
    """Projection norm of all_data over space defined by training_data

    :return: tuple ``(norms, o, t)`` where ``norms`` holds the per-row
        Euclidean norms of ``o``, and ``o``/``t`` are the two outputs of
        ``subset_PCA`` for the normalized full and training data.
        NOTE(review): the exact semantics of ``subset_PCA``/``normalize``
        are defined elsewhere -- confirm before relying on this summary.
    """
    o, t = subset_PCA(normalize(all_data), normalize(training_data))
    # Row-wise (axis=1) vector norms of the projected data.
    norms = numpy.apply_along_axis(numpy.linalg.norm, 1, o)
    return norms, o, t
import json
import traceback
def get_iocs(prefix):
    """
    Get the list of available IOCs from DatabaseServer.
    Args:
        prefix : The PV prefix for this instrument.
    Returns:
        A list of the names of available IOCs (empty list on failure).
    """
    try:
        rawjson = dehex_and_decompress(ChannelAccess.caget(prefix + DatabasePVNames.IOCS))
        # list(...) so the documented return type holds on Python 3,
        # where dict.keys() is a view object rather than a list.
        return list(json.loads(rawjson).keys())
    except Exception:
        # Best-effort lookup: log the failure and return an empty list so
        # callers can always iterate the result.
        print_and_log("Could not retrieve IOC list: {}".format(traceback.format_exc()), "MAJOR")
        return []
import re
from typing import Counter
def perfect_match(_clipped, _seq, offset = 0, min_nt = 2):
    """
    perfect match between clipped reads and breakend sequence
    for whole length
    min_nt : minimal nt match is better to be larger than 2,
        1 nt match is 25% change by random,
        2 nt match is 0.0625 change by random,
        3 nt match is 0.0156 change by random,

    :param _clipped: iterable of clipped-read sequences to locate in _seq
    :param _seq: breakend sequence to search
    :param offset: number of leading characters to skip on each clipped
        read before matching (reads not longer than offset are skipped)
    :param min_nt: minimum (full) read length required to attempt a match
    :return: (position, count) of the most common match start, or (0, 0)
        when nothing matched
    """
    starts = []
    for _x in _clipped:
        if len(_x) < min_nt:
            # Too short to be a confident match.
            continue
        if offset > 0:
            if len(_x) <= offset:
                continue
            pattern = _x[offset:]
        else:
            pattern = _x
        # re.escape: reads are matched as literal sequences, never as
        # regular expressions (guards against metacharacters in input).
        m = re.search(re.escape(pattern), _seq)
        if m is not None:
            starts.append(m.start())
    if starts:
        # get the most common split point and its multiplicity
        return Counter(starts).most_common()[0]
    return (0, 0)
from . import yaml
import gc
import numpy
def newton(
    x0,
    model_evaluator,
    nonlinear_tol=1.0e-10,
    newton_maxiter=20,
    RecyclingSolver=krypy.recycling.RecyclingMinres,
    recycling_solver_kwargs=None,
    vector_factory_generator=None,
    compute_f_extra_args={},
    eta0=1.0e-10,
    forcing_term="constant",
    debug=False,
    yaml_emitter=None,
):
    """Newton's method with different forcing terms.

    Solves F(x) = 0 with inexact Newton steps; each linear Newton system
    is solved by a (recycling) Krylov solver whose tolerance ``eta`` is
    chosen by ``forcing_term``.

    :param x0: initial iterate (copied, not modified).
    :param model_evaluator: provides ``compute_f``, ``get_jacobian``,
        ``get_preconditioner``, ``get_preconditioner_inverse`` and
        ``inner_product``.
    :param nonlinear_tol: stop once ``||F(x)|| <= nonlinear_tol``.
    :param newton_maxiter: maximum number of Newton steps.
    :param RecyclingSolver: krypy recycling solver class for the linear
        systems.
    :param recycling_solver_kwargs: extra kwargs for each linear solve.
    :param vector_factory_generator: optional ``x -> vector_factory``
        callable used for Krylov subspace recycling.
    :param compute_f_extra_args: extra kwargs forwarded to ``compute_f``,
        ``get_jacobian`` and the preconditioner getters.  (Mutable default
        is harmless here: it is only read, never mutated.)
    :param eta0: tolerance of the first linear solve (and of all solves
        for the constant forcing term).
    :param forcing_term: ``'constant'`` or an object with a
        ``get(eta_prev, lin_res, Fx_norm, Fx_norm_prev)`` method.
    :param debug: if True, emit per-step YAML diagnostics.
    :param yaml_emitter: optional YamlEmitter to reuse; created when None.
    :returns: dict with final iterate ``'x'``, ``'info'`` (0 = converged,
        1 = failure), the Newton residual norms, the linear residual
        histories and the recycling solver instance.
    """
    # Default forcing term.
    if forcing_term == "constant":
        forcing_term = ForcingConstant(eta0)
    if recycling_solver_kwargs is None:
        recycling_solver_kwargs = {}
    # Some initializations.
    # Set the default error code to 'failure'.
    error_code = 1
    k = 0
    x = x0.copy()
    Fx = model_evaluator.compute_f(x, **compute_f_extra_args)
    Fx_norms = [numpy.sqrt(model_evaluator.inner_product(Fx, Fx))]
    eta_previous = None
    linear_relresvecs = []
    # get recycling solver
    recycling_solver = RecyclingSolver()
    # no solution in before first iteration if Newton
    out = None
    if debug:
        if yaml_emitter is None:
            yaml_emitter = yaml.YamlEmitter()
            yaml_emitter.begin_doc()
        yaml_emitter.begin_seq()
    while Fx_norms[-1] > nonlinear_tol and k < newton_maxiter:
        if debug:
            yaml_emitter.add_comment("Newton step %d" % (k + 1))
            yaml_emitter.begin_map()
            yaml_emitter.add_key_value("Fx_norm", Fx_norms[-1][0][0])
        # Get tolerance for next linear solve.
        if k == 0:
            eta = eta0
        else:
            eta = forcing_term.get(
                eta_previous, out.resnorms[-1], Fx_norms[-1], Fx_norms[-2]
            )
        eta_previous = eta
        # Setup linear problem.
        jacobian = model_evaluator.get_jacobian(x, **compute_f_extra_args)
        M = model_evaluator.get_preconditioner(x, **compute_f_extra_args)
        Minv = model_evaluator.get_preconditioner_inverse(x, **compute_f_extra_args)
        # get vector factory
        if vector_factory_generator is not None:
            vector_factory = vector_factory_generator(x)
        else:
            vector_factory = None
        # Create the linear system.
        linear_system = krypy.linsys.TimedLinearSystem(
            jacobian,
            -Fx,
            M=Minv,
            Minv=M,
            ip_B=model_evaluator.inner_product,
            normal=True,
            self_adjoint=True,
        )
        out = recycling_solver.solve(
            linear_system, vector_factory, tol=eta, **recycling_solver_kwargs
        )
        if debug:
            yaml_emitter.add_key_value("relresvec", out.resnorms)
            # yaml_emitter.add_key_value('relresvec[-1]', out['relresvec'][-1])
            yaml_emitter.add_key_value("num_iter", len(out.resnorms) - 1)
            yaml_emitter.add_key_value("eta", eta)
        # save the convergence history
        linear_relresvecs.append(out.resnorms)
        # perform the Newton update
        x += out.xk
        # do the household
        k += 1
        Fx = model_evaluator.compute_f(x, **compute_f_extra_args)
        Fx_norms.append(numpy.sqrt(model_evaluator.inner_product(Fx, Fx)[0, 0]))
        # run garbage collector in order to prevent MemoryErrors from being
        # raised
        gc.collect()
        if debug:
            yaml_emitter.end_map()
    if Fx_norms[-1] < nonlinear_tol:
        error_code = 0
    if debug:
        yaml_emitter.begin_map()
        yaml_emitter.add_key_value("Fx_norm", Fx_norms[-1])
        yaml_emitter.end_map()
        yaml_emitter.end_seq()
        if Fx_norms[-1] > nonlinear_tol:
            yaml_emitter.add_comment(
                "Newton solver did not converge "
                "(residual = %g > %g = tol)" % (Fx_norms[-1], nonlinear_tol)
            )
    return {
        "x": x,
        "info": error_code,
        "Newton residuals": Fx_norms,
        "linear relresvecs": linear_relresvecs,
        "recycling_solver": recycling_solver,
    }
import os
def load_images(path_to_directory):
"""Loads all images"""
images = {}
for dirpath, dirnames, filenames in os.walk(path_to_directory):
for name in filenames:
if name.endswith('.png'):
key = name[:-4]
img = pygame.image.load(os.path.join(dirpath, name)).convert_alpha()
images[key] = img
return images | 5cc71c2aeec51e1600163d34504aabf1f0b0d0ec | 3,632,789 |
def pareto_selection_diversify_ancestry(population):
    """Return a list of selected individuals from the population.
    DANIEL: added comparison of lineage for each added individual. Before an inidividual is added, the lineage is
    compared to the already added population. If the similarities are higher than x%, the individual is not added
    All individuals in the population are ranked by their level, i.e. the number of solutions they are dominated by.
    Individuals are added to a list based on their ranking, best to worst, until the list size reaches the target
    population size (population.pop_size).

    NOTE(review): Python 2 code (print statements, list-returning
    dict.values()); the lineage-similarity bookkeeping below indexes
    `difference[j]` and `population.lineage_dict.values()[i]` by loop
    counters that may not line up with the lists being compared -- confirm
    intended behavior before modifying.

    Parameters
    ----------
    population : Population
        This provides the individuals for selection.
    Returns
    -------
    new_population : list
        A list of selected individuals.
    """
    new_population = []
    difference = []
    ratio = []
    # SAM: moved this into calc_dominance()
    # population.sort(key="id", reverse=False) # <- if tied on all objectives, give preference to newer individual
    # (re)compute dominance for each individual
    population.calc_dominance()
    # sort the population multiple times by objective importance
    population.sort_by_objectives()
    # divide individuals into "pareto levels":
    # pareto level 0: individuals that are not dominated,
    # pareto level 1: individuals dominated one other individual, etc.
    done = False
    pareto_level = 0
    rejected = 0
    while not done:
        this_level = []
        size_left = population.pop_size - len(new_population)
        # Collect the individuals at the current dominance level.
        for ind in population:
            if len(ind.dominated_by) == pareto_level:
                this_level += [ind]
        # add best individuals to the new population.
        # add the best pareto levels first until it is not possible to fit them in the new_population
        if len(this_level) > 0:
            if size_left >= len(this_level): # if whole pareto level can fit, add it
                new_population += [this_level[0]]
                for i in range(0, len(this_level)):
                    if this_level[i] not in new_population:
                        # Compare this candidate's lineage against every
                        # already-selected individual.
                        for j in range(0, len(new_population)):
                            if len(population.lineage_dict) != 0:
                                difference.append(difflib.SequenceMatcher(None, population.lineage_dict.values()[i],
                                                                          population.lineage_dict.values()[j]))
                                ratiobuffer = difference[j].ratio()
                                if ratiobuffer==1:
                                    # Identical lineage is treated as
                                    # no-similarity (self comparison).
                                    ratio.append(0)
                                else:
                                    ratio.append(ratiobuffer)
                            else:
                                ratio = [0,0]
                        if (sum(f>0.5 for f in ratio))/float(len(ratio))<0.25 or (sum(f>0.5 for f in ratio)) < 4: #if 0.5 of ancestors are the same for more then 0.1 of the individuals, don't select this individual
                            new_population += [this_level[i]]
                        else:
                            # NOTE(review): tiny fractional increment -- the
                            # first branch apparently never wants `rejected`
                            # to affect the second branch's loop bound;
                            # confirm intent.
                            rejected +=0.000000000001
                            print "Individual rejected: ", this_level[i].id
                            continue
            else: # otherwise, select by sorted ranking within the level
                new_population += [this_level[0]]
                while len(new_population) < population.pop_size and rejected<(len(this_level)-len(new_population)): #because individuals can be deselected, a pareto level can still be smaller then the size left after evaluation
                    #random_num = random.random()
                    #log_level_length = math.log(len(this_level))
                    for i in range(1, len(this_level)):
                        if this_level[i] not in new_population:
                            for j in range(0, len(new_population)):
                                if len(population.lineage_dict)!=0:
                                    difference.append(difflib.SequenceMatcher(None, population.lineage_dict.values()[i],population.lineage_dict.values()[j]))
                                    ratiobuffer = difference[j].ratio()
                                    if ratiobuffer == 1:
                                        ratio.append(0)
                                    else:
                                        ratio.append(ratiobuffer)
                                else: ratio=[0,0]
                            if (((sum(f>0.5 for f in ratio))/float(len(ratio)))<0.25 or (sum(f>0.5 for f in ratio)) < 4) and len(new_population) < population.pop_size:
                                new_population += [this_level[i]]
                            else: rejected +=1.0000000000
                            continue
        pareto_level += 1
        rejected =0
        if len(new_population) == population.pop_size:
            done = True
    # Mark selection status on every individual for downstream consumers.
    for ind in population:
        if ind in new_population:
            ind.selected = 1
        else:
            ind.selected = 0
    return new_population
from typing import Optional
from textwrap import dedent
def remove_continuous_aggregation_query(viewdef: ViewDefinition) -> Optional[str]:
    """Build the SQL removing a view's continuous aggregation policy.

    Returns ``None`` when the view definition has no aggregation policy.
    """
    if not viewdef.aggregation_policy:
        return None
    query = "SELECT remove_continuous_aggregate_policy('{view_name}');".format(
        view_name=viewdef.name)
    return dedent(query)
def stop_gradient(x):
    """
    Disables gradients for the given tensor.
    This may switch off the gradients for `x` itself or create a copy of `x` with disabled gradients.
    Implementations:
    * PyTorch: [`x.detach()`](https://pytorch.org/docs/stable/autograd.html#torch.Tensor.detach)
    * TensorFlow: [`tf.stop_gradient`](https://www.tensorflow.org/api_docs/python/tf/stop_gradient)
    * Jax: [`jax.lax.stop_gradient`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.stop_gradient.html)
    Args:
        x: `Tensor` or `TensorLike` for which gradients should be disabled.
    Returns:
        Copy of `x`.
    """
    if isinstance(x, Tensor):
        # Apply each backend's stop_gradient to the tensor's native values.
        return x._op1(lambda native: choose_backend(native).stop_gradient(native))
    elif isinstance(x, TensorLike):
        # Tree-like structure: detach every contained value, keep the shape
        # of the tree intact.
        nest, values = disassemble_tree(x)
        new_values = [stop_gradient(v) for v in values]
        return assemble_tree(nest, new_values)
    else:
        # Raw backend array: dispatch on its backend, then wrap the result.
        return wrap(choose_backend(x).stop_gradient(x))
def get_boundary_elevations(dataset):
    """
    :param dataset: netcdf AEM line dataset
    :return:
        an array of layer top elevations of the same shape as layer_top_depth
    """
    # Broadcast each point's ground-surface elevation across all layers,
    # then subtract the layer top depths to get absolute elevations.
    # NOTE(review): assumes 'elevation' is 1-D (points,) and
    # 'layer_top_depth' is 2-D (points, layers) -- confirm against the
    # dataset schema.
    return np.repeat(dataset.variables['elevation'][:][:, np.newaxis],
                     dataset.variables['layer_top_depth'].shape[1],axis=1) - \
        dataset.variables['layer_top_depth'][:]
import argparse
def get_input_args():
    """
    Retrieves and parses the command line arguments provided by the user when
    they run the program from a terminal window. If the user fails to provide
    some or all of the arguments, then the default values are used for the
    missing arguments.
    This function returns these arguments as an ArgumentParser object.
    Returns:
        parse_args() -data structure that stores the command line arguments object
    """
    # Create Parse
    parser = argparse.ArgumentParser(description='Retrieving inputs from user')
    # Create command line arguments
    # nargs='?' makes the positional optional so its documented default is
    # actually applied when omitted (argparse ignores `default` on a
    # required positional).
    parser.add_argument('data_directory', type = str, nargs = '?', default = './',
                        help = 'path to the data directory (default: ./)')
    parser.add_argument('--save_dir', type = str, default = './',
                        help = 'path to the folder to save checkpoint file (default: ./)')
    parser.add_argument('--arch', type = str, default = 'VGG16',
                        help = 'CNN Model Architecture: vgg16, alexnet or densenet161 (default: VGG16)')
    parser.add_argument('--learning_rate', type = float, default = 0.002,
                        help = 'Learning rate (default: 0.002)')
    parser.add_argument('--epochs', type = int, default = 1,
                        help = 'Epochs (default: 1)')
    parser.add_argument('--dropout', type = float, default = 0.1,
                        help = 'Dropout (default: 0.1)')
    return parser.parse_args()
async def train(
    background_tasks: BackgroundTasks,
    current_user: User = Depends(auth.get_current_user_and_bot),
):
    """
    Trains the chatbot

    Kicks off model training for the current user's bot as a background
    task and returns immediately with a confirmation message.
    """
    Utility.train_model(background_tasks, current_user.get_bot(), current_user.get_user(), current_user.email, 'train')
    return {"message": "Model training started."}
from datetime import datetime
def get_header():
    """
    Build the Header element of the pmml, carrying the copyright notice,
    a default description, the current timestamp and Nyoka application
    metadata (name and version).

    Returns
    -------
    header :
        Returns the header of the pmml.
    """
    timestamp = pml.Timestamp(datetime.now())
    application = pml.Application(name="Nyoka", version=metadata.__version__)
    return pml.Header(
        copyright="Copyright (c) 2019 Software AG",
        description="Default Description",
        Timestamp=timestamp,
        Application=application,
    )
def Identifiers():
    """Returns a "fake" Identifiers field.
    Field expects:
        "<scheme1>": "<identifier1>",
        ...
        "<schemeN>": "<identifierN>"

    Both keys (schemes) and values (identifiers) are required and must be
    non-blank sanitized strings.
    """
    return fields.Dict(
        # scheme
        keys=SanitizedUnicode(
            required=True, validate=_not_blank(_('Scheme cannot be blank.'))
        ),
        # identifier
        values=SanitizedUnicode(
            required=True,
            validate=_not_blank(_('Identifier cannot be blank.'))
        )
    )
import torch
def create_edge_map(x, N=4):
    """
    Creates the edge maps for the given image
    Args:
        x: (ndarray / Tensor, shape (N, *size))
        N: denominator of the edge threshold (max/N); pass None to skip
            thresholding
    Returns:
        x: (same as input) in-place op on input x
        NOTE(review): despite the docstring, a new numpy array is returned
        and the input is not modified in place -- confirm which contract
        callers rely on.
    """
    # remove the batch dimension
    # TODO: could be for every image in the batch -- batch is not taken into account in the code yet
    x = x[np.newaxis, ...]
    ndim = x.ndim - 2
    diff = []
    # Central finite differences along each spatial axis.
    for i in range(ndim):
        diff.append(loss.finite_diff(torch.tensor(x), i, mode="central", boundary="Neumann"))
    # diff_x, diff_y, diff_z = diff[0], diff[1], diff[2]
    diff_x, diff_y = diff[0], diff[1]
    # x = (diff_x ** 2 + diff_y ** 2 + diff_z ** 2) ** (1. / 2)
    # Gradient magnitude (2-D only; the 3-D variant is commented out above).
    x = (diff_x ** 2 + diff_y ** 2) ** (1. / 2)
    if N is not None:
        # threshold the image edges to filter out the noise
        threshold = torch.max(x) / N
        x = torch.where(x < threshold, 0 * x, x)
    return x[0].numpy()
def reject_sv(m, s, y):
    """ Sample from N(m, s^2) times SV likelihood using rejection.
    SV likelihood (in x) corresponds to y ~ N(0, exp(x)).

    Proposals are drawn from a Gaussian centered at a shifted mean; after
    1000 failed attempts the last (rejected) draw is returned with a
    warning printed.
    """
    # Shifted proposal mean from a second-order expansion of the SV term.
    proposal_mean = m + 0.5 * s**2 * (-1. + y**2 * np.exp(-m))
    attempts = 0
    while True:
        attempts += 1
        draw = stats.norm.rvs(loc=proposal_mean, scale=s)
        u = stats.uniform.rvs()
        accepted = np.log(u) < -0.5 * y**2 * (
            np.exp(-draw) - np.exp(-m) * (1. + m - draw))
        if accepted:
            break
        if attempts > 1000:
            print('1000 failed attempt, m,s,y=%f, %f, %f' % (m, s, y))
            break
    return draw
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.