content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _findCentralBond(mol, distmat):
    """Locate the two atoms forming the most central bond of a molecule.

    Arguments:
    - mol: the molecule of interest
    - distmat: distance matrix of the molecule
    Return: atom indices of the two most central atoms (in order)
    """
    from numpy import std
    # Rank non-terminal atoms by the spread (STD) of their shortest
    # distances to every other atom: the smallest spread is the most central.
    ranked = []
    for idx in range(mol.GetNumAtoms()):
        # skip terminal atoms (fewer than two heavy-atom neighbours)
        if len(_getHeavyAtomNeighbors(mol.GetAtomWithIdx(idx))) < 2:
            continue
        dists = list(distmat[idx])
        dists.pop(idx)  # drop the zero self-distance
        ranked.append((std(dists), idx))
    ranked.sort()
    aid1 = ranked[0][1]
    # walk down the ranking until we find an atom actually bonded to aid1
    pos = 1
    while mol.GetBondBetweenAtoms(aid1, ranked[pos][1]) is None:
        pos += 1
    aid2 = ranked[pos][1]
    return aid1, aid2  # most central atom comes first
def check_python_version(program: str) -> None:
    """Report issues with the Python used to run mypy, dmypy, or stubgen.

    Exits the process with an explanatory message on known-bad versions;
    otherwise returns None.
    """
    major_minor = sys.version_info[:2]
    # Python 3.4 and older were never supported.
    if major_minor < (3, 5):
        sys.exit("Running {name} with Python 3.4 or lower is not supported; "
                 "please upgrade to 3.5 or newer".format(name=program))
    # this can be deleted once we drop support for 3.5
    if sys.version_info[:3] == (3, 5, 0):
        sys.exit("Running {name} with Python 3.5.0 is not supported; "
                 "please upgrade to 3.5.1 or newer".format(name=program))
def transform(data, transformer):
    """Apply the trained transformer object from fit() to new data.

    DataRobot runs this hook when the task is used for scoring inside a
    blueprint; the hook must return the transformed data. The input
    parameters are passed by DataRobot based on dataset and blueprint
    configuration.

    Parameters
    -------
    data: pd.DataFrame
        Data that DataRobot passes for transformation.
    transformer: Any
        Trained object, extracted by DataRobot from the artifact created
        inside fit(). In this example, it's a function.

    Returns
    -------
    pd.DataFrame
        A dataframe with the transformed data.
    """
    transformed = data.apply(transformer)
    return transformed
def fill_diagonal(a, val, wrap=False):
    """Fills the main diagonal of the given array of any dimensionality.

    For an array `a` with ``a.ndim > 2``, the diagonal is the list of
    locations with indices ``a[i, i, ..., i]`` all identical. This function
    modifies the input array in-place, it does not return a value.

    Args:
        a (cupy.ndarray): The array, at least 2-D.
        val (scalar): The value to be written on the diagonal.
            Its type must be compatible with that of the array a.
        wrap (bool): If specified, the diagonal is "wrapped" after N columns.
            This affects only tall matrices.

    Examples
    --------
    >>> a = cupy.zeros((3, 3), int)
    >>> cupy.fill_diagonal(a, 5)
    >>> a
    array([[5, 0, 0],
           [0, 5, 0],
           [0, 0, 5]])

    .. seealso:: :func:`numpy.fill_diagonal`
    """
    # The following is adapted from the original numpy implementation.
    if a.ndim < 2:
        raise ValueError('array must be at least 2-d')
    end = a.size
    if a.ndim == 2:
        # stride between consecutive diagonal elements in the flattened array
        step = a.shape[1] + 1
        if not wrap:
            # without wrapping, stop after the first n*n square of a tall matrix
            end = a.shape[1] * a.shape[1]
    else:
        # FIX: numpy.alltrue is deprecated and removed in NumPy 2.0;
        # numpy.all is the supported equivalent.
        if not numpy.all(numpy.diff(a.shape) == 0):
            raise ValueError('All dimensions of input must be of equal length')
        step = 1 + numpy.cumprod(a.shape[:-1]).sum()
    val = cupy.asarray(val, dtype=a.dtype)
    size = end // step + 1
    _fill_diagonal_kernel(0, step, val, a, size=size)
async def sign_params(params, certificate_file, private_key_file):
    """
    Signs params adding client_secret key, containing signature based on
    `scope`, `timestamp`, `client_id` and `state` keys values.

    :param dict params: requests parameters
    :param str certificate_file: path to certificate file
    :param str private_key_file: path to private key file
    :return: signed request parameters
    :rtype: dict
    :raises OpenSSLError: if the openssl subprocess exits non-zero
    """
    # the signature covers these four values concatenated in this fixed order
    plaintext = ''.join([
        params.get(key, '') for key in ['scope', 'timestamp', 'client_id', 'state']
    ])
    cmd = 'openssl smime -sign -md md_gost12_256 -signer {cert} -inkey {key} -outform DER'.format(
        cert=certificate_file,
        key=private_key_file
    )
    proc = await asyncio.create_subprocess_shell(
        cmd,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
    )
    # stderr is merged into stdout above, so only stdout is meaningful here
    stdout, _ = await proc.communicate(input=plaintext.encode())
    if proc.returncode != 0:
        raise OpenSSLError
    # FIX: a trailing comma previously turned client_secret into a 1-tuple
    # instead of a string.
    client_secret = base64.urlsafe_b64encode(stdout).decode('utf-8')
    return {**params, 'client_secret': client_secret}
def sum_of_fourth_powers(matrix):
    """Return the sum of the fourth powers of all entries of *matrix*.

    :param matrix: (numpy.ndarray) A numpy array.
    :return: The fourth power of the four-norm of the matrix, i.e. the sum
        of the fourth power of all of its entries.
    """
    squares = np.multiply(matrix, matrix)
    return np.sum(np.multiply(squares, squares))
def record_agreement_to_terms(user_id):
    """Stamp the current UTC time as the user's latest license-terms agreement."""
    settings = get_user_settings(user_id, strict=True)
    settings.last_agreed_to_terms = datetime.datetime.utcnow()
    _save_user_settings(settings)
def key_in_direction(start: Key, direction: str, keypad: Keypad) -> Key:
    """
    Return the key adjacent to *start* in *direction*, clamped at the pad edge.
    """
    row = next(r for r in keypad if start in r)
    col_idx = row.index(start)
    column = [r[col_idx] for r in keypad]
    row_idx = column.index(start)
    last_row = len(column) - 1
    last_col = len(row) - 1
    neighbours: Dict[str, Key] = {
        "U": column[max(0, row_idx - 1)],
        "D": column[min(row_idx + 1, last_row)],
        "L": row[max(0, col_idx - 1)],
        "R": row[min(col_idx + 1, last_col)],
    }
    # fall back to *start* when the looked-up key is falsy (e.g. a gap in the pad)
    return neighbours[direction] or start
def plot_trajectory(
    df,
    plot_heading=False,
    position="head",
    invert_y_axis=True,
    title="Trajectory",
):
    """Plot the animal's trajectory over time.

    :param df: Raw (or cropped) dataframe of body positions over time
    :param bool plot_heading: Plot direction of animal at each point
    :param str position: Plot position of 'head' or 'body'
    :param bool invert_y_axis: Invert the y-axis to match numpy
    :param str title: Title of plot (appended with condition name)
    :return:
    """
    logging.info("Plotting trajectory")
    if "global_condition_name" in globals():
        title = title + " - " + global_condition_name
    fig, ax = plt.subplots()
    ax.set_title(title)
    # FIX: compare strings with '==', not 'is' (identity); the old code only
    # worked by accident through small-string interning.
    if position == "head":
        # head position = midpoint between the left and right ear markers
        head_left_x = df["Hear_L_x"].to_numpy().astype(float)
        head_left_y = df["Hear_L_y"].to_numpy().astype(float)
        head_right_x = df["Hear_R_x"].to_numpy().astype(float)
        head_right_y = df["Hear_R_y"].to_numpy().astype(float)
        x = (head_left_x + head_right_x).astype(float) / 2
        y = (head_left_y + head_right_y).astype(float) / 2
    elif position == "body":
        x = df.Back_x.to_numpy().astype(float)
        y = df.Back_y.to_numpy().astype(float)
    else:
        logging.error(
            'Position marker: {} is not known. Please use "head" or '
            '"body"'.format(position)
        )
        # FIX: bail out early; x/y were never assigned, so continuing
        # previously raised an unhandled NameError.
        return
    if plot_heading:
        angles = df.absolute_head_angle.to_numpy()
        vec_x = np.ones(x.shape)
        vec_y = np.ones(x.shape)
        ax.quiver(x, y, vec_x, vec_y, angles=angles, pivot="middle", scale=50)
    else:
        ax.plot(x, y, color=global_plot_color)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    if invert_y_axis:
        ax.invert_yaxis()
def vectorize_args(nums):
    """
    Decorator that vectorizes selected positional arguments of a function.

    The positions of the arguments to vectorize are given in the tuple
    ``nums``. Lists at those positions are promoted to numpy arrays; if any
    of them is an ndarray, the wrapped function is applied element-wise
    (see numpy.vectorize). If all relevant arguments are scalars, the
    function is called directly.
    """
    def wrap(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            args = list(args)
            # promote lists at the selected positions to numpy arrays
            for i, arg in enumerate(args):
                if i in nums and type(arg) == list:
                    args[i] = np.array(arg)
            # find the first array argument; its shape drives the iteration
            for i, arg in enumerate(args):
                if i in nums and type(arg) == np.ndarray:
                    shape = np.shape(arg)
                    ind = np.transpose(np.ones(shape).nonzero())
                    break
                if i == len(args) - 1:
                    # no need for vectorization as all relevant
                    # arguments are scalars
                    return func(*args, **kwargs)
            res = np.array([
                func(
                    *[arg[tuple(j)] if type(arg) == np.ndarray and i in nums
                      else arg for i, arg in enumerate(args)],
                    **kwargs)
                for j in ind])
            # FIX: the original used the Python 2 '<>' operator here, which
            # is a syntax error in Python 3; '!=' is the equivalent.
            if np.shape(res) != shape:
                # func returns more than 1 result, this means the array has to
                # be ordered differently
                res = res.transpose()
                if len(shape) > 1:
                    # more than 1D arrays, the shape of the list has to be rearranged
                    res = res.reshape((res.shape[0],) + shape)
            return res
        return wrapped
    return wrap
def parse_lambda_config(x):
    """
    Parse the configuration of lambda coefficient (for scheduling).

    x = "3"                  # lambda is a constant equal to x
    x = "0:1,1000:0"         # lambda starts at 1 and linearly decreases
                             # to 0 over the first 1000 iterations
    x = "0:0,1000:0,2000:1"  # lambda is 0 for the first 1000 iterations,
                             # then linearly increases to 1 until iteration 2000
    """
    if isinstance(x, float):
        return x, None
    parts = x.split(',')
    if len(parts) == 1:
        return float(x), None
    pairs = [part.split(':') for part in parts]
    assert all(len(pair) == 2 for pair in pairs)
    assert all(k.isdigit() for k, _ in pairs)
    # iteration keys must be strictly increasing
    assert all(int(a[0]) < int(b[0]) for a, b in zip(pairs, pairs[1:]))
    return float(pairs[0][1]), [(int(k), float(v)) for k, v in pairs]
def format_cols(colname, direction='in'):
    """Convert a column name between human-readable and pandas-friendly forms.

    :param colname: the column name to convert
    :param direction: 'in' normalises for pandas ("My Col (x)" -> "my_col_x");
        'out' prettifies for display ("my_col" -> "My Col")
    :return: the converted column name
    :raises ValueError: if *direction* is neither 'in' nor 'out'
    """
    # FIX: removed dead code that referenced undefined names (imag, real,
    # complex_zero) and a docstring copied from an unrelated function.
    if direction == 'in':
        return (colname
                .lower()
                .replace(' ', '_')
                .replace('(', '')
                .replace(')', '')
                )
    elif direction == 'out':
        return (colname.replace('_', ' ')
                .title()
                )
    raise ValueError('Direction must be "in" or "out"')
def convert_decimal_to_binary(number):
    """Return the binary representation of *number* without the '0b' prefix.

    Parameters
    ----------
    number: int

    Returns
    -------
    out: str

    >>> convert_decimal_to_binary(10)
    '1010'
    """
    binary_literal = bin(number)
    # strip the leading '0b' produced by bin()
    return binary_literal[2:]
def fetch_tgz(
    dataname: str,
    urlname: str,
    subfolder: Optional[str] = None,
    data_home: Optional[str] = None,
) -> pathlib.Path:
    """Fetch tgz dataset.

    Fetch a tgz file from a given url, unzips and stores it in a given
    directory.

    Parameters
    ----------
    dataname: string
        Dataset name.
    urlname: string
        Dataset url.
    subfolder: string, default=None
        The subfolder where to put the data, if any.
    data_home: string, default=None
        Dataset directory. If None, use the default of scikit-learn.

    Returns
    -------
    data_home: Path
        Directory.
    """
    # delegate to the generic compressed fetcher with gzip-tar settings
    options = dict(
        dataname=dataname,
        urlname=urlname,
        compression_open=tarfile.open,
        subfolder=subfolder,
        data_home=data_home,
        open_format='r:gz',
    )
    return fetch_compressed(**options)
def evaluate_coco_PRC(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation and saves a precision-recall curve plot.

    model: detector exposing a `detect(images, verbose)` API (Mask R-CNN style)
    dataset: A Dataset object with validation data
    coco: the pycocotools COCO object matching `dataset`
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    image_ids: optional explicit list of dataset image ids to evaluate
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids
    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
    t_prediction = 0
    t_start = time.time()
    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)
        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)
        # NOTE(review): class ids are shifted down by one in this first pass
        # only (the second pass below leaves them untouched) -- confirm which
        # convention the COCO ground truth uses.
        r["class_ids"] = [x-1 for x in r["class_ids"]]
        # Convert results to COCO format
        # Cast masks to uint8 because COCO tools errors out on bool
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)
    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)
    # Evaluate at a single IoU threshold (0.1), one area range, 100 detections
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.params.iouThrs = [0.1]
    cocoEval.params.areaRng = [[0, 10000000000.0]]
    cocoEval.params.maxDets = [100]
    cocoEval.evaluate()
    cocoEval.accumulate()
    # precision curve over all recall thresholds for the single configuration
    precision = cocoEval.eval['precision'][0, :, 0, 0, 0]
    recall = cocoEval.params.recThrs
    plt.plot(recall, precision, 'ro')
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.title('PRC IoU 0,5')
    plt.savefig(fname='PRC' + str(limit) + '.jpg')
    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
    # Second pass: re-runs detection (without the class-id shift) to compute
    # the standard COCO and PASCAL VOC summary metrics and dump them to CSV.
    # Pick COCO images from the dataset
    # image_ids = image_ids or dataset.image_ids
    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
    t_prediction = 0
    t_start = time.time()
    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)
        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)
        # r["class_ids"] = [x-1 for x in r["class_ids"]]
        # Convert results to COCO format
        # Cast masks to uint8 because COCO tools errors out on bool
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)
    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)
    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    print('Original COCO metrics')
    sumcoco = cocoEval.summarize_coco()
    sumcoco = pd.DataFrame(sumcoco)
    print('Original PASCAL VOC metrics')
    sumvoc = cocoEval.summarize_voc()
    sumvoc = pd.DataFrame(sumvoc)
    # NOTE(review): `args` is a module-level global here -- confirm it is
    # always defined before this function runs.
    sumcoco.to_csv('output_coco_%s.csv' % args.model[-6:])
    sumvoc.to_csv('output_voc_%s.csv' % args.model[-6:])
def choisir_action():
    """Ask the user whether to encrypt or decrypt.

    Input: -
    Output: True for encryption, False for decryption.
    """
    reponse = input("Quelle est l'action, crypter ou décrypter ? \n<Entrée> pour crypter, autre touche pour decrypter, ou <Crtl> + Z ou X pour arréter.\n")
    # an empty answer (just <Enter>) selects encryption
    return not reponse
def σ(u, p, μ):
    """Stress tensor of isotropic Newtonian fluid.

    σ = 2 μ (symm ∇)(u) - p I

    This method returns a UFL expression the whole stress tensor. If you want
    to plot, extract and interpolate or project what you need. For example,
    to plot the von Mises stress::

        from dolfin import tr, Identity, sqrt, inner
        from fenics import project, plot

        # scalar function space
        W = V.sub(0).collapse()  # use the space of the first comp. of `V`
        # W = FunctionSpace(mesh, 'P', 2)  # or create your own space

        def dev(T):
            '''Deviatoric (traceless) part of rank-2 tensor `T`.

            This assumes, for 2D, that `T` is actually 3D, but
            the third row and column of `T` are zero.
            '''
            return T - (1 / 3) * tr(T) * Identity(T.geometric_dimension())

        # `solver._μ` is the UFL `Constant` object
        σ = σ(solver.u_, solver.p_, solver._μ)
        s = dev(σ)
        vonMises = sqrt(3 / 2 * inner(s, s))
        plot(project(vonMises, W))
    """
    # 2 μ ε(u) - p I, where ε is the symmetric-gradient helper defined
    # elsewhere in this module (see the formula in the docstring above).
    return 2 * μ * ε(u) - p * Identity(p.geometric_dimension())
async def cycle(command: Command, switches: PowerSwitch, name: str, portnum: int):
    """Cycle power to an Outlet.

    Queries each switch until one reports a status for the outlet, then
    cycles it if it is currently ON; fails the command otherwise.
    """
    command.info(text=f"Cycle port {name}...")
    # FIX: initialise so we can detect "no switch answered" instead of
    # crashing with NameError (empty switch list) or KeyError (falsy status).
    current_status = None
    for switch in switches:
        current_status = await switch.statusAsJson(name, portnum)
        if current_status:
            break
    if not current_status:
        return command.fail(text=f"The Outlet {name} returns wrong value")
    try:
        # only cycle an outlet that is currently ON
        if current_status[name]["STATE"] == 1:
            current_status = await switch_control(
                "cycle", switches, False, name, portnum
            )
        elif current_status[name]["STATE"] == 0:
            return command.fail(text=f"The Outlet {name} is OFF")
        else:
            return command.fail(text=f"The Outlet {name} returns wrong value")
    except PowerException as ex:
        return command.fail(error=str(ex))
    return command.finish(text="done")
def createDummyRoot():
    """Create a dummy root in labelDict for a hierarchy that is not rooted, i.e. one with multiple top level nodes."""
    global labelDict
    root = {'parents': [], 'children': []}
    # every current top-level node (no parents) becomes a child of 'ROOT'
    for label, entry in labelDict.items():
        if not entry['parents']:
            root['children'].append(label)
            entry['parents'].append('ROOT')
    labelDict['ROOT'] = root
def zk_delete_working_node(zk_client, server):
    """Delete the service's working node from ZooKeeper.

    :param zk_client: a connected Kazoo-style ZooKeeper client
    :param server: the server whose working node should be removed
    :return: result of the delete call
    """
    node_path, root_path = get_path_to_current_working_node(server)
    zk_client.ensure_path(root_path)
    # FIX: 'ephemeral' is a create()-time property of a znode, not a delete()
    # option; KazooClient.delete() accepts only (path, version, recursive)
    # and raises TypeError on an 'ephemeral' keyword.
    result = zk_client.delete(node_path)
    return result
def test_dimension_estimation():
    """
    Test the dimension selection procedures
    """
    from numpy.random import randn
    k = 2
    # rank-k data (100 x 10) plus unit Gaussian noise
    x = 10 * np.dot(np.dot(randn(100, k), np.eye(k)), randn(k, 10))
    x += randn(100, 10)
    ek = infer_latent_dim(x)
    # FIX: was a Python 2 print statement ('print k, ek'), a syntax error
    # in Python 3.
    print(k, ek)
    assert k == ek
async def give_wc_roles(member: discord.Member, score: int):
    """
    Update a member's WC roles to match their latest total score.
    """
    awarded = False
    for min_score, role_id in common.ServerConstants.WC_ROLES:
        held_ids = [role.id for role in member.roles]
        if not awarded and score >= min_score:
            # first threshold the score clears: this is the role to grant
            awarded = True
            if role_id not in held_ids:
                await member.add_roles(
                    discord.Object(role_id),
                    reason="Automatic bot action, adds WC event roles",
                )
        elif role_id in held_ids:
            # strip any other event-related role the member still holds
            await member.remove_roles(
                discord.Object(role_id),
                reason="Automatic bot action, removes older WC event roles",
            )
def hesitations_difference(hesitations, signal_len, hesitations_mean, path):
    """Plot hesitations count comparison as a gauge against population norms.

    Args:
        hesitations: array of detected hesitations (only its length is used)
        signal_len: signal length used to normalise to a per-minute rate
            (presumably in seconds -- TODO confirm)
        hesitations_mean: (mean, std) pair describing the reference population
        path: output path passed to generate_graph
    """
    # convert the raw hesitation count to a per-minute rate
    hesitations = hesitations.shape[0] / signal_len * 60
    # gauge spans +/- 5 standard deviations around the population mean;
    # labels and value format are Czech UI strings (runtime text, unchanged)
    fig, ax = gauge(['Nízký', 'Normální', 'Vysoký'], ['C0', 'C2', 'C1'], value=hesitations,
                    min_val=hesitations_mean[0] - 5 * hesitations_mean[1],
                    max_val=hesitations_mean[0] + 5 * hesitations_mean[1],
                    tickers_format=lambda l: f'{l:.1f}', val_format=lambda l: f'{l:.2f} váhání za minutu')
    generate_graph(path, fig)
def construct_filters_from_2d(matrix, filter_starts, decomp_level):
    """
    construct the filters in the proper shape for the DWT inverse forward step

    Parameters
    ----------
    matrix
        flattened coefficient tensor; square blocks are sliced off the front
        and the trailing dimension is treated as the channel axis -- TODO
        confirm against the caller
    filter_starts
        per-level side lengths used to slice square blocks out of `matrix`
    decomp_level
        number of decomposition levels to reconstruct

    Returns
    -------
    (low, highs): the low-pass band and the list of per-level high-pass
    bands; `highs` is reversed before returning (finest level first --
    TODO confirm the ordering the DWT inverse expects)
    """
    exp = filter_starts[0]
    # low-pass band: first exp*exp rows, reshaped to (1, channels, exp, exp)
    low = matrix[: exp ** 2].reshape((exp, exp, matrix.shape[-1]))
    low = low.permute(2, 0, 1).unsqueeze(0)
    highs = []
    last_end = exp ** 2
    for lvl in range(decomp_level):
        # NOTE(review): for lvl == 0 this re-reads filter_starts[0], the same
        # size already used for the low band -- confirm this is intentional.
        exp = filter_starts[lvl]
        lp_list = [None, None, None]
        # three detail sub-bands per level, stacked along dim 2
        for i in range(1, 4):
            next_end = last_end + exp ** 2
            lp_list[i - 1] = (
                matrix[last_end:next_end]
                .reshape((exp, exp, matrix.shape[-1]))
                .permute(2, 0, 1)
                .unsqueeze(0)
                .unsqueeze(2)
            )
            last_end = next_end
        highs.append(torch.cat(lp_list, dim=2))
    highs.reverse()
    return low, highs
def minpoly(firstterms):
    """
    Return the minimal polynomial having at most degree n of the
    linearly recurrent sequence whose first 2n terms are given.

    The polynomial is obtained by running the extended Euclidean algorithm
    on x**(2n) and the generating polynomial of the terms until the
    remainder degree drops below n, then normalising the cofactor to be
    monic.
    """
    field = ring.getRing(firstterms[0])
    # r_0 = x**(2n); r_1 = generating polynomial of the given terms
    r_0 = uniutil.polynomial({len(firstterms):field.one}, field)
    r_1 = uniutil.polynomial(enumerate(reversed(firstterms)), field)
    poly_ring = r_0.getRing()
    # v_k tracks the Bezout cofactor of r_1 through the Euclidean steps
    v_0 = poly_ring.zero
    v_1 = poly_ring.one
    n = len(firstterms) // 2
    while n <= r_1.degree():
        q, r = divmod(r_0, r_1)
        v_0, v_1 = v_1, v_0 - q*v_1
        r_0, r_1 = r_1, r
    # scale by the inverse leading coefficient to make the result monic
    return v_1.scalar_mul(v_1.leading_coefficient().inverse())
def scale_gradient(tensor, scale):
    """Scales the gradient for the backward pass.

    The forward value is unchanged: scale * t + (1 - scale) * stop_gradient(t)
    equals t, but only the first term carries gradient.
    """
    pass_through = tf.stop_gradient(tensor) * (1 - scale)
    return tf.add(tensor * scale, pass_through)
def get_cluster_assignments(args, model, dataset, groups):
    """Hierarchically cluster dataset features into super-clusters, then
    sub-clusters, across distributed workers.

    Each rank embeds its shard of the dataset, runs a distributed k-means
    into `args.nmb_super_clusters` super-clusters (or reloads a previous
    assignment from disk), re-shards the cached features by super-cluster,
    and finally runs a second distributed k-means within this process's
    super-cluster. Intermediate state is exchanged through files under
    `args.dump_path` and torch.distributed collectives.

    Returns the sub-cluster assignments for this process's super-cluster.
    """
    # pseudo-labels are confusing
    dataset.sub_classes = None
    # switch to eval mode
    model.eval()
    # this process deals only with a subset of the dataset
    local_nmb_data = len(dataset) // args.world_size
    indices = torch.arange(args.rank * local_nmb_data, (args.rank + 1) * local_nmb_data).int()
    if os.path.isfile(os.path.join(args.dump_path, 'super_class_assignments.pkl')):
        # super-class assignments have already been computed in a previous run
        super_class_assignements = pickle.load(open(os.path.join(args.dump_path, 'super_class_assignments.pkl'), 'rb'))
        logger.info('loaded super-class assignments')
        # dump cache
        where_helper = get_indices_sparse(super_class_assignements[indices])
        nmb_data_per_super_cluster = torch.zeros(args.nmb_super_clusters).cuda()
        for super_class in range(len(where_helper)):
            nmb_data_per_super_cluster[super_class] = len(where_helper[super_class][0])
    else:
        sampler = Subset_Sampler(indices)
        # we need a data loader
        loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=args.batch_size,
            sampler=sampler,
            num_workers=args.workers,
            pin_memory=True,
        )
        # initialize cache, pca and centroids
        cache, centroids = initialize_cache(args, loader, model)
        # empty cuda cache (useful because we're about to use faiss on gpu)
        torch.cuda.empty_cache()
        # perform clustering into super_clusters
        super_class_assignements, centroids_sc = distributed_kmeans(
            args,
            args.size_dataset,
            args.nmb_super_clusters,
            cache,
            args.rank,
            args.world_size,
            centroids,
        )
        # dump activations in the cache, bucketed per super-class
        where_helper = get_indices_sparse(super_class_assignements[indices])
        nmb_data_per_super_cluster = torch.zeros(args.nmb_super_clusters).cuda()
        for super_class in range(len(where_helper)):
            ind_sc = where_helper[super_class][0]
            np.save(open(os.path.join(
                args.dump_path,
                'cache/',
                'super_class' + str(super_class) + '-' + str(args.rank),
            ), 'wb'), cache[ind_sc])
            nmb_data_per_super_cluster[super_class] = len(ind_sc)
        dist.barrier()
        # dump super_class assignment and centroids of super_class
        # (rank 0 only, to avoid concurrent writes)
        if not args.rank:
            pickle.dump(
                super_class_assignements,
                open(os.path.join(args.dump_path, 'super_class_assignments.pkl'), 'wb'),
            )
            pickle.dump(
                centroids_sc,
                open(os.path.join(args.dump_path, 'super_class_centroids.pkl'), 'wb'),
            )
    # size of the different super clusters, gathered from every rank
    all_counts = [torch.zeros(args.nmb_super_clusters).cuda() for _ in range(args.world_size)]
    dist.all_gather(all_counts, nmb_data_per_super_cluster)
    all_counts = torch.cat(all_counts).cpu().long()
    all_counts = all_counts.reshape(args.world_size, args.nmb_super_clusters)
    logger.info(all_counts.sum(dim=0))
    # what are the data belonging to this super class
    dataset.subset_indexes = np.where(super_class_assignements == args.clustering_local_world_id)[0]
    # truncate so the subset divides evenly across the local world
    div = args.batch_size * args.clustering_local_world_size
    dataset.subset_indexes = dataset.subset_indexes[:len(dataset) // div * div]
    dist.barrier()
    # which files this process is going to read
    local_nmb_data = int(len(dataset) / args.clustering_local_world_size)
    low = np.long(args.clustering_local_rank * local_nmb_data)
    high = np.long(low + local_nmb_data)
    curr_ind = 0
    cache = torch.zeros(local_nmb_data, args.dim_pca, dtype=torch.float32)
    cumsum = torch.cumsum(all_counts[:, args.clustering_local_world_id].long(), 0).long()
    for r in range(args.world_size):
        # data in this bucket r: [cumsum[r - 1] : cumsum[r] - 1]
        low_bucket = np.long(cumsum[r - 1]) if r else 0
        # this bucket is empty
        if low_bucket > cumsum[r] - 1:
            continue
        if cumsum[r] - 1 < low:
            continue
        if low_bucket >= high:
            break
        # which are the data we are interested in inside this bucket ?
        ind_low = np.long(max(low, low_bucket))
        ind_high = np.long(min(high, cumsum[r]))
        cache_r = np.load(open(os.path.join(args.dump_path, 'cache/', 'super_class' + str(args.clustering_local_world_id) + '-' + str(r)), 'rb'))
        cache[curr_ind: curr_ind + ind_high - ind_low] = torch.FloatTensor(cache_r[ind_low - low_bucket: ind_high - low_bucket])
        curr_ind += (ind_high - ind_low)
    # randomly pick some centroids and dump them (local rank 0 only)
    centroids_path = os.path.join(args.dump_path, 'centroids' + str(args.clustering_local_world_id) + '.pkl')
    if not args.clustering_local_rank:
        centroids = cache[np.random.choice(
            np.arange(cache.shape[0]),
            replace=cache.shape[0] < args.k // args.nmb_super_clusters,
            size=args.k // args.nmb_super_clusters,
        )]
        pickle.dump(centroids, open(centroids_path, 'wb'), -1)
    dist.barrier()
    # read centroids
    centroids = pickle.load(open(centroids_path, 'rb')).cuda()
    # distributed kmeans into sub-classes
    cluster_assignments, centroids = distributed_kmeans(
        args,
        len(dataset),
        args.k // args.nmb_super_clusters,
        cache,
        args.clustering_local_rank,
        args.clustering_local_world_size,
        centroids,
        world_id=args.clustering_local_world_id,
        group=groups[args.clustering_local_world_id],
    )
    # free RAM
    del cache
    # write cluster assignments and centroids
    if not args.clustering_local_rank:
        pickle.dump(
            cluster_assignments,
            open(os.path.join(args.dump_path, 'cluster_assignments' + str(args.clustering_local_world_id) + '.pkl'), 'wb'),
        )
        pickle.dump(
            centroids,
            open(centroids_path, 'wb'),
        )
    dist.barrier()
    return cluster_assignments
def foo(a, d=1):
    """Placeholder function; both parameters are currently unused.

    :param a: unused positional argument
    :param d: unused keyword argument (default 1)
    """
    pass
def t_rename_local_variables(the_ast, all_sites=False):
    """
    Replace local variable names with generic VAR<i> holes.

    Collects every distinct identifier that is the target of an assignment,
    picks either one at random or all of them (``all_sites``), and rewrites
    every occurrence to ``VARn``. Returns (changed, the_ast).
    """
    # one representative Name node per distinct stored-to identifier
    seen_ids = []
    candidates = []
    for node in ast.walk(the_ast):
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
            if node.id not in seen_ids:
                seen_ids.append(node.id)
                candidates.append(node)
    if not candidates:
        return False, the_ast
    selected = candidates if all_sites else [random.choice(candidates)]
    # map each selected identifier to its 1-based hole index
    replacement = {node.id: pos for pos, node in enumerate(selected, start=1)}
    changed = False
    for node in ast.walk(the_ast):
        if isinstance(node, ast.Name) and node.id in replacement:
            changed = True
            node.id = 'VAR' + str(replacement[node.id])
    return changed, the_ast
def get_scale(notes: List[str]) -> int:
    """Convert a list of notes to a scale constant.

    # Args
    - *notes*: list of notes in the scale. This should be a list of strings
      where each string is a note in ABC notation. Sharps are written with a
      pound sign preceding the note (e.g. '#A'), flats with a lower case b
      (e.g. 'bB').

    # Returns
    An integer mask used to represent a musical key or scale as an argument
    to any of the MusicalHash methods.

    # Raises
    A ValueError if an invalid string is included in the input list.
    """
    # one bit per semitone, starting from A; enharmonic spellings share a bit
    semitone_spellings = (
        ('A',), ('#A', 'bB'), ('B',), ('C',), ('#C', 'bD'), ('D',),
        ('#D', 'bE'), ('E',), ('F',), ('#F', 'bG'), ('G',), ('#G', 'bA'),
    )
    note_map = {}
    for shift, spellings in enumerate(semitone_spellings):
        for spelling in spellings:
            note_map[spelling] = 1 << shift
    scale = 0x0
    for note in notes:
        try:
            scale |= note_map[note]
        except KeyError:
            raise ValueError(
                'The string {} is not a valid musical note'.format(note))
    return scale
def cam_keyboard(message):
    """Reply to *message* with a one-time keyboard listing the available cameras.

    Relies on module-level `bot`, `types` (telebot) and `CAMS` -- presumably a
    telebot handler; TODO confirm.
    """
    keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
    # one button per configured camera, upper-cased for display
    keys = [types.KeyboardButton(k.upper()) for k in CAMS]
    keyboard.add(*keys)
    # NOTE(review): the returned message handle is unused
    msg = bot.reply_to(message, 'Escolha uma câmera:', reply_markup=keyboard)
def slurm_format_bytes_ceil(n):
    """ Format bytes as text.

    SLURM expects KiB, MiB or GiB, but names it KB, MB, GB. SLURM does not
    handle Bytes, only starts at KB.

    >>> slurm_format_bytes_ceil(1)
    '1K'
    >>> slurm_format_bytes_ceil(1234)
    '2K'
    >>> slurm_format_bytes_ceil(12345678)
    '12M'
    >>> slurm_format_bytes_ceil(1234567890)
    '2G'
    >>> slurm_format_bytes_ceil(15000000000)
    '14G'
    """
    if n >= (1024 ** 3):
        return "%dG" % math.ceil(n / (1024 ** 3))
    if n >= (1024 ** 2):
        return "%dM" % math.ceil(n / (1024 ** 2))
    if n >= 1024:
        return "%dK" % math.ceil(n / 1024)
    # FIX: was 'return "1K" % n', which raised TypeError ("1K" has no
    # conversion specifier); sub-KiB values round up to the 1K minimum.
    return "1K"
def factory_payment_account(corp_number: str = 'CP0001234', corp_type_code: str = 'CP',
                            payment_system_code: str = 'PAYBC'):
    """Factory building a PaymentAccount pre-populated with fixture values."""
    fixture = {
        'corp_number': corp_number,
        'corp_type_code': corp_type_code,
        'payment_system_code': payment_system_code,
        'party_number': '11111',
        'account_number': '4101',
        'site_number': '29921',
    }
    return PaymentAccount(**fixture)
def p_l_species_input_geos(wd, ver='1.7', rm_multiple_tagged_rxs=False, debug=False):
    """
    Extract prod/loss species (input.geos) and reaction tags (globchem.dat)

    Parameters
    ----------
    wd (str): Specify the wd to get the results from a run.
    debug (boolean): legacy debug option, replaced by python logging
    ver (str): The GEOS-Chem halogen version that is being used
    rm_multiple_tagged_rxs(boolean): only return one tag per rxn.

    Returns
    -------
    (list) globchem.dat tags and prod/loss ("PD") vars from input.geos

    Notes
    -----
    - This function is useful, but update to GEOS-Chem flexchem ( in >v11)
    will make it redundent and therefore this is not being maintained.
    """
    # find and open input.geos file
    fn = glob.glob(wd+'/*input.geos*')[0]
    if any([(i in fn) for i in ('~', '#')]):
        print(('Trying next "input.geos" file - as FAIL for :', fn))
        fn = glob.glob(wd+'/*input.geos*')[1]
    if debug:
        print(('p_l_species_input_geos called using : ', wd, fn))
    # Read in just the prod loss section
    strs_in_1st_line = 'Number', 'of', 'P/L', 'families'
    section_line_divider = '------------------------+----------' + \
        '--------------------------------------------'
    readrxn = False
    rxns = []
    # FIX: open in text mode ('rb' previously yielded bytes tokens, so every
    # str comparison below silently failed) and close the file via `with`.
    with open(fn) as file_:
        for row in file_:
            row = row.split()
            # once at prod/loss section, start adding to list
            if all([i in row for i in strs_in_1st_line]):
                readrxn = True
            # if at end of prod/loss section, stop adding to list
            if section_line_divider in row:
                readrxn = False
            if readrxn:
                rxns.append(row)
    # -- Only consider 'Family' ( no headers e.g. 'families' )
    rxns = [i for i in rxns if ('families' not in i)]
    rxns = [[i.replace(':', '') for i in r] for r in rxns]
    # Kludge, adjust for extra space 12-99
    # ( This is no longer required for 1.7 + )
    if ver == '1.6':
        [i.pop(0) for i in rxns if ('th' not in i[0])]
    # Extract just PD (input.geos) and vars (globchem.dat vars )
    PD = [rxn[4] for rxn in rxns]
    vars = [rxn[5:] for rxn in rxns]
    if debug:
        print((rxns, PD, vars, ver))
    # remove p/l with multiple values ( start from 12th input) - Kludge?
    if rm_multiple_tagged_rxs:
        PD, vars = [i[11:] for i in (PD, vars)]
        vars = [i[0] for i in vars]
    return PD, vars
def foo():
    """Demonstrates the call format for multi-parameter functions and for class instantiation."""
    ret = foo_long(a=1, b=2, c=3, d=4,
                   e=5, f=6, g=7, h=8)
    # Class instantiation: layout for passing many keyword arguments
    object_ = ClassName(
        a=1, b=2, c=3, d=4,
        e=5, f=6, g=7, h=8
    )
    return ret
def posture_seq(directory, postures, sampling_fraction):
    """posture_seq samples locomotion files from a directory and
    converts them to strings of posture sequences.

    Input:
        directory = the directory containing locomotion files
        postures = the mat file or numpy array of template postures
        sampling_fraction = the fraction of files you want to sample
    Output:
        all_postures = a list of posture sequences (of type string)
    """
    num_postures = len(postures)
    angle_data = loading_data(directory, sampling_fraction)[0]
    # FIX: all_postures was never initialised (NameError on first append)
    all_postures = []
    # FIX: the inner frame loop reused the outer index variable `i`, which
    # corrupted the outer iteration after the first long recording.
    for angles, m_a in angle_data:
        # keep the original length check on the second element of each pair
        if len(m_a) <= 1000:
            continue
        posture_sequence = ''
        for frame in range(len(angles)):
            # nearest template posture (euclidean distance) for this frame
            distances = [np.inf] * num_postures
            for j in range(num_postures):
                distances[j] = np.linalg.norm(angles[frame] - postures[:, j])
            val = min(distances)
            ind = distances.index(val)
            posture_sequence = posture_sequence + ' ' + str(ind)
        all_postures.append(posture_sequence)
    return all_postures
def membrane_diag(voxel_size=1, bound='dct2', dim=None, weights=None):
    """Diagonal of the membrane regulariser.

    If no weight map is provided, the diagonal of the membrane regulariser
    is a scaled identity with scale `2 * alpha`, where
    `alpha = vx.reciprocal().square().sum()`

    However, if a weight map is provided, the diagonal of the regulariser
    is a convolved version of the weight map. In 2D, the convolution kernel
    has a first order "diamond" shape:
            b0
        b1  a   b1
            b0

    Parameters
    ----------
    weights : (..., *spatial) tensor
        Weights from the reweighted least squares scheme
    voxel_size : float or sequence[float], default=1
        Voxel size
    bound : str, default='dct2'
        Boundary condition.
    dim : int, optional
        Number of spatial dimensions.
        Default: from voxel_size

    Returns
    -------
    diag : () or (..., *spatial) tensor
        Convolved weight map if provided.
        Else, central convolution weight.
    """
    vx = core.utils.make_vector(voxel_size)
    if dim is None:
        dim = len(vx)
    vx = core.utils.make_vector(vx, dim)
    if weights is not None:
        weights = torch.as_tensor(weights)
        backend = dict(dtype=weights.dtype, device=weights.device)
        # move spatial dimensions to the front
        spdim = list(range(weights.dim() - dim, weights.dim()))
        weights = core.utils.movedim(weights, spdim, list(range(dim)))
    else:
        backend = dict(dtype=vx.dtype, device=vx.device)
    vx = vx.to(**backend)
    # per-dimension weight: 1 / vx**2
    vx = vx.square().reciprocal()
    if weights is None:
        # no weight map: diagonal is a scaled identity, 2 * sum(1/vx**2)
        return 2 * vx.sum()
    from ._finite_differences import _window1d, _lincomb
    # accumulate the "diamond" convolution: centre plus both 1-voxel
    # shifts along each spatial dimension
    values = [[weights]]
    dims = [None] + [d for d in range(dim) for _ in range(2)]
    kernel = [2 * vx.sum()]
    for d in range(dim):
        values.extend(_window1d(weights, d, [-1, 1], bound=bound))
        kernel += [vx[d], vx[d]]
    weights = _lincomb(values, kernel, dims, ref=weights)
    # send spatial dimensions to the back
    weights = core.utils.movedim(weights, list(range(dim)), spdim)
    return weights
def openpty(mode=None, winsz=None, name=False):
    """openpty() -> (master_fd, slave_fd)
    Open a pty master/slave pair, using os.openpty() if possible.

    mode, if given, is a termios attribute list applied to the slave;
    winsz, if given, is a window size applied to the slave;
    if name is true, the slave device's path is returned as a third item.
    """
    master_fd, slave_fd = os.openpty()
    if mode:
        # Apply the requested termios settings to the slave end.
        tty.tcsetattr(slave_fd, tty.TCSAFLUSH, mode)
    # NOTE(review): tty.HAVE_WINSZ / tty.tcsetwinsize are not part of the
    # stock `tty` module -- confirm this runs against a build/patched module
    # that provides terminal window-size support.
    if tty.HAVE_WINSZ and winsz:
        tty.tcsetwinsize(slave_fd, winsz)
    if name:
        return master_fd, slave_fd, os.ttyname(slave_fd)
    else:
        return master_fd, slave_fd | 5,326,037 |
def get_groups(parsed, store, conf):
    """
    Return groups based on argument provided

    :param Namespace parsed: arguments parsed
    :param store: Otter scaling group collection
    :param dict conf: config
    :return: Deferred fired with list of {"tenantId": .., "groupId": ..} dict
    """
    log = mock_log()
    if parsed.group:
        # Explicit "tenant:group" pairs given on the command line.
        groups = [g.split(":") for g in parsed.group]
        return succeed(
            [{"tenantId": tid, "groupId": gid} for tid, gid in groups])
    elif parsed.all:
        d = store.get_all_valid_groups()
    elif parsed.tenant_id:
        d = get_groups_of_tenants(log, store, parsed.tenant_id)
    elif parsed.disabled_tenants:
        # BUG FIX: the original passed ``filter(<lambda>)`` -- a one-argument
        # call -- to addCallback, which raised TypeError immediately instead
        # of filtering the group list.
        non_conv_tenants = set(conf["non-convergence-tenants"])
        d = store.get_all_valid_groups()
        d.addCallback(
            lambda groups: [g for g in groups
                            if g["tenantId"] not in non_conv_tenants])
    elif parsed.conf_conv_tenants:
        d = get_groups_of_tenants(log, store, conf["convergence-tenants"])
    else:
        raise SystemExit("Unexpected group selection")
    return d
# | 5,326,038 |
def get_content_details(site_code, release_uuid, content_type, content_key):
    """Fetch a document (plus dynamic elements) from a content release.

    If ``release_uuid`` is falsy, the site's live release is used. For
    preview releases whose document is missing, the lookup falls back to
    the configured base release (or the current live release).
    Returns the parsed document dict on success, otherwise the error
    response from the publisher API.
    """
    publisher_api = PublisherAPI()
    content_release = None
    try:
        if release_uuid:
            # get ContentRelease
            content_release = WSSPContentRelease.objects.get(
                site_code=site_code,
                uuid=release_uuid,
            )
        else:
            # get live ContentRelease
            response = publisher_api.get_live_content_release(site_code)
            if response['status'] == 'error':
                return response
            else:
                release = response['content']
                content_release = WSSPContentRelease.objects.get(id=release.id)
                release_uuid = content_release.uuid
    except WSSPContentRelease.DoesNotExist:
        # Deliberate best-effort: proceed without a resolved release object.
        pass
    # Fetch document from the content release.
    response = publisher_api.get_document_from_content_release(
        site_code,
        release_uuid,
        content_key,
        content_type,
    )
    base_content_release = None
    if response['status'] == 'error' and response['error_code'] == 'release_document_does_not_exist':
        # Release doc not found, try in the base release for preview releases.
        # NOTE(review): if the DoesNotExist above fired, content_release is
        # None here and .status raises AttributeError -- confirm intended.
        if content_release.status == 0:
            if content_release.use_current_live_as_base_release:
                response = publisher_api.get_live_content_release(site_code)
                if response['status'] == 'success':
                    release = response['content']
                    base_content_release = WSSPContentRelease.objects.get(id=release.id)
            else:
                base_content_release = content_release.base_release
        if base_content_release != None:
            # Fetch document from the base content release if available (should only happen for preview releases).
            response = publisher_api.get_document_from_content_release(
                site_code,
                base_content_release.uuid,
                content_key,
                content_type,
            )
    if response['status'] == 'success':
        data = json.loads(response['content'].document_json)
        response_extra = publisher_api.get_document_extra_from_content_release(
            site_code,
            release_uuid,
            content_key,
            content_type,
        )
        if response_extra['status'] == 'success':
            try:
                dynamic_element_keys = json.loads(response_extra['content'].get(key='dynamic_element_keys').content)
                data, updated = document_load_dynamic_elements(content_release, data, dynamic_element_keys)
            except:
                # Best-effort enrichment: missing/invalid dynamic elements
                # leave the base document untouched.
                pass
    else:
        return response
    return data | 5,326,039 |
def find_pgdb(root):
    """Find all the personal geodatabases below root.

    Walks the directory tree, prints every Access file path found, and
    inspects those that are personal geodatabases.
    """
    for folder, _, file_names in os.walk(root):
        for file_name in file_names:
            if is_access_file(file_name):
                path = os.path.join(folder, file_name)
                print(path)
                # NOTE(review): is_pgdb() receives the bare file name while
                # the full path is available -- confirm it is not meant to
                # take ``path`` instead.
                if is_pgdb(file_name):
                    walk_workspaces.inspect_workspace(0, path, False) | 5,326,040 |
def _czt(x, M=None, W=None, A=1.0):
"""Calculate CZT (Stripped down to the basics)."""
# Unpack arguments
N = len(x)
if M is None:
M = N
if W is None:
W = np.exp(-2j * np.pi / M)
A = np.complex128(A)
W = np.complex128(W)
# CZT algorithm
k = np.arange(max(M, N))
Wk22 = W ** (-(k ** 2) / 2)
r = Wk22[:N]
c = Wk22[:M]
X = A ** -k[:N] * x / r
X = scipy.linalg.matmul_toeplitz((c, r), X)
X /= c
return X | 5,326,041 |
def measure_link_vsize(output_file, args):
    """
    Execute |args|, and measure the maximum virtual memory usage of the process,
    printing it to stdout when finished.

    Returns the subprocess's exit code. The actual sampling is done by
    measure_vsize_threadfunc running in a background thread.
    """
    proc = subprocess.Popen(args)
    t = threading.Thread(target=measure_vsize_threadfunc,
                         args=(proc, output_file))
    t.start()
    # Wait for the linker to finish.
    exitcode = proc.wait()
    # ...and then wait for the background thread to finish.
    t.join()
    return exitcode | 5,326,042 |
def build_obs_act_forward_fc(
    n_out: int,
    depth: int,
    hidden: int,
    act_layer: Any,
    last_layer: Optional[Any] = None,
) -> hk.Transformed:
    """Build a simple fully-connected forward step that takes an observation & an action.
    Args:
        n_out (int): Number of outputs.
        depth (int): Depth of layers.
        hidden (int): # of hidden units of fc.
        act_layer (Any): Activation layer.
        last_layer (Any): Last activation layer.
    Returns:
        hk.Transformed:
            Takes [batch x ?] observation and [batch x ?] actions.
            Returns [batch x n_out] Array.
    """
    # vmap over the batch dimension: the inner function sees single samples.
    @jax.vmap
    def forward(obs: Array, act: Array) -> Array:
        # concat observation and action
        chex.assert_equal_rank((obs, act))
        obs_act = jnp.hstack((obs, act))
        # set up layers
        modules = []
        if depth > 0:
            # depth hidden layers, each followed by the activation.
            modules.append(hk.Linear(hidden))
            for _ in range(depth - 1):
                modules += [act_layer, hk.Linear(hidden)]
            modules += [act_layer, hk.Linear(n_out)]
        else:
            # depth == 0: a single linear projection, no hidden layers.
            modules.append(hk.Linear(n_out))
        if last_layer is not None:
            modules.append(last_layer)
        return hk.Sequential(modules)(obs_act.astype(float))
    return hk.without_apply_rng(hk.transform(forward)) | 5,326,043 |
def backup_files(srcs, dests):
    """
    Write each given file to a pool of destination directories. If the
    filename being written to the destination already exists there it is
    renamed with a timestamp before any file-writes take place.
    Args:
        srcs: List of src files
        dests: List of destination directories
    """
    # Per-file work (timestamp-rename + copy) is delegated to backup_file.
    for src in srcs:
        backup_file(src, dests) | 5,326,044 |
def update_nested(key, d, other):
    """Update *d[key]* with the *other* dictionary preserving data.
    If *d* doesn't contain the *key*, it is updated with *{key: other}*.
    If *d* contains the *key*, *d[key]* is inserted into *other[key]*
    (so that it is not overriden).
    If *other* contains *key* (and possibly more nested *key*-s),
    then *d[key]* is inserted into the deepest level
    of *other.key.key...* Finally, *d[key]* becomes *other*.
    Example:
    >>> context = {"variable": {"name": "x"}}
    >>> new_var_context = {"name": "n"}
    >>> update_nested("variable", context, copy.deepcopy(new_var_context))
    >>> context == {'variable': {'name': 'n', 'variable': {'name': 'x'}}}
    True
    >>>
    >>> update_nested("variable", context, {"name": "top"})
    >>> context == {
    ...     'variable': {'name': 'top',
    ...          'variable': {'name': 'n', 'variable': {'name': 'x'}}}
    ... }
    True
    *other* is modified in general. Create that on the fly
    or use *copy.deepcopy* when appropriate.
    Recursive dictionaries (containing references to themselves)
    are strongly discouraged and meaningless when nesting.
    If *other[key]* is recursive, :exc:`.LenaValueError` may be raised.
    """
    # there was an idea to add a keyword argument copy_other
    # (by default True), but the user can do that him/herself
    # with copy.deepcopy when needed. Otherwise it would be
    # unnecessary complication of this interface.
    # Only one key is nested. This encourages design when
    # 1) elements combine their contexts into one key
    # (like {"split_into_bins": {"variable": {}, "histogram": {}}})
    # 2) elements change only one key ("variable", "histogram",...).
    def get_most_nested_subdict_with(key, d):
        # Follow the chain d[key][key]... and return the deepest dict that
        # does NOT contain *key*; revisiting a dict means a cycle.
        nested_dicts = []
        while True:
            if key in d:
                if d in nested_dicts:
                    raise lena.core.LenaValueError(
                        "recursive *other* is forbidden"
                    )
                nested_dicts.append(d)
                d = d[key]
            else:
                return d
    if key in d:
        other_most_nested = get_most_nested_subdict_with(key, other)
        # insert d[key] at the lowest other.key.key....
        other_most_nested[key] = d[key]
    # *other* (now carrying the old d[key] at its deepest level) replaces d[key].
    d[key] = other | 5,326,045 |
def quicksort(arr, low, high):
    """Sort ``arr[low:high + 1]`` in place with quicksort; returns ``arr``.

    Pivot placement is delegated to the module-level ``partition`` helper.
    """
    # Guard clause: slices of length <= 1 are already sorted.
    if low >= high:
        return arr
    pivot_index = partition(arr, low, high)
    quicksort(arr, low, pivot_index - 1)
    quicksort(arr, pivot_index + 1, high)
    return arr
# | 5,326,046 |
def make_word_dict():
    """Read 'words.txt' and return a dict mapping each word to ''.

    Each line is stripped and used as a key; values are empty strings
    (the dict is effectively used as a set with O(1) membership tests).

    Returns:
        dict: ``{word: ''}`` for every line in words.txt.
    """
    # ``with`` guarantees the handle is closed (the original leaked it).
    with open('words.txt') as fin:
        return {line.strip(): '' for line in fin}
# | 5,326,047 |
def voidobject(key_position: int, offset: int) -> HitObject:
    """Build a judgement-free (void) hit object (single notes only).

    Args:
        key_position: Key position; when counting from 1, pass the value
            looked up from ``key_asset``.
        offset: Offset at which the note is placed.

    Returns:
        HitObject: the void-note HitObject.
    """
    # BUG FIX: the original passed the undefined name ``max_offset`` as the
    # second positional argument, raising NameError on every call. Per the
    # docstring, the caller-supplied placement ``offset`` is intended.
    return HitObject(key_position, offset, True, end_offset=offset)
# | 5,326,048 |
def visualize_gts(
    run_dir,
    dataset,
    inference_config,
    show_bbox=True,
    show_scores=False,
    show_class=True,
):
    """Visualizes gts.

    Renders every image's ground-truth boxes/masks/classes to PNGs under
    ``<run_dir>/gt_vis``, one file per image id.
    """
    # Create subdirectory for gt visualizations
    vis_dir = os.path.join(run_dir, "gt_vis")
    utils.mkdir_if_missing(vis_dir)
    # Feed images one by one
    image_ids = dataset.image_ids
    print("VISUALIZING GROUND TRUTHS")
    for image_id in tqdm(image_ids):
        # Load image and ground truth data and resize for net
        image, _, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset, inference_config, image_id, use_mini_mask=False
        )
        if inference_config.IMAGE_CHANNEL_COUNT == 1:
            # Grayscale input: replicate the channel so display code gets RGB.
            image = np.repeat(image, 3, axis=2)
        # Visualize
        # Ground truths have no real confidences; fake 1.0 scores if requested.
        scores = np.ones(gt_class_id.size) if show_scores else None
        fig = plt.figure(figsize=(1.7067, 1.7067), dpi=300, frameon=False)
        ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
        fig.add_axes(ax)
        visualize.display_instances(
            image,
            gt_bbox,
            gt_mask,
            gt_class_id,
            ["bg", "obj"],
            scores,
            ax=ax,
            show_bbox=show_bbox,
            show_class=show_class,
        )
        file_name = os.path.join(vis_dir, "gt_vis_{:06d}".format(image_id))
        fig.savefig(file_name, transparent=True, dpi=300)
        # Close to keep matplotlib's figure count (and memory) bounded.
        plt.close() | 5,326,049 |
def mock_user_save():
    """Stub factory simulating an exception raised while saving a user."""
    def user_save(*args, **kwargs):
        # Always fail, mimicking a database constraint violation.
        raise IntegrityError
    return user_save | 5,326,050 |
def system(command, ignore_status=False):
    """Run a command.

    Delegates to system_output() and logs whatever (non-empty) output the
    command produced at debug level. Nothing is returned.
    """
    out = system_output(command, ignore_status)
    if out:
        logging.debug(out) | 5,326,051 |
def index():
    """Home page: paginated article list plus banners, categories and tags."""
    banners = Banner.query_used()
    page = request.args.get("page", 1, type=int)  # requested page number
    per_page = current_app.config["MYZONE_ARTICLE_PER_PAGE"]  # articles per page
    pagination = Article.query_order_by_createtime(page, per_page=per_page)  # build the paginator
    articles = pagination.items  # query results for the current page
    categories = Category.query_all()
    tags = Tag.query_all()
    return render_template(
        "main/index.html",
        pagination=pagination,
        articles=articles,
        categories=categories,
        tags=tags,
        timestamp_to_strftime=timestamp_to_str,
        func_id=0,
        banners=banners,
    ) | 5,326,052 |
async def test_legacy_config_entry_diagnostics(
    hass, hass_client, config_entry, setup_base_platform
):
    """Test config entry diagnostics for legacy integration doesn't fail."""
    # Patch out the legacy Nest client so setup needs no real credentials.
    with patch("homeassistant.components.nest.legacy.Nest"):
        await setup_base_platform()
    # Legacy entries expose no diagnostics; an empty dict means "no crash".
    assert await get_diagnostics_for_config_entry(hass, hass_client, config_entry) == {} | 5,326,053 |
def create_temporal_vis(ldf, col):
    """
    Creates and populates Vis objects for different timescales in the provided temporal column.
    Parameters
    ----------
    ldf : lux.core.frame
        LuxDataFrame with underspecified intent.
    col : str
        Name of temporal column.
    Returns
    -------
    vlist : [Vis]
        Collection of Vis objects.
    """
    formatted_date = pd.to_datetime(ldf[col], format="%Y-%m-%d")
    # The score field (5..1) ranks the overall view above coarser breakdowns.
    overall_vis = Vis([lux.Clause(col, data_type="temporal")], source=ldf, score=5)
    year_col = col + " (year)"
    year_df = LuxDataFrame({year_col: pd.to_datetime(formatted_date.dt.year, format="%Y")})
    year_vis = Vis([lux.Clause(year_col, data_type="temporal")], source=year_df, score=4)
    month_col = col + " (month)"
    month_df = LuxDataFrame({month_col: formatted_date.dt.month})
    month_vis = Vis(
        [lux.Clause(month_col, data_type="temporal", timescale="month")], source=month_df, score=3
    )
    day_col = col + " (day)"
    day_df = LuxDataFrame({day_col: formatted_date.dt.day})
    day_df.set_data_type(
        {day_col: "nominal"}
    )  # Since day is high cardinality 1-31, it can get recognized as quantitative
    day_vis = Vis([lux.Clause(day_col, data_type="temporal", timescale="day")], source=day_df, score=2)
    week_col = col + " (day of week)"
    week_df = lux.LuxDataFrame({week_col: formatted_date.dt.dayofweek})
    week_vis = Vis(
        [lux.Clause(week_col, data_type="temporal", timescale="day of week")], source=week_df, score=1
    )
    # Drop breakdowns with a single distinct value -- they carry no signal.
    unique_year_values = len(year_df[year_col].unique())
    unique_month_values = len(month_df[month_col].unique())
    unique_week_values = len(week_df[week_col].unique())
    vlist = []
    vlist.append(overall_vis)
    if unique_year_values != 1:
        vlist.append(year_vis)
    if unique_month_values != 1:
        vlist.append(month_vis)
    if unique_week_values != 1:
        vlist.append(week_vis)
    return vlist | 5,326,054 |
def load(inputs):
    """load(inputs) -> data
    Loads the contents of a file, an iterable of files, or an iterable of
    :py:class:`bob.io.base.File`'s into a :py:class:`numpy.ndarray`.
    **Parameters:**
    ``inputs`` : various types
      This might represent several different entities:
      1. The name of a file (full path) from where to load the data. In this
         case, this assumes that the file contains an array and returns a loaded
         numpy ndarray.
      2. An iterable of filenames to be loaded in memory. In this case, this
         would assume that each file contains a single 1D sample or a set of 1D
         samples, load them in memory and concatenate them into a single and
         returned 2D :py:class:`numpy.ndarray`.
      3. An iterable of :py:class:`File`. In this case, this would assume
         that each :py:class:`File` contains a single 1D sample or a set
         of 1D samples, load them in memory if required and concatenate them into
         a single and returned 2D :py:class:`numpy.ndarray`.
      4. An iterable with mixed filenames and :py:class:`File`. In this
         case, this would returned a 2D :py:class:`numpy.ndarray`, as described
         by points 2 and 3 above.
    **Returns:**
    ``data`` : :py:class:`numpy.ndarray`
      The data loaded from the given ``inputs``.
    """
    # BUG FIX: ``collections.Iterable`` was removed in Python 3.10; the ABC
    # lives in ``collections.abc``.
    from collections.abc import Iterable
    import numpy
    if _is_string(inputs):
        if not os.path.exists(inputs):
            raise RuntimeError(f"`{inputs}' does not exist!")
        return File(inputs, 'r').read()
    elif isinstance(inputs, Iterable):
        # Load each element (filename or File), then stack rows into 2D.
        retval = []
        for obj in inputs:
            if _is_string(obj):
                retval.append(load(obj))
            elif isinstance(obj, File):
                retval.append(obj.read())
            else:
                raise TypeError(
                    "Iterable contains an object which is not a filename nor a "
                    "bob.io.base.File.")
        return numpy.vstack(retval)
    else:
        raise TypeError(
            "Unexpected input object. This function is expecting a filename, "
            "or an iterable of filenames and/or bob.io.base.File's")
# | 5,326,055 |
def num_neighbours(skel) -> np.ndarray:
    """Computes the number of neighbours of each skeleton pixel.
    Parameters
    ----------
    skel : (H, W) array_like
        Input skeleton image.
    Returns
    -------
    (H, W) array_like
        Array containing the numbers of neighbours at each skeleton pixel and 0 elsewhere.
    """
    skel = np.asarray(skel, dtype=int)
    # Convolving with the module-level neighbourhood mask counts set pixels
    # around each position; multiplying by skel zeroes out non-skeleton pixels.
    return filters.convolve(skel, _NB_MASK, mode='constant') * skel | 5,326,056 |
def test_e():
    """
    put and rename
    :return:
    """
    c2 = LRUCache(10000)
    c2.cache_object("X", 50, 10)
    # Fill the cache with several more entries of growing size/priority.
    for x in [100, 200, 300, 400, 500, 600, 700]:
        c2.cache_object('a_%r' % x, x, x)
    assert(c2.check_sanity())
    # now get, remove and reput x
    c2.get_cached("X", 800)
    assert(c2.check_sanity())
    c2.remove_cached("X")
    assert(c2.check_sanity())
    c2.cache_object("X", 50, 900)
    c2.get_cached("X", 1000)
    # Invariants must hold after every mutation sequence.
    assert(c2.check_sanity()) | 5,326,057 |
def FakeSubject(n=300, conc=0.1, num_reads=400, prevalences=None):
    """Makes a fake Subject.
    If prevalences is provided, n and conc are ignored.
    n: number of species
    conc: concentration parameter
    num_reads: number of reads
    prevalences: numpy array of prevalences (overrides n and conc)

    Returns a processed Subject built from the simulated read counts.
    """
    # generate random prevalences
    if prevalences is None:
        dirichlet = thinkbayes2.Dirichlet(n, conc=conc)
        prevalences = dirichlet.Random()
        prevalences.sort()
    # generate a simulated sample
    pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
    cdf = pmf.MakeCdf()
    sample = cdf.Sample(num_reads)
    # collect the species counts
    hist = thinkbayes2.Hist(sample)
    # extract the data
    data = [count for species, count in hist.Items()]
    data.sort()
    # make a Subject and process
    subject = Subject('simulated')
    for species, count in hist.Items():
        subject.Add(species, count)
    subject.Done()
    return subject | 5,326,058 |
async def add(gc: GroupControl, slaves):
    """Add speakers to group.

    Echoes the intent, then the device's response to the add request.
    """
    click.echo("Adding to existing group: %s" % slaves)
    click.echo(await gc.add(slaves)) | 5,326,059 |
def build_results_candidate_people():
    """
    Return DataFrame containing results, candidates, and people joined

    Also flags incumbents as of the 2020-11-03 election and fills in a
    placeholder name for aggregated write-in rows.
    """
    people = pd.read_csv('data/people.csv')
    candidates = pd.read_csv('data/candidates.csv')
    results = pd.read_csv('data/results.csv')
    results_candidates = pd.merge(
        results #[['candidate_id', 'person_id', 'smd_id']]
        , candidates #[['candidate_id']]
        , how='left'
        , on=['candidate_id', 'smd_id']
        )
    rcp = pd.merge(results_candidates, people, how='left', on='person_id') # results-candidates-people
    # Determine who were incumbent candidates at the time of the election
    election_date = datetime(2020, 11, 3, tzinfo=pytz.timezone('America/New_York'))
    commissioners = list_commissioners(status=None)
    # Incumbent = term straddles election day.
    incumbents = commissioners[(commissioners.start_date < election_date) & (election_date < commissioners.end_date)]
    incumbent_candidates = pd.merge(incumbents, candidates, how='inner', on='person_id')
    incumbent_candidates['is_incumbent'] = True
    rcp = pd.merge(rcp, incumbent_candidates[['candidate_id', 'is_incumbent']], how='left', on='candidate_id')
    rcp['is_incumbent'] = rcp['is_incumbent'].fillna(False)
    # Sort by SMD ascenting, Votes descending
    rcp = rcp.sort_values(by=['smd_id', 'votes'], ascending=[True, False])
    # Placeholder name for all write-in candidates.
    # We do not know the combination of name and vote count for write-in candidates
    # We only know the name of the write-in winners
    rcp['full_name'] = rcp['full_name'].fillna('Write-ins combined')
    rcp['write_in_winner_int'] = rcp['write_in_winner'].astype(int)
    return rcp | 5,326,060 |
def div(lhs: Value, rhs: Value) -> Value:
    """ Divides `lhs` by `rhs`. """
    # NOTE(review): this is *floor* division of the evaluated operands --
    # confirm truncating semantics are intended rather than true division.
    return lhs.run() // rhs.run() | 5,326,061 |
def test_max(n, result):
    """Test max for some value of n.

    Parametrized: `n` is the input, `result` the expected maximum.
    """
    from src.max_min import max_cw
    assert max_cw(n) == result | 5,326,062 |
def get_seat_total_per_area(party_id: PartyID) -> dict[AreaID, int]:
    """Return the number of seats per area for that party."""
    # Outer join keeps areas with zero seats; COUNT over the NULL seat ids
    # then yields 0 for them.
    area_ids_and_seat_counts = db.session \
        .query(
            DbArea.id,
            db.func.count(DbSeat.id)
        ) \
        .filter_by(party_id=party_id) \
        .outerjoin(DbSeat) \
        .group_by(DbArea.id) \
        .all()
    return dict(area_ids_and_seat_counts) | 5,326,063 |
def toCamelCase(string: str):
    """
    Converts a string to camel case

    Words are split on underscores and spaces; the first word is kept
    verbatim and every following word is capitalised. All-uppercase
    strings are returned unchanged.

    Parameters
    ----------
    string: str
        The string to convert
    """
    text = str(string)
    # All-uppercase identifiers (e.g. acronyms) pass through untouched.
    if text.isupper():
        return text
    # Split on underscores first, then on spaces, preserving word order.
    words = []
    for chunk in text.split("_"):
        words.extend(chunk.split(" "))
    return words[0] + "".join(word.capitalize() for word in words[1:])
# | 5,326,064 |
def sync_command(args: argparse.Namespace):
    """
    Copy all files for `submission-id` from `deployment` to `dst-deployment`.
    """
    src = Staging[args.deployment].ssds
    dst = Staging[args.dst_deployment].ssds
    # ssds.sync yields per-file progress; we only need the side effects.
    for _ in ssds.sync(args.submission_id, src, dst):
        pass | 5,326,065 |
def reject():
    """
    Respond with red.

    Signals failure by emitting the configured failure colour.
    """
    api.respond(color = _colors.fail) | 5,326,066 |
def aggregate_testsuite(testsuite):
    """ Compute aggregate results for a single test suite (ElemTree node)

    :param testsuite: ElemTree XML node for a testsuite
    :return: AggregateResult, or None when no suite node is given
    """
    if testsuite is None:
        return None

    def _int_attr(name):
        # Missing or empty attributes count as zero.
        return int(testsuite.attrib.get(name) or 0)

    tests = _int_attr('tests')
    failures = _int_attr('failures')
    disabled = _int_attr('disabled')
    errors = _int_attr('errors')
    duration = float(testsuite.attrib.get('time') or 0.0)
    # Fraction of non-failing tests; an empty suite rates 0 by convention.
    success_rate = (tests - failures) / float(tests) if tests else 0.0
    return AggregateResult(tests=tests, failures=failures, disabled=disabled,
                           errors=errors, success_rate=success_rate,
                           duration=duration)
# | 5,326,067 |
def _print_message(message, file=None):
"""
"""
if message:
if file is None:
file = sys.stdout
file.write(message) | 5,326,068 |
def test_corner_case():
    """tricky corner case where some variable may have a
    name attribute
    """
    # An arbitrary object exposing ``name`` must still be rejected by
    # Manifest with a TypeError rather than being treated as valid input.
    class Person:
        name = "hello"
    p = Person()
    with pytest.raises(TypeError):
        manifest = Manifest(p) | 5,326,069 |
def rowwidth(view, row):
    """Returns the number of characters of ``row`` in ``view``.

    Implemented as the column of the line's end point (Sublime Text API).
    """
    return view.rowcol(view.line(view.text_point(row, 0)).end())[1] | 5,326,070 |
def patch_broken_pipe_error():
    """
    Monkey patch BaseServer.handle_error to not write a stack trace to stderr
    on broken pipe: <http://stackoverflow.com/a/22618740/362702>
    """
    import sys
    # Py2/Py3 compatibility: the module was renamed socketserver in Python 3.
    try:
        from SocketServer import BaseServer
    except:
        from socketserver import BaseServer
    from wsgiref import handlers
    # Keep references to the originals so non-broken-pipe errors still get
    # the default handling.
    handle_error = BaseServer.handle_error
    log_exception = handlers.BaseHandler.log_exception
    def is_broken_pipe_error():
        # Matched by repr because the concrete exception classes differ
        # across platforms/Python versions.
        type, err, tb = sys.exc_info()
        r = repr(err)
        return r in ("error(32, 'Broken pipe')", "error(54, 'Connection reset by peer')")
    def my_handle_error(self, request, client_address):
        if not is_broken_pipe_error():
            handle_error(self, request, client_address)
    def my_log_exception(self, exc_info):
        if not is_broken_pipe_error():
            log_exception(self, exc_info)
    BaseServer.handle_error = my_handle_error
    handlers.BaseHandler.log_exception = my_log_exception | 5,326,071 |
def dry_query(event, *args):
    """Handles running a dry query
    Args:
        url: dry_query?page&page_length&review_id
        body:
            search: search dict <wrapper/input_format.py>
    Returns:
        {
            <wrapper/output_format.py>
        }
    """
    # try:
    body = json.loads(event["body"])
    search = body.get('search')
    # queryStringParameters may be absent (None): AttributeError is the
    # deliberate signal to fall back to the defaults below.
    try:
        page = int(event.get('queryStringParameters').get('page', 1))
    except AttributeError:
        page = 1
    try:
        page_length = int(
            event.get('queryStringParameters').get('page_length', 50))
    except AttributeError:
        page_length = 50
    results = slr.conduct_query(search, page, page_length)
    # (optionally) mark previously persisted results
    try:
        review_id = event.get('queryStringParameters').get('review_id')
        review = connector.get_review_by_id(review_id)
        results = slr.results_persisted_in_db(results, review)
    except AttributeError:
        pass
    return make_response(status_code=201, body=results)
    # except Exception as e:
    #     return make_response(status_code=500, body={"error": e}) | 5,326,072 |
def fixed_prior_to_measurements(coords, priors):
    """
    Convert the fixed exchange and met conc priors to measurements.

    Returns a pair of DataFrames (exchange first, then metabolite
    concentration) with columns target_id / measurement / error_scale.
    """
    fixed_exchange = get_name_ordered_overlap(coords, "reaction_ind", ["exchange", "fixed_x_names"])
    fixed_met_conc = get_name_ordered_overlap(coords, "metabolite_ind", ["metabolite", "fixed_x_names"])
    prior_met_conc_fixed = extract_prior_2d("metabolite", priors, fixed_met_conc, coords["condition"],
                                            DEFAULT_MET_CONC_MEAN, DEFAULT_MET_CONC_SCALE)
    prior_exchange_fixed = extract_prior_2d("exchange", priors, fixed_exchange, coords["condition"],
                                            DEFAULT_EXCHANGE_MEAN, DEFAULT_EXCHANGE_SCALE)
    # Expand the IndPrior2d to the pandas dataframe format
    # (prior loc/scale become the measurement and its error scale).
    fixed_met_prior_df = prior_met_conc_fixed.to_dataframe("mic").rename(
        columns={"parameter": "target_id", "loc": "measurement", "scale": "error_scale"})
    fixed_exchange_prior_df = prior_exchange_fixed.to_dataframe("flux").rename(
        columns={"parameter": "target_id", "loc": "measurement", "scale": "error_scale"})
    return fixed_exchange_prior_df, fixed_met_prior_df | 5,326,073 |
def get_batch_size():
    """Returns the batch size tensor.

    Looked up from the global-variable registry under GraphKeys.BATCH_SIZE.
    """
    return get_global_variable(GraphKeys.BATCH_SIZE) | 5,326,074 |
def get_dataloader(config: ExperimentConfig, tfms: Tuple[List, List] = None):
    """ get the dataloaders for training/validation

    Returns a (train_loader, valid_loader) pair. For 2D/3D data the split is
    taken either from separate validation directories or from a random
    subset of the training set; 1D data requires the altdataset toolbox.
    """
    if config.dim > 1:
        # get data augmentation if not defined
        train_tfms, valid_tfms = get_data_augmentation(config) if tfms is None else tfms
        # check number of jobs requested and CPUs available
        num_cpus = os.cpu_count()
        if num_cpus < config.n_jobs:
            logger.warning(f'Requested more workers than available (n_jobs={config.n_jobs}, # cpus={num_cpus}). '
                           f'Setting n_jobs={num_cpus}.')
            config.n_jobs = num_cpus
        # define dataset and split into training/validation set
        use_nii_ds = config.ext is None or 'nii' in config.ext
        dataset = MultimodalNiftiDataset.setup_from_dir(config.source_dir, config.target_dir, Compose(train_tfms),
                                                        preload=config.preload) if use_nii_ds else \
            MultimodalImageDataset.setup_from_dir(config.source_dir, config.target_dir, Compose(train_tfms),
                                                  ext='*.' + config.ext, color=config.color, preload=config.preload)
        logger.info(f'Number of training images: {len(dataset)}')
        if config.valid_source_dir is not None and config.valid_target_dir is not None:
            # Explicit validation directories provided: build a separate dataset.
            valid_dataset = MultimodalNiftiDataset.setup_from_dir(config.valid_source_dir, config.valid_target_dir,
                                                                  Compose(valid_tfms),
                                                                  preload=config.preload) if use_nii_ds else \
                MultimodalImageDataset.setup_from_dir(config.valid_source_dir, config.valid_target_dir,
                                                      Compose(valid_tfms),
                                                      ext='*.' + config.ext, color=config.color, preload=config.preload)
            logger.info(f'Number of validation images: {len(valid_dataset)}')
            train_loader = DataLoader(dataset, batch_size=config.batch_size, num_workers=config.n_jobs, shuffle=True,
                                      pin_memory=config.pin_memory, worker_init_fn=init_fn)
            valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, num_workers=config.n_jobs,
                                      pin_memory=config.pin_memory, worker_init_fn=init_fn)
        else:
            # setup training and validation set
            # No validation dirs: carve a random valid_split out of the
            # training set via index samplers over the same dataset.
            num_train = len(dataset)
            indices = list(range(num_train))
            split = int(config.valid_split * num_train)
            valid_idx = np.random.choice(indices, size=split, replace=False)
            train_idx = list(set(indices) - set(valid_idx))
            train_sampler = SubsetRandomSampler(train_idx)
            valid_sampler = SubsetRandomSampler(valid_idx)
            # set up data loader for nifti images
            train_loader = DataLoader(dataset, sampler=train_sampler, batch_size=config.batch_size,
                                      num_workers=config.n_jobs, pin_memory=config.pin_memory, worker_init_fn=init_fn)
            valid_loader = DataLoader(dataset, sampler=valid_sampler, batch_size=config.batch_size,
                                      num_workers=config.n_jobs, pin_memory=config.pin_memory, worker_init_fn=init_fn)
    else:
        # 1D data path: CSV-backed datasets from the optional altdataset toolbox.
        try:
            from altdataset import CSVDataset
        except (ImportError, ModuleNotFoundError):
            raise SynthtorchError('Cannot use 1D ConvNet in CLI without the altdataset toolbox.')
        train_dataset, valid_dataset = CSVDataset(config.source_dir[0]), CSVDataset(config.valid_source_dir[0])
        train_loader = DataLoader(train_dataset, batch_size=config.batch_size, num_workers=config.n_jobs, shuffle=True,
                                  pin_memory=config.pin_memory)
        valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, num_workers=config.n_jobs,
                                  pin_memory=config.pin_memory)
    return train_loader, valid_loader | 5,326,075 |
def retournerTas(x, numéro):
    """Return pile ``x`` with the portion starting at index ``numéro`` flipped.

    Elements before ``numéro`` keep their order; the tail from ``numéro``
    onward is reversed (a pancake-sort style flip). ``x`` is not modified.
    """
    bottom = x[:numéro]
    top = x[numéro:]
    # Glue the untouched bottom to the reversed top.
    return bottom + top[::-1]
# | 5,326,076 |
def AddReplaceCustomAdvertisementArgs(parser, resource_str):
    """Adds common arguments for replacing custom advertisements.

    Args:
      parser: argparse parser to register the flags on.
      resource_str: human-readable resource name interpolated into help text.
    """
    # Mode values are normalised to upper case to match the API enum.
    parser.add_argument(
        '--advertisement-mode',
        choices=_MODE_CHOICES,
        type=lambda mode: mode.upper(),
        metavar='MODE',
        help="""The new advertisement mode for this {0}.""".format(resource_str))
    parser.add_argument(
        '--set-advertisement-groups',
        type=arg_parsers.ArgList(
            choices=_GROUP_CHOICES, element_type=lambda group: group.upper()),
        metavar='GROUP',
        help="""The list of pre-defined groups of IP ranges to dynamically
                advertise on this {0}. This list can only be specified in
                custom advertisement mode.""".format(resource_str))
    # Key-only entries are allowed: a range may come with or without a
    # trailing "=DESC" description.
    parser.add_argument(
        '--set-advertisement-ranges',
        type=arg_parsers.ArgDict(allow_key_only=True),
        metavar='CIDR_RANGE=DESC',
        help="""The list of individual IP ranges, in CIDR format, to dynamically
                advertise on this {0}. Each IP range can (optionally) be given a
                text description DESC. For example, to advertise a specific range,
                use `--set-advertisement-ranges=192.168.10.0/24`.  To store a
                description with the range, use
                `--set-advertisement-ranges=192.168.10.0/24=my-networks`. This
                list can only be specified in custom advertisement mode."""
        .format(resource_str)) | 5,326,077 |
def path_complete(self, text, line, begidx, endidx):
    """
    Path completition function used in various places for tab completion
    when using cmd

    Returns the list of directory entries (files stripped of the default
    extension, directories suffixed with '/') matching the last argument
    on the command line.
    """
    arg = line.split()[1:]
    # this is a workaround to get default extension into the completion function
    # may (hopefully) gets replaced.
    try:
        config = configparser.ConfigParser()
        # BUG FIX: the error message below referenced an undefined name
        # ``conf``; bind the config path to a local first.
        conf = expanduser("~/.cmddocsrc")
        if not config.read(conf):
            print("Error: your config %s could not be read" % conf)
            exit(1)
        extension = config.get("General", "Default_Extension")
    except configparser.NoOptionError:
        # BUG FIX: the fallback only set ``self.extension``, leaving the
        # local ``extension`` used below unbound (NameError). Set both to
        # stay compatible with any external reader of the attribute.
        self.extension = "md"
        extension = "md"
    if not arg:
        completions = os.listdir('./')
        completions[:] = [d for d in completions if d not in self.exclude]
    else:
        dir, part, base = arg[-1].rpartition('/')
        if part == '':
            dir = './'
        elif dir == '':
            dir = '/'
        completions = []
        for f in os.listdir(dir):
            if f.startswith(base):
                if os.path.isfile(os.path.join(dir, f)):
                    # Hide the default extension from file completions.
                    f = remove_fileextension(f, extension)
                    completions.append(f)
                else:
                    completions.append(f+'/')
    return completions
# | 5,326,078 |
def test_config_attributes_with_cli_args(config_with_cli_args):
    """Asserts that the class attributes of a config.Config object are of the expected value/type after
    object initialization, taking into account command line arguments, which take precedence over
    config arguments.
    """
    # check that we get the values we expected, specifically, check that our command line arguments
    # have overwritten our config arguments
    for arg, value in DUMMY_ARGS_WITH_CLI_ARGS.items():
        assert value == getattr(config_with_cli_args, arg) | 5,326,079 |
def test():
    """Terraform: Up & Running - Why Terraform - Web Server

    Builds the book's example configuration with terrascript and compares
    the emitted JSON against the golden file.
    """
    import terrascript
    import terrascript.provider
    import terrascript.resource
    USER_DATA = "#!/bin/bash\nsudo service apache2 start"
    config = terrascript.Terrascript()
    config += terrascript.provider.aws(region="us-east-2", version="~>2.0")
    config += terrascript.resource.aws_instance(
        "app",
        instance_type="t2.micro",
        availability_zone="us-east-2a",
        ami="ami-0c55b159cbfafe1f0",
        user_data=USER_DATA,
    )
    # Golden-file comparison against the expected rendered JSON.
    shared.assert_deep_equal(config, "test_TUAR_why_terraform.tf.json") | 5,326,080 |
def test_find_maxima():
    """
    Not yet a test, Just a cool demo (FYI the maximum of the function is 10, so the printed value should be around -10)
    :return:
    """
    # Runs the discovery experiment end to end; no assertions are made.
    er = ExperimentRunner([blueprint.MaximaKnowledgeDiscovery], log=True)
    er.run() | 5,326,081 |
def nback(n, k, length):
    """Random n-back targets given n, number of digits k and sequence length"""
    # Draw digits in [0, k) from the module-level ``random_state`` RNG.
    Xi = random_state.randint(k, size=length)
    yi = np.zeros(length, dtype=int)
    for t in range(n, length):
        # Target (1) wherever the digit repeats the one seen n steps earlier.
        yi[t] = (Xi[t - n] == Xi[t])
    return Xi, yi | 5,326,082 |
def makeStateVector(sys, start_time=0):
    """
    Constructs the initial state vector recursively.

    Parameters
    ----------
    sys: inherits from control.InputOutputSystem
    start_time: float

    Returns
    -------
    list
    """
    x_lst = []
    # Recurse into interconnected systems; flatten their sub-system states.
    if "InterconnectedSystem" in str(type(sys)):
        for sub_sys in sys.syslist:
            x_lst.extend(makeStateVector(sub_sys, start_time=start_time))
    elif isinstance(sys, ctl.NonlinearIOSystem):
        x_lst.extend(sys.makeStateSer().values)
    else:
        # Linear/unknown systems default to an all-zero state of size nstates.
        new_state = list(np.repeat(0, sys.nstates))
        x_lst.extend(new_state)
    result = [float(v) for v in x_lst]
    return result | 5,326,083 |
def test_create_OK(mock_category, mock_send, mock_exists):
    """Create and update object."""
    # Wire up a VF backed by a VSP/vendor pair; the hard-coded identifiers
    # below are echoed in the expected request payload.
    vf = Vf()
    vsp = Vsp()
    vendor = Vendor()
    vsp._identifier = "1232"
    vf.vsp = vsp
    vsp.vendor = vendor
    vsp._csar_uuid = "1234"
    # Exact JSON body the SDC "create resource" POST is expected to carry.
    expected_data = '{\n  "artifacts": {},\n  "attributes": [],\n  "capabilities": {},\n  "categories": [\n    {\n      "normalizedName": "generic",\n      "name": "Generic",\n      "uniqueId": "resourceNewCategory.generic",\n      "subcategories": [{"empty": false, "groupings": null, "icons": ["objectStorage", "compute"], "name": "Abstract", "normalizedName": "abstract", "ownerId": null, "type": null, "uniqueId": "resourceNewCategory.generic.abstract", "version": null}],\n      "version": null,\n      "ownerId": null,\n      "empty": false,\n      "type": null,\n      "icons": null\n    }\n  ],\n  "componentInstances": [],\n  "componentInstancesAttributes": {},\n  "componentInstancesProperties": {},\n  "componentType": "RESOURCE",\n  "contactId": "cs0008",\n  \n  "csarUUID": "1234",\n  "csarVersion": "1.0",\n  \n  "deploymentArtifacts": {},\n  "description": "VF",\n  "icon": "defaulticon",\n  "name": "ONAP-test-VF",\n  "properties": [],\n  "groups": [],\n  "requirements": {},\n  "resourceType": "VF",\n  "tags": ["ONAP-test-VF"],\n  "toscaArtifacts": {},\n  "vendorName": "Generic-Vendor",\n  "vendorRelease": "1.0"\n}'
    # VF does not exist yet, so create() must take the creation path.
    mock_exists.return_value = False
    # Canned SDC response: a freshly created, checked-out draft resource.
    mock_send.return_value = {'resourceType': 'VF', 'name': 'one', 'uuid': '1234', 'invariantUUID': '5678', 'version': '1.0', 'uniqueId': '91011', 'lifecycleState': 'NOT_CERTIFIED_CHECKOUT'}
    # Resource category returned by the mocked category lookup; its fields
    # must line up with the "categories" entry of expected_data above.
    rc = ResourceCategory(
        name="Generic"
    )
    rc.normalized_name="generic"
    rc.unique_id="resourceNewCategory.generic"
    rc.subcategories=[{"empty": False, "groupings": None, "icons": ["objectStorage", "compute"], "name": "Abstract", "normalizedName": "abstract", "ownerId": None, "type": None, "uniqueId": "resourceNewCategory.generic.abstract", "version": None}]
    rc.version=None
    rc.owner_id=None
    rc.empty=False
    rc.type=None
    rc.icons=None
    mock_category.return_value = rc
    vf.create()
    # Exactly one POST with the exact payload, and the VF state/ids must
    # reflect the mocked SDC response.
    mock_send.assert_called_once_with("POST", "create Vf", 'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/resources', data=expected_data)
    assert vf.created()
    assert vf._status == const.DRAFT
    assert vf.identifier == "1234"
    assert vf.unique_uuid == "5678"
    assert vf.version == "1.0"
def make_random_tensors(spec_structure, batch_size = 2):
    """Create random inputs for tensor_spec (for unit testing).

    Args:
      spec_structure: A dict, (named)tuple, list or a hierarchy thereof filled by
        TensorSpecs(subclasses).
      batch_size: If None, we will have a flexible shape (None,) + shape. If <= 0
        we will omit an explicit batch dimension and otherwise have a fixed
        (batch_size,) + shape.

    Returns:
      Equivalent structure as spec_structure, with TensorSpecs converted to
      random tensors of the corresponding shape and dtype.
    """
    assert_valid_spec_structure(spec_structure)
    def make_random(t):
        # Integer dtypes get byte-range values; float dtypes stay in [0, 1).
        maxval = 255 if t.dtype in [tf.uint8, tf.int32, tf.int64] else 1.0
        # tf.random_uniform cannot emit uint8 directly; sample int32 and cast back.
        dtype = tf.int32 if t.dtype == tf.uint8 else t.dtype
        shape = tuple(t.shape.as_list())
        if batch_size is None:
            shape = (None,) + shape
        # BUG FIX: this must be elif -- with batch_size=None the original
        # `if batch_size > 0` raised TypeError (None vs int comparison).
        elif batch_size > 0:
            shape = (batch_size,) + shape
        r = tf.random_uniform(shape, maxval=maxval, dtype=dtype)
        return tf.cast(r, t.dtype)
    return nest.map_structure(make_random, spec_structure)
def simulate(robot, task, opt_seed, thread_count, episode_count=1):
    """Run trajectory optimization for the robot on the given task, and return the
    resulting input sequence and result.

    Parameters:
        robot: robot design to evaluate.
        task: task object supplying terrain, objective, and MPC settings
            (time_step, interval, horizon, episode_len, discount_factor).
        opt_seed: base RNG seed for the optimizer (episode index is added).
        thread_count: worker threads used for optimizer rollouts.
        episode_count: number of episodes; with >= 2, a value estimator is
            trained between episodes to bootstrap later optimization.

    Returns:
        (input_sequence, mean_reward), or (None, None) if the design
        self-collides in its rest pose.
    """
    robot_init_pos, has_self_collision = presimulate(robot)
    if has_self_collision:
        return None, None  # return None if there are collisions in design
    def make_sim_fn():  # factory producing a fresh simulation environment
        sim = rd.BulletSimulation(task.time_step)
        task.add_terrain(sim)
        # Rotate 180 degrees around the y axis, so the base points to the right
        sim.add_robot(robot, robot_init_pos, rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
        return sim
    main_sim = make_sim_fn()  # initialise simulation
    robot_idx = main_sim.find_robot_index(robot)  # get robot index of current robot
    dof_count = main_sim.get_robot_dof_count(robot_idx)  # get number of DOF
    if episode_count >= 2:
        # Multi-episode runs learn a value function between episodes.
        value_estimator = rd.FCValueEstimator(main_sim, robot_idx, 'cpu', 64, 3, 1)
    else:
        value_estimator = rd.NullValueEstimator()
    input_sampler = rd.DefaultInputSampler()
    objective_fn = task.get_objective_fn()  # get objective function (dot product of robot motion)
    # Replay buffer of observations and discounted returns for value training.
    replay_obs = np.zeros((value_estimator.get_observation_size(), 0))
    replay_returns = np.zeros(0)
    for episode_idx in range(episode_count):
        optimizer = rd.MPPIOptimizer(1.0, task.discount_factor, dof_count,
                                     task.interval, task.horizon, 512,
                                     thread_count, opt_seed + episode_idx,
                                     make_sim_fn, objective_fn, value_estimator,
                                     input_sampler)
        optimizer.update()  # run simulations to estimate values of final states
        optimizer.set_sample_count(64)  # decrease sample count
        main_sim.save_state()  # save simulation state
        input_sequence = np.zeros((dof_count, task.episode_len))
        obs = np.zeros((value_estimator.get_observation_size(),
                        task.episode_len + 1), order='f')
        rewards = np.zeros(task.episode_len * task.interval)
        for j in range(task.episode_len):  # for length of episode
            optimizer.update()  # re-plan: update the optimizer's input sequence
            input_sequence[:,j] = optimizer.input_sequence[:,0]  # MPC: keep only the first planned control
            optimizer.advance(1)  # advance the robot(s) 1 step in the simulation
            value_estimator.get_observation(main_sim, obs[:,j])  # record observation before applying step j's controls
            for k in range(task.interval):  # for length of interval
                main_sim.set_joint_targets(robot_idx,  # set joint targets for each joint
                                           input_sequence[:,j].reshape(-1, 1))
                task.add_noise(main_sim, j * task.interval + k)  # add noise to the force and torque of each joint
                main_sim.step()  # move the robot one step in the simulation
                rewards[j * task.interval + k] = objective_fn(main_sim)  # update the reward from return value of objective function
        value_estimator.get_observation(main_sim, obs[:,-1])  # final observation; used to bootstrap returns below
        main_sim.restore_state()  # restore previously saved state
        # Only train the value estimator if there will be another episode
        if episode_idx < episode_count - 1:
            returns = np.zeros(task.episode_len + 1)
            # Bootstrap returns with value estimator
            value_estimator.estimate_value(obs[:,task.episode_len], returns[-1:])
            # Discounted backward sweep: returns[j] = reward_j + gamma * returns[j+1]
            for j in reversed(range(task.episode_len)):
                interval_reward = np.sum(
                    rewards[j * task.interval:(j + 1) * task.interval])
                returns[j] = interval_reward + task.discount_factor * returns[j + 1]
            replay_obs = np.hstack((replay_obs, obs[:,:task.episode_len]))
            replay_returns = np.concatenate((replay_returns,
                                             returns[:task.episode_len]))
            value_estimator.train(replay_obs, replay_returns)
    return input_sequence, np.mean(rewards)  # return the stepping sequence and average reward
def config_worker():
    """
    Enable worker functionality for AIO system.

    :return: True if worker-config-complete is executed
    :raises CloneFail: if the node fails to reboot within the wait window
    """
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        console_log("Applying worker manifests for {}. "
                    "Node will reboot on completion."
                    .format(utils.get_controller_hostname()))
        # This call triggers worker configuration followed by a reboot; the
        # sleeps below simply idle until that reboot interrupts this process.
        sysinv.do_worker_config_complete(utils.get_controller_hostname())
        time.sleep(30)
        # worker-config-complete has no logs to console. So, wait
        # for some time before showing the login prompt.
        for i in range(1, 10):
            console_log("worker-config in progress..")
            time.sleep(30)
        # Falling out of the loop means the expected reboot never happened.
        console_log("Timed out on do_worker_config_complete")
        raise CloneFail("Timed out on do_worker_config_complete")
        # NOTE(review): unreachable -- the raise above always fires first;
        # on success the node reboots during the sleeps instead.
        return True
    else:
        # worker_config_complete is not needed.
        return False
def generate_coverage_page(results, page_name):
    """Render the code coverage HTML page from its template.

    :param results: object whose attributes fill the template placeholders
    :param page_name: template file name (looked up under ``template/``),
        also used as the output file name
    """
    page_template = Template(filename="template/{template}".format(template=page_name))
    rendered = page_template.render(**results.__dict__)
    with open(page_name, "w") as fout:
        fout.write(rendered)
def test_gen_loremipsum_1():
    """Create a complete lorem ipsum string."""
    generated = gen_iplum()
    # Full text must match the reference constant and carry the classic prefix.
    assert generated == LOREM_IPSUM_TEXT
    assert generated.startswith('Lorem ipsum')
def convert_dict_to_df(dict_data: dict):
    """
    Convert a dictionary into a single-row pandas DataFrame.

    :param dict_data: mapping of column names to values
    :return: DataFrame with one row built from ``dict_data``
    """
    # Wrapping the dict in a list yields one row with the keys as columns.
    return pd.DataFrame.from_dict([dict_data])
def sec2msec(sec):
    """Convert `sec` to milliseconds."""
    milliseconds = sec * 1000
    return int(milliseconds)
async def _request(session:aiohttp.ClientSession, url:str, headers:dict[str,str]) -> str:
    """
    Fetch the wishlist page for a single URL.

    Returns the response body as text, or an empty string if reading the
    body fails (the error is logged).
    """
    async with session.get(url=url, headers=headers, proxy=PROXY) as resp:
        try:
            text = await resp.text()
        except Exception as err:
            # Best-effort: return "" so one bad page doesn't abort the whole
            # crawl; the failure details go to the log instead.
            text = ""
            logger.error(f'请求愿望单时发生错误: {err}')
    return text
def get_pretrained_i2v(name, model_dir=MODEL_DIR):
    """
    Look up a registered pretrained model by name and construct it.

    Parameters
    ----------
    name: key into the MODELS registry
    model_dir: directory holding the pretrained weights

    Returns
    -------
    i2v model: I2V

    Raises
    ------
    KeyError: if ``name`` is not a registered model
    """
    if name not in MODELS:
        known_names = ", ".join(MODELS.keys())
        raise KeyError(
            "Unknown model name %s, use one of the provided models: %s" % (name, known_names)
        )
    model_cls, *model_args = MODELS[name]
    return model_cls.from_pretrained(*model_args, model_dir=model_dir)
def object_get_HostChilds(obj):
    """Return List of Objects that have set Host(s) to this object."""
    # source:
    # FreeCAD/src/Mod/Arch/ArchComponent.py
    # https://github.com/FreeCAD/FreeCAD/blob/master/src/Mod/Arch/ArchComponent.py#L1109
    # def getHosts(self,obj)
    children = []
    for link in obj.InListRecursive:
        # A singular "Host" attribute takes precedence over a "Hosts" list.
        if hasattr(link, "Host"):
            if link.Host and link.Host == obj:
                children.append(link)
        elif hasattr(link, "Hosts"):
            if link.Hosts and obj in link.Hosts:
                children.append(link)
    return children
def authorize_access(cache_id, token_id):
    """
    Given a cache ID and token ID, authorize that the token has permission to access the cache.

    Raises:
    - caching_service.exceptions.UnauthorizedAccess if it is unauthorized.
    - exceptions.MissingCache if the cache ID does not exist.
    """
    # get_metadata raises MissingCache if the cache does not exist.
    expected_token = get_metadata(cache_id)['token_id']
    if token_id != expected_token:
        raise exceptions.UnauthorizedAccess('You do not have access to that cache')
def remove_entrance_exam_milestone_reference(request, course_key):
    """
    Remove content reference for entrance exam.
    """
    chapters = modulestore().get_items(
        course_key,
        qualifiers={'category': 'chapter'}
    )
    for chapter in chapters:
        if not chapter.is_entrance_exam:
            continue
        usage_id = chapter.scope_ids.usage_id
        delete_item(request, usage_id)
        milestones_helpers.remove_content_references(str(usage_id))
def add_xray_lines(outfil, infil=None, stop=True):
    """ Pre-pend X-ray lines (as necessary)

    Parameters
    ----------
    outfil : str
        Output file.
    infil : str, optional
        Starting file.  Should use latest llist_vX.X.ascii
    stop : bool, optional
        If True, drop into the debugger before the (destructive) write so
        the user can confirm it.
    """
    if infil is None:
        fils = glob.glob(lt_path+'/lists/sets/llist_v*')
        fils.sort()
        infil = fils[-1]  # Should grab the latest
    # Read set file
    data = ascii.read(infil, format='fixed_width')
    # Read galaxy lines (emission)
    v96 = llp.parse_verner96()
    # Template row for new entries; flag them as EUV-only
    tmp_row = copy.deepcopy(data[0])
    for key in ['fISM', 'fSI', 'fHI', 'fAGN']:
        tmp_row[key] = 0
    tmp_row['fEUV'] = 1
    # Add if new (wrest < 100 Ang and not already present within tolerance)
    # BUG FIX: removed a leftover unconditional `import pdb; pdb.set_trace()`
    # here that halted execution every time a new row was added.
    for row in v96:
        if row['wrest'] > 100.:
            continue
        if np.min(np.abs(row['wrest']-data['wrest'])) > 0.0001:
            tmp_row['wrest'] = row['wrest']
            tmp_row['name'] = row['name']
            data.add_row(tmp_row)
    # Sort
    data.sort('wrest')
    # Write (overwrites outfil; the optional breakpoint is a deliberate guard)
    print('Make sure you want to do this!')
    if stop:
        import pdb; pdb.set_trace()
    data.write(outfil, format='ascii.fixed_width', overwrite=True)
def collate_fn(batch):
    """
    Collate function for combining Hdf5Dataset returns.

    Each item in ``batch`` is a sequence whose first half holds tensors and
    whose second half holds the matching labels. All tensors are right-padded
    along the last dimension to a common length, then stacked.

    :param batch: list
        List of items in a batch
    :return: tuple
        (stacked tensors, labels, per-tensor padding amounts, entries per item)
    """
    entry_counts = []
    tensors = []
    labels = []
    for item in batch:
        assert (len(item) % 2 == 0), "Both labels and tensors are expected"
        half = len(item) // 2
        entry_counts.append(half)
        tensors.extend(item[:half])
        labels.extend(item[half:])
    # Pad every tensor on the right up to the longest last dimension.
    max_len = max(t.shape[-1] for t in tensors)
    padded = []
    pad_amounts = []
    for t in tensors:
        deficit = max_len - t.shape[-1]
        if deficit > 0:
            t = torch.nn.functional.pad(t, (0, deficit))
            pad_amounts.append(deficit)
        else:
            pad_amounts.append(0)
        padded.append(t)
    return (
        torch.stack(padded, dim=0),
        torch.Tensor(labels),
        torch.LongTensor(pad_amounts),
        torch.LongTensor(entry_counts),
    )
def baseNgenerator(base=10, keylength=5, step=1):
    """Generate keys of base <base> and length <keylength>"""
    assert int(base) <= BASE
    assert int(keylength) > 0
    # Range of integers whose base-<base> representation has exactly
    # <keylength> digits (no leading zero).
    lower = base ** (keylength - 1)
    upper = base ** keylength
    while True:
        yield to_base(random.randrange(lower, upper), base)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.