content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def part_2():
    """Placeholder solver for part 2 of the puzzle.

    Returns
    -------
    None -- no solution is computed yet.
    """
    return None
import math
def calc_dist_enrichment(ref_pos, spots_pos, img_size, delta_dist=100,
                         img_density=None, flag_plot=False):
    """Calculate the expression level as a function of the distance from a
    reference point.

    Args:
        ref_pos (tuple): (row, col) position of the reference point.
        spots_pos (np.ndarray): RNA positions, one (row, col) pair per row.
        img_size (tuple): size of the image.
        delta_dist (int): width (in pixels) of each histogram bin used for
            the spatial-enrichment calculation.
        img_density (np.ndarray): image displayed when results are plotted.
        flag_plot (bool): should results be plotted.

    Returns:
        np.ndarray with the histogram. 1st col: center of bins (in pixels),
        2nd col: raw counts, 3rd col: counts normalized by the area of
        concentric rings, 4th col: counts normalized by the number of image
        pixels falling in the distance range of each bin.
    """
    # Avoid the mutable-default-argument pitfall; [] matches the old default.
    if img_density is None:
        img_density = []
    # Distance-transform image: distance of every pixel to the reference
    # point (used for the pixel-count normalization and for display).
    com_image = np.ones((img_size), dtype=np.uint8)
    com_image[int(ref_pos[0]), int(ref_pos[1])] = 0
    dist_tr_image = ndimage.distance_transform_edt(com_image)
    # Distance of all spots to the reference point.  Plain NumPy
    # broadcasting replaces the deprecated np.matlib.repmat call.
    RNAdist = np.sqrt(np.sum(np.square(spots_pos - np.asarray(ref_pos)), axis=1))
    RNAdist_max = np.round(np.amax(RNAdist))
    # Histogram calculation and bin centers for display.
    hist, bins = np.histogram(RNAdist, bins=np.arange(0, RNAdist_max, delta_dist), density=False)
    width = 0.8 * (bins[1] - bins[0])
    center = (bins[:-1] + bins[1:]) / 2
    # Renormalize counts with ring area [considering simple circles].
    area_bins = np.diff(list(map(lambda r: math.pi * (r ** 2), bins)))
    hist_norm_area = hist / area_bins
    # Renormalize considering how many pixels are really in the actual image.
    pixels_bins = np.diff(list(map(lambda threshold: np.sum(dist_tr_image <= threshold), bins)))
    hist_norm_pixel = hist / pixels_bins
    # Summarize all histograms: [center, raw, per-area, per-pixel].
    hist_all = np.stack((center, hist, hist_norm_area, hist_norm_pixel), axis=1)
    if flag_plot:
        # PLOT ROI and center of mass
        fig1, ax = plt.subplots(3, 2, num='dist_enrich')
        fig1.set_size_inches((15, 12))
        # Plot image with region of interest and reference point
        img1 = ax[0][0].imshow(img_density, cmap="hot")
        ax[0][0].get_xaxis().set_visible(False)
        ax[0][0].get_yaxis().set_visible(False)
        colorbar(img1)
        ax[0][0].scatter(ref_pos[1], ref_pos[0], color='g')
        # Plot distance map
        img2 = ax[1][0].imshow(dist_tr_image, cmap="hot")
        ax[1][0].get_xaxis().set_visible(False)
        ax[1][0].get_yaxis().set_visible(False)
        colorbar(img2)
        ax[1][0].scatter(ref_pos[1], ref_pos[0], color='g')
        # Plot histogram of distances with various normalizations
        ax[2][0].hist(RNAdist, bins='auto')  # arguments are passed to np.histogram
        ax[2][0].set_xlabel('Distance [pixel]')
        ax[0][1].bar(center, hist, align='center', width=width)
        ax[0][1].set_xlabel('Distance [pixel]')
        ax[0][1].set_ylabel('# RNAs')
        ax[1][1].bar(center, hist_norm_area, align='center', width=width)
        ax[1][1].set_xlabel('Distance [pixel]')
        ax[1][1].set_ylabel('# RNAs/ring area')
        ax[2][1].bar(center, hist_norm_pixel, align='center', width=width)
        ax[2][1].set_xlabel('Distance [pixel]')
        ax[2][1].set_ylabel('# RNAs/area in image')
        # Set titles
        ax[0][0].title.set_text('Expression density map with reference point')
        ax[1][0].title.set_text('Distance from reference point [um]')
        ax[2][0].title.set_text('Expression as a function of distance')
        ax[0][1].title.set_text('Raw histogram with user defined range')
        ax[1][1].title.set_text('Renormalized with area')
        ax[2][1].title.set_text('Renormalized with number of pixels')
        fig1.tight_layout(h_pad=1)
        plt.draw()
    return hist_all
def get_single_two_body_file(molecule_file_name):
    """
    Load a molecule from file and return its integral tensors.

    :param molecule_file_name: Filename of the stored molecule data.
    :return: Tuple (one_body_integrals, two_body_integrals).
    """
    mol = MolecularData(filename=molecule_file_name)
    mol.load()
    # _molecule = run_pyscf(molecule)
    return mol.one_body_integrals, mol.two_body_integrals
def proposition_formatter(propositions):
    """Returns a list of propositions with selected fields."""
    formatted = []
    for prop in propositions:
        # Ongoing propositions report an empty modification timestamp.
        if prop['status'] != PropositionStatus.ONGOING:
            modified_on = dateutil_parse(prop['modified_on']).isoformat()
        else:
            modified_on = ''
        formatted.append({
            'deadline': dateutil_parse(prop['deadline']).strftime('%Y-%m-%d'),
            'status': prop['status'],
            'modified_on': modified_on,
            'adviser_id': prop['adviser_id'],
        })
    return formatted
def CondSphereAnalFunB(x, y, z, R, x0, y0, z0, sig1, sig2, E0, flag):
    r"""
    Analytic magnetic flux density for the electrostatic problem of a
    conductive sphere in a whole-space.

    * (x0, y0, z0): center location of the sphere
    * R: radius of the sphere
    * sig1, sig2: conductivity outside / inside the sphere
    * E0: amplitude of the primary field
    * flag: 'total' or 'secondary' field

    .. math::
        \mathbf{E}_0 = E_0\hat{x}
    """
    # The original code used `~(a == b == c)`, a bitwise NOT of a bool,
    # which is always truthy; `not` performs the intended size check.
    if not (np.size(x) == np.size(y) == np.size(z)):
        print("Specify same size of x, y, z")
        return
    x = Utils.mkvc(x - x0)
    y = Utils.mkvc(y - y0)
    z = Utils.mkvc(z - z0)
    ind = np.sqrt((x) ** 2 + (y) ** 2 + (z) ** 2) < R
    r = Utils.mkvc(np.sqrt((x) ** 2 + (y) ** 2 + (z) ** 2))
    Hx = np.zeros(x.size)
    Hy = np.zeros(x.size)
    Hz = np.zeros(x.size)
    # Inside of the sphere
    rf2 = 3 * sig1 / (sig2 + 2 * sig1)
    # Primary-field contributions (subtracted for the secondary field).
    Hpy = -sig1 * E0 / 2 * z
    Hpz = sig1 * E0 / 2 * y
    if flag == 'total':
        Hy[ind] = -3 / 2 * sig2 * E0 * (rf2) * z[ind]
        Hz[ind] = 3 / 2 * sig2 * E0 * (rf2) * y[ind]
    elif flag == 'secondary':
        Hy[ind] = -3 / 2 * sig2 * E0 * (rf2) * z[ind] - Hpy[ind]
        Hz[ind] = 3 / 2 * sig2 * E0 * (rf2) * y[ind] - Hpz[ind]
    # Outside of the sphere
    rf1 = (sig2 - sig1) / (sig2 + 2 * sig1)
    if flag == 'total':
        Hy[~ind] = sig1 * (E0 / r[~ind] ** 3 * (R ** 3) * rf1 * (-z[~ind])) + Hpy
        Hz[~ind] = sig1 * (E0 / r[~ind] ** 3 * (R ** 3) * rf1 * (y[~ind])) + Hpz
    elif flag == 'secondary':
        Hy[~ind] = sig1 * (E0 / r[~ind] ** 3 * (R ** 3) * rf1 * (-z[~ind]))
        Hz[~ind] = sig1 * (E0 / r[~ind] ** 3 * (R ** 3) * rf1 * (y[~ind]))
    # Convert H to B via mu_0 and restore the (flattened) input shape.
    return (np.reshape(mu_0 * Hx, x.shape, order='F'),
            np.reshape(mu_0 * Hy, x.shape, order='F'),
            np.reshape(mu_0 * Hz, x.shape, order='F'))
def num2hr(num):
    """Given an integer, return a string with the same amount expressed
    with a quantifier.

    NOTE(review): `quants_tups_dec` is presumably a descending list of
    (abbreviation, amount) pairs -- confirm against its definition.
    """
    # `long` was removed in Python 3; `int` is unbounded and equivalent.
    num = int(num)
    for quant_abbr, quant_amount in quants_tups_dec:
        if num >= quant_amount:
            return "%.2f%s" % (float(num) / quant_amount, quant_abbr)
    return str(num)
import os
def super_resolve_service(path):
    """
    Take the uploaded image and super resolve it.
    """
    # The POST request must carry a file part.
    if 'file' not in request.files:
        return BadRequest("File not present in request")
    upload = request.files['file']
    if upload.filename == '':
        return BadRequest("File name is not present in request")
    if not allowed_file(upload.filename):
        return BadRequest("Invalid file type")
    if upload and allowed_file(upload.filename):
        safe_name = secure_filename(upload.filename)
        input_filepath = os.path.join('./', safe_name)
        output_filepath = os.path.join('/output/', safe_name)
        upload.save(input_filepath)
        # Fall back to the default checkpoint when no model is requested.
        model = request.form.get("model") or "model_epoch_10.pth"
        super_resolve(input_filepath, output_filepath, '/models/' + model, True)
        return send_file(output_filepath, mimetype='image/jpg')
import decimal
def quarks_to_kin(quarks: int) -> str:
    """Converts an integer quark amount into a string Kin amount.

    :param quarks: An amount, in quarks.
    :return: A string Kin amount.
    """
    # Decimal division keeps the exact fractional Kin value.
    return str(decimal.Decimal(quarks) / _KIN_TO_QUARKS)
def get_feature_representations(model, content_path, style_path, num_style_layers):
    """Compute the content and style feature representations.

    Loads and preprocesses the content and style images from their paths,
    then feeds them through the network to obtain the outputs of the
    intermediate layers.

    Arguments:
        model: The model that we are using.
        content_path: The path to the content image.
        style_path: The path to the style image.
        num_style_layers: Number of leading outputs that are style layers.

    Returns:
        The style features and the content features.
    """
    content_image = load_and_process_img(content_path)
    style_image = load_and_process_img(style_path)
    # One batched forward pass: style image first, content image second.
    batch = np.concatenate([style_image, content_image], axis=0)
    outputs = model(batch)
    # Style layers come first in the output list; index 0 is the style image.
    style_features = [layer[0] for layer in outputs[:num_style_layers]]
    content_features = [layer[1] for layer in outputs[num_style_layers:]]
    return style_features, content_features
def check_string(sql_string, add_semicolon=False):
    """
    Check whether a string is valid PostgreSQL. Returns a boolean
    indicating validity and a message from ecpg, which will be an
    empty string if the input was valid, or a description of the
    problem otherwise.
    """
    prepared = sqlprep.prepare_sql(sql_string, add_semicolon=add_semicolon)
    is_valid, message = ecpg.check_syntax(prepared)
    return is_valid, message
def interpolate_freq_stft(Y, F_coef, F_coef_new):
    """Interpolate a magnitude STFT onto a new frequency grid.

    Notebook: C2/C2_STFT-FreqGridInterpol.ipynb

    Args:
        Y: Magnitude STFT
        F_coef: Vector of frequency values
        F_coef_new: Vector of new frequency values

    Returns:
        Y_interpol: Interpolated magnitude STFT
    """
    # Cubic interpolation along the frequency axis (axis 0).
    interpolator = interp1d(F_coef, Y, kind='cubic', axis=0)
    return interpolator(F_coef_new)
import os
def save_shapely_shapes_to_file(shapes_list, ref_shp, output_shp, copy_field=None):
    """
    Save shapes in shapely format to a shapefile.

    Args:
        shapes_list: shapes list, can be polygon, line, and so on
        ref_shp: reference shapefile containing the projection information
        output_shp: save path
        copy_field: a list of field names copied from "ref_shp"; the count in
            shapes_list should be the same as the record count in ref_shp

    Returns:
        True
    """
    # Create a new shapefile in memory
    w = shapefile.Writer()
    # w.shapeType = org_obj.shapeType ##???
    # create a field
    w.field('id', fieldType="N", size="24")
    # Indices of empty geometries; their attribute records are dropped too.
    remove_index = []
    # to shapes in pyshp format (shapely shapes may contain empty geometries)
    pyshp_shapes = [shape_from_shapely_to_pyshp(shapely_shape, keep_holes=True)
                    for shapely_shape in shapes_list]
    for i, save_shape in enumerate(pyshp_shapes):
        # shapeType 0 is the null shape (no geometry).  Compare with `==`;
        # `is` on an int literal relies on CPython small-int caching.
        if save_shape.shapeType == 0:
            basic.outputlogMessage('skip empty geometry at %d' % i)
            remove_index.append(i)
            continue
        w._shapes.append(save_shape)
        rec = [i]  # add id
        # rec = [org_records[i][0]] # copy id
        w.records.append(rec)
    # copy prj (projection) file from the reference shapefile
    org_prj = os.path.splitext(ref_shp)[0] + ".prj"
    out_prj = os.path.splitext(output_shp)[0] + ".prj"
    io_function.copy_file_to_dst(org_prj, out_prj, overwrite=True)
    # save the file
    w.save(output_shp)
    # copy the requested attribute fields from the ref_shp
    operation_obj = shape_opeation()
    if copy_field is not None:
        field_value_list = operation_obj.get_shape_records_value(ref_shp, attributes=copy_field)
        if field_value_list is not False:
            # Delete from the highest index down so earlier positions are not
            # shifted by prior deletions (the old ascending loop removed the
            # wrong records when more than one geometry was empty).
            for index in sorted(remove_index, reverse=True):
                del field_value_list[index]
            operation_obj.add_multi_field_records_to_shapefile(output_shp, field_value_list, copy_field)
        else:
            basic.outputlogMessage('get field %s failed' % str(copy_field))
    return True
def get_guide_counts(mags, t_ccd):
    """
    Get guide star fractional count in various ways.

    - count_9th : fractional stars greater than "9th" magnitude (need 3.0)
    - count_10th : fractional stars greater than "10th" magnitude (need 6.0)
    - count_all : weighted fractional count of all stars

    Parameters
    ----------
    mags : np.ndarray
        Magnitudes
    t_ccd : float
        CCD temperature

    Returns
    -------
    3-tuple of floats
        count_9th, count_10th, count_all
    """
    count_9th = guide_count(mags, t_ccd, count_9th=True)
    count_10th = guide_count(mags, t_ccd, count_9th=False)
    # Build the interpolation curve for the requested CCD temperature.
    ref_t_ccd = -10.9
    ref_counts = [0.0, 1.2, 1.0, 0.5, 0.0]
    # The 5.3 and 5.4 limits are not temperature dependent; they reflect the
    # possibility that the star will be brighter than 5.2 mag and the OBC
    # will reject it.  Around 6th mag, the mean observed catalog error is
    # roughly 0.1 mag.
    bright_mags = [5.3, 5.4]
    # The faint-end knots shift with CCD temperature.
    faint_mags = [snr_mag_for_t_ccd(t_ccd, ref_mag, ref_t_ccd)
                  for ref_mag in (9.0, 10.0, 10.3)]
    ref_mags_t_ccd = bright_mags + faint_mags
    # np.interp clamps to the end counts outside [ref_mags[0], ref_mags[-1]].
    counts_t_ccd = np.interp(x=mags, xp=ref_mags_t_ccd, fp=ref_counts)
    count_all = np.sum(counts_t_ccd)
    return count_9th, count_10th, count_all
def cal2theta(qmag, energy_kev=17.794):
    """
    Calculate two-theta (degrees) at a particular energy in keV from |Q|
      twotheta = cal2theta(Qmag, energy_kev=17.794)
    """
    energy_ev = energy_kev * 1000.0
    # Bragg relation for x-rays: sin(theta) = |Q| * lambda / (4 pi).
    sin_theta = qmag * 1e10 * fg.h * fg.c / (energy_ev * fg.e * 4 * np.pi)
    # Return 2-theta in degrees.
    return np.degrees(2 * np.arcsin(sin_theta))
import os
import re
def convert(df, to_convert, out_file=None, overwrite=False,
            uid_to_pretty=False):
    """
    Private function wrapped by uid_to_pretty and pretty_to_uid that converts
    a file or string between uid and pretty.

    df: dataframe with pretty name data and uid
    to_convert: content to edit. if this is a file, read in. If not, treat as
                a text string to edit.
    out_file: output file name. If specified, write to a file.
    overwrite: if writing an output file, whether or not to overwrite.
    uid_to_pretty: if True, uid->pretty; if False, pretty->uid

    returns converted string
    """
    # If given an existing file path, operate on its contents.
    if os.path.isfile(to_convert):
        with open(to_convert, 'r') as handle:
            text = handle.read()
    else:
        text = to_convert
    # Substitute every uid/pretty pair found in the dataframe.
    for i in range(len(df)):
        row = df.iloc[i]
        uid = row.uid
        pretty = to_pretty(row)
        if uid_to_pretty:
            pattern = re.compile("{}".format(uid))
            replacement = pretty
        else:
            # Pretty names may contain regex metacharacters.
            pattern = re.compile(re.escape(pretty))
            replacement = uid
        text = pattern.sub(replacement, text)
    # Optionally write the converted text to disk.
    if out_file is not None:
        if os.path.isfile(out_file) and not overwrite:
            err = f"file {out_file} already exists.\n"
            raise FileExistsError(err)
        with open(out_file, 'w') as handle:
            handle.write(text)
    return text
import os
import ctypes
def _load_lib():
    """Load the shared library by searching possible paths."""
    here = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
    found = libinfo.find_lib_path(_get_lib_names(), here, optional=True)
    if found is None:
        # The library is optional; signal its absence to the caller.
        return None, None
    handle = ctypes.CDLL(found[0], ctypes.RTLD_GLOBAL)
    return handle, os.path.basename(found[0])
from datetime import datetime
import os
def get_train_data(
    image_list,
    gt_list,
    out_dir=None,
    name='train-unet',
    shape=(10, 256, 256),
    n_each=100,
    channels=('z-1', 'y-1', 'x-1', 'centreness'),
    scale=(4, 1, 1),
    log=True,
    validation_prop=0.2,
):
    r"""
    Generate training data from whole ground truth volumes.

    Parameters
    ----------
    image_list: list
        Images from which random training chunks are sampled; must match
        ``gt_list`` one-to-one.
    gt_list: list
        Ground truth volumes corresponding to ``image_list``.
    out_dir: str
        Directory under which a timestamped run directory is created.
        NOTE(review): the default of None would make os.path.join fail --
        callers appear to be expected to always pass a value; confirm.
    name: str
        Run name, appended to the timestamped output directory.
    shape: tuple of int
        Shape of each generated training chunk.
    n_each: int
        Number of random chunks generated per image.
    channels: tuple of str
        tuple of channels to be added to the training data.
            Affinities: 'axis-n' (pattern: r'[xyz]-\d+' e.g., 'z-1')
            Centreness: 'centreness'
    scale: tuple of numeric
        Scale of channels. This is used in calculating centreness score.
        A single tuple is broadcast to every image.
    log: bool
        Should print out be recorded in out_dir/log.txt?
    validation_prop: float
        Proportion of chunks reserved for validation.

    Returns
    -------
    train_dicts: dict
        Dict containing generated training data of the form
        {
            'channels' : {
                'name' : (<channel name>, ...),
                ...
                },
            'save_dir' : <path/to/random/chunks>,
            'labels_dirs' : {
                'name' : <path/to/random/chunks/labels>,
                ...
                },
            'ids' : [<chunk id string>, ...]
            'x' : [<image chunk tensor>, ...],
            'ground_truth' : [<gt chunk tensor>, ...],
            'ys' : {
                'name'
            }
        }

    Notes
    -----
    if there is only one set of training labels generated, the name
    of the labels (anywhere you see 'name' above) will be 'y'.
    """
    assert len(image_list) == len(gt_list)
    # Build a unique timestamped directory for this training run.
    now = datetime.now()
    d = now.strftime("%y%m%d_%H%M%S") + '_' + name
    out_dir = os.path.join(out_dir, d)
    os.makedirs(out_dir, exist_ok=True)
    chunk_dicts = []
    # Broadcast a single scale tuple so every image gets its own entry.
    if not isinstance(scale, list):
        scale = [scale, ] * len(image_list)
    for i in range(len(image_list)):
        # Sample n_each random chunks (plus requested channels) per image.
        chunk_dict = get_random_chunks(
            image_list[i],
            gt_list[i],
            out_dir,
            name=name,
            shape=shape,
            n=n_each,
            channels=channels,
            scale=scale[i],
            log=log,
            image_no=i
        )
        chunk_dicts.append(chunk_dict)
    # Merge the per-image chunk dicts, then split into train/validation.
    chunk_dicts = concat_chunk_dicts(chunk_dicts)
    train_dicts = chunk_dict_to_train_dict(chunk_dicts, validation_prop)
    return train_dicts
def data_combiner(n_actions: int,
                  subject_ids: list,
                  n_takes: int,
                  modalities: list,
                  skeleton_pose_model: str):
    """Combines skeleton point information for all actions, all takes, given
    list of subject ids and given list of modalities.

    Args:
        n_actions: Total number of actions in the original dataset.
        subject_ids: List of subjects in the current set.
        n_takes: Total number of takes in the original dataset.
        modalities: Current combination of modalities.
        skeleton_pose_model: Current skeleton pose model name which will be
            used to import skeleton point information.

    Returns:
        A pandas dataframe which contains combined skeleton point information
        for all actions, all takes, given list of subject ids and given list
        of modalities.
    """
    # Collect per-file frames and concatenate once at the end:
    # DataFrame.append was deprecated and removed in pandas 2.0, and
    # repeated appends were quadratic anyway.
    frames = []
    # Iterates across actions, subject_ids, takes, and modalities to combine
    # skeleton point information.
    for i in range(1, n_actions + 1):
        for subject_id in subject_ids:
            for k in range(1, n_takes + 1):
                data_name = 'a{}_s{}_t{}'.format(i, subject_id, k)
                # If the file for the first modality is missing, this
                # action/subject/take combination is skipped entirely.
                try:
                    combined = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
                        modalities[0], data_name, skeleton_pose_model))
                except FileNotFoundError:
                    continue
                # Merge the remaining modalities (if any) on the frame index.
                for m in range(1, len(modalities)):
                    extra = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
                        modalities[m], data_name, skeleton_pose_model))
                    combined = pd.merge(combined, extra, on='frame', how='outer')
                # Tag rows with their source name and action label; the frame
                # column is no longer needed after merging.
                combined['data_name'] = data_name
                combined = combined.drop(columns=['frame'])
                combined['action'] = i
                frames.append(combined)
    if not frames:
        # Preserve the old behavior of returning an empty dataframe when no
        # file was found.
        return pd.DataFrame()
    return pd.concat(frames)
import torch
def yolo_eval(yolo_outputs, model_image_size, true_image_size=None, max_boxes=9, score_threshold=.6, iou_threshold=.5, on_true=False):
    """Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.

    Parameters:
    -----------
    yolo_outputs -- output of the encoding model (for image_shape of (model_width, model_height, 3)), contains 4 tensors:
                    box_confidence: tensor of shape (GR x GC x A x 1) => (4, 4, 1, 1)
                    box_xy: tensor of shape (GR x GC x A x 2) = > (4, 4, 1, 2)
                    box_wh: tensor of shape (GR x GC x A x 2) => (4, 4, 1, 2)
                    box_class_probs: tensor of shape (GR x GC x A x C) => (4, 4, 1, 2)
    model_image_size (tuple): width and height which is the input dimension to the model
    true_image_size (tuple): optional- true width and height of the image which is used to rescale the
                             coordinates if given else it according to model size
    max_boxes (int): maximum number of predicted boxes you'd like
    score_threshold (float): if [conf * class_prob < threshold], then get rid of the corresponding box
    iou_threshold (float): "intersection over union" threshold used for NMS filtering
    on_true (bool): if the yolo_outputs is target then no need to compute sigmoid on conf and class_prob

    Returns:
    --------
    scores (numpy.ndarray): shape (None, ), predicted score for each box
    boxes (numpy.ndarray): shape (None, 4), predicted box coordinates
    classes (numpy.ndarray): shape (None,), predicted class for each box

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    # Split the raw output into confidence, box deltas and class probabilities.
    if type(yolo_outputs) == np.ndarray:
        box_confidence = yolo_outputs[..., 0:1]
        delta_box_xy = yolo_outputs[..., 1:3]
        delta_box_wh = yolo_outputs[..., 3:5]
        box_class_probs = yolo_outputs[..., 5:yolo_outputs.shape[-1]]
    else:  # a (confidence, deltas, class_probs) tuple
        box_confidence, delta_box, box_class_probs = yolo_outputs
        delta_box_xy = delta_box[..., 0:2]
        delta_box_wh = delta_box[..., 2:4]
    if not on_true:
        # Raw predictions need a sigmoid; ground-truth targets are already in [0, 1].
        box_confidence = torch.tensor(box_confidence).float().sigmoid().numpy()
        box_class_probs = torch.tensor(box_class_probs).float().sigmoid().numpy()
    # Convert deltas to corner coordinates scaled back to the original image size.
    boxes = yolo_boxes_to_corners(delta_box_xy, delta_box_wh, model_image_size, true_image_size)
    # Score filtering followed by non-max suppression.
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=score_threshold)
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes=max_boxes, iou_threshold=iou_threshold)
    return scores, boxes, classes
def input_fn_builder(features, seq_length, is_training, drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""
    # Gather each feature component into its own column list.
    all_input_ids = [feature.input_ids for feature in features]
    all_input_mask = [feature.input_mask for feature in features]
    all_segment_ids = [feature.segment_ids for feature in features]
    all_label_ids = [feature.label_ids for feature in features]

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]
        num_examples = len(features)
        # This is for demo purposes and does NOT scale to large data sets. We do
        # not use Dataset.from_generator() because that uses tf.py_func which is
        # not TPU compatible. The right way to load data is with TFRecordReader.
        dataset = tf.data.Dataset.from_tensor_slices({
            "input_ids":
                tf.constant(
                    all_input_ids, shape=[num_examples, seq_length],
                    dtype=tf.int32),
            "input_mask":
                tf.constant(
                    all_input_mask,
                    shape=[num_examples, seq_length],
                    dtype=tf.int32),
            "segment_ids":
                tf.constant(
                    all_segment_ids,
                    shape=[num_examples, seq_length],
                    dtype=tf.int32),
            "label_ids":
                tf.constant(all_label_ids, shape=[
                    num_examples, len(LABEL_COLUMNS)], dtype=tf.int32),
        })
        if is_training:
            # Repeat indefinitely and shuffle for training.
            dataset = dataset.repeat()
            dataset = dataset.shuffle(buffer_size=100)
        return dataset.batch(batch_size=batch_size, drop_remainder=drop_remainder)

    return input_fn
import glob
def expand_files(files):
    """Expands a wildcard to a list of paths for Windows compatibility"""
    tokens = files.split()
    # A lone wildcard token is expanded via glob; anything else is returned
    # as the whitespace-split list of paths.
    if len(tokens) == 1 and '*' in tokens[0]:
        return glob.glob(tokens[0])
    return tokens
def get_index():
    """
    Return the license index, building and caching it on first use.
    """
    global _LICENSES_INDEX
    if _LICENSES_INDEX:
        return _LICENSES_INDEX
    # Lazy build: only loaded from files the first time it is requested.
    _LICENSES_INDEX = get_license_index()
    return _LICENSES_INDEX
import typing
def historical_daily_discounted_cash_flow(
    apikey: str, symbol: str, limit: int = DEFAULT_LIMIT
) -> typing.List[typing.Dict]:
    """
    Query FMP /historical-daily-discounted-cash-flow/ API.

    :param apikey: Your API key.
    :param symbol: Company ticker.
    :param limit: Number of rows to return.
    :return: A list of dictionaries.
    """
    return __return_json(
        path=f"historical-daily-discounted-cash-flow/{symbol}",
        query_vars={"apikey": apikey, "limit": limit},
    )
def split_exon(exon, cds):
    """Takes an exon and a CDS, and returns a map of regions for each
    feature (UTR5/3, CDS) that may be inferred from the arguments.

    Note that the CDS is simply returned as is, to simplify
    downstream handling of these features.

    Both arguments are dicts with at least 'start', 'end' and 'strand'
    keys; coordinates are inclusive.
    """
    results = [cds]
    if exon["start"] < cds["start"]:
        # Region upstream of the CDS: 5' UTR on the + strand, 3' UTR on -.
        # (Conditional expression replaces the fragile `and/or` idiom.)
        utr = dict(exon)
        utr.update(
            end=cds["start"] - 1,
            feature="UTR5" if exon["strand"] == "+" else "UTR3",
        )
        results.append(utr)
    if exon["end"] > cds["end"]:
        # Region downstream of the CDS: 3' UTR on +, 5' UTR on -.
        utr = dict(exon)
        utr.update(
            start=cds["end"] + 1,
            feature="UTR3" if exon["strand"] == "+" else "UTR5",
        )
        results.append(utr)
    return results
def state_dict_to_cpu(state_dict):
    """Make a copy of the state dict onto the cpu."""
    # .state_dict() references tensors, so detach them from the graph and
    # copy each one to the CPU.
    cpu_copy = {}
    for name, tensor in state_dict.items():
        cpu_copy[name] = tensor.detach().cpu()
    return cpu_copy
def NpapiFromNPVariant(scope, type_defn, input_expr, variable, success,
                       exception_context, npp):
    """Gets the string to get a value from a NPVariant.

    This function creates a string containing a C++ code snippet that is used
    to retrieve a value from a NPVariant. If an error occurs, like if the
    NPVariant is not of the correct type, the snippet will set the success
    status variable to false.

    Args:
        scope: a Definition for the scope in which the glue will be written.
        type_defn: a Definition, representing the type of the value.
        input_expr: an expression representing the NPVariant to get the value
            from.
        variable: a string, representing a name of a variable that can be used
            to store a reference to the value.
        success: the name of a bool variable containing the current success
            status.
        exception_context: the name of a string containing context
            information, for use in exception reporting.
        npp: a string, representing the name of the variable that holds the
            pointer to the NPP instance.

    Returns:
        a (string, string) pair, the first string being the code snippet and
        the second one being the expression to access that value.
    """
    (scope, npp) = (scope, npp)  # silence gpylint.
    glue_namespace = npapi_utils.GetGlueFullNamespace(type_defn)
    snippet = _from_npvariant_template.substitute(
        ClassGlueNS=glue_namespace,
        variable=variable,
        input=input_expr,
        success=success,
        context=exception_context)
    return (snippet, '%s->value()' % variable)
from lane_lines_finder.lane_lines_detector import FindLines
def find_lane_lines():
    """
    FindLines used in Udacity self-driving-car nanodegree.

    :return: FindLines
    """
    detector = FindLines(
        window_number=10,
        window_width=150,
        window_min_n_pixels=50,
        search_width=150,
        pixels_to_meters=pixels_to_meters(),
        history_len=25,
    )
    return detector
import struct
def CollectBmpTermination(sock, msg_length, verbose=False):
    """Collect a BMP Termination message.

    Args:
      sock: socket from which to read.
      msg_length: total BMP message length, including the common header.
      verbose: unused; kept for interface compatibility.

    Returns:
      A list of strings.

    Raises:
      ValueError: an unexpected value was found in the message
    """
    print_msg = []
    indent_str = indent.IndentLevel(indent.BMP_CONTENT_INDENT)
    # get the remainder of the message (past the common header)
    #
    term_msg_len = msg_length - BMP.HEADER_LEN_V3
    term_msg_buf = CollectBytes(sock, term_msg_len)
    term_msg_pos = 0
    while term_msg_pos < term_msg_len:
        # Each information TLV starts with 2-byte type and 2-byte length.
        info_type, info_len = struct.unpack_from(">HH",
                                                 term_msg_buf,
                                                 term_msg_pos)
        term_msg_pos += 4
        info_str = term_msg_buf[term_msg_pos:term_msg_pos + info_len]
        term_msg_pos += info_len
        if info_type == BMP.TERM_INFO_TYPE_STRING:
            # array.tostring() was removed in Python 3.9; tobytes() is the
            # supported equivalent.
            print_msg.append("%s%s: %s\n" % (indent_str,
                                             BMP.TERM_INFO_TYPE_STR[info_type],
                                             info_str.tobytes()))
        elif info_type == BMP.TERM_INFO_TYPE_REASON:
            reason_code = struct.unpack(">H", info_str)[0]
            print_msg.append("%s%s: %d (%s)\n" % (indent_str,
                                                  BMP.TERM_INFO_TYPE_STR[info_type],
                                                  reason_code,
                                                  BMP.TERM_INFO_REASON_STR[reason_code]))
        else:
            # The old message said "Init Msg"; this parser handles Termination.
            raise ValueError("Found unexpected Term Msg Info Type %d", info_type)
    # Return list of strings representing collected message.
    #
    return print_msg
def near_field_map_vect_vjp(params: NearFieldParams,
                            nu_vect: jnp.ndarray) -> jnp.ndarray:
    """Compute the near field in a circle of radius 1.

    Uses ls_solver_batched_sigma, which already has a custom vjp, so the
    computation remains differentiable through the linear solves.

    Args:
        params: NearFieldParams holding ls_params (with omega), the sampled
            Green's function G_sample and the grid spacings hx, hy.
        nu_vect: flattened perturbation of the medium.
            NOTE(review): assumed to have one entry per sample point
            (len == params.G_sample.shape[0]) -- confirm against callers.

    Returns:
        The near field flattened to a 1-D array (one entry per angle pair).
    """
    # One right-hand side column per incident angle, each weighted by nu.
    Rhs = -(params.ls_params.omega**2)\
          *nu_vect.reshape((-1,1))*params.G_sample
    # defining the batched transformations: vmap the solver over the RHS
    # columns (in_axes=1/out_axes=1) so all angles are solved in one call
    solver_batched = jit(vmap(jit(partial(ls_solver_batched_sigma,
                                    params.ls_params,
                                    nu_vect)),
                            in_axes=1,
                            out_axes=1))
    # solve for all the rhs in a vectorized fashion
    Sigma = solver_batched(Rhs)
    nm, n_angles = params.G_sample.shape
    # computing the near field by integrating with the Green's
    # function, the sign is due we are convolving with
    # 4/i H^{(1)}_0(k); hx*hy is the quadrature weight of the grid
    near_field = -(Sigma.T.reshape((n_angles, nm))\
                @ params.G_sample.reshape((nm, n_angles)))\
                *params.hx*params.hy
    return near_field.reshape((-1,))
import unicodedata
def normalize_str(text):
    """
    Normalizes unicode input text (for example remove national characters)

    :param text: text to normalize
    :type text: unicode
    """
    # NFKD decomposition does not split the Polish l-with-stroke, so map it
    # explicitly before stripping the remaining diacritics.
    stroke_map = str.maketrans({u'ł': u'l', u'Ł': u'L'})
    translated = text.translate(stroke_map)
    return unicodedata.normalize('NFKD', translated).encode('ascii', 'ignore')
def start():
    """Route for starting the server."""
    try:
        # Only logged-in users may start the server.
        if not session.get('logged_in'):
            return redirect(url_for('login'))
        do_start_server()
        return redirect(url_for('home'))
    except Exception as e:
        abort(500, {'message': str(e)})
def get_context_data(data) -> dict:
    """Look for 'context' item in 'queries' item.

    ``data["queries"]`` may be either a dict carrying a "context" key, or a
    list of such dicts (in which case the first element's context is used).

    :param data: mapping that may contain a "queries" entry.
    :return: the context dict, or an empty dict when none is present.
    """
    queries = data.get("queries") if isinstance(data, dict) else None
    if isinstance(queries, list):
        # Bug fix: the old code evaluated `"context" in data["queries"]`
        # BEFORE branching on the type, so for a list of dicts the membership
        # test inspected list *elements* (never the dicts' keys) and the list
        # branch was unreachable.
        if queries and isinstance(queries[0], dict) and "context" in queries[0]:
            return queries[0]["context"]
    elif isinstance(queries, dict) and "context" in queries:
        return queries["context"]
    return dict()
from datetime import datetime
def dttm_to_epoch(date_str, frmt='%Y-%m-%dT%H:%M:%SZ'):
    """Convert a date string to epoch seconds.

    :param date_str: timestamp string to parse.
    :param frmt: strptime format of ``date_str`` (default: ISO-8601 UTC "Z").
    :return: whole seconds since 1970-01-01T00:00:00 as an ``int``.
    """
    # Bug fix: with `from datetime import datetime` in scope, the old
    # `datetime.datetime.strptime(...)` raised AttributeError on every call.
    return int((datetime.strptime(date_str, frmt) -
                datetime(1970, 1, 1)).total_seconds())
import copy
def merge(src: list, dst: list) -> list:
    """Merge `src` into `dst` and return a copy of `dst`.

    Both lists are trees encoded as a mix of plain strings (leaves) and
    single-key dicts whose value is another such list (sub-trees). Each
    level of the result ends up sorted and duplicate-free for strings.
    """
    # Avoid side effects.
    dst = copy.deepcopy(dst)
    def find_dict(data: list, key: str) -> dict:
        """Find and return the dictionary in `data` that has the `key`."""
        # Invariant: at most one dict per key exists at any level; the assert
        # documents (and enforces) that expectation.
        tmp = [_ for _ in data if isinstance(_, dict) and set(_.keys()) == {key}]
        assert len(tmp) == 1
        return tmp[0]
    def _update(src, dst):
        """Merge one tree level; mutates `dst` in place and recurses."""
        # Add all string keys from `src` to `dst` if they are not yet in `dst`.
        str_keys = [_ for _ in src if isinstance(_, str) and _ not in dst]
        dst.extend(str_keys)
        # Find the all dicts and their one and only key.
        dict_src = {tuple(_.keys())[0] for _ in src if isinstance(_, dict)}
        dict_dst = {tuple(_.keys())[0] for _ in dst if isinstance(_, dict)}
        # Recursively merge the dictionaries.
        for key in dict_src:
            if key not in dict_dst:
                # `dst` does not have the dict at all - just copy it.
                # NOTE(review): this appends a *reference* into `src` (only
                # `dst` was deep-copied), so the result can alias `src`
                # sub-trees -- confirm callers do not mutate the result.
                dst.append(find_dict(src, key))
            else:
                # `dst` already has a dict for `key` -> recursively update it.
                src_val = find_dict(src, key)[key]
                dst_val = find_dict(dst, key)[key]
                _update(src_val, dst_val)
        # Sort the list alphabetically (if the entry is a dict then use its one
        # and only key as the comparative element).
        dst.sort(key=lambda _: _ if isinstance(_, str) else tuple(_.keys())[0])
    _update(src, dst)
    return dst
def restart_component(service_name, template_name):
    """Stop an component, then start it."""
    # Stop first; bail out (returning the falsy result) if that fails.
    stopped = RefCPSServiceExtent.host_template_instance_operate(service_name,
                                                                 template_name,
                                                                 'stop')
    if not stopped:
        LOG.error(
            "cps template_instance_action stop for %s failed." % template_name)
        return stopped
    # Stop succeeded -- now start it again and report the outcome.
    started = RefCPSServiceExtent.host_template_instance_operate(service_name,
                                                                 template_name,
                                                                 'start')
    if not started:
        LOG.error(
            "cps template_instance_action start for %s failed." % template_name)
    return started
import traceback
def handle_unknown_errors(exc):
    """All not HTTP errors should result in a formatted server error."""
    # Expose the traceback and message in the JSON body for debugging.
    payload = {
        "traceback": traceback.format_exc(),
        "message": str(exc),
    }
    return jsonify(payload), 500
def to_device(device, x):
    """Send an array to a given device.

    This method sends a given array to a given device. This method is used
    in :func:`~chainer.dataset.concat_examples`, and can also be used in a
    custom converter method for :class:`~chainer.training.Updater` and
    :class:`~chainer.training.Extension` (e.g.
    :class:`~chainer.training.updaters.StandardUpdater` and
    :class:`~chainer.training.extensions.Evaluator`).

    See also :func:`chainer.dataset.concat_examples`.

    Args:
        device (None or int or device specifier): A device to which an array
            is sent. A negative integer means CPU, a positive integer means
            the GPU with that ID, and ``None`` leaves the array where it is.
            Any of the device specifiers described at
            :class:`~chainer.backend.DeviceId` is also accepted.
        x (:ref:`ndarray`): An array to send.

    Returns:
        Converted array.
    """
    resolved = _get_device(device)
    # ``None`` means "leave the array on its current device".
    return x if resolved is None else resolved.send(x)
def grab_receivers(apk):
    """
    Return the ``android:name`` attribute of every receiver declared in *apk*.

    @param apk : an APK instance
    @rtype : the android:name attribute of all receivers
    """
    tag, attribute = "receiver", "android:name"
    return apk.get_elements(tag, attribute)
def itos(x):
    """Convert an integer to its decimal string representation.

    :raises ValueError: if *x* is not exactly an ``int`` (bools rejected too).
    """
    # Exact type check on purpose: subclasses such as bool must not pass.
    if type(x) is not int:
        raise ValueError("Input value not an integer!")
    return str(x)
import json
import re
def translate_reference_entities(ref_entities, mappings=None):
    """Transform MaaS reference data for comparison with test deployment.

    Positional arguments:
    ref_entities -- the reference entity data (label -> entity dict)

    Keyword arguments:
    mappings -- list of "test_label:ref_label1,ref_label2" strings describing
        the relationship between the reference and the test

    To compare the entities being tested with the reference data, the reference
    data must be manipulated so that the hostnames used match those of the
    entities being tested. Returns ``ref_entities`` unchanged when no mappings
    are given.
    """
    if mappings is None:
        return ref_entities
    checks_by_host_type = {}
    for mapping in mappings:
        test_label, reference_labels = mapping.split(':')
        for label in reference_labels.split(','):
            json_blob = json.dumps(ref_entities[label])
            # Bug fix: labels are hostnames and routinely contain regex
            # metacharacters ('.' matches any char), so re.sub() could rewrite
            # unrelated text. Use a literal string replacement instead.
            json_blob = json_blob.replace(label, test_label)
            entity = json.loads(json_blob)
            if test_label in checks_by_host_type:
                # Several reference hosts map onto one test host: merge checks.
                checks = checks_by_host_type[test_label]['checks']
                checks.update(entity['checks'])
            else:
                checks_by_host_type[test_label] = entity
    return checks_by_host_type
def get_data_base(arr):
    """For a given array, finds the base array that "owns" the actual data."""
    owner = arr
    parent = owner.base
    # Follow the chain of views until .base is no longer an ndarray.
    while isinstance(parent, np.ndarray):
        owner = parent
        parent = owner.base
    return owner
import os
def file_size_feed(filename):
    """file_size_feed(filename) -> function that returns given file's size.

    The returned callable re-checks the size on every invocation and returns
    0 when the file cannot be stat'ed (missing, permission denied, ...).
    """
    def sizefn(filename=filename, os=os):
        # Catch only OSError (was a bare except) so genuine programming
        # errors are no longer silently swallowed.
        try:
            return os.stat(filename)[6]
        except OSError:
            return 0
    return sizefn
def compute_discriminator_loss(real_logit, fake_logit):
    """Computes the discriminator hinge loss given logits.

    Args:
        real_logit: A list of logits produced from the real image
        fake_logit: A list of logits produced from the fake image
    Returns:
        Scalars discriminator loss, adv_loss, patchwise accuracy of
        discriminator at detecting real and fake patches respectively.
    """
    def _patch_count(logit):
        # Total number of patch predictions contained in one logit tensor.
        return tf.cast(tf.reduce_prod(tf.shape(logit)), tf.float32)

    # Multi-scale disc returns a list: accumulate per-scale terms.
    real_loss = sum(tf.reduce_mean(tf.nn.relu(1 - l)) for l in real_logit)
    fake_loss = sum(tf.reduce_mean(tf.nn.relu(1 + l)) for l in fake_logit)
    real_total = sum(_patch_count(l) for l in real_logit)
    fake_total = sum(_patch_count(l) for l in fake_logit)
    # A real patch is "correct" when its logit is non-negative; a fake one
    # when its logit is negative.
    real_correct = sum(tf.reduce_sum(tf.cast(l >= 0, tf.float32))
                       for l in real_logit)
    fake_correct = sum(tf.reduce_sum(tf.cast(l < 0, tf.float32))
                       for l in fake_logit)
    # Avg of all outputs.
    real_loss = real_loss / float(len(real_logit))
    fake_loss = fake_loss / float(len(fake_logit))
    disc_loss = real_loss + fake_loss
    return disc_loss, real_correct / real_total, fake_correct / fake_total
import os
def get_ade20_vqa_data(file_name="ade20k_vqa.jsonl"):
    """
    Load the ADE20K VQA jsonl file named *file_name* from the directory
    configured under ``ade20k_vqa_dir`` and return its records as a list.
    """
    config = get_config()
    file = os.path.join(config["ade20k_vqa_dir"], file_name)
    print(f"Reading {file}")
    with jsonlines.open(file) as reader:
        return [record for record in reader]
def gap_calculator(condition_vector):
    """
    This function calculates max gaps between daily activites in a time series.
    Requires only a single binary vector describing whether a condition was met.

    :param condition_vector: 1-D boolean array, one entry per day.
    :return: length of the longest run of days without the condition,
        counting the leading and trailing stretches.
    """
    # Indices of all days on which the condition is true.
    indices = np.flatnonzero(condition_vector)
    n_days = len(condition_vector)
    # If there are no condition days, max_gap equals the vector length.
    if indices.size == 0:
        return n_days
    # If there is only one condition day, the gap is the larger of the two
    # stretches on either side of it.
    if indices.size == 1:
        return max(indices[0], n_days - indices[0])
    # Multiple condition days: the answer is the largest of the leading gap,
    # the biggest gap between consecutive condition days, and the trailing
    # gap. (Bug fix: the original guard read `len(indices[0] > 1)`, taking
    # the length of a boolean array -- it only worked by accident.)
    start_gap = indices[0]
    mid_gap = int(np.max(np.diff(indices)))
    end_gap = n_days - indices[-1]
    return max(start_gap, mid_gap, end_gap)
def inverse_warp(img, depth, pose, intrinsics, intrinsics_inv, target_image):
    """Inverse warp a source image to the target image plane
    Part of the code modified from
    https://github.com/tensorflow/models/blob/master/transformer/spatial_transformer.py
    Args:
        img: the source image (where to sample pixels) -- [B, H, W, 3]
        depth: depth map of the target image -- [B, H, W]
        pose: 6DoF pose parameters from target to source -- [B, 6]
            (a pre-built [B, 4, 4] matrix is also accepted, see below)
        intrinsics: camera intrinsic matrix -- [B, 3, 3]
        intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
        target_image: target frame; pixels whose projection falls outside
            the source view ("flyout" pixels) are copied from here
    Returns:
        Source image warped to the target image plane, and the flyout mask
        marking the out-of-bounds pixels.
    """
    def _pixel2cam(depth, pixel_coords, intrinsics_inv):
        """Transform coordinates in the pixel frame to the camera frame"""
        cam_coords = tf.matmul(intrinsics_inv, pixel_coords) * depth
        return cam_coords
    def _repeat(x, n_repeats):
        # Repeat each element of the flattened `x` n_repeats times.
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(
                tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
            return tf.reshape(x, [-1])
    def _cam2pixel(cam_coords, proj_c2p):
        """Transform coordinates in the camera frame to the pixel frame"""
        pcoords = tf.matmul(proj_c2p, cam_coords)
        X = tf.slice(pcoords, [0, 0, 0], [-1, 1, -1])
        Y = tf.slice(pcoords, [0, 1, 0], [-1, 1, -1])
        Z = tf.slice(pcoords, [0, 2, 0], [-1, 1, -1])
        # Not tested if adding a small number is necessary
        X_norm = X / (Z + 1e-10)
        Y_norm = Y / (Z + 1e-10)
        pixel_coords = tf.concat([X_norm, Y_norm], axis=1)
        return pixel_coords
    def _meshgrid_abs(height, width):
        """Meshgrid in the absolute coordinates"""
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.stack([1, width])))
        x_t = (x_t + 1.0) * 0.5 * tf.cast(width, tf.float32)
        y_t = (y_t + 1.0) * 0.5 * tf.cast(height, tf.float32)
        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))
        ones = tf.ones_like(x_t_flat)
        # Homogeneous pixel coordinates, shape [3, H*W].
        grid = tf.concat([x_t_flat, y_t_flat, ones], axis=0)
        return grid
    def _euler2mat(z, y, x):
        """Converts euler angles to rotation matrix
        TODO: remove the dimension for 'N' (deprecated for converting all source
        poses altogether)
        Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
        Args:
            z: rotation angle along z axis (in radians) -- size = [B, N]
            y: rotation angle along y axis (in radians) -- size = [B, N]
            x: rotation angle along x axis (in radians) -- size = [B, N]
        Returns:
            Rotation matrix corresponding to the euler angles -- size = [B, N, 3, 3]
        """
        B = tf.shape(z)[0]
        N = 1
        z = tf.clip_by_value(z, -np.pi, np.pi)
        y = tf.clip_by_value(y, -np.pi, np.pi)
        x = tf.clip_by_value(x, -np.pi, np.pi)
        # Expand to B x N x 1 x 1
        z = tf.expand_dims(tf.expand_dims(z, -1), -1)
        y = tf.expand_dims(tf.expand_dims(y, -1), -1)
        x = tf.expand_dims(tf.expand_dims(x, -1), -1)
        zeros = tf.zeros([B, N, 1, 1])
        ones = tf.ones([B, N, 1, 1])
        cosz = tf.cos(z)
        sinz = tf.sin(z)
        rotz_1 = tf.concat([cosz, -sinz, zeros], axis=3)
        rotz_2 = tf.concat([sinz, cosz, zeros], axis=3)
        rotz_3 = tf.concat([zeros, zeros, ones], axis=3)
        zmat = tf.concat([rotz_1, rotz_2, rotz_3], axis=2)
        cosy = tf.cos(y)
        siny = tf.sin(y)
        roty_1 = tf.concat([cosy, zeros, siny], axis=3)
        roty_2 = tf.concat([zeros, ones, zeros], axis=3)
        roty_3 = tf.concat([-siny,zeros, cosy], axis=3)
        ymat = tf.concat([roty_1, roty_2, roty_3], axis=2)
        cosx = tf.cos(x)
        sinx = tf.sin(x)
        rotx_1 = tf.concat([ones, zeros, zeros], axis=3)
        rotx_2 = tf.concat([zeros, cosx, -sinx], axis=3)
        rotx_3 = tf.concat([zeros, sinx, cosx], axis=3)
        xmat = tf.concat([rotx_1, rotx_2, rotx_3], axis=2)
        # Composition order: R = Rx * Ry * Rz.
        rotMat = tf.matmul(tf.matmul(xmat, ymat), zmat)
        return rotMat
    def _pose_vec2mat(vec):
        """Converts 6DoF parameters to transformation matrix
        Args:
            vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
        Returns:
            A transformation matrix -- [B, 4, 4]
        """
        translation = tf.slice(vec, [0, 0], [-1, 3])
        translation = tf.expand_dims(translation, -1)
        rx = tf.slice(vec, [0, 3], [-1, 1])
        ry = tf.slice(vec, [0, 4], [-1, 1])
        rz = tf.slice(vec, [0, 5], [-1, 1])
        rot_mat = _euler2mat(rz, ry, rx)
        rot_mat = tf.squeeze(rot_mat, squeeze_dims=[1])
        filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
        # NOTE: `batch_size` is a closure over the variable defined near the
        # bottom of inverse_warp (it exists by the time this is called).
        filler = tf.tile(filler, [batch_size, 1, 1])
        transform_mat = tf.concat([rot_mat, translation], axis=2)
        transform_mat = tf.concat([transform_mat, filler], axis=1)
        return transform_mat
    def _interpolate_ms(im, x, y, out_size, target, name='_interpolate'):
        """Multi-scale sampling variant of _interpolate: picks, per pixel,
        the candidate scale whose samples best match `target`.
        NOTE(review): currently unused by inverse_warp (see the commented
        call in _spatial_transformer)."""
        with tf.variable_scope('_interpolate'):
            x = tf.reshape(x, [-1])
            y = tf.reshape(y, [-1])
            # constants
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
            # scale indices from [-1, 1] to [0, width/height]
            x = (x + 1.0)*(width_f-1) / 2.0
            y = (y + 1.0)*(height_f-1) / 2.0
            # do sampling: neighbor offsets at several radii around (x0, y0)
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1
            xn1 = x0 - 1
            yn1 = y0 - 1
            x2 = x0 + 2
            y2 = y0 + 2
            xn2 = x0 - 2
            yn2 = y0 - 2
            x3 = x0 + 3
            y3 = y0 + 3
            xn3 = x0 - 3
            yn3 = y0 - 3
            x4 = x0 + 4
            y4 = y0 + 4
            dim2 = width
            dim1 = width*height
            base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)
            im_flat = tf.reshape(im, tf.stack([-1, channels]))
            im_flat = tf.cast(im_flat, 'float32')
            target_flat = tf.reshape(target, tf.stack([-1, channels]))
            target_flat = tf.cast(target_flat, 'float32')
            # The block below is an earlier, wider multi-scale candidate
            # search kept for reference.
            # def helper(x0_, x1_, y0_, y1_, x_range, y_range):
            #     scale = min(x_range, y_range)
            #     scale_area = x_range * y_range / (scale*scale)
            #     x0_c = tf.clip_by_value(x0_, zero, max_x)
            #     x1_c = tf.clip_by_value(x1_, zero, max_x)
            #     y0_c = tf.clip_by_value(y0_, zero, max_y)
            #     y1_c = tf.clip_by_value(y1_, zero, max_y)
            #     base_y0 = base + y0_c*dim2
            #     base_y1 = base + y1_c*dim2
            #     idx_a = base_y0 + x0_c
            #     idx_b = base_y1 + x0_c
            #     idx_c = base_y0 + x1_c
            #     idx_d = base_y1 + x1_c
            #     Ia = tf.gather(im_flat, idx_a)
            #     Ib = tf.gather(im_flat, idx_b)
            #     Ic = tf.gather(im_flat, idx_c)
            #     Id = tf.gather(im_flat, idx_d)
            #     # and finally calculate interpolated values
            #     x0_f = tf.cast(x0_, 'float32')
            #     x1_f = tf.cast(x1_, 'float32')
            #     y0_f = tf.cast(y0_, 'float32')
            #     y1_f = tf.cast(y1_, 'float32')
            #     x_mid = (x0_f + x1_f) / 2.0
            #     y_mid = (y0_f + y1_f) / 2.0
            #     x0_f = (x0_f - x_mid) / scale + x_mid
            #     x1_f = (x1_f - x_mid) / scale + x_mid
            #     y0_f = (y0_f - y_mid) / scale + y_mid
            #     y1_f = (y1_f - y_mid) / scale + y_mid
            #     wa = tf.expand_dims(((x1_f-x) * (y1_f-y) / scale_area), 1)
            #     wb = tf.expand_dims(((x1_f-x) * (y-y0_f) / scale_area), 1)
            #     wc = tf.expand_dims(((x-x0_f) * (y1_f-y) / scale_area), 1)
            #     wd = tf.expand_dims(((x-x0_f) * (y-y0_f) / scale_area), 1)
            #     output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            #     return output, [tf.expand_dims(Ia, axis=-1),
            #                     tf.expand_dims(Ib, axis=-1),
            #                     tf.expand_dims(Ic, axis=-1),
            #                     tf.expand_dims(Id, axis=-1)]
            # output1, v1 = helper(x0, x1, y0, y1, 1.0, 1.0)
            # output2, v2 = helper(x0, x1, yn1, y2, 1.0, 3.0)
            # output3, v3 = helper(xn1, x2, y0, y1, 3.0, 1.0)
            # output4, v4 = helper(xn1, x2, yn1, y2, 3.0, 3.0)
            # output5, v5 = helper(xn2, x3, yn2, y3, 5.0, 5.0)
            # output6, v6 = helper(x0, x1, yn2, y3, 1.0, 5.0)
            # output7, v7 = helper(xn1, x2, yn2, y3, 3.0, 5.0)
            # output8, v8 = helper(xn2, x3, y0, y1, 5.0, 1.0)
            # output9, v9 = helper(xn2, x3, yn1, y2, 5.0, 3.0)
            # output10, v10 = helper(xn3, x4, yn3, y4, 7.0, 7.0)
            # output11, v11 = helper(xn3, x4, yn1, y2, 7.0, 3.0)
            # output12, v12 = helper(xn1, x2, yn3, y4, 3.0, 7.0)
            # candidates = tf.concat(v1+v2+v3+v4+v5+v6+v7+v8+v9+v10+v11+v12, axis=2)
            # idx = tf.argmin(tf.reduce_mean(tf.abs(candidates - tf.expand_dims(target_flat, axis=-1)), axis=1, keep_dims=True), axis=2)
            # idx = tf.tile(idx, [1, channels])
            # error_small_pred = tf.tile(tf.reduce_mean(tf.abs(output1 - target_flat), axis=1, keep_dims=True), [1, channels]) < 0.1
            # output = tf.where(tf.logical_or(error_small_pred, tf.logical_and(idx>=0, idx<4)), output1,
            #          tf.where(tf.logical_and(idx>=4, idx<8), output2,
            #          tf.where(tf.logical_and(idx>=8, idx<12), output3,
            #          tf.where(tf.logical_and(idx>=12, idx<16), output4,
            #          tf.where(tf.logical_and(idx>=16, idx<20), output5,
            #          tf.where(tf.logical_and(idx>=20, idx<24), output6,
            #          tf.where(tf.logical_and(idx>=24, idx<28), output7,
            #          tf.where(tf.logical_and(idx>=28, idx<32), output8,
            #          tf.where(tf.logical_and(idx>=32, idx<36), output9,
            #          tf.where(tf.logical_and(idx>=36, idx<40), output10,
            #          tf.where(tf.logical_and(idx>=40, idx<44), output11, output12)))))))))))
            def helper(x0_, x1_, y0_, y1_, scale):
                # Bilinear sample with the 4 corners (x0_,y0_)..(x1_,y1_),
                # weights normalized by `scale` (the corner-box area).
                x0_c = tf.clip_by_value(x0_, zero, max_x)
                x1_c = tf.clip_by_value(x1_, zero, max_x)
                y0_c = tf.clip_by_value(y0_, zero, max_y)
                y1_c = tf.clip_by_value(y1_, zero, max_y)
                base_y0 = base + y0_c*dim2
                base_y1 = base + y1_c*dim2
                idx_a = base_y0 + x0_c
                idx_b = base_y1 + x0_c
                idx_c = base_y0 + x1_c
                idx_d = base_y1 + x1_c
                Ia = tf.gather(im_flat, idx_a)
                Ib = tf.gather(im_flat, idx_b)
                Ic = tf.gather(im_flat, idx_c)
                Id = tf.gather(im_flat, idx_d)
                # and finally calculate interpolated values
                x0_f = tf.cast(x0_, 'float32')
                x1_f = tf.cast(x1_, 'float32')
                y0_f = tf.cast(y0_, 'float32')
                y1_f = tf.cast(y1_, 'float32')
                wa = tf.expand_dims(((x1_f-x) * (y1_f-y) / scale), 1)
                wb = tf.expand_dims(((x1_f-x) * (y-y0_f) / scale), 1)
                wc = tf.expand_dims(((x-x0_f) * (y1_f-y) / scale), 1)
                wd = tf.expand_dims(((x-x0_f) * (y-y0_f) / scale), 1)
                output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
                return output, [tf.expand_dims(Ia, axis=-1),
                                tf.expand_dims(Ib, axis=-1),
                                tf.expand_dims(Ic, axis=-1),
                                tf.expand_dims(Id, axis=-1)]
            # Candidate samplings at widening neighborhoods (1x1 ... 5x5).
            output1, v1 = helper(x0, x1, y0, y1, 1.0)
            output2, v2 = helper(x0, x1, yn1, y2, 3.0)
            output3, v3 = helper(xn1, x2, y0, y1, 3.0)
            output4, v4 = helper(xn1, x2, yn1, y2, 9.0)
            output5, v5 = helper(xn2, x3, yn2, y3, 25.0)
            candidates = tf.concat(v1+v2+v3+v4+v5, axis=2)
            # Per pixel, pick the candidate closest to the target image,
            # unless the plain bilinear result is already close enough.
            idx = tf.argmin(tf.reduce_mean(tf.abs(candidates - tf.expand_dims(target_flat, axis=-1)), axis=1, keep_dims=True), axis=2)
            idx = tf.tile(idx, [1, channels])
            error_small_pred = tf.tile(tf.reduce_mean(tf.abs(output1 - target_flat), axis=1, keep_dims=True), [1, channels]) < 0.1
            output = tf.where(tf.logical_or(error_small_pred, tf.logical_and(idx>=0, idx<4)), output1,
                     tf.where(tf.logical_and(idx>=4, idx<8), output2,
                     tf.where(tf.logical_and(idx>=8, idx<12), output3,
                     tf.where(tf.logical_and(idx>=12, idx<16), output4, output5))))
            output = tf.reshape(output, shape=tf.stack([num_batch, height, width, channels]))
            return output
    def _interpolate(im, x, y, name='_interpolate'):
        """Perform bilinear sampling on im given x,y coordinates.
        Implements the differentiable sampling mechanism with bilinear kerenl
        in https://arxiv.org/abs/1506.02025.
        x,y are tensors specifying normalized coordinates [-1,1] to be sampled on im.
        (e.g.) (-1,-1) in x,y corresponds to pixel location (0,0) in im, and
        (1,1) in x,y corresponds to the bottom right pixel in im.
        """
        with tf.variable_scope(name):
            x = tf.reshape(x, [-1])
            y = tf.reshape(y, [-1])
            # constants
            num_batch = tf.shape(im)[0]
            _, height, width, channels = im.get_shape().as_list()
            x = tf.to_float(x)
            y = tf.to_float(y)
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            zero = tf.constant(0, dtype=tf.int32)
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
            # scale indices from [-1, 1] to [0, width-1/height-1]
            x = (x + 1.0) * (width_f - 1.0) / 2.0
            y = (y + 1.0) * (height_f - 1.0) / 2.0
            # do sampling
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = width
            dim1 = width * height
            # Create base index
            base = tf.range(num_batch) * dim1
            base = tf.reshape(base, [-1, 1])
            base = tf.tile(base, [1, height * width])
            base = tf.reshape(base, [-1])
            base_y0 = base + y0 * dim2
            base_y1 = base + y1 * dim2
            idx_a = base_y0 + x0
            idx_b = base_y1 + x0
            idx_c = base_y0 + x1
            idx_d = base_y1 + x1
            # use indices to lookup pixels in the flat image and restore channels dim
            im_flat = tf.reshape(im, tf.stack([-1, channels]))
            im_flat = tf.to_float(im_flat)
            pixel_a = tf.gather(im_flat, idx_a)
            pixel_b = tf.gather(im_flat, idx_b)
            pixel_c = tf.gather(im_flat, idx_c)
            pixel_d = tf.gather(im_flat, idx_d)
            # and finally calculate interpolated values
            x1_f = tf.to_float(x1)
            y1_f = tf.to_float(y1)
            wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
            wb = tf.expand_dims((x1_f - x) * (1.0 - (y1_f - y)), 1)
            wc = tf.expand_dims(((1.0 - (x1_f - x)) * (y1_f - y)), 1)
            wd = tf.expand_dims(((1.0 - (x1_f - x)) * (1.0 - (y1_f - y))), 1)
            output = tf.add_n([wa * pixel_a, wb * pixel_b, wc * pixel_c, wd * pixel_d])
            output = tf.reshape(output,
                                shape=tf.stack([num_batch, height, width, channels]))
            return output
    def _spatial_transformer(img, coords, target_image):
        """Spatial transforming the values in 'img' with bilinear sampling based on
        coordinates specified in 'coords'. This is just a wrapper of '_interpolate()'
        to take absolute coordinates as input.
        """
        img_height = tf.cast(tf.shape(img)[1], tf.float32)
        img_width = tf.cast(tf.shape(img)[2], tf.float32)
        img_channels = img.get_shape().as_list()[3]
        px = tf.slice(coords, [0, 0, 0, 0], [-1, -1, -1, 1])
        py = tf.slice(coords, [0, 0, 0, 1], [-1, -1, -1, 1])
        # determine which part "fly out" of the boundary of the target image
        flyout_mask = tf.cast((px<0) | (px>img_width) | (py<0) | (py>img_height), tf.float32)
        # print("shape of flyout_mask:")
        # print(flyout_mask.get_shape().as_list())
        flyout_mask = tf.tile(flyout_mask,[1,1,1,img_channels])
        # print("shape of target image:")
        # print(target_image.get_shape().as_list())
        # scale to normalized coordinates [-1, 1] to match the input to 'interpolate'
        px = tf.clip_by_value(px/img_width*2.0 - 1.0, -1.0, 1.0)
        py = tf.clip_by_value(py/img_height*2.0 - 1.0, -1.0, 1.0)
        out_img = _interpolate(img, px, py, 'spatial_transformer')
        out_size = tf.shape(target_image)[1:3]
        # print("shape of out image:")
        # print(out_img.get_shape().as_list())
        # out_img = _interpolate_ms(img, px, py, out_size, target_image, 'spatial_transformer')
        # the flyout part in out_image should be replaced with the same part in target image
        out_img = target_image*flyout_mask + out_img*(1.0-flyout_mask)
        return out_img, flyout_mask
    dims = tf.shape(img)
    batch_size, img_height, img_width = dims[0], dims[1], dims[2]
    depth = tf.reshape(depth, [batch_size, 1, img_height*img_width])
    grid = _meshgrid_abs(img_height, img_width)
    grid = tf.tile(tf.expand_dims(grid, 0), [batch_size, 1, 1])
    # Lift target pixels into the camera frame using the depth map.
    cam_coords = _pixel2cam(depth, grid, intrinsics_inv)
    ones = tf.ones([batch_size, 1, img_height*img_width])
    cam_coords_hom = tf.concat([cam_coords, ones], axis=1)
    # `pose` can be either a precomputed [B, 4, 4] matrix or a [B, 6] vector.
    if len(pose.get_shape().as_list()) == 3:
        pose_mat = pose
    else:
        pose_mat = _pose_vec2mat(pose)
    # Get projection matrix for tgt camera frame to source pixel frame
    hom_filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
    hom_filler = tf.tile(hom_filler, [batch_size, 1, 1])
    intrinsics = tf.concat([intrinsics, tf.zeros([batch_size, 3, 1])], axis=2)
    intrinsics = tf.concat([intrinsics, hom_filler], axis=1)
    proj_cam_to_src_pixel = tf.matmul(intrinsics, pose_mat)
    src_pixel_coords = _cam2pixel(cam_coords_hom, proj_cam_to_src_pixel)
    src_pixel_coords = tf.reshape(src_pixel_coords,
                                  [batch_size, 2, img_height, img_width])
    src_pixel_coords = tf.transpose(src_pixel_coords, perm=[0,2,3,1])
    projected_img, flyout_mask = _spatial_transformer(img, src_pixel_coords, target_image)
    return projected_img, flyout_mask
from keystoneauth1 import session
from keystoneauth1.identity import v2
def _get_session_keystone_v2():
    """
    Returns a keystone session variable.
    """
    # Only user/password/project/auth URI are used by v2 password auth;
    # the remaining fields exist for the v3 variant of this helper.
    user, password, auth_uri, project_name, project_id, user_domain_name = _get_connection_info('2')
    credentials = v2.Password(
        username=user,
        password=password,
        tenant_name=project_name,
        auth_url=auth_uri,
    )
    return session.Session(auth=credentials)
from typing import Dict
from typing import Any
from typing import List
import os
def fetch_afl_data(path: str, params: Dict[str, Any] = {}) -> List[Dict[str, Any]]:
    """
    Fetch data from the afl_data service.

    Params
    ------
    path (string): API endpoint to call.
    params (dict): Query parameters to include in the API request.

    Returns
    -------
    list of dicts, representing the AFL data requested.
    """
    in_production = os.getenv("PYTHON_ENV") == "production"
    if in_production:
        # Production calls go to the real service and carry a bearer token.
        service_host = AFL_DATA_SERVICE
        headers = {"Authorization": f'Bearer {os.getenv("AFL_DATA_SERVICE_TOKEN")}'}
    else:
        service_host = LOCAL_AFL_DATA_SERVICE
        headers = {}
    response = _make_request(service_host + path, params=params, headers=headers)
    return _handle_response_data(response)
def mock_diffuser_v1_battery_cartridge():
    """Create and return a mock version 1 Diffuser with battery and a cartridge."""
    # The "v1" suffix in the hublot (lot code) selects the version-1 variant
    # inside mock_diffuser -- presumably how the factory picks the model;
    # TODO(review): confirm against mock_diffuser's implementation.
    return mock_diffuser(hublot="lot123v1")
def hist(array : np.ndarray):
    """
    given array of integer values,
    returns the histogram of consecutive integer values without hole
    (counts for each integer from 0 through ``array.max()``)
    """
    top = int(array.max())
    # Half-integer bin edges [-0.5, 0.5, ..., top + 0.5] so that each bin
    # captures exactly one integer value.
    edges = np.arange(0, top + 2) - 0.5
    counts, _ = np.histogram(array, bins=edges)
    return counts
def derive_shared_secret(private_key: bytes, public_key: bytes):
    """Generate a shared secret from keys in byte format.

    Both keys are hex-encoded byte strings; the exchange runs ECDH over
    the NIST P-256 curve.
    """
    ecdh = ECDH(curve=NIST256p)
    ecdh.load_private_key_bytes(unhexlify(private_key))
    ecdh.load_received_public_key_bytes(unhexlify(public_key))
    return ecdh.generate_sharedsecret_bytes()
def process_5p(chrom, positions, strand, vertex_IDs, gene_ID, gene_starts, edge_dict,
               locations, run_info):
    """ Conduct permissive match for 5' end and return assigned vertex,
        edge, and distance

        positions[0] is the 5' end position; positions[1] is passed to the
        matcher as a bound. vertex_IDs[0] is the first internal vertex the
        start exon connects to. Returns the tuple
        (start_vertex, start_exon, start_novelty, known_start, diff_5p).
    """
    # First get a permissively matched start vertex
    start_vertex, diff_5p, known_start = permissive_match_with_gene_priority(chrom,
                                                 positions[0], strand, positions[1],
                                                 "start", gene_ID, gene_starts,
                                                 locations, run_info)
    # No acceptable existing vertex: mint a brand-new one at this position.
    if start_vertex == None:
        start_vertex = create_vertex(chrom, positions[0], run_info,
                                     locations)['location_ID']
    # Then get the start exon
    start_exon, start_novelty = match_or_create_edge(start_vertex,
                                                     vertex_IDs[0],
                                                     "exon", strand,
                                                     edge_dict, run_info)
    # If known_start == 1, the start vertex is a known startpoint of this gene.
    # start novelty refers to the novelty of the first exon (1 if yes, 0 if no)
    return start_vertex, start_exon, start_novelty, known_start, diff_5p
def create_salt(length: int = 128) -> bytes:
    """
    Create a new salt

    :param int length: How many bytes should the salt be long?
    :return: The salt
    :rtype: bytes
    """
    # secrets.token_bytes draws from the same OS CSPRNG as SystemRandom but
    # in a single call, instead of constructing one SystemRandom() instance
    # per output byte as the old implementation did.
    import secrets
    return secrets.token_bytes(length)
from typing import Dict
from typing import List
def convert_xclim_inputs_to_pywps(params: Dict, parent=None) -> List[PywpsInput]:
    """Convert xclim indicators properties to pywps inputs.

    :param params: mapping of parameter name -> attribute dict (each must
        carry at least 'kind' and 'description'; 'default'/'choices' where
        applicable).
    :param parent: indicator identifier, used only in warning messages.
    :return: list of pywps inputs for the convertible parameters.
    """
    # Ideally this would be based on the Parameters docstring section rather than name conventions.
    inputs = []
    # Mapping from xclim's InputKind to data_type
    # Only for generic types
    data_types = {
        InputKind.QUANTITY_STR: "string",
        InputKind.NUMBER: "integer",
        InputKind.NUMBER_SEQUENCE: "integer",
        InputKind.STRING: "string",
        InputKind.DAY_OF_YEAR: "string",
        InputKind.DATE: "datetime",
    }
    for name, attrs in params.items():
        if name in xclim_netcdf_variables and attrs['kind'] in [InputKind.VARIABLE, InputKind.OPTIONAL_VARIABLE]:
            # Data variables become netCDF inputs.
            inputs.append(make_nc_input(name))
        elif name in ["freq"]:
            inputs.append(make_freq(name, default=attrs['default'], abstract=attrs['description']))
        elif name in ["indexer"]:
            # An indexer expands into two separate literal inputs.
            inputs.append(make_month())
            inputs.append(make_season())
        elif attrs['kind'] in data_types:
            choices = list(attrs['choices']) if 'choices' in attrs else None
            inputs.append(
                LiteralInput(
                    name,
                    title=name.capitalize().replace('_', ' '),
                    abstract=attrs['description'],
                    data_type=data_types[attrs['kind']],
                    min_occurs=0,
                    # Number sequences may repeat; everything else takes at
                    # most one value.
                    max_occurs=1 if attrs['kind'] != InputKind.NUMBER_SEQUENCE else 99,
                    default=attrs["default"],
                    allowed_values=choices,
                )
            )
        elif attrs['kind'] < 50:
            # raise NotImplementedError(f"{parent}: {name}")
            # Presumably kinds >= 50 are internal/meta kinds that need no
            # input at all -- TODO confirm against xclim's InputKind enum.
            LOGGER.warning(f"{parent}: Argument {name} of kind {attrs['kind']} is not implemented.")
    return inputs
def is_readable_key_pressed(code) -> bool:
    """
    Determine whether the pressed key is an alphabet key, a digit key, or
    the '-' key (with no meaningful modifier held).

    :param code: character code obtained from pygame.event.get()[n].key.
    :return: True if the above conditions hold, otherwise False.
    """
    # '-' is accepted unconditionally (note: it bypasses the modifier check
    # below).
    if chr(code) == "-":
        return True
    if not chr(code).isalnum():
        return False
    # Key codes for letters are lowercase; anything else is not a plain
    # letter/digit key.
    if not chr(code).islower():
        return False
    modifier = pygame.key.get_mods()
    # 4096 is presumably pygame.KMOD_NUM (NumLock), treated as "no modifier"
    # -- TODO(review): confirm against the pygame key-modifier constants.
    if modifier != 0 and modifier != 4096:
        return False
    return True
from typing import Iterable
from typing import List
from typing import TypeVar
import itertools
# Re-declared locally so the block is self-contained; harmless if the file
# already defines the same TypeVar above.
_T = TypeVar("_T")
def extend(sequence: Iterable[_T], minsize: int) -> List[_T]:
    """
    Extend ``sequence`` by repetition until it is at least as long as ``minsize``.

    .. versionadded:: 2.3.0

    :param sequence:
    :param minsize:
    :rtype:
    :raises ValueError: if ``sequence`` is empty but ``minsize`` is positive
        (the old implementation died here with an opaque ``StopIteration``).

    .. seealso:: :func:`~.extend_with` and :func:`~.extend_with_none`
    """
    output = list(sequence)
    if len(output) >= minsize:
        return output
    if not output:
        raise ValueError("cannot extend an empty sequence to a positive minsize")
    # Repeat the whole sequence enough times, then cut to exactly `minsize`
    # (the old loop appended one element at a time while cycling a list it
    # was simultaneously mutating -- correct, but fragile).
    repetitions = -(-minsize // len(output))  # ceiling division
    return (output * repetitions)[:minsize]
def admin_view(view, cacheable=False):
    """
    Overwrite the default admin view to return 404 for unauthorized users.

    Raises Http404 unless the requesting user is both active AND staff
    (mirroring Django's own staff_member_required check), so anonymous
    users and logged-in non-staff users both get a 404.
    """
    def inner(request, *args, **kwargs):
        # Bug fix: the previous condition (`not is_active and not is_staff`)
        # only 404'd users who were BOTH inactive and non-staff, letting
        # active non-staff users reach the admin view.
        if not (request.user.is_active and request.user.is_staff):
            raise Http404()
        return view(request, *args, **kwargs)
    if not cacheable:
        inner = never_cache(inner)
    # We add csrf_protect here so this function can be used as a utility
    # function for any view, without having to repeat 'csrf_protect'.
    if not getattr(view, 'csrf_exempt', False):
        inner = csrf_protect(inner)
    return update_wrapper(inner, view)
from datetime import datetime, timedelta
def rtn_dates(beg, end=None, using_weekend=False, rtn_string=True):
    """
    :param beg: Date String, e.g. 20170901, 2017-09-01
    :param end: Date String, e.g. 20170901, 2017-09-01, if not specified, using current date
    :param using_weekend: include Saturdays/Sundays when True
    :param rtn_string: indicate whether the returned values are strings or python date objects
    :return: list of dates from `beg` to `end` inclusive

    Example:
        rtn_dates('20170801', '20170805', using_weekend=True, rtn_string=True)
        =['20170801', '20170802', '20170803', '20170804', '20170805']
    """
    # Bug fix: `timedelta` was used below but only `datetime` was imported.
    beg_date = datetime.strptime(beg.replace("-", ""), "%Y%m%d").date()
    if isinstance(end, str) and len(end):
        end_date = datetime.strptime(end.replace("-", ""), "%Y%m%d").date()
    else:
        end_date = datetime.now().date()
    test_dates = []
    while beg_date <= end_date:
        # .weekday(): Monday = 0 ... Sunday = 6; < 5 means a weekday.
        if using_weekend or beg_date.weekday() < 5:
            test_dates.append(beg_date)
        beg_date += timedelta(days=1)
    if rtn_string:
        test_dates = [d.strftime("%Y%m%d") for d in test_dates]
    return test_dates
def pr_auc_score(y_true: np.ndarray, y_score: np.ndarray):
    """Area under the precision-recall curve.

    Args:
        y_true: Array of ground-truth y values.
        y_score: Array of predicted probabilities, same shape as y_true.

    Returns:
        Area under the precision-recall curve.
    """
    assert y_true.shape == y_score.shape
    precision_vals, recall_vals, _thresholds = precision_recall_curve(y_true, y_score)
    return auc(recall_vals, precision_vals)
async def save_legal_person(request: LegalPersonInput):
    """Persist a legal person record.

    Returns the saved entity as a dict, or a 400 JSON response listing the
    errors when saving fails.
    """
    try:
        person = LegalPerson(**request.dict())
        saved = await ManageLegalPerson().save_legal_person(person)
        return saved.dict()
    except RuntimeError as error:
        return JSONResponse(status_code=400, content=list(error.args))
def describe_outputs(path):
    """Return a list of :class:`WorkflowOutput` objects for target workflow."""
    workflow = _raw_dict(path)
    # One WorkflowOutput per declared workflow_output, in step order.
    return [
        WorkflowOutput(int(order_index), out["output_name"], out["label"])
        for order_index, step in workflow["steps"].items()
        for out in step.get("workflow_outputs", [])
    ]
from typing import Union
def parse_content_length_value(stream: Union[str, int]) -> ContentLengthValue:
    """Parse the value of a `Content-length` header.

    :param stream: The header value, as a string or an integer.
    :return: A `ContentLengthValue` instance.
    :raises ParseError: When the value cannot be cast to an integer.
    """
    try:
        length = int(stream)
    except ValueError as error:
        raise ParseError(
            "Unable to parse Content-length value, must be integer"
        ) from error
    return ContentLengthValue(length=length)
import os
def is_stale(target, source):
    """Report whether `target` is out of date relative to `source`.

    A missing target is always stale; otherwise the source tree's
    modification times are compared against the target's most recent one.
    """
    if os.path.exists(target):
        cutoff = recursive_mtime(target) or 0
        return compare_recursive_mtime(source, cutoff=cutoff)
    return True
def newsToEmail(item, eList):
    """Select the people whose preferences overlap a news item's.

    - item : One news item
    - eList : A list of all person objects
    - return : a list of matching person (email) objects
    """
    IDs = []
    # Hoisted: getPref(item) was recomputed for every person in the loop.
    prefs = getPref(item)
    for e in eList:
        # Python-3 print call; the original py2 `print x` statement is a
        # SyntaxError in this otherwise py3 codebase.
        print(prefs)
        if countCommon(prefs, e.pref) > 0:
            IDs.append(e)
    return IDs
def combine(background_img, figure_img):
    """Replace the green-screen pixels of figure_img with background pixels.

    :param background_img: (SimpleImage) image that replaces the green screen
    :param figure_img: (SimpleImage) image containing the green screen
    :return: figure_img with its green-screen pixels overwritten
    """
    background_img.make_as_big_as(figure_img)
    for col in range(figure_img.width):
        for row in range(figure_img.height):
            pixel = figure_img.get_pixel(col, row)
            total = pixel.red + pixel.green + pixel.blue
            avg = total // 3
            # "Green screen" pixel: green dominates and the pixel is not near-black.
            if pixel.green > avg * THRESHOLD and total > BLACK_PIXEL:
                bg = background_img.get_pixel(col, row)
                pixel.red = bg.red
                pixel.green = bg.green
                pixel.blue = bg.blue
    return figure_img
def slot_into_containers(container_objects, package_objects, overlap_threshold=0.5,
                         unique_assignment=True, forced_assignment=False):
    """Slot objects into the container that holds the largest fraction of each.

    Args:
        container_objects: Candidate containers; each needs a 'bbox'.
        package_objects: Objects to slot; each needs a 'bbox'.
        overlap_threshold: Minimum overlap fraction for a (non-forced) match.
        unique_assignment: When True, a package goes into at most one container.
        forced_assignment: When True, assign the best container even below threshold.

    Returns:
        (container_assignments, package_assignments, best_match_scores):
        per-container lists of package indices, per-package lists of container
        indices, and each package's best overlap fraction.
    """
    best_match_scores = []
    container_assignments = [[] for _ in container_objects]
    package_assignments = [[] for _ in package_objects]
    if len(container_objects) == 0 or len(package_objects) == 0:
        return container_assignments, package_assignments, best_match_scores
    # NOTE: a dead `match_scores = defaultdict(dict)` preceded this loop in the
    # original and was immediately shadowed by the list below; removed.
    for package_num, package in enumerate(package_objects):
        match_scores = []
        package_rect = Rect(package['bbox'])
        package_area = package_rect.getArea()
        for container_num, container in enumerate(container_objects):
            container_rect = Rect(container['bbox'])
            intersect_area = container_rect.intersect(package['bbox']).getArea()
            overlap_fraction = intersect_area / package_area
            match_scores.append({'container': container, 'container_num': container_num, 'score': overlap_fraction})
        sorted_match_scores = sort_objects_by_score(match_scores)
        best_match_score = sorted_match_scores[0]
        best_match_scores.append(best_match_score['score'])
        if forced_assignment or best_match_score['score'] >= overlap_threshold:
            container_assignments[best_match_score['container_num']].append(package_num)
            package_assignments[package_num].append(best_match_score['container_num'])
            if not unique_assignment:  # slot package into all eligible slots
                for match_score in sorted_match_scores[1:]:
                    if match_score['score'] >= overlap_threshold:
                        container_assignments[match_score['container_num']].append(package_num)
                        package_assignments[package_num].append(match_score['container_num'])
                    else:
                        break
    return container_assignments, package_assignments, best_match_scores
def return_function_info_data(data_kind, data_func_name):
    """Closure factory: build a function that inspects database tables.

    Args:
        data_kind (str): Data category, e.g. 'futures'.
        data_func_name (str): Data table name, e.g. 'futures_date'.

    Example:
        info_futures_basic = return_function_info_data(data_kind='futures', data_func_name='futures_date')
        info_futures_basic is a function; calling it executes info_data below.
    """
    @func_time
    def info_data(source, **keywords):
        """Inspect the database files under the configuration folder.

        Parameters:
            source (str): Name of the data source.
            db (str): Database name.
            log (str): Log level, e.g. info, debug; defaults to normal.

        Example:
            info_db()  # inspect every database under the download folder
        """
        # Merge the caller's keywords with the configured defaults for this
        # (source, data_kind, data_func_name) combination.
        my_keywords = KeyWords(keywords, source_kind=source, data_kind=data_kind, data_func_name=data_func_name, function_kind='read')
        db = my_keywords["db"]
        data_table = my_keywords["table"]
        log_level = my_keywords['log']
        log = Log(log_level)  # initialise the log level
        # Print the effective parameters.
        log.standard('normal', db=db, table=data_table)
        log.info('log_level='+log_level)
        db_tool.db_info(db=db, table=data_table, log_level=log_level)
    # Return the inner function info_data.
    return info_data
def landscapes(request):
    """Render the landscapes gallery page with every landscape image."""
    context = {"images": Landscapes.display_image()}
    return render(request, 'all-photos/landscapes.html', context)
def make_sparse_matrix(df):
"""
Make sparse matrix
:param df: train_df [userId, movieId, rating, ...]
:return: sparse_matrix (movie_n) * (user_n)
"""
sparse_matrix = (
df
.groupby('movieId')
.apply(lambda x: pd.Series(x['rating'].values, index=x['userId']))
.unstack()
)
sparse_matrix.index.name = 'movieId'
test_set = [] # (movie_id, user_id, rating)
idx, jdx = sparse_matrix.fillna(0).to_numpy().nonzero()
indice = list(zip(idx, jdx))
np.random.shuffle(indice)
for i, j in indice[:df.shape[0] // 5]:
test_set.append((i, j, sparse_matrix.iloc[i, j]))
sparse_matrix.iloc[i, j] = 0
return sparse_matrix, test_set | 90b2cf8e738da4bdca01bb2f29b231b1781bfcf1 | 3,630,168 |
from datetime import datetime
def genreport():
    """Summarise Earth Engine task statuses.

    Returns:
        list[dict]: one entry per task with its id, state, description, type,
        creation time, queue/run durations in seconds, and any error message.

    Examples:
        >>> genreport()
    """
    task_list = []
    for items in ee.data.getTaskList():
        ttype = items["task_type"]
        tdesc = items["description"]
        tstate = items["state"]
        tid = items["id"]
        # `datetime` here is the class imported from the datetime module, so
        # call `datetime.fromtimestamp` directly; the original
        # `datetime.datetime.fromtimestamp` raised AttributeError under that
        # import.  (Unused tstart/tupdate strings were also dropped.)
        tcreate = datetime.fromtimestamp(
            items["creation_timestamp_ms"] / 1000
        ).strftime("%Y-%m-%d %H:%M:%S")
        # Seconds spent queued and running, respectively.
        tdiffstart = (
            items["start_timestamp_ms"] / 1000 - items["creation_timestamp_ms"] / 1000
        )
        tdiffend = (
            items["update_timestamp_ms"] / 1000 - items["start_timestamp_ms"] / 1000
        )
        # Not every task carries an error message; only a missing key means "none".
        try:
            error_message = items["error_message"]
        except KeyError:
            error_message = "NULL"
        task_list.append({
            "tid": tid,
            "tstate": tstate,
            "tdesc": tdesc,
            "ttype": ttype,
            "tcreate": tcreate,
            "tdiffstart": tdiffstart,
            "tdiffend": tdiffend,
            "error_message": error_message,
        })
    return task_list
def input_file(ntemps, formula, delta_h, enthalpy_temp=0.0, break_temp=1000.0):
    """Build the contents of a ThermP input file.

    :param ntemps: number of temperatures
    :type ntemps: int
    :param formula: chemical formula for species
    :type formula: str
    :param delta_h: enthalpy of formation
    :type delta_h: float
    :param enthalpy_temp: temperature corresponding to enthalpy
    :type enthalpy_temp: float
    :param break_temp: temperature delineating low-T and high-T for fits
    :type break_temp: float
    :rtype: str
    """
    # One "<element> <count>" line per element in the formula.
    atom_dict = util.get_atom_counts_dict(formula)
    composition_str = '\n'.join(
        '{0} {1}'.format(symbol, count) for symbol, count in atom_dict.items()
    )
    # Values substituted into the mako template.
    thermp_keys = {
        'ntemps': ntemps,
        'formula': formula,
        'deltaH': delta_h,
        'enthalpyT': enthalpy_temp,
        'breakT': break_temp,
        'composition_str': composition_str,
    }
    return build_mako_str(
        template_file_name='thermp.mako',
        template_src_path=TEMPLATE_PATH,
        template_keys=thermp_keys)
import typing
def plot_facet_meshfunction(f: dolfin.MeshFunction,
                            names: typing.Optional[IntEnum] = None,
                            invalid_values: typing.Optional[typing.List[int]] = None) -> None:
    """Plot a `size_t` meshfunction defined on facets of a 2D mesh.
    Useful for checking whether boundaries have been tagged as expected.
    `dolfin.plot` should be preferred, but as of FEniCS 2019, it does not support
    plotting a mesh function defined on facets.
    Colors follow `matplotlib`'s default color cycle, with the tag value 0 mapped
    to the zeroth color.
    No MPI support - for use in serial mode only.
    `f`: Mesh function of type `size_t` on facets of a mesh. Any facet for which `f` has
         a nonzero value will be plotted, and colored according to the value of `f`.
         The colors follow the default color cycle of Matplotlib.
    `names`: If provided, names for the integer values are looked up in this `IntEnum`,
         and the lines are labeled (so that `matplotlib.pyplot.legend` can then be
         used to see which is which).
         Any value of the `MeshFunction` that does not have a corresponding entry
         in `names` is ignored (for example, internal facets inside the domain).
         Thus, only facets whose tags are *whitelisted* by presence in `names` will be plotted.
         They will be labeled (for `legend`) as e.g. "INLET (ID#1)" where "INLET" is a name
         from `names`, and `1` is the corresponding tag value.
    `invalid_values`: Alternative for `names`.
         If provided, these tag values will be ignored. Useful values:
         [0] for a manually generated `MeshFunction`, and
         [2**64 - 1] for Gmsh import via `meshio`.
         Thus, all facets whose tags are *not blacklisted* by presence in `invalid_values`
         will be plotted. They will be labeled (for `legend`) as "<boundary> (ID#X)",
         where "X" is the tag value.
         If `names` is provided, it takes precedence.
    No return value.
    """
    mesh = f.mesh()
    # Validate the preconditions documented above before doing any work.
    if mesh.topology().dim() != 2:
        raise NotImplementedError(f"This function only supports meshes of topological dimension 2, got {mesh.topology().dim()}")
    # Simplifying assumption: in geometric dimension 2, we can just discard the third coordinate of the vertices.
    if mesh.geometric_dimension() != 2:
        raise NotImplementedError(f"This function only supports meshes of geometric dimension 2, got {mesh.geometric_dimension()}")
    if f.dim() != 1:
        raise NotImplementedError(f"This function only supports mesh functions on facets (dimension 1); got a function of dimension {f.dim()}")
    if dolfin.MPI.comm_world.size > 1:
        # TODO: add MPI support.
        # Like the MPI plotter above, we should gather all data to the root process.
        # Not that important to implement, though, because mesh generation and import
        # (visualizing which is the primary use case for this function) is often done in serial mode.
        raise NotImplementedError("Facet meshfunction plotting currently only supported in serial mode.")
    if names:
        # Reverse lookup: tag value -> human-readable name, for legend labels.
        tag_to_name = {item.value: item.name for item in names}
    def empty_list() -> typing.List:
        return []
    # tag -> list of 2-vertex segments (plus NaN separators) to plot for that tag.
    plot_data = defaultdict(empty_list)
    for facet in dolfin.facets(mesh):
        tag = f[facet]
        # Whitelist via `names` when given, otherwise blacklist via `invalid_values`.
        ignore_tag = (names and tag not in tag_to_name) or (invalid_values is not None and tag in invalid_values)
        if not ignore_tag:
            vtxs = [vtx.point().array()[:2] for vtx in dolfin.vertices(facet)]  # [[x1, y1], [x2, y2]]
            plot_data[tag].append(vtxs)
            # Insert a NaN entry to force matplotlib to draw each facet separately,
            # instead of connecting them. (They are not in any useful order, and in general,
            # facets with the same tag need not form a connected line.)
            # https://stackoverflow.com/questions/21352580/matplotlib-plotting-numerous-disconnected-line-segments-with-different-colors
            plot_data[tag].append(np.array([[np.nan, np.nan]]))
    # Flatten each tag's segment list into one (N, 2) array, sorted by tag value.
    plot_data = {tag: np.concatenate(vtxss) for tag, vtxss in sorted(plot_data.items(), key=lambda item: item[0])}
    for tag, vtxs in plot_data.items():
        label = f"{tag_to_name[tag]} (ID#{tag})" if names else f"<boundary> (ID#{tag})"
        plt.plot(vtxs[:, 0], vtxs[:, 1], color=colors[tag % len(colors)], label=label)
def get_etf_ticker_name(ticker: str) -> str:
    """Look up the listed name of an ETF ticker.

    Args:
        ticker (str): Ticker symbol, e.g. "069500".

    Returns:
        str: The ETF's name, e.g. "KODEX 200".
    """
    name = krx.get_etx_name(ticker)
    return name
import logging
def getLogger(name='root') -> logging.Logger:
    """Return a correctly initialized logger for tests.

    Args:
        name: Logger name; defaults to 'root'.
    """
    logger = logging.getLogger(name)
    return logger
from pathlib import Path
def process_load_magic(path, cell):
    """Replace `%load` magics in a notebook cell with the referenced script.

    Args:
        path: Path of the notebook; load targets resolve relative to its directory.
        cell: Notebook cell dict whose 'source' may contain load magics.

    Returns:
        bool: True when the cell was modified.
    """
    load_magics = find_load_magics_in_cell(cell)
    if not load_magics:
        return False
    # Hoisted out of the loop: the original re-wrapped `path` in Path() on
    # every iteration; the notebook directory never changes.
    notebook_dir = Path(path).parent
    for magic_string in load_magics:
        script_path = notebook_dir / magic_string.split('load ')[1]
        formatted_script = format_script_for_cell(script_path)
        cell_str = get_cell_content_as_string(cell)
        find_extra_content(cell_str)
        cell['source'] = cell_str + formatted_script
    return True
def public_dict(obj):
    """Return a copy of obj.__dict__ without underscore-prefixed fields."""
    result = {}
    for key, value in vars(obj).items():
        if not key.startswith('_'):
            result[key] = value
    return result
import time
import os
import pickle
def save_preds( t_params, m_params, li_preds, li_timestamps, li_truevalues, custom_test_loc=None, count=0 ):
    """Save predictions to file

    Args:
        t_params (dict): dictionary for train/test params
        m_params (dict): dictionary for m params
        li_preds (list): list of predictions
        li_timestamps (list): corresponding list of timestamps
        li_truevalues (list): corresponding list of true values
        custom_test_loc ([type], optional): [description]. Defaults to None.
        count (int): chunk index appended to the file name when > 0.

    Returns:
        bool
    """
    # Clamp negative predictions to zero.
    li_preds = [ np.where(tnsr<0.0, 0.0, tnsr) for tnsr in li_preds ]
    #li_preds = [ tnsr.numpy() for tnsr in li_preds ] #list of preds: (tss, samples ) or (tss, h, w, samples )
    if custom_test_loc in ["All"] or t_params['t_settings'].get('region_pred', False)==True:
        li_truevalues = [ np.squeeze(tens,axis=1) for tens in li_truevalues] #2D - (tss, h, w)
    else:
        li_truevalues = [ np.reshape(tens, [-1]) for tens in li_truevalues] #list of 1D - (preds )
    li_timestamps = [ np.array(_li).reshape([-1]) for _li in li_timestamps ]
    data_tuple = (li_timestamps, li_preds, li_truevalues)
    # Human-readable date range covered by the predictions (for the log line).
    t1 = time.strftime('%Y-%m-%d', time.localtime(li_timestamps[0][0]))
    t2 = time.strftime('%Y-%m-%d', time.localtime(li_timestamps[-1][-1]))
    # Saving to pickle
    if type( t_params['ctsm_test'] ) == str:
        _path_pred = t_params['output_dir'] + "/{}/Predictions".format(utility.model_name_mkr(m_params, train_test="test", t_params=t_params, custom_test_loc=custom_test_loc, htuning=m_params.get('htuning',False) ))
        # NOTE(review): indentation reconstructed — the remainder of the save
        # logic is assumed to live inside this `if`, since `_path_pred` is only
        # defined here; confirm against the original file.
        if t_params['t_settings'].get('region_pred', False) == True:
            fn = f"_regional"
        else:
            fn = f"local"
        if count >0:
            fn += f"_chunk{count}"
        fn += ".dat"
        if(not os.path.exists(_path_pred) ):
            os.makedirs(_path_pred)
        pickle.dump( data_tuple, open( _path_pred + "/" +fn ,"wb"), protocol=4 )
    print("Saved predictions\t", t1, "--", t2)
    return True
def first_der_K_mulvar(x, kernel_type='Gaussian'):
    """Gradient of a separable, isotropic multivariate kernel K: R^d -> R.

    Component c of the gradient is first_der_k_one_dim(x_c) multiplied by the
    product of k_one_dim(x_l) over every other dimension l != c.
    """
    dim = x.size
    grad_K = np.ones(shape=(1, dim))
    for c in range(dim):
        for l in range(dim):
            if c == l:
                grad_K[0, c] *= first_der_k_one_dim(x[l], kernel_type)
            else:
                grad_K[0, c] *= k_one_dim(x[l], kernel_type)
    return grad_K
from typing import Union
from typing import List
from typing import Dict
def _to_serializable_prompt(
prompt, at_least_one_token=False
) -> Union[str, List[Dict[str, str]]]:
"""
Validates that a prompt and emits the format suitable for serialization as JSON
"""
if isinstance(prompt, str):
if at_least_one_token:
if len(prompt) == 0:
raise ValueError("prompt must contain at least one character")
# Just pass the string through as is.
return prompt
elif isinstance(prompt, list):
return [_to_prompt_item(item) for item in prompt]
raise ValueError(
"Invalid prompt. Prompt must either be a string, or a list of valid multimodal propmt items."
) | 12e11b85bbcaf9137605f987981ad6e48cc6a949 | 3,630,178 |
def lrepeat(elem, n):
    """Return a list containing `elem` repeated `n` times.

    >>> lrepeat(1, 2)
    [1, 1]
    """
    return [elem] * n
def wer(ref_path, hyp_path):
    """Mean Word Error Rate between two line-aligned text files.

    Averages sentence_wer over corresponding line pairs; returns 1.0 when
    either file is empty.
    """
    total = 0.0
    line_count = 0.0
    with open(ref_path) as ref_fp, open(hyp_path) as hyp_fp:
        # zip stops at the shorter file, matching the original pairwise read.
        for ref_line, hyp_line in zip(ref_fp, hyp_fp):
            total += sentence_wer(
                ref_line.strip().split(), hyp_line.strip().split()
            )
            line_count += 1
    if line_count > 0:
        return total / line_count
    return 1.0
def get_supported_hmac_hash(hash_type_str: str) -> crypto.SupportedHashes:
    """Map a hash-type string onto a crypto SupportedHashes enum value.

    Args:
        hash_type_str: String hashtype, i.e. SHA256

    Returns:
        The matching crypto.SupportedHashes member.

    Raises:
        ValueError: when hash_type_str is not a supported HMAC hash type.
    """
    if hash_type_str == "SHA256":
        return crypto.SupportedHashes.sha256
    if hash_type_str == "BLAKE2b512":
        return crypto.SupportedHashes.blake2b
    if hash_type_str == "SHA3-256":
        return crypto.SupportedHashes.sha3_256
    raise ValueError(f"{hash_type_str} is an unsupported HMAC hash type")
import os
def export_cleaned_data(df):
    """Write the merged DataFrame to <cwd>/data/cleaned_merged_seasons.csv.

    Args:
        df: The merged DataFrame to export.

    Returns:
        The same DataFrame, unchanged.
    """
    # The original built the path with bare `join`/`dirname` (never imported,
    # so a NameError at runtime) applied to the string literal "__file__"
    # instead of the variable; use os.path explicitly on the cwd instead.
    filename = 'cleaned_merged_seasons.csv'
    filepath = os.path.join(os.getcwd(), 'data', filename)
    df.to_csv(filepath, encoding='utf-8')
    return df
import tempfile
import os
def get_tmpfile_name():
    """Return a path for a new temporary file without creating it.

    Uses only public APIs; the original relied on the private
    tempfile._get_default_tempdir / _get_candidate_names helpers, which are
    implementation details and may change between Python versions.
    """
    import uuid
    return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
def adjustEdge(tu, isStart):
    """Shift a slot-edge datetime by one minute and return its time.

    Start edges move forward one minute and end edges back one minute, so a
    slot whose end overlaps the next slot's start by a minute never selects
    the wrong slot.
    """
    delta = timedelta(minutes=1)
    adjusted = tu + delta if isStart else tu - delta
    return adjusted.time()
import struct
def parse_extensions(buf):
    """
    Parse TLS extensions in passed buf. Returns an ordered list of extension tuples with
    ordinal extension type as first value and extension data as second value.
    Passed buf must start with the 2-byte extensions length TLV.
    http://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml

    Returns a (extensions_length, extensions) tuple.
    """
    # First two bytes: total length in bytes of the extensions that follow.
    extensions_length = struct.unpack('!H', buf[:2])[0]
    extensions = []
    pointer = 2
    # NOTE(review): `pointer` is an absolute offset (it includes the 2-byte
    # length header) while `extensions_length` excludes it, so the loop stops
    # slightly early relative to the buffer end; this appears to match the
    # upstream fingerprinting code's behavior — confirm before "fixing".
    while pointer < extensions_length:
        # Each extension: 2-byte type, then a 2-byte-length-prefixed data field.
        ext_type = struct.unpack('!H', buf[pointer:pointer+2])[0]
        pointer += 2
        ext_data, parsed = parse_variable_array(buf[pointer:], 2)
        extensions.append((ext_type, ext_data))
        pointer += parsed
    return ( extensions_length, extensions )
def COS(
    number: func_xltypes.XlNumber
) -> func_xltypes.XlNumber:
    """Returns the cosine of the given angle (in radians).

    https://support.office.com/en-us/article/
    cos-function-0fb808a5-95d6-4553-8148-22aebdce5f05
    """
    angle = float(number)
    return np.cos(angle)
def validate_name(dataset):
    """Resolve a dataset name via the wfs/cache table list or the BCDC API.

    Args:
        dataset: Dataset name (case-insensitive).

    Returns:
        The canonical upper-case table name.
    """
    # Hoisted: the original computed dataset.upper() up to three times.
    name = dataset.upper()
    if name in list_tables():
        return name
    return get_table_name(name)
import torch
def farthest_point_sample(xyz, npoint):
    """Iterative farthest point sampling over a batch of point clouds.

    Input:
        xyz: pointcloud data, [B, N, C], where C is probably 3
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    device = xyz.device
    B, N, C = xyz.shape
    sampled_idx = torch.zeros(B, npoint, dtype=torch.long).to(device)
    # Distance from every point to its nearest already-chosen centroid.
    min_dist = torch.ones(B, N).to(device) * 1e10
    # Seed each batch element with a random starting point.
    current = torch.randint(0, N, (B,), dtype=torch.long).to(device)
    batch_range = torch.arange(B, dtype=torch.long).to(device)
    for step in range(npoint):
        sampled_idx[:, step] = current
        chosen = xyz[batch_range, current, :].view(B, 1, C)
        sq_dist = torch.sum((xyz - chosen) ** 2, -1)
        closer = sq_dist < min_dist
        min_dist[closer] = sq_dist[closer]
        # Next centroid: the point farthest from all chosen so far.
        current = torch.max(min_dist, -1)[1]
    return sampled_idx
def auto_scaling(setup_trainer_and_train, config, num_iters=2):
    """
    Auto-scale the number of envs and batch size to maximize GPU utilization.

    :param setup_trainer_and_train: callable run in a child process to train.
    :param config: run configuration dict; mutated in place and returned.
    :param num_iters: number of iterations to use when performing auto-scaling.
    :return: config with 'num_envs' and 'train_batch_size' set to the maxima found.
    """
    def launch_process(func, args):
        """
        Run a Python function on a separate process.
        """
        # A child process isolates out-of-memory failures from this process;
        # any exception raised in the child is re-raised here.
        p = ProcessWrapper(target=func, args=args)
        p.start()
        p.join()
        if p.exception:
            raise p.exception
    def set_num_envs_and_train(num_envs, run_config=config):
        # NOTE: `run_config=config` binds the outer config dict at definition
        # time, so each trial mutates the shared configuration.
        run_config["trainer"]["num_envs"] = num_envs
        # Note that we also set the train batch size equal to
        # the number of environments, so that each block only
        # captures one timestep of the simulation.
        run_config["trainer"]["train_batch_size"] = num_envs
        # Set the appropriate number of episodes in order only
        # run for just `num_iters` iterations (i.e., train_batch_size = num_envs).
        run_config["trainer"]["num_episodes"] = (
            num_iters
            * run_config["trainer"]["train_batch_size"]
            / run_config["env"]["episode_length"]
        )
        # Performing training on a separate process
        launch_process(setup_trainer_and_train, (config, False))
    def set_batch_size_per_env_and_train(train_batch_size_per_env, run_config=config):
        run_config["trainer"]["train_batch_size"] = (
            train_batch_size_per_env * config["trainer"]["num_envs"]
        )
        # Set the appropriate number of episodes in order only
        # run for just `num_iters` iterations (i.e., train_batch_size = num_envs).
        run_config["trainer"]["num_episodes"] = (
            num_iters
            * run_config["trainer"]["train_batch_size"]
            / run_config["env"]["episode_length"]
        )
        # Performing training on a separate process
        launch_process(setup_trainer_and_train, (config, False))
    # Save some initial configs
    num_episodes = config["trainer"]["num_episodes"]
    use_wandb = config["saving"]["use_wandb"]
    # disable wandb
    config["saving"]["use_wandb"] = False
    # First, determine the maximum number of environments (i.e., GPU blocks)
    # that can be run in parallel before running out of thread memory.
    print("=" * 80)
    print("Determining the maximum number of environment replicas to run in parallel.")
    print("=" * 80)
    num_envs = config["trainer"]["num_envs"]
    max_envs = best_param_search(low=num_envs, func=set_num_envs_and_train)
    # Set the `num_envs` parameter to the max value found from above.
    config["trainer"]["num_envs"] = max_envs
    # Next, determine the maximum batch size that can be used
    # without running out of memory.
    print("=" * 80)
    print("Determining the maximum training batch size.")
    print("=" * 80)
    max_batch_size_per_env = best_param_search(func=set_batch_size_per_env_and_train)
    config["trainer"]["train_batch_size"] = (
        max_batch_size_per_env * config["trainer"]["num_envs"]
    )
    # Put back the original number of episodes and use_wandb settings.
    config["trainer"]["num_episodes"] = num_episodes
    config["saving"]["use_wandb"] = use_wandb
    return config
def eta_sat_Vargaftik_and_Yargin_Table():
    """Dynamic viscosity of saturated Li vapor.

    Returns
    -------
    array
        Array of (temperature [K], dynamic viscosity) rows.

    References
    ----------
    Vargaftik, N B, and V S Yargin.
    Ch 7.4: Thermal Conductivity and Viscosity of the Gaseous Phase.
    Handbook of Thermodynamic and Transport Properties of Alkali Metals,
    edited by R. W. Ohse, 45. Blackwell Scientific Publications, 1985.
    Table 36
    """
    # Temperatures: 700 K to 2000 K in 25 K steps (53 values).
    temperatures = list(range(700, 2001, 25))
    # Tabulated viscosities in units of 1e-7; converted on return.
    eta_sats = np.array([98.6, 100.9, 103.0, 105.0, 107.0, 108.9,
                         110.6, 112.3, 113.8, 115.3, 116.6, 117.8, 119.0,
                         120.0, 121.0, 121.9, 122.7, 123.4, 124.1, 124.7,
                         125.3, 125.8, 126.2, 126.7, 127.1, 127.4, 127.8,
                         128.1, 128.4, 128.7, 129.0, 129.3, 129.6, 129.9,
                         130.2, 130.5, 130.7, 131.0, 131.3, 131.7, 132.0,
                         132.3, 132.6, 133.0, 133.3, 133.7, 134.1, 134.4,
                         134.8, 135.2, 135.6, 136.0, 136.4])
    return np.array([temperatures, 1.0e-7 * eta_sats]).T
def getColumn(data, colN):
    """Return column number `colN` (0-based) of `data`."""
    transposed = transpose(data)
    return transposed[colN]
def humanize_arrow_date(date):
    """
    Date is internal UTC ISO format string.
    Output should be "today", "yesterday", "in 5 days", etc.
    Arrow will try to humanize down to the minute, so we
    need to catch 'today' as a special case.

    Falls back to returning the raw input when it cannot be parsed.
    """
    try:
        then = arrow.get(date)
        midnight = arrow.utcnow().replace(hour=0, minute=0, second=0)
        if then.date() == midnight.date():
            return "Today"
        human = then.humanize(midnight)
        if human == "in a day":
            return "Tomorrow"
        if human == "a day ago":
            return "Yesterday"
        return human
    except Exception:
        # Was a bare `except:` (which also swallowed KeyboardInterrupt /
        # SystemExit); the deliberate best-effort fallback is kept.
        return date
def file_compare_files_sum(filelist1, filelist2, filetype='blankspace'):
    """Sum each list of data files and plot both sums plus their difference.

    Built on file_add_data_files; used to check whether two lists of files
    sum to the same array.

    Notes
    -----
    Now only the 1Darray is allowed to contained in each file
    """
    sum1 = file_add_data_files(filelist1, filetype=filetype)
    sum2 = file_add_data_files(filelist2, filetype=filetype)
    fig, (ax_top, ax_bottom) = g.Create_2VAxes()
    ax_top.plot(sum1, label='filelist1')
    ax_top.plot(sum2, label='filelist2')
    ax_top.legend()
    ax_bottom.plot(sum1 - sum2, label='difference')
    ax_bottom.legend()
    return (sum1, sum2, fig)
def toluene_material_stream():
    """
    Create a homogeneous material_stream model
    """
    class material_stream(MaterialStream):
        # Concrete stream pre-configured with the toluene property package.
        def __init__(self, name, description, pp = pp_toluene):
            super().__init__(name, description, property_package=pp())
            # Fixed operating point: mdot 100, T 350 K, P 101325 Pa (1 atm).
            self.mdot.setValue(100.)
            self.T.setValue(350.)
            self.P.setValue(101325)
            # Resolve the mixture at (T, P) before reading its enthalpy.
            self.property_package.resolve_mixture(T=self.T.value, P=self.P.value)
            self.H.setValue(self.property_package["*"].H)
    # Return the configured class (a factory), not an instance.
    return material_stream
def wilight_to_opp_position(value):
    """Convert a WiLight position (1..255) to the 0..100 scale used by opp."""
    scaled = round((value * 100) / 255)
    return scaled if scaled < 100 else 100
import sh
def has_staged_uncommitted():
    """
    Return a boolean indicating whether the repository has staged, but
    uncommitted changes
    """
    try:
        # Exit code 1 from `git diff --cached --exit-code` means there are
        # staged differences.
        sh.git('diff', '--cached', '--exit-code')
    except sh.ErrorReturnCode_1:
        return True
    return False
def get_transition_matrix(exp, group_name, bnames):
    """Markov transition matrices for every TrackingSummary in a group.

    Parameters
    ----------
    exp : FixedCourtshipExperiment
    group_name : string
        Name of group to get transition matrices for.
    bnames : list of string of shape [N_b]
        Each must be a valid behavior key in every TrackingSummary of `exp`.

    Returns
    -------
    transition_matrix : np.ndarray of shape [N_b, N_b, len(group)]
        Rows are 'from' behaviors and columns 'to' behaviors; slice k along
        the third axis is the matrix for the k-th TrackingSummary.  Take the
        mean along axis 2 for the group's mean transition probabilities.
    """
    group = getattr(exp, group_name)
    n_behaviors = len(bnames)
    result = np.zeros(shape=(n_behaviors, n_behaviors, len(group)))
    for k, summary in enumerate(group):
        result[:, :, k] = get_transition_probas(summary, bnames)
    return result
def get_task_link(task_id, task_df):
    """Look up the link stored on a PYBOSSA task.

    Args:
        task_id: Task identifier (coercible to int).
        task_df: DataFrame of tasks indexed by task id.

    Returns:
        The task's info link, or None when the id is unknown.
    """
    try:
        row = task_df.loc[int(task_id)]
    except KeyError:
        return None
    info = row['info']
    return info['link']
from datetime import datetime
def hchart(request):
    """Renders the Highcharts-based chart page."""
    assert isinstance(request, HttpRequest)
    context = {
        'title': 'Chart',
        'message': 'Highcharts Based',
        'year': datetime.now().year,
        'hchart_url': 'bar',
    }
    return render(request, 'research/bar.html', context)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.