| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def representative_dataset_gen(export_config):
    """Gets a python generator of numpy arrays for the given dataset."""
    quantization_config = export_config.quantization_config
    dataset = tfds.builder(
        quantization_config.dataset_name,
        data_dir=quantization_config.dataset_dir)
    dataset.download_and_prepare()
    data = dataset.as_dataset()[quantization_config.dataset_split]
    iterator = data.as_numpy_iterator()
    for _ in range(quantization_config.num_calibration_steps):
        features = next(iterator)
        image = features['image']
        image = preprocess_for_quantization(image, export_config.image_size)
        image = tf.reshape(
            image, [1, export_config.image_size, export_config.image_size, 3])
        yield [image]
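# A minimal sketch (not from the original source) of how a generator like this is
# typically wired into post-training quantization with the TFLite converter; the
# saved-model path and the export_config object are illustrative assumptions.
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model("/path/to/saved_model")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = lambda: representative_dataset_gen(export_config)
tflite_model = converter.convert()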
| 5,344,100
|
def DesignCustomSineWave(family_list, how_many_gen, amp, per, shift_h, shift_v,
show=False, print_phase_mse=False, return_phases=False):
""" "Grid Search" Approach:
Create sine waves with unknown amp, per, shift_h and shift_v in combinatorial manner
and align families to minimize loss across the entire dataset - plot the best fit!
:param family_list: (list) -> list of families; each family holds the cell cycle durations (hours) of its generations
:param how_many_gen: (int) -> number of generations per family to consider (must be 2 or 3)
:param amp: (float) -> amplitude of the sine wave
:param per: (float) -> period of the sine wave (hours)
:param shift_h: (float) -> horizontal (phase) shift of the sine wave
:param shift_v: (float) -> vertical shift of the sine wave
:param show: (bool) -> whether to plot the sine wave and the best-fitting family points
:param print_phase_mse: (bool) -> whether to print the MSE for each tested phase
:param return_phases: (bool) -> whether the best phase found for each family should also be reported
:return: best_model_mse (float) -> the best possible phasing of families
to reach the lowest MSE for the given model
"""
# Check how many generations the families have:
if int(how_many_gen) != 3 and int(how_many_gen) != 2:
raise Exception("Invalid number of generations to consider: how_many_gen must be 2 or 3!")
# Prepare the sine wave specified by the function parameters:
repeats = int(72.0 / per)
if repeats <= 1:
repeats += 1
x_sine = np.linspace(0, repeats * per + 1, int(repeats * per * 5))
y_sine = sine_function(x=x_sine, amp=amp, per=per, shift_h=shift_h, shift_v=shift_v)
if show is True:
plt.plot(x_sine, y_sine, color="dodgerblue")
# Create the return variable - list of all best MSE that could be fitted for each family which will be summed:
mse_best_list = []
phase_best_list = []
for family in family_list:
mse_list = []
mse_family = float("inf")  # best (lowest) MSE found so far for this family; a fixed sentinel could miss the minimum
phase_family = 0
# Increments of 0.1 for chosen period:
for phase in np.linspace(0, per, int(per*10) + 1):
# Create x & y axes:
x_data = np.array([phase, phase + family[0]])
if how_many_gen == 3:
x_data = np.array([phase, phase + family[0], phase + family[0] + family[1]])
y_data_true = np.array(family)
y_data_sine = sine_function(x=x_data, amp=amp, per=per, shift_h=shift_h, shift_v=shift_v)
# Calculate mean squared error:
mse = (np.square(y_data_true - y_data_sine)).mean(axis=None)
mse_list.append(mse)
if print_phase_mse is True:
print ("Mean Square Error = {}; for Phase {} for Family {} for Sine Wave: {} * sin(2*pi/{}*x + {}) + {}"
.format(mse, phase, family, amp, per, shift_h, shift_v))
# Update the lowest mse & the phase when such number was reached:
if mse < mse_family:
mse_family = mse
phase_family = phase
if print_phase_mse is True:
print ("Lowest MSE reached: {} for Phase: {}".format(mse_family, phase_family))
# Plot the best result for the family:
x_best = np.array([phase_family, phase_family + family[0]])
if how_many_gen == 3:
x_best = np.array([phase_family, phase_family + family[0], phase_family + family[0] + family[1]])
y_best = np.array(family)
if show is True:
plt.scatter(x=x_best, y=y_best)
# Append the lowest MSE for this model:
mse_best_list.append(mse_family)
phase_best_list.append(phase_family)
mse_sum = float(np.sum(mse_best_list))
best_model_mse = round(mse_sum, 2)
# Annotate the plot:
if show is True:
plt.xticks(np.arange(0, repeats * per + 1, 6))
plt.xlabel("Oscillation Period / Time [hours]")
plt.ylabel("Cell Cycle Duration [hours]")
plt.title("Sine Wave Parameters: y(x) = {} * sin(2*pi/{}*x + {}) + {}\n"
"Sum of Lowest MSE per each family = {}"
.format(amp, per, shift_h, shift_v, best_model_mse))
plt.grid(axis="both")
plt.savefig("/Users/kristinaulicna/Documents/Rotation_2/Top_Solution_Sine_Wave_{}_gen_families.png"
.format(how_many_gen), bbox_inches="tight")
plt.show()
plt.close()
return best_model_mse
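# A minimal sketch of the sine_function helper this routine assumes, inferred from
# the plot title "y(x) = amp * sin(2*pi/per*x + shift_h) + shift_v"; not necessarily
# the original implementation.
import numpy as np

def sine_function(x, amp, per, shift_h, shift_v):
    return amp * np.sin(2 * np.pi / per * x + shift_h) + shift_v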
| 5,344,101
|
def train_deepfm():
""" train_deepfm """
if config.rank_size > 1:
if config.device_target == "Ascend":
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=device_id)
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True,
all_reduce_fusion_config=[9, 11])
init()
rank_id = int(os.environ.get('RANK_ID'))
elif config.device_target == "GPU":
init()
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target=config.device_target)
context.set_context(graph_kernel_flags="--enable_cluster_ops=MatMul")
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=get_group_size(),
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
rank_id = get_rank()
else:
print("Unsupported device_target ", config.device_target)
exit()
else:
if config.device_target == "Ascend":
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=device_id)
elif config.device_target == "GPU":
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target=config.device_target)
context.set_context(graph_kernel_flags="--enable_cluster_ops=MatMul")
else:
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
config.rank_size = None
rank_id = None
ds_train = create_dataset(config.dataset_path,
train_mode=True,
epochs=1,
batch_size=config.batch_size,
data_type=DataType(config.data_format),
rank_size=config.rank_size,
rank_id=rank_id)
steps_size = ds_train.get_dataset_size()
if config.convert_dtype:
config.convert_dtype = config.device_target != "CPU"
model_builder = ModelBuilder(config, config)
train_net, eval_net = model_builder.get_train_eval_net()
auc_metric = AUCMetric()
model = Model(train_net, eval_network=eval_net, metrics={"auc": auc_metric})
time_callback = TimeMonitor(data_size=ds_train.get_dataset_size())
loss_callback = LossCallBack(loss_file_path=config.loss_file_name)
callback_list = [time_callback, loss_callback]
if config.save_checkpoint:
if config.rank_size:
config.ckpt_file_name_prefix = config.ckpt_file_name_prefix + str(get_rank())
config.ckpt_path = os.path.join(config.ckpt_path, 'ckpt_' + str(get_rank()) + '/')
if config.device_target != "Ascend":
config_ck = CheckpointConfig(save_checkpoint_steps=steps_size,
keep_checkpoint_max=config.keep_checkpoint_max)
else:
config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_steps,
keep_checkpoint_max=config.keep_checkpoint_max)
ckpt_cb = ModelCheckpoint(prefix=config.ckpt_file_name_prefix,
directory=config.ckpt_path,
config=config_ck)
callback_list.append(ckpt_cb)
if config.do_eval:
ds_eval = create_dataset(config.dataset_path, train_mode=False,
epochs=1,
batch_size=config.batch_size,
data_type=DataType(config.data_format))
eval_callback = EvalCallBack(model, ds_eval, auc_metric,
eval_file_path=config.eval_file_name)
callback_list.append(eval_callback)
model.train(config.train_epochs, ds_train, callbacks=callback_list)
| 5,344,102
|
def test_two_days():
"""
Test if the code works as intended when we have only two open days
"""
test_data, test_data_results = load_data_results(base_filename="two_days")
for data, result in zip(test_data, test_data_results):
response = client.post('/prettify', data=data)
assert response.status_code == 200
assert response.json() == result
| 5,344,103
|
def RadarRngEq(G, beam_el, filename):
"""Prints SNR, will be modified for other uses later
SNR = (pt*g^2*lambda_^2*sigma)/((4*pi)^3*k*temp_s*nf*l*r^4)
pt = power transmitted - Watts
freq = radar freq - Hz
gain = antenna gain - db (default = 45)
sigma = RCS - m^2
BW = bandwidth - Hz
NF = noisefigure dB
loss = radar losses dB
range = Km
snr = dB
Most of these will be made into function inputs; for now they are taken from RADARPARAMS.
"""
#######################
# TOGGLE PLOTTING #
#######################
# snrplot = 1
# propplot = 1
# pat_plot = 1
# rcsplot = 1
gainplot = 0
# Local Vars
lambda_ = c0/freq #wavelength
#beta = el_angle
range_ = target_range
ht = target_alt
sigma = target_rcs
F = atmos_effects.multipath(range_, ht, hr)
# dB Conversions
lambda_sqdb = w2db(lambda_**2)
pt_db = w2db(pt) #peak power in dB
k_db = w2db(kb)
sigma_db = w2db(sigma)
To_db = w2db(To)
BW_db = w2db(BW)
range_db = w2db(range_**4)
four_pi_db = w2db((4*pi)**3)
F_db = 4 * w2db(0.0015+F)
tau_db = 10*log10(.2/12000)
det_thresh = 13
# Data Shaping
if isscalar(G) is False:
range_vec = linspace(2000, 100000, G.size) # for graphing
G_vec = broadcast_to(G, (range_vec.size, G.size))
range_vec = broadcast_to(range_vec, (range_vec.size, G.size))
else:
range_vec = linspace(2000, 250000, 1000)
F_graph = atmos_effects.multipath(range_vec[0], ht, hr)
F_graph = 4*w2db(0.0015+F_graph)
#L_a = atmos_effects.atmo_absorp(ht, hr, freq, beta)
# Radar Range Eq
tx_db = pt_db + G + G + lambda_sqdb + sigma_db + F_db
rx_db = four_pi_db + k_db + To_db + BW_db + NF + loss + range_db
snr = tx_db - rx_db
# TODO: Return to this
# R_p = pt_db + gain + gain + lambda_sqdb + sigma_db + tau_db + F_graph + w2db(.01)
# R_n = four_pi_db + k_db + To_db + NF + det_thresh
# R_max = (R_p - R_n)**(1/4)
# R_max = db2w(R_max)
tx_db_graph = pt_db + G.max() + G.max() + lambda_sqdb + sigma_db + F_graph
rx_db_graph = four_pi_db + k_db + To_db + BW_db + NF + loss + w2db(range_vec[0]**4)
snr_graph = tx_db_graph.real - rx_db_graph
tx_noF = pt_db + G.max() + G.max() + lambda_sqdb + sigma_db
rx_noF = four_pi_db + k_db + To_db + BW_db + NF + loss + w2db(range_vec[0]**4)
snr_noF = tx_noF - rx_noF
print("The range at which your target first drops out due to multipath is " +
str(range_vec[0][argmax(snr_graph < det_thresh)]) + " meters")
# HACK: return all variable for viewing sanity check
return snr_graph, range_, sigma_db, F_graph, range_vec, snr_noF
| 5,344,104
|
def get_environment_names():
"""Return a list of defined environment names, with user preference first."""
envlist = [r[0] for r in _session.query(models.Environment.name).order_by(models.Environment.name).all()]
# move user preference to top of list
userenvname = _config.userconfig.get("environmentname", u"default")
envlist.remove(userenvname)
envlist.insert(0, userenvname)
return envlist
| 5,344,105
|
def plot(direction, speed, **kwargs):
    """Create a WindrosePlot, add bars and other standard things.
    Args:
      direction (pint.Quantity): wind direction from North.
      speed (pint.Quantity): wind speeds with units attached.
      **bins (pint.Quantity): wind speed bins to produce the histogram for.
      **nsector (int): The number of directional centers to divide the wind
        rose into. The first sector is centered on north.
      **rmax (float): Hard codes the max radius value for the polar plot.
      **cmap (colormap): Matplotlib colormap to use.
    Returns:
      WindrosePlot
    """
    wp = WindrosePlot(**kwargs)
    bins = kwargs.get("bins")
    if bins is None:
        bins = np.array([2, 5, 10, 20]) * units("mph")
    nsector = kwargs.get("nsector", 8)
    wp.barplot(direction, speed, bins, nsector, cmap=kwargs.get("cmap"))
    wp.plot_calm()
    wp.draw_arrows()
    wp.draw_logo()
    return wp
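# A hypothetical usage sketch; the direction/speed values below are illustrative,
# and the pint registry is assumed to be the `units` object already used above.
import numpy as np

directions = np.array([0, 45, 90, 180, 270]) * units("degree")
speeds = np.array([3, 7, 12, 5, 18]) * units("mph")
wp = plot(directions, speeds, nsector=16)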
| 5,344,106
|
def convert_floor(node, **kwargs):
"""Map MXNet's floor operator attributes to onnx's Floor operator
and return the created node.
"""
return create_basic_op_node('Floor', node, kwargs)
| 5,344,107
|
def journal_zk_cleanup():
"""
Zk history sqlite node cleanup
"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cfg',
help='Journal config file')
parser.add_argument('-p', '--primary',
help='Zookeeper journal')
parser.add_argument('-n', '--nfspath',
help='NFS path')
parser.add_argument('-i', '--interval',
default=900, type=int,
help='Interval in seconds')
parser.add_argument('-a', '--age',
default=3600, type=int,
help='age in seconds')
parser.add_argument('-r', '--nfsregex', required=True,
help='Pattern of files in nfs')
parser.add_argument('-o', '--outfile', required=True,
help='dump output file name')
args = parser.parse_args()
journal_zk_cleanup_main.main(args)
| 5,344,108
|
def test_no_attribute_name_following_value_and_no_close():
"""
Make sure to test an attribute name without a following attribute value and no close bracket.
"""
# Arrange
input_tag_name = "<meta http:equiv"
start_index = 16
expected_resultant_index = len(input_tag_name)
# Act
actual_resultant_index = HtmlHelper.extract_optional_attribute_value(
input_tag_name, start_index
)
# Assert
assert expected_resultant_index == actual_resultant_index
| 5,344,109
|
async def get_qrcode_login_info():
"""获取二维码登录信息"""
url = f"{BASE_URL}qrcode/auth_code"
return await post(url, reqtype="app")
| 5,344,110
|
def get_global_free_state(self):
"""
Recurse get_global_free_state on all child parameters, and hstack them.
Return: Stacked np-array for all Param except for LocalParam
"""
# check if the child has 'get_local_free_state' method
for p in self.sorted_params:
if isinstance(p, (param.Param,param.Parameterized)) and \
not hasattr(p, 'get_global_free_state'):
self.set_local_methods(p)
# Here, additional empty array allows hstacking of empty list
return np.hstack([p.get_global_free_state() for p in self.sorted_params
if isinstance(p, (param.Parameterized, param.Param))]
+ [np.empty(0, np_float_type)])
| 5,344,111
|
def case_activity_update_type():
""" Case Activity Update Types: RESTful CRUD Controller """
return crud_controller()
| 5,344,112
|
def dftregistration(buf1ft,buf2ft,usfac=100):
"""
# function [output Greg] = dftregistration(buf1ft,buf2ft,usfac);
# Efficient subpixel image registration by crosscorrelation. This code
# gives the same precision as the FFT upsampled cross correlation in a
# small fraction of the computation time and with reduced memory
# requirements. It obtains an initial estimate of the crosscorrelation peak
# by an FFT and then refines the shift estimation by upsampling the DFT
# only in a small neighborhood of that estimate by means of a
# matrix-multiply DFT. With this procedure all the image points are used to
# compute the upsampled crosscorrelation.
# Manuel Guizar - Dec 13, 2007
# Portions of this code were taken from code written by Ann M. Kowalczyk
# and James R. Fienup.
# J.R. Fienup and A.M. Kowalczyk, "Phase retrieval for a complex-valued
# object by using a low-resolution image," J. Opt. Soc. Am. A 7, 450-458
# (1990).
# Citation for this algorithm:
# Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
# "Efficient subpixel image registration algorithms," Opt. Lett. 33,
# 156-158 (2008).
# Inputs
# buf1ft Fourier transform of reference image,
# DC in (1,1) [DO NOT FFTSHIFT]
# buf2ft Fourier transform of image to register,
# DC in (1,1) [DO NOT FFTSHIFT]
# usfac Upsampling factor (integer). Images will be registered to
# within 1/usfac of a pixel. For example usfac = 20 means the
# images will be registered within 1/20 of a pixel. (default = 1)
# Outputs
# output = [error,diffphase,net_row_shift,net_col_shift]
# error Translation invariant normalized RMS error between f and g
# diffphase Global phase difference between the two images (should be
# zero if images are non-negative).
# net_row_shift net_col_shift Pixel shifts between images
# Greg (Optional) Fourier transform of registered version of buf2ft,
# the global phase difference is compensated for.
"""
# Compute error for no pixel shift
if usfac == 0:
CCmax = np.sum(buf1ft*np.conj(buf2ft))
rfzero = np.sum(abs(buf1ft)**2)
rgzero = np.sum(abs(buf2ft)**2)
error = 1.0 - CCmax*np.conj(CCmax)/(rgzero*rfzero)
error = np.sqrt(np.abs(error))
diffphase = np.arctan2(np.imag(CCmax),np.real(CCmax))
return error, diffphase
# Whole-pixel shift - Compute crosscorrelation by an IFFT and locate the
# peak
elif usfac == 1:
ndim = np.shape(buf1ft)
m = ndim[0]
n = ndim[1]
CC = sf.ifft2(buf1ft*np.conj(buf2ft))
max1,loc1 = idxmax(CC)
rloc = loc1[0]
cloc = loc1[1]
CCmax=CC[rloc,cloc]
rfzero = np.sum(np.abs(buf1ft)**2)/(m*n)
rgzero = np.sum(np.abs(buf2ft)**2)/(m*n)
error = 1.0 - CCmax*np.conj(CCmax)/(rgzero*rfzero)
error = np.sqrt(np.abs(error))
diffphase=np.arctan2(np.imag(CCmax),np.real(CCmax))
md2 = np.fix(m/2)
nd2 = np.fix(n/2)
if rloc > md2:
row_shift = rloc - m
else:
row_shift = rloc
if cloc > nd2:
col_shift = cloc - n
else:
col_shift = cloc
ndim = np.shape(buf2ft)
nr = int(round(ndim[0]))
nc = int(round(ndim[1]))
Nr = sf.ifftshift(np.arange(-np.fix(1.*nr/2),np.ceil(1.*nr/2)))
Nc = sf.ifftshift(np.arange(-np.fix(1.*nc/2),np.ceil(1.*nc/2)))
Nc,Nr = np.meshgrid(Nc,Nr)
Greg = buf2ft*np.exp(1j*2*np.pi*(-1.*row_shift*Nr/nr-1.*col_shift*Nc/nc))
Greg = Greg*np.exp(1j*diffphase)
image_reg = sf.ifft2(Greg) * np.sqrt(nr*nc)
#return error,diffphase,row_shift,col_shift
return error,diffphase,row_shift,col_shift, image_reg
# Partial-pixel shift
else:
# First upsample by a factor of 2 to obtain initial estimate
# Embed Fourier data in a 2x larger array
ndim = np.shape(buf1ft)
m = int(round(ndim[0]))
n = int(round(ndim[1]))
mlarge=m*2
nlarge=n*2
CC=np.zeros([mlarge,nlarge],dtype=np.complex128)
CC[int(m-np.fix(m/2)):int(m+1+np.fix((m-1)/2)),int(n-np.fix(n/2)):int(n+1+np.fix((n-1)/2))] = (sf.fftshift(buf1ft)*np.conj(sf.fftshift(buf2ft)))[:,:]
# Compute crosscorrelation and locate the peak
CC = sf.ifft2(sf.ifftshift(CC)) # Calculate cross-correlation
max1,loc1 = idxmax(np.abs(CC))
rloc = int(round(loc1[0]))
cloc = int(round(loc1[1]))
CCmax = CC[rloc,cloc]
# Obtain shift in original pixel grid from the position of the
# crosscorrelation peak
ndim = np.shape(CC)
m = ndim[0]
n = ndim[1]
md2 = np.fix(m/2)
nd2 = np.fix(n/2)
if rloc > md2:
row_shift = rloc - m
else:
row_shift = rloc
if cloc > nd2:
col_shift = cloc - n
else:
col_shift = cloc
row_shift=row_shift/2
col_shift=col_shift/2
# If upsampling > 2, then refine estimate with matrix multiply DFT
if usfac > 2:
### DFT computation ###
# Initial shift estimate in upsampled grid
row_shift = 1.*np.round(row_shift*usfac)/usfac;
col_shift = 1.*np.round(col_shift*usfac)/usfac;
dftshift = np.fix(np.ceil(usfac*1.5)/2); ## Center of output array at dftshift+1
# Matrix multiply DFT around the current shift estimate
CC = np.conj(dftups(buf2ft*np.conj(buf1ft),np.ceil(usfac*1.5),np.ceil(usfac*1.5),usfac,\
dftshift-row_shift*usfac,dftshift-col_shift*usfac))/(md2*nd2*usfac**2)
# Locate maximum and map back to original pixel grid
max1,loc1 = idxmax(np.abs(CC))
rloc = int(round(loc1[0]))
cloc = int(round(loc1[1]))
CCmax = CC[rloc,cloc]
rg00 = dftups(buf1ft*np.conj(buf1ft),1,1,usfac)/(md2*nd2*usfac**2)
rf00 = dftups(buf2ft*np.conj(buf2ft),1,1,usfac)/(md2*nd2*usfac**2)
rloc = rloc - dftshift
cloc = cloc - dftshift
row_shift = 1.*row_shift + 1.*rloc/usfac
col_shift = 1.*col_shift + 1.*cloc/usfac
# If upsampling = 2, no additional pixel shift refinement
else:
rg00 = np.sum(buf1ft*np.conj(buf1ft))/m/n;
rf00 = np.sum(buf2ft*np.conj(buf2ft))/m/n;
error = 1.0 - CCmax*np.conj(CCmax)/(rg00*rf00);
error = np.sqrt(np.abs(error));
diffphase = np.arctan2(np.imag(CCmax),np.real(CCmax));
# If it's only one row or column, the shift along that dimension has no
# effect. We set it to zero.
if md2 == 1:
row_shift = 0
if nd2 == 1:
col_shift = 0;
# Compute registered version of buf2ft
if usfac > 0:
ndim = np.shape(buf2ft)
nr = ndim[0]
nc = ndim[1]
Nr = sf.ifftshift(np.arange(-np.fix(1.*nr/2),np.ceil(1.*nr/2)))
Nc = sf.ifftshift(np.arange(-np.fix(1.*nc/2),np.ceil(1.*nc/2)))
Nc,Nr = np.meshgrid(Nc,Nr)
Greg = buf2ft*np.exp(1j*2*np.pi*(-1.*row_shift*Nr/nr-1.*col_shift*Nc/nc))
Greg = Greg*np.exp(1j*diffphase)
elif (nargout > 1) and (usfac == 0):
Greg = np.dot(buf2ft,np.exp(1j*diffphase))
#plt.figure(3)
image_reg = sf.ifft2(Greg) * np.sqrt(nr*nc)
#imgplot = plt.imshow(np.abs(image_reg))
#a_ini = np.zeros((100,100))
#a_ini[40:59,40:59] = 1.
#a = a_ini * np.exp(1j*15.)
#plt.figure(6)
#imgplot = plt.imshow(np.abs(a))
#plt.figure(3)
#imgplot = plt.imshow(np.abs(a)-np.abs(image_reg))
#plt.colorbar()
# return error,diffphase,row_shift,col_shift,Greg
return error,diffphase,row_shift,col_shift, image_reg
| 5,344,113
|
def parametrize_simulations(args):
"""Parametrize simulations"""
if args.type == INSTANCE_COUNTS:
return instance_count_sims(args)
if args.type == FEATURE_COUNTS:
return feature_count_sims(args)
if args.type == NOISE_LEVELS:
return noise_level_sims(args)
if args.type == SHUFFLING_COUNTS:
return shuffling_count_sims(args)
raise NotImplementedError("Unknown simulation type")
| 5,344,114
|
def connect(base_url: Union[str, URL], database_id: int = DJ_DATABASE_ID) -> Connection:
    """
    Create a connection to the database.
    """
    if not isinstance(base_url, URL):
        base_url = URL(base_url)
    return Connection(base_url, database_id)
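# A hypothetical usage sketch; the URL and database id below are illustrative only.
conn = connect("http://localhost:8000")
conn_other = connect(URL("http://localhost:8000"), database_id=2)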
| 5,344,115
|
def micore_tf_deps():
"""Dependencies for Tensorflow builds.
Returns:
list of dependencies which must be used by each cc_library
which refers to Tensorflow. Enables the library to compile both for
Android and for Linux. Use this macro instead of directly
declaring dependencies on Tensorflow.
"""
return micore_if(
android = [
# Link to library which does not contain any ops.
"@org_tensorflow//tensorflow/core:portable_tensorflow_lib_lite",
"@gemmlowp//:eight_bit_int_gemm",
"@fft2d//:fft2d",
],
ios = [
"@org_tensorflow//tensorflow/core:portable_tensorflow_lib",
"@gemmlowp//:eight_bit_int_gemm",
"@fft2d//:fft2d",
],
default = [
# Standard references for Tensorflow when building for Linux. We use
# an indirection via the alias targets below, to facilitate whitelisting
# these deps in the mobile license presubmit checks.
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
],
)
| 5,344,116
|
def de_bruijn(k, n):
    """
    de Bruijn sequence for alphabet k
    and subsequences of length n.
    """
    try:
        # let's see if k can be cast to an integer;
        # if so, make our alphabet a list
        _ = int(k)
        alphabet = list(map(str, range(k)))
    except (ValueError, TypeError):
        alphabet = k
        k = len(k)
    a = [0] * k * n
    sequence = []
    def db(t, p):
        if t > n:
            if n % p == 0:
                sequence.extend(a[1:p + 1])
        else:
            a[t] = a[t - p]
            db(t + 1, p)
            for j in range(a[t - p] + 1, k):
                a[t] = j
                db(t + 1, t)
    db(1, 1)
    return "".join(alphabet[i] for i in sequence)
| 5,344,117
|
def audit_(
config: Configuration,
report_only: bool,
report_format: str,
file_format: str,
sources: List[str],
file: TextIO,
) -> None:
"""
Checks a given dependency file against advisory databases.
\b
FILE is the path to the dependency file to audit.
"""
config.report_only = report_only
config.report_format = report_format
# Only override sources if at least once --source is passed.
if len(sources) > 0:
config.sources = list(set(sources))
if len(config.sources) == 0:
raise click.ClickException(
"Please specify or configure at least one advisory source."
)
packages = extract_package_list_from(config, file, file_format)
if config.verbose:
click.secho("Checking ", nl=False, err=True)
click.secho(f"{len(packages)}", fg="green", nl=False, err=True)
click.secho(" package(s).", err=True)
click.secho("Using ", nl=False, err=True)
click.secho(f"{config.sources}", fg="green", nl=False, err=True)
click.secho(" as source(s).", err=True)
results, vulnerable = audit(config, packages)
report(config, results)
if len(vulnerable) > 0 and config.verbose:
click.secho("", err=True)
click.secho(
f" Found {len(vulnerable)} vulnerable packages!",
fg="red",
blink=True,
err=True,
)
click.secho("", err=True)
elif config.verbose:
click.secho("", err=True)
click.secho(f" No vulnerable packages found!", fg="green", err=True)
# By default we want to exit with a non-zero exit-code when we encounter
# any findings.
if not config.report_only and len(vulnerable) > 0:
sys.exit(1)
| 5,344,118
|
def append_artist(songs, artist):
    """
    When the songs gathered from the description contain only the
    song titles, it usually means it's an artist's album.
    If an artist was provided, append each song title to the artist
    using a hyphen (artist - song)
    :param list songs: List of song titles (only song title)
    :param str artist: Artist to search for with the song names
    :return list: song titles along with the artist
    """
    songs_complete = []
    for song in songs:
        song_complete = f'{artist} - {song}'
        songs_complete.append(song_complete)
    return songs_complete
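# Example usage with illustrative song titles.
print(append_artist(["Bohemian Rhapsody", "Somebody to Love"], "Queen"))
# -> ['Queen - Bohemian Rhapsody', 'Queen - Somebody to Love']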
| 5,344,119
|
def pose2pandas(pose: pyrosetta.Pose, scorefxn: pyrosetta.ScoreFunction) -> pd.DataFrame:
"""
Return a pandas dataframe from the scores of the pose
:param pose:
:return:
"""
pose.energies().clear_energies()
scorefxn.weights() # neccessary?
emopts = pyrosetta.rosetta.core.scoring.methods.EnergyMethodOptions(scorefxn.energy_method_options())
emopts.hbond_options().decompose_bb_hb_into_pair_energies(True)
scorefxn.set_energy_method_options(emopts)
scorefxn(pose)
scores = pd.DataFrame(pose.energies().residue_total_energies_array())
pi = pose.pdb_info()
scores['residue'] = scores.index.to_series() \
.apply(lambda r: pose.residue(r + 1) \
.name1() + pi.pose2pdb(r + 1)
)
return scores
| 5,344,120
|
def rantest(seed,N=100):
"""get some random numbers"""
buff = np.zeros(N,dtype=np.double)
ct_buff = buff.ctypes.data_as(ct.POINTER(ct.c_double))
sim.rantest(seed,N,ct_buff)
return buff
| 5,344,121
|
def resolve_nomination_action_items():
"""Resolve action items.
Resolve all the action items relevant to nomination reminders after the
10th day of each month.
"""
today = now().date()
if today.day == NOMINATION_END_DAY:
mentors = UserProfile.objects.filter(user__groups__name='Mentor')
action_model = ContentType.objects.get_for_model(UserProfile)
# All the completed action items are always resolved
name = u'{0} {1}'.format(NOMINATION_ACTION_ITEM, today.strftime('%B'))
items = (ActionItem.objects.filter(content_type=action_model,
object_id__in=mentors,
name=name)
.exclude(completed=True))
items.update(resolved=True)
| 5,344,122
|
def tf_efficientnet_b0_ap(pretrained=False, **kwargs):
""" EfficientNet-B0 AdvProp. Tensorflow compatible variant """
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet(
'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
| 5,344,123
|
def pi_del(
shape,
y_tgt_star,
pad_symbol=0,
plh_symbol=0,
bos_symbol=0,
eos_symbol=0,
Kmax=100,
device="cpu",
):
"""Operations and states to edit a partially deleted version of y_star back to y_star."""
# shape = B x N x M
# y_tgt_star : B x M
shape = list(shape)
shape[-1] = y_tgt_star.size(-1)
shape = tuple(shape)
del_tgt = torch.ones(shape, dtype=torch.long, device=device)
plh_tgt = -torch.ones(
(shape[0], shape[1], shape[2] - 1), dtype=torch.long, device=device
)
cmb_tgt = -torch.ones(shape[0], shape[2], shape[1], dtype=torch.long, device=device)
y_plh = torch.full(
(shape[0], shape[1], shape[2]), pad_symbol, dtype=torch.long, device=device
)
y_cmb = torch.full(shape, pad_symbol, dtype=torch.long, device=device)
y_tok = torch.full_like(y_tgt_star, pad_symbol, dtype=torch.long, device=device)
y_star_n = y_tgt_star.view(shape[0], 1, shape[-1]).expand(shape)
# tok_mask = torch.zeros_like(y_star_n, dtype=bool, device=device)
mask = (
((torch.rand(y_star_n.shape, device=device) > 0.2) & (y_star_n.ne(pad_symbol)))
| (y_star_n == bos_symbol)
| (y_star_n == eos_symbol)
)
tok_mask = mask.any(1)
sorted_ = mask.long().sort(-1, descending=True)
sorted_mask = sorted_[0].bool()
y_plh[sorted_mask] = y_star_n[mask]
y_cmb[y_star_n.ne(pad_symbol)] = plh_symbol
y_cmb[mask] = y_star_n[mask]
y_tok[y_tgt_star.ne(pad_symbol)] = plh_symbol
y_tok[tok_mask] = y_tgt_star[tok_mask]
idx = sorted_[1]
plh_tgt = idx[:, :, 1:] - idx[:, :, :-1] - 1
plh_tgt[~sorted_mask[:, :, 1:]] = 0
plh_tgt = plh_tgt.clamp(0, Kmax - 1)
cmb_tgt = mask.long()
plh_mask = y_plh.ne(pad_symbol)[:, :, 1:]
del_mask = torch.zeros(shape, dtype=bool, device=device)
cmb_mask = y_tgt_star.ne(pad_symbol).view(shape[0], 1, shape[-1]).expand_as(y_cmb)
return {
"del_tgt": del_tgt,
"plh_tgt": plh_tgt,
"cmb_tgt": cmb_tgt,
"tok_tgt": y_tgt_star,
"del_mask": del_mask,
"plh_mask": plh_mask,
"cmb_mask": cmb_mask,
"tok_mask": tok_mask,
"y_plh": y_plh,
"y_cmb": y_cmb,
"y_tok": y_tok,
}
| 5,344,124
|
def create_service_account(project_id: str, service_account_name: str,
role_name: str, file_name: str) -> Dict[str, Any]:
"""Create a new service account.
Args:
project_id: GCP project id.
service_account_name: The service account name.
role_name: The role to be assigned to the service account.
file_name: The file where service account key will be stored.
Returns:
service_account: The newly created service account.
Raises:
ValueError: If the service_account_name is empty.
ValueError: If the file_name is empty.
"""
if not service_account_name:
raise ValueError('Service account name cannot be empty.')
if not file_name:
raise ValueError('The file name cannot be empty.')
service_account_details = get_service_account(project_id,
service_account_name)
if service_account_details:
return service_account_details
logging.info('Creating "%s" service account in "%s" project',
service_account_name, project_id)
request = _get_service_account_client().create(
name='projects/' + project_id,
body={
'accountId': service_account_name,
'serviceAccount': {
'displayName': service_account_name.upper()
},
})
service_account_details = utils.execute_request(request)
set_service_account_role(project_id, service_account_name, role_name)
create_service_account_key(project_id, service_account_name, file_name)
return service_account_details
| 5,344,125
|
def test_ode_FE():
    """Test that a linear u(t)=a*t+b is exactly reproduced."""
    def exact_solution(t):
        return a*t + b
    def f(u, t):  # ODE
        return a + (u - exact_solution(t))**m
    a = 4
    b = -1
    m = 6
    dt = 0.5
    T = 20.0
    u, t = ode_FE(f, exact_solution(0), dt, T)
    diff = abs(exact_solution(t) - u).max()
    tol = 1E-15  # Tolerance for float comparison
    success = diff < tol
    assert success
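# A minimal Forward Euler sketch of the ode_FE solver this test assumes; the
# signature is inferred from the call above and this is not the original code.
import numpy as np

def ode_FE(f, U_0, dt, T):
    N_t = int(round(T / dt))
    u = np.zeros(N_t + 1)
    t = np.linspace(0, N_t * dt, N_t + 1)
    u[0] = U_0
    for n in range(N_t):
        u[n + 1] = u[n] + dt * f(u[n], t[n])
    return u, t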
| 5,344,126
|
def file_finder():
"""
This function allows to the user
to select a file using the dialog with tkinter.
:return path_name
:rtype str
the string of the path_name file.
"""
root = Tk()
root.title("File Finder")
root.geometry("500x400")
root.attributes("-topmost", True)
root.withdraw()
path_name = filedialog.askopenfilename()
root.withdraw()
return path_name
| 5,344,127
|
def htm_search_cone(IndexFile_data,Long,Lat,Radius,Ind=None,Son_index=np.arange(2,6),PolesLong_index=np.arange(6,11,2),PolesLat_index=np.arange(7,12,2)):
#print('I am running htm_search_cone')
"""Description: Search for all HTM leafs intersecting a small circles
Input :-Either a table of HTM data or an open HDF5 object in which the HTM data is stored
-Longitude (radians) to search
-Latitude (radians) to search
-Radius of the small circle
Output : a vector of indexes of the winner(s): the "address" in the index file of the smallest leaf(s) intercepting the cone
By : Maayane Soumagnac (original Matlab function by Eran Ofek) Feb 2018
"""
if Ind is None:
Sons=np.arange(8)
else:
Sons=Ind.astype(int)
ID=[]
Nsons=len(Sons)
PolesLong=np.zeros((3,Nsons)) # 3 rows, Nsons columns: each column will hold the pole longitudes of a son mesh
PolesLat=np.zeros((3, Nsons)) # 3 rows, Nsons columns: each column will hold the pole latitudes of a son mesh
for i in range(Nsons):#OPTIMIZE
PolesLong[:,i]=IndexFile_data[PolesLong_index[:],Sons[i]] # each column is the 3 pole longitudes of a son mesh (orientation to be confirmed)
PolesLat[:,i]=IndexFile_data[PolesLat_index[:],Sons[i]] # each column is the 3 pole latitudes of a son mesh (orientation to be confirmed)
Flag=celestial.cone_in_polysphere(PolesLong,PolesLat,Long,Lat,Radius) #check if the cone intercept any of the sons meshes
for i in range(Nsons): #OPTIMIZABLE?
if Flag[i]==1: #i.e. if the cone overlap the son with index i
if np.isnan(IndexFile_data[Son_index[:],Sons[i]]).all()==True:# NaNs at this son's index mean it is a leaf: the data lives here and we cannot go further down the tree
ID.append(Sons[i])
else:
Ind = IndexFile_data[Son_index[:], Sons[i]] - 1.
#RECURSION IS HERE
ID.extend(htm_search_cone(IndexFile_data,Long,Lat,Radius,Ind=Ind))
return ID
| 5,344,128
|
def getHostOsVersion():
"""
Returns the host OS version. This is platform.release with additional
distro indicator on linux.
"""
sVersion = platform.release();
sOs = getHostOs();
if sOs == 'linux':
sDist = '';
try:
# try /etc/lsb-release first to distinguish between Debian and Ubuntu
oFile = open('/etc/lsb-release');
for sLine in oFile:
oMatch = re.search(r'(?:DISTRIB_DESCRIPTION\s*=)\s*"*(.*)"', sLine);
if oMatch is not None:
sDist = oMatch.group(1).strip();
except:
pass;
if sDist:
sVersion += ' / ' + sDist;
else:
asFiles = \
[
[ '/etc/debian_version', 'Debian v'],
[ '/etc/gentoo-release', '' ],
[ '/etc/oracle-release', '' ],
[ '/etc/redhat-release', '' ],
[ '/etc/SuSE-release', '' ],
];
for sFile, sPrefix in asFiles:
if os.path.isfile(sFile):
try:
oFile = open(sFile);
sLine = oFile.readline();
oFile.close();
except:
continue;
sLine = sLine.strip()
if sLine:
sVersion += ' / ' + sPrefix + sLine;
break;
elif sOs == 'solaris':
sVersion = platform.version();
if os.path.isfile('/etc/release'):
try:
oFile = open('/etc/release');
sLast = oFile.readlines()[-1];
oFile.close();
sLast = sLast.strip();
if sLast:
sVersion += ' (' + sLast + ')';
except:
pass;
elif sOs == 'darwin':
sOsxVersion = platform.mac_ver()[0];
codenames = {"4": "Tiger",
"5": "Leopard",
"6": "Snow Leopard",
"7": "Lion",
"8": "Mountain Lion",
"9": "Mavericks",
"10": "Yosemite",
"11": "El Capitan",
"12": "Sierra",
"13": "High Sierra",
"14": "Unknown 14", }
sVersion += ' / OS X ' + sOsxVersion + ' (' + codenames[sOsxVersion.split('.')[1]] + ')'
elif sOs == 'win':
class OSVersionInfoEx(ctypes.Structure):
""" OSVERSIONEX """
kaFields = [
('dwOSVersionInfoSize', ctypes.c_ulong),
('dwMajorVersion', ctypes.c_ulong),
('dwMinorVersion', ctypes.c_ulong),
('dwBuildNumber', ctypes.c_ulong),
('dwPlatformId', ctypes.c_ulong),
('szCSDVersion', ctypes.c_wchar*128),
('wServicePackMajor', ctypes.c_ushort),
('wServicePackMinor', ctypes.c_ushort),
('wSuiteMask', ctypes.c_ushort),
('wProductType', ctypes.c_byte),
('wReserved', ctypes.c_byte)]
_fields_ = kaFields # pylint: disable=invalid-name
def __init__(self):
super(OSVersionInfoEx, self).__init__()
self.dwOSVersionInfoSize = ctypes.sizeof(self)
oOsVersion = OSVersionInfoEx()
rc = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(oOsVersion))
if rc == 0:
# Python platform.release() is not reliable for newer server releases
if oOsVersion.wProductType != 1:
if oOsVersion.dwMajorVersion == 10 and oOsVersion.dwMinorVersion == 0:
sVersion = '2016Server';
elif oOsVersion.dwMajorVersion == 6 and oOsVersion.dwMinorVersion == 3:
sVersion = '2012ServerR2';
elif oOsVersion.dwMajorVersion == 6 and oOsVersion.dwMinorVersion == 2:
sVersion = '2012Server';
elif oOsVersion.dwMajorVersion == 6 and oOsVersion.dwMinorVersion == 1:
sVersion = '2008ServerR2';
elif oOsVersion.dwMajorVersion == 6 and oOsVersion.dwMinorVersion == 0:
sVersion = '2008Server';
elif oOsVersion.dwMajorVersion == 5 and oOsVersion.dwMinorVersion == 2:
sVersion = '2003Server';
sVersion += ' build ' + str(oOsVersion.dwBuildNumber)
if oOsVersion.wServicePackMajor:
sVersion += ' SP' + str(oOsVersion.wServicePackMajor)
if oOsVersion.wServicePackMinor:
sVersion += '.' + str(oOsVersion.wServicePackMinor)
return sVersion;
| 5,344,129
|
def archived_changes(archivedir='./edi_requests/', scope='knb-lter-jrn',
dedup=True, parsedt=False):
"""
Load archived PASTA change records from xml files and parse into dataframe.
Options
archivedir path to archive directory string ('./edi_requests')
scope EDI scope string ('knb-lter-jrn')
dedup remove duplicates boolean (True)
parsedt parse 'date' field to datetime index boolean (False)
"""
# List files and select scope
files = os.listdir(archivedir)
scopefiles = sorted([f for f in files if scope in f])
# Load each archive, convert to dataframe, and concatenate
for i, f in enumerate(scopefiles):
print('Reading archived PASTA request {0}'.format(f))
root = rq.load_xml(os.path.join('edi_requests', f))
df = changeroot_to_df(root)
if i==0:
df_out = df
else:
df_out = pd.concat([df_out, df])
# dedup and parsedt options
if dedup:
df_out = drop_duplicates(df_out)
if parsedt:
df_out.index = pd.to_datetime(df_out['date'])
#, format='%Y-%b-%dT%H:%M:%S.%f')
return(df_out)
| 5,344,130
|
def test_ubuntu_too_old():
"""
Error with a useful message when running in older Ubuntu
"""
output = run_bootstrap('old-distro-test', 'ubuntu:16.04')
assert output.stdout == 'The Littlest JupyterHub requires Ubuntu 18.04 or higher\n'
assert output.returncode == 1
| 5,344,131
|
def get_missing_columns(missing_data):
    """
    Returns the names of columns that contain missing data, as a list
    :param
        missing_data : return of missing_data(df)
    :return
        list: list containing columns with missing data
    """
    missing_data = missing_data[missing_data['percent'] > 0]
    missing_columns = missing_data.index.tolist()
    return missing_columns
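# A hypothetical example, assuming missing_data(df) returns a DataFrame indexed by
# column name with a 'percent' column of missing-value percentages.
import pandas as pd

missing_stats = pd.DataFrame({"percent": [12.5, 0.0, 3.1]},
                             index=["age", "name", "income"])
print(get_missing_columns(missing_stats))  # -> ['age', 'income']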
| 5,344,132
|
def read(request):
"""Render the page for a group."""
pubid = request.matchdict["pubid"]
slug = request.matchdict.get("slug")
group = models.Group.get_by_pubid(pubid)
if group is None:
raise exc.HTTPNotFound()
if slug is None or slug != group.slug:
url = request.route_url('group_read',
pubid=group.pubid,
slug=group.slug)
return exc.HTTPMovedPermanently(url)
if not request.authenticated_userid:
return _login_to_join(request, group)
else:
if group in request.authenticated_user.groups:
return _read_group(request, group)
else:
return _join(request, group)
| 5,344,133
|
def train_model(
model,
device,
train_data_loader,
valid_data_loader,
criterion, optimizer, scheduler, num_epochs=5):
"""
training
Parameters
--------------
model : DogClassificationModel
Network model to be trained.
device : device
cuda or cpu
train_data_loader : dataloader
dataloader for training
valid_data_loader : dataloader
dataloader for validation
criterion :
Loss function.
optimizer :
Optimizer.
scheduler :
Learning rate scheduler.
num_epochs : int
The number of epochs.
Returns
--------------
model : DogClassificationModel
Trained model.
"""
since = time.time()
model = model.to(device)
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
bar = tqdm(total = len(train_data_loader))
bar.set_description("Epoch: {}/{}".format(epoch+1, num_epochs))
"""
Training Phase
"""
model.train()
running_loss = 0.0
running_corrects = 0
for j, (inputs, labels) in enumerate(train_data_loader):
optimizer.zero_grad()
tmp_loss_item = 0.0
# training
with torch.set_grad_enabled(True):
outputs = model(inputs.to(device))
torch.cuda.empty_cache()
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels.to(device))
# backward + optimize only if in training phase
loss.backward()
optimizer.step()
tmp_loss_item = loss.item()
# statistics
running_loss += tmp_loss_item * inputs.size(0)
running_corrects += torch.sum(preds.to('cpu') == labels.data)
# progress bar
bar.update(1)
tmp_loss = float(running_loss / (j+1)) / 32 # 32: mini-batch size
tmp_acc = float(running_corrects // (j+1)) / 32
bar.set_postfix(OrderedDict(loss=tmp_loss, acc=tmp_acc))
# update learning rate scheduler
scheduler.step()
dataset_size = len(train_data_loader.dataset)
epoch_loss = running_loss / dataset_size
epoch_acc = running_corrects.double() / dataset_size
"""
Validation Phase
"""
model.eval() # Set model to validation mode
val_running_loss = 0.0
val_running_corrects = 0
# Iterate over data.
for inputs, labels in valid_data_loader:
val_inputs = inputs.to(device)
val_labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.no_grad():
val_outputs = model(val_inputs)
_, preds = torch.max(val_outputs, 1)
loss = criterion(val_outputs, val_labels)
# statistics
val_running_loss += loss.item() * val_inputs.size(0)
val_running_corrects += torch.sum(preds == val_labels.data)
dataset_size = len(valid_data_loader.dataset)
val_epoch_loss = val_running_loss / dataset_size
val_epoch_acc = val_running_corrects.double() / dataset_size
print('VALIDATION Loss: {:.4f} Acc: {:.4f}'.format(val_epoch_loss, val_epoch_acc))
print("Elapsed time: {} [sec]".format(time.time() - since))
# deep copy the model
if val_epoch_acc > best_acc:
best_acc = val_epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
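# A hypothetical invocation sketch with a toy model and random data standing in for
# DogClassificationModel and the real dataloaders; values are placeholders only.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))  # toy stand-in
data = TensorDataset(torch.randn(64, 3, 32, 32), torch.randint(0, 10, (64,)))
train_loader = DataLoader(data, batch_size=32, shuffle=True)
valid_loader = DataLoader(data, batch_size=32)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
best_model = train_model(model, device, train_loader, valid_loader,
                         criterion, optimizer, scheduler, num_epochs=1)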
| 5,344,134
|
def run(parameter):
"""
The entry function of this code.
Args:
parameter: the super-parameter
"""
print(json.dumps(parameter, indent=2))
time.sleep(2)
slot_set = pickle.load(file=open(parameter["slot_set"], "rb"))
action_set = pickle.load(file=open(parameter["action_set"], "rb"))
disease_symptom = pickle.load(file=open(parameter["disease_symptom"], "rb"))
steward = RunningSteward(parameter=parameter,checkpoint_path=parameter["checkpoint_path"])
print('action_set', action_set)
warm_start = parameter.get("warm_start")
warm_start_epoch_number = parameter.get("warm_start_epoch_number")
train_mode = parameter.get("train_mode")
agent_id = parameter.get("agent_id")
simulate_epoch_number = parameter.get("simulate_epoch_number")
# Warm start.
if warm_start == True and train_mode == True:
print("warm starting...")
agent = AgentRule(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
steward.dialogue_manager.set_agent(agent=agent)
steward.warm_start(epoch_number=warm_start_epoch_number)
# exit()
if agent_id.lower() == 'agentdqn':
agent = AgentDQN(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agentrandom':
agent = AgentRandom(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agentrule':
agent = AgentRule(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agenthrl':
agent = AgentHRL(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoaljoint':
agent = AgentWithGoalJoint(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal':
agent = AgentWithGoal(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal2':
agent = AgentWithGoal2(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal3':
from src.dialogue_system.agent.agent_with_goal_3 import AgentWithGoal as AgentWithGoal3
agent = AgentWithGoal3(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,
parameter=parameter)
else:
raise ValueError('Agent id should be one of [AgentRule, AgentDQN, AgentRandom, AgentHRL, AgentWithGoal, AgentWithGoal2, AgentWithGoalJoint].')
steward.dialogue_manager.set_agent(agent=agent)
if train_mode is True: # Train
steward.simulate(epoch_number=simulate_epoch_number, train_mode=train_mode)
else: # test
for index in range(simulate_epoch_number):
res = steward.evaluate_model(dataset='test', index=index)
return res
| 5,344,135
|
def convert(
        value: str,
        conversion_recipes: Iterable[ConversionRecipe[ConvertResultType]]) -> ConvertResultType:
    """
    Given a string value and a series of conversion recipes, attempt to convert the value using the
    recipes.
    If none of the recipes declare themselves as applicable, then raise
    :py:class:`NoApplicableConversionRecipeError`. If none of the recipes that declare themselves
    as eligible run successfully, then raise :py:class:`NoSuccessfulConversionRecipeError`.
    Parameters
    ----------
    value : str
        The string value we are attempting to convert.
    conversion_recipes : Iterable[ConversionRecipe[ConvertResultType]]
        A series of conversion recipes.
    Returns
    -------
    The converted value.
    """
    none_applied = True
    for conversion_recipe in conversion_recipes:
        if conversion_recipe.applicable(value):
            none_applied = False
            try:
                return conversion_recipe.load(value)
            except Exception:
                pass
    if none_applied:
        raise NoApplicableConversionRecipeError(
            f"Could not find applicable conversion recipe for {value}")
    raise NoSuccessfulConversionRecipeError(
        f"All applicable conversion recipes failed to run successfully for {value}.")
| 5,344,136
|
def test_get_cell_type_error_not_found(graph_cases, selenium_extras):
"""
:type graph_cases: qmxgraph.tests.conftest.GraphCaseFactory
:type selenium_extras: qmxgraph.tests.conftest.SeleniumExtras
"""
graph = graph_cases('empty')
cell_id = "999"
with pytest.raises(WebDriverException) as e:
graph.eval_js_function("api.getCellType", cell_id)
assert selenium_extras.get_exception_message(e) == \
"Unable to find cell with id {}".format(cell_id)
| 5,344,137
|
def create_window(seq, n=2):
    """Returns a sliding window (of width n) over data from the iterable,
    code taken from https://docs.python.org/release/2.3.5/lib/itertools-example.html"""
    it = iter(seq)
    result = tuple(itertools.islice(it, n))
    if len(result) == n:
        yield result
    for elem in it:
        result = result[1:] + (elem,)
        yield result
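# Example: sliding windows of width 2 (the default) and width 3 over short sequences.
print(list(create_window([1, 2, 3, 4])))  # -> [(1, 2), (2, 3), (3, 4)]
print(list(create_window("abcd", n=3)))   # -> [('a', 'b', 'c'), ('b', 'c', 'd')]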
| 5,344,138
|
async def test_get_item_from_missing_item_collection(app_client):
"""Test reading an item from a collection which does not exist"""
resp = await app_client.get("/collections/invalid-collection/items/some-item")
assert resp.status_code == 404
| 5,344,139
|
def start(log_file: str) -> None:
"""Create the application log."""
global _LOG_FILENAME
# Disable UDS/ISO-TP library logging
logging.getLogger().addHandler(logging.NullHandler())
# Create the directory if needed
filename = os.path.expanduser(log_file)
filename_parts = os.path.split(filename)
if filename_parts[0] and not os.path.isdir(filename_parts[0]):
os.mkdir(filename_parts[0])
filename = os.path.abspath(filename)
_LOG_FILENAME = filename
# Log to a file
log_format = '{asctime} {module:16s} {levelname:6s} {message}'
file_handler = logging.handlers.WatchedFileHandler(filename, mode='w', encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(fmt=log_format, style='{'))
# Add some console output
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter(fmt=log_format, style='{'))
# Create loggers
logger = logging.getLogger('mme')
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# First log entry
logger.info("Created application log %s", filename)
| 5,344,140
|
def get_function_name(fcn):
    """Returns the fully-qualified function name for the given function.
    Args:
        fcn: a function
    Returns:
        the fully-qualified function name string, such as
        "eta.core.utils.function_name"
    """
    return fcn.__module__ + "." + fcn.__name__
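# Example: resolving the qualified name of a standard-library function.
import os.path
print(get_function_name(os.path.join))  # e.g. "posixpath.join" on POSIX systems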
| 5,344,141
|
def add_to_braindb(run, missing_keys, brain_alternate=None):
"""
This function adds to the braindump database the missing
keys from missing_keys.
"""
my_config = {}
if brain_alternate is None:
my_braindb = os.path.join(run, brainbase)
else:
my_braindb = os.path.join(run, brain_alternate)
try:
my_file = open(my_braindb, 'a')
except IOError:
return
try:
for key in missing_keys:
my_file.write("%s %s\n" % (str(key), str(missing_keys[key])))
except IOError:
# Nothing to do...
pass
try:
my_file.close()
except IOError:
pass
| 5,344,142
|
def log_time(action: str, until: datetime.datetime = None) -> None:
"""Log the start time in the shelve, or remove the start time on stopping.
action: start/stop, as a string."""
if action == "start":
now = datetime.datetime.now()
with shelve.open(get_block_path() + "/data") as data:
data["START"] = now
data["UNTIL"] = until
elif action == "stop":
with shelve.open(get_block_path() + "/data") as data:
del data["START"]
del data["UNTIL"]
else:
raise exc.InternalException("Unidentified action; expecting start or stop.")
| 5,344,143
|
def clean_cases(text):
"""
Makes text all lowercase.
:param text: the text to be converted to all lowercase.
:type: str
:return: lowercase text
:type: str
"""
return text.lower()
| 5,344,144
|
def process(sourceDir, endDir, projectDir, op, software, version, opDescr, inputMaskPath, arguments,
props):
"""
Perform the bulk journaling. If no endDir is specified, it is assumed that the user wishes
to add on to existing projects in projectDir.
:param sourceDir: Directory of source images
:param endDir: Directory of manipulated images (1 manipulation stage). Optional.
:param projectDir: Directory for projects to be placed
:param opDescr: Operation performed between source and ending dir
:param software: Manipulation software used
:param version: Version of manipulation software
:param descr: Description of manipulation. Optional
:param inputMaskPath: Directory of input masks. Optional.
:param arguments: Dictionary of additional args ({rotation:90,...})
:param props: Dictionary containing project properties
:return: None
"""
startingImages = create_image_list(sourceDir)
# Decide what to do with input (create new or add)
if endDir:
endingImages = create_image_list(endDir)
else:
endingImages = None
if inputMaskPath:
inputMaskImages = create_image_list(inputMaskPath)
else:
inputMaskImages = None
# begin looping through the different projects
total = len(startingImages)
processNo = 1
for sImg in startingImages:
sImgName = ''.join(sImg.split('.')[:-1])
if inputMaskPath:
maskIm = os.path.join(inputMaskPath, find_corresponding_image(sImgName, inputMaskImages))
else:
maskIm = None
project = find_json_path(sImgName, projectDir)
# open the project
new = not os.path.exists(project)
sm = maskgen.scenario_model.ImageProjectModel(project)
if new:
print 'Creating...' + project
lastNodeName = sm.addImage(os.path.join(sourceDir, sImg))
#lastNodeName = sImgName
for prop, val in props.iteritems():
sm.setProjectData(prop, val)
eImg = None if endDir is None else os.path.join(endDir, find_corresponding_image(sImgName, endingImages))
else:
eImg = os.path.join(sourceDir, sImg)
#print sm.G.get_nodes()
lastNodes = [n for n in sm.G.get_nodes() if len(sm.G.successors(n)) == 0]
lastNodeName = lastNodes[-1]
lastNode = sm.G.get_node(lastNodeName)
sm.selectImage(lastNodeName)
if software is not None and version is not None and op is not None and eImg is not None:
# prepare details for new link
softwareDetails = Software(software, version)
if arguments:
opDetails = maskgen.scenario_model.Modification(op, opDescr, software=softwareDetails, inputMaskName=maskIm,
arguments=arguments, automated='yes')
else:
opDetails = maskgen.scenario_model.Modification(op, opDescr, software=softwareDetails, inputMaskName=maskIm,automated='yes')
position = ((lastNode['xpos'] + 50 if lastNode.has_key('xpos') else
80), (lastNode['ypos'] + 50 if lastNode.has_key('ypos') else 200))
# create link
sm.addNextImage(eImg, mod=opDetails,
sendNotifications=False, position=position)
print 'Operation ' + op + ' complete on project (' + str(processNo) + '/' + str(total) + '): ' + project
elif eImg is not None:
print 'Operation, Software and Version need to be defined to complete the link.'
sm.save()
processNo += 1
| 5,344,145
|
def root():
"""Refreshes data in database"""
db.drop_all()
db.create_all()
# Get data from api, make objects with it, and add to db
for row in df.index:
db_comment = Comment(user=df.User[row],text=df.Text[row]) # rating = df.Rating[row]
db.session.add(db_comment)
db.session.commit()
return 'Data stored'
| 5,344,146
|
def run(bind_address='0.0.0.0', port=8008):
"""Run the webapp in a simple server process."""
from werkzeug import run_simple
print "* Starting on %s:%s" % (bind_address, port)
run_simple(bind_address, port, webapp(),
use_reloader=False, threaded=True)
| 5,344,147
|
def beast_production_wrapper():
"""
This does all of the steps for a full production run, and can be used as
a wrapper to automatically do most steps for multiple fields.
* make datamodel.py file
* make source density map
* split catalog by source density
* make physics model (SED grid)
* make input list for ASTs
* make noise model
* generate batch script to trim models
* generate batch script to fit models
* merge stats files back together
* spatially reorder the results
Places for user to manually do things:
* editing code before use
- datamodel_template.py: setting up the file with desired parameters
- here: list the catalog filter names with the corresponding BEAST names
- here: choose settings (pixel size, filter, mag range) for the source density map
- here: choose settings (pixel size, reference image) for the background map
- here: choose settings (filter, number per file) for dividing catalog by source density
- here: choose settings (# files, nice level) for the trimming/fitting batch scripts
* process the ASTs, as described in BEAST documentation
* run the trimming scripts
* run the fitting scripts
BEWARE: When running the trimming/fitting scripts, ensure that the correct
datamodel.py file is in use. Since it gets updated every time this code is
run, you may be unexpectedly be using one from another field.
"""
# list of field names:
# These should be the base of the GST file names, and will be used
# to create the project name
field_names = ['14675_LMC-5665ne-12232']
n_field = len(field_names)
# Need to know what the correspondence is between filter names in the
# catalog and the BEAST filter names.
#
# These will be used to automatically determine the filters present in
# each GST file and fill in the datamodel.py file. The order doesn't
# matter, as long as the order in one list matches the order in the other
# list.
#
gst_filter_names = ['F225W','F275W','F336W','F475W','F814W','F110W','F160W']
beast_filter_names = ['HST_WFC3_F225W','HST_WFC3_F275W','HST_WFC3_F336W',
'HST_ACS_WFC_F475W','HST_ACS_WFC_F814W',
'HST_WFC3_F110W','HST_WFC3_F160W']
for b in range(n_field):
print('********')
print('field ' + field_names[b])
print('********')
# -----------------
# data file names
# -----------------
# paths for the data/AST files
gst_file = './data/' + field_names[b]+'.gst.fits'
ast_file = './data/' + field_names[b]+'.gst.fake.fits'
# path for the reference image (if using for the background map)
im_file = './data/'+field_names[b]+'_F475W.fits.gz'
# -----------------
# make datamodel file
# -----------------
# need to do this first, because otherwise any old version that exists
# will be imported, and changes made here won't get imported again
print('')
print('creating datamodel file')
print('')
create_datamodel(gst_file, gst_filter_names, beast_filter_names)
# -----------------
# make a source density map
# -----------------
print('')
print('making source density map')
print('')
if not os.path.isfile(gst_file.replace('.fits','_source_den_image.fits')):
# - pixel size of 10 arcsec
# - use F475W between vega mags of 17 and 27
sourceden_args = \
types.SimpleNamespace(subcommand='sourceden',
catfile=gst_file, pixsize=10,
mag_name='F475W_VEGA',
mag_cut=[17,27])
create_background_density_map.make_map_main(sourceden_args)
# new file name with the source density column
gst_file_new = gst_file.replace('.fits', '_with_sourceden.fits')
# -----------------
# make a background map
# -----------------
print('')
print('making background map')
print('')
if not os.path.isfile(gst_file_new.replace('.fits','_F475W_bg_map.hd5')):
# - pixel dimensions: 15x15
background_args = \
types.SimpleNamespace(subcommand='background',
catfile=gst_file, npix=15,
reference=im_file)
create_background_density_map.main_make_map(background_args)
# new file name with the background column
#gst_file_new = gst_file_new.replace('.fits', '_with_bg.fits')
# -----------------
# split observations by source density
# -----------------
print('')
print('splitting observations by source density')
print('')
if len(glob.glob(gst_file_new.replace('.fits','*sub*fits') )) == 0:
# a smaller value for Ns_file will mean more individual files/runs,
# but each run will take a shorter amount of time
subdivide_obscat_by_source_density.split_obs_by_source_density(gst_file_new, bin_width=1,
sort_col='F475W_RATE', Ns_file=6250)
# figure out how many files there are
tot_sub_files = len(glob.glob(gst_file_new.replace('.fits','*sub*fits') ))
print('** total subfiles: '+str(tot_sub_files))
# -----------------
# make physics model
# -----------------
print('')
print('making physics model')
print('')
# the file name for the model grid
#physics_model_file = './' + field_names[b] + '_beast/' + field_names[b] + '_beast_seds.grid.hd5'
physics_model_file = 'METAL_seds.grid.hd5'
# only make the physics model if it doesn't already exist
if not os.path.isfile(physics_model_file):
run_beast_production(gst_file, physicsmodel=True)
# -----------------
# make ASTs
# -----------------
# only create an AST input list if the ASTs don't already exist
ast_input_file = './' + field_names[b] + '_beast/' + field_names[b] + '_inputAST.txt'
if not os.path.isfile(ast_file):
if not os.path.isfile(ast_input_file):
print('')
print('creating artificial stars')
print('')
run_beast_production(gst_file, ast=True)
print('\n**** go run ASTs for '+field_names[b]+'! ****\n')
continue
# -----------------
# make noise model
# -----------------
print('')
print('making noise model')
print('')
# eventually, this may be divided into files based on source density,
# but for now it'll be one giant file
# the file name for the model grid
noise_model_file = './' + field_names[b] + '_beast/' + field_names[b] + '_beast_noisemodel.hd5'
if not os.path.isfile(noise_model_file):
run_beast_production(gst_file, observationmodel=True)#, source_density='0', sub_source_density='0')
# -----------------
# make script to trim models
# -----------------
print('')
print('setting up script to trim models')
print('')
# check if the trimmed grids exist before moving on
trim_files = glob.glob('./' + field_names[b] + '_beast/' + field_names[b] + '_beast_*_sed_trim.grid.hd5')
if len(trim_files) < tot_sub_files:
# choose num_subtrim to be the number of CPUs you'll run it on
# (if it's more than 1, you'll need to split the joblist file manually)
setup_batch_beast_trim.setup_batch_beast_trim(field_names[b] + '_beast',
gst_file, ast_file,
num_subtrim=1, nice=19,
seds_fname=physics_model_file)
print('\n**** go run trimming code for '+field_names[b]+'! ****')
print('Here is the command to run:')
print('at -f ./'+field_names[b]+'_beast/trim_batch_jobs/beast_batch_trim.joblist now \n')
continue
else:
print('all files are trimmed for '+field_names[b])
# -----------------
# make script to fit models
# -----------------
print('')
print('setting up script to fit models')
print('')
fit_run_info = setup_batch_beast_fit.setup_batch_beast_fit(field_names[b] + '_beast',
gst_file,
num_percore=1, nice=19,
overwrite_logfile=False)
# check if the fits exist before moving on
tot_remaining = len(fit_run_info['done']) - np.sum(fit_run_info['done'])
if tot_remaining > 0:
print('\n**** go run fitting code for '+field_names[b]+'! ****')
print('Here are the '+str(len(fit_run_info['files_to_run']))+' commands to run:')
for job_file in fit_run_info['files_to_run']:
print('at -f ./'+job_file+' now')
continue
else:
print('all fits are complete for '+field_names[b])
# -----------------
# merge stats files from each fit
# -----------------
print('')
print('merging stats files')
print('')
stats_filebase = './' + field_names[b] + '_beast/' + field_names[b] + '_beast'
merge_beast_stats.merge_stats_files(glob.glob(stats_filebase+'*sub*_stats.fits'), stats_filebase)
# -----------------
# reorganize results into spatial regions
# -----------------
print('')
print('doing spatial reorganizing')
print('')
region_filebase = './' + field_names[b] + '_beast/' + field_names[b] + '_beast_sd'
output_filebase = './' + field_names[b] + '_beast/spatial/' + field_names[b]
reorder_beast_results_spatial.reorder_beast_results_spatial(stats_filename=stats_filebase + '_stats.fits',
region_filebase=region_filebase,
output_filebase=output_filebase)
condense_beast_results_spatial.condense_files(filedir='./' + field_names[b] + '_beast/spatial/')
| 5,344,148
|
def test_eap_fast_tlv_nak_oom(dev, apdev):
"""EAP-FAST Phase 2 TLV NAK OOM"""
if not openssl_imported:
raise HwsimSkip("OpenSSL python method not available")
check_eap_capa(dev[0], "FAST")
hapd = start_ap(apdev[0])
with alloc_fail(dev[0], 1, "eap_fast_tlv_nak"):
run_eap_fast_phase2(dev, struct.pack(">HHB", EAP_TLV_TYPE_MANDATORY,
1, 0xff), False)
| 5,344,149
|
def construct_grammar(grammar_string, allow_sub_grammar_definitions=False, default_flags=flags.DEFAULTS):
"""
Function which accepts a user-defined grammar string and returns an instance of Grammar representing it.
Inputs: grammar_string - The user-defined grammar string representing the grammar we should construct.
allow_sub_grammar_definitions - A boolean, indicating whether or not we should process sub grammars
See the README for more information on what sub grammars are used for
and the dangers of allowing them when parsing untrusted third-party grammars
default_flags - Can be passed as a set of flags, which will set the defaults for all elements in the grammar
Outputs: An instance of a Grammar class which can be used to parse input strings.
"""
grammar_tokens = tokenize_grammar(grammar_string)
# The grammar stack; opening tokens add a new item to the stack, closing tokens pop one off
# Pre-populated with an outer-most grammar that will be returned from this function
grammar_stack = [elements.Grammar()]
# A stack of sub_grammars; used to handle nested sub_grammar definitions
# We prepopulate it with an outer-most subgrammar to handle global sub-grammar definitions
sub_grammar_stack = [elements.SubGrammarDefinition()]
token_dict = None
    error_params = lambda: (grammar_string, token_dict, grammar_stack)
    sub_grammar_error_params = lambda: (grammar_string, token_dict, sub_grammar_stack)
@contextlib.contextmanager
def inject_parsing_context_into_errors():
""" Injects information about the state of parsing into errors for better messages """
try:
yield
except errors.TokexError as e:
e.inject_stack(grammar_string, token_dict, grammar_stack, sub_grammar_stack)
raise
with inject_parsing_context_into_errors():
for token_idx, token_dict in enumerate(grammar_tokens):
token_match = token_dict["match"]
token_flags = token_dict["flags"]
token = token_dict["token"]
# Openers
if token == "{":
element = elements.OneOfSet(token, token_flags, default_flags, token_dict)
grammar_stack[-1].add_sub_element(element)
grammar_stack.append(element)
elif token[:2] == "*(":
element = elements.ZeroOrMore(token, token_flags, default_flags, token_dict)
grammar_stack[-1].add_sub_element(element)
grammar_stack.append(element)
elif token[:2] == "+(":
element = elements.OneOrMore(token, token_flags, default_flags, token_dict)
grammar_stack[-1].add_sub_element(element)
grammar_stack.append(element)
elif token[:2] == "?(":
element = elements.ZeroOrOne(token, token_flags, default_flags, token_dict)
grammar_stack[-1].add_sub_element(element)
grammar_stack.append(element)
elif token[0] == "(":
element = elements.Grammar(token, token_flags, default_flags, token_dict)
grammar_stack[-1].add_sub_element(element)
grammar_stack.append(element)
elif token[0] == "<":
element = elements.NamedElement(token, token_flags, default_flags, token_dict)
grammar_stack[-1].add_sub_element(element)
grammar_stack.append(element)
elif token[:3].lower() == "sep":
element = elements.IteratorDelimiter(token, token_flags, default_flags, token_dict)
if grammar_stack[-1].delimiter_grammar:
raise errors.DuplicateDelimiterError(grammar_stack[-1])
if not getattr(grammar_stack[-1], "can_have_delimiter", False):
raise errors.InvalidDelimiterError(grammar_stack[-1])
grammar_stack[-1].delimiter_grammar = element
grammar_stack.append(element)
# Closers
elif token == "}":
if grammar_stack[-1].__class__ in \
(elements.SubGrammarDefinition, elements.IteratorDelimiter, elements.OneOfSet):
if isinstance(grammar_stack[-1], elements.SubGrammarDefinition):
new_sub_grammar = sub_grammar_stack.pop()
sub_grammar_stack[-1].sub_grammars[new_sub_grammar.name] = new_sub_grammar
grammar_stack.pop()
else:
raise errors.MismatchedBracketsError(token, grammar_stack[-1])
elif token == ")":
if len(grammar_stack) == 1:
raise errors.ExtraClosingBracketsError(token)
if grammar_stack[-1].__class__ in \
(elements.ZeroOrMore, elements.ZeroOrOne, elements.OneOrMore, elements.Grammar):
grammar_stack.pop()
else:
raise errors.MismatchedBracketsError(token, grammar_stack[-1])
elif token == ">":
if grammar_stack[-1].__class__ in (elements.NamedElement, ):
grammar_stack.pop()
else:
raise errors.MismatchedBracketsError(token, grammar_stack[-1])
# Singular tokens
elif token[0] in ("'", '"'):
grammar_stack[-1].add_sub_element(elements.StringLiteral(token, token_flags, default_flags, token_dict))
elif token[0] == "~":
grammar_stack[-1].add_sub_element(elements.RegexString(token, token_flags, default_flags, token_dict))
elif token[0] in ("'", '"'):
grammar_stack[-1].add_sub_element(elements.StringLiteral(token, token_flags, default_flags, token_dict))
elif token == "$":
grammar_stack[-1].add_sub_element(elements.Newline(token, token_flags, default_flags, token_dict))
elif token == ".":
grammar_stack[-1].add_sub_element(elements.AnyString(token, token_flags, default_flags, token_dict))
# Sub Grammar open
elif token[:3].lower() == "def":
element = elements.SubGrammarDefinition(token, token_flags, default_flags, token_dict)
if not allow_sub_grammar_definitions:
raise errors.SubGrammarsDisabledError(element.name)
# Only allow definition of a new subgrammar within the global scope and other subgrammars
for stack_element in reversed(grammar_stack[1:]):
if not isinstance(stack_element, elements.SubGrammarDefinition):
raise errors.SubGrammarScopeError(stack_element, element.name)
grammar_stack.append(element)
sub_grammar_stack.append(element)
# Sub Grammar Usage
elif token[-1] == ")":
# Find the referenced sub_grammar
sub_grammar_name = elements.SubGrammarUsage(token, token_flags, default_flags, token_dict).name
for parent_sub_grammar in reversed(sub_grammar_stack):
if sub_grammar_name in parent_sub_grammar.sub_grammars:
for sub_element in parent_sub_grammar.sub_grammars[sub_grammar_name].sub_elements:
grammar_stack[-1].add_sub_element(sub_element)
break
else:
raise errors.UndefinedSubGrammarError(sub_grammar_name)
else:
raise errors.GrammarParsingError("Unknown token: %r" % token)
if len(grammar_stack) > 1:
raise errors.ExtraOpeningBracketsError(grammar_stack[-1])
return grammar_stack[0]
| 5,344,150
|
def print_last(limit=None, file=None):
"""This is a shorthand for 'print_exception(sys.last_type,
sys.last_value, sys.last_traceback, limit, file)'."""
if not hasattr(sys, "last_type"):
raise ValueError("no last exception")
if file is None:
file = sys.stderr
print_exception(sys.last_type, sys.last_value, sys.last_traceback,
limit, file)
| 5,344,151
|
def bond_value_to_description(value):
"""bond_value_to_description(value) -> string
Convert from a bond type string into its text description,
separated by "|"s. The result are compatible with
OEGetFPBontType and are in canonical order.
"""
return _get_type_description("bond", _btype_flags, value)
| 5,344,152
|
def check_cfg(cfg):
"""Check options of configuration and catch errors."""
for operation in ('sum', 'mean'):
if operation in cfg:
cfg[operation] = list(set(cfg[operation]))
for coord in cfg.get('sum', []):
if coord in cfg.get('mean', []):
raise ValueError(f"Coordinate '{coord}' given in 'sum' and 'mean'")
| 5,344,153
|
def read_set_from_file(filename):
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
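
# A small usage sketch with a throwaway file; duplicate lines collapse into a
# single set entry.
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False, encoding='utf-8') as tmp:
    tmp.write('apple\nbanana\napple\n')
print(read_set_from_file(tmp.name))   # {'apple', 'banana'}
os.remove(tmp.name)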
| 5,344,154
|
def filter_0_alleles(allele_df, allele_num=2):
"""Drop alleles that do not appear in any of the strains.
"""
drop_cols = []
for col in allele_df.columns:
if allele_df[col].sum()<allele_num:
drop_cols.append(col)
allele_df.drop(drop_cols, inplace=True, axis=1)
return allele_df
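
# A usage sketch for filter_0_alleles: columns whose total count falls below
# allele_num are dropped, and the drop happens in place on the passed DataFrame.
import pandas as pd

alleles = pd.DataFrame({'alleleA': [1, 1, 0],
                        'alleleB': [0, 0, 0],
                        'alleleC': [1, 0, 0]})
print(filter_0_alleles(alleles, allele_num=2).columns.tolist())   # ['alleleA']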
| 5,344,155
|
async def test_available_after_update(
hass, pywemo_registry, pywemo_device, wemo_entity
):
"""Test the avaliability when an On call fails and after an update."""
pywemo_device.on.side_effect = ActionException
pywemo_device.get_state.return_value = 1
await entity_test_helpers.test_avaliable_after_update(
hass, pywemo_registry, pywemo_device, wemo_entity, SWITCH_DOMAIN
)
| 5,344,156
|
def gradient_of_rmse(y_hat, y, Xn):
"""
Returns the gradient of the Root Mean Square error with respect to the
parameters of the linear model that generated the prediction `y_hat'.
Hence, y_hat should have been generated by a linear process of the form
Xn.T.dot(theta)
Args:
        y_hat (np.array of shape N,): The predictions of the linear model.
        y (np.array of shape N,): The "ground-truth" values.
        Xn (np.array): The design matrix used to generate the predictions.
    Returns:
        The gradient of the RMSE loss with respect to the model parameters.
"""
N = y.shape[0]
assert N > 0, ('At least one sample is required in order to compute the '
'RMSE loss')
losses = y - y_hat
gradient = - 2 * Xn.T.dot(losses) / N
return gradient
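
# A minimal gradient-descent sketch using gradient_of_rmse. The shape of Xn is
# an assumption: the Xn.T.dot(losses) line above only works out if Xn is
# (num samples, num features), so the predictions here use Xn.dot(theta).
import numpy as np

rng = np.random.default_rng(0)
Xn = rng.normal(size=(50, 3))                 # assumed layout: samples x features
theta_true = np.array([1.0, -2.0, 0.5])
y = Xn.dot(theta_true)

theta = np.zeros(3)
for _ in range(200):
    theta -= 0.05 * gradient_of_rmse(Xn.dot(theta), y, Xn)
# theta should now be close to theta_true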
| 5,344,157
|
def is_commit_in_public_upstream(revision: str, public_upstream_branch: str, source_dir: str):
"""
Determine if the public upstream branch includes the specified commit.
:param revision: Git commit hash or reference
:param public_upstream_branch: Git branch of the public upstream source
:param source_dir: Path to the local Git repository
"""
cmd = ["git", "merge-base", "--is-ancestor", "--", revision, "public_upstream/" + public_upstream_branch]
# The command exits with status 0 if true, or with status 1 if not. Errors are signaled by a non-zero status that is not 1.
# https://git-scm.com/docs/git-merge-base#Documentation/git-merge-base.txt---is-ancestor
rc, out, err = exectools.cmd_gather(cmd)
if rc == 0:
return True
if rc == 1:
return False
raise IOError(
f"Couldn't determine if the commit {revision} is in the public upstream source repo. `git merge-base` exited with {rc}, stdout={out}, stderr={err}")
| 5,344,158
|
def cli(config, **options):
"""Build versioned Sphinx docs for every branch and tag pushed to origin.
Supports only building locally with the "build" sub command
For more information, run with its own --help.
The options below are global and must be specified before the sub command name (e.g. -N build ...).
\f
:param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
:param dict options: Additional Click options.
"""
def pre(rel_source):
"""To be executed in a Click sub command.
Needed because if this code is in cli() it will be executed when the user runs: <command> <sub command> --help
:param tuple rel_source: Possible relative paths (to git root) of Sphinx directory containing conf.py.
"""
# Setup logging.
if not NO_EXECUTE:
setup_logging(verbose=config.verbose, colors=not config.no_colors)
log = logging.getLogger(__name__)
# Change current working directory.
if config.chdir:
os.chdir(config.chdir)
log.debug('Working directory: %s', os.getcwd())
else:
config.update(dict(chdir=os.getcwd()), overwrite=True)
# Get and verify git root.
try:
config.update(dict(git_root=get_root(config.git_root or os.getcwd())), overwrite=True)
except GitError as exc:
log.error(exc.message)
log.error(exc.output)
raise HandledError
# Look for local config.
if config.no_local_conf:
config.update(dict(local_conf=None), overwrite=True)
elif not config.local_conf:
candidates = [p for p in (os.path.join(s, 'conf.py') for s in rel_source) if os.path.isfile(p)]
if candidates:
config.update(dict(local_conf=candidates[0]), overwrite=True)
else:
log.debug("Didn't find a conf.py in any REL_SOURCE.")
elif os.path.basename(config.local_conf) != 'conf.py':
log.error('Path "%s" must end with conf.py.', config.local_conf)
raise HandledError
config['pre'] = pre # To be called by Click sub commands.
config.update(options)
| 5,344,159
|
def align_down(x: int, align: int) -> int:
"""
Align integer down.
:return:
``y`` such that ``y % align == 0`` and ``y <= x`` and ``(x - y) < align``
"""
return x - (x % align)
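
# A few quick sanity checks for align_down.
assert align_down(37, 8) == 32
assert align_down(40, 8) == 40
assert align_down(5, 16) == 0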
| 5,344,160
|
def read_config_file(config_id: str = "sample_config") -> ConfigType:
"""Read a config file
Args:
config_id (str, optional): Id of the config file to read.
Defaults to "sample_config".
Returns:
ConfigType: Config object
"""
project_root = str(Path(__file__).resolve()).split("/codes")[0]
config_name = "{}.yaml".format(config_id)
config = OmegaConf.load(os.path.join(project_root, "config", config_name))
assert isinstance(config, DictConfig)
return config
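
# A hedged usage sketch; it assumes <project_root>/config/sample_config.yaml
# exists and that OmegaConf is imported in this module, as the function requires.
cfg = read_config_file('sample_config')
print(OmegaConf.to_yaml(cfg))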
| 5,344,161
|
def null():
"""return an empty bit buffer"""
return bits()
| 5,344,162
|
def find_coherent_patch(correlations, window=11):
"""Looks through 3d stack of correlation layers and finds strongest correlation patch
Also accepts a 2D array of the pre-compute means of the 3D stack.
Uses a window of size (window x window), finds the largest average patch
Args:
correlations (ndarray): 3D array of correlations:
correlations = read_stack('path/to/correlations', '.cc')
window (int): size of the patch to consider
Returns:
tuple[int, int]: the row, column of center of the max patch
Example:
>>> corrs = np.arange(25).reshape((5, 5))
>>> print(find_coherent_patch(corrs, window=3))
(3, 3)
>>> corrs = np.stack((corrs, corrs), axis=0)
>>> print(find_coherent_patch(corrs, window=3))
(3, 3)
"""
if correlations.ndim == 2:
mean_stack = correlations
elif correlations.ndim == 3:
mean_stack = np.mean(correlations, axis=0)
else:
raise ValueError("correlations must be a 2D mean array, or 3D correlations")
conv = uniform_filter(mean_stack, size=window, mode='constant')
max_idx = conv.argmax()
return np.unravel_index(max_idx, mean_stack.shape)
| 5,344,163
|
def test():
"""HI :)"""
return 'Hi!'
| 5,344,164
|
def soft_sign(x: ArrayLike, *, constant: Optional[bool] = None) -> Tensor:
"""Returns the soft sign function x / (1 + |x|).
Parameters
----------
x : ArrayLike
Input data.
constant : boolean, optional (default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient).
Returns
-------
mygrad.Tensor
The soft sign function applied to `x` elementwise.
Examples
--------
>>> import mygrad as mg
>>> from mygrad.nnet.activations import soft_sign
>>> x = mg.arange(-5, 6)
>>> x
Tensor([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
>>> y = soft_sign(x); y
Tensor([-0.83333333, -0.8 , -0.75 , -0.66666667, -0.5 ,
0. , 0.5 , 0.66666667, 0.75 , 0.8 ,
0.83333333])
.. plot::
>>> import mygrad as mg
>>> from mygrad.nnet.activations import soft_sign
>>> import matplotlib.pyplot as plt
>>> x = mg.linspace(-10, 10, 100)
>>> y = soft_sign(x)
>>> plt.title("soft_sign(x)")
>>> y.backward()
>>> plt.plot(x, x.grad, label="df/dx")
>>> plt.plot(x, y, label="f(x)")
>>> plt.legend()
>>> plt.grid()
>>> plt.show()
"""
return divide(x, 1 + abs(x), constant=constant)
| 5,344,165
|
def get_vdfdx(stuff_for_time_loop, vdfdx_implementation="exponential"):
"""
This function enables VlaPy to choose the implementation of the vdfdx stepper
to use in the lower level sections of the simulation
:param stuff_for_time_loop: (dictionary) contains the derived parameters for the simulation
    :param vdfdx_implementation: (string) the chosen v df/dx implementation for this simulation
    :return: the configured v df/dx stepper
"""
if vdfdx_implementation == "exponential":
vdfdx = get_vdfdx_exponential(
kx=stuff_for_time_loop["kx"], v=stuff_for_time_loop["v"]
)
elif vdfdx_implementation == "sl":
vdfdx = get_vdfdx_sl(x=stuff_for_time_loop["x"], v=stuff_for_time_loop["v"])
else:
raise NotImplementedError(
"v df/dx: <"
+ vdfdx_implementation
+ "> has not yet been implemented in NumPy/SciPy"
)
return vdfdx
| 5,344,166
|
def worker(args):
"""
1. Create the envelope request object
2. Send the envelope
"""
envelope_args = args["envelope_args"]
# 1. Create the envelope request object
envelope_definition = make_envelope(envelope_args)
# 2. call Envelopes::create API method
# Exceptions will be caught by the calling function
api_client = ApiClient()
api_client.host = args["base_path"]
api_client.set_default_header("Authorization", "Bearer " + args["ds_access_token"])
envelope_api = EnvelopesApi(api_client)
results = envelope_api.create_envelope(args["account_id"], envelope_definition=envelope_definition)
envelope_id = results.envelope_id
# app.logger.info(f"Envelope was created. EnvelopeId {envelope_id}")
return {"envelope_id": envelope_id}
| 5,344,167
|
def write_colocated_data_time_avg(coloc_data, fname):
"""
Writes the time averaged data of gates colocated with two radars
Parameters
----------
coloc_data : dict
dictionary containing the colocated data parameters
fname : str
file name where to store the data
Returns
-------
fname : str
the name of the file where data has written
"""
filelist = glob.glob(fname)
if not filelist:
with open(fname, 'w', newline='') as csvfile:
csvfile.write('# Colocated radar gates data file\n')
csvfile.write('# Comment lines are preceded by "#"\n')
csvfile.write('#\n')
fieldnames = [
'rad1_time', 'rad1_ray_ind', 'rad1_rng_ind', 'rad1_ele',
'rad1_azi', 'rad1_rng', 'rad1_dBZavg', 'rad1_PhiDPavg',
'rad1_Flagavg', 'rad2_time', 'rad2_ray_ind', 'rad2_rng_ind',
'rad2_ele', 'rad2_azi', 'rad2_rng', 'rad2_dBZavg',
'rad2_PhiDPavg', 'rad2_Flagavg']
writer = csv.DictWriter(csvfile, fieldnames)
writer.writeheader()
for i, rad1_time in enumerate(coloc_data['rad1_time']):
writer.writerow({
'rad1_time': rad1_time.strftime('%Y%m%d%H%M%S'),
'rad1_ray_ind': coloc_data['rad1_ray_ind'][i],
'rad1_rng_ind': coloc_data['rad1_rng_ind'][i],
'rad1_ele': coloc_data['rad1_ele'][i],
'rad1_azi': coloc_data['rad1_azi'][i],
'rad1_rng': coloc_data['rad1_rng'][i],
'rad1_dBZavg': coloc_data['rad1_dBZavg'][i],
'rad1_PhiDPavg': coloc_data['rad1_PhiDPavg'][i],
'rad1_Flagavg': coloc_data['rad1_Flagavg'][i],
'rad2_time': (
coloc_data['rad2_time'][i].strftime('%Y%m%d%H%M%S')),
'rad2_ray_ind': coloc_data['rad2_ray_ind'][i],
'rad2_rng_ind': coloc_data['rad2_rng_ind'][i],
'rad2_ele': coloc_data['rad2_ele'][i],
'rad2_azi': coloc_data['rad2_azi'][i],
'rad2_rng': coloc_data['rad2_rng'][i],
'rad2_dBZavg': coloc_data['rad2_dBZavg'][i],
'rad2_PhiDPavg': coloc_data['rad2_PhiDPavg'][i],
'rad2_Flagavg': coloc_data['rad2_Flagavg'][i]})
csvfile.close()
else:
with open(fname, 'a', newline='') as csvfile:
fieldnames = [
'rad1_time', 'rad1_ray_ind', 'rad1_rng_ind', 'rad1_ele',
'rad1_azi', 'rad1_rng', 'rad1_dBZavg', 'rad1_PhiDPavg',
'rad1_Flagavg', 'rad2_time', 'rad2_ray_ind', 'rad2_rng_ind',
'rad2_ele', 'rad2_azi', 'rad2_rng', 'rad2_dBZavg',
'rad2_PhiDPavg', 'rad2_Flagavg']
writer = csv.DictWriter(csvfile, fieldnames)
for i, rad1_time in enumerate(coloc_data['rad1_time']):
writer.writerow({
'rad1_time': rad1_time.strftime('%Y%m%d%H%M%S'),
'rad1_ray_ind': coloc_data['rad1_ray_ind'][i],
'rad1_rng_ind': coloc_data['rad1_rng_ind'][i],
'rad1_ele': coloc_data['rad1_ele'][i],
'rad1_azi': coloc_data['rad1_azi'][i],
'rad1_rng': coloc_data['rad1_rng'][i],
'rad1_dBZavg': coloc_data['rad1_dBZavg'][i],
'rad1_PhiDPavg': coloc_data['rad1_PhiDPavg'][i],
'rad1_Flagavg': coloc_data['rad1_Flagavg'][i],
'rad2_time': (
coloc_data['rad2_time'][i].strftime('%Y%m%d%H%M%S')),
'rad2_ray_ind': coloc_data['rad2_ray_ind'][i],
'rad2_rng_ind': coloc_data['rad2_rng_ind'][i],
'rad2_ele': coloc_data['rad2_ele'][i],
'rad2_azi': coloc_data['rad2_azi'][i],
'rad2_rng': coloc_data['rad2_rng'][i],
'rad2_dBZavg': coloc_data['rad2_dBZavg'][i],
'rad2_PhiDPavg': coloc_data['rad2_PhiDPavg'][i],
'rad2_Flagavg': coloc_data['rad2_Flagavg'][i]})
csvfile.close()
return fname
| 5,344,168
|
def get_method(client, request, xslt_code, operation,
current_date=datetime.datetime.now(pytz.timezone('US/Pacific')).strftime("%Y-%m-%dT%H:%M:%S"),
print_to_console=False, count=100, add_response_filter=True):
"""
:param client: Zeep client
:param request: request object
:param xslt_code: xslt code as string
:param operation: Webservice Operation
:param print_to_console: print page numbers fetched in console
:param count: count returned per page
    :param add_response_filter: whether or not to add a response filter to the request; this control is needed when a report is called
:return: All suppliers returned from api
"""
# current_date = datetime.datetime.now(pytz.timezone('US/Pacific')).strftime("%Y-%m-%dT%H:%M:%S")
transform = etree.XSLT(etree.XML(xslt_code))
total_pages = 1
current_page = 0
final_response_result = []
if print_to_console:
print("calling workday for operation: " + operation + ", with results per page: " + str(count))
while current_page < total_pages:
try:
request['_soapheaders'] = [workday_common_header]
if add_response_filter:
request['Response_Filter'] = {
"As_Of_Effective_Date": current_date,
"As_Of_Entry_DateTime": current_date,
"Page": current_page + 1,
"Count": count
}
with client.settings(raw_response=True):
result = client.service[operation](**request)
if result.status_code == 200:
transformed_response = transformedresponse(result, transform)
if transformed_response['root']['Total_Results'] == '0':
print("No data returned by the API!")
return
total_pages = int(transformed_response['root']['Total_Pages'])
current_page = int(transformed_response['root']['Page'])
if print_to_console:
print_to_console_call_details(transformed_response)
final_response_result = prepare_response(transformed_response, final_response_result)
elif result.status_code == 500:
print(result.text)
return
except zeep.exceptions.Fault as ex:
print("error in " + operation + " : " + str(ex))
print("Unexpected error: ", sys.exc_info()[0])
break
except zeep.exceptions.XMLParseError as ex:
print("xml error in " + operation + " : " + str(ex))
print("Unexpected error: ", sys.exc_info()[0])
current_page = current_page + 1
except Exception as ex:
print("generic error in " + operation + " : " + str(ex))
print("Unexpected error: ", sys.exc_info()[0])
break
return final_response_result
| 5,344,169
|
def _min(group_idx, a, size, fill_value, dtype=None):
"""Same as aggregate_numpy.py"""
dtype = minimum_dtype(fill_value, dtype or a.dtype)
dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer)\
else np.finfo(a.dtype).max
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != dmax:
ret[group_idx] = dmax # min starts from maximum
np.minimum.at(ret, group_idx, a)
return ret
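
# A self-contained sketch of the same grouped-minimum idea (it does not call
# _min itself, which relies on a minimum_dtype helper not shown here): seed the
# output with the dtype's maximum so np.minimum.at can lower it per group.
import numpy as np

group_idx = np.array([0, 0, 1, 2, 2, 2])
a = np.array([4, 2, 7, 5, 9, 1])
out = np.full(3, np.iinfo(a.dtype).max, dtype=a.dtype)
np.minimum.at(out, group_idx, a)
print(out)   # [2 7 1]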
| 5,344,170
|
def autoflake(command):
"""Runs autoflake to remove unused imports on all .py files recursively
Arguments:
        command -- task-runner context object that provides a ``run`` method for executing shell commands
"""
print(
"""
Running autoflake to remove unused imports on all .py files recursively
=======================================================================
"""
)
# command.run("RUN rm -rf .mypy_cache/; exit 0")
command.run(
"autoflake --imports=pytest,pandas,numpy,plotly,dash,urllib3 --in-place --recursive .",
echo=True,
)
| 5,344,171
|
def find_word(ch, row, boggle_lst, used_positions_lst, current, ans):
"""
:param ch: int, index for each character in a row
:param row: int, index for each row in the boggle list
:param boggle_lst: list, list for all rows
    :param used_positions_lst: list, (ch, row) index tuples marking characters that have already been used
:param current: str, current character composition that might become a vocabulary
:param ans: list, answer list for all found vocabularies
:return: answer list (ans)
"""
if has_prefix(current): # process only suitable prefixes to save time
# Base Case
if len(current) >= 4: # consider those with at least 4 characters
if current in dictionary_lst:
if current not in ans: # avoid repeated found words
print('Found: ' + current)
ans.append(current)
# Recursive
# Loop over surrounding characters
for i in range(-1, 2):
for j in range(-1, 2):
# Make sure it won't loop outside the bound
if 0 <= ch+i < len(boggle_lst[row]):
if 0 <= row+j < len(boggle_lst):
# Make sure current and used positions are not considered
if i != 0 or j != 0:
if not (ch+i, row+j) in used_positions_lst:
# Choose
current += boggle_lst[row+j][ch+i]
# Explore
if len(current) > 1:
used_positions_lst.append((ch, row))
find_word(ch + i, row + j, boggle_lst, used_positions_lst, current, ans)
# Un-choose:
used_positions_lst.pop()
current = current[:len(current) - 1]
return ans
| 5,344,172
|
def lazy_tt_ranks(tt):
"""Returns static TT-ranks of a TensorTrain if defined, and dynamic otherwise.
This operation returns a 1-D integer numpy array of TT-ranks if they are
available on the graph compilation stage and 1-D integer tensor of dynamic
TT-ranks otherwise.
Args:
tt: `TensorTrain` object.
Returns:
A 1-D numpy array or `tf.Tensor`
"""
static_tt_ranks = tt.get_tt_ranks()
if static_tt_ranks.is_fully_defined():
return np.array(static_tt_ranks.as_list())
else:
return tt_ranks(tt)
| 5,344,173
|
def word_errors(reference, hypothesis, ignore_case=False, delimiter=' '):
"""Compute the levenshtein distance between reference sequence and
hypothesis sequence in word-level.
:param reference: The reference sentence.
:type reference: str
:param hypothesis: The hypothesis sentence.
:type hypothesis: str
:param ignore_case: Whether case-sensitive or not.
:type ignore_case: bool
:param delimiter: Delimiter of input sentences.
:type delimiter: char
:return: Levenshtein distance and word number of reference sentence.
:rtype: list
"""
if ignore_case:
reference = reference.lower()
hypothesis = hypothesis.lower()
ref_words = list(filter(None, reference.split(delimiter)))
hyp_words = list(filter(None, hypothesis.split(delimiter)))
edit_distance = _levenshtein_distance(ref_words, hyp_words)
    # `editdistance.eval` precision is less than `_levenshtein_distance`
# edit_distance = editdistance.eval(ref_words, hyp_words)
return float(edit_distance), len(ref_words)
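
# A usage sketch: turn the outputs into a word error rate. It assumes the
# _levenshtein_distance helper used above is defined in this module.
dist, ref_len = word_errors('the cat sat on the mat', 'the cat sit on mat')
print(dist / ref_len)   # 2 word edits over 6 reference words -> ~0.33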
| 5,344,174
|
def backup_fs_format(repos_path):
"""Rename the filesystem format file for repository REPOS_PATH.
Do not raise an error if the file is already renamed."""
format_path = os.path.join(repos_path, 'db', 'format')
try:
statinfo = os.stat(format_path)
except OSError:
# The file probably doesn't exist.
return
format_bak_path = os.path.join(repos_path, 'db', 'format.bak')
# On Windows, we need to ensure the file is writable before we can
# rename/remove it.
os.chmod(format_path, statinfo.st_mode | stat.S_IWUSR)
try:
os.rename(format_path, format_bak_path)
except OSError:
# Unexpected but try to go on
os.remove(format_bak_path)
os.rename(format_path, format_bak_path)
| 5,344,175
|
def augment_note_matrix(nmat, length, shift):
"""Pitch shift a note matrix in R_base format."""
aug_nmat = nmat.copy()
aug_nmat[0: length, 1] += shift
return aug_nmat
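
# A small usage sketch. The column layout is an assumption read off the code:
# column 1 is treated as the pitch, and all other columns are left untouched.
import numpy as np

nmat = np.array([[0, 60, 1],
                 [1, 64, 1],
                 [2, 67, 2]])
print(augment_note_matrix(nmat, length=3, shift=2)[:, 1])   # [62 66 69]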
| 5,344,176
|
def breed_list(request):
""" Фикстура возвращает список всех пород собак """
return request.param
| 5,344,177
|
def test_tablemodel(mock_program_dao):
"""Get a data model instance for each test function."""
# Create the device under test (dut) and connect to the database.
dut = RAMSTKDesignElectricTable()
dut.do_connect(mock_program_dao)
yield dut
# Unsubscribe from pypubsub topics.
pub.unsubscribe(dut.do_get_attributes, "request_get_design_electric_attributes")
pub.unsubscribe(dut.do_set_attributes, "request_set_design_electric_attributes")
pub.unsubscribe(dut.do_set_attributes, "wvw_editing_design_electric")
pub.unsubscribe(dut.do_set_tree, "succeed_calculate_design_electric")
pub.unsubscribe(dut.do_update, "request_update_design_electric")
pub.unsubscribe(dut.do_get_tree, "request_get_design_electric_tree")
pub.unsubscribe(dut.do_select_all, "selected_revision")
pub.unsubscribe(dut.do_delete, "request_delete_design_electric")
pub.unsubscribe(dut.do_insert, "request_insert_design_electric")
pub.unsubscribe(dut.do_derating_analysis, "request_derating_analysis")
pub.unsubscribe(dut.do_stress_analysis, "request_stress_analysis")
# Delete the device under test.
del dut
| 5,344,178
|
def norm_layer(norm_type, nc):
"""tbd"""
# normalization layer 1d
norm = norm_type.lower()
if norm == 'batch':
layer = batch_norm_1d(nc)
elif norm == 'layer':
layer = nn.LayerNorm(nc)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm)
return layer
| 5,344,179
|
def does_equipment_channel_exist(code, channel_id, debug):
"""Check if equipment channel exist."""
if debug:
print '\n** does_equipment_channel_exist'
code_exists = False
channel_id_exists = False
query_string = ("SELECT count(*) FROM cable_plugs_view "
"WHERE code='%s' AND channel_id='%s';" % (code, channel_id))
output = query(query_string)
if debug:
print ' ', query_string
print ' ', output
if not output:
return code_exists, channel_id_exists
count_channel = output[0][0]
if count_channel > 0:
channel_id_exists = True
code_exists = True
if debug:
print ' Equipment', code, 'exists?: ', code_exists
print ' Channel', channel_id, 'exists?: ', channel_id_exists
return code_exists, channel_id_exists
| 5,344,180
|
def tail(file, n=1, bs=1024):
""" Read Last n Lines of file
credit:
https://www.roytuts.com/read-last-n-lines-from-file-using-python/
https://github.com/roytuts/python/blob/master/read-lines-from-last/last_lines_file.py
"""
f = open(file)
f.seek(0, 2)
l = 1-f.read(1).count('\n')
B = f.tell()
while n >= l and B > 0:
block = min(bs, B)
B -= block
f.seek(B, 0)
l += f.read(block).count('\n')
f.seek(B, 0)
l = min(l, n)
lines = f.readlines()[-l:]
f.close()
return lines
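
# A usage sketch, assuming an "app.log" file exists; readlines() keeps the
# trailing newlines, hence end="".
for line in tail('app.log', n=5):
    print(line, end='')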
| 5,344,181
|
def create_hparams(state, FLAGS): # pylint: disable=invalid-name
"""Creates hyperparameters to pass into Ray config.
Different options depending on search or eval mode.
Args:
state: a string, 'train' or 'search'.
FLAGS: parsed command line flags.
Returns:
tf.hparams object.
"""
epochs = 0
tf.logging.info('data path: {}'.format(FLAGS.data_path))
hparams = tf.contrib.training.HParams(
train_size=FLAGS.train_size,
validation_size=FLAGS.val_size,
dataset=FLAGS.dataset,
data_path=FLAGS.data_path,
expsize=FLAGS.expsize,
batch_size=FLAGS.bs,
max_seq_length = FLAGS.max_seq_length,
gradient_clipping_by_global_norm=0.1,
explore=FLAGS.explore,
aug_policy=FLAGS.aug_policy,
recompute_dset_stats=FLAGS.recompute_dset_stats,
lr=FLAGS.lr,
weight_decay_rate=FLAGS.wd,
test_batch_size=FLAGS.test_bs)
if state == 'train':
hparams.add_hparam('no_aug', FLAGS.no_aug)
hparams.add_hparam('development', FLAGS.development)
hparams.add_hparam('use_hp_policy', FLAGS.use_hp_policy)
hparams.add_hparam('limit_test_data', False)
if FLAGS.use_hp_policy:
if FLAGS.hp_policy == 'random':
tf.logging.info('RANDOM SEARCH')
parsed_policy = []
for i in range(NUM_HP_TRANSFORM * 2):
if i % 2 == 0:
parsed_policy.append(random.random()) # --- probability
else:
parsed_policy.append(random.random()) # --- magnitude
elif FLAGS.hp_policy.endswith('.txt') or FLAGS.hp_policy.endswith(
'.p'):
# --- will be loaded in in data_utils
parsed_policy = FLAGS.hp_policy
else:
# --- parse input into a fixed augmentation policy
print(FLAGS.hp_policy)
print(type(FLAGS.hp_policy))
parsed_policy = FLAGS.hp_policy.split(',')
parsed_policy = [float(p) for p in parsed_policy]
hparams.add_hparam('hp_policy', parsed_policy)
hparams.add_hparam('hp_policy_epochs', FLAGS.hp_policy_epochs)
hparams.add_hparam('flatten', FLAGS.flatten)
elif state == 'search':
hparams.add_hparam('no_aug', False)
hparams.add_hparam('development', FLAGS.development)
hparams.add_hparam('use_hp_policy', True)
hparams.add_hparam('limit_test_data', True)
hparams.add_hparam('hp_policy',
[0 for _ in range(2 * NUM_HP_TRANSFORM)]) # --- default start values of 0
else:
raise ValueError('unknown state')
# -- Add new model here
if FLAGS.model_name == 'bert':
hparams.add_hparam('model_name', 'bert')
else:
raise ValueError('Not Valid Model Name: %s' % FLAGS.model_name)
if FLAGS.epochs > 0:
tf.logging.info('overwriting with custom epochs')
epochs = FLAGS.epochs
hparams.add_hparam('num_epochs', epochs)
tf.logging.info('epochs: {}, lr: {}, wd: {}'.format(
hparams.num_epochs, hparams.lr, hparams.weight_decay_rate))
return hparams
| 5,344,182
|
def resources_match(resource_one, resource_two):
"""
Checks if resource_one and resource_two match. If two folders, recursively compares contents.
If two files, compares versions.
"""
if resource_one['type'] == FOLDER:
match = recursively_compare_folders(resource_one, resource_two)
else:
match = compare_versions(resource_one, resource_two)
return match
| 5,344,183
|
def plotfootprints(simplefootprintlist, borderpixels, dimshape, chips, groupid, firstid, nextid, grouplen, heirarchy,
centroid, colorlist, verbose):
# ------------------------------------------------------------------------------------------------------------------------
"""
This function plots the footprints using matplotlib
"""
import matplotlib
matplotlib.interactive(True)
import pylab
print whocalls(verbose=verbose) + "Plotting ..."
pylab.figure(5)
pylab.clf()
pylab.hold(True)
pylab.plot([0], [0], 'w')
pylab.plot([dimshape[1]], [dimshape[0]], 'w')
ax = pylab.axes()
ax.set_autoscale_on(False)
ax.set_aspect('equal', 'box', 'C')
pylab.xlim([0, dimshape[1]])
pylab.ylim([0, dimshape[0]])
for g in chips:
index = pylab.find(groupid == g)
if verbose:
pylab.plot(borderpixels[index, 1], borderpixels[index, 0], '.',
color=colorlist[(g + 1) % len(colorlist)][0])
pos = firstid[g]
for i in range(grouplen[g]):
#pylab.text(borderpixels[pos,1],borderpixels[pos,0],str(pos))
pos = nextid[pos]
for fp in range(len(simplefootprintlist)):
for i in range(len(simplefootprintlist[fp]) - 1):
j = i
dj = 1
numbertext = "(" + str(fp) + ")"
linestyle = ":"
if heirarchy[1][fp] == 0:
j = len(simplefootprintlist[fp]) - 1 - i
dj = -1
numbertext = str(fp)
linestyle = "-"
            pylab.plot([borderpixels[simplefootprintlist[fp][j], 1], borderpixels[simplefootprintlist[fp][j + dj], 1]],
                       [borderpixels[simplefootprintlist[fp][j], 0], borderpixels[simplefootprintlist[fp][j + dj], 0]],
                       linestyle=linestyle,
                       color=colorlist[fp % len(colorlist)][0])
if verbose:
pylab.plot([borderpixels[simplefootprintlist[fp][j], 1]],\
[borderpixels[simplefootprintlist[fp][j], 0]], 'ko')
else:
pylab.plot([borderpixels[simplefootprintlist[fp][j], 1]],\
[borderpixels[simplefootprintlist[fp][j], 0]], 'k.')
if len(simplefootprintlist) > 1:
pylab.text(centroid[fp][1], centroid[fp][2], numbertext, horizontalalignment="center",
verticalalignment="center", color=colorlist[fp % len(colorlist)][0])
pylab.plot([centroid['total'][1]], [centroid['total'][2]], 'rx')
r = ''
r = raw_input("Hit return to quit ")
| 5,344,184
|
def read():
"""Read content of predefined numpy archive file."""
return _read(tml.value('numpy', section='data', subkey='fname'))
| 5,344,185
|
def test_run_method_should_raise_command_error_when_command_does_not_exist():
"""
Test HelpCommand.run for non-existing command
"""
options_mock = Mock()
args = ('mycommand',)
help_cmd = HelpCommand()
with pytest.raises(CommandError):
help_cmd.run(options_mock, args)
| 5,344,186
|
def elexon_b1630(args):
""" Actual or forecast Wind & Solar Generation """
if not check_api_key(args):
return None
api = B1630(args.apikey)
if args.settlement_period is None:
print("A settlement period should be supplied using the --settlement-period flag (range 1 to 50)."
"Defaulting to 1")
if args.date is None:
print("A date should be supplied using the --date flag. Format is YYYY-MM-DD. Defaulting to today")
if not api.get_data(**{'SettlementDate': args.date or date.today().strftime("%Y-%m-%d"),
'Period': args.settlement_period or 1}):
print("No data returned.")
return None
fmt = StdoutFormatter("10s", "6s", "6s", "10.1f", "20s", "30s")
print("\n" + fmt.titles('Date', 'Period', 'Active', 'Output', 'Type', 'Reference'))
for item in sorted(api.items, key=lambda xxx: xxx['documentid']):
print(fmt.row(item['settlementdate'],
str(item['settlementperiod']),
str(item['activeflag']),
float(item['quantity']),
item.get('powersystemresourcetype', 'n/a'),
item['documentid'] + " - " + item['documentrevnum']))
return api
| 5,344,187
|
def null_count(df):
"""
df is a dataframe
Check a dataframe for nulls and return the number of missing values.
"""
return df.isnull().sum().sum()
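
# A small usage example for null_count.
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, np.nan, 2.0]})
print(null_count(df))   # 3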
| 5,344,188
|
def rpy2r(roll, pitch=None, yaw=None, *, unit="rad", order="zyx"):
"""
Create an SO(3) rotation matrix from roll-pitch-yaw angles
:param roll: roll angle
:type roll: float
:param pitch: pitch angle
:type pitch: float
:param yaw: yaw angle
:type yaw: float
:param unit: angular units: 'rad' [default], or 'deg'
:type unit: str
:param order: rotation order: 'zyx' [default], 'xyz', or 'yxz'
:type order: str
:return: SO(3) rotation matrix
:rtype: ndarray(3,3)
:raises ValueError: bad argument
- ``rpy2r(⍺, β, γ)`` is an SO(3) orthonormal rotation matrix (3x3)
equivalent to the specified roll (⍺), pitch (β), yaw (γ) angles angles.
These correspond to successive rotations about the axes specified by
``order``:
- 'zyx' [default], rotate by γ about the z-axis, then by β about the new
y-axis, then by ⍺ about the new x-axis. Convention for a mobile robot
with x-axis forward and y-axis sideways.
- 'xyz', rotate by γ about the x-axis, then by β about the new y-axis,
then by ⍺ about the new z-axis. Convention for a robot gripper with
z-axis forward and y-axis between the gripper fingers.
- 'yxz', rotate by γ about the y-axis, then by β about the new x-axis,
then by ⍺ about the new z-axis. Convention for a camera with z-axis
parallel to the optic axis and x-axis parallel to the pixel rows.
- ``rpy2r(RPY)`` as above but the roll, pitch, yaw angles are taken
from ``RPY`` which is a 3-vector with values (⍺, β, γ).
.. runblock:: pycon
>>> from spatialmath.base import *
>>> rpy2r(0.1, 0.2, 0.3)
>>> rpy2r([0.1, 0.2, 0.3])
>>> rpy2r([10, 20, 30], unit='deg')
:seealso: :func:`~eul2r`, :func:`~rpy2tr`, :func:`~tr2rpy`
"""
if base.isscalar(roll):
angles = [roll, pitch, yaw]
else:
angles = base.getvector(roll, 3)
angles = base.getunit(angles, unit)
if order == "xyz" or order == "arm":
R = rotx(angles[2]) @ roty(angles[1]) @ rotz(angles[0])
elif order == "zyx" or order == "vehicle":
R = rotz(angles[2]) @ roty(angles[1]) @ rotx(angles[0])
elif order == "yxz" or order == "camera":
R = roty(angles[2]) @ rotx(angles[1]) @ rotz(angles[0])
else:
raise ValueError("Invalid angle order")
return R
| 5,344,189
|
def plot3d_embeddings(dataset, embeddings, figure=None):
"""Plot sensor embedding in 3D space using mayavi.
Given the dataset and a sensor embedding matrix, each sensor is shown as
a sphere in the 3D space. Note that the shape of embedding matrix is
(num_sensors, 3) where num_sensors corresponds to the length of
``dataset.sensor_list``. All embedding vectors range between 0 and 1.
Args:
dataset (:obj:`~pymrt.casas.CASASDataset`): CASAS smart home dataset.
        embeddings (:obj:`numpy.ndarray`): 3D sensor vector embedding.
        figure: Existing mayavi figure to draw into. If None, a new figure
            is created and shown.
    Returns:
        The mayavi figure and the plotted points object.
    """
show_figure = False
if figure is None:
show_figure = True
figure = mlab.figure('Sensor Embedding (3D)')
# Plot sensors, texts and outlines
figure.scene.disable_render = True
points = mlab.points3d(embeddings[:, 0], embeddings[:, 1], embeddings[:, 2],
scale_factor=0.015)
for i, x in enumerate(embeddings):
mlab.text3d(x[0], x[1], x[2], dataset.sensor_list[i]['name'],
scale=(0.01, 0.01, 0.01))
mlab.outline(None, color=(.7, .7, .7), extent=[0, 1, 0, 1, 0, 1])
ax = mlab.axes(None, color=(.7, .7, .7), extent=[0, 1, 0, 1, 0, 1],
ranges=[0, 1, 0, 1, 0, 1], nb_labels=6)
ax.label_text_property.font_size = 3
ax.axes.font_factor = 0.3
figure.scene.disable_render = False
if show_figure:
mlab.show()
return figure, points
| 5,344,190
|
def geometry(cnf_save_fs, mod_thy_info, conf='sphere', hbond_cutoffs=None):
""" get the geometry
"""
assert conf in ('minimum', 'sphere')
# Read the file system
if conf == 'minimum':
geom = _min_energy_conformer(
cnf_save_fs, mod_thy_info, hbond_cutoffs=hbond_cutoffs)
elif conf == 'sphere':
geom = _spherical_conformer(cnf_save_fs)
return geom
| 5,344,191
|
def exportToVtk(gridFunction, dataType, dataLabel, fileNamesBase, filesPath=None, type='ascii'):
"""
Export a grid function to a VTK file.
*Parameters:*
- gridFunction (GridFunction)
The grid function to be exported.
- dataType ('cell_data' or 'vertex_data')
        Determines whether data are attached to vertices or cells.
- dataLabel (string)
Label used to identify the function in the VTK file.
- fileNamesBase (string)
Base name of the output files. It should not contain any directory
part or filename extensions.
- filesPath (string)
Output directory. Can be set to None (default), in which case the files are
output in the current directory.
- type ('ascii', 'base64', 'appendedraw' or 'appendedbase64')
Output type. See the Dune reference manual for more details.
"""
return _constructObjectTemplatedOnBasisAndResult(
core, "exportToVtk",
gridFunction.basisFunctionType(), gridFunction.resultType(),
gridFunction, dataType, dataLabel, fileNamesBase, filesPath, type)
| 5,344,192
|
def plotSources(coords, observations, diameter=12,
include=None, exclude=None,
mosaic=False, width=60, length=60, pa=0.,
mframe='icrs', freq=345., mosonly=False,
plotroot='source', plotsizeo=120., plottype='pdf'):
"""
Plot observations that are nearby the specified coordinates.
Inputs:
coords : proposed coordinates from the user set by the
function getSourceCoordinates()
diameter : diameter of the telescope in meters
exclude : if set, it contains a list of spreadsheet row numbers
that should not be plotted.
freq : proposed observing frequency in GHz
include : if set, it contains a list of spreadsheet row
numbers that can be plotted if all other criteria
are set.
length : length of the mosaic in arcseconds
mframe : coordinate system of the mosaic (icrs or galactic)
mosaic : if True, the proposed observations are a mosaic
mosonly : if True, plot/print mosaic observations only
observations : existing observations from readObservations()
pa : position angle of the mosaic in degrees
    plotroot     : root name for the file containing the plot. The root
                   name will be appended with a plot number (which is
                   useful when plotting multiple sources) and the
                   plottype.
plotsizeo : plot size in arcseconds
plottype : Type of plot to generate. The plot type must be
supported by your version of python. "pdf" and "png"
are usually safe. If plottype=None, then no plot is
saved.
Default is "pdf".
width : width of the mosaic in arcseconds
"""
# Set spacing for formatting purposes
spaces = ''
# Make a plot for each source
for i in range(coords[ORIGINAL].size):
# Initialize plot
py.figure(i+1, figsize=(9,9))
py.clf()
ax = py.subplot(1, 1, 1, aspect='equal')
# Set plot width
plotsize = coords[PLOTSIZE][i]
if plotsize is None:
plotsize = plotsizeo
# Find sources that overlap
if coords[NOCOORD][i]:
result = observations[DATA][TARGET][observations[DATA][TARGET].str.lower() == coords[ORIGINAL][i].lower()]
else:
# Compute separation in degrees
sep = coords[COORDS][i].separation(observations[COORDS])
sepArcsec = sep.arcsec
# Find entries within plot size
separationArcsec = sepArcsec - 0.5*observations[DATA][MAX_SIZE]
result = separationArcsec[separationArcsec <= (0.5*plotsize)]
jindices = result.index
# Print summary of observation
if len(jindices) == 0:
sra = convertHmsString(float(coords[COORDS][i].ra.deg/15.0), ndec=1, delimiter='hms')
sdec = convertHmsString(float(coords[COORDS][i].dec.deg), ndec=1, delimiter='dms')
print('')
print('Summary information for %s' % coords[ORIGINAL][i])
print(' No observations found within %g x %g arcsec region centered around (ra, dec) = (%s, %s) J2000' % \
(plotsize, plotsize, sra, sdec))
else:
printSummaryHeader('%g x %g arcsec region around %s' % \
(plotsize, plotsize, coords[ORIGINAL][i]), spaces=spaces)
# Set limits based on plotsize
ax.set_xlim( 0.5*plotsize, -0.5*plotsize)
ax.set_ylim(-0.5*plotsize, 0.5*plotsize)
ax.set_xlabel('$\\Delta\\alpha\ \ \\mathrm{(arcsec)}$')
ax.set_ylabel('$\\Delta\\delta\ \ \\mathrm{(arcsec)}$')
# Get row lists to include/exclude
rows_include = makeList(include)
rows_exclude = makeList(exclude)
# Set plot center.
# Sources will be plotted in offsets relative to this coordinate.
ra_center = coords[COORDS][i].ra.deg
dec_center = coords[COORDS][i].dec.deg
# Loop over observations which overlap the field
legend_symbols = []
legend_labels = []
number = 0
for indx in jindices:
# Get excel line
excelRow = getExcelFromIndex(indx)
if rows_include is not None and excelRow not in rows_include:
continue
if rows_exclude is not None and excelRow in rows_exclude:
continue
# If not mosaic and mosonly=True, then skip
if not isObsMosaic(observations[DATA], indx) and mosonly:
continue
# Get ra and dec offset from nominal position in arcseconds
if coords[NOCOORD][i]:
dalp = 0
ddec = 0
else:
# Compute offset
ddec = (observations[DATA][DEC_DEG][indx] - dec_center) * 3600.0
dra = (observations[DATA][RA_DEG][indx] - ra_center)
if abs(dra) > 180.0:
# we are on either side of RA=0
if dra > 0:
dra -= 360.0
else:
dra += 360.0
dalp = dra * 3600.0 * np.cos(dec_center / 180.0 * np.pi)
# Set center as offset coordinates
xy = (dalp, ddec)
# Print summary of observation
number += 1
printSummarySource(number, observations, indx, spaces=spaces)
# Set plot color based on band
color = getBandColor(observations[DATA][REF_FREQ][indx])
# Plot mosaic or single pointing
label = 'N = %d' % number
if isObsMosaic(observations[DATA], indx, mtype=MOSAIC_TYPE_RECTANGLE):
# Get the coordinates of the rectangle in RA/DEC offset units
mosaicCorners = getMosaicCorners(\
observations[DATA][RA_DEG][indx],
observations[DATA][DEC_DEG][indx],
observations[DATA][MOS_LENGTH][indx],
observations[DATA][MOS_WIDTH][indx],
observations[DATA][MOS_PA][indx],
observations[DATA][MOS_COORD][indx],
center=[ra_center, dec_center])
result = plotMosaic(ax, mosaicCorners, fc=color, ec=color , hatch=None, alpha=0.1)
else:
result = plotPrimaryBeam(ax, xy, observations[DATA][REF_FREQ][indx], diameter,
fc='None', ec=color)
legend_symbols.append(result)
legend_labels.append(label)
# Loop over observations which overlap the field
# color = getBandColor(freq)
color = 'tan'
if mosaic:
corners = getMosaicCorners(ra_center, dec_center, length, width, pa, mframe)
result = plotMosaic(ax, corners, fc=color, ec='None', alpha=0.5, linewidth=3)
else:
result = plotPrimaryBeam(ax, (0,0), freq, diameter, fc=color, ec='None', alpha=0.5)
legend_symbols.append(result)
legend_labels.append('Proposed')
# Plot title with original entry and translation
sra = convertHmsString(float(coords[COORDS][i].ra.deg/15.0), ndec=1, delimiter='hms')
sdec = convertHmsString(float(coords[COORDS][i].dec.deg), ndec=1, delimiter='dms')
if coords[NOCOORD][i]:
labelTranslated = ''
else:
labelTranslated = '%s, %s' % (sra, sdec)
labelOriginal = '%s' % coords[ORIGINAL][i]
fs = 14
# ax.set_title(labelOriginal, loc='left', fontsize=fs)
# ax.set_title(labelTranslated, loc='right', fontsize=fs)
ax.annotate(s='Entered:', xy=(0,1.05), xycoords='axes fraction', size=fs)
ax.annotate(s=labelOriginal, xy=(0.2,1.05), xycoords='axes fraction', size=fs)
ax.annotate(s='Translated:', xy=(0,1.01), xycoords='axes fraction', size=fs)
ax.annotate(s=labelTranslated, xy=(0.2,1.01), xycoords='axes fraction', size=fs)
# py.legend(legend_symbols, legend_labels)
# Save plot to a file
py.show()
if plottype is not None:
# Set name
root = plotroot
if root is None:
root = 'source'
# Try creating plot
try:
plotfile = '%s%d.%s' % (root, i+1, plottype)
py.savefig(plotfile)
print(' Plot saved to %s' % plotfile)
except:
print(' Warning: Could not create plot %s.' % plotfile)
print(' Is that plot type supported by your python extension?')
| 5,344,193
|
def save_img(img, path):
"""
Writes the image to disk
:param img: the rgb image to save
:param path: the target path
"""
img = to_rgb(img)
smisc.imsave(path, img.round().astype(np.uint8))
| 5,344,194
|
def get_fullname(user):
""" Get from database fullname for user
"""
data = frappe.db.sql("""
SELECT full_name FROM `tabUser` WHERE name=%s and docstatus<2""", user, True)
return data
| 5,344,195
|
def process_song_data(spark, input_data, output_data):
"""
Gets the data from the s3 bucket and creates the
artists and songs tables
:param spark: the spark session
:param input_data: the basic path for the input data
:param output_data: the basic path where the data will be written
:return none:
"""
# get filepath to song data file
song_data = input_data + 'song_data/*/*/*/*.json'
song_schema = StructType([
StructField("num_songs", IntegerType(), True),
StructField("artist_id", StringType(), False),
StructField("artist_latitude", DoubleType(), True),
StructField("artist_longitude", DoubleType(), True),
StructField("artist_location", StringType(), True),
StructField("artist_name", StringType(), True),
StructField("song_id", StringType(), False),
StructField("title", StringType(), False),
StructField("duration", FloatType(), True),
StructField("year", IntegerType(), False)
])
# read song data file
    df_song = spark.read.json(song_data, schema=song_schema)
# extract columns to create songs table
songs_table = df_song.select('song_id',
col('title').alias('song_title'),
'artist_id',
'year',
'duration').dropDuplicates()
## Table created user feedback
user_feedback(songs_table, 'Created Songs Table')
# write songs table to parquet files partitioned by year and artist
    songs_table.write.partitionBy('year', 'artist_id').parquet(output_data + 'songs/')
# extract columns to create artists table
    artists_table = df_song.select('artist_id',
'artist_name',
'artist_location',
'artist_latitude',
'artist_longitude')\
.dropDuplicates()
# write artists table to parquet files
artists_table.write.parquet(output_data + 'artists/')
## Table created user feedback
user_feedback(artists_table, 'Created Artists Table')
| 5,344,196
|
def greedy_inference(original, protein_column = 'Protein Accession', peptide_column = 'Base Sequence'):
"""
    Greedy protein inference algorithm for matching peptides to corresponding proteins
    Notation:
G : original graph
Gi : inferred graph
Gr : remaining graph
Gd: dropped graph
        p : greedily selected protein
s : peptides connected to p
    Select peptides in G that only match to a single protein
Add proteins corresponding to peptides and all attached peptides to Gi
Remove said proteins from Gr
    While Gr has edges connecting proteins and peptides
        Greedily select the best protein p
        Add p and its connected peptides to Gi
        Add peptide-protein edges where the protein is not p and the peptide is in s to Gd
        Remove edges where the peptide is in s from Gr
Remake Gi and make Gd
Gi remade to contain all protein-peptide edges that connect to an inferred protein
Gd made to contain all protein-peptide edges that do not connect to an inferred protein
Parameters
---------
original : pandas DataFrame
        original peptide-protein graph
protein_column : str
column associated with protein accession
peptide_column : str
column associated with peptide
Returns
--------
inferred: pandas DataFrame
Gi, subgraph of G of proteins and their associated peptides
    dropped: pandas DataFrame
        Gd, subgraph of G of proteins and their associated peptides
    rescued: pandas DataFrame
        protein-peptide edges recovered by rescue_matched_proteins after the greedy pass
"""
# Find peptides that attach to only one protein
# Add those proteins to inferred proteins bag
# Remove any peptides that connect to inferred proteins
peptide_sizes = original.groupby(peptide_column).size().reset_index().rename(columns = {0:'size'})
single_peptides = list(peptide_sizes[peptide_sizes['size'] == 1][peptide_column])
inferred_proteins = list(original[original[peptide_column].isin(single_peptides)][protein_column])
attached_peptides = set(original[original[protein_column].isin(inferred_proteins)][peptide_column])
remaining = original[~original[peptide_column].isin(attached_peptides)]
while len(remaining) > 0:
# Greedy select best protein
best_protein = find_best_protein(remaining, original, protein_column)
inferred_proteins.append(best_protein)
# Remove peptides that connect to protein from remaining
attached_peptides = set(remaining[remaining[protein_column] == best_protein][peptide_column])
is_matched_peptide = remaining[peptide_column].isin(attached_peptides)
remaining = remaining[~is_matched_peptide]
inferred = original[original[protein_column].isin(inferred_proteins)]
dropped = original[~original[protein_column].isin(inferred_proteins)]
# Rescue proteins
inferred, dropped, rescued = rescue_matched_proteins(inferred, dropped)
return inferred, dropped, rescued
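
# A small usage sketch; it assumes the helper functions referenced above
# (find_best_protein, rescue_matched_proteins) are defined in this module.
import pandas as pd

edges = pd.DataFrame({
    'Protein Accession': ['P1', 'P1', 'P2', 'P2', 'P3'],
    'Base Sequence':     ['AAAA', 'BBBB', 'BBBB', 'CCCC', 'CCCC'],
})
inferred, dropped, rescued = greedy_inference(edges)
print(sorted(inferred['Protein Accession'].unique()))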
| 5,344,197
|
def cmd_display(current_board : board.Board, flags : dict):
"""cmd: display [-a]. Display the board state. -a for more information"""
graphics.display_board(current_board)
if 'a' in flags:
current_board.print_data()
if 'p' in flags:
current_board.print_active_pieces()
| 5,344,198
|
def statistics_command(message):
"""Statistics command handler"""
sqlighter = db_worker.SQLighter(os.getenv('DB_PATH'))
chat_id = message.chat.id
notes = sqlighter.get('chat_id', chat_id)
ready_num = unready_num = 0
for note in notes:
if note[3]:
ready_num += 1
else:
unready_num += 1
data = {
'all_num': len(notes),
'unready_num': unready_num,
'ready_num': ready_num}
text = utils.statistics_template(data)
bot.reply_to(message, 'Your statistics of all time:')
bot.send_message(chat_id, text, parse_mode='HTML')
| 5,344,199
|