content stringlengths 5 1.05M |
|---|
import numpy as np
from scipy.linalg import eigvalsh
"""# Objective Functions
"""
class quadratic_function_fast:
    """Regularized quadratic objective ||Y - A Psi||^2 + alpha * ||A||^2.

    ``A`` is always passed flattened and reshaped internally to
    (sizeY, sizePsi).  The Gram products ``Y Psi^T`` and ``Psi Psi^T`` are
    precomputed once so that gradient evaluations stay cheap.
    """

    def __init__(self, Psi, Y, alpha=0.0):
        self.sizePsi, _ = Psi.shape
        self.sizeY = Y.shape[0]
        self.Y = Y.copy()
        self.alpha = alpha
        self.Psi = Psi.copy()
        self.hessian_mat = None
        self.L = None    # cached largest hessian eigenvalue
        self.mu = None   # cached smallest hessian eigenvalue
        # Precompute the two Gram products reused by f/gradient/hessian.
        self.y_dot_psi = np.dot(self.Y, self.Psi.T)
        self.psi_dot_psi = np.dot(self.Psi, self.Psi.T)

    def f(self, A):
        """Objective value at the (flattened) coefficient matrix A."""
        residual = self.Y - np.dot(A.reshape(self.sizeY, self.sizePsi), self.Psi)
        return np.linalg.norm(residual) ** 2 + self.alpha * np.linalg.norm(A) ** 2

    def gradient(self, A):
        """Flattened gradient -2 Y Psi^T + 2 A Psi Psi^T + 2 alpha A."""
        A_mat = A.reshape(self.sizeY, self.sizePsi)
        return (
            -2.0 * self.y_dot_psi.flatten()
            + 2.0 * np.dot(A_mat, self.psi_dot_psi).flatten()
            + 2.0 * self.alpha * A.flatten()
        )

    def hessian(self):
        """Hessian 2 (Psi Psi^T + alpha I) of the objective."""
        dim = len(self.psi_dot_psi)
        return 2.0 * self.psi_dot_psi + 2.0 * self.alpha * np.identity(dim)

    def line_search(self, grad, d, x, maxStep=1.0):
        """Exact line-search step along direction d, capped at maxStep."""
        grad_mat = grad.reshape(self.sizeY, self.sizePsi)
        d_mat = d.reshape(self.sizeY, self.sizePsi)
        numerator = -np.trace(np.dot(0.5 * grad_mat.T, d_mat))
        denominator = (
            np.linalg.norm(np.dot(d_mat, self.Psi)) ** 2
            + self.alpha * np.linalg.norm(d_mat) ** 2
        )
        return min(maxStep, numerator / denominator)

    def psi_val(self):
        return self.Psi

    def y_val(self):
        return self.Y

    def number_basis_functions(self):
        return self.sizePsi

    def number_dimensions(self):
        return self.sizeY

    def _cache_spectrum(self):
        # Lazily compute and cache both extreme eigenvalues of the hessian.
        if self.L is None or self.mu is None:
            spectrum = eigvalsh(self.hessian())
            self.L = np.max(spectrum)
            self.mu = np.min(spectrum)

    def largest_eigenvalue(self):
        """Largest hessian eigenvalue (Lipschitz constant of the gradient)."""
        self._cache_spectrum()
        return self.L

    def smallest_eigenvalue(self):
        """Smallest hessian eigenvalue (strong-convexity constant)."""
        self._cache_spectrum()
        return self.mu

    def proximal_operator(self, A, lambda_val):
        """Soft-thresholding of A with threshold lambda_val."""
        shrunk = np.abs(A) - lambda_val
        return np.sign(A) * np.maximum(shrunk, 0.0)
class quadratic_LASSO:
    """LASSO objective f(A) = 0.5 * ||Y - A Psi||^2 + regularization * ||A||_1.

    Split as f = F + G where F (``f_smooth``) is smooth, convex and
    differentiable, and G (the l1 penalty) is convex but non-smooth, as
    required by proximal-gradient solvers (ISTA/FISTA).
    """

    def __init__(self, Psi, Y):
        self.sizePsi, _ = Psi.shape  # number of basis functions
        self.sizeY = Y.shape[0]      # number of target dimensions
        self.Y = Y.copy()
        self.Psi = Psi.copy()
        self.hessian_mat = None      # cached Psi Psi^T
        self.L = None                # cached largest hessian eigenvalue

    def f_smooth(self, A):
        """Smooth part F(A) = 0.5 * ||Y - A Psi||^2 (A passed flattened)."""
        residual = self.Y - np.dot(A.reshape(self.sizeY, self.sizePsi), self.Psi)
        return 0.5 * np.linalg.norm(residual) ** 2

    def f(self, A, regularization):
        """Full objective F(A) + regularization * ||A||_1."""
        return self.f_smooth(A) + regularization * np.sum(np.abs(A))

    def gradient_smooth(self, A):
        """Gradient of F: -(Y - A Psi) Psi^T, returned with matrix shape."""
        residual = self.Y - np.dot(A.reshape(self.sizeY, self.sizePsi), self.Psi)
        return -np.dot(residual, self.Psi.T)

    def hessian(self):
        """Hessian of F (Psi Psi^T), computed lazily and cached."""
        if self.hessian_mat is None:
            self.hessian_mat = np.dot(self.Psi, self.Psi.T)
        return self.hessian_mat

    def largest_eigenvalue_smooth(self):
        """Largest hessian eigenvalue (Lipschitz constant of grad F), cached.

        BUG FIX: the original imported ``eigsh`` from
        ``scipy.sparse.linalg.eigen.arpack``, a private module path removed in
        SciPy 1.8; the public ``scipy.sparse.linalg`` location is used instead.
        """
        if self.L is None:
            from scipy.sparse.linalg import eigsh
            self.L = eigsh(self.hessian(), 1, which='LM',
                           return_eigenvectors=False)[0]
        return self.L

    def proximal_operator(self, A, lambda_val):
        """Soft-thresholding operator, the proximal map of the l1 penalty."""
        return np.sign(A) * np.maximum(np.abs(A) - lambda_val, 0.)

    def starting_point(self):
        """Zero coefficients as the canonical starting iterate."""
        return np.zeros((self.sizeY, self.sizePsi))

    def return_Psi(self):
        return self.Psi

    def return_Y(self):
        return self.Y

    def return_shape(self):
        return (self.sizeY, self.sizePsi)
class solution_polishing:
    """Quadratic program f(x) = b.x + 0.5 x^T Q x for polishing a solution.

    ``Q`` is expected to be a scipy sparse matrix: it is densified once in
    the constructor to compute its extreme eigenvalues, which are cached.

    Cleanup: the original placed ``import numpy as np`` and an unused
    ``from scipy.sparse import issparse`` inside the class body, creating
    stray class attributes; both relied on / belong at module scope.
    """

    def __init__(self, Q, b):
        self.Q = Q.copy()
        self.b = b.copy()
        # Extreme eigenvalues of Q, computed once on the dense matrix.
        w = eigvalsh(self.Q.todense())
        self.L = np.max(w)
        self.Mu = np.min(w)

    def f(self, x):
        """Objective value b.x + 0.5 x^T Q x."""
        return self.b.dot(x) + 0.5 * x.T.dot(self.Q.dot(x))

    def gradient(self, x):
        """Gradient b + Q x."""
        return self.b + self.Q.dot(x)

    def line_search(self, grad, d, x, maxStep=1.0):
        """Exact line-search step -d.g / (d^T Q d) along d, capped at maxStep."""
        alpha = -d.dot(grad) / d.T.dot(self.Q.dot(d))
        return min(alpha, maxStep)

    def largest_eigenvalue(self):
        """Cached largest eigenvalue of Q."""
        return self.L

    def smallest_eigenvalue(self):
        """Cached smallest eigenvalue of Q."""
        return self.Mu
|
'''Main code with the analysis for the Methane project'''
import os
import time
import numpy as np
import pandas as pd
import scipy as sc
import matplotlib.pyplot as plt
import methfun as mf  # module for this project
import methdata as md
import spectral_analysis as sa
from scipy.stats import ks_2samp
# import stats as st
# TODOs ###
# - check writing / reading of dataframe with multi index
# - check stationarity measure vs Matlab code
# - check partition and intermittence surface renewals
# - check IWATA partitioning & wavelet coeffients
###
####################### move all this is methdata at some point ###########
# 1. Read turbulence data
fs = 10  # sampling rate [Hz]
dt = 1/fs  # delta time between two observations
z_ref = 2.8  # [m] measurement height
# refscal = 'H2O' # for ebullition detection
# do not include in the analysis runs with less than minsamplesize observations
minsamplesize = 10000  # keep only complete records
nobs_min = 2**14  # minimum record length after max-correlation shift
################################################################################
# read atmospheric pressure if needed to convert concentration values
# NOTE(review): assumes the first six columns of the csv are the
# year/month/day/hour/minute/second fields pd.to_datetime can assemble —
# confirm against the SMEARII_p.csv layout.
dfpr = pd.read_csv(os.path.join(md.datapath, 'SMEARII_p.csv'))
dfpr['datetime'] = pd.to_datetime(dfpr.iloc[:,:6])
dfpr.rename(columns = {'HYY_META.Pamb0':'patm_hPa'}, inplace = True)
# 30-minute average pressure, aligned with the half-hour runs processed below
dfpr30 = dfpr.resample('30min', on = 'datetime').agg({'patm_hPa':np.mean})
dfpr30['patm'] = dfpr30['patm_hPa']*100  # in Pascal
################################################################################
# counters needed below
first_time1 = True  # first valid run: initialize the turbulence dataframe
first_time2 = True  # first valid run/scalar: initialize the multi-index frame
init_time = time.time()
# to save the cleaned data in csv files for further analyses
save_cleaned_data = True
# ordered list of variables to read on Siikaneva_1 files
site = 'Siikaneva1'
myvars = ['u', 'v', 'w', 'T', 'CO2', 'H2O', 'CH4']
# scalar quantities to analyze and save in csv
scalars = ['u', 'T', 'CO2', 'H2O', 'CH4']
# scalars = ['H2O', 'CH4']
# range of date-times when observations were collected
# date_times = pd.date_range(start='2013-06-01 00:00:00',
#                            end='2013-08-31 23:30:00', freq='30min')
# olli's code is one date ahead (compare previous day here)
date_times = pd.date_range(start='2013-06-1 18:00:00',
                           end='2013-08-31 20:00:00', freq='30min')
# remove a corrupted file:
if np.size(np.where(date_times == '2013-08-27 01:00:00')) > 0:
    date_times = date_times.delete(
        np.where(date_times == '2013-08-27 01:00:00')[0][0])
# create folders for output data if they do not exist already::
if not os.path.exists(md.outfolder_df):
    os.makedirs(md.outfolder_df)
if not os.path.exists(md.outdir_data):
    os.makedirs(md.outdir_data)
if not os.path.exists(md.outdir_plot):
    os.makedirs(md.outdir_plot)
# select relevant dates
# data available are from June 1st (DOY 152) to August 31st (DOY 243)
#date_times = pd.date_range(start='2013-06-01 00:30:00',
#                           end='2013-06-09 23:30:00', freq='30min')
# date_times = date_times[:100]
###########################################################################
# loop on runs
nruns = len(date_times)
# for ir, element in enumerate(date_times):
# EXS = np.zeros(len(date_times), dtype= bool)
# SIZES = np.zeros(len(date_times))
for ir, mydate in enumerate(date_times):
    # ---- load one 30-minute run ----------------------------------------
    print(ir)
    df0, exists = mf.load_data_Siikaneva(date_times[ir], md.datapath, myvars)
    if exists:
        # keep only the longest stretch without missing data:
        df01 = mf.remove_missing_data(df0, nobs_min=nobs_min)[0]
    if exists and (df01.shape[0] < minsamplesize):
        exists = False  # record too short to analyze
    if exists:
        df1, nspikes = mf.despike(df01, myvars)
        # compute mean wind direction and rotate axes:
        windir = mf.WindDir(np.mean(df1['u'].values), np.mean(df1['v'].values))
        df2, angles = mf.coordrot(df1)
        # correct gas concentration to be relative to dry air
        # NOTE(review): CH4 is corrected with the already-corrected H2O
        # series (H2O is overwritten one line earlier) — confirm intended.
        df2['CO2'] = df2['CO2'].values/(1-df2['H2O'].values*1e-3)
        df2['H2O'] = df2['H2O'].values/(1-df2['H2O'].values*1e-3)
        df2['CH4'] = df2['CH4'].values/(1-df2['H2O']*1e-3)  ## ADDED OCT
        # compute concentrations per unit volume via the ideal gas law,
        # using either the local measured pressure or the average value
        TmeanK = np.mean(df2['T'])
        localp = True
        if localp:
            print('using local atm pressure')
            # read from the csv the pressure value for this date time
            patm = dfpr30['patm'].loc[mydate]
        else:
            print('using average atm pressure')
            patm = 101325
        univ_gas_const = 8.314
        corr_conc = patm/univ_gas_const/TmeanK  # n/V = p / (R T)
        df2['CO2'] = df2['CO2']*corr_conc
        df2['CH4'] = df2['CH4']*corr_conc
        df2['H2O'] = df2['H2O']*corr_conc
        # further check for remaining CH4 spikes (standardized series):
        cn = (df2['CH4'].values - np.mean(df2['CH4'].values))/np.std(df2['CH4'].values)
        cn0 = (df01['CH4'].values - np.mean(df01['CH4'].values))/np.std(df01['CH4'].values)
        if np.max(cn) > 60:
            print('Possible CH4 spike')
            plt.figure()
            plt.title('run {}'.format(ir))
            plt.plot(cn)
            plt.plot(cn0)
            plt.show()
        # compute the lag between gas and velocity time series
        df3, lag_dict = mf.correct_lags(df2, md.laglim, fs)
        # keep only the longest stretch without missing data for all
        # variables (repeated after despiking and translating data)
        df, enough_data = mf.remove_missing_data(df3, nobs_min = nobs_min)
        if not enough_data:
            exists = False
    if exists:
        # name for output csv file in format YYYYMMDD_HHMM
        csv_name = str(mydate.year) + mf.dbs(mydate.month) \
            + mf.dbs(mydate.day) + '_' \
            + mf.dbs(mydate.hour) + mf.dbs(mydate.minute)
        if save_cleaned_data:
            df.to_csv(os.path.join(md.outfolder_df,
                      '{}.csv'.format(csv_name)), index = False)
        print(mydate)
        # compute turbulent quantities
        turb_stats = mf.turb_quant(df, fs, z_ref)
        # modified: only check for u, T, H2O stationarity, not CH4
        fst, is_stationary = mf.flux_stationarity(df, fs, ws = 300, lim_diff = 0.3)
        for mykey, myval in fst.items():
            turb_stats['stat_foken_{}'.format(mykey)] = myval
        # add other relevant quantities to the summary statistics:
        turb_stats['windir'] = windir
        turb_stats['angles_theta'] = angles[0]
        turb_stats['angles_phi'] = angles[1]
        turb_stats['angles_psi'] = angles[2]
        turb_stats['date'] = date_times[ir]
        turb_stats['exists'] = exists  # file is there with >= 2**14 points
        turb_stats['length'] = df.shape[0]
        turb_stats['is_stationary'] = is_stationary
        turb_stats['csv_name'] = str(csv_name)
        if first_time1:
            # initialize the run-level results frame on the first valid run
            print('initializing dataframe with turbulent quantities')
            turb_variables = list(turb_stats.keys())
            init_data0 = np.zeros((nruns, len(turb_variables)))*np.nan
            tdf = pd.DataFrame(init_data0, index=np.arange(nruns),
                               columns = turb_variables)
            tdf['csv_name'] = tdf['csv_name'].astype('str')
            tdf['exists'] = np.zeros(nruns).astype('bool')
            tdf['is_stationary'] = np.zeros(nruns).astype('bool')
            first_time1 = False
        for varx in turb_stats.keys():
            tdf.at[ir, varx] = turb_stats[varx]
        # repeat the main analysis in Gaby's paper for all the scalars
        for var in scalars:
            squants = mf.scalar_quant(var, df, turb_stats, fs, z_ref)
            mixmom = mf.mixed_moments(df[var].values, df['w'].values)
            me_DSo = mf.Delta_So(df[var].values, df['w'].values)
            res_eT = mf.Transport_Eff(df[var].values, df['w'].values)
            betaREAres = mf.REA_Beta(df[var].values, df['w'].values)
            betaREA_Milne = mf.REA_Beta_Milne(df[var].values, df['w'].values)
            # merge all results for this scalar in a single dictionary
            sdict = {**squants, **mixmom, **me_DSo, **res_eT, **betaREAres,
                     **betaREA_Milne}
            if first_time2 and var == scalars[0]:
                # initialize the (variable, scalar) multi-index result frame
                print('initializing multi index data frame with results')
                variables = list(sdict.keys())
                scal_cols = pd.MultiIndex.from_product([variables, scalars],
                                                       names=['variable', 'scalar'])
                init_data1 = np.zeros((nruns, len(variables)*len(scalars)))*np.nan
                sdf = pd.DataFrame(init_data1, index=np.arange(nruns),
                                   columns = scal_cols)
                first_time2 = False
            for variable in sdict.keys():
                # BUG FIX: the original chained assignment
                # `sdf[variable, var].iloc[ir] = ...` writes through an
                # intermediate object and can silently fail to update sdf
                # (pandas SettingWithCopy); .loc with the full
                # (row, column) key assigns in place.
                sdf.loc[ir, (variable, var)] = sdict[variable]
#
#
#
# save results for each scalar for each run in a dataframe
# first remove missing datasets
# NOTE(review): tdf/sdf only exist if at least one run was valid — this
# section raises NameError otherwise.
tdf3 = tdf[tdf['exists']==True].copy()
sdf3 = sdf[tdf['exists']==True].copy()
# full (unfiltered) frames, one row per scheduled run
tdf.to_csv( os.path.join(md.outdir_data, 'all_results_tdf.csv'))
sdf.to_csv( os.path.join(md.outdir_data, 'all_scalars_sdf.csv'))
# test for non locality of H2O records:
h2o_is_local = mf.test_non_locality(tdf3, sdf3, plot = False)
tdf3['h2o_is_local'] = h2o_is_local
# tdf3.assign(h2o_is_local = h2o_is_local)
# filtered frames with only the valid runs
tdf3.to_csv( os.path.join(md.outdir_data, 'results_tdf.csv'))
sdf3.to_csv( os.path.join(md.outdir_data, 'scalars_sdf.csv'))
plot = False
if plot:
    os.system('python meth_plots_old.py')
final_time = time.time()
exec_time = (final_time - init_time)/60.0 # in minutes
print('execution time was {} minutes'.format(exec_time))
# tdf2 = tdf[tdf['exists'] == True]
#
# plt.plot(df['CH4'])
|
"""
Desafio 001
Problema: Crie um programa que escreva "Olá mundo" na tela.
Resolução do problema:
"""
print("Olá, Mundo!!!")
|
import msal
from settings import settings
from office365.graph_client import GraphClient
def acquire_token_msal():
    """
    Acquire an app-only access token via the MSAL client-credentials flow.

    Reads the tenant id and client credentials from ``settings`` and returns
    the raw MSAL result dict (contains ``access_token`` on success, or
    ``error``/``error_description`` on failure).
    """
    authority_url = 'https://login.microsoftonline.com/{0}'.format(settings['tenant'])
    app = msal.ConfidentialClientApplication(
        authority=authority_url,
        client_id=settings['client_credentials']['client_id'],
        client_credential=settings['client_credentials']['client_secret']
    )
    # ".default" requests all statically consented application permissions
    result = app.acquire_token_for_client(scopes=["https://graph.microsoft.com/.default"])
    return result
# Build a Graph client that authenticates through the MSAL callback above,
# then print the id of every team visible to the application.
client = GraphClient(acquire_token_msal)
teams = client.teams.get_all().execute_query()
for team in teams:
    print(team.id)
|
#!/usr/bin/python
import os
import time
import requests
import json
import uuid
import sys
from pymongo import MongoClient
def db_connect(host='localhost', port=27017):
    """Open a MongoDB connection and return the 'registry' database handle."""
    uri = "mongodb://%s:%d" % (host, port)
    return MongoClient(uri).registry
def get_instalation_id():
    """Return the persistent installation id, creating it on first run.

    The id is stored in the 'dockerPamameters' collection (sic — the
    misspelled collection name is load-bearing; renaming it would orphan
    existing databases).
    """
    db = db_connect()
    if "dockerPamameters" not in db.collection_names():
        # First run ever: create the collection with a fresh UUID.
        db.create_collection("dockerPamameters")
        db.dockerPamameters.insert(
            {
                "instalationId": str(uuid.uuid4())
            }
        )
    document = db.dockerPamameters.find_one({"instalationId": { "$exists": True }})
    if document is None:
        # Collection existed but held no id document yet — create one now.
        db.dockerPamameters.insert(
            {
                "instalationId": str(uuid.uuid4())
            }
        )
        document = db.dockerPamameters.find_one({"instalationId": { "$exists": True }})
    return document['instalationId']
def get_total_devices():
    """Return the number of documents in the 'devices' collection."""
    return db_connect().devices.count()
def get_last_device():
    """Return the registrationDate of the newest device document, or 0 if none."""
    newest = db_connect().devices.find_one({}, sort=[('$natural', -1)])
    if newest is None:
        return 0
    return newest["registrationDate"]
def get_total_incoming_messages():
    """Return the number of documents in the 'incomingEvents' collection."""
    return db_connect().incomingEvents.count()
def get_last_incoming_message():
    """Return the 'ts' of the newest incoming event document, or 0 if none."""
    newest = db_connect().incomingEvents.find_one({}, sort=[('$natural', -1)])
    if newest is None:
        return 0
    return newest["ts"]
def send_statistics(devicesCount, lastRegisteredEvent, incomingEventsCount, lastIncomingReceivedEvent):
    """POST the current usage counters to the Konker stats endpoint."""
    url = 'https://umc.konkerlabs.net/stats'
    payload = {
        "timestamp": int(time.time()),
        "instalationId": instalationId,  # module-level global set at startup
        "devicesCount": devicesCount,
        "lastRegisteredEvent": lastRegisteredEvent,
        "incomingEventsCount": incomingEventsCount,
        "lastIncomingReceivedEvent": lastIncomingReceivedEvent,
    }
    headers = {'Content-type': 'application/json'}
    return requests.post(url, data=json.dumps(payload), headers=headers)
###########################################################################
# NOTE(review): this script is Python 2 (`print e` statements below).
usageStatsParam = ''
if os.environ.get('USAGE_STATS') is not None:
    usageStatsParam = os.environ.get('USAGE_STATS')
# Opt-out: USAGE_STATS=disabled or USAGE_STATS=false skips reporting entirely.
if usageStatsParam.lower() == 'disabled' or usageStatsParam.lower() == 'false':
    sys.stdout.write("usage statistics disabled...\n")
    sys.stdout.flush()
    exit(0);
sys.stdout.write("usage statistics enabled...\n")
sys.stdout.flush()
try:
    # Get instalation id
    instalationId = get_instalation_id()
    # Get current usage and send an initial report
    devicesCount = get_total_devices()
    lastRegisteredEvent = get_last_device()
    incomingEventsCount = get_total_incoming_messages()
    lastIncomingReceivedEvent = get_last_incoming_message()
    send_statistics(devicesCount, lastRegisteredEvent, incomingEventsCount, lastIncomingReceivedEvent)
except Exception as e:
    print e
# Main Loop: poll every 15 minutes and re-send stats only when a counter changed
while True:
    try:
        newDevicesCount = get_total_devices()
        newLastRegisteredEvent = get_last_device()
        newIncomingEventsCount = get_total_incoming_messages()
        newIncomingLastReceivedEvent = get_last_incoming_message()
        changed = False
        # Check changes
        if devicesCount != newDevicesCount: changed = True
        if lastRegisteredEvent != newLastRegisteredEvent: changed = True
        if incomingEventsCount != newIncomingEventsCount: changed = True
        if lastIncomingReceivedEvent != newIncomingLastReceivedEvent: changed = True
        if changed:
            send_statistics(newDevicesCount, newLastRegisteredEvent, newIncomingEventsCount, newIncomingLastReceivedEvent)
            devicesCount = newDevicesCount
            lastRegisteredEvent = newLastRegisteredEvent
            incomingEventsCount = newIncomingEventsCount
            lastIncomingReceivedEvent = newIncomingLastReceivedEvent
    except Exception as e:
        print e
    # Sleep for 15 minutes
    time.sleep(15 * 60)
|
from pycatenary import cable
from scipy.integrate import solve_bvp
import matplotlib.pyplot as plt
import numpy as np
# Cable length [m]
L = 10
R1x = 0.2 * 5 # Horizontal force at fairlead [N]
R1y = 7 # Vertical force at fairlead [N]
p = 1 # Lineic weight [N/m]
# Indices of each component in the BVP state vector [x, y, T, theta]
i_x = 0
i_y = 1
i_T = 2
i_theta = 3
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_bvp.html
def fun(s, state):
    '''Returns the derivative of state vector which describes the ordinary differential equation of catenary'''
    # NOTE(review): f and p are hard-coded to 1 here, shadowing the
    # module-level `param` dict (f=10, p=10) — confirm which is intended.
    param = {
        "f": 1,
        "p": 1
    }
    # state rows (see i_* indices): x, y, tension T, inclination theta
    return np.vstack((np.cos(state[i_theta]), \
                      np.sin(state[i_theta]), \
                      param["p"] * np.sin(state[i_theta]),\
                      (param["f"] + param["p"] * np.cos(state[i_theta])) / state[i_T]))
def bc(ya, yb):
    '''Boundary-condition residuals for solve_bvp; all must be zero.

    ya/yb are the state vectors at the first/last mesh point.
    '''
    # BUG FIX: the original placed inline comments after backslash line
    # continuations, which is a SyntaxError (nothing may follow the
    # backslash); the annotations were moved onto each element instead.
    return np.array([
        ya[i_x] - 0,                          # x coordinate of first extremity of cable
        ya[i_y] - 0,                          # y coordinate of first extremity of cable
        yb[i_T] * np.cos(yb[i_theta]) - R1x,  # horizontal force equilibrium at other extremity
        yb[i_T] * np.sin(yb[i_theta]) - R1y,  # vertical force equilibrium at other extremity
    ])
def analytical_solution(s, p, R1x, R1y, L):
    """Closed-form catenary curve under self weight only.

    :param s: curvilinear abscissa (scalar or array) [m]
    :param p: linear weight [N/m]
    :param R1x: horizontal force at end of catenary [N]
    :param R1y: vertical force at end of catenary [N]
    :param L: length of catenary [m]
    :return: (x, y) coordinates along the curve, following
        https://api-mecaspa.pagesperso-orange.fr/aero_energies/eolien_volant/idees_sur_le_calcul_du_cable.htm
    """
    K = p / R1x                       # lineic weight over horizontal tension
    ttheta0 = (R1y - p * L) / R1x     # tangent of inclination at the start
    lambda0 = -np.arcsinh(ttheta0) / K
    # common argument shared by the x and y expressions
    arg = s * p / R1x - np.sinh(K * lambda0)
    x = np.arcsinh(arg) / K + lambda0
    y = (np.cosh(np.arcsinh(arg)) - np.cosh(K * lambda0)) / K
    return (x, y)
def init_guess(s, p, R1x, R1y, L):
    """Initial state guess for solve_bvp built from the analytical curve.

    Geometry comes from ``analytical_solution``; tension grows linearly to
    R1y and the angle is linearly interpolated between the end inclinations.
    """
    x, y = analytical_solution(s, p, R1x, R1y, L)
    ttheta0 = (R1y - p * L) / R1x
    tension = R1y - p * (L - s)
    angle = np.arctan(ttheta0) * (L - s) / L + np.arctan(R1y / R1x) * s / L
    return np.vstack((x, y, tension, angle))
# curvilinear abscissa
curvi = np.linspace(0, L, 100)
(x, y) = analytical_solution(curvi, p, R1x, R1y, L)
guess0 = init_guess(curvi, p, R1x, R1y, L)
print(guess0)
print(x, y)
plt.plot(x, y, label='Geom')
plt.show()
# initial state for solve_bvp: analytical geometry + guessed tension/angle
sol_init = np.zeros((4, curvi.size))
sol_init[i_x, :] = x
sol_init[i_y, :] = y
sol_init[i_T, :] = guess0[i_T, :]
sol_init[i_theta, :] = guess0[i_theta, :]
param = {
    "f": 10,
    "p": 10
}
# NOTE(review): this overwrites the lineic weight `p` (scalar) with an int
# parameter array; `fun` ignores it and hard-codes f=p=1 — confirm intent.
p = np.array([0, 0])
p[0] = param["f"]
p[1] = param["p"]
res_b = solve_bvp(fun, bc, curvi, sol_init)
# NOTE(review): x_plot spans [0, 1] with L points while the mesh spans
# [0, L] — probably meant np.linspace(0, L, 100); verify before using it.
x_plot = np.linspace(0, 1, L)
y_plot_b = res_b.sol(x_plot)[0]
print(res_b)
plt.plot(res_b.y[i_x, :], res_b.y[i_y, :], label='Geom')
plt.legend()
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# define properties of cable
length = L # length of line
w = p # submerged weight (NOTE(review): p is now the parameter array, not the scalar weight)
EA = 560e3 # axial stiffness (for elastic catenary)
floor = False # if True, contact is possible at the level of the anchor
anchor = [0., 0., 0.]
fairlead = [x[-1], 0., y[-1]]
# create cable instance (EA=None -> inelastic catenary)
l1 = cable.MooringLine(L=length,
                       w = w,
                       EA = None,
                       anchor = anchor,
                       fairlead = fairlead,
                       floor = floor)
# compute calculation
l1.computeSolution()
# change fairlead position
l1.setFairleadCoords([x[-1], 0., y[-1]])
# recompute solution
l1.computeSolution()
# get tension along line (between 0. and total line length)
T = curvi.copy()
for index, item in enumerate(curvi):
    print(item)
    print(index)
    print(l1.getTension(item))
    T[index] = np.linalg.norm(l1.getTension(item))
print(T)
# get xyz coordinates along line
print(L)
print(curvi[-1])
xyz = 0 * guess0[0:3, :]
for index, item in enumerate(curvi[0:-2]):
    xyz[:, index] = l1.s2xyz(item)
plt.plot(xyz[0, :], xyz[2, :], label='Geom')
plt.show()
|
import datetime
def to_html(keyword_rank, now):
    """
    Render the keyword-ranking dataframe as an HTML e-mail body.

    :param keyword_rank: dataframe with 'Item', 'Status' and 'Change'
                         columns to include in the mail body
    :param now: current time, shown in the heading
    :return: the content formatted as HTML
    """
    content = '<html>\n<body>\n'
    content += "<h3>"+str("무신사 스토어 {} 검색어 랭킹".format(now))+"</h3>\n"
    content += "<table>\n<tr>\n<th>순위</th>\n<th>키워드</th>\n<th colspan=\"2\">상승/감소</th>\n</tr>\n"
    for row in range(len(keyword_rank)):
        # one <td> per column: rank (index), keyword, status, change
        cells = (
            keyword_rank.index[row],
            keyword_rank['Item'].iloc[row],
            keyword_rank['Status'].iloc[row],
            keyword_rank['Change'].iloc[row],
        )
        content += "<tr>\n"
        content += "".join("<td>" + str(cell) + "</td>\n" for cell in cells)
        content += "</tr>\n"
    content += "</table>\n</body>\n</html>"
    return content
import random
# Number-guessing game: the secret is drawn from 1-100 inclusive.
answer = random.randint(1, 100)
print('猜数字游戏,大小范围为1~100')
counter = 0
while True:
    # One guess per iteration; counter tracks total attempts.
    counter += 1
    number = int(input('请输入:'))
    if(number > answer):
        print('大了点')
    elif(number < answer):
        print('小了点')
    else:
        print('恭喜你猜对了')
        break
# Report attempts; binary search solves 1-100 in at most 7 guesses.
print('你共猜了%d次' % counter)
if(counter > 7):
    print('你的智商余额明显不足')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("../")
import argparse
import random
import tensorflow as tf
from models.transformer import Transformer
from deeploglizer.common.preprocess import FeatureExtractor
from deeploglizer.common.dataloader import load_sessions
from deeploglizer.common.utils import seed_everything, dump_final_results, dump_params
# Module-level CLI parser; arguments are registered below at import time.
parser = argparse.ArgumentParser()
class tf_data_generator(tf.keras.utils.Sequence):
    """Keras Sequence that flattens per-session windows into batches.

    Each entry of ``session_dict`` holds windowed features/labels for one
    session; they are flattened into a single list of per-window samples so
    that batches can be drawn across sessions.
    """

    def __init__(self, session_dict, batch_size, feature_type="semantics", shuffle=True):
        # BUG FIX: the original wrote `self.shuffle = shuffle,` and
        # `self.batch_size = batch_size,` (trailing commas), storing
        # 1-tuples.  A non-empty tuple is always truthy, so shuffle=False
        # still shuffled, and batch_size had to be unwrapped as
        # self.batch_size[0] everywhere.  Store the plain values instead.
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.max_input_size = 0
        self.feature_len = 0
        flatten_data_list = []
        # Flatten all sessions into individual window samples.
        for session_idx, data_dict in enumerate(session_dict.values()):
            features = data_dict["features"][feature_type]
            window_labels = data_dict["window_labels"]
            window_anomalies = data_dict["window_anomalies"]
            # Track the longest session (used to size the model input).
            self.max_input_size = max(self.max_input_size, len(window_labels))
            for window_idx in range(len(window_labels)):
                sample = {
                    "session_idx": session_idx,  # positional index, not session id
                    "features": features[window_idx],
                    "window_labels": window_labels[window_idx],
                    "window_anomalies": window_anomalies[window_idx],
                }
                self.feature_len = len(features[window_idx])
                flatten_data_list.append(sample)
        self.flatten_data_list = flatten_data_list

    def on_epoch_end(self):
        # Reshuffle between epochs only when requested.
        if self.shuffle:
            random.shuffle(self.flatten_data_list)

    def __getitem__(self, idx):
        """Return the list of samples forming batch ``idx``."""
        return self.flatten_data_list[idx * self.batch_size:(idx + 1) * self.batch_size]

    def __len__(self):
        # Number of full batches; the trailing partial batch is dropped.
        return len(self.flatten_data_list) // self.batch_size
##### Model params
parser.add_argument("--model_name", default="Transformer", type=str)
parser.add_argument("--hidden_size", default=128, type=int)
parser.add_argument("--num_layers", default=2, type=int)
parser.add_argument("--embedding_dim", default=32, type=int)
parser.add_argument("--nhead", default=2, type=int)
##### Dataset params
parser.add_argument("--dataset", default="HDFS", type=str)
parser.add_argument(
    "--data_dir", default="../data/processed/HDFS_100k/hdfs_0.0_tar", type=str
)
parser.add_argument("--window_size", default=10, type=int)
parser.add_argument("--stride", default=1, type=int)
##### Input params
parser.add_argument("--feature_type", default="sequentials", type=str)
parser.add_argument("--use_attention", action="store_true")
parser.add_argument("--label_type", default="next_log", type=str)
parser.add_argument("--use_tfidf", action="store_true")
parser.add_argument("--max_token_len", default=50, type=int)
parser.add_argument("--min_token_count", default=1, type=int)
# Uncomment the following to use pretrained word embeddings. The "embedding_dim" should be set as 300
# parser.add_argument(
#     "--pretrain_path", default="../data/pretrain/wiki-news-300d-1M.vec", type=str
# )
##### Training params
parser.add_argument("--epoches", default=100, type=int)
parser.add_argument("--batch_size", default=1024, type=int)
parser.add_argument("--learning_rate", default=0.01, type=float)
parser.add_argument("--topk", default=10, type=int)
parser.add_argument("--patience", default=3, type=int)
##### Others
parser.add_argument("--random_seed", default=42, type=int)
parser.add_argument("--gpu", default=0, type=int)
params = vars(parser.parse_args())
# NOTE(review): parse_args/dump_params run at import time (outside the
# __main__ guard), so importing this module parses argv and writes params.
model_save_path = dump_params(params)
if __name__ == "__main__":
seed_everything(params["random_seed"])
session_train, session_test = load_sessions(data_dir=params["data_dir"])
ext = FeatureExtractor(**params)
session_train = ext.fit_transform(session_train)
session_test = ext.transform(session_test, datatype="test")
dataset_train = tf_data_generator(session_train, feature_type=params["feature_type"],
batch_size=params["batch_size"], shuffle=True)
dataset_test = tf_data_generator(session_test, feature_type=params["feature_type"], batch_size=params["batch_size"],
shuffle=True)
curr_batch_size = 1024
# ext.meta_data = {'num_labels': 14, 'vocab_size': 14}
model = Transformer(
meta_data=ext.meta_data, batch_sz=curr_batch_size, model_save_path=model_save_path, **params
)
eval_results = model.fit(
train_loader=dataset_train,
test_loader=dataset_test,
epoches=params["epoches"],
)
result_str = "\t".join(["{}-{:.4f}".format(k, v) for k, v in eval_results.items()])
key_info = [
"dataset",
"train_anomaly_ratio",
"feature_type",
"label_type",
"use_attention",
]
args_str = "\t".join(
["{}:{}".format(k, v) for k, v in params.items() if k in key_info]
)
dump_final_results(params, eval_results, model)
|
from .model_pgn import PGNModel
from .utils import decode_labels, inv_preprocess, prepare_label, save, load
from .ops import conv2d, max_pool, linear
from .image_reader import ImageReader
from .image_reader_pgn import ImageReaderPGN |
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 17:37:18 2018
@author: ejreidelbach
:DESCRIPTION:
:REQUIRES:
:TODO:
"""
#==============================================================================
# Package Import
#==============================================================================
import os
import pandas as pd
import pathlib
#==============================================================================
# Function Definitions / Reference Variable Declaration
#==============================================================================
def function_name(var1, var2, var3):
    '''
    Purpose: Stuff goes here (template placeholder — returns None until
        a real implementation is filled in)

    Input:
        (1) var1 (type): description
        (2) var2 (type): description
        (3) var3 (type): description

    Output:
        (1) output1 (type): description
    '''
def createRankingVariable():
    '''
    Purpose: Placeholder for computing ranking variables (national,
        conference and division rankings).

    BUG FIX: the original body contained only comments, which leaves the
    function without a statement and is a SyntaxError; ``pass`` makes it a
    valid no-op stub.
    '''
    # National Ranking
    # Conference Ranking
    # Division Ranking
    pass
#==============================================================================
# Working Code
#==============================================================================
# Set the project working directory (two directory levels above this file)
path_project = pathlib.Path(__file__).resolve().parents[2]
#path_project = pathlib.Path('/home/ejreidelbach/Projects/cfbAnalysis')
os.chdir(path_project)
|
"""
All ACM related deployment classes and functions should go here.
"""
import os
import logging
import tempfile
import shutil
import requests
from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import (
CommandFailed,
DRPrimaryNotFoundException,
)
from ocs_ci.ocs.utils import get_non_acm_cluster_config
from ocs_ci.utility.utils import run_cmd, run_cmd_interactive
from ocs_ci.ocs.node import get_typed_worker_nodes, label_nodes
logger = logging.getLogger(__name__)
def run_subctl_cmd(cmd=None):
    """
    Run subctl command

    Args:
        cmd (str): subctl subcommand (and its arguments) to be executed

    Raises:
        ValueError: if no subcommand is given.  The original default of
            None would otherwise fail inside str.join with an opaque
            TypeError.
    """
    if cmd is None:
        raise ValueError("subctl subcommand must be provided")
    run_cmd(" ".join(["subctl", cmd]))
def run_subctl_cmd_interactive(cmd, prompt, answer):
    """
    Handle interactive prompts with answers during subctl command

    Args:
        cmd (str): Command to be executed
        prompt (str): Expected question during command run which needs to be provided
        answer (str): Answer for the prompt

    Raises:
        InteractivePromptException: in case something goes wrong
    """
    full_cmd = " ".join(["subctl", cmd])
    timeout = config.ENV_DATA["submariner_prompt_timeout"]
    run_cmd_interactive(full_cmd, {prompt: answer}, timeout=timeout)
class Submariner(object):
"""
Submariner configuaration and deployment
"""
    def __init__(self):
        # whether upstream OR downstream
        self.source = config.ENV_DATA["submariner_source"]
        # Designated broker cluster index where broker will be deployed
        # (get_primary_cluster_index is defined elsewhere in this class)
        self.designated_broker_cluster_index = self.get_primary_cluster_index()
        # sequence number for the clusters from submariner perspective
        # Used mainly to run submariner commands, for each cluster (except
        # ACM hub) we will assign a seq number with 1 as primary and
        # continue with subsequent numbers
        self.cluster_seq = 1
        # List of index to all the clusters which are participating in DR
        # (except ACM) i.e index in the config.clusters list
        self.dr_only_list = []
def deploy(self):
if self.source == "upstream":
self.deploy_upstream()
else:
self.deploy_downstream()
    def deploy_upstream(self):
        # Fetch the subctl binary, then run broker deploy / join / verify.
        self.download_binary()
        self.submariner_configure_upstream()
    def deploy_downstream(self):
        # Downstream (product build) deployment is not supported yet.
        raise NotImplementedError("Deploy downstream functionality not implemented")
    def download_binary(self):
        """Download the subctl binary via the upstream installer script.

        The upstream downloader script is fetched over HTTP into a temp
        file, executed with bash, and the resulting binary copied from
        ~/.local/bin into the ocs-ci bin directory.
        """
        if self.source == "upstream":
            # This script puts the platform specific binary in ~/.local/bin
            # we need to move the subctl binary to ocs-ci/bin dir
            try:
                resp = requests.get(constants.SUBMARINER_DOWNLOAD_URL)
            except requests.ConnectionError:
                logger.exception(
                    "Failed to download the downloader script from submariner site"
                )
                raise
            # delete=False so bash can read the script after we write it
            tempf = tempfile.NamedTemporaryFile(
                dir=".", mode="wb", prefix="submariner_downloader_", delete=False
            )
            tempf.write(resp.content)
            # NOTE(review): tempf is not flushed/closed before bash runs it;
            # buffered content may not be on disk yet — verify.
            # Actual submariner binary download
            cmd = f"bash {tempf.name}"
            try:
                run_cmd(cmd)
            except CommandFailed:
                logger.exception("Failed to download submariner binary")
                raise
            # Copy submariner from ~/.local/bin to ocs-ci/bin
            # ~/.local/bin is the default path selected by submariner script
            shutil.copyfile(
                os.path.expanduser("~/.local/bin/subctl"),
                os.path.join(config.RUN["bin_dir"], "subctl"),
            )
def submariner_configure_upstream(self):
"""
Deploy and Configure upstream submariner
Raises:
DRPrimaryNotFoundException: If there is no designated primary cluster found
"""
if self.designated_broker_cluster_index < 0:
raise DRPrimaryNotFoundException("Designated primary cluster not found")
# Deploy broker on designated cluster
# follow this config switch statement carefully to be mindful
# about the context with which we are performing the operations
config.switch_ctx(self.designated_broker_cluster_index)
logger.info(f"Switched context: {config.cluster_ctx.ENV_DATA['cluster_name']}")
deploy_broker_cmd = "deploy-broker"
try:
run_subctl_cmd(deploy_broker_cmd)
except CommandFailed:
logger.exception("Failed to deploy submariner broker")
raise
# Label the gateway nodes on all non acm cluster
restore_index = config.cur_index
for cluster in get_non_acm_cluster_config():
config.switch_ctx(cluster.MULTICLUSTER["multicluster_index"])
gateway_node = self.get_default_gateway_node()
label_nodes([gateway_node], constants.SUBMARINER_GATEWAY_NODE_LABEL)
config.switch_ctx(restore_index)
# Join all the clusters (except ACM cluster in case of hub deployment)
for cluster in config.clusters:
print(len(config.clusters))
cluster_index = cluster.MULTICLUSTER["multicluster_index"]
if cluster_index != config.get_acm_index():
join_cmd = (
f"join --kubeconfig {cluster.RUN['kubeconfig']} "
f"{config.ENV_DATA['submariner_info_file']} "
f"--clusterid c{self.cluster_seq} --natt=false"
)
try:
run_subctl_cmd(
join_cmd,
)
logger.info(
f"Subctl join succeded for {cluster.ENV_DATA['cluster_name']}"
)
except CommandFailed:
logger.exception("Cluster failed to join")
raise
self.cluster_seq = self.cluster_seq + 1
self.dr_only_list.append(cluster_index)
# Verify submariner connectivity between clusters(excluding ACM)
kubeconf_list = []
for i in self.dr_only_list:
kubeconf_list.append(config.clusters[i].RUN["kubeconfig"])
connct_check = f"verify {' '.join(kubeconf_list)} --only connectivity"
run_subctl_cmd(connct_check)
def get_primary_cluster_index(self):
"""
Return list index (in the config list) of the primary cluster
A cluster is primary from DR perspective
Returns:
int: Index of the cluster designated as primary
"""
for i in range(len(config.clusters)):
if config.clusters[i].MULTICLUSTER.get("primary_cluster"):
return i
return -1
def get_default_gateway_node(self):
"""
Return the default node to be used as submariner gateway
Returns:
str: Name of the gateway node
"""
# Always return the first worker node
return get_typed_worker_nodes()[0]
|
from panda3d.core import NodePath
from panda3d.core import Point3
from panda3d.core import LineSegs
from wecs import panda3d as wp3d
from wecs import mechanics
from wecs.aspects import Aspect
from wecs.panda3d import aspects
from mapedit.helpers import draw_grid
from mapedit import mapedit
from mapedit.cursor import cursor
# Systems that run every frame, in execution order.
system_types = [
    wp3d.ManageGeometry,
    mechanics.DetermineTimestep,
    wp3d.UpdateCharacter,
    mapedit.cursor.Cursoring,  # Horizontal movement with optional grid-snapping.
    wp3d.ExecuteMovement,
    wp3d.UpdateCameras,
    mapedit.mapeditor.UpdateMapEditor,  # Handles Creator and Tilemap (to be split up later)
]
# empty scene with a grid.
gridsize = 500 # Size of grid in cells
cellsize = 2 # Size of cells in meters
# NOTE(review): `panda3d` is never bound as a name in this file (only
# `from panda3d.core import ...` is used); presumably the `panda3d.*`
# component lookups below are meant to resolve via `wecs.panda3d`
# (imported as `wp3d`) — confirm, otherwise this raises NameError.
# Similarly `mapedit.cursor` / `mapedit.mapeditor` are attribute lookups on
# the `mapedit.mapedit` submodule bound above — verify they resolve.
# `base` and `loader` are presumably ShowBase globals injected by Panda3D
# at runtime — TODO confirm.
aspects.empty_scene.add(
    base.ecs_world.create_entity(),
    overrides = {
        panda3d.Model: dict(node=draw_grid(gridsize, gridsize, cellsize)),
    }
)
# cursor entity.
cursor_node = NodePath("cursor")
cursor_model = loader.loadModel("../../assets/cursor.bam")
cursor_model.set_scale(cellsize)
cursor_model.reparent_to(cursor_node)
cursor.add(
    base.ecs_world.create_entity(),
    overrides={
        panda3d.ThirdPersonCamera: dict(distance=15.0, focus_height=0),
        panda3d.TurntableCamera: dict(pitch=-90),
        panda3d.CursorMovement: dict(move_snap=cellsize),
        panda3d.Model: dict(node=cursor_node),
        # Start the cursor at the center of the grid.
        panda3d.Position: dict(value=Point3(gridsize/2, gridsize/2, 0)),
    }
)
|
from test_base import MainTestCase
from main.views import home, show_submission
from odk_viewer.views import survey_responses
from django.core.urlresolvers import reverse
from guardian.shortcuts import assign
class TestFormShowSubmission(MainTestCase):
    """Permission tests for viewing a single submission by uuid.

    Owners and users granted 'view_xform' are redirected from the uuid URL
    to the survey-responses view; anonymous/unauthorized users are sent home.
    """

    def setUp(self):
        MainTestCase.setUp(self)
        self._create_user_and_login()
        self._publish_transportation_form_and_submit_instance()
        # Latest submission of the just-published form.
        self.submission = self.xform.surveys.reverse()[0]
        self.url = reverse(show_submission, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string,
            'uuid': self.submission.uuid,
        })
        self.survey_url = reverse(survey_responses, kwargs={
            'instance_id': self.submission.pk })

    def _assert_redirects_to(self, response, expected_url):
        """Assert that response is a 302 redirect to expected_url."""
        self.assertEquals(response.status_code, 302)
        self.assertEquals(self.base_url + expected_url, response['Location'])

    def test_get_survey_by_uuid(self):
        response = self.client.get(self.url)
        self._assert_redirects_to(response, self.survey_url)

    def test_no_anon_get_survey_by_uuid(self):
        response = self.anon.get(self.url)
        self._assert_redirects_to(response, reverse(home))

    def test_no_without_perm_get_survey_by_uuid(self):
        self._create_user_and_login('alice', 'alice')
        response = self.client.get(self.url)
        self._assert_redirects_to(response, reverse(home))

    def test_with_perm_get_survey_by_uuid(self):
        self._create_user_and_login('alice', 'alice')
        assign('view_xform', self.user, self.xform)
        response = self.client.get(self.url)
        self._assert_redirects_to(response, self.survey_url)

    def test_get_survey(self):
        response = self.client.get(self.survey_url)
        self.assertEquals(response.status_code, 200)

    def test_no_anon_get_survey(self):
        response = self.anon.get(self.survey_url)
        self._assert_redirects_to(response, reverse(home))

    def test_no_without_perm_get_survey(self):
        self._create_user_and_login('alice', 'alice')
        response = self.client.get(self.survey_url)
        self._assert_redirects_to(response, reverse(home))

    def test_with_perm_get_survey(self):
        self._create_user_and_login('alice', 'alice')
        assign('view_xform', self.user, self.xform)
        response = self.client.get(self.survey_url)
        self.assertEquals(response.status_code, 200)
|
from autogluon.tabular.models.lgb.lgb_model import LGBModel
def test_lightgbm_binary(fit_helper):
    """Smoke-test LGBModel with default hyperparameters on the 'adult' dataset."""
    fit_helper.fit_and_validate_dataset(
        dataset_name='adult',
        fit_args=dict(hyperparameters={LGBModel: {}}),
    )
def test_lightgbm_multiclass(fit_helper):
    """Smoke-test LGBModel with default hyperparameters on the 'covertype' dataset."""
    fit_helper.fit_and_validate_dataset(
        dataset_name='covertype',
        fit_args=dict(hyperparameters={LGBModel: {}}),
    )
def test_lightgbm_regression(fit_helper):
    """Smoke-test LGBModel with default hyperparameters on the 'ames' dataset."""
    fit_helper.fit_and_validate_dataset(
        dataset_name='ames',
        fit_args=dict(hyperparameters={LGBModel: {}}),
    )
|
import http.server
import socketserver
import threading
def runHttpServer(host, port, directory):
    """Serve *directory* over HTTP on (host, port) from a background daemon thread.

    Returns the TCPServer instance so the caller can later shut it down.
    """
    class _DirectoryHandler(http.server.SimpleHTTPRequestHandler):
        # Pin the handler to the requested directory instead of the CWD.
        def __init__(self, *handler_args, **handler_kwargs):
            super().__init__(*handler_args, directory=directory, **handler_kwargs)

    httpd = socketserver.TCPServer((host, port), _DirectoryHandler)
    worker = threading.Thread(target=httpd.serve_forever, daemon=True)
    worker.start()
    return httpd
if __name__ == "__main__":
    import time
    # Smoke test: serve the current directory on port 8000 and keep the main
    # thread alive (the server thread is a daemon, so the process would exit
    # immediately without this loop).
    runHttpServer("localhost", 8000, ".")
    while True:
        time.sleep(1)
"""
Paths to Academia.edu Sections for BeautifulSoup Parsing
"""
# Professor/Author Info
PROF_NAME_PATH = ".InlineList-item"
PROF_AFFILIATION_PATH = ".Affiliation-text--affiliation"
PROF_RESEARCH_INTERESTS_PATH = ".Affiliation-text--researchInterests"
PROF_PORTRAIT_PATH = ".profile-avatar"
PROF_BIO_PATH = ".js-profile-about"
# Works Info
TITLES_PATH = ".js-work-strip-work-link.text-gray-darker"
WORKS_PATH = ".media-body"
TOP_LEVEL_WORKS_PATH =".media.work.js-work-strip"
DOWNLOAD_URL_PATH = ".work-download"
ABSTRACT_TRUNCATED_PATH = ".js-work-more-abstract-truncated" #currently unused
ABSTRACT_UNTRUNCATED_PATH = ".js-work-more-abstract-untruncated" |
from abc import ABC, abstractmethod
from banco import Banco
from cliente import Cliente
class Conta(ABC):
    """Abstract bank account holding agency, account number and balance."""

    def __init__(self, agencia, numero_conta, saldo) -> None:
        self.agencia = agencia
        self.numero_conta = numero_conta
        self.saldo = saldo

    @abstractmethod
    def sacar(self, valor):
        """Withdraw *valor*; concrete account types define the rule."""

    def depositar(self, valor):
        """Add *valor* to the balance and display the account state."""
        self.saldo = self.saldo + valor
        self.mostrar()

    def mostrar(self):
        """Print agency, account number and balance, one per line."""
        resumo = (
            f'Agência: {self.agencia}\n'
            f'N° da conta: {self.numero_conta}\n'
            f'Saldo: {self.saldo}'
        )
        print(resumo)
class ContaPoupanca(Conta):
    """Savings account: withdrawals limited to the available balance."""

    def sacar(self, valor):
        """Withdraw *valor* if the balance covers it; otherwise warn and abort."""
        if valor > self.saldo:
            print('Saldo insuficiente')  # fixed typo: was 'insulficiente'
            return
        self.saldo -= valor
        self.mostrar()
class ContaCorrente(Conta):
    """Checking account with an overdraft limit (default 300)."""

    def __init__(self, agencia, numero_conta, saldo, limite=300) -> None:
        super().__init__(agencia, numero_conta, saldo)
        # Extra credit available beyond the balance.
        self.limite = limite

    def sacar(self, valor):
        """Withdraw *valor* if balance + overdraft limit covers it."""
        if valor > (self.saldo + self.limite):
            print('Limite insuficiente')  # fixed typo: was 'insulficente'
            return
        self.saldo -= valor
        self.mostrar()
|
from django.contrib import admin
from learning_logs.models import Topic, Entry  # Import the model classes we created
# Register your models here.
admin.site.register(Topic)  # Register the custom models with Django's admin site
admin.site.register(Entry)
from __future__ import unicode_literals
import frappe
def execute():
    """Backfill Quality Inspection Parameter records from every distinct
    specification used in QI readings and QI templates."""
    frappe.reload_doc('stock', 'doctype', 'quality_inspection_parameter')

    def _distinct_specs(doctype):
        # Distinct `specification` values currently stored in *doctype*.
        rows = frappe.db.get_all(doctype, fields=["distinct specification"])
        return {row.specification for row in rows}

    # Template parameters are included as well, since some may be unused in QI.
    all_specs = _distinct_specs("Quality Inspection Reading") | _distinct_specs(
        "Item Quality Inspection Parameter"
    )
    for spec in all_specs:
        if frappe.db.exists("Quality Inspection Parameter", spec):
            continue
        frappe.get_doc({
            "doctype": "Quality Inspection Parameter",
            "parameter": spec,
            "description": spec
        }).insert(ignore_permissions=True)
class Solution:
    """Minesweeper reveal (LeetCode 529).

    Note: annotations are strings because `typing.List` is not imported in
    this file; string annotations avoid a NameError at definition time.
    """

    def updateBoard(self, board: "List[List[str]]", click: "List[int]") -> "List[List[str]]":
        """Reveal the clicked cell in place and return the board.

        Example:
        board =
        [["E","E","E","E","E"],
         ["E","E","M","E","E"],
         ["E","E","E","E","E"],
         ["E","E","E","E","E"]], click = [3,0]
        Output:
        [["B","1","E","1","B"],
         ["B","1","M","1","B"],
         ["B","1","1","1","B"],
         ["B","B","B","B","B"]]
        """
        row, col = click
        if board[row][col] == "M":
            # Clicking a mine ends the game: mark it exploded.
            board[row][col] = "X"
        else:
            self.dfs(board, row, col)
        return board

    def check(self, board, x, y):
        """Return 1 if (x, y) is on the board and holds a mine, else 0."""
        if not (0 <= x < len(board) and 0 <= y < len(board[0])):
            return 0
        return 1 if board[x][y] == "M" else 0

    def dfs(self, board, x, y):
        """Flood-fill reveal from (x, y): number cells adjacent to mines,
        mark mine-free regions 'B' and recurse into their neighbors."""
        if not (0 <= x < len(board) and 0 <= y < len(board[0])):
            return
        if board[x][y] != "E":
            return  # already revealed, or a mine
        dirs = [(1, 0), (-1, 0), (0, 1), (0, -1), (-1, -1), (-1, 1), (1, -1), (1, 1)]
        mines = sum(self.check(board, x + dx, y + dy) for dx, dy in dirs)
        if mines:
            board[x][y] = str(mines)
            return
        board[x][y] = "B"
        for dx, dy in dirs:
            self.dfs(board, x + dx, y + dy)
#
# Copyright (c) 2018-2020 Charles Godwin <magnum@godwin.ca>
#
# SPDX-License-Identifier: BSD-3-Clause
#
import argparse
import time
import traceback
import json
from os.path import abspath
try:
from magnum import magnum
except:
import magnum
# import magnum
# from magnum import magnum, __version__
# from magnum import Magnum, __version__
args = None
class dummypackets(magnum.Magnum):
    """Magnum reader that replays packets from a text capture file instead of
    a serial port. The file handle comes from the module-level `args`
    namespace populated by main()."""
    # No-op at class scope (kept from original); readPackets reads the
    # module-level `args` directly.
    global args

    def readPackets(self):
        """Parse every line of args.filename and return a list of packet bytes.

        A packet line contains "...=>HEXBYTES", optionally followed by a
        " decode:..." suffix produced by tracing; only the hex between "=>"
        and "decode:" (or end of line) is kept. Closes the file when done.
        """
        line = args.filename.readline()
        # If the file is not empty keep reading one line at a time, till the file is empty
        packets = []
        while line:
            ix = line.find("=>")
            decode = line.find("decode:")
            if ix >= 0:
                decode = line.find("decode:") # supports traced packets
                if decode > ix:
                    stop = decode
                else:
                    stop = len(line)
                packets.append(bytes.fromhex(line[ix+2:stop].strip()))
            line = args.filename.readline()
        args.filename.close()
        return packets
def main():
    """Parse command line options, replay packets from the given file and
    either dump decoded devices as JSON or print the raw packets."""
    global args
    parser = argparse.ArgumentParser("Magnum packet tester")
    parser.add_argument('-d', "--dump", action="store_true", default=False,
                        help="Display packets as JSON (default: %(default)s)")
    parser.add_argument("--trace", action="store_true", default=False,
                        help="Show unpacked info for packet data (default: %(default)s)")
    parser.add_argument("filename", type=argparse.FileType("r", encoding="UTF-8"),
                        help="File name with dummy packets")
    args = parser.parse_args()
    print("Options:{}".format(str(args).replace("Namespace(", "").replace(")", "")))
    dummyreader = dummypackets(cleanpackets=False)
    try:
        if args.dump:
            devices = dummyreader.getDevices()
            print(json.dumps(devices, indent=4, ensure_ascii=True, allow_nan=True, separators=(',', ':')))
        else:
            packets = dummyreader.getPackets()
            formatstring = "Length:{0:2} {1:10}=>{2}"
            # In trace mode keep the cursor on the same line so the decoded
            # fields print right after the raw hex.
            end = ' decode:' if args.trace else '\n'
            for packet in packets:
                print(formatstring.format(
                    len(packet[1]), packet[0], packet[1].hex().upper()), end=end)
                if args.trace:
                    print(*packet[2], end=' ')
                    print(packet[3])
    # Was a bare `except:`, which would also swallow KeyboardInterrupt and
    # SystemExit; catch only real errors.
    except Exception:
        print("Error detected attempting to read data - test failed")
        traceback.print_exc()
        exit(2)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
import sympy as sym
from mockdown.model import ViewLoader, ViewBuilder as V
class TestViewLoader:
    """Tests for ViewLoader dict parsing with an integer number type."""

    def test_loading_ints(self) -> None:
        """Rect values (including floats) are coerced to sym.Integer."""
        loader = ViewLoader(number_type=sym.Integer)

        nested_payload = {
            'name': 'root',
            'rect': [0, 0, 100, 100],
            'children': [{
                'name': 'child',
                'rect': [10, 10, 90, 90]
            }]
        }
        expected_tree = V('root', (0, 0, 100, 100), [
            V('child', (10, 10, 90, 90))
        ]).build(number_type=sym.Integer)
        assert loader.load_dict(nested_payload) == expected_tree

        # Float rects must load to the same integer-typed view.
        flat_payload = {'name': 'root', 'rect': [0.0, 0.0, 100.0, 100.0]}
        expected_flat = V('root', (0, 0, 100, 100)).build(number_type=sym.Integer)
        assert loader.load_dict(flat_payload) == expected_flat
|
import pyinputplus
import datetime
# Prompt (re-prompting on invalid input) for the date range.
start_datem = pyinputplus.inputDate(
    'Start date (YYYY-MM-DD): ', formats=['%Y-%m-%d'])
end_datem = pyinputplus.inputDate(
    'End date: (YYYY-MM-DD): ', formats=['%Y-%m-%d'])
start_year = start_datem.year
start_month = start_datem.month
start_day = start_datem.day
# Midnight of the start date in the machine's local timezone.
start_epoch = round(datetime.datetime(
    start_year, start_month, start_day).timestamp())
# Midnight of the start date in UTC.
start_epoch_GMT = round(datetime.datetime(
    start_year, start_month, start_day, 0, 0, 0, 0, datetime.timezone.utc).timestamp())
end_year = end_datem.year
end_month = end_datem.month
end_day = end_datem.day
# Midnight of the end date in local time.
end_epoch = round(datetime.datetime(
    end_year, end_month, end_day).timestamp())
# NOTE(review): unlike start_epoch_GMT, this adds a fixed 36000 s (10 h)
# offset to the local-time epoch rather than constructing a UTC datetime —
# presumably a UTC+10 shortcut; confirm and unify with start_epoch_GMT.
# Also note end_epoch_GMT is computed but never printed below.
end_epoch_GMT = round(end_epoch + 36000)
print(start_epoch)
print(start_epoch_GMT)
|
import os
import sys
import logging
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Optional
from fairseq.data.mimic3.mimic3_dataset import MIMIC3_Dataset
from fairseq.data.text_compressor import TextCompressionLevel
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from fairseq.tasks.audio_pretraining import InferredW2vConfig
from omegaconf import II, MISSING, OmegaConf
logger = logging.getLogger(__name__)
@dataclass
class MIMIC3_PretrainingConfig(FairseqDataclass):
    """Configuration for the mimic3_pretraining task (wav2vec-style fields).

    Field semantics are given in each field's `help` metadata; field order is
    part of the dataclass layout and must not change.
    """
    data: str = field(default=MISSING, metadata={"help": "path to data directory"})
    labels: Optional[str] = field(
        default=None,
        metadata={
            "help": "extension of the label file to load, used for fine-tuning"},
    )
    binarized_dataset: bool = field(
        default=False,
        metadata={
            "help": "if true, loads binarized dataset (useful for very large datasets). "
            "See examples/wav2vec/scripts/binarize_manifest.sh"
        },
    )
    normalize: bool = field(
        default=False,
        metadata={"help": "if set, normalizes input to have 0 mean and unit variance"},
    )
    enable_padding: bool = field(
        default=False, metadata={"help": "pad shorter samples instead of cropping"}
    )
    max_sample_size: Optional[int] = field(
        default=None, metadata={"help": "max sample size to crop to for batching"}
    )
    min_sample_size: Optional[int] = field(
        default=None, metadata={"help": "min sample size to skip small examples"}
    )
    num_batch_buckets: int = field(
        default=0,
        metadata={"help": "number of buckets"},
    )
    precompute_mask_indices: bool = field(
        default=False,
        metadata={
            "help": "flag to compute mask indices in data preparation.",
        },
    )
    inferred_w2v_config: Optional[InferredW2vConfig] = field(
        default=None,
        metadata={
            "help": "wav2vec 2.0 masking arguments used to pre-compute masks (required for TPU)",
        },
    )
    # Mirrors the global common.tpu flag.
    tpu: bool = II("common.tpu")
@register_task("mimic3_pretraining", dataclass=MIMIC3_PretrainingConfig)
class MIMIC3_PretrainingTask(FairseqTask):
""" """
cfg: MIMIC3_PretrainingConfig
@classmethod
def setup_task(cls, cfg: MIMIC3_PretrainingConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (MIMIC3_PretrainingConfig): configuration of this task
"""
return cls(cfg)
def _get_mask_precompute_kwargs(self, cfg):
if self.cfg.precompute_mask_indices or self.cfg.tpu:
assert (
cfg.inferred_w2v_config is not None
), "inferred_w2v_config must be set"
return OmegaConf.to_container(
cfg.inferred_w2v_config, resolve=True, enum_to_str=True
)
else:
return {}
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
datadir = self.cfg.data
task_cfg = task_cfg or self.cfg
# upgrade old task
if isinstance(task_cfg, Namespace):
if not hasattr(task_cfg, "autoregressive"):
task_cfg.autoregressive = not task_cfg.criterion == "ctc"
self.datasets[split] = MIMIC3_Dataset(
datadir, split,
max_sample_size=self.cfg.max_sample_size,
min_sample_size=self.cfg.min_sample_size,
pad=task_cfg.labels is not None or task_cfg.enable_padding,
normalize=task_cfg.normalize,
num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu),
compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu),
**self._get_mask_precompute_kwargs(task_cfg),
)
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return None
def max_positions(self):
"""Maximum input length supported by the encoder."""
return sys.maxsize, sys.maxsize
def build_model(self, model_cfg: FairseqDataclass):
model = super().build_model(model_cfg)
actualized_cfg = getattr(model, "cfg", None)
if actualized_cfg is not None:
# if "w2v_args" in actualized_cfg:
if hasattr(actualized_cfg, "w2v_args"):
model_cfg.w2v_args = actualized_cfg.w2v_args
return model
|
import json
import logging
import pickle
import random
import time
from pathlib import Path
from typing import Any, Dict, Generator, List, Optional, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from jaxtalk.data import BasicDataProvider
from jaxtalk.losses import CaptionLoss
from jaxtalk.models import CaptionModel
from jaxtalk.optimizer import Optimizer
from jaxtalk.utils import Logger, Mean
class Trainer:
    """Drives CaptionModel training: gradient steps, periodic evaluation,
    best-perplexity checkpointing and JSON history logging."""

    def __init__(
        self,
        ixtoword: Dict[int, str],
        grad_clip=1,
        regc=1e-3,
        evaluate_every=1,
        name: str = "",
    ) -> None:
        """
        Args:
            ixtoword: index-to-word vocabulary (its length defines the pad token).
            grad_clip: gradient clipping value handed to the Optimizer.
            regc: L2 weight-decay coefficient.
            evaluate_every: run evaluation every this many epochs.
            name: run name used for checkpoint/history directories.
        """
        self._name = name or "Trainer"
        self._num_tokens: int = len(ixtoword) - 1  # minus pad
        self._pad_token: int = len(ixtoword)
        self._evaluate_every = evaluate_every
        self._optimizer = Optimizer(1e-3, grad_clip)
        self._loss_fn: CaptionLoss = CaptionLoss(self._num_tokens, self._pad_token)
        self._weight_decay: float = regc
        self._logger = Logger(self._name, logging.INFO)
        self._global_step = 0
        self._epoch = 0
        self.history: List[Dict[str, Any]] = []
        self._state: Dict[str, Any] = {}
        self._time = time.time()
        self._best_perplexity = np.inf
        self._checkpoint_path = Path(f"logs/checkpoint/{self._name}")
        self._checkpoint_path.mkdir(parents=True, exist_ok=True)
        history_path = Path(f"logs/histories/{self._name}")
        history_path.mkdir(parents=True, exist_ok=True)
        # Renamed from the misspelled `_histroy_file` (internal attribute).
        self._history_file = history_path / "history.json"

    def _step(
        self,
        model: CaptionModel,
        nest_batch: Dict[str, jnp.ndarray],
    ) -> jnp.ndarray:
        """Forward pass; returns per-token softmax probs with the start
        position stripped off."""
        img_feats = nest_batch["feats"]
        # Prepend a start token (index 0) to every caption.
        tokens = jnp.concatenate(
            [jnp.zeros((img_feats.shape[0], 1)), nest_batch["tokens"]], axis=1
        )
        output_logits = model.forward(img_feats, tokens)
        output_probs = jax.nn.softmax(output_logits, axis=2)[:, 1:, :]
        return output_probs

    def _grad(
        self,
        model: CaptionModel,
        nest_batch: Dict[str, jnp.ndarray],
    ) -> jnp.float32:
        """Scalar loss used for differentiation: caption loss + L2 weight decay."""
        output_probs = self._step(model, nest_batch)
        tokens = nest_batch["tokens"]
        loss, _ = self._loss_fn(output_probs, tokens)
        regloss = 0
        for value in model.get_parameters().values():
            regloss = regloss + jnp.sum(value["weight"] * value["weight"])
        # Scale the regularizer by the batch size.
        loss = loss + self._weight_decay * (regloss * (1 / tokens.shape[0]))
        return loss

    def _train_step(
        self,
        model: CaptionModel,
        nest_batch: Dict[str, jnp.ndarray],
    ) -> Tuple[jnp.float32, jnp.float32, jnp.ndarray]:
        """One forward/backward pass; returns (loss, log2-perplexity, probs, grads)."""
        output_probs = jax.jit(self._step)(model, nest_batch)
        loss, log2_perplexity = jax.jit(self._loss_fn)(
            output_probs, nest_batch["tokens"]
        )
        grads = jax.jit(jax.grad(self._grad))(model, nest_batch)
        self._global_step += 1
        return loss, log2_perplexity, output_probs, grads

    def _eval_step(
        self,
        model: CaptionModel,
        nest_batch: Dict[str, jnp.ndarray],
    ) -> Tuple[jnp.float32, jnp.float32, jnp.ndarray]:
        """Forward pass + loss without gradients or step counting."""
        output_probs = self._step(model, nest_batch)
        loss, log2_perplexity = self._loss_fn(output_probs, nest_batch["tokens"])
        return loss, log2_perplexity, output_probs

    def _evaluate(self, model: CaptionModel, data_gen: Generator) -> None:
        """Evaluate over *data_gen*, record state and checkpoint on improvement."""
        loss = Mean()
        perplexity = Mean()
        batch = None
        nest_batch = None
        for batch, nest_batch in data_gen:
            (_loss, _log2_perplexity, _,) = jax.jit(
                self._eval_step
            )(model, nest_batch)
            loss.update(_loss)
            perplexity.update(_log2_perplexity)
        if batch is None or nest_batch is None:
            raise ValueError(
                "batch or nest_batch cannot be None. Please supply a valid data"
                " generator"
            )
        # Log the running means (was logging the Mean object itself for loss,
        # inconsistent with the perplexity formatting).
        self._logger.info(
            (
                f"Evaluation:\nLoss: {loss.state():.2f} | "
                f"Perplexity: {perplexity.state():.2f}"
            )
        )
        words, gt_words = self.predict(model, batch, nest_batch)
        self._set_state(
            "eval",
            float(loss.state()),
            float(perplexity.state()),
            float(loss.state()),
            float(perplexity.state()),
            words,
            gt_words,
        )
        if perplexity.state() < self._best_perplexity:
            checkpoint_file = self._checkpoint_path / (
                f"checkpoint_{self._epoch}_"
                f"{self._global_step}_"
                f"{perplexity.state():.2f}.jxc"
            )
            # Persist the parameter pytree of the new best model; `with`
            # guarantees the handle is closed (was left open before).
            with open(str(checkpoint_file.absolute()), "wb") as fp:
                pickle.dump(model.get_parameters(), fp)
            self._best_perplexity = perplexity.state()

    def predict(
        self,
        model: CaptionModel,
        batch: List[Dict[str, Any]],
        nest_batch: Dict[str, jnp.ndarray],
    ) -> Tuple[List[str], List[str]]:
        """Sample one random batch element, log and return (prediction, ground truth)."""
        idx = random.randint(1, len(batch) - 1)
        words = model.predict(
            nest_batch["feats"][idx][None, :],
            max_len=(nest_batch["tokens"].shape[1] + 5),
        )
        self._logger.info(
            (
                f"\nSample Predictions:\nPrediction: {words}\nGround truth: "
                f"{[batch[idx]['sentence']['raw']]}"
            )
        )
        return words, [batch[idx]["sentence"]["raw"]]

    def _set_state(
        self,
        split: str,
        loss: float,
        perplexity: float,
        mean_loss: float,
        mean_perplexity: float,
        predicted_words: Optional[List[str]] = None,
        gt_words: Optional[List[str]] = None,
    ) -> None:
        """Append a metrics snapshot to the history.

        Word lists default to None instead of mutable [] defaults.
        """
        self._state = {
            "split": split,
            "loss": loss,
            "perplexity": perplexity,
            "mean_loss": mean_loss,
            "mean_perplexity": mean_perplexity,
            "global_step": self._global_step,
            "predicted_words": predicted_words if predicted_words is not None else [],
            "gt_words": gt_words if gt_words is not None else [],
        }
        self.history += [self._state]

    def _update_state(self, **kwargs) -> None:
        """Merge kwargs into the current state and its history entry."""
        self._state.update(kwargs)
        self.history[-1].update(self._state)

    def _get_last_state(self, split) -> Optional[Dict[str, Any]]:
        """Return the most recent history entry for *split*, or None."""
        for i in range(len(self.history) - 1, -1, -1):
            state = self.history[i]
            if state["split"] == split:
                return state
        return None

    def _dump_history(self) -> None:
        """Write the full training history to the run's JSON file."""
        with open(str(self._history_file.absolute()), "w") as fp:
            json.dump(self.history, fp)

    def fit(
        self,
        model: CaptionModel,
        data_provider: BasicDataProvider,
        epochs: int,
        lr: float,
    ) -> None:
        """Train *model* for *epochs* epochs at learning rate *lr*,
        evaluating every `_evaluate_every` epochs and dumping history."""
        self._optimizer.set_lr(lr)
        self._model = model
        mean_loss = Mean()
        mean_perplexity = Mean()
        for epoch in range(1, epochs + 1):
            batch = None
            nest_batch = None
            self._epoch = epoch
            for i, (batch, nest_batch) in enumerate(
                data_provider.iter_image_sentence_pair_batch("train")
            ):
                self._time = time.time()
                loss, log2_perplexity, _, grads = jax.jit(self._train_step)(
                    model, nest_batch
                )
                self._optimizer.step(model, grads)
                tm = time.time() - self._time
                mean_loss.update(loss)
                mean_perplexity.update(log2_perplexity)
                self._logger.info(
                    f"Epoch: {epoch}/{epochs} | Batch: {i:3d} | Time: {tm:1.2f} | Loss: "
                    f"{loss:3.2f} | Perplexity: {log2_perplexity:3.2f} | "
                    f"Mean Loss: {mean_loss.state():3.2f} | Mean Perplexity: "
                    f"{mean_perplexity.state():3.2f}"
                )
                self._set_state(
                    "train",
                    float(loss),
                    float(log2_perplexity),
                    float(mean_loss.state()),
                    float(mean_perplexity.state()),
                )
            # Log a sample prediction for the last training batch of the epoch.
            words, gt_words = self.predict(model, batch, nest_batch)
            self._update_state(
                predicted_words=words,
                gt_words=gt_words,
            )
            if epoch % self._evaluate_every == 0:
                self._evaluate(
                    model,
                    data_provider.iter_image_sentence_pair_batch("val"),
                )
            self._dump_history()
|
'''
Author: Makson Vinicio
Algoritmos de busca
'''
class Search:
    """Simple search algorithms over Python lists."""

    def binary_seach(self, lista, element):
        """Binary search on a sorted list.

        Returns the index of *element*, or False when absent.
        (Beware: index 0 is falsy too.)
        """
        lo, hi = 0, len(lista) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            value = lista[mid]
            if value == element:
                return mid
            if value > element:
                hi = mid - 1
            else:
                lo = mid + 1
        return False

    def sequential(self, lista, element):
        """Linear scan; returns the first index of *element*, or False."""
        for index, value in enumerate(lista):
            if value == element:
                return index
        return False
# Quick smoke test: value 4 lives at index 3 for both searches.
t = Search()
print(t.binary_seach([1, 2, 3, 4, 5, 6], 4))
print(t.sequential([1, 2, 3, 4, 5, 6], 4))
|
from __future__ import unicode_literals
import unittest
import datetime
import urlparse
import mock
from billy_client import BillyAPI
from billy_client import BillyError
from billy_client import NotFoundError
from billy_client.api import DuplicateExternalIDError
from billy_client.api import Company
from billy_client.api import Customer
from billy_client.api import Plan
from billy_client.api import Invoice
from billy_client.api import Subscription
class TestResource(unittest.TestCase):
    """Tests for the generic Resource wrapper."""

    def make_one(self, *args, **kwargs):
        """Build a Resource (imported lazily to exercise the import path)."""
        from billy_client.api import Resource
        return Resource(*args, **kwargs)

    def test_resource_to_unicode(self):
        """unicode() of a resource shows its JSON payload."""
        resource = self.make_one(None, dict(key='value'))
        self.assertEqual(unicode(resource), "<Resource {'key': u'value'}>")

    def test_resource_get_attr(self):
        """Attribute access proxies into the wrapped JSON data."""
        resource = self.make_one(None, dict(key='value'))
        self.assertEqual(resource.key, 'value')
        self.assertEqual(resource.api, None)
        self.assertEqual(resource.json_data, dict(key='value'))

    def test_resource_not_such_attr(self):
        """Unknown attributes raise AttributeError rather than returning None."""
        resource = self.make_one(None, dict(key='value'))
        with self.assertRaises(AttributeError):
            print(resource.no_such_thing)
class TestAPI(unittest.TestCase):
    def make_one(self, *args, **kwargs):
        """Build a BillyAPI client under test."""
        return BillyAPI(*args, **kwargs)
@mock.patch('requests.post')
def test_billy_error(self, post_method):
mock_company_data = dict(guid='MOCK_COMPANY_GUID')
post_method.return_value = mock.Mock(
json=lambda: mock_company_data,
status_code=503,
content='Server error',
)
api = self.make_one(None, endpoint='http://localhost')
with self.assertRaises(BillyError):
api.create_company('MOCK_PROCESSOR_KEY')
@mock.patch('requests.post')
def test_create_company(self, post_method):
mock_company_data = dict(
guid='MOCK_COMPANY_GUID',
api_key='MOCK_API_KEY',
)
post_method.return_value = mock.Mock(
json=lambda: mock_company_data,
status_code=200,
)
api = self.make_one(None, endpoint='http://localhost')
company = api.create_company('MOCK_PROCESSOR_KEY')
self.assertEqual(company.guid, 'MOCK_COMPANY_GUID')
self.assertEqual(company.api, api)
self.assertEqual(company.api.api_key, 'MOCK_API_KEY')
post_method.assert_called_once_with(
'http://localhost/v1/companies',
data=dict(processor_key='MOCK_PROCESSOR_KEY'),
)
@mock.patch('requests.get')
def _test_get_record(self, get_method, method_name, path_name):
mock_record_data = dict(guid='MOCK_GUID')
mock_response = mock.Mock(
json=lambda: mock_record_data,
status_code=200,
)
get_method.return_value = mock_response
api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
method = getattr(api, method_name)
record = method('MOCK_GUID')
self.assertEqual(record.guid, 'MOCK_GUID')
self.assertEqual(record.api, api)
get_method.assert_called_once_with(
'http://localhost/v1/{}/MOCK_GUID'.format(path_name),
auth=('MOCK_API_KEY', '')
)
@mock.patch('requests.get')
def _test_get_record_not_found(self, get_method, method_name, path_name):
mock_record_data = dict(
guid='MOCK_GUID',
)
mock_response = mock.Mock(
json=lambda: mock_record_data,
status_code=404,
content='Not found',
)
get_method.return_value = mock_response
api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
method = getattr(api, method_name)
with self.assertRaises(NotFoundError):
method('MOCK_GUID')
get_method.assert_called_once_with(
'http://localhost/v1/{}/MOCK_GUID'.format(path_name),
auth=('MOCK_API_KEY', '')
)
def test_get_company(self):
self._test_get_record(
method_name='get_company',
path_name='companies',
)
def test_get_company_not_found(self):
self._test_get_record_not_found(
method_name='get_company',
path_name='companies',
)
def test_get_customer(self):
self._test_get_record(
method_name='get_customer',
path_name='customers',
)
def test_get_customer_not_found(self):
self._test_get_record_not_found(
method_name='get_customer',
path_name='customers',
)
def test_get_plan(self):
self._test_get_record(
method_name='get_plan',
path_name='plans',
)
def test_get_plan_not_found(self):
self._test_get_record_not_found(
method_name='get_plan',
path_name='plans',
)
def test_get_subscription(self):
self._test_get_record(
method_name='get_subscription',
path_name='subscriptions',
)
def test_get_subscription_not_found(self):
self._test_get_record_not_found(
method_name='get_subscription',
path_name='subscriptions',
)
def test_get_invoice(self):
self._test_get_record(
method_name='get_invoice',
path_name='invoices',
)
def test_get_invoice_not_found(self):
self._test_get_record_not_found(
method_name='get_invoice',
path_name='invoices',
)
def test_get_transaction(self):
self._test_get_record(
method_name='get_transaction',
path_name='transactions',
)
def test_get_transaction_not_found(self):
self._test_get_record_not_found(
method_name='get_transaction',
path_name='transactions',
)
@mock.patch('requests.post')
def test_create_customer(self, post_method):
api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
company = Company(api, dict(guid='MOCK_COMPANY_GUID'))
mock_customer_data = dict(guid='CUMOCK_CUSTOMER')
mock_response = mock.Mock(
json=lambda: mock_customer_data,
status_code=200,
)
post_method.return_value = mock_response
customer = company.create_customer(
processor_uri='MOCK_BALANCED_CUSTOMER_URI',
)
self.assertEqual(customer.guid, 'CUMOCK_CUSTOMER')
post_method.assert_called_once_with(
'http://localhost/v1/customers',
data=dict(processor_uri='MOCK_BALANCED_CUSTOMER_URI'),
auth=('MOCK_API_KEY', '')
)
@mock.patch('requests.post')
def test_create_plan(self, post_method):
api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
company = Company(api, dict(guid='MOCK_COMPANY_GUID'))
mock_plan_data = dict(guid='MOCK_PLAN_GUID')
mock_response = mock.Mock(
json=lambda: mock_plan_data,
status_code=200,
)
post_method.return_value = mock_response
plan = company.create_plan(
plan_type=Plan.TYPE_DEBIT,
frequency=Plan.FREQ_DAILY,
amount='5566',
interval=123,
)
self.assertEqual(plan.guid, 'MOCK_PLAN_GUID')
post_method.assert_called_once_with(
'http://localhost/v1/plans',
data=dict(
plan_type=Plan.TYPE_DEBIT,
frequency=Plan.FREQ_DAILY,
amount='5566',
interval=123,
),
auth=('MOCK_API_KEY', ''),
)
@mock.patch('requests.post')
def test_subscribe(self, post_method):
api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
customer = Customer(api, dict(guid='MOCK_CUSTOMER_GUID'))
plan = Plan(api, dict(guid='MOCK_PLAN_GUID'))
now = datetime.datetime.utcnow()
mock_subscription_data = dict(guid='MOCK_SUBSCRIPTION_GUID')
mock_response = mock.Mock(
json=lambda: mock_subscription_data,
status_code=200,
)
post_method.return_value = mock_response
subscription = plan.subscribe(
customer_guid=customer.guid,
funding_instrument_uri='MOCK_INSTRUMENT_URI',
amount='5566',
appears_on_statement_as='hello baby',
started_at=now,
)
self.assertEqual(subscription.guid, 'MOCK_SUBSCRIPTION_GUID')
post_method.assert_called_once_with(
'http://localhost/v1/subscriptions',
data=dict(
plan_guid='MOCK_PLAN_GUID',
customer_guid='MOCK_CUSTOMER_GUID',
funding_instrument_uri='MOCK_INSTRUMENT_URI',
appears_on_statement_as='hello baby',
amount='5566',
started_at=now.isoformat(),
),
auth=('MOCK_API_KEY', ''),
)
@mock.patch('requests.post')
def test_cancel_subscription(self, post_method):
    """cancel POSTs to /v1/subscriptions/<guid>/cancel and refreshes state."""
    api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
    subscription = Subscription(api, dict(guid='MOCK_SUBSCRIPTION_GUID'))
    mock_subscription_data = dict(
        guid='MOCK_SUBSCRIPTION_GUID',
        canceled=True,
    )
    mock_response = mock.Mock(
        json=lambda: mock_subscription_data,
        status_code=200,
    )
    post_method.return_value = mock_response
    # cancel() returns a fresh resource built from the server response.
    subscription = subscription.cancel()
    self.assertEqual(subscription.guid, 'MOCK_SUBSCRIPTION_GUID')
    self.assertEqual(subscription.canceled, True)
    post_method.assert_called_once_with(
        'http://localhost/v1/subscriptions/{}/cancel'
        .format('MOCK_SUBSCRIPTION_GUID'),
        auth=('MOCK_API_KEY', ''),
    )
@mock.patch('requests.post')
def test_invoice(self, post_method):
    """invoice POSTs to /v1/invoices, flattening items and adjustments.

    Nested item/adjustment dicts must be encoded as indexed form fields
    (item_name0, item_amount0, ..., adjustment_amount0, ...) with all
    numeric values stringified.
    """
    api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
    customer = Customer(api, dict(guid='MOCK_CUSTOMER_GUID'))
    mock_invoice_data = dict(
        guid='MOCK_INVOICE_GUID',
    )
    mock_response = mock.Mock(
        json=lambda: mock_invoice_data,
        status_code=200,
    )
    post_method.return_value = mock_response
    invoice = customer.invoice(
        amount='5566',
        title='I want you bankrupt invoice',
        funding_instrument_uri='MOCK_INSTRUMENT_URI',
        appears_on_statement_as='hi there',
        items=[
            dict(name='foo', amount=1234),
            dict(type='debit', name='bar', amount=56, quantity=78,
                 volume=90, unit='unit'),
        ],
        adjustments=[
            dict(amount=-100, reason='A Lannister always pays his debts!'),
            dict(amount=20, reason='you owe me'),
        ],
    )
    self.assertEqual(invoice.guid, 'MOCK_INVOICE_GUID')
    post_method.assert_called_once_with(
        'http://localhost/v1/invoices',
        data=dict(
            customer_guid='MOCK_CUSTOMER_GUID',
            funding_instrument_uri='MOCK_INSTRUMENT_URI',
            title='I want you bankrupt invoice',
            appears_on_statement_as='hi there',
            amount='5566',
            # item1
            item_name0='foo',
            item_amount0='1234',
            # item2
            item_type1='debit',
            item_name1='bar',
            item_amount1='56',
            item_quantity1='78',
            item_volume1='90',
            item_unit1='unit',
            # adjustment1
            adjustment_amount0='-100',
            adjustment_reason0='A Lannister always pays his debts!',
            # adjustment2
            adjustment_amount1='20',
            adjustment_reason1='you owe me',
        ),
        auth=('MOCK_API_KEY', ''),
    )
@mock.patch('requests.post')
def test_refund_invoice(self, post_method):
    """refund POSTs the amount to /v1/invoices/<guid>/refund."""
    api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
    invoice = Invoice(api, dict(guid='MOCK_INVOICE_GUID'))
    mock_invoice_data = dict(guid='MOCK_INVOICE_GUID')
    mock_response = mock.Mock(
        json=lambda: mock_invoice_data,
        status_code=200,
    )
    post_method.return_value = mock_response
    invoice = invoice.refund(amount=999)
    self.assertEqual(invoice.guid, 'MOCK_INVOICE_GUID')
    post_method.assert_called_once_with(
        'http://localhost/v1/invoices/{}/refund'
        .format('MOCK_INVOICE_GUID'),
        data=dict(amount=999),
        auth=('MOCK_API_KEY', ''),
    )
@mock.patch('requests.post')
def test_invoice_with_duplicate_external_id(self, post_method):
    """A 409 response to invoice creation raises DuplicateExternalIDError."""
    api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
    customer = Customer(api, dict(guid='MOCK_CUSTOMER_GUID'))
    mock_invoice_data = dict(
        guid='MOCK_INVOICE_GUID',
    )
    # 409 Conflict is the server's signal for a duplicate external_id.
    mock_response = mock.Mock(
        json=lambda: mock_invoice_data,
        status_code=409,
        content='Duplicate',
    )
    post_method.return_value = mock_response
    with self.assertRaises(DuplicateExternalIDError):
        # 'duplaite one' is arbitrary test data (typo is intentional input).
        customer.invoice(
            amount='5566',
            funding_instrument_uri='MOCK_URI',
            external_id='duplaite one',
        )
@mock.patch('requests.get')
def _test_list_records(
    self,
    get_method,
    method_name,
    resource_url,
    extra_query=None,
):
    """Shared driver: an API-level list method must page through all records.

    Feeds four mocked pages (2 + 2 + 1 + 0 items) and then checks the
    collected GUIDs, the request URLs, the offset/limit query strings of
    every call, and the basic-auth credentials.

    NOTE(review): relies on Python 2 semantics (dict.iteritems, the
    urlparse module, list-returning map) -- not Python 3 compatible as-is.
    """
    api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
    # Successive pages; the final empty page terminates the pagination.
    result = [
        dict(
            offset=0,
            limit=2,
            items=[
                dict(guid='MOCK_RECORD_GUID1'),
                dict(guid='MOCK_RECORD_GUID2'),
            ],
        ),
        dict(
            offset=2,
            limit=2,
            items=[
                dict(guid='MOCK_RECORD_GUID3'),
                dict(guid='MOCK_RECORD_GUID4'),
            ],
        ),
        dict(
            offset=4,
            limit=2,
            items=[
                dict(guid='MOCK_RECORD_GUID5'),
            ],
        ),
        dict(
            offset=6,
            limit=2,
            items=[],
        ),
    ]
    # Every GET consumes the next page from the front of the list.
    get_method.return_value = mock.Mock(
        json=lambda: result.pop(0),
        status_code=200,
    )
    method = getattr(api, method_name)
    extra_kwargs = extra_query if extra_query is not None else {}
    records = method(**extra_kwargs)
    self.assertEqual(map(lambda r: r.guid, records), [
        'MOCK_RECORD_GUID1',
        'MOCK_RECORD_GUID2',
        'MOCK_RECORD_GUID3',
        'MOCK_RECORD_GUID4',
        'MOCK_RECORD_GUID5',
    ])
    # ensure url
    call_urls = [
        args[0].split('?')[0]
        for args, _ in get_method.call_args_list
    ]
    self.assertEqual(call_urls, [resource_url] * 4)
    # ensure query
    qs_list = []
    for args, _ in get_method.call_args_list:
        o = urlparse.urlparse(args[0])
        query = urlparse.parse_qs(o.query)
        # flatten all values
        for k, v in query.iteritems():
            query[k] = v[0]
        qs_list.append(query)
    expected_querys = [
        dict(),
        dict(offset='2', limit='2'),
        dict(offset='4', limit='2'),
        dict(offset='6', limit='2'),
    ]
    # Any extra filter kwargs must be echoed in every request's query string.
    if extra_kwargs:
        for query in expected_querys:
            query.update(extra_kwargs)
    self.assertEqual(qs_list, expected_querys)
    # ensure auth
    call_auths = [kwargs['auth'] for _, kwargs in get_method.call_args_list]
    self.assertEqual([('MOCK_API_KEY', '')] * 4, call_auths)
@mock.patch('requests.get')
def _test_list_records_under_resource(
    self,
    get_method,
    resource_cls,
    method_name,
    resource_url,
    extra_query=None,
):
    """Like _test_list_records, but for list methods on a resource instance.

    resource_url must contain a '{}' placeholder that is filled with the
    mocked resource GUID when checking the request URLs.

    NOTE(review): same Python 2 dependencies as _test_list_records
    (dict.iteritems, urlparse module, list-returning map).
    """
    api = self.make_one('MOCK_API_KEY', endpoint='http://localhost')
    resource = resource_cls(api, dict(guid='MOCK_RESOURCE_GUID'))
    # Successive pages; the final empty page terminates the pagination.
    result = [
        dict(
            offset=0,
            limit=2,
            items=[
                dict(guid='MOCK_RECORD_GUID1'),
                dict(guid='MOCK_RECORD_GUID2'),
            ],
        ),
        dict(
            offset=2,
            limit=2,
            items=[
                dict(guid='MOCK_RECORD_GUID3'),
                dict(guid='MOCK_RECORD_GUID4'),
            ],
        ),
        dict(
            offset=4,
            limit=2,
            items=[
                dict(guid='MOCK_RECORD_GUID5'),
            ],
        ),
        dict(
            offset=6,
            limit=2,
            items=[],
        ),
    ]
    get_method.return_value = mock.Mock(
        json=lambda: result.pop(0),
        status_code=200,
    )
    method = getattr(resource, method_name)
    extra_kwargs = extra_query if extra_query is not None else {}
    records = method(**extra_kwargs)
    self.assertEqual(map(lambda r: r.guid, records), [
        'MOCK_RECORD_GUID1',
        'MOCK_RECORD_GUID2',
        'MOCK_RECORD_GUID3',
        'MOCK_RECORD_GUID4',
        'MOCK_RECORD_GUID5',
    ])
    # ensure url
    call_urls = [
        args[0].split('?')[0]
        for args, _ in get_method.call_args_list
    ]
    expected_url = resource_url.format('MOCK_RESOURCE_GUID')
    self.assertEqual(call_urls, [expected_url] * 4)
    # ensure query
    qs_list = []
    for args, _ in get_method.call_args_list:
        o = urlparse.urlparse(args[0])
        query = urlparse.parse_qs(o.query)
        # flatten all values
        for k, v in query.iteritems():
            query[k] = v[0]
        qs_list.append(query)
    expected_querys = [
        dict(),
        dict(offset='2', limit='2'),
        dict(offset='4', limit='2'),
        dict(offset='6', limit='2'),
    ]
    if extra_kwargs:
        for query in expected_querys:
            query.update(extra_kwargs)
    self.assertEqual(qs_list, expected_querys)
    # ensure auth
    call_auths = [kwargs['auth'] for _, kwargs in get_method.call_args_list]
    self.assertEqual([('MOCK_API_KEY', '')] * 4, call_auths)
def test_list_customers(self):
    """list_customers pages /v1/customers, forwarding the external_id filter."""
    self._test_list_records(
        method_name='list_customers',
        resource_url='http://localhost/v1/customers',
        extra_query=dict(external_id='id'),
    )
def test_list_plans(self):
    """list_plans pages /v1/plans."""
    self._test_list_records(
        method_name='list_plans',
        resource_url='http://localhost/v1/plans',
    )
def test_list_subscriptions(self):
    """list_subscriptions pages /v1/subscriptions."""
    self._test_list_records(
        method_name='list_subscriptions',
        resource_url='http://localhost/v1/subscriptions',
    )
def test_list_invoices(self):
    """list_invoices pages /v1/invoices, forwarding the external_id filter."""
    self._test_list_records(
        method_name='list_invoices',
        resource_url='http://localhost/v1/invoices',
        extra_query=dict(external_id='id'),
    )
def test_list_transactions(self):
    """list_transactions pages /v1/transactions."""
    self._test_list_records(
        method_name='list_transactions',
        resource_url='http://localhost/v1/transactions',
    )
def test_list_plan_customer(self):
    """Plan.list_customers pages /v1/plans/<guid>/customers."""
    self._test_list_records_under_resource(
        resource_cls=Plan,
        method_name='list_customers',
        resource_url='http://localhost/v1/plans/{}/customers',
    )
def test_list_plan_subscription(self):
    """Plan.list_subscriptions pages /v1/plans/<guid>/subscriptions."""
    self._test_list_records_under_resource(
        resource_cls=Plan,
        method_name='list_subscriptions',
        resource_url='http://localhost/v1/plans/{}/subscriptions',
    )
def test_list_plan_invoice(self):
    """Plan.list_invoices pages /v1/plans/<guid>/invoices."""
    self._test_list_records_under_resource(
        resource_cls=Plan,
        method_name='list_invoices',
        resource_url='http://localhost/v1/plans/{}/invoices',
    )
def test_list_plan_transaction(self):
    """Plan.list_transactions pages /v1/plans/<guid>/transactions."""
    self._test_list_records_under_resource(
        resource_cls=Plan,
        method_name='list_transactions',
        resource_url='http://localhost/v1/plans/{}/transactions',
    )
def test_list_customer_subscription(self):
    """Customer.list_subscriptions pages /v1/customers/<guid>/subscriptions."""
    self._test_list_records_under_resource(
        resource_cls=Customer,
        method_name='list_subscriptions',
        resource_url='http://localhost/v1/customers/{}/subscriptions',
    )
def test_list_customer_invoice(self):
    """Customer.list_invoices pages /v1/customers/<guid>/invoices."""
    self._test_list_records_under_resource(
        resource_cls=Customer,
        method_name='list_invoices',
        resource_url='http://localhost/v1/customers/{}/invoices',
    )
def test_list_customer_transaction(self):
    """Customer.list_transactions pages /v1/customers/<guid>/transactions."""
    self._test_list_records_under_resource(
        resource_cls=Customer,
        method_name='list_transactions',
        resource_url='http://localhost/v1/customers/{}/transactions',
    )
def test_list_subscription_invoice(self):
    """Subscription.list_invoices pages /v1/subscriptions/<guid>/invoices."""
    self._test_list_records_under_resource(
        resource_cls=Subscription,
        method_name='list_invoices',
        resource_url='http://localhost/v1/subscriptions/{}/invoices',
    )
def test_list_subscription_transaction(self):
    """Subscription.list_transactions pages /v1/subscriptions/<guid>/transactions."""
    self._test_list_records_under_resource(
        resource_cls=Subscription,
        method_name='list_transactions',
        resource_url='http://localhost/v1/subscriptions/{}/transactions',
    )
def test_list_invoice_transaction(self):
    """Invoice.list_transactions pages /v1/invoices/<guid>/transactions."""
    self._test_list_records_under_resource(
        resource_cls=Invoice,
        method_name='list_transactions',
        resource_url='http://localhost/v1/invoices/{}/transactions',
    )
|
#!/usr/bin/env python
import numpy as np
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
MAX_DECEL = .5
class WaypointUpdater(object):
    """Publishes the next LOOKAHEAD_WPS waypoints ahead of the vehicle,
    lowering target velocities so the car stops at a red-light stop line
    when one is within the published range."""

    def __init__(self):
        rospy.init_node('waypoint_updater')
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        # Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
        # Member variables to store values from callback functions
        self.pose = None              # latest PoseStamped from /current_pose
        self.base_waypoints = None    # full static Lane from /base_waypoints
        self.waypoints_2d = None      # [[x, y], ...] backing the KDTree
        self.waypoint_tree = None     # KDTree over waypoints_2d
        self.stopline_wp_idx = -1     # -1 means "no red light ahead"
        self.loop()

    def loop(self):
        """Main publishing loop; runs until the node is shut down."""
        # control publishing frequency
        # waypoint follower has frequency around 30 Hz
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            # Publish only once pose and the waypoint KDTree are available.
            if self.pose and self.base_waypoints and self.waypoint_tree:
                self.publish_waypoints()
            rate.sleep()

    def get_closest_waypoint_idx(self):
        """Return the index of the closest base waypoint AHEAD of the car."""
        # load current position and find closest waypoint by KDTree
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        # Check if closest i ahead or behind vehilcle
        closest_coord = self.waypoints_2d[closest_idx]
        prev_coord = self.waypoints_2d[closest_idx-1]
        # Equation for hyperplane through closest coords
        cl_vect = np.array(closest_coord)
        prev_vect = np.array(prev_coord)
        pos_vect = np.array([x, y])
        # Positive dot product means the car is past the closest waypoint,
        # so advance to the next one (wrapping at the end of the track).
        val = np.dot(cl_vect-prev_vect, pos_vect-cl_vect)
        if val > 0:
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx

    def publish_waypoints(self):
        """Publish the lookahead slice, decelerating if a stop line is in range."""
        # extract waypoints starting from closest index with the number of
        # points we want to look ahead.
        closest_wp_idx = self.get_closest_waypoint_idx()
        farthest_wp_idx = closest_wp_idx + LOOKAHEAD_WPS
        # NOTE(review): this slice does not wrap past the end of the base
        # waypoint list -- confirm intended behavior at the end of the track.
        waypoints = self.base_waypoints.waypoints[closest_wp_idx:farthest_wp_idx]
        lane = Lane()
        lane.header = self.base_waypoints.header
        # if the next stop line is not in the waypoint range, we ignore it
        if self.stopline_wp_idx == -1 or \
                (self.stopline_wp_idx >= farthest_wp_idx):
            lane.waypoints = waypoints
        # otherwise, we want to decelerate and stop at the point
        else:
            lane.waypoints = self.decelerate_waypoints(waypoints, closest_wp_idx)
        self.final_waypoints_pub.publish(lane)

    def decelerate_waypoints(self, waypoints, closest_idx):
        """Return copies of waypoints with velocities ramped down to stop."""
        # two waypoints back from the stop line, so front of car stops at the line
        stop_idx = max(self.stopline_wp_idx - closest_idx - 2, 0)
        # create new waypoint points that stops at the stop line
        new_wps = []
        for i, wp in enumerate(waypoints):
            p = Waypoint()
            p.pose = wp.pose
            dist = self.distance(waypoints, i, stop_idx)
            # decrease velocity based on distance to the stop line
            # (constant-deceleration profile: v = sqrt(2*a*d))
            vel = math.sqrt(2 * MAX_DECEL * dist)
            # stop the car when the velocity is small TODO: modify this
            vel = vel if vel >= 1. else 0.
            # Never exceed the originally planned speed for this waypoint.
            p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
            new_wps.append(p)
        return new_wps

    def pose_cb(self, msg):
        """Cache the latest vehicle pose."""
        # about 50 Hz
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """Cache the static base waypoints and build the KDTree once."""
        # load base waypoints
        self.base_waypoints = waypoints
        if not self.waypoints_2d:
            # convert waypoints to (x,y) list
            self.waypoints_2d = [
                [
                    waypoint.pose.pose.position.x,
                    waypoint.pose.pose.position.y
                ] for waypoint in waypoints.waypoints
            ]
            # build KDTree
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        """Cache the index of the next red-light stop line (-1 if none)."""
        self.stopline_wp_idx = msg.data

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored in a waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Return cumulative path length from waypoint wp1 to wp2 (inclusive),
        summing consecutive Euclidean segment lengths."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    # Node entry point: constructing WaypointUpdater blocks in its loop().
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
|
def binary_search(numbers, number_to_find, low=0, high=None):
    """Return True if number_to_find occurs in the sorted list numbers.

    Recursive binary search over the inclusive window numbers[low..high].

    Args:
        numbers: list sorted in ascending order.
        number_to_find: value to look for.
        low: lower index of the search window (default 0).
        high: upper index, inclusive; defaults to len(numbers) - 1 so the
            function can now be called with just (numbers, value).

    Returns:
        True if the value is present, False otherwise.
    """
    if high is None:
        high = len(numbers) - 1
    if low > high:
        # Empty window: the value is not in the list.
        return False
    # Floor division keeps the midpoint an int without a float round-trip
    # (the old int((low + high) / 2) loses precision for huge indices).
    mid = (low + high) // 2
    if numbers[mid] == number_to_find:
        return True
    elif numbers[mid] > number_to_find:
        # Value would be in the left half.
        return binary_search(numbers, number_to_find, low, mid - 1)
    else:
        # Value would be in the right half.
        return binary_search(numbers, number_to_find, mid + 1, high)
if __name__ == '__main__':
    # Demo driver: search a hard-coded sorted list for a user-supplied number.
    numbers = [ 1,2,3,4,5,6,7,8,9,10,15,20,24,30,34,50]
    # Prints the highest valid index of the list before prompting.
    print(len(numbers) - 1)
    number_to_find = int(input('Digite un numero: '))
    result = binary_search(numbers,number_to_find, 0, len(numbers) - 1)
    if result is True:
        print('El numero si esta en la lista')
    else:
        print('El numero no esta en la lista')
from __future__ import print_function
import numpy as np
from sklearn.metrics import accuracy_score
import os
import sys
import argparse
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path2 = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path2)
import candle
# Extra CANDLE argument definitions for the P3B3 benchmark.  Each entry is a
# keyword dict consumed by candle's argparse-based parser builder.
additional_definitions = [
    # Semicolon-separated lists of per-task training/validation CSV files.
    {'name':'train_features',
     'action':'store',
     'default':'data/task0_0_train_feature.csv;data/task1_0_train_feature.csv;data/task2_0_train_feature.csv',
     'help':'training feature data filenames'},
    {'name':'train_truths',
     'action':'store',
     'default':'data/task0_0_train_label.csv;data/task1_0_train_label.csv;data/task2_0_train_label.csv',
     'help':'training truth data filenames'},
    {'name':'valid_features',
     'action':'store',
     'default':'data/task0_0_test_feature.csv;data/task1_0_test_feature.csv;data/task2_0_test_feature.csv',
     'help':'validation feature data filenames'},
    {'name':'valid_truths',
     'action':'store',
     'default':'data/task0_0_test_label.csv;data/task1_0_test_label.csv;data/task2_0_test_label.csv',
     'help':'validation truth data filenames'},
    {'name':'output_files',
     'action':'store',
     'default':'result0_0.csv;result1_0.csv;result2_0.csv',
     'help':'output filename'},
    # Network topology: shared trunk and per-task heads.
    {'name':'shared_nnet_spec',
     'nargs':'+',
     'type': int,
     'help':'network structure of shared layer'},
    {'name':'ind_nnet_spec',
     'action':'list-of-lists',
     'help':'network structure of task-specific layer'},
    {'name':'case',
     'default':'CenterZ',
     'choices':['Full', 'Center', 'CenterZ'],
     'help':'case classes'},
    {'name':'fig',
     'type': candle.str2bool,
     'default': False,
     'help':'Generate Prediction Figure'},
    {'name':'feature_names',
     'nargs':'+',
     'type': str},
    {'name':'n_fold',
     'action':'store',
     'type':int},
    # Regularization strengths (embedding and weight L2 penalties).
    {'name':'emb_l2',
     'action':'store',
     'type':float},
    {'name':'w_l2',
     'action':'store',
     'type':float},
    # Word-vector length and CNN filter configuration.
    {'name':'wv_len',
     'action':'store',
     'type':int},
    {'name':'filter_sets',
     'nargs':'+',
     'type': int},
    {'name':'filter_sizes',
     'nargs':'+',
     'type': int},
    {'name':'num_filters',
     'nargs':'+',
     'type': int}
]
# Parameters that must be present in the benchmark configuration.
required = [
    'learning_rate', 'batch_size', 'epochs', 'dropout', \
    'optimizer', 'wv_len', \
    'filter_sizes', 'filter_sets', 'num_filters', 'emb_l2', 'w_l2']
class BenchmarkP3B3(candle.Benchmark):
    """CANDLE benchmark wrapper for P3B3, wired to the module-level
    parameter definitions above."""

    def set_locals(self):
        """Install benchmark-specific settings on this instance.

        - required: set of parameter names that must be configured.
        - additional_definitions: extra argument definitions for the parser.
        """
        req, extra = required, additional_definitions
        if req is not None:
            self.required = set(req)
        if extra is not None:
            self.additional_definitions = extra
|
import numpy as np
from ddm import BoundConstant, Drift, Fittable, Model, NoiseConstant
from src.models.ddms.pyddmTwoStage import TwoStagePyDDM
class TwoStageWithin(TwoStagePyDDM):
    """Two-stage pyDDM with gaze-weighted within-alternative integration.

    Stage-two evidence is the gaze-weighted difference of the two options'
    expected utilities plus a bias tied to the last presentation stage; see
    compute_stage_two_drift and the matching TwoStageWithinDrift class.
    """

    def __init__(self, dt=0.01, dx=0.01, T_dur=3, agentVars=None, label=None):
        """Store solver settings and agent parameters.

        Parameters
        ----------
        dt : float, optional
            Time resolution of the pyDDM solver, by default 0.01.
        dx : float, optional
            Evidence-space resolution, by default 0.01.
        T_dur : int, optional
            Maximum trial duration handed to the model, by default 3.
        agentVars : optional
            Parameter container; compute_stage_two_drift reads its
            ``alpha``, ``theta`` and ``b_last`` attributes.
        label : str, optional
            Model name, defaults to "TwoStageWithin".
        """
        super().__init__(agentVars=agentVars)
        self.dt = dt
        self.dx = dx
        self.T_dur = T_dur
        if label is None:
            self.label = "TwoStageWithin"
        else:
            self.label = label

    def compute_stage_two_drift(self, trial):
        """
        Compute stage 2 drift.

        Mirrors TwoStageWithinDrift.get_drift, but WITHOUT the fitted drift
        gain ``v`` -- keep the two formulas in sync when editing either.

        NOTE(review): trial["presentation"] is read below but is not listed
        in required_conditions in build_model -- confirm it is always
        present in the trial dicts passed here.
        """
        # Expected utility of each option: probability * magnitude**alpha.
        eu0 = trial["p0"] * trial["m0"] ** self.agentVars.alpha
        eu1 = trial["p1"] * trial["m1"] ** self.agentVars.alpha
        # Gaze proportions g0/g1 weight each option's own utility against
        # the theta-discounted utility of its competitor.
        drift = trial["g0"] * (eu0 - self.agentVars.theta * eu1) + trial["g1"] * (
            self.agentVars.theta * eu0 - eu1
        )
        # Bias toward the option favoured in the last presentation stage;
        # active only when presentation01 == 0.
        last_stage_bias = (
            (1 - trial["presentation01"])
            * self.agentVars.b_last
            * ((trial["last_stage_favours"] * (-2)) + 1)  # recodes 0, 1 to 1, -1
        )
        if trial["presentation"] == "attributes":
            # Attribute-wise presentation must never carry a last-stage bias.
            assert last_stage_bias == 0
        return drift + last_stage_bias

    def build_model(self):
        """Builds the pyDDM model for this agent.

        Uses the dx/dt/T_dur stored on the instance; fits alpha, theta, v
        and b_last, with constant noise and fixed bounds at B=1.  Also
        records required_conditions for fitting.

        Returns:
            pyDDM model instance
        """
        model = Model(
            name=self.label,
            drift=TwoStageWithinDrift(
                theta=Fittable(minval=0, maxval=1),
                alpha=Fittable(minval=0, maxval=5),
                v=Fittable(minval=0, maxval=30),
                b_last=Fittable(minval=-1, maxval=1),
            ),
            noise=NoiseConstant(noise=Fittable(minval=0.001, maxval=4)),
            bound=BoundConstant(B=1),
            dx=self.dx,
            dt=self.dt,
            T_dur=self.T_dur,
        )
        self.model = model
        self.required_conditions = [
            "p0",
            "p1",
            "m0",
            "m1",
            "g0",
            "g1",
            "presentation01",
            "last_stage_favours",
        ]
        return model
class TwoStageWithinDrift(Drift):
    """Drift class for the Two-Stage Within-alternative integration pyDDM."""

    name = "Gaze-biased within-alternative integration Stage II drift"
    # Parameters fitted by pyDDM.
    required_parameters = ["alpha", "theta", "v", "b_last"]
    # Trial-level condition variables the drift depends on.
    required_conditions = [
        "p0",
        "p1",
        "m0",
        "m1",
        "g0",
        "g1",
        "presentation01",
        "last_stage_favours",
    ]

    def get_drift(self, conditions, **kwargs):
        """Return the stage-two drift rate for the given trial conditions.

        Same gaze-weighted utility difference as
        TwoStageWithin.compute_stage_two_drift, scaled here by the fitted
        drift gain ``v`` -- keep the two in sync when editing either.
        """
        # Expected utility of each option: probability * magnitude**alpha.
        eu0 = conditions["p0"] * conditions["m0"] ** self.alpha
        eu1 = conditions["p1"] * conditions["m1"] ** self.alpha
        drift = conditions["g0"] * (eu0 - self.theta * eu1) + conditions["g1"] * (
            self.theta * eu0 - eu1
        )
        # Bias toward the option favoured in the last presentation stage;
        # active only when presentation01 == 0.
        last_stage_bias = (
            (1 - conditions["presentation01"])
            * self.b_last
            * ((conditions["last_stage_favours"] * (-2)) + 1)  # recodes 0, 1 to 1, -1
        )
        return self.v * (drift + last_stage_bias)
|
"""
Gebauer Weibull Paraemeters
===========================
These are the tree-specific parameters of Gebauer et al. (2008) for the
4-parameter Weibull function for sap velocity distribution in the sapwood.
References
----------
Gebauer, T., Horna, V., and Leuschner, C.: Variability in radial sap flux
density patterns and sapwood area among seven co-occurring temperate
broad-leaved tree species, Tree Physiol., 28, 1821–1830, 2008.
"""
gp = {
'beech': {'name':'Fagus sylvatica', 'a': 2.69, 'b': 3.42, 'c': 1.00, 'd': 2.44},
'hornbeam': {'name':'Carpinus betulus', 'a': 1.37, 'b': 5.88, 'c': 2.43, 'd': 2.79},
'limeA': {'name':'Tilia sp. (A)', 'a': 1.62, 'b': 6.35, 'c': 2.71, 'd': 3.28},
'limeB': {'name':'Tilia sp. (B)', 'a': 1.11, 'b': 4.52, 'c': 1.67, 'd': 1.88},
'sycamoremaple': {'name':'Acer pseudoplatanus', 'a': 1.44, 'b': 8.98, 'c': 3.47, 'd': 3.42},
'maple': {'name':'Acer campestre', 'a': 1.74, 'b': 4.86, 'c': 1.94, 'd': 2.50},
'ash': {'name':'Fraxinus excelsior', 'a': 1.00, 'b': 1.44, 'c': 1.54, 'd': 0.42}
} |
from amino import List, Map
# Mapping from raw key input to key-name strings: plain ints are single
# ASCII code points, bytes values are Vim termcap-style b'\x80..' sequences.
special_codes = Map({
    b'\x80\xffX': 'c-@',
    b'\x80kb': 'bs',
    9: 'tab',
    b'\x80kB': 's-tab',
    10: 'c-j',
    11: 'c-k',
    12: 'fe',
    13: 'cr',
    27: 'esc',
    32: 'space',
    60: 'lt',
    92: 'bslash',
    124: 'bar',
    # Byte form of ctrl-k; coexists with the int entry 11 above.
    b'\x0b': 'c-k',
    b'\x80kD': 'del',
    b'\x9B': 'csi',
    b'\x80\xfdP': 'xcsi',
    b'\x80ku': 'up',
    b'\x80kd': 'down',
    b'\x80kl': 'left',
    b'\x80kr': 'right',
    # NOTE(review): the next two entries use the SAME key b'\x80\xfd', so
    # the 's-up' mapping is silently overwritten by 's-down' when this dict
    # literal is built.  The real codes likely need a trailing byte (cf.
    # the s-f1..s-f12 entries below) -- verify against Vim's key tables.
    b'\x80\xfd': 's-up',
    b'\x80\xfd': 's-down',
    b'\x80#4': 's-left',
    b'\x80%i': 's-right',
    b'\x80\xfdT': 'c-left',
    b'\x80\xfdU': 'c-right',
    b'\x80k1': 'f1',
    b'\x80k2': 'f2',
    b'\x80k3': 'f3',
    b'\x80k4': 'f4',
    b'\x80k5': 'f5',
    b'\x80k6': 'f6',
    b'\x80k7': 'f7',
    b'\x80k8': 'f8',
    b'\x80k9': 'f9',
    b'\x80k;': 'f10',
    b'\x80F1': 'f11',
    b'\x80F2': 'f12',
    b'\x80\xfd\x06': 's-f1',
    b'\x80\xfd\x07': 's-f2',
    b'\x80\xfd\x08': 's-f3',
    b'\x80\xfd\x09': 's-f4',
    b'\x80\xfd\x0A': 's-f5',
    b'\x80\xfd\x0B': 's-f6',
    b'\x80\xfd\x0C': 's-f7',
    b'\x80\xfd\x0D': 's-f8',
    b'\x80\xfd\x0E': 's-f9',
    b'\x80\xfd\x0F': 's-f10',
    b'\x80\xfd\x10': 's-f11',
    b'\x80\xfd\x11': 's-f12',
    b'\x80%1': 'help',
    b'\x80&8': 'undo',
    b'\x80kI': 'insert',
    b'\x80kh': 'home',
    b'\x80@7': 'end',
    b'\x80kP': 'pageup',
    b'\x80kN': 'pagedown',
    # Keypad variants.
    b'\x80K1': 'khome',
    b'\x80K4': 'kend',
    b'\x80K3': 'kpageup',
    b'\x80K5': 'kpagedown',
    b'\x80K6': 'kplus',
    b'\x80K7': 'kminus',
    b'\x80K9': 'kmultiply',
    b'\x80K8': 'kdivide',
    b'\x80KA': 'kenter',
    b'\x80KB': 'kpoint',
    b'\x80KC': 'k0',
    b'\x80KD': 'k1',
    b'\x80KE': 'k2',
    b'\x80KF': 'k3',
    b'\x80KG': 'k4',
    b'\x80KH': 'k5',
    b'\x80KI': 'k6',
    b'\x80KJ': 'k7',
    b'\x80KK': 'k8',
    b'\x80KL': 'k9',
})
# Modifier bit flags paired with their names (values match Vim's
# getcharmod() bit layout -- presumably; verify against the Vim docs).
modifier_codes = List(
    (2, 'shift'),
    (4, 'control'),
    (8, 'alt'),
    (16, 'meta'),
    (32, 'mouse_double'),
    (64, 'mouse_triple'),
    # NOTE(review): 96 is 32|64 rather than a fresh bit -- confirm that a
    # quadruple click really is reported as the double+triple combination.
    (96, 'mouse_quadruple'),
    (128, 'command'),
)

__all__ = ('special_codes', 'modifier_codes',)
|
import os
from .Base import Base
from sccloud.tools import run_de_analysis
class DeAnalysis(Base):
    """
    Perform DE analysis.
    Usage:
    sccloud de_analysis [options] <input_h5ad_file> <output_spreadsheet>
    sccloud de_analysis -h
    Arguments:
    input_h5ad_file        Single cell data with clustering calculated. DE results would be written back.
    output_spreadsheet     Output spreadsheet with DE results.
    Options:
    -p <threads>                     Use <threads> threads. [default: 1]
    --labels <attr>                  <attr> used as cluster labels. [default: louvain_labels]
    --result-key <key>               Store DE results into AnnData varm with key = <key>. [default: de_res]
    --auc                            Calculate area under ROC (AUROC) and area under Precision-Recall (AUPR).
    --t                              Calculate Welch's t-test.
    --fisher                         Calculate Fisher's exact test.
    --mwu                            Calculate Mann-Whitney U test.
    --temp-folder <temp_folder>      Joblib temporary folder for memmapping numpy arrays.
    --alpha <alpha>                  Control false discovery rate at <alpha>. [default: 0.05]
    --ndigits <ndigits>              Round non p-values and q-values to <ndigits> after decimal point in the excel. [default: 3]
    --quiet                          Do not show detailed intermediate outputs.
    -h, --help                       Print out help information.
    Outputs:
    input_h5ad_file        DE results would be written back to the 'varm' field with name set by --result-key <key>.
    output_spreadsheet     An excel spreadsheet containing DE results. Each cluster has two tabs in the spreadsheet. One is for up-regulated genes and the other is for down-regulated genes.
    Examples:
    sccloud de_analysis -p 26 --labels louvain_labels --auc --t --fisher --mwu manton_bm.h5ad manton_bm_de.xlsx
    """

    def execute(self):
        # Forward the docopt-parsed CLI arguments (self.args) straight to
        # the library entry point, converting numeric options from strings.
        run_de_analysis(
            self.args["<input_h5ad_file>"],
            self.args["<output_spreadsheet>"],
            self.args["--labels"],
            result_key=self.args["--result-key"],
            n_jobs=int(self.args["-p"]),
            auc=self.args["--auc"],
            t=self.args["--t"],
            fisher=self.args["--fisher"],
            mwu=self.args["--mwu"],
            temp_folder=self.args["--temp-folder"],
            verbose=not self.args["--quiet"],
            alpha=float(self.args["--alpha"]),
            ndigits=int(self.args["--ndigits"]),
        )
|
from django.shortcuts import render
from .models import Post
# Static placeholder entries from early development.
# NOTE(review): `home` below renders Post.objects.all() instead of this
# list -- confirm it is still referenced anywhere before removing it.
posts = [
    {
        'catname': 'Samsung',
        'catcontact': 'MOBILE: 24 HOURS, 7 DAYS A WEEK'
    },
    {
        'catname': 'Toshiba ',
        'catcontact': 'call 1-800-GO-TOSHIBA (1-800-468-6744)'
    },
    {
        'catname': 'Panasonic',
        'catcontact': 'Be the first to know about new products, promotions, technology and more!'
    },
]
def home(request):
    """Render the category index page listing every Post."""
    page_data = dict(title='category', posts=Post.objects.all())
    return render(request, 'cat/index.html', page_data)
|
import sys
import time
import requests
def run():
    """Fetch COVID-19 statistics from the corona.lmao.ninja v2 API and print
    a report.

    With no CLI argument the global totals are shown; otherwise the first
    argument is treated as a country name.
    """
    # Source URL
    url = 'https://corona.lmao.ninja/v2/'
    # Set URL endpoint
    if len(sys.argv) == 1:
        url += 'all'
        print('===== COVID19 GLOBAL DATA =====')
    else:
        country = sys.argv[1]
        print(f'==== COVID19 IN {country.upper()} ====')
        url += f'countries/{country}'
    # Request data
    data = requests.get(url).json()
    # Guard against API error payloads (e.g. an unknown country), which come
    # back as {"message": ...} and previously crashed with a KeyError.
    if 'cases' not in data:
        print(data.get('message', 'No data returned'))
        return
    # Process data
    cases = data['cases']
    deaths = data['deaths']
    recovered = data['recovered']
    # Death / recovery rates as percentages of total cases.
    deathr = round(deaths/cases*100, 2)
    recovr = round(recovered/cases*100, 2)
    tests = data['tests']
    todayCases = data['todayCases']
    todayDeaths = data['todayDeaths']
    active = data['active']
    critical = data['critical']
    casesPerOneMillion = data['casesPerOneMillion']
    deathsPerOneMillion = data['deathsPerOneMillion']
    testsPerOneMillion = data['testsPerOneMillion']
    # Only the global endpoint reports the number of affected countries.
    if len(sys.argv) == 1:
        affectedCountries = data['affectedCountries']
    # Print processed data
    print(f'Total Cases: {cases}')
    print(f'Deaths: {deaths} ({deathr}%)')
    print(f'Recovered: {recovered} ({recovr}%)')
    print('-------------------------')
    print(f'Today Cases: {todayCases}')
    print(f'Today Deaths: {todayDeaths}')
    print('-------------------------')
    print(f'Active Cases: {active}')
    print(f'Critical Cases: {critical}')
    print('-------------------------')
    print(f'Total Tests: {tests}')
    print(f'Tests per Million: {testsPerOneMillion}')
    print(f'Cases per Million: {casesPerOneMillion}')
    print(f'Deaths per Million: {deathsPerOneMillion}')
    print('-------------------------')
    if len(sys.argv) == 1:
        print(f'Affected Countries: {affectedCountries}')
    print('=========================')
def test():
    """Run the report once and print the elapsed wall-clock time."""
    print('.')
    # Start counting elapsed time
    init_time = time.perf_counter()
    # Run program
    run()
    # Stop counting elapsed time
    elapsed = round(time.perf_counter() - init_time, 2)
    print(f' *** Elapsed time: {elapsed} s ***\n.')
if __name__ == "__main__":
test() |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
# The next two serve as global variables, set in the `load_dbenv` call and should be properly reset upon forking.
ENGINE = None
SCOPED_SESSION_CLASS = None
def get_scoped_session():
    """Return a session from the global scoped-session class.

    SQLAlchemy's scoped_session hands back the same session within one
    thread and distinct sessions across threads.  The class itself is
    replaced after a fork, so forked processes also get their own sessions.
    """
    global SCOPED_SESSION_CLASS
    if SCOPED_SESSION_CLASS is None:
        # Lazily create the engine and session class on first use.
        reset_session()
    session = SCOPED_SESSION_CLASS()
    return session
def recreate_after_fork(engine):
    """Callback called after a fork.
    Not only disposes the engine, but also recreates a new scoped session to use independent sessions in the fork.
    :param engine: the engine that will be used by the sessionmaker
    """
    global ENGINE
    global SCOPED_SESSION_CLASS
    # NOTE(review): the `engine` parameter is unused -- the global ENGINE is
    # disposed and rebound instead; confirm the signature is dictated by
    # register_after_fork before changing it.
    # Dispose pooled connections inherited from the parent process; sharing
    # them across processes would corrupt the wire protocol.
    ENGINE.dispose()
    SCOPED_SESSION_CLASS = scoped_session(sessionmaker(bind=ENGINE, expire_on_commit=True))
def reset_session(profile=None):
    """
    Resets (global) engine and sessionmaker classes, to create a new one
    (or creates a new one from scratch if not already available)
    :param profile: the profile whose configuration to use to connect to the database
    """
    from multiprocessing.util import register_after_fork
    from aiida.manage.configuration import get_profile
    from .utils import loads_json, dumps_json
    global ENGINE
    global SCOPED_SESSION_CLASS
    if profile is None:
        profile = get_profile()
    # Omit the ':' entirely when no port is configured.
    separator = ':' if profile.database_port else ''
    engine_url = 'postgresql://{user}:{password}@{hostname}{separator}{port}/{name}'.format(
        separator=separator,
        user=profile.database_username,
        password=profile.database_password,
        hostname=profile.database_hostname,
        port=profile.database_port,
        name=profile.database_name)
    # Custom JSON (de)serializers keep AiiDA-specific types round-trippable.
    ENGINE = create_engine(engine_url, json_serializer=dumps_json, json_deserializer=loads_json, encoding='utf-8')
    SCOPED_SESSION_CLASS = scoped_session(sessionmaker(bind=ENGINE, expire_on_commit=True))
    # Ensure child processes rebuild their own engine/session after fork.
    register_after_fork(ENGINE, recreate_after_fork)
|
#!/usr/bin/env python3
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Used to generate an external link format JSON file for the given tast data file.
In order to use provide the name of the data file that you want to upload as
well as the name of the test directory that the file is used for.
Usage:
./generate_external_file.py <data_file> <test_directory> [--upload]
data_file: The name of the data_file used to produce the external link file.
test_directory: The name of the test directory that |data_file| is used in.
For example if you are adding a data file for the test
"audio.Microphone" then you would pass "audio".
upload: Whether to upload |data_file| to Google Cloud Storage.
Example:
./generate_external_file.py test_data.mp3 audio
Will produce a file called 'test_data.mp3.external' in the external link format
in the current directory.
If the '--upload' option is provided then the given data file will be uploaded
to the following path in Google Cloud Storage:
//chromiumos-test-assets-public/tast/cros/<test_dir>/<data_file>.external
"""
import argparse
import hashlib
import json
import os
import subprocess
from datetime import datetime
_GCP_PREFIX = 'chromiumos-test-assets-public/tast/cros'
def _parse_args():
    """Parse and return the command-line arguments for this script."""
    ap = argparse.ArgumentParser()
    ap.add_argument('data_file', help='name of the data file')
    ap.add_argument(
        'test_dir',
        help='name of the associated test used to fill the url field')
    ap.add_argument(
        '--upload',
        action='store_true',
        help='upload data file to Google Cloud Storage')
    return ap.parse_args()
def _get_sha256_digest(path):
    """Return the hex SHA-256 digest of the file at |path|.

    Streams the file in fixed-size chunks so arbitrarily large data files
    can be hashed without loading them fully into memory.
    """
    sha256 = hashlib.sha256()
    with open(path, 'rb') as infile:
        # 64 KiB chunks: identical output to the old 1 KiB loop with far
        # fewer read() calls; iter()'s b'' sentinel ends the loop at EOF.
        for buf in iter(lambda: infile.read(65536), b''):
            sha256.update(buf)
    return sha256.hexdigest()
def main():
    """Generate `<data_file>.external` in the Tast external-link format and
    optionally upload the data file to Google Cloud Storage.
    """
    args = _parse_args()
    # Timestamp the object name so re-uploads never collide.
    timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    url = 'gs://{prefix}/{test_dir}/{data_file}_{timestamp}'.format(
        prefix=_GCP_PREFIX,
        test_dir=args.test_dir,
        data_file=args.data_file,
        timestamp=timestamp)
    if not os.path.exists(args.data_file):
        print('No such file:', args.data_file)
        return
    size = os.path.getsize(args.data_file)
    digest = _get_sha256_digest(args.data_file)
    link = {'url': url, 'size': size, 'sha256sum': digest}
    # Write out the JSON file in the external link format.
    external_file = args.data_file + '.external'
    # Warn the user if the file already exists.
    if os.path.exists(external_file):
        ans = input(
            'File {0} already exists. Overwrite it? Y/N '.format(external_file))
        if ans.lower() not in ['y', 'yes']:
            print('Exiting')
            return
    with open(external_file, 'w') as outfile:
        json.dump(link, outfile, sort_keys=True, indent=2)
        outfile.write('\n')
    if args.upload:
        try:
            print('Uploading file...')
            # -n: no-clobber — never overwrite an existing GCS object.
            subprocess.check_call(['gsutil', 'cp', '-n', args.data_file, url])
        except subprocess.CalledProcessError as e:
            # Surface the exit status instead of silently discarding it.
            print('Failed to upload file (gsutil exit status {})'.format(
                e.returncode))


if __name__ == '__main__':
    main()
|
import pytest
from datetime import datetime, timedelta
import yaml
import numpy as np
import os
from copy import deepcopy
from wavy.insitumod import insitu_class as ic
from wavy.filtermod import apply_land_mask
# Time window shared by the insitu tests below, format "YYYY-M-D HH".
sd = "2021-8-2 01"
ed = "2021-8-3 12"
# Kept for reference: manual instantiation and a data-path fixture.
#ico = ic(nID,sensor,sd,ed)
#test_dict = deepcopy(ico.vars())
#@pytest.fixture
#def test_data():
#    return os.path.abspath(os.path.join(\
#        os.path.dirname( __file__ ),'data'))
def test_landmask():
    """apply_land_mask should flag exactly two of these positions as land."""
    lats = [60.12, 62.24, 64.08, 65.08, 67.65, 68.95]
    lons = [-23.47, -21.54, -19.32, -17.8, -13.97, -10.99]
    vardict = {'latitude': lats, 'longitude': lons}
    data, mask = apply_land_mask(vardict)
    assert len(mask[mask == False]) == 2
def test_cleaners():
    """Filtering with the linearGAM cleaner should add filter metadata."""
    ico = ic('D_Breisundet_wave', 'wavescan', sd, ed,
             priorOp='square', cleaner='linearGAM', postOp='root',
             date_incr=1, filterData=True)
    attrs = vars(ico)
    assert len(attrs.keys()) == 15
    assert 'filter' in attrs.keys()
    assert 'filterSpecs' in attrs.keys()
def test_smoothers():
    """Filtering with the blockMean smoother should add filter metadata."""
    ico = ic('D_Breisundet_wave', 'wavescan', sd, ed,
             smoother='blockMean', date_incr=1, filterData=True)
    attrs = vars(ico)
    assert len(attrs.keys()) == 15
    assert 'filter' in attrs.keys()
    assert 'filterSpecs' in attrs.keys()
|
#! python2.7
# Launcher script: start the SoundRTS game client.
from soundrts import clientmain

clientmain.main()
|
import sys
import os
from PyQt5.Qt import QMimeData, QUrl, QApplication, QClipboard
from PyQt5.QtCore import QTimer
# The QApplication must exist before any clipboard/mime objects are created.
app = QApplication(sys.argv)
def main(argv):
    """Place the files named on the command line onto the X clipboard in the
    nautilus file-copy format, then run the Qt event loop until killed.

    Args:
        argv: Full argument vector (program name first); the remaining
            entries are file names relative to the current directory.
    """
    directory = os.getcwd()
    # Use the argv parameter (the original ignored it and re-read sys.argv)
    # and avoid shadowing the builtin `str` as a loop variable.
    files = []
    for name in argv[1:]:
        files.append("file://" + directory + "/" + name)
    clipboard = QApplication.clipboard()
    mimeData = QMimeData()
    # Nautilus expects this exact text payload for a paste-as-copy operation.
    mimeData.setText("x-special/nautilus-clipboard\ncopy\n" + "\n".join(files))
    mimeData.setUrls([QUrl(f) for f in files])
    clipboard.setMimeData(mimeData, QClipboard.Clipboard)
    # Keep a periodic no-op timer running so the event loop stays alive and
    # responsive while owning the clipboard.
    timer = QTimer()
    timer.timeout.connect(lambda: None)
    timer.start(100)
    sys.exit(app.exec_())
def clipboardChanged():
    """Exit the process once the clipboard content changes.

    NOTE(review): this slot is never connected — main() wires the timer to a
    no-op lambda instead. Confirm whether it is intentionally dormant.
    """
    #print("CHNAGED")
    #print (QApplication.clipboard().text())
    sys.exit(0)
if __name__ == "__main__":
    # sys.argv[0:] is simply a shallow copy of sys.argv (program name included).
    main(sys.argv[0:])
|
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
    # @param root, a tree link node
    # @return nothing
    def connect(self, root):
        """Wire each node's `next` pointer to its right neighbour, level by
        level, using O(1) extra space (no queue)."""
        if not root: return
        if not root.left and not root.right: return
        level_head = root
        while level_head:
            # Dummy node: after scanning a level, dummy.next is the first
            # child on the level below (or None when there are no children).
            dummy = TreeLinkNode(-1)
            tail = dummy
            node = level_head
            while node:
                if node.left:
                    tail.next = node.left
                    tail = tail.next
                if node.right:
                    tail.next = node.right
                    tail = tail.next
                node = node.next
            level_head = dummy.next
|
import copy
from django.shortcuts import render
import pickle, base64
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django_redis import get_redis_connection
from .serializers import CartSerializer, CartSKUSerializer
from meiduo_mall.libs import constants
from goods.models import SKU
class CartView(APIView):
    """CRUD operations for the shopping cart.

    Authenticated users keep their cart in redis (hash ``cart_<user_id>``
    keyed by ``<sku_id>_<spec>``); anonymous users keep the same mapping in
    a cookie as a base64-encoded pickle.
    """
    # The front end of this project already validates credentials, so the
    # "empty authentication info" case does not occur here.
    # def perform_authentication(self, request):
    #     """
    #     Override the parent's authentication hook so the JWT is not
    #     checked before entering the view.
    #     """
    #     pass
    def post(self, request):
        """Add an item (sku_id + spec + count) to the cart."""
        serializer = CartSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        valid_data = serializer.validated_data
        sku_id = valid_data.get('sku_id')
        count = valid_data.get('count')
        spec = valid_data.get('spec')
        # Determine whether the user is logged in and authenticated.
        # NOTE(review): the bare except swallows every error raised by
        # request.user, not only authentication failures.
        try:
            user = request.user
        except:
            user = None
        response = Response(serializer.data, status=status.HTTP_201_CREATED)
        if user and user.is_authenticated:
            # Logged-in user: persist the cart entry to redis.
            redis_con = get_redis_connection('cart')
            # # Check whether this sku/spec combination already exists:
            # origin_count = redis_con.hget('cart_%d' % user.id, '%s_%s' % (sku_id, spec))
            # if origin_count:
            #     count += int(origin_count)
            # hincrby already accumulates counts onto an existing key.
            redis_con.hincrby('cart_%d' % user.id, '%s_%s' % (sku_id, spec), count)
            return response
        # Anonymous / unauthenticated user: store the cart in a cookie.
        # The data is serialized with pickle and wrapped in base64.
        # NOTE(review): unpickling a client-supplied cookie executes
        # attacker-controlled data — consider a signed or JSON format.
        # First check whether cart info has already been saved.
        cart = request.COOKIES.get('cart')
        if cart:
            cart = pickle.loads(base64.b64decode(cart.encode()))
        else:
            cart = {}
        origin_count = cart.get('%s_%s' % (sku_id, spec))
        if origin_count:
            count += origin_count
        cart['%s_%s' % (sku_id, spec)] = count
        # Convert the cart mapping back to a base64 text cookie value.
        cart = base64.b64encode(pickle.dumps(cart)).decode()
        # Build the response and set the cookie.
        response.set_cookie('cart', cart, max_age=constants.CART_COOKIE_EXPIRES)
        return response
    def get(self, request):
        """Return the cart contents as serialized SKU records."""
        # Determine whether the user is logged in and authenticated.
        try:
            user = request.user
        except:
            user = None
        # Fetch SKU rows and attach spec/count to each entry.
        temp_id = 0
        temp_sku = ''
        sku_list = []
        if user and user.is_authenticated:
            redis_con = get_redis_connection('cart')
            redis_cart = redis_con.hgetall('cart_%d' % user.id)
            for item, num in redis_cart.items():
                # Redis returns bytes; key layout is "<sku_id>_<spec>".
                item = item.decode().split('_', 1)
                sku_id = int(item[0])
                spec = item[1]
                count = int(num)
                if temp_id != sku_id:
                    temp_id = sku_id
                    temp_sku = SKU.objects.get(id=temp_id)
                else:
                    # Same SKU with another spec: copy the cached object so
                    # each list entry carries its own spec/count attributes.
                    temp_sku = copy.deepcopy(temp_sku)
                temp_sku.spec = spec
                temp_sku.count = count
                sku_list.append(temp_sku)
        else:
            # Anonymous user: read the cart from the cookie.
            cookie_cart = request.COOKIES.get('cart')
            if cookie_cart:
                # Convert the cookie payload back into a dict.
                cookie_cart = pickle.loads(base64.b64decode(cookie_cart.encode()))
                for item, num in cookie_cart.items():
                    item = item.split('_', 1)
                    sku_id = int(item[0])
                    spec = item[1]
                    count = int(num)
                    if temp_id != sku_id:
                        temp_id = sku_id
                        temp_sku = SKU.objects.get(id=temp_id)
                    else:
                        temp_sku = copy.deepcopy(temp_sku)
                    temp_sku.spec = spec
                    temp_sku.count = count
                    sku_list.append(temp_sku)
        serializer = CartSKUSerializer(sku_list, many=True)
        return Response(serializer.data)
    def put(self, request):
        """Update the count of one cart entry."""
        serializer = CartSerializer(data=request.data.get('data'))
        serializer.is_valid(raise_exception=True)
        valid_data = serializer.validated_data
        # Determine whether the user is logged in and authenticated.
        try:
            user = request.user
        except:
            user = None
        response = Response(serializer.data)
        if user and user.is_authenticated:
            # Logged-in user: overwrite the entry in redis.
            redis_con = get_redis_connection('cart')
            sku_id = valid_data.get('sku_id')
            count = valid_data.get('count')
            spec = valid_data.get('spec')
            # hset overwrites: this is an update, not an increment.
            redis_con.hset('cart_%d' % user.id, '%s_%s' % (sku_id, spec), count)
            return response
        # Not logged in: fall back to the cookie.
        # NOTE(review): if the 'cart' cookie is absent this raises
        # AttributeError on None.encode() before the emptiness check below.
        cart = pickle.loads(base64.b64decode(request.COOKIES.get('cart').encode()))
        if cart:
            pass
        else:
            # Anonymous user without a cookie: reject the update.
            return Response({'msg': '缺少cookie信息,请先添加购物车再做修改'}, status=status.HTTP_400_BAD_REQUEST)
        sku_id = valid_data.get('sku_id')
        count = valid_data.get('count')
        spec = valid_data.get('spec')
        # Update the cached entry.
        cart['%s_%s' % (sku_id, spec)] = count
        # Re-encode the cart, discarding the previous cookie value.
        cart = base64.b64encode(pickle.dumps(cart)).decode()
        # Build the response and set the cookie.
        response.set_cookie('cart', cart, max_age=constants.CART_COOKIE_EXPIRES)
        return response
    def delete(self, request):
        """Remove one sku/spec entry from the cart."""
        valid_data = request.data
        sku_id = valid_data.get('id')
        spec = valid_data.get('spec')
        # Determine whether the user is logged in and authenticated.
        try:
            user = request.user
        except:
            user = None
        if user and user.is_authenticated:
            # Logged-in user: delete the hash field from redis.
            redis_con = get_redis_connection('cart')
            redis_con.hdel('cart_%d' % user.id, '%s_%s' % (sku_id, spec))
            return Response()
        cart = request.COOKIES.get('cart')
        if cart:
            cart = pickle.loads(base64.b64decode(cart.encode()))
        else:
            # Anonymous user without a cookie: reject the delete.
            return Response({'msg': '缺少cookie信息,请先添加购物车再做修改'}, status=status.HTTP_400_BAD_REQUEST)
        # Drop the sku/spec entry from the cart.
        # NOTE(review): pop() raises KeyError if the entry is missing.
        cart.pop('%s_%s' % (sku_id, spec))
        # Re-encode the cart, discarding the previous cookie value.
        cart = base64.b64encode(pickle.dumps(cart)).decode()
        # Build the response and set the cookie.
        response = Response()
        response.set_cookie('cart', cart, max_age=constants.CART_COOKIE_EXPIRES)
        return response
class SelectedSkusView(APIView):
    """Temporarily record the checked (selected) cart items in redis before
    an order is generated."""
    permission_classes = [IsAuthenticated]
    def post(self, request):
        """Replace the user's pre-order selection list in redis.

        The previous selection is cleared first; the new one is stored as a
        redis list keyed by the user id, one "<sku_id>_<spec>" entry per sku.
        """
        skus = request.data.get('skus')
        user = request.user
        # Authenticated user (enforced by permission_classes): use redis.
        redis_con = get_redis_connection('cart')
        # Clear any existing pre-order values first; ltrim to the empty
        # range (1, 0) truncates the whole list.
        redis_con.ltrim('%d' % user.id, 1, 0)
        # Append each selected sku to the list.
        for sku in skus:
            redis_con.rpush('%d' % user.id, '%s_%s' % (sku['sku_id'], sku['spec']))
            # redis_con.hdel('cart_%d' % user.id, '%s_%s' % (sku['sku_id'], sku['spec']))
        return Response(skus, status=status.HTTP_201_CREATED)
        # return Response({'msg': "lalal"}, status=status.HTTP_403_FORBIDDEN)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 25 08:59:36 2020
@author: Ham

A sample one-liner to do a listop on a list of command-line args.

Shell usage (the original file contained this as a bare line, which is a
Python syntax error; it belongs on the command line, not in the module):

    python -c "import sys; print(sum(map(int, sys.argv[1:])))" 3 4 5
"""
import sys


def sum_args(args):
    """Return the integer sum of an iterable of numeric strings."""
    return sum(map(int, args))


if __name__ == "__main__":
    # Equivalent of the documented one-liner: sum the CLI arguments.
    print(sum_args(sys.argv[1:]))
from typing import List
from xsdata.codegen.mixins import ContainerInterface
from xsdata.codegen.mixins import RelativeHandlerInterface
from xsdata.codegen.models import Attr
from xsdata.codegen.models import AttrType
from xsdata.codegen.models import Class
from xsdata.codegen.models import get_restriction_choice
from xsdata.codegen.models import get_slug
from xsdata.codegen.models import Restrictions
from xsdata.codegen.utils import ClassUtils
from xsdata.models.enums import DataType
from xsdata.models.enums import Tag
from xsdata.utils.collections import group_by
class AttributeCompoundChoiceHandler(RelativeHandlerInterface):
    """Group attributes that belong in the same choice and replace them by
    compound fields."""
    # NOTE(review): a bare string here creates a single slot named
    # "compound_fields"; a one-element tuple would be the conventional form.
    __slots__ = "compound_fields"
    def __init__(self, container: ContainerInterface):
        super().__init__(container)
        # Feature switch from the generator config: when off, no grouping
        # takes place in process().
        self.compound_fields = container.config.output.compound_fields
    def process(self, target: Class):
        """Collapse qualifying choice groups of *target* into compound
        fields, then clear stray sequential flags on all attrs."""
        if self.compound_fields:
            groups = group_by(target.attrs, get_restriction_choice)
            for choice, attrs in groups.items():
                # Group only a real repeatable choice: a non-empty shared
                # choice id, more than one member, and at least one member
                # that is a list or carries a restriction choice.
                if (
                    choice
                    and len(attrs) > 1
                    and any(
                        attr.is_list or get_restriction_choice(attr) for attr in attrs
                    )
                ):
                    self.group_fields(target, attrs)
        for index in range(len(target.attrs)):
            self.reset_sequential(target, index)
    def group_fields(self, target: Class, attrs: List[Attr]):
        """Group attributes into a new compound field."""
        # Insert the compound field where the first grouped attr lived.
        pos = target.attrs.index(attrs[0])
        choice = attrs[0].restrictions.choice
        # "effective_" choices sum occurrences; plain choices take min/max.
        sum_occurs = choice and choice.startswith("effective_")
        names = []
        choices = []
        min_occurs = []
        max_occurs = []
        for attr in attrs:
            target.attrs.remove(attr)
            names.append(attr.local_name)
            min_occurs.append(attr.restrictions.min_occurs or 0)
            max_occurs.append(attr.restrictions.max_occurs or 0)
            choices.append(self.build_attr_choice(attr))
        name = self.choose_name(target, names)
        target.attrs.insert(
            pos,
            Attr(
                name=name,
                index=0,
                types=[AttrType(qname=str(DataType.ANY_TYPE), native=True)],
                tag=Tag.CHOICE,
                restrictions=Restrictions(
                    min_occurs=sum(min_occurs) if sum_occurs else min(min_occurs),
                    max_occurs=sum(max_occurs) if sum_occurs else max(max_occurs),
                ),
                choices=choices,
            ),
        )
    def choose_name(self, target: Class, names: List[str]) -> str:
        """Pick a unique name for the compound field: a joined form of the
        member names when short and distinct, otherwise "choice"."""
        reserved = set(map(get_slug, self.base_attrs(target)))
        reserved.update(map(get_slug, target.attrs))
        if len(names) > 3 or len(names) != len(set(names)):
            name = "choice"
        else:
            name = "_Or_".join(names)
        return ClassUtils.unique_name(name, reserved)
    @classmethod
    def build_attr_choice(cls, attr: Attr) -> Attr:
        """
        Converts the given attr to a choice.
        The most important part is the reset of certain restrictions
        that don't make sense as choice metadata like occurrences.
        """
        restrictions = attr.restrictions.clone()
        restrictions.min_occurs = None
        restrictions.max_occurs = None
        restrictions.sequential = None
        return Attr(
            name=attr.local_name,
            namespace=attr.namespace,
            default=attr.default,
            types=attr.types,
            tag=attr.tag,
            help=attr.help,
            restrictions=restrictions,
        )
    @classmethod
    def reset_sequential(cls, target: Class, index: int):
        """Reset the attribute at the given index if it has no siblings with
        the sequential restriction."""
        attr = target.attrs[index]
        before = target.attrs[index - 1] if index - 1 >= 0 else None
        after = target.attrs[index + 1] if index + 1 < len(target.attrs) else None
        # Non-list attrs never keep the sequential flag.
        if not attr.is_list:
            attr.restrictions.sequential = False
        # Keep the flag only when a sequential neighbour justifies it.
        if (
            not attr.restrictions.sequential
            or (before and before.restrictions.sequential)
            or (after and after.restrictions.sequential and after.is_list)
        ):
            return
        attr.restrictions.sequential = False
|
# Copyright (C) 2017 Verizon. All Rights Reserved.
#
# File: _envelopes.py
# Author: John Hickey, Phil Chandler
# Date: 2017-02-17
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import uuid
# Third Party
from lxml import etree
# Module
from dractor.exceptions import WSMANSOAPEnvelopeError
from dractor.types import CIM_Reference
from ._namespace import NS
LOGGER = logging.getLogger(__name__)
class IdentifyEnvelope(object):
    """
    This is a little bit of an odd one. It is not derived from our WSMANSoapEnvelope. I don't know
    if it is worth adding a more fundamental SOAP Envelope class since it would only be for this,
    which is just a template.
    From DSP0266:
    Note the absence of any WS-Addressing namespace, WS-Management namespace, or other versionspecific
    concepts. This message is compatible only with the basic SOAP specification, and the presence
    of the wsmid:Identify block in the s:Body is the embodiment of the request operation.
    """
    # Static wsmid:Identify request; never parameterized.
    ENVELOPE_TEMPLATE = """<?xml version="1.0"?>
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsmid="http://schemas.dmtf.org/wbem/wsman/identity/1/wsmanidentity.xsd">
<s:Header></s:Header>
<s:Body>
<wsmid:Identify>
</wsmid:Identify>
</s:Body>
</s:Envelope>
"""
    @property
    def document(self):
        """ Return xml document as string for consumption """
        # Back and forth to make sure our Template is valid XML
        root = etree.fromstring(self.ENVELOPE_TEMPLATE)
        xml = etree.tostring(root, pretty_print=True, encoding='unicode')
        return xml
class WSMANSOAPEnvelope(object):
    """
    This is our basic message structure. It contains the necessary
    Addressing and WSMAN namespaces that are fundamental to the basic
    wsman calls.
    I use XPath to update the required addressing tags rather than adding them
    dynamically. I do this to make the basic required structure more clear, as
    far as xml can be clear, in the template itself.
    """
    ENVELOPE_TEMPLATE = """<?xml version="1.0"?>
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing"
xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd">
<s:Header>
<wsa:Action s:mustUnderstand="true"></wsa:Action>
<wsa:To s:mustUnderstand="true"></wsa:To>
<wsman:ResourceURI s:mustUnderstand="true"></wsman:ResourceURI>
<wsa:MessageID s:mustUnderstand="true"></wsa:MessageID>
<wsa:ReplyTo>
<wsa:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:Address>
</wsa:ReplyTo>
</s:Header>
<s:Body>
</s:Body>
</s:Envelope>
"""
    def __init__(self, to_url, action_ns_prefix, action, resource_uri, additional_namespaces=None):
        """
        Args:
            to_url: Endpoint address placed in wsa:To.
            action_ns_prefix: Key into the NS map used to build the action URI.
            action: Action name appended to the namespace to form wsa:Action.
            resource_uri: Value for wsman:ResourceURI.
            additional_namespaces: Optional extra prefix->URI entries merged
                into a copy of the shared NS map.
        """
        self._nsmap = copy.deepcopy(NS)
        if additional_namespaces:
            self._nsmap.update(additional_namespaces)
        # NS shortcuts
        self._action_ns_prefix = action_ns_prefix
        self._resource_uri = resource_uri
        # Use a WSMAN SOAP Template to save on the boiler plate
        self._root = etree.fromstring(self.ENVELOPE_TEMPLATE)
        # Update the To
        self._set_text("/s:Envelope/s:Header/wsa:To", to_url)
        # Set the action
        action_uri = "{}/{}".format(self._nsmap[action_ns_prefix], action)
        self._set_text("/s:Envelope/s:Header/wsa:Action", action_uri)
        # Set the Resource URI
        self._set_text("/s:Envelope/s:Header/wsman:ResourceURI", resource_uri)
    def _set_message_id(self):
        """ Set a UUID for each message """
        message_id = self._get_one_xpath("/s:Envelope/s:Header/wsa:MessageID")
        message_id.text = "uuid:{}".format(str(uuid.uuid4()))
    @property
    def document(self):
        """ Return as string for consumption """
        self._set_message_id()  # Make sure to generate a fresh UUID
        xml = etree.tostring(self._root, pretty_print=True, encoding='unicode')
        return xml
    def _get_one_xpath(self, path):
        """ Make sure our path exists and returns one element """
        # Xpath returns an array of matches
        element = self._root.xpath(path, namespaces=self._nsmap)
        if not element:
            raise WSMANSOAPEnvelopeError("Xpath '{}' did not return element".format(path))
        if len(element) != 1:
            raise WSMANSOAPEnvelopeError("Xpath '{}' returned multiple elements".format(path))
        return element.pop()
    def _set_text(self, path, text):
        """ Set the text of the single element returned by path """
        element = self._get_one_xpath(path)
        element.text = text
    def _add_wsman_selectors(self, selectors):
        """ Add the selectors (name -> value mapping) to the SOAP header """
        header = self._get_one_xpath("/s:Envelope/s:Header")
        selectorset = etree.SubElement(header, "{{{wsman}}}SelectorSet".format(**self._nsmap))
        for key, value in selectors.items():
            selector = etree.SubElement(selectorset, "{{{wsman}}}Selector".format(**self._nsmap))
            selector.set("{{{wsman}}}Name".format(**self._nsmap), key)
            selector.text = value
class GetEnvelope(WSMANSOAPEnvelope):
    """SOAP envelope for a WS-Transfer Get request."""
    ACTION_NS_PREFIX = "wstransfer"  # Not used
    ACTION = "Get"
    def __init__(self, to_uri, dcim_class, selectors):
        """Set up a Get for a resource, such as DCIM_NICView."""
        super(GetEnvelope, self).__init__(
            to_uri,
            self.ACTION_NS_PREFIX,
            self.ACTION,
            "{}/{}".format(NS['dcim'], dcim_class))
        self._add_wsman_selectors(selectors)
class EnumerationEnvelopes(WSMANSOAPEnvelope):
    """Shared base for the two enumeration calls, Enumerate and Pull."""
    ACTION_NS_PREFIX = "wsen"
    ACTION = None
    def __init__(self, to_uri, dcim_class):
        """Set up an enumeration for dcim_class, such as DCIM_NICView."""
        resource = "{}/{}".format(NS['dcim'], dcim_class)
        super(EnumerationEnvelopes, self).__init__(
            to_uri, self.ACTION_NS_PREFIX, self.ACTION, resource)
        self._setup_body()
    def _setup_body(self):
        """Hook for subclasses to populate the SOAP body; no-op here."""
        pass
class EnumerateEnvelope(EnumerationEnvelopes):
    """Envelope for the wsen:Enumerate call."""
    ACTION = "Enumerate"
    def _setup_body(self):
        """Append the Enumerate element to the SOAP body."""
        parent = self._get_one_xpath("/s:Envelope/s:Body")
        tag = "{{{wsen}}}Enumerate".format(**self._nsmap)
        etree.SubElement(parent, tag)
class PullEnvelope(EnumerationEnvelopes):
    """Envelope for the wsen:Pull call that drains an enumeration context."""
    ACTION = "Pull"
    def __init__(self, to_uri, dcim_class, context, max_elements=50):
        """Stash body parameters before the parent __init__ runs, because
        the parent triggers _setup_body() which reads them."""
        self._context = context
        self._max_elements = int(max_elements)
        super(PullEnvelope, self).__init__(to_uri, dcim_class)
    def _setup_body(self):
        """Add the Pull element, enumeration context and batching hints."""
        body = self._get_one_xpath("/s:Envelope/s:Body")
        pull = etree.SubElement(body, "{{{wsen}}}Pull".format(**self._nsmap))
        context_xml = etree.SubElement(pull, "{{{wsen}}}EnumerationContext".format(**self._nsmap))
        context_xml.text = self._context
        # NOTE(review): with a "> 1" threshold, max_elements == 1 sends no
        # MaxElements hint at all — confirm that is intended.
        if self._max_elements > 1:
            etree.SubElement(pull, "{{{wsman}}}OptimizeEnumeration".format(**self._nsmap))
            max_elements = etree.SubElement(pull, "{{{wsman}}}MaxElements".format(**self._nsmap))
            max_elements.text = str(self._max_elements)
class InvokeEnvelope(WSMANSOAPEnvelope):
    """SOAP envelope for invoking a method on a DCIM class instance."""
    def __init__(self, to_uri, dcim_class, method, selectors, properties):
        """
        Args:
            to_uri: Endpoint address placed in wsa:To.
            dcim_class: DCIM class name, e.g. DCIM_JobService.
            method: Name of the method to invoke.
            selectors: Mapping of selector name -> value identifying the
                target instance.
            properties: Iterable of (name, value) pairs for the method's
                _INPUT element; values are strings or CIM_Reference objects.
        """
        resource_uri = "{}/{}".format(NS['dcim'], dcim_class)
        # The parent builds the action URI as "<resource_uri>/<method>" from
        # the 'dcim_class' prefix registered here.  (The original also built
        # that URI into a local `action` variable that was never used.)
        additional_namespaces = {'dcim_class': resource_uri}
        super(InvokeEnvelope, self).__init__(to_uri, 'dcim_class', method, resource_uri,
                                             additional_namespaces)
        self._add_wsman_selectors(selectors)
        self._add_wsman_properties(method, properties)
    def _add_wsman_properties(self, method, properties):
        """Append the <METHOD>_INPUT element and its property children."""
        body = self._get_one_xpath("/s:Envelope/s:Body")
        element_name = "{{{}}}{}_INPUT".format(self._resource_uri, method)
        input_element = etree.SubElement(body, element_name)
        for key, value in properties:
            prop_name = "{{{}}}{}".format(self._resource_uri, key)
            prop_element = etree.SubElement(input_element, prop_name)
            if isinstance(value, str):
                prop_element.text = value
            elif isinstance(value, CIM_Reference):
                # Construct a CIM reference (endpoint reference) element.
                address = etree.SubElement(prop_element,
                                           "{{{wsa}}}Address".format(**self._nsmap))
                address.text = "http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous"
                ref_params = etree.SubElement(prop_element,
                                              "{{{wsa}}}ReferenceParameters".format(**self._nsmap))
                resource_uri = etree.SubElement(ref_params,
                                                "{{{wsman}}}ResourceURI".format(**self._nsmap))
                resource_uri.text = value.resource_uri
                selector_set = etree.SubElement(ref_params,
                                                "{{{wsman}}}SelectorSet".format(**self._nsmap))
                # Distinct loop names: the original rebound `value` here,
                # shadowing the property value being serialized.
                for sel_name, sel_value in value.selector_set.items():
                    selector = etree.SubElement(selector_set,
                                                "{{{wsman}}}Selector".format(**self._nsmap))
                    selector.set("Name", sel_name)
                    selector.text = sel_value
            else:
                message = ("Unknown value type for {}: {} ({})").format(key, type(value), value)
                raise WSMANSOAPEnvelopeError(message)
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.data import logger
from lib.core.dicts import DBMS_DICT
from lib.core.enums import DBMS
from lib.core.settings import IS_WIN
def checkDependencies():
    """Probe for the optional third-party libraries sqlmap uses for direct
    DBMS connections and auxiliary features, logging a warning for each
    missing one and an info message when everything is installed.
    """
    missing_libraries = set()
    # DBMS_DICT maps a DBMS name to metadata; data[1] is the library name,
    # data[2] its download URL. A None library means nothing to check.
    for dbmsName, data in DBMS_DICT.items():
        if data[1] is None:
            continue
        try:
            if dbmsName in (DBMS.MSSQL, DBMS.SYBASE):
                __import__("_mssql")
                import pymssql
                # NOTE(review): this is a lexicographic string comparison,
                # so e.g. "1.0.10" < "1.0.2" evaluates True — a proper
                # version parse would be safer.
                if not hasattr(pymssql, "__version__") or pymssql.__version__ < "1.0.2":
                    warnMsg = "'%s' third-party library must be " % data[1]
                    warnMsg += "version >= 1.0.2 to work properly. "
                    warnMsg += "Download from %s" % data[2]
                    logger.warn(warnMsg)
            elif dbmsName == DBMS.MYSQL:
                __import__("pymysql")
            elif dbmsName == DBMS.PGSQL:
                __import__("psycopg2")
            elif dbmsName == DBMS.ORACLE:
                __import__("cx_Oracle")
            elif dbmsName == DBMS.SQLITE:
                __import__("sqlite3")
            elif dbmsName == DBMS.ACCESS:
                __import__("pyodbc")
            elif dbmsName == DBMS.FIREBIRD:
                __import__("kinterbasdb")
            elif dbmsName == DBMS.DB2:
                __import__("ibm_db_dbi")
            elif dbmsName == DBMS.HSQLDB:
                __import__("jaydebeapi")
                __import__("jpype")
            elif dbmsName == DBMS.INFORMIX:
                __import__("ibm_db_dbi")
        # Deliberately broad: any failure while importing (not only
        # ImportError) counts as "library unusable".
        except:
            warnMsg = "sqlmap requires '%s' third-party library " % data[1]
            warnMsg += "in order to directly connect to the DBMS "
            warnMsg += "'%s'. Download from %s" % (dbmsName, data[2])
            logger.warn(warnMsg)
            missing_libraries.add(data[1])
            continue
        debugMsg = "'%s' third-party library is found" % data[1]
        logger.debug(debugMsg)
    try:
        __import__("impacket")
        debugMsg = "'python-impacket' third-party library is found"
        logger.debug(debugMsg)
    except ImportError:
        warnMsg = "sqlmap requires 'python-impacket' third-party library for "
        warnMsg += "out-of-band takeover feature. Download from "
        warnMsg += "http://code.google.com/p/impacket/"
        logger.warn(warnMsg)
        missing_libraries.add('python-impacket')
    try:
        __import__("ntlm")
        debugMsg = "'python-ntlm' third-party library is found"
        logger.debug(debugMsg)
    except ImportError:
        warnMsg = "sqlmap requires 'python-ntlm' third-party library "
        warnMsg += "if you plan to attack a web application behind NTLM "
        warnMsg += "authentication. Download from http://code.google.com/p/python-ntlm/"
        logger.warn(warnMsg)
        missing_libraries.add('python-ntlm')
    try:
        __import__("websocket.ABNF")
        debugMsg = "'python websocket-client' library is found"
        logger.debug(debugMsg)
    except ImportError:
        warnMsg = "sqlmap requires 'websocket-client' third-party library "
        warnMsg += "if you plan to attack a web application using WebSocket. "
        warnMsg += "Download from https://pypi.python.org/pypi/websocket-client/"
        logger.warn(warnMsg)
        missing_libraries.add('websocket-client')
    # pyreadline is only needed for TAB completion/history on Windows.
    if IS_WIN:
        try:
            __import__("pyreadline")
            debugMsg = "'python-pyreadline' third-party library is found"
            logger.debug(debugMsg)
        except ImportError:
            warnMsg = "sqlmap requires 'pyreadline' third-party library to "
            warnMsg += "be able to take advantage of the sqlmap TAB "
            warnMsg += "completion and history support features in the SQL "
            warnMsg += "shell and OS shell. Download from "
            warnMsg += "http://ipython.scipy.org/moin/PyReadline/Intro"
            logger.warn(warnMsg)
            missing_libraries.add('python-pyreadline')
    if len(missing_libraries) == 0:
        infoMsg = "all dependencies are installed"
        logger.info(infoMsg)
|
import tensorflow as tf
import numpy as np
from sklearn import cross_validation
from data_utils import load_CIFAR10
from extract import create_graph, iterate_mini_batches, batch_pool3_features
from datetime import datetime
import matplotlib.pyplot as plt
from tsne import tsne
import os
import sys
import input_data_sketches
#samples = np.load('pure_samples50k.npy').transpose(0,2,3,1)
# Path to the CIFAR-10 python batches used throughout this script.
cifar10_dir ='../../datasets/cifar-10-python/cifar-10-batches-py' # Change this line to direct to the sketches dataset to test for sketches
def load_pool3_data():
    """Load the serialized pool_3 features and labels from .npy files.

    Update these file names after you serialize pool_3 values.
    Returns (X_train, y_train, X_test, y_test).
    """
    paths = ('X_train_1.npy', 'y_train_1.npy', 'X_test_1.npy', 'y_test_1.npy')
    X_train, y_train, X_test, y_test = (np.load(p) for p in paths)
    return X_train, y_train, X_test, y_test
def serialize_cifar_pool3(X,filename):
    """Run X through the Inception pool_3 layer and save the features to
    *filename* (.npy). Python 2 syntax (print statement).

    Caps the session at 10% of GPU memory so other jobs can share the device.
    """
    print 'About to generate file: %s' % filename
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
    X_pool3 = batch_pool3_features(sess,X)
    np.save(filename,X_pool3)
def serialize_data():
    """Extract pool_3 features for the CIFAR-10 train/test splits and save
    them plus their labels as the *_1.npy files read by load_pool3_data()."""
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # Change this line to take the sketches dataset as input using input_data_sketches.read_data_sets() for testing with sketches
    serialize_cifar_pool3(X_train, 'X_train_1')
    serialize_cifar_pool3(X_test, 'X_test_1')
    np.save('y_train_1',y_train)
    np.save('y_test_1',y_test)
# One-off feature-extraction pipeline (Python 2 print syntax below).
graph=create_graph() # Comment this line while calculating the inception scores
serialize_data() # Comment this line while calculating the inception scores
# Samples are stored channels-first (N, C, H, W); move channels last.
X_sample = np.load('samples50k.npy').transpose(0,2,3,1).astype("float")
# Rescale samples — presumably mapping generator output in [-1, 1] back to
# pixel range [-0.5, 255.5); TODO confirm against the sampler that wrote
# samples50k.npy.
X_sample=X_sample*128+127.5
serialize_cifar_pool3(X_sample,'X_sample_1') # Comment this line while calculating the inception scores
X_sample_pool3 = np.load('X_sample_1.npy')
print(X_sample_pool3)
#sys.exit()
classes = np.array(['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']) # Change this line to test for sketches
X_train_orig, y_train_orig, X_test_orig, y_test_orig = load_CIFAR10(cifar10_dir) # Change this line to take the sketches dataset as input using input_data_sketches.read_data_sets() for testing with sketches
X_train_pool3, y_train_pool3, X_test_pool3, y_test_pool3 = load_pool3_data()
# Hold out 20% of the training features as a validation split.
X_train, X_validation, Y_train, y_validation = cross_validation.train_test_split(X_train_pool3, y_train_pool3, test_size=0.20, random_state=42)
print 'Training data shape: ', X_train_pool3.shape
print 'Training labels shape: ', y_train_pool3.shape
print 'Test data shape: ', X_test_pool3.shape
print 'Test labels shape: ', y_test_pool3.shape
print 'Sample data shape: ', X_sample_pool3.shape
#
# Tensorflow stuff
# #
FLAGS = tf.app.flags.FLAGS
# Name and width of the Inception bottleneck (pool_3) tensor that feeds the
# retrained final layer.
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape'
BOTTLENECK_TENSOR_SIZE = 2048
tf.app.flags.DEFINE_integer('how_many_training_steps', 100,
                            """How many training steps to run before ending.""")
tf.app.flags.DEFINE_float('learning_rate', 0.005,
                          """How large a learning rate to use when training.""")
tf.app.flags.DEFINE_string('final_tensor_name', 'final_result',
                           """The name of the output classification layer in"""
                           """ the retrained graph.""")
tf.app.flags.DEFINE_integer('eval_step_interval', 100,
                            """How often to evaluate the training results.""")
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def ensure_name_has_port(tensor_name):
    """Makes sure that there's a port number at the end of the tensor name.
    Args:
      tensor_name: A string representing the name of a tensor in a graph.
    Returns:
      The input string with a :0 appended if no port was specified.
    """
    return tensor_name if ':' in tensor_name else tensor_name + ':0'
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def add_final_training_ops(graph, class_count, final_tensor_name,
                           ground_truth_tensor_name):
    """Adds a new softmax and fully-connected layer for training.
    We need to retrain the top layer to identify our new classes, so this function
    adds the right operations to the graph, along with some variables to hold the
    weights, and then sets up all the gradients for the backward pass.
    The set up for the softmax and fully-connected layers is based on:
    https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
    Args:
      graph: Container for the existing model's Graph.
      class_count: Integer of how many categories of things we're trying to
          recognize.
      final_tensor_name: Name string for the new final node that produces results.
      ground_truth_tensor_name: Name string of the node we feed ground truth data
          into.
    Returns:
      A (train_step, cross_entropy_mean) tuple: the gradient-descent training
      op and the mean cross-entropy loss tensor.
    """
    bottleneck_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        BOTTLENECK_TENSOR_NAME))
    layer_weights = tf.Variable(
        tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001),
        name='final_weights')
    layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
    logits = tf.matmul(bottleneck_tensor, layer_weights,
                       name='final_matmul') + layer_biases
    # Registers the softmax output under final_tensor_name for later lookup.
    tf.nn.softmax(logits, name=final_tensor_name)
    ground_truth_placeholder = tf.placeholder(tf.float32,
                                              [None, class_count],
                                              name=ground_truth_tensor_name)
    # NOTE(review): positional (logits, labels) is the legacy pre-1.0 TF
    # signature — newer TF requires keyword arguments here.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits, ground_truth_placeholder)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
        cross_entropy_mean)
    return train_step, cross_entropy_mean
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def add_evaluation_step(graph, final_tensor_name, ground_truth_tensor_name):
    """Inserts the operations we need to evaluate the accuracy of our results.
    Args:
      graph: Container for the existing model's Graph.
      final_tensor_name: Name string for the new final node that produces results.
      ground_truth_tensor_name: Name string for the node we feed ground truth data
          into.
    Returns:
      The evaluation op: mean accuracy (argmax agreement) as a float tensor.
    """
    result_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        final_tensor_name))
    ground_truth_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        ground_truth_tensor_name))
    correct_prediction = tf.equal(
        tf.argmax(result_tensor, 1), tf.argmax(ground_truth_tensor, 1))
    evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    return evaluation_step
def encode_one_hot(nclasses, y):
    """Return one-hot rows for the label array *y* via an identity-matrix
    lookup; output shape is (len(y), nclasses)."""
    identity = np.identity(nclasses)
    return identity[y]
def do_train(sess,X_input, Y_input, X_validation, Y_validation, X_sample_pool3):
    """Restore and train/evaluate the retrained final layer on bottleneck features.

    Args:
      sess: an active TensorFlow session.
      X_input, Y_input: training bottleneck features and integer class labels.
      X_validation, Y_validation: validation features and integer class labels.
      X_sample_pool3: pool3 features of generated samples; used only by the
        commented-out inception-score block below.

    NOTE(review): relies on module-level names defined elsewhere in this
    script (classes, FLAGS, create_graph, add_final_training_ops,
    ensure_name_has_port, iterate_mini_batches, X_test_pool3, y_test_pool3,
    BOTTLENECK_TENSOR_NAME, os, datetime, np, tf) — confirm before reuse.
    """
    ground_truth_tensor_name = 'ground_truth'
    mini_batch_size = 1
    n_train = X_input.shape[0]
    graph = create_graph()
    train_step, cross_entropy = add_final_training_ops(
        graph, len(classes), FLAGS.final_tensor_name,
        ground_truth_tensor_name)
    # Only the newly added 'final_*' variables are saved/restored below.
    t_vars = tf.trainable_variables()
    final_vars = [var for var in t_vars if 'final_' in var.name]
    saver = tf.train.Saver(final_vars, max_to_keep=10)
    init = tf.initialize_all_variables()
    sess.run(init)
    evaluation_step = add_evaluation_step(graph, FLAGS.final_tensor_name, ground_truth_tensor_name)
    # Get some layers we'll need to access during training.
    bottleneck_tensor = graph.get_tensor_by_name(ensure_name_has_port(BOTTLENECK_TENSOR_NAME))
    ground_truth_tensor = graph.get_tensor_by_name(ensure_name_has_port(ground_truth_tensor_name))
    # Restore the latest checkpoint of the final layer (overrides init above).
    saver.restore(sess,tf.train.latest_checkpoint(os.getcwd()+"/train/test2/"))
    result_tensor = graph.get_tensor_by_name(ensure_name_has_port(FLAGS.final_tensor_name))
    ''' # Uncomment this line for calculating the inception score
    splits=10
    preds=[]
    print(X_sample_pool3)
    for Xj, Yj in iterate_mini_batches(X_sample_pool3,np.zeros([X_sample_pool3.shape[0],10]),mini_batch_size):
        pred = sess.run(result_tensor,feed_dict={bottleneck_tensor: Xj})
        preds.append(pred)
    preds = np.concatenate(preds, 0)
    argmax = preds.argmax(axis=1)
    scores = []
    # Calculating the inception score
    for i in range(splits):
        part = preds[argmax==i]
        logp= np.log(part)
        self = np.sum(part*logp,axis=1)
        cross = np.mean(np.dot(part,np.transpose(logp)),axis=1)
        diff = self - cross
        kl = np.mean(self - cross)
        kl1 = []
        for j in range(splits):
            diffj = diff[(j * diff.shape[0] // splits):((j+ 1) * diff.shape[0] //splits)]
            kl1.append(np.exp(diffj.mean()))
        print("category: %s scores_mean = %.2f, scores_std = %.2f" % (classes[i], np.mean(kl1),np.std(kl1)))
        scores.append(np.exp(kl))
    print("scores_mean = %.2f, scores_std = %.2f" % (np.mean(scores),
                                                     np.std(scores)))
    ''' # Uncomment this line for calculating the inception score
    # The block commented out below has to be uncommented for transfer learning and the block above has to be commented
    # ''' # Comment this line when doing transfer learning
    i=0
    epocs = 1
    for epoch in range(epocs):
        # Reshuffle the training set each epoch.
        shuffledRange = np.random.permutation(n_train)
        y_one_hot_train = encode_one_hot(len(classes), Y_input)
        y_one_hot_validation = encode_one_hot(len(classes), Y_validation)
        shuffledX = X_input[shuffledRange,:]
        shuffledY = y_one_hot_train[shuffledRange]
        for Xi, Yi in iterate_mini_batches(shuffledX, shuffledY, mini_batch_size):
            sess.run(train_step,
                     feed_dict={bottleneck_tensor: Xi,
                                ground_truth_tensor: Yi})
            # Every so often, print out how well the graph is training.
            is_last_step = (i + 1 == FLAGS.how_many_training_steps)
            if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
                train_accuracy, cross_entropy_value = sess.run(
                    [evaluation_step, cross_entropy],
                    feed_dict={bottleneck_tensor: Xi,
                               ground_truth_tensor: Yi})
            if (i % 1000)==0:
                # Periodically checkpoint the final-layer variables.
                saver.save(sess,os.getcwd()+"/train/test2/", global_step=i)
            i+=1
        # Average per-mini-batch accuracy over the whole validation set
        # (mini_batch_size is 1, so dividing by the sample count is exact).
        validation_accuracy=0
        for Xj, Yj in iterate_mini_batches(X_validation, y_one_hot_validation, mini_batch_size):
            validation_accuracy = validation_accuracy + sess.run(evaluation_step,feed_dict={bottleneck_tensor:Xj,ground_truth_tensor:Yj})
        validation_accuracy = validation_accuracy/X_validation.shape[0]
        print('%s: Step %d: Train accuracy = %.1f%%, Cross entropy = %f, Validation accuracy = %.1f%%' %
              (datetime.now(), i, train_accuracy * 100, cross_entropy_value, validation_accuracy * 100))
    # Final test pass; the printed value is from the LAST mini-batch only,
    # since test_accuracy is overwritten on every iteration.
    for Xi, Yi in iterate_mini_batches(X_test_pool3,encode_one_hot(len(classes),y_test_pool3), mini_batch_size):
        test_accuracy = sess.run(evaluation_step, feed_dict={bottleneck_tensor:Xi,ground_truth_tensor:Yi})
    print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
    # ''' # Comment this line when doing transfer learning
# Cap this process at 10% of GPU memory so other jobs can share the device.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
# NOTE(review): X_train/Y_train/X_validation/y_validation/X_sample_pool3 are
# assumed to be defined earlier in this script — confirm.
do_train(sess,X_train,Y_train,X_validation,y_validation,X_sample_pool3)
|
import cv2
import imutils
from PixelRatio import get_pixel_per_metric
from imutils import contours

# Pixels-per-metric calibration from a reference object of known size (100).
ppm = get_pixel_per_metric("capture.png", 100)
cord = []
frame = cv2.imread("capture.png")
if frame is None:
    # imread returns None (no exception) when the file is missing/unreadable.
    raise IOError("capture.png could not be read")
# Grayscale + blur before edge detection to suppress noise.
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
img = cv2.Canny(gray, 0, 255)
# Close small gaps in the edge map.
img = cv2.dilate(img, None, iterations=1)
img = cv2.erode(img, None, iterations=1)
# Find the contours in the edge map, then sort them from left to right.
# grab_contours handles the differing return signatures of cv2.findContours:
# the original `cnts[0] if is_cv2() else cnts[1]` breaks on OpenCV 4, which
# returns a 2-tuple (contours, hierarchy) again.
cnts = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
if len(cnts) > 0:
    cnts = contours.sort_contours(cnts)[0]
# Record the centre of every contour with a plausible area, converted from
# pixels to real-world units via the pixels-per-metric ratio.
for c in cnts:
    area = cv2.contourArea(c)
    if 100 <= area <= 100000:
        ((cX, cY), radius) = cv2.minEnclosingCircle(c)
        cord.append([cX / ppm, cY / ppm])
ht, wd, _ = frame.shape
print(cord, ht, wd)
cv2.imshow("edge", img)
# Block until 'q' is pressed, then close all windows.
while True:
    if cv2.waitKey(0) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
import uamqp
from ._base_handler import _parse_conn_str, ServiceBusSharedKeyCredential
from ._servicebus_sender import ServiceBusSender
from ._servicebus_receiver import ServiceBusReceiver
from ._common._configuration import Configuration
from ._common.utils import create_authentication
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
class ServiceBusClient(object):
    """High-level entry point for creating ServiceBusSender and
    ServiceBusReceiver instances against a single Service Bus namespace.

    :ivar fully_qualified_namespace: The fully qualified host name for the Service Bus namespace,
     in the format `<yournamespace>.servicebus.windows.net`.
    :vartype fully_qualified_namespace: str
    :param str fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
    :param ~azure.core.credentials.TokenCredential credential: The credential object used for
     authentication, implementing a particular interface for getting tokens. It accepts
     :class:`ServiceBusSharedKeyCredential<azure.servicebus.ServiceBusSharedKeyCredential>`,
     credential objects generated by the azure-identity library, and objects implementing
     the `get_token(self, *scopes)` method.
    :keyword str entity_name: Optional name of a Queue or Topic. It must be specified when the
     credential is scoped to one specific Queue or Topic.
    :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
    :keyword transport_type: The type of transport protocol used for communicating with the
     Service Bus service. Default is `TransportType.Amqp`.
    :paramtype transport_type: ~azure.servicebus.TransportType
    :keyword dict http_proxy: HTTP proxy settings, a dictionary with keys `'proxy_hostname'` (str)
     and `'proxy_port'` (int); `'username'` and `'password'` may also be present.

    .. admonition:: Example:

        .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
            :start-after: [START create_sb_client_sync]
            :end-before: [END create_sb_client_sync]
            :language: python
            :dedent: 4
            :caption: Create a new instance of the ServiceBusClient.
    """

    def __init__(self, fully_qualified_namespace, credential, **kwargs):
        # type: (str, TokenCredential, Any) -> None
        self.fully_qualified_namespace = fully_qualified_namespace
        self._credential = credential
        self._config = Configuration(**kwargs)
        self._connection = None
        self._entity_name = kwargs.get("entity_name")
        # Token audience: the namespace URI, narrowed to the entity when given.
        auth_uri = "sb://{}".format(self.fully_qualified_namespace)
        if self._entity_name:
            auth_uri = "{}/{}".format(auth_uri, self._entity_name)
        self._auth_uri = auth_uri
        # Internal flag for switching whether to apply connection sharing,
        # pending fix in uamqp library.
        self._connection_sharing = False

    def __enter__(self):
        if self._connection_sharing:
            self._create_uamqp_connection()
        return self

    def __exit__(self, *args):
        self.close()

    def _create_uamqp_connection(self):
        # One shared AMQP connection reused by every sender/receiver
        # this client hands out (only when connection sharing is enabled).
        self._connection = uamqp.Connection(
            hostname=self.fully_qualified_namespace,
            sasl=create_authentication(self),
            debug=self._config.logging_enable
        )

    def close(self):
        # type: () -> None
        """Close down the ServiceBus client and the underlying connection.

        :return: None
        """
        if self._connection_sharing and self._connection:
            self._connection.destroy()

    @classmethod
    def from_connection_string(cls, conn_str, **kwargs):
        # type: (str, Any) -> ServiceBusClient
        """Create a ServiceBusClient from a connection string.

        :param str conn_str: The connection string of a Service Bus.
        :keyword str entity_name: Optional name of a Queue or Topic. It must be specified when
         the connection string is scoped to one specific Queue or Topic.
        :keyword bool logging_enable: Whether to output network trace logs to the logger.
         Default is `False`.
        :keyword transport_type: The type of transport protocol used for communicating with
         the Service Bus service. Default is `TransportType.Amqp`.
        :paramtype transport_type: ~azure.servicebus.TransportType
        :keyword dict http_proxy: HTTP proxy settings, a dictionary with keys `'proxy_hostname'`
         (str) and `'proxy_port'` (int); `'username'` and `'password'` may also be present.
        :rtype: ~azure.servicebus.ServiceBusClient

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START create_sb_client_from_conn_str_sync]
                :end-before: [END create_sb_client_from_conn_str_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusClient from connection string.
        """
        host, policy_name, shared_key, conn_str_entity = _parse_conn_str(conn_str)
        # An entity embedded in the connection string wins over the keyword.
        return cls(
            fully_qualified_namespace=host,
            entity_name=conn_str_entity or kwargs.pop("entity_name", None),
            credential=ServiceBusSharedKeyCredential(policy_name, shared_key),
            **kwargs
        )

    def get_queue_sender(self, queue_name, **kwargs):
        # type: (str, Any) -> ServiceBusSender
        """Get a ServiceBusSender for the given queue.

        :param str queue_name: The path of the Service Bus Queue the client connects to.
        :keyword int retry_total: The total number of attempts to redo a failed operation
         when an error occurs. Default value is 3.
        :rtype: ~azure.servicebus.ServiceBusSender
        :raises: :class:`ServiceBusConnectionError`
         :class:`ServiceBusAuthorizationError`

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START create_servicebus_sender_from_sb_client_sync]
                :end-before: [END create_servicebus_sender_from_sb_client_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusSender from ServiceBusClient.
        """
        # pylint: disable=protected-access
        return ServiceBusSender(
            fully_qualified_namespace=self.fully_qualified_namespace,
            queue_name=queue_name,
            credential=self._credential,
            logging_enable=self._config.logging_enable,
            transport_type=self._config.transport_type,
            http_proxy=self._config.http_proxy,
            connection=self._connection,
            **kwargs
        )

    def get_queue_receiver(self, queue_name, **kwargs):
        # type: (str, Any) -> ServiceBusReceiver
        """Get a ServiceBusReceiver for the given queue.

        :param str queue_name: The path of the Service Bus Queue the client connects to.
        :keyword mode: The mode with which messages are retrieved: PeekLock or ReceiveAndDelete.
         PeekLock messages must be settled within a given lock period before being removed from
         the queue; ReceiveAndDelete messages are removed immediately and cannot be rejected or
         re-received if processing fails. The default mode is PeekLock.
        :paramtype mode: ~azure.servicebus.ReceiveSettleMode
        :keyword session_id: A specific session from which to receive. This must be specified
         for a sessionful entity, otherwise it must be None. Set to NEXT_AVAILABLE to receive
         from the next available session.
        :paramtype session_id: str or ~azure.servicebus.NEXT_AVAILABLE
        :keyword int prefetch: The maximum number of messages to cache with each request to the
         service. The default value is 0 (one message at a time). Increasing it improves
         throughput but increases the chance that cached messages expire before processing.
        :keyword float idle_timeout: The timeout in seconds between received messages after
         which the receiver automatically shuts down. The default value is 0 (no timeout).
        :keyword int retry_total: The total number of attempts to redo a failed operation
         when an error occurs. Default value is 3.
        :rtype: ~azure.servicebus.ServiceBusReceiver
        :raises: :class:`ServiceBusConnectionError`
         :class:`ServiceBusAuthorizationError`

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START create_servicebus_receiver_from_sb_client_sync]
                :end-before: [END create_servicebus_receiver_from_sb_client_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusReceiver from ServiceBusClient.
        """
        # pylint: disable=protected-access
        return ServiceBusReceiver(
            fully_qualified_namespace=self.fully_qualified_namespace,
            queue_name=queue_name,
            credential=self._credential,
            logging_enable=self._config.logging_enable,
            transport_type=self._config.transport_type,
            http_proxy=self._config.http_proxy,
            connection=self._connection,
            **kwargs
        )

    def get_topic_sender(self, topic_name, **kwargs):
        # type: (str, Any) -> ServiceBusSender
        """Get a ServiceBusSender for the given topic.

        :param str topic_name: The path of the Service Bus Topic the client connects to.
        :keyword int retry_total: The total number of attempts to redo a failed operation
         when an error occurs. Default value is 3.
        :keyword float retry_backoff_factor: Delta back-off interval in seconds between retries.
         Default value is 0.8.
        :keyword float retry_backoff_max: Maximum back-off interval in seconds. Default value is 120.
        :rtype: ~azure.servicebus.ServiceBusSender

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START create_topic_sender_from_sb_client_sync]
                :end-before: [END create_topic_sender_from_sb_client_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusSender from ServiceBusClient.
        """
        return ServiceBusSender(
            fully_qualified_namespace=self.fully_qualified_namespace,
            topic_name=topic_name,
            credential=self._credential,
            logging_enable=self._config.logging_enable,
            transport_type=self._config.transport_type,
            http_proxy=self._config.http_proxy,
            connection=self._connection,
            **kwargs
        )

    def get_subscription_receiver(self, topic_name, subscription_name, **kwargs):
        # type: (str, str, Any) -> ServiceBusReceiver
        """Get a ServiceBusReceiver for the given subscription under the topic.

        :param str topic_name: The name of the Service Bus Topic the client connects to.
        :param str subscription_name: The name of the Service Bus Subscription under the
         given Topic.
        :keyword mode: The mode with which messages are retrieved: PeekLock or ReceiveAndDelete.
         PeekLock messages must be settled within a given lock period before being removed from
         the subscription; ReceiveAndDelete messages are removed immediately and cannot be
         rejected or re-received if processing fails. The default mode is PeekLock.
        :paramtype mode: ~azure.servicebus.ReceiveSettleMode
        :keyword session_id: A specific session from which to receive. This must be specified
         for a sessionful entity, otherwise it must be None. Set to NEXT_AVAILABLE to receive
         from the next available session.
        :paramtype session_id: str or ~azure.servicebus.NEXT_AVAILABLE
        :keyword int prefetch: The maximum number of messages to cache with each request to the
         service. The default value is 0 (one message at a time). Increasing it improves
         throughput but increases the chance that cached messages expire before processing.
        :keyword float idle_timeout: The timeout in seconds between received messages after
         which the receiver automatically shuts down. The default value is 0 (no timeout).
        :keyword int retry_total: The total number of attempts to redo a failed operation
         when an error occurs. Default value is 3.
        :keyword float retry_backoff_factor: Delta back-off interval in seconds between retries.
         Default value is 0.8.
        :keyword float retry_backoff_max: Maximum back-off interval in seconds. Default value is 120.
        :rtype: ~azure.servicebus.ServiceBusReceiver

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START create_subscription_receiver_from_sb_client_sync]
                :end-before: [END create_subscription_receiver_from_sb_client_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusReceiver from ServiceBusClient.
        """
        # pylint: disable=protected-access
        return ServiceBusReceiver(
            fully_qualified_namespace=self.fully_qualified_namespace,
            topic_name=topic_name,
            subscription_name=subscription_name,
            credential=self._credential,
            logging_enable=self._config.logging_enable,
            transport_type=self._config.transport_type,
            http_proxy=self._config.http_proxy,
            connection=self._connection,
            **kwargs
        )
|
# -*- coding: utf-8 -*-
""" Python library for operations with Spotify Web API and database """
from .database.sqlite3db import Sqlite3db
from .spotify.client import Client, chunk_playlist
def news(database_f, original_playlist_id):
    """Accumulate songs from the source playlist in the local database and,
    based on that, find tracks not yet stored there. Newly found tracks are
    pushed to a mirror ("news") clone of the original playlist.

    :param database_f: name of sqlite3db file. It will be created if it does not exist
    :param original_playlist_id: playlist ID, URI or URL
    :return: A list of track IDs
    """
    database = Sqlite3db(database_f=database_f)
    spotify = Client()
    already_known = database.select_songs(playlist_id=original_playlist_id)
    new_track_ids = spotify.update_mirror_playlist(
        original_playlist_id=original_playlist_id,
        known_track_ids=already_known,
        playlist_part_name='news')
    # Remember the new tracks so they are not reported again next run.
    database.insert_songs(playlist_id=original_playlist_id, song_ids=new_track_ids)
    database.close()
    return new_track_ids
def collections(database_f, original_playlist_id):
    """Get all accumulated songs from the local database and rebuild the
    "complete" mirror clone of the original playlist from them.

    :param database_f: name of sqlite3db file. It will be created if it does not exist
    :param original_playlist_id: playlist ID, URI or URL
    :return: A list of track IDs
    """
    database = Sqlite3db(database_f=database_f)
    spotify = Client()
    all_known = database.select_songs(playlist_id=original_playlist_id)
    mirror_id = spotify.get_mirror_playlist_id(
        original_playlist_id=original_playlist_id,
        playlist_part_name='complete')
    # Rebuild from scratch: clear the mirror, then re-add everything in chunks.
    spotify.playlist_remove_all_items(playlist_id=mirror_id)
    for batch in chunk_playlist(all_known):
        spotify.playlist_add_items(playlist_id=mirror_id, items=batch)
    database.close()
    return all_known
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sensor prototype class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import typing
_ARRAY = typing.Iterable[float]
_FLOAT_OR_ARRAY = typing.Union[float, _ARRAY]
_DATATYPE_LIST = typing.Iterable[typing.Any]
class Sensor(object):
    """A prototype (base) class for robot sensors.

    The robot reference starts out as None; the environment is expected to
    attach one via set_robot() when it resets the simulation.
    """

    def __init__(self, name: typing.Text):
        """Creates a sensor with the given name and no robot attached.

        Args:
          name: the name of the sensor
        """
        self._robot = None
        self._name = name

    def get_name(self) -> typing.Text:
        """Returns the sensor's name."""
        return self._name

    def get_observation_datatype(self):
        """Returns the data type for the numpy structured array.

        Subclasses should return a list of tuples: (name, datatype, shape).
        Reference: https://docs.scipy.org/doc/numpy-1.15.0/user/basics.rec.html
        Ex:
          return [('motor_angles', np.float64, (8, ))]  # motor angle sensor
          return [('IMU_x', np.float64), ('IMU_z', np.float64), ]  # IMU

        Returns:
          datatype: a list of data types.
        """
        pass

    def get_lower_bound(self):
        """Returns the lower bound of the observation.

        Returns:
          lower_bound: the lower bound of sensor values in np.array format
        """
        pass

    def get_upper_bound(self):
        """Returns the upper bound of the observation.

        Returns:
          upper_bound: the upper bound of sensor values in np.array format
        """
        pass

    def get_observation(self):
        """Returns the observation data.

        Returns:
          observation: the observed sensor values in np.array format
        """
        pass

    def set_robot(self, robot):
        """Attaches a robot instance to this sensor."""
        self._robot = robot

    def get_robot(self):
        """Returns the attached robot instance (None until set_robot is called)."""
        return self._robot

    def on_reset(self, env):
        """A callback for the reset event.

        Args:
          env: the environment that invokes this callback.
        """
        pass

    def on_step(self, env):
        """A callback for the step event.

        Args:
          env: the environment that invokes this callback.
        """
        pass

    def on_terminate(self, env):
        """A callback for the terminate event.

        Args:
          env: the environment that invokes this callback.
        """
        pass
class BoxSpaceSensor(Sensor):
    """A prototype class of sensors with Box-shaped observation spaces.

    Scalar bounds are broadcast to `shape`; array bounds are used as given.
    """

    def __init__(self,
                 name: typing.Text,
                 shape: typing.Tuple[int, ...],
                 lower_bound: _FLOAT_OR_ARRAY = -np.pi,
                 upper_bound: _FLOAT_OR_ARRAY = np.pi,
                 dtype=np.float64) -> None:
        """Constructs a box type sensor.

        Args:
          name: the name of the sensor
          shape: the shape of the sensor values
          lower_bound: the lower_bound of sensor value, scalar or np.array.
          upper_bound: the upper_bound of sensor value, scalar or np.array.
          dtype: data type of sensor value
        """
        super(BoxSpaceSensor, self).__init__(name)
        self._shape = shape
        self._dtype = dtype
        # Fix: accept ANY real scalar (int, float, numpy scalar). Previously
        # the isinstance(..., float) check let an integer bound fall through
        # to np.array(...), producing a 0-d array instead of broadcasting it
        # to `shape`.
        if np.isscalar(lower_bound):
            self._lower_bound = np.full(shape, lower_bound, dtype=dtype)
        else:
            self._lower_bound = np.array(lower_bound)
        if np.isscalar(upper_bound):
            self._upper_bound = np.full(shape, upper_bound, dtype=dtype)
        else:
            self._upper_bound = np.array(upper_bound)

    def get_shape(self) -> typing.Tuple[int, ...]:
        """Returns the shape of the sensor values."""
        return self._shape

    def get_dimension(self) -> int:
        """Returns the number of dimensions of the sensor values."""
        return len(self._shape)

    def get_dtype(self):
        """Returns the data type of the sensor values."""
        return self._dtype

    def get_observation_datatype(self) -> _DATATYPE_LIST:
        """Returns box-shape data type."""
        return [(self._name, self._dtype, self._shape)]

    def get_lower_bound(self) -> _ARRAY:
        """Returns the computed lower bound."""
        return self._lower_bound

    def get_upper_bound(self) -> _ARRAY:
        """Returns the computed upper bound."""
        return self._upper_bound

    def get_observation(self) -> np.ndarray:
        # NOTE(review): relies on a subclass-provided _get_observation();
        # the base classes do not define it — confirm subclasses implement it.
        return np.asarray(self._get_observation(), dtype=self._dtype)
|
"""This module includes everything that is needed to
submit flags to the game server."""
import time
import asyncio
import logging
import codecs
from helperlib.logging import scope_logger
from ctfpwn.shared import TooMuchConnectionsException
log = logging.getLogger(__name__)
LOG_LEVEL_STAT = logging.INFO + 1
logging.addLevelName(LOG_LEVEL_STAT, 'STATISTIC')
@scope_logger
class FlagSubmissionProtocol(asyncio.Protocol):
    """An interface to the gameserver for submitting flags.
    Every instance may submit one or more flags at once."""

    def __init__(self, flags, flag_db, config, future,
                 loop=None, timeout=5):
        # flags: list of flag documents; each is accessed with .get('flag')
        # and .get('service'), so dict-like objects are assumed.
        self.flags = flags
        self.db = flag_db
        self.loop = loop or asyncio.get_event_loop()
        self.config = config
        # Resolved in connection_lost() (or with an exception on fatal errors).
        self.future = future
        self.current_flag = None
        # Per-outcome buckets, filled while gameserver replies are parsed and
        # written back to the flag database in _update_flag_states().
        self.flags_success = list()
        self.flags_failed = list()
        self.flags_expired = list()
        self.flags_pending = list()
        self.connection_timeout = timeout
        # Set once the gameserver's "ready" banner has been received.
        self.ready_for_submission = False

    def timeout(self):
        """This function will be called
        if a timeout occurs."""
        # Closing the transport fires connection_lost(), which finalizes state.
        self.transport.close()

    def reset_timer(self):
        # Restart timeout timer
        self.h_timeout.cancel()
        self.h_timeout = asyncio.get_event_loop().call_later(
            self.connection_timeout, self.timeout)

    def connection_made(self, transport):
        self.transport = transport
        self.log.debug('[GAMESERVER] Connection established')
        # Start 5 seconds timeout timer
        self.h_timeout = asyncio.get_event_loop().call_later(
            self.connection_timeout, self.timeout)

    def data_received(self, incoming):
        # Classify the gameserver's reply to the previously sent flag, then
        # submit the next queued flag (the shared tail below) until exhausted.
        # NOTE(review): assumes each received chunk holds one complete reply —
        # confirm the gameserver protocol is strictly line-per-message.
        incoming = incoming.decode().strip()
        if not self.ready_for_submission:
            if not incoming.endswith(self.config.get('game_server_msg_ready')):
                self.log.debug('[GAMESERVER] Gameserver is not ready yet')
                self.reset_timer()
                return
            else:
                self.log.debug('[GAMESERVER] Gameserver is ready, sending flags')
                self.ready_for_submission = True
        else:
            if self.config.get('game_server_msg_success') in incoming or \
                    self.config.get('game_server_msg_success2') in incoming:
                self.flags_success.append(self.current_flag.get('flag'))
            elif self.config.get('game_server_msg_service_down') in incoming:
                self.log.error('[GAMESERVER] [%s] GAMESERVER REPORTED SERVICE NOT AVAILABLE!',
                               self.current_flag.get('service'))
                # Service down: keep the flag pending so it can be retried.
                self.flags_pending.append(self.current_flag.get('flag'))
            elif self.config.get('game_server_msg_expired') in incoming:
                # log.debug('Flag expired')
                self.flags_expired.append(self.current_flag.get('flag'))
            elif self.config.get('game_server_msg_invalid') in incoming:
                # log.debug('Invalid flag')
                self.flags_failed.append(self.current_flag.get('flag'))
            elif self.config.get('game_server_msg_own_flag') in incoming:
                # log.debug('Own flag')
                self.flags_failed.append(self.current_flag.get('flag'))
            elif self.config.get('game_server_msg_already_submitted') in incoming:
                # log.debug('Flag already submitted')
                self.flags_failed.append(self.current_flag.get('flag'))
            elif self.config.get('game_server_msg_too_much') in incoming:
                self.log.warning('[GAMESERVER] Too much connections to the gameserver!')
                # Abort the whole batch; remaining flags are marked pending
                # in _update_flag_states() after the connection closes.
                self.future.set_exception(TooMuchConnectionsException)
                self.transport.close()
                return
            else:
                self.log.warning('Unknown gameserver message: %r', incoming.strip())
        if not len(self.flags):
            self.transport.close()
            return
        self.reset_timer()
        self.current_flag = self.flags.pop(0)
        self._writeline(self.current_flag.get('flag'))

    def connection_lost(self, reason):
        log.debug('[GAMESERVER] Lost Gameserver connection')
        self.h_timeout.cancel()
        if not self.future.done():
            self.future.set_result(True)
        # Persist the outcome of this submission round asynchronously.
        self.loop.create_task(self._update_flag_states())

    def _writeline(self, data):
        # Encode str payloads to bytes and terminate with a newline.
        if isinstance(data, str):
            data = codecs.encode(data)
        self.transport.write(data + b'\n')

    async def _update_flag_states(self):
        # Write the per-flag outcomes back to the flag database and log stats.
        t0 = time.time()
        if self.flags_success:
            for flag in self.flags_success:
                await self.db.update_submitted(flag)
        if self.flags_pending:
            for flag in self.flags_pending:
                await self.db.update_pending(flag)
        if self.flags_expired:
            for flag in self.flags_expired:
                await self.db.update_expired(flag)
        if self.flags_failed:
            for flag in self.flags_failed:
                await self.db.update_failed(flag)
        # If the connection has been lost prematurely,
        # mark the not yet processed flags as PENDING.
        if self.flags:
            for flag in self.flags:
                await self.db.update_pending(flag)
        self.log.log(LOG_LEVEL_STAT, '[SUBMISSION] [ACCEPTED %d] [PENDING %d] [EXPIRED %d] [UNKNOWN %d]',
                     len(self.flags_success),
                     len(self.flags_pending + self.flags),
                     len(self.flags_expired),
                     len(self.flags_failed))
        self.log.log(LOG_LEVEL_STAT, '[GAMESERVER] Updating flag stats took %f seconds', time.time() - t0)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-09 07:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a sortable integer ``order`` field (default 1) to the
    Certification and Skill models."""

    dependencies = [
        ('core', '0003_certification_skill'),
    ]

    operations = [
        migrations.AddField(
            model_name='certification',
            name='order',
            field=models.PositiveIntegerField(default=1),
        ),
        migrations.AddField(
            model_name='skill',
            name='order',
            field=models.PositiveIntegerField(default=1),
        ),
    ]
|
# relpath.py
from webpie import WPApp, WPHandler

class MyHandler(WPHandler):
    # Handles /hello/<rest-of-path>: echoes the remainder of the URL path.
    def hello(self, request, relpath):
        return "Hello %s!\n" % (relpath,)  # 1

# Serve the handler on port 8080 (blocks until interrupted).
WPApp(MyHandler).run_server(8080)
|
import re
import logging
import os
import gzip
import pandas
import Expression
from ..misc import Math
########################################################################################################################
class ExpressionManager(Expression.ExpressionManager):
    """Loads every expression file into memory up front (enter()) and serves
    per-gene expression vectors from the cached data frames."""

    def __init__(self, folder, pattern=None, standardise=False):
        gene_map, file_map = _structure(folder, pattern)
        self.gene_map = gene_map        # gene -> {model name: column index}
        self.file_map = file_map        # model name -> file path
        self.d = None                   # model name -> DataFrame; filled by enter()
        self.standardise = standardise

    def expression_for_gene(self, gene):
        """Return {model: expression vector} for `gene`, optionally standardised."""
        models = self.gene_map[gene]
        d = {model: self.d[model][gene].values for model in sorted(models.keys())}
        if self.standardise:
            # BUG FIX: the original iterated over an undefined name `k`
            # (and used Python-2-only iteritems()); iterate the dict built
            # above, as the sibling memory-efficient class does with its `k`.
            d_ = {}
            for key, value in d.items():
                t_ = Math.standardize(value)
                # Drop series that cannot be standardised (e.g. zero variance).
                if t_ is not None:
                    d_[key] = t_
            d = d_
        return d

    def get_genes(self):
        return self.gene_map.keys()

    def enter(self):
        # Eagerly load every expression table into memory.
        d = {}
        for name in sorted(self.file_map.keys()):
            path = self.file_map[name]
            logging.log(9, "Loading %s", path)
            d[name] = pandas.read_table(path)
        self.d = d

    def exit(self):
        pass
class ExpressionManagerMemoryEfficient(Expression.ExpressionManager):
    """Reads only the requested gene's column from each expression file,
    trading extra I/O for a much smaller memory footprint."""

    def __init__(self, folder, pattern, standardise=False):
        gene_map, file_map = _structure(folder, pattern)
        self.gene_map = gene_map        # gene -> {model name: column index}
        self.file_map = file_map        # model name -> file path
        self.standardise = standardise

    def expression_for_gene(self, gene):
        """Return {model: expression vector} for `gene`, optionally standardised."""
        models = self.gene_map[gene]
        k = {model: pandas.read_table(self.file_map[model], usecols=[gene])[gene].values
             for model in sorted(models.keys())}
        if self.standardise:
            # FIX: items() works on both Python 2 and 3; iteritems() is 2-only.
            k_ = {}
            for key, value in k.items():
                t_ = Math.standardize(value)
                # Drop series that cannot be standardised (e.g. zero variance).
                if t_ is not None:
                    k_[key] = t_
            k = k_
        return k

    def get_genes(self):
        return self.gene_map.keys()
_exregex = re.compile("TW_(.*)_0.5.expr.txt")
def _structure(folder, pattern=None):
logging.info("Acquiring expression files")
files = os.listdir(folder)
gene_map = {}
file_map = {}
_regex = re.compile(pattern) if pattern else None
for file in files:
if _regex:
if _regex.search(file):
name = _regex.match(file).group(1)
else:
continue
else:
name = file
name = name.replace("-", "_")
path = os.path.join(folder, file)
_o = gzip.open if ".gz" in file else open
with _o(path) as f:
comps = f.readline().strip().split()
for i,gene in enumerate(comps):
if not gene in gene_map:
gene_map[gene] = {}
gene_map[gene][name] = i
file_map[name] = path
return gene_map, file_map
########################################################################################################################
class Expression(object):
    """Single-file expression source: one tab-separated table whose columns
    are genes."""
    def __init__(self, path):
        self.path = path
        self.d = None
    def expression_for_gene(self, gene):
        """Return the expression values for *gene* (one table column)."""
        return self.d[gene].values
    def get_genes(self):
        """Return the gene names, i.e. the table's column labels."""
        return self.d.columns.values
    def enter(self):
        """Load the expression table from disk."""
        self.d = pandas.read_table(self.path)
    def exit(self):
        """No resources to release."""
        pass
import gym
import numpy as np
from envs.deep_cure_env import DeepCure, random_base_infect_rate, random_lifetime, ForeignCountry
import matplotlib.pyplot as plt
from plotting import plot
from stable_baselines3 import DQN, A2C
import torch as th
from stable_baselines3.common.callbacks import EvalCallback
def lr(progress):
    """Learning-rate schedule: 0.001 scaled by sqrt(progress/100).

    NOTE(review): assumes *progress* is the remaining-progress fraction
    supplied by stable-baselines3 (1.0 -> 0.0) — confirm against the trainer.
    """
    scaled = progress / 100
    return 0.001 * np.sqrt(scaled)
# Build the environment: one foreign country, discrete action space so the
# env can be driven by DQN; histories kept for plotting at the end.
env = DeepCure(foreign_countries = [ForeignCountry(0.1,100,100_000, save_history=True)], use_discrete = True, save_history=True)
# Periodically evaluate and checkpoint the best model into the working dir.
eval_callback = EvalCallback(env, best_model_save_path='./',
                             log_path='./', eval_freq=500,
                             deterministic=True, render=False)
# Tiny MLP policy: sigmoid activations, a single hidden layer of 5 units.
policy_kwargs = dict(activation_fn=th.nn.Sigmoid, net_arch=[5])
model = DQN("MlpPolicy", env, batch_size=2, learning_rate=lr, policy_kwargs=policy_kwargs, verbose=1)
model.learn(total_timesteps=100000, n_eval_episodes=100000, callback=eval_callback)
model.save("dqn_stable")
# Reload the saved weights and roll out one deterministic episode.
model = DQN.load("dqn_stable")
obs = env.reset(rate=2.7)
while True:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        break
plot(env)
|
# Error message constants for token-based authorization failures
# (public-key lookup, verification, scope and claims checks).
NOT_AUTHENTICATED = "Not authenticated"
NO_PUBLICKEY = "JWK public Attribute for authorization token not found"
NOT_VERIFIED = "Not verified"
SCOPE_NOT_MATCHED = "Scope not matched"
NOT_VALIDATED_CLAIMS = "Validation Error for Claims"
|
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libisys/src/idl/DiagLogSettings.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.diag
# interface
class DiagLogSettings(Interface):
    """Generated JSON-RPC proxy for the "diag.DiagLogSettings" IDL interface:
    reads and adjusts per-context diagnostic log levels.

    NOTE: this file is generated by IdlC (see the header comment) — do not
    hand-edit beyond documentation.
    """
    idlType = "diag.DiagLogSettings:1.0.0"
    # enumeration
    class LogLevel(Enumeration):
        """Log verbosity levels, ordered from NONE (0) up to TRACE (5)."""
        idlType = "diag.DiagLogSettings.LogLevel:1.0.0"
        values = [
            "LOG_LEVEL_NONE",
            "LOG_LEVEL_ERR",
            "LOG_LEVEL_WARN",
            "LOG_LEVEL_INFO",
            "LOG_LEVEL_DEBUG",
            "LOG_LEVEL_TRACE",
        ]
    LogLevel.LOG_LEVEL_NONE = LogLevel(0)
    LogLevel.LOG_LEVEL_ERR = LogLevel(1)
    LogLevel.LOG_LEVEL_WARN = LogLevel(2)
    LogLevel.LOG_LEVEL_INFO = LogLevel(3)
    LogLevel.LOG_LEVEL_DEBUG = LogLevel(4)
    LogLevel.LOG_LEVEL_TRACE = LogLevel(5)
    # structure
    class LogLevelEntry(Structure):
        """Pairs a log context name with its configured LogLevel."""
        idlType = "diag.DiagLogSettings.LogLevelEntry:1.0.0"
        elements = ["ctxName", "logLevel"]
        def __init__(self, ctxName, logLevel):
            typecheck.is_string(ctxName, AssertionError)
            typecheck.is_enum(
                logLevel, raritan.rpc.diag.DiagLogSettings.LogLevel, AssertionError
            )
            self.ctxName = ctxName
            self.logLevel = logLevel
        @classmethod
        def decode(cls, json, agent):
            """Build an entry from its JSON wire representation."""
            obj = cls(
                ctxName=json["ctxName"],
                logLevel=raritan.rpc.diag.DiagLogSettings.LogLevel.decode(
                    json["logLevel"]
                ),
            )
            return obj
        def encode(self):
            """Serialize this entry back to its JSON wire representation."""
            json = {}
            json["ctxName"] = self.ctxName
            json["logLevel"] = raritan.rpc.diag.DiagLogSettings.LogLevel.encode(
                self.logLevel
            )
            return json
    # Integer result codes returned by the setter/getter RPCs below.
    ERR_NONE = 0
    ERR_UNKNOWN_LOG_CONTEXT_NAME = 1
    ERR_UNKNOWN_LOG_LEVEL = 2
    def resetLogLevelsForAllCtxNames(self):
        """Invoke the resetLogLevelsForAllCtxNames RPC (no return value)."""
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "resetLogLevelsForAllCtxNames", args)
    def getLogLevelsForAllCtxNames(self):
        """Invoke the RPC and return a list of decoded LogLevelEntry structs."""
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getLogLevelsForAllCtxNames", args)
        _ret_ = [
            raritan.rpc.diag.DiagLogSettings.LogLevelEntry.decode(x0, agent)
            for x0 in rsp["_ret_"]
        ]
        # Validate every decoded element before handing it to the caller.
        for x0 in _ret_:
            typecheck.is_struct(
                x0, raritan.rpc.diag.DiagLogSettings.LogLevelEntry, DecodeException
            )
        return _ret_
    def setLogLevelForAllCtxNames(self, logLevel):
        """Set one LogLevel for all contexts; returns an integer result code
        (presumably one of the ERR_* constants above — defined by the IDL)."""
        agent = self.agent
        typecheck.is_enum(
            logLevel, raritan.rpc.diag.DiagLogSettings.LogLevel, AssertionError
        )
        args = {}
        args["logLevel"] = raritan.rpc.diag.DiagLogSettings.LogLevel.encode(logLevel)
        rsp = agent.json_rpc(self.target, "setLogLevelForAllCtxNames", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def getLogLevelByCtxName(self, ctxName):
        """Return a (result_code, LogLevel) tuple for the given context name."""
        agent = self.agent
        typecheck.is_string(ctxName, AssertionError)
        args = {}
        args["ctxName"] = ctxName
        rsp = agent.json_rpc(self.target, "getLogLevelByCtxName", args)
        _ret_ = rsp["_ret_"]
        logLevel = raritan.rpc.diag.DiagLogSettings.LogLevel.decode(rsp["logLevel"])
        typecheck.is_int(_ret_, DecodeException)
        typecheck.is_enum(
            logLevel, raritan.rpc.diag.DiagLogSettings.LogLevel, DecodeException
        )
        return (_ret_, logLevel)
    def setLogLevelByCtxName(self, ctxName, logLevel):
        """Set the LogLevel of a single context; returns an integer result code."""
        agent = self.agent
        typecheck.is_string(ctxName, AssertionError)
        typecheck.is_enum(
            logLevel, raritan.rpc.diag.DiagLogSettings.LogLevel, AssertionError
        )
        args = {}
        args["ctxName"] = ctxName
        args["logLevel"] = raritan.rpc.diag.DiagLogSettings.LogLevel.encode(logLevel)
        rsp = agent.json_rpc(self.target, "setLogLevelByCtxName", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
|
# Generated by Django 2.2.12 on 2020-06-22 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the choices and default of
    LAMetroSubject.classification (a CharField of facet-style identifiers).

    NOTE: generated by Django 2.2.12 (see header) — keep the choices list
    order/content in sync with the model definition.
    """
    dependencies = [
        ('lametro', '0004_add_subject_classification'),
    ]
    operations = [
        migrations.AlterField(
            model_name='lametrosubject',
            name='classification',
            field=models.CharField(choices=[
                ('bill_type_exact', 'Board Report Type'),
                ('lines_and_ways_exact', 'Lines / Ways'),
                ('phase_exact', 'Phase'),
                ('project_exact', 'Project'),
                ('metro_location_exact', 'Metro Location'),
                ('geo_admin_location_exact', 'Geographic / Administrative Location'),
                ('significant_date_exact', 'Significant Date'),
                ('motion_by_exact', 'Motion By'),
                ('topics_exact', 'Subject')
            ], default='topics_exact', max_length=256),
        ),
    ]
|
import unittest
from archie.projectmodel.entities.ProjectLayout import ProjectLayout
from archie.projectmodel.entities.ProjectServices import ProjectServices
from archie.dependencies.behaviours.FindIncludeDependencies import FindIncludeDependencies, ModuleCollection
from archie.dependencies.entities.IncludeDependencyAnalyzer import IncludeDependencyAnalyzer
class StubProjectServices(ProjectServices):
    """In-memory fake of ProjectServices: records filesystem operations in
    plain dicts/sets instead of touching the real filesystem."""
    def __init__(self):
        self.linked_files = {}
        self.files_lists = {}
        self.folders_lists = {}
        self.file_dates = {}
        self.removed_files = set()
    def createLinkedFile(self, source_file, dest_folder):
        # Just remember the requested link.
        self.linked_files[source_file] = dest_folder
    def removeFile(self, file_path):
        self.removed_files.add(file_path)
    def folderExists(self, folder_path):
        return folder_path in self.folders_lists
    def createFolder(self, folder_path):
        # A new folder starts with no sub-folders and no files.
        self.folders_lists[folder_path] = []
        self.files_lists[folder_path] = []
    def statFile(self, file_path):
        return self.file_dates[file_path]
    def fileExists(self, file_path):
        return file_path in self.file_dates
    def listFiles(self, folder):
        return self.files_lists[folder]
    def listFolders(self, folder):
        return self.folders_lists[folder]
class StubIncludeDependencyAnalyzer(IncludeDependencyAnalyzer):
    """Fake analyzer that returns canned include lists per translation unit."""
    def __init__(self):
        self.include_dependencies = {}
    def listIncludes(self, translation_unit):
        # Key the lookup on the first source file, falling back to the first
        # header file; units with neither report no includes.
        for candidates in (translation_unit.source_files, translation_unit.header_files):
            if candidates:
                return self.include_dependencies[candidates[0]]
        return []
class TestFindIncludeDependencies(unittest.TestCase):
    """Tests for FindIncludeDependencies using stubbed services/analyzer.

    FIX: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual`` throughout.
    """
    def test_find_include_dependencies(self):
        """Header includes resolve to full paths within the source folder."""
        layout = ProjectLayout()
        layout.addSourceFolder('Source')
        services = StubProjectServices()
        services.files_lists['Source'] = []
        services.files_lists['Source/Module1'] = ['File1.cpp', 'File1.h', 'File2.h']
        services.files_lists['build/include/T1'] = []
        services.folders_lists['Source'] = ['Source/Module1']
        services.folders_lists['Source/Module1'] = []
        analyzer = StubIncludeDependencyAnalyzer()
        analyzer.include_dependencies['Source/Module1/File1.cpp'] = ['File1.h', 'File2.h']
        analyzer.include_dependencies['Source/Module1/File2.h'] = []
        resolver = FindIncludeDependencies(layout, services, analyzer)
        resolver.findIncludeDependencies()
        self.assertEqual(['Source/Module1/File1.h', 'Source/Module1/File2.h'],
                         resolver.dependenciesOf('Source/Module1/File1.cpp'))
    def test_find_ordered_include_dependencies(self):
        """Modules are ordered by dependency; direct/transitive counts match."""
        layout = ProjectLayout()
        layout.addSourceFolder('Source')
        services = StubProjectServices()
        services.files_lists['Source'] = []
        services.files_lists['Source/Hangman'] = ['HangmanGame.cpp', 'HangmanGame.h', 'Main.cpp', 'Wordlist_p.h', 'Wordlist_p.cpp']
        services.files_lists['build/include/T1'] = []
        services.folders_lists['Source'] = ['Source/Hangman']
        services.folders_lists['Source/Hangman'] = []
        analyzer = StubIncludeDependencyAnalyzer()
        analyzer.include_dependencies['Source/Hangman/Main.cpp'] = ['HangmanGame.h']
        analyzer.include_dependencies['Source/Hangman/HangmanGame.cpp'] = ['HangmanGame.h', 'Wordlist_p.h']
        analyzer.include_dependencies['Source/Hangman/Wordlist_p.cpp'] = ['Wordlist_p.h']
        analyzer.include_dependencies['Source/Hangman/Wordlist_p.h'] = []
        analyzer.include_dependencies['Source/Hangman/HangmanGame.h'] = []
        resolver = FindIncludeDependencies(layout, services, analyzer)
        resolver.findIncludeDependencies()
        self.assertEqual([['Wordlist_p', 'HangmanGame', 'Main']], resolver.getModuleList())
        self.assertEqual(1, resolver.numDependenciesTo('Main', 'HangmanGame'))
        self.assertEqual(0, resolver.numDependenciesTo('Main', 'Wordlist_p'))
        self.assertEqual(1, resolver.numDependenciesTo('HangmanGame', 'Wordlist_p'))
        self.assertEqual(0, resolver.numDependenciesTo('HangmanGame', 'Main'))
        self.assertEqual(1, resolver.numTransitiveDependenciesTo('Main', 'HangmanGame'))
        self.assertEqual(2, resolver.numTransitiveDependenciesTo('Main', 'Wordlist_p'))
        self.assertEqual(1, resolver.numTransitiveDependenciesTo('HangmanGame', 'Wordlist_p'))
    def test_handle_loops_in_include_dependencies(self):
        """A full cycle between modules collapses into one loop pseudo-module."""
        layout = ProjectLayout()
        layout.addSourceFolder('Source')
        services = StubProjectServices()
        services.files_lists['Source'] = []
        services.files_lists['Source/Hangman'] = ['HangmanGame.cpp', 'HangmanGame.h', 'Main.cpp', 'Main.h', 'Wordlist_p.h', 'Wordlist_p.cpp']
        services.files_lists['build/include/T1'] = []
        services.folders_lists['Source'] = ['Source/Hangman']
        services.folders_lists['Source/Hangman'] = []
        analyzer = StubIncludeDependencyAnalyzer()
        analyzer.include_dependencies['Source/Hangman/Main.cpp'] = ['HangmanGame.h']
        analyzer.include_dependencies['Source/Hangman/HangmanGame.cpp'] = ['Wordlist_p.h']
        analyzer.include_dependencies['Source/Hangman/Wordlist_p.cpp'] = ['Main.h']
        analyzer.include_dependencies['Source/Hangman/Wordlist_p.h'] = []
        analyzer.include_dependencies['Source/Hangman/HangmanGame.h'] = []
        analyzer.include_dependencies['Source/Hangman/Main.h'] = []
        resolver = FindIncludeDependencies(layout, services, analyzer)
        resolver.findIncludeDependencies()
        self.assertEqual(['(Source/Hangman:3)'], resolver.getModuleList())
        self.assertEqual(1, resolver.numDependenciesTo('Main', 'HangmanGame'))
        self.assertEqual(0, resolver.numDependenciesTo('Main', 'Wordlist_p'))
        self.assertEqual(1, resolver.numDependenciesTo('HangmanGame', 'Wordlist_p'))
        self.assertEqual(0, resolver.numDependenciesTo('HangmanGame', 'Main'))
    def test_handle_sub_component_loops_in_include_dependencies(self):
        """A header-only cycle becomes a nested group within the module list."""
        layout = ProjectLayout()
        layout.addSourceFolder('Source')
        services = StubProjectServices()
        services.files_lists['Source'] = []
        services.files_lists['Source/Hangman'] = ['HangmanGame.cpp', 'HangmanGame.h', 'Main.cpp', 'Wordlist_p.h', 'Wordlist_p.cpp', 'Loop1.h', 'Loop2.h', 'Loop3.h']
        services.files_lists['build/include/T1'] = []
        services.folders_lists['Source'] = ['Source/Hangman']
        services.folders_lists['Source/Hangman'] = []
        analyzer = StubIncludeDependencyAnalyzer()
        analyzer.include_dependencies['Source/Hangman/Main.cpp'] = ['HangmanGame.h']
        analyzer.include_dependencies['Source/Hangman/HangmanGame.cpp'] = ['HangmanGame.h', 'Wordlist_p.h']
        analyzer.include_dependencies['Source/Hangman/Wordlist_p.cpp'] = ['Wordlist_p.h', 'Loop1.h']
        analyzer.include_dependencies['Source/Hangman/Wordlist_p.h'] = []
        analyzer.include_dependencies['Source/Hangman/HangmanGame.h'] = []
        analyzer.include_dependencies['Source/Hangman/Loop1.h'] = ['Loop2.h']
        analyzer.include_dependencies['Source/Hangman/Loop2.h'] = ['Loop3.h']
        analyzer.include_dependencies['Source/Hangman/Loop3.h'] = ['Loop1.h']
        resolver = FindIncludeDependencies(layout, services, analyzer)
        resolver.findIncludeDependencies()
        self.assertEqual([[['Loop1', 'Loop2', 'Loop3'], 'Wordlist_p', 'HangmanGame', 'Main']], resolver.getModuleList())
        self.assertEqual(1, resolver.numDependenciesTo('Main', 'HangmanGame'))
        self.assertEqual(0, resolver.numDependenciesTo('Main', 'Wordlist_p'))
        self.assertEqual(1, resolver.numDependenciesTo('HangmanGame', 'Wordlist_p'))
        self.assertEqual(0, resolver.numDependenciesTo('HangmanGame', 'Main'))
|
#!/bin/python3
# -*- coding: utf-8 -*-
"""
This file is part of the TangoMan PyValidator package.
(c) "Matthias Morin" <mat@tangoman.io>
This source file is subject to the MIT license that is bundled
with this source code in the file LICENSE.
"""
from typing import Any
from pyvalidator.constraints.abstract_validator import AbstractValidator
from pyvalidator.type_hint import TypeHint
class AnyValidator(AbstractValidator):
    """Validator for the ``typing.Any`` alias.

    ``Any`` matches every value by definition, so a hint whose annotation is
    ``Any`` is both supported by this validator and always valid.
    """
    def __init__(self, hint: TypeHint = None, value: Any = None) -> None:
        super().__init__(hint, value)

    @property
    def is_supported(self) -> bool:
        # Only hints whose annotation is exactly typing.Any are handled here.
        annotation = self.hint.annotation
        return annotation is Any

    @property
    def is_valid(self) -> bool:
        # Any accepts every value, so validity mirrors support.
        annotation = self.hint.annotation
        return annotation is Any
|
import os
import librosa
import numpy as np
import matplotlib.pyplot as plt
import h5py
dataset_path = '/Users/wiktor/Documents/Projects/IRDM/shufl/magnatagatune_dataset/mp3/'
tags_file = 'data/tags'
idToPath_file = 'data/idToPath'
idToPath = {}
stft_window = 2048
hop = stft_window / 4
#create h5py file for storing spectrogram arrays
spectrograms_file = h5py.File('data/spectrograms.hdf5','a')
#debug: compression tests
# spectrograms_file_comp = h5py.File('data/spectrograms_comp.hdf5','a')
#create clip_id <-> filename mappings
for line in open(idToPath_file):
tags = line.split()
idToPath[tags[0]] = tags[1]
total_clips = 25863
#debug: compression tests
# test = 0
# generate spectogram for each clip with non empty tags
with open(tags_file, "r") as f:
for index, line in enumerate(f):
tags = line.split()
if (len(tags) > 0):
path = dataset_path+idToPath[tags[0]]
try:
y, sr = librosa.load(path, sr=16000)
except IOError:
print idToPath[tags[0]] + ' not found.'
except EOFError:
print 'EOFError: not sure what it is. Path: ' + idToPath[tags[0]]
else:
if tags[0] not in spectrograms_file:
S = librosa.feature.melspectrogram(
y=y, sr=sr, n_mels=128, fmax=sr/2,
n_fft = stft_window, hop_length=hop)
# this gives us a bit over 10% compression (uncompressed
# size for 100 spectrograms is 372877408 bytes)
# spectrograms_file.create_dataset(tags[0],
# chunks=(128,1214), shuffle=True, data=S,
# compression="gzip", compression_opts=9)
# limiting precision to 10 decimal points gives ~50%
# compression, 8 decimals 65%
# spectrograms_file.create_dataset(tags[0],
# chunks=(128,1214), shuffle=True, scaleoffset=8, data=S,
# compression="gzip", compression_opts=9)
spectrograms_file.create_dataset(
tags[0], chunks=(128,456), shuffle=True,
scaleoffset=8, data=S, compression="gzip",
compression_opts=9)
# arr = spectrograms_file[tags[0]][()]
# debug: compression tests
test+=1;
if test == 100:
break;
if index%260 == 0:
print str(index*100/total_clips) + "%"
# comment out to inspect the spectrogram shape
# break;
# generate and display the spectrogram plot
# librosa.display.specshow(
# librosa.logamplitude(arr,ref_power=np.max),sr=sr, hop_length=hop,
# y_axis='mel', fmax=sr/2,x_axis='time')
# plt.colorbar(format='%+2.0f dB')
# plt.title('Mel spectrogram')
# plt.tight_layout()
# plt.show()
spectrograms_file.close()
|
#!/usr/bin/env python3
import isce
from isceobj.Sensor import createSensor
import shelve
import argparse
import glob
from isceobj.Util import Poly1D
from isceobj.Planet.AstronomicalHandbook import Const
import os
from mroipac.dopiq.DopIQ import DopIQ
#from isceobj.Util.decorators import use_api
import copy
def cmdLineParse():
    '''
    Command line parser.

    Returns the parsed argparse.Namespace with:
      h5dir        -- input data directory (required)
      slcdir       -- output SLC directory (required)
      resampFlag   -- optional 'fbd2fbs' or 'fbs2fbd' resampling flag
      polarization -- polarization to extract (default 'HH')
      multiple     -- search multiple sub-directories for frames

    FIX: corrected the "Deafult" typo in the user-facing help text.
    '''
    parser = argparse.ArgumentParser(description='Unpack CSK SLC data and store metadata in pickle file.')
    parser.add_argument('-i','--input', dest='h5dir', type=str,
            required=True, help='Input CSK directory')
    parser.add_argument('-o', '--output', dest='slcdir', type=str,
            required=True, help='Output SLC directory')
    parser.add_argument('-f', '--resample_flag', dest='resampFlag', type=str,
            default=None, help='fbd2fbs or fbs2fbd resampling flag')
    parser.add_argument('-p', '--polarization', dest='polarization', type=str,
            default='HH', help='polarization in case if quad or full pol data exists. Default: HH')
    parser.add_argument('-m', '--mult', dest='multiple',
            action='store_true', default=False,
            help='Use multiple frames')
    return parser.parse_args()
#@use_api
def unpack(hdf5, slcname, multiple=False):
    '''
    Unpack HDF5 to binary SLC file.

    hdf5     -- input directory holding CEOS IMG-*/LED* files (despite the
                parameter name, an ALOS sensor is created and CEOS files are
                globbed here)
    slcname  -- output directory; its basename is used as the date label
    multiple -- when True, gather image/leader files from all sub-directories

    NOTE(review): this relies on the module-level ``inps`` namespace parsed
    in __main__ (``inps.polarization``, ``inps.resampFlag``); it is not
    usable as a standalone library function without it.
    '''
    os.makedirs(slcname, exist_ok=True)
    date = os.path.basename(slcname)
    obj = createSensor('ALOS')
    obj.configure()
    if multiple:
        print('Trying multiple subdirs...')
        obj._imageFileList = glob.glob(os.path.join(hdf5, '*', 'IMG-' + inps.polarization + '*'))
        obj._leaderFileList = glob.glob(os.path.join(hdf5, '*', 'LED*'))
        # Fall back to a flat directory layout if the sub-dir search was empty.
        if (len(obj._imageFileList) == 0) or (len(obj._leaderFileList) == 0):
            print('No imagefiles / leaderfiles found in sub-dirs. Trying same directory ...')
            obj._imageFileList = glob.glob(os.path.join(hdf5, 'IMG-' + inps.polarization + '*'))
            obj._leaderFileList = glob.glob(os.path.join(hdf5, 'LED*'))
    else:
        # Single-frame mode: take the first matching image/leader pair.
        imgname = glob.glob(os.path.join(hdf5, '*' , 'IMG-' + inps.polarization + '*'))[0]
        ldrname = glob.glob(os.path.join(hdf5, '*' , 'LED*'))[0]
        obj._leaderFileList = [ldrname]
        obj._imageFileList = [imgname]
    obj.output = os.path.join(slcname, date+'.raw')
    print(obj._leaderFileList)
    print(obj._imageFileList)
    print(obj.output)
    #if inps.fbd2fbs:
    #    print('fbd2fbs flag activated')
    #    obj._resampleFlag = 'dual2single'
    # Map the CLI resampling flag onto the sensor's internal resample mode.
    if inps.resampFlag == 'fbd2fbs':
        print('fbd2fbs flag activated')
        obj._resampleFlag = 'dual2single'
    elif inps.resampFlag == 'fbs2fbd':
        print('fbs2fbd flag activated')
        obj._resampleFlag = 'single2dual'
    obj.extractImage()
    obj.frame.getImage().renderHdr()
    #####Estimate doppler
    dop = DopIQ()
    dop.configure()
    img = copy.deepcopy(obj.frame.getImage())
    img.setAccessMode('READ')
    dop.wireInputPort('frame', object=obj.frame)
    dop.wireInputPort('instrument', object=obj.frame.instrument)
    dop.wireInputPort('image', object=img)
    dop.calculateDoppler()
    dop.fitDoppler()
    # Scale the quadratic doppler-fit coefficients by the PRF and store them
    # on the frame for downstream processing.
    fit = dop.quadratic
    coef = [fit['a'], fit['b'], fit['c']]
    obj.frame._dopplerVsPixel = [x*obj.frame.PRF for x in coef]
    # Persist the frame metadata next to the raw output via shelve.
    pickName = os.path.join(slcname, 'raw')
    with shelve.open(pickName) as db:
        db['frame'] = obj.frame
if __name__ == '__main__':
    '''
    Main driver.
    '''
    inps = cmdLineParse()
    # Strip a single trailing slash so os.path.basename() inside unpack()
    # yields the directory name rather than an empty string.
    if inps.slcdir.endswith('/'):
        inps.slcdir = inps.slcdir[:-1]
    if inps.h5dir.endswith('/'):
        inps.h5dir = inps.h5dir[:-1]
    unpack(inps.h5dir, inps.slcdir,
            multiple=inps.multiple)
|
# Copyright (C) 2009 - 2019 Isotropix SAS. All rights reserved.
#
# The information in this file is provided for the exclusive use of
# the software licensees of Isotropix. Contents of this file may not
# be distributed, copied or duplicated in any form, in whole or in
# part, without the prior written permission of Isotropix SAS.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Reuse the project's export-options dialog item when present; otherwise
# create it as a private, static object under project://default.
alembic_export_ui_name = "alembic_export_ui"
alembic_export_ui = ix.item_exists("project://default/" + alembic_export_ui_name)
# FIX: identity comparison with None (PEP 8) instead of `== None`.
if alembic_export_ui is None:
    alembic_export_ui = ix.create_object(alembic_export_ui_name, "AlembicExportUI", ix.get_item("project://default"))
    alembic_export_ui.set_private(True)
    alembic_export_ui.set_static(True)
# Show the dialog; only run the export when the user confirms with OK.
if ix.application.inspect(alembic_export_ui, ix.api.AppDialog.cancel(), ix.api.AppDialog.STYLE_OK_CANCEL, "Alembic Export").is_ok():
    options = ix.api.AbcExportOptions(ix.application)
    # Context to be exported
    options.export_mode = ix.api.AbcExportOptions.EXPORT_MODE_CONTEXT
    options.context = ix.get_current_context()
    # Output Alembic file
    options.filename = ix.api.CoreString(alembic_export_ui.get_attribute("filename").get_string())
    # Frame range to be exported
    options.frame_range_mode = ix.api.AbcExportOptions.FRAME_RANGE_MODE_CUSTOM_RANGE
    options.frame_range[0] = alembic_export_ui.get_attribute("frame_range").get_long(0)
    options.frame_range[1] = alembic_export_ui.get_attribute("frame_range").get_long(1)
    # Write one per frame per file : true/false
    options.export_one_frame_per_file = alembic_export_ui.get_attribute("write_one_frame_per_file").get_bool()
    # Transfer data from source Alembic to the output Alembic: true/false
    options.transfer_source_data = alembic_export_ui.get_attribute("transfer_source_data").get_bool()
    # Export combiners: true/false
    options.export_combiners = alembic_export_ui.get_attribute("export_combiners").get_bool()
    # Export scatterers: true/false
    options.export_scatterers = alembic_export_ui.get_attribute("export_scatterers").get_bool()
    # Scatterers export mode: as geometries, or as bounding boxes
    options.scatterer_export_mode = alembic_export_ui.get_attribute("scatterer_export_mode").get_long()
    # Properties option
    options.export_properties = alembic_export_ui.get_attribute("export_properties").get_bool()
    options.fill_sparse_properties = alembic_export_ui.get_attribute("fill_sparse_properties").get_bool()
    ix.api.IOHelpers.export_to_alembic(options)
|
import contextlib
import io
from unittest import TestCase
from unittest.mock import patch
from design_patterns.template_method.fahrenheit_celsius_template import ConverterTemplate
class TestConverterTemplate(TestCase):
    @patch("design_patterns.template_method.fahrenheit_celsius.sys")
    def test_run(self, mock_sys):
        """Drive the converter with canned stdin and compare captured stdout."""
        # Two valid readings, then a non-numeric input that terminates the loop.
        mock_sys.stdin.readline.side_effect = ["32", "99", "a"]
        expected = (
            "Input Fahrenheit:\n"
            "32.00℉ = 0.00℃\n"
            "Input Fahrenheit:\n"
            "99.00℉ = 37.22℃\n"
            "Input Fahrenheit:\n"
            "Fahrenheit to Celsius Finished!\n"
        )
        captured = io.StringIO()
        with contextlib.redirect_stdout(captured):
            ConverterTemplate().run()
        self.assertEqual(expected, captured.getvalue())
|
from pathlib import Path
from time import sleep
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.exceptions import AirflowException
from airflow.utils.decorators import apply_defaults
from dagger.dag_creator.airflow.operators.dagger_base_operator import DaggerBaseOperator
from dagger.dag_creator.airflow.utils.decorators import lazy_property
class AWSBatchOperator(DaggerBaseOperator):
    """
    Execute a job on AWS Batch Service
    .. warning: the queue parameter was renamed to job_queue to segregate the
        internal CeleryExecutor queue from the AWS Batch internal queue.
    :param job_name: the name for the job that will run on AWS Batch
    :type job_name: str
    :param job_definition: the job definition name on AWS Batch
    :type job_definition: str
    :param job_queue: the queue name on AWS Batch
    :type job_queue: str
    :param overrides: the same parameter that boto3 will receive on
        containerOverrides (templated):
        http://boto3.readthedocs.io/en/latest/reference/services/batch.html#submit_job
    :type overrides: dict
    :param max_retries: exponential backoff retries while waiter is not
        merged, 4200 = 48 hours
    :type max_retries: int
    :param aws_conn_id: connection id of AWS credentials / region name. If None,
        credential boto3 strategy will be used
        (http://boto3.readthedocs.io/en/latest/guide/configuration.html).
    :type aws_conn_id: str
    :param region_name: region name to use in AWS Hook.
        Override the region_name in connection (if provided)
    :type region_name: str
    :param cluster_name: Batch cluster short name or arn
    :type cluster_name: str
    """
    ui_color = "#c3dae0"
    client = None
    arn = None
    # Airflow templates the container overrides dict.
    template_fields = ("overrides",)
    @apply_defaults
    def __init__(
        self,
        job_queue,
        job_name=None,
        absolute_job_name=None,
        overrides=None,
        job_definition=None,
        aws_conn_id=None,
        region_name=None,
        cluster_name=None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.job_name = self._validate_job_name(job_name, absolute_job_name)
        self.aws_conn_id = aws_conn_id
        self.region_name = region_name
        self.cluster_name = cluster_name
        # Default the job definition to the job name when not given explicitly.
        self.job_definition = job_definition or self.job_name
        self.job_queue = job_queue
        self.overrides = overrides or {}
        self.job_id = None
    @lazy_property
    def batch_client(self):
        # Lazily-created boto3 Batch client via the Airflow AWS hook.
        return AwsHook(aws_conn_id=self.aws_conn_id, client_type="batch").get_client_type(
            "batch", region_name=self.region_name
        )
    @lazy_property
    def logs_client(self):
        # Lazily-created CloudWatch Logs client (for streaming job logs).
        return AwsHook(aws_conn_id=self.aws_conn_id, client_type="logs").get_client_type(
            "logs", region_name=self.region_name
        )
    @lazy_property
    def ecs_client(self):
        # Lazily-created ECS client (for inspecting container instances).
        return AwsHook(aws_conn_id=self.aws_conn_id, client_type="ecs").get_client_type(
            "ecs", region_name=self.region_name
        )
    def _validate_job_name(self, job_name, absolute_job_name):
        """Return the effective job name; absolute_job_name wins when given.

        For a relative job_name, requires that `~/dags/<job/name/path>` exists.
        NOTE(review): the `assert` below is stripped under `python -O`;
        consider raising explicitly if that mode is ever used.
        """
        if absolute_job_name is None and job_name is None:
            raise Exception("Both job_name and absolute_job_name cannot be null")
        if absolute_job_name is not None:
            return absolute_job_name
        job_path = Path().home() / "dags" / job_name.replace("-", "/")
        assert (
            job_path.is_dir()
        ), f"Job name `{job_name}`, points to a non-existing folder `{job_path}`"
        return job_name
    def execute(self, context):
        """Submit the Batch job, then block while polling it to completion."""
        self.task_instance = context["ti"]
        res = self.batch_client.submit_job(
            jobName=self.job_name,
            jobQueue=self.job_queue,
            jobDefinition=self.job_definition,
            containerOverrides=self.overrides,
        )
        # NOTE(review): job_path is computed but never used here.
        job_path = self.job_name.replace("-", "/")
        self.job_id = res["jobId"]
        self.log.info(
            "\n"
            f"\n\tJob name: {self.job_name}"
            f"\n\tJob definition: {self.job_definition}"
            f"\n\tJob id: {self.job_id}"
            "\n"
        )
        self.poll_task()
    def poll_task(self):
        """Poll the submitted job until SUCCEEDED/FAILED, streaming its logs.

        Raises AirflowException when the job ends in FAILED.
        """
        log_offset = 0
        print_logs_url = True
        while True:
            res = self.batch_client.describe_jobs(jobs=[self.job_id])
            # The job may not be visible immediately after submission.
            if len(res["jobs"]) == 0:
                sleep(3)
                continue
            job = res["jobs"][0]
            job_status = job["status"]
            log_stream_name = job["container"].get("logStreamName")
            # Print the CloudWatch console URL once, as soon as it is known.
            if print_logs_url and log_stream_name:
                print_logs_url = False
                self.log.info(
                    "\n"
                    f"\n\tLogs at: https://{self.region_name}.console.aws.amazon.com/cloudwatch/home?"
                    f"region={self.region_name}#logEventViewer:group=/aws/batch/job;stream={log_stream_name}"
                    "\n"
                )
            if job_status in ("RUNNING", "FAILED", "SUCCEEDED") and log_stream_name:
                try:
                    log_offset = self.print_logs(log_stream_name, log_offset)
                except self.logs_client.exceptions.ResourceNotFoundException:
                    # Log stream not created yet; try again on the next poll.
                    pass
            else:
                self.log.info(f"Job status: {job_status}")
            if job_status == "FAILED":
                status_reason = res["jobs"][0]["statusReason"]
                exit_code = res["jobs"][0]["container"].get("exitCode")
                reason = res["jobs"][0]["container"].get("reason", "")
                failure_msg = f"Status: {status_reason} | Exit code: {exit_code} | Reason: {reason}"
                container_instance_arn = job["container"]["containerInstanceArn"]
                self.retry_check(container_instance_arn)
                raise AirflowException(failure_msg)
            if job_status == "SUCCEEDED":
                self.log.info("AWS Batch Job has been successfully executed")
                return
            sleep(7.5)
    def retry_check(self, container_instance_arn):
        """Grant the task extra retries when its container instance was not ACTIVE
        (e.g. the instance was being drained), so infrastructure failures retry."""
        res = self.ecs_client.describe_container_instances(
            cluster=self.cluster_name, containerInstances=[container_instance_arn]
        )
        instance_status = res["containerInstances"][0]["status"]
        if instance_status != "ACTIVE":
            self.log.warning(
                f"Instance in {instance_status} state: setting the task up for retry..."
            )
            # NOTE(review): '+=' accumulates try_number+1 onto retries each
            # failure; '=' may have been intended — confirm against intent.
            self.retries += self.task_instance.try_number + 1
            self.task_instance.max_tries = self.retries
    def print_logs(self, log_stream_name, log_offset):
        """Log all CloudWatch events after *log_offset*; return the new offset.

        NOTE(review): get_log_events returns a single page; very long logs
        beyond the first page are not followed via nextForwardToken.
        """
        logs = self.logs_client.get_log_events(
            logGroupName="/aws/batch/job",
            logStreamName=log_stream_name,
            startFromHead=True,
        )
        for event in logs["events"][log_offset:]:
            self.log.info(event["message"])
        log_offset = len(logs["events"])
        return log_offset
    def on_kill(self):
        """Terminate the remote Batch job when the Airflow task is killed."""
        res = self.batch_client.terminate_job(
            jobId=self.job_id, reason="Task killed by the user"
        )
        self.log.info(res)
|
#!/usr/bin/python
import sys, httplib, uuid, random,time
# Disable httplib's wire-level debug output.
httplib.HTTPConnection.debuglevel = 0
# Keyspace/table (Cassandra-style, per the DaaS XML below) targeted by writes.
KeyspaceName = 'm2m'
tablename = 'sensor'
# The DaaS REST endpoint is reached through an HAProxy load balancer.
HAProxyIP = '10.99.0.39'
HAProxyport = '8080'
BaseURL = HAProxyIP+':'+HAProxyport
def executeRESTCall(restMethod, serviceBaseURL, resourceName, content):
    """Issue one REST call with an XML body and return the httplib response.

    FIX: the response was previously fetched and silently discarded and the
    connection never closed; it is now drained, closed, and returned so
    callers can check the status (existing callers that ignore the return
    value are unaffected).
    """
    connection = httplib.HTTPConnection(serviceBaseURL)
    headers = {
        'Content-Type': 'application/xml; charset=utf-8'#,
        # 'Accept':'application/xml, multipart/related'
    }
    connection.request(restMethod, '/'+resourceName, body=content, headers=headers)
    result = connection.getresponse()
    # Drain the body before closing so the socket shuts down cleanly.
    result.read()
    connection.close()
    return result
def _issueWriteRequest(Table, KeyspaceName, ColumnNames, Values):
    """PUT one row of (ColumnNames, Values) into Table/KeyspaceName via DaaS.

    FIX: the Table parameter was immediately overwritten with the global
    `tablename`, silently ignoring the caller's argument; the parameter is
    now honoured (existing callers passing `tablename` see no change).
    """
    columns = ''.join('<Column name="%s" value="%s"/>' % (name, value)
                      for name, value in zip(ColumnNames, Values))
    createRowStatement = (
        '<CreateRowsStatement><Table name="' + Table + '"><Keyspace name="'
        + KeyspaceName + '"/></Table><Row>' + columns
        + '</Row></CreateRowsStatement>'
    )
    executeRESTCall('PUT', BaseURL, 'DaaS/api/xml/table/row', createRowStatement)
def _issueReadQuery(Table, KeyspaceName, ColumnName, KeyColumn, KeysToRead):
    """POST a row-read query for the given keys against Table/KeyspaceName."""
    query = (
        '<Query><Table name="' + Table + '"><Keyspace name="' + KeyspaceName
        + '"/></Table><Condition>' + KeyColumn + ' in (' + KeysToRead
        + ')</Condition></Query>'
    )
    executeRESTCall('POST', BaseURL, 'DaaS/api/xml/table/row', query)
|
#!/usr/bin/env python
import math
import os
import random
import sys
from pprint import pprint
sys.path.append("..")
import cppgen
import cmfc
from exceptions import CmfParseError
from visitor import Visitor
from walker import Walker
# Upper bound used when sizing generated instances.
MAX_SIZE = 5
# Directory name used for generated output.
OUTPUT_DIR = 'TEST_OUTPUT'
def randuint(max_value):
    """Return a uniformly random integer in [0, max_value], as a string."""
    value = random.randint(0, max_value)
    return "%d" % value
def randint(min_value, max_value):
    """Return a uniformly random integer in [min_value, max_value], as a string."""
    value = random.randint(min_value, max_value)
    return "%d" % value
def randstring():
    """Return a random short string literal, double quotes included."""
    options = ["a", "b", "c", "aa", "bb", "cc", "abcdef"]
    return '"%s"' % random.choice(options)
def byte_example():
    """Return a fixed brace-initializer literal used for byte fields."""
    return "{" + ",".join(str(i) for i in range(6)) + "}"
def instance_name(msg_name, id):
    """Generate the name of an instance from a msg name: `_<lowername><id>`."""
    return "_{}{}".format(msg_name.lower(), id)
def test_name(msg_name):
"""Generate the name of a serialization unit test given a message name"""
return "test_{}_serialization".format(msg_name)
def type_instance_from_variable_instance(variable_instance):
    """
    Reduce a generated variable declaration like `Foo _foo0{1,2};` to an
    inline type instance like `Foo{1,2}` for embedding in other instances.
    """
    # The type name is everything before the first space.
    type_name, _, _ = variable_instance.partition(' ')
    # Keep from the opening brace onward, dropping the trailing semicolon.
    brace_at = variable_instance.index('{')
    return type_name + variable_instance[brace_at:-1]
class InstanceVisitor(Visitor):
    """
    A visitor that generates instantiation of generated types along with serialization and
    deserialization code.
    """

    def __init__(self):
        # How many elements to generate for nested types.
        # This really isn't supported yet...
        self.size = 0
        # A dict keyed by a msg name that contains a set of generated instances of
        # various sizes as strings. This dict should be maintained across all
        # instantiations of a single visitor.
        self.existing_instances = dict()
        # The current msg name of the instance being generated
        self.msg_name = ''
        # The current msg instance being generated as a string
        self.instance = ''
        # All Enum definitions, keyed by enum name -> list of tag strings.
        self.enums = dict()

    def create_enum(self, name, tags):
        # Remember the tags so `enum` can pick a random member later.
        self.enums[name] = tags

    def msg_start(self, name, id):
        self.msg_name = name
        self.instance = f'{name} {instance_name(name, self.size)}{{'
        # Idiomatic membership test (was `if not name in ...`).
        if name not in self.existing_instances:
            self.existing_instances[name] = []

    def msg_end(self):
        # Close the brace opened in msg_start and record the finished instance.
        self.instance += '};'
        self.existing_instances[self.msg_name].append(self.instance)
        self.msg_name = ''
        self.instance = ''

    def field_start(self, name, type):
        pass

    def field_end(self):
        self.instance += ","

    def bool(self):
        self.instance += random.choice(["true", "false"])

    def uint8(self):
        self.instance += randuint(0xFF) + "u"

    def uint16(self):
        self.instance += randuint(0xFFFF) + "u"

    def uint32(self):
        self.instance += randuint(0xFFFFFFFF) + "u"

    def uint64(self):
        self.instance += randuint(0xFFFFFFFFFFFFFFFF) + "u"

    def int8(self):
        self.instance += randint(-128, 127)

    def int16(self):
        self.instance += randint(-32768, 32767)

    def int32(self):
        self.instance += randint(-2147483648, 2147483647)

    def int64(self):
        self.instance += randint(-9223372036854775808, 9223372036854775807)

    def string(self):
        self.instance += randstring()

    def bytes(self):
        self.instance += byte_example()

    def msgname_ref(self, name):
        # Reuse a previously generated instance of the referenced message type.
        variable_instance = random.choice(self.existing_instances[name])
        self.instance += type_instance_from_variable_instance(
            variable_instance)

    def kvpair_start(self):
        self.instance += "{"

    def kvpair_key_end(self):
        self.instance += ","

    def kvpair_end(self):
        self.instance += "}"

    def list_start(self):
        self.instance += "{"

    def list_end(self):
        self.instance += "}"

    # Initialize the first std::array element with a random value only, i.e. `instance{value}`.
    # Other values are default-initialized to 0. See `map` for more information about the
    # limitations.
    def fixedlist_start(self):
        self.instance += "{"

    def fixedlist_type_end(self):
        pass

    def fixedlist_end(self, size):
        self.instance += "}"

    # Map instances are tricky to generate. Uniform initialization of maps is done by lists of
    # std::pairs, which themselves are represented as initializer lists. For this reason, we
    # actually need to know when a map starts and ends so we can generate `self.size` numbers
    # of std::pair initializer lists internally. This is further made complicated by the fact
    # that maps can be nested. This latter part is true for lists as well.
    #
    # Unfortunately there is only one callback per type, and so we'd need to build up some
    # datastructure that looks just like the AST being walked so we could easily generate multiple
    # internal pairs. This just happens to be one of the cases where walking the AST directly for
    # code generation is easier than using visitor callbacks. However, we use visitor callbacks to
    # prevent tying us to a specific AST structure and needing to modify every single code
    # generator.
    #
    # What we do because of this is just generate a single KV PAIR for now. Generating a single
    # pair just means using double brackets for map_start and map_end. We can think of more
    # sophisticated strategies later.
    def map_start(self):
        self.instance += "{{"

    def map_key_end(self):
        self.instance += ","

    def map_end(self):
        self.instance += "}}"

    def optional_start(self):
        self.instance += "{"

    def optional_end(self):
        self.instance += "}"

    def oneof(self, msgs):
        # Pick one of the candidate message types at random and inline an
        # existing instance of it.
        name = random.choice(list(msgs.keys()))
        variable_instance = random.choice(self.existing_instances[name])
        self.instance += type_instance_from_variable_instance(
            variable_instance)

    def enum(self, name):
        self.instance += (name + "::" + random.choice(self.enums[name]))
def testSerializationStr(msg_name):
    """
    Create a function that roundtrip serializes and deserializes all instances of a given message type.
    """
    code = f"void {test_name(msg_name)}() {{\n"
    for idx in range(MAX_SIZE):
        instance = instance_name(msg_name, idx)
        code += f"""
    {{
        {msg_name} {instance}_computed;
        std::vector<uint8_t> output;
        serialize(output, {instance});
        deserialize(output, {instance}_computed);
        assert({instance} == {instance}_computed);
        {msg_name} {instance}_str_computed;
        std::string output_str;
        serialize(output_str, {instance});
        deserialize(output_str, {instance}_str_computed);
        assert({instance} == {instance}_str_computed);
    }}
    """
    code += "}\n"
    return code
def testIntegerSerialization():
print("Generating integer serialization tests")
with open("test_integer_serialization.cpp") as f:
test_code = f.read()
return test_code
def file_header(namespace):
    """Return the common C++ preamble (includes + namespace open) for generated tests."""
    return f"""/***************************************
 Autogenerated by test_cppgen.py. Do not modify.
***************************************/
#include "example.hpp"
#include <cassert>
namespace {namespace} {{
"""
def file_trailer(namespace, ast):
    """Close the generated namespace and emit a main() calling every message test."""
    trailer = """
}} // namespace {}
int main() {{
""".format(namespace)
    for translation_unit in ast:
        for msg in translation_unit.msgs or []:
            trailer += "  {}::{}();\n".format(namespace, test_name(msg.name))
    trailer += "  cmf::test::test_integer_serialization();\n"
    trailer += "}"
    return trailer
def run_command(command):
    """Run a shell command, raising RuntimeError if it exits non-zero.

    Uses an explicit raise rather than `assert` so the check is not stripped
    when Python runs with optimizations (-O).
    """
    status = os.system(command)
    if status != 0:
        raise RuntimeError(f"Command failed with status {status}: {command}")
def generate_code_and_tests(ast, header_file):
    """ Walk concord message format(CMF) AST and generate C++ code and C++ tests"""
    namespace = "cmf::test"
    print("Generating C++ Message structs and serialization code")
    header, code = cppgen.translate(ast, header_file, namespace)
    test_code = file_header(namespace)
    print("Generating C++ Message instances and serialization tests")
    visitor = InstanceVisitor()
    # We generate `max_size` msg instances for tests
    for size in range(MAX_SIZE):
        visitor.size = size
        Walker(ast, visitor).walk()
    for msg_name, instances in visitor.existing_instances.items():
        test_code += "".join(instance + "\n\n" for instance in instances)
        test_code += testSerializationStr(msg_name)
    test_code += testIntegerSerialization()
    return header, code, test_code + file_trailer(namespace, ast)
def compile_cmf_lib():
    """Compile the generated example.cpp into a sanitizer-instrumented shared lib."""
    print("Compiling CMF lib with g++")
    sanitize = "-fsanitize=undefined -fsanitize=address -fsanitize=leak -fno-sanitize-recover"
    run_command(
        f"g++ -std=c++17 -g -c -fPIC {sanitize} "
        f"-o {OUTPUT_DIR}/example.o {OUTPUT_DIR}/example.cpp"
    )
    run_command(
        f"g++ -std=c++17 -shared {sanitize} "
        f"-o {OUTPUT_DIR}/libexample.so {OUTPUT_DIR}/example.o"
    )
def compile_tests():
    """Compile the generated round-trip serialization tests against the CMF lib."""
    print("Compiling tests with g++")
    compiler = " ".join([
        "g++", "-std=c++17", "-g",
        "-fsanitize=undefined", "-fsanitize=address", "-fsanitize=leak",
        "-fno-sanitize-recover",
    ])
    run_command(
        f"{compiler} -o {OUTPUT_DIR}/test_serialization -L{OUTPUT_DIR} "
        f"{OUTPUT_DIR}/test_serialization.cpp -lexample"
    )
def run_tests():
    """Execute the compiled serialization test binary."""
    print("Running tests")
    # The test binary links libexample.so out of the output directory.
    os.environ["LD_LIBRARY_PATH"] = OUTPUT_DIR
    run_command("./" + OUTPUT_DIR + "/test_serialization")
def test_serialization():
    """
    1. Generate C++ code for messages from example.cmf and write it to example.h.
    2. Generate instances of the messages as well as tests that round trip serialize and deserialize them.
    3. Run a test to verify integer serialization produces the expected results
    4. Compile that C++ code via g++
    5. Run the compiled C++ code as a test
    """
    with open("../grammar.ebnf") as grammar_file:
        print("Reading ../grammar.ebnf")
        grammar = grammar_file.read()
    with open("../../example.cmf") as cmf_file:
        print("Reading ../../example.cmf")
        cmf = cmf_file.read()
    ast, _ = cmfc.parse(grammar, cmf)
    # Uncomment to show the generated AST for debugging purposes
    # pprint(ast)
    run_command(f'mkdir -p {OUTPUT_DIR}')
    header, code, tests = generate_code_and_tests(ast, "example.hpp")
    # Write each generated artifact into the output directory.
    outputs = (
        ("example.hpp", header),
        ("example.cpp", code),
        ("test_serialization.cpp", tests),
    )
    for filename, content in outputs:
        with open(f"{OUTPUT_DIR}/{filename}", "w") as out:
            out.write(content)
    compile_cmf_lib()
    compile_tests()
    run_tests()
if __name__ == "__main__":
    # Entry point: run the full generate -> compile -> execute pipeline.
    test_serialization()
    print("Tests passed.")
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
from telemetry.core.platform import tracing_category_filter
from telemetry.core.platform import tracing_options
from telemetry.timeline import model as model_module
from telemetry.value import trace
from telemetry.web_perf.metrics import gpu_timeline
from telemetry.web_perf.metrics import layout
from telemetry.web_perf.metrics import responsiveness_metric
from telemetry.web_perf.metrics import smoothness
from telemetry.web_perf import timeline_interaction_record as tir_module
from telemetry.web_perf import smooth_gesture_util
# TimelineBasedMeasurement considers all instrumentation as producing a single
# timeline. But, depending on the amount of instrumentation that is enabled,
# overhead increases. The user of the measurement must therefore choose between
# a few levels of instrumentation.
NO_OVERHEAD_LEVEL = 'no-overhead'
MINIMAL_OVERHEAD_LEVEL = 'minimal-overhead'
DEBUG_OVERHEAD_LEVEL = 'debug-overhead'
# Valid overhead level strings, used to validate Options(overhead_level=...).
ALL_OVERHEAD_LEVELS = [
    NO_OVERHEAD_LEVEL,
    MINIMAL_OVERHEAD_LEVEL,
    DEBUG_OVERHEAD_LEVEL
]
def _GetAllTimelineBasedMetrics():
  # TODO(nednguyen): use discovery pattern to return all the instances of
  # all TimelineBasedMetrics class in web_perf/metrics/ folder.
  # This cannot be done until crbug.com/460208 is fixed.
  metrics = (
      smoothness.SmoothnessMetric(),
      responsiveness_metric.ResponsivenessMetric(),
      layout.LayoutMetric(),
      gpu_timeline.GPUTimelineMetric(),
  )
  return metrics
class InvalidInteractions(Exception):
  """Raised when interaction records on a page violate TBM's constraints."""
# TODO(nednguyen): Get rid of this results wrapper hack after we add interaction
# record to telemetry value system (crbug.com/453109)
class ResultsWrapperInterface(object):
  """Interface for wrappers that relabel values before recording them."""

  def __init__(self, results, label):
    self._results = results
    self._result_prefix = label

  @property
  def current_page(self):
    # Delegate straight to the wrapped results object.
    return self._results.current_page

  def AddValue(self, value):
    raise NotImplementedError
class _TBMResultWrapper(ResultsWrapperInterface):
  """Prefixes each value's name with the interaction label before recording."""

  def AddValue(self, value):
    value.name = '{}-{}'.format(self._result_prefix, value.name)
    self._results.AddValue(value)
def _GetRendererThreadsToInteractionRecordsMap(model):
  """Map each thread in the timeline model to its interaction records.

  Raises:
    InvalidInteractions: if the same interaction label shows up on more than
        one thread. A repeated label within a single thread is allowed.
  """
  threads_to_records_map = defaultdict(list)
  interaction_labels_of_previous_threads = set()
  for curr_thread in model.GetAllThreads():
    for event in curr_thread.async_slices:
      # TODO(nduca): Add support for page-load interaction record.
      if tir_module.IsTimelineInteractionRecord(event.name):
        interaction = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
        # Adjust the interaction record to match the synthetic gesture
        # controller if needed.
        interaction = (
            smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
                model, interaction))
        threads_to_records_map[curr_thread].append(interaction)
        # Only labels seen on *previous* threads count as duplicates;
        # the current thread's labels are folded into the set below, after
        # its events have all been processed.
        if interaction.label in interaction_labels_of_previous_threads:
          raise InvalidInteractions(
              'Interaction record label %s is duplicated on different '
              'threads' % interaction.label)
    if curr_thread in threads_to_records_map:
      interaction_labels_of_previous_threads.update(
          r.label for r in threads_to_records_map[curr_thread])
  return threads_to_records_map
class _TimelineBasedMetrics(object):
  """Applies every timeline-based metric to one renderer thread's records."""

  def __init__(self, model, renderer_thread, interaction_records,
               results_wrapper_class=_TBMResultWrapper):
    self._model = model
    self._renderer_thread = renderer_thread
    self._interaction_records = interaction_records
    self._results_wrapper_class = results_wrapper_class

  def AddResults(self, results):
    # Group the records by label so each label gets its own wrapped results.
    interactions_by_label = defaultdict(list)
    for record in self._interaction_records:
      interactions_by_label[record.label].append(record)
    for label, interactions in interactions_by_label.iteritems():
      are_repeatable = [i.repeatable for i in interactions]
      if not all(are_repeatable) and len(interactions) > 1:
        raise InvalidInteractions('Duplicate unrepeatable interaction records '
                                  'on the page')
      wrapped_results = self._results_wrapper_class(results, label)
      self.UpdateResultsByMetric(interactions, wrapped_results)

  def UpdateResultsByMetric(self, interactions, wrapped_results):
    if not interactions:
      return
    for metric in _GetAllTimelineBasedMetrics():
      metric.AddResults(self._model, self._renderer_thread,
                        interactions, wrapped_results)
class Options(object):
  """A class to be used to configure TimelineBasedMeasurement.

  This is created and returned by
  Benchmark.CreateTimelineBasedMeasurementOptions.
  """

  def __init__(self, overhead_level=NO_OVERHEAD_LEVEL):
    """As the amount of instrumentation increases, so does the overhead.

    The user of the measurement chooses the overhead level that is appropriate,
    and the tracing is filtered accordingly.

    overhead_level: Can either be a custom TracingCategoryFilter object or
        one of NO_OVERHEAD_LEVEL, MINIMAL_OVERHEAD_LEVEL or
        DEBUG_OVERHEAD_LEVEL.
    """
    is_custom_filter = isinstance(
        overhead_level, tracing_category_filter.TracingCategoryFilter)
    if not is_custom_filter and overhead_level not in ALL_OVERHEAD_LEVELS:
      raise Exception("Overhead level must be a TracingCategoryFilter object"
                      " or valid overhead level string."
                      " Given overhead level: %s" % overhead_level)
    self._overhead_level = overhead_level
    self._extra_category_filters = []

  def ExtendTraceCategoryFilters(self, filters):
    # Extra categories are layered on top of whatever base filter the
    # overhead level selects.
    self._extra_category_filters.extend(filters)

  @property
  def extra_category_filters(self):
    return self._extra_category_filters

  @property
  def overhead_level(self):
    return self._overhead_level
class TimelineBasedMeasurement(object):
  """Collects multiple metrics based on their interaction records.

  A timeline based measurement shifts the burden of what metrics to collect onto
  the user story under test. Instead of the measurement
  having a fixed set of values it collects, the user story being tested
  issues (via javascript) an Interaction record into the user timing API that
  describes what is happening at that time, as well as a standardized set
  of flags describing the semantics of the work being done. The
  TimelineBasedMeasurement object collects a trace that includes both these
  interaction records, and a user-chosen amount of performance data using
  Telemetry's various timeline-producing APIs, tracing especially.

  It then passes the recorded timeline to different TimelineBasedMetrics based
  on those flags. As an example, this allows a single user story run to produce
  load timing data, smoothness data, critical jank information and overall cpu
  usage information.

  For information on how to mark up a page to work with
  TimelineBasedMeasurement, refer to the
  perf.metrics.timeline_interaction_record module.

  Args:
    options: an instance of timeline_based_measurement.Options.
    results_wrapper_class: A class that has the __init__ method takes in
        the page_test_results object and the interaction record label. This
        class follows the ResultsWrapperInterface. Note: this class is not
        supported long term and to be removed when crbug.com/453109 is resolved.
  """
  def __init__(self, options, results_wrapper_class=_TBMResultWrapper):
    self._tbm_options = options
    self._results_wrapper_class = results_wrapper_class
  def WillRunUserStory(self, tracing_controller,
                       synthetic_delay_categories=None):
    """Configure and start tracing.

    Args:
      app: an app.App subclass instance.
      synthetic_delay_categories: iterable of delays. For example:
          ['DELAY(cc.BeginMainFrame;0.014;alternating)']
          where 'cc.BeginMainFrame' is a timeline event, 0.014 is the delay,
          and 'alternating' is the mode.
    """
    if not tracing_controller.IsChromeTracingSupported():
      raise Exception('Not supported')
    # A fully custom TracingCategoryFilter may be passed directly as the
    # overhead level; otherwise map the level string to a predefined filter.
    if isinstance(self._tbm_options.overhead_level,
                  tracing_category_filter.TracingCategoryFilter):
      category_filter = self._tbm_options.overhead_level
    else:
      assert self._tbm_options.overhead_level in ALL_OVERHEAD_LEVELS, (
          "Invalid TBM Overhead Level: %s" % self._tbm_options.overhead_level)
      if self._tbm_options.overhead_level == NO_OVERHEAD_LEVEL:
        category_filter = tracing_category_filter.CreateNoOverheadFilter()
      elif self._tbm_options.overhead_level == MINIMAL_OVERHEAD_LEVEL:
        category_filter = tracing_category_filter.CreateMinimalOverheadFilter()
      else:
        category_filter = tracing_category_filter.CreateDebugOverheadFilter()
    # Extra user-requested categories are added on top of the base filter.
    for new_category_filter in self._tbm_options.extra_category_filters:
      category_filter.AddIncludedCategory(new_category_filter)
    # TODO(slamm): Move synthetic_delay_categories to the TBM options.
    for delay in synthetic_delay_categories or []:
      category_filter.AddSyntheticDelay(delay)
    options = tracing_options.TracingOptions()
    options.enable_chrome_trace = True
    options.enable_platform_display_trace = True
    tracing_controller.Start(options, category_filter)
  def Measure(self, tracing_controller, results):
    """Collect all possible metrics and add them to results."""
    trace_result = tracing_controller.Stop()
    # Record the raw trace itself so it can be inspected alongside metrics.
    results.AddValue(trace.TraceValue(results.current_page, trace_result))
    model = model_module.TimelineModel(trace_result)
    threads_to_records_map = _GetRendererThreadsToInteractionRecordsMap(model)
    # Run every timeline-based metric once per renderer thread that produced
    # interaction records.
    for renderer_thread, interaction_records in (
        threads_to_records_map.iteritems()):
      meta_metrics = _TimelineBasedMetrics(
          model, renderer_thread, interaction_records,
          self._results_wrapper_class)
      meta_metrics.AddResults(results)
  def DidRunUserStory(self, tracing_controller):
    # Stop tracing only if it is still running — presumably the case where
    # Measure() was never reached (TODO confirm against caller).
    if tracing_controller.is_tracing_running:
      tracing_controller.Stop()
|
# Flat tuple of alternating (name, price) entries for the price list.
lista = (
    'Lapis', 1.50,
    'Caderno', 15.75,
    'Mochila', 100,
    'Caneta', 1.25,
    'Notebook', 2500
)
print('-'*50)
print(f'{"Lista de preços":^40}')
print('-'*50)
# Walk the flat tuple two items at a time (even index = name, odd = price)
# instead of indexing with range(len(...)).
for nome, preco in zip(lista[::2], lista[1::2]):
    print(f'{nome:.<30}', end='')
    print(f' R$:{preco:.2f}')
print('-'*50)
|
"""This module contains classes implementing different Evolutionary algorithms.
"""
__all__ = [
"RVEA",
"NSGAIII",
"BaseEA",
"BaseDecompositionEA",
"PPGA",
"TournamentEA",
"IOPIS_NSGAIII",
"IOPIS_RVEA",
"MOEA_D",
]
from desdeo_emo.EAs.BaseEA import BaseEA, BaseDecompositionEA
from desdeo_emo.EAs.RVEA import RVEA
from desdeo_emo.EAs.NSGAIII import NSGAIII
from desdeo_emo.EAs.PPGA import PPGA
from desdeo_emo.EAs.TournamentEA import TournamentEA
from desdeo_emo.EAs.IOPIS import IOPIS_NSGAIII, IOPIS_RVEA
from desdeo_emo.EAs.MOEAD import MOEA_D
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import unittest
from frappe.utils import nowdate, add_months
from erpnext.selling.report.pending_so_items_for_purchase_request.pending_so_items_for_purchase_request\
import execute
from erpnext.selling.doctype.sales_order.sales_order import make_material_request
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
class TestPendingSOItemsForPurchaseRequest(unittest.TestCase):
    """Exercises the Pending SO Items For Purchase Request report."""

    def test_result_for_partial_material_request(self):
        sales_order = make_sales_order()
        # Request only part of the ordered quantity.
        material_request = make_material_request(sales_order.name)
        material_request.items[0].qty = 4
        material_request.schedule_date = add_months(nowdate(), 1)
        material_request.submit()
        report = execute()
        last_row = report[1][len(report[1]) - 1]
        expected_pending = sales_order.items[0].qty - material_request.items[0].qty
        self.assertEqual(expected_pending, last_row['pending_qty'])

    def test_result_for_so_item(self):
        sales_order = make_sales_order()
        report = execute()
        last_row = report[1][len(report[1]) - 1]
        self.assertEqual(sales_order.items[0].qty, last_row['pending_qty'])
|
#!/usr/bin/python
#from Solution_v1_On2 import Solution
from Solution import Solution
obj = Solution()
#A = [5,6,7,8]
A = [59828,77802,75493,91030,57013,95808,52342,62073,31928,33106,94814,48919,78767,44287,63250,88940,52580,39345,5260,30711,66449,14137,55180,55823,35306,75693,52528,77736,3743,21500,67136,21809,39398,78231,91440,68693,26013,74448,98547,18291,17026,86955,43667,7052,11849,40538,18542,45365,60125,51904,21672,60371,111,86326,19332,26268,22554,39490,40309,10535,24196,30958,26650,57220,11819,94914,32078,20204,89747,72791,49055,649,62567,20884,45183,13096,61709,72068,90019,51649,91591,27852,61907,87614,49322,60750,92567,31817,12560,50801,16844,41728,17502,52414,42876,37119,88441,95732,55653,78242,51623,70008,9393,45465,64004,87803,97050,76865,3526,29058,79761,23352,30823,48383,47485,44545,65727,209,80029,78535,98525,52191,10128,84618,30157,18099,65530,24638,5800,27687,39576,57069,12171,44511,50341,85727,23577,55799,19759,37351,40776,50701,88510,63001,4565,34213,55278,23530,21436,52195,89436,67810,13951,60939,10832,45389,89924,96033,13292,85967,53016,1603,21088,41816,16875,75325,66450,50331,89955,59598,1519,63720,85698,87844,52653,55373,7145,39709,55704,33902,97264,83116,15199,79120,59018,65807,48867,62069,93327,43918,70127,47824,14638,23441,55584,69693,99564,50263,88437,10487,19712,912,44187,92742,77353,33796,25902,67489,80547,95372,59637,57002,6363,60918,36925,5223,15932,64176,60326,48118,6564,78556,89967,48405,33073,55798,44232,88474,38817,3085,71140,35028,23427,38436,46778,54665,2297,58560,42551,39018,83360,4669,78090,14639,46825,95933,4799,69635,96821,51933,32589,22969,11017,70122,12396,16523,95262,18959,82959,64678,56506,36005,45271,60036,90308,80904,31995,96035,68624,74589,60086,5327,10032,42807,48864,5019,19235,79870,94872,83600,13255,50059,67192,95080,10324,44273,83459,98208,39582,55382,52526,2934,24109,97881,33667,94581,85577,97346,10392,56812,15261,25120,74829,34207,39742,80686,67377,70669,31950,90998,6401,49052,75149,61987,28208,99709,57272,36017,97894,90291,24342,36288,65741,88007,5996,79250,52255,23759,88895,37345,35741,663,53371,27150,2454,54307,86143,17009,
36015,35004,60614,78523,83212,41702,12734,29010,29469,2369,92932,42548,28975,8353,80136,37727,95442,93328,79865,60378,77060,55151,83201,78019,43091,70970,50993,62339,73094,1648,44952,44643,41897,45399,58247,19894,60549,97331,65402,27609,69762,91453,22284,88210,51127,33515,52814,4230,86600,74884,10304,82757,63772,73575,45260,10892,27205,98044,69415,48391,39851,38250,98417,41332,59179,88409,61261,3876,20435,81903,76669,21855,81824,46089,74042,56562,54447,3069,98486,37210,72934,97525,64325,30766,49190,70464,17262,49602,79577,16187,12712,51872,28614,27697,14501,74809,37926,23413,20856,96268,52576,72099,53546,46141,98472,28542,54812,78545,36554,41865,47603,82612,13847,87063,90027,82985,99845,83390,41652,65839,70206,69797,51403,29505,52949,64859,99199,60807,37524,58912,20575,23282,52436,37803,438,69818,43786,100000,60821,86009,22580,89569,12888,50247,76043,85604,54802,54824,53779,24757,83417,3807,81118,29386,18163,79916,81255,65977,10553,78587,74788,95965,53579,2993,42602,79930,32761,21819,74621,81985,61855,89701,11924,48536,2978,97382,78820,85737,51032,72360,67022,25302,21153,60001,48374,94773,31368,22966,61889,93428,26187,42301,17387,38402,8496,4049,27133,75708,60448,60188,32119,89611,79070,58151,31705,23184,60478,65565,84067,14570,79169,43419,20876,32095,60584,94641,9155,903,98122,4237,89395,88720,53261,68127,61632,50505,9223,54669,34083,93845,70431,15858,93821,79558,84816,70015,93686,3110,57478,63021,95163,7837,38400,52219,95047,5231,47629,52355,93930,67052,82480,85149,57678,56176,16261,13583,19135,34407,99345,75998,98773,14652,94634,22146,48034,54119,86054,44543,18606,31860,63255,27537,34670,12731,43258,84153,90927,9709,60063,92941,64432,98633,32078,99219,34079,44885,38705,90335,58285,43341,59032,91693,33807,74480,36084,77913,4686,99789,18353,19402,68391,37806,3401,28970,48609,73865,47543,93560,38171,40195,93827,20750,74402,83086,39944,62297,93465,57654,88248,38651,20243,97483,91106,16850,19206,79620,38317,50362,33726,83462,29874,3671,27743,76446,39710,19447,76577,15
82,34003,47732,94522,66663,97709,67741,53195,62518,81063,22336,45117,84391,20432,76958,65680,41196,68595,49069,49543,63603,84985,22168,68844,63184,22989,63439,49791,1866,87654,8488,44875,29469,40542,27058,29401,19673,36480,79776,95521,15676,85197,25814,46728,65378,60205,74634,83708,1600,97814,68698,29934,97174,26547,47653,87064,4305,52232,82104,12956,43191,66509,8808,35583,72521,70092,97159,70482,10414,7337,58542,74279,68544,32503,32073,33564,96920,23945,90401,85303,32125,29929,90393,94029,59778,23257,5274,79841,34835,72349,36370,11975,97401,24906,96120,8346,17374,68493,39684,7978,27228,29831,31974,76766,99835,76189,62559,60853,12881,70377,6696,41794,83088,9576,25746,62692,15258,57921,10930,62395,25874,13091,71090,77251,28180,78420,63863,39317,74624,1259,76247,56329,14618,94370,58018,84231,25533,77194,83222,80533,99031,59803,65,32998,98902,53369,78821,35487,64567,82724,23139,43076,66492,63345,67398,37193,84579,39275,34630,69756,32640,44715,36083,88847,73567,65938,40476,82041,57678,14611,99171,23879,63909,855,41840,66788,61671,64984,21382,23436,8594,99673,56626,88209,39930,12396,58091,44884,83609,90042,70595,23942,25375,14250,99989,30383,27947,40266,65770,40028,41946,69521,61850,13490,89752,39330,79892,27564,74844,88066,20264,42437,20499,25154,51936,22516,20702,79971,86848,20596,42688,67695,62099,85655,78018,54646,95220,35309,29980,34453,61864,39796,48601,19004,40139,43742,95252,46391,97677,41334,58945,40373,55745,73832,51583,99968,54674,744,99907,52964,7250,53581,46349,84646,18341,59886,35941,2554,92320,26505,27343,86777,93936,33546,38640,48953,3104,42185,56034,50123,42689,73148,73397,96865,26143,28178,80945,65922,57686,35909,14004,9756,31869,21027,42662,94746,62873,94538,77718,61588,6030,86432,51127,90152,92601,63838,32748,36719,79523,76824,49155,7775,13296,7356,83838,44918,36182,70504,48425,57426,37256,14554,61846,17601,98282,9698,42539,96004,54235,19894,19716,77308,39172,75247,26214,73642,11536,57444,32143,42971,75070,71643,99981,86599,34173,72965,69603,38197,903
84,2865,18412,8914,91653,93245,96870,41152,47348,58438,97947,28156,24136,42110,61385,29843,60399,47475,43649,26082,88824,92283,33952,33471,79118,97997,3033,18386,15623,84180,53842,56012,51945,63748,6296,69259,7902,67144,48953,2124,74150,57306,4206,66089,42870,70623,64489,37048,75343,43746,83149,8797,96264,9903,74096,12078,73177,14724,69700,20739,67306,11499,75226,61401,79959,85220,25021,98555,9580,89648,56486,36290,61043,59152,96854,19950,85255,43311,9572,23813,84914,75396,44540,59513,69169,90779,96923,56676,2816,52621,75333,38511,833,81749,51112,2474,1120,29522,42256,55988,19726,33929,29215,76596,88414,42811,69779,59999,85671,87216,26258,75293,46485,93704,75610,20003,24753,19436,27020,40434,23801,13225,3438,69917,78896,5172,16689,70789,45682,9079,42921,41332,30161,86268,27369,26438,39706,21403,55612,35531,30538,28474,46576,3578,17998,57625,16137,49167,3333,58190,97511,96982,27340,49862,6451,58565,57482,52322,26036,26953,49874,77361,16339,12171,22951,66415,75800,94965,67699,15952,21350,99170,27478,15776,64109,97078,79279,46975,98263,41535,54412,25033,27569,11901,66442,47168,62650,33461,66681,14680,59894,2117,952,49812,55053,52470,13116,46489,47238,40630,8560,56905,85460,39413,30063,91910,11070,85254,84356,51077,61783,2078,36008,95634,84851,42372,5947,18447,32637,23247,74001,1216,16771,67977,20416,18379,91105,61580,99915,5111,8020,78146,27503,96974,86633,38416,48720,80945,54379,20642,54951,22600,22891,67974,19842,97252,87644,23805,47851,14339,21283,3930,65216,26749,40117,52908,81961,48914,34758,81516,42084,3733,3386,43462,3736,61670,4486,96332,18920,53433,6095,70711,49419,99943,78448,94703,68840,22249,20732,13123,1959,45701,29190,12075,55035,8694,14734,62350,60875,54405,70350,57727,17617,79072,83474,95,40820,62423,61537,33668,16804,42321,86033,70778,47918,80428,87613,93889,12636,62625,64593,98137,74874,25728,16494,28746,67468,17015,18691,29564,86404,10773,97411,12425,94014,52932,37002,80612,13793,32740,29652,32856,24200,95687,86425,39467,43074,93122,60575,87603,77436
,68147,67752,75651,34961,99601,17264,42985,55316,49122,86666,19035,75939,96523,4486,98034,50467,82822,6294,25939,23878,1878,18738,83183,4472,31173,21219,52005,42275,1536,74985,63017,63419,45076,24609,82407,75655,53171,59259,28160,59401,81437,34447,71550,57654,15443,97644,36610,85913,67436,44218,44205,24422,33461,44694,63879,71946,59811,86934,12649,87558,44876,68036,34455,8273,47897,55905,84187,3332,71214,16545,12398,29770,59957,14567,15766,72151,93415,38760,16555,45564,40244,59319,57169,72522,10875,66156,23936,52109,66966,25544,18575,89205,29080,2192,26042,96130,97596,46392,68889,78766,19893,73779,69542,68819,6160,69882,47535,70708,30203,90924,69583,21857,49083,20808,13126,89181,21711,83219,83141,81027,29657,98449,93859,19930,70887,79911,55577,26503,36494,6412,52544,38863,51089,68387,55216,62462,61343,42195,17842,94575,39321,47,9814,32943,92844,72845,87572,40843,27362,96137,80363,77855,8758,7763,98982,53789,18390,18848,63037,21005,31781,51281,20899,84684,54921,51250,77519,25346,32581,45929,9245,29663,36205,18408,52618,16184,95156,96949,76799,88122,19903,26979,58115,18666,88112,36107,99692,42923,1412,83461,15374,68239,40785,68714,13450,82385,78440,61750,30433,86563,57446,39168,16367,70944,97375,79959,59555,20143,9228,98862,94373,44745,35666,5371,50005,33483,14790,82169,61149,55825,76675,22080,93030,90629,63872,99413,38156,29444,68530,13507,11610,95914,79078,78519,47521,31627,87152,64820,91960,56686,55270,7426,83632,13788,36727,94903,96003,36159,32987,83921,66562,43101,55004,87423,32932,46749,10892,1695,13256,93467,63642,31817,76780,64878,29018,77475,56855,87246,34833,13627,92869,63946,73489,79514,74137,49260,79512,68023,87499,50147,5519,78862,14130,77378,84238,12921,75449,12441,15398,31276,47278,76038,88161,92971,79143,55291,51609,94148,97559,5226,45522,84816,57704,60391,75150,68520,4886,52258,82329,97569,56597,87697,3361,28317,74309,23690,3165,82644,43118,12951,38310,54021,3720,95560,59851,33229,47001,89524,45096,46187,32598,36259,58460,6266,42535,60058,91634,83206,
34495,21050,27444,73201,71733,84973,42190,97086,52602,98740,45684,22552,65515,11649,43748,46112,80794,70548,96975,29829,41489,40087,13278,94627,11854,83815,53224,41016,81501,65169,83818,24405,99165,50161,26497,34693,92720,63628,45954,44172,57380,47378,98930,20869,22092,42305,2088,14612,24877,76264,40702,66339,91781,84189,73464,38825,99192,73390,70551,23319,52163,81065,91179,9953,26034,12238,11868,93280,78378,86926,53037,34521,19106,93833,44586,69839,46374,52004,17437,50495,13583,96507,38524,80403,74726,10776,42446,29469,58005,5345,32232,71826,20318,87494,35886,3455,64815,29422,91106,28702,32159,29601,57719,84921,28308,19055,1628,38902,31940,2723,80245,63344,57535,54333,72919,72251,74461,25198,54045,24387,67971,76140,52379,82216,75427,50739,97884,34275,92635,47707,34545,56534,48527,47778,30764,41964,70828,46803,10098,90859,17629,42161,92070,32362,70553,75886,16878,12665,66513,92846,62356,70171,41090,13738,79180,29862,94386,45658,94053,92032,84537,72624,29356,60987,35276,75270,6974,60325,81109,57393,65161,77472,57172,8475,22459,2899,73395,58782,73269,85234,7543,45562,62473,92578,19383,8360,89393,46454,8574,94224,36188,74679,33032,95390,3615,93535,78347,64332,56470,92304,17480,51937,28208,86744,59260,40610,70569,91385,88431,39933,43324,30483,74693,66579,53365,18905,70729,89303,76923,41217,889,79901,6944,93339,98890,46970,76998,15688,9308,80459,8340,69439,54226,95733,21130,96018,25510,32611,78844,99461,23507,23756,3865,85120,64878,95508,15664,43489,11680,12342,52895,75236,91873,89831,50784,53422,49977,91281,62182,18155,44997,23985,12565,40790,46552,23254,92703,56409,33032,65495,11893,24192,34145,52616,26294,29707,46033,84834,82970,10692,44528,59209,98542,12325,68665,33787,2674,79451,7983,73024,7753,89485,48910,12517,22088,227,69353,1649,611,42843,50431,40143,3625,74460,73842,49131,9095,22596,24705,83425,81379,65301,23828,70152,28623,45253,12710,89546,20274,78607,49043,75234,23191,57764,92586,23265,66497,19511,58341,3977,81538,5083,70463,15766,40851,38199,14127,45752,895
27,23648,77347,67960,21940,13071,65229,25318,79103,20350,80373,44224,59595,86310,71825,50967,58712,15255,87649,72675,8581,44348,32273,47933,18532,31386,64326,24687,52991,5452,46966,7684,62971,86463,92752,98909,77482,12948,68137,11922,86968,85827,42016,69120,73794,75378,92040,69721,68377,51667,99212,71616,84306,26337,37036,41985,17758,58708,25958,38399,82096,55879,52985,57035,49423,10799,64287,3124,64388,13674,42142,73565,63453,79910,50962,40095,65224,37644,89661,91607,7225,69148,5457,24652,11690,40255,77001,98023,74879,71145,22723,84009,41112,96896,6286,70031,3666,6188,39708,50450,24673,46107,70486,6873,89675,56860,96802,66634,68680,23369,66683,48605,89278,91644,5862,28115,12931,59931,24341,29042,93378,41984,9564,93590,26688,64917,27098,74862,83942,80350,2044,5929,50003,27005,90360,15248,38069,622,31509,86455,43353,23208,51108,29934,48731,52686,5867,20438,18569,78480,32267,81496,87626,33841,55423,89181,74782,93084,82077,40599,26854,85635,96553,35733,53636,90477,1341,54512,54217,49432,36450,24838,63992,33217,78661,1368,82122,90771,51835,5479,91227,58869,16894,86896,90375,29226,9244,83572,14993,15110,20831,64034,8717,24034,70378,42911,21938,97006,33620,89248,93720,91098,52194,37331,70289,37545,37317,74726,34276,14221,99745,15651,51762,26494,11339,72713,65270,26318,16662,67925,4679,61468,62760,14332,14603,29969,4985,50865,24180,10959,99948,90914,8835,31687,57141,80652,48615,57424,96777,18712,57937,78251,5381,86885,66011,24769,27582,95016,41671,5736,74554,1863,20017,44742,32874,23007,45891,60560,26300,84429,7397,51291,49812,20067,38248,66270,8343,29523,24113,82211,25473,44026,86247,94527,57524,3318,57742,9095,32215,78861,69279,37724,72988,21116,18791,71713,41623,7434,14597,89754,8537,53730,67902,70029,2289,81080,94436,22272,94907,42161,48546,5038,18808,13605,89695,30627,774,49967,449,73756,7657,50794,74312,15682,29429,99900,82351,4614,73878,71511,9690,2595,85048,62447,74907,84514,50601,37024,89865,80747,50695,58940,98051,31152,89507,64299,80296,32774,45240,83454,59636,6
2801,20073,59536,70315,47204,29340,18928,37600,83506,95811,22853,50916,33714,21014,48245,96995,2745,56589,39303,18873,5578,60285,53789,93152,70976,29752,52722,16388,82117,46261,27277,53189,55200,54753,60370,96135,59286,58182,50914,60755,47653,44277,42731,87212,52283,17448,94806,69460,26990,32245,28438,93401,25031,74500,25507,95592,86420,81118,47634,60736,71855,53505,49128,84411,40226,27986,51489,18525,61635,14241,71150,85491,59287,39364,81736,54018,6670,26138,28466,44261,72312,20542,86462,50370,67830,56595,23635,58063,3125,90960,27061,50590,74370,87425,22590,35825,97777,89212,73842,20429,84049,52675,75376,25928,19751,53909,41484,61418,32205,96793,17113,99959,63791,41447,80001,80259,69546,96380,82325,4628,88630,12723,8567,11240,2814,1137,6915,43452,11790,41065,46753,26609,97464,19740,27920,40313,8654,47572,81689,41354,76474,58539,62271,44384,92223,22967,37977,39747,13304,73648,20947,19926,21399,54420,15525,69771,81041,53698,20747,73619,72394,82403,61548,91733,36862,90435,32689,14859,87015,19222,58315,12724,99048,5162,17774,49932,61973,49725,62506,22343,2616,76832,90194,47499,57795,76663,74962,23834,58633,2603,64457,88050,74290,45067,44536,11143,48493,9030,23231,17688,81794,39336,88785,51593,19594,14546,87675,25839,83129,42035,38319,93246,10889,46493,79012,64848,2092,81509,77375,54754,22160,47870,33124,25169,67296,36733,68215,3073,64175,41756,21874,6818,85119,31301,91818,40192,34487,58843,56748,44240,5303,35762,98304,99584,86075,60043,86431,10518,43637,72257,4113,17125,81812,1400,96291,75190,2519,39677,49896,26168,61620,49810,50908,4717,91133,88068,35427,91339,49171,43993,48077,67186,55642,53372,92705,44844,17347,45383,94257,34287,72402,25285,64072,36916,34954,10920,58966,88056,11115,87324,41762,66689,14880,3016,16847,99886,14032,31846,9754,23214,48941,8515,20186,74962,31057,11454,9885,1265,94780,1763,96535,44714,80932,9226,20480,76383,82068,60394,82707,43372,87155,13872,80291,15988,7362,92295,18182,26152,97789,10638,24324,12020,94144,12005,504,56751,89864,30550,27554
,6554,93976,10409,50712,41997,81980,69381,25250,62397,25861,92831,64921,79359,92573,24388,61501,37758,95888,88626,77570,8031,58002,24239,66531,828,10428,23050,25916,34507,18861,71041,75424,24818,91747,16255,92127,99834,9933,24668,24517,42128,99128,29848,21290,51730,83270,77636,38892,78847,54039,66409,83965,37409,28608,55906,32893,45189,96397,31559,69541,916,85832,89592,32987,36891,98868,21138,8475,8077,8230,64740,45119,96065,7918,81245,99045,56374,10480,88348,93862,58450,1573,97553,88409,11565,41734,77089,47380,91499,89271,79365,88893,8635,74607,34399,95309,41821,59086,46939,97281,62974,48555,65497,39354,64106,41088,71555,97645,24942,29900,76970,22355,5597,90554,46255,53705,60942,60509,42516,52854,60769,60869,59268,20013,18624,96713,98377,50537,54998,3209,18276,41514,49999,82863,19581,52353,47935,1829,56936,86740,22647,19797,77718,24106,28442,12432,2149,51288,61904,19648,35507,44213,25921,2842,65186,51411,20751,39784,72002,36395,29851,24897,63772,79538,94861,65423,56736,57031,20949,63877,87267,7833,46317,13896,8770,42374,92540,62483,79668,37922,18544,30822,67996,64780,28365,54572,64292,1700,40372,82774,20789,44379,82636,40713,89661,81066,56230,80168,15971,78528,16965,69421,36949,44709,94996,48937,90245,16795,48440,74099,62066,8274,2322,58600,48498,54658,32259,27454,42756,90215,30598,40965,83083,62415,36770,72745,44664,75658,43393,91532,25478,16664,42184,82927,75010,30006,92813,77705,79874,31067,32650,64041,73833,61572,2543,41086,66973,12529,50351,98330,19446,81738,42160,10658,69368,64349,60646,78984,95398,34867,8773,58915,51063,25321,36204,93312,18755,47682,7737,62095,32321,29273,82978,401,42889,46684,81351,63250,73647,19153,35751,41948,63531,41179,79269,70296,26223,84652,24668,78327,79310,15777,38748,96131,7939,96403,25552,68617,66078,37023,32536,49285,55443,91836,1431,39083,34209,26771,18883,38811,3314,3778,85412,84409,99926,55237,10317,48318,60941,19349,58548,82037,50073,72780,20576,77837,58215,99414,41169,14123,35856,59762,36170,25924,92576,79501,59420,37325,213
52,76872,16848,42311,2223,16242,6872,42800,72428,62535,48343,18580,41323,94438,49156,72107,3338,82430,56920,84716,42453,51844,23331,42810,80358,99685,4527,84361,37082,42470,74137,99155,74519,5162,67071,7322,94736,94015,93542,17285,65101,8373,8900,18339,19867,13403,68522,21415,93567,30804,12041,98915,47228,46804,44324,10638,54086,45033,98416,9120,64548,52503,71449,8147,68771,54573,84125,51904,91041,20315,71841,11728,27166,58786,20849,15106,99978,34178,29806,31361,7408,84510,89591,73097,3176,6880,84267,91395,29560,62744,5557,5529,28705,99422,30496,20006,18046,65613,41873,42330,50794,99086,83143,87853,51880,2524,66880,37964,64518,3082,87772,93683,91374,54387,72094,8908,5260,26231,77290,4720,27373,30082,76381,64418,938,45795,91990,10568,76230,48250,66736,21520,52060,27890,19957,70285,964,82272,97551,81631,36886,34819,72911,49061,66963,50911,20954,26423,98851,79642,33591,43842,25661,42869,34124,51464,15620,85258,37406,59199,3778,58399,79085,85382,88061,67084,53175,26817,24227,14771,5461,50785,13990,97363,26947,26234,99584,74739,96431,97947,39140,90407,88370,28095,77760,39764,47309,60789,7127,84913,82067,30122,74298,33074,28418,66991,3122,40620,1621,28368,23714,43770,27048,65483,94365,50931,94368,42908,60016,51217,78881,98810,21661,64446,62938,73510,78216,37401,87429,32574,10181,80789,14059,36,4090,81170,6558,1470,35945,2,39774,58335,49994,95519,74464,20905,12745,7018,48242,28221,84643,25007,55466,97236,48867,9975,36698,54721,85673,2936,38990,4587,15739,39591,18592,34436,59891,26826,15772,83455,97552,89369,3981,62457,22883,1881,14757,11285,95003,30777,59670,75993,91085,73066,35801,35399,2957,92128,73824,62879,8295,95871,90676,20093,28243,8541,52107,88870,49470,3919,4871,65199,56688,61041,54058,22941,1740,75598,5772,86064,8259,36809,93049,36906,94224,62806,26569,35034,3200,87982,35647,24651,30779,90398,58719,256,85221,5784,73822,57062,73262,92137,64033,30346,93792,97325,6199,37555,47715,11589,78388,48143,59803,61929,71381,21483,48925,86325,48664,95185,72224,70880,19097,420
37,96364,83797,45455,53347,22802,76050,52663,48774,61275,45540,38332,19351,88167,98522,64817,88567,71077,53455,47230,56586,11331,78926,33188,18041,43622,47012,84603,61153,59084,98790,70676,89174,21533,5605,45018,66369,81929,19013,78485,44781,70807,82929,97622,20897,75196,18288,26977,90862,51951,10715,54917,14665,65718,71303,5503,68101,13608,75428,530,33387,72149,87116,21588,96956,29725,89655,93029,43896,64118,15326,42663,95926,48933,71887,51769,68567,86101,51486,28715,75632,46024,98282,53478,90152,98510,13950,29598,83948,84398,59462,20409,90957,98740,24685,93875,79,66128,14130,64908,18921,9465,91087,40181,31698,99065,53231,39697,27427,71037,83657,12660,25036,3081,2126,4077,38770,15870,27144,24342,89193,9351,8785,33219,59324,15592,55302,46861,86716,52823,1386,91465,78324,39350,44980,50823,49141,27322,39818,56168,81168,77292,62711,83260,80584,77770,5489,89014,64401,4132,65536,41895,46072,4069,33526,91834,62076,50300,98687,70651,12846,55405,90256,76956,81296,46014,89765,64266,3582,49804,5069,63891,99491,11381,62243,33051,71269,12066,10875,32164,94933,93797,82746,12322,59597,49788,98180,44222,31638,79117,61510,4502,42320,40608,72434,92280,59140,28385,80442,53820,12446,1350,89631,16828,77782,88506,3701,66362,58253,9058,65808,23755,60710,36011,91375,74063,50880,70211,85683,94940,6386,33188,76764,15424,91517,67730,64370,22796,59209,38456,21332,5219,38049,78791,43440,55933,14376,90022,20290,10070,26100,63196,63734,32643,747,50765,97588,85705,23152,49378,5751,39621,23944,29285,44574,89527,76490,64665,29981,93563,58284,91853,15217,13309,21553,18246,17302,97040,79326,73764,3104,57423,65031,48061,91609,56358,2692,92853,68836,90940,48273,83209,71236,13937,54870,62084,4396,36557,57186,73045,92019,55346,8774,8543,11436,34443,87124,46877,62211,52603,66469,31881,18026,15862,3161,21234,11322,74767,24484,87116,25758,58472,86490,71555,48932,73056,34473,62637,8759,86018,24316,76300,54167,15185,73357,5211,30442,26536,19319,39367,17461,97864,91944,51212,1468,48741,7864,66873,1595,14779,68
756,87283,34851,76753,69537,1161,39760,17965,41882,31940,62966,27992,24729,87037,62228,62689,62749,28066,12983,22600,50838,72488,83021,82187,70963,9296,19152,87436,16641,66830,19059,99683,8913,62612,67291,9515,74208,21592,32384,82175,98752,54200,82002,47412,29537,34403,53267,7822,98615,56607,5625,42007,51725,3627,25246,88036,95224,48980,51220,58960,79487,29455,98122,16689,62832,80596,66788,53285,84678,99016,83276,20616,31633,78510,68323,37700,65161,9838,56721,78620,4052,54699,8695,73622,11121,92337,19080,84070,5237,48434,36358,51547,36810,60884,85219,44759,39780,70162,79338,42873,40389,61593,67036,63936,50607,99277,90237,77275,91128,73425,89416,79326,78557,86640,75234,17992,61516,44182,11926,90627,64183,44881,69828,67764,80789,82755,8249,32977,82461,35156,35554,37873,77914,67308,29864,41033,42489,31381,74189,25070,45681,40154,25146,43909,67853,88618,53942,36507,34592,78350,55555,87777,86392,24688,80355,18256,90222,38669,24900,90061,89913,47381,93405,71143,72471,42093,26800,60646,76252,68173,22730,15364,94974,94732,38203,83702,32379,26966,78399,83027,44221,23997,84867,65137,49152,32196,51332,63484,93985,35487,74474,60173,70440,44748,19182,33388,79428,9444,33101,16310,49357,72180,51522,429,8723,24764,33399,64888,59607,72175,76492,36319,66994,74378,55115,26109,34826,6684,56061,82328,64168,78024,14167,62439,57302,50710,41045,15656,18315,23738,16111,47361,99632,7144,5379,9212,36354,14086,71892,70153,35182,16702,12569,97390,90011,55479,26856,5235,86547,74262,7849,1292,14606,44251,81837,94316,36330,37250,56336,31547,30211,28894,81044,79124,74792,98531,34962,72433,46341,30981,19885,3009,72576,76562,41513,99923,28820,22524,50237,26627,71004,80780,2286,21024,11951,74874,97054,80496,35354,63735,16327,12542,32270,53268,28964,31723,60229,79903,21712,22747,28881,36631,14076,416,31297,87971,40435,57042,75975,78607,50686,58480,27970,30433,13156,10748,53548,93379,58214,58003,19712,96884,50394,80617,98395,49829,3019,70945,35616,26887,88253,64425,57233,68674,44865,42216,2716,65410,665
27,80693,50843,57959,79503,26932,48885,18942,92204,10140,78769,91611,89559,49502,22071,52954,27150,28565,41185,46304,53098,35933,32607,93708,38100,30264,49735,78701,61567,3182,89980,73835,31992,84604,91434,8480,59277,93464,2868,50970,74929,45244,47124,49935,2687,64351,57186,94920,58370,55593,11726,57326,33414,45644,29958,35275,48051,34061,62722,20723,22092,28723,76742,84543,67787,78526,69591,25653,4758,49997,47786,6289,99708,29961,11793,92682,20809,12109,80669,91137,72460,6563,32820,10389,68847,33443,59389,19534,38425,18565,57134,36506,24383,30028,53670,40842,21841,68225,8753,30385,12095,81389,6835,75054,85803,97137,23062,37781,71820,61637,55479,22011,30966,2253,25610,78989,93393,10099,8832,13306,35265,23427,69987,16271,60626,72541,99728,48357,94793,23452,20018,69948,24486,32198,1147,42662,22518,35696,67273,2716,65656,66128,7452,51617,17689,21890,6337,61482,20436,17340,31710,79819,48129,75218,36606,60609,32217,59631,22957,93089,14402,93511,32535,21246,83904,79432,98896,5541,8792,49470,17814,76,37645,60395,69324,64220,88805,98863,51776,72131,70972,95190,52940,68820,61161,84850,68113,46392,79367,75691,94157,98218,53111,98518,32956,68613,53436,21876,65950,65501,16547,4774,27118,9623,78617,48170,39118,15144,79220,54056,50145,49276,39259,48972,3130,41229,63862,26045,98345,2103,28256,57087,83684,98350,19290,58975,77389,99452,98203,18330,85076,39581,51952,32047,18654,2147,29763,57545,33151,63803,52330,61126,93440,60976,48703,26830,60726,17094,54500,6731,32161,19076,83890,13276,38722,49026,6963,45948,60961,50667,85647,56805,10240,78413,38529,46501,67183,72301,62536,5312,62844,8881,46201,43445,25360,40678,66417,21751,1450,13278,19129,97562,93206,73609,35339,7850,3236,66783,28665,82664,68301,48663,3165,58768,52877,43784,80338,60332,45473,81497,42801,37346,48231,6674,67764,76759,16161,57462,99418,63900,56808,67368,4740,87336,71827,95508,98165,74233,66316,8820,20098,49536,43274,86589,93621,57654,95873,94706,83258,35011,18869,18640,99892,25734,12215,80855,50318,36145,51695,28867
,5491,5769,39704,1588,17425,25493,54838,74619,25384,54735,19877,55957,71704,74474,33443,27606,75,8681,21417,4911,34801,95321,63264,8663,28718,61382,21707,88161,64682,14974,10482,87742,60242,44924,80845,95411,48775,61464,33828,75635,73213,71818,97271,43333,82776,87156,42800,69037,20395,87524,89386,93922,77304,68164,89470,36565,77838,32152,29649,2914,56869,63360,35677,43012,66695,39397,65389,94933,53047,6723,77555,39367,33770,13959,19395,72747,29312,28699,32603,60863,20088,12924,91408,71069,66302,47579,54087,97325,52410,54958,74262,43413,39202,10634,31785,21293,81666,63069,86886,8693,41151,7459,57099,53206,56646,20192,54294,62020,4011,43750,48547,15497,40372,30892,63770,64470,51950,77126,28054,53126,56375,30817,37548,31882,80029,21614,44055,20866,78379,42880,77785,49395,10864,13194,10761,10789,96657,1113,19667,70828,56170,64573,12100,91408,84931,19909,6732,274,61763,23275,4285,17496,38198,48073,88964,89872,91749,46826,87302,74973,11472,5300,21656,40602,43273,13104,5105,76398,10794,5180,80526,28,25199,22274,51232,64273,14096,35040,77775,38199,80667,68956,9396,80313,37283,18169,8513,60600,79452,34077,20630,84341,91743,67488,12599,65067,79494,29333,90313,37185,4199,51563,44389,87165,21340,91084,61953,59312,47023,66700,40161,38490,85655,92492,52573,54785,30819,40544,87328,42810,27597,31547,60462,19494,92175,20345,4824,10415,7701,77029,15430,19357,14114,58841,16530,19573,94735,9627,37025,43612,90142,41937,75927,31360,94194,63124,57061,53724,16072,84452,87592,88034,72368,95602,77028,39656,92945,27496,62027,86008,4435,47738,20517,51223,25099,45232,8659,6412,4113,33054,51061,24454,65224,60753,86289,52100,92949,42413,58932,17420,42849,97686,92719,55359,86299,41260,98503,74262,91732,24113,976,21520,32847,54500,12219,63819,43851,19421,5509,18395,31461,67212,79158,64928,36420,26511,94475,23661,54772,5040,1210,82777,47282,93241,44953,80594,48893,44195,359,13710,63282,65678,26439,27665,19374,43584,89176,86333,96541,97525,66189,35842,69583,51209,69303,77397,80277,80246,25777,57009,9
5850,5168,59632,94255,77461,71713,38037,70621,99061,59827,5835,37289,43596,2349,53391,77244,82188,8232,62196,54690,60758,79792,74960,30271,41312,77450,29216,58887,34695,39359,85467,5074,30078,87613,75965,33311,51749,34783,562,69566,30045,5315,29072,58885,31324,52033,295,56363,6261,9976,35866,70158,71637,54625,2143,65766,96044,83889,40135,78676,86942,6711,65096,76045,27973,70005,86677,72399,37691,15188,30781,50991,73500,95122,43069,23231,71443,85997,26249,1863,89796,56296,10444,43094,84784,32191,62897,2712,13037,17024,98227,95816,33842,22155,86175,38315,88056,33698,54471,21216,55896,26751,95573,21484,47374,12794,37851,40566,71083,90644,71917,83758,8991,9992,20391,15918,437,31399,8649,99412,35397,23459,14860,59152,44241,15918,65597,70071,60988,66949,16004,63078,87556,80527,36478,10915,46696,12063,46300,23121,50841,24812,71475,32805,95069,2390,81310,11328,8315,21272,19112,44840,21017,68431,90379,70099,25451,62743,99263,82782,46057,3905,44589,51024,99190,56676,13576,67164,52752,27835,21958,24923,81406,83557,63364,76855,98008,91404,8769,97603,29721,86118,64799,73068,60012,50761,42234,84218,48349,26099,5501,4769,59364,24653,488,74835,46586,2245,87108,48830,85029,44398,54465,98269,975,41310,60750,11171,17575,12580,77942,68225,73586,15270,77711,17796,62054,64795,72560,72300,95146,99823,44657,15697,81913,4258,83635,41559,32249,40903,70397,55790,38464,5104,13969,7198,39349,87140,17947,91214,33854,49753,20059,36788,44557,54006,9752,69595,47683,9846,65312,40421,96814,39299,53330,2980,3428,37376,42960,24219,93298,12200,18013,68807,79025,78584,13124,47518,14861,12473,34630,43750,41722,16218,23937,75552,27828,76314,10334,30346,6433,36244,40828,99698,72117,98939,70951,10442,93430,71951,75676,90503,71574,38956,72400,86233,39000,13165,52908,98606,5340,40881,55073,12186,86232,43682,35265,97137,6823,17943,9011,12363,77111,526,14003,62519,5574,61503,47334,53214,71037,14011,34641,20816,77445,73557,32451,10750,43787,22070,24266,84999,74582,91842,13615,82686,2208,61646,28629,95519,92821,16
602,22200,96714,89450,66806,31273,53466,693,89314,68548,35824,75589,95664,24583,64573,71771,31184,7782,56325,27658,80381,87410,50914,25228,69178,38366,45124,17854,53022,77434,53942,75804,37705,35306,32641,80791,16338,92656,55080,33424,84979,79333,65232,191,15023,53156,77430,25057,97883,33153,90611,16803,75579,24165,83994,76049,15726,87953,32609,17361,93202,5270,30259,32768,97633,6104,95847,9464,89663,51737,17428,28592,36248,14761,75311,370,85440,27552,94010,14520,91256,1804,54180,14019,44743,27045,58599,18925,19266,94938,84930,16154,48334,42614,53888,49641,86445,68009,89171,36054,58119,86917,26418,83238,19126,27104,51015,46292,24642,87276,86810,31821,64214,64083,35901,57338,6414,33045,401,56793,60763,20789,8049,7811,98085,60129,68719,37890,26718,54186,14473,37646,18442,37152,36613,23208,55857,1805,62028,99960,69415,8559,76872,59563,45593,39741,84444,29402,58897,26336,90235,36126,62500,51197,35343,21032,62961,28910,57427,17497,2248,30727,84571,63689,50408,28689,72319,66808,84134,18966,14395,35085,94535,48606,68424,33399,90428,12955,35627,86284,67697,40661,97833,52916,2661,8254,74339,14041,31618,37069,33044,32021,99319,87615,72871,80426,75290,69458,23980,82251,65148,69249,442,19543,2566,42910,90393,72598,55015,74443,44735,192,63049,25352,24560,14217,96026,40153,94488,55686,32922,91230,35644,76834,26145,82425,52685,87797,78850,74084,73928,46778,12595,49798,61847,81871,25726,32098,42562,6004,44060,46778,61786,99475,31611,69795,43704,64639,29830,56469,73720,2154,90405,9997,13526,74077,17957,30318,53836,5752,26865,61222,54049,70121,96812,9517,13450,89255,71902,57840,94167,17790,28932,96735,28120,28636,48612,12696,4800,33615,1534,77005,53627,15766,36755,84282,77153,58962,27406,12253,44672,31026,80187,15569,12257,32810,78424,70372,97835,82459,99735,31616,5790,62392,90309,36318,70920,21894,86154,99265,91209,95508,834,64167,28359,53977,43352,13120,11628,66022,76400,30059,75455,36395,13,55982,91276,45739,46780,98741,59292,72914,91219,21535,3526,96850,12781,82210,74935,2800,874
65,36079,25238,22554,20573,30700,96663,5926,96003,23514,25672,27404,25906,17384,38667,60406,8032,86726,58913,29574,81088,17632,4128,15451,95125,94905,26761,48057,82200,99168,99758,39269,90015,56600,20176,90167,8293,20850,62093,49557,1000,22463,49092,4605,69866,22951,89711,55097,14225,11119,20649,27760,9597,20151,54402,14214,56967,30050,91710,21783,34884,93546,68192,93550,72745,88755,69582,77554,76759,43261,59383,20631,77893,69842,86576,74357,28258,58860,55691,22563,80079,58337,36593,41384,34109,79467,28069,96131,2626,25726,53192,38737,49221,70183,60932,11931,76206,15119,91032,16618,7051,30113,96601,91786,69933,14833,73409,62826,51722,85152,76109,93383,96681,17724,6925,67890,23566,70652,30383,16572,31521,6653,66640,79548,71260,9722,31654,70649,72389,10954,44449,68823,70460,12261,34034,34229,43526,71947,17850,23372,5770,56063,64292,26910,18313,28903,91590,83395,11716,67890,19376,81900,57980,9364,55329,94493,83081,71661,10507,96698,99634,42185,60411,21579,89305,36217,44261,3927,4340,1512,3582,87148,90556,3173,94777,3978,10963,42271,21617,99295,97282,83499,76788,10626,10831,90172,24959,63017,12880,57538,70652,84876,48699,7504,82687,98104,79282,3388,94271,19632,46313,41006,49683,88104,15094,66063,84622,37993,98636,38292,34185,20877,11147,71536,19392,78689,87575,75548,70781,62493,37642,64375,68449,10041,19737,23033,37877,65722,81482,5566,41172,49735,61457,5294,66473,56976,61252,3441,5787,78540,6404,367,86775,34248,32193,49532,45266,83450,44760,11625,47379,94883,67619,36056,69473,19656,53109,49509,73794,16115,30908,54130,19566,35313,67068,29865,42159,48440,86682,6423,48000,40649,22314,75606,49195,95631,5489,72423,93624,5162,81923,61574,6952,53312,27253,31509,53764,78919,80506,73173,60297,81015,46541,2228,81842,83394,68921,24218,16485,8457,7958,35103,6159,16045,51338,33137,69282,16288,72849,69376,96798,652,34290,81996,96114,33904,84382,23259,71759,62082,65333,32639,67590,25410,49557,94134,24601,69249,84901,8411,52391,35102,31750,20298,88212,8590,18547,91311,52440,81144,8885
5,38687,95157,22397,68384,74898,84566,87753,87423,95755,40305,53431,39006,20788,2715,31814,55024,99154,93978,17107,48269,94667,95058,69249,56478,28033,13644,40578,33011,10770,60551,5345,51143,38224,82790,51103,20536,46489,98343,48978,99308,38092,5547,24457,52859,30026,94520,88573,48372,44556,71234,89547,24505,38514,25123,95584,3050,87644,11090,61582,49450,40335,53896,36673,38090,67238,75460,69515,66220,2949,29605,95785,14283,48284,31647,72888,91411,65664,67994,23391,36102,43458,95093,17597,61038,79967,91383,8065,88301,90212,44556,4921,37972,90024,83766,7540,38192,31303,26823,28330,27386,46539,25472,68972,33780,76956,41298,92915,16015,51737,95480,29083,18536,13393,11805,12397,93233,78625,59605,68972,61139,94536,16224,71353,6335,79490,26034,31601,77768,11983,82690,88970,25951,64564,28415,17657,53084,84415,42339,29586,5140,53301,4556,94813,10072,10993,98310,80200,99793,66177,65100,99405,42919,51032,12493,60518,65626,53373,72424,94410,68744,1099,1399,24226,27356,3483,3854,76821,13992,43856,54398,14682,72056,16766,78276,68665,1720,32033,90230,49453,43759,61902,83472,92123,65189,34589,95586,37179,78129,15690,94999,54462,403,81404,89145,28011,51349,48223,79921,45324,17554,7595,9995,40794,79128,9295,16145,68939,83793,75371,37597,14268,70678,38305,29602,30324,52036,69454,62567,44952,3458,76882,18258,99159,53383,25521,22361,94760,20668,28258,79505,78901,66304,19384,9634,31476,59152,36048,43935,44263,54824,10060,12089,33443,4782,91150,97782,95778,38529,17047,68116,70269,53379,97930,5617,48082,82639,2786,44236,31150,64537,51839,56187,57065,97486,6077,68071,90923,12581,15272,97825,16881,55905,2356,5526,25289,7260,68192,26632,47360,51196,52145,79617,11585,99974,3575,50632,20185,89773,59721,9527,41993,97670,27900,13101,23005,28653,33296,44635,46635,6823,56334,66726,72128,24040,57179,74979,14258,63621,82101,63606,55637,73581,84431,18691,81420,27638,68747,34575,38064,98260,5288,15182,31733,39847,72684,650,69182,58646,25438,44907,39118,31120,62507,87440,72128,96556,69286,17891,53407,
6046,27783,30638,84560,49061,80545,94878,16271,7000,13432,87429,48571,59184,90585,90960,2367,79388,6225,66506,48267,73986,76133,92740,8499,37926,37456,75247,23827,2068,93671,59390,33456,76402,21706,52804,99120,78362,92559,75077,81760,82047,55147,81853,75067,49266,86085,38852,72444,84116,79255,99549,78684,97783,95651,88593,88904,99757,59796,11752,68448,38739,19030,70159,47997,75928,50345,83464,74549,32533,19259,43301,74101,13494,38589,2789,65594,40580,74644,81106,25488,22954,24953,14203,24243,29233,38592,18520,56345,52981,73914,73381,7180,53454,81556,91709,3010,73127,61995,20317,52166,83918,15430,59924,54750,4001,84140,57933,22796,98210,28300,40568,73618,94949,29899,68157,79526,28888,42701,87838,86841,78958,47423,85222,31977,68185,18157,86149,73552,99224,2294,37317,403,45179,53409,86978,15441,18673,91114,74222,79955,56905,44741,3925,71992,19300,53246,77400,47019,71369,88678,19217,56905,86628,34871,12255,72171,50127,75093,49585,45183,53337,82622,9543,28395,1958,73636,63168,55831,12879,88082,89873,19100,24942,69273,41117,74909,44152,98360,15754,33022,25172,35003,52255,70972,19524,31537,77020,40730,66720,35639,82057,5317,3861,21528,62708,98610,29326,129,854,87519,67358,56918,78695,51207,51025,8206,7229,15308,50750,66048,26485,9838,10645,87598,25341,80202,43889,3341,6281,8680,49178,69182,7339,80225,84973,36602,10500,38409,82558,79500,9590,96696,42126,7121,59394,53450,23225,82515,68395,25677,76345,58107,84158,53889,82180,18829,6593,95680,48282,28903,83559,36674,35406,2205,64916,57757,58025,78491,36897,51215,21275,7627,93627,58855,18315,95162,52851,53956,88001,17044,78160,17983,45161,2551,72806,12068,97762,64500,95026,81972,62147,50394,6252,21099,74111,91657,78879,8755,34544,2980,52073,68680,88426,69932,34446,8803,25595,26010,26564,59490,30424,11690,17389,41183,46908,93706,22446,26558,45186,52601,14250,61236,71394,10375,60851,1395,26859,21532,88872,13325,10302,79027,21415,61,49786,14762,28688,98349,55300,31192,67279,87771,41649,17349,61307,64979,71581,75162,45216,49281,158
95,24955,28607,58654,9744,87540,71459,38893,40252,34682,77668,6447,95157,22112,75866,42340,6264,18462,51851,38538,66882,5637,12389,56513,36097,37325,6934,31000,59211,26856,89398,32785,13319,52214,61657,70017,14193,18583,60714,97114,65492,87462,67292,22324,24465,9854,1837,21166,83764,12094,64432,77201,70287,23907,13414,82349,25348,34223,68083,55033,22495,75042,71582,44583,96279,64397,95844,90018,79495,57401,30654,99337,55840,30361,70695,31855,15072,67542,23229,71240,89330,41739,53534,3822,44365,30004,75029,98340,58936,96835,3004,64596,92120,77151,71382,94265,83448,56190,52794,89227,15597,59451,62695,91766,8008,52778,65073,30927,53096,67932,19208,2027,47853,296,49299,70861,18826,11057,86365,22406,23652,29349,82468,45784,42978,88328,20869,11297,51559,88965,86450,47978,1945,43916,70658,58627,13750,90014,34723,53076,74637,19706,96009,9551,68530,85368,22835,9536,74793,18186,42912,74432,52158,59864,98955,77880,5169,73077,77416,70027,89374,35196,30831,56356,81716,79818,71023,37855,71661,2119,20754,16630,4953,43370,4445,79321,90675,95635,30121,86811,71079,56774,94266,78411,93946,90625,32870,66501,38256,84984,17038,39078,79377,44266,96394,79919,67136,72554,82512,8654,84840,11012,37168,45454,59227,82060,71525,66524,60016,98458,4693,91132,85831,57856,12015,46785,23468,30839,71300,52646,58666,51184,62601,25689,70684,86079,24480,3437,80686,59246,36032,49867,10223,80482,8321,94295,82290,86992,60478,64868,80011,69757,64550,50394,71038,19650,81742,75314,57209,66297,65173,94181,20174,16973,41959,67649,75603,25711,64738,98274,5005,84320,69749,41715,89334,68733,86,52245,84352,24411,89436,81947,31128,48324,6799,91161,35302,3919,86976,47233,99034,51139,49931,95380,81205,43500,89865,59853,74606,5023,4016,88213,16362,22010,36648,71011,23057,97842,44910,49091,35170,32927,23928,68628,51535,60384,77804,51652,25705,18397,46272,80136,38557,24339,83664,32326,83093,17446,89592,69823,96910,75505,35292,51300,29124,25503,71590,75052,13514,44589,53072,21552,34175,10435,89715,89199,42976,24459,69867,9
9812,44288,94690,63348,23318,45004,50998,21979,75425,22526,52908,77218,29053,91043,29134,45001,69435,79684,89973,48504,13399,29302,66124,13922,73392,93142,51278,48396,47947,3364,84650,64178,68018,34453,50631,73228,42517,2049,79939,42001,99089,63933,89143,37658,34303,29388,997,25449,40190,82330,81682,15726,88530,67150,75624,9767,14837,63075,78513,63472,21603,6146,26103,68360,92365,43728,76281,30936,35276,4941,11274,47740,27550,51712,40316,11562,42631,38490,59465,14094,42831,42961,26516,14054,28390,95543,22743,47987,81184,28111,54983,62372,95400,68751,12764,31215,55870,58633,41592,6003,2105,81366,34833,19620,91374,84321,14301,37157,11422,4613,46382,23243,59852,50192,80704,69365,15993,91237,77560,68646,61983,73596,7883,79308,96011,17254,99322,62655,882,35610,47316,17010,30804,67405,69047,73113,45655,52717,21338,17938,43371,73061,56119,54537,7651,42527,76490,66381,54463,98867,58954,70637,57961,36640,39145,21967,18117,64581,92551,44288,66875,44209,97099,87729,36650,70956,82861,44504,96062,77113,55308,47673,41593,93412,80433,37754,23302,65803,16895,39917,80529,15553,10877,90857,56113,43555,3329,17843,821,5165,5620,20440,40219,48825,83777,30091,76852,84849,30421,9536,30989,17188,84234,15170,42727,41600,9076,27309,731,42465,44556,62984,4429,68392,26751,83024,82970,55796,72817,96752,23875,89391,13156,37213,39750,37273,68133,68786,80860,50210,50571,76866,81260,93945,64308,18884,65482,90487,29110,55453,58852,83536,57812,94289,38361,84299,54170,19623,97898,835,17005,48011,67544,96830,92234,21982,48344,37738,27414,9122,68922,28632,22659,92743,71220,13876,96607,17528,24960,8247,16554,26239,79506,18772,84561,929,82171,39960,81798,12138,7732,49435,96770,1064,11777,68104,32080,54444,34393,64165,20992,26734,94051,72450,60711,45221,3176,69798,21635,61822,52957,67985,92559,11416,87184,59773,23193,23681,45887,62925,60852,42055,34124,97889,47604,73668,82279,34817,63418,17783,49665,20285,74871,4286,42656,18671,81083,11828,66969,87790,97243,59451,50124,86204,72588,31601,95625,13403,51446,6
960,72008,23810,94148,57611,15851,84597,61443,23010,8300,59922,90110,28190,25995,58052,94005,95461,28479,73989,85905,76357,24399,3837,42707,23828,22616,20342,75583,74302,80585,28063,97124,53994,22439,13214,96051,55981,42855,86147,76701,9878,83933,39741,30969,72501,72176,18858,30993,21832,42612,85632,47670,92075,32050,75708,70801,26633,41970,72869,40379,34968,54772,16012,64108,95000,84490,60690,96616,97474,64632,28688,21177,51523,37991,32393,2564,74848,43598,34215,33279,75709,65290,80113,93781,95032,15268,23654,58567,79117,76319,57514,353,24484,74848,22850,57691,9160,21246,7267,43400,32308,87389,78106,61409,99582,69075,65235,94210,14748,28081,64269,92789,42845,18125,75967,73180,68020,31855,17127,19562,3681,88503,98665,12918,17658,47751,52053,95579,95173,7116,71825,24700,22711,4936,5829,34775,99746,72642,64088,79506,89918,61028,67978,65990,66312,74570,57459,45786,89601,27602,90875,96991,94640,92400,48785,87471,67415,46841,24204,31038,83549,70123,51688,7980,98384,5301,26476,2273,36456,39598,30971,50837,84595,10750,75061,75216,42040,14350,60322,8705,45468,21366,27532,95415,74779,12265,2994,19558,84268,75654,21952,95652,76924,98210,45646,87033,3150,12121,39688,26063,62649,36332,85689,96328,67028,25278,7431,80227,25734,644,75570,54472,61617,10833,27580,90017,77790,65519,12181,79784,15096,19030,52143,6347,83007,19783,95913,19896,15147,27165,20632,15477,90019,91804,60339,53755,61153,98653,86532,18303,7013,15096,79409,97783,88269,11697,15196,14774,50146,52783,13698,16975,23686,5486,15855,80939,94591,4184,58523,17825,94765,44950,86733,97332,69358,99609,57362,32595,25946,81655,97882,95641,28062,4853,38485,74958,25225,56551,49845,44213,27316,70275,1758,86552,23441,59683,85279,87182,39514,68288,13583,19613,36969,23087,13168,11810,37284,2977,73189,89528,51746,81966,68789,18431,44192,1146,75554,87153,42814,17647,19678,78162,37581,49228,16884,2244,77517,25383,91833,48068,45872,94579,49873,8654,97271,96467,85873,163,28131,93805,87419,74563,78146,45252,16804,91038,76476,65085,2240,12
737,29998,59472,15552,18597,36106,25003,55557,47057,72050,42814,70732,48063,50774,40232,99952,17084,27789,3545,33995,9824,97341,20470,3362,18461,63158,7054,55373,16137,47425,80512,21228,62825,30253,73077,64862,36429,7905,21805,78521,81190,8064,58080,94220,94575,11191,56522,55863,77439,2934,54047,29911,17038,16579,36884,69606,85210,14030,44878,41870,34218,20788,89713,82803,84154,994,29077,91908,93105,37416,4245,32729,7688,16344,2210,80759,47604,53155,65063,73981,83865,59176,48309,55469,40405,81646,8505,48614,58041,77349,88745,45618,48755,37362,55728,22608,32257,32936,62859,13950,17939,32913,60522,56910,359,14492,5784,72738,7786,15534,67395,16075,5077,27161,3493,96559,54077,72594,30128,13157,58895,52251,36199,11794,64093,21218,55550,67670,5587,71353,1721,67745,12771,95259,87844,48005,58689,14078,19416,77693,3996,94345,14180,44921,92315,7970,60747,11068,25623,66633,5565,53555,59371,13326,32310,7063,60549,93405,30041,43535,10448,6266,37713,47333,69560,36117,64492,39353,26149,52510,22936,14970,92284,25763,3250,2285,74073,57029,23820,45736,69690,69223,88653,46575,58566,55825,92198,56294,78140,33261,65035,3891,26813,15794,73566,717,43747,26079,87956,26263,79556,52767,20524,57604,33322,57341,51594,57254,19519,10346,46300,76697,8399,68763,31530,18118,91682,62260,2488,4206,7548,34866,52820,54370,55780,13130,40712,43298,82161,77243,3269,98972,8889,15268,6630,58581,77138,63656,87365,61258,16296,33072,4896,2576,98176,85866,34432,76752,30927,71288,72357,8906,93704,73784,99260,91742,92735,82717,91284,35332,75258,397,41294,32676,87519,92764,8622,41507,22561,5544,33492,80028,79804,65777,4968,77287,39011,84750,78373,65131,51774,79444,69362,64596,83740,29319,84939,8536,26040,60719,77375,57543,89805,15766,54340,50099,79035,72948,98292,38354,13377,53856,50071,13052,19457,28349,63565,37573,6437,63573,62901,74845,34888,32538,30579,22833,99903,42248,57265,23989,27648,96596,90294,67553,87784,77461,78004,37894,76398,74510,14935,80026,58468,1922,8782,93257,34618,21646,56465,86550,18294,3449,2
8095,61246,6683,67577,60391,70852,22483,13854,11669,53837,7199,20757,86607,87210,86412,3596,82684,86566,82284,34013,12463,71533,10970,30830,48415,89699,91989,98426,80637,74916,67235,78050,66121,76640,58797,19216,14675,10945,58256,2938,95808,70452,66015,82046,77025,37981,86703,66614,4003,9255,71647,52298,34084,39861,70138,18120,21066,99331,15761,59152,46669,68413,67259,96659,30558,74854,57719,66434,48995,57366,1512,70661,22620,8090,3368,34014,70792,6138,54892,62169,41970,62034,87371,12282,23571,24762,49841,73850,11158,39264,78078,52853,11994,84929,87554,11015,20688,66965,65495,88388,77389,35444,21673,78631,87873,55506,73360,66151,19719,60734,95186,98587,51394,75838,87855,62540,87109,83282,6557,35014,5654,77181,46484,30060,93505,15456,33902,11592,76150,55512,14745,46480,55988,5770,64797,72332,84192,32521,23204,63899,37240,88368,90562,72511,73163,29378,43688,94748,49465,46406,14026,60325,76158,86329,87822,84041,52188,23820,38385,83886,5921,79266,55265,92014,83841,37099,79931,49325,50297,71670,32592,94419,34712,80191,56271,3359,34819,59384,67070,40947,19883,8489,58189,1523,20048,73510,44305,13397,50586,18657,20947,46606,76022,99622,31872,82617,53298,36074,71123,71659,16392,45,65868,75219,29099,34855,70074,52446,69823,91317,65774,29879,80489,5386,57136,18018,33887,51799,62853,19837,98684,69103,58350,78898,75465,8933,55695,2891,81690,8312,11690,39309,9681,28109,14805,76874,99181,94998,32544,46891,86663,99024,78603,83899,21397,26814,93682,53397,14151,30494,27964,62274,94335,53430,56,44792,24456,32588,5740,33360,42103,72915,66984,12373,52509,43575,91794,2875,21703,47228,36493,98367,39596,64959,18959,34365,16328,80778,12762,56220,41666,85235,4138,25330,91360,37900,35862,3704,74347,54578,52700,8282,61610,99039,97038,63471,97980,8564,67606,16617,91776,33908,77929,70549,75136,98699,71647,82640,5483,71658,70916,95616,374,52522,60520,72241,19066,12701,35586,10436,58046,35504,54225,67761,13709,29685,42789,81599,24211,51711,82129,49353,34738,71555,55337,80802,86923,70570,80925,3263
4,91604,76254,51882,91289,81074,72031,57718,40845,66418,49960,74347,40941,90268,11836,42904,44602,77117,34239,41099,86496,6462,14503,16269,80439,17665,94684,78527,89067,23670,7680,90106,62621,47359,32296,91356,50003,91065,5832,46041,14863,9226,62192,10304,55376,89492,95447,8110,29763,78120,70493,98659,72777,51759,27151,55214,41208,32579,53844,92977,84885,16485,13109,67459,75926,34067,42947,48810,78577,7316,49675,63637,1007,6443,3248,77873,85992,85371,75026,39471,17811,39153,69515,76070,32797,23453,6155,38446,49328,49158,89445,35911,97067,72975,80399,57005,56537,65209,53800,6554,42845,57122,34359,21212,1882,40730,41255,55521,47735,64230,99126,58078,44554,70454,3826,74870,59064,40439,41804,28932,25204,66378,79772,3787,55726,17152,31788,48435,33829,53011,49973,17146,83039,24160,54885,44107,91829,37905,7104,14209,51969,82920,43779,79122,79724,45486,30003,7039,1201,91068,88509,55771,21007,24477,15521,22926,19944,38550,23730,99882,59235,56123,62701,90958,86068,62122,40714,52591,17608,41420,80383,8826,51137,3638,13587,10364,23184,36823,92949,13097,12098,48826,65977,32926,69915,67774,83810,50889,7861,803,99680,70367,1968,14202,62205,77177,67669,51916,63072,93188,64820,73247,16758,25675,10373,22661,75739,78057,46972,58208,1786,35809,58474,17683,94147,53550,34488,21511,83702,55705,9980,19304,95742,14537,21578,7009,26725,69306,32930,29991,96393,99579,62748,44847,29882,56372,45315,91284,77168,95617,50577,98640,69769,6304,60451,72628,45922,83304,22126,48300,34699,67379,22916,40602,16767,16176,88048,73072,91058,98459,30483,65990,42507,33190,47452,98622,35174,6180,4121,88582,56261,33354,40043,85567,48160,37383,85487,61240,44374,96963,19090,25797,10054,14602,18343,25369,20207,93445,58649,37417,70421,80581,96532,95810,64390,89414,81369,96134,11124,59081,57461,52186,27038,31837,87098,38466,84550,84085,88935,97290,21805,64132,22246,4025,25118,72757,70635,49743,89796,22246,9539,62225,46352,93614,1654,85860,71160,7967,8818,79527,50729,32367,5467,33415,43117,93685,83276,25476,4352,50675,
79808,30087,42599,94266,71477,65935,18983,34966,81806,30073,43388,30084,40054,1549,4688,91752,83149,97034,13611,67464,9061,93737,51949,57743,62151,66212,37566,67396,8777,55704,45265,22578,36550,33306,33344,87822,47116,34296,34327,56033,7011,69696,27560,70135,47400,90235,8646,62274,79006,81205,28501,59027,92351,72414,14045,2041,84121,89947,85317,99045,95162,87756,42009,31579,84365,78087,16166,19279,70338,59834,65843,67174,86648,33484,70631,96383,50594,81540,74436,75928,55783,76851,69166,67436,20317,90519,78493,7986,40118,41223,63078,75862,92154,17413,62829,13718,35946,58906,89264,18027,33266,49394,30481,4775,11737,95539,78410,70838,98003,49983,93124,33114,22942,35102,17328,26704,72974,85027,29673,77647,58921,21330,68350,30863,31231,74451,13528,19786,14776,84682,86447,72052,65532,3944,51367,20594,74263,27537,22598,67775,67461,6472,93283,75770,73014,27800,21992,19098,15877,5870,15332,1954,24264,30150,23034,277,2134,8262,90658,68423,50586,60465,56882,49730,15023,5939,3907,66575,82757,78233,88012,73153,88702,63026,93699,50625,15952,1469,71794,10960,38729,24819,48847,50295,12182,37849,8089,94963,30833,72488,37928,41855,85181,31978,84038,11148,77285,66891,98975,6209,63254,36224,55200,98445,72134,84085,18122,25558,41270,40545,53565,77163,41471,88642,12801,4571,479,26026,96120,2216,59906,98514,52990,23905,460,48505,36563,43179,97281,77114,49702,57349,9770,43439,2562,90313,6040,57136,56735,87615,51110,55324,35216,12063,9677,1710,84844,51510,48624,39203,11344,92925,11836,92286,29465,36988,208,58592,70996,42459,13183,52275,80494,77527,35241,81083,61530,5500,68524,80633,1972,48141,73623,99643,25513,56348,55010,71859,26517,38562,72609,18459,49535,70032,95108,74380,74004,81505,53118,20881,50911,79552,60177,2022,69201,30682,70771,56814,73143,72801,68163,83268,74155,15508,61929,69356,89408,86562,83997,63609,80527,9523,47722,4016,48628,77511,56198,23673,47582,59446,10163,74626,59569,49422,3064,90861,77949,31031,16582,2920,11401,17394,56573,91953,46991,59580,82715,68417,75670,73279,24
481,49826,1926,39679,10311,72522,767,99753,74926,48306,18246,9548,60423,47345,43990,66912,41204,15438,60368,63788,59890,38115,92546,11825,87945,98036,18084,64000,38902,50476,78922,24233,9782,12658,71017,88209,84539,72277,18961,36744,12046,79765,93425,77624,52798,18058,57114,34901,23204,21328,18470,79058,62591,43814,67139,77023,9759,58888,6999,92686,34647,94504,95868,48031,34918,17288,13794,72556,45432,89263,38490,48870,63715,5947,41807,57032,66296,16570,379,75757,18552,16559,48126,65136,8,93102,75304,2368,96249,6525,8306,82854,52337,28778,92292,44850,53741,36060,6926,60209,22503,27692,17828,36497,61605,23282,65943,21359,73400,80089,43005,94519,36700,663,7657,11661,63633,68180,85453,67727,23956,21092,90289,34317,93989,59327,31506,11809,17859,55009,9502,51051,71074,48909,819,63108,8640,7723,46063,73723,84081,91491,98705,7734,11989,15227,75283,48733,135,80009,23331,60795,14315,86107,11295,41286,24371,56021,10600,97981,5718,97501,47544,29808,57749,6979,74648,17848,26164,5748,4303,60605,9291,56386,77811,41145,89193,1928,604,63422,33611,91838,85675,11354,66084,75236,40929,73633,93242,42891,91636,38409,29754,84655,18462,74382,68407,61385,54160,77739,72789,75116,27963,54019,45170,17237,78012,33698,36573,61895,15148,54188,96037,11547,81843,94497,67459,18515,62146,26501,40745,70572,10833,27947,95074,9270,4656,33793,43554,5952,20150,46111,73595,87031,80056,52859,29070,89733,24010,9008,61378,38899,27963,95470,47179,57390,33559,68685,31714,44224,19843,55139,69461,7927,53167,35784,71523,57072,5102,97693,56265,58695,25397,77108,71822,28709,94077,32948,39266,71034,65980,25590,71718,39799,23184,41671,29865,50152,82657,39049,53202,18075,70805,6218,43365,43778,6957,58395,58240,36268,92365,6570,51574,47200,58850,81249,43983,46195,10494,47740,52045,20901,27000,74000,23957,95576,1276,77866,4459,70890,43688,42540,14044,5572,64669,82437,91730,40564,95764,3612,87305,78655,13333,30615,13072,10923,13123,60647,66521,50843,12420,78778,90005,2504,18655,43062,38658,92296,58352,89389,50996,7076,90
013,92104,23220,22046,83080,3119,53232,93990,81394,48622,30646,41305,35134,91572,10900,89469,23217,86,10839,87407,17726,23887,57376,41124,74173,46838,7802,1387,20698,54747,99556,91703,6656,10347,41879,14549,44206,28128,94303,23999,45486,85810,89966,89967,77097,34031,57692,83063,80955,44537,4194,57418,37031,5745,64529,13094,58058,92299,25070,16927,60778,77983,72188,6491,46382,92838,97408,77318,88352,47993,84400,3677,30226,87350,98435,8620,6311,1275,51469,37729,88807,81533,28342,39855,94552,1600,26643,90444,75888,15535,394,36364,45641,16523,4120,28117,56153,5978,66386,80161,91584,99657,37253,8443,9439,71141,39308,82164,80404,12294,29017,85714,44147,80659,80819,22841,71493,47078,45366,99611,98310,31934,48995,89919,74329,44626,19204,5991,47363,50846,9262,4360,7776,53420,63819,74632,75709,1711,84427,2520,52139,74868,56161,71762,7706,9659,71662,50707,93996,57572,62587,34442,5779,91155,31672,62782,65662,35004,91015,89158,16425,93934,28549,86798,25408,74748,37877,40208,76160,91605,49148,29495,2689,365,51463,64328,60094,49412,99453,20602,43801,43004,1056,28570,21061,82954,45606,1449,9553,64649,27855,49740,41040,33437,17041,51271,24071,37470,13864,60212,70417,33394,24088,76299,8015,82279,24481,86704,75063,92073,19589,28684,29010,40333,43389,12485,9142,1997,96631,4188,74884,5222,84300,55281,88706,6082,46684,32019,737,83520,70912,72848,54265,66904,33198,31848,1828,19854,15290,11585,50336,51457,58206,28829,17837,80776,852,36909,88912,43846,59840,43789,98860,73738,72940,71177,59133,42373,74134,24389,11499,22413,77781,96594,77804,55509,40854,21953,51806,6086,30271,31493,54930,87955,23486,1111,90993,18543,59976,48503,46364,68245,24450,10190,33533,60548,60735,3233,91083,78098,16526,144,58317,15013,11906,45122,21248,29577,93546,98558,72186,803,23716,24779,56551,91806,22053,12973,84809,79692,95101,86541,22403,8851,46213,68739,16318,68453,75085,64073,94738,561,89968,47978,74127,12382,56851,13949,14689,65947,57217,27714,70194,48684,43558,72121,72169,10690,95726,77142,41019,74577,98255,6
0785,90475,61663,21952,92272,90536,12347,50033,72878,67734,5828,92908,6599,98359,44522,30977,62751,70121,91668,79151,28305,8804,71761,49210,79778,20261,66653,76269,62862,22594,66725,36191,43477,40763,7002,86162,12580,67360,18102,22037,94685,98828,62492,21301,160,7075,23283,72247,28017,80615,38607,70341,16707,81956,29969,17731,86834,20381,7755,33026,11624,36363,36536,63869,36538,85831,27712,95833,17260,98214,68800,98013,14380,8656,55242,47174,75665,78964,93132,25728,91895,1338,88229,16585,96433,82222,96310,1710,16723,4734,83106,27245,6647,36307,93638,91211,19565,28708,17197,17116,93971,57529,65278,47078,16672,91155,13532,31943,48881,94091,25424,5431,99524,17124,65974,12261,54779,43971,6165,53368,33223,51888,25827,379,89415,7529,45146,93749,77971,30253,57668,59890,60241,46644,52152,76620,12577,80099,70421,29393,17305,9312,89626,3004,85428,21221,78410,11752,54489,89475,89888,97295,9703,61562,39130,49561,79708,5358,20857,76290,98183,69585,28596,15642,3824,34560,32499,97239,70938,36164,8767,81277,50738,51365,21261,19771,74382,10638,26611,15535,55024,63813,1834,65679,7324,48090,53198,73031,48310,54747,51545,60360,98716,5319,50223,30654,91807,51643,20051,11025,74755,67128,51606,47455,47405,7923,39741,76181,37610,33389,39388,85987,4742,57800,56556,51317,25736,89766,40131,79254,24716,94016,29455,74391,54636,45062,1119,39624,84833,79236,49405,21926,59457,24874,16971,22144,50508,17655,56865,15398,11595,38735,19249,33519,28370,53863,87224,18016,96842,45027,85793,72720,91151,42017,34938,88278,19832,13293,89022,67462,67272,83393,5150,69967,96332,64618,77873,90079,80964,12857,96981,81629,53421,19336,83817,31471,94943,40782,93221,11094,86248,78747,74829,89483,49843,34560,28915,26348,73362,44412,31382,17561,94394,36928,60416,24466,47925,87388,7551,47799,69682,30914,29480,32182,34176,26517,4553,90378,31574,10165,10314,92591,47952,87617,69338,65472,53172,3669,49003,94211,35007,48470,61199,95390,51172,90418,73888,88492,35202,65455,43362,59446,52610,76917,93338,94445,35161,94601,83327,4
3976,26132,47251,31448,57225,48591,73967,61891,35502,51515,50681,95438,77942,65902,50110,8334,56480,27345,36747,16578,10365,23211,44992,37428,48489,2901,70188,91455,1039,75064,43522,81046,40154,66591,11474,44787,24297,99670,52311,78112,18691,42349,48656,75461,49977,52046,39596,58548,23819,14791,31396,1776,59402,36200,17919,31405,48227,94032,77137,15554,34826,23802,50438,16062,37476,6900,87517,34705,85284,71364,76484,25614,32482,20633,54455,15250,39453,41516,40495,27983,94162,7319,59351,74262,54087,28838,86218,60018,29623,13665,89217,2369,3931,70014,84623,89950,95797,15989,64971,69577,46697,21891,18624,15762,10408,48282,95669,71957,58542,12109,73005,36812,41725,20503,27410,47547,86238,36617,96879,23098,56748,66256,60379,34048,35004,89166,85261,88961,65282,6372,14737,65172,25428,84256,21444,84949,88698,3319,90637,69342,53861,44753,97252,19345,29172,58289,60661,59372]
print(obj.maxAlternatingSum(A)) |
# Read N single-digit values, sort them, and split them alternately into two
# numbers (odd sorted positions build one, even positions the other), then
# print the sum of those two numbers.
count = int(input("Enter N: "))
values = []
for _ in range(count):
    values.append(int(input("Enter numbers: ")))
values.sort()
first = 0
second = 0
# Distribute sorted digits alternately: even index -> `second`, odd -> `first`.
for idx, digit in enumerate(values):
    if idx % 2 != 0:
        first = first * 10 + digit
    else:
        second = second * 10 + digit
print(first + second)
|
import argparse
import math
import time
import numpy as np
import torch
from torch import nn
from torch import optim
import Corpus as c
from model import make_transformer, make_mos_transformer
# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser(description='Transformer-MOS')
parser.add_argument('--data', type=str, default='./data/wikitext-103', help='location of corpus')
# NOTE(review): `default=True` combined with `action='store_true'` means the
# --mos flag is always True and cannot be disabled from the CLI — confirm
# whether a --no-mos option (e.g. argparse.BooleanOptionalAction) was intended.
parser.add_argument('--mos', default=True, action='store_true', help='use mixture of softmax decoder')
parser.add_argument('--mixtures', type=int, default=10, help='num mixtures of softmax')
parser.add_argument('--dmodel', type=int, default=300, help='dimension of model')
parser.add_argument('--layers', type=int, default=4, help='number of transformer encoder layers')
parser.add_argument('--ffhidden', type=int, default=300, help='number of feed forward hidden units')
parser.add_argument('--dropout', type=float, default=.35, help='dropout rate')
parser.add_argument('--nhead', type=int, default=4, help='number of attention heads')
parser.add_argument('--seed', type=int, default=26, help='seed')
# NOTE(review): same always-True issue as --mos above.
parser.add_argument('--cuda', default=True, action='store_true', help='cuda')
parser.add_argument('--batch_size', type=int, default=128, help='training batch size')
parser.add_argument('--bptt', type=int, default=35, help='sequence length')
parser.add_argument('--lr', type=float, default=7, help='learning rate')
parser.add_argument('--epochs', type=int, default=50, help='num epochs')
parser.add_argument('--decoder-strategy', type=str, default='greedy')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
# ---- Module-level state (populated in __main__, read by the functions below)
EVAL_BATCH_SIZE = 10          # batch size for validation/test batchify
MODEL_SAVE_DIR = './model'    # NOTE(review): defined but not referenced in this chunk
model = None                  # the transformer, built in __main__
ntokens = 0                   # vocabulary size, set from the corpus dictionary
criterion = nn.NLLLoss()      # model is expected to emit log-probabilities
opt = None                    # SGD optimizer, built in __main__
device = None                 # torch.device, chosen in __main__
def batchify(data, bsz):
    """Trim *data* to a multiple of *bsz* and reshape to (seq_len, bsz).

    Leftover tokens that do not fill a whole column are dropped; the result
    is contiguous so later `.view()` calls on slices are valid.
    """
    usable = (data.size(0) // bsz) * bsz
    trimmed = data.narrow(0, 0, usable)
    return trimmed.view(bsz, -1).t().contiguous()
def get_batch(source, i):
    """Slice a bptt-length input window starting at row *i* of *source*,
    plus the corresponding next-token targets (flattened), both moved to
    the module-global `device`.  The final window may be shorter than
    `args.bptt` (module-global `args`).
    """
    window = min(args.bptt, len(source) - 1 - i)
    inputs = source[i:i + window]
    labels = source[i + 1:i + 1 + window].view(-1)
    return inputs.to(device), labels.to(device)
def train_epoch(train_data, epoch, args, lr):
    """Run one full training pass over *train_data*.

    Iterates bptt-sized windows, performing one clipped SGD step per window
    on the module-global `model`/`opt`, and logs average loss and perplexity
    every `args.log_interval` batches.  *lr* is used only for the log line;
    the optimizer holds the actual learning rate.
    """
    model.train()
    total_loss = 0.
    start_time = time.time()
    # Attention mask sized for a full bptt window; rebuilt below whenever the
    # (final) window is shorter.
    src_mask = model.generate_mask(args.bptt).to(device)
    for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
        data, targets = get_batch(train_data, i)
        if data.size(0) != args.bptt:
            # Last window of the epoch may be shorter than bptt.
            src_mask = model.generate_mask(data.size(0)).to(device)
        opt.zero_grad()
        output = model(data, src_mask)
        # Flatten (seq, batch, vocab) -> (seq*batch, vocab) for NLLLoss.
        output = output.view(-1, ntokens)
        loss = criterion(output, targets)
        loss.backward()
        # Clip gradient norm to args.clip to stabilise training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        opt.step()
        total_loss += loss.item()
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, lr,
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            # Reset the running-loss window and the timer for the next interval.
            total_loss = 0
            start_time = time.time()
def train(train_data, val_data, args):
    """Run the full training schedule.

    Trains for up to ``args.epochs`` epochs, evaluating on *val_data* after
    each one.  The encoder weights with the lowest validation loss so far are
    saved to disk; whenever validation loss fails to improve, the learning
    rate is decayed by 1.75x.  Training stops early once the learning rate
    drops below 1e-3.  Uses the module-global `model` and `opt`.
    """
    best_val_loss = float("inf")
    lr = args.lr
    epoch = 1
    # BUGFIX: was `epoch <= args.epochs + 1 or lr >= 1e-3`.  Since lr starts
    # well above 1e-3, the `or` kept the loop alive regardless of the epoch
    # budget (so --epochs was effectively ignored), and the `+ 1` ran one
    # extra epoch.  `and` with `<= args.epochs` stops at the epoch budget or
    # when the lr has decayed below 1e-3, whichever comes first.
    while epoch <= args.epochs and lr >= 1e-3:
        train_epoch(train_data, epoch, args, lr)
        val_loss = evaluate(val_data, args)
        print('=' * 89)
        print('| End of epoch {} | val loss {:5.2f} | val ppl {:8.2f}'.format(epoch, val_loss, math.exp(val_loss)))
        print('=' * 89)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            # Persist only the encoder; filename records whether MOS was used.
            PATH = "encoder-weights-mos.pth" if args.mos else "encoder-weights.pth"
            torch.save(model.encoder.state_dict(), PATH)
        else:
            # No improvement: anneal the learning rate on the live optimizer.
            lr = lr / 1.75
            for g in opt.param_groups:
                g['lr'] = lr
        epoch += 1
def evaluate(data_source, args):
    """Return the average per-token loss of ``model`` over ``data_source``."""
    model.eval()
    loss_sum = 0.0
    mask = model.generate_mask(args.bptt).to(device)
    with torch.no_grad():
        for offset in range(0, data_source.size(0) - 1, args.bptt):
            inputs, labels = get_batch(data_source, offset)
            if inputs.size(0) != args.bptt:
                # Shorter final window needs a matching mask.
                mask = model.generate_mask(inputs.size(0)).to(device)
            logits = model(inputs, mask).view(-1, ntokens)
            # Weight each window's loss by its length for a true average.
            loss_sum += len(inputs) * criterion(logits, labels).item()
    return loss_sum / (len(data_source) - 1)
if __name__ == '__main__':
    args = parser.parse_args()

    # Seed both RNGs so runs are reproducible.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    if args.cuda and not torch.cuda.is_available():
        raise Exception("CUDA is not available for use try running without --cuda")
    device = torch.device("cuda" if args.cuda else "cpu")
    print("USING {}".format(device))

    # Build the corpus and batchify each split.
    print("Training on {}".format(args.data))
    corpus = c.Corpus(args.data, device)
    ntokens = len(corpus.dictionary)
    train_data = batchify(corpus.train, args.batch_size)
    val_data = batchify(corpus.valid, EVAL_BATCH_SIZE)
    test_data = batchify(corpus.test, EVAL_BATCH_SIZE)

    # (Removed dead `make = ...` assignment: it was never used; the factory
    # is selected directly in the branch below.)
    if args.mos:
        model = make_mos_transformer(n_experts=args.mixtures, n_tokens=ntokens, dim_model=args.dmodel,
                                     n_heads=args.nhead, n_layers=args.layers,
                                     n_ff_hid=args.ffhidden, dropout=args.dropout)
        print("Using MOS")
    else:
        model = make_transformer(n_tokens=ntokens, dim_model=args.dmodel, n_heads=args.nhead, n_layers=args.layers,
                                 n_ff_hid=args.ffhidden, dropout=args.dropout)
        print("Using non-mos")
    total_params = sum(x.data.nelement() for x in model.parameters())
    print("total number of params: {}".format(total_params))
    model.to(device)

    LR = args.lr
    opt = optim.SGD(model.parameters(), lr=LR)
    try:
        print('-' * 100)
        print("Starting training...")
        train(train_data, val_data, args)
    except KeyboardInterrupt:
        # Allow Ctrl-C to fall through to the final test evaluation.
        print('-' * 100)
        print('Exiting from training...')
    test_loss = evaluate(test_data, args)
    print('=' * 100)
    print('|test loss {:5.2f} | test ppl {:8.2f}'.format(
        test_loss, math.exp(test_loss)))
    print('=' * 100)
"""
.. include:: ../README.md
"""
__title__ = "pygdbmi"
__version__ = "0.9.0.2"
__author__ = "Chad Smith"
__copyright__ = "Copyright Chad Smith"
__pdoc__ = {"StringStream": False, "printcolor": False}
|
from .bishop import Bishop
from .king import King
from .knight import Knight
from .pawn import Pawn
from .piece import Piece
from .queen import Queen
from .rook import Rook
from .piece_const import PieceValues

# Public API of the package. Fix: 'PieceValues' is imported above but was
# missing from __all__, unlike every other imported class; 'piece_const'
# (the submodule) is kept for backward compatibility.
__all__ = ['Bishop', 'King', 'Knight', 'Pawn', 'Piece', 'PieceValues', 'piece_const', 'Queen', 'Rook']
|
# -*- coding: utf-8 -*-
"""Classes capturing the payloads used when running actions."""
import collections
import collections.abc
from typing import Dict, Mapping, Optional

from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.contrib.sessions.models import Session
# Session key under which the action-run payload dictionary is stored.
action_session_dictionary = 'action_run_payload'
class ActionPayload(collections.abc.MutableMapping):
    """Dictionary-like store for the information required to run an action.

    Fix: the base class was ``collections.MutableMapping``, a deprecated
    alias removed in Python 3.10; ``collections.abc.MutableMapping`` is the
    supported spelling.

    Look at the subclasses in this file for the different varieties.
    """

    # Subclasses list the keys that are legal for this payload type; the
    # whitelist is only enforced when settings.DEBUG is True.
    fields = []

    def __init__(self, initial_values=None):
        """Initialize the store and store given arguments.

        :param initial_values: optional mapping merged into the defaults
            via ``update`` (so it goes through ``__setitem__`` validation).
        """
        super().__init__()
        self.store = {
            'exclude_values': [],
            'prev_url': '',
            'post_url': '',
            'button_label': '',
            'valuerange': 0,
            'step': 0,
        }
        if initial_values:
            self.update(initial_values)

    def __getitem__(self, key):
        """Look up ``key``, verifying it is an allowed field in DEBUG mode.

        :param key: For lookup
        :return: Value
        """
        if settings.DEBUG:
            if key not in self.fields:
                raise Exception('Incorrect key: ' + key)
        return self.store[self.__keytransform__(key)]

    def __setitem__(self, key, item_value):
        """Set ``key`` to ``item_value``, verifying the key in DEBUG mode.

        :param key: lookup
        :param item_value: to be set
        :return: Nothing
        """
        if settings.DEBUG:
            if key not in self.fields:
                raise Exception('Incorrect key lookup.')
        self.store[self.__keytransform__(key)] = item_value

    def __delitem__(self, key):  # noqa: Z434
        """Delete an item."""
        del self.store[self.__keytransform__(key)]  # noqa: Z420

    def __iter__(self):
        """Return iterator over the underlying store."""
        return iter(self.store)

    def __len__(self):
        """Return number of stored keys."""
        return len(self.store)

    def __keytransform__(self, key):
        """Transform the key (identity; hook for subclasses)."""
        return key

    def get_store(self):
        """Return the raw backing dict (used to persist into the session)."""
        return self.store
class EmailPayload(ActionPayload):
    """Store the information required for email execution.

    Object to package the items required to carry out the email execution of
    an action. The allowed fields are:

    - action_id: PK for the action being executed
    - subject: email subject
    - item_column: Name of the column that contains the target email addresses
    - cc_email: List of emails to include in the cc
    - bcc_email: List of emails to include in the bcc
    - confirm_items: Boolean encoding if a final item confirmation is needed
    - send_confirmation: Boolean encoding if a confirmation email is required
    - track_read: Boolean encoding if the email read is going to be tracked
    - export_wf: Boolean encoding if the workflow needs to be exported
    - exclude_values: Values in item_column that must be excluded
    - prev_url: URL to go back to the previous step in the process
    - post_url: URL to go next in the process
    - button_label: To use the right button label in the web page
    - valuerange: Range of steps considered
    - step: current step on that range
    """

    # Whitelist of keys accepted by ActionPayload.__getitem__/__setitem__.
    fields = [
        'action_id',
        'subject',
        'item_column',
        'cc_email',
        'bcc_email',
        'confirm_items',
        'send_confirmation',
        'track_read',
        'export_wf',
        'exclude_values',
        'prev_url',
        'post_url',
        'button_label',
        'valuerange',
        'step',
    ]
class SendListPayload(ActionPayload):
    """Store the information required for send-list execution.

    Object to package the items required to carry out the execution of an
    action of type send list. The allowed fields are:

    - action_id: PK for the action being executed
    - subject: email subject
    - email_to: Destination email
    - cc_email: List of emails to include in the cc
    - bcc_email: List of emails to include in the bcc
    - export_wf: Boolean encoding if the workflow needs to be exported
    """

    # Whitelist of keys accepted by ActionPayload.__getitem__/__setitem__.
    fields = [
        'action_id',
        'subject',
        'email_to',
        'cc_email',
        'bcc_email',
        'export_wf',
    ]
class CanvasEmailPayload(ActionPayload):
    """Store the information required for Canvas Email execution.

    Object to package the items required to carry out the Canvas email
    execution of an action. (Docstring corrected: it previously said "JSON
    execution" and omitted most fields.) The allowed fields are:

    - action_id: PK for the action being executed
    - subject: email subject
    - item_column: Column that contains the value to personalize
    - export_wf: Boolean encoding if the workflow needs to be exported
    - target_url: URL of the Canvas instance to send to
    - confirm_items: Boolean encoding if a final item confirmation is needed
    - exclude_values: Values in item_column that must be excluded
    - prev_url: URL to go back to the previous step in the process
    - post_url: URL to go next in the process
    - button_label: To use the right button label in the web page
    - valuerange: Range of steps considered
    - step: current step on that range
    """

    # Whitelist of keys accepted by ActionPayload.__getitem__/__setitem__.
    fields = [
        'action_id',
        'subject',
        'item_column',
        'export_wf',
        'target_url',
        'confirm_items',
        'exclude_values',
        'prev_url',
        'post_url',
        'button_label',
        'valuerange',
        'step',
    ]
class JSONPayload(ActionPayload):
    """Store the information required for JSON execution.

    Object to package the items required to carry out the JSON execution of
    an action. The allowed fields are:

    - action_id: PK for the action being executed
    - token: for identification when making the request
    - item_column: Column that contains the value to personalize
    - export_wf: Boolean encoding if the workflow needs to be exported
    - confirm_items: Boolean encoding if a final item confirmation is needed
    - exclude_values: Values in item_column that must be excluded
    - prev_url: URL to go back to the previous step in the process
    - post_url: URL to go next in the process
    - button_label: To use the right button label in the web page
    - valuerange: Range of steps considered
    - step: current step on that range
    """

    # Whitelist of keys accepted by ActionPayload.__getitem__/__setitem__.
    fields = [
        'action_id',
        'token',
        'item_column',
        'export_wf',
        'confirm_items',
        'exclude_values',
        'prev_url',
        'post_url',
        'button_label',
        'valuerange',
        'step',
    ]
class JSONListPayload(ActionPayload):
    """Store the information required for JSON List execution.

    Object to package the items required to carry out the execution of a
    JSON list action. (Docstring corrected: it previously listed URL/step
    fields that are not in ``fields``.) The allowed fields are:

    - action_id: PK for the action being executed
    - token: for identification when making the request
    - item_column: Column that contains the value to personalize
    - export_wf: Boolean encoding if the workflow needs to be exported
    """

    # Whitelist of keys accepted by ActionPayload.__getitem__/__setitem__.
    fields = [
        'action_id',
        'token',
        'item_column',
        'export_wf',
    ]
class ZipPayload(ActionPayload):
    """Store the information required for ZIP execution.

    Object to package the items required to carry out the ZIP execution of
    an action. The allowed fields are:

    - action_id: PK for the action being executed
    - item_column: Column that contains the value to personalize
    - confirm_items: Boolean encoding if a final item confirmation is needed
    - exclude_values: Values in item_column that must be excluded
    - user_fname_column: Column with the user's first name (file naming)
    - file_suffix: Suffix appended to each generated file name
    - zip_for_moodle: Boolean, produce a Moodle-compatible ZIP layout
    - prev_url: URL to go back to the previous step in the process
    - post_url: URL to go next in the process
    - button_label: To use the right button label in the web page
    - valuerange: Range of steps considered
    - step: current step on that range
    """

    # Whitelist of keys accepted by ActionPayload.__getitem__/__setitem__.
    fields = [
        'action_id',
        'item_column',
        'confirm_items',
        'exclude_values',
        'user_fname_column',
        'file_suffix',
        'zip_for_moodle',
        'prev_url',
        'post_url',
        'button_label',
        'valuerange',
        'step',
    ]
def get_action_payload(session: SessionBase) -> Dict:
    """Fetch the action payload stored in the current session.

    :param session: Session object
    :return: the stored payload dict, or None when nothing is stored
    """
    return session.get(action_session_dictionary)
def set_action_payload(
    session: SessionBase,
    payload: Optional[Mapping] = None,
):
    """Store ``payload`` in the current session (None clears it).

    :param session: Session object
    :param payload: Dictionary to store
    """
    session[action_session_dictionary] = payload
def get_or_set_action_info(
    session: Session,
    payloadclass,
    action_info: Optional[ActionPayload] = None,
    initial_values: Optional[Dict] = None,
) -> Optional[ActionPayload]:
    """Get (from the session object) or create an ActionPayload object.

    First check if one is given. If not, check in the session. If there is
    no object in the session, create a new one with the initial values and
    persist its store in the session.

    :param session: HTTP session object
    :param payloadclass: class to use to create a action_info object.
    :param action_info: ActionInfo object just in case it is present.
    :param initial_values: A dictionary to initialize the class if required
    :return: Existing, newly created ActionInfo object, or None
    """
    if action_info:
        # Already exists, no need to create a new one
        return action_info

    stored = session.get(action_session_dictionary)
    if stored:
        return payloadclass(stored)

    if not initial_values:
        # Nothing found in the session and no initial values given.
        return None

    # Create the object with the given class and persist its store.
    action_info = payloadclass(initial_values)
    session[action_session_dictionary] = action_info.get_store()
    session.save()
    # Fix: previously a second, distinct payloadclass(initial_values) object
    # was created and returned here; return the instance whose store was
    # just persisted in the session instead.
    return action_info
|
from .tox21 import Tox21 |
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
def init(application, config):
    """Register every v0 API route on the given application object.

    Resource modules are imported lazily (inside this function) so that
    importing this package has no side effects until routing is set up.

    :param application: web application object exposing ``add_route``
    :param config: dict-like configuration; only
        ``config['iris_plan_integration']['activated']`` is consulted here.
    """
    # Teams and per-team sub-resources.
    from . import teams, team, team_summary, team_oncall, team_changes
    application.add_route('/api/v0/teams', teams)
    application.add_route('/api/v0/teams/{team}', team)
    application.add_route('/api/v0/teams/{team}/summary', team_summary)
    application.add_route('/api/v0/teams/{team}/oncall', team_oncall)
    application.add_route('/api/v0/teams/{team}/oncall/{role}', team_oncall)
    application.add_route('/api/v0/teams/{team}/changes', team_changes)
    from . import team_admins, team_admin
    application.add_route('/api/v0/teams/{team}/admins', team_admins)
    application.add_route('/api/v0/teams/{team}/admins/{user}', team_admin)
    from . import team_users, team_user
    application.add_route('/api/v0/teams/{team}/users', team_users)
    application.add_route('/api/v0/teams/{team}/users/{user}', team_user)
    # Rosters and roster membership.
    from . import rosters, roster
    application.add_route('/api/v0/teams/{team}/rosters', rosters)
    application.add_route('/api/v0/teams/{team}/rosters/{roster}', roster)
    from . import roster_users, roster_user
    application.add_route('/api/v0/teams/{team}/rosters/{roster}/users', roster_users)
    application.add_route('/api/v0/teams/{team}/rosters/{roster}/users/{user}', roster_user)
    # Schedules and schedule population.
    from . import schedules, schedule
    application.add_route('/api/v0/teams/{team}/rosters/{roster}/schedules', schedules)
    application.add_route('/api/v0/schedules/{schedule_id}', schedule)
    from . import populate
    application.add_route('/api/v0/schedules/{schedule_id}/populate', populate)
    # Services and their team/oncall views.
    from . import services, service, service_oncall
    application.add_route('/api/v0/services', services)
    application.add_route('/api/v0/services/{service}', service)
    application.add_route('/api/v0/services/{service}/oncall', service_oncall)
    application.add_route('/api/v0/services/{service}/oncall/{role}', service_oncall)
    from . import team_services, team_service, service_teams
    application.add_route('/api/v0/teams/{team}/services', team_services)
    application.add_route('/api/v0/teams/{team}/services/{service}', team_service)
    application.add_route('/api/v0/services/{service}/teams', service_teams)
    from . import roles, role
    application.add_route('/api/v0/roles', roles)
    application.add_route('/api/v0/roles/{role}', role)
    # Events, including swap/override/link operations.
    from . import events, event, event_swap, event_override, event_link, events_link
    application.add_route('/api/v0/events', events)
    application.add_route('/api/v0/events/{event_id}', event)
    application.add_route('/api/v0/events/swap', event_swap)
    application.add_route('/api/v0/events/override', event_override)
    application.add_route('/api/v0/events/link', events_link)
    application.add_route('/api/v0/events/link/{link_id}', event_link)
    # Users, notifications and related lookups.
    from . import users, user, user_teams, user_notifications
    application.add_route('/api/v0/users', users)
    application.add_route('/api/v0/users/{user_name}', user)
    application.add_route('/api/v0/users/{user_name}/teams', user_teams)
    application.add_route('/api/v0/users/{user_name}/notifications', user_notifications)
    from . import user_notification
    application.add_route('/api/v0/notifications/{notification_id}', user_notification)
    from . import notification_types, modes
    application.add_route('/api/v0/notification_types', notification_types)
    application.add_route('/api/v0/modes', modes)
    from . import search
    application.add_route('/api/v0/search', search)
    from . import upcoming_shifts
    application.add_route('/api/v0/users/{user_name}/upcoming', upcoming_shifts)
    from . import user_pinned_teams, user_pinned_team
    application.add_route('/api/v0/users/{user_name}/pinned_teams', user_pinned_teams)
    application.add_route('/api/v0/users/{user_name}/pinned_teams/{team_name}', user_pinned_team)
    from . import timezones
    application.add_route('/api/v0/timezones', timezones)
    from . import team_subscription, team_subscriptions
    application.add_route('/api/v0/teams/{team}/subscriptions', team_subscriptions)
    application.add_route('/api/v0/teams/{team}/subscriptions/{subscription}/{role}', team_subscription)
    # Optional Iris integration
    from . import iris_settings
    application.add_route('/api/v0/iris_settings', iris_settings)
    from ... import iris
    # Escalation route is only exposed when the Iris client is configured
    # and the integration is activated in config.
    if iris.client and config.get('iris_plan_integration', {}).get('activated'):
        from . import team_iris_escalate
        application.add_route('/api/v0/teams/{team}/iris_escalate', team_iris_escalate)
|
# WHERE IS BOB !?! EDABIT SOLUTION:
def find_bob(names):
    """Return the index of the first "Bob" in *names*, or -1 if absent.

    Fix: the original body iterated over an undefined name ``nums`` instead
    of the ``names`` parameter, so every call raised NameError.
    """
    for index, name in enumerate(names):
        if name == "Bob":
            return index
    # "Bob" is not present within the list.
    return -1
|
# Prompt until the user answers with a valid single letter (F or M).
# Fix: the original test `S not in 'MF'` used substring matching, so an
# empty answer (and the two-letter answer 'MF') were wrongly accepted.
S = str(input('Qual o seu sexo:F/M')).strip().upper()
while S not in ('M', 'F'):
    print(f'{S} não é um sexo válido. Certifique-se de responder apenas com F ou M')
    S = str(input('Qual o seu sexo:F/M')).strip().upper()
print('Sexo registrado!')
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, List, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
# spaCy pipeline components that are not needed for producing document
# vectors; excluding them on load speeds up model loading and inference.
_EXCLUDE_COMPONENTS = [
    'tagger',
    'parser',
    'ner',
    'senter',
    'lemmatizer',
    'attribute_ruler',
]
class SpacyTextEncoder(Executor):
    """
    :class:`SpacyTextEncoder` encodes ``Document`` using models offered by Spacy

    :param model_name: pre-trained spaCy language pipeline name
    :param require_gpu: device to use for encoding ['cuda', 'cpu] - if not set,
        the device is detected automatically
    :param download_data: whether to download the spaCy model package on startup
    :param default_batch_size: Default batch size, used if ``batch_size`` is not
        provided as a parameter in the request
    :param default_traversal_paths: Default traversal paths, used if ``traversal_paths``
        are not provided as a parameter in the request. Defaults to ``['r']``.
    :param args: Additional positional arguments.
    :param kwargs: Additional keyword arguments.
    """

    def __init__(
        self,
        model_name: str = 'en_core_web_sm',
        require_gpu: bool = False,
        download_data: bool = True,
        default_batch_size: int = 32,
        default_traversal_paths: Optional[List[str]] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.default_batch_size = default_batch_size
        # Fix: the previous signature used a mutable default (['r']) that was
        # shared across every instance; use None as a sentinel instead.
        if default_traversal_paths is None:
            default_traversal_paths = ['r']
        self.default_traversal_paths = default_traversal_paths
        if require_gpu:
            spacy.require_gpu()
        if download_data:
            # Fetch the model package in case it is not installed yet.
            subprocess.run(
                ['python', '-m', 'spacy', 'download', model_name], check=True
            )
        self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)

    @requests
    def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
        """
        Encode all docs with text and store the encodings in the embedding
        attribute of the docs.

        :param docs: documents sent to the encoder. The docs must have the
            ``text`` attribute.
        :param parameters: dictionary to define the ``traversal_path`` and the
            ``batch_size``. For example,
            ``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
        """
        if docs:
            batch_size = parameters.get('batch_size', self.default_batch_size)
            document_batches_generator = get_docs_batch_generator(
                docs,
                traversal_path=parameters.get(
                    'traversal_paths', self.default_traversal_paths
                ),
                batch_size=batch_size,
                needs_attr='text',
            )
            for document_batch in document_batches_generator:
                texts = [doc.text for doc in document_batch]
                for doc, spacy_doc in zip(
                    document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
                ):
                    # spaCy's document vector becomes the embedding.
                    doc.embedding = spacy_doc.vector
|
from flask import render_template, request, redirect, url_for, abort
from flask_login import login_required, current_user
from . forms import UpdateProfile, UpdatePitch, CommentForm
from app.models import User, Pitch, Comment
from . import main
from .. import db
@main.route('/')
def index():
    """Landing page: list every pitch."""
    all_pitches = Pitch.query.all()
    page_title = 'Got a Pitch ..?'
    return render_template('index.html', title=page_title, pitchs=all_pitches)
@main.route('/pitch/new', methods=['GET', 'POST'])
@login_required
def add_pitch():
    """Render the new-pitch form and persist a pitch on valid submission."""
    form = UpdatePitch()
    if form.validate_on_submit():
        title = form.title.data
        pitch = form.pitch.data
        category = form.category.data
        # NOTE(review): `posted` is populated from form.category.data, same
        # as `category` — this looks like a copy-paste mistake; confirm the
        # intended source field against the Pitch model/form definition.
        posted = form.category.data
        # Build the new pitch owned by the logged-in user.
        new_pitch = Pitch(title=title, description=pitch,
                          category=category, posted=posted, user=current_user)
        # Save pitch method
        new_pitch.save_pitch()
        return redirect(url_for('.index'))
    title = 'New Pitch'
    return render_template('add_pitch.html', title=title, form=form)
@main.route('/user/<uname>')
def profile(uname):
    """Show a user's profile with every pitch they have posted."""
    target = User.query.filter_by(username=uname).first()
    if target is None:
        abort(404)
    owned_pitches = Pitch.query.filter_by(user_id=target.id).all()
    return render_template("index.html", user=target, pitches=owned_pitches)
@main.route('/user/<uname>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname):
    """Let a logged-in user edit the bio on their profile."""
    target = User.query.filter_by(username=uname).first()
    if target is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        # Persist the updated bio and go back to the profile page.
        target.bio = form.bio.data
        db.session.add(target)
        db.session.commit()
        return redirect(url_for('.profile', uname=target.username))
    return render_template('profile/update.html', form=form)
@main.route('/pitches/<category>')
def get_pitches(category):
    """List the pitches belonging to a single category."""
    matching = Pitch.get_pitches(category)
    return render_template("index.html", pitches=matching)
@main.route('/comment/new/<int:id>', methods=['GET', 'POST'])
@login_required
def add_comment(id):
    """Add a comment to pitch *id* on submit, then show all its comments."""
    form = CommentForm()
    if form.validate_on_submit():
        new_comment = Comment(comment=form.comment.data, pitch_id=id)
        # Persist the freshly written comment.
        db.session.add(new_comment)
        db.session.commit()
    comment = Comment.query.filter_by(pitch_id=id).all()
    return render_template('comment.html', comment=comment, comment_form=form, post='New Comment')
|
from keycloak_admin_aio.types import RoleRepresentation
from ..... import KeycloakResource
class ClientScopesScopeMappingsRealm(KeycloakResource):
    """Realm scope mappings for a client scope by id.

    .. code:: python

        from keycloak_admin_aio import KeycloakAdmin, RoleRepresentation

        kc: KeycloakAdmin  # must be instantiated
        client_scope_id: str  # uuid
    """

    def get_url(self) -> str:
        """Endpoint URL for this client scope's realm scope mappings."""
        return f"{self._get_parent_url()}/realm"

    async def create(self, role_representations: list[RoleRepresentation]):
        """Add roles to the realm scope mappings of a client scope by id.

        .. code:: python

            role_representations: list[RoleRepresentation] = []  # needs to be populated
            await kc.client_scopes.by_id(client_scope_id).scope_mappings.realm.create(roles)
        """
        connection = await self._get_connection()
        payload = RoleRepresentation.to_dict_list(role_representations)
        await connection.post(self.get_url(), json=payload)

    async def get(self) -> list[RoleRepresentation]:
        """Fetch the realm scope mappings for a client scope by id.

        .. code:: python

            client_scope_resource = kc.client_scopes.by_id(client_scope_id)
            roles: list[RoleRepresentation] = await client_scope_resource.scope_mappings.realm.get()
        """
        connection = await self._get_connection()
        raw_response = await connection.get(self.get_url())
        return RoleRepresentation.from_list(raw_response.json())

    async def delete(self, role_representations: list[RoleRepresentation]):
        """Remove roles from the realm scope mappings of a client scope by id.

        .. code:: python

            role_representations: list[RoleRepresentation] = []  # needs to be populated
            client_scope_resource = kc.client_scopes.by_id(client_scope_id)
            await client_scope_resource.scope_mappings.realm.delete(role_representations)
        """
        connection = await self._get_connection()
        payload = RoleRepresentation.to_dict_list(role_representations)
        await connection.request(
            method="DELETE",
            url=self.get_url(),
            json=payload,
        )
|
# IMPORTS
import pygame
# THE CLASS
class Ball:
    """A circle with position, color, velocity and a movement direction.

    The per-axis setters deliberately treat 0 as "leave unchanged".
    """

    def __init__(self, x, y, radius, color, velocity, direction_x, direction_y):
        self.x = x
        self.y = y
        self.radius = radius
        self.color = color
        self.velocity = velocity
        self.direction_x = direction_x
        self.direction_y = direction_y
        # Off-screen surface sized to the ball's bounding box.
        self.display = pygame.Surface((self.radius * 2, self.radius * 2))

    def update_position(self, x, y):
        """Overwrite each coordinate, skipping any axis passed as 0."""
        if x != 0:
            self.x = x
        if y != 0:
            self.y = y

    def update_color(self, new_color):
        """Replace the ball's color."""
        self.color = new_color

    def update_direction(self, new_direction_x, new_direction_y):
        """Overwrite each direction component, skipping any passed as 0."""
        if new_direction_x != 0:
            self.direction_x = new_direction_x
        if new_direction_y != 0:
            self.direction_y = new_direction_y

    def update_velocity(self, new_velocity):
        """Replace the ball's speed."""
        self.velocity = new_velocity
"""Common Exceptions for digirock functions and classes.
"""
class Error(Exception):
    """Base class for exceptions in this module."""


class WorkflowError(Error):
    """Raised when the defined workflow has not been followed correctly.

    Attributes:
        name: where this error was raised (a single name, or a tuple of
            names joined with " in " when rendered)
        message: which part of the workflow wasn't followed and what to do
    """

    def __init__(self, name, message):
        """Store the raise location and the explanation.

        Args:
            name (object or tuple): Where this error was raised and parents names
            message (string): Which part of the workflow wasn't followed and
                what to do.
        """
        self.name = name
        self.message = message

    def __str__(self):
        location = " in ".join(self.name) if isinstance(self.name, tuple) else self.name
        return ("Workflow for {!r} has not been followed, {!r}").format(
            location, self.message
        )


class PrototypeError(Error):
    """Raised when calling a prototype method on a base class; the class
    must be sub-classed and the method overwritten.

    NOTE(review): __str__ interpolates ``message`` into the first placeholder
    and the class name into the second — confirm that ordering is intended.
    """

    def __init__(self, name, message):
        """Store the raise location and the explanation.

        Args:
            name (object or tuple): Where this error was raised and parents names
            message (string): Which part of the workflow wasn't followed and
                what to do.
        """
        self.name = name
        self.message = message

    def __str__(self):
        location = " in ".join(self.name) if isinstance(self.name, tuple) else self.name
        return (
            "This is a prototype method for {!r}, use a child class instead of the baseclass {!r}."
        ).format(self.message, location)
|
from tinydb import TinyDB
class Database:
    """Model for database, encapsulates a TinyDB object and the created objects.

    Attributes:
        location (str): The local path for the TinyDB JSON file.
        players (dict): The Player objects created from the database.
        tournaments (dict): The tournament objects created from the database.
        db (tinydb.database.TinyDB): The TinyDB object created from the JSON file.
    """

    def __init__(self, location: str):
        """Constructor for Database. Initiates TinyDB loading.

        Args:
            location (str): The local path for the TinyDB JSON file.
        """
        self.location = location
        self.players = {}
        self.tournaments = {}
        self.db = None
        self.load_database()

    def create_empty_database(self):
        """Create an empty database JSON file if needed.

        Fix: this method was defined without ``self``, so the recovery call
        in ``load_database`` always raised TypeError and ``self.location``
        was unresolvable.
        """
        with open(self.location, "w+") as f:
            f.write("{}")

    def load_database(self):
        """Load a TinyDB object from the JSON file, creating the file and
        retrying once if it does not exist yet."""
        try:
            self.db = TinyDB(self.location)
        except FileNotFoundError:
            self.create_empty_database()
            self.load_database()
|
import json
import os
from typing import Union, Dict
from urllib.request import urlopen, Request
from .common import ReleaseFile, EnvironmentInformation
def make_post(release: ReleaseFile):
    """Create a GitHub release for *release* via the GitHub REST API.

    Reads the API token from the DBT_GITHUB_API_TOKEN environment variable
    and raises ValueError if the request fails.
    """
    GITHUB_API_TOKEN = os.getenv("DBT_GITHUB_API_TOKEN")
    # Assemble the release payload; prerelease flag is only sent when set.
    data: Dict[str, Union[str, bool]] = {
        "tag_name": f"v{release.version}",
        "target_commitish": release.branch,
        "name": f"dbt {release.version}",
        "body": release.notes,
    }
    if release.is_prerelease:
        data["prerelease"] = True

    request = Request(
        url="https://api.github.com/repos/dbt-labs/dbt/releases",
        headers={
            "Content-Type": "application/json",
            "Authorization": f"token {GITHUB_API_TOKEN}",
        },
        data=json.dumps(data).encode("utf-8"),
    )
    print(f"Creating release with data:\n{data}")
    try:
        with urlopen(request) as fp:
            resp_data = fp.read()
    except Exception as exc:
        raise ValueError(f"Could not create release {release.version}: {exc}") from exc
    print(f"Github response:\n{resp_data}")
def make_github_release(args=None):
    """Build the release description from CI artifacts and post it to GitHub."""
    env = EnvironmentInformation()
    release = ReleaseFile.from_artifacts(env)
    make_post(release)


def add_github_parsers(subparsers):
    """Attach the ``github create-release`` sub-command to *subparsers*."""
    github_sub = subparsers.add_parser("github", help="Create the github release")
    github_subs = github_sub.add_subparsers(title="Available sub-commands")
    release_cmd = github_subs.add_parser("create-release")
    # The dispatcher reads args.func to decide which command to run.
    release_cmd.set_defaults(func=make_github_release)
|
# Generated by Django 3.0.7 on 2021-03-09 23:13
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.7: adds valuation columns (pb/ps/ps_ttm)
    # to CompanyDailyBasic and introduces the IndexDailyBasic model.
    # Do not hand-edit field definitions; generate a follow-up migration instead.

    dependencies = [
        ('stockmarket', '0017_auto_20210307_1803'),
    ]
    operations = [
        migrations.AddField(
            model_name='companydailybasic',
            name='pb',
            field=models.FloatField(blank=True, null=True, verbose_name='市净率'),
        ),
        migrations.AddField(
            model_name='companydailybasic',
            name='ps',
            field=models.FloatField(blank=True, null=True, verbose_name='市销率'),
        ),
        migrations.AddField(
            model_name='companydailybasic',
            name='ps_ttm',
            field=models.FloatField(blank=True, null=True, verbose_name='市销率TTM'),
        ),
        # Daily valuation metrics for an index, linked (nullable) to a company.
        migrations.CreateModel(
            name='IndexDailyBasic',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间')),
                ('last_mod_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='最后更新时间')),
                ('ts_code', models.CharField(blank=True, max_length=50, unique=True, verbose_name='TS代码')),
                ('trade_date', models.DateField(blank=True, null=True, verbose_name='交易日期')),
                ('turnover_rate', models.FloatField(blank=True, max_length=50, null=True, verbose_name='换手率')),
                ('turnover_rate_f', models.FloatField(blank=True, max_length=50, null=True, verbose_name='换手率(自由流通)')),
                ('pe', models.FloatField(blank=True, null=True, verbose_name='市盈率')),
                ('pe_ttm', models.FloatField(blank=True, null=True, verbose_name='市盈率TTM')),
                ('pb', models.FloatField(blank=True, null=True, verbose_name='市净率')),
                ('total_share', models.FloatField(blank=True, null=True, verbose_name='总股本')),
                ('float_share', models.FloatField(blank=True, null=True, verbose_name='流通股本')),
                ('free_share', models.FloatField(blank=True, null=True, verbose_name='自由流通股本')),
                ('total_mv', models.FloatField(blank=True, null=True, verbose_name='总市值')),
                ('float_mv', models.FloatField(blank=True, null=True, verbose_name='流通市值')),
                ('company', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='stockmarket.StockNameCodeMap')),
            ],
            options={
                'verbose_name': '指数每日基本',
                'verbose_name_plural': '指数每日基本',
                'ordering': ['-last_mod_time'],
                'get_latest_by': 'id',
            },
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
"""
from setuptools import setup, find_packages
# Runtime dependencies live in requirements.txt; test-only dependencies in
# test-requirements.txt (the latter is read for completeness but not passed
# to setup() here).
with open('requirements.txt') as req_file:
    requirements = req_file.readlines()

with open('test-requirements.txt') as test_req_file:
    test_requirements = test_req_file.readlines()

setup(
    name='remarkable-friend',
    packages=find_packages(),
    setup_requires=[],
    install_requires=requirements,
    entry_points={
        'console_scripts': 'rmfriend=rmfriend.tools.manage:main'
    },
)
|
#
# PySNMP MIB module HH3C-SAN-AGG-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-SAN-AGG-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:16:46 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# pysmi-generated MIB registration module: 'mibBuilder' is injected into the
# module namespace by the pysnmp MIB loader before this code runs.
# Base ASN.1 types and constraint helpers.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
# Cross-MIB references (parent subtree and interface table indices).
hh3cSan, = mibBuilder.importSymbols("HH3C-VSAN-MIB", "hh3cSan")
ifIndex, ifDescr = mibBuilder.importSymbols("IF-MIB", "ifIndex", "ifDescr")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Unsigned32, ObjectIdentity, iso, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Integer32, Bits, Counter32, Counter64, Gauge32, ModuleIdentity, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Unsigned32", "ObjectIdentity", "iso", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Integer32", "Bits", "Counter32", "Counter64", "Gauge32", "ModuleIdentity", "IpAddress")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
# Module identity: 1.3.6.1.4.1.25506.2.127.2 (hh3cSanAgg).
hh3cSanAgg = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2))
hh3cSanAgg.setRevisions(('2013-02-25 09:40',))
if mibBuilder.loadTexts: hh3cSanAgg.setLastUpdated('201302250940Z')
if mibBuilder.loadTexts: hh3cSanAgg.setOrganization('Hangzhou H3C Tech. Co., Ltd.')
# Textual convention: an OCTET STRING listing aggregation-group members.
class Hh3cMemberList(TextualConvention, OctetString):
    status = 'current'

# Scalar objects.
hh3cSanAggMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 1))
hh3cSanAggMaxMemberNumber = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cSanAggMaxMemberNumber.setStatus('current')
# Aggregation-group table, indexed by hh3cSanAggGroupNumber.
hh3cSanAggGroupTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 2), )
if mibBuilder.loadTexts: hh3cSanAggGroupTable.setStatus('current')
hh3cSanAggGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 2, 1), ).setIndexNames((0, "HH3C-SAN-AGG-MIB", "hh3cSanAggGroupNumber"))
if mibBuilder.loadTexts: hh3cSanAggGroupEntry.setStatus('current')
hh3cSanAggGroupNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hh3cSanAggGroupNumber.setStatus('current')
hh3cSanAggGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cSanAggGroupIndex.setStatus('current')
hh3cSanAggMemberList = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 2, 1, 3), Hh3cMemberList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cSanAggMemberList.setStatus('current')
hh3cSanAggMemberStateList = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 2, 1, 4), Hh3cMemberList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cSanAggMemberStateList.setStatus('current')
hh3cSanAggGroupRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cSanAggGroupRowStatus.setStatus('current')
# Objects that exist only as notification payload (accessible-for-notify).
hh3cSanAggObjForNotification = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 3))
hh3cSanAggGroupPreviousSpeed = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 3, 1), Integer32()).setUnits('gigabit bps').setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hh3cSanAggGroupPreviousSpeed.setStatus('current')
hh3cSanAggGroupCurrentSpeed = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 3, 2), Integer32()).setUnits('gigabit bps').setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hh3cSanAggGroupCurrentSpeed.setStatus('current')
# Notification (trap) definitions.
hh3cSanAggNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 4))
hh3cSanAggNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 4, 0))
hh3cSanAggGroupSpeedChange = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 4, 0, 1)).setObjects(("HH3C-SAN-AGG-MIB", "hh3cSanAggGroupNumber"), ("HH3C-SAN-AGG-MIB", "hh3cSanAggGroupPreviousSpeed"), ("HH3C-SAN-AGG-MIB", "hh3cSanAggGroupCurrentSpeed"))
if mibBuilder.loadTexts: hh3cSanAggGroupSpeedChange.setStatus('current')
hh3cSanAggMemberInactive = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 4, 0, 2)).setObjects(("HH3C-SAN-AGG-MIB", "hh3cSanAggGroupNumber"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hh3cSanAggMemberInactive.setStatus('current')
hh3cSanAggMemberActive = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 127, 2, 4, 0, 3)).setObjects(("HH3C-SAN-AGG-MIB", "hh3cSanAggGroupNumber"), ("IF-MIB", "ifIndex"), ("IF-MIB", "ifDescr"))
if mibBuilder.loadTexts: hh3cSanAggMemberActive.setStatus('current')
# Export every symbol so other generated MIB modules can import them.
mibBuilder.exportSymbols("HH3C-SAN-AGG-MIB", hh3cSanAggNotifications=hh3cSanAggNotifications, hh3cSanAggMemberActive=hh3cSanAggMemberActive, PYSNMP_MODULE_ID=hh3cSanAgg, hh3cSanAggGroupEntry=hh3cSanAggGroupEntry, hh3cSanAggGroupNumber=hh3cSanAggGroupNumber, hh3cSanAggGroupSpeedChange=hh3cSanAggGroupSpeedChange, hh3cSanAggGroupTable=hh3cSanAggGroupTable, hh3cSanAggMemberList=hh3cSanAggMemberList, hh3cSanAgg=hh3cSanAgg, Hh3cMemberList=Hh3cMemberList, hh3cSanAggGroupRowStatus=hh3cSanAggGroupRowStatus, hh3cSanAggGroupPreviousSpeed=hh3cSanAggGroupPreviousSpeed, hh3cSanAggNotificationPrefix=hh3cSanAggNotificationPrefix, hh3cSanAggObjForNotification=hh3cSanAggObjForNotification, hh3cSanAggMemberStateList=hh3cSanAggMemberStateList, hh3cSanAggMaxMemberNumber=hh3cSanAggMaxMemberNumber, hh3cSanAggGroupIndex=hh3cSanAggGroupIndex, hh3cSanAggMemberInactive=hh3cSanAggMemberInactive, hh3cSanAggMibObjects=hh3cSanAggMibObjects, hh3cSanAggGroupCurrentSpeed=hh3cSanAggGroupCurrentSpeed)
|
from __future__ import print_function
import healpy as hp
from lenspyx import lensing
import numpy as np
def test_t():
    """Lensing a temperature map with a zero deflection field must reproduce
    the unlensed map, for harmonic and real-space deflection inputs alike."""
    lmax, nside = 200, 256
    # Unlensed temperature alm drawn from a flat (all-ones) spectrum.
    unl_cl = np.ones(lmax + 1, dtype=float)
    unl_alm = hp.synalm(unl_cl, new=True)
    # Zero deflection alm; the ell-filter below leaves it identically zero.
    defl_alm = np.zeros_like(unl_alm)
    ell_filter = np.sqrt(np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float))
    hp.almxfl(defl_alm, ell_filter, inplace=True)
    ref_map = hp.alm2map(unl_alm, nside)
    # Harmonic-space deflection input.
    lensed_a = lensing.alm2lenmap(unl_alm, [defl_alm, None], nside, verbose=True, nband=8, facres=-2)
    assert np.max(np.abs(ref_map - lensed_a)) / np.std(ref_map) < 1e-5
    # Real-space (spin-1 map) deflection input must agree as well.
    defl_re, defl_im = hp.alm2map_spin([defl_alm, np.zeros_like(defl_alm)], nside, 1, lmax)
    lensed_b = lensing.alm2lenmap(unl_alm, [defl_re, defl_im], nside, verbose=True, nband=8, facres=-2)
    assert np.max(np.abs(ref_map - lensed_b)) / np.std(ref_map) < 1e-5
    # Passing a curl component equal to the gradient one (both zero here)
    # must give bit-identical output.
    lensed_c = lensing.alm2lenmap(unl_alm, [defl_alm, defl_alm.copy()], nside, verbose=True, nband=8, facres=-2)
    assert np.all(lensed_b == lensed_c)
def test_pol():
    """Lensing Q/U maps with a zero deflection field must reproduce the
    unlensed spin-2 maps, for harmonic and real-space deflection inputs."""
    lmax, nside = 200, 256
    resolution = -1
    # Unlensed E-mode alm from a flat spectrum; B set to zero.
    unl_cl = np.ones(lmax + 1, dtype=float)
    unl_alm = hp.synalm(unl_cl, new=True)
    # Zero deflection alm; the ell-filter leaves it identically zero.
    defl_alm = np.zeros_like(unl_alm)
    hp.almxfl(defl_alm, np.sqrt(np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float)), inplace=True)
    q_ref, u_ref = hp.alm2map_spin([unl_alm, np.zeros_like(unl_alm)], nside, 2, lmax)
    # Harmonic-space deflection input.
    q_a, u_a = lensing.alm2lenmap_spin([unl_alm, None], [defl_alm, None], nside, 2, verbose=True, nband=8, facres=resolution)
    assert np.max(np.abs(q_ref - q_a)) / np.std(q_ref) < 1e-5, np.max(np.abs(q_ref - q_a)) / np.std(q_ref)
    assert np.max(np.abs(u_ref - u_a)) / np.std(u_ref) < 1e-5, np.max(np.abs(u_ref - u_a)) / np.std(u_ref)
    # Real-space (spin-1 map) deflection input must match the harmonic path.
    defl_re, defl_im = hp.alm2map_spin([defl_alm, np.zeros_like(defl_alm)], nside, 1, lmax)
    q_b, u_b = lensing.alm2lenmap_spin([unl_alm, None], [defl_re, defl_im], nside, 2, verbose=True, nband=8, facres=resolution)
    assert np.allclose(q_b, q_a, rtol=1e-10)
    assert np.allclose(u_b, u_a, rtol=1e-10)
    # An explicit zero B-mode must give bit-identical output to B=None.
    q_c, u_c = lensing.alm2lenmap_spin([unl_alm, unl_alm * 0.], [defl_re, defl_im], nside, 2, verbose=True, nband=8, facres=resolution)
    assert np.all(q_c == q_b)
    assert np.all(u_c == u_b)
if __name__ == '__main__':
    # The temperature check is disabled here; run only the polarization test.
    #test_t()
    test_pol()
from .sheet import IRow, ISheet
from .datasheet import IDataRow, IDataSheet, DataRowBase, DataSheet, PartialDataSheet
from .multisheet import IMultiRow, IMultiSheet, MultiRow, MultiSheet
from .language import Language
from .header import Header
from .column import Column
from .excollection import ExCollection
|
# https://leetcode.com/explore/interview/card/top-interview-questions-easy/99/others/721/
# https://leetcode.com/problems/valid-parentheses/description/
# Valid Parentheses
#
# Given a string containing just the characters '(', ')', '{', '}',
# '[' and ']', determine if the input string is valid.
import unittest
class Solution:
    # Strategy: for each opening bracket, push the closing bracket it
    # requires onto a stack. A closing bracket is valid only when it matches
    # the most recently pushed expectation; the string is valid when no
    # expectations remain at the end.
    # 76 / 76 test cases passed. Status: Accepted. Runtime: 40 ms (90.78% py3)
    def isValid(self, s):
        """
        :type s: str
        :rtype: bool
        """
        closers = {'(': ')', '{': '}', '[': ']'}
        expected = []
        for ch in s:
            if ch in closers:
                # Opener: remember which closer it demands.
                expected.append(closers[ch])
            elif ch in ')}]':
                # Closer: must match the newest pending expectation.
                if not expected or expected.pop() != ch:
                    return False
        return not expected
class TestParen(unittest.TestCase):
    """Unit tests for Solution.isValid."""
    def setUp(self):
        # Fresh Solution instance for every test method.
        self.sol = Solution()
    def test1(self):
        # Interleaved pairs close in the wrong order and are invalid.
        self.assertFalse(self.sol.isValid("([)]"))
if __name__ == '__main__':
    # Run the unit tests when executed directly.
    unittest.main()
|
#!/usr/bin/env python3
from setuptools import setup
import re
import os
def get_version():
    """Extract the release number from the VERSION constant in moverscore.py
    (e.g. ``VERSION = '1.0'``) located next to this setup script."""
    version_pattern = re.compile(r'''VERSION\s+=\s+['"]([0-9.]+)['"]''')
    module_path = os.path.join(os.path.dirname(__file__), 'moverscore.py')
    with open(module_path, encoding='utf-8') as fin:
        return version_pattern.search(fin.read()).group(1)
setup(
    name = 'moverscore',
    # version must be a string; the previous bare float (0.95) was invalid
    # metadata and also ignored the get_version() helper above, letting the
    # package version drift from the VERSION constant in moverscore.py.
    version = get_version(),
    description = 'MoverScore: Evaluating text generation with contextualized embeddings and earth mover distance',
    long_description = 'MoverScore is a semantic-based evaluation metric for text generation tasks, e.g., machine translation, text summarization, image captioning, question answering and etc, where the system and reference texts are encoded by contextualized word embeddings finetuned on Multi-Natural-Language-Inference, then the Earth Mover Distance is leveraged to compute the semantic distance by comparing two sets of embeddings resp. to the system and reference text',
    url = 'https://github.com/AIPHES/emnlp19-moverscore',
    author = 'Wei Zhao',
    author_email='andyweizhao1@gmail.com',
    maintainer_email='andyweizhao1@gmail.com',
    license = 'Apache License 2.0',
    python_requires = '>=3',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Text Processing',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3 :: Only',
    ],
    # keywords was a single comma-joined string wrapped in a list; setuptools
    # expects a list of individual keyword strings.
    keywords = ['machine translation', 'evaluation', 'NLP',
                'natural language processing', 'computational linguistics'],
    py_modules = ["moverscore"],
    install_requires = ['typing', 'portalocker'],
    extras_require = {},
    entry_points={
        'console_scripts': [
            'moverscore = moverscore:main',
        ],
    },
)
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import imp
import os
import platform
import sys
import textwrap
import getpass
import shutil
from subprocess import Popen, PIPE
from ConfigParser import SafeConfigParser
from cafe.engine.config import EngineConfig
if not platform.system().lower() == 'windows':
import pwd
class PackageNotFoundError(Exception):
    """Raised when a test repo package cannot be located on the import path."""
class PlatformManager(object):
    """Platform-sensitive helpers for resolving the working user, their
    home directory, and for user-owned file/directory creation."""

    # Evaluated once at class creation time.
    USING_WINDOWS = (platform.system().lower() == 'windows')
    USING_VIRTUALENV = hasattr(sys, 'real_prefix')

    @classmethod
    def get_current_user(cls):
        """Returns the name of the current user. For linux, always tries to
        return a user other than 'root' if it can.
        """
        if cls.USING_WINDOWS:
            return getpass.getuser()
        sudo_user = os.getenv("SUDO_USER")
        env_user = os.getenv("USER")
        # Under 'sudo' outside a virtualenv, prefer the invoking user.
        if (not cls.USING_VIRTUALENV and env_user == 'root'
                and sudo_user not in ['root', None]):
            return sudo_user
        # Return the effective user, or root if all else fails.
        return env_user or 'root'

    @classmethod
    def get_user_home_path(cls):
        """Home directory of the working user; the env prefix when a
        virtualenv is active."""
        if cls.USING_VIRTUALENV:
            return sys.prefix
        return os.path.expanduser("~{0}".format(cls.get_current_user()))

    @classmethod
    def get_user_uid(cls):
        """Numeric uid of the working user (POSIX only; None on Windows)."""
        if cls.USING_WINDOWS:
            return None
        return pwd.getpwnam(cls.get_current_user()).pw_uid

    @classmethod
    def get_user_gid(cls):
        """Numeric gid of the working user (POSIX only; None on Windows)."""
        if cls.USING_WINDOWS:
            return None
        return pwd.getpwnam(cls.get_current_user()).pw_gid

    @classmethod
    def safe_chown(cls, path):
        """chown *path* to the working user; a silent no-op on Windows."""
        if cls.USING_WINDOWS:
            return
        os.chown(path, cls.get_user_uid(), cls.get_user_gid())

    @classmethod
    def safe_create_dir(cls, directory_path):
        """Create *directory_path* (and parents) unless it already exists."""
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)
class TestEnvManager(object):
    """Manages setting all required and optional environment variables used by
    the engine and its implementations.
    Useful for writing bootstrappers for runners and scripts.
    """
    class _lazy_property(object):
        '''
        Meant to be used for lazy evaluation of an object attribute.
        The property should represent non-mutable data, as it replaces itself.
        '''
        def __init__(self, func):
            self.func = func
            self.func_name = func.__name__
        def __get__(self, obj, cls):
            # Non-data descriptor: compute the value on first access, then
            # cache it on the instance under the same name so the descriptor
            # is bypassed on every later access.
            if obj is None:
                return None
            value = self.func(obj)
            setattr(obj, self.func_name, value)
            return value
    def __init__(
            self, product_name, test_config_file_name,
            engine_config_path=None, test_repo_package_name=None):
        # Optional override of the repo package; falls back to the engine
        # config's default_test_repo (see test_repo_package below).
        self._test_repo_package_name = test_repo_package_name
        self.product_name = product_name
        self.test_config_file_name = test_config_file_name
        self.engine_config_path = engine_config_path or \
            EngineConfigManager.ENGINE_CONFIG_PATH
        self.engine_config_interface = EngineConfig(self.engine_config_path)
    def finalize(self, create_log_dirs=True, set_os_env_vars=True):
        """Sets all non-configured values to the defaults in the engine.config
        file. set_defaults=False will override this behavior, but note that
        unless you manually set the os environment variables yourself, this
        will result in undefined behavior
        Creates all log dir paths (overridden by sending create_log_dirs=False)
        Checks that all set paths exist, raises exception if they don't.
        """
        def _check(path):
            # Fail loudly when a required path is missing.
            if not os.path.exists(path):
                raise Exception('{0} does not exist'.format(path))
        def _create(path):
            if not os.path.exists(path):
                os.makedirs(path)
        _check(self.test_repo_path)
        _check(self.test_data_directory)
        _check(self.test_config_file_path)
        if create_log_dirs:
            _create(self.test_root_log_dir)
            _create(self.test_log_dir)
        # Log dirs must exist at this point whether or not we created them.
        _check(self.test_root_log_dir)
        _check(self.test_log_dir)
        if set_os_env_vars:
            # Export everything the engine and its plugins read at runtime.
            os.environ['CAFE_ENGINE_CONFIG_FILE_PATH'] = \
                self.engine_config_path
            os.environ["CAFE_TEST_REPO_PACKAGE"] = self.test_repo_package
            os.environ["CAFE_TEST_REPO_PATH"] = self.test_repo_path
            os.environ["CAFE_DATA_DIR_PATH"] = self.test_data_directory
            os.environ["CAFE_ROOT_LOG_PATH"] = self.test_root_log_dir
            os.environ["CAFE_TEST_LOG_PATH"] = self.test_log_dir
            os.environ["CAFE_CONFIG_FILE_PATH"] = self.test_config_file_path
            os.environ["CAFE_LOGGING_VERBOSITY"] = self.test_logging_verbosity
            os.environ["CAFE_MASTER_LOG_FILE_NAME"] = \
                self.test_master_log_file_name
    @_lazy_property
    def test_repo_path(self):
        """NOTE: There is no default for test_repo_path because we don't
        officially support test repo paths yet, even though every runner just
        gets the path to the test repo package.
        """
        module_info = None
        try:
            # imp.find_module returns (file, pathname, description); only the
            # pathname is used.
            module_info = imp.find_module(self.test_repo_package)
        except ImportError:
            raise PackageNotFoundError(
                "Cannot find test repo '{0}'".format(self.test_repo_package))
        return str(module_info[1])
    @_lazy_property
    def test_repo_package(self):
        """NOTE: The actual test repo package is never used by any current
        runners, instead they reference the root path to the tests. For that
        reason, it sets the CAFE_TEST_REPO_PATH directly as well as
        CAFE_TEST_REPO_PACKAGE
        """
        return os.path.expanduser(
            self._test_repo_package_name
            or self.engine_config_interface.default_test_repo)
    @_lazy_property
    def test_data_directory(self):
        # Root directory for static test data files.
        return os.path.expanduser(self.engine_config_interface.data_directory)
    @_lazy_property
    def test_root_log_dir(self):
        # <log_directory>/<product name>/<config file name>
        return os.path.expanduser(
            os.path.join(
                self.engine_config_interface.log_directory, self.product_name,
                self.test_config_file_name))
    @_lazy_property
    def test_log_dir(self):
        # Timestamped subdirectory of the root log dir, unique per run
        # (spaces and colons replaced for filesystem safety).
        log_dir_name = str(datetime.datetime.now()).replace(" ", "_").replace(
            ":", "_")
        return os.path.expanduser(
            os.path.join(self.test_root_log_dir, log_dir_name))
    @_lazy_property
    def test_config_file_path(self):
        # <config_directory>/<product name>/<config file name>
        return os.path.expanduser(
            os.path.join(
                self.engine_config_interface.config_directory,
                self.product_name, self.test_config_file_name))
    @_lazy_property
    def test_logging_verbosity(self):
        """Currently supports STANDARD and VERBOSE.
        TODO: Implement 'OFF' option that adds null handlers to all loggers
        """
        return self.engine_config_interface.logging_verbosity
    @_lazy_property
    def test_master_log_file_name(self):
        # Base name of the engine's master log file.
        return self.engine_config_interface.master_log_file_name
class EngineDirectoryManager(object):
    """Creates and owns the default .opencafe directory tree.
    NOTE: this class uses Python 2 print statements.
    """
    class _Namespace(dict):
        """Converts the top-level keys of this dictionary into a namespace.
        Raises exception if any self.keys() collide with internal attributes.
        """
        def __init__(self, **kwargs):
            dict.__init__(self, **kwargs)
            # Refuse keys that would shadow real dict attributes (e.g. 'items').
            collisions = set(kwargs) & set(dir(self))
            if bool(collisions):
                raise Exception(
                    "Cannot set attribute {0}. Namespace cannot contain "
                    "any keys that collide with internal attribute "
                    "names.".format([c for c in collisions]))
        def __getattr__(self, name):
            # Attribute access falls back to dict key lookup.
            try:
                return self[name]
            except KeyError:
                raise AttributeError(
                    "Namespace has no attribute '{0}'".format(name))
    # Wrapper used to pretty-print progress messages ("* ..." style).
    wrapper = textwrap.TextWrapper(
        initial_indent="* ", subsequent_indent="  ", break_long_words=False)
    # .opencafe Directories
    OPENCAFE_ROOT_DIR = os.path.join(
        PlatformManager.get_user_home_path(), ".opencafe")
    OPENCAFE_SUB_DIRS = _Namespace(
        LOG_DIR=os.path.join(OPENCAFE_ROOT_DIR, 'logs'),
        DATA_DIR=os.path.join(OPENCAFE_ROOT_DIR, 'data'),
        TEMP_DIR=os.path.join(OPENCAFE_ROOT_DIR, 'temp'),
        CONFIG_DIR=os.path.join(OPENCAFE_ROOT_DIR, 'configs'),
        PLUGIN_CACHE=os.path.join(OPENCAFE_ROOT_DIR, 'plugin_cache'))
    @classmethod
    def create_engine_directories(cls):
        """Create the .opencafe root directory and all standard sub dirs."""
        print cls.wrapper.fill('Creating default directories in {0}'.format(
            cls.OPENCAFE_ROOT_DIR))
        # Create the opencafe root dir and sub dirs
        PlatformManager.safe_create_dir(cls.OPENCAFE_ROOT_DIR)
        print cls.wrapper.fill('...created {0}'.format(cls.OPENCAFE_ROOT_DIR))
        for _, directory_path in cls.OPENCAFE_SUB_DIRS.items():
            PlatformManager.safe_create_dir(directory_path)
            print cls.wrapper.fill('...created {0}'.format(directory_path))
    @classmethod
    def set_engine_directory_permissions(cls):
        """Recursively changes permissions default engine directory so that
        everything is user-owned
        """
        PlatformManager.safe_chown(cls.OPENCAFE_ROOT_DIR)
        for root, dirs, files in os.walk(cls.OPENCAFE_ROOT_DIR):
            for d in dirs:
                PlatformManager.safe_chown(os.path.join(root, d))
            for f in files:
                PlatformManager.safe_chown(os.path.join(root, f))
    @classmethod
    def build_engine_directories(cls):
        """Updates, creates, and owns (as needed) all default directories"""
        cls.create_engine_directories()
        cls.set_engine_directory_permissions()
class EngineConfigManager(object):
    """Creates, reads, upgrades, backs up, and writes the engine.config file.
    NOTE: this class uses Python 2 print statements.
    """
    wrapper = textwrap.TextWrapper(
        initial_indent="* ", subsequent_indent="  ", break_long_words=False)
    # OpenCafe config defaults
    ENGINE_CONFIG_PATH = os.path.join(
        EngineDirectoryManager.OPENCAFE_ROOT_DIR, 'engine.config')
    @staticmethod
    def rename_section(
            config_parser_object, current_section_name, new_section_name):
        """Move every option of *current_section_name* into a new section and
        drop the old one. Returns the mutated parser."""
        items = config_parser_object.items(current_section_name)
        config_parser_object.add_section(new_section_name)
        for item in items:
            config_parser_object.set(new_section_name, item[0], item[1])
        config_parser_object.remove_section(current_section_name)
        return config_parser_object
    @staticmethod
    def rename_section_option(
            config_parser_object, section_name, current_option_name,
            new_option_name):
        """Rename one option within *section_name*, preserving its value.
        Returns the mutated parser."""
        current_option_value = config_parser_object.get(
            section_name, current_option_name)
        config_parser_object.set(
            section_name, new_option_name, current_option_value)
        config_parser_object.remove_option(section_name, current_option_name)
        return config_parser_object
    @staticmethod
    def read_config_file(path):
        """Parse *path* and return the populated SafeConfigParser."""
        config = SafeConfigParser()
        cfp = open(path, 'r')
        config.readfp(cfp)
        cfp.close()
        return config
    @classmethod
    def write_config_backup(cls, config):
        """Write *config* next to the live file as engine.config.backup."""
        config_backup_location = "{0}{1}".format(
            cls.ENGINE_CONFIG_PATH, '.backup')
        print cls.wrapper.fill(
            "Creating backup of {0} at {1}".format(
                cls.ENGINE_CONFIG_PATH, config_backup_location))
        cls.write_and_chown_config(config, config_backup_location)
    @classmethod
    def update_engine_config(cls):
        """
        Applies to an existing engine.config file all modifications made to
        the default engine.config file since opencafe's release in the order
        those modifications were added.
        """
        class _UpdateTracker(object):
            # Records whether any update ran, and ensures the original config
            # is backed up at most once before the first change.
            def __init__(self):
                self._updated = False
                self._backed_up = False
            def register_update(self, config=None, backup=True):
                if not self._backed_up and backup:
                    EngineConfigManager.write_config_backup(config)
                    self._backed_up = True
                self._updated = True
        config = None
        update_tracker = _UpdateTracker()
        # Read config from current default location ('.opencafe/engine.config)
        config = config or cls.read_config_file(cls.ENGINE_CONFIG_PATH)
        # UPDATE CODE GOES HERE
        if not update_tracker._updated:
            wrapper = textwrap.TextWrapper(initial_indent="  ")
            print wrapper.fill(
                "...no updates applied, engine.config is newest version")
        return config
    @classmethod
    def generate_default_engine_config(cls):
        """Build an in-memory engine.config populated with stock defaults."""
        config = SafeConfigParser()
        config.add_section('OPENCAFE_ENGINE')
        config.set(
            'OPENCAFE_ENGINE', 'config_directory',
            EngineDirectoryManager.OPENCAFE_SUB_DIRS.CONFIG_DIR)
        config.set(
            'OPENCAFE_ENGINE', 'data_directory',
            EngineDirectoryManager.OPENCAFE_SUB_DIRS.DATA_DIR)
        config.set(
            'OPENCAFE_ENGINE', 'log_directory',
            EngineDirectoryManager.OPENCAFE_SUB_DIRS.LOG_DIR)
        config.set(
            'OPENCAFE_ENGINE', 'temp_directory',
            EngineDirectoryManager.OPENCAFE_SUB_DIRS.TEMP_DIR)
        config.set(
            'OPENCAFE_ENGINE', 'master_log_file_name', 'cafe.master')
        config.set(
            'OPENCAFE_ENGINE', 'logging_verbosity', 'STANDARD')
        config.set(
            'OPENCAFE_ENGINE', 'default_test_repo', 'pciservicesroast')
        return config
    @staticmethod
    def write_and_chown_config(config_parser_object, path):
        """Write the config to *path* and hand ownership to the working user."""
        cfp = open(path, 'w+')
        config_parser_object.write(cfp)
        cfp.close()
        PlatformManager.safe_chown(path)
    @classmethod
    def build_engine_config(cls):
        """Update an existing engine.config in place, or create the default."""
        config = None
        if os.path.exists(cls.ENGINE_CONFIG_PATH):
            print cls.wrapper.fill('Checking for updates to engine.config...')
            config = cls.update_engine_config()
        else:
            print cls.wrapper.fill(
                "Creating default engine.config at {0}".format(
                    cls.ENGINE_CONFIG_PATH))
            config = cls.generate_default_engine_config()
        cls.write_and_chown_config(config, cls.ENGINE_CONFIG_PATH)
    @classmethod
    def install_optional_configs(cls, source_directory, print_progress=True):
        """Copy reference config files into the .opencafe tree, mirroring the
        directory layout found beneath *source_directory*."""
        if print_progress:
            twrap = textwrap.TextWrapper(
                initial_indent='* ', subsequent_indent='  ',
                break_long_words=False)
            # NOTE(review): this format string has no {0} placeholder, so the
            # root-dir argument is silently dropped from the message.
            print twrap.fill(
                'Installing reference configuration files in ...'.format(
                    EngineDirectoryManager.OPENCAFE_ROOT_DIR))
            twrap = textwrap.TextWrapper(
                initial_indent='  ', subsequent_indent='  ',
                break_long_words=False)
        _printed = []
        for root, sub_folders, files in os.walk(source_directory):
            for file_ in files:
                source = os.path.join(root, file_)
                # NOTE(review): joining ROOT_DIR with 'root' only nests the
                # copy when the walk yields relative paths; an absolute
                # source_directory would make os.path.join discard ROOT_DIR.
                destination_dir = os.path.join(
                    EngineDirectoryManager.OPENCAFE_ROOT_DIR, root)
                destination_file = os.path.join(destination_dir, file_)
                PlatformManager.safe_create_dir(destination_dir)
                PlatformManager.safe_chown(destination_dir)
                if print_progress:
                    # NOTE(review): this formatted string is built and then
                    # discarded — almost certainly a missing print statement.
                    'Installing {0} at {1}'.format(source, destination_dir)
                shutil.copyfile(source, destination_file)
                if print_progress:
                    # Print each destination directory only once.
                    if destination_dir not in _printed:
                        print twrap.fill('{0}'.format(destination_dir))
                        _printed.append(destination_dir)
                PlatformManager.safe_chown(destination_file)
class EnginePluginManager(object):
    """Copies bundled plugins into the user's .opencafe cache and installs
    them with pip. NOTE: this class uses Python 2 print statements and the
    Python 2-only generator .next() method.
    """
    @classmethod
    def copy_plugin_to_cache(
            cls, plugins_src_dir, plugins_dest_dir, plugin_name):
        """ Copies an individual plugin to the .opencafe plugin cache """
        src_plugin_path = os.path.join(plugins_src_dir, plugin_name)
        dest_plugin_path = os.path.join(plugins_dest_dir, plugin_name)
        # Replace any stale cached copy wholesale before copying.
        if os.path.exists(dest_plugin_path):
            shutil.rmtree(dest_plugin_path)
        shutil.copytree(src_plugin_path, dest_plugin_path)
    @classmethod
    def populate_plugin_cache(cls, plugins_src_dir):
        """ Handles moving all plugin src data from package into the user's
        .opencafe folder for installation by the cafe-config tool.
        """
        default_dest = EngineDirectoryManager.OPENCAFE_SUB_DIRS.PLUGIN_CACHE
        # First walk tuple's second element = immediate subdirectory names.
        plugins = os.walk(plugins_src_dir).next()[1]
        for plugin_name in plugins:
            cls.copy_plugin_to_cache(
                plugins_src_dir, default_dest, plugin_name)
    @classmethod
    def list_plugins(cls):
        """ Lists all plugins currently available in user's .opencafe cache"""
        plugin_cache = EngineDirectoryManager.OPENCAFE_SUB_DIRS.PLUGIN_CACHE
        plugin_folders = os.walk(plugin_cache).next()[1]
        wrap = textwrap.TextWrapper(initial_indent="  ",
                                    subsequent_indent="  ",
                                    break_long_words=False).fill
        for plugin_folder in plugin_folders:
            print wrap('... {name}'.format(name=plugin_folder))
    @classmethod
    def install_plugins(cls, plugin_names):
        """ Installs a list of plugins into the current environment"""
        for plugin_name in plugin_names:
            cls.install_plugin(plugin_name)
    @classmethod
    def install_plugin(cls, plugin_name):
        """ Install a single plugin by name into the current environment"""
        plugin_cache = EngineDirectoryManager.OPENCAFE_SUB_DIRS.PLUGIN_CACHE
        plugin_dir = os.path.join(plugin_cache, plugin_name)
        wrap = textwrap.TextWrapper(initial_indent="  ",
                                    subsequent_indent="  ",
                                    break_long_words=False).fill
        # Pretty output of plugin name
        print wrap('... {name}'.format(name=plugin_name))
        # Verify that the plugin exists
        if not os.path.exists(plugin_dir):
            print wrap('* Failed to install plugin: {0}'.format(plugin_name))
            return
        # Install Plugin
        process, standard_out, standard_error = None, None, None
        # NOTE(review): shell=True with an interpolated path is shell-
        # injectable if plugin names are ever untrusted; plugin_dir comes
        # from the local cache here.
        cmd = 'pip install {name} --upgrade'.format(name=plugin_dir)
        try:
            process = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
            standard_out, standard_error = process.communicate()
        except Exception as e:
            msg = '* Plugin install failed {0}\n{1}\n'.format(cmd, e)
            print wrap(msg)
        # Print failure if we receive an error code
        if process and process.returncode != 0:
            print wrap(standard_out)
            print wrap(standard_error)
            print wrap('* Failed to install plugin: {0}'.format(plugin_name))
|
"""
Transmission Preprocessing
--------------------------
preprocessing functions for The Transmission component
"""
import os.path
from pandas import read_csv
import numpy as np
def preprocess(preprocessor, **kwargs):
    """Preprocess transmission-line data for the Transmission component.

    Parameters
    ----------
    preprocessor: preprocessor.Preprocessor
        a preprocessor object

    Returns
    -------
    dict
        preprocessed data for the "Transmission and Interties" section
    """
    data = read_csv(
        os.path.join(preprocessor.data_dir, 'transmission_distances.csv'),
        comment='#',
        index_col=0)
    # Pick the community/alias labels to look up in the distance table:
    # the child community of a non-processed intertie, otherwise the
    # primary community.
    if not preprocessor.process_intertie and \
            preprocessor.intertie_status == 'child':
        ids = [preprocessor.communities[1], preprocessor.aliases[1]]
    else:
        ids = [preprocessor.communities[0], preprocessor.aliases[0]]
    # DataFrame.ix was deprecated and removed from pandas; reindex() keeps
    # the old .ix semantics of yielding all-NaN rows for missing labels,
    # which the notnull filter below then drops.
    selected = data.reindex(ids)
    data = selected[selected.isnull().all(1) == False]
    try:
        # NOTE(review): float() on a Series only succeeds for exactly one
        # matching row (and is deprecated in recent pandas); any other case
        # raises TypeError and falls into the no-data defaults below.
        max_savings = float(data['Maximum savings ($/kWh)'])
        nearest_comm = data['Nearest Community with Lower Price Power']
        nearest_comm = str(nearest_comm.values[0])
        if 'nan' == nearest_comm:
            nearest_comm = ''
        distance = float(data['Distance to Community'])
    except TypeError:
        # No usable intertie row: mark the savings data as unavailable.
        max_savings = np.nan
        nearest_comm = ''
        distance = np.nan
    yto = 5  # years to operation
    start_year = preprocessor.data['community']['current year'] + yto
    return {
        "Transmission and Interties" : {
            'enabled': True,
            'lifetime': 20,
            'start year': start_year,
            'transmission loss per mile': .001 * 100,
            'nearest community with lower price': nearest_comm,
            'distance to community': distance,
            'maximum savings': max_savings,
            'percent o&m': 5,
            'heat recovery o&m' : 1500,
            'est. intertie cost per mile': {
                'road needed': 500000,
                'road not needed': 250000
            },
            'diesel generator o&m': { # upper kW limit: price
                '150': 84181.00,
                '360': 113410.00,
                '600': 134434.00,
                'else': 103851.00
            }
        }
    }
## list of transmission preprocessing functions exposed to the framework
preprocess_funcs = [preprocess]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.