Dataset schema (one row per source file; column name: type and observed range):
  blob_id: string (length 40)
  directory_id: string (length 40)
  path: string (length 2 to 616)
  content_id: string (length 40)
  detected_licenses: list (0 to 69 items)
  license_type: string (2 classes)
  repo_name: string (length 5 to 118)
  snapshot_id: string (length 40)
  revision_id: string (length 40)
  branch_name: string (length 4 to 63)
  visit_date: timestamp[us]
  revision_date: timestamp[us]
  committer_date: timestamp[us]
  github_id: int64 (2.91k to 686M, nullable)
  star_events_count: int64 (0 to 209k)
  fork_events_count: int64 (0 to 110k)
  gha_license_id: string (23 classes)
  gha_event_created_at: timestamp[us]
  gha_created_at: timestamp[us]
  gha_language: string (213 classes)
  src_encoding: string (30 classes)
  language: string (1 class)
  is_vendor: bool (2 classes)
  is_generated: bool (2 classes)
  length_bytes: int64 (2 to 10.3M)
  extension: string (246 classes)
  content: string (length 2 to 10.3M)
  authors: list (1 item)
  author_id: string (length 0 to 212)
blob_id: f8c4b43d6d9cc36156f18a132ac87630172fdd51
directory_id: fc05249c73f910a4d36f471eb91e05256a64cdfe
path: /phd/BC-NBUC_FM_orig.py
content_id: 38718187d3142fc611bc4b4069940eab2ebf29b2
detected_licenses: []
license_type: no_license
repo_name: rsoutelino/sandbox
snapshot_id: f51b37619cd7a61a0446d83e2e1c2af58f14802a
revision_id: 814d215582d8e14514ba93daf1b41f6d118b906c
branch_name: refs/heads/master
visit_date: 2023-03-02T12:05:18.703732
revision_date: 2023-03-02T01:58:15
committer_date: 2023-03-02T01:58:15
github_id: 28,204,889
star_events_count: 3
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 24,435
extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################################
# Building coupled BC-NBUC Feature Model
# Rafael Soutelino - rsoutelino@gmail.com
# Last Modification: Apr, 2011
###################################################################################
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from roms_setup import near
import scipy.io as sp
from mpl_toolkits.basemap import Basemap
import seawater.csiro as sw
import netCDF4 as nc
from cookb_signalsmooth import smooth
# FUNCTIONS #################################################################
def transp(lon, z, v):
"""
    Computes the volume transport [Sv] of a vertical velocity section v(z, lon).
    Be careful with the approximation used for the distance computation:
    longitudes are converted to metres with 111 km per degree, which is only
    valid at low latitudes.
"""
dx = np.diff(lon, axis=1) * 111 * 1000 # valid only for low latitudes!!!
aux = dx[:,0]; aux.shape = (np.size(aux), 1)
dx = np.concatenate( (dx, aux), axis=1)
dz = np.diff(z, axis=0)
aux = dz[0,:]; aux.shape = (1, np.size(aux))
dz = np.concatenate( (dz, aux), axis=0)
transp = np.abs(dx) * np.abs(dz) * v; transp = transp.sum()
transp = transp / 1e6
return transp
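# --- added sketch (not part of the original script): quick sanity check of transp() ---
# transp() sums |dx|*|dz|*v over every cell of the section and converts to Sverdrups,
# so a uniform jet should give roughly speed x width x depth / 1e6.
# The arrays below are hypothetical; any 2D (z, lon) section with matching shapes works.
_lon2d, _z2d = np.meshgrid(np.linspace(-40.0, -39.1, 10), np.linspace(-500.0, 0.0, 51))
_T_check = transp(_lon2d, _z2d, np.zeros(_lon2d.shape) + 0.5)  # ~0.5 m/s * ~111 km * ~510 m / 1e6 = ~28 Sv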
# Input Parameters ##################################################################
# Common System Parameters =========================================================
D = 3. # length of the transect or total length of the
# modeled jet [degrees]
tgmax = 0.15 # tan of the maximum angle that the transect is
# allowed to have to a parallel
dr = 0.1 # horizontal spacing for the feature model [degree]
ln = 30. # step along the isobath (in points) that sets the number of transects
zmax = 1500. # maximum depth for the system
dz = 1. # vertical resolution of the transects [m]
# DO NOT CHANGE THIS! If changed,
# the code requires changes as well
delta = 100. # jets width [km]
# NBUC parameters ==================================================================
z0_NBUC_S = 500. # incoming NBUC core depth [m]
z0_NBUC_N = 200. # outgoing NBUC core depth [m]
v0_NBUC_S = 0.2 # incoming NBUC core velocity [m/s]
v0_NBUC_N = 0.5 # outgoing NBUC core velocity [m/s]
ds_NBUC = 100. # NBUC thickness from core to surface [m]
db_NBUC = 360. # NBUC thickness from core to bottom [m]
# BC parameters ===================================================================
BC_origin = -14 # BC Origin latitude
z0_BC = 0. # BC core depth [m]
v0_BC_S = -0.2 # outgoing BC core velocity [m/s]
v0_BC_N = 0 # BC origin core velocity [m/s]
d_BC = 150. # BC thickness [m]
#####################################################################################
# analytical integration of the functions to check on the transport
# NBUC at North
## Analytical Expression of the Transport: T = v0 . d ( ds + db )
#plt.figure(10, figsize=(14,8), facecolor='w')
## v0, d, ds fixed:
#db = np.arange(150, 500, 10)
#T = ( v0_NBUC_N * d*1e3 * ( ds_NBUC + db ) ) * 1e-6
#p1 = plt.subplot(221); plt.plot(db, T, 'g', linewidth=2)
#plt.plot( (150,500) , (23,23) , 'k', alpha='0.2', linewidth=10); grid()
#plt.title('$v_0$ = '+ str(v0_NBUC_N) +'m/s, $\delta$ = '+str(int(d)) +' km, $\delta_s$ = '+str(int(ds_NBUC)) +' m')
#plt.xlabel('$\delta_b$ [m]'); plt.ylabel('NBUC Transport [Sv]'); p1.set_ylim(15,30)
#ds = np.arange(20, 300, 10)
#T = ( v0_NBUC_N * d*1e3 * ( ds + db_NBUC ) ) * 1e-6
#p2 = plt.subplot(222); plt.plot(ds, T, 'g', linewidth=2)
#plt.plot( (20,300) , (23,23) , 'k', alpha='0.2', linewidth=10); grid()
#plt.title('$v_0$ = '+ str(v0_NBUC_N) +'m/s, $\delta$ = '+str(int(d)) +' km, $\delta_b$ = '+str(int(db_NBUC)) +' m')
#plt.xlabel('$\delta_s$ [m]'); p2.set_ylim(15,30)
#dd = np.arange(50, 150, 10)
#T = ( v0_NBUC_N * dd*1e3 * ( ds_NBUC + db_NBUC ) ) * 1e-6
#p3 = plt.subplot(223, position=(0.125, 0.1, 0.35, 0.3)); plt.plot(dd, T, 'g', linewidth=2)
#plt.plot( (50,150) , (23,23) , 'k', alpha='0.2', linewidth=10); grid()
#plt.title('$v_0$ = '+ str(v0_NBUC_N) +'m/s, $\delta_s$ = '+str(int(ds_NBUC)) +' m, $\delta_b$ = '+str(int(db_NBUC)) +' m')
#plt.xlabel('$\delta$ [km]'); plt.ylabel('NBUC Transport [Sv]'); p3.set_ylim(15,30);
#v0 = np.arange(0.3, 0.8, 0.03)
#T = ( v0 * d*1e3 * ( ds_NBUC + db_NBUC ) ) * 1e-6
#p4 = plt.subplot(224, position=(0.55, 0.1, 0.35, 0.3)); plt.plot(v0, T, 'g', linewidth=2)
#plt.plot( (0.3,0.8) , (23,23) , 'k', alpha='0.2', linewidth=10); grid()
#plt.title('$\delta$ = '+ str(int(d)) +'km, $\delta_s$ = '+str(int(ds_NBUC)) +' m, $\delta_b$ = '+str(int(db_NBUC)) +' m')
#plt.xlabel('$v_0$ [m/s]'); p4.set_ylim(15,30)
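# Worked example of the analytical expression above (added sketch, using the parameters defined earlier):
# T = v0 * delta * (ds + db) = 0.5 m/s * 100e3 m * (100 + 360) m = 2.3e7 m3/s = 23 Sv,
# which is the 23 Sv reference line drawn in the commented plots above.
_T_nbuc_north = ( v0_NBUC_N * (delta * 1e3) * (ds_NBUC + db_NBUC) ) * 1e-6   # = 23.0 Sv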
# ======================================================
# CREATING ISOBATH-FOLLOWING NBUC FEATURE MODEL:
# ======================================================
# ======================================================
# loading roms grid to get limits and topography
print ' '
print ' \n' + '==> ' + ' READING GRID NETCDF FILE ...\n' + ' '
print ' '
# I need a bigger grid to get the isobath
grdfile = nc.Dataset('/home/rsoutelino/rsoutelino/myroms/phd_run/phd1_grd.nc')
# assigning some variables from grid file
lonr = grdfile.variables['lon_rho'][:]
latr = grdfile.variables['lat_rho'][:]
h = grdfile.variables['h'][:]
# getting an isobath
plt.figure(); con = plt.contour(lonr, latr, h, levels=[100] )
col = con.collections[0]; paths = col.get_paths()
path0 = paths[0]; isob = path0.vertices; plt.close('all')
# limiting isobath within model domain
f = np.where( (isob[:,1] >= -24) & (isob[:,1] <= -8) )
isob = isob[f[0],:]
# smoothing isobath
isob[:,0] = smooth(isob[:,0],window_len=201,window='hanning')
isob[:,1] = smooth(isob[:,1],window_len=101,window='hanning')
# now I load original small grid
grdfile = nc.Dataset('/home/rsoutelino/rsoutelino/myroms/phd_run/phd8_grd.nc')
# assigning some variables from grid file
lonr = grdfile.variables['lon_rho'][:]
latr = grdfile.variables['lat_rho'][:]
h = grdfile.variables['h'][:]
# creating adimensional pairs of the parameters
dr = dr / D # adimensional horizontal resolution
r = D * np.arange(0, 1, dr) # adimensional horizontal transects
r0 = (1.0/D) * D # defining transect center
d = ( delta / 111.0 ) / D # normalized jet width
### NBUC FEATURE MODEL #####################################################
# ======================================================
# defining domain, buffering variables
looprange = range(0, len(isob), int(ln)) # scanning the isobath
li = np.size(looprange); lj = np.size(r)
X = np.zeros( [li , lj]); Y = X.copy()
Z = np.arange(-zmax, 0.0+dz, dz); lk = np.size(Z)
U = np.zeros( [ lk , li , lj ] )
V = U.copy(); VS = U.copy()
v = np.zeros( [ lk , lj ] )
# ======================================================
# defining velocity-axis vertical structure v0 = v0(y,z)
# Y-dependence:
# Jet core depth will increase as a linear function from south to north
z0max = np.linspace(-z0_NBUC_S, -z0_NBUC_N, li)
# Core velocity will also increase as a linear function from south to north
v0max = np.linspace(v0_NBUC_S, v0_NBUC_N, li)
v0 = np.zeros( [lk, li] )
# Z-dependence:
# NBUC core at z0 [m], decaying as a Gaussian to 0 m/s towards the bottom;
# this is also y-dependent, to allow the jet thickness to vary along the slope
d1 = ds_NBUC / dz # gaussian vertical upper width, normalized by dz
# another gaussian will be adopted to decay velocities to surface
d2 = np.linspace(db_NBUC/dz, db_NBUC/dz, li) # gaussian lower width, normalized by dz
# starting the looping to create the slope-following NBUC-FM
print ' '
print '======== CREATING SLOPE-FOLLOWING NBUC-FM ========'
print ' '
i = -1 # initializing index counter
for c in looprange:
print ' Transect ' + str(i+1) + ' / ' + str(np.size(looprange))
i = i + 1
x0 = isob[c:c+6, 0]; y0 = isob[c:c+6, 1]
tgr, b = np.polyfit(x0, y0, 1) # finding isobath-tangent straight line
x0 = x0[0]; y0 = y0[0]
tgr = -1.0 / tgr; b = y0 - tgr * x0 # finding normal straight line
    if tgr >= tgmax: # enforcing maximum angle
tgr = tgmax
elif tgr <= -tgmax:
tgr = -tgmax
# =======================================
# assembling vertical jet-core structure
# upper part
z1 = Z[z0max[i]/dz:]
v01 = v0max[i] * np.exp( (-1)* (z1 - (z0max[i]/dz))**2 / (2*d1**2) )
v0[z0max[i]/dz:, i] = v01
# lower part
z2 = Z[:z0max[i]/dz]
v02 = v0max[i] * np.exp( (-1)* (z2 - (z0max[i]/dz))**2 / (2*d2[i]**2) )
v0[:z0max[i]/dz, i] = v02
# ==========================================================
# writing NBUC-FM to transect based on cross-slope structure
for k in range( 0, lk):
v[k, :] = v0[k, i] * np.exp( (-1)* ( ( r-r0 )**2 / ( 2*d**2 ) ) )
# georeferencing velocities and coordinates
angr = np.arctan(tgr)
cosr, sinr = np.cos(angr), np.sin(angr)
X[i, :] = r * cosr + x0
Y[i, :] = r * sinr + y0
U[:, i, :] = v * sinr * (-1)
V[:, i, :] = v * cosr
VS[:, i, :] = v
### BC FEATURE MODEL #####################################################
# ======================================================
# defining domain, buffering variables
U2 = np.zeros( [ lk , li , lj ] )
V2 = U2.copy(); VS2 = U2.copy()
v2 = np.zeros( [ lk , lj ] )
# ======================================================
# defining velocity-axis vertical structure v0 = v0(y,z)
# Y-dependence:
# Core velocity ramps linearly from v0_BC_S at the southern boundary to v0_BC_N at the BC origin latitude
v0max = np.zeros(li)
lataux = Y[:,0]
fcb = np.where(lataux <= BC_origin); fcb = fcb[0]
v0max[fcb] = np.linspace(v0_BC_S, v0_BC_N, fcb.size)
v0 = np.zeros( [lk, li] )
# Z-dependence:
# BC core at the surface (z0_BC), decaying as a Gaussian with depth to 0 m/s
# (unlike the NBUC, the BC thickness d_BC is kept constant along the slope)
d1 = d_BC / dz # gaussian vertical width, normalized by dz
# starting the loop to create the slope-following BC-FM
print ' '
print '======== CREATING SLOPE-FOLLOWING BC-FM ========'
print ' '
i = -1 # initializing index counter
for c in looprange:
print ' Transect ' + str(i+1) + ' / ' + str(np.size(looprange))
i = i + 1
x0 = isob[c:c+6, 0]; y0 = isob[c:c+6, 1]
tgr, b = np.polyfit(x0, y0, 1) # finding isobath-tangent straight line
x0 = x0[0]; y0 = y0[0]
tgr = -1.0 / tgr; b = y0 - tgr * x0 # finding normal straight line
    if tgr >= tgmax: # enforcing maximum angle
tgr = tgmax
elif tgr <= -tgmax:
tgr = -tgmax
# =======================================
# assembling vertical jet-core structure
v0[:,i] = v0max[i] * np.exp( (-1)* (Z - (z0_BC/dz))**2 / (2*d1**2) )
# ==========================================================
    # writing BC-FM to transect based on cross-slope structure
for k in range( 0, lk):
v2[k, :] = v0[k, i] * np.exp( (-1)* ( ( r-r0 )**2 / ( 2*d**2 ) ) )
# georeferencing velocities and coordinates
angr = np.arctan(tgr)
cosr, sinr = np.cos(angr), np.sin(angr)
X[i, :] = r * cosr + x0
Y[i, :] = r * sinr + y0
U2[:, i, :] = v2 * sinr * (-1)
V2[:, i, :] = v2 * cosr
VS2[:, i, :] = v2
# Gathering both Feature Models
U = U + U2; V = V + V2; VS = VS + VS2
# some plotting and transport computation
plt.figure()
plt.plot(isob[:,0], isob[:,1]); plt.axis('equal')
plt.pcolormesh(X, Y, V[-1,...], vmin=-0.20, vmax=0.20, cmap=plt.cm.RdBu)
plt.grid(); plt.colorbar(); plt.title('V-Vel @ Surface')
plt.xlabel('Longitude'); plt.ylabel('Latitude')
plt.figure()
plt.plot(isob[:,0], isob[:,1]); plt.axis('equal')
plt.pcolormesh(X, Y, V[-400/dz,...], vmin=-0.50, vmax=0.50, cmap=plt.cm.RdBu)
plt.grid(); plt.colorbar(); plt.title('V-Vel @ 400 m')
plt.xlabel('Longitude'); plt.ylabel('Latitude')
plt.figure()
plt.contourf(X[0,:],Z, np.squeeze(V[:,0,:]),np.arange(-0.2,0.2+0.03,0.03), cmap=plt.cm.RdBu, alpha=0.5)
plt.colorbar(); plt.title('V-Vel @ South Boundary')
plt.xlabel('Longitude'); plt.ylabel('Depth')
xx,zz = np.meshgrid(X[0,:],Z)
Tcb = transp(xx[-200:,:], zz[-200:,:], np.squeeze(V[-200:,0,:]))
Tnbuc = transp(xx[:-200,:], zz[:-200,:], np.squeeze(V[:-200,0,:]))
plt.text(-42.5, -100,'BC: '+str(np.round(Tcb))+' Sv',color='k',fontsize=12,fontweight='bold')
plt.text(-42.5, -700,'NBUC: '+str(np.round(Tnbuc))+' Sv',color='k',fontsize=12,fontweight='bold')
###############################################
plt.figure()
plt.contourf(X[-1,:],Z, np.squeeze(V[:,-1,:]),np.arange(-0.5,0.5+0.05,0.05), cmap=plt.cm.RdBu, alpha=0.5)
fwhm_s = 2*np.sqrt(2*np.log(2)) * ds_NBUC; zd = (-z0_NBUC_N, -z0_NBUC_N + fwhm_s/2 )  # FWHM of a gaussian: 2*sqrt(2*ln2)*sigma
plt.plot((-34.13,-34.13),zd,'k', linewidth=5, alpha=0.4)
plt.text(-34, -90, '$\delta_s$', fontsize=16, fontweight='bold')
plt.text(-33.92, -90, ' = '+ str(int(ds_NBUC)) +'m')
fwhm_b = 2*np.sqrt(2*np.log(2)) * db_NBUC; zd = (-z0_NBUC_N, -z0_NBUC_N - fwhm_b/2 )
plt.plot((-34.13,-34.13),zd,'b', linewidth=5, alpha=0.4)
plt.text(-34, -500, '$\delta_b$', fontsize=16, fontweight='bold', color='b')
plt.text(-33.92, -500, ' = '+ str(int(db_NBUC)) +'m', fontsize=12, color='b')
fwhm = delta/111; xd = (-34.13 + fwhm/2, -34.13 - fwhm/2)
plt.plot(xd,(-200,-200),'g', linewidth=5, alpha=0.4)
plt.text(-33.8, -250, '$\delta$', fontsize=16, fontweight='bold', color='g')
plt.text(-33.72, -250, ' = '+ str(int(delta)) +'km', fontsize=12, color='g')
plt.text(-34.2, -220, '$v_0$', fontsize=20, fontweight='bold', color='k')
plt.text(-32.9, -250, '$v_0$', fontsize=20, fontweight='bold', color='k')
plt.text(-32.78, -250, ' = '+ str(v0_NBUC_N) +' m/s', fontsize=12, color='k')
plt.colorbar(); plt.title('V-Vel @ North Boundary')
plt.xlabel('Longitude'); plt.ylabel('Depth')
xx,zz = np.meshgrid(X[0,:],Z)
Tnbuc = transp(xx, zz, np.squeeze(V[:,-1,:]))
plt.text(-33, -200,'NBUC: '+str(np.round(Tnbuc))+' Sv',color='k',fontsize=12,fontweight='bold')
plt.axis([-35, -32, -1000, 10])
plt.show()
# ==========================================================
# COMPUTING GEOSTROPHICALLY BALANCED STATE VARIABLES
print ' '
print '======== COMPUTING GEOSTROPHICALLY BALANCED STATE VARIABLES ========'
print ' '
# integration the thermal wind equation:
# rho(x,z) = rho0(z) - rho_bar.f/g * int_0^L{ dv/dz dx}
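# Added note (not in the original script): an order-of-magnitude check of the relation above.
# With rho_bar ~ 1026 kg/m3, |f| ~ 4e-5 s-1 (latitude ~16 S), dv/dz ~ 0.5 m/s over 500 m = 1e-3 s-1,
# and a cross-slope integration length of ~100 km, the density anomaly is
# |drho| ~ rho_bar*|f|/g * (dv/dz) * L ~ 1026 * 4e-5 / 9.8 * 1e-3 * 1e5 ~ 0.4 kg/m3,
# i.e. a realistic horizontal density contrast across the jet.
# The bare name on the next line ('stop') is an intentional halt: it raises a NameError so
# the script stops here unless the geostrophic section below is meant to be run.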
stop
# obtaining rho0 and rho_bar from WOA2009:
MeanDens = sp.loadmat('MeanDens.mat')
rho0 = MeanDens['potdens'][:].ravel(); rho0 = rho0[::-1]
zrho = MeanDens['z'][:].ravel(); zrho = zrho[::-1]
salt0 = MeanDens['salt'][:].ravel(); salt0 = salt0[::-1]
rho0 = np.interp(Z, zrho, rho0)
salt0 = np.interp(Z, zrho, salt0)
rho0.shape = (np.size(rho0), 1)
salt0.shape = (np.size(salt0), 1)
rho_bar = rho0.mean()
print ' Density'
# obtaining dv/dz:
dvdz = np.zeros([ lk , li , lj])
for i in range(0, li):
vaux = np.squeeze(VS[:,i,:])
aux = np.array(np.gradient(vaux))
dvdz[:,i,:] = np.squeeze(aux[0, :, :])
# obtaining dS [m]: where S is the cross-slope axis
S = r * 111 * 1000; S, tmp = np.meshgrid(S, Z)
dS = np.array(np.gradient(S))
dS = np.squeeze(dS[1, :, :])
# constants
g = 9.8
f0 = 4 * np.pi * np.sin( np.pi * latr.mean()/180 ) / ( 24*3600 )
# COMPUTING DENSITY:
RHO = np.zeros([ lk , li , lj])
for i in range(0, li):
aux = dvdz[:,i,:]
rhoaux = rho0 - ((rho_bar*f0) / g) * np.cumsum( aux*dS, axis=1 )
RHO[:,i,:] = rhoaux
# COMPUTING TEMPERATURE AND SALINITY
# linearized equation of seawater state
alpha = 2.2e-4
beta = 8e-4
S0 = 37
rho_ref = 1000.7
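# Added note (not in the original script): the constants above imply the linearized
# equation of state rho = rho_ref * (1 - alpha*T + beta*S), with alpha the thermal
# expansion and beta the haline contraction coefficients. Solving for T with S = salt0
# gives the expression used below: T = ( -rho/rho_ref + 1 + beta*salt0 ) / alpha.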
TEMP = np.zeros([ lk , li , lj])
SALT = np.zeros([ lk , li , lj])
print ' Temperature, Salinity'
for i in range(0, li):
TEMP[:,i,:] = ( -RHO[:,i,:]/rho_ref + 1 + beta*salt0 ) / alpha
SALT[:,i,:] = salt0 + 0.01 * TEMP[:,i,:]
plt.figure()
plt.plot(isob[:,0], isob[:,1]); plt.axis('equal')
plt.pcolormesh(X, Y, TEMP[-1,...],cmap=plt.cm.Spectral_r);
plt.grid(); plt.colorbar(); plt.title('Temperature @ Surface')
plt.xlabel('Longitude'); plt.ylabel('Latitude')
plt.figure()
plt.plot(isob[:,0], isob[:,1]); plt.axis('equal')
plt.pcolormesh(X, Y, TEMP[-400,...], cmap=plt.cm.Spectral_r)
plt.grid(); plt.colorbar(); plt.title('Temperature @ 400 m')
plt.xlabel('Longitude'); plt.ylabel('Latitude')
plt.figure()
plt.contourf(X[0,:],Z, np.squeeze(TEMP[:,0,:]),30, cmap=plt.cm.Spectral_r); plt.colorbar();
plt.contour(X[0,:],Z, np.squeeze(V[:,0,:]),10, colors='0.5')
plt.title('Temperature @ South Boundary')
plt.xlabel('Longitude'); plt.ylabel('Depth')
plt.figure()
plt.contourf(X[-1,:],Z, np.squeeze(TEMP[:,-1,:]),30, cmap=plt.cm.Spectral_r); plt.colorbar()
plt.contour(X[-1,:],Z, np.squeeze(V[:,-1,:]),10, colors='0.5')
plt.title('Temperature @ North Boundary')
plt.xlabel('Longitude'); plt.ylabel('Depth')
plt.show()
plt.figure()
plt.contourf(Y[:,-1],Z, np.squeeze(TEMP[:,:,-1]),30, cmap=plt.cm.Spectral_r); plt.colorbar()
plt.title('Temperature @ East Boundary')
plt.xlabel('Latitude'); plt.ylabel('Depth')
plt.show()
# clearing memory
del h, VS, MeanDens, RHO, U2, V2, VS2
del c, col, con, cosr, d, d1, d2, dvdz, fcb, grdfile
# extrapolate to the rest of the domain
rpt = 20
XAUX = np.zeros([li, rpt]); YAUX = XAUX * 0
print ' '
print ' Extrapolating values to the east'
print ' '
for i in range(0,li):
lastx = X[i,-1]; xaux = np.linspace(lastx+0.25, lonr.max()+2, rpt)
XAUX[i,:] = xaux
lasty = Y[i,-1]; yaux = np.linspace(lasty, lasty, rpt)
YAUX[i,:] = yaux
# coordinates:
X = np.hstack((X, XAUX))
Y = np.hstack((Y, YAUX))
# velocity:
# computing geostrophic velocity to EASTERN boundary, to check SEC structure
temp = np.squeeze(TEMP[...,-1])
salt = np.squeeze(SALT[...,-1])
y = Y[:,-1]
y, z = np.meshgrid(y,Z); z = -z
gp = sw.gpan(salt, temp, z)
gp = (gp - gp[-1,:]) * -1 # to reference in the bottom
dgp = np.array(np.gradient(gp))
dgp = np.squeeze(dgp[1,:,:])
dy = np.array(np.gradient(y))
dy = np.squeeze(dy[1,:,:]) * 111000
dgpdy = dgp / dy
usec = -dgpdy / f0
# getting the right transport
usec = usec*0.95
f = np.where(usec > 0); usec[f] = 0
Tsec = transp(y, z, usec)
plt.figure(20, figsize=(10, 5), facecolor='w')
plt.contourf(y, -z, usec, np.arange(-0.1, 0.1+0.01, 0.01), cmap=plt.cm.RdBu, extend='both')
plt.colorbar(); plt.title('SEC velocities from BC-NBUC Feature Model')
plt.xlabel('Latitude'); plt.ylabel('Z[m]')
plt.axis([y.min(), y.max(), -1000, 0])
plt.text(-18, -450, str(np.round(Tsec))+' Sv')
plt.text(-23, -700, 'Should be -17 Sv to fulfil the BC-NBUC imbalance.')
plt.show()
uaux = usec.repeat(rpt, axis=1)
uaux.shape = (lk, li, rpt)
# decaying uaux to zero close to the western boundary
dcj = 3 # number of grid points to do the linear decay
for k in range(0, lk-1):
for i in range(0, li):
utmp = np.linspace(0, usec[k, i], dcj)
uaux[k, i, :dcj] = utmp
U = np.concatenate((U, uaux), axis=2)
V = np.concatenate((V, uaux*0), axis=2)
del uaux
# salt and temp:
lastS = SALT[:,:,-1]; lastT = TEMP[:,:,-1]
saux = lastS.repeat(rpt, axis=1); taux = lastT.repeat(rpt, axis=1)
saux.shape = (lk, li, rpt); taux.shape = (lk, li, rpt)
SALT = np.concatenate((SALT, saux), axis=2)
TEMP = np.concatenate((TEMP, taux), axis=2)
lk, li, lj = U.shape
rpt=10
XAUX = np.zeros([li, rpt]); YAUX = XAUX * 0
print ' '
print ' Extrapolating values to the west'
print ' '
for i in range(0,li):
firstx = X[i,0]; xaux = np.linspace(lonr.min() - 2, firstx-0.25, rpt)
XAUX[i,:] = xaux
firsty = Y[i,0]; yaux = np.linspace(firsty, firsty, rpt)
YAUX[i,:] = yaux
X = np.hstack((XAUX, X))
Y = np.hstack((YAUX, Y))
firstu = U[:,:,0]; firstu = firstu*0;
uaux = firstu.repeat(rpt, axis=1)
uaux.shape = (lk, li, rpt)
U = np.concatenate((uaux, U), axis=2)
V = np.concatenate((uaux, V), axis=2)
firstS = SALT[:,:,0]; firstT = TEMP[:,:,0]
saux = firstS.repeat(rpt, axis=1); taux = firstT.repeat(rpt, axis=1)
saux.shape = (lk, li, rpt); taux.shape = (lk, li, rpt)
SALT = np.concatenate((saux, SALT), axis=2)
TEMP = np.concatenate((taux, TEMP), axis=2)
del lastS, lastT, lastx, lasty, firstS, firstT, firstu, firstx, firsty
del lataux, looprange, r, rho0, rhoaux, taux, saux, tmp, v, v0, v01, v02, v0max
del v2, vaux, xx, yaux, z0max, z1, z2, zrho, zz, XAUX, YAUX, aux, lonr, latr, uaux
d = 20
TEMP = TEMP[::d,...]; SALT = SALT[::d,...]; Z = Z[::d]
U = U[::d,...]; V = V[::d,...]
lk, li, lj = U.shape
# PREPARING FIELDS TO BUILD ROMS INITIAL FIELDS straight from FM:
# flipping arrays to keep depths positive
TEMP = TEMP.reshape(lk, li*lj); SALT = SALT.reshape(lk, li*lj)
U = U.reshape(lk, li*lj); V = V.reshape(lk, li*lj)
TEMP = np.flipud(TEMP); SALT = np.flipud(SALT); U = np.flipud(U); V = np.flipud(V);
Z = np.flipud(Z); Z = -Z; Z = np.squeeze(Z)
TEMP.shape = (lk,li,lj); SALT.shape = (lk,li,lj); U.shape = (lk,li,lj); V.shape = (lk,li,lj)
# creating depth-averaged velocities
UBAR = U.mean(axis=0)
VBAR = V.mean(axis=0)
# creating SSH
TEMP2 = TEMP.reshape(lk, li*lj)
SALT2 = SALT.reshape(lk, li*lj)
Z2 = Z.reshape(lk,1)
gpan = sw.gpan(SALT2, TEMP2, Z2); del TEMP2, SALT2, Z2
gpan = (gpan - gpan[-1,:]) # to reference in the bottom
SSH = gpan / g; SSH = SSH[0,:]; SSH = SSH - SSH.mean()
SSH.shape = (li,lj)
matdict = {'lon':X, 'lat': Y, 'z': Z, 'temp':TEMP, 'salt':SALT, 'u':U, 'v':V, 'ubar':UBAR, 'vbar':VBAR, 'ssh':SSH}
sp.savemat('BC-NBUC_FM.mat', matdict)
# CREATING A FLAT Tracers FM + M3 FM velocities
for k in range(0,lk-1):
TEMP[k,...] = TEMP[k,...].mean()
SALT[k,...] = SALT[k,...].mean()
matdict = {'lon':X, 'lat': Y, 'z': Z, 'temp':TEMP, 'salt':SALT, 'u':U, 'v':V, 'ubar':UBAR, 'vbar':VBAR, 'ssh':SSH}
sp.savemat('FLAT-BC-NBUC_FM.mat', matdict)
# CREATING A FLAT Tracers FM + FLAT M3 FM
U = U*0; UBAR = UBAR*0
V = V*0; VBAR = VBAR*0; SSH = SSH*0
matdict = {'lon':X, 'lat': Y, 'z': Z, 'temp':TEMP, 'salt':SALT, 'u':U, 'v':V, 'ubar':UBAR, 'vbar':VBAR, 'ssh':SSH}
sp.savemat('FLAT_FM.mat', matdict)
# PREPARING FIELDS TO AOME
print ' '
print 'Please run fm2mod.py if you want to create a MODS-file'
print ' '
STOP
# comparing with OA fields
dataset = nc.Dataset('/home/rsoutelino/rsoutelino/myroms/phd_run/init/hops_oa/work/bc-nbuc_fm.nc')
lon = dataset.variables['grid3'][:,:,0]
lat = dataset.variables['grid3'][:,:,1]
temp = dataset.variables['temp'][:]
salt = dataset.variables['salt'][:]
dynht = dataset.variables['dynht'][:]
z = dataset.variables['zout'][:,2]
dynht = dynht[0,...]
psi = (-1) * ( dynht / f0 )
gradpsi = np.gradient(psi)
u = (-1) * ( gradpsi[0] / ( np.diff(lon).mean() * 111000 ) )
v = gradpsi[1] / ( np.diff(lon).mean() * 111000 )
plt.figure()
plt.contourf(lon, lat, dynht[...,0], 30, cmap=plt.cm.RdBu); plt.colorbar()
plt.plot(isob[:,0], isob[:,1],'k', linewidth=2); plt.axis('equal')
plt.axis([-40, -31, -23, -10])
plt.title('$\Delta \Phi$ @ Surface')
plt.xlabel('Longitude'); plt.ylabel('Latitude')
plt.figure()
plt.contourf(lon, lat, dynht[...,20], 30, cmap=plt.cm.RdBu); plt.colorbar()
plt.plot(isob[:,0], isob[:,1],'k', linewidth=2); plt.axis('equal')
plt.axis([-40, -31, -23, -10])
plt.title('$\Delta \Phi$ @ 400 m')
plt.xlabel('Longitude'); plt.ylabel('Latitude')
plt.figure()
plt.quiver(lon, lat, u[...,0], v[...,0])
plt.plot(isob[:,0], isob[:,1],'k', linewidth=2);
plt.axis([-40, -31, -23, -10])
plt.title('Velocity @ Surface')
plt.xlabel('Longitude'); plt.ylabel('Latitude')
plt.figure()
plt.quiver(lon, lat, u[...,20], v[...,20])
plt.plot(isob[:,0], isob[:,1],'k', linewidth=2);
plt.axis([-40, -31, -23, -10])
plt.title('Velocity @ 400 m')
plt.xlabel('Longitude'); plt.ylabel('Latitude')
plt.figure()
plt.contourf(X[0,:],Z, np.squeeze(V[:,0,:]),np.arange(-0.2,0.2+0.03,0.03), cmap=plt.cm.RdBu, alpha=0.5)
plt.contourf(lat[:,-1],z, np.squeeze(u[:,-60,:]).transpose(),30, cmap=plt.cm.RdBu); plt.colorbar()
plt.title('V-Vel @ South Boundary')
plt.xlabel('Longitude'); plt.ylabel('Depth')
plt.show()
authors: ["rsoutelino@gmail.com"]
author_id: rsoutelino@gmail.com

blob_id: 3899ac470777b3ed162ec4b63771700c70a1e8f9
directory_id: ad150ad36e2996171be8ec04ad9f025bd383ed39
path: /delivery_app/apps/myapp/models/salida.py
content_id: de081d9a59a1c4c9702bd3b764ea6f82885638d5
detected_licenses: []
license_type: no_license
repo_name: pacifi/Dijkstra
snapshot_id: 740150d22c5571bf5f19c90ca15e56f2759822cb
revision_id: f3095997b829b2d17cd55265da4ec99a70eab6c3
branch_name: refs/heads/main
visit_date: 2022-05-01T02:26:07.467951
revision_date: 2022-04-30T14:23:37
committer_date: 2022-04-30T14:23:37
github_id: 138,086,549
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 422
extension: py
content:
from django.db import models
from backend_apps.backend_auth.models import User
class Salida(models.Model):
user = models.ForeignKey(User) # encargado del pedido
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = "Salida"
verbose_name_plural = "Salidas"
def __str__(self):
return "%s" % self.id
authors: ["pacifi.bnr@gmail.com"]
author_id: pacifi.bnr@gmail.com

blob_id: 118314a66db3e17343985da81c3e0df4373d4c87
directory_id: 446571f13b3c1604cdfbcee8fdc2f956568d7c8d
path: /codeeval/medium/reverse_and_add.py
content_id: a9462c0696c93b46bb61868ec3a6c47549ccdc51
detected_licenses: []
license_type: no_license
repo_name: ssarangi/algorithms
snapshot_id: 2e8f0a4be6bf0f4a3d75b224ed993e1fb0ca0229
revision_id: e151307f2706214cf8cefa6231290aeb2e5cfd82
branch_name: refs/heads/master
visit_date: 2021-01-17T02:28:23.291588
revision_date: 2018-01-06T18:35:13
committer_date: 2018-01-06T18:35:13
github_id: 51,458,833
star_events_count: 1
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,253
extension: py
content:
# https://www.codeeval.com/open_challenges/45
import sys
def check_palindrome_and_sum(num):
total_digits = 0
arr = []
while num > 0:
digit = num % 10
total_digits += 1
arr.append(digit)
num = num // 10
is_palindrome = True
    # Now the arr has the individual elements
p1 = 0
p2 = len(arr) - 1
while p1 < p2:
if arr[p1] != arr[p2]:
is_palindrome = False
break
p1 += 1
p2 -= 1
# Now find the reverse of the digit
ten_pow = 0
reversed_num = 0
while len(arr) > 0:
digit = arr.pop()
reversed_num += digit * pow(10, ten_pow)
ten_pow += 1
return reversed_num, is_palindrome
def compute_soln(num):
iteration = 0
is_palindrome = False
while iteration <= 100 and not is_palindrome:
reversed_num, is_palindrome = check_palindrome_and_sum(num)
if not is_palindrome:
num = reversed_num + num
iteration += 1
return iteration, num
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
num = int(test)
iteration, palindrome = compute_soln(num)
print(iteration, palindrome)
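# Added usage note (not part of the original solution): the script expects a file of
# integers, one per line, passed as argv[1]. For example, an input line of "195"
# walks 195 -> 786 -> 1473 -> 5214 -> 9339, stopping at the palindrome 9339;
# compute_soln returns the iteration count together with that palindrome.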
authors: ["satyajit.sarangi@gmail.com"]
author_id: satyajit.sarangi@gmail.com

blob_id: d6a866e67509d27ae4a6e59c536f48c56f8085e5
directory_id: 1ce46403c6b0db95c5d41706bf6fa7de48010820
path: /flask_mobility/mobility.py
content_id: ee97dd3473b3a8f0a9d3d62b95a02bbd8fc48ac5
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: rehandalal/flask-mobility
snapshot_id: 1d90f5ec651129ef34b2334b61d47a7cbcb57cf2
revision_id: a26f13d7c678265702c5687b9a5c87a705bb0b9d
branch_name: refs/heads/master
visit_date: 2023-03-15T13:15:52.763579
revision_date: 2021-07-30T18:10:11
committer_date: 2021-07-30T18:10:11
github_id: 8,322,591
star_events_count: 67
fork_events_count: 14
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-03-06T10:56:43
gha_created_at: 2013-02-20T21:28:17
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 941
extension: py
content:
import re
from flask import _request_ctx_stack as stack
class Mobility(object):
def __init__(self, app=None):
self.app = app
if self.app is not None:
self.init_app(app)
def init_app(self, app):
app.config.setdefault("MOBILE_USER_AGENTS", "android|fennec|iemobile|iphone|opera (?:mini|mobi)|mobile")
app.config.setdefault("MOBILE_COOKIE", "mobile")
self.USER_AGENTS = re.compile(app.config.get("MOBILE_USER_AGENTS"))
@app.before_request
def before_request():
ctx = stack.top
if ctx is not None and hasattr(ctx, "request"):
self.process_request(ctx.request, app)
def process_request(self, request, app):
ua = request.user_agent.string.lower()
mc = request.cookies.get(app.config.get("MOBILE_COOKIE"))
request.MOBILE = mc == "on" or (mc != "off" and self.USER_AGENTS.search(ua) is not None)
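# --- added usage sketch (not part of flask_mobility itself) ---
# Mobility attaches a before_request hook that sets request.MOBILE based on the
# User-Agent and the "mobile" cookie. A minimal (hypothetical) app would look like:
#
#     from flask import Flask, request
#     from flask_mobility import Mobility
#
#     app = Flask(__name__)
#     Mobility(app)
#
#     @app.route("/")
#     def index():
#         return "mobile" if request.MOBILE else "desktop"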
authors: ["rehandalal@gmail.com"]
author_id: rehandalal@gmail.com

blob_id: 12800ccf2b7d3ba08d5e9ae38ebc5e702c5992c8
directory_id: 8c96a9d66d49a5cb38d71174c6135af8f37c2259
path: /libpass/__init__.py
content_id: d9e7d85ad43ad73b4a0bd96430eccbfd4d21451a
detected_licenses: []
license_type: no_license
repo_name: jensecj/libpass-py
snapshot_id: 614c00fddb6b98f8e6ac5f33ab477e86e780a847
revision_id: 71e3c9dc945ca8f4aaa31b66c783eab8d22a4e0f
branch_name: refs/heads/master
visit_date: 2020-08-27T20:02:52.729262
revision_date: 2019-10-31T09:04:15
committer_date: 2019-10-31T09:04:15
github_id: 217,477,187
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,579
extension: py
content:
import os
import sys
import subprocess
class PasswordStore:
def __init__(self, store_path=None):
# TODO: locate gpg binary, or error
# TODO: locate pass binary, or error
if store_path:
store_path = os.path.expanduser(store_path)
else:
store_path = os.path.expanduser("~/.password-store")
if not os.path.exists(store_path):
raise FileNotFoundError(f"Path to password store does not exist: {store_path}")
if not os.path.isdir(store_path):
raise ValueError(f"Path to password store is not a directory: {store_path}")
self._store_path = store_path
def _cmd(self, cmd):
popen = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return_code = popen.wait()
if return_code == 0:
output = popen.stdout.read().strip()
return output
err = popen.stderr.read() if popen.stderr else None
if err:
raise ValueError(f"return code: {return_code}: {err}")
def unlock(self):
"""
Make the gpg-agent ask for passphrase for unlocking the
password-stores gpg-key, if it is not already in the
agent.
"""
self._cmd(
[
"gpg",
"--quiet",
"--no-greeting",
"--clearsign",
"--output",
"/dev/null",
"/dev/null",
]
)
def list(self):
"""
Return a list of all entries in the password store.
"""
# an entry in the password store is the relative path from the
# root of the store, to some .gpg file in a subdirectory.
files = []
        for root, _, fs in os.walk(self._store_path):  # the attribute set in __init__ is _store_path
files += [os.path.join(root, f) for f in fs]
rel_files = [os.path.relpath(f, start=self._store_path) for f in files]
rel_files.remove(".gpg-id")
        entries = [f[:-len(".gpg")] if f.endswith(".gpg") else f for f in rel_files]  # drop the ".gpg" suffix (str.strip would also eat leading '.', 'g', 'p' characters)
return entries
def get(self, path):
"""
Return the secret for `path', if found in the password store, None otherwise.
"""
return self._cmd(["pass", path])
def set(self, path, secret):
"""
Insert `secret' into the password store, at `path'.
"""
self._cmd(["pass", "insert", path, secret])
def generate(self, path):
"""
Insert a generated secret into `path' in the password store.
"""
self._cmd(["pass", "generate", path])
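# --- added usage sketch (not part of the original module) ---
# PasswordStore shells out to gpg and pass, so this only works on a machine with both
# installed and an initialized store (by default ~/.password-store). The entry name
# below is hypothetical.
def _example_usage():
    store = PasswordStore()            # or PasswordStore("~/my-store")
    store.unlock()                     # ask the gpg-agent for the passphrase if needed
    for entry in store.list():         # entry names relative to the store root, ".gpg" removed
        print(entry)
    return store.get("example/entry")  # hypothetical entry name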
authors: ["jensecj@gmail.com"]
author_id: jensecj@gmail.com

blob_id: 5ef25e5c0ef8975460d7523a61c4044e44e2b6d2
directory_id: 67603326996032afeab86b870212aa7f6f83e60f
path: /st.py
content_id: 5f3072ae400cfe13544243250e8867f763cce8c5
detected_licenses: []
license_type: no_license
repo_name: 1079658109/RL-MPC-LaneMerging
snapshot_id: 3af6eb1efcc735004da1206d50e545965cac0db5
revision_id: 7f1948bd62e0833bb0477fa35242211726892e5b
branch_name: refs/heads/master
visit_date: 2023-09-01T00:20:34.006054
revision_date: 2021-10-26T16:40:21
committer_date: 2021-10-26T16:41:23
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 37,913
extension: py
content:
import heapq
from scipy import interpolate
from cvxopt import solvers, matrix
import matplotlib.pyplot as plt
import numpy as np
import control
from config import Settings
import prediction
if Settings.USE_CYTHON:
import st_cy
solvers.options['show_progress'] = False
solvers.options['maxiters'] = 10
def get_range_index(min_s, delta_s, s):
# Gets the last index before s in the list [min_s, min_s + delta_s, min_s + 2*delta_s, ...]
return int((s - min_s) / delta_s)
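# Added example (not in the original): with min_s = 0.0 and delta_s = 0.5 the grid is
# [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, ...], and get_range_index(0.0, 0.5, 2.3) returns
# int(2.3 / 0.5) = 4, the last grid index at or before s = 2.3.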
def find_s_t_obstacles_from_state(current_state, future_s=150, delta_s=0.5, delta_t=0.2, time_limit=5, start_uncertainty=0.0, uncertainty_per_second=0.1):
ego_position = current_state.ego_position
ego_speed = current_state.ego_speed
start_s = control.get_ego_s(ego_position)
# We discretize the s and t space, and store the lookup for s and t values in these arrays
s_values = np.arange(start_s, start_s + future_s + delta_s, delta_s)
t_values = np.arange(0, time_limit + delta_t, delta_t)
    obstacles = np.zeros((t_values.size, s_values.size), dtype=bool)   # np.bool/np.float aliases are removed in recent NumPy
    distances = np.zeros(obstacles.shape, dtype=float)
distances += 1E10 # Big number but not NaN
discrete_length = int(Settings.CAR_LENGTH / delta_s)
predicted_state = current_state
for (t_index, t) in enumerate(t_values):
uncertainty = start_uncertainty + uncertainty_per_second * t
discrete_uncertainty = int(uncertainty/delta_s)
if t_index != 0:
predicted_state, _ = predicted_state.predict_step_without_ego(delta_t)
for vehicle_index, vehicle_x in enumerate(predicted_state.other_xs):
current_obstacle_s = control.get_obstacle_s_from_x(vehicle_x)
if current_obstacle_s < Settings.CRASH_MIN_S - Settings.MIN_ALLOWED_DISTANCE:
break # Cars do not obstruct path until the merge point
elif current_obstacle_s > s_values[-1] + Settings.CAR_LENGTH:
continue
# calculate the distance from each point to this obstacle vehicle at time t
obstacle_distances_front = np.abs(s_values - (current_obstacle_s - Settings.CAR_LENGTH - uncertainty))
obstacle_distances_back = np.abs(s_values - (current_obstacle_s + Settings.CAR_LENGTH + uncertainty))
# the distance is the minimum of the distance from the front of the ego vehicle to the back of the
# obstacle vehicle, and from the front of the obstacle to the back of the ego vehicle
distances[t_index] = np.minimum(obstacle_distances_front, distances[t_index, :])
distances[t_index] = np.minimum(obstacle_distances_back, distances[t_index, :])
# Within a vehicle length of the obstacle's s position, register the presence of an obstacle
start_s_index = get_range_index(start_s, delta_s, current_obstacle_s)
index_min = max(start_s_index - discrete_length - discrete_uncertainty, 0)
index_max = min(start_s_index + discrete_length + discrete_uncertainty, s_values.size)
if index_min < s_values.size and index_max > 0:
obstacles[t_index, index_min:index_max] = True
distances[t_index, index_min:index_max] = 0
# plt.close()
# plot_s_t_obstacles(obstacles, s_values, t_values)
# plt.show()
return obstacles, s_values, t_values, ego_speed, distances
def find_s_t_obstacles(future_s=150, delta_s=0.5, delta_t=0.2, time_limit=5, start_uncertainty=0.0, uncertainty_per_second=0.1):
"""
For the current state of the system, predict and plot the positions of and distances to all obstacles in the future
:param future_s: how far in the future to look in the s space
:param delta_s: the discretization of the s space
:param delta_t: the discretization of the t space
:param time_limit: how far in the future to look in the t space
:param start_uncertainty: makes other cars start out this much longer for collision detection
:param uncertainty_per_second: makes cars this much longer for each second in the future
"""
current_state = prediction.HighwayState.from_sumo()
return find_s_t_obstacles_from_state(current_state)
def plot_s_t_obstacles(obstacles, s_values, t_values, color='blue'):
nonzero_t, nonzero_s = np.nonzero(obstacles)
ts = t_values[nonzero_t]
ss = s_values[nonzero_s]
plt.figure()
plt.scatter(ts, ss, c=color)
plt.ylim(s_values[0], s_values[-1])
plt.xlim(t_values[0], t_values[-1])
plt.xlabel('t')
plt.ylabel('s')
def plot_s_path(obstacles, s_values, t_values, s_path):
plot_s_t_obstacles(obstacles, s_values, t_values)
plt.plot(t_values, s_path, c='red')
def get_feasible_next_s_range_no_jerk_limits(s, prev_s, delta_t):
v = (s - prev_s) / delta_t
min_v = max(v + Settings.MAX_NEGATIVE_ACCELERATION * delta_t, 0)
max_v = min(v + Settings.MAX_POSITIVE_ACCELERATION * delta_t, Settings.MAX_SPEED)
min_s = s + min_v * delta_t # note: automatically greater than zero
max_s = s + max_v * delta_t # note: automatically capped wrt max speed
return min_s, max_s
def get_feasible_next_s_range_with_jerk_limits(s, s_1, s_2, delta_t):
prev_v = (s_1 - s_2) / delta_t
v = (s - s_1) / delta_t
a = (v - prev_v) / delta_t
min_a = max(a + Settings.MINIMUM_NEGATIVE_JERK * delta_t, Settings.MAX_NEGATIVE_ACCELERATION)
max_a = min(a + Settings.MAXIMUM_POSITIVE_JERK * delta_t, Settings.MAX_POSITIVE_ACCELERATION)
min_v = max(v + min_a * delta_t, 0)
max_v = min(v + max_a * delta_t, Settings.MAX_SPEED)
min_s = s + min_v * delta_t # note: automatically greater than zero
max_s = s + max_v * delta_t # note: automatically capped wrt max speed
return min_s, max_s
def distance_penalty(min_distance):
if min_distance < Settings.MIN_ALLOWED_DISTANCE:
return 1000000.0 * Settings.D_WEIGHT / max(min_distance, 1.0)
else:
return Settings.D_WEIGHT / min_distance
def cost_no_jerk(s, s_1, s_2, t_discretization, min_distance):
v = (s - s_1) / t_discretization
a = (s - 2*s_1 + s_2) / (t_discretization**2)
return Settings.V_WEIGHT * (v - Settings.DESIRED_SPEED)**2 + Settings.A_WEIGHT * a**2 + distance_penalty(min_distance)
def cost(s, s_1, s_2, s_3, t_discretization, min_distance):
v = (s - s_1) / t_discretization
a = (s - 2*s_1 + s_2) / (t_discretization**2)
j = (s - 3*s_1 + 3*s_2 - s_3) / (t_discretization**3)
return Settings.V_WEIGHT * (v - Settings.DESIRED_SPEED)**2 + Settings.A_WEIGHT * a**2 + Settings.J_WEIGHT * j**2 + distance_penalty(min_distance)
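# Added note (not in the original): in both cost functions v, a and j are backward
# finite differences of the s samples,
#   v = (s_t - s_{t-1}) / dt,  a = (s_t - 2 s_{t-1} + s_{t-2}) / dt^2,
#   j = (s_t - 3 s_{t-1} + 3 s_{t-2} - s_{t-3}) / dt^3,
# and the stage cost is w_v (v - v_des)^2 + w_a a^2 [+ w_j j^2] + distance_penalty(d),
# with the weights taken from Settings.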
def get_all_range_indices(start_s, delta_s, range_min, range_max):
"""
Gets the indices in [start_s, start_s + delta_s, start_s + 2*delta_s, ...] between range_min and range_max (inclusive)
:param start_s: The start of the list
:param delta_s: The discretization of the list
:param range_min: The minimum bound of the desired range
:param range_max: The maximum bound of the desired range
:return: A list of the desired indices
"""
min_index = (range_min - start_s) / delta_s
if int(min_index) < min_index:
min_index = int(min_index) + 1
else:
min_index = int(min_index)
max_index = int((range_max - start_s) / delta_s)
return list(range(min_index, max_index + 1))
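# Added example (not in the original): get_all_range_indices(0.0, 0.5, 1.2, 2.6)
# returns [3, 4, 5], i.e. the grid values 1.5, 2.0 and 2.5 that fall inside [1.2, 2.6].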
def readable_solve_s_t_path_no_jerk(obstacles, s_values, t_values, ego_start_speed, distances):
"""
Finds the optimal path through the discretized s-t space
    Adheres to velocity, acceleration, and monotonicity constraints (but not jerk constraints). If we assume that the
discretization of the s and t space have maximum horizons of s_max and t_max respectively, with quantization sizes
delta_s and delta_t, then we are given information about the discretized space as follows (where num_s = s_max/delta_s
and num_t = t_max / delta_t)
    For this version we will use Dijkstra's algorithm. For later improvements, consider making a better heuristic for A*
:param obstacles: a boolean ndarray of size (num_t x num_s), encoding the projected positions of obstacles
:param s_values: an ndarray of size num_s, encoding the actual s values
:param t_values: an ndarray of size num_t, encoding the actual t values
:param ego_start_speed: the starting speed of the ego car
:param distances: an ndarray of size (num_t x num_s), encoding the distances to the nearest obstacle
:return:
"""
delta_s = s_values[1] - s_values[0]
delta_t = t_values[1] - t_values[0]
num_s = s_values.size
num_t = t_values.size
start_s = s_values[0]
# For this version we will work forwards instead of backwards for the DP, as it is more readable that way
best_previous_s = np.zeros((num_t, num_s, num_s), dtype=np.int32)
encountered = np.zeros((num_t, num_s, num_s), dtype=bool)
estimated_previous_s = start_s - delta_t * ego_start_speed
entry_order = 0
min_first_s, max_first_s = get_feasible_next_s_range_no_jerk_limits(start_s, estimated_previous_s, delta_t)
# We transform the raw s value range to a list of possible s indices
possible_first_s_indices = get_all_range_indices(start_s, delta_s, min_first_s, max_first_s)
node_priority_queue = []
for s_index in possible_first_s_indices:
s_value = s_values[s_index]
if not obstacles[1, s_index]:
s_cost = cost_no_jerk(s_value, start_s, estimated_previous_s, delta_t, distances[1, s_index])
node_priority_queue.append((s_cost, entry_order, 1, s_index, 0, 0))
entry_order -= 1 # We want the queue to be LIFO, as this tends to be faster for shortest path problems
heapq.heapify(node_priority_queue)
best_last_s_tuple = (-1, -1)
best_t_index = 0
while len(node_priority_queue) > 0:
# We get the (t, s, prev_s) tuple with the lowest cost so far
total_cost, _, t_index, s_index, prev_s_index, second_s_index = heapq.heappop(node_priority_queue)
s_value = s_values[s_index]
prev_s_value = s_values[prev_s_index]
if encountered[t_index, s_index, prev_s_index]:
continue
else:
encountered[t_index, s_index, prev_s_index] = True
best_previous_s[t_index, s_index, prev_s_index] = second_s_index
# We keep track of the furthest point in time we can safely reach in case we cannot reach the end
if t_index > best_t_index:
best_t_index = t_index
best_last_s_tuple = (s_index, prev_s_index)
if t_index == num_t - 1:
break
# Again, calculate the possible next values of s
min_next_s, max_next_s = get_feasible_next_s_range_no_jerk_limits(s_value, prev_s_value, delta_t)
possible_next_s_indices = get_all_range_indices(start_s, delta_s, min_next_s, max_next_s)
next_t = t_index + 1
for next_s_index in possible_next_s_indices:
# We can't exceed the bounds of our simulation, but if this is happening it may be a good idea to increase the planning horizon
if next_s_index >= num_s:
break
# If we have not yet encountered the next (s, prev_s) tuple at the specified time, we have found the optimal path to reach it
if not encountered[next_t, next_s_index, s_index]:
if obstacles[next_t, next_s_index]:
continue # No colliding with obstacles
next_s_value = s_values[next_s_index]
s_cost = cost_no_jerk(next_s_value, s_value, prev_s_value, delta_t, distances[next_t, next_s_index])
heapq.heappush(node_priority_queue, (total_cost + s_cost, entry_order, next_t, next_s_index, s_index, prev_s_index))
entry_order -= 1
# Reconstruct the best sequence of s values, using the saved values from best_previous_s
best_s_index, best_prev_s_index = best_last_s_tuple
s_sequence = np.zeros(num_t)
for t_index in range(best_t_index, 1, -1):
s_sequence[t_index] = s_values[best_s_index]
second_s_index = best_previous_s[t_index, best_s_index, best_prev_s_index]
best_s_index = best_prev_s_index
best_prev_s_index = second_s_index
s_sequence[0] = s_values[best_prev_s_index]
s_sequence[1] = s_values[best_s_index]
return s_sequence
def get_path_mean_abs_jerk(s_sequence, ego_start_speed, ego_start_acceleration, delta_t):
prev_a = ego_start_acceleration
prev_v = ego_start_speed
path_cost = 0
for i, s in enumerate(s_sequence):
if i == 0:
continue
s_1 = s_sequence[i - 1]
v = (s - s_1)/delta_t
a = (v - prev_v)/delta_t
j = (a - prev_a)/delta_t
prev_v = v
prev_a = a
path_cost += abs(j)
return path_cost / (len(s_sequence) - 1)
def get_path_cost(s_sequence, ego_start_speed, ego_start_acceleration, delta_t, s_values, distances):
"""
Get the cost of a path produced by an s-t path planning algorithm
:param s_sequence: The path as an ndarray of s coordinates
:param ego_start_speed: The starting speed of the ego car
:param delta_t: The time between points on the path
:param s_values: An ndarray of possible s coordinates, as given as input to the s-t solver
:param distances: An ndarray of distances to the nearest obstacle, as given as input to the s-t solver
:return: The total cost of the given path
"""
path_cost = 0
prev_a = ego_start_acceleration
prev_v = ego_start_speed
for i in range(1, len(s_sequence)):
s = s_sequence[i]
s_1 = s_sequence[i - 1]
if i == 1:
s_2 = s_1 - ego_start_speed * delta_t
s_3 = s_2 - (ego_start_speed - ego_start_acceleration * delta_t) * delta_t
elif i == 2:
s_2 = s_sequence[i - 2]
s_3 = s_1 - ego_start_speed * delta_t
else:
s_2 = s_sequence[i - 2]
s_3 = s_sequence[i - 3]
matches = np.where(s_values == s)[0]
v = (s - s_1)/delta_t
a = (v - prev_v)/delta_t
j = (a - prev_a)/delta_t
if v > Settings.MAX_SPEED:
print("Exceeded speed limit")
if a > Settings.MAX_POSITIVE_ACCELERATION or a < Settings.MAX_NEGATIVE_ACCELERATION:
print("Exceeded acceleration limit")
if j > Settings.MAXIMUM_POSITIVE_JERK or j < Settings.MINIMUM_NEGATIVE_JERK:
print("Exceeded jerk limit")
prev_v = v
prev_a = a
if len(matches) > 0:
s_index = np.where(s_values == s)[0][0]
path_cost += cost(s, s_1, s_2, s_3, delta_t, distances[i, s_index])
else:
# No valid path
path_cost = np.infty
break
return path_cost
def valid_descendant_s_indices_no_jerk(t_index, start_s, s, s_1, delta_s, delta_t, obstacles):
min_s, max_s = get_feasible_next_s_range_no_jerk_limits(s, s_1, delta_t)
descendant_s_indices = get_all_range_indices(start_s, delta_s, min_s, max_s)
descendants = []
for s_index in descendant_s_indices:
if not obstacles[t_index, s_index]:
descendants.append(s_index)
return descendants
def valid_descendant_s_indices_with_jerk(t_index, start_s, s, s_1, s_2, delta_s, delta_t, obstacles):
min_s, max_s = get_feasible_next_s_range_with_jerk_limits(s, s_1, s_2, delta_t)
descendant_s_indices = get_all_range_indices(start_s, delta_s, min_s, max_s)
descendants = []
for s_index in descendant_s_indices:
if s_index >= obstacles.shape[1]:
break
if not obstacles[t_index + 1, s_index]:
descendants.append(s_index)
return descendants
def solve_st_fast_v2(obstacles, s_values, t_values, ego_start_speed, ego_start_acceleration, distances):
"""
A much faster st solver that still attempts not to crash, but produces suboptimal solutions
:param obstacles: a boolean ndarray of size (num_t x num_s), encoding the projected positions of obstacles
:param s_values: an ndarray of size num_s, encoding the actual s values
:param t_values: an ndarray of size num_t, encoding the actual t values
:param ego_start_speed: the starting speed of the ego car
:param ego_start_acceleration: the starting acceleration of the ego car
:param distances: an ndarray of size (num_t x num_s), encoding the distances to the nearest obstacle
:return: an ndarray of size num_t, giving the planned trajectory in the s space
"""
# Extracting some relevant quantities from the input arrays
delta_s = s_values[1] - s_values[0]
delta_t = t_values[1] - t_values[0]
num_s = s_values.size
num_t = t_values.size
start_s = s_values[0]
estimated_previous_s = start_s - delta_t * ego_start_speed
estimated_second_s = estimated_previous_s - delta_t * (ego_start_speed - ego_start_acceleration * delta_t)
# Initialize arrays to backtrack after the search is done and avoid visiting a node twice
best_previous_s = np.zeros((num_t, num_s), dtype=np.int32)
encountered = np.zeros((num_t, num_s), dtype=bool)
# We need a priority queue for a more efficient implementation of Dijkstra's algorithm
node_priority_queue = []
heapq.heapify(node_priority_queue)
# The priority queue is sorted by tuples of the form:
# Total cost (0), t index (1), s index (2), s value (3), previous s index (4), previous s value (5), index for s_{t-2} (6), value for s_{t-2} (7)
first_heap_item = (0, 0, 0, start_s, 0, estimated_previous_s, 0, estimated_second_s)
heapq.heappush(node_priority_queue, first_heap_item)
# Our nodes in our graph are in the form t_index, s_index. This keeps track of the best node we have reached so far
best_node = (0, 0)
while len(node_priority_queue) > 0:
# We get the (t, s, prev_s) tuple with the lowest cost so far
total_cost, t_index, s_index, s_value, prev_s_index, prev_s_value, second_s_index, second_s_value = heapq.heappop(node_priority_queue)
node = t_index, s_index
# We may add the same node to the priority queue multiple times (cost depending on the path taken to get there)
# However, only the first, and therefore lowest cost, instance has its neighbors expanded.
if encountered[node]:
continue
else:
encountered[node] = True
best_previous_s[node] = prev_s_index
# We keep track of the furthest ("best") point in time we can safely reach in case we cannot reach the end
if t_index > best_node[0]:
best_node = (t_index, s_index)
if t_index == num_t - 1:
break # We have found the best path to the end of our planning period
# Calculate the possible next values of s given the velocity and acceleration constraints
possible_next_s_indices = valid_descendant_s_indices_with_jerk(t_index, start_s, s_value, prev_s_value, second_s_value, delta_s, delta_t, obstacles)
next_t = t_index + 1
for next_s_index in possible_next_s_indices:
# We can't exceed the bounds of our simulation, but if this is happening it may be a good idea
# to increase the planning horizon in the s dimension
if next_s_index >= num_s:
break
next_node = (next_t, next_s_index)
if not encountered[next_node]:
if obstacles[next_node]:
continue # No colliding with obstacles
next_s_value = s_values[next_s_index]
s_cost = cost(next_s_value, s_value, prev_s_value, second_s_value, delta_t, distances[next_node])
# Total cost (0), t index (1), s index (2), s value (3), previous s index (4),
# previous s value (5), index for s_{t-2} (6), value for s_{t-2} (7)
heapq.heappush(node_priority_queue, (total_cost + s_cost, next_t, next_s_index, next_s_value, s_index, s_value, prev_s_index, prev_s_value))
# Reconstruct the best sequence of s values, using the saved values from best_previous_s
best_t_index, best_s_index = best_node
s_sequence = np.zeros(num_t)
for t_index in range(best_t_index, 0, -1):
s_sequence[t_index] = s_values[best_s_index]
node = t_index, best_s_index
best_s_index = best_previous_s[node]
s_sequence[0] = s_values[best_s_index]
return s_sequence
def solve_st_fast(obstacles, s_values, t_values, ego_start_speed, distances):
"""
A much faster st solver that still attempts not to crash, but produces suboptimal solutions
:param obstacles: a boolean ndarray of size (num_t x num_s), encoding the projected positions of obstacles
:param s_values: an ndarray of size num_s, encoding the actual s values
:param t_values: an ndarray of size num_t, encoding the actual t values
:param ego_start_speed: the starting speed of the ego car
:param distances: an ndarray of size (num_t x num_s), encoding the distances to the nearest obstacle
:return: an ndarray of size num_t, giving the planned trajectory in the s space
"""
delta_s = s_values[1] - s_values[0]
delta_t = t_values[1] - t_values[0]
num_s = s_values.size
num_t = t_values.size
start_s = s_values[0]
# For this version we will work forwards instead of backwards for the DP, as it is more readable that way
best_previous_s = np.zeros((num_t, num_s), dtype=np.int32)
encountered = np.zeros((num_t, num_s), dtype=bool)
estimated_previous_s = start_s - delta_t * ego_start_speed
entry_order = 0
min_first_s, max_first_s = get_feasible_next_s_range_no_jerk_limits(start_s, estimated_previous_s, delta_t)
# We transform the raw s value range to a list of possible s indices
possible_first_s_values = get_all_range_indices(start_s, delta_s, min_first_s, max_first_s)
node_priority_queue = []
for s_index in possible_first_s_values:
s_value = s_values[s_index]
if not obstacles[1, s_index]:
s_cost = cost_no_jerk(s_value, start_s, estimated_previous_s, delta_t, distances[1, s_index])
node_priority_queue.append((s_cost, entry_order, 1, s_index, 0))
entry_order -= 1 # We want the queue to be LIFO, as this tends to be faster for shortest path problems
heapq.heapify(node_priority_queue)
best_last_s = -1
best_t_index = 0
while len(node_priority_queue) > 0:
# We get the (t, s, prev_s) tuple with the lowest cost so far
total_cost, entry_order, t_index, s_index, prev_s_index = heapq.heappop(node_priority_queue)
s_value = s_values[s_index]
prev_s_value = s_values[prev_s_index]
if encountered[t_index, s_index]:
continue
else:
encountered[t_index, s_index] = True
best_previous_s[t_index, s_index] = prev_s_index
# We keep track of the furthest point in time we can safely reach in case we cannot reach the end
if t_index > best_t_index:
best_t_index = t_index
best_last_s = s_index
if t_index == num_t - 1:
break
# Again, calculate the possible next values of s
min_next_s, max_next_s = get_feasible_next_s_range_no_jerk_limits(s_value, prev_s_value, delta_t)
possible_next_s_values = get_all_range_indices(start_s, delta_s, min_next_s, max_next_s)
next_t = t_index + 1
for next_s_index in possible_next_s_values:
# We can't exceed the bounds of our simulation, but if this is happening it may be a good idea to increase the planning horizon
if next_s_index >= num_s:
break
if not encountered[next_t, next_s_index]:
if obstacles[next_t, next_s_index]:
continue # No colliding with obstacles
next_s_value = s_values[next_s_index]
s_cost = cost_no_jerk(next_s_value, s_value, prev_s_value, delta_t, distances[next_t, next_s_index])
heapq.heappush(node_priority_queue, (total_cost + s_cost, entry_order, next_t, next_s_index, s_index))
entry_order -= 1
# Reconstruct the best sequence of s values, using the saved values from best_previous_s
best_s_index = best_last_s
s_sequence = np.zeros(num_t)
for t_index in range(best_t_index, 0, -1):
s_sequence[t_index] = s_values[best_s_index]
best_s_index = best_previous_s[t_index, best_s_index]
s_sequence[0] = s_values[best_s_index]
return s_sequence
def get_before_after_constraints(s_sequence, t_values):
last_ego_position = s_sequence[-1]
last_future_time = t_values[-1]
before_car_start_pos = np.inf
before_car_speed = 0
after_car_start_pos = np.inf
after_car_speed = 0
before_car_end_pos = -np.inf
after_car_end_pos = np.inf
vehicle_ids = control.get_vehicle_ids()
positions = control.get_vehicle_positions(vehicle_ids)
speeds = control.get_vehicle_speeds(vehicle_ids)
ego_position = positions["ego"]
for vehicle_id in vehicle_ids:
if vehicle_id != "ego":
obstacle_s = control.get_obstacle_s(positions[vehicle_id])
obstacle_speed = speeds[vehicle_id]
end_obstacle_s = obstacle_s + obstacle_speed * last_future_time
if end_obstacle_s < -Settings.CAR_LENGTH:
continue
if end_obstacle_s > last_ego_position and end_obstacle_s < after_car_end_pos:
after_car_end_pos = end_obstacle_s
after_car_start_pos = obstacle_s
after_car_speed = obstacle_speed
elif end_obstacle_s < last_ego_position and end_obstacle_s > before_car_end_pos:
before_car_end_pos = end_obstacle_s
before_car_start_pos = obstacle_s
before_car_speed = obstacle_speed
return before_car_start_pos, before_car_speed, after_car_start_pos, after_car_speed
def finer_fit(s_sequence, delta_t, coarse_delta_t, start_speed, start_acceleration, before_after_cars=None):
s_length = len(s_sequence)
if s_length == 1:
return s_sequence
t = np.arange(s_length) * coarse_delta_t
sub_length = int(np.round(t[-1] / delta_t + 1))
if (sub_length - 1)*delta_t > t[-1]:
sub_length -= 1
finer_t = np.arange(sub_length) * delta_t
# Calculate a linear interpolation of our original sequence
interpolation = interpolate.interp1d(t, s_sequence)
interpolated = interpolation(finer_t)
# QP objective ||Ax - b||^2
A = np.identity(sub_length)
b = interpolated
# In the form (1/2)x^TPx + q^Tx
P = 2 * np.dot(A.T, A)
q = -2 * np.dot(A.T, b)
# Velocity min constraints: velocity \geq 0 in the form V_1 x \leq h
V_1 = np.zeros((sub_length - 1, sub_length))
h_1 = np.zeros(sub_length - 1)
for i in range(sub_length - 1):
V_1[i, i] = 1 / delta_t
V_1[i, i+1] = -1 / delta_t
# (s_{i+1} - s_i)/delta_t \geq 0
# Velocity max constraints: velocity \leq v_max
V_2 = -V_1
h_2 = np.zeros(sub_length - 1)
for i in range(sub_length - 1):
h_2[i] = Settings.MAX_SPEED
# Acceleration max constraints: (s_t - 2*s_{t-1} + s_{t-2})/(delta_t ** 2) \leq a_max
delta_t_2 = delta_t ** 2
A_3 = np.zeros((sub_length - 1, sub_length))
h_3 = np.zeros(sub_length - 1)
A_3[0, 0] = -1 / delta_t_2
A_3[0, 1] = 1 / delta_t_2
h_3[0] = Settings.MAX_POSITIVE_ACCELERATION + start_speed / delta_t
for i in range(1, sub_length - 1):
A_3[i, i-1] = 1 / delta_t_2
A_3[i, i] = -2 / delta_t_2
A_3[i, i+1] = 1 / delta_t_2
h_3[i] = Settings.MAX_POSITIVE_ACCELERATION
# Acceleration min constraints: (s_t - 2*s_{t-1} + s_{t-2})/(delta_t ** 2) \geq a_min
h_4 = np.zeros(sub_length - 1)
A_4 = -A_3
h_4[0] = -Settings.MAX_NEGATIVE_ACCELERATION - start_speed / delta_t
for i in range(1, sub_length - 1):
h_4[i] = -Settings.MAX_NEGATIVE_ACCELERATION
# Jerk max constraints: (s_t - 3*s_{t-1} + 3*s_{t-2} - s_{t-3})/(delta_t**3) \leq j_max
delta_t_3 = delta_t ** 3
J_5 = np.zeros((sub_length - 1, sub_length))
h_5 = np.zeros(sub_length - 1)
J_5[0, 0] = -1 / delta_t_3
J_5[0, 1] = 1 / delta_t_3
h_5[0] = Settings.MAXIMUM_POSITIVE_JERK + start_acceleration / delta_t + start_speed / delta_t_2
if sub_length > 2:
J_5[1, 0] = 2 / delta_t_3
J_5[1, 1] = -3 / delta_t_3
J_5[1, 2] = 1 / delta_t_3
h_5[1] = Settings.MAXIMUM_POSITIVE_JERK - start_speed / delta_t_2
for i in range(2, sub_length - 1):
J_5[i, i-2] = -1 / delta_t_3
J_5[i, i-1] = 3 / delta_t_3
J_5[i, i] = -3 / delta_t_3
J_5[i, i+1] = 1 / delta_t_3
h_5[i] = Settings.MAXIMUM_POSITIVE_JERK
# Jerk min constraints: (s_t - 3*s_{t-1} + 3*s_{t-2} - s_{t-3})/(delta_t**3) \geq j_min
J_6 = -J_5
h_6 = np.zeros(sub_length - 1)
h_6[0] = -Settings.MINIMUM_NEGATIVE_JERK - start_acceleration / delta_t - start_speed / delta_t_2
if sub_length > 2:
h_6[1] = -Settings.MINIMUM_NEGATIVE_JERK + start_speed / delta_t_2
for i in range(2, sub_length - 1):
h_6[i] = -Settings.MINIMUM_NEGATIVE_JERK
C_7 = None
h_7 = None
if before_after_cars is not None:
before_s, before_speed, after_s, after_speed = before_after_cars
before_ts = []
before_ss = []
if not np.isinf(before_s):
for i, t in enumerate(finer_t):
before_s_projected = before_s + t * before_speed
if before_s_projected < -Settings.CAR_LENGTH:
continue
else:
before_ts.append(i)
before_ss.append(before_s_projected)
after_ts = []
after_ss = []
if not np.isinf(after_s):
for i, t in enumerate(finer_t):
after_s_projected = after_s + t * after_speed
if after_s_projected < -Settings.CAR_LENGTH:
continue
else:
after_ts.append(i)
after_ss.append(after_s_projected)
C_7 = np.zeros((len(before_ts) + len(after_ts), sub_length))
h_7 = np.zeros(len(before_ts) + len(after_ts))
index = 0
for i, t_index in enumerate(before_ts):
C_7[index, t_index] = -1
h_7[index] = -before_ss[i] - Settings.CAR_LENGTH
index += 1
for i, t_index in enumerate(after_ts):
C_7[index, t_index] = 1
h_7[index] = after_ss[i] - Settings.CAR_LENGTH
index += 1
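        # Net effect of the rows built above: at each projected time step the ego
        # position is constrained to s >= before_s + CAR_LENGTH (stay ahead of the
        # trailing car) and s <= after_s - CAR_LENGTH (stay behind the leading car).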
    # Equality constraint: the trajectory must start at the current position s_sequence[0]
    A_eq = np.zeros((1, sub_length))
    A_eq[0, 0] = 1
    b_eq = np.zeros(1)
    b_eq[0] = s_sequence[0]
# Put together in the form Gx \leq h
if C_7 is not None:
G = np.vstack((V_1, V_2, A_3, A_4, J_5, J_6, C_7))
h = np.concatenate((h_1, h_2, h_3, h_4, h_5, h_6, h_7))
else:
G = np.vstack((V_1, V_2, A_3, A_4, J_5, J_6))
h = np.concatenate((h_1, h_2, h_3, h_4, h_5, h_6))
# Solve the QP
    sol = solvers.qp(matrix(P), matrix(q), matrix(G), matrix(h), matrix(A_eq), matrix(b_eq))
return np.array(sol['x']).flatten()
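# Illustrative use of finer_fit (hypothetical numbers, not executed here): densify a
# coarse 0.5 s plan down to a 0.1 s control tick while respecting the kinematic limits above.
#   coarse = np.array([0.0, 5.0, 10.5, 16.5])                      # s positions [m]
#   fine = finer_fit(coarse, delta_t=0.1, coarse_delta_t=0.5,
#                    start_speed=10.0, start_acceleration=0.0)
#   # 'fine' then has round(1.5 / 0.1) + 1 = 16 samples starting at coarse[0].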
def get_appropriate_base_st_path_and_obstacles(state):
obstacles, s_values, t_values, ego_speed, distances = find_s_t_obstacles_from_state(
state,
Settings.FUTURE_S,
Settings.S_DISCRETIZATION,
Settings.T_DISCRETIZATION,
Settings.FUTURE_T,
Settings.START_UNCERTAINTY,
Settings.UNCERTAINTY_PER_SECOND)
ego_acceleration = state.ego_acceleration
# Do the ST path planning
if Settings.USE_FAST_ST_SOLVER:
if Settings.USE_CYTHON:
s_sequence = st_cy.solve_s_t_path_fast(obstacles, s_values, t_values, ego_speed, ego_acceleration,
distances, Settings.D_WEIGHT, Settings.V_WEIGHT,
Settings.A_WEIGHT, Settings.J_WEIGHT, Settings.DESIRED_SPEED,
Settings.MAX_SPEED, Settings.MAX_NEGATIVE_ACCELERATION,
Settings.MAX_POSITIVE_ACCELERATION,
Settings.MINIMUM_NEGATIVE_JERK,
Settings.MAXIMUM_POSITIVE_JERK, Settings.MIN_ALLOWED_DISTANCE)
else:
s_sequence = solve_st_fast_v2(obstacles, s_values, t_values, ego_speed, ego_acceleration, distances)
else:
if Settings.USE_CYTHON:
s_sequence = st_cy.solve_s_t_path_no_jerk_djikstra(obstacles, s_values, t_values, ego_speed, distances)
else:
s_sequence = readable_solve_s_t_path_no_jerk(obstacles, s_values, t_values, ego_speed, distances)
return s_sequence, obstacles, s_values, t_values, distances
def do_st_control(state):
ego_acceleration = state.ego_acceleration
ego_speed = state.ego_speed
s_sequence, obstacles, s_values, t_values, distances = get_appropriate_base_st_path_and_obstacles(state)
# Trim the zeros from the end of the planned sequence (in the case where pathfinding failed)
end_point = len(s_sequence)
while s_sequence[end_point - 1] == 0:
end_point -= 1
if end_point != len(s_sequence):
print("ST Solver finds crash inevitable")
s_sequence = s_sequence[:end_point]
    # If the control tick is finer than the planning t discretization, densify and smooth with a QP
if Settings.TICK_LENGTH < Settings.T_DISCRETIZATION:
s_sequence = finer_fit(s_sequence, Settings.TICK_LENGTH, Settings.T_DISCRETIZATION, ego_speed, ego_acceleration)
# If the st solver predicts an immediate crash, nothing we can do
if len(s_sequence) <= 1:
control.set_ego_speed(ego_speed)
return ego_speed
# Plan using Euler updates
planned_distance_first_step = s_sequence[1] - s_sequence[0]
end_speed_first_step = planned_distance_first_step / (Settings.TICK_LENGTH)
control.set_ego_speed(end_speed_first_step)
return end_speed_first_step
def get_s_state():
return control.get_ego_s(control.get_ego_position())
def test_guaranteed_crash_from_state(state):
s_sequence, obstacles, s_values, t_values, distances = get_appropriate_base_st_path_and_obstacles(state)
end_point = len(s_sequence)
while s_sequence[end_point - 1] == 0:
end_point -= 1
if end_point != len(s_sequence):
return True
for i, s in enumerate(s_sequence):
s_index = get_range_index(s_values[0], s_values[1] - s_values[0], s)
distance = distances[i, s_index]
if distance < Settings.COMBINATION_MIN_DISTANCE - Settings.CAR_LENGTH:
return True
return False
def do_conditional_st_based_on_first_step(state, start_speed):
next_state, crashed = state.predict_step_with_ego(start_speed, delta_t=Settings.TICK_LENGTH)
crash_guaranteed = test_guaranteed_crash_from_state(next_state)
if crashed or crash_guaranteed:
print("ST solver taking over")
# Then the ST solver can't find a valid path after the predicted first step
return do_st_control(state)
else:
control.set_ego_speed(start_speed)
return start_speed
def evaluate_st(num_episodes=1000):
aggregate_stats = control.evaluate_control(do_st_control, num_episodes=num_episodes, state_function=prediction.HighwayState.from_sumo, verbose=True)
aggregate_stats.print_stats()
def evaluate_st_and_dump_crash(num_episodes=1000):
aggregate_stats = control.evaluate_control(do_st_control, num_episodes, state_function=prediction.HighwayState.from_sumo, crash_callback=plot_crash, verbose=True, save_state_on_crash=True)
aggregate_stats.print_stats()
def replay_crash():
import pickle
saved_data = pickle.load(open("crashed_state_history.pkl", 'rb'))
for i, item in enumerate(saved_data):
obstacles, s_values, t_values, ego_speed, ego_acceleration, distances = item
s_sequence = st_cy.solve_s_t_path_fast(obstacles, s_values, t_values, ego_speed, ego_acceleration,
distances, Settings.D_WEIGHT, Settings.V_WEIGHT,
Settings.A_WEIGHT, Settings.J_WEIGHT, Settings.DESIRED_SPEED,
Settings.MAX_SPEED, Settings.MAX_NEGATIVE_ACCELERATION,
Settings.MAX_POSITIVE_ACCELERATION,
Settings.MINIMUM_NEGATIVE_JERK,
Settings.MAXIMUM_POSITIVE_JERK, Settings.MIN_ALLOWED_DISTANCE)
end_point = len(s_sequence)
while s_sequence[end_point-1] == 0:
end_point -= 1
s_sequence2 = finer_fit(s_sequence[:end_point], Settings.TICK_LENGTH, Settings.T_DISCRETIZATION, ego_speed, ego_acceleration)
print(s_sequence2)
plot_s_path(obstacles, s_values, t_values, s_sequence)
plt.plot(np.linspace(t_values[0], Settings.TICK_LENGTH*(len(s_sequence2) - 1), len(s_sequence2)), s_sequence2, c='green')
plt.savefig("plots/{}.png".format(i))
plt.close()
def plot_crash(states):
import os
plotdir = os.path.join(Settings.FULL_LOG_DIR, "plots")
if not os.path.exists(plotdir):
os.mkdir(plotdir)
for j, start_state in enumerate(states):
s_sequence, obstacles, s_values, t_values, distances = get_appropriate_base_st_path_and_obstacles(start_state)
plot_s_path(obstacles, s_values, t_values, s_sequence)
plt.savefig(os.path.join(plotdir, "st_{}".format(j)))
plt.close()
|
[
"jlubars@gmail.com"
] |
jlubars@gmail.com
|
9ce337f1935bb126d192a22cb87ea589a0a39729
|
32849fb3e8fc2b7a9b1c8c234b36519845bd2c1c
|
/wav2vec/server.py
|
15db0b4b3a1a282a27c24926db3308d30a737694
|
[
"MIT"
] |
permissive
|
lucasgris/wav2vec-infer
|
06ce8eaf03bb23e460d8d7df7fa1ba4774a29bec
|
df8d5c02d541a0c8c02cca86661beeab455d40a0
|
refs/heads/main
| 2023-05-31T00:25:19.664855
| 2021-04-12T05:26:58
| 2021-04-12T05:26:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,447
|
py
|
import torch
import logging
import argparse
import soundfile as sf
import torch.nn.functional as F
import itertools as it
from fairseq import utils
from fairseq.models import BaseFairseqModel
from fairseq.data import Dictionary
from fairseq.models.wav2vec.wav2vec2_asr import base_architecture, Wav2VecEncoder
from wav2letter.common import create_word_dict, load_words
from wav2letter.decoder import CriterionType,DecoderOptions,KenLM,LM,LMState,SmearingMode,Trie,LexiconDecoder
from wav2letter.criterion import CpuViterbiPath, get_data_ptr_as_bytes
import numpy as np
from tqdm import tqdm
import os
from tempfile import NamedTemporaryFile
from flask import Flask, request, jsonify
import sys
from flask_cors import CORS, cross_origin
import json
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
ALLOWED_EXTENSIONS = set(['.wav', '.mp3', '.ogg', '.webm'])
#cs = ConfigStore.instance()
#cs.store(name="config", node=ServerConfig)
class Wav2VecCtc(BaseFairseqModel):
def __init__(self, w2v_encoder, args):
super().__init__()
self.w2v_encoder = w2v_encoder
self.args = args
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, args, target_dict):
"""Build a new model instance."""
base_architecture(args)
w2v_encoder = Wav2VecEncoder(args, target_dict)
return cls(w2v_encoder, args)
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
class W2lDecoder(object):
def __init__(self, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = 1
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.asg_transitions = None
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
# encoder_out = models[0].encoder(**encoder_input)
encoder_out = models[0](**encoder_input)
if self.criterion_type == CriterionType.CTC:
emissions = models[0].get_normalized_probs(encoder_out, log_probs=True)
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
return torch.LongTensor(list(idxs))
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = list()
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}] for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__( tgt_dict)
self.silence = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.lexicon = load_words(args['lexicon'])
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args['kenlm_model'], self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
print('h1')
print(len(self.lexicon.items()))
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
print(i, word, spellings)
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
print('h2')
        if args.get('beam_size_token'):
argument_2 = int(args['beam_size_token'])
else:
argument_2 = int(len(tgt_dict))
self.decoder_opts = DecoderOptions(
args['beam'],
argument_2,
args['beam_threshold'],
args['lm_weight'],
args['word_score'],
args['unk_weight'],
args['sil_weight'],
0,
False,
self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
False,
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
print('Decoding with Kenlm')
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
def get_results(wav_path,target_dict_path,use_cuda=False,w2v_path=None,model=None):
sample = dict()
net_input = dict()
feature = get_feature(wav_path)
target_dict = Dictionary.load(target_dict_path)
model[0].eval()
#generator = W2lViterbiDecoder(target_dict)
net_input["source"] = feature.unsqueeze(0)
padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0)
net_input["padding_mask"] = padding_mask
sample["net_input"] = net_input
sample = utils.move_to_cuda(sample) if use_cuda else sample
with torch.no_grad():
hypo = generator.generate(model, sample, prefix_tokens=None)
hyp_pieces = target_dict.string(hypo[0][0]["tokens"].int().cpu())
text=post_process(hyp_pieces, 'letter')
return text
def get_feature(filepath):
def postprocess(feats, sample_rate):
        if feats.dim() == 2:
feats = feats.mean(-1)
assert feats.dim() == 1, feats.dim()
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
wav, sample_rate = sf.read(filepath)
feats = torch.from_numpy(wav).float()
feats = postprocess(feats, sample_rate)
return feats
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == 'wordpiece':
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == 'letter':
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol is not None and symbol != 'none':
sentence = (sentence + " ").replace(symbol, "").rstrip()
return sentence
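# Illustrative example (assuming the letter-level dictionary used below): a raw CTC output
# such as "H E L L O | W O R L D |" becomes "HELLO WORLD" after post_process(text, 'letter').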
def load_gpu_model(model_path):
return torch.load(model_path,map_location=torch.device("cuda"))
def load_cpu_model(model_path):
return torch.load(model_path,map_location=torch.device("cpu"))
#import wav
import cgi
import contextlib
import wave
import os
import subprocess
@app.route('/transcribe', methods=['POST'])
@cross_origin()
def parse_transcription():
if request.method == 'POST':
res = {}
language = request.args.get("lang")
model_path = model_dict[language]
file = request.files['file']
filename = file.filename
_, file_extension = os.path.splitext(filename)
if file_extension.lower() not in ALLOWED_EXTENSIONS:
res['status'] = "error"
res['message'] = "{} is not supported format.".format(file_extension)
return jsonify(res)
filename_final = ''
with NamedTemporaryFile(suffix=file_extension,delete=False) as tmp_saved_audio_file:
file.save(tmp_saved_audio_file.name)
filename_final = tmp_saved_audio_file.name
        filename_local = os.path.splitext(os.path.basename(filename_final))[0]
        filename_new = '/tmp/' + filename_local + '_16.wav'
delete = True
subprocess.call(["sox {} -r {} -b 16 -c 1 {}".format(filename_final, str(16000), filename_new)], shell=True)
dict_path = "/".join(model_path.split('/')[:-1]) + '/dict.ltr.txt'
if cuda:
gpu_model = load_gpu_model(model_path)
result = get_results( filename_new , dict_path,cuda,model=gpu_model)
else:
cpu_model = load_cpu_model(model_path)
result = get_results( filename_new , dict_path,cuda,model=cpu_model)
if delete:
cmd = 'rm -f {}'.format(filename_final)
cmd2 = 'rm -f {}'.format(filename_new)
os.system(cmd)
os.system(cmd2)
logging.info('File transcribed')
res['status'] = "OK"
res['transcription'] = result
return jsonify(res)
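# Illustrative client call (assumes the server below is reachable on port 8020 and that the
# model config JSON maps the requested "lang" key, e.g. "hi", to a checkpoint path):
#   curl -X POST "http://localhost:8020/transcribe?lang=hi" -F "file=@sample.wav"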
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run')
parser.add_argument('-m', '--model-path', type=str, required=True, help="Model path")
parser.add_argument('-c', '--cuda',default=False, type=bool, help="CUDA path")
args_local = parser.parse_args()
global model_dict, cuda, generator
with open(args_local.model_path) as f:
model_dict = json.load(f)
dict_path = '/home/harveen.chadha/deployed_models/hi/dict.ltr.txt'
args_lm = {}
args_lm['lexicon'] = '/home/harveen.chadha/github/lm/LM_v2/lexicon.lst'
args_lm['kenlm_model'] = '/home/harveen.chadha/github/lm/LM_v2/lm.binary'
args_lm['beam'] = 128
args_lm['beam_threshold'] = 25
args_lm['lm_weight'] = 0.4
args_lm['word_score'] = 0.3
args_lm['unk_weight'] = -np.inf
args_lm['sil_weight'] = 0
print(args_lm)
print('heere')
#print(args_lm.lexicon)
print('heere 2 in kenlm')
target_dict = Dictionary.load(dict_path)
generator = W2lKenLMDecoder(args_lm, target_dict)
cuda = args_local.cuda
print(cuda)
logging.info('Server initialised')
app.run(host='0.0.0.0', port=8020, debug=True, use_reloader=False)
|
[
"harveen"
] |
harveen
|
0fc39f54934aafa76fa2f211a3d361fefb7f1d9f
|
272ff98ed048ddf518f888fea19b81cc262d4716
|
/gtp.py
|
9b0b6206bc7f2a68dc22bb15417a4a61201104a7
|
[] |
no_license
|
sugar-activities/4083-activity
|
b58ac46f8eb778d684b6b7050f9771777a62fa25
|
63c0e8a6a2706714afbb4247182e09cd9b63b2a6
|
refs/heads/master
| 2021-01-19T23:14:21.098118
| 2017-04-21T05:07:37
| 2017-04-21T05:07:37
| 88,938,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,614
|
py
|
# -*- coding: UTF-8 -*-
# Copyright 2007-2008 One Laptop Per Child
# Copyright 2008 Andrés Ambrois <andresambrois@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from subprocess import Popen, PIPE
import logging
from sugar.activity.activity import get_bundle_path
from os.path import exists, join, abspath
from os import pathsep, environ
from string import split
logger = logging.getLogger('PlayGo')
def search_for_gnugo():
paths = split(environ['PATH'], pathsep)
for path in paths:
if exists(join(path, 'gnugo')):
return abspath(join(path, 'gnugo'))
default_path = join(get_bundle_path(), 'bin', 'gnugo')
if exists(default_path):
return abspath(default_path)
return False
class gnugo:
''' A wrapper for talking to gnugo over GTP '''
def __init__(self, boardsize=19, handicap=0, komi=5.5, level=3):
''' Start the gnugo subprocess '''
self.size = boardsize
self.path = search_for_gnugo()
if self.path:
logger.debug('Found gnugo at %s', self.path)
try:
self.gnugo = Popen([self.path, '--mode', 'gtp', '--boardsize', str(boardsize),
'--handicap', str(handicap), '--komi', str(komi), '--level', str(level) ],
stdout=PIPE, stdin=PIPE)
except OSError, data:
logger.error('Could not start gnugo subprocess: %s', data)
raise
else:
logger.debug('Successfuly loaded gnugo!')
self.stdin = self.gnugo.stdin
self.stdout = self.gnugo.stdout
else:
logger.error('Could not find gnugo')
def __del__(self):
logger.debug('Closing gnugo')
self.stdin.write('quit \n')
self.stdin.flush()
def _xy_to_coords(self, x, y):
return dict(zip(range(25), 'ABCDEFGHJKLMNOPQRSTUVWXYZ'))[x] + str(self.size - y)
def _coords_to_xy(self, coords):
return int(dict(zip('ABCDEFGHJKLMNOPQRSTUVWXYZ', range(25)))[coords[0]]), self.size - int(coords[1:])
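    # Example: on a 19x19 board, (x, y) = (0, 0) maps to 'A19' and back; note the letter
    # 'I' is deliberately absent from the column string, following GTP convention.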
def short_to_long_colors(self, short_color):
if short_color == 'B':
return 'black'
return 'white'
def make_play(self, color, x, y):
color = self.short_to_long_colors(color)
self.stdin.write('play %s %s\n' % (color, self._xy_to_coords(x, y)))
self.stdin.flush()
logger.debug('Sent play by %s at %s to gnugo', color, self._xy_to_coords(x, y))
output = self.stdout.readline()
self.stdout.readline()
if output[0] == '?':
return False
return True
def get_move(self, color):
color = self.short_to_long_colors(color)
self.stdin.write('kgs-genmove_cleanup %s\n' % color)
self.stdin.flush()
output = self.stdout.readline()
self.stdout.readline()
if output[0] == '?':
# FIXME: Handle error
return False
elif output[2:] == 'PASS\n':
return -1, -1
logger.debug('Generated move %s', output[2:])
return self._coords_to_xy(output[2:])
def pass_move(self, color):
color = self.short_to_long_colors(color)
self.stdin.write('%s pass\n' % color)
self.stdin.flush()
self.stdout.readline()
self.stdout.readline()
def undo(self):
self.stdin.write('undo\n')
self.stdin.flush()
self.stdout.readline()
self.stdout.readline()
def clear(self):
self.stdin.write('clear_board\n')
self.stdin.flush()
self.stdout.readline()
self.stdout.readline()
def dump_board(self):
self.stdin.write('showboard\n')
self.stdin.flush()
output = ''
for i in range(0, self.size+4):
output = output + self.stdout.readline()
return output
|
[
"ignacio@sugarlabs.org"
] |
ignacio@sugarlabs.org
|
f59557cfff4041cf36174e07605dea498512d51d
|
d3ce90d1f2164d51bc899397082378406672224e
|
/main.py
|
a58dae2f6f68af2063134027d57b3b137d0fdfbd
|
[
"MIT"
] |
permissive
|
automainint/ip-monitor
|
28ee0b99dab2dc0a345e805a1d4da497f73b778b
|
46efa1bc96e202213e41605fc750a22687a183ce
|
refs/heads/main
| 2023-07-13T21:57:36.297491
| 2021-08-30T15:33:44
| 2021-08-30T15:33:44
| 397,872,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,887
|
py
|
#!/usr/bin/python3
import configparser
import argparse
import requests
import os, shutil, time
from smtplib import SMTP_SSL
from email.message import EmailMessage
def parse_args(config_file):
config = configparser.ConfigParser(
defaults = { 'helper': 'https://api.ipify.org',
'server': 'in-v3.mailjet.com',
'port': '0',
'user': 'user',
'pass': '',
'sender': 'user@yandex.ru',
'sendto': 'user@yandex.ru',
'subject': 'IP monitor',
'delay': '60',
'notify': '100000' })
config.read(config_file)
cfg = config['DEFAULT']
parser = argparse.ArgumentParser(
prog = 'ip-monitor',
description = 'Public IP address monitoring'
)
parser.add_argument(
'--stop',
help = 'stop background process',
dest = 'stop',
action = 'store_const',
const = True,
default = False
)
parser.add_argument(
'--ip-helper',
type = str,
metavar = 'URL',
default = cfg.get('helper'),
help = 'public IP service provider',
dest = 'ip_helper'
)
parser.add_argument(
'--server',
type = str,
metavar = 'URL',
default = cfg.get('server'),
help = 'SMTP server address',
dest = 'smtp_server'
)
parser.add_argument(
'--port',
type = int,
metavar = 'PORT',
default = cfg.get('port'),
help = 'SMTP server port',
dest = 'smtp_port'
)
parser.add_argument(
'--user',
type = str,
metavar = 'NAME',
default = cfg.get('user'),
help = 'SMTP username',
dest = 'smtp_user'
)
parser.add_argument(
'--pass',
type = str,
metavar = '****',
default = cfg.get('pass'),
help = 'SMTP password',
dest = 'smtp_pass'
)
parser.add_argument(
'--sender',
type = str,
metavar = 'EMAIL',
default = cfg.get('sender'),
help = 'sender email address',
dest = 'sender'
)
parser.add_argument(
'--sendto',
type = str,
metavar = 'EMAIL',
default = cfg.get('sendto'),
help = 'where to send notifications',
dest = 'sendto'
)
parser.add_argument(
'--subject',
type = str,
metavar = 'TITLE',
default = cfg.get('subject'),
help = 'notification email subject',
dest = 'subject'
)
parser.add_argument(
'--delay',
type = int,
metavar = 'TIME',
default = cfg.get('delay'),
help = 'IP check delay in seconds',
dest = 'delay'
)
parser.add_argument(
'--notify',
type = int,
metavar = 'TIME',
default = cfg.get('notify'),
help = 'email notification timeout in seconds',
dest = 'notify'
)
return parser.parse_args()
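# Illustrative ip-monitor.ini (hypothetical values; any omitted key falls back to the
# defaults declared in parse_args above):
#   [DEFAULT]
#   server = in-v3.mailjet.com
#   port = 465
#   user = my-smtp-user
#   pass = my-smtp-password
#   sender = monitor@example.com
#   sendto = admin@example.com
#   delay = 300
#   notify = 86400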
def send_notification(
smtp_url: str,
smtp_port: int,
user: str,
password: str,
sender: str,
sendto: str,
subject: str,
ip: str
):
msg = EmailMessage()
msg.set_content(ip)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = sendto
msg['Precedence'] = 'bulk'
with SMTP_SSL(smtp_url, smtp_port) as smtp:
smtp.login(user, password)
smtp.send_message(msg, sender, sendto)
args = parse_args('ip-monitor.ini')
if args.stop:
with open('.stop', 'w'): pass
raise SystemExit
a_helper = args.ip_helper
a_smtp = args.smtp_server
a_port = args.smtp_port
a_user = args.smtp_user
a_pass = args.smtp_pass
a_sender = args.sender
a_sendto = args.sendto
a_subject = args.subject
a_delay = args.delay
a_notify = args.notify
address = ''
addr_new = ''
time_check = 0
time_notify = 0
with open('ip-monitor.log', 'a') as f:
f.write('\n\nIP monitor started\n\n')
f.write('Helper: ' + a_helper + '\n')
f.write('SMTP: ' + a_smtp + '\n')
f.write('Port: ' + str(a_port) + '\n')
f.write('User: ' + a_user + '\n')
f.write('Sender: ' + a_sender + '\n')
f.write('Send to: ' + a_sendto + '\n')
f.write('Subject: ' + a_subject + '\n')
f.write('Delay: ' + str(a_delay) + '\n')
f.write('Notify: ' + str(a_notify) + '\n\n')
while not os.path.exists('.stop'):
if time_check <= 0:
try:
addr_new = requests.get(a_helper).text
time_check = a_delay
if addr_new != address or time_notify <= 0:
send_notification(
a_smtp, a_port, a_user, a_pass,
a_sender, a_sendto, a_subject,
addr_new
)
with open('ip-monitor.log', 'a') as f:
f.write(
'Notification sent. Current IP: ' +
addr_new + '\n')
address = addr_new
time_notify = a_notify
except Exception as e:
with open('ip-monitor.log', 'a') as f:
f.write(str(e) + '\n')
time_check -= 1
time_notify -= 1
time.sleep(1)
os.remove('.stop')
|
[
"0x7fffff@guattari.ru"
] |
0x7fffff@guattari.ru
|
a6c8fbdeae4c919bf654c2ccbeb8f8779ff51ed5
|
835af2ea1c7dbd5a4605b7cc1c72b83cdf0c0254
|
/places/settings/dev.py
|
36990a404b30921ffcd565580e6dfb837303c4fb
|
[] |
no_license
|
Spudwars/wheregonow
|
965b3a2c3e880e16ad28ad19d57d72f8939e05e0
|
7600cb813b11ea9ca7b474361850a807c44c4233
|
refs/heads/master
| 2021-03-22T03:57:52.317081
| 2013-02-12T23:36:16
| 2013-02-12T23:36:16
| 7,997,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
from .base import *
DEBUG = TEMPLATE_DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dev.db',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Disable caching while in development
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# set up Django Debug Toolbar if installed
try:
import debug_toolbar
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS += (
'debug_toolbar',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': lambda *args, **kwargs: True
}
except ImportError:
pass
# set up devserver if installed
try:
import devserver
INSTALLED_APPS += (
'devserver',
)
except ImportError:
pass
# Don't use Sentry logging even if configured for production
LOGGING = BASE_LOGGING
GA_TRACKING_CODE = ''
|
[
"chris.jesse@flightdataservices.com"
] |
chris.jesse@flightdataservices.com
|
373b206f3f3ba5a1e44d6a4ab81c719d7ae250f4
|
ea21c75c6d42dddec7ef6c9e1c3337ef44dbed98
|
/Passport/ocr_v2_passport.py
|
1446181175b7edfe9272e163eae659e9c3624dc6
|
[] |
no_license
|
HarshitPatel25/OCR
|
d9aa44d3cac61bc527376419355665271f686953
|
ae04728f34a1b4af5135f58b26dfb5b79c20cb05
|
refs/heads/master
| 2022-12-02T02:19:30.321235
| 2020-08-19T07:18:10
| 2020-08-19T07:18:10
| 288,661,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,445
|
py
|
# import the necessary packages
from PIL import Image
import pytesseract as pt
import argparse
import cv2
import os
import re
import io
import json
import ftfy
# from nostril import nonsense
################################################################################################################
############################# Section 1: Initiate the command line interface ###################################
################################################################################################################
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image to be OCR'd")
ap.add_argument("-p", "--preprocess", type=str, default="thresh",
help="type of preprocessing to be done, choose from blur, linear, cubic or bilateral")
args = vars(ap.parse_args())
'''
Our command line arguments are parsed. We have two command line arguments:
--image : The path to the image we’re sending through the OCR system.
--preprocess : The preprocessing method. This switch is optional and can accept the following
parameters (refer to the sections below to know more):
- blur
- adaptive
- linear
- cubic
- gauss
- bilateral
- thresh (median threshold - default)
--------------------------- Use blur when the image has noise/grain/incident light etc. --------------------------
'''
##############################################################################################################
###################### Section 2: Load the image -- Preprocess it -- Write it to disk ########################
##############################################################################################################
# load the example image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check to see if we should apply thresholding to preprocess the
# image
if args["preprocess"] == "thresh":
gray = cv2.threshold(gray, 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
elif args["preprocess"] == "adaptive":
gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
'''
We would like to add some additional preprocessing steps, since in most cases you may need to scale your
image to a larger size to recognize small characters.
In this case, INTER_CUBIC generally performs better than the alternatives, though it is also slower.
If you'd like to trade off some of your image quality for faster performance,
you may want to try INTER_LINEAR for enlarging images.
'''
if args["preprocess"] == "linear":
gray = cv2.resize(gray, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
elif args["preprocess"] == "cubic":
gray = cv2.resize(gray, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
# make a check to see if blurring should be done to remove noise, first is default median blurring
if args["preprocess"] == "blur":
gray = cv2.medianBlur(gray, 3)
elif args["preprocess"] == "bilateral":
gray = cv2.bilateralFilter(gray, 9, 75, 75)
elif args["preprocess"] == "gauss":
gray = cv2.GaussianBlur(gray, (5,5), 0)
# write the grayscale image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, gray)
##############################################################################################################
######################################## Section 3: Running PyTesseract ######################################
##############################################################################################################
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
pt.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
text = pt.image_to_string(Image.open(filename), lang = 'eng')
# add +hin after eng within the same argument to extract hindi specific text - change encoding to utf-8 while writing
os.remove(filename)
# print(text)
# show the output images
# cv2.imshow("Image", image)
# cv2.imshow("Output", gray)
# cv2.waitKey(0)
# writing extracted data into a text file
text_output = open('outputbase.txt', 'w', encoding='utf-8')
text_output.write(text)
text_output.close()
file = open('outputbase.txt', 'r', encoding='utf-8')
text = file.read()
# print(text)
# Cleaning all the gibberish text
text = ftfy.fix_text(text)
text = ftfy.fix_encoding(text)
'''for god_damn in text:
if nonsense(god_damn):
text.remove(god_damn)
else:
print(text)'''
# print(text)
############################################################################################################
###################################### Section 4: Extract relevant information #############################
############################################################################################################
# Initializing data variable
surname = None
first_name = None
dob = None
gender = None
number = None
doe = None
text0 = []
text1 = []
# Searching for PAN
lines = text.split('\n')
for lin in lines:
s = lin.strip()
s = lin.replace('\n','')
s = s.rstrip()
s = s.lstrip()
text1.append(s)
text1 = list(filter(None, text1))
# print(text1)
# to remove any text read from the image file which lies before the line 'Income Tax Department'
lineno = 0 # to start from the first line of the text file.
# text1 = list(text1)
text0 = text1[lineno+1:]
print(text0) # Contains all the relevant extracted text in form of a list - uncomment to check
def findword(textlist, wordstring):
lineno = -1
for wordline in textlist:
xx = wordline.split( )
if ([w for w in xx if re.search(wordstring, w)]):
lineno = textlist.index(wordline)
textlist = textlist[lineno+1:]
return textlist
return textlist
###############################################################################################################
######################################### Section 5: Dishwasher part ##########################################
###############################################################################################################
try:
# Cleaning Surname
surname = text0[3]
surname = surname.rstrip()
surname = surname.lstrip()
surname = re.sub('[^a-zA-Z] +', ' ', surname)
# Cleaning First Name
first_name = text0[5]
first_name = first_name.rstrip()
first_name = first_name.lstrip()
first_name = re.sub('[^a-zA-Z] +', ' ', first_name)
# Cleaning DOB
dob = text0[7]
dob = dob.rstrip()
dob = dob.lstrip()
dob = dob[-12:]
# Cleaning Gender
gender = text0[4]
gender = 'M' # need to fix this
# Cleaning Passport Number
number = text0[1]
number = number[-8:]
number = number.rstrip()
number = number.lstrip()
# Cleaning DOE
doe = text0[14]
doe = doe.rstrip()
doe = doe.lstrip()
doe = doe[-12:-2]
except:
pass
# Making tuples of data
data = {}
data['Surname'] = surname
data['First Name'] = first_name
data['Date of Birth'] = dob
data['Gender'] = gender
data['Number'] = number
data['Date of Expiry'] = doe
# print(data)
###############################################################################################################
######################################### Section 6: Write Data to JSONs ######################################
###############################################################################################################
# Writing data into JSON
try:
to_unicode = unicode
except NameError:
to_unicode = str
# Write JSON file
with io.open('data.json', 'w', encoding='utf-8') as outfile:
str_ = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': '), ensure_ascii=False)
outfile.write(to_unicode(str_))
# Read JSON file
with open('data.json', encoding = 'utf-8') as data_file:
data_loaded = json.load(data_file)
# print(data == data_loaded)
# Reading data back JSON(give correct path where JSON is stored)
with open('data.json', 'r', encoding= 'utf-8') as f:
ndata = json.load(f)
print(ndata)
|
[
"harshitcode25@gmail.com"
] |
harshitcode25@gmail.com
|
a2cac299398ecea8a12b0f3ff9a50c53d50df052
|
cdb29a347d67eb80f3deb09b685ea1e82ae47e7d
|
/leetcode/climbStairs.py
|
1b0262740f2eee0f8e5b7fd0918f00163ed889b2
|
[] |
no_license
|
ishankkm/pythonProgs
|
f41e82c86591f4078c4c1317ecb4829087961c76
|
be98ba8e50cc7844d519a5ae5b6e4d9901e175ca
|
refs/heads/master
| 2021-01-24T16:48:50.161323
| 2018-09-08T23:30:19
| 2018-09-08T23:30:19
| 123,213,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
'''
Created on May 5, 2018
@author: ishank
You are climbing a stair case. It takes n steps to reach to the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
'''
def climbStairs(n):
    if n <= 1:
        return 1
    first, second = 1, 2
    for _ in range(2, n):
        second = first + second
        first = second - first
    return second
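# The counts follow the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2):
# e.g. climbStairs(3) == 3 (1+1+1, 1+2, 2+1) and climbStairs(10) == 89.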
print(climbStairs(10))
|
[
"imishra@usc.edu"
] |
imishra@usc.edu
|
23fe111852b94e634ef05790a8368e8de8f9dd08
|
c72df3759d1c61f1356a5b82cdb30a4608f23ae8
|
/convert_to_line.py
|
af43eedd3428bc127ec44d85b8b7640a2b3e2f24
|
[] |
no_license
|
abedalbaset/n-queen-solutions-list
|
0e3d7204aa453075471759fb572fac8936d55c93
|
8ebd98e2c57d271f216c06d76a4cccd72a5413de
|
refs/heads/master
| 2020-05-01T06:25:06.039055
| 2019-05-28T23:03:57
| 2019-05-28T23:03:57
| 177,329,633
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
#convert n queen square solution to lines
# global change according to file
boardlenghth=12
file_name="12_12_sol.txt"
outputfilename="12_12_lines.txt"
#end global change according to file
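# Assumed input layout (inferred from the loop below): starting at line index 1, solutions
# are read in blocks of boardlenghth+2 lines; the first boardlenghth lines of each block are
# the board rows and are joined into one space-separated line in the output file.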
with open(file_name) as f:
content = f.readlines()
content = [x.strip() for x in content]
numberofsol=len(content)/(boardlenghth+2)
f = open(outputfilename, "a")
for c in range(1,len(content),boardlenghth+2):
sum=""
for cc in range(0,boardlenghth):
sum=sum+content[c+cc]+" "
f.write(sum+"\n")
f.close()
|
[
"noreply@github.com"
] |
abedalbaset.noreply@github.com
|
8be026d5adf82df06e866838bdcc191ae2c44a9b
|
1500da3c58fd3c6becf45c86430f5a08e4cfa289
|
/parcels/particlefile.py
|
25efc792a6a69c176066fcd4d5f5905212a9ac06
|
[
"MIT"
] |
permissive
|
rm1911/parcels
|
26e06ab32f75992da5f8631788159462ce70c4de
|
9c7f957bdb513f69d93c56b701287f8ad3b2b6a9
|
refs/heads/master
| 2021-01-18T04:28:56.551640
| 2016-08-12T10:09:31
| 2016-08-12T10:09:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,037
|
py
|
import numpy as np
import netCDF4
__all__ = ['ParticleFile']
class ParticleFile(object):
def __init__(self, name, particleset, initial_dump=True):
"""Initialise netCDF4.Dataset for trajectory output.
The output follows the format outlined in the Discrete
Sampling Geometries section of the CF-conventions:
http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#discrete-sampling-geometries
The current implementation is based on the NCEI template:
http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/trajectoryIncomplete.cdl
Developer note: We cannot use xray.Dataset here, since it does
not yet allow incremental writes to disk:
https://github.com/xray/xray/issues/199
:param name: Basename of the output file
        :param particleset: ParticleSet to output
:param initial_dump: Perform initial output at time 0.
:param user_vars: A list of additional user defined particle variables to write
"""
self.dataset = netCDF4.Dataset("%s.nc" % name, "w", format="NETCDF4")
self.dataset.createDimension("obs", None)
self.dataset.createDimension("trajectory", particleset.size)
self.dataset.feature_type = "trajectory"
self.dataset.Conventions = "CF-1.6"
self.dataset.ncei_template_version = "NCEI_NetCDF_Trajectory_Template_v2.0"
# Create ID variable according to CF conventions
self.trajectory = self.dataset.createVariable("trajectory", "i4", ("trajectory",))
self.trajectory.long_name = "Unique identifier for each particle"
self.trajectory.cf_role = "trajectory_id"
self.trajectory[:] = np.arange(particleset.size, dtype=np.int32)
# Create time, lat, lon and z variables according to CF conventions:
self.time = self.dataset.createVariable("time", "f8", ("trajectory", "obs"), fill_value=np.nan)
self.time.long_name = ""
self.time.standard_name = "time"
if particleset.time_origin == 0:
self.time.units = "seconds"
else:
self.time.units = "seconds since " + str(particleset.time_origin)
self.time.calendar = "julian"
self.time.axis = "T"
self.lat = self.dataset.createVariable("lat", "f4", ("trajectory", "obs"), fill_value=np.nan)
self.lat.long_name = ""
self.lat.standard_name = "latitude"
self.lat.units = "degrees_north"
self.lat.axis = "Y"
self.lon = self.dataset.createVariable("lon", "f4", ("trajectory", "obs"), fill_value=np.nan)
self.lon.long_name = ""
self.lon.standard_name = "longitude"
self.lon.units = "degrees_east"
self.lon.axis = "X"
self.z = self.dataset.createVariable("z", "f4", ("trajectory", "obs"), fill_value=np.nan)
self.z.long_name = ""
self.z.standard_name = "depth"
self.z.units = "m"
self.z.positive = "down"
self.user_vars = []
for v in particleset.ptype.variables:
if v.name in ['time', 'lat', 'lon', 'z']:
continue
setattr(self, v.name, self.dataset.createVariable(v.name, "f4", ("trajectory", "obs"), fill_value=0.))
getattr(self, v.name).long_name = ""
getattr(self, v.name).standard_name = v.name
getattr(self, v.name).units = "unknown"
self.user_vars += [v.name]
self.idx = 0
if initial_dump:
self.write(particleset, 0.)
def __del__(self):
self.dataset.close()
def write(self, pset, time):
"""Write particle set data to file"""
self.time[:, self.idx] = time
self.lat[:, self.idx] = np.array([p.lat for p in pset])
self.lon[:, self.idx] = np.array([p.lon for p in pset])
self.z[:, self.idx] = np.zeros(pset.size, dtype=np.float32)
for var in self.user_vars:
getattr(self, var)[:, self.idx] = np.array([getattr(p, var) for p in pset])
self.idx += 1
|
[
"michael.lange@imperial.ac.uk"
] |
michael.lange@imperial.ac.uk
|
30b87bccc18ed555b6af4d806c48ce473e2752e6
|
f68a3657ddf6fe528208fc76caf70bbf6515c30e
|
/TwitterSentimentAnalsis/archive/sentiment_supervised-trial.py
|
9e737ebda98521bf013c8c42f5ff848ed7e63812
|
[] |
no_license
|
nikilohiya/fintweet
|
eb3084bac2bb150a0b6b4633d61b4154355d778b
|
53b18e9e83e7df3d40a4d098802ff5057e406dc3
|
refs/heads/master
| 2021-04-03T06:04:32.714006
| 2018-03-19T15:51:51
| 2018-03-19T15:51:51
| 125,084,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
from nltk.tokenize import TweetTokenizer
import nltk
from nltk.corpus import stopwords
import string
# -*- coding: UTF-8 -*-
import HTMLParser
###
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
class SentimentAnalysisSupervized():
def sentiment_analysis_LinearSVC(self, df_training, df_new, filepath):
traing_tweet_texts = df_training['text']
traing_tweet_targets = df_training['sentiment']
# Target details 0 - the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)
p8_2 = Pipeline([
('tfidf', TfidfVectorizer(stop_words=None, token_pattern='[A-Za-z0-9]+(?=\\s+)', min_df=3)),
('clf', LinearSVC(loss='squared_hinge'))
])
p8_2.fit(traing_tweet_texts, traing_tweet_targets)
predicted = p8_2.predict(df_new['text'])
df_processed = pd.DataFrame()
df_processed['date'] = df_new['date']
df_processed['text'] = df_new['text']
#df_processed['sentiment'] = df_new['sentiment']
df_processed['predicted'] = predicted
df_processed.to_csv(filepath, index=False)
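    # Expected CSV columns (inferred from the accesses above): the training frame needs
    # 'text' and 'sentiment'; the new-tweets frame needs 'date' and 'text'.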
def sentiment_analysis2(self, df, filepath):
tweet = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
vocab = set(word.lower() for word in nltk.corpus.words.words())
stop_words = stopwords.words('english')
'''tokens = [token.strip() \
for token in nltk.word_tokenize(tweet.lower()) \
if token.strip() not in stop_words and \
token.strip() not in string.punctuation \
and token.strip() in vocab]'''
tokens = [token.strip() \
for token in nltk.word_tokenize(tweet.lower()) \
if token.strip() not in stop_words and \
token.strip() not in string.punctuation]
        print tokens
print "-------"
tknzr = TweetTokenizer()
tokens2 = tknzr.tokenize(tweet)
print tokens2
def main():
sa = SentimentAnalysisSupervized()
input_file_name = 'APPL.csv'
training_file_path = "data/manually_labeled_data/" + input_file_name
training_df = pd.read_csv(training_file_path)
new_tweets_file_path = "data/twitter_clean_data/" + input_file_name
new_tweets_df = pd.read_csv(new_tweets_file_path)
output_file_path = 'results/sentiment_analysis_LinearSVC/' + input_file_name
sa.sentiment_analysis_LinearSVC(training_df, new_tweets_df, output_file_path)
main()
|
[
"nikilohiya@gmail.com"
] |
nikilohiya@gmail.com
|
591f2a518119961b336fdf7115bfdb1cdd13eb2c
|
fd800a1748b418402df23a91bdb7eaade92e5e0e
|
/code/castle_mod/algorithms/gradient/__init__.py
|
d4616d3a74998dc80bd7b01c55fa09ca7560e541
|
[] |
no_license
|
xwbxxx/PCIC2021_causal_discovery_dmirlab
|
b2a8569e30ba7b1d5a6da091a7b93f2d9d065dd7
|
1f88f40817b62d7a3f90531839eb1a0ad9d817f5
|
refs/heads/main
| 2023-08-03T05:28:36.686431
| 2021-09-15T05:22:20
| 2021-09-15T05:22:20
| 436,819,662
| 1
| 0
| null | 2021-12-10T02:01:03
| 2021-12-10T02:01:03
| null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# coding=utf-8
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .notears.linear import Notears
from .notears.nonlinear import NotearsMLP
from .notears.nonlinear import NotearsSob
from .notears.low_rank import NotearsLowRank
from .notears.golem import GOLEM
from .gran_dag.gran_dag import GraN_DAG
from .graph_auto_encoder.gae import GAE
from .masked_csl.mcsl import MCSL
from .rl.rl import RL
from .corl1.corl1 import CORL1
from .corl2.corl2 import CORL2
|
[
"2018770887@qq.com"
] |
2018770887@qq.com
|
35e40d0e0276de9c456c162e3d62690341e6ce6f
|
c9708d43ce0581e19aca656103b28781f69f3da1
|
/tests/seleniumwire/proxy/test_storage.py
|
4bb25d9082dfdba58e0cc7aa850fa3398ee9e8bb
|
[
"MIT"
] |
permissive
|
spinda/selenium-wire
|
9d7f5d5b5cbf9ffe8aaf6169fef75ed2fe472f22
|
e7687d80907d32226ba352f39d6e81bf14cf5380
|
refs/heads/master
| 2020-05-20T00:49:37.401477
| 2019-04-16T10:29:26
| 2019-04-16T10:29:26
| 185,296,120
| 0
| 0
|
MIT
| 2019-05-07T01:17:27
| 2019-05-07T01:17:25
| null |
UTF-8
|
Python
| false
| false
| 11,236
|
py
|
from datetime import datetime, timedelta
from fnmatch import fnmatch
import glob
import gzip
from http.client import HTTPMessage
from io import BytesIO
import os
import pickle
import shutil
from unittest import TestCase
from unittest.mock import Mock
from seleniumwire.proxy.storage import RequestStorage
class RequestStorageTest(TestCase):
def test_initialise(self):
RequestStorage(base_dir=self.base_dir)
storage_dir = glob.glob(os.path.join(self.base_dir, '.seleniumwire', 'storage-*'))
self.assertEqual(len(storage_dir), 1)
def test_cleanup_removes_storage(self):
storage = RequestStorage(base_dir=self.base_dir)
storage.cleanup()
# The 'seleniumwire' parent folder should have been cleaned up
# when there is nothing left inside of it.
self.assertFalse(os.listdir(self.base_dir))
def test_cleanup_does_not_remove_parent_folder(self):
# There is an existing storage folder
os.makedirs(os.path.join(self.base_dir, '.seleniumwire', 'teststorage'))
storage = RequestStorage(base_dir=self.base_dir)
storage.cleanup()
# The existing storage folder is not cleaned up
self.assertEqual(len(os.listdir(self.base_dir)), 1)
self.assertTrue(os.path.exists(os.path.join(self.base_dir, '.seleniumwire', 'teststorage')))
def test_initialise_clears_old_folders(self):
test_dir = os.path.join(self.base_dir, '.seleniumwire', 'storage-test')
os.makedirs(test_dir)
two_days_ago = (datetime.now() - timedelta(days=2)).timestamp()
os.utime(test_dir, times=(two_days_ago, two_days_ago))
RequestStorage(base_dir=self.base_dir)
self.assertFalse(os.path.exists(test_dir))
def test_save_request(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
request_file_path = self._get_stored_path(request_id, 'request')
with open(request_file_path[0], 'rb') as loaded:
loaded_request = pickle.load(loaded)
self.assertEqual(loaded_request['id'], request_id)
self.assertEqual(loaded_request['path'], 'http://www.example.com/test/path/')
self.assertEqual(loaded_request['method'], 'GET')
self.assertEqual(loaded_request['headers'], {
'Host': 'www.example.com',
'Accept': '*/*'
})
self.assertIsNone(loaded_request['response'])
def test_save_request_with_body(self):
mock_request = self._create_mock_request()
request_body = b'test request body'
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request, request_body=request_body)
request_body_path = self._get_stored_path(request_id, 'requestbody')
with open(request_body_path[0], 'rb') as loaded:
loaded_body = pickle.load(loaded)
self.assertEqual(loaded_body, b'test request body')
def test_save_response(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
mock_response = self._create_mock_resonse()
storage.save_response(request_id, mock_response)
response_file_path = self._get_stored_path(request_id, 'response')
with open(response_file_path[0], 'rb') as loaded:
loaded_response = pickle.load(loaded)
self.assertEqual(loaded_response['status_code'], 200)
self.assertEqual(loaded_response['reason'], 'OK')
self.assertEqual(loaded_response['headers'], {
'Content-Type': 'application/json',
'Content-Length': '500'
})
def test_save_response_with_body(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
mock_response = self._create_mock_resonse()
response_body = b'some response body'
storage.save_response(request_id, mock_response, response_body=response_body)
response_body_path = self._get_stored_path(request_id, 'responsebody')
with open(response_body_path[0], 'rb') as loaded:
loaded_body = pickle.load(loaded)
self.assertEqual(loaded_body, b'some response body')
def test_save_response_no_request(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
mock_response = self._create_mock_resonse()
storage.clear_requests()
storage.save_response(request_id, mock_response)
response_file_path = self._get_stored_path(request_id, 'response')
self.assertFalse(response_file_path)
def test_load_requests(self):
mock_request_1 = self._create_mock_request()
mock_request_2 = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id1 = storage.save_request(mock_request_1)
request_id2 = storage.save_request(mock_request_2)
requests = storage.load_requests()
self.assertEqual(len(requests), 2)
self.assertEqual(requests[0]['id'], request_id1)
self.assertEqual(requests[1]['id'], request_id2)
self.assertIsNone(requests[0]['response'])
self.assertIsNone(requests[1]['response'])
def test_load_response(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request)
mock_response = self._create_mock_resonse()
storage.save_response(request_id, mock_response)
requests = storage.load_requests()
self.assertIsNotNone(requests[0]['response'])
def test_load_request_body(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request, request_body=b'test request body')
request_body = storage.load_request_body(request_id)
self.assertEqual(request_body, b'test request body')
def test_load_response_body(self):
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request, request_body=b'test request body')
mock_response = self._create_mock_resonse()
storage.save_response(request_id, mock_response, response_body=b'test response body')
response_body = storage.load_response_body(request_id)
self.assertEqual(response_body, b'test response body')
def test_load_response_body_encoded(self):
io = BytesIO()
with gzip.GzipFile(fileobj=io, mode='wb') as f:
f.write(b'test response body')
mock_request = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request, request_body=b'test request body')
mock_response = self._create_mock_resonse()
mock_response.headers['Content-Encoding'] = 'gzip'
storage.save_response(request_id, mock_response, response_body=io.getvalue())
response_body = storage.load_response_body(request_id)
self.assertEqual(response_body, b'test response body')
def test_load_last_request(self):
mock_request_1 = self._create_mock_request()
mock_request_2 = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
storage.save_request(mock_request_1)
request_id2 = storage.save_request(mock_request_2)
last_request = storage.load_last_request()
self.assertEqual(last_request['id'], request_id2)
def test_load_last_request_none(self):
storage = RequestStorage(base_dir=self.base_dir)
last_request = storage.load_last_request()
self.assertIsNone(last_request)
def test_clear_requests(self):
mock_request_1 = self._create_mock_request()
mock_request_2 = self._create_mock_request()
storage = RequestStorage(base_dir=self.base_dir)
storage.save_request(mock_request_1)
storage.save_request(mock_request_2)
storage.clear_requests()
requests = storage.load_requests()
self.assertFalse(requests)
self.assertFalse(glob.glob(os.path.join(self.base_dir, '.seleniumwire', 'storage-*', '*')))
def test_get_cert_dir(self):
storage = RequestStorage(base_dir=self.base_dir)
self.assertTrue(fnmatch(storage.get_cert_dir(),
os.path.join(self.base_dir, '.seleniumwire', 'storage-*', 'certs')))
def test_find(self):
mock_request_1 = self._create_mock_request('http://www.example.com/test/path/?foo=bar')
mock_request_2 = self._create_mock_request('http://www.stackoverflow.com/other/path/?x=y')
mock_response = self._create_mock_resonse()
storage = RequestStorage(base_dir=self.base_dir)
request_id = storage.save_request(mock_request_1)
storage.save_response(request_id, mock_response)
storage.save_request(mock_request_2)
self.assertEqual(storage.find('/test/path/')['id'], request_id)
self.assertEqual(storage.find('/test/path/?foo=bar')['id'], request_id)
self.assertEqual(storage.find('http://www.example.com/test/path/?foo=bar')['id'], request_id)
self.assertEqual(storage.find('http://www.example.com/test/path/')['id'], request_id)
self.assertIsNone(storage.find('/different/path'))
self.assertIsNone(storage.find('/test/path/?x=y'))
self.assertIsNone(storage.find('http://www.example.com/different/path/?foo=bar'))
self.assertIsNone(storage.find('http://www.different.com/test/path/?foo=bar'))
self.assertIsNone(storage.find('http://www.example.com/test/path/?x=y'))
def _get_stored_path(self, request_id, filename):
return glob.glob(os.path.join(self.base_dir, '.seleniumwire', 'storage-*',
'request-{}'.format(request_id), filename))
def _create_mock_request(self, path='http://www.example.com/test/path/'):
mock_request = Mock()
mock_request.path = path
mock_request.command = 'GET'
headers = HTTPMessage()
headers.add_header('Host', 'www.example.com')
headers.add_header('Accept', '*/*')
mock_request.headers = headers
return mock_request
def _create_mock_resonse(self):
mock_response = Mock()
mock_response.status = 200
mock_response.reason = 'OK'
headers = HTTPMessage()
headers.add_header('Content-Type', 'application/json')
headers.add_header('Content-Length', '500')
mock_response.headers = headers
return mock_response
def setUp(self):
self.base_dir = os.path.join(os.path.dirname(__file__), 'data')
def tearDown(self):
shutil.rmtree(os.path.join(self.base_dir), ignore_errors=True)
|
[
"will@zifferent.com"
] |
will@zifferent.com
|
d2217e68e3197de61f832e9ef1b5fe09b9863383
|
3156e6e4a078052e9554c48f5037cf4e8e3ce4fb
|
/techpedia_project/wsgi.py
|
57d879363cfb1ac1df595a088915def95c81a36c
|
[] |
no_license
|
Rishav09/techpedia
|
18edc5d7edd6264963d680603cafdfa658be8267
|
3283251998530a8b09372c7f79ec2e6a7d844960
|
refs/heads/master
| 2021-01-19T23:13:46.501278
| 2017-05-26T11:46:20
| 2017-05-26T11:46:20
| 88,950,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
WSGI config for techpedia_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "techpedia_project.settings")
application = get_wsgi_application()
|
[
"rishavsapahia@gmail.com"
] |
rishavsapahia@gmail.com
|
0d106b6db7d554ad1d1969843cd053d447c9ee62
|
1d3f11a26595d232fb6d4ecb3522b79ca9ba0910
|
/train.py
|
339aebc581a23b10d18b43008eae91d4db00bd3e
|
[] |
no_license
|
LorSong/GenderClassification
|
fa47fd512965339238e4d526beadd56d05c8630a
|
524d6f0aae5a5d4279872e3eae4e4a9c17fb8988
|
refs/heads/master
| 2022-12-22T10:23:51.814804
| 2020-10-01T08:17:57
| 2020-10-01T08:17:57
| 295,976,091
| 0
| 0
| null | 2020-09-19T14:12:55
| 2020-09-16T08:47:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,512
|
py
|
import os
import sys
import numpy as np
# Silencing tensorflow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorflow_hub as hub
if tf.__version__ != "2.3.0":
print("TF version is not 2.3.0, behavior may not be correct")
# Silencing tensorflow deprecation warnings
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
def create_dataset(data_dir):
# Generator that performs data augmentation
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rotation_range=20,
width_shift_range=0.15,
height_shift_range=0.15,
horizontal_flip=True,
zoom_range=0.15,
fill_mode="constant",
cval=0) # Black padding
# Setup flow from directory
train_generator = train_datagen.flow_from_directory(
data_dir,
target_size=(96, 96),
batch_size=32,
class_mode='binary')
return train_generator
def warmup_scheduler(epoch, lr):
if epoch < 20:
return lr * 1.6
else:
return lr
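# Rough illustration of this schedule (assuming the 1e-7 Adam base rate set in
# train_and_save below and epochs running from 10 to 49): the rate is multiplied
# by 1.6 at the start of each of epochs 10-19, i.e. 1e-7 * 1.6**10 ~= 1.1e-5,
# and then stays constant for the remaining epochs.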
def train_and_save(data):
MODULE_HANDLE ="https://tfhub.dev/google/imagenet/mobilenet_v2_100_96/feature_vector/4"
# Loading MobilenetV2
base_model = hub.KerasLayer(MODULE_HANDLE, trainable=False)
inputs = tf.keras.layers.Input(shape=(96, 96, 3))
# Normalization of inputs
x = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)(inputs)
x = base_model(x, training=False)
x = tf.keras.layers.Dropout(rate=0.2)(x)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model(inputs, outputs)
# Training only top layer
print("Training first 10 epochs with freezed base model. 40 more epochs ahead")
optimizer = tf.keras.optimizers.SGD(lr=0.05, momentum=0.9, decay=0.01)
model.compile(optimizer=optimizer,
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
freezed_history = model.fit(data,
epochs=10,
verbose=1)
# Unfreezing model
base_model.trainable = True
# Changing optimizer and adding learning rate schedule
lr_scheduler = tf.keras.callbacks.LearningRateScheduler(warmup_scheduler)
model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-7),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=['accuracy'])
print("Unfreezing weights. Training full model for 40 epochs")
unfreezed_history = model.fit(data,
initial_epoch=10,
epochs=50,
callbacks=[lr_scheduler],
verbose=1)
# Saving model
model_path = "./model"
model.save(model_path)
# Uniting and saving histories
h1 = freezed_history.history
h2 = unfreezed_history.history
for key in h2:
if key != "lr":
h1[key].extend(h2[key])
np.save('history', h1)
print("Finished. Created model and history files.")
def main():
# Taking path argument
try:
path_to_images = sys.argv[1]
except:
path_to_images = "."
print("Path to images is not provided, looking in the current folder")
# Preventing memory errors with GPU (copied from TF documentation)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
else:
print("Failed to connect GPU. Training can be slow!")
dataset = create_dataset(path_to_images)
train_and_save(dataset)
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
LorSong.noreply@github.com
|
ebcf6416af5895de3d67cec6208e3c16f9a9dede
|
f3743bd1bec80159913243e0ba39f161db052ab0
|
/backend/app/alembic/versions/6894c5975cd5_join_3_tables_for_tags_users_rides.py
|
f43caafd27299be0b8e54ff357f18735145764a4
|
[] |
no_license
|
eric-do/helo-pelo
|
f8a4e95007ca5bdd26354fd2098d4f54ec4f4aac
|
a4714bb3dc183b96ba576735e0ef4203a6921c2f
|
refs/heads/master
| 2023-05-09T05:09:48.593538
| 2021-06-03T21:16:57
| 2021-06-03T21:16:57
| 347,272,759
| 0
| 0
| null | 2021-06-03T21:16:58
| 2021-03-13T04:34:29
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
"""Join 3 tables for tags users rides
Revision ID: 6894c5975cd5
Revises: abc4ccfa9c9c
Create Date: 2021-03-10 20:40:09.352601-08:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6894c5975cd5'
down_revision = 'abc4ccfa9c9c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user_ride_tag',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('ride_id', sa.Integer(), nullable=False),
sa.Column('tag_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['ride_id'], ['ride.id'], ),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('user_id', 'ride_id', 'tag_id'),
sa.UniqueConstraint('user_id', 'ride_id', 'tag_id')
)
op.drop_table('ride_tag')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('ride_tag',
sa.Column('ride_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('tag_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('tag_count', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['ride_id'], ['ride.id'], name='ride_tag_ride_id_fkey'),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name='ride_tag_tag_id_fkey'),
sa.PrimaryKeyConstraint('ride_id', 'tag_id', name='ride_tag_pkey')
)
op.drop_table('user_ride_tag')
# ### end Alembic commands ###
|
[
"ericdo.617@gmail.com"
] |
ericdo.617@gmail.com
|
95b50eb937a9c5c4d9c33679b54ac220d7720dd9
|
c55036e604c3a1a714301dd4ec6def16f7ead18c
|
/split_dataset.py
|
339ad1c633ebf4bb4ae4cfedb9a0b2b6a3be9e07
|
[] |
no_license
|
weizh888/ProductImageSegmentation
|
3326ab5a9ababd2afd9fdfb1e0665d805e3df499
|
be718cdbe26663220ce1ea994f325de0e3bacf04
|
refs/heads/master
| 2021-09-10T06:20:32.291629
| 2018-03-21T12:26:49
| 2018-03-21T12:26:49
| 113,643,096
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
np.random.seed(1)
full_labels = pd.read_csv('data/labels.csv')
grouped = full_labels.groupby('filename')
grouped.apply(lambda x: len(x)).value_counts()
print(grouped.apply(lambda x: len(x)).value_counts())
gb = full_labels.groupby('filename')
grouped_list = [gb.get_group(x) for x in gb.groups]
print('The total number of samples is {}.'.format(len(grouped_list)))
n_train_images = len(grouped_list) * 4 // 5  # integer division for an 80/20 split
n_test_images = len(grouped_list) - n_train_images
print('The number of training samples is {}.'.format(n_train_images))
print('The number of testing samples is {}.'.format(n_test_images))
train_index = np.random.choice(len(grouped_list), size=n_train_images,
replace=False)
test_index = np.setdiff1d(list(range(len(grouped_list))), train_index)
# build the train/test dataframes from the sampled indices
train = pd.concat([grouped_list[i] for i in train_index])
test = pd.concat([grouped_list[i] for i in test_index])
train.to_csv('data/train_labels.csv', index=None)
test.to_csv('data/test_labels.csv', index=None)
# Summary of training dataset and testing dataset
train_summary = train.groupby('class'
).size().reset_index(name='counts_train')
test_summary = test.groupby('class'
).size().reset_index(name='counts_test')
all_summary = pd.merge(train_summary, test_summary)
all_summary['total'] = all_summary.apply(lambda x: x['counts_train'] \
+ x['counts_test'], axis=1)
all_summary.to_csv('data/summary.csv', index=None)
print(all_summary)
|
[
"weizh888@gmail.com"
] |
weizh888@gmail.com
|
88d9128ceed61e034574acdbbdbc508324c444a6
|
69c29bd4b424b0e90ae9c439d29791f7011c993e
|
/Builder-Pattern/mycomputer_builder.py
|
996474f691a684e223e90eccfca3674df82cb961
|
[] |
no_license
|
mdizhar3103/Python-Design-Patterns
|
b3681aa23416d3ff5aae169f71819d37ccf910da
|
be477db006864e5c7e30a862765a4348f3113af6
|
refs/heads/main
| 2023-08-14T22:52:06.941745
| 2021-09-19T13:04:26
| 2021-09-19T13:04:26
| 403,862,000
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
from abs_builder import AbsBuilder
class MyComputerBuilder(AbsBuilder):
def get_case(self):
self._computer.case = "Coolermaster N300"
def build_mainboard(self):
self._computer.mainboard = "MSI 970"
self._computer.cpu = "Intel Core i7-4770"
self._computer.memory = "Corsair Vengeance 16GB"
def install_mainboard(self):
pass
def install_video_card(self):
self._computer.video_card = "GeForce GTX 1070"
def install_hard_drive(self):
self._computer.hard_drive = "Seagate 2TB"
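# Hypothetical usage sketch -- AbsBuilder is not shown here, so any director/helper
# names below are illustrative only, not the actual interface of this repository:
#     builder = MyComputerBuilder()
#     builder.get_case()
#     builder.build_mainboard()
#     builder.install_mainboard()
#     builder.install_video_card()
#     builder.install_hard_drive()
# In the classic builder pattern a director object drives these steps in order and
# then hands back the assembled self._computer product.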
|
[
"mdizhar3103@gmail.com"
] |
mdizhar3103@gmail.com
|
20a166f17484a3091f0d9551afab34b5a95bd3fd
|
903ba270c95a6aa9b4903484a2f0cc49ba82ea16
|
/Iniciante/1072.py
|
cfbdeffc0c007bdf5b3605701b3d289a8b1b5586
|
[] |
no_license
|
pedroheck/uri-online-judge-training
|
fb3a0b1388e0a9d7a4f959bc582474f952d6efcb
|
7a8ed57d5fab703dde523ac2d0a3d5afca06d267
|
refs/heads/main
| 2023-07-13T22:23:43.600520
| 2021-08-25T18:40:44
| 2021-08-25T18:40:44
| 398,272,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
n = int(input())
numeros = []
dentro, fora = 0, 0
for i in range(0, n):
numeros.append(int(input()))
if numeros[i] in range(10, 21):
dentro += 1
else:
fora += 1
print(dentro, " in\n", fora, " out", sep='')
|
[
"pedroscheck@hotmail.com"
] |
pedroscheck@hotmail.com
|
c260a43c4960371a37bba0d5dd8c8410caa61953
|
b0fb4008bf17616942d7eb6d526b95b0359bd118
|
/app/common/thread/get_meta_data_thread.py
|
3e4a521df6315b4feefbbbf00c39fba8cc227af1
|
[] |
no_license
|
hunye/Groove
|
d364d8ee79618b8f4722eec69d9cfb3562e41874
|
7b06c2530352dc5f159c0f9f674e469a305f741a
|
refs/heads/master
| 2023-08-28T15:21:52.626998
| 2021-10-25T14:49:33
| 2021-10-25T14:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,825
|
py
|
# coding:utf-8
import os
from common.meta_data_writer import writeAlbumCover, writeSongInfo
from common.crawler.qq_music_crawler import QQMusicCrawler
from PyQt5.QtCore import pyqtSignal, QThread
class GetMetaDataThread(QThread):
""" 获取歌曲元数据线程 """
crawlSignal = pyqtSignal(str)
def __init__(self, folderPaths: list, parent=None):
super().__init__(parent=parent)
self.__isStopped = False
self.folderPaths = folderPaths
self.crawler = QQMusicCrawler()
def run(self):
""" 获取歌曲元数据 """
# 创建一个本地专辑封面缓存文件夹
cover_folder = 'crawl_album_covers'
os.makedirs(cover_folder, exist_ok=True)
albumCovers = {}
songPaths, fileNames = self.__getAudioFiles()
for i, (songPath, fileName) in enumerate(zip(songPaths, fileNames)):
if self.__isStopped:
break
songInfo = self.crawler.getSongInfo(fileName)
if songInfo:
                # Update the song information
songInfo["songPath"] = songPath
writeSongInfo(songInfo)
key = songInfo["singer"]+'_'+songInfo['album']
                # Get the album cover from the web or from the local cache folder
if key not in albumCovers:
coverPath = f'{cover_folder}/{key}.jpg'
url = self.crawler.getAlbumCoverURL(
songInfo["albummid"], coverPath)
if url:
albumCovers[key] = coverPath
writeAlbumCover(songPath, coverPath)
else:
coverPath = albumCovers[key]
writeAlbumCover(songPath, coverPath)
            # Emit the progress signal
text = self.tr("Current progress: ")
self.crawlSignal.emit(text+f"{(i+1)/len(songPaths):>3.0%}")
def stop(self):
""" 停止爬取歌曲信息 """
self.__isStopped = True
def __getAudioFiles(self):
""" 获取音频文件路径和不包含后缀名的文件名
Parameters
----------
folderPaths: list
文件夹列表
Returns
-------
songPaths: list
歌曲路径列表
fileNames: list
不含后缀名的歌曲文件名称列表
"""
songPaths = []
fileNames = []
for folderPath in self.folderPaths:
files = os.listdir(folderPath)
for file in files:
if file.endswith(('.mp3', '.flac', '.m4a')):
songPaths.append(os.path.join(
folderPath, file).replace('\\', '/'))
fileNames.append(os.path.splitext(file)[0])
return songPaths, fileNames
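# Illustrative usage sketch from the GUI thread (the parent window and the slot
# receiving the progress string are hypothetical names, not part of this module):
#     thread = GetMetaDataThread(['D:/Music'], parent=mainWindow)
#     thread.crawlSignal.connect(mainWindow.onCrawlProgress)  # any slot taking a str
#     thread.start()
#     ...
#     thread.stop()  # request the crawl loop to exit early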
|
[
"1319158137@qq.com"
] |
1319158137@qq.com
|
196f2bbc75b037236f40aa7b8286b7e49495ecb6
|
db1aa4a65017f4b06b9762d683fa26cf01ac06c1
|
/venv/bin/pip2.7
|
6e3a768cbc6c72aae70bed0931133ed040cfe4d4
|
[] |
no_license
|
oumar90/FlaskApp1
|
119b87dcb27b2fd124ce1d4a3f5cf0d4fb1efcf0
|
81b69ce3d0007cf1fd917a3406aef47cad994965
|
refs/heads/master
| 2021-09-03T04:11:46.757154
| 2018-01-05T13:35:52
| 2018-01-05T13:35:52
| 116,170,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
7
|
#!/var/www/html/FlaskApp/FlaskApp1/venv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"oudjira90@gmail.com"
] |
oudjira90@gmail.com
|
8f9f4a16e1e15f1351ada271a24b1c294bcd6f0c
|
2df82996d274b5e49e5d4a1b520cebe1b19a3639
|
/LocalBigData/County_Wise/county_aqi_predict.py
|
276400d37db3b064dca8081803073259e70e3f23
|
[] |
no_license
|
amandeepkapoor/AQI_Prediction_for_US
|
fce65ed74da912ea6c074f650f200c06bcbed163
|
6fb5d9c937add226bc981ac3ab4ad1a861f16cc5
|
refs/heads/master
| 2020-03-28T20:35:58.652810
| 2018-09-17T07:33:38
| 2018-09-17T07:33:38
| 149,085,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,987
|
py
|
import sys, os
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, types, functions
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler, StringIndexer, SQLTransformer
from pyspark.ml.regression import LinearRegression, RandomForestRegressor, GBTRegressor, DecisionTreeRegressor
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql import SparkSession
os.environ["PYSPARK_PYTHON"] = "python3"
os.environ["PYSPARK_DRIVER_PYTHON"] = "python3"
cluster_seeds = ['199.60.17.171', '199.60.17.188']
conf = SparkConf().setAppName('example code') \
.set('spark.cassandra.connection.host', ','.join(cluster_seeds))
spark = SparkSession.builder.appName('Big Data Project').getOrCreate()
sc = spark.sparkContext
assert sys.version_info >= (3, 4) # make sure we have Python 3.4+
assert spark.version >= '2.2' # make sure we have Spark 2.2+
inputs = '/home/ldua/Desktop/County/max_value_combined/county_max_value_combined.csv'#sys.argv[1]
output = '/home/ldua/Desktop/County/predicted_aqi/'
def aqi_cal(val,aqilevel,gaslevel):
length = len(gaslevel)
for i in range(len(gaslevel)):
if (val < gaslevel[i]):
num1 = val - gaslevel[i - 1]
num2 = aqilevel[i] - aqilevel[i - 1]
den = gaslevel[i] - gaslevel[i - 1]
aqival = ((num1 * num2) / den) + aqilevel[i - 1]
break
else:
if (val >= gaslevel[length-1]):
aqival = aqilevel[length-1]-1
break
return aqival
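# Illustrative check of the piecewise-linear interpolation above, using the ozone
# breakpoints defined in transform() below: for val = 0.060 the loop stops at the
# 0.055-0.071 segment, so aqi = (0.060 - 0.055) * (101 - 51) / (0.071 - 0.055) + 51
# ~= 66.6.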
# def aqi_so2(val):
#
# return aqi
def transform(line):
val = line.split(',')
if val[0] == 'county_code':
return (val[0],val[1],val[2],val[3],val[4],val[5],val[6],val[7],val[8],'global_aqi')
#return line+',Global_AQI'
else:
aqi_level = [0,51,101,151,201,301,401,500]
ozone_level = [0,.055,.071,.086,.106,.201]
so_level = [0,36,76,186,305,605,805,1005]
co_level = [0,4.5,9.5,12.5,15.5,30.5,40.5,50.5]
no_level = [0,54,101,361,650,1250,1650,2050]
pm_level = [0,12.1,35.5,55.5,150.5,250.5,350.5,500.5]
aqi_oz = aqi_cal(float(val[3]),aqi_level,ozone_level)
aqi_so = aqi_cal(float(val[4]), aqi_level, so_level)
aqi_co = aqi_cal(float(val[5]), aqi_level, co_level)
aqi_no = aqi_cal(float(val[6]), aqi_level, no_level)
aqi_pma = aqi_cal(float(val[7]), aqi_level, pm_level)
aqi_pmb = aqi_cal(float(val[8]), aqi_level, pm_level)
# val[3] = float(val[3])
# val[4] = float(val[4])
# val[5] = float(val[5])
# val[6] = float(val[6])
# for i in range(len(ozone_level)):
# if(val[3]< ozone_level[i]):
# num1 = val[3] - ozone_level[i-1]
# num2 = aqi_level[i] - aqi_level[i-1]
# den = ozone_level[i] - ozone_level[i-1]
# aqi_oz = ((num1 * num2)/den)+aqi_level[i-1]
# break
# else:
# if(val[3] >= ozone_level[5]):
# aqi_oz = 300
# break
#
# for i in range(len(so_level)):
# if (val[4] < so_level[i]):
# num1 = val[4] - so_level[i - 1]
# num2 = aqi_level[i] - aqi_level[i - 1]
# den = so_level[i] - so_level[i - 1]
# aqi_so = ((num1 * num2) / den) + aqi_level[i - 1]
# break
# else:
# if (val[4] > so_level[7]):
# aqi_so = 500
# break
#
# for i in range(len(co_level)):
# if (val[5] < co_level[i]):
# num1 = val[5] - co_level[i - 1]
# num2 = aqi_level[i] - aqi_level[i - 1]
# den = co_level[i] - co_level[i - 1]
# aqi_co = ((num1 * num2) / den) + aqi_level[i - 1]
# break
# else:
# if (val[5] > co_level[7]):
# aqi_co = 500
# break
#
# for i in range(len(no_level)):
# if (val[6] < no_level[i]):
# num1 = val[6] - no_level[i - 1]
# num2 = aqi_level[i] - aqi_level[i - 1]
# den = no_level[i] - no_level[i - 1]
# aqi_no = ((num1 * num2) / den) + aqi_level[i - 1]
# break
# else:
# if (val[6] > no_level[7]):
# aqi_no = 500
# break
glo = [aqi_no,aqi_so,aqi_oz,aqi_co,aqi_pma,aqi_pmb]
return (val[0],val[1],val[2],aqi_oz,aqi_so,aqi_co,aqi_no,aqi_pma,aqi_pmb,max(glo))
# explicit_schema = types.StructType([types.StructField('State Code', types.IntegerType(), True),
# types.StructField('Month', types.IntegerType(), True),
# types.StructField('Year',types.IntegerType(), True),
# types.StructField('AM_Predicted_44201', types.DoubleType(), True),
# types.StructField('AM_Predicted_42401', types.DoubleType(), True)])
#State Code,Year,Month,AM_Predicted_44201,AM_Predicted_42401
#Row(State Code=1, Month=2011, Year=1, AM_Predicted_44201=0.02665985549600323, AM_Predicted_42401=1.6022149730848756)
#training = sc.textFile("/home/ldua/Desktop/BigDataProject/Output/AQI/part-00000-e88f6806-9bdc-4906-84f7-0647e9a022d8-c000.csv")
#training = spark.read.csv("/home/ldua/Desktop/BigDataProject/Output/AQI/part-00000-e88f6806-9bdc-4906-84f7-0647e9a022d8-c000.csv", header= True, schema= explicit_schema)
#aqi = training.map(transform)
training = sc.textFile(inputs)
#temp = training.rdd
aqi = training.map(transform)
header = aqi.first()
data = aqi.filter(lambda row : row != header).toDF(header)
data.show()
data.coalesce(1).write.csv('/home/ldua/Desktop/County/predicted_aqi', sep=',', header=True)
#print(aqi.collect())
#training.show(
|
[
"aman0609@gmail.com"
] |
aman0609@gmail.com
|
90183dd66049d8068183095a0e87e5343aca7ef3
|
6cdfe80ca94f52191f13f5684794b21b1e4747c1
|
/day6/day6.py
|
b01b570b1d9930b4f7aee46a283f24a806232e1f
|
[] |
no_license
|
TheFunctionalGuy/adventofcode
|
0ce3515eba463817ebb239e045710e9da573c98f
|
dcc614a2d2bef54497ae7c5cf0edeececbba55a2
|
refs/heads/master
| 2020-09-22T11:22:11.864866
| 2019-12-11T20:46:16
| 2019-12-11T20:49:24
| 225,173,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,993
|
py
|
from typing import Dict
from anytree import Node, PreOrderIter
# Solution for: https://adventofcode.com/2019/day/6
def count_orbits():
with open('input.txt', mode='r') as input_file:
# Part one
lines = [line.rstrip() for line in input_file]
nodes = {}
for line in lines:
orbits = line.split(')')
# Create new node or get existing node
if orbits[0] not in nodes:
node_1 = Node(orbits[0])
nodes[orbits[0]] = node_1
else:
node_1 = nodes[orbits[0]]
# Create new node or get existing node
if orbits[1] not in nodes:
node_2 = Node(orbits[1], parent=node_1)
nodes[orbits[1]] = node_2
else:
nodes[orbits[1]].parent = node_1
        # Traverse the tree
number_of_ancestors = [len(node.ancestors) for node in PreOrderIter(nodes['COM'])]
number_of_orbits = sum(number_of_ancestors)
print(f'The total number of orbits is: {number_of_orbits}')
# Part two
get_number_of_orbital_transfers_required(nodes)
def get_number_of_orbital_transfers_required(nodes: Dict[str, Node]):
# Get connection node
you_path = nodes['YOU'].path
santa_path = nodes['SAN'].path
intersected_path = set(you_path).intersection(set(santa_path))
# Get path length towards connection node
path_length = 0
for node in intersected_path:
if path_length < len(node.ancestors):
path_length = len(node.ancestors)
path_from_connection_node_to_you = list(filter(lambda x: len(x.ancestors) > path_length, you_path))
path_from_connection_node_to_santa = list(filter(lambda x: len(x.ancestors) > path_length, santa_path))
print(f'Minimum number of orbital transfers is: '
f'{len(path_from_connection_node_to_you) + len(path_from_connection_node_to_santa) - 2}')
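# Illustrative trace of the transfer count: with the map COM)B, B)C, C)D, B)YOU,
# D)SAN the common ancestors of YOU and SAN are {COM, B}, the deepest one (B) has
# 1 ancestor, YOU's path keeps 1 node deeper than that and SAN's path keeps 3,
# giving 1 + 3 - 2 = 2 transfers (B -> C -> D).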
if __name__ == '__main__':
count_orbits()
|
[
"jvesper95@gmail.com"
] |
jvesper95@gmail.com
|
2cdad8f3013b066d06ef7a8f532a32106d81ba9c
|
913e24ea110f839c73363bc1aac9673e561fa5f8
|
/widowx_ros_packages/arbotix_ros/arbotix_controllers/bin/one_side_gripper_controller.py
|
ccba46afcab9bd3a34ddf04ca83e2d1e08759637
|
[
"MIT"
] |
permissive
|
PierreExeter/WidowX-reacher
|
24e2b3f72e9aec24a9a61e6a8958c200e0dbe893
|
560c6779dc91a887191f344c43de24926ba75b4d
|
refs/heads/master
| 2023-03-06T13:48:12.810858
| 2021-02-22T15:36:52
| 2021-02-22T15:36:52
| 264,480,232
| 4
| 0
|
MIT
| 2021-02-22T15:27:44
| 2020-05-16T16:36:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,250
|
py
|
#!/usr/bin/env python
"""
one_side_gripper_controller.py - controls a gripper built with one servo
Copyright (c) 2011 Vanadium Labs LLC. All right reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Vanadium Labs LLC nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import rospy
import thread
from std_msgs.msg import Float64
from math import asin
class OneSideGripperController:
""" A simple controller that operates a servos to
open/close to a particular size opening. """
def __init__(self):
rospy.init_node("one_side_gripper_controller")
rospy.logwarn("one_side_gripper_controller.py is deprecated and will be removed in ROS Indigo, please use gripper_controller")
self.pad_width = rospy.get_param("~pad_width", 0.01)
self.finger_length = rospy.get_param("~finger_length", 0.02)
self.center = rospy.get_param("~center", 0.0)
self.invert = rospy.get_param("~invert", False)
# publishers
self.pub = rospy.Publisher("gripper_joint/command", Float64, queue_size=5)
# subscribe to command and then spin
rospy.Subscriber("~command", Float64, self.commandCb)
rospy.spin()
def commandCb(self, msg):
""" Take an input command of width to open gripper. """
# check limits
#if msg.data > self.max_opening or msg.data < self.min_opening:
# rospy.logerr("Command exceeds limits.")
# return
# compute angle
angle = asin((msg.data - self.pad_width)/(2*self.finger_length))
# publish message
if self.invert:
self.pub.publish(-angle + self.center)
else:
self.pub.publish(angle + self.center)
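# Worked example with the default parameters above (pad_width=0.01, finger_length=0.02,
# center=0.0, invert=False): a commanded opening of 0.03 m gives
# angle = asin((0.03 - 0.01) / (2 * 0.02)) = asin(0.5) ~= 0.524 rad, which is the
# value published to gripper_joint/command.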
if __name__=="__main__":
try:
OneSideGripperController()
except rospy.ROSInterruptException:
rospy.loginfo("Hasta la Vista...")
|
[
"pierre.aumjaud@gmail.com"
] |
pierre.aumjaud@gmail.com
|
ec72ba07483ae3889ec827f95dc7a8cc4c03a7f8
|
252575ae85fb3c1bd754c6812c63ab8339a0c47b
|
/wine.py
|
df3f106d4c30571c1d2f3ecfb240a260dd7a95a4
|
[] |
no_license
|
Shally1130/CS7641-assignment3
|
ca11243ab2c19907bfaf5733b211ecd23378d093
|
a3b72a808de3465dd2e72e887de028c45800c4d8
|
refs/heads/master
| 2020-04-04T15:18:24.839408
| 2018-11-03T23:33:19
| 2018-11-03T23:33:19
| 156,032,615
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,095
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
from sklearn.random_projection import GaussianRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics import silhouette_samples, silhouette_score
#################################################
#Data set 1: wine quality data set
data = pd.read_csv('winequality.csv')
X = data.iloc[:,:11]
y = data.iloc[:,11]
features = list(X.columns.values)
scaler = MinMaxScaler(feature_range=[0,100])
scaler.fit(X)
X_norm = pd.DataFrame(scaler.transform(X))
print(X_norm)
#################################################
#K means clustering
range_n_clusters = [2,4,6,8,10]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X_norm) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_norm)
cluster_labels = clusterer.labels_
print("NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X_norm, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X_norm, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
cmap = cm.get_cmap("Spectral")
color = cmap(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
ax2.scatter( X_norm.iloc[:, 10], X_norm.iloc[:, 8], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 10], centers[:, 8], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter( c[10], c[8], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
#################################################
#Expectation Maximization clustering
for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_norm)
cluster_labels = clusterer.predict(X_norm)
print("NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# 2nd Plot showing the actual clusters formed
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_norm.iloc[:, 10], X_norm.iloc[:, 8], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = clusterer.means_
# Draw white circles at cluster centers
plt.scatter(centers[:, 10], centers[:, 8], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[10], c[8], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
#################################################
#PCA feature transformation
pca = PCA(n_components=11, random_state=10)
X_r = pca.fit(X).transform(X)
X_pca = X_r
print('explained variance ratio (all components): %s'
% str(pca.explained_variance_ratio_))
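# Optional, purely illustrative check of how much variance the leading components
# capture cumulatively (not used below):
# print(np.cumsum(pca.explained_variance_ratio_))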
plt.figure()
colors = ["b","g","r","c","m","y","k"]
lw = 2
for color, i in zip(colors, [4,8]):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=i)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of Wine Quality dataset')
#################################################
#ICA feature transformation
ica = FastICA(n_components=11, random_state=10)
X_r = ica.fit(X).transform(X)
X_ica = X_r
plt.figure()
colors = ["b","g","r","c","m","y","k"]
lw = 2
for color, i in zip(colors, [4,8]):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=i)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('ICA of Wine Quality dataset')
#################################################
#Random Projection feature transformation
rca = GaussianRandomProjection(n_components=11, random_state=10)
X_r = rca.fit_transform(X)
X_rca = X_r
plt.figure()
colors = ["b","g","r","c","m","y","k"]
lw = 2
for color, i in zip(colors, [4,8]):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=i)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('Random Projection of Wine Quality dataset')
#################################################
#Univariate feature selection (K best)
from sklearn.feature_selection import chi2
from sklearn.feature_selection import mutual_info_classif
X_new = SelectKBest(chi2, k=5).fit_transform(X, y)
X_fs = X_new
plt.figure()
colors = ["b","g","r","c","m","y","k"]
lw = 2
for color, i in zip(colors, [4,8]):
plt.scatter(X_new[y == i, 4], X_new[y == i, 0], color=color, alpha=.8, lw=lw, label=i)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('Chi square feature selection of Wine Quality dataset')
plt.show()
#################################################
#Rerun clustering on transformed features
# range_n_clusters = [2,4,6,8,10]
# X_test=pd.DataFrame(X_pca)
# n_clusters = 6
# # for n_clusters in range_n_clusters:
# fig = plt.gcf()
# fig.set_size_inches(7, 7)
# ax = fig.add_subplot(111)
# clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
# cluster_labels = clusterer.labels_
# silhouette_avg = silhouette_score(X_test, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# print("The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# cmap = cm.get_cmap("Spectral")
# colors = cmap(cluster_labels.astype(float) / n_clusters)
# ax.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# centers = clusterer.cluster_centers_
# ax.scatter(centers[:, 0], centers[:, 1], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("KMeans clustering using PCA feature transformation "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
# plt.show()
#################################################################
# n_clusters = 6
# X_test=pd.DataFrame(X_ica)
# # for n_clusters in range_n_clusters:
# fig = plt.gcf()
# fig.set_size_inches(7, 7)
# ax = fig.add_subplot(111)
# clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
# cluster_labels = clusterer.labels_
# silhouette_avg = silhouette_score(X_test, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# print("The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# cmap = cm.get_cmap("Spectral")
# colors = cmap(cluster_labels.astype(float) / n_clusters)
# ax.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# centers = clusterer.cluster_centers_
# ax.scatter(centers[:, 0], centers[:, 1], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("KMeans clustering using ICA feature transformation "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
# plt.show()
# # ###################################################################
# n_clusters = 6
# X_test=pd.DataFrame(X_fs)
# # for n_clusters in range_n_clusters:
# fig = plt.gcf()
# fig.set_size_inches(7, 7)
# ax = fig.add_subplot(111)
# clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
# cluster_labels = clusterer.labels_
# silhouette_avg = silhouette_score(X_test, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# print("The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# cmap = cm.get_cmap("Spectral")
# colors = cmap(cluster_labels.astype(float) / n_clusters)
# ax.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# centers = clusterer.cluster_centers_
# ax.scatter(centers[:, 0], centers[:, 1], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("KMeans clustering using feature selection transformation "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
# plt.show()
# # ###################################################################
# n_clusters = 6
# X_test=pd.DataFrame(X_rca)
# # for n_clusters in range_n_clusters:
# fig = plt.gcf()
# fig.set_size_inches(7, 7)
# ax = fig.add_subplot(111)
# clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
# cluster_labels = clusterer.labels_
# silhouette_avg = silhouette_score(X_test, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# print("The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# cmap = cm.get_cmap("Spectral")
# colors = cmap(cluster_labels.astype(float) / n_clusters)
# ax.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# centers = clusterer.cluster_centers_
# ax.scatter(centers[:, 0], centers[:, 1], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("KMeans clustering using RCA transformation "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
# plt.show()
###################################################################
n_clusters = 6
X_test=pd.DataFrame(X_rca)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.predict(X_test)
print("RCA NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.means_
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on RCA data "
"with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
plt.show()
##################################################################
n_clusters = 6
X_test=pd.DataFrame(X_ica)
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.predict(X_test)
print("ICA NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.means_
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on ICA data "
"with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
plt.show()
##################################################################
n_clusters = 6
X_test=pd.DataFrame(X_fs)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.predict(X_test)
print("FS NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.means_
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on feature selection data "
"with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
plt.show()
#####################################################
n_clusters = 6
X_test=pd.DataFrame(X_pca)
# for n_clusters in range_n_clusters:
fig = plt.gcf()
fig.set_size_inches(7, 7)
ax = fig.add_subplot(111)
clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
cluster_labels = clusterer.predict(X_test)
print("PCA NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
cmap = cm.get_cmap("Spectral")
colors = cmap(cluster_labels.astype(float) / n_clusters)
plt.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
centers = clusterer.means_
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Clusters plot for EM clustering on PCA data "
"with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
plt.show()
#################################################
#Rerun ANN on transformed features
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
plt.show()
clf = MLPClassifier(hidden_layer_sizes=(20, 5), random_state=0, solver="lbfgs")
plot_learning_curve(clf, "MLP using PCA transformed features", X_pca, y, ylim=[0,1])
plot_learning_curve(clf, "MLP using ICA transformed features", X_ica, y, ylim=[0,1])
plot_learning_curve(clf, "MLP using RCA transformed features", X_rca, y, ylim=[0,1])
plot_learning_curve(clf, "MLP using Selected 5 features", X_fs, y, ylim=[0,1])
#################################################
#Rerun ANN on transformed features with clusters new feature
clf = MLPClassifier(hidden_layer_sizes=(20, 5), random_state=0, solver="lbfgs")
clusterer = KMeans(n_clusters=10, random_state=10).fit(X_pca)
y_kmeans = clusterer.labels_
X_df = pd.DataFrame(X_pca)
X_df[11] = y_kmeans
plot_learning_curve(clf, "MLP using PCA transformed features", X_df, y, ylim=[0,1])
clusterer = KMeans(n_clusters=10, random_state=10).fit(X_ica)
y_kmeans = clusterer.labels_
X_df = pd.DataFrame(X_ica)
X_df[11] = y_kmeans
plot_learning_curve(clf, "MLP using ICA transformed features", X_df, y, ylim=[0,1])
clusterer = KMeans(n_clusters=10, random_state=10).fit(X_rca)
y_kmeans = clusterer.labels_
X_df = pd.DataFrame(X_rca)
X_df[11] = y_kmeans
plot_learning_curve(clf, "MLP using RCA transformed features", X_df, y, ylim=[0,1])
clusterer = KMeans(n_clusters=10, random_state=10).fit(X_fs)
y_kmeans = clusterer.labels_
X_df = pd.DataFrame(X_fs)
X_df[11] = y_kmeans
plot_learning_curve(clf, "MLP using selected 5 features", X_df, y, ylim=[0,1])
#################################################
# #Data set 2: Gene expression data set
# from sklearn.preprocessing import quantile_transform
# data = pd.read_csv('sle_data.csv')
# X = data.iloc[:, 1:5090]
# y = np.append(np.repeat("HC",34), np.repeat("Disease",42))
# features = list(X.columns.values)
# scaler = MinMaxScaler(feature_range=[0,100])
# scaler.fit(X)
# X_norm = pd.DataFrame(quantile_transform(X))
# #################################################
# #Clustering, K means and EM
# range_n_clusters = list(range(1,20))
# sse = []
# nmi = []
# for n_clusters in range_n_clusters:
# clusterer = KMeans(n_clusters=n_clusters, random_state=0).fit(X)
# cluster_labels = clusterer.labels_
# sse.append(clusterer.inertia_)
# nmi.append(normalized_mutual_info_score(y, cluster_labels))
# plt.plot(range_n_clusters, sse, 'bx-')
# plt.xlabel('k')
# plt.ylabel('Sum of Squared Errors')
# plt.title('The Elbow Method showing the optimal k')
# plt.show()
# plt.plot(range_n_clusters, nmi, 'bx-')
# plt.xlabel('k')
# plt.ylabel('Normalized Mutual Information')
# plt.title('The NMI metric showing the optimal k')
# plt.show()
# range_n_clusters = list(range(1,6))
# nmi = []
# for n_clusters in range_n_clusters:
# clusterer = GaussianMixture(n_components=n_clusters, random_state=0).fit(X)
# cluster_labels = clusterer.predict(X)
# nmi.append(normalized_mutual_info_score(y, cluster_labels))
# plt.plot(range_n_clusters, nmi, 'bx-')
# plt.xlabel('N components')
# plt.ylabel('Normalized Mutual Information')
# plt.title('The NMI metric showing EM clustering')
# plt.show()
# n_clusters=3
# clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X)
# cluster_labels = clusterer.predict(X)
# print("NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# # 2nd Plot showing the actual clusters formed
# cmap = cm.get_cmap("Spectral")
# colors = cmap(y.astype(float) / n_clusters)
# plt.scatter( X.iloc[:, 3], X.iloc[:, 7], marker='.', s=90, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# # Labeling the clusters
# centers = clusterer.means_
# # Draw white circles at cluster centers
# #plt.scatter(centers[:, 3], centers[:, 7], marker='o',
# # c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[3], c[7], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("EM clustering on raw sample data "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
# plt.show()
# #################################################
# #PCA Feature transformation
# pca = PCA(n_components=10, random_state=10)
# X_r = pca.fit(X).transform(X)
# X_pca = X_r
# print('explained variance ratio (first two components): %s'
# % str(pca.explained_variance_ratio_))
# plt.figure()
# colors = ["b","g","r","c","m","y","k"]
# lw = 2
# for color, i in zip(colors, ["HC","Disease"]):
# plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=i)
# plt.legend(loc='best', shadow=False, scatterpoints=1)
# plt.title('PCA of Disease/Health data set')
# #################################################
# #ICA Feature transformation
# ica = FastICA(n_components=10, random_state=10)
# X_r = ica.fit(X).transform(X)
# X_ica = X_r
# plt.figure()
# colors = ["b","g","r","c","m","y","k"]
# lw = 2
# for color, i in zip(colors, ["HC","Disease"]):
# plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=i)
# plt.legend(loc='best', shadow=False, scatterpoints=1)
# plt.title('ICA of Disease/Health data set')
# #################################################
# #Random Projection feature transformation
# rca = GaussianRandomProjection(n_components=10, random_state=10)
# X_r = rca.fit_transform(X)
# X_rca = X_r
# plt.figure()
# colors = ["b","g","r","c","m","y","k"]
# lw = 2
# for color, i in zip(colors, ["HC","Disease"]):
# plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=i)
# plt.legend(loc='best', shadow=False, scatterpoints=1)
# plt.title('Random Projection of Disease/Health data set')
# #################################################
# #Univariate feature selection (K best)
# X_new = SelectKBest(chi2, k=10).fit_transform(X, y)
# X_fs = X_new
# plt.figure()
# colors = ["b","g","r","c","m","y","k"]
# lw = 2
# for color, i in zip(colors, ["HC","Disease"]):
# plt.scatter(X_new[y == i, 1], X_new[y == i, 0], color=color, alpha=.8, lw=lw, label=i)
# plt.legend(loc='best', shadow=False, scatterpoints=1)
# plt.title('Chi square feature selection of Disease/Health data set')
# #################################################
# #Rerun clustering on transformed features
# range_n_clusters = [2,3,4,5,6]
# X_test=pd.DataFrame(X_fs)
# for n_clusters in range_n_clusters:
# fig = plt.gcf()
# fig.set_size_inches(7, 7)
# ax = fig.add_subplot(111)
# clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)
# cluster_labels = clusterer.labels_
# silhouette_avg = silhouette_score(X_test, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# print("The NMI score is: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# cmap = cm.get_cmap("Spectral")
# colors = cmap(cluster_labels.astype(float) / n_clusters)
# ax.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=200, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# centers = clusterer.cluster_centers_
# ax.scatter(centers[:, 0], centers[:, 1], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("KMeans clustering using Selected 10 genes "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
# plt.show()
# X_test=pd.DataFrame(X_fs)
# for n_clusters in range_n_clusters:
# fig = plt.gcf()
# fig.set_size_inches(7, 7)
# ax = fig.add_subplot(111)
# clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)
# cluster_labels = clusterer.predict(X_test)
# print("NMI score: %.6f" % normalized_mutual_info_score(y, cluster_labels))
# cmap = cm.get_cmap("Spectral")
# colors = cmap(cluster_labels.astype(float) / n_clusters)
# plt.scatter( X_test.iloc[:, 0], X_test.iloc[:, 1], marker='.', s=30, lw=0, alpha=0.7,
# c=colors, edgecolor='k')
# centers = clusterer.means_
# plt.scatter(centers[:, 0], centers[:, 1], marker='o',
# c="white", alpha=1, s=200, edgecolor='k')
# for i, c in enumerate(centers):
# ax.scatter( c[0], c[1], marker='$%d$' % i, alpha=1,
# s=50, edgecolor='k')
# ax.set_title("The visualization of the clustered data.")
# ax.set_xlabel("Feature space for the 1st feature")
# ax.set_ylabel("Feature space for the 2nd feature")
# plt.suptitle(("Clusters plot for EM clustering on PCA data "
# "with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold')
# plt.show()
# #################################################
# #Rerun ANN on transformed features
# clf = MLPClassifier(hidden_layer_sizes=(20, 5), random_state=0, solver="lbfgs")
# plot_learning_curve(clf, "MLP using FS transformed expression", X_fs, y, ylim=[0,1])
# clf = MLPClassifier(hidden_layer_sizes=(20, 5), random_state=0, solver="lbfgs")
# clusterer = KMeans(n_clusters=6, random_state=10).fit(X_pca)
# y_kmeans = clusterer.labels_
# X_df = pd.DataFrame(X_pca)
# X_df[11] = y_kmeans
# plot_learning_curve(clf, "MLP using PCA transformed features", X_df, y, ylim=[0,1])
|
[
"noreply@github.com"
] |
Shally1130.noreply@github.com
|
cd32d9d2c2d3031d15b301e0fbba6be7e552c401
|
b985f1abc806f7cf4962374140668aa65e330a71
|
/pages/transition.py
|
58dc84bb39840ae7d0e60944c2f2fa2623a51c35
|
[] |
no_license
|
Decentorage/User-Node
|
2665862706130c1bc14f2a7248a3ed0c0603088a
|
30afa71ac68a4bea73d28796186be390d00dd8c5
|
refs/heads/main
| 2023-06-26T11:21:00.136614
| 2021-07-27T13:44:13
| 2021-07-27T13:44:13
| 357,921,279
| 0
| 0
| null | 2021-07-27T13:44:14
| 2021-04-14T13:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
from PyQt5 import QtCore, QtWidgets
class Transition(QtWidgets.QWidget):
# Signals
okay_switch = QtCore.pyqtSignal()
def __init__(self, ui, helper):
QtWidgets.QWidget.__init__(self)
self.ui = ui
self.helper = helper
# Connectors
self.ui.transition_okay_pb.clicked.connect(self.okay_pressed)
def okay_pressed(self):
self.okay_switch.emit()
|
[
"amr.ahmed.abdelbaqi@gmail.com"
] |
amr.ahmed.abdelbaqi@gmail.com
|
fcd886b1b6502bdaedb01d6a0154932c1d898228
|
7d7d8f79e8bae80a8c99240b158c6f3d2abbf94d
|
/election/migrations/0003_auto_20190319_2337.py
|
a6df03312c9f5673d2129268fbf5d3f008f101e9
|
[
"MIT"
] |
permissive
|
ecss-soton/ecssweb
|
feeb208a504bc80b9453ba306c51cae6da3718cd
|
06ddda86863ddb85e5da39a6f7b7fb29af902b16
|
refs/heads/master
| 2022-12-16T02:59:45.147472
| 2022-12-11T22:13:04
| 2022-12-11T22:13:04
| 133,257,221
| 4
| 3
|
MIT
| 2022-12-11T22:13:06
| 2018-05-13T16:58:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
# Generated by Django 2.1.2 on 2019-03-19 23:37
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('election', '0002_auto_20190310_1218'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True)),
('position', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='election.Position')),
],
),
migrations.CreateModel(
name='Voter',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('username', models.CharField(max_length=50)),
('position', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='election.Position')),
],
),
migrations.CreateModel(
name='VoteRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rank', models.IntegerField()),
('nomination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='election.Nomination')),
('vote', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='election.Vote')),
],
),
migrations.AlterUniqueTogether(
name='voter',
unique_together={('username', 'position')},
),
]
|
[
"i@cjxol.com"
] |
i@cjxol.com
|
8e2d8002d1adb3ba31abe658807413d2afd3505e
|
6092d481d042ae9383454f29567be7f4d0847fd9
|
/3vm-demo/trex/v2.35/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_conn.py
|
e64ddbd9053a357deda37d89225bd8de38546e6a
|
[] |
no_license
|
ilsffun19/ovs-dpdk
|
02971cc31190eb70264499dbdd3fb20cb3fd1b8f
|
d760f1fd8f76513caa665a6dbec65ce1f0c1ecc7
|
refs/heads/master
| 2020-04-03T17:54:27.838112
| 2020-03-30T19:20:37
| 2020-03-30T19:20:37
| 155,463,472
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,679
|
py
|
from .trex_stl_types import *
from .trex_stl_jsonrpc_client import JsonRpcClient, BatchMessage, ErrNo as JsonRpcErrNo
from .trex_stl_async_client import CTRexAsyncClient
import time
import signal
import os
############################ RPC layer #############################
############################ #############################
############################ #############################
class CCommLink(object):
"""Describes the connectivity of the stateless client method"""
def __init__(self, server="localhost", port=5050, virtual=False, client = None):
self.server = server
self.port = port
self.rpc_link = JsonRpcClient(self.server, self.port, client)
# API handler provided by the server
self.api_h = None
def get_server (self):
return self.server
def get_port (self):
return self.port
def connect(self):
return self.rpc_link.connect()
def disconnect(self):
self.api_h = None
return self.rpc_link.disconnect()
def transmit(self, method_name, params = None, retry = 0):
return self.rpc_link.invoke_rpc_method(method_name, params, self.api_h, retry = retry)
def transmit_batch(self, batch_list, retry = 0):
batch = self.rpc_link.create_batch()
for command in batch_list:
batch.add(command.method, command.params, self.api_h)
# invoke the batch
return batch.invoke(retry = retry)
class Connection(object):
'''
Manages the connection to the server.
The connection state object describes the state of the connection
to the server: fully disconnected, fully connected, or marked
for disconnection.
'''
DISCONNECTED = 1
CONNECTED = 2
MARK_FOR_DISCONNECT = 3
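# Connection state is tracked as a (state_code, cause) tuple; the cause is
# only meaningful once the connection has been marked for disconnect.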
def __init__ (self, conn_info, logger, client):
self.conn_info = conn_info
self.logger = logger
self.sigint_on_conn_lost = False
# API classes
self.api_ver = {'name': 'STL', 'major': 4, 'minor': 1}
# low level RPC layer
self.rpc = CCommLink(self.conn_info['server'],
self.conn_info['sync_port'],
self.conn_info['virtual'],
client)
self.async = CTRexAsyncClient(self.conn_info['server'],
self.conn_info['async_port'],
client)
# save pointers
self.conn_info = conn_info
# init state
self.state = (self.DISCONNECTED, None)
def disconnect (self):
'''
disconnect from both channels
sync and async
'''
try:
self.rpc.disconnect()
self.async.disconnect()
finally:
self.state = (self.DISCONNECTED, None)
def connect (self):
'''
connect to the server (two channels)
'''
# first disconnect if already connected
if self.is_connected():
self.disconnect()
# connect
rc = self.__connect()
if not rc:
self.disconnect()
return rc
def barrier (self):
'''
executes a barrier
when it returns, an async barrier is guaranteed
'''
return self.async.barrier()
def sync (self):
'''
fully sync the client with the server
must be called after all the config
was done
'''
return self.async.barrier(baseline = True)
def mark_for_disconnect (self, cause):
'''
A thread-safe call: any thread can mark the current connection
as invalid, which will require the main thread to reconnect.
'''
# avoid any messages handling for the async thread
self.async.set_as_zombie()
# change state
self.state = (self.MARK_FOR_DISCONNECT, cause)
# if the flag is on, a SIGINT will be sent to the main thread
# causing the ZMQ RPC to stop what it's doing and report an error
if self.sigint_on_conn_lost:
os.kill(os.getpid(), signal.SIGINT)
def sigint_on_conn_lost_enable (self):
'''
when enabled, a SIGINT will be sent to the main thread
if the connection is lost
'''
self.sigint_on_conn_lost = True
def sigint_on_conn_lost_disable (self):
'''
disable SIGINT dispatching
in case of connection loss
'''
self.sigint_on_conn_lost = False
def is_alive (self):
'''
return True if any data has arrived from
the server in the last 3 seconds
'''
return ( self.async.last_data_recv_ts is not None and ((time.time() - self.async.last_data_recv_ts) <= 3) )
def is_connected (self):
return (self.state[0] == self.CONNECTED)
def is_marked_for_disconnect (self):
return self.state[0] == self.MARK_FOR_DISCONNECT
def get_disconnection_cause (self):
return self.state[1]
########## private ################
def __connect (self):
'''
connect to the server (two channels)
'''
# start with the sync channel
self.logger.pre_cmd("Connecting to RPC server on {0}:{1}".format(self.conn_info['server'], self.conn_info['sync_port']))
rc = self.rpc.connect()
if not rc:
return rc
# API sync V2
rc = self.rpc.transmit("api_sync_v2", params = self.api_ver)
self.logger.post_cmd(rc)
if not rc:
# api_sync_v2 is not present in v2.30 and older
if rc.errno() == JsonRpcErrNo.MethodNotSupported:
return RC_ERR('Mismatch between client and server versions')
return rc
# get the API_H and provide it to the RPC channel from now on
self.rpc.api_h = rc.data()['api_h']
# connect async channel
self.logger.pre_cmd("Connecting to publisher server on {0}:{1}".format(self.conn_info['server'], self.conn_info['async_port']))
rc = self.async.connect()
self.logger.post_cmd(rc)
if not rc:
return rc
self.state = (self.CONNECTED, None)
return RC_OK()
|
[
"irene.liew@intel.com"
] |
irene.liew@intel.com
|
abe2253ea7350d7773d326c712e376a3a3925019
|
65388f96457bc2ed38fa48dea7c947a7aca7e396
|
/{{cookiecutter.project_name}}/app/apps/accounts/test_views.py
|
2f1441c3bb996de8bcd16d784f48f43ddba74de0
|
[] |
no_license
|
JTarball/cookiecutter-django-project
|
0478f1f4f6068e6e0e8c4e207d624df45523d82f
|
658f83a36b087e8d20a8d25bc97425245f4434a0
|
refs/heads/master
| 2020-12-26T08:54:56.688819
| 2016-12-10T22:17:39
| 2016-12-10T22:17:39
| 68,531,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49,715
|
py
|
"""
accounts.test_views
===================
Tests the REST API calls.
Add more specific social registration tests
"""
import responses
import copy
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.test.utils import override_settings
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from allauth.account import app_settings
from allauth.socialaccount.models import SocialApp
from allauth.socialaccount.providers.facebook.provider import GRAPH_API_URL
from .serializers import LoginSerializer
from django.conf import settings
class TestRegistrations(APITestCase):
""" Tests Registration. """
def setUp(self):
self.login_url = reverse('accounts:rest_login')
self.logout_url = reverse('accounts:rest_logout')
self.register_url = reverse('accounts:rest_register')
self.password_reset_url = reverse('accounts:rest_password_reset')
self.rest_password_reset_confirm_url = reverse('accounts:rest_password_reset_confirm')
self.password_change_url = reverse('accounts:rest_password_change')
self.verify_url = reverse('accounts:rest_verify_email')
self.user_url = reverse('accounts:rest_user_details')
self.client = APIClient()
self.reusable_user_data = {'username': 'admin', 'email': 'admin@email.com', 'password': 'password12'}
self.reusable_user_data_change_password = {'username': 'admin', 'email': 'admin@email.com', 'password': 'password_same'}
self.reusable_register_user_data = {'username': 'admin', 'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_different_email = {'username': 'admin', 'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_different_username = {'username': 'admin1', 'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_no_username = {'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_no_email = {'username': 'admin', 'password1': 'password12', 'password2': 'password12'}
self.change_password_data_incorrect = {"new_password1": "password_not_same", "new_password2": "password_same"}
self.change_password_data = {"new_password1": "password_same", "new_password2": "password_same"}
self.change_password_data_old_password_field_enabled = {"old_password": "password12", "new_password1": "password_same", "new_password2": "password_same"}
def common_test_registration_basic(self, data):
response = self.client.post(self.register_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_201_CREATED, response.content)
return response
def common_test_registration_400(self, data):
response = self.client.post(self.register_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
return response
def common_test_registration_email_verification_mandatory(self):
self.common_test_registration_basic(self.reusable_register_user_data1)
response = self.client.post(self.login_url, {'email': 'admin1@email.com', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
# Normal Registration with Tests - From Here
# ===========================================
@override_settings(ACCOUNT_USERNAME_REQUIRED=True)
def test_registration_username_required(self):
""" Tests username is required during registration when ACCOUNT_USERNAME_REQUIRED is set. """
self.common_test_registration_400(self.reusable_register_user_data_no_username)
@override_settings(ACCOUNT_EMAIL_REQUIRED=True)
def test_registration_email_required(self):
""" Tests email is required during registration when ACCOUNT_EMAIL_REQUIRED is set. """
self.common_test_registration_400(self.reusable_register_user_data_no_email)
@override_settings(ACCOUNT_EMAIL_REQUIRED=True, ACCOUNT_USERNAME_REQUIRED=True)
def test_registration_email_and_username_required(self):
""" Tests email and username is required for registration. """
self.common_test_registration_basic(self.reusable_register_user_data)
@override_settings(ACCOUNT_EMAIL_REQUIRED=True, ACCOUNT_USERNAME_REQUIRED=False)
def test_registration_email_required_username_not_required(self):
""" Tests email is required even when username is not required for registration. """
self.common_test_registration_basic(self.reusable_register_user_data_no_username)
@override_settings(ACCOUNT_EMAIL_REQUIRED=False, ACCOUNT_USERNAME_REQUIRED=True)
def test_registration_username_required_email_not_required(self):
""" Tests username is required even when email is not required for registration. """
self.common_test_registration_basic(self.reusable_register_user_data_no_email)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="none")
def test_registration_email_verification_not_necessary(self):
""" Tests email verification is not needed for logged in when ACCOUNT_EMAIL_VERIFICATION is set to none. """
self.common_test_registration_basic(self.reusable_register_user_data)
print settings.STATICFILES_STORAGE
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="optional")
def test_registration_email_verification_optional(self):
""" Tests email verification is not needed for logged in when ACCOUNT_EMAIL_VERIFICATION is set to optional. """
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
@override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory")
def test_registration_email_verification_mandatory(self):
""" Tests email verification is needed for logged in when ACCOUNT_EMAIL_VERIFICATION is set to mandatory. """
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_UNIQUE_EMAIL=False)
def test_registration_email_doesnt_need_to_be_unique(self):
""" Tests registration doesnt need an unique email when ACCOUNT_UNIQUE_EMAIL is set. """
different_username = copy.deepcopy(self.reusable_register_user_data)
different_username['username'] = 'admin_different'
self.common_test_registration_basic(self.reusable_register_user_data)
self.common_test_registration_basic(different_username)
@override_settings(ACCOUNT_UNIQUE_EMAIL=True)
def test_registration_email_needs_to_be_unique(self):
""" Tests registration needs an unique email when ACCOUNT_UNIQUE_EMAIL is set. """
different_username = copy.deepcopy(self.reusable_register_user_data)
different_username['username'] = 'admin_different'
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.common_test_registration_400(different_username)
self.assertEquals(response.content, '{"email":["A user is already registered with this e-mail address."]}')
@override_settings(ACCOUNTS_REGISTRATION_OPEN=False)
def test_registration_basic_registration_not_open(self):
""" Tests basic registration fails if registration is closed. """
response = self.client.post(self.register_url, self.reusable_register_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"message":"Registration is current closed. Please try again soon."}')
# Normal Registration with Tests - what we normally want
# WARNING: If you change the settings these tests will fail;
# this is to ensure we don't accidentally change something
# =========================================================
# username or email login
# email verification
# require email and username when registering
# email doesnt have to be unique
# ACCOUNT_EMAIL_REQUIRED = True
# ACCOUNT_USERNAME_REQUIRED = True
# ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# ACCOUNT_AUTHENTICATION_METHOD = "username"
# ACCOUNT_UNIQUE_EMAIL = False
# ACCOUNT_ADAPTER = "apps.accounts.adapter.DefaultAccountAdapter"
def test_registration_normal_use_username_required_when_registering(self):
""" Checks username is required when registering."""
response = self.common_test_registration_400(self.reusable_register_user_data_no_username)
self.assertEquals(response.content, '{"username":["This field is required."]}')
def test_registration_normal_use_email_required_when_registering(self):
""" Checks email is required when registering."""
response = self.common_test_registration_400(self.reusable_register_user_data_no_email)
self.assertEquals(response.content, '{"email":["This field is required."]}')
def test_registration_normal_use_email_doesnt_need_to_be_unique_when_registering(self):
""" Checks email is not required to be unique when registering."""
self.common_test_registration_basic(self.reusable_register_user_data)
self.common_test_registration_basic(self.reusable_register_user_data_different_username)
def common_registration_email_verification_neccessary_verified_login_post(self, login_data):
mail_count = len(mail.outbox)
reg_response = self.common_test_registration_basic(self.reusable_register_user_data)
self.assertEquals(len(mail.outbox), mail_count + 1)
new_user = get_user_model().objects.latest('id')
login_response = self.client.post(self.login_url, login_data, format='json')
self.assertEquals(login_response.status_code, status.HTTP_400_BAD_REQUEST)
# verify email
email_confirmation = new_user.emailaddress_set.get(email=self.reusable_register_user_data['email']).emailconfirmation_set.order_by('-created')[0]
verify_response = self.client.post(self.verify_url, {'key': email_confirmation.key}, format='json')
self.assertEquals(verify_response.status_code, status.HTTP_200_OK)
login_response = self.client.post(self.login_url, login_data, format='json')
self.assertEquals(login_response.status_code, status.HTTP_200_OK)
def common_registration_email_verification_neccessary_verified_login_get(self, login_data):
mail_count = len(mail.outbox)
reg_response = self.common_test_registration_basic(self.reusable_register_user_data)
self.assertEquals(len(mail.outbox), mail_count + 1)
new_user = get_user_model().objects.latest('id')
login_response = self.client.post(self.login_url, login_data, format='json')
self.assertEquals(login_response.status_code, status.HTTP_400_BAD_REQUEST)
# verify email
email_confirmation = new_user.emailaddress_set.get(email=self.reusable_register_user_data['email']).emailconfirmation_set.order_by('-created')[0]
verify_response = self.client.get(self.verify_url + '?key=' + email_confirmation.key, format='json')
self.assertEquals(verify_response.status_code, status.HTTP_200_OK)
login_response = self.client.post(self.login_url, login_data, format='json')
self.assertEquals(login_response.status_code, status.HTTP_200_OK)
def test_registration_email_verification_neccessary_verified_login_username_post(self):
""" Proper Registration Test - mandatory email verification needed + username login via post verify. """
self.common_registration_email_verification_neccessary_verified_login_post({'username': 'admin', 'password': 'password12'})
@override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_registration_email_verification_neccessary_verified_login_username_get(self):
""" Proper Registration Test - mandatory email verification needed + username login via get verify. """
self.common_registration_email_verification_neccessary_verified_login_get({'username': 'admin', 'password': 'password12'})
class TestPasswordResets(APITestCase):
def setUp(self):
self.login_url = reverse('accounts:rest_login')
self.logout_url = reverse('accounts:rest_logout')
self.register_url = reverse('accounts:rest_register')
self.password_reset_url = reverse('accounts:rest_password_reset')
self.rest_password_reset_confirm_url = reverse('accounts:rest_password_reset_confirm')
self.password_change_url = reverse('accounts:rest_password_change')
self.verify_url = reverse('accounts:rest_verify_email')
self.user_url = reverse('accounts:rest_user_details')
self.client = APIClient()
self.reusable_user_data = {'username': 'admin', 'email': 'admin@email.com', 'password': 'password12'}
self.reusable_user_data_change_password = {'username': 'admin', 'email': 'admin@email.com', 'password': 'password_same'}
self.reusable_register_user_data = {'username': 'admin', 'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data1 = {'username': 'admin1', 'email': 'admin1@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_no_username = {'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_no_email = {'username': 'admin', 'password1': 'password12', 'password2': 'password12'}
self.change_password_data_incorrect = {"new_password1": "password_not_same", "new_password2": "password_same"}
self.change_password_data = {"new_password1": "password_same", "new_password2": "password_same"}
self.change_password_data_old_password_field_enabled = {"old_password": "password12", "new_password1": "password_same", "new_password2": "password_same"}
def create_user_and_login(self):
""" Helper function to create a basic user, login and assign token credentials. """
get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, "Snap! Basic Login has failed with a helper function 'create_user_and_login'. Something is really wrong here.")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + response.data['key'])
def _generate_uid_and_token(self, user):
result = {}
from django.utils.encoding import force_bytes
from django.contrib.auth.tokens import default_token_generator
from django import VERSION
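# Django 1.5 encodes the password-reset uid in base36; later versions use
# urlsafe base64, so pick the encoder matching the installed version.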
if VERSION[1] == 5:
from django.utils.http import int_to_base36
result['uid'] = int_to_base36(user.pk)
else:
from django.utils.http import urlsafe_base64_encode
result['uid'] = urlsafe_base64_encode(force_bytes(user.pk))
result['token'] = default_token_generator.make_token(user)
return result
"""
Password Reset Tests
====================
"""
def test_password_reset(self):
""" Test basic functionality of password reset. """
get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
payload = {'email': 'admin@email.com'}
response = self.client.post(self.password_reset_url, payload, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}')
@override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=True)
def test_password_reset_fail_no_user_with_email_no_notify_not_in_system(self):
""" Test basic functionality of password reset fails when there is no email on record (notify email not in system). """
payload = {'email': 'admin@email.com'}
response = self.client.post(self.password_reset_url, payload, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"error":"User with email doesn\'t exist. Did not send reset email."}')
@override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=False)
def test_password_reset_no_user_with_email_no_notify_not_in_system(self):
""" Test basic functionality of password reset fails when there is no email on record. """
payload = {'email': 'admin@email.com'}
response = self.client.post(self.password_reset_url, payload, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}')
def test_password_reset_confirm_fail_invalid_token(self):
""" Test password reset confirm fails if token is invalid. """
user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_password',
'uid': url_kwargs['uid'],
'token': '-wrong-token-'
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"token":["Invalid value"]}')
def test_password_reset_confirm_fail_invalid_uid(self):
""" Test password reset confirm fails if uid is invalid. """
user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_password',
'uid': 0,
'token': url_kwargs['token']
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"uid":["Invalid value"]}')
def test_password_reset_confirm_fail_passwords_not_the_same(self):
""" Test password reset confirm fails if uid is invalid. """
user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_not_the_same_password',
'uid': url_kwargs['uid'],
'token': url_kwargs['token']
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"new_password2":["The two password fields didn\'t match."]}')
def test_password_reset_confirm_login(self):
""" Tests password reset confirm works -> can login afterwards. """
user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_password',
'uid': url_kwargs['uid'],
'token': url_kwargs['token']
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
response = self.client.post(self.login_url, {'username': 'admin', 'email': 'admin@email.com', 'password': 'new_password'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
def test_password_reset_confirm_login_fails_with_old_password(self):
""" Tests password reset confirm fails with old password. """
user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
url_kwargs = self._generate_uid_and_token(user)
data = {
'new_password1': 'new_password',
'new_password2': 'new_password',
'uid': url_kwargs['uid'],
'token': url_kwargs['token']
}
response = self.client.post(self.rest_password_reset_confirm_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
response = self.client.post(self.login_url, {'username': 'admin', 'email': 'admin@email.com', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
class TestLogins(APITestCase):
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL, ACCOUNT_EMAIL_VERIFICATION="none")
def test_registration_account_authentication_method_email(self):
""" Tests authentication is email works when AUTHENTICATION_AUTHENTICATION_METHOD is set to email. """
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.client.post(self.login_url, {'email': 'admin@email.com', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL, ACCOUNT_EMAIL_VERIFICATION="none")
def test_registration_account_authentication_method_email_username_attempted(self):
""" Tests authentication is not username when AUTHENTICATION_AUTHENTICATION_METHOD is set to email. """
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.client.post(self.login_url, {'username': 'admin', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"non_field_errors":["Must include \\"email\\" and \\"password\\"."]}')
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME, ACCOUNT_EMAIL_VERIFICATION="none")
def test_registration_account_authentication_method_username(self):
""" Tests authentication is username when AUTHENTICATION_AUTHENTICATION_METHOD is set to username. """
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.client.post(self.login_url, {'email': 'admin@email.com', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"non_field_errors":["Must include \\"username\\" and \\"password\\"."]}')
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL, ACCOUNT_EMAIL_VERIFICATION="none")
def test_registration_account_authentication_method_username_email(self):
""" Tests authentication is username or email when AUTHENTICATION_AUTHENTICATION_METHOD is set to username or email. """
self.common_test_registration_basic(self.reusable_register_user_data)
response = self.client.post(self.login_url, {'email': 'admin@email.com', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
response = self.client.post(self.logout_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
response = self.client.post(self.login_url, {'username': 'admin', 'password': 'password12'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
class TestAccounts(APITestCase):
""" Tests normal use - non social login. """
def setUp(self):
self.login_url = reverse('accounts:rest_login')
self.logout_url = reverse('accounts:rest_logout')
self.register_url = reverse('accounts:rest_register')
self.password_reset_url = reverse('accounts:rest_password_reset')
self.rest_password_reset_confirm_url = reverse('accounts:rest_password_reset_confirm')
self.password_change_url = reverse('accounts:rest_password_change')
self.verify_url = reverse('accounts:rest_verify_email')
self.user_url = reverse('accounts:rest_user_details')
self.client = APIClient()
self.reusable_user_data = {'username': 'admin', 'email': 'admin@email.com', 'password': 'password12'}
self.reusable_user_data_change_password = {'username': 'admin', 'email': 'admin@email.com', 'password': 'password_same'}
self.reusable_register_user_data = {'username': 'admin', 'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data1 = {'username': 'admin1', 'email': 'admin1@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_no_username = {'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'}
self.reusable_register_user_data_no_email = {'username': 'admin', 'password1': 'password12', 'password2': 'password12'}
self.change_password_data_incorrect = {"new_password1": "password_not_same", "new_password2": "password_same"}
self.change_password_data = {"new_password1": "password_same", "new_password2": "password_same"}
self.change_password_data_old_password_field_enabled = {"old_password": "password12", "new_password1": "password_same", "new_password2": "password_same"}
def create_user_and_login(self):
""" Helper function to create a basic user, login and assign token credentials. """
get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, "Snap! Basic Login has failed with a helper function 'create_user_and_login'. Something is really wrong here.")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + response.data['key'])
def cleanUp(self):
pass
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_basic_username_auth_method(self):
""" Tests basic functionality of login with authentication method of username. """
# Assumes you provide username,password and returns a token
get_user_model().objects.create_user('admin3', '', 'password12')
data = {"username": 'admin3', "email": "", "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL,
ACCOUNT_EMAIL_REQUIRED=True)
def test_login_basic_email_auth_method(self):
""" Tests basic functionality of login with authentication method of email. """
# Assumes you provide username,password and returns a token
get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12')
data = {"username": '', "email": "email.login@gmail.com", "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_basic_username_email_auth_method(self):
""" Tests basic functionality of login with authentication method of username or email. """
# Assumes you provide username,password and returns a token
get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12')
# Check email
data = {"username": '', "email": "email.login@gmail.com", "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
# Check username
data = {"username": 'admin', "email": '', "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_auth_method_username_fail_no_users_in_db(self):
""" Tests login fails with a 400 when no users in db for login auth method of 'username'. """
serializer = LoginSerializer({'username': 'admin', 'password': 'password12'})
response = self.client.post(self.login_url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_login_email_auth_method_fail_no_users_in_db(self):
""" Tests login fails with a 400 when no users in db for login auth method of 'email'. """
serializer = LoginSerializer({'username': 'admin', 'password': 'password12'})
response = self.client.post(self.login_url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_username_email_auth_method_fail_no_users_in_db(self):
""" Tests login fails with a 400 when no users in db for login auth method of 'username_email'. """
serializer = LoginSerializer({'username': 'admin', 'password': 'password12'})
response = self.client.post(self.login_url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def common_test_login_fail_incorrect_change(self):
# Create user, login and try and change password INCORRECTLY
self.create_user_and_login()
self.client.post(self.password_change_url, data=self.change_password_data_incorrect, format='json')
# Remove credentials
self.client.credentials()
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_username_auth_method_fail_incorrect_password_change(self):
""" Tests login fails with an incorrect/invalid password change (login auth username). """
self.common_test_login_fail_incorrect_change()
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_login_email_auth_method_fail_incorrect_password_change(self):
""" Tests login fails with an incorrect/invalid password change (login auth email). """
self.common_test_login_fail_incorrect_change()
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_username_email_auth_method_fail_incorrect_password_change(self):
""" Tests login fails with an incorrect/invalid password change (login auth username_email). """
self.common_test_login_fail_incorrect_change()
def common_test_login_correct_password_change(self):
# Create user, login and try and change password successfully
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=self.change_password_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
# Remove credentials
self.client.credentials()
response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.content)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_username_auth_method_correct_password_change(self):
""" Tests login is succesful with a correct password change (login auth username). """
self.common_test_login_correct_password_change()
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_login_email_auth_method_correct_password_change(self):
""" Tests login is succesful with a correct password change (login auth email). """
self.common_test_login_correct_password_change()
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_username_email_auth_method_correct_password_change(self):
""" Tests login is succesful with a correct password change (login auth username_email). """
self.common_test_login_correct_password_change()
def test_login_fail_no_input(self):
""" Tests login fails when you provide no username and no email (login auth username_email). """
get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12')
data = {"username": '', "email": '', "password": ''}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME)
def test_login_username_auth_method_fail_no_input(self):
""" Tests login fails when you provide no username (login auth username). """
get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12')
data = {"username": '', "email": "email.login@gmail.com", "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL)
def test_login_email_auth_method_fail_no_input(self):
""" Tests login fails when you provide no username (login auth email). """
get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12')
data = {"username": "admin", "email": '', "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
@override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL)
def test_login_username_email_auth_method_fail_no_input(self):
""" Tests login fails when you provide no username and no email (login auth username_email). """
get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12')
data = {"username": '', "email": '', "password": 'password12'}
response = self.client.post(self.login_url, data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
# need to check for token
# test login with password change
# test login with wrong password change fails
def test_logout(self):
""" Tests basic logout functionality. """
self.create_user_and_login()
response = self.client.post(self.logout_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"Successfully logged out."}')
def test_logout_but_already_logged_out(self):
""" Tests logout when already logged out. """
self.create_user_and_login()
response = self.client.post(self.logout_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"Successfully logged out."}')
self.client.credentials() # remember to remove manual token credential
response = self.client.post(self.logout_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, response.content)
self.assertEquals(response.content, '{"success":"Successfully logged out."}')
def test_change_password_basic(self):
""" Tests basic functionality of 'change of password'. """
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=self.change_password_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"New password has been saved."}')
def test_change_password_basic_fails_not_authorised(self):
""" Tests basic functionality of 'change of password' fails if not authorised. """
get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
response = self.client.post(self.password_change_url, data=self.change_password_data, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEquals(response.content, '{"detail":"Authentication credentials were not provided."}')
def common_change_password_login_fail_with_old_password(self, password_change_data):
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=password_change_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.client.credentials() # Remove credentials
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def common_change_password_login_pass_with_new_password(self, password_change_data):
self.create_user_and_login()
response = self.client.post(self.password_change_url, password_change_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.client.credentials() # Remove credentials
response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
def common_change_password_login_fail_with_old_password_pass_with_new_password(self, password_change_data):
""" Tests change of password with old password fails but new password successes. """
self.create_user_and_login()
response = self.client.post(self.password_change_url, password_change_data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, response.content)
self.client.credentials() # Remove credentials
response = self.client.post(self.login_url, self.reusable_user_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK, response.content)
def test_change_password_login_fail_with_old_password(self):
""" Tests change of password with old password. """
self.common_change_password_login_fail_with_old_password(self.change_password_data)
def test_change_password_login_pass_with_new_password(self):
""" Tests change of password with new password. """
self.common_change_password_login_pass_with_new_password(self.change_password_data)
def test_change_password_login_fail_with_old_password_pass_with_new_password(self):
""" Tests change of password with old password fails but new password successes. """
self.common_change_password_login_fail_with_old_password_pass_with_new_password(self.change_password_data)
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_old_password_field_required_old_password_field_enabled(self):
""" Tests basic functionality of 'change of password' fails if old password not given as part of input (old password field enabled). """
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=self.change_password_data, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.content, '{"old_password":["This field is required."]}')
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_basic_old_password_field_enabled(self):
""" Tests basic functionality of 'change of password' (old password enabled). """
self.create_user_and_login()
response = self.client.post(self.password_change_url, data=self.change_password_data_old_password_field_enabled, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"success":"New password has been saved."}')
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_basic_fails_not_authorised_old_password_field_enabled(self):
""" Tests basic functionality of 'change of password' fails if not authorised (old password field enabled). """
get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
response = self.client.post(self.password_change_url, data=self.change_password_data_old_password_field_enabled, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEquals(response.content, '{"detail":"Authentication credentials were not provided."}')
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_login_fail_with_old_password_old_password_field_enabled(self):
""" Tests change of password with old password (old password field enabled). """
self.common_change_password_login_fail_with_old_password(self.change_password_data_old_password_field_enabled)
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_login_pass_with_new_password_old_password_field_enabled(self):
""" Tests change of password with new password (old password field enabled). """
self.common_change_password_login_pass_with_new_password(self.change_password_data_old_password_field_enabled)
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_change_password_login_fail_with_old_password_pass_with_new_password_old_password_field_enabled(self):
""" Tests change of password with old password fails but new password successes (old password field enabled). """
self.common_change_password_login_fail_with_old_password_pass_with_new_password(self.change_password_data_old_password_field_enabled)
class TestUserDetails(APITestCase):
"""
User Detail Tests
=================
"""
def test_user_details_get(self):
""" Test to retrieve user details. """
self.create_user_and_login()
response = self.client.get(self.user_url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"username":"admin","email":"admin@email.com","first_name":"","last_name":""}')
def test_user_details_put(self):
""" Test to put update user details. """
self.create_user_and_login()
response = self.client.put(self.user_url, {"username":"changed","email":"changed@email.com","first_name":"changed","last_name":"name"}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"username":"changed","email":"changed@email.com","first_name":"changed","last_name":"name"}')
def test_user_details_patch(self):
""" Test to patch update user details. """
self.create_user_and_login()
response = self.client.patch(self.user_url, {'username': 'changed_username', 'email': 'changed@email.com'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.content, '{"username":"changed_username","email":"changed@email.com","first_name":"","last_name":""}')
def test_user_details_put_not_authenticated(self):
""" Test to put update user details. """
get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
response = self.client.put(self.user_url, {"username":"changed","email":"changed@email.com","first_name":"changed","last_name":"name"}, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_details_patch_not_authenticated(self):
""" Test to patch update user details. """
get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
response = self.client.patch(self.user_url, {'username': 'changed_username', 'email': 'changed@email.com'}, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_details_get_not_authenticated(self):
""" Test to retrieve user details. """
get_user_model().objects.create_user('admin', 'admin@email.com', 'password12')
response = self.client.get(self.user_url, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
class TestAccountsSocial(APITestCase):
""" Tests normal for social login. """
urls = 'accounts.test_social_urls'
def setUp(self):
self.fb_login_url = reverse('fb_login')
social_app = SocialApp.objects.create(
provider='facebook',
name='Facebook',
client_id='123123123',
secret='321321321',
)
site = Site.objects.get_current()
social_app.sites.add(site)
self.graph_api_url = GRAPH_API_URL + '/me'
@responses.activate
def test_social_auth(self):
""" Tests Social Login. """
resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa
responses.add(
responses.GET,
self.graph_api_url,
body=resp_body,
status=200,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.data)
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
@responses.activate
def test_social_auth_only_one_user_created(self):
""" Tests Social Login. """
resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa
responses.add(
responses.GET,
self.graph_api_url,
body=resp_body,
status=200,
content_type='application/json'
)
users_count = get_user_model().objects.all().count()
response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.data)
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
# make sure that second request will not create a new user
response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertIn('key', response.data)
self.assertEqual(get_user_model().objects.all().count(), users_count + 1)
@responses.activate
def test_failed_social_auth(self):
# fake response
responses.add(
responses.GET,
self.graph_api_url,
body='',
status=400,
content_type='application/json'
)
response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
|
[
"james.tarball@gmail.com"
] |
james.tarball@gmail.com
|
1498b82da7b6fe2ae9a6854b6fcd8b22571bd599
|
9372026aec32fa10896225813986346e472f7a7c
|
/Algorithm/class49/stack_queue.py
|
18afc982243a82b1014b3d2e20e95d8dfe19d907
|
[] |
no_license
|
mjsong0712/learn_python
|
5809df5b0366b37836633f5fa5e2d96a9cb99798
|
1cc31ca750e76b436596e3a4f6b8f39d7b873624
|
refs/heads/master
| 2022-05-17T00:54:17.975679
| 2022-05-01T08:07:33
| 2022-05-01T08:07:33
| 235,795,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
import sys
class Stack:
def __init__(self):
self.L = [0 for i in range(100001)]
self.top = -1
def push(self, n):
self.L[self.top+1] = n
self.top += 1
def pop(self):
item = self.L[self.top]
self.top -= 1
return item
def isEmpty(self):
if self.top == -1:
return True
else:
return False
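# Pmaker simulates pushing 1..n onto a stack in increasing order and popping
# values to reproduce the target sequence P. It returns the list of '+'
# (push) and '-' (pop) operations when P is a valid pop order, or False
# otherwise.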
def Pmaker(n, P):
PM = []
S = Stack()
L = [i for i in range(1,n+1)]
cl = 0
cp = 0
while True:
if cp == n:
return PM
if cl == n:
while cp != len(P):
if S.pop() == P[cp]:
PM.append("-")
cp+=1
else:
return False
return PM
if L[cl] <= P[cp]:
while (cl < n) and (L[cl] <= P[cp]):
S.push(L[cl])
PM.append("+")
cl+=1
S.pop()
PM.append("-")
cp+=1
elif L[cl] > P[cp]:
a = S.pop()
PM.append("-")
if P[cp] != a:
return False
else:
cp += 1
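# Read n and the target pop sequence P from stdin (Python 2 raw_input/print),
# then emit the push/pop operations, or "NO" if the sequence is impossible.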
n = int(raw_input())
P = []
for i in range(n):
p = int(raw_input())
P.append(p)
res = Pmaker(n,P)
if res:
for c in res:
print c
else:
print "NO"
|
[
"mjsong070712@gmail.com"
] |
mjsong070712@gmail.com
|
df2b7b7f4286fae602e375dcdd454832dbd70659
|
e5ccd2611e53968a34c879f6a664d25d100eb7f6
|
/src/colorslider_test.pyw
|
04341c70496dea5512cb12bfb768da8bed7f5aef
|
[] |
no_license
|
sergeyfarin/kyui
|
ec9b32605616fbd0ca0c21d10e130ec1c5164d4a
|
320f8df348491bc01bca0c76fc92e1d5e6d841a2
|
refs/heads/master
| 2020-12-24T15:49:12.880822
| 2012-01-06T14:32:17
| 2012-01-06T14:32:17
| 32,626,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,657
|
pyw
|
#UTF-8
#colorslider_test.pyw
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
from Widgets.colorslider import ColorSlider_Old
from template_test import TemplateDialog
class Dialog(TemplateDialog):
def __init__(self, parent = None):
super(QDialog, self).__init__(parent)
self.setObjectName('dialog')
self._color = QColor(0, 0, 0)
self.setupUi()
self.connectSignals()
def setupUi(self):
super().setupUi()
self.debugBox.hide()
self.testBox = QGroupBox(self, objectName='testBox')
self.testLayout = QBoxLayout(QBoxLayout.TopToBottom,
parent=self.testBox,
objectName='testLayout')
self.testWidget1 = ColorSlider_Old(QColor.Rgb, 0,
Qt.Horizontal, self.testBox)
self.testWidget1.setObjectName('testWidget1')
self.testLayout.addWidget(self.testWidget1)
self.testWidget2 = ColorSlider_Old(QColor.Rgb, 1,
Qt.Horizontal, self.testBox)
self.testWidget2.setObjectName('testWidget2')
self.testLayout.addWidget(self.testWidget2)
self.testWidget3 = ColorSlider_Old(QColor.Rgb, 2,
Qt.Horizontal, self.testBox)
self.testWidget3.setObjectName('testWidget3')
self.testLayout.addWidget(self.testWidget3)
self.layout.insertWidget(0, self.testBox)
self.specLabel = QLabel(self.settingsBox)
self.specLabel.setObjectName('specLabel')
self.specBox = QComboBox(self.settingsBox)
self.specBox.setObjectName('specBox')
self.specBox.addItem('RGB', QColor.Rgb)
self.specBox.addItem('HSV', QColor.Hsv)
self.specBox.addItem('HSL', QColor.Hsl)
self.specLabel.setBuddy(self.specBox)
self.settingsLayout.addRow(self.specLabel, self.specBox)
self.orientBox = QCheckBox(self)
self.orientBox.setObjectName('orientBox')
self.settingsLayout.addWidget(self.orientBox)
self.dynamicBox = QCheckBox(self)
self.dynamicBox.setObjectName('dynamicBox')
self.dynamicBox.setChecked(True)
self.settingsLayout.addWidget(self.dynamicBox)
self.retranslateUi()
def retranslateUi(self):
super().retranslateUi()
self.testBox.setTitle(self.trUtf8('&Test'))
self.specLabel.setText(self.trUtf8('&Spec'))
self.orientBox.setText('&Vertical Sliders')
self.dynamicBox.setText('&Dynamic Gradients')
def connectSignals(self):
super().connectSignals()
self.setWindowTitle(self.trUtf8('ColorSlider Test'))
self.specBox.currentIndexChanged[int].connect(self.onSpecChanged)
self.orientBox.toggled.connect(self.onOrientationChanged)
self.dynamicBox.toggled.connect(self.setDynamic)
self.setDynamic(True)
def onSpecChanged(self, index : int):
if index == 0:
qDebug('Spec: RGB')
self.testWidget1.setColorChannel(QColor.Rgb, 0)
self.testWidget2.setColorChannel(QColor.Rgb, 1)
self.testWidget3.setColorChannel(QColor.Rgb, 2)
elif index == 1:
self.testWidget1.setColorChannel(QColor.Hsv, 0)
self.testWidget2.setColorChannel(QColor.Hsv, 1)
self.testWidget3.setColorChannel(QColor.Hsv, 2)
elif index == 2:
self.testWidget1.setColorChannel(QColor.Hsl, 0)
self.testWidget2.setColorChannel(QColor.Hsl, 1)
self.testWidget3.setColorChannel(QColor.Hsl, 2)
def onOrientationChanged(self):
if self.orientBox.isChecked():
direction = QBoxLayout.LeftToRight
orient = Qt.Vertical
else:
direction = QBoxLayout.TopToBottom
orient = Qt.Horizontal
self.testLayout.setDirection(direction)
self.testWidget1.setOrientation(orient)
self.testWidget2.setOrientation(orient)
self.testWidget3.setOrientation(orient)
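# With dynamic gradients enabled, each slider's valueChanged pushes its
# channel value into the other two sliders so their gradients stay in sync.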
def setDynamic(self, dynamic):
if dynamic:
self.testWidget1.valueChanged.connect(self.onSlider1Changed)
self.testWidget2.valueChanged.connect(self.onSlider2Changed)
self.testWidget3.valueChanged.connect(self.onSlider3Changed)
else:
self.testWidget1.valueChanged.disconnect(self.onSlider1Changed)
self.testWidget2.valueChanged.disconnect(self.onSlider2Changed)
self.testWidget3.valueChanged.disconnect(self.onSlider3Changed)
def onSlider1Changed(self, value):
channel = self.testWidget1.colorChannel()
self.testWidget2.setChannelValue(channel, value)
self.testWidget3.setChannelValue(channel, value)
def onSlider2Changed(self, value):
channel = self.testWidget2.colorChannel()
self.testWidget1.setChannelValue(channel, value)
self.testWidget3.setChannelValue(channel, value)
def onSlider3Changed(self, value):
channel = self.testWidget3.colorChannel()
self.testWidget1.setChannelValue(channel, value)
self.testWidget2.setChannelValue(channel, value)
if __name__ == '__main__':
app = QApplication(sys.argv)
dlg = Dialog()
dlg.show()
sys.exit(app.exec_())
|
[
"mnijph@gmail.com@61556d0e-e001-f3ff-fadc-bb871643678f"
] |
mnijph@gmail.com@61556d0e-e001-f3ff-fadc-bb871643678f
|
cbdf7656ac78f0a708d02e0937ee94e89283794c
|
07b62e1b4500c2fa6df424f81a588d3f82258e8c
|
/recipe_recommender/etl/__init__.py
|
d2d9c0f724f66f3cadca908a80af41627f149a41
|
[
"BSD-3-Clause"
] |
permissive
|
janash/recipe_recommender
|
fd77ad167e75669f56df3145468c42f7df17417b
|
ffc5c0c55578a0c0a81c1fef6ce2290bea5051d0
|
refs/heads/master
| 2020-04-12T11:32:11.181349
| 2019-09-15T16:51:22
| 2019-09-15T16:51:22
| 162,462,572
| 1
| 1
|
BSD-3-Clause
| 2019-09-01T16:25:32
| 2018-12-19T16:20:45
|
Python
|
UTF-8
|
Python
| false
| false
| 119
|
py
|
"""
Imports for 'etl' (extract, transform, load) to database.
"""
from . import utils
from . import index_bodybuilding
# author: janash@vt.edu

# ===== mrjamrd/diplopython :: /cap6/eliminar.py (Python, no license, 425 bytes) =====
from io import open
import pathlib
import shutil
import os
import os.path
# delete a file
# ruta = str(pathlib.Path().absolute()) + "/fichero_textonew.txt"
# os.remove(ruta)
# print(os.path.abspath("./"))
# ruta = os.path.abspath("./") + "/fichero_texto1.txt"
ruta = "./fichero_texto1.txt"
# Check whether a file exists at the given path
if os.path.isfile(ruta):
    print("The file exists")
else:
    print("The file does not exist")
# author: joseam1789@gmail.com

# ===== scorpaena/todo_list :: /todo_api/apps/todo/urls.py (Python, no license, 182 bytes) =====
from rest_framework.routers import DefaultRouter
from .views import ToDoViewSet
router = DefaultRouter()
router.register(r'', ToDoViewSet, basename='todo')
urlpatterns = router.urls
# author: mvalyn@gmail.com

# ===== bopopescu/nova-token :: /nova/conf/vmware.py (Python, Apache-2.0, 10,441 bytes) =====
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools

from oslo_config import cfg

vmware_group = cfg.OptGroup('vmware', title='VMWare Options')

vmwareapi_vif_opts = [
    cfg.StrOpt('vlan_interface',
               default='vmnic0',
               help='Physical ethernet adapter name for vlan networking'),
    cfg.StrOpt('integration_bridge',
               help='This option should be configured only when using the '
                    'NSX-MH Neutron plugin. This is the name of the '
                    'integration bridge on the ESXi. This should not be set '
                    'for any other Neutron plugin. Hence the default value '
                    'is not set.'),
]

vmware_utils_opts = [
    cfg.IntOpt('console_delay_seconds',
               help='Set this value if affected by an increased network '
                    'latency causing repeated characters when typing in '
                    'a remote console.'),
    cfg.StrOpt('serial_port_service_uri',
               help='Identifies the remote system that serial port traffic '
                    'will be sent to. If this is not set, no serial ports '
                    'will be added to the created VMs.'),
    cfg.StrOpt('serial_port_proxy_uri',
               help='Identifies a proxy service that provides network access '
                    'to the serial_port_service_uri. This option is ignored '
                    'if serial_port_service_uri is not specified.'),
]

vmwareapi_opts = [
    cfg.StrOpt('host_ip',
               help='Hostname or IP address for connection to VMware '
                    'vCenter host.'),
    cfg.PortOpt('host_port',
                default=443,
                help='Port for connection to VMware vCenter host.'),
    cfg.StrOpt('host_username',
               help='Username for connection to VMware vCenter host.'),
    cfg.StrOpt('host_password',
               help='Password for connection to VMware vCenter host.',
               secret=True),
    cfg.StrOpt('ca_file',
               help='Specify a CA bundle file to use in verifying the '
                    'vCenter server certificate.'),
    cfg.BoolOpt('insecure',
                default=False,
                help='If true, the vCenter server certificate is not '
                     'verified. If false, then the default CA truststore is '
                     'used for verification. This option is ignored if '
                     '"ca_file" is set.'),
    cfg.StrOpt('cluster_name',
               help='Name of a VMware Cluster ComputeResource.'),
    cfg.StrOpt('datastore_regex',
               help='Regex to match the name of a datastore.'),
    cfg.FloatOpt('task_poll_interval',
                 default=0.5,
                 help='The interval used for polling of remote tasks.'),
    cfg.IntOpt('api_retry_count',
               default=10,
               help='The number of times we retry on failures, e.g., '
                    'socket error, etc.'),
    cfg.PortOpt('vnc_port',
                default=5900,
                help='VNC starting port'),
    cfg.IntOpt('vnc_port_total',
               default=10000,
               help='Total number of VNC ports'),
    cfg.BoolOpt('use_linked_clone',
                default=True,
                help='Whether to use linked clone'),
    cfg.StrOpt('wsdl_location',
               help='Optional VIM Service WSDL Location '
                    'e.g http://<server>/vimService.wsdl. '
                    'Optional over-ride to default location for bug '
                    'work-arounds')
]

spbm_opts = [
    cfg.BoolOpt('pbm_enabled',
                default=False,
                help='The PBM status.'),
    cfg.StrOpt('pbm_wsdl_location',
               help='PBM service WSDL file location URL. '
                    'e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl '
                    'Not setting this will disable storage policy based '
                    'placement of instances.'),
    cfg.StrOpt('pbm_default_policy',
               help='The PBM default policy. If pbm_wsdl_location is set and '
                    'there is no defined storage policy for the specific '
                    'request then this policy will be used.'),
]

vimutil_opts = [
    cfg.IntOpt('maximum_objects',
               default=100,
               help='The maximum number of ObjectContent data '
                    'objects that should be returned in a single '
                    'result. A positive value will cause the '
                    'operation to suspend the retrieval when the '
                    'count of objects reaches the specified '
                    'maximum. The server may still limit the count '
                    'to something less than the configured value. '
                    'Any remaining objects may be retrieved with '
                    'additional requests.')
]

vmops_opts = [
    cfg.StrOpt('cache_prefix',
               help='The prefix for where cached images are stored. This is '
                    'NOT the full path - just a folder prefix. '
                    'This should only be used when a datastore cache should '
                    'be shared between compute nodes. Note: this should only '
                    'be used when the compute nodes have a shared file '
                    'system.'),
]

ALL_VMWARE_OPTS = list(itertools.chain(
    vmwareapi_vif_opts,
    vmware_utils_opts,
    vmwareapi_opts,
    spbm_opts,
    vimutil_opts,
    vmops_opts))


def register_opts(conf):
    conf.register_group(vmware_group)
    conf.register_opts(ALL_VMWARE_OPTS, group=vmware_group)


def list_opts():
    return {vmware_group: ALL_VMWARE_OPTS}
# author: dmg@uvic.ca

# ===== mpMelnikov/ddi :: /main.py (Python, MIT, 1,825 bytes) =====
import argparse
from actions.TfidfLearningAction import TfidfLearningAction
from actions.FrequencyAction import FrequencyAction
from actions.TfidfClassificationAction import TfidfClassificationAction
from actions.TfidfAction import TfidfAction
from actions.PreprocessAction import PreprocessAction
commands = dict(frequency=FrequencyAction,
# preprocess=PreprocessAction,
tfidf=TfidfAction,
tfidfLearning=TfidfLearningAction,
tfidfClassification=TfidfClassificationAction)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='DDI NLP program')
parser.add_argument('command', action="store", help='command name')
parser.add_argument('-log', '-l', action="store", help='turn on log', default=False)
parser.add_argument('-input', action="store", help='input file')
parser.add_argument('-output', action="store", help='output file')
args = parser.parse_args()
command = commands[args.command](args.input, args.output)
command.make()
input("Press Enter to continue...")
    # sequence for tf-idf:
    # don't need it: frequency -input "data\DDICorpus\Train\DrugBank" -output "data\frequencies"
    # 1. tfidf -input "data\DDICorpus\Train\DrugBank" -output "data\tfidf\tfidf_results.xml"
    # 2. tfidfLearning -input "data\tfidf\tfidf_results.xml" -output ""
    # 3. tfidfClassification -input "data\tfidf\tfidf_results.xml" -output ""
    # debug configuration parameters for the different tasks:
    #
    # compute the tf-idf values
    # -l -c tfidf -output data/tfIdfResults.xml
    # tfidf -output data/tfIdfResults.xml
    #
    # training on the tf-idf values
    # -l -c tfidfLearning -input data/tfIdfResults.xml
    #
    # classification using the tf-idf values
    # -l -c tfidfClassification -input data/tfIdfResults.xml
# author: m.p.melnikov@gmail.com

# ===== kshitijjain91/problem-solving-python-new :: /web_scraping/selenium_fighter.py (Python, no license, 332 bytes) =====
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
# Create a new instance of the Firefox driver
driver = webdriver.Firefox()
# author: kshitijjain91@gmail.com

# ===== NeilBostian/x86-Quine :: /create_min.py (Python, MIT, 1,069 bytes) =====
import re
if '__main__' == __name__:
re_comment = re.compile(r"^(.*);.*$")
re_newline = re.compile(r"^(.*)\n$")
re_whitespace = re.compile(r"^\s*$")
def process_line(line):
m = re_comment.match(line)
if m:
line = m.groups(1)[0]
m = re_newline.match(line)
if m:
line = m.groups(1)[0]
line = line.rstrip()
if re_whitespace.match(line):
return None
else: return line
with open('./quine.s', 'r') as fin:
all_lines = [y for y in [process_line(x) for x in fin] if y is not None]
with open('./quine.min.s', 'w') as fout:
for line in all_lines:
fout.write(line + '\n')
first_line = True
for line in all_lines:
if first_line:
fout.write(" ")
first_line = False
else:
fout.write(" , ")
line = line.replace("\"", "\", 0x22, \"")
fout.write("\"" + line + "\", 0x0A \\\n")
fout.write(" , 0x00\n")
# author: neil.bostian@gmail.com

# ===== paulo9405/DRF_ponto_turistico_api :: /core/migrations/0002_alter_pontoturistico_aprovado.py (Python, no license, 385 bytes) =====
# Generated by Django 3.2.5 on 2021-07-20 11:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='pontoturistico',
name='aprovado',
field=models.BooleanField(default=False),
),
]
# author: paulo.ricardo1137.pr@gmail.com

# ===== cpingor/leetcode :: /1616.分割两个字符串得到回文串.py (Python, MIT, 688 bytes) =====
#
# @lc app=leetcode.cn id=1616 lang=python3
#
# [1616] Split Two Strings to Make Palindrome (分割两个字符串得到回文串)
#
# @lc code=start
class Solution:
def checkPalindromeFormation(self, a: str, b: str) -> bool:
left = len(a) // 2 - 1
left = min(self.is_palindrome(a, a, left), self.is_palindrome(b, b, left))
left = min(self.is_palindrome(a, b, left), self.is_palindrome(b, a, left))
return left == -1
def is_palindrome(self, s_l, s_r, left):
right = len(s_l) - 1 - left
while left >= 0 and right < len(s_l):
if s_l[left] != s_r[right]:
break
left -= 1
right += 1
return left
# @lc code=end
# author: chipingchuan@hotmail.com

# ===== KR0NTAB/RGC :: /newmodule.py (Python, no license, 66 bytes) =====
print "I'm a new module"
print "I'm cool!"
print "the third line"
# author: anton.c@live.com

# ===== DerMetzger69/core :: /tests/components/search/test_init.py (Python, Apache-2.0, 10,432 bytes) =====
"""Tests for Search integration."""
from homeassistant.components import search
from homeassistant.helpers import (
area_registry as ar,
device_registry as dr,
entity_registry as er,
)
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
async def test_search(hass):
"""Test that search works."""
area_reg = ar.async_get(hass)
device_reg = dr.async_get(hass)
entity_reg = er.async_get(hass)
living_room_area = area_reg.async_create("Living Room")
# Light strip with 2 lights.
wled_config_entry = MockConfigEntry(domain="wled")
wled_config_entry.add_to_hass(hass)
wled_device = device_reg.async_get_or_create(
config_entry_id=wled_config_entry.entry_id,
name="Light Strip",
identifiers=({"wled", "wled-1"}),
)
device_reg.async_update_device(wled_device.id, area_id=living_room_area.id)
wled_segment_1_entity = entity_reg.async_get_or_create(
"light",
"wled",
"wled-1-seg-1",
suggested_object_id="wled segment 1",
config_entry=wled_config_entry,
device_id=wled_device.id,
)
wled_segment_2_entity = entity_reg.async_get_or_create(
"light",
"wled",
"wled-1-seg-2",
suggested_object_id="wled segment 2",
config_entry=wled_config_entry,
device_id=wled_device.id,
)
# Non related info.
kitchen_area = area_reg.async_create("Kitchen")
hue_config_entry = MockConfigEntry(domain="hue")
hue_config_entry.add_to_hass(hass)
hue_device = device_reg.async_get_or_create(
config_entry_id=hue_config_entry.entry_id,
name="Light Strip",
identifiers=({"hue", "hue-1"}),
)
device_reg.async_update_device(hue_device.id, area_id=kitchen_area.id)
hue_segment_1_entity = entity_reg.async_get_or_create(
"light",
"hue",
"hue-1-seg-1",
suggested_object_id="hue segment 1",
config_entry=hue_config_entry,
device_id=hue_device.id,
)
hue_segment_2_entity = entity_reg.async_get_or_create(
"light",
"hue",
"hue-1-seg-2",
suggested_object_id="hue segment 2",
config_entry=hue_config_entry,
device_id=hue_device.id,
)
await async_setup_component(
hass,
"group",
{
"group": {
"wled": {
"name": "wled",
"entities": [
wled_segment_1_entity.entity_id,
wled_segment_2_entity.entity_id,
],
},
"hue": {
"name": "hue",
"entities": [
hue_segment_1_entity.entity_id,
hue_segment_2_entity.entity_id,
],
},
"wled_hue": {
"name": "wled and hue",
"entities": [
wled_segment_1_entity.entity_id,
wled_segment_2_entity.entity_id,
hue_segment_1_entity.entity_id,
hue_segment_2_entity.entity_id,
],
},
}
},
)
await async_setup_component(
hass,
"scene",
{
"scene": [
{
"name": "scene_wled_seg_1",
"entities": {wled_segment_1_entity.entity_id: "on"},
},
{
"name": "scene_hue_seg_1",
"entities": {hue_segment_1_entity.entity_id: "on"},
},
{
"name": "scene_wled_hue",
"entities": {
wled_segment_1_entity.entity_id: "on",
wled_segment_2_entity.entity_id: "on",
hue_segment_1_entity.entity_id: "on",
hue_segment_2_entity.entity_id: "on",
},
},
]
},
)
await async_setup_component(
hass,
"script",
{
"script": {
"wled": {
"sequence": [
{
"service": "test.script",
"data": {"entity_id": wled_segment_1_entity.entity_id},
},
]
},
"hue": {
"sequence": [
{
"service": "test.script",
"data": {"entity_id": hue_segment_1_entity.entity_id},
},
]
},
}
},
)
assert await async_setup_component(
hass,
"automation",
{
"automation": [
{
"alias": "wled_entity",
"trigger": {"platform": "template", "value_template": "true"},
"action": [
{
"service": "test.script",
"data": {"entity_id": wled_segment_1_entity.entity_id},
},
],
},
{
"alias": "wled_device",
"trigger": {"platform": "template", "value_template": "true"},
"action": [
{
"domain": "light",
"device_id": wled_device.id,
"entity_id": wled_segment_1_entity.entity_id,
"type": "turn_on",
},
],
},
]
},
)
# Explore the graph from every node and make sure we find the same results
expected = {
"config_entry": {wled_config_entry.entry_id},
"area": {living_room_area.id},
"device": {wled_device.id},
"entity": {wled_segment_1_entity.entity_id, wled_segment_2_entity.entity_id},
"scene": {"scene.scene_wled_seg_1", "scene.scene_wled_hue"},
"group": {"group.wled", "group.wled_hue"},
"script": {"script.wled"},
"automation": {"automation.wled_entity", "automation.wled_device"},
}
for search_type, search_id in (
("config_entry", wled_config_entry.entry_id),
("area", living_room_area.id),
("device", wled_device.id),
("entity", wled_segment_1_entity.entity_id),
("entity", wled_segment_2_entity.entity_id),
("scene", "scene.scene_wled_seg_1"),
("group", "group.wled"),
("script", "script.wled"),
("automation", "automation.wled_entity"),
("automation", "automation.wled_device"),
):
searcher = search.Searcher(hass, device_reg, entity_reg)
results = searcher.async_search(search_type, search_id)
# Add the item we searched for, it's omitted from results
results.setdefault(search_type, set()).add(search_id)
assert (
results == expected
), f"Results for {search_type}/{search_id} do not match up"
# For combined things, needs to return everything.
expected_combined = {
"config_entry": {wled_config_entry.entry_id, hue_config_entry.entry_id},
"area": {living_room_area.id, kitchen_area.id},
"device": {wled_device.id, hue_device.id},
"entity": {
wled_segment_1_entity.entity_id,
wled_segment_2_entity.entity_id,
hue_segment_1_entity.entity_id,
hue_segment_2_entity.entity_id,
},
"scene": {
"scene.scene_wled_seg_1",
"scene.scene_hue_seg_1",
"scene.scene_wled_hue",
},
"group": {"group.wled", "group.hue", "group.wled_hue"},
"script": {"script.wled", "script.hue"},
"automation": {"automation.wled_entity", "automation.wled_device"},
}
for search_type, search_id in (
("scene", "scene.scene_wled_hue"),
("group", "group.wled_hue"),
):
searcher = search.Searcher(hass, device_reg, entity_reg)
results = searcher.async_search(search_type, search_id)
# Add the item we searched for, it's omitted from results
results.setdefault(search_type, set()).add(search_id)
assert (
results == expected_combined
), f"Results for {search_type}/{search_id} do not match up"
for search_type, search_id in (
("entity", "automation.non_existing"),
("entity", "scene.non_existing"),
("entity", "group.non_existing"),
("entity", "script.non_existing"),
("entity", "light.non_existing"),
("area", "non_existing"),
("config_entry", "non_existing"),
("device", "non_existing"),
("group", "group.non_existing"),
("scene", "scene.non_existing"),
("script", "script.non_existing"),
("automation", "automation.non_existing"),
):
searcher = search.Searcher(hass, device_reg, entity_reg)
assert searcher.async_search(search_type, search_id) == {}
async def test_ws_api(hass, hass_ws_client):
"""Test WS API."""
assert await async_setup_component(hass, "search", {})
area_reg = ar.async_get(hass)
device_reg = dr.async_get(hass)
kitchen_area = area_reg.async_create("Kitchen")
hue_config_entry = MockConfigEntry(domain="hue")
hue_config_entry.add_to_hass(hass)
hue_device = device_reg.async_get_or_create(
config_entry_id=hue_config_entry.entry_id,
name="Light Strip",
identifiers=({"hue", "hue-1"}),
)
device_reg.async_update_device(hue_device.id, area_id=kitchen_area.id)
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "search/related",
"item_type": "device",
"item_id": hue_device.id,
}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {
"config_entry": [hue_config_entry.entry_id],
"area": [kitchen_area.id],
}
# author: DerMetzger69 (noreply@github.com)

# ===== mthlimao/DjangoFirst :: /core/migrations/0002_client.py (Python, no license, 695 bytes) =====
# Generated by Django 3.1.7 on 2021-02-25 19:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Nome')),
('surname', models.CharField(max_length=100, verbose_name='Sobrenome')),
('email', models.EmailField(max_length=100, verbose_name='E-Mail')),
],
),
]
# author: mthlima@poli.ufrj.br

# ===== Qinyhao/xfz :: /apps/xfzauth/migrations/0001_initial.py (Python, no license, 1,897 bytes) =====
# Generated by Django 2.2.4 on 2019-09-19 08:47
from django.db import migrations, models
import shortuuidfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('uid', shortuuidfield.fields.ShortUUIDField(blank=True, editable=False, max_length=22, primary_key=True, serialize=False)),
('telephone', models.CharField(max_length=11, unique=True)),
('password', models.CharField(max_length=200)),
('email', models.EmailField(max_length=254, null=True, unique=True)),
('username', models.CharField(max_length=100)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('data_joined', models.DateTimeField(auto_now_add=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
# author: 1042816768@qq.com

# ===== emeth-/emethplayer :: /z_download_francischan.py (Python, no license, 3,221 bytes) =====
import urllib2, urllib, os
import httplib
import json
def extract_data(html):
data = {'title':'', 'description':'', 'scripture':'', 'church':'', 'file_loc':'', 'file_loc_s3':''}
if 'resourceLinks' in html:
p = html.split('resourceLinks')[1][:220].replace(' <a href="/index.php?id=305&file=', "").split("listenWindow")[0].replace('" class="', '')
if 'fileadmin' in p:
p = p[p.index('fileadmin'):]
data['file_loc'] = "http://www.preachitteachit.org/"+p
data['file_loc_s3'] = os.path.basename(data['file_loc'])
if 'sermonTitle' in html:
data['title'] = html.split('sermonTitle')[1].split("</h2>")[0].split('href')[1].split('</a>')[0].split('" >')[1]
if 'sermonDescription' in html:
data['description'] = html.split("sermonDescription")[1].split("</div>")[0].split("bodytext'>")[1].split("</p>")[0].replace("Used by permission.", "").replace("Preached at Cornerstone Church, Simi Valley, California.", "").replace("Preached at Cornerstone Church, Simi Valley California.", "").replace("From Cornerstone Church, Simi Valley, California.", "").replace("”", '"').replace("“", '"').replace(" ", " ").replace("’", "'").strip()
if "Passages:" in html:
p = html.split("Passages:")[1].split("</a></p>")[0]
if 'target="_blank" >' in p:
data['scripture'] = p.split('target="_blank" >')[1]
data['church'] = "Cornerstone Church, Simi Valley, California"
if data['file_loc'] == "":
return -1
data['download_me'] = 1
data['author_name'] = "Francis Chan"
data['church_website'] = "http://www.cornerstonesimi.com/"
conn = urllib.urlopen(data['file_loc'])
data['sermon_timestamp'] = conn.headers['last-modified']
data['file_loc_s3'] = "media/francis_chan/" + data['file_loc_s3']
return data
#http://www.preachitteachit.org/fileadmin/Release_1/sermons/sermon_series/Frances_Chan/OHolyNightChan.mp3
urls = [
"http://www.preachitteachit.org/about-us/the-team/francis-chan/sermons/",
"http://www.preachitteachit.org/about-us/the-team/francis-chan/sermons/resource////1/",
"http://www.preachitteachit.org/about-us/the-team/francis-chan/sermons/resource////2/",
"http://www.preachitteachit.org/about-us/the-team/francis-chan/sermons/resource////3/",
"http://www.preachitteachit.org/about-us/the-team/francis-chan/sermons/resource////4/"
]
for url in urls:
html = urllib.urlopen(url).read()
for p in html.split('sermonWrap'):
x = extract_data(p)
if x != -1:
"""
local_dir = os.getcwd() + '/media/francis_chan/' + x['filename']
if not os.path.exists(local_dir):
urllib.urlretrieve(x['url'], local_dir)
print x['filename'] + " downloaded."
"""
url = 'http://localhost:8888/emethplayer/ajax.php?act=add_sermon'
x['password'] = "royale"
data = urllib.urlencode(x)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
print response.read()
# author: seanybob@gmail.com

# ===== astanway/osquery :: /tools/profile.py (Python, BSD-3-Clause, 10,437 bytes) =====
#!/usr/bin/env python
# Copyright 2004-present Facebook. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
import argparse
except ImportError:
print ("Cannot import argparse.")
print ("Try: sudo yum install python-argparse")
exit(1)
import json
import os
import psutil
import tempfile
import shutil
import subprocess
import sys
import time
def red(msg):
return "\033[41m\033[1;30m %s \033[0m" % str(msg)
def yellow(msg):
return "\033[43m\033[1;30m %s \033[0m" % str(msg)
def green(msg):
return "\033[42m\033[1;30m %s \033[0m" % str(msg)
def blue(msg):
return "\033[46m\033[1;30m %s \033[0m" % str(msg)
KB = 1024 * 1024
RANGES = {
"colors": (blue, green, yellow, red),
"utilization": (8, 20, 50),
"cpu_time": (0.4, 1, 10),
"memory": (8 * KB, 12 * KB, 24 * KB),
"fds": (6, 12, 50),
"duration": (0.8, 1, 3),
}
def queries_from_tables(path, restrict):
"""Construct select all queries from all tables."""
# Let the caller limit the tables
restrict_tables = [t.strip() for t in restrict.split(",")]
tables = []
for base, folders, files in os.walk(path):
for spec in files:
spec_platform = os.path.basename(base)
table_name = spec.split(".table", 1)[0]
if spec_platform not in ["x", platform]:
continue
# Generate all tables to select from, with abandon.
tables.append("%s.%s" % (spec_platform, table_name))
tables = [t for t in tables if t.split(".")[1] not in restrict_tables]
queries = {}
for table in tables:
queries[table] = "SELECT * FROM %s;" % table.split(".", 1)[1]
return queries
def get_stats(p, interval=1):
"""Run psutil and downselect the information."""
utilization = p.cpu_percent(interval=interval)
return {
"utilization": utilization,
"counters": p.io_counters() if sys.platform != "darwin" else None,
"fds": p.num_fds(),
"cpu_times": p.cpu_times(),
"memory": p.memory_info_ex(),
}
def check_leaks_linux(shell, query, supp_file=None):
"""Run valgrind using the shell and a query, parse leak reports."""
start_time = time.time()
suppressions = "" if supp_file is None else "--suppressions=%s" % supp_file
cmd = "valgrind --tool=memcheck %s %s --query=\"%s\"" % (
suppressions, shell, query)
proc = subprocess.Popen(cmd,
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
summary = {
"definitely": None,
"indirectly": None,
"possibly": None,
}
for line in stderr.split("\n"):
for key in summary:
if line.find(key) >= 0:
summary[key] = line.split(":")[1].strip()
return summary
def check_leaks_darwin(shell, query):
start_time = time.time()
proc = subprocess.Popen([shell, "--query", query, "--delay", "1"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
leak_checks = None
while proc.poll() is None:
leaks = subprocess.Popen(["leaks", "%s" % proc.pid],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = leaks.communicate()
try:
for line in stdout.split("\n"):
if line.find("total leaked bytes") >= 0:
leak_checks = line.split(":")[1].strip()
except:
print (stdout)
return {"definitely": leak_checks}
def check_leaks(shell, query, supp_file=None):
if sys.platform == "darwin":
return check_leaks_darwin(shell, query)
else:
return check_leaks_linux(shell, query, supp_file=supp_file)
def profile_leaks(shell, queries, count=1, rounds=1, supp_file=None):
report = {}
for name, query in queries.iteritems():
print ("Analyzing leaks in query: %s" % query)
# Apply count
summary = check_leaks(shell, query * count, supp_file)
display = []
for key in summary:
output = summary[key]
if output is not None and output[0] != "0":
# Add some fun colored output if leaking.
if key == "definitely":
output = red(output)
if key == "indirectly":
output = yellow(output)
display.append("%s: %s" % (key, output))
print (" %s" % "; ".join(display))
report[name] = summary
return report
def run_query(shell, query, timeout=0, count=1):
"""Execute the osquery run testing wrapper with a setup/teardown delay."""
start_time = time.time()
proc = subprocess.Popen(
[shell, "--query", query, "--iterations", str(count),
"--delay", "1"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = psutil.Process(pid=proc.pid)
delay = 0
step = 0.5
percents = []
# Calculate the CPU utilization in intervals of 1 second.
while p.is_running():
try:
stats = get_stats(p, step)
percents.append(stats["utilization"])
except psutil.AccessDenied:
break
delay += step
if timeout > 0 and delay >= timeout + 2:
proc.kill()
break
    duration = time.time() - start_time - 2
utilization = [p for p in percents if p != 0]
if len(utilization) == 0:
avg_utilization = 0
else:
avg_utilization = sum(utilization)/len(utilization)
return {
"utilization": avg_utilization,
"duration": duration,
"memory": stats["memory"].rss,
"user_time": stats["cpu_times"].user,
"system_time": stats["cpu_times"].system,
"cpu_time": stats["cpu_times"].user + stats["cpu_times"].system,
"fds": stats["fds"],
}
def summary(results, display=False):
"""Map the results to simple thresholds."""
def rank(value, ranges):
for i, r in enumerate(ranges):
if value < r: return i
return len(ranges)
summary_results = {}
for name, result in results.iteritems():
summary_result = {}
for key in RANGES:
if key == "colors":
continue
summary_result[key] = rank(result[key], RANGES[key])
if display:
print ("%s:" % name, end=" ")
for key, v in summary_result.iteritems():
print (RANGES["colors"][v](
"%s: %s (%s)" % (key, v, result[key])), end=" ")
print ("")
summary_results[name] = summary_result
return summary_results
def profile(shell, queries, timeout=0, count=1, rounds=1):
report = {}
for name, query in queries.iteritems():
print ("Profiling query: %s" % query)
results = {}
for i in range(rounds):
result = run_query(shell, query, timeout=timeout, count=count)
summary({"%s (%d/%d)" % (name, i+1, rounds): result}, display=True)
# Store each result round to return an average.
for k, v in result.iteritems():
results[k] = results.get(k, [])
results[k].append(v)
average_results = {}
for k in results:
average_results[k] = sum(results[k])/len(results[k])
report[name] = average_results
summary({"%s avg" % name: report[name]}, display=True)
return report
if __name__ == "__main__":
platform = sys.platform
if platform == "linux2":
platform = "linux"
parser = argparse.ArgumentParser(description=("Profile osquery, "
"individual tables, or a set of osqueryd config queries."))
parser.add_argument("--restrict", default="",
help="Limit to a list of comma-separated tables.")
parser.add_argument("--tables", default="./osquery/tables/specs",
help="Path to the osquery table specs.")
parser.add_argument("--config", default=None,
help="Use scheduled queries from a config.")
parser.add_argument("--output", default=None,
help="Write JSON output to file.")
parser.add_argument("--summary", default=False, action="store_true",
help="Write a summary instead of stats.")
parser.add_argument("--query", default=None,
help="Profile a single query.")
parser.add_argument("--timeout", default=0, type=int,
help="Max seconds a query may run --count times.")
parser.add_argument("--count", default=1, type=int,
help="Number of times to run each query.")
parser.add_argument("--rounds", default=1, type=int,
help="Run the profile for multiple rounds and use the average.")
parser.add_argument("--leaks", default=False, action="store_true",
help="Check for memory leaks instead of performance.")
parser.add_argument("--suppressions", default=None,
help="Add a suppressions files to memory leak checking.")
parser.add_argument("--shell",
default="./build/%s/tools/run" % (platform),
help="Path to osquery run wrapper.")
args = parser.parse_args()
if not os.path.exists(args.shell):
print ("Cannot find --daemon: %s" % (args.shell))
exit(1)
if args.config is None and not os.path.exists(args.tables):
print ("Cannot find --tables: %s" % (args.tables))
exit(1)
queries = {}
if args.config is not None:
if not os.path.exists(args.config):
print ("Cannot find --config: %s" % (args.config))
exit(1)
print ("--config is not yet supported.")
exit(2)
elif args.query is not None:
queries["manual"] = args.query
else:
queries = queries_from_tables(args.tables, args.restrict)
if args.leaks:
results = profile_leaks(args.shell, queries, count=args.count,
rounds=args.rounds, supp_file=args.suppressions)
exit(0)
# Start the profiling!
results = profile(args.shell, queries,
timeout=args.timeout, count=args.count, rounds=args.rounds)
if args.output is not None and not args.summary:
with open(args.output, "w") as fh:
fh.write(json.dumps(results, indent=1, sort_keys=True))
if args.summary is True:
with open(args.output, "w") as fh:
fh.write(json.dumps(summary(results), indent=1, sort_keys=True))
# author: teddy@prosauce.org

# ===== answermvp/web_ui_test :: /selenium_pytest_demo/test_user_password.py (Python, no license, 487 bytes) =====
import pytest
import json
# fixture
class TestUserPassword(object):
@pytest.fixture
def users(self):
return json.loads(open('./users.dev.json', 'r').read())
def test_user_password(self, users):
for user in users:
passwd = user['password']
assert len(passwd) >= 6
            msg = 'user %s has a weak password' % (user['name'])
assert passwd != 'password', msg
assert passwd != '123456', msg
# author: answer_dp@163.com

# ===== CharanR24/Quiz-App-Using-Django :: /quiz/models.py (Python, no license, 21,639 bytes) =====
import re
import json
import csv
from django.db import models
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.validators import MaxValueValidator, validate_comma_separated_integer_list
from django.utils.timezone import now
from django.conf import settings
from django.utils.translation import ugettext as _
from model_utils.managers import InheritanceManager
from django.db.models.signals import pre_save, post_save
import io
from .signals import csv_uploaded
from .validators import csv_file_validator
from django.contrib.auth.models import User
from django.contrib import messages
class CategoryManager(models.Manager):
def new_category(self, category):
        new_category = self.create(category=re.sub(r'\s+', '-', category)
.lower())
new_category.save()
return new_category
class Category(models.Model):
category = models.CharField(
verbose_name=_("Category"),
max_length=250, blank=True,
unique=True, null=True)
objects = CategoryManager()
class Meta:
verbose_name = _("Category")
verbose_name_plural = _("Categories")
def __str__(self):
return self.category
class Quiz(models.Model):
title = models.CharField(
verbose_name=_("Title"),
max_length=60, blank=False)
description = models.TextField(
verbose_name=_("Description"),
blank=True, help_text=_("a description of the quiz"))
url = models.SlugField(
max_length=60, blank=False,
help_text=_("a user friendly url"),
verbose_name=_("user friendly url"))
category = models.ForeignKey(
Category, null=True, blank=True,
verbose_name=_("Category"), on_delete=models.CASCADE)
random_order = models.BooleanField(
blank=False, default=False,
verbose_name=_("Random Order"),
help_text=_("Display the questions in "
"a random order or as they "
"are set?"))
max_questions = models.PositiveIntegerField(
blank=True, null=True, verbose_name=_("Max Questions"),
help_text=_("Number of questions to be answered on each attempt."))
answers_at_end = models.BooleanField(
blank=False, default=False,
help_text=_("Correct answer is NOT shown after question."
" Answers displayed at the end."),
verbose_name=_("Answers at end"))
exam_paper = models.BooleanField(
blank=False, default=False,
help_text=_("If yes, the result of each"
" attempt by a user will be"
" stored. Necessary for marking."),
verbose_name=_("Exam Paper"))
single_attempt = models.BooleanField(
blank=False, default=False,
help_text=_("If yes, only one attempt by"
" a user will be permitted."
" Non users cannot sit this exam."),
verbose_name=_("Single Attempt"))
pass_mark = models.SmallIntegerField(
blank=True, default=0,
verbose_name=_("Pass Mark"),
help_text=_("Percentage required to pass exam."),
validators=[MaxValueValidator(100)])
success_text = models.TextField(
blank=True, help_text=_("Displayed if user passes."),
verbose_name=_("Success Text"))
fail_text = models.TextField(
verbose_name=_("Fail Text"),
blank=True, help_text=_("Displayed if user fails."))
draft = models.BooleanField(
blank=True, default=False,
verbose_name=_("Draft"),
help_text=_("If yes, the quiz is not displayed"
" in the quiz list and can only be"
" taken by users who can edit"
" quizzes."))
def save(self, force_insert=False, force_update=False, *args, **kwargs):
        self.url = re.sub(r'\s+', '-', self.url).lower()
self.url = ''.join(letter for letter in self.url if
letter.isalnum() or letter == '-')
if self.single_attempt is True:
self.exam_paper = True
if int(self.pass_mark) > 100:
raise ValidationError('%s is above 100' % self.pass_mark)
super(Quiz, self).save(force_insert, force_update, *args, **kwargs)
class Meta:
verbose_name = _("Quiz")
verbose_name_plural = _("Quizzes")
def __str__(self):
return self.title
def get_questions(self):
return self.question_set.all().select_subclasses()
@property
def get_max_score(self):
return self.get_questions().count()
def anon_score_id(self):
return str(self.id) + "_score"
def anon_q_list(self):
return str(self.id) + "_q_list"
def anon_q_data(self):
return str(self.id) + "_data"
# progress manager
class ProgressManager(models.Manager):
def new_progress(self, user):
new_progress = self.create(user=user,
score="")
new_progress.save()
return new_progress
class Progress(models.Model):
"""
Progress is used to track an individual signed in users score on different
quiz's and categories
Data stored in csv using the format:
category, score, possible, category, score, possible, ...
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, verbose_name=_("User"), on_delete=models.CASCADE)
score = models.CharField(validators=[validate_comma_separated_integer_list], max_length=1024,
verbose_name=_("Score"))
correct_answer = models.CharField(max_length=10, verbose_name=_('Correct Answers'))
wrong_answer = models.CharField(max_length=10, verbose_name=_('Wrong Answers'))
objects = ProgressManager()
class Meta:
verbose_name = _("User Progress")
verbose_name_plural = _("User progress records")
@property
def list_all_cat_scores(self):
"""
Returns a dict in which the key is the category name and the item is
a list of three integers.
The first is the number of questions correct,
the second is the possible best score,
the third is the percentage correct.
The dict will have one key for every category that you have defined
"""
score_before = self.score
output = {}
for cat in Category.objects.all():
to_find = re.escape(cat.category) + r",(\d+),(\d+),"
# group 1 is score, group 2 is highest possible
match = re.search(to_find, self.score, re.IGNORECASE)
if match:
score = int(match.group(1))
possible = int(match.group(2))
try:
percent = int(round((float(score) / float(possible))
* 100))
except:
percent = 0
output[cat.category] = [score, possible, percent]
else: # if category has not been added yet, add it.
self.score += cat.category + ",0,0,"
output[cat.category] = [0, 0]
if len(self.score) > len(score_before):
# If a new category has been added, save changes.
self.save()
return output
def update_score(self, question, score_to_add=0, possible_to_add=0):
"""
Pass in question object, amount to increase score
and max possible.
Does not return anything.
"""
category_test = Category.objects.filter(category=question.category)\
.exists()
if any([item is False for item in [category_test,
score_to_add,
possible_to_add,
isinstance(score_to_add, int),
isinstance(possible_to_add, int)]]):
return _("error"), _("category does not exist or invalid score")
to_find = re.escape(str(question.category)) +\
r",(?P<score>\d+),(?P<possible>\d+),"
match = re.search(to_find, self.score, re.IGNORECASE)
if match:
updated_score = int(match.group('score')) + abs(score_to_add)
updated_possible = int(match.group('possible')) +\
abs(possible_to_add)
new_score = ",".join(
[
str(question.category),
str(updated_score),
str(updated_possible), ""
])
# swap old score for the new one
self.score = self.score.replace(match.group(), new_score)
self.save()
else:
# if not present but existing, add with the points passed in
self.score += ",".join(
[
str(question.category),
str(score_to_add),
str(possible_to_add),
""
])
self.save()
def show_exams(self):
"""
Finds the previous quizzes marked as 'exam papers'.
Returns a queryset of complete exams.
"""
return Sitting.objects.filter(user=self.user, complete=True)
def __str__(self):
return self.user.username + ' - ' + self.score
class SittingManager(models.Manager):
def new_sitting(self, user, quiz):
if quiz.random_order is True:
question_set = quiz.question_set.all() \
.select_subclasses() \
.order_by('?')
else:
question_set = quiz.question_set.all() \
.select_subclasses()
question_set = [item.id for item in question_set]
if len(question_set) == 0:
raise ImproperlyConfigured('Question set of the quiz is empty. '
'Please configure questions properly')
if quiz.max_questions and quiz.max_questions < len(question_set):
question_set = question_set[:quiz.max_questions]
questions = ",".join(map(str, question_set)) + ","
new_sitting = self.create(user=user,
quiz=quiz,
question_order=questions,
question_list=questions,
incorrect_questions="",
current_score=0,
complete=False,
user_answers='{}')
return new_sitting
def user_sitting(self, user, quiz):
if quiz.single_attempt is True and self.filter(user=user,
quiz=quiz,
complete=True)\
.exists():
return False
try:
sitting = self.get(user=user, quiz=quiz, complete=False)
except Sitting.DoesNotExist:
sitting = self.new_sitting(user, quiz)
except Sitting.MultipleObjectsReturned:
sitting = self.filter(user=user, quiz=quiz, complete=False)[0]
return sitting
class Sitting(models.Model):
"""
Used to store the progress of logged in users sitting a quiz.
Replaces the session system used by anon users.
Question_order is a list of integer pks of all the questions in the
quiz, in order.
Question_list is a list of integers which represent id's of
the unanswered questions in csv format.
Incorrect_questions is a list in the same format.
Sitting deleted when quiz finished unless quiz.exam_paper is true.
User_answers is a json object in which the question PK is stored
with the answer the user gave.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("User"), on_delete=models.CASCADE)
quiz = models.ForeignKey(Quiz, verbose_name=_("Quiz"), on_delete=models.CASCADE)
question_order = models.CharField(validators=[validate_comma_separated_integer_list],
max_length=1024, verbose_name=_("Question Order"))
question_list = models.CharField(validators=[validate_comma_separated_integer_list],
max_length=1024, verbose_name=_("Question List"))
incorrect_questions = models.CharField(validators=[validate_comma_separated_integer_list],
max_length=1024, blank=True, verbose_name=_("Incorrect questions"))
current_score = models.IntegerField(verbose_name=_("Current Score"))
complete = models.BooleanField(default=False, blank=False,
verbose_name=_("Complete"))
user_answers = models.TextField(blank=True, default='{}',
verbose_name=_("User Answers"))
start = models.DateTimeField(auto_now_add=True,
verbose_name=_("Start"))
end = models.DateTimeField(null=True, blank=True, verbose_name=_("End"))
objects = SittingManager()
class Meta:
permissions = (("view_sittings", _("Can see completed exams.")),)
def get_first_question(self):
"""
Returns the next question.
If no question is found, returns False
Does NOT remove the question from the front of the list.
"""
if not self.question_list:
return False
first, _ = self.question_list.split(',', 1)
question_id = int(first)
return Question.objects.get_subclass(id=question_id)
def remove_first_question(self):
if not self.question_list:
return
_, others = self.question_list.split(',', 1)
self.question_list = others
self.save()
def add_to_score(self, points):
self.current_score += int(points)
self.save()
@property
def get_current_score(self):
return self.current_score
def _question_ids(self):
return [int(n) for n in self.question_order.split(',') if n]
@property
def get_percent_correct(self):
dividend = float(self.current_score)
divisor = len(self._question_ids())
if divisor < 1:
return 0 # prevent divide by zero error
if dividend > divisor:
return 100
correct = int(round((dividend / divisor) * 100))
if correct >= 1:
return correct
else:
return 0
def mark_quiz_complete(self):
self.complete = True
self.end = now()
self.save()
def add_incorrect_question(self, question):
"""
Adds uid of incorrect question to the list.
The question object must be passed in.
"""
if len(self.incorrect_questions) > 0:
self.incorrect_questions += ','
self.incorrect_questions += str(question.id) + ","
if self.complete:
self.add_to_score(-1)
self.save()
@property
def get_incorrect_questions(self):
"""
Returns a list of non empty integers, representing the pk of
questions
"""
return [int(q) for q in self.incorrect_questions.split(',') if q]
def remove_incorrect_question(self, question):
current = self.get_incorrect_questions
current.remove(question.id)
self.incorrect_questions = ','.join(map(str, current))
self.add_to_score(1)
self.save()
@property
def check_if_passed(self):
return self.get_percent_correct >= self.quiz.pass_mark
@property
def result_message(self):
if self.check_if_passed:
return self.quiz.success_text
else:
return self.quiz.fail_text
def add_user_answer(self, question, guess):
current = json.loads(self.user_answers)
current[question.id] = guess
self.user_answers = json.dumps(current)
self.save()
def get_questions(self, with_answers=False):
question_ids = self._question_ids()
questions = sorted(
self.quiz.question_set.filter(id__in=question_ids)
.select_subclasses(),
key=lambda q: question_ids.index(q.id))
if with_answers:
user_answers = json.loads(self.user_answers)
for question in questions:
question.user_answer = user_answers[str(question.id)]
return questions
@property
def questions_with_user_answers(self):
return {
q: q.user_answer for q in self.get_questions(with_answers=True)
}
@property
def get_max_score(self):
return len(self._question_ids())
def progress(self):
"""
Returns the number of questions answered so far and the total number of
questions.
"""
answered = len(json.loads(self.user_answers))
total = self.get_max_score
return answered, total
class Question(models.Model):
"""
Base class for all question types.
Shared properties placed here.
"""
quiz = models.ManyToManyField(Quiz,
verbose_name=_("Quiz"),
blank=True)
category = models.ForeignKey(Category,
verbose_name=_("Category"),
blank=True,
null=True, on_delete=models.CASCADE)
figure = models.ImageField(upload_to='uploads/%Y/%m/%d',
blank=True,
null=True,
verbose_name=_("Figure"))
content = models.CharField(max_length=1000,
blank=False,
help_text=_("Enter the question text that "
"you want displayed"),
verbose_name=_('Question'))
explanation = models.TextField(max_length=2000,
blank=True,
help_text=_("Explanation to be shown "
"after the question has "
"been answered."),
verbose_name=_('Explanation'))
objects = InheritanceManager()
class Meta:
verbose_name = _("Question")
verbose_name_plural = _("Questions")
ordering = ['category']
def __str__(self):
return self.content
def upload_csv_file(instance, filename):
qs = instance.__class__.objects.filter(user=instance.user)
if qs.exists():
num_ = qs.last().id + 1
else:
num_ = 1
return f'csv/{num_}/{instance.user.username}/{filename}'
class CSVUpload(models.Model):
title = models.CharField(max_length=100, verbose_name=_('Title'), blank=False)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
file = models.FileField(upload_to=upload_csv_file, validators=[csv_file_validator])
completed = models.BooleanField(default=False)
# questions = models.BooleanField(default=True)
# students = models.BooleanField(default=False)
def __str__(self):
return self.user.username
def create_user(data):
user = User.objects.create_user(username=data['username'],
email=data['email'],
password=data['password'],
first_name=data['first_name'],
last_name=data['last_name']
)
user.is_admin=False
user.is_staff=False
user.save()
def convert_header(csvHeader):
header_ = csvHeader[0]
cols = [x.replace(' ', '_').lower() for x in header_.split(",")]
return cols
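# For example, convert_header(["First Name,Last Name,Email"]) returns
# ['first_name', 'last_name', 'email'].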
def csv_upload_post_save(sender, instance, created, *args, **kwargs):
if not instance.completed:
csv_file = instance.file
decoded_file = csv_file.read().decode('utf-8')
io_string = io.StringIO(decoded_file)
reader = csv.reader(io_string, delimiter=';', quotechar='|')
header_ = next(reader)
header_cols = convert_header(header_)
print(header_cols, str(len(header_cols)))
parsed_items = []
'''
if using a custom signal
'''
for line in reader:
# print(line)
parsed_row_data = {}
i = 0
print(line[0].split(','), len(line[0].split(',')))
row_item = line[0].split(',')
for item in row_item:
key = header_cols[i]
parsed_row_data[key] = item
i+=1
create_user(parsed_row_data) # create user
parsed_items.append(parsed_row_data)
# messages.success(parsed_items)
print(parsed_items)
csv_uploaded.send(sender=instance, user=instance.user, csv_file_list=parsed_items)
'''
if using a model directly
for line in reader:
new_obj = YourModelKlass()
i = 0
row_item = line[0].split(',')
for item in row_item:
key = header_cols[i]
        setattr(new_obj, key, item)
i+=1
new_obj.save()
'''
instance.completed = True
instance.save()
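# Note on the expected CSV layout (an assumption inferred from create_user() above, not
# a documented format): the reader is built with delimiter=';', so each physical row
# arrives as a single field that is then split on ','. The header should therefore name
# the columns username, email, password, first_name and last_name, and a data row would
# look like "jdoe,jdoe@example.com,secret,John,Doe" (illustrative values only).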
post_save.connect(csv_upload_post_save, sender=CSVUpload)
|
[
"charanravichandran24@gmail.com"
] |
charanravichandran24@gmail.com
|
fa4069b3aae284cd85d86fe63a1219a3c5b21221
|
2f66b4070ee313fda229f6d7b9984eac8447c76c
|
/0108_etsy_1.py
|
024e79660fe7376d8040068ea7e596c93bef7f4b
|
[] |
no_license
|
hqpr/esty
|
207b1b871bfb4d49e3fd8392295f092515aeb38e
|
2c51040e1a9269a53b60c21271f8cbac5ff8361b
|
refs/heads/master
| 2020-05-18T04:12:36.027545
| 2018-02-09T17:54:38
| 2018-02-09T17:54:38
| 23,795,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'o.dubnyak'
__version__ = 1.0
"""
https://github.com/geduldig/TwitterAPI
https://dev.twitter.com/docs/api/1.1
https://hootsuite.com/developers/api/oauth2
consumer_key = "IqoIbOEZn4MVJJ5DePtvA"
consumer_secret = "MLdKfOS65GN7gDn5XSJyO9sjgGdcK1rUZMuLW2uPZg"
access_token_key = "23428449-GU5Ecm0gPC24kYDiC9xPLff0JvUd3LvHBwn7JOZGs"
access_token_secret = "XedJlYAc29XTAOiBjVMVueHJMbYPUzpL8alC9ID4A"
"""
from TwitterAPI import TwitterAPI
import time
import csv
reader = csv.reader(open('keys.csv', 'rb'), delimiter=';', quotechar='"')
for row in reader:
consumer_key = row[0]
consumer_secret = row[1]
access_token_key = row[2]
access_token_secret = row[3]
api = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)
msg = ['test', 'is', 'this']
for m in msg:
r = api.request('statuses/update', {'status': m})
time.sleep(5)
print r.status_code
|
[
"adubnyak@gmail.com"
] |
adubnyak@gmail.com
|
3a747dbe6324f8ee0d8ec7fda4c8284b76e6ed1b
|
d61142cbb171f4aa9d7b792bd29a0068200cd36f
|
/tests/mock_app/blueprint_one/__init__.py
|
c3a6bc399e084ef952013bd4ebbd3825a89f995b
|
[
"MIT"
] |
permissive
|
joelcolucci/flask-registerblueprints
|
1f9a0187e63d4b956a550468b8def190b569a09b
|
c117404691b66594f2cc84bff103ce893c633ecc
|
refs/heads/master
| 2023-01-23T04:20:20.210667
| 2016-10-14T23:09:24
| 2016-10-14T23:09:24
| 70,648,263
| 5
| 0
|
MIT
| 2022-12-26T19:57:31
| 2016-10-12T00:58:44
|
Python
|
UTF-8
|
Python
| false
| false
| 30
|
py
|
from api import blueprint_api
|
[
"joelcolucci@gmail.com"
] |
joelcolucci@gmail.com
|
c4791c2e825413ab4394faae014a9ff440211360
|
aacd813df79d65fda68fafc7f171c689e5dbe8e1
|
/kobert_transformers/kobert_transformers/utils.py
|
1a6580a439779f850974be84b99148a96a177a46
|
[
"Apache-2.0"
] |
permissive
|
KKbeom/DistilKoBERT
|
e685a4407cebbf63886634d7b79222c637cbf9f0
|
b09f2b78b0d554be4885b5a9042a41e4ca76fd78
|
refs/heads/master
| 2020-12-10T05:11:59.909126
| 2020-01-12T11:02:45
| 2020-01-12T11:02:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
from .tokenization_kobert import KoBertTokenizer
def get_tokenizer():
return KoBertTokenizer.from_pretrained('monologg/distilkobert')
|
[
"adieujw@gmail.com"
] |
adieujw@gmail.com
|
32cfa54f40707c030336eea1fd9e4f3640daf9e6
|
c6f9aae90a71a621a00c606d1d5c6b2c2feb4360
|
/comparison.py
|
4d27d029e8105f413e720568a6bbaba6c876ad92
|
[] |
no_license
|
yungkk98/korean_music_classifier
|
5114ae1a511d04e5498fb31f3fc9ed4b2cc8a586
|
0dd30172bc3db6f56a4910d2e708226a0c66340f
|
refs/heads/master
| 2020-03-18T21:41:08.350311
| 2018-05-29T12:52:57
| 2018-05-29T12:52:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,272
|
py
|
import numpy as np
import statistics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
import librosa
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
import csv
import os
f=open('./ex_2_fin_good.csv', 'w')
csvWriter = csv.writer(f)
channel = 16
names = ["NearestNeighbors", "K-NN_k=3", "Linear_SVM", "RBF_SVM", "Gaussian_Process",
"Decision_Tree_5", "Decision_Tree_10", "Random_Forest", "Neural_Net", "AdaBoost",
"Naive_Bayes"]
classifiers = [
KNeighborsClassifier(1),
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
DecisionTreeClassifier(max_depth=10),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB()
]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
mfccs = []
mfcc_record = []
y_list = []
songs = []
cn = 0
for category in ['balad', 'dance', 'fork/bluse', 'korea_tradition', 'rap/hiphop', 'rock', 'trote']:
for root, dirs, files in os.walk('./music/' + category):
for fname in files:
full_fname = os.path.join(root, fname)
songs.append(full_fname)
print(full_fname)
y_list.append(cn)
cn += 1
for song in songs:
audio_path = librosa.util.example_audio_file()
y, sr = librosa.load(song, offset=15.0, duration=30.0)
S = librosa.feature.melspectrogram(y, sr=sr, n_mels=64)
log_S = librosa.logamplitude(S, ref_power=np.max)
mfcc = librosa.feature.mfcc(S=log_S, n_mfcc=channel)
l = []
for tmp in mfcc:
l.append(np.mean(tmp))
for tmp in mfcc:
l.append(np.var(tmp))
for tmp in mfcc:
l.append(np.std(tmp))
for tmp in mfcc:
l.append(np.max(tmp) - np.min(tmp))
for tmp in mfcc:
l.append(statistics.median(tmp))
mfccs.append(l)
print(song)
print(l)
X = np.array(mfccs, dtype=np.float32)
y = np.array(y_list, dtype=np.int64)
# labels (cn starts at 0): 0 : balad, 1 : dance, 2 : fork&bluse, 3 : korea_tradition, 4 : rap, 5 : rock, 6 : trote
scores = []
for i in range(6):
csvWriter.writerow([i])
if i < 5:
X = np.array(mfccs, dtype=np.float32)[:, i * channel: (i+1)*channel]
else:
X = np.array(mfccs, dtype=np.float32)[:, 0: 2*channel]
linearly_separable = (X, y)
datasets = [linearly_separable]
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.2, random_state=42)
print(y_test)
for name, clf in zip(names, classifiers):
clf.fit(X_train, y_train)
#score = clf.score(X_test, y_test)
y_pred = clf.predict(X_test)
score = accuracy_score(y_test, y_pred)
scores.append(score)
precision = precision_score(y_test, y_pred, average=None)
recall = recall_score(y_test, y_pred, average=None)
print(name, precision, recall, score)
csvWriter.writerow([name, score])
csvWriter.writerow(precision)
csvWriter.writerow(recall)
joblib.dump(clf, './classifier/15/'+name + '_' + str(i) + '_' + str(score) + '.pkl')
print(max(scores))
|
[
"noreply@github.com"
] |
yungkk98.noreply@github.com
|
3db5355de3a03eca4dd6b1722acf89864c6867ab
|
e36c5a91306f8d8cf487368d3a1dfae4c03da3c0
|
/build/navigation/rotate_recovery/catkin_generated/pkg.develspace.context.pc.py
|
00e97c42a68da3f3841d2d8e40e6199bfe123bf1
|
[] |
no_license
|
DocDouze/RobMob
|
84ae5b96a16028586c9da2008f7c7772bdaa1334
|
6a2e7505eb2207d61b1c354cfd255075b1efbc73
|
refs/heads/master
| 2020-04-11T07:24:28.958201
| 2018-12-17T11:56:54
| 2018-12-17T11:56:54
| 161,607,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/aubailly/Bureau/RobMob/src/navigation/rotate_recovery/include".split(';') if "/home/aubailly/Bureau/RobMob/src/navigation/rotate_recovery/include" != "" else []
PROJECT_CATKIN_DEPENDS = "costmap_2d;geometry_msgs;nav_core;pluginlib;roscpp;tf2;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lrotate_recovery".split(';') if "-lrotate_recovery" != "" else []
PROJECT_NAME = "rotate_recovery"
PROJECT_SPACE_DIR = "/home/aubailly/Bureau/RobMob/devel"
PROJECT_VERSION = "1.16.2"
|
[
"quentin.aubailly@gmail.com"
] |
quentin.aubailly@gmail.com
|
9f66502d6f8856bbb13b38d5f97327f839f6260c
|
d027466f39070797a13c0210c59b7a52ad210f91
|
/grafana_dashboards/components/projects.py
|
33d1affcf12f992b15242e9f4eca0aa04ba979ca
|
[
"Apache-2.0"
] |
permissive
|
jakubplichta/grafana-dashboard-builder
|
4d3c7e64686128dba19b4dd6c9cfd71e2acccb1c
|
fa6bc51d7f1668d329dacd7e717b1f61eb35756e
|
refs/heads/master
| 2023-06-25T14:36:34.185076
| 2023-06-18T19:42:28
| 2023-06-18T19:42:28
| 36,573,600
| 149
| 44
|
Apache-2.0
| 2023-06-18T19:42:29
| 2015-05-30T20:02:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from grafana_dashboards.components.base import ComponentBase, get_placeholders
from grafana_dashboards.components.dashboards import Dashboard
from grafana_dashboards.context import Context
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Project(ComponentBase):
def __init__(self, data, registry):
super(Project, self).__init__(data, registry)
self._placeholders = [placeholder for dashboard in self._get_dashboard_names()
for placeholder in get_placeholders(dashboard)]
def _get_dashboard_names(self):
return self.data.get('dashboards', [])
def get_dashboards(self):
return [self.registry.get_component(Dashboard, dashboard_name) for dashboard_name in
self._get_dashboard_names()]
def get_contexts(self, context=None):
if context is None:
context = {}
data = self.data.copy()
data.update(context)
return Context.create_context(data, self._placeholders)
|
[
"jakub.plichta@gmail.com"
] |
jakub.plichta@gmail.com
|
eaa0862ac25519a84af0807c03bc01c1fa2de9ea
|
72063d10710268a8dfc45aee02f5afdc1890dd5a
|
/Python/connect.py
|
72dfe9e4fd85115c728129ac79f76477aa579b84
|
[
"MIT"
] |
permissive
|
RitRa/Little-Python-application-gmit-semester-3-2019
|
580c8b5e45ed8f4a2ab23f6a8404afa4fec93826
|
9c9ff525363785339db1a4cb2b8e12c23547be86
|
refs/heads/master
| 2020-05-20T08:02:43.666047
| 2019-05-17T17:55:02
| 2019-05-17T17:55:02
| 185,465,498
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,731
|
py
|
import pymysql.cursors
# Connect to the database
conn = pymysql.connect(host='localhost',
user='root',
password='Password!',
db='world',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
print ("connect successful!!")
print(conn)
# choice 1: shows all cities
def showcity():
with conn:
cursor = conn.cursor()
sql = ('Select * from city limit 15;')
cursor.execute(sql)
data = cursor.fetchall()
for row in data:
#print (row)
print (row["ID"], ":", row["Name"] ,":", row["CountryCode"],":", row["District"], ":",row["Population"])
# choice 2
# pulls in an operator (< > =) and population value
def findpopulation(operator, population):
#print("findpopulation", operator, population)
with conn:
cursor = conn.cursor()
sql = ('Select * from city where population %s %s'% (operator, population))
cursor.execute(sql)
data = cursor.fetchall()
for row in data:
#print (row)
print (row["ID"], ":", row["Name"] ,":", row["CountryCode"],":", row["District"], ":",row["Population"])
# choice 3
def addcity(city, countrycode, district, population):
print("add city ", city, countrycode, district, population)
with conn:
try:
cursor = conn.cursor()
sql = ("Insert into city (Name, CountryCode, District, Population) VALUES (%s, %s, %s, %s)")
cursor.execute(sql, (city, countrycode, district, population))
cursor.close()
print("Insert Successful")
except Exception as e:
#print(e)
print("****Error***:CountryCode",countrycode, "does not exist" )
#finally:
def find_Country(country):
#print("country: ", country)
with conn:
cursor = conn.cursor()
sql = ("""SELECT *
from country
where Name LIKE '%%%s%%' """ % (country,) )
cursor.execute(sql)
data = cursor.fetchall()
for row in data:
print (row["Name"], ":", row["Continent"] ,":", row["Population"],":", row["HeadOfState"])
def country_Population(operator,population):
print("user input", operator, population)
with conn:
cursor = conn.cursor()
sql = ('Select * from country where population %s %s'% (operator, population))
cursor.execute(sql)
data = cursor.fetchall()
for row in data:
print (row["Code"], ":", row["Name"] ,":", row["Continent"],":", row["Population"])
|
[
"noreply@github.com"
] |
RitRa.noreply@github.com
|
90862e1d171c8a187214dbacae754a9ed607ac2b
|
934ab23486fa1357f65bd46a6bea18b18713ba5f
|
/ML/scraping/losers/monthly_top_losers.py
|
eb2476b61ed17613a6b42cbf9fe3b07312111748
|
[
"MIT"
] |
permissive
|
CodeChefVIT/Stocker
|
80b249b5c2715fcdd274d0cf319a30a14191182c
|
a9bcaa7458240241495d2f88acb396640ab2b333
|
refs/heads/master
| 2023-01-20T02:23:45.591327
| 2020-11-22T03:29:47
| 2020-11-22T03:29:47
| 277,299,425
| 2
| 4
|
MIT
| 2020-11-22T03:29:48
| 2020-07-05T12:39:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 30 00:01:33 2020
@author: pranjal27bhardwaj
"""
import requests
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt
def monthly_losers():
dfs = pd.read_html('https://money.rediff.com/losers/bse/monthly',header=0)
for df in dfs[:-1]:
df = df
# df['% Change'] = df['% Change'].str.replace(' ', "")
df1 = df[['Company', '% Change', 'Current Price (Rs)']][:10]
data = {}
col = list(df1)
for i in range(10):
current = 'Company {}'.format(i+1)
data[current] = {}
c=0
for j in col:
if c==0:
data[current]['Company Name'] = df[j][i]
elif c==1:
data[current]['% Change'] = df[j][i]
else:
data[current]['Current Price (Rs)'] = df[j][i]
c+=1
    # write the CSV that plot_monthly_losers() reads before returning
    # (this call previously sat after the return and never executed)
    df1.to_csv('monthly_top_losers.csv', index=False)
    return data
#monthly_losers()
def plot_monthly_losers():
plt.style.use('fivethirtyeight')
data_monthly_losers = pd.read_csv('monthly_top_losers.csv')
data_monthly_losers_final = data_monthly_losers[:16]
x6 = data_monthly_losers_final.plot.bar(x = 'Company', y = '% Change', title = 'Monthly Top Losers', color='Black')
plt.savefig('monthly_top_losers.png', bbox_inches='tight')
    plt.show()
plot_monthly_losers()
|
[
"pranjalb2709@gmail.com"
] |
pranjalb2709@gmail.com
|
cbbab1f660f4fd8b9bd2e98e3c8d6da7bdade02b
|
f243198dcacd7362499ccfc27cb9fefdc763ba47
|
/diff/RandomFrameDiff.py
|
9ecd34728b1426525aad30b6ecf7ee6a92f37cd2
|
[] |
no_license
|
fletch22/kaggleDeepFakeDetection
|
a60dbd170660de99567a74aa5b5e246038cedd9d
|
592be28729613ae5ebdfd80af1fc1c3ccd18600b
|
refs/heads/master
| 2022-04-19T16:47:19.359334
| 2020-04-08T01:53:22
| 2020-04-08T01:53:22
| 233,509,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
import numpy as np
class RandomFrameDiff():
def __init__(self, image: np.ndarray, frame_index: int, x: int, y: int, height: int, width: int, score: float):
self.image = image
self.frame_index = frame_index
self.x = x
self.y = y
self.height = height
self.width = width
self.score = score
|
[
"chris@fletch22.com"
] |
chris@fletch22.com
|
959cbc67c9b7a1374f42dae6b177f64978ea49c6
|
eb297ff1e0011438fd184cc338b3fb86859b81c9
|
/Chapters 18 to 20/port_book/ll_env/Scripts/django-admin.py
|
c61bd159b5643f7569b205d7e0a93224e7dd107c
|
[] |
no_license
|
mynameisbenzo/PythonCrashCourse
|
c73a4505d9cdfe4df78e3ed01adb3491debf8a9b
|
831a9962a3c6cab53ecfdb1d2cceb0dd2d9c5a0a
|
refs/heads/master
| 2021-04-12T08:13:51.772957
| 2018-05-02T05:54:57
| 2018-05-02T05:54:57
| 126,091,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#!c:\users\me\desktop\python\pythoncrashcourse\chapters 18 to 20\port_book\ll_env\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"lorhernandez@csumb.edu"
] |
lorhernandez@csumb.edu
|
3dcdbaa3bf98fb662eb2a35b4e341b5b19b624dc
|
d638bbf120a54a02643d728b7c2c441efa6aacb7
|
/cogs/basic.py
|
72531f97e8757e865993f43cf43f8dbe4af54148
|
[
"MIT"
] |
permissive
|
MarkFrankle/DiscordPicBot
|
72a13c25e6c6d9b07d999f1017aea6a46e60fb10
|
7b953ead134475862bff99f5b0eb346508651c73
|
refs/heads/master
| 2020-03-19T01:11:23.195445
| 2018-05-31T03:56:51
| 2018-05-31T03:56:51
| 135,528,472
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,841
|
py
|
import praw
import discord
from discord.ext import commands
import math
import random
# discord.py calls groups of commands cogs
# cogs can also be handlers for different types of events
# and respond to changes in data as they happen
# setup
usedLinks = []
class BasicCog:
def __init__(self, bot):
self.bot = bot
# Get EyeBleach command
@commands.command()
async def eyebleach(self, ctx):
await ctx.send(getSubmission('eyebleach'))
# Get EyeBleach command
@commands.command()
async def subpic(self, ctx, subreddit):
await ctx.send(getSubmission(subreddit))
# Get EyeBleach command
@commands.command()
async def subrange(self, ctx, subreddit, endIdx):
for i in range(1, (int)(endIdx) - 1):
await ctx.send(getSubmissionIndex(subreddit,i))
# add this cog to the bot
def setup(bot):
bot.add_cog(BasicCog(bot))
def getSubmissionIndex(subreddit, index):
    subreddit = getSubreddit(subreddit)
    # fetch index + 1 posts so that position `index` actually exists; with
    # limit=index the check below could never reach i == index
    hot_sub = subreddit.hot(limit = index + 1)
i = 0
for submission in hot_sub:
if i == index:
return submission
i += 1
def getSubmission(subreddit):
cap = 100
subreddit = getSubreddit(subreddit)
hot_sub = subreddit.hot(limit = cap)
for submission in hot_sub:
if submission.url not in usedLinks and not submission.stickied and submission.url.endswith('.jpg'):
usedLinks.append(submission.url)
return submission.url
return 'No more pics :('
def getSubreddit(subreddit):
reddit = praw.Reddit(client_id = '',
client_secret = '',
username = '',
password = '',
user_agent = '')
subreddit = reddit.subreddit(subreddit)
return subreddit
|
[
"mfrankle@uw.edu"
] |
mfrankle@uw.edu
|
24d9c572f9c6ea820fb911ea4bcf16d134dd2b43
|
52d9b1152c23c7725884c0a25f71da1e9f5fbecc
|
/conferences.py
|
f09cbacac848ec23c48709d3f28f9f428618cd61
|
[] |
no_license
|
inwyrd/bibformatter
|
6680986496afae7e25e356bb5ab3068cd04b8059
|
e3ddd54327861bdc414bcacacf9c74879dc07137
|
refs/heads/master
| 2021-01-01T16:13:15.172142
| 2014-11-21T00:25:31
| 2014-11-21T00:25:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,701
|
py
|
"""List of canonical names for conferences.
Keywords are ordered by increasing specificity.
Example:
Security and Privacy (assumed IEEE S&P)
Web 2.0 Security and Privacy (assumed W2SP)
"""
CONFERENCE_KEYWORDS = [
[["Symposium on Architectures for Networking", "ANCS"],
("Proceedings of the ACM/IEEE Symposium on Architectures for "
"Networking and Communications Systems")],
[["Privacy Enhancing Technologies", "PET"],
"Proceedings of the Privacy Enhancing Technologies Symposium"],
[["EuroSys", "European Conference in Computer Systems"],
"Proceedings of the ACM European Conference in Computer Systems"],
[["Mobile Computing and Networking", "Mobicom"],
"Proceedings of the ACM Conference on Mobile Computing and Networking"],
[["World Wide Web", "WWW"],
"Proceedings of the International Conference on the World Wide Web"],
[["Computer Security Applications", "ACSAC"],
"Proceedings of the Annual Computer Security Applications Conference"],
[["Communications Security", "CCS"],
"Proceedings of the Conference on Computer and Communications Security"],
[["Security and Privacy", "Security \\\& Privacy", "Security Privacy", "Oakland"],
"Proceedings of the IEEE Symposium on Security and Privacy"],
[["Web 2.0 Security and Privacy", "W2SP"],
"Proceedings of the Workshop on Web 2.0 Security and Privacy"],
[["IMC", "Internet Measurement"],
"Proceedings of the ACM SIGCOM Internet Measurement Conference"],
[["NDSS", "Network and Distributed System Security"],
"Proceedings of the Network and Distributed System Security Conference"],
[["NSDI", "Network System Design and Implementation"],
"Proceedings of the Symposium on Network System Design and Implementation"],
[["LEET", "Scale Exploits"], # Now defunct
"Proceedings of the USENIX Workshop on Large-Scale Exploits and Emergent Threats"],
[["Hotbot", "Hot Topics in Understanding Botnets"], # Now defunct
"Proceedings of the USENIX Workshop on Hot Topics in Understanding Botnets"],
[["WIFS", "Information Forensics and Security"],
"Proceedings of the Workshop on Information Forensics and Security"],
[["WEIS", "Economics of Information Security"],
"Proceedings of the Workshop on Workshop on Economics of Information Security"],
[["WOSN", "Workshop on Online Social Networks"],
"Proceedings of the Workshop on Online Social Networks"],
[["AIRWeb", "Adversarial Information Retrieval"],
"Proceedings of SIGIR Workshop on Adversarial Information Retrieval on the Web"],
[["ICWSM", "Weblogs and Social Media"],
"Proceedings of the AAAI International Conference on Weblogs and Social Media"],
[["Collaboration, Electronic", "CEAS", "Electronic Messaging"],
"Proceedings of the Annual Collaboration, Electronic messaging, Anti-Abuse and Spam Conference"],
[["Financial Cryptography", "FC"],
"Proceedings of the International Conference on Financial Cryptography and Data Security"],
[["OSDI", "Operating Systems Design"],
"Proceedings of the Symposium on Operating Systems Design and Implementation"],
[["eCrime", "eCRS"],
"Proceedings of the IEEE eCrime Researchers Summit"],
[["FTCS", "Symposium on Fault-Tolerant Computing"],
"Proceedings of the International Symposium on Fault-Tolerant Computing"],
[["Measurement and Modeling of Computer Systems"],
"Proceedings of the ACM Conference on Measurement and Modeling of Computer Systems"],
[["Management and Performance Evaluation"],
"Proceedings of the International Conference on Management and Performance Evaluation of Computer Systems"],
[["VLDB", "Very Large Data Bases"],
"Proceedings of the International Conference on Very Large Data Bases"],
[["PODC", "Principles of Distributed Computing"],
"Proceedings of the Symposium on Principles of Distributed Computing"],
[["Large Installation System Administration", "LISA"],
"Proceedings of the Large Installation System Administration Conference"],
[["Hot Topics in Networks", "Hotnets"],
"Proceedings of the Workshop on Hot Topics in Networks"],
[["Hot Topics in Operating", "Hotos"],
"Proceedings of the Workshop on Hot Topics in Operating Systems"],
[["Usenix annual", "Usenix Technical"],
"Proceedings of the USENIX Annual Technical Conference"],
[["Symposium on Computer Architecture", "ISCA"],
"Proceedings of the International Symposium on Computer Architecture"],
[["Principles of Programming Languages", "POPL"],
"Proceedings of the Annual Symposium on Principles of Programming Languages"],
[["International Information Security Conference"],
"Proceedings of the IFIP International Information Security and Privacy Conference"],
[["INFOCOM", "Computer and Communications Societies"],
"Proceedings of the Annual Joint Conference of the IEEE Computer and Communications Societies"],
[["SOSP", "Symposium on Operating Systems Principles"],
"Proceedings of the ACM Symposium on Operating Systems Principles"],
[["ASPLOS", "Architectural Support"],
"Proceedings of the Symposium on Architectural Support for Programming Languages and Operating Systems"],
[["Development in Information Retrieval"],
"Proceedings of the Annual International ACM Conference on Research and Development in Information Retrieval"],
[["Hypertext and Social"],
"Proceedings of the ACM Conference on Hypertext and Social Media"],
[["Malicious and Unwanted Software"],
"Proceedings of the International Conference on Malicious and Unwanted Software"],
[["PLDI", "Programming Language Design"],
"Proceedings of the Conference on Programming Language Design and Implementation"],
[["RAID", "Recent Advances"],
"Proceedings of International Symposium on Recent Advances in Intrusion Detection"],
[["ICML", "International Conference on Machine Learning"],
"Proceedings of the International Conference on Machine Learning"],
[["Economics and Computation"],
"Proceedings of the ACM Conference on Economics and Computation"],
[["KDD", "Knowledge Discovery"],
"Proceedings of the SIGKDD International Conference on Knowledge Discovery and Data Mining"],
[["ICDCS", "Distributed Computing Systems"],
"Proceedings of the International Conference on Distributed Computing Systems"],
[["PST", "Privacy Security and Trust"],
"Proceedings of the Annual Conference on Privacy Security and Trust"],
[["DIMVA", "Intrusions and Malware"],
"Proceedings of the International Conference on Detection of Intrusions and Malware and Vulnerability Assessment"],
[["Usenix Security", "Security Symposium", "usenix-security"],
"Proceedings of the USENIX Security Symposium"]
]
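
# Illustrative sketch only (not bibformatter's actual matching routine, and the helper
# name below is made up): because the entries above are ordered by increasing
# specificity, scanning them in order and keeping the last hit lets a specific venue
# such as "Web 2.0 Security and Privacy" override the more generic
# "Security and Privacy".
def _canonical_name_sketch(raw_booktitle):
    match = None
    for keywords, canonical in CONFERENCE_KEYWORDS:
        if any(k.lower() in raw_booktitle.lower() for k in keywords):
            match = canonical
    return match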
|
[
"kurtthomas@google.com"
] |
kurtthomas@google.com
|
a06fd4836b1a818695a509b6c189ae422c8091cb
|
93f98dde611c138055629ae3b3b2cb1e301adc49
|
/DS/urls.py
|
f5caa443ef573ed57d9fdef5444e09360181e647
|
[
"BSD-3-Clause"
] |
permissive
|
gitter-badger/djangochannel
|
ebd9a69bef62376d9877a5aece3f54b211880736
|
f9e33254739457c461e84b66879172007512f9b0
|
refs/heads/master
| 2020-08-06T01:43:26.679657
| 2019-10-04T10:16:32
| 2019-10-04T10:16:32
| 212,788,313
| 0
| 0
|
BSD-3-Clause
| 2019-10-04T10:18:48
| 2019-10-04T10:18:47
| null |
UTF-8
|
Python
| false
| false
| 2,177
|
py
|
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.http import HttpResponse
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import TemplateView
from backend.blog.sitemap import PostSitemap
from backend.pages.sitemap import PagesSitemap
from backend.forum.sitemap import TopicSitemap
from backend.courses.sitemap import CourseSitemap
from backend.reviews.sitemap import ReviewSitemap
sitemaps = {
'blog': PostSitemap,
'pages': PagesSitemap,
'forum': TopicSitemap,
'course': CourseSitemap,
'review': ReviewSitemap,
}
urlpatterns = [
path('djadmin/', admin.site.urls),
path('sitemap.xml', sitemap, {'sitemaps': sitemaps}),
path('accounts/', include('allauth.urls')),
path('ckeditor/', include('ckeditor_uploader.urls')),
path('blog/', include('backend.blog.urls')),
path('course/', include('backend.courses.urls')),
path('forum/', include('backend.forum.urls')),
path('profile/', include('backend.profile.urls')),
path('test/', include('backend.dc_tests.urls')),
path('reviews/', include('backend.reviews.urls')),
path('moderation/', include('moderation.urls')),
path('pay/', include('backend.pay.urls')),
path('contact/', include('backend.contact.urls')),
path('task/', include('backend.dc_task.urls')),
path('friends/', include('backend.followers.urls')),
path('groups/', include('backend.community.urls')),
path('', include("backend.pages.urls")),
path('google1ca7c2f55e09214b.html/',
lambda r: HttpResponse("google-site-verification: google1ca7c2f55e09214b.html",
                                content_type="text/plain")),  # HttpResponse takes content_type; the old mimetype kwarg no longer exists
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# urlpatterns += DS_url
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
path('robots.txt', TemplateView.as_view(template_name="robots.txt", content_type='text/plain')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls))] + urlpatterns
|
[
"l2maximum@mail.ru"
] |
l2maximum@mail.ru
|
02e1a00b37345e850dd03bb5a86d0c9ed8846a8c
|
6385e650e1112599b76ad508a12cfb221fe8a716
|
/susy/script/runbatch/filesHadder.py
|
7c5540bee16b29c121798bd40ae44da95c47ac8e
|
[] |
no_license
|
fanxia/ana2016
|
663aabedba3c8de9f747c2e4edfb74f879d47ad3
|
60e14f64778f69dab0008f775f8639527d9be917
|
refs/heads/master
| 2021-01-12T13:08:08.402117
| 2018-10-24T02:47:05
| 2018-10-24T02:47:05
| 72,113,815
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,405
|
py
|
#!/usr/bin/env python
#./filesHadder.py Mar08
import os, re
import commands
import sys
import math
date=sys.argv[1]
#MClist=['TTJets_DiLept','TTJets_SingleLeptFromT','TTJets_SingleLeptFromTbar']
#MClist=['TT','TTGJets','TTWJetsToLNu','TTWJetsToQQ','TTZToLLNuNu','TTZToQQ','W4JetsToLNu','DYJetsToLL','WGToLNuG','WW','WZ','ZGTo2LG','ZZ','ST_s-channel_4f_leptonDecays','ST_tW_antitop_5f_inclusiveDecays','ST_tW_top_5f_inclusiveDecays','ST_t-channel_antitop_4f_inclusiveDecays','ST_t-channel_top_4f_inclusiveDecays','W3JetsToLNu','W2JetsToLNu']
MClist=[]
#EleDatalist=['SingleEle_Run2016B_FebReminiAOD','SingleEle_Run2016C_FebReminiAOD','SingleEle_Run2016D_FebReminiAOD','SingleEle_Run2016E_FebReminiAOD','SingleEle_Run2016F_FebReminiAOD1','SingleEle_Run2016F_FebReminiAOD2','SingleEle_Run2016G_FebReminiAOD','SingleEle_Run2016H_FebReminiAODv2','SingleEle_Run2016H_FebReminiAODv3']
EleDatalist=[]
#MuDatalist=['SingleMu_Run2016B_FebReminiAOD','SingleMu_Run2016C_FebReminiAOD','SingleMu_Run2016D_FebReminiAOD','SingleMu_Run2016E_FebReminiAOD','SingleMu_Run2016F_FebReminiAOD1','SingleMu_Run2016F_FebReminiAOD2','SingleMu_Run2016G_FebReminiAOD','SingleMu_Run2016H_FebReminiAODv2','SingleMu_Run2016H_FebReminiAODv3']
#MuDatalist=['SingleMu_Run2016B_FebReminiAOD']
MuDatalist=['SingleMu_Run2016B_FebReminiAOD00','SingleMu_Run2016B_FebReminiAOD01','SingleMu_Run2016B_FebReminiAOD02','SingleMu_Run2016C_FebReminiAOD','SingleMu_Run2016D_FebReminiAOD','SingleMu_Run2016E_FebReminiAOD','SingleMu_Run2016F_FebReminiAOD1','SingleMu_Run2016F_FebReminiAOD2','SingleMu_Run2016G_FebReminiAOD00','SingleMu_Run2016G_FebReminiAOD01','SingleMu_Run2016H_FebReminiAODv200','SingleMu_Run2016H_FebReminiAODv201','SingleMu_Run2016H_FebReminiAODv3']
#MuDatalist=[]
# hadd step1 mc outputs and mv it to ntupleStore/
for mc in MClist:
os.system("hadd -k step1_{0}.root MC_Out_step1/{0}/ana_root{1}/step1*.root".format(mc,date))
os.system("mv step1_{0}.root ../../ntupleStore".format(mc))
#sys.exit()
for Eledata in EleDatalist:
os.system("hadd -k -f step1_{0}.root Data_Out_step1/{0}/ana_root{1}/step1*.root".format(Eledata,date))
os.system("mv step1_{0}.root ../../ntupleStore".format(Eledata))
#sys.exit()
for Mudata in MuDatalist:
os.system("hadd -k -f step1_{0}.root Data_Out_step1/{0}*/ana_root{1}/step1*.root".format(Mudata,date))
os.system("mv step1_{0}.root ../../ntupleStore".format(Mudata))
|
[
"fanxia08@gmail.com"
] |
fanxia08@gmail.com
|
8f291d75a4c4080d178001ad4f28b5f4b5f686a4
|
4a2d484c1cc7023cf984e936ea64f8b04b5a794b
|
/demo5.py
|
97ab5bc4a905e950480cf885aaaaec815f3ffcf6
|
[] |
no_license
|
zsu13579/pptgen
|
3466aef81fb59d101cf7c90950ddaae70d10fad1
|
2a6692a451b97b8f8c8386f8d3de83d751372d0e
|
refs/heads/master
| 2020-07-19T13:03:02.340435
| 2016-09-07T02:24:43
| 2016-09-07T02:24:43
| 66,757,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
from pptx import Presentation
from pptx.enum.shapes import MSO_SHAPE
from pptx.util import Inches
prs = Presentation()
title_only_slide_layout = prs.slide_layouts[5]
slide = prs.slides.add_slide(title_only_slide_layout)
shapes = slide.shapes
shapes.title.text = 'Adding an AutoShape'
left = Inches(0.93) # 0.93" centers this overall set of shapes
top = Inches(3.0)
width = Inches(1.75)
height = Inches(1.0)
shape = shapes.add_shape(MSO_SHAPE.PENTAGON, left, top, width, height)
shape.text = 'Step 1'
left = left + width - Inches(0.4)
width = Inches(2.0) # chevrons need more width for visual balance
for n in range(2, 6):
shape = shapes.add_shape(MSO_SHAPE.CHEVRON, left, top, width, height)
shape.text = 'Step %d' % n
left = left + width - Inches(0.4)
prs.save('test51.pptx')
|
[
"jacklvabcd@163.com"
] |
jacklvabcd@163.com
|
2296045aadeec7074e22751268bd354962d7a6e3
|
d9b0248186471079022d0db2af5f3f16847a2258
|
/board/models.py
|
ddc05a259e2a2d7e73c67e95e545e9259e5839b9
|
[] |
no_license
|
jungting20/-django-
|
1ec04f6b5ff89fd404d7a9d8782e8e55c9a389b9
|
9af46cf359990f8d82aaa91548a62eb86564465c
|
refs/heads/master
| 2021-05-15T02:57:11.623526
| 2017-10-07T09:37:09
| 2017-10-07T09:37:09
| 106,086,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
from django.db import models
from django.db.models.deletion import CASCADE
from django.db.models.fields import DateTimeField
from django.core.urlresolvers import reverse_lazy
from member.models import User
class Board(models.Model):
image = models.ImageField(upload_to='%Y/%m/%d/orig',blank=True,null=True)
author = models.ForeignKey(User,on_delete=models.CASCADE,)
title = models.CharField(max_length=100)
content = models.TextField(max_length=500,blank=True,null=True)
created_at = models.DateTimeField(auto_now_add=True)
def get_absolute_url(self):
url = reverse_lazy('detailboard',kwargs = {'pk':self.pk})
return url
def delete(self,*args,**kwargs):
self.image.delete()
super(Board,self).delete()
class BoardComment(models.Model):
author = models.ForeignKey(User,on_delete=models.CASCADE)
content = models.TextField(max_length=300)
board_id = models.ForeignKey(Board,on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
|
[
"jungting20@gmail.com"
] |
jungting20@gmail.com
|
33d359b45b72b589770d7f1a0a8508da682da219
|
e6200a978e64e4be068c4664050c82bc1f2dde8a
|
/address/lib/python2.7/site-packages/localflavor/pk/forms.py
|
700d42a23fd9fa6e67381c7ec6b478371d777bba
|
[] |
no_license
|
avs8/address
|
5979311445273954326cface402f60c2df2753c2
|
62f92355fc282a47e1ed39c743430d2142e6f3c0
|
refs/heads/master
| 2021-01-01T18:42:07.593555
| 2017-11-28T14:50:28
| 2017-11-28T14:50:28
| 39,541,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,970
|
py
|
"""Pakistani-specific Form helpers."""
from __future__ import unicode_literals
import re
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from localflavor.compat import EmptyValueCompatMixin
from localflavor.generic.forms import DeprecatedPhoneNumberFormFieldMixin
from .pk_states import STATE_CHOICES
POSTCODE_DIGITS_RE = re.compile(r'^(\d{5})$')
PHONE_DIGITS_RE = re.compile(r'^(\d{9,11})$')
class PKPostCodeField(RegexField):
"""
Pakistani post code field.
Assumed to be 5 digits.
"""
default_error_messages = {
'invalid': _('Enter a 5 digit postcode.'),
}
def __init__(self, *args, **kwargs):
super(PKPostCodeField, self).__init__(POSTCODE_DIGITS_RE, *args, **kwargs)
class PKPhoneNumberField(EmptyValueCompatMixin, CharField, DeprecatedPhoneNumberFormFieldMixin):
"""
A form field that validates input as an Pakistani phone number.
Valid numbers have nine to eleven digits.
"""
default_error_messages = {
'invalid': _('Phone numbers must contain 9, 10 or 11 digits.'),
}
def clean(self, value):
"""
Validate a phone number.
Strips parentheses, whitespace and hyphens.
"""
super(PKPhoneNumberField, self).clean(value)
if value in self.empty_values:
return self.empty_value
value = re.sub('(\(|\)|\s+|-)', '', force_text(value))
phone_match = PHONE_DIGITS_RE.search(value)
if phone_match:
return '%s' % phone_match.group(1)
raise ValidationError(self.error_messages['invalid'])
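    # For example, clean('(051) 123-4567') strips the punctuation to '0511234567',
    # which has ten digits and is therefore returned; values with fewer than nine or
    # more than eleven digits raise ValidationError.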
class PKStateSelect(Select):
"""A Select widget that uses a list of Pakistani states/territories as its choices."""
def __init__(self, attrs=None):
super(PKStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
|
[
"ajitavsingh_8@yahoo.com"
] |
ajitavsingh_8@yahoo.com
|
6a73711d13a7fda4beb2f22832630ed938e377e0
|
739f160655268e242d8daae10df2d858c7ce11b5
|
/webot/data.py
|
c7d7c393ad3d7873e8fb60e7d8e75b6a6d11ea34
|
[
"MIT"
] |
permissive
|
csqner/Webot
|
760f9e22dbd6ef5f208090e92fcf37737da65f2c
|
6bb7b3980fab34d9c03d27390a4a0acbd3d1372c
|
refs/heads/master
| 2022-04-24T00:18:10.521288
| 2020-04-29T07:43:24
| 2020-04-29T07:43:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,271
|
py
|
"""
Basic procedures
"""
API_target = "https://wx.qq.com"  # home page
API_target_login = "https://login.wx.qq.com"  # login
API_jsLogin = f"{API_target_login}/jslogin?appid=wx782c26e4c19acffb&redirect_uri={API_target}/cgi-bin/mmwebwx-bin/webwxnewloginpage&fun=new&lang=zh_CN"
API_qrcode = f"{API_target_login}/qrcode/"  # QR code
API_login = f"{API_target}/cgi-bin/mmwebwx-bin/login"
API_check_login = f"{API_target_login}/cgi-bin/mmwebwx-bin/login"
API_synccheck = "https://webpush.wx.qq.com/cgi-bin/mmwebwx-bin/synccheck"  # message monitoring (sync check)
API_webwxdownloadmedia = f"{API_target}/cgi-bin/mmwebwx-bin/webwxgetmedia"
API_webwxuploadmedia = f"{API_target}/cgi-bin/mmwebwx-bin/webwxuploadmedia"
API_webwxpreview = f"{API_target}/cgi-bin/mmwebwx-bin/webwxpreview"
API_webwxinit = f"{API_target}/cgi-bin/mmwebwx-bin/webwxinit"
API_webwxgetcontact = f"{API_target}/cgi-bin/mmwebwx-bin/webwxgetcontact"
API_webwxsync = f"{API_target}/cgi-bin/mmwebwx-bin/webwxsync"
API_webwxbatchgetcontact = f"{API_target}/cgi-bin/mmwebwx-bin/webwxbatchgetcontact"
API_webwxgeticon = f"{API_target}/cgi-bin/mmwebwx-bin/webwxgeticon"
API_webwxlogout = f"{API_target}/cgi-bin/mmwebwx-bin/webwxlogout"
"""
Message sending
"""
API_webwxsendmsg = f"{API_target}/cgi-bin/mmwebwx-bin/webwxsendmsg"
API_webwxsendmsgimg = f"{API_target}/cgi-bin/mmwebwx-bin/webwxsendmsgimg"
API_webwxsendmsgvedio = f"{API_target}/cgi-bin/mmwebwx-bin/webwxsendvideomsg"
API_webwxsendemoticon = f"{API_target}/cgi-bin/mmwebwx-bin/webwxsendemoticon"
API_webwxsendappmsg = f"{API_target}/cgi-bin/mmwebwx-bin/webwxsendappmsg"
"""
Message retrieval
"""
API_webwxgetheadimg = f"{API_target}/cgi-bin/mmwebwx-bin/webwxgetheadimg"
API_webwxgetmsgimg = f"{API_target}/cgi-bin/mmwebwx-bin/webwxgetmsgimg"
API_webwxgetmedia = f"{API_target}/cgi-bin/mmwebwx-bin/webwxgetmedia"
API_webwxgetvideo = f"{API_target}/cgi-bin/mmwebwx-bin/webwxgetvideo"
API_webwxgetvoice = f"{API_target}/cgi-bin/mmwebwx-bin/webwxgetvoice"
API_webwxupdatechatroom = f"{API_target}/cgi-bin/mmwebwx-bin/webwxupdatechatroom"
API_webwxcreatechatroom = f"{API_target}/cgi-bin/mmwebwx-bin/webwxcreatechatroom"
# get the msgid
API_webwxstatusnotify = f"{API_target}/cgi-bin/mmwebwx-bin/webwxstatusnotify"
API_webwxcheckurl = f"{API_target}/cgi-bin/mmwebwx-bin/webwxcheckurl"
API_webwxverifyuser = f"{API_target}/cgi-bin/mmwebwx-bin/webwxverifyuser"
API_webwxfeedback = f"{API_target}/cgi-bin/mmwebwx-bin/webwxsendfeedback"
API_webwxreport = f"{API_target}/cgi-bin/mmwebwx-bin/webwxstatreport"
API_webwxsearch = f"{API_target}/cgi-bin/mmwebwx-bin/webwxsearchcontact"
API_webwxoplog = f"{API_target}/cgi-bin/mmwebwx-bin/webwxoplog"
API_checkupload = f"{API_target}/cgi-bin/mmwebwx-bin/webwxcheckupload"
API_webwxrevokemsg = f"{API_target}/cgi-bin/mmwebwx-bin/webwxrevokemsg"
API_webwxpushloginurl = f"{API_target}/cgi-bin/mmwebwx-bin/webwxpushloginurl"
# -------------------------------------------------------------------------
oplogCmdId = {"TOPCONTACT": 3, "MODREMARKNAME": 2}
SP_CONTACT_FILE_HELPER = "filehelper"
SP_CONTACT_NEWSAPP = "newsapp"
SP_CONTACT_RECOMMEND_HELPER = "fmessage"
CONTACTFLAG_CONTACT = 1
CONTACTFLAG_CHATCONTACT = 2
CONTACTFLAG_CHATROOMCONTACT = 4
CONTACTFLAG_BLACKLISTCONTACT = 8
CONTACTFLAG_DOMAINCONTACT = 16
CONTACTFLAG_HIDECONTACT = 32
CONTACTFLAG_FAVOURCONTACT = 64
CONTACTFLAG_3RDAPPCONTACT = 128
CONTACTFLAG_SNSBLACKLISTCONTACT = 256
CONTACTFLAG_NOTIFYCLOSECONTACT = 512
CONTACTFLAG_TOPCONTACT = 2048
MM_USERATTRVERIFYFALG_BIZ = 1
MM_USERATTRVERIFYFALG_FAMOUS = 2
MM_USERATTRVERIFYFALG_BIZ_BIG = 4
MM_USERATTRVERIFYFALG_BIZ_BRAND = 8
MM_USERATTRVERIFYFALG_BIZ_VERIFIED = 16
MM_DATA_TEXT = 1
MM_DATA_HTML = 2
MM_DATA_IMG = 3
MM_DATA_PRIVATEMSG_TEXT = 11
MM_DATA_PRIVATEMSG_HTML = 12
MM_DATA_PRIVATEMSG_IMG = 13
MM_DATA_VOICEMSG = 34
MM_DATA_PUSHMAIL = 35
MM_DATA_QMSG = 36
MM_DATA_VERIFYMSG = 37
MM_DATA_PUSHSYSTEMMSG = 38
MM_DATA_QQLIXIANMSG_IMG = 39
MM_DATA_POSSIBLEFRIEND_MSG = 40
MM_DATA_SHARECARD = 42
MM_DATA_VIDEO = 43
MM_DATA_VIDEO_IPHONE_EXPORT = 44
MM_DATA_EMOJI = 47
MM_DATA_LOCATION = 48
MM_DATA_APPMSG = 49
MM_DATA_VOIPMSG = 50
MM_DATA_STATUSNOTIFY = 51
MM_DATA_VOIPNOTIFY = 52
MM_DATA_VOIPINVITE = 53
MM_DATA_MICROVIDEO = 62
MM_DATA_SYSNOTICE = 9999
MM_DATA_SYS = 1e4
MM_DATA_RECALLED = 10002
MSGTYPE_TEXT = 1
MSGTYPE_IMAGE = 3
MSGTYPE_VOICE = 34
MSGTYPE_VIDEO = 43
MSGTYPE_MICROVIDEO = 62
MSGTYPE_EMOTICON = 47
MSGTYPE_APP = 49
MSGTYPE_VOIPMSG = 50
MSGTYPE_VOIPNOTIFY = 52
MSGTYPE_VOIPINVITE = 53
MSGTYPE_LOCATION = 48
MSGTYPE_STATUSNOTIFY = 51
MSGTYPE_SYSNOTICE = 9999
MSGTYPE_POSSIBLEFRIEND_MSG = 40
MSGTYPE_VERIFYMSG = 37
MSGTYPE_SHARECARD = 42
MSGTYPE_SYS = 1e4
MSGTYPE_RECALLED = 10002
MSG_SEND_STATUS_READY = 0
MSG_SEND_STATUS_SENDING = 1
MSG_SEND_STATUS_SUCC = 2
MSG_SEND_STATUS_FAIL = 5
APPMSGTYPE_TEXT = 1
APPMSGTYPE_IMG = 2
APPMSGTYPE_AUDIO = 3
APPMSGTYPE_VIDEO = 4
APPMSGTYPE_URL = 5
APPMSGTYPE_ATTACH = 6
APPMSGTYPE_OPEN = 7
APPMSGTYPE_EMOJI = 8
APPMSGTYPE_VOICE_REMIND = 9
APPMSGTYPE_SCAN_GOOD = 10
APPMSGTYPE_GOOD = 13
APPMSGTYPE_EMOTION = 15
APPMSGTYPE_CARD_TICKET = 16
APPMSGTYPE_REALTIME_SHARE_LOCATION = 17
APPMSGTYPE_TRANSFERS = 2e3
APPMSGTYPE_RED_ENVELOPES = 2001
APPMSGTYPE_READER_TYPE = 100001
UPLOAD_MEDIA_TYPE_IMAGE = 1
UPLOAD_MEDIA_TYPE_VIDEO = 2
UPLOAD_MEDIA_TYPE_AUDIO = 3
UPLOAD_MEDIA_TYPE_ATTACHMENT = 4
PROFILE_BITFLAG_NOCHANGE = 0
PROFILE_BITFLAG_CHANGE = 190
CHATROOM_NOTIFY_OPEN = 1
CHATROOM_NOTIFY_CLOSE = 0
StatusNotifyCode_READED = 1
StatusNotifyCode_ENTER_SESSION = 2
StatusNotifyCode_INITED = 3
StatusNotifyCode_SYNC_CONV = 4
StatusNotifyCode_QUIT_SESSION = 5
VERIFYUSER_OPCODE_ADDCONTACT = 1
VERIFYUSER_OPCODE_SENDREQUEST = 2
VERIFYUSER_OPCODE_VERIFYOK = 3
VERIFYUSER_OPCODE_VERIFYREJECT = 4
VERIFYUSER_OPCODE_SENDERREPLY = 5
VERIFYUSER_OPCODE_RECVERREPLY = 6
ADDSCENE_PF_QQ = 4
ADDSCENE_PF_EMAIL = 5
ADDSCENE_PF_CONTACT = 6
ADDSCENE_PF_WEIXIN = 7
ADDSCENE_PF_GROUP = 8
ADDSCENE_PF_UNKNOWN = 9
ADDSCENE_PF_MOBILE = 10
ADDSCENE_PF_WEB = 33
TIMEOUT_SYNC_CHECK = 0
EMOJI_FLAG_GIF = 2
KEYCODE_BACKSPACE = 8
KEYCODE_ENTER = 13
KEYCODE_SHIFT = 16
KEYCODE_ESC = 27
KEYCODE_DELETE = 34
KEYCODE_ARROW_LEFT = 37
KEYCODE_ARROW_UP = 38
KEYCODE_ARROW_RIGHT = 39
KEYCODE_ARROW_DOWN = 40
KEYCODE_NUM2 = 50
KEYCODE_AT = 64
KEYCODE_NUM_ADD = 107
KEYCODE_NUM_MINUS = 109
KEYCODE_ADD = 187
KEYCODE_MINUS = 189
MM_NOTIFY_CLOSE = 0
MM_NOTIFY_OPEN = 1
MM_SOUND_CLOSE = 0
MM_SOUND_OPEN = 1
MM_SEND_FILE_STATUS_QUEUED = 0
MM_SEND_FILE_STATUS_SENDING = 1
MM_SEND_FILE_STATUS_SUCCESS = 2
MM_SEND_FILE_STATUS_FAIL = 3
MM_SEND_FILE_STATUS_CANCEL = 4
MM_EMOTICON_WEB = "_web"
# -------------------------------------------------------------------------
API_checktimeout = 25.04
API_checknums = 5
from webot.common import init_path
YOUR_NAME = "张三"  # placeholder display name ("Zhang San", the Chinese equivalent of "John Doe")
API_conf_path = init_path("extra/")
API_log_path = init_path(f"{API_conf_path}/log/")  # chat logs (markdown)
API_static_path = init_path(f"{API_conf_path}/static/")  # generated config files and live records
API_analysis_path = init_path(f"{API_conf_path}/analysis/")  # analysis results and exported data
API_media_path = init_path(f"{API_conf_path}/meidas/")  # media data
API_media_icon_path = init_path(f"{API_media_path}/icons/")  # avatars
API_meida_voice_path = init_path(f"{API_media_path}/voices/")  # voice messages
API_meida_image_path = init_path(f"{API_media_path}/images/")  # images
API_meida_emoji_path = init_path(f"{API_media_path}/emoji/")  # emoji
API_meida_video_path = init_path(f"{API_media_path}/videos/")  # videos
API_hotreload_file = f"{API_static_path}/wxbot.pkl"
API_qrcode_name = f"{API_static_path}/qrcode.jpg"
Webot_logger_format = "[%(asctime)s] >>> %(levelname)s %(name)s: %(message)s"
MSG_TYPES = {
1: "TEXT",
3: "IMAGE",
34: "VOICE",
43: "VIDEO",
62: "MICROVIDEO",
47: "EMOTICON",
49: "APP",
50: "VOIPMSG",
52: "VOIPNOTIFY",
53: "VOIPINVITE",
48: "LOCATION",
51: "STATUSNOTIFY",
9999: "SYSNOTICE",
40: "POSSIBLEFRIEND_MSG",
37: "VERIFYMSG",
42: "SHARECARD",
10000: "SYS",
10002: "RECALLED",
}
|
[
"aoii103@126.com"
] |
aoii103@126.com
|
70054672c56c8027712f7a9016fddbe96debf347
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03085/s105066382.py
|
5ca148b3207680bf521622653edd87455c2d3d1c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
b = input()
if b=="A": print("T")
elif b=="T": print("A")
elif b=="C": print("G")
else: print("C")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
89cef373a6b91c3ff9293f001aefe1d221fb5795
|
e9cd5b115858c57a805553fd27f0fc831297b779
|
/weak_disentangle/tensorsketch/utils.py
|
8c288585e745c9dd64c2ef11d9d70d5bd62697cc
|
[
"Apache-2.0"
] |
permissive
|
dtch1997/disentangle-gen
|
516fde31b360ca68bd35d77dddf9ee0f4ce3432e
|
9c50dcb09063db018aa0090a564f96b798125a2f
|
refs/heads/master
| 2023-04-06T04:52:18.349321
| 2019-12-06T08:42:36
| 2019-12-06T08:42:36
| 220,395,617
| 0
| 0
|
Apache-2.0
| 2023-03-24T23:31:47
| 2019-11-08T05:48:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,724
|
py
|
# coding=utf-8
# Copyright 2019 The Weak Disentangle Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tensorsketch utilities.
"""
import numpy as np
import tensorflow as tf
# String utilities
def count_leading_whitespace(string):
return len(string) - len(string.lstrip(" "))
def shorten(string, num_lines=4):
strings = string.split("\n")
if len(strings) <= num_lines:
return string
head = strings[:num_lines - 2]
mid = " " * count_leading_whitespace(strings[num_lines - 2]) + "...,"
tail = strings[-1]
return "\n".join(head + [mid, tail])
def indent(string, spaces=4):
strings = string.split("\n")
return "\n".join([" " * spaces + string for string in strings])
# Tensor utilities
def pack(x):
if isinstance(x, tuple):
return x
else:
return (x,)
def shapes_to_zeros(*maybe_typed_shapes):
tensors = []
for maybe_typed_shape in maybe_typed_shapes:
if elem_isinstance(maybe_typed_shape, int):
tensors.append(tf.zeros(maybe_typed_shape))
else:
shape, dtype = maybe_typed_shape
tensors.append(tf.zeros(shape, dtype))
return tuple(tensors)
# List utilities
def elem_isinstance(lst, cls):
return all([isinstance(x, cls) for x in lst])
# Layer utilities
def compute_fan(kernel):
shape = kernel.shape
receptive_field = np.prod(kernel.shape[:-2]) # returns 1 if kernel is 2D
fan_in = int(receptive_field * shape[-2])
fan_out = int(receptive_field * shape[-1])
return fan_in, fan_out
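# For example, a conv kernel of shape (3, 3, 16, 32) has receptive_field = 9, so
# fan_in = 144 and fan_out = 288; a dense kernel of shape (16, 32) has
# receptive_field = 1, giving fan_in = 16 and fan_out = 32.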
def compute_out_dims(in_dims, kernel_size, stride,
padding, output_padding,
dilation):
"""Computes the output dimensions of convolution.
The formulas below are based on what Keras does.
Args:
in_dims: number of input dimensions.
kernel_size: size of kernel.
stride: size of stride.
padding: amount of padding on both ends of input.
output_padding: padding adjustment for disambiguating out_dims.
dilation: amount of dilation for convolution.
Returns:
The computed value of output dimensions.
"""
kernel_size = (kernel_size - 1) * dilation + 1
if output_padding is None:
if padding == "same":
out_dims = in_dims * stride
elif padding == "valid":
out_dims = in_dims * stride + max(kernel_size - stride, 0)
else:
if padding == "same":
out_dims = ((in_dims - 1) * stride + output_padding)
elif padding == "valid":
out_dims = ((in_dims - 1) * stride + kernel_size + output_padding)
return out_dims
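# For example, with in_dims=8, kernel_size=3, stride=2, dilation=1 and
# output_padding=None, "same" padding gives out_dims = 8 * 2 = 16, while "valid"
# padding gives out_dims = 8 * 2 + max(3 - 2, 0) = 17.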
# Tensor utilities
def assign_moving_average(target, value, momentum):
target.assign(momentum * target + (1 - momentum) * value)
# tf.function utilities
class Function(object):
"""A python function wrapper to support tf.function with resetting.
"""
def __init__(self, python_function):
self.tf_function = tf.function(python_function)
self.python_function = python_function
def reset(self):
self.tf_function = tf.function(self.python_function)
def __call__(self, *args, **kwargs):
return self.tf_function(*args, **kwargs)
def advanced_function(function):
return Function(function)
def reset_tf_function(tf_function):
return tf.function(tf_function.python_function)
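# Usage sketch (the function name `square` is hypothetical): decorating with
# @advanced_function wraps a python function in Function, so its tf.function trace
# can be discarded and rebuilt later:
#
#     @advanced_function
#     def square(x):
#         return x * x
#
#     square(tf.constant(2.0))   # traced and run as a tf.function
#     square.reset()             # next call re-traces from the python function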
|
[
"dtch009@gmail.com"
] |
dtch009@gmail.com
|
e6bad036db699b1e75ec63b62ffcaf86a99693cf
|
3119bda86736a216b5847b9d00fd7f65ea7f8646
|
/MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/MHDprec.py
|
a6a6fc14980282f66177b6b5105afa481026324c
|
[
"MIT"
] |
permissive
|
wathen/UBC
|
5004f0c1c81d0ab931d3269d09b562b78553af59
|
35524f40028541a4d611d8c78574e4cf9ddc3278
|
refs/heads/master
| 2021-09-18T15:50:28.820698
| 2018-07-16T22:44:30
| 2018-07-16T22:44:30
| 56,339,239
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,873
|
py
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
from dolfin import tic, toc
import HiptmairSetup
import PETScIO as IO
import scipy.sparse as sp
import MatrixOperations as MO
import HiptmairSetup
class BaseMyPC(object):
def setup(self, pc):
pass
def reset(self, pc):
pass
def apply(self, pc, x, y):
raise NotImplementedError
def applyT(self, pc, x, y):
self.apply(pc, x, y)
def applyS(self, pc, x, y):
self.apply(pc, x, y)
def applySL(self, pc, x, y):
self.applyS(pc, x, y)
def applySR(self, pc, x, y):
self.applyS(pc, x, y)
def applyRich(self, pc, x, y, w, tols):
self.apply(pc, x, y)
class Matrix(object):
def __init__(self):
pass
def create(self, mat):
pass
def destroy(self, mat):
pass
class InnerOuterMAGNETICinverse(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,Options):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
if A.type == 'python':
self.Ct = A.getPythonContext().getMatrix("Ct")
self.Bt = A.getPythonContext().getMatrix("Bt")
else:
self.Ct = A.getSubMatrix(self.b_is,self.u_is)
self.Bt = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.r_is,self.b_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
# MO.StoreMatrix(B,"A")
# print FC.todense()
OptDB = PETSc.Options()
OptDB["pc_factor_mat_ordering_type"] = "rcm"
OptDB["pc_factor_mat_solver_package"] = "mumps"
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
OptDB = PETSc.Options()
kspMX.setOperators(self.AA,self.AA)
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
xb = bb.duplicate()
xxr = bb.duplicate()
self.Dt.multTranspose(xr,xxr)
self.kspMX.solve(bb-xxr,xb)
bu1 = x.getSubVector(self.u_is)
bu2 = bu1.duplicate()
bu4 = bu1.duplicate()
self.Bt.multTranspose(xp,bu2)
self.Ct.multTranspose(xb,bu4)
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4+bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, -xp.array,xb.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
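# Added note: apply() above performs one sweep of a block-triangular
# preconditioner for the coupled (u, p, b, r) MHD system: it first solves the
# scalar Laplacian for the multiplier r, then applies the PCD operator
# Qp^{-1} Fp Ap^{-1} to the pressure residual, then solves the shifted Maxwell
# block for b with the correction -Dt^T x_r, and finally solves the velocity
# block F x_u = b_u - Ct^T x_b + Bt^T x_p. The "approx" variant below is the
# same sweep with the direct Maxwell solve replaced by a Hiptmair multigrid
# application (HiptmairSetup.HiptmairApply).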
class InnerOuterMAGNETICapprox(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,Options):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
if A.type == 'python':
self.Ct = A.getPythonContext().getMatrix("Ct")
self.Bt = A.getPythonContext().getMatrix("Bt")
else:
self.Ct = A.getSubMatrix(self.b_is,self.u_is)
self.Bt = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.r_is,self.b_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
# MO.StoreMatrix(B,"A")
# print FC.todense()
#self.kspF.setType('preonly')
#self.kspF.getPC().setType('lu')
#self.kspF.setFromOptions()
#self.kspF.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
xb = bb.duplicate()
#self.kspMX.solve(bb,xb)
xxr = bb.duplicate()
self.Dt.multTranspose(xr,xxr)
xb, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, bb-xxr, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
bu1 = x.getSubVector(self.u_is)
bu2 = bu1.duplicate()
bu4 = bu1.duplicate()
self.Bt.multTranspose(xp,bu2)
self.Ct.multTranspose(xb,bu4)
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4+bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, -xp.array,xb.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
class P(Matrix):
def __init__(self, Fspace,P,Mass,L,F,M):
self.Fspace = Fspace
self.P = P
self.Mass = Mass
self.L = L
self.kspFp = F
self.M = M
# self.N = (n, n, n)
# self.F = zeros([n+2]*3, order='f')
def create(self, A):
self.IS = MO.IndexSet(self.Fspace)
self.F = self.P.getSubMatrix(self.IS[0],self.IS[0])
self.Bt = self.P.getSubMatrix(self.IS[0],self.IS[2])
self.Ct = self.P.getSubMatrix(self.IS[0],self.IS[1])
self.C = self.P.getSubMatrix(self.IS[1],self.IS[0])
self.A = self.P.getSubMatrix(self.IS[3],self.IS[3])
# ksp = PETSc.KSP()
# ksp.create(comm=PETSc.COMM_WORLD)
# pc = ksp.getPC()
# ksp.setType('preonly')
# pc.setType('hypre')
# ksp.max_it = 1
# ksp.setOperators(self.FF)
# self.ksp = ksp
print 13333
def mult(self, A, x, y):
print 'multi apply'
print 333
u = x.getSubVector(self.IS[0])
p = x.getSubVector(self.IS[2])
b = x.getSubVector(self.IS[1])
r = x.getSubVector(self.IS[3])
FQp = p.duplicate()
uOut = self.F*u+self.Bt*p+self.Ct*b
Qp =self.Mass*p
self.kspFp.solve(Qp,FQp)
pOut = -self.L*FQp
bOut = self.C*u+self.M*b
rOut = self.A*r
y.array = (np.concatenate([uOut.array, bOut.array, pOut.array, rOut.array]))
print "$$$$$$$/$$$$$$$$"
# print x.array
def multTranspose(self, A, x, y):
"y <- A' * x"
        self.mult(A, x, y)
# def getSubMatrix(self, isrow, iscol, submat=None):
# submat = self.P.get
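# Added note: the P class above is a matrix-free operator (a PETSc "python"
# matrix context) for the block preconditioner. Its mult() applies
#   u-block: F*u + Bt*p + Ct*b
#   b-block: C*u + M*b
#   p-block: -L * Fp^{-1} * (Qp*p)   (the PCD operator applied in product form)
#   r-block: A*r
# and returns the pieces concatenated in the (u, b, p, r) ordering produced by
# MO.IndexSet(self.Fspace).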
class ApproxInv(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,Options):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
self.Options = Options
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
if A.type == 'python':
self.Ct = A.getPythonContext().getMatrix("Ct")
self.Bt = A.getPythonContext().getMatrix("Bt")
else:
self.Ct = A.getSubMatrix(self.b_is,self.u_is)
self.Bt = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.r_is,self.b_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
# MO.StoreMatrix(B,"A")
# print FC.todense()
OptDB = PETSc.Options()
OptDB["pc_factor_mat_ordering_type"] = "rcm"
OptDB["pc_factor_mat_solver_package"] = "mumps"
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
OptDB = PETSc.Options()
kspMX.setOperators(self.AA,self.AA)
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
if self.Options == 'BT':
b = x.getSubVector(self.b_is)
Mxb = b.duplicate()
self.kspMX.solve(b,Mxb)
r = x.getSubVector(self.r_is)
Lr = r.duplicate()
self.kspScalar.solve(r, Lr)
DL = b.duplicate()
self.Dt.multTranspose(Lr,DL)
K = b.duplicate()
self.kspMX.solve(DL,K)
DM = r.duplicate()
self.Dt.mult(Mxb,DM)
E = r.duplicate()
self.kspScalar.solve(DM,E)
p = x.getSubVector(self.p_is)
Sp2 = p.duplicate()
Sp3 = p.duplicate()
Sp = p.duplicate()
self.kspA.solve(p,Sp2)
self.Fp.mult(Sp2,Sp3)
self.kspQ.solve(Sp3,Sp)
u = x.getSubVector(self.u_is)
Fu = u.duplicate()
Cb = u.duplicate()
Bp = u.duplicate()
self.Ct.multTranspose(Mxb,Cb)
self.Bt.multTranspose(Sp,Bp)
self.kspF.solve(u-Cb+Bp,Fu)
y.array = (np.concatenate([Fu.array, -Sp.array, Mxb.array+K.array,E.array]))
else:
u = x.getSubVector(self.u_is)
Fu = u.duplicate()
self.kspF.solve(u,Fu)
p = x.getSubVector(self.p_is)
Sp2 = p.duplicate()
Sp3 = p.duplicate()
Sp = p.duplicate()
self.kspA.solve(p,Sp2)
self.Fp.mult(Sp2,Sp3)
self.kspQ.solve(Sp3,Sp)
b = x.getSubVector(self.b_is)
Mxb = b.duplicate()
self.kspMX.solve(b,Mxb)
r = x.getSubVector(self.r_is)
Lr = r.duplicate()
self.kspScalar.solve(r, Lr)
if self.Options == 'p4':
Q = u.duplicate()
else:
Q1 = u.duplicate()
self.Bt.multTranspose(Sp,Q1)
Q = u.duplicate()
                self.kspF.solve(Q1, Q)
Y1 = u.duplicate()
self.Ct.multTranspose(Mxb,Y1)
Y = u.duplicate()
            self.kspF.solve(Y1, Y)
BF = p.duplicate()
self.Bt.mult(Fu,BF)
if self.Options == 'p3':
H = p.duplicate()
else:
H1 = p.duplicate()
H2 = p.duplicate()
H = p.duplicate()
self.kspA.solve(BF,H1)
self.Fp.mult(H1,H2)
self.kspQ.solve(H2,H)
if self.Options == 'p3':
J = p.duplicate()
else:
BY = p.duplicate()
self.Bt.mult(Fu,BY)
J1 = p.duplicate()
J2 = p.duplicate()
J = p.duplicate()
self.kspA.solve(BY,J1)
self.Fp.mult(J1,J2)
self.kspQ.solve(J2,J)
CF = b.duplicate()
self.Ct.mult(Fu,CF)
T = b.duplicate()
self.kspMX.solve(CF,T)
if self.Options == 'p4':
V = b.duplicate()
else:
CQ = b.duplicate()
self.Ct.mult(Q,CQ)
V = b.duplicate()
self.kspMX.solve(CQ,V)
DL = b.duplicate()
self.Dt.multTranspose(Lr,DL)
K = b.duplicate()
self.kspMX.solve(DL,K)
DM = r.duplicate()
self.Dt.mult(Mxb,DM)
E = r.duplicate()
self.kspScalar.solve(DM,E)
y.array = (np.concatenate([Fu.array+Q.array-Y.array, H.array-Sp.array-J.array, T.array+V.array+Mxb.array+K.array,E.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
class ApproxInvApprox(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,Options):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
self.Options = Options
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
if A.type == 'python':
self.Ct = A.getPythonContext().getMatrix("Ct")
self.Bt = A.getPythonContext().getMatrix("Bt")
else:
self.Ct = A.getSubMatrix(self.b_is,self.u_is)
self.Bt = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.r_is,self.b_is)
print "setup"
def apply(self, pc, x, y):
if self.Options == 'BT':
b = x.getSubVector(self.b_is)
Mxb = b.duplicate()
# self.kspMX.solve(b,Mxb)
Mxb, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, b, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
r = x.getSubVector(self.r_is)
Lr = r.duplicate()
self.kspScalar.solve(r, Lr)
DL = b.duplicate()
self.Dt.multTranspose(Lr,DL)
K = b.duplicate()
K, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, DL, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
DM = r.duplicate()
self.Dt.mult(Mxb,DM)
E = r.duplicate()
self.kspScalar.solve(DM,E)
p = x.getSubVector(self.p_is)
Sp2 = p.duplicate()
Sp3 = p.duplicate()
Sp = p.duplicate()
self.kspA.solve(p,Sp2)
self.Fp.mult(Sp2,Sp3)
self.kspQ.solve(Sp3,Sp)
u = x.getSubVector(self.u_is)
Fu = u.duplicate()
Cb = u.duplicate()
Bp = u.duplicate()
self.Ct.multTranspose(Mxb,Cb)
self.Bt.multTranspose(Sp,Bp)
self.kspF.solve(u-Cb+Bp,Fu)
y.array = (np.concatenate([Fu.array, -Sp.array, Mxb.array+K.array,E.array]))
else:
u = x.getSubVector(self.u_is)
Fu = u.duplicate()
self.kspF.solve(u,Fu)
p = x.getSubVector(self.p_is)
Sp2 = p.duplicate()
Sp3 = p.duplicate()
Sp = p.duplicate()
self.kspA.solve(p,Sp2)
self.Fp.mult(Sp2,Sp3)
self.kspQ.solve(Sp3,Sp)
b = x.getSubVector(self.b_is)
Mxb = b.duplicate()
Mxb, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, b, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
r = x.getSubVector(self.r_is)
Lr = r.duplicate()
self.kspScalar.solve(r, Lr)
if self.Options == 'p4':
Q = u.duplicate()
else:
Q1 = u.duplicate()
self.Bt.multTranspose(Sp,Q1)
Q = u.duplicate()
                self.kspF.solve(Q1, Q)
Y1 = u.duplicate()
self.Ct.multTranspose(Mxb,Y1)
Y = u.duplicate()
            self.kspF.solve(Y1, Y)
BF = p.duplicate()
self.Bt.mult(Fu,BF)
if self.Options == 'p3':
H = p.duplicate()
else:
H1 = p.duplicate()
H2 = p.duplicate()
H = p.duplicate()
self.kspA.solve(BF,H1)
self.Fp.mult(H1,H2)
self.kspQ.solve(H2,H)
BY = p.duplicate()
self.Bt.mult(Fu,BY)
if self.Options == 'p3':
J = p.duplicate()
else:
J1 = p.duplicate()
J2 = p.duplicate()
J = p.duplicate()
self.kspA.solve(BY,J1)
self.Fp.mult(J1,J2)
self.kspQ.solve(J2,J)
CF = b.duplicate()
self.Ct.mult(Fu,CF)
T, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, CF, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
if self.Options == 'p4':
V = b.duplicate()
else:
CQ = b.duplicate()
self.Ct.mult(Q,CQ)
V, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, CQ, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
DL = b.duplicate()
self.Dt.multTranspose(Lr,DL)
K = b.duplicate()
K, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, DL, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
DM = r.duplicate()
self.Dt.mult(Mxb,DM)
E = r.duplicate()
self.kspScalar.solve(DM,E)
y.array = (np.concatenate([Fu.array+Q.array-Y.array, H.array-Sp.array-J.array, T.array+V.array+Mxb.array+K.array,E.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
# class ApproxBT(BaseMyPC):
# def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,Options):
# self.W = W
# self.kspF = kspF
# self.kspA = kspA
# self.kspQ = kspQ
# self.Fp = Fp
# self.kspScalar = kspScalar
# self.kspCGScalar = kspCGScalar
# self.kspVector = kspVector
# self.Options = Options
# # self.Bt = Bt
# self.HiptmairIts = 0
# self.CGits = 0
# # print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# # ss
# self.P = P
# self.G = G
# self.AA = A
# self.tol = Hiptmairtol
# self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
# self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
# self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
# self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
# self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
# self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
# def create(self, pc):
# print "Create"
# def setUp(self, pc):
# A, P = pc.getOperators()
# print A.size
# if A.type == 'python':
# self.Ct = A.getPythonContext().getMatrix("Ct")
# self.Bt = A.getPythonContext().getMatrix("Bt")
# else:
# self.Ct = A.getSubMatrix(self.b_is,self.u_is)
# self.Bt = A.getSubMatrix(self.p_is,self.u_is)
# self.Dt = A.getSubMatrix(self.r_is,self.b_is)
# # print self.Ct.view()
# #CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
# #print CFC.shape
# #CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
# #print CFC.size, self.AA.size
# # MO.StoreMatrix(B,"A")
# # print FC.todense()
# OptDB = PETSc.Options()
# OptDB["pc_factor_mat_ordering_type"] = "rcm"
# OptDB["pc_factor_mat_solver_package"] = "mumps"
# self.kspA.setType('preonly')
# self.kspA.getPC().setType('lu')
# self.kspA.setFromOptions()
# self.kspA.setPCSide(0)
# self.kspQ.setType('preonly')
# self.kspQ.getPC().setType('lu')
# self.kspQ.setFromOptions()
# self.kspQ.setPCSide(0)
# self.kspScalar.setType('preonly')
# self.kspScalar.getPC().setType('lu')
# self.kspScalar.setFromOptions()
# self.kspScalar.setPCSide(0)
# kspMX = PETSc.KSP()
# kspMX.create(comm=PETSc.COMM_WORLD)
# pcMX = kspMX.getPC()
# kspMX.setType('preonly')
# pcMX.setType('lu')
# OptDB = PETSc.Options()
# kspMX.setOperators(self.AA,self.AA)
# self.kspMX = kspMX
# # self.kspCGScalar.setType('preonly')
# # self.kspCGScalar.getPC().setType('lu')
# # self.kspCGScalar.setFromOptions()
# # self.kspCGScalar.setPCSide(0)
# self.kspVector.setType('preonly')
# self.kspVector.getPC().setType('lu')
# self.kspVector.setFromOptions()
# self.kspVector.setPCSide(0)
# print "setup"
# def apply(self, pc, x, y):
# def ITS(self):
# return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
def FluidSchur(A, b):
if len(A) == 1:
print "exact Schur complement"
x = b.duplicate()
A[0].solve(b, x)
return x
else:
print "PCD Schur complement"
x1 = b.duplicate()
x2 = b.duplicate()
x3 = b.duplicate()
A[0].solve(b,x1)
A[1].mult(x1,x2)
A[2].solve(x2,x3)
return x3
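# Added note: FluidSchur applies an (approximate) inverse of the fluid Schur
# complement. With a single operator it simply solves with that operator; with
# three operators [kspAp, Fp, kspQp] it applies the pressure
# convection-diffusion (PCD) approximation  S^{-1} b ~ Qp^{-1} Fp Ap^{-1} b.
# A minimal usage sketch (kspAp, Fp, kspQp, bp are placeholders for objects
# assembled elsewhere, not names defined in this file):
#
#   xp_exact = FluidSchur([kspS], bp)              # direct Schur solve
#   xp_pcd   = FluidSchur([kspAp, Fp, kspQp], bp)  # PCD approximation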
class ApproxInv(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,Options):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
if A.type == 'python':
self.Ct = A.getPythonContext().getMatrix("Ct")
self.Bt = A.getPythonContext().getMatrix("Bt")
else:
            # coupling blocks extracted with the same (row, column) orientation
            # as the other preconditioner classes: B ~ (p,u), C ~ (b,u), D ~ (r,b)
            self.C = A.getSubMatrix(self.b_is,self.u_is)
            self.B = A.getSubMatrix(self.p_is,self.u_is)
            self.D = A.getSubMatrix(self.r_is,self.b_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
# MO.StoreMatrix(B,"A")
# print FC.todense()
OptDB = PETSc.Options()
OptDB["pc_factor_mat_ordering_type"] = "rcm"
OptDB["pc_factor_mat_solver_package"] = "mumps"
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
OptDB = PETSc.Options()
kspMX.setOperators(self.AA,self.AA)
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
    def apply(self, pc, x, y):
        bu = x.getSubVector(self.u_is)
        invF = bu.duplicate()
        bp = x.getSubVector(self.p_is)
        bb = x.getSubVector(self.b_is)
        invMX = bb.duplicate()
        br = x.getSubVector(self.r_is)
        invL = br.duplicate()
        self.kspF.solve(bu, invF)
        invS = FluidSchur([self.kspA, self.Fp, self.kspQ], bp)
        self.kspMX.solve(bb, invMX)
        self.kspScalar.solve(br, invL)
        # outP = barF - invS - Schur(B*F(C'*invMx));
        # outU = invF - F(B'*barF) + barS;
        xp1 = bp.duplicate()
        self.B.mult(invF, xp1)
        barF = FluidSchur([self.kspA, self.Fp, self.kspQ], xp1)
        xu1 = bu.duplicate()
        barS = bu.duplicate()
        self.B.multTranspose(invS, xu1)
        self.kspF.solve(xu1, barS)
        # outR = (L(D*invMx));
        xr1 = br.duplicate()
        outR = br.duplicate()
        self.D.mult(invMX, xr1)
        self.kspScalar.solve(xr1, outR)
        # outB = (Mx(C*barS) + invMx + Mx(D'*invL));
        xb1 = invMX.duplicate()
        xb2 = invMX.duplicate()
        xb3 = invMX.duplicate()
        xb4 = invMX.duplicate()
        self.D.multTranspose(invL, xb1)
        self.kspMX.solve(xb1, xb2)
        self.C.mult(barS, xb3)
        self.kspMX.solve(xb3, xb4)
        outB = xb4 + invMX + xb2
        xp1 = bu.duplicate()
        xp2 = bu.duplicate()
        xp3 = bp.duplicate()
        self.C.multTranspose(invMX, xp1)
        self.kspF.solve(xp1, xp2)
        self.B.mult(xp2, xp3)
        xp4 = FluidSchur([self.kspA, self.Fp, self.kspQ], xp3)
        outP = barF - invS - xp4
        xu1 = bu.duplicate()
        xu2 = bu.duplicate()
        self.B.multTranspose(barF, xu1)
        self.kspF.solve(xu1, xu2)
        outU = invF - xu2 + barS
        y.array = (np.concatenate([outU.array, outP.array, outB.array, outR.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
|
[
"mwathen@cs.ubc.ca"
] |
mwathen@cs.ubc.ca
|
d778743a397c2fe79bba785b1f4d109a09e3251c
|
223593dfc133e6fdc96f95773a3a7e235b00e637
|
/essentials/chatsocket.py
|
267b1dc414250cda379e3c6da7787af766f6cfe5
|
[] |
no_license
|
ronts2/chat
|
7a1daaea232074310e28f54720be49bf9612859d
|
c4ee61f155693b47b6d53575eabab20cb43443d2
|
refs/heads/master
| 2021-07-09T09:20:13.473097
| 2017-10-04T12:47:42
| 2017-10-04T12:47:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,138
|
py
|
"""
This module contains the ChatClient class, used for client-server communication.
The ChatClient follows the communication protocol: send size of data - then the data itself.
"""
import socket
import jsonpickle as pickle
from threading import Thread
from time import sleep
import file_handler
import messages
import protocols
MSG_LEN_SIZE = 10 # The size of the length of a message
# the default server ip address - the current computer
DEF_SERVER_IP = socket.gethostbyname(socket.gethostname())
# the default server port - the host's choice
DEF_SERVER_PORT = 9900
DEF_DATA_CHUNK_SIZE = 1048576
DEF_LISTEN = 5
CHUNK_SEND_WAIT = 0.1
class ChatSocket(socket.socket):
"""
The chat socket follows the communication protocol: send size of data - then the data itself
The chat socket contains the chat socket socket and the server's info
"""
def __init__(self, server_ip=DEF_SERVER_IP, port=DEF_SERVER_PORT, msg_len_size=MSG_LEN_SIZE,
data_chunk_size=DEF_DATA_CHUNK_SIZE, listen=DEF_LISTEN, _sock=None):
"""
The class constructor.
:param server_ip: IP of the server.
:param port: port of the server.
:param msg_len_size: the maximum number of digits representing data size.
:param data_chunk_size: the size of a data chunk (used to split sent file data)
"""
self.port = port
self.server_ip = server_ip
self.msg_len_size = msg_len_size
self.data_chunk_size = data_chunk_size
self.listen = listen
if _sock:
super(ChatSocket, self).__init__(_sock=_sock)
else:
super(ChatSocket, self).__init__()
self.open = False
def connect(self):
super(ChatSocket, self).connect((self.server_ip, self.port))
self.open = True
def initialize_server_socket(self):
"""
Initializes the server socket.
"""
self.bind((self.server_ip, self.port))
super(ChatSocket, self).listen(self.listen)
def accept(self):
"""
Accepts a client connection.
:return: client socket and address as returned by the socket.accept method.
"""
sock, address = super(ChatSocket, self).accept()
return ChatSocket(_sock=sock), address
def receive(self):
"""
Gathers data sent from the server
:return: message from the server or None if the server closed
"""
size = self._receive_all(MSG_LEN_SIZE)
if not size:
return ''
data = self._receive_all(int(size))
return data
def _receive_all(self, size):
"""
Receives data sent from the server until all data is received
:param size: the size of the data
:return: received data
"""
try:
data = self.recv(size)
while len(data) < size:
data += self.recv(size - len(data))
return data
except:
return ''
def receive_obj(self):
"""
Receives an object from the server.
:return: sent object.
"""
try:
return pickle.loads(self.receive())
except:
return ''
def send_str(self, msg):
"""
Sends a string
        :param msg: the message string
"""
self.sendall(str(len(msg)).zfill(MSG_LEN_SIZE))
self.sendall(msg)
def send_obj(self, obj):
"""
        Sends an object.
:param obj: an object.
"""
self.send_str(pickle.dumps(obj))
def send_msg(self, header, data):
"""
Sends a message.
:param header: the message's protocol header.
:param data: the message's data.
"""
self.send_obj(messages.Message(header, data))
def send_regular_msg(self, data):
"""
Sends a regular-type message.
:param data: the message's data
"""
self.send_msg(protocols.build_header(protocols.REGULAR), data)
def _send_chunks(self, chunks, path):
"""
Sends chunks of a file.
:param chunks: a collection of a file's data in chunks.
:param path: the file's path.
"""
for chunk in chunks:
self.send_msg(protocols.build_header(protocols.FILE_CHUNK, path), chunk)
sleep(CHUNK_SEND_WAIT)
self.send_msg(protocols.build_header(protocols.FILE_END, path), '')
def send_file(self, path):
"""
Sends a file.
:param path: a path of a file.
Name is necessary for instances where the receiver has no indication of the sender's identity.
"""
file_chunks = file_handler.generate_chunks(path, DEF_DATA_CHUNK_SIZE)
path = file_handler.GET_FILE_NAME(path)
sender = Thread(target=self._send_chunks, args=[file_chunks, path])
sender.start()
def close_sock(self):
"""
Closes the socket.
"""
try:
self.shutdown(socket.SHUT_RDWR) # Stop receiving/sending
except:
pass
self.close()
self.open = False
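# Added usage sketch (not part of the original module): every message is sent
# as a fixed-width ASCII size field of MSG_LEN_SIZE digits followed by the
# payload, so the receiver always knows how many bytes to expect. A minimal
# loop-back exchange with the class above might look like this:
#
#   server = ChatSocket()
#   server.initialize_server_socket()   # bind + listen on (DEF_SERVER_IP, 9900)
#   client = ChatSocket()
#   client.connect()
#   conn, addr = server.accept()
#   client.send_str('hello')            # wire bytes: '0000000005' + 'hello'
#   print conn.receive()                # -> 'hello'
#   client.close_sock(); conn.close_sock(); server.close_sock()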
|
[
"ron.tsip1@gmail.com"
] |
ron.tsip1@gmail.com
|
39cbec8d577289fe1474fea741e0e59411aca1c7
|
7d0adad278552c3e5027f06fc59e03c74709e3f4
|
/json_test.py
|
2811bb7ba943971a188448466d9d43f7c618f543
|
[] |
no_license
|
jimhorng/python-test
|
b6796392b6cec65e413c9dc36b4d3c41dbd6d17b
|
3549f6f992a9387d1d218c826b758729e05da04a
|
refs/heads/master
| 2020-05-19T16:56:17.261650
| 2015-03-19T06:26:43
| 2015-03-19T06:26:43
| 22,061,174
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
'''
Created on Feb 26, 2014
@author: jimhorng
'''
import json
from multiprocessing import Manager
d_proxy = Manager().dict()
d_proxy['test1'] = 123
d_proxy['test2'] = {'foo' : 123}
print type(d_proxy)
print type(d_proxy.items())
print d_proxy['test2']
print type(d_proxy['test2'])
print json.dumps(d_proxy['test2'])
print json.dumps(d_proxy.items())
if __name__ == '__main__':
pass
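# Added note: d_proxy is a multiprocessing DictProxy rather than a plain dict,
# so json.dumps(d_proxy) itself would raise a TypeError; serializing a value
# stored in it, its items(), or a plain copy works, e.g.
#
#   print json.dumps(dict(d_proxy))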
|
[
"jimhorng@qnap.com"
] |
jimhorng@qnap.com
|
000e84959aaa01493e19b1f31e423080e0da4188
|
6e99e7ee2ee3cc8c68e6df043293c7d2b614356d
|
/lib/gzip_assets.py
|
009ef35eafc5398ea328168aa1acc1f3b81fbc6b
|
[] |
no_license
|
sfchronicle/homelessness-map
|
68dd6bec1b6b7abbaa90850b21042982e60aaaad
|
aa009ee5774d53c27a54da6731832beb74e5dc22
|
refs/heads/master
| 2021-01-16T22:08:55.542077
| 2015-10-28T18:01:18
| 2015-10-28T18:01:18
| 44,569,838
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
#!/bin/env python
import os
import gzip
import shutil
class FakeTime:
def time(self):
return 1261130520.0
# Hack to override gzip's time implementation
# http://stackoverflow.com/a/264303/868724
gzip.time = FakeTime()
project_dir = 'homelessness'
shutil.rmtree(os.path.join(project_dir, 'gzip'), ignore_errors=True)
shutil.copytree(
os.path.join(project_dir, 'static'),
os.path.join(project_dir, 'gzip/static')
)
for path, dirs, files in os.walk(os.path.join(project_dir, 'gzip/static')):
for filename in files:
file_path = os.path.join(path, filename)
f_in = open(file_path, 'rb')
contents = f_in.readlines()
f_in.close()
f_out = gzip.open(file_path, 'wb')
f_out.writelines(contents)
f_out.close()
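# Added note: gzip stamps the current time into each file's header (the MTIME
# field), so two runs over identical inputs would otherwise produce different
# bytes. Overriding gzip.time with FakeTime pins that timestamp, which makes
# the gzipped assets byte-for-byte reproducible. A quick determinism check:
#
#   import hashlib
#   print hashlib.md5(open(file_path, 'rb').read()).hexdigest()
#   # the digest stays the same across runs for unchanged source files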
|
[
"aaron.colby.williams@gmail.com"
] |
aaron.colby.williams@gmail.com
|
0ef1c7f6f37da9f059d220e414cfbdf4b7a58513
|
345c14ae0f990841c0323b8347bda6a27236ced2
|
/apps/dialog15_jiangliziji.py
|
d33e5dc2ac88fa219abbb98fce9bc9a9eb07f794
|
[] |
no_license
|
wdc63/personal-RPG
|
03bdd7c6869a23f20ea3e403a2fe90b00249f881
|
c9d7db50eacf815ff44ce79f751737e195efc2d3
|
refs/heads/master
| 2020-05-21T16:44:52.575847
| 2016-12-11T16:52:40
| 2016-12-11T16:52:40
| 41,748,103
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,809
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dialog15_jiangliziji.ui'
#
# Created by: PyQt5 UI code generator 5.5
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(447, 352)
Dialog.setModal(True)
self.gridLayout = QtWidgets.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
self.pushButton = QtWidgets.QPushButton(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 3, 1, 1, 1)
self.listWidget = QtWidgets.QListWidget(Dialog)
self.listWidget.setObjectName("listWidget")
self.gridLayout.addWidget(self.listWidget, 1, 0, 1, 3)
self.pushButton_3 = QtWidgets.QPushButton(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())
self.pushButton_3.setSizePolicy(sizePolicy)
self.pushButton_3.setObjectName("pushButton_3")
self.gridLayout.addWidget(self.pushButton_3, 3, 2, 1, 1)
self.label = QtWidgets.QLabel(Dialog)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def dudang():
import pickle
path = open('D:/MyRPG/data.dat','rb')
global dianshu,nengli,zhuangbei,xiguangeverday,xiguanall,renwu,renwufinish,rizhi,rizhifinish,jiangli,jiangliget,yuanwang
dianshu,nengli,zhuangbei,xiguangeverday,xiguanall,renwu,renwufinish,rizhi,rizhifinish,jiangli,jiangliget,yuanwang = pickle.load(path)
path.close()
del path
dudang()
self.label_2.setText('娱乐点剩余:'+"<span style=\" font-size:10pt; font-weight:bold;color:#00df00;\">"+str(dianshu['d8'])+"</span>")
global count7
count7 = 1
self.listWidget.clear()
self.listWidget.setWordWrap(True)
for i in jiangli:
item = QtWidgets.QListWidgetItem()
value = '('+str(count7)+') '+i[0]+'(将消耗'+str(i[2])+'点娱乐点)'
item.setText(value)
item.setToolTip(i[1])
item.setCheckState(0)
self.listWidget.addItem(item)
count7 += 1
def queding():
import datetime
time = str(datetime.date.today().year)+'-'+str(datetime.date.today().month)+'-'+str(datetime.date.today().day)+' '+str(datetime.datetime.today().time().hour)+':'+str(datetime.datetime.today().time().minute)
global jiangli,jiangliget,dianshu,add
add = []
for i in range(self.listWidget.count()):
if int(self.listWidget.item(i).checkState()) == 2:
import copy
s = copy.deepcopy(jiangli[i])
s.insert(0,time)
jiangliget.insert(0,s)
dianshu['d8'] = dianshu['d8']-jiangli[i][2]
add.insert(0,s)
if dianshu['d8']<0:
self.pushButton.setText('娱乐点不足')
add = []
dudang()
return
else:
import pickle
path = open('D:/MyRPG/data.dat','wb')
pickle.dump((dianshu,nengli,zhuangbei,xiguangeverday,xiguanall,renwu,renwufinish,rizhi,rizhifinish,jiangli,jiangliget,yuanwang),path)
path.close()
del path
Dialog.destroy()
def tuichu():
Dialog.destroy()
def quedingchongzhi():
self.pushButton.setText('获得奖励')
fenshu = 0
for i in range(self.listWidget.count()):
if int(self.listWidget.item(i).checkState()) == 2:
fenshu = fenshu + jiangli[i][2]
if fenshu == 0:
self.label_3.setText('')
else:
self.label_3.setText('所选娱乐点:'+"<span style=\" font-size:10pt; font-weight:bold;color:#ff0000;\">"+str(fenshu)+"</span>")
self.pushButton.clicked.connect(queding)
self.pushButton_3.clicked.connect(tuichu)
self.listWidget.itemPressed.connect(quedingchongzhi)
self.listWidget.itemClicked.connect(quedingchongzhi)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "奖励自己"))
self.pushButton.setText(_translate("Dialog", "获得奖励"))
self.pushButton_3.setText(_translate("Dialog", "取消"))
self.label.setText(_translate("Dialog", "所有奖励列表"))
# import sys
# app = QtWidgets.QApplication(sys.argv)
# Dialog = QtWidgets.QDialog()
# ui = Ui_Dialog()
# ui.setupUi(Dialog)
# Dialog.show()
# sys.exit(app.exec_())
|
[
"214801017@qq.com"
] |
214801017@qq.com
|
fe854a3a397e4ad98c259bae3d3b8c85e54672c7
|
dda40d877091952a016409d9ae104e2e5568f4b6
|
/blog/migrations/0001_initial.py
|
58e203f76bf4d4f41ae0cbd9fc87863dd974f6c0
|
[] |
no_license
|
Ochika3310/my-first-blog
|
3e4130000ddbe6b3d40c6c2c819e79b9c03ccdd3
|
3919367bc416886cf3b55b9e252010ca90b6d7d2
|
refs/heads/master
| 2020-09-27T08:54:23.365165
| 2019-12-08T08:59:08
| 2019-12-08T08:59:08
| 226,478,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.2.6 on 2019-11-24 10:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"noanoa0g03292@gmail.com"
] |
noanoa0g03292@gmail.com
|
8fad9a8a9923ddda823b8b2ffdd4378d64dc08be
|
b0766b974cf8919b636a24d8998ebd5747bde8bd
|
/tests.py
|
ec200e53030cf43a52e7d436bae632c941be0324
|
[
"MIT"
] |
permissive
|
marciomazza/Duolingo
|
02b5ce3cd91a3a4742d220850140b98d02fad34d
|
74dd96fb7ee9f1b8b2afd9c166fb40e848d72095
|
refs/heads/master
| 2021-01-13T16:02:15.666025
| 2016-12-17T18:47:32
| 2016-12-17T18:47:32
| 76,740,430
| 0
| 0
| null | 2016-12-17T18:37:36
| 2016-12-17T18:37:36
| null |
UTF-8
|
Python
| false
| false
| 2,768
|
py
|
import os
import unittest
import duolingo
USERNAME = os.environ.get('DUOLINGO_USER', 'kartik')
PASSWORD = os.environ.get('DUOLINGO_PASSWORD')
class DuolingoTest(unittest.TestCase):
lingo = duolingo.Duolingo(USERNAME, password=PASSWORD)
def setUp(self):
self.lang = self.lingo.user_data.learning_language
def test_get_user_info(self):
response = self.lingo.get_user_info()
def test_get_settings(self):
response = self.lingo.get_settings()
def test_get_languages(self):
response = self.lingo.get_languages(abbreviations=False)
response = self.lingo.get_languages(abbreviations=True)
def test_get_friends(self):
response = self.lingo.get_friends()
def test_get_calendar(self):
response = self.lingo.get_calendar()
response = self.lingo.get_calendar(self.lang)
def test_get_streak_info(self):
response = self.lingo.get_streak_info()
def test_get_certificates(self):
response = self.lingo.get_certificates()
def test_get_language_details(self):
response = self.lingo.get_language_details(self.lang)
def test_get_language_progress(self):
response = self.lingo.get_language_progress(self.lang)
def test_get_known_topics(self):
response = self.lingo.get_known_topics(self.lang)
def test_get_known_words(self):
response = self.lingo.get_known_words(self.lang)
def test_get_learned_skills(self):
response = self.lingo.get_learned_skills(self.lang)
def test_get_language_from_abbr(self):
response = self.lingo.get_language_from_abbr(self.lang)
def test_get_abbreviation_of(self):
response = self.lingo.get_abbreviation_of('portuguese')
def test_get_activity_stream(self):
response = self.lingo.get_activity_stream()
def test_get_translations(self):
response = self.lingo.get_translations('e')
response = self.lingo.get_translations('e', self.lang)
response = self.lingo.get_translations('e', self.lang, 'fr')
response = self.lingo.get_translations(['e', 'a'])
@unittest.skipIf(not PASSWORD, "You must have valid username/password")
def test_get_vocabulary(self):
response = self.lingo.get_vocabulary()
response = self.lingo.get_vocabulary(self.lang)
@unittest.skipIf(not PASSWORD, "You must have valid username/password")
def test_get_audio_url(self):
response = self.lingo.get_audio_url('o')
response = self.lingo.get_audio_url('o', self.lang)
@unittest.skipIf(not PASSWORD, "You must have valid username/password")
def test_get_related_words(self):
response = self.lingo.get_related_words('o')
if __name__ == '__main__':
unittest.main()
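# Added note on running the suite: credentials come from the environment, so a
# typical invocation looks like
#
#   DUOLINGO_USER=kartik DUOLINGO_PASSWORD=secret python tests.py
#
# When DUOLINGO_PASSWORD is unset, the tests marked with skipIf above (the
# vocabulary, audio URL and related-words calls) are skipped rather than failed.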
|
[
"montheanthony@hotmail.com"
] |
montheanthony@hotmail.com
|
246fa7fe510fafbeb202db62ebca8d435612f783
|
0ea4636495ffb83544819df977967c5c4f11347a
|
/flip_heap.py
|
22ac73c0b52cb78704e91ec9026f1632b9de9448
|
[
"MIT"
] |
permissive
|
fregulationn/SANM
|
eb823ae72bd3aac41c5179a320cfc937219b8fe0
|
7a2580893da32eefcab2d77138fda0f27068dd3c
|
refs/heads/master
| 2022-12-15T12:50:05.626346
| 2020-01-09T13:00:58
| 2020-01-09T13:00:58
| 232,237,658
| 1
| 2
|
MIT
| 2022-12-08T05:25:27
| 2020-01-07T03:49:37
|
Python
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
import cv2 as cv
from PIL import Image
im = Image.open("/home/junjie/Downloads/addd.jpg")
out = im.transpose(Image.FLIP_TOP_BOTTOM)
out.show()
|
[
"fregulationn@gmail.com"
] |
fregulationn@gmail.com
|
524c4d523fb8b8196bbb7bfd0a58ce7338f3040a
|
79ab25621fad01a72afe952cc2ce0d1d78be9651
|
/evaluate.py
|
d064606d01708a7d55c586318e0be5870e466eff
|
[
"Apache-2.0"
] |
permissive
|
juhongm999/hpf
|
598056e8787cbf52f01a26ed4755a93843c44592
|
5befc6f29411ad81c2f022fa9c6d36dc88629810
|
refs/heads/master
| 2022-10-06T17:24:59.856563
| 2022-09-14T00:55:07
| 2022-09-14T00:55:07
| 210,576,626
| 82
| 15
| null | 2022-09-14T00:54:52
| 2019-09-24T10:34:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,763
|
py
|
r"""Runs Hyperpixel Flow framework"""
import argparse
import datetime
import os
from torch.utils.data import DataLoader
import torch
from model import hpflow, geometry, evaluation, util
from data import download
def run(datapath, benchmark, backbone, thres, alpha, hyperpixel,
logpath, beamsearch, model=None, dataloader=None, visualize=False):
r"""Runs Hyperpixel Flow framework"""
# 1. Logging initialization
if not os.path.isdir('logs'):
os.mkdir('logs')
if not beamsearch:
cur_datetime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
logfile = os.path.join('logs', logpath + cur_datetime + '.log')
util.init_logger(logfile)
util.log_args(args)
if visualize: os.mkdir(logfile + 'vis')
# 2. Evaluation benchmark initialization
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if dataloader is None:
download.download_dataset(os.path.abspath(datapath), benchmark)
split = 'val' if beamsearch else 'test'
dset = download.load_dataset(benchmark, datapath, thres, device, split)
dataloader = DataLoader(dset, batch_size=1, num_workers=0)
# 3. Model initialization
if model is None:
model = hpflow.HyperpixelFlow(backbone, hyperpixel, benchmark, device)
else:
model.hyperpixel_ids = util.parse_hyperpixel(hyperpixel)
# 4. Evaluator initialization
evaluator = evaluation.Evaluator(benchmark, device)
for idx, data in enumerate(dataloader):
# a) Retrieve images and adjust their sizes to avoid large numbers of hyperpixels
data['src_img'], data['src_kps'], data['src_intratio'] = util.resize(data['src_img'], data['src_kps'][0])
data['trg_img'], data['trg_kps'], data['trg_intratio'] = util.resize(data['trg_img'], data['trg_kps'][0])
data['alpha'] = alpha
# b) Feed a pair of images to Hyperpixel Flow model
with torch.no_grad():
confidence_ts, src_box, trg_box = model(data['src_img'], data['trg_img'])
# c) Predict key-points & evaluate performance
prd_kps = geometry.predict_kps(src_box, trg_box, data['src_kps'], confidence_ts)
evaluator.evaluate(prd_kps, data)
# d) Log results
if not beamsearch:
evaluator.log_result(idx, data=data)
if visualize:
vispath = os.path.join(logfile + 'vis', '%03d_%s_%s' % (idx, data['src_imname'][0], data['trg_imname'][0]))
util.visualize_prediction(data['src_kps'].t().cpu(), prd_kps.t().cpu(),
data['src_img'], data['trg_img'], vispath)
if beamsearch:
return (sum(evaluator.eval_buf['pck']) / len(evaluator.eval_buf['pck'])) * 100.
else:
evaluator.log_result(len(dset), data=None, average=True)
if __name__ == '__main__':
# Argument parsing
parser = argparse.ArgumentParser(description='Hyperpixel Flow in pytorch')
parser.add_argument('--datapath', type=str, default='../Datasets_HPF')
parser.add_argument('--dataset', type=str, default='pfpascal')
parser.add_argument('--backbone', type=str, default='resnet101')
parser.add_argument('--thres', type=str, default='auto', choices=['auto', 'img', 'bbox'])
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('--hyperpixel', type=str, default='')
parser.add_argument('--logpath', type=str, default='')
parser.add_argument('--visualize', action='store_true')
args = parser.parse_args()
run(datapath=args.datapath, benchmark=args.dataset, backbone=args.backbone, thres=args.thres, alpha=args.alpha,
hyperpixel=args.hyperpixel, logpath=args.logpath, beamsearch=False, visualize=args.visualize)
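# Added example invocation (the hyperpixel layer indices are illustrative, not
# taken from this file): evaluate Hyperpixel Flow on PF-PASCAL with a
# ResNet-101 backbone; the --hyperpixel string is handed to the HyperpixelFlow
# constructor above.
#
#   python evaluate.py --dataset pfpascal --backbone resnet101 \
#       --thres auto --alpha 0.1 --hyperpixel '(2,17,21,22,25,26,28)' \
#       --logpath pfpascal_eval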
|
[
"juhongm999@gmail.com"
] |
juhongm999@gmail.com
|
cb48ef2c6152a119590df8e9d113d099a4b36092
|
6ed05164fc54018ac2b80bfbc9ce08ca49e90898
|
/ext14.py
|
5a1a7995a99abe9023c89e6cc282d5dc4b9347df
|
[] |
no_license
|
pengxie/python-learing
|
02fc99d2c8817619b7fae2676dd237a8f2761604
|
448bb8c628635be03d3dfcebe9bbe2bcd9cd5063
|
refs/heads/master
| 2016-09-06T07:40:40.274158
| 2014-08-12T09:18:51
| 2014-08-12T09:18:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
# -*- coding: utf-8 -*-
from sys import argv
script, user_name = argv  # the first variable is the script name, the second is the first argument
prompt = 'ubuntu@localhost: '
print type(prompt)
print "%s" % (script)
print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)
print "Where do you live %s" % user_name
lives = raw_input(prompt)
print "what kind of computer do you hava?"
computer = raw_input(prompt)
print """
Alright, so you said %r about liking me.
You live in %s. Not sure where that is.
And you have %r computer. Nice.
""" % (likes, lives, computer)
|
[
"pengxie@anjuke.com"
] |
pengxie@anjuke.com
|
7b57490969b42931c6bbb1861b27967bc327a16f
|
300eb733976a31d73a68ddf20d986ba6aceb6ef5
|
/OttBands5minFixedOtt/jessepickerdata/dnafiles/BEL-USDT 2021-09-11 2021-10-11.py
|
828a526524c78761e00c259cd9501042a9721776
|
[] |
no_license
|
ajthummar/jesse_strategies
|
f168ae455970bd91845807dd7b0346e77471db09
|
5d23b44f97006e6cecf8519a3951accbfde09fc7
|
refs/heads/master
| 2023-08-12T21:35:22.458840
| 2021-10-18T13:26:12
| 2021-10-18T13:26:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,868
|
py
|
dnas = [
['E^c[A', 58, 34, 10.18, 50, 10, -1.0, {'ott_len': 29, 'ott_percent': 219, 'ott_bw': 135, 'tps_qty_index': 81, 'max_risk': 31}],
['_W6,U', 40, 84, 9.31, 50, 14, 6.21, {'ott_len': 32, 'ott_percent': 210, 'ott_bw': 79, 'tps_qty_index': 6, 'max_risk': 43}],
['FVW*?', 50, 53, 22.75, 36, 11, -1.52, {'ott_len': 29, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['vNqn]', 81, 11, 12.65, 100, 4, 9.27, {'ott_len': 35, 'ott_percent': 199, 'ott_bw': 152, 'tps_qty_index': 111, 'max_risk': 49}],
['HPqWv', 64, 17, 16.65, 60, 5, 2.69, {'ott_len': 29, 'ott_percent': 201, 'ott_bw': 152, 'tps_qty_index': 74, 'max_risk': 64}],
['7Fpob', 66, 12, 12.15, 75, 4, 3.62, {'ott_len': 27, 'ott_percent': 189, 'ott_bw': 151, 'tps_qty_index': 112, 'max_risk': 52}],
['ui*5<', 44, 84, 12.12, 42, 14, 6.81, {'ott_len': 35, 'ott_percent': 232, 'ott_bw': 64, 'tps_qty_index': 21, 'max_risk': 28}],
['q9da]', 71, 14, 11.37, 75, 4, 3.13, {'ott_len': 34, 'ott_percent': 172, 'ott_bw': 136, 'tps_qty_index': 90, 'max_risk': 49}],
['o4,@X', 42, 98, 8.28, 45, 20, 5.45, {'ott_len': 34, 'ott_percent': 166, 'ott_bw': 66, 'tps_qty_index': 38, 'max_risk': 45}],
['?dpMr', 61, 26, 28.05, 50, 8, 2.43, {'ott_len': 28, 'ott_percent': 226, 'ott_bw': 151, 'tps_qty_index': 59, 'max_risk': 62}],
['3OWXC', 54, 57, 20.13, 63, 11, 5.57, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 32}],
['=bVNC', 46, 62, 22.8, 50, 12, -0.59, {'ott_len': 28, 'ott_percent': 224, 'ott_bw': 119, 'tps_qty_index': 60, 'max_risk': 32}],
['@XW*?', 50, 54, 25.83, 50, 10, 1.55, {'ott_len': 28, 'ott_percent': 211, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['MVsWv', 66, 18, 17.72, 60, 5, 4.17, {'ott_len': 30, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['LewDb', 76, 17, 19.15, 80, 5, 8.45, {'ott_len': 30, 'ott_percent': 227, 'ott_bw': 160, 'tps_qty_index': 44, 'max_risk': 52}],
['@_W*?', 44, 58, 22.34, 55, 9, 4.25, {'ott_len': 28, 'ott_percent': 220, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['=ptVt', 73, 26, 30.29, 50, 8, 1.89, {'ott_len': 28, 'ott_percent': 241, 'ott_bw': 156, 'tps_qty_index': 73, 'max_risk': 63}],
['@^W*?', 45, 57, 21.06, 55, 9, 4.26, {'ott_len': 28, 'ott_percent': 219, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['eVswv', 63, 19, 11.55, 100, 4, 5.34, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 125, 'max_risk': 64}],
['3OWXE', 55, 58, 20.61, 63, 11, 5.57, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 33}],
['XRV.a', 50, 54, 25.07, 58, 12, 1.52, {'ott_len': 31, 'ott_percent': 204, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['ePjWv', 68, 22, 19.21, 66, 6, 5.68, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 144, 'tps_qty_index': 74, 'max_risk': 64}],
['-VW*9', 57, 49, 23.19, 27, 11, -0.52, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 26}],
['YOR9c', 51, 60, 21.87, 58, 12, 2.39, {'ott_len': 31, 'ott_percent': 200, 'ott_bw': 114, 'tps_qty_index': 27, 'max_risk': 52}],
['@VW*F', 49, 55, 26.22, 45, 11, -0.99, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 34}],
['?VuWv', 62, 16, 15.34, 60, 5, 2.75, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 157, 'tps_qty_index': 74, 'max_risk': 64}],
['@cW*?', 47, 61, 22.93, 50, 12, 0.29, {'ott_len': 28, 'ott_percent': 225, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['hPmHf', 73, 19, 19.46, 75, 4, 4.96, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 147, 'tps_qty_index': 51, 'max_risk': 54}],
['CMNWv', 52, 71, 22.36, 58, 12, 4.3, {'ott_len': 28, 'ott_percent': 197, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['?VW.v', 49, 59, 25.96, 58, 12, 2.45, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 64}],
[':VWWv', 55, 61, 23.82, 58, 12, 3.32, {'ott_len': 27, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['7VWWv', 55, 61, 23.82, 58, 12, 3.32, {'ott_len': 27, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['@VW4?', 49, 55, 17.59, 45, 11, -1.73, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 19, 'max_risk': 30}],
['-VW.v', 49, 61, 23.86, 58, 12, 4.35, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 64}],
['ftUQf', 46, 66, 10.18, 58, 12, 3.51, {'ott_len': 33, 'ott_percent': 246, 'ott_bw': 117, 'tps_qty_index': 65, 'max_risk': 54}],
['-VW<v', 58, 60, 23.78, 58, 12, 3.9, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 32, 'max_risk': 64}],
['-VWMv', 50, 61, 23.08, 58, 12, 3.48, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 59, 'max_risk': 64}],
['-VWWu', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
[',VWWv', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['-VWWs', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 62}],
['?VW2v', 52, 59, 27.13, 58, 12, 2.6, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 16, 'max_risk': 64}],
['3OW?n', 54, 59, 24.5, 66, 12, 3.73, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 36, 'max_risk': 59}],
['3OW5n', 50, 59, 24.24, 66, 12, 4.05, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 21, 'max_risk': 59}],
['-SUNv', 51, 64, 24.47, 58, 12, 3.76, {'ott_len': 26, 'ott_percent': 205, 'ott_bw': 117, 'tps_qty_index': 60, 'max_risk': 64}],
['-VW=v', 55, 61, 21.99, 58, 12, 3.27, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 33, 'max_risk': 64}],
['eRNDv', 53, 67, 17.86, 53, 13, 3.08, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 44, 'max_risk': 64}],
['-VWWl', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 58}],
['-VWLv', 50, 60, 24.44, 58, 12, 2.84, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 57, 'max_risk': 64}],
['ePPW\\', 52, 63, 19.94, 63, 11, 4.8, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 48}],
['-VUWv', 53, 66, 19.51, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 117, 'tps_qty_index': 74, 'max_risk': 64}],
['WrVZ;', 49, 65, 9.97, 50, 10, -1.45, {'ott_len': 31, 'ott_percent': 244, 'ott_bw': 119, 'tps_qty_index': 79, 'max_risk': 27}],
['3fWHn', 56, 64, 27.28, 58, 12, 4.26, {'ott_len': 26, 'ott_percent': 229, 'ott_bw': 120, 'tps_qty_index': 51, 'max_risk': 59}],
['-VW;v', 56, 60, 21.81, 58, 12, 3.24, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 30, 'max_risk': 64}],
['`RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['cRNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['\\RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
[']RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['aRNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['^RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['_RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['-VW?v', 55, 61, 23.02, 58, 12, 3.04, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 36, 'max_risk': 64}],
['O=ITi', 49, 69, 21.32, 61, 13, 4.06, {'ott_len': 30, 'ott_percent': 177, 'ott_bw': 102, 'tps_qty_index': 70, 'max_risk': 56}],
['eRNWk', 52, 67, 17.52, 53, 13, 2.3, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 57}],
['@VW.?', 49, 55, 20.38, 45, 11, -0.98, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 30}],
['@VW*A', 49, 55, 25.8, 45, 11, -0.99, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 31}],
['eVkWv', 72, 22, 20.19, 66, 6, 5.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 145, 'tps_qty_index': 74, 'max_risk': 64}],
['g^VGt', 57, 61, 16.78, 63, 11, 5.52, {'ott_len': 33, 'ott_percent': 219, 'ott_bw': 119, 'tps_qty_index': 49, 'max_risk': 63}],
['@VW)?', 48, 54, 23.4, 45, 11, -1.08, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 2, 'max_risk': 30}],
['hPPHs', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 62}],
['ePPHt', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 63}],
['?^WWv', 53, 60, 21.94, 63, 11, 6.61, {'ott_len': 28, 'ott_percent': 219, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['fVPWv', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 64}],
['gVPWv', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 64}],
['eSNWd', 50, 67, 18.68, 53, 13, 2.22, {'ott_len': 33, 'ott_percent': 205, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 53}],
['eRQWv', 52, 63, 17.59, 63, 11, 4.81, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 112, 'tps_qty_index': 74, 'max_risk': 64}],
['[\\sta', 66, 18, 12.71, 80, 5, 5.61, {'ott_len': 31, 'ott_percent': 216, 'ott_bw': 155, 'tps_qty_index': 120, 'max_risk': 51}],
['-dW6n', 51, 64, 27.68, 58, 12, 5.23, {'ott_len': 26, 'ott_percent': 226, 'ott_bw': 120, 'tps_qty_index': 22, 'max_risk': 59}],
['3eWXn', 53, 64, 29.51, 58, 12, 4.39, {'ott_len': 26, 'ott_percent': 227, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 59}],
['@TW*?', 51, 56, 14.24, 45, 11, -1.0, {'ott_len': 28, 'ott_percent': 206, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
[':YY:_', 54, 59, 21.43, 58, 12, 3.52, {'ott_len': 27, 'ott_percent': 212, 'ott_bw': 122, 'tps_qty_index': 28, 'max_risk': 50}],
['ePPWb', 52, 63, 19.94, 63, 11, 4.8, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 52}],
['@VWF?', 54, 55, 19.17, 45, 11, -1.64, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 47, 'max_risk': 30}],
['-VWWa', 58, 60, 22.96, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 51}],
['QYRcn', 50, 65, 19.63, 58, 12, 3.49, {'ott_len': 30, 'ott_percent': 212, 'ott_bw': 114, 'tps_qty_index': 93, 'max_risk': 59}],
['-VWW^', 58, 60, 22.96, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 49}],
['ePRWv', 53, 60, 20.61, 63, 11, 4.2, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 114, 'tps_qty_index': 74, 'max_risk': 64}],
['eRNWZ', 52, 67, 17.52, 53, 13, 2.3, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 47}],
['ePPHa', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 51}],
['ePPH]', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 49}],
['eVPWc', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 52}],
['wVW*?', 48, 52, 15.52, 40, 10, -0.23, {'ott_len': 35, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['IVWWv', 51, 58, 22.46, 58, 12, 1.85, {'ott_len': 29, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['BVV.a', 50, 59, 27.82, 58, 12, 2.71, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['<VV.a', 50, 59, 27.82, 58, 12, 2.71, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['-VW*=', 55, 54, 29.83, 33, 12, -1.75, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 28}],
['[VsWv', 63, 19, 14.24, 75, 4, 6.76, {'ott_len': 31, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['WVsWv', 63, 19, 14.24, 75, 4, 6.76, {'ott_len': 31, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['t]bik', 57, 35, 9.33, 62, 8, 4.47, {'ott_len': 35, 'ott_percent': 217, 'ott_bw': 134, 'tps_qty_index': 103, 'max_risk': 57}],
['@VX*?', 50, 53, 24.04, 50, 10, 1.23, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 121, 'tps_qty_index': 3, 'max_risk': 30}],
['eEsWv', 66, 9, 10.3, 75, 4, 5.13, {'ott_len': 33, 'ott_percent': 187, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
]
|
[
"yunusseyhandede@gmail.com"
] |
yunusseyhandede@gmail.com
|
c9f037387ffa13eecb11fa24424adc949647dfe6
|
aa3062fc36e6721c30e149618562c85eb26dc046
|
/movieapi.py
|
01526b28172122c39058b343c61aafd03763d481
|
[] |
no_license
|
VolodymyrBor/movieAPI
|
9b5ab466d8a9ae7cdb785eeb495e697690f06514
|
967e5295088504c50de43e9259ebfce2bffd855b
|
refs/heads/master
| 2023-02-28T12:02:23.498605
| 2021-02-07T12:35:32
| 2021-02-07T12:35:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
import uvicorn
from fastapi import FastAPI
from api.shared import logger
from database import database, configs
logger.logger_configure()
log = logger.get_logger('Movie')
app = FastAPI()
@app.on_event('startup')
async def startup():
log.info('Setting up database...')
await database.setup(config=configs.MYSQL_CONFIG)
@app.on_event('shutdown')
async def shutdown():
log.info('Shutting down database...')
await database.shutdown()
if __name__ == '__main__':
uvicorn.run('movieapi:app', reload=True, port=8001)
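# Added note: `python movieapi.py` starts uvicorn on port 8001 with auto-reload;
# the startup/shutdown hooks open and close the MySQL-backed database configured
# in the local `database` package (configs.MYSQL_CONFIG). No routes are defined
# here yet, so the app only exposes FastAPI's default endpoints (e.g. /docs).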
|
[
"volodymyrb@fornova.net"
] |
volodymyrb@fornova.net
|
5a6f5abcee8c2309a8577d6bdbe01f6ac78ea2c8
|
e5d8f9629b8baf936ddae154f3d72cc3ef172397
|
/flight_control/booking/mocks.py
|
4b29237b6732b19c2671d531120cd5e7a42496b4
|
[
"MIT"
] |
permissive
|
kenware/flight-api
|
67fa3b923bbe54865c0fdc17bcb3a6f1765e4917
|
adfa6f320b6c5bd9830bfb7c3c947028acf39e23
|
refs/heads/staging
| 2022-12-14T15:49:31.373788
| 2019-06-16T20:14:20
| 2019-06-16T20:14:20
| 190,599,919
| 0
| 0
|
MIT
| 2022-12-08T01:46:31
| 2019-06-06T14:55:20
|
Python
|
UTF-8
|
Python
| false
| false
| 594
|
py
|
from datetime import datetime, timedelta
def get_date():
    # Tomorrow's date as YYYY-MM-DD; timedelta avoids invalid day numbers
    # at month boundaries and keeps the zero padding of strftime.
    return (datetime.today() + timedelta(days=1)).strftime('%Y-%m-%d')
flight_data = {
'name': 'emirate',
'tag': 'dgsfdygg'
}
booking_data = {
"flightSeat": "front",
"location": "USA",
"flightDate": get_date(),
}
booking_list_data = [{
"flightSeat": "front",
"location": "USA",
"flightDate": get_date(),
},
{
"flightSeat": "middle",
"location": "USA",
"flightDate": get_date(),
},
{
"flightSeat": "front",
"location": "USA",
"flightDate": get_date(),
}
]
|
[
"kenware"
] |
kenware
|
ea90ccd879f594f48de80bc9dfbc97f0e6d8c95b
|
275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc
|
/swagger_client/models/captcha_provider_enum.py
|
c58f04f1f540e205062cd6b82edabe5ba48ac130
|
[] |
no_license
|
cascadiarc/cyclos-python-client
|
8029ce07174f2fe92350a92dda9a60976b2bb6c2
|
a2e22a30e22944587293d51be2b8268bce808d70
|
refs/heads/main
| 2023-04-03T16:52:01.618444
| 2021-04-04T00:00:52
| 2021-04-04T00:00:52
| 354,419,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,775
|
py
|
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class CaptchaProviderEnum(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
INTERNAL = "internal"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, _configuration=None): # noqa: E501
"""CaptchaProviderEnum - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CaptchaProviderEnum, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CaptchaProviderEnum):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CaptchaProviderEnum):
return True
return self.to_dict() != other.to_dict()
|
[
"dan@leftcoastfs.com"
] |
dan@leftcoastfs.com
|
efc53235042f396368503795aa86f40f6af1a101
|
9f5c9adf472501392e8a8e066d5930abe55327ea
|
/settings/test.py
|
d6aaef78103b47fa0b2cb133c3697c3726602b63
|
[] |
no_license
|
kaj/kratsblog
|
b3af805b0eb89a64234fcc09f5ab6188d811f7f0
|
3b6f1b8123785568e54e52fafa0808fd541bc205
|
refs/heads/master
| 2022-10-26T20:46:37.165840
| 2022-10-04T16:08:02
| 2022-10-04T16:08:02
| 957,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from . import *
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/tygbittar.sqlite'
}
|
[
"kaj@kth.se"
] |
kaj@kth.se
|
67088f748695fee374d4a791bcb80841011ad87c
|
57e42bba49f071bfba6f54d241d1839c615b9e33
|
/execptions.py
|
569d3b0f086a7def9006605d79f147d813709562
|
[
"MIT"
] |
permissive
|
flifloo/MinecraftServerGUI
|
1452cdc329a7dfd0791a8468c98f7167c389e195
|
adb3d8932593398f5d84154345ced57096e2783b
|
refs/heads/master
| 2020-08-28T23:34:42.370444
| 2019-10-27T20:41:21
| 2019-10-27T20:41:21
| 217,854,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
class InvalidLogin(Exception):
pass
class InvalidURL(Exception):
pass
class InvalidMethod(Exception):
pass
class InvalidServerState(Exception):
pass
|
[
"flifloo@gmail.com"
] |
flifloo@gmail.com
|
9bf4756621fa261828be3e0e30be105fa1e8716f
|
571055077369585c3fdcd2394142a6406f4567ff
|
/storage.py
|
664684dbf6b174d42797a1b608bee351f72b645d
|
[
"MIT"
] |
permissive
|
DylanCheetah/android-file
|
cd76d445828974f20d9a615bf60ccd7464733def
|
19e91047bff2bcbfdee4b0e305a00723f586a695
|
refs/heads/main
| 2023-02-21T05:26:13.053945
| 2021-01-24T14:43:25
| 2021-01-24T14:43:25
| 332,369,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
"""External storage utilities for Android API 29+."""
import builtins
import os
from jnius import autoclass
#Globals
#===============================================================================
__version__ = "1.0.0"
__author__ = "Eric Snyder"
__license__ = "MIT"
_activity = autoclass("org.kivy.android.PythonActivity").mActivity
_external_storage_path = _activity.getExternalFilesDir(None).getPath()
#Functions
#===============================================================================
def get_external_storage_path():
"""Returns the external storage path for the current app."""
return _external_storage_path
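A minimal usage sketch, not part of the original module: it assumes the app runs under python-for-android/Kivy (so org.kivy.android.PythonActivity is available) and that this file is importable as storage; the helper name save_note and the file name note.txt are illustrative.
# Hypothetical caller, for illustration only.
import os
from storage import get_external_storage_path

def save_note(text):
    # App-specific external storage on API 29+ needs no extra runtime permission.
    path = os.path.join(get_external_storage_path(), "note.txt")
    with open(path, "w") as fh:
        fh.write(text)
    return path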
|
[
"dylan_the_cheetah@outlook.com"
] |
dylan_the_cheetah@outlook.com
|
17a4438c5f8065744f4f3e842051af12673c53db
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/benchmarks/tree-676.py
|
3e2f9cbced03452408ccc1647f0547e56c110f40
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
# Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
# Input parameters
n:int = 100
c:int = 4
# Data
t:Tree = None
i:int = 0
k:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [$Exp, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
b4da2baec3936324ef7366bd58c2f474fff0cf53
|
fd65851c7977176cfa69056ea5d63ca529e74271
|
/sdk/python/kfp/v2/compiler_cli_tests/test_data/pipeline_with_gcpc_types.py
|
e99aa8485b0fbdaa197a5c09d171cc92e2ce9478
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
NikeNano/pipelines
|
dad9f45267a7f4c495a30880dd6fe1570f26fa64
|
73804f8928ce671839d34800627b6d3ea9f820a7
|
refs/heads/master
| 2022-01-29T21:24:43.693120
| 2021-11-20T18:18:35
| 2021-11-20T18:18:35
| 221,051,451
| 1
| 1
|
Apache-2.0
| 2021-04-23T20:07:11
| 2019-11-11T19:11:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp.v2 import components
from kfp.v2.dsl import component, Input, Output
from kfp.v2 import compiler
from kfp.v2 import dsl
class VertexModel(dsl.Artifact):
TYPE_NAME = 'google.VertexModel'
producer_op = components.load_component_from_text("""
name: producer
outputs:
- {name: model, type: google.VertexModel}
implementation:
container:
image: dummy
command:
- cmd
args:
- {outputPath: model}
""")
@component
def consumer_op(model: Input[VertexModel]):
pass
@dsl.pipeline(name='pipeline-with-gcpc-types')
def my_pipeline():
consumer_op(model=producer_op().outputs['model'])
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
package_path=__file__.replace('.py', '.json'))
|
[
"noreply@github.com"
] |
NikeNano.noreply@github.com
|
1f4584aa70f9bce1613d6653570e7fc7c6d6136e
|
e77b8179db63a4c1655991f6875f6bd0a6d3b648
|
/backend/app.py
|
49986d9153959b1475b8f8768580afd6c07ea3c9
|
[] |
no_license
|
cptodd757/spotifun
|
81713316858d3f26e92fa567dc1af16ef89c92e5
|
9f6dac4eecd06dbe822b6903b9bd2fb4d072d5ea
|
refs/heads/master
| 2022-04-02T03:15:29.988996
| 2020-02-14T16:15:01
| 2020-02-14T16:15:01
| 198,119,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,917
|
py
|
from flask import Flask, jsonify, redirect, request
from flask_cors import CORS
import json
from bson import json_util
import re
import urllib
import requests
import base64
import pandas as pd
import numpy as np
import unidecode
from datetime import datetime
import sys
sys.path.append('./helpers')
import config
from get_token_helper import get_token_helper
from compile_liked_songs_helper import compile_liked_songs_helper
from create_playlist_helper import create_playlist_helper
from analyze_helper import analyze_helper
#BEFORE RUNNING: Ensure RUN_LOCALLY is set to the appropriate state in config.py
#RUN_LOCALLY = True
app = Flask(__name__)
CORS(app)
users = {}
artist_genres = {} #{artist_id:{"name":"bob smith","genres":["genre1","genre2","genre3"]}}
hostname = config.hostname
client_id = config.client_id
client_secret = config.client_secret
scope = config.scope
redirect_uri = config.redirect_uri
login_url = config.login_url
token_url = config.token_url
auth_str = config.auth_str
b64_auth_str = config.b64_auth_str
token_headers = config.token_headers
create_playlist_url = config.create_playlist_url
add_to_playlist_url = config.add_to_playlist_url
audio_features = config.audio_features
#Prompt the user to login to Spotify.
@app.route('/login', methods=['GET','POST'])
def login():
return redirect(login_url)
#With the authorization code granted by user's login to Spotify, obtain an API Key.
@app.route('/get_token', methods=['GET','POST'])
def get_token():
return jsonify(get_token_helper(request))
#Compile a DataFrame of a given user's liked songs.
@app.route('/compile_liked_songs',methods=['GET','POST'])
def compile_liked_songs():
data = json.loads(request.data)
if config.read_from_temp_csv:
users[data['uid']] = {"liked_songs":pd.read_csv('charlie_liked_songs_verbose.csv')}
elif data['uid'] not in users.keys():
df = compile_liked_songs_helper(data)
users[data['uid']] = {"liked_songs":df}
#print(users.keys(),'users.keys')
return {'hello':'world'}
#Create a playlist from user's liked songs, based on additional specified parameters.
@app.route('/create_playlist',methods=['GET','POST'])
def create_playlist():
data = json.loads(request.data)
df = users[data['uid']]["liked_songs"]
response = create_playlist_helper(data, df)
print(response)
return jsonify(response)
#A work in progress. Basic analysis of user's liked songs.
@app.route('/analyze', methods=['GET','POST'])
def analyze():
data = json.loads(request.data)
df = users[data['uid']]['liked_songs']
analyze_helper(data, df)
return 'analyze response'
#NOT USED. Recently played song data not expansive enough for practical use.
@app.route('/compile_recently_played',methods=['GET','POST'])
def compile_recently_played():
return {'hello':'world'}
if __name__ == '__main__':
app.run(host='0.0.0.0',port=4000)
|
[
"cpt8@duke.edu"
] |
cpt8@duke.edu
|
cbaacde4e88caa40782155db716adfec598caffb
|
8d353115f54b228e6d1dc12f32d2c590be78c23f
|
/node_modules/webpack-dev-server/node_modules/fsevents/build/config.gypi
|
26b24d29781ecc4452c8fd64840100e5843fa86d
|
[
"MIT"
] |
permissive
|
fernndzaky/FP_Cads
|
f6e34c654d316a806d4809294a5dddef0f834a74
|
71891923715d36101e923f9d677ad684d6050371
|
refs/heads/master
| 2023-02-10T05:14:05.143848
| 2021-01-06T13:09:19
| 2021-01-06T13:09:19
| 326,116,022
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,512
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"is_debug": 0,
"llvm_version": "0",
"napi_build_version": "4",
"node_byteorder": "little",
"node_code_cache": "yes",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "8.0",
"nodedir": "/Users/fernndzaky/.node-gyp/12.7.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/fernndzaky/.npm-init.js",
"userconfig": "/Users/fernndzaky/.npmrc",
"cidr": "",
"node_version": "12.7.0",
"user": "",
"save": "true",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"save_exact": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/fernndzaky/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.10.0 node/v12.7.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/1q/btmfy1mj02d6f172lgvjdvh40000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"fernandhadzaky@hotmail.com"
] |
fernandhadzaky@hotmail.com
|
cbce3ee71741adcf00706a9dc0b0fc7177d127f5
|
3711172b26851f4e7901ee49c5730f4b20d271c0
|
/linear_differential/toy_cipher.py
|
a436e6de2dba87bd39d056579c43352491ca9595
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mdgaxs/crypto_misc
|
49ad8a4d2a8b0ae170ea348499dcaa1108848382
|
f00714cc64a5276d95114deebfd6172742780fd0
|
refs/heads/master
| 2022-04-16T12:27:23.727009
| 2020-04-13T10:40:17
| 2020-04-13T10:40:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
P = [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15]
S = [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7]
Pinv = [P.index(i) for i in xrange(16)]
Sinv = [S.index(i) for i in xrange(16)]
def split4(x):
assert 0 <= x < 2**16
return [x & 0xf, (x >> 4) & 0xf, (x >> 8) & 0xf, (x >> 12) & 0xf]
def merge4(x):
assert len(x) == 4
assert all([0 <= t < 16 for t in x])
return x[0] + x[1] * 2**4 + x[2] * 2**8 + x[3] * 2**12
def split8(x):
assert 0 <= x < 2**16
return [x & 0xff, x >> 8]
def merge8(x):
assert len(x) == 2
assert all([0 <= t < 256 for t in x])
return x[0] + x[1] * 2**8
def split16_bits(x):
assert 0 <= x < 2**16
return map(int, format(x, '016b')[::-1])
def merge16_bits(x):
return int(''.join(map(str, x[::-1])), 2)
def Sbox(i):
return S[i]
def Pbox(i):
i = split16_bits(i)
o = split16_bits(0)
for j in xrange(16):
o[P[j]] = i[j]
return merge16_bits(o)
def SboxInv(i):
return Sinv[i]
def PboxInv(i):
i = split16_bits(i)
o = split16_bits(0)
for j in xrange(16):
o[Pinv[j]] = i[j]
return merge16_bits(o)
class ToyCipher(object):
def __init__(s, key, rounds=4):
assert 0 <= key < 2**16
s.k = key
s.r = rounds
def encrypt(s, m):
assert 0 <= m < 2**16
for r in xrange(s.r):
m = Pbox(merge4(map(Sbox, split4(m)))) ^ s.k
return m
def decrypt(s, c):
assert 0 <= c < 2**16
for r in xrange(s.r):
c = merge4(map(SboxInv, split4(PboxInv(c ^ s.k))))
return c
def main():
cipher = ToyCipher(0xdead)
m = 0x1234
c = cipher.encrypt(m)
m2 = cipher.decrypt(c)
print hex(c)
print m2 == m
if __name__ == '__main__':
main()
|
[
"shiho.elliptic@gmail.com"
] |
shiho.elliptic@gmail.com
|
169652375466e1e23d94c2247ca9ee3ab43a37d0
|
ec6a2406905d7743c4ab7ba0f1213fd0f32acb8b
|
/conftest.py
|
f6172bb1afb18641036094d50611e1abb434d4b4
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause"
] |
permissive
|
oii/ogreserver
|
4277c1da689911fa52e2486213c3c8281370c134
|
29ca97765fde949c60d33ee0e301f9fc11679eb9
|
refs/heads/master
| 2021-06-07T18:07:12.511077
| 2017-07-05T09:43:28
| 2017-07-07T10:13:52
| 96,296,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
# Please do not put fixtures into this file - add them to test/conftest.py
# This file exists in the project root to help py.test find our main app package
|
[
"github@mafro.net"
] |
github@mafro.net
|
7dc2feeef36ab07f2529109add4df40295cdace0
|
da90c21c325a532ad2663b1322f00f32edd5a109
|
/app/mod_api/controllers.py
|
5656c6373ec7b59efcfbc229e821989fcc7c1859
|
[] |
no_license
|
bdh1011/realmapia_server
|
5d1b7bc6b5450c16d162fae1a85d2ccfccd7916e
|
80044d3d0e1801ea0a36af9aa59f9ed1abf6dc6d
|
refs/heads/master
| 2016-08-11T21:46:34.029609
| 2015-11-17T19:18:26
| 2015-11-17T19:18:26
| 44,907,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34,652
|
py
|
# -*- coding: utf-8 -*-
from flask import current_app,Blueprint,render_template, flash,jsonify, session, url_for, g, request, redirect, make_response, Response, send_file
from app import db
from app import app
from app import redis
import hashlib, os, sys, random, re, json, ast
from functools import wraps
from datetime import date, time, datetime
import time as ptime
from flask.ext.login import login_user, logout_user, current_user, login_required
from sqlalchemy import or_, and_, desc, asc
from ..models import User, Follow, User_alert, Like, Comment, Post, Hashtag_to_post, Hashtag,\
Placetag_to_post, Placetag, Usertag_to_post, Group, Group_member, Push, Noti
from flask.ext.login import LoginManager, login_user, logout_user, current_user, login_required
import decorator
from flask_wtf.csrf import CsrfProtect
import base64
from werkzeug import secure_filename
from gcm import GCM
GCM_API_KEY = "AIzaSyDjsPRiKm9o6LqEOGYt5TFR7U6ry22Gvwc"
reg_ids = 'gcm_registered device'
registered_devices = set()
# from forms import LoginForm
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
ALLOWED_PHOTO_EXTENSIONS = set(['png','jpg','jpeg','gif'])
ALLOWED_MOVIE_EXTENSIONS = set(['avi','mp4','mpec','exo'])
api = Blueprint('api', __name__, url_prefix='/api')
base_url = 'http://52.192.0.214/api/'
def token_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
print 'token!',request.headers.get('Authorization')
try:
if request.headers.get('Authorization')[6:] == '1':
session['userid'] = 'admin'
print 'testing access'
pass
else:
token = request.headers.get('Authorization')
print 'token@',token
if token is None:
return jsonify({'error':'no token headers'}),400
token = token[6:]
if app.r.get(token) is None:
return jsonify({'error':'token invalid'}),400
print 'valid token'
session['userid'] = ast.literal_eval(app.r.get(token))['id']
if request.method == 'POST':
print request.json
return f(*args, **kwargs)
except Exception as e:
print e
return jsonify({'message':'token error'}),400
return decorated_function
#Deferred Request Callbacks
def after_this_request(f):
if not hasattr(g, 'after_request_callbacks'):
g.after_request_callbacks = []
g.after_request_callbacks.append(f)
return f
@app.after_request
def call_after_request_callbacks(response):
for callback in getattr(g, 'after_request_callbacks', ()):
callback(response)
return response
@token_required
def post_name():
name = request.json.get('name')
user = User.query.filter_by(id=session['userid']).first()
if not user:
        return jsonify({'message':'user not exist'}),400
    if name is None:
        return jsonify({'message':'needs name attribute'}),400
    user.name = name
    db.session.commit()
    return jsonify({'result':'success'})
@token_required
def post_pw():
pw = request.json.get('pw')
user = User.query.filter_by(id=session['userid']).first()
if not user:
        return jsonify({'message':'user not exist'}),400
if pw is None:
return jsonify({'message':'needs pw attribute'}),400
    user.hash_password(pw)
    db.session.commit()
token = user.generate_auth_token()
return jsonify({'result':{'token':token,'name':user.name,'profile_pic':base_url+'profile_pic/'+user.profile_pic if user.profile_pic is not None else None}})
@token_required
def post_profile_pic():
profile_pic = request.json.get('photo')
ext = request.json.get('ext')
user = User.query.filter_by(id=session['userid']).first()
if not user:
        return jsonify({'message':'user not exist'}),400
if profile_pic is None:
return jsonify({'message':'needs photo attribute'}),400
print profile_pic
if 'http' in profile_pic:
user.profile_pic = profile_pic
return jsonify({'result':'success'})
else:
data = base64.b64decode(profile_pic)
filepath = "./app/static/profile_pic/"+str(user.id)+"."+ext
#not exist
if not os.path.exists(filepath):
with open(filepath,"w") as photo_file:
photo_file.write(data)
file_dir, filename = os.path.split(filepath)
user.profile_pic = filename
db.session.commit()
#test
return jsonify({'result':{'profile_pic_path':base_url+'profile_pic/'+filename}})
def login():
if request.method=='POST':
login_id = request.json.get('id')
login_pw = request.json.get('pw')
user = User.query.filter_by(id=login_id).first()
if user is None:
return jsonify({'message':'user not exist'}),400
else:
print user.serialize
user.recent_login_timestamp = datetime.now()
db.session.commit()
try:
if not user.verify_password(login_pw):
raise ValueError('Could not find correct user!')
except:
return jsonify({'message':'id or pw is invalid'}),400
token = user.generate_auth_token()
print ptime.time()
now = int(ptime.time())
expires = now + (current_app.config['ONLINE_LAST_MINUTES'] * 600) + 10
p = app.r.pipeline()
if app.r.get(token) is None:
p.set(token,{'id':user.id, 'time':int(ptime.time())})
p.expireat(token, expires)
p.execute()
print 'app.r', ast.literal_eval(app.r.get(token))['id']
#redis.flushdb()
return jsonify({'result':{'token':token,'name':user.name,'profile_pic':base_url+'profile_pic/'+user.profile_pic if user.profile_pic is not None else None}})
@token_required
def token_login():
if request.method=='GET':
login_token = request.headers.get('Authorization')[6:]
user = User.query.filter_by(id=session['userid']).first()
if user is None:
return jsonify({'message':'user not exist'}),400
else:
print user.serialize
user.recent_login_timestamp = datetime.now()
db.session.commit()
return jsonify({'result':{'token':login_token,'name':user.name,'profile_pic':base_url+'profile_pic/'+user.profile_pic if user.profile_pic is not None else None}})
def register():
db.session.rollback()
register_id = request.json.get('id')
register_name = request.json.get('name')
register_pw = request.json.get('pw')
print 'id :', register_id
print 'pw :', register_pw
if register_id is None or register_pw is None:
return jsonify({'message':'missing arguments'}), 400
if User.query.filter_by(id=register_id).first() is not None:
return jsonify({'message':'existing user'}), 400
user = User(id=register_id, name=register_name)
user.hash_password(register_pw)
db.session.add(user)
db.session.commit()
g.user = user
token = user.generate_auth_token()
return jsonify({ 'result': {'token':token,'name':user.name}}), 200
# {'Location': url_for('get_user', id=user.username, _external=True)})
@token_required
def logout():
token = request.headers.get('Authorization')[6:]
app.r.delete(token)
return jsonify({'result':'success'})
#mobile api
@token_required
def get_user_list():
name = request.args.get('name')
if name is not None:
user_list = db.session.query(User).filter(User.name.contains(name)).all()
else:
user_list = User.query.order_by(User.id).all()
return jsonify({'result':[
{
'id':user.id,
'name':user.name,
'profile_pic':user.profile_pic,
'recent_login_timestamp':user.recent_login_timestamp,
'register_login_timestamp':user.register_timestamp
} for user in user_list]})
#mobile api
@token_required
def get_user(userid):
try:
user = User.query.filter_by(id=userid).first()
except:
return jsonify({'message':'unexpected exception'}),400
return jsonify({'result':user.serialize})
@token_required
def about_me():
my_info = User.query.filter_by(id=session['userid']).first()
if my_info is None:
return jsonify({'message':'login first'}),400
print my_info.serialize
return jsonify({'result':my_info.serialize})
@token_required
def get_my_posts():
return jsonify({'result':'hi'})
@token_required
def get_my_post(post_id):
return jsonify({'result':'hi'})
@token_required
def get_posts():
map_type=request.args.get('map_type')
group_id=request.args.get('group_id')
user_id=request.args.get('user_id')
lat=request.args.get('lat')
lng=request.args.get('lng')
level=request.args.get('level')
circle=request.args.get('circle_id')
if circle:
map_type='public'
get_posts_query = db.session.query(Post).filter(Post.id==circle)
else:
get_posts_query = db.session.query(Post).filter(Post.map_type==map_type)
if map_type=='group':
get_posts_query = get_posts_query.filter(Post.target_group==group_id)
if user_id is not None:
get_posts_query = get_posts_query.filter(Post.user_id==user_id)
if (lat is not None) and (lng is not None) and (level is not None):
pass #level calculate
posts_list = get_posts_query.all()
if posts_list is None:
return jsonify({'result':[]})
return jsonify({'result':[
{
'post_id': each_post.id,
'profile_pic': User.query.filter_by(id=each_post.user_id).first().profile_pic if (User.query.filter_by(id=each_post.user_id).first() is not None) else None,
'photo' : base_url+'photo/'+each_post.photo if (each_post.photo is not None) else None,
'video' : base_url+'video/'+each_post.video if (each_post.video is not None) else None,
'username':User.query.filter_by(id=each_post.user_id).first().name,
'timestamp':each_post.register_timestamp.strftime("%Y-%m-%d %H:%M:%S"),
'content':each_post.content,
'lat':each_post.lat,
'lng':each_post.lng,
'like_num':Like.query.filter_by(post_id=each_post.id).count(),
'comment_num':Comment.query.filter_by(post_id=each_post.id).count(),
'placetag':db.session.query(Placetag, Placetag_to_post).filter(Placetag_to_post.post_id==each_post.id).filter(Placetag.id==Placetag_to_post.placetag_id).with_entities(Placetag.content).first()[0],
'hashtag_list':[hashtag.Hashtag.content for hashtag in db.session.query(Hashtag, Hashtag_to_post ).filter(Hashtag_to_post.post_id==each_post.id).filter(Hashtag.id==Hashtag_to_post.hashtag_id).all()],
'usertag_list':[{'userid':user.id,'username':user.name} for user in db.session.query(User, Usertag_to_post ).filter(Usertag_to_post.post_id==each_post.id).filter(User.id==Usertag_to_post.user_id).with_entities(User).all()]
} for each_post in posts_list]})
@token_required
def get_circle():
center_lat=request.args.get('center_lat')
center_lng=request.args.get('center_lng')
level=request.args.get('level')
map_type=request.args.get('map_type')
group_id=request.args.get('group_id')
    if map_type is None or center_lat is None or center_lng is None or level is None:
return jsonify({'message':'parameter miss, needs center_lat, center_lng, level, map_type'}),400
get_circle_query = db.session.query(Post).filter(Post.map_type==map_type)
if map_type=='group':
get_circle_query = get_circle_query.filter(Post.target_group==group_id)
elif map_type=='follow':
pass
elif map_type=='private':
get_circle_query = get_circle_query.filter(Post.user_id==session['userid'])
#get_circle_query.filter(Post.lat.between(float(center_lat)-0.1,float(center_lat)+0.1 ))
#get_circle_query.filter(Post.lng.between(float(center_lng)-0.1,float(center_lng)+0.1 ))
posts_list = get_circle_query.all()
print posts_list
return jsonify({'result':[
{
'circle_id': each_post.id,
'center_lat':each_post.lat,
'center_lng':each_post.lng,
'post_num':1,
'radius': 30
} for each_post in posts_list]})
@token_required
def get_post(post_id):
post = Post.query.filter_by(id=post_id).first()
if post is None:
return jsonify({'message':'wrong post id'}),404
placetag = db.session.query(Placetag).filter(Placetag_to_post.post_id==post_id).filter(Placetag.id==Placetag_to_post.placetag_id).with_entities(Placetag.content).first()
if placetag is not None:
placetag = placetag[0]
hashtag_list = [hashtag.content for hashtag in db.session.query(Hashtag).filter(Hashtag_to_post.post_id==post_id).filter(Hashtag.id==Hashtag_to_post.hashtag_id).all()]
usertag_list = [{'userid':user.id,'username':user.name} for user in db.session.query(User).filter(Usertag_to_post.post_id==post_id).filter(User.id==Usertag_to_post.user_id).with_entities(User).all()]
photo = base_url+'photo/'+post.photo if (post.photo is not None) else None
video = base_url+'video/'+post.video if (post.video is not None) else None
return jsonify({'result':{
'userid':post.user_id,
'photo':photo,
'video':video,
'map_type': post.map_type,
'target_group':post.target_group,
'timestamp':post.register_timestamp.strftime("%Y-%m-%d %H:%M:%S"),
'content':post.content,
'like_num':Like.query.filter_by(post_id=post.id).count(),
'comment_num':Comment.query.filter_by(post_id=post.id).count(),
'lat':post.lat,
'lng':post.lng,
'placetag':placetag,
'hashtag_list':hashtag_list,
'usertag_list':usertag_list}})
@token_required
def get_synced_sns():
post_list = db.session.query(Post.sns).filter(Post.user_id==session['userid']).distinct(Post.sns)
return jsonify({'result':[post.sns for post in post_list if post.sns is not None]})
@token_required
def post_sns_post():
db.session.rollback()
print request.json
posts = request.json.get("posts")
if not posts:
return jsonify({'result':'posts key needs'}),400
for post_id, sns_post in posts.iteritems():
sns = sns_post.get("sns")
content = sns_post.get("content")
lat = sns_post.get("lat")
lng = sns_post.get("lng")
placetag_content = sns_post.get("placetag")
hashtag_list = sns_post.get("hashtag")
usertag_list = sns_post.get("usertag")
photo = sns_post.get("photo")
video = sns_post.get("video")
ext = sns_post.get("ext")
map_type = sns_post.get("map_type")
post = Post(user_id=session['userid'],lat=lat,lng=lng,content=content,map_type=map_type, sns=sns, photo=photo, video=video)
db.session.add(post)
db.session.commit()
#add placetag
if placetag_content is None:
pass
else:
placetag = Placetag.query.filter_by(content=placetag_content).first()
if placetag is None:
placetag = Placetag(content=placetag_content)
db.session.add(placetag)
db.session.commit()
#check if it works without commit
placetag_to_post = Placetag_to_post(post_id=post.id,placetag_id=placetag.id)
db.session.add(placetag_to_post)
db.session.commit()
placetag.update_placetaged_num()
db.session.commit()
#too many commit, how can I shrink it?
#add hashtag
if hashtag_list is None:
pass
else:
for each_hashtag in hashtag_list:
print 'each hashtag',each_hashtag
hashtag = Hashtag.query.filter_by(content=each_hashtag).first()
print 'hashtag',hashtag
if hashtag is None:
hashtag = Hashtag(content=each_hashtag)
db.session.add(hashtag)
db.session.commit()
#check if it works without commit
hashtag_to_post = Hashtag_to_post(post_id=post.id,hashtag_id=hashtag.id)
db.session.add(hashtag_to_post)
db.session.commit()
hashtag.update_hashtaged_num()
db.session.commit()
#too many commit, how can I shrink it?
return jsonify({'result':{'posts_num':len(posts)}})
@token_required
def post_post():
content = request.json.get("content")
lat = request.json.get("lat")
lng = request.json.get("lng")
placetag_content = request.json.get("placetag")
hashtag_list = request.json.get("hashtag")
usertag_list = request.json.get("usertag")
photo = request.json.get("photo")
ext = request.json.get("ext")
map_type = request.json.get("map_type")
print request.json
video = request.json.get("video")
#post_to = request.json.get("post_to")
post = Post(user_id=session['userid'],lat=lat,lng=lng,content=content,map_type=map_type)
db.session.add(post)
db.session.commit()
if photo is not None:
data = base64.b64decode(photo)
filepath = app.config['PHOTO_UPLOAD_FOLDER']+str(post.id)+"."+ext
#not exist
if not os.path.exists(filepath):
with open(filepath,"w") as photo_file:
photo_file.write(data)
file_dir, filename = os.path.split(filepath)
post.photo = filename
db.session.commit()
'''
with open(filepath,"r") as photo_file:
photo_file.read()
mp3_list.append(mp3_encoded)'''
if video is not None:
data = base64.b64decode(video)
filepath = app.config['VIDEO_UPLOAD_FOLDER']+str(post.id)+"."+ext
#not exist
if not os.path.exists(filepath):
with open(filepath,"w") as photo_file:
photo_file.write(data)
file_dir, filename = os.path.split(filepath)
post.video = filename
db.session.commit()
#add placetag
if placetag_content is None:
pass
else:
placetag = Placetag.query.filter_by(content=placetag_content).first()
if placetag is None:
placetag = Placetag(content=placetag_content)
db.session.add(placetag)
db.session.commit()
#check if it works without commit
placetag_to_post = Placetag_to_post(post_id=post.id,placetag_id=placetag.id)
db.session.add(placetag_to_post)
db.session.commit()
placetag.update_placetaged_num()
db.session.commit()
#too many commit, how can I shrink it?
#add hashtag
if hashtag_list is None:
pass
else:
for each_hashtag in hashtag_list:
print 'each hashtag',each_hashtag
hashtag = Hashtag.query.filter_by(content=each_hashtag).first()
print 'hashtag',hashtag
if hashtag is None:
hashtag = Hashtag(content=each_hashtag)
db.session.add(hashtag)
db.session.commit()
#check if it works without commit
hashtag_to_post = Hashtag_to_post(post_id=post.id,hashtag_id=hashtag.id)
db.session.add(hashtag_to_post)
db.session.commit()
placetag.update_placetaged_num()
db.session.commit()
#too many commit, how can I shrink it?
#add usertag
if usertag_list is None:
pass
else:
for usertag in usertag_list:
user = User.query.filter_by(id=usertag).first()
if user is None:
return jsonify({'message':'wrong usertag'}),400
usertag_to_post = Usertag_to_post(post_id=post.id,user_id=user.id)
db.session.add(usertag_to_post)
db.session.commit() #too many commit, how can I shrink it?
noti_post_taged(session['userid'],post.id,user.id)
print Post.query.filter_by(id=post.id).all()
return jsonify({'result':{'post_id':post.id}})
def get_profile_pic(filename):
try:
return send_file(app.config['PROFILE_PIC_DOWNLOAD_FOLDER']+filename )
except Exception as e:
return jsonify({'message':'no profile picture'}),404
def get_my_profile_pic():
profile_pic = User.query.filter_by(id=session['userid']).first().profile_pic
if profile_pic is not None:
return send_file(app.config['PROFILE_PIC_DOWNLOAD_FOLDER']+profile_pic)
return jsonify({'message':'no profile picture'}),404
def get_photo(filename):
root_dir = os.path.dirname(os.getcwd())
return send_file( app.config['PHOTO_DOWNLOAD_FOLDER']+filename)
def get_movie(filename):
return send_file(app.config['PHOTO_DOWNLOAD_FOLDER']+filename)
@token_required
def get_comments():
print 'get comment'
post_id = request.args.get('post_id')
print 'post id',post_id
user_id = request.args.get('user_id')
name = request.args.get('name')
get_comments_query = []
if post_id is not None:
get_comments_query.append(Comment.post_id==post_id)
if user_id is not None:
get_comments_query.append(Comment.user_id==user_id)
if name is not None:
get_comments_query.append(User.name.contains(name))
comments_list = db.session.query(Comment).outerjoin(User).filter(and_(
*get_comments_query)).order_by(Comment.id).all()
return jsonify({'result':[{
'post_id':comment.post_id,
'user_id':comment.user_id,
'name': User.query.filter_by(id=comment.user_id).first().name,
'profile_pic':User.query.filter_by(id=comment.user_id).first().profile_pic,
'content':comment.content,
'timestamp':comment.register_timestamp.strftime("%Y-%m-%d %H:%M:%S")} for comment in comments_list]})
@token_required
def post_comment():
postid = request.json.get('post_id')
content = request.json.get('content')
post = Post.query.filter_by(id=postid)
if post is None:
return jsonify({'message':'invalid post id'}),400
comment = Comment(user_id=session['userid'],post_id=postid,content=content)
db.session.add(comment)
db.session.commit()
noti_comment(session['userid'],postid)
return jsonify({'result':'success'})
@token_required
def get_follow():
from_user_id = request.args.get('from_user_id')
to_user_id = request.args.get('to_user_id')
if from_user_id is not None:
follow_list = Follow.query.filter_by(from_user_id=from_user_id).all()
return jsonify({'result': [follow.to_serialize for follow in follow_list]})
elif to_user_id is not None:
follow_list = Follow.query.filter_by(to_user_id=to_user_id).all()
return jsonify({'result': [follow.from_serialize for follow in follow_list]})
else:
return jsonify({'message':'parameter error'}),400
@token_required
def post_follow():
to_user_id = request.json.get('to_user_id')
if Follow.query.filter_by(from_user_id=session['userid'],to_user_id=to_user_id).first() is not None:
return jsonify({'message':'already following'}),400
follow = Follow(from_user_id=session['userid'],to_user_id=to_user_id)
db.session.add(follow)
db.session.commit()
noti_follow(session['userid'],to_user_id)
return jsonify({'result':'success'})
@token_required
def delete_follow():
to_user_id = request.args.get('to_user_id')
follow = Follow.query.filter_by(from_user_id=session['userid'],to_user_id=to_user_id).first()
db.session.delete(follow)
db.session.commit()
return jsonify({'result': 'success'})
@token_required
def get_alert():
return jsonify({'result':'hi'})
@token_required
def get_like():
user_id = request.args.get('user_id')
post_id = request.args.get('post_id')
if user_id is not None:
like_list = Like.query.filter_by(user_id=user_id).all()
if post_id is not None:
like_list = Like.query.filter_by(post_id=post_id).all()
return jsonify({'result':[ like.serialize for like in like_list ]})
@token_required
def get_hashtag(hashtag_query):
hashtag_list = db.session.query(Hashtag).filter(Hashtag.content.contains(hashtag_query)).all()
return jsonify({'result':[ hashtag.serialize for hashtag in hashtag_list ]})
@token_required
def get_placetag(placetag_query):
placetag_list = db.session.query(Placetag).filter(Placetag.content.contains(placetag_query)).all()
return jsonify({'result':[ placetag.serialize for placetag in placetag_list ]})
@token_required
def get_all_hashtag():
hashtag_list = db.session.query(Hashtag).all()
return jsonify({'result':[ hashtag.serialize for hashtag in hashtag_list ]})
@token_required
def get_all_placetag():
placetag_list = db.session.query(Placetag).all()
return jsonify({'result':[ placetag.serialize for placetag in placetag_list ]})
@token_required
def post_like():
post_id = request.json.get('post_id')
if Like.query.filter_by(user_id=session['userid'], post_id=post_id).first() is not None:
return jsonify({'message':'already like it'})
like = Like(user_id=session['userid'], post_id=post_id)
db.session.add(like)
db.session.commit()
noti_like(session['userid'],post_id)
return jsonify({'result':'success'})
@token_required
def delete_like():
user_id = request.args.get('user_id')
post_id = request.args.get('post_id')
like = Like.query.filter_by(user_id=user_id,post_id=post_id).first()
db.session.delete(like)
db.session.commit()
return jsonify({'result':'success'})
@token_required
def get_groups():
name = request.args.get('name')
member = request.args.get('member')
# print Group.query.filter_by(name=name).all()
get_groups_query = db.session.query(Group).join(Group_member).distinct(name)
# print get_groups_query.all()
if member is not None:
get_groups_query = get_groups_query.filter(Group_member.user_id==member).filter(Group.id==Group_member.group_id)
if name is not None:
get_groups_query = get_groups_query.filter(Group.id.contains(name))
group_list = get_groups_query.all()
print group_list
return jsonify({'result':[
{'name':group.id,
'members':[user.user_id for user in Group_member.query.filter_by(group_id=group.id).with_entities(Group_member.user_id).all()],
'privacy':group.privacy,
} for group in group_list ]})
@token_required
def get_group(group_id):
group= db.session.query(Group).join(Group_member).filter(Group.id==group_id).first()
if group:
return jsonify({'result':{'name':group.id,
        'members':[user.user_id for user in Group_member.query.filter_by(group_id=group.id).with_entities(Group_member.user_id).all()],
'privacy':group.privacy,
}})
else:
return jsonify({'message':'group not exists'}),400
@token_required
def post_group():
name = request.json.get('name')
if Group.query.filter_by(id=name).first():
return jsonify({'message':'already exists'}),400
members = request.json.get('members')
privacy = request.json.get('privacy')
if Group.query.filter_by(id=name).first() is not None:
return jsonify({'message':'group name already exist'}),400
group = Group(id=name, privacy=privacy)
db.session.add(group)
member = Group_member(role='manager',user_id=session['userid'],group_id=name)
db.session.add(member)
for each_member in members:
if each_member == session['userid']:
continue
member = Group_member(user_id=each_member, role='member',group_id=name)
db.session.add(member)
db.session.commit()
db.session.rollback()
return jsonify({'result':'success'})
@token_required
def invite_group_member(group_id):
member_list = request.json.get('members')
for each_member in member_list:
if Group_member.query.filter_by(group_id=group_id, user_id=each_member) is not None:
pass
else:
group_member = Group_member(group_id=group_id, user_id=each_member)
db.session.add(group_member)
db.session.commit()
return jsonify({'result':'success'})
@token_required
def delete_group():
group_id = request.args.get('group_id')
member_list = Group_member.query.filter_by(group_id = group_id).all()
for each_member in member_list:
db.session.delete(each_member)
    group = Group.query.filter_by(id=group_id).first()
db.session.delete(group)
db.session.commit()
return jsonify({'result':'success'})
@token_required
def post_reg_id():
reg_id = request.json.get('reg_id')
if reg_id:
push = Push.query.filter_by(id=reg_id).first()
if not push:
push = Push(id=reg_id, user_id=session['userid'])
db.session.add(push)
db.session.commit()
return jsonify({'result':'success'})
else:
return jsonify({'message':'need reg_id'}),404
@token_required
def delete_reg_id():
push = Push.query.filter_by(user_id=session['userid']).first()
if push:
db.session.delete(push)
db.session.commit()
return jsonify({'result':'success'})
else:
return jsonify({'message':'not registered user'}),400
@token_required
def test_push():
msg = request.args.get('msg')
return send_push(session['userid'], msg)
def send_push(user_id, msg):
user = User.query.filter_by(id=user_id).first()
if not user:
return jsonify({'message':'user not exist'}),400
push_list = Push.query.filter_by(user_id=user_id).all()
if push_list is None:
return jsonify({'message':'register first'}),400
url = 'https://gcm-http.googleapis.com/gcm/send'
if msg:
try:
gcm = GCM(GCM_API_KEY)
data = {'title':'MAPIA','message':msg}
ids = [push.id for push in push_list]
response = gcm.json_request(registration_ids=ids, data=data)
return jsonify({'result':str(response)})
except Exception as e:
print e
return jsonify({'message':'wrong register id'}),400
else:
return jsonify({'message':'msg parameter needs'}),400
def noti_like(user_from, post_id):
user_id = Post.query.filter_by(id=post_id).first().user_id
input_noti(user_from, 'like', user_id, post_id)
def noti_comment(user_from, post_id):
user_id = Post.query.filter_by(id=post_id).first().user_id
input_noti(user_from, 'comment', user_id, post_id)
def noti_follow(user_from, user_to):
input_noti(user_from, 'follow', user_to, None)
def noti_post_taged(user_from, post_id, user_to):
input_noti(user_from, 'tag', user_to, post_id)
def input_noti(user_from, noti_type, user_to, post_id):
noti = Noti(user_from=user_from, noti_type=noti_type,user_to=user_to, post_id=post_id)
db.session.add(noti)
db.session.commit()
if noti_type == 'like':
send_push(user_to, user_from + "님이 회원님의 게시글을 좋아합니다.")
elif noti_type == 'comment':
send_push(user_to, user_from + "님이 회원님의 글에 댓글을 달았습니다.")
elif noti_type == 'follow':
send_push(user_to, user_from + "님이 회원님을 Follow 하기 시작했습니다.")
elif noti_type == 'tag':
send_push(user_to, user_from + "님이 회원님을 게시글에 태그했습니다.")
else:
print 'noti type error'
@token_required
def get_noti():
noti_list = Noti.query.filter_by(user_to=session['userid']).all()
if not noti_list:
return jsonify({'result':[]})
return jsonify({'result':[
{'user_from':noti.user_from,
'user_to':noti.user_to,
'noti_type':noti.noti_type,
'post_id':noti.post_id,
'timestamp':noti.register_timestamp.strftime("%Y-%m-%d %H:%M:%S")
} for noti in noti_list]
})
@token_required
def get_noti_status():
user = User.query.filter_by(id=session['userid']).first()
if not user:
return jsonify({'message':'user not exists'}),400
return jsonify({'result':user.noti_flag})
@token_required
def activate_noti():
user = User.query.filter_by(id=session['userid']).first()
if not user:
return jsonify({'message':'user not exists'}),400
    user.noti_flag = True
    db.session.commit()
return jsonify({'result':'success'})
@token_required
def deactivate_noti():
user = User.query.filter_by(id=session['userid']).first()
if not user:
return jsonify({'message':'user not exists'}),400
    user.noti_flag = False
    db.session.commit()
return jsonify({'result':'success'})
api.add_url_rule('/users/register', 'register', register, methods=['POST'])
api.add_url_rule('/users/login', 'login', login, methods=['POST'])
api.add_url_rule('/users/login/token', 'token login', token_login, methods=['GET'])
api.add_url_rule('/users/logout', 'logout', logout, methods=['GET'])
api.add_url_rule('/users', 'get_user_list', get_user_list)
api.add_url_rule('/users/<userid>', 'get_user', get_user)
api.add_url_rule('/users/me', 'about me', about_me)
api.add_url_rule('/users/me/name', 'change name', post_name)
api.add_url_rule('/users/me/pw', 'change pw', post_pw)
api.add_url_rule('/users/me/profile_pic', 'post profile pic', post_profile_pic, methods=['POST'])
api.add_url_rule('/users/me/posts', 'get my posts', get_my_posts)
api.add_url_rule('/users/me/posts/<post_id>', 'get my post', get_my_post)
api.add_url_rule('/posts', 'get posts', get_posts, methods=['GET'])
api.add_url_rule('/posts/<post_id>', 'get post', get_post, methods=['GET'])
api.add_url_rule('/posts', 'post posts', post_post, methods=['POST'])
api.add_url_rule('/sns/posts', 'post sns posts', post_sns_post, methods=['POST'])
api.add_url_rule('/sns/sync', 'get synced sns', get_synced_sns, methods=['GET'])
api.add_url_rule('/circle', 'get cicles', get_circle, methods=['GET'])
api.add_url_rule('/profile_pic/<filename>','get profile_pic', get_profile_pic, methods=['GET'])
api.add_url_rule('/photo/<filename>','get photo', get_photo, methods=['GET'])
api.add_url_rule('/movie/<filename>','get movie', get_movie, methods=['GET'])
api.add_url_rule('/hashtag/<hashtag_query>','get hashtag', get_hashtag, methods=['GET'])
api.add_url_rule('/placetag/<placetag_query>','get placetag', get_placetag, methods=['GET'])
api.add_url_rule('/hashtag','get all hashtag', get_all_hashtag, methods=['GET'])
api.add_url_rule('/placetag','get all placetag', get_all_placetag, methods=['GET'])
api.add_url_rule('/comments', 'get comments', get_comments, methods=['GET'])
api.add_url_rule('/comments', 'post comments', post_comment, methods=['POST'])
api.add_url_rule('/follow', 'get following', get_follow, methods=['GET'])
api.add_url_rule('/follow', 'post following', post_follow, methods=['POST'])
api.add_url_rule('/follow', 'quit following', delete_follow, methods=['DELETE'])
api.add_url_rule('/alert', 'get alert', get_alert, methods=['GET'])
api.add_url_rule('/like', 'get like', get_like, methods=['GET'])
api.add_url_rule('/like', 'post like', post_like, methods=['POST'])
api.add_url_rule('/like', 'delete like', delete_like, methods=['DELETE'])
api.add_url_rule('/groups', 'get groups', get_groups, methods=['GET'])
api.add_url_rule('/groups/<group_id>', 'get group', get_group, methods=['GET'])
api.add_url_rule('/groups', 'post groups', post_group, methods=['POST'])
api.add_url_rule('/groups/<group_id>/members', 'invite group member', invite_group_member, methods=['POST'])
api.add_url_rule('/groups/<group_id>', 'delete group', delete_group, methods=['DELETE'])
api.add_url_rule('/push/reg_id', 'register push id', post_reg_id, methods=['POST'])
api.add_url_rule('/push/reg_id', 'delete push id', delete_reg_id, methods=['DELETE'])
api.add_url_rule('/push/test', 'get test push', test_push, methods=['GET'])
api.add_url_rule('/noti/contents', 'get my noti contents', get_noti, methods=['GET'])
api.add_url_rule('/noti/status', 'get status notification', get_noti_status, methods=['GET'])
api.add_url_rule('/noti/status/activate', 'activate account notification', activate_noti, methods=['GET'])
api.add_url_rule('/noti/status/deactivate', 'deactivate account notification', deactivate_noti, methods=['GET'])
|
[
"bdh931101@gmail.com"
] |
bdh931101@gmail.com
|
2f1963b7dcab8d3722a0aead506bd0147a04fcdc
|
0f4ff6fe47803cdf485f64b9abf6053ad02ebcde
|
/test/common/page.py
|
e9800f2c67415b1bb1464ce1332c85dac63e0cc8
|
[] |
no_license
|
wowo665636/dongtest
|
944a7dd01c362da4ddac8232b343d5343a177858
|
1cd4856382e39d07712d4c0a45c6de3bb5c431df
|
refs/heads/master
| 2021-06-29T17:49:31.124568
| 2017-09-14T13:07:01
| 2017-09-14T13:07:01
| 103,113,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
from test.common.browser import Brower
class Page(Brower):
def __init__(self, page=None, brower_type='firefox'):
if page:
self.driver = page.driver
else:
super(Page, self).__init__(brower_type=brower_type)
def get_driver(self):
return self.driver
def find_element(self, *args):
return self.driver.find_element(*args)
def find_elements(self, *args):
return self.driver.find_elements(*args)
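A short, hypothetical usage sketch (the locator, the variable names, and the idea of chaining two Page objects are illustrative, not from the repo): it shows that passing an existing Page via the page argument reuses its driver instead of opening a new browser.
# Illustrative only; assumes Brower.__init__ starts a browser and sets self.driver.
from selenium.webdriver.common.by import By
from test.common.page import Page

base = Page(brower_type='firefox')        # opens a new firefox driver
reused = Page(page=base)                  # shares base's driver, no second browser
box = reused.find_element(By.NAME, 'q')   # hypothetical locator
box.send_keys('hello')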
|
[
"392386038@qq.com"
] |
392386038@qq.com
|
9ffb18b539fc4d52d55686384f909bad4853cc27
|
e7b4114f517a0f8d650e75d59f6c9c958061205a
|
/examples/dm_response.py
|
96a9a5869fe0c41f4490b12ad39978d2a7a3b436
|
[
"MIT"
] |
permissive
|
xezzz/Sentinel
|
4d7607463441ed94b695b26bf313b874be13f5e3
|
c0b427c67a9038213b6ed361007189a8c66408ad
|
refs/heads/master
| 2023-06-07T01:58:07.648326
| 2021-07-04T21:49:08
| 2021-07-04T21:49:08
| 382,418,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
# import the client class ⬆️
from sentinel import SentinelClient
# define the classic client ⚙️
client = SentinelClient(token="YOUR_BOT_TOKEN", app_id=123456789).build()
@client.slash_command(name="dm", guild_id=123456789, description="🎉 Sends a simple DM to the author")
def dm(ctx):
ctx.dm("Only you can see this message!")
|
[
"pschaper18@gmail.com"
] |
pschaper18@gmail.com
|
4072030fa3b51275fc733f742b865cbb2987d105
|
18da7621287c06dc2fd094073350fddf6ce3be39
|
/api/serializers.py
|
93364058a69c4239fbe6cb732b0a8b1092f3d208
|
[] |
no_license
|
saiyadfaizan/twenty
|
1486eaad044dce81983b15738353792a2ab2b826
|
35be3d7f9792d6b8a5e19440caa4365666804f88
|
refs/heads/master
| 2023-02-25T16:45:40.270079
| 2021-01-19T12:31:04
| 2021-01-19T12:31:04
| 330,972,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
from rest_framework import serializers
from store.models import *
class AdminSerializer(serializers.ModelSerializer):
class Meta:
model = Admin
fields = ('user', 'name', 'email')
class CustomerSerializer(serializers.ModelSerializer):
class Meta:
model = Customer
fields = ('user', 'name', 'email')
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__'
#fields = ('name', 'category', 'price', 'description', 'digital', 'image')
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = '__all__'
#fields = ('customer', 'emailAddress', 'date_ordered', 'complete', 'transaction_id', 'status')
class OrderItemSerializer(serializers.ModelSerializer):
class Meta:
        model = OrderItem  # assumed model name: the original said Admin, but these fields belong to an order-item model
fields = ('product', 'order', 'quantity', 'date_added')
class ShippingAddressSerializer(serializers.ModelSerializer):
class Meta:
model = ShippingAddress
fields = '__all__'
|
[
"saliali@bestpeers.com"
] |
saliali@bestpeers.com
|
aa528e237efb5d592e5fa08b8117e3c149b1e032
|
c1b8e586975804106602a3d13dacd60f9e5b329c
|
/old/manage.py
|
4ed1b9d9dc095f342b8ff13f023543865fee1468
|
[] |
no_license
|
mrweber2/abbwear-hs-app
|
9a54eb3c2a00e831b625777bdd289b2886dfef2f
|
a2b08ca48c141c44161d95eacbf65551d201a6be
|
refs/heads/master
| 2020-06-10T23:28:34.874743
| 2019-06-27T02:21:53
| 2019-06-27T02:21:53
| 193,789,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'abbwearHS.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"matthew.weber@abbvie.com"
] |
matthew.weber@abbvie.com
|