Dataset columns (dtype and observed range):
blob_id: string (40 chars) | directory_id: string (40 chars) | content_id: string (40 chars)
path: string (3-281 chars)
detected_licenses: list (0-57 items) | license_type: string (2 classes)
repo_name: string (6-116 chars) | snapshot_id: string (40 chars) | revision_id: string (40 chars) | branch_name: string (313 classes)
visit_date: timestamp[us] | revision_date: timestamp[us] | committer_date: timestamp[us]
github_id: int64 (18.2k-668M, nullable) | star_events_count: int64 (0-102k) | fork_events_count: int64 (0-38.2k)
gha_license_id: string (17 classes) | gha_event_created_at: timestamp[us] | gha_created_at: timestamp[us] | gha_language: string (107 classes)
src_encoding: string (20 classes) | language: string (1 class) | is_vendor: bool | is_generated: bool
length_bytes: int64 (4-6.02M) | extension: string (78 classes)
content: string (2-6.02M chars) | authors: list (1 item) | author: string (0-175 chars)
---
blob_id: 7018f257a6effb3e3a626e56384ae5a7ae0ba684 | directory_id: e7b4ddbf23fca6ff3929cca13614b378b4449f6c | content_id: 432ec1afbc07388f7fbc2084a6d79431c40b7a8c
path: /3rd_year/ex_2/dn16018_ex2_code.py
detected_licenses: [] | license_type: no_license
repo_name: dazzabaijan/py_comp_model | snapshot_id: 598479d70ca50ac99e2929398e899d0cc66f8881 | revision_id: 2e61bf34f1aabc484c73d1e3dc6dd35897704f51 | branch_name: refs/heads/master
visit_date: 2021-09-17T14:27:28.293477 | revision_date: 2021-08-17T12:55:25 | committer_date: 2021-08-17T12:55:25
github_id: 229,984,890 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 22,700 | extension: py
content:
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from copy import copy
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve
from time import perf_counter as clock  # time.clock was removed in Python 3.8
import sys
"""Boundary conditions for second Physics problem"""
def hot_cold(m, l, u, F):
"""A boundary condition function for when rod is in a furnace one end and
in an ice bath for another.
Parameters:
m, l, u : ndarray(s) representing the main, lower and upper diagonal
elements of a tridiagonal matrix
F : a constant within a triadiagonal matrix
Returns:
m, l, u : The original ndarray with certain elements assigned to specific
values according to the initial condition.
"""
m[0], u[0] = 1, 0
l[-1], m[-1] = 0, 1
return m, l, u
def adiabatic(m, l, u, F):
"""A boundary condition function for when the poker is adiabatic at either
end.
Parameters:
m, l, u : ndarray(s) representing the main, lower and upper diagonal
elements of a tridiagonal matrix
    F : a constant within the tridiagonal matrix
    Returns:
    m, l, u : The original ndarrays with certain elements set according to the
    boundary condition.
"""
m[0], u[0] = 1, 0
l[-1] = -2*F
return m, l, u
def both_ice(m, l, u, F):
"""A boundary condition function for when the poker is in an ice bath at
both ends.
Parameters:
m, l, u : ndarray(s) representing the main, lower and upper diagonal
elements of a tridiagonal matrix
    F : a constant within the tridiagonal matrix
    Returns:
    m, l, u : The original ndarrays with certain elements set according to the
    boundary condition.
    F : not returned; it is consumed inside the calling function
"""
m[0], u[0] = 1, 0
l[-1], m[-1] = 0, 1
return m, l, u
def potential(x_axis, y_axis, cbar, x_label, y_label, cbar_label, int_pol,
colorMap, xmin=None, xmax=None, ymin=None, ymax=None, plot=None):
"""A general plotter allowing for the plotting of a heatmap(primarily used
here for potential function) which takes in relevant data about the
graph. It also allows for the option of overlaying either a quiver or a
streamline plot, or not!
Parameters:
x_axis, y_axis : The corresponding x and y axis data lists of a plot
cbar: The heatmap list data which usually corresponds to x and y axis
    x_label, y_label : The x and y labels of the graph dtype = string
cbar_label : The colourbar label dtype = string
int_pol: The colour interpolation of the heatmap dtype = integer
colorMap: The style and colour of heatmap dtype = string
xmin, xmax: The minimum and maximum value of the x-axis
ymin, ymax: The minimum and maximum value of the y-axis
plot: "quiver", "stream" allowing for the overlay of quiver or streamline
plot
Returns:
Image : AxesImage
"""
plt.contourf(x_axis, y_axis, cbar, int_pol, cmap=colorMap)
cbar_tag = plt.colorbar()
Z = cbar
plt.xlabel(x_label)
plt.ylabel(y_label)
cbar_tag.set_label(cbar_label, rotation=270)
cbar_tag.set_clim(-1000.0, 1000.0)
E = np.gradient(Z)
E = E/np.sqrt(E[0]**2 + E[1]**2)
if plot is not None:
if plot == "quiver":
print("\nQuiver plot:")
plt.quiver(x_axis, y_axis, E[1], E[0])
if plot == "stream":
print("\nStreamline Plot:")
plt.streamplot(x_axis, y_axis, -E[1], -E[0], color='black')
if xmin is not None and xmax is not None:
plt.xlim(xmin, xmax)
if ymin is not None and ymax is not None:
plt.ylim(ymin, ymax)
plt.show()
def e_field(x_axis, y_axis, cbar, x_label, y_label, cbar_label, int_pol,
colorMap, xmin=None, xmax=None, ymin=None, ymax=None, plot=None):
"""A general plotter allowing for the plotting of a heatmap(primarily used
here for electric field) which takes in relevant data about the graph
It also allows for the option of overlaying either a quiver or a
streamline plot, or not!
Parameters:
x_axis, y_axis : The corresponding x and y axis data lists of a plot
cbar: The heatmap list data which usually corresponds to x and y axis
    x_label, y_label : The x and y labels of the graph dtype = string
cbar_label : The colourbar label dtype = string
int_pol: The colour interpolation of the heatmap dtype = integer
colorMap: The style and colour of heatmap dtype = string
xmin, xmax: The minimum and maximum value of the x-axis dtype = int
ymin, ymax: The minimum and maximum value of the y-axis dtype = int
plot: "quiver", "stream" allowing for the overlay of quiver or streamline
plot
Returns:
Image : AxesImage
"""
a, d = np.gradient(cbar)
cbar2 = -a
plt.contourf(x_axis, y_axis, cbar2, int_pol, cmap=colorMap)
cbar2_tag = plt.colorbar()
plt.xlabel(x_label)
plt.ylabel(y_label)
cbar2_tag.set_label(cbar_label, rotation=270)
E = np.gradient(cbar)
E = E/np.sqrt(E[0]**2 + E[1]**2)
if plot is not None:
if plot == "quiver":
print("\nQuiver plot:")
plt.quiver(x_axis, y_axis, E[1], E[0])
if plot == "stream":
print("\nStreamline Plot:")
plt.streamplot(x_axis, y_axis, -E[1], -E[0], color='black')
if xmin is not None and xmax is not None:
plt.xlim(xmin, xmax)
if ymin is not None and ymax is not None:
plt.ylim(ymin, ymax)
plt.show()
def multiline(x_axis, y_axis, x_label, y_label, l_names, l_title, y_max,
loca=None, anchor_x=None, anchor_y=None):
"""Allows for ANY number of line to be plotted on the same graph, with the
ability to label every line.
Parameters:
x_axis, y_axis: Takes in lists of lists of x and y data points
x_label, y_label: The x and y label of the graph
l_names: Takes in lists of strings as the corresponding label of each line
l_title: Title of the legend dtype = string
y_max: Maximum value of the y axis dtype = float
loca: Location of the legend box
anchor_x, anchor_y: Coordinates for which the legend box is anchored
Returns:
Image : AxesImage
"""
l_labels = l_names*len(y_axis)
fig = plt.figure()
ax = fig.add_subplot(111)
for i, (y_axis, l_labels) in enumerate(zip(y_axis, l_labels)):
ax.plot(x_axis, y_axis, label=l_labels)
ax.legend(title=l_title, ncol=3)
if loca is not None and anchor_x is not None and anchor_y is not None:
ax.legend(title=l_title, bbox_to_anchor=(anchor_x, anchor_y), loc=loca,
ncol=3)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_ylim(0, y_max)
plt.show()
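# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# Minimal driver for the three plotting helpers above, using a synthetic potential
# V = x*y on a unit square. The grid size, the toy potential and the helper name
# _demo_plot_helpers are assumptions made only to exercise the plotters.
def _demo_plot_helpers():
    x = np.linspace(0, 1, 51)
    y = np.linspace(0, 1, 51)
    X, Y = np.meshgrid(x, y)
    V = X*Y  # toy potential, not a physical solution
    # heatmap of the potential with a streamline overlay
    potential(x, y, V, "x", "y", "Potential(V)", 30, cm.jet, 0, 1, 0, 1, "stream")
    # heatmap of the field (negative gradient) with a quiver overlay
    e_field(x, y, V, "x", "y", "Electric field", 30, cm.hot, 0, 1, 0, 1, "quiver")
    # three horizontal slices of the potential on one labelled set of axes
    multiline(x, [V[10, :], V[25, :], V[40, :]], "x", "Potential(V)",
              ["y=0.2", "y=0.5", "y=0.8"], "Slice", 1.0)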
def capacitor_bc(v, V):
"""Sets the boundary condition for a parallel plate capacitor
Parameters:
V : ndarray of a grid, will only be called within gauss_solver or
jacobi_solver
v : Potential difference between plates
Returns:
Updated ndarray of the initial grid taken into account of BC
"""
    V[16, 10:31] = -v  # indexed as V[y, x]: NumPy arrays are row-major, rows = y
V[25, 10:31] = v
return V
def linecharge_bc(v, V):
"""Sets the boundary condition for a line charge
Parameters:
V : ndarray of a grid, will only be called within gauss_solver or
jacobi_solver
v : Potential difference of the line charge
Returns:
Updated ndarray of the initial grid taken into account of BC
"""
V[-1, 0:-1] = v
return V
def gauss_solver(a, d, h, v, BC):
"""An iterative solver for the capacitor, works for both Gauss-Seidel and
Jacobi methods. It allows for the input of different boundary conditions
which runs within the function.
Parameters:
a : Length of grid
d : Height of grid
h : Grid density
v : Potential difference
BC : A boundary condition function
Returns:
X, Y, Z : Lists of meshgrid coordinates and potential(V)
"""
M = np.zeros(shape=(int((a/h)+1), int((d/h)+1)))
max_iter = 20000
M1 = copy(M)
count = 0
while count < max_iter:
count += 1
M2 = copy(M1)
for i in range(1, (M.shape[0]-1)):
for j in range(1, (M.shape[1]-1)):
# top left
if i == j == 0:
M1[i, j] = 0.5*(M1[i+1, j]+M1[i, j+1])
# top edge no corners
elif i == 0 and j > 0 and j < (M.shape[1]-1):
M1[i, j] = (1/3)*(M1[i, j-1]+M1[i, j+1]+M1[i+1, j])
# top right
elif i == 0 and j == (M.shape[1]-1):
M1[i, j] = 0.5*(M1[i+1, j]+M1[i, j-1])
# right edge no corners
elif j == (M.shape[1]-1) and i > 0 and i < (M.shape[0]-1):
M1[i, j] = (1/3)*(M1[i-1, j]+M1[i+1, j]+M1[i, j-1])
# bot right
elif i == (M.shape[0]-1) and j == (M.shape[1]-1):
M1[i, j] = 0.5*(M1[i-1, j]+M1[i, j-1])
# bot edge
elif i == (M.shape[0]-1) and j > 0 and j < (M.shape[1]-1):
M1[i, j] = (1/3)*(M1[i, j-1]+M1[i, j+1]+M1[i-1, j])
# bot left no corners
elif i == (M.shape[0]-1) and j == 0:
M1[i, j] = 0.5*(M1[i-1, j]+M1[i, j+1])
# left edge
elif j == 0 and i > 0 and i < (M.shape[0]-1):
M1[i, j] = (1/3)*(M1[i-1, j]+M1[i+1, j]+M1[i, j+1])
else:
M1[i, j] = 0.25*(M1[i-1, j]+M1[i+1, j]+M1[i, j-1]+M1[i, j+1])
BC(v, M1)
if np.allclose(M1, M2, rtol=1e-3):
print("\nConvergence occurs after {} iterations.".format(count))
break
else:
sys.stdout.write("\r"+"Convergence did not happen before {} iterations.".format(count))
x = np.linspace(0, a, int(a/h)+1)
y = np.linspace(0, d, int(d/h)+1)
return x, y, M1
def jacobi_solver(a, d, h, v, BC):
"""An iterative solver for the capacitor, works for both Gauss-Seidel and
Jacobi methods. It allows for the input of different boundary conditions
which runs within the function.
Parameters:
a : Length of grid
d : Height of grid
h : Grid density
v : Potential difference
BC : A boundary condition function
Returns:
X, Y, Z : Lists of meshgrid coordinates and potential(V)
"""
M = np.zeros(shape=(int((a/h)+1), int((d/h)+1)))
max_iter = 20000
M1 = copy(M)
count = 0
while count < max_iter:
count += 1
M2 = copy(M1)
for i in range(1, (M.shape[0]-1)):
for j in range(1, (M.shape[1]-1)):
# top left
if i == j == 0:
M1[i, j] = 0.5*(M2[i+1, j]+M2[i, j+1])
# top edge no corners
elif i == 0 and j > 0 and j < (M.shape[1]-1):
M1[i, j] = (1/3)*(M2[i, j-1]+M2[i, j+1]+M2[i+1, j])
# top right
elif i == 0 and j == (M.shape[1]-1):
M1[i, j] = 0.5*(M2[i+1, j]+M2[i, j-1])
# right edge no corners
elif j == (M.shape[1]-1) and i > 0 and i < (M.shape[0]-1):
M1[i, j] = (1/3)*(M2[i-1, j]+M2[i+1, j]+M2[i, j-1])
# bot right
elif i == (M.shape[0]-1) and j == (M.shape[1]-1):
M1[i, j] = 0.5*(M2[i-1, j]+M2[i, j-1])
# bot edge no corners
elif i == (M.shape[0]-1) and j > 0 and j < (M.shape[1]-1):
M1[i, j] = (1/3)*(M2[i, j-1]+M1[i, j+1]+M2[i-1, j])
# bot left
elif i == (M.shape[0]-1) and j == 0:
M1[i, j] = 0.5*(M2[i-1, j]+M2[i, j+1])
# left edge no corners
elif j == 0 and i > 0 and i < (M.shape[0]-1):
M1[i, j] = (1/3)*(M2[i-1, j]+M2[i+1, j]+M2[i, j+1])
else:
M1[i, j] = 0.25*(M2[i-1, j]+M2[i+1, j]+M2[i, j-1]+M2[i, j+1])
BC(v, M1)
if np.allclose(M1, M2, rtol=1e-3):
print("\nConvergence occurs after {} iterations.".format(count))
break
else:
sys.stdout.write("\r"+"Convergence did not happen before {} iterations.".format(count))
x = np.linspace(0, a, int(a/h)+1)
y = np.linspace(0, d, int(d/h)+1)
return x, y, M1
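# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# Both solvers share the same call signature, so they can be compared directly on a
# coarse grid. The grid size (20 x 20, h = 1) and the 1000 V line charge are arbitrary
# assumptions; with in-place updates the Gauss-Seidel version is expected to reach the
# stopping tolerance in fewer sweeps than the Jacobi version.
def _compare_relaxation_methods():
    x_gs, y_gs, v_gs = gauss_solver(20, 20, 1, 1000, linecharge_bc)
    x_j, y_j, v_j = jacobi_solver(20, 20, 1, 1000, linecharge_bc)
    # Both grids should agree to within the shared stopping tolerance.
    print("Max |GS - Jacobi|:", np.max(np.abs(v_gs - v_j)))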
def heat_eq(T, bc, temp_i, temp_f):
"""Solving the heat equation of a rod for a general boundary condition by
using the backwards-Euler method. Since the matrix is tridiagonal, a
sparse matrix is precomputed to save run time by not having to compute
the elements with 0 value.
Parameters:
T : The maximum run time for which dt is also calculated.
bc : A specific boundary condition that's suited for the situation
temp_i : The initial temperature of the start of the rod
temp_f : The initial temperature of the tail of the rod
Returns:
x : Length of the rod segmented up into points and stored in a list.
u : The temperature of the rod at time T.
"""
L, Nx, alpha = 0.5, 99, 59/(450*7900)
x = np.linspace(0, L, Nx+1)
t = np.linspace(0, T, Nx+1)
dx, dt = x[1]-x[0], t[1]-t[0]
u, u_n = np.zeros(Nx+1), np.zeros(Nx+1)
K = alpha*dt/(dx**2)
# Initiate sparse matrix and RHS solution of equation
main = np.zeros(Nx+1)
b = np.zeros(Nx+1)
lower, upper = np.zeros(Nx), np.zeros(Nx)
# Precompute sparse matrix
main[:] = 1 + 2*K
lower[:] = -K
upper[:] = -K
# Insert boundary conditions
main, lower, upper = bc(main, lower, upper, K)
A = diags(diagonals=[main, lower, upper], offsets=[0, -1, 1], shape=(Nx+1,
Nx+1), format='csr')
# print(A.todense()) # Check that A is correct
# Set initial condition
for i in range(0, Nx+1):
u_n[i] = 20
for n in range(0, T):
b = u_n
b[0] = temp_i # bc start of rod
b[-1] = temp_f # bc end of rod
u[:] = spsolve(A, b)
u_n[:] = u
return x, u
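# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# A single backward-Euler run with both ends of the rod held in ice, mirroring the
# comparison made in choice_d() further down: heat_anal() (defined below) provides the
# Fourier-series reference for the same boundary conditions. The run time of 500 and
# the 1000-term series are arbitrary assumptions.
def _demo_heat_eq():
    x, u = heat_eq(500, both_ice, 0, 0)
    u_ref = heat_anal(x, 500, 0.5, 1000)
    print("Mean absolute error vs analytical series:", np.mean(np.abs(u_ref - u)))
    plt.plot(x, u, label="Backward Euler")
    plt.plot(x, u_ref, '--', label="Fourier series")
    plt.xlabel("Length(m)")
    plt.ylabel(r"Temperature($^{\circ}$C)")
    plt.legend()
    plt.show()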
def wireframe(x_axis, y_axis, cbar, offset, rs, cs):
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(x_axis, y_axis, cbar, color='red', rstride=rs, cstride=cs,linewidth=0.5)
ax.set_xlabel("x(m)")
ax.set_ylabel("y(m)")
    ax.set_zlabel(r"$\phi (x,y)$", rotation=180)
plt.show()
def wire_anal(x, y, L, N):
series = 0
if N > 114:
for n in range(1, N):
series += 4000*np.sin(((2*n)-1)*np.pi*x/L)*np.exp(((2*n)-1)*np.pi*((y/L)-1))/(((2*n)-1)*np.pi)
else:
for n in range(1, N):
series += (4000/(((2*n)-1)*np.pi))*np.sin(((2*n)-1)*np.pi*x/L)*(np.sinh(((2*n)-1)*np.pi*y/L))/(np.sinh(np.pi*((2*n)-1)))
return series
def heat_anal(x, t, L, N):
k = 59/(450*7900)
T_0 = 20 # u(x,0) = 0, u(0,0) = 0
series = 0
for n in range(1, N):
series += (4*T_0/(((2*n)-1)*np.pi))*(np.sin((((2*n)-1)*x*np.pi)/L))*np.exp((-k*t*(np.pi*((2*n)-1)/L)**2))
return series
def choice_a():
"""Handles choice b in MainMenu()"""
t1 = clock()
X, Y, V = gauss_solver(50, 50, 1, 1000, linecharge_bc)
print("\n\nPotential")
potential(X, Y, V, "x(cm)", "y(cm)", "Potential(V)", 30, cm.jet, 0, 50, 0,
50, "stream")
t1 = clock() - t1
print("Took {}(s)".format(t1))
t1 = clock()
print("\n\nElectric field")
e_field(X, Y, V, "x(cm)", "y(cm)", "Electric field", 30, cm.jet, 0, 50, 0,
50, "stream")
t1 = clock() - t1
print("Took {}(s)".format(t1))
Y = 0.25
X = np.linspace(0, 50, 101)
u0 = wire_anal(X, Y, 50, 40000)
X, Y, V = gauss_solver(50, 50, 0.5, 1000, linecharge_bc)
print("\n\nDifference between analytical Fourier series and GS solution.")
multiline(X, [u0, V[26,:]], "y(cm)", "$\phi (x, y=0.25)$", ["Analytical",
"GS"], "Method", 60, 'upper center', 0.5, 1.1)
X = Y = np.linspace(0, 0.5, 101)
X, Y = np.meshgrid(X, Y)
Z = wire_anal(X, Y, 0.5, 21)
print("\n\nGibbs phenomenon")
wireframe(X, Y, Z, 0, 3, 3)
def choice_b():
"""Handles choice b in MainMenu()"""
t1 = clock()
print("\n\nPotential overlayed with quiver plot using Gauss-Seidel method")
X, Y, V = gauss_solver(40, 40, 1, 1000, capacitor_bc)
potential(X, Y, V, "x(cm)", "y(cm)", "Potential(V)", 30, cm.jet, 0, 40, 0,
40, "quiver")
t1 = clock() - t1
print("Took {}(s)".format(t1))
t2 = clock()
potential(X, Y, V, "x(cm)", "y(cm)", "Potential(V)", 25, cm.hot, 0, 40, 0,
40, "stream")
t2 = clock() - t2
print("Took {}(s)".format(t2))
t1 = clock()
print("\n\nElectric field overlayed with streamline plot using Gauss-Seidel method")
e_field(X, Y, V, "x(cm)", "y(cm)", "Electric field(V/m)", 30, cm.hot, 0,
40,0,40, "stream")
t1 = clock() - t1
print("Took {}(s)".format(t1))
t3 = clock()
print("\n\nPotential overlayed with quiver plot using Jacobi method")
X, Y, V = jacobi_solver(40, 40, 1, 1000, capacitor_bc)
potential(X, Y, V, "x(cm)", "y(cm)", "Potential(V)", 30, cm.jet, 0, 40, 0,
40, "quiver")
t3 = clock() - t3
print("Took {}(s)".format(t3))
t4 = clock()
print("\n\nElectric field overlayed with streamline plot using Jacobi method")
e_field(X, Y, V, "x(cm)", "y(cm)", "Electric field(V/m)", 25, cm.hot, 0,
40, 0, 40, "stream")
t4 = clock() - t4
print("Took {}(s)".format(t4))
def choice_d():
"""Handles choice d in MainMenu()"""
x, u0 = heat_eq(1, both_ice, 0, 0)
sols0 = heat_anal(x, 1, 0.5, 1000)
k0 = np.abs(sum(sols0-u0))/len(sols0)
x, u = heat_eq(50, both_ice, 0, 0)
sols = heat_anal(x, 50, 0.5, 1000)
k = np.abs(sum(sols-u))/len(sols)
x, u2 = heat_eq(150, both_ice, 0, 0)
sols2 = heat_anal(x, 150, 0.5, 1000)
k2 = np.abs(sum(sols2-u2))/len(sols2)
x, u3 = heat_eq(250, both_ice, 0, 0)
sols3 = heat_anal(x, 250, 0.5, 1000)
k3 = np.abs(sum(sols3-u3))/len(sols3)
x, u4 = heat_eq(350, both_ice, 0, 0)
sols4 = heat_anal(x, 350, 0.5, 1000)
k4 = np.abs(sum(sols4-u4))/len(sols4)
x, u5 = heat_eq(450, both_ice, 0, 0)
sols5 = heat_anal(x, 450, 0.5, 1000)
k5 = np.abs(sum(sols5-u5))/len(sols5)
x, u6 = heat_eq(550, both_ice, 0, 0)
sols6 = heat_anal(x, 550, 0.5, 1000)
k6 = np.abs(sum(sols6-u6))/len(sols6)
x, u7 = heat_eq(650, both_ice, 0, 0)
sols7 = heat_anal(x, 650, 0.5, 1000)
k7 = np.abs(sum(sols7-u7))/len(sols7)
x, u8 = heat_eq(750, both_ice, 0, 0)
sols8 = heat_anal(x, 750, 0.5, 1000)
k8 = np.abs(sum(sols8-u8))/len(sols8)
x, u9 = heat_eq(1000, both_ice, 0, 0)
sols9 = heat_anal(x, 1000, 0.5, 1000)
k9 = np.abs(sum(sols9-u9))/len(sols9)
x, u10 = heat_eq(1200, both_ice, 0, 0)
sols10 = heat_anal(x, 1200, 0.5, 1000)
k10 = np.abs(sum(sols10-u10))/len(sols10)
x, u11 = heat_eq(1400, both_ice, 0, 0)
sols11 = heat_anal(x, 1400, 0.5, 1000)
k11 = np.abs(sum(sols11-u11))/len(sols11)
multiline(x, [u0, u, u2, u3, u4, u5, u6, u7, u8], "Length(m)",
"Temperature($^{\circ}$C)", [1, 50, 150, 250, 350, 450, 550, 650,
750], "Time(s)", 21, 'upper center', 0.5, 1.1)
print("\n\nAbsolute error between analytical Fourier series solution and GS solution.")
a = [1, 50, 150, 250, 350, 450, 550, 650, 750, 1000, 1200, 1400]
b = [k0, k, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11]
plt.plot(a, b, 'ro-')
plt.xlabel("Total Time(s)")
plt.ylabel("Absolute Error")
def choice_e():
"""Handles choice d in MainMenu()"""
x, u0 = heat_eq(1, adiabatic, 1000, 20)
x, u = heat_eq(50, adiabatic, 1000, 20)
x, u2 = heat_eq(150, adiabatic, 1000, 20)
x, u3 = heat_eq(350, adiabatic, 1000, 20)
x, u4 = heat_eq(750, adiabatic, 1000, 20)
x, u5 = heat_eq(4000, adiabatic, 1000, 20)
x, u6 = heat_eq(10000, adiabatic, 1000, 20)
x, u7 = heat_eq(20000, adiabatic, 1000, 20)
x, u8 = heat_eq(50000, adiabatic, 1000, 20)
multiline(x, [u0, u, u2, u3, u4, u5, u6, u7, u8], "Length(m)",
"Temperature(Degree Celsius)", [1, 50, 150, 350, 750, 4000,
"$1x10^{4}$", "$2x10^{4}$", "$5x10^{4}$"], "Time(s)", 1100,
'upper center', 0.5, 1.1)
def choice_f():
"""Handles choice d in MainMenu()"""
x, u0 = heat_eq(1, hot_cold, 1000, 0)
x, u = heat_eq(25, hot_cold, 1000, 0)
x, u2 = heat_eq(100, hot_cold, 1000, 0)
x, u3 = heat_eq(200, hot_cold, 1000, 0)
x, u4 = heat_eq(300, hot_cold, 1000, 0)
x, u5 = heat_eq(400, hot_cold, 1000, 0)
x, u6 = heat_eq(500, hot_cold, 1000, 0)
x, u7 = heat_eq(600, hot_cold, 1000, 0)
x, u8 = heat_eq(700, hot_cold, 1000, 0)
multiline(x, [u0, u, u2, u3, u4, u5, u6, u7, u8], "Length(m)",
"Temperature(Degree Celsius)", [1, 25, 100, 200, 300, 400, 500,
600, 700], "Time(s)", 1100)
def MainMenu():
choice = '0'
while choice != 'q':
print("\n%s\nData Analysis\n%s" % (13*'=', 13*'='))
print("(a)Solves Laplace's equation for a line charge.")
print("(b)Calculate the potential and electric field within and around",
"a parallel plate capacitor")
print("(c)Investigate field configuration as a/d becomes large.")
print("Temperature distribution plotted at different times:")
print("(d)Starting with ice at both ends of poker, and compared with",
"its analytical Fourier series solution.")
print("(e)with no heat loss from the far end of the poker")
print("(f)with far end of poker immersed in a block of ice at 0*C.")
print("(g)\n(q)")
choice = (input("Please enter your choice [a-q] : ").lower())
if choice == 'a':
choice_a()
elif choice == 'b':
choice_b()
elif choice == 'd':
choice_d()
elif choice == 'e':
choice_e()
elif choice == 'f':
choice_f()
elif choice != 'q':
print("Invalid choice. Please try again.")
MainMenu()
authors: ["dn16018@bristol.ac.uk"] | author: dn16018@bristol.ac.uk
---
blob_id: 5ced071bc4465f0c1a0fcf55338b9614fdb8f92c | directory_id: 661c86ff31d4e74ba2e7d868117e4be46f6500aa | content_id: 808ee7005e5a85f56f1561fe12139701180c2689
path: /0148 Sort List.py
detected_licenses: [] | license_type: no_license
repo_name: Desolve/LeetCode | snapshot_id: 0f6bf95ae20bc034c5dae6f51aed6d87f2c8a135 | revision_id: 145dda3a75a748dc2509fdcbb55327a5cbc945f2 | branch_name: refs/heads/master
visit_date: 2021-06-03T14:37:10.390154 | revision_date: 2020-07-30T15:31:12 | committer_date: 2020-07-30T15:31:12
github_id: 143,541,878 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,162 | extension: py
content:
# Adapted from jeantimex
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def sortList(self, head: ListNode) -> ListNode:
if not head or not head.next: return head
prev, slow, fast = None, head, head
while fast and fast.next:
prev = slow
slow = slow.next
fast = fast.next.next
        # Detach the first half by setting its last node's next to None
prev.next = None
# n1 : head to prev, n2: slow to the last node
n1 = self.sortList(head)
n2 = self.sortList(slow)
return self.merge(n1, n2)
def merge(self, n1: ListNode, n2: ListNode) -> ListNode:
n = ListNode(0)
ite = n
while n1 and n2:
if n1.val < n2.val:
ite.next = n1
n1 = n1.next
else:
ite.next = n2
n2 = n2.next
ite = ite.next
if n1: ite.next = n1
if n2: ite.next = n2
return n.next
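# --- Hedged usage sketch (added for illustration; not part of the original submission). ---
# On LeetCode the ListNode class is injected by the judge; a minimal stand-in is given
# here so the merge sort above can be tried locally. Note that, outside LeetCode, this
# definition (or `from __future__ import annotations`) must come before the Solution
# class for its type annotations to resolve.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def _demo_sort_list():
    # Build 4 -> 2 -> 1 -> 3, sort it, and collect the values in order.
    head = ListNode(4)
    head.next = ListNode(2)
    head.next.next = ListNode(1)
    head.next.next.next = ListNode(3)
    node = Solution().sortList(head)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected: [1, 2, 3, 4]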
authors: ["fp60403@gmail.com"] | author: fp60403@gmail.com
---
blob_id: ba99bdbe64af5381bc3553611178b6160490ccfd | directory_id: c02a4f10cee910f48a52cfd08cf7a05902f284f1 | content_id: b9ee6e8937eb87087e01389f2f4a28d450114a67
path: /api/serializers/update_profile.py
detected_licenses: [] | license_type: no_license
repo_name: shiro102/canvas-gamification | snapshot_id: 6d984eae1a48465ea740fac42daeef64acc7de3b | revision_id: 7818889dc51520e03ad69176cf2ce6550b9948ba | branch_name: refs/heads/master
visit_date: 2023-06-12T18:11:05.235574 | revision_date: 2021-06-30T21:26:28 | committer_date: 2021-06-30T21:26:28
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,144 | extension: py
content:
from rest_framework.validators import UniqueValidator
from accounts.models import MyUser
from rest_framework import serializers
import api.error_messages as ERROR_MESSAGES
class UpdateProfileSerializer(serializers.ModelSerializer):
class Meta:
model = MyUser
fields = ['id', 'first_name', 'last_name', 'email']
email = serializers.EmailField(
required=True,
error_messages=ERROR_MESSAGES.EMAIL.ERROR_MESSAGES,
validators=[UniqueValidator(
queryset=MyUser.objects.all(),
message=ERROR_MESSAGES.EMAIL.UNIQUE,
)]
)
first_name = serializers.CharField(
required=True,
error_messages=ERROR_MESSAGES.FIRSTNAME.ERROR_MESSAGES,
)
last_name = serializers.CharField(
required=True,
error_messages=ERROR_MESSAGES.LASTNAME.ERROR_MESSAGES,
)
def create(self, validated_data):
user = self.context['request'].user
user.first_name = validated_data['first_name']
user.last_name = validated_data['last_name']
user.email = validated_data['email']
user.save()
return user
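# --- Hedged usage sketch (added for illustration; not part of the original project). ---
# One way the serializer above could be driven from a DRF view; the view name and the
# response shape are assumptions, and the real code base may wire this up differently.
from rest_framework.views import APIView
from rest_framework.response import Response


class UpdateProfileView(APIView):
    def post(self, request):
        serializer = UpdateProfileSerializer(data=request.data,
                                             context={'request': request})
        serializer.is_valid(raise_exception=True)
        # save() without an instance dispatches to create(), which updates request.user
        user = serializer.save()
        return Response(UpdateProfileSerializer(user).data)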
authors: ["noreply@github.com"] | author: noreply@github.com
---
blob_id: 0b0f9eb22c4e7ab366c091e291c1f8854f5f5aa4 | directory_id: 8e257ec5a47699f6c76f558f98e1edd536159a18 | content_id: 3dce92dde0245c7e64348686140e269b84231631
path: /snalla/in-class/2014-09-29 JSON & XML/ModelForms sample solution/sio/views.py
detected_licenses: [] | license_type: no_license
repo_name: nshikha/437 | snapshot_id: 6e854f5c86123788c34965bc5d60ae9fa6f98b2d | revision_id: 35b984784b778815a2b4ffd95e7c1c89011b9a8f | branch_name: refs/heads/master
visit_date: 2021-01-25T03:20:19.011146 | revision_date: 2014-12-28T06:11:56 | committer_date: 2014-12-28T06:11:56
github_id: 28,555,189 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,248 | extension: py
content:
from django.shortcuts import render
from django.db import transaction
from models import *
from forms import *
def make_view(request,
messages=[],
create_student_form=CreateStudentForm(),
create_course_form=CreateCourseForm(),
register_student_form=RegisterStudentForm()):
context = {
'courses':Course.objects.all(),
'messages':messages,
'create_student_form':create_student_form,
'create_course_form':create_course_form,
'register_student_form':register_student_form,
}
return render(request, 'sio.html', context)
def home(request):
return make_view(request, [])
@transaction.atomic
def create_student(request):
form = CreateStudentForm(request.POST)
if not form.is_valid():
return make_view(request, create_student_form=form)
new_student = Student(andrew_id=form.cleaned_data['andrew_id'],
first_name=form.cleaned_data['first_name'],
last_name=form.cleaned_data['last_name'])
new_student.save()
return make_view(request, ['Added %s'%new_student])
@transaction.atomic
def create_course(request):
form = CreateCourseForm(request.POST)
if not form.is_valid():
return make_view(request, create_course_form=form)
new_course = Course(course_number=request.POST['course_number'],
course_name=request.POST['course_name'],
instructor=request.POST['instructor'])
new_course.save()
return make_view(request, messages=['Added %s'%new_course])
@transaction.atomic
def register_student(request):
form = RegisterStudentForm(request.POST)
if not form.is_valid():
return make_view(request, register_student_form=form)
course = Course.objects.get(course_number=request.POST['course_number'])
student = Student.objects.get(andrew_id=request.POST['andrew_id'])
course.students.add(student)
course.save()
return make_view(request, messages=['Added %s to %s' % (student, course)])
# Complete this action to generate a JSON response containing all courses
def get_all_courses(request):
return None
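# --- Hedged sketch (added for illustration): one possible completion of the exercise
# above. It reuses only the model fields already referenced in this file
# (course_number, course_name, instructor, students/andrew_id); the course's actual
# sample solution may differ, the name get_all_courses_example is hypothetical, and
# JsonResponse requires Django >= 1.7.
from django.http import JsonResponse

def get_all_courses_example(request):
    courses = [{'course_number': c.course_number,
                'course_name': c.course_name,
                'instructor': c.instructor,
                'students': [s.andrew_id for s in c.students.all()]}
               for c in Course.objects.all()]
    return JsonResponse({'courses': courses})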
authors: ["shikha@Shikhas-MacBook-Air.local"] | author: shikha@Shikhas-MacBook-Air.local
---
blob_id: dda9f5d4466062d8ad277427e9721c6efad04a50 | directory_id: e9d52dcf101aea0327c6b0d7e5244c91dfd62cf6 | content_id: ee2e4fd35ec1af3c62bc446c89556cd8cd5295c7
path: /spexy/bases/regular.py
detected_licenses: [] | license_type: no_license
repo_name: drufat/spexy | snapshot_id: 6eba9f44a5539245486cd4ef8fefd24bdb7ade6a | revision_id: 53255009c1830501986afbf6688142ddefe17b9a | branch_name: refs/heads/master
visit_date: 2021-09-18T19:51:47.313946 | revision_date: 2018-07-19T05:09:02 | committer_date: 2018-07-19T05:09:02
github_id: 100,453,374 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,976 | extension: py
content:
# Copyright (C) 2010-2016 Dzhelil S. Rufat. All Rights Reserved.
from spexy.bases import basesimp
class BasesImp(basesimp.BasesImp):
def module(self):
return 'spexy.bases.circular'
def numbers(self):
N = self.N
N0 = N + 1
N1 = N
N0d = N
N1d = N + 1
return (N0, N1), (N0d, N1d)
def cells_index(self):
half = self.imp.half
i0 = lambda n: (n,)
i1 = lambda n: (n, n + 1)
id0 = lambda n: (n + half,)
id1 = lambda n: (n - half, n + half)
return (i0, i1), (id0, id1)
def points(self, n):
N = self.N
return self.imp.points_regular_clamped(N, n)
def bases(self, correct=True):
imp = self.imp
N, half = imp.S(self.N), imp.half
def corr0(kappa):
# primal boundary vertex
if correct:
return lambda N, n, x: kappa(N, n, x) * imp.correction0(N, n)
return kappa
# Bases Functions
kappa0 = lambda n: lambda x: corr0(imp.kappa)(N, n, x)
kappa1 = lambda n: lambda x: imp.kappa_star(N, n + half, x)
kappad0 = lambda n: lambda x: imp.kappa(N, n + half, x)
kappad1 = lambda n: lambda x: imp.kappa_star(N, n, x)
# Gradients
kappa0.grad = lambda n: lambda x: corr0(imp.kappa_grad)(N, n, x)
kappad0.grad = lambda n: lambda x: imp.kappa_grad(N, n + half, x)
return (kappa0, kappa1), (kappad0, kappad1)
def boundary(self):
pi = self.imp.pi
return None, (0, pi)
def run_kappa():
"""
>>> from sympy.abc import x
>>> (kappa0, kappa1), (kappad0, kappad1) = BasesImp(2, 'sym').bases()
>>> kappa0(0)(x)
cos(x)/2 + cos(2*x)/4 + 1/4
>>> kappa0(1)(x)
-cos(2*x)/2 + 1/2
>>> kappa0(2)(x)
-cos(x)/2 + cos(2*x)/4 + 1/4
>>> kappa1(0)(x)
cos(x)/2 + 1/pi
>>> kappa1(1)(x)
-cos(x)/2 + 1/pi
>>> kappad0(0)(x)
sqrt(2)*cos(x)/2 + 1/2
>>> kappad0(1)(x)
-sqrt(2)*cos(x)/2 + 1/2
>>> kappad1(0)(x)
sqrt(2)*cos(x)/2 + cos(2*x)/2 + 1/pi
>>> kappad1(1)(x)
-cos(2*x)/2 + 1/pi
>>> kappad1(2)(x)
-sqrt(2)*cos(x)/2 + cos(2*x)/2 + 1/pi
"""
pass
def run(N):
"""
>>> run(1)
zero-form
[1, 0]
[0, 1]
one-form
[1]
dual zero-form
[1]
dual one-form
[1, 0]
[0, 1]
>>> run(2)
zero-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
one-form
[1, 0]
[0, 1]
dual zero-form
[1, 0]
[0, 1]
dual one-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
>>> run(3)
zero-form
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 1]
one-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
dual zero-form
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
dual one-form
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 1]
"""
from spexy.bases.symintegrals import run_integrals
run_integrals(BasesImp)(N)
authors: ["drufat@caltech.edu"] | author: drufat@caltech.edu
---
blob_id: 27fa882eea596627bf8a09a59032bd37ff44641b | directory_id: 1a64bbc7079bf2a916219821ddd8167e7fbeb37c | content_id: b47f8befd82aabf68b04872c493a848949458adb
path: /nets/hourglass_segm.py
detected_licenses: [] | license_type: no_license
repo_name: shuaiqi361/PointCenterNet_project | snapshot_id: 697da0dfc60046e4a0331e45a37cfb64fa7d17c1 | revision_id: 16aa6a9aaaf94b5f4ca3073ff7646004d57ec64c | branch_name: refs/heads/master
visit_date: 2023-03-04T21:27:37.087211 | revision_date: 2021-02-18T22:42:58 | committer_date: 2021-02-18T22:42:58
github_id: 292,664,025 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,024 | extension: py
content:
import numpy as np
import torch
import torch.nn as nn
class convolution(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(convolution, self).__init__()
pad = (k - 1) // 2
self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn)
self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv = self.conv(x)
bn = self.bn(conv)
relu = self.relu(bn)
return relu
class residual(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(residual, self).__init__()
self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1), stride=(stride, stride), bias=False)
self.bn1 = nn.BatchNorm2d(out_dim)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False)
self.bn2 = nn.BatchNorm2d(out_dim)
self.skip = nn.Sequential(nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),
nn.BatchNorm2d(out_dim)) \
if stride != 1 or inp_dim != out_dim else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv1 = self.conv1(x)
bn1 = self.bn1(conv1)
relu1 = self.relu1(bn1)
conv2 = self.conv2(relu1)
bn2 = self.bn2(conv2)
skip = self.skip(x)
return self.relu(bn2 + skip)
# inp_dim -> out_dim -> ... -> out_dim
def make_layer(kernel_size, inp_dim, out_dim, modules, layer, stride=1):
layers = [layer(kernel_size, inp_dim, out_dim, stride=stride)]
layers += [layer(kernel_size, out_dim, out_dim) for _ in range(modules - 1)]
return nn.Sequential(*layers)
# inp_dim -> inp_dim -> ... -> inp_dim -> out_dim
def make_layer_revr(kernel_size, inp_dim, out_dim, modules, layer):
layers = [layer(kernel_size, inp_dim, inp_dim) for _ in range(modules - 1)]
layers.append(layer(kernel_size, inp_dim, out_dim))
return nn.Sequential(*layers)
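# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# Quick shape check of the two stack builders above: make_layer changes channels (and
# optionally stride) in its first block only, while make_layer_revr keeps inp_dim until
# its last block, which maps down to out_dim. The sizes below are arbitrary assumptions.
def _demo_make_layers():
    down = make_layer(3, 64, 128, modules=2, layer=residual, stride=2)
    up = make_layer_revr(3, 128, 64, modules=2, layer=residual)
    x = torch.randn(1, 64, 32, 32)
    y = down(x)  # (1, 128, 16, 16): stride-2 residual followed by a stride-1 residual
    z = up(y)    # (1, 64, 16, 16): channels mapped back down in the final block
    print(y.shape, z.shape)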
# key point layer
def make_kp_layer(cnv_dim, curr_dim, out_dim):
return nn.Sequential(convolution(3, cnv_dim, curr_dim, with_bn=False),
nn.Conv2d(curr_dim, out_dim, (1, 1)))
class kp_module(nn.Module):
def __init__(self, n, dims, modules):
super(kp_module, self).__init__()
self.n = n
curr_modules = modules[0]
next_modules = modules[1]
curr_dim = dims[0]
next_dim = dims[1]
# curr_mod x residual,curr_dim -> curr_dim -> ... -> curr_dim
self.top = make_layer(3, curr_dim, curr_dim, curr_modules, layer=residual)
self.down = nn.Sequential()
# curr_mod x residual,curr_dim -> next_dim -> ... -> next_dim
self.low1 = make_layer(3, curr_dim, next_dim, curr_modules, layer=residual, stride=2)
# next_mod x residual,next_dim -> next_dim -> ... -> next_dim
if self.n > 1:
self.low2 = kp_module(n - 1, dims[1:], modules[1:])
else:
self.low2 = make_layer(3, next_dim, next_dim, next_modules, layer=residual)
# curr_mod x residual,next_dim -> next_dim -> ... -> next_dim -> curr_dim
self.low3 = make_layer_revr(3, next_dim, curr_dim, curr_modules, layer=residual)
self.up = nn.Upsample(scale_factor=2)
def forward(self, x):
up1 = self.top(x)
down = self.down(x)
low1 = self.low1(down)
low2 = self.low2(low1)
low3 = self.low3(low2)
up2 = self.up(low3)
return up1 + up2
class exkp(nn.Module):
def __init__(self, n, nstack, dims, modules, cnv_dim=256, num_classes=80):
super(exkp, self).__init__()
self.nstack = nstack
self.num_classes = num_classes
curr_dim = dims[0]
self.pre = nn.Sequential(convolution(7, 3, 128, stride=2),
residual(3, 128, curr_dim, stride=2))
self.kps = nn.ModuleList([kp_module(n, dims, modules) for _ in range(nstack)])
self.cnvs = nn.ModuleList([convolution(3, curr_dim, cnv_dim) for _ in range(nstack)])
self.inters = nn.ModuleList([residual(3, curr_dim, curr_dim) for _ in range(nstack - 1)])
self.inters_ = nn.ModuleList([nn.Sequential(nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim))
for _ in range(nstack - 1)])
self.cnvs_ = nn.ModuleList([nn.Sequential(nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim))
for _ in range(nstack - 1)])
# heatmap layers
self.hmap = nn.ModuleList([make_kp_layer(cnv_dim, curr_dim, num_classes) for _ in range(nstack)])
for hmap in self.hmap:
hmap[-1].bias.data.fill_(-2.19)
# regression layers
self.regs = nn.ModuleList([make_kp_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)])
self.w_h_ = nn.ModuleList([make_kp_layer(cnv_dim, curr_dim, 2) for _ in range(nstack)])
# codes layers
self.codes_ = nn.ModuleList([make_kp_layer(cnv_dim, curr_dim, 64) for _ in range(nstack)])
# for c in self.codes_:
# c[-1].bias.data.fill_(1.0157) # np.exp(1/64.), average sum of all components
self.relu = nn.ReLU(inplace=True)
def forward(self, image):
inter = self.pre(image)
outs = []
for ind in range(self.nstack):
kp = self.kps[ind](inter)
cnv = self.cnvs[ind](kp)
if self.training or ind == self.nstack - 1:
outs.append([self.hmap[ind](cnv), self.regs[ind](cnv), self.w_h_[ind](cnv), self.codes_[ind](cnv)])
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
get_hourglass = \
{'large_hourglass':
exkp(n=5, nstack=2, dims=[256, 256, 384, 384, 384, 512], modules=[2, 2, 2, 2, 2, 4]),
'small_hourglass':
exkp(n=5, nstack=1, dims=[256, 256, 384, 384, 384, 512], modules=[2, 2, 2, 2, 2, 4])}
if __name__ == '__main__':
from collections import OrderedDict
from utils.utils import count_parameters, count_flops, load_model
def hook(self, input, output):
print(output.data.cpu().numpy().shape)
# pass
net = get_hourglass['large_hourglass']
load_model(net, '../ckpt/pretrain/checkpoint.t7')
count_parameters(net)
count_flops(net, input_size=512)
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.register_forward_hook(hook)
with torch.no_grad():
y = net(torch.randn(2, 3, 512, 512).cuda())
# print(y.size())
authors: ["liukeyi1993@gmail.com"] | author: liukeyi1993@gmail.com
---
blob_id: cc5f5e52e6123e32c7894ce9542d460cf539b8e2 | directory_id: 2b28530b3108f41cfa24a0d589149272ce734cb1 | content_id: 096b3c23a78258d394af3188226da16ad50cb80b
path: /Python/POO/Práctica_Parcial.py
detected_licenses: [] | license_type: no_license
repo_name: emMercado/TUP-Programacion1 | snapshot_id: e986a02ba07ab88a137803e8ce8750e09685a344 | revision_id: 49cad7e4776b7f03e4ebd7afce831eaac8a8e104 | branch_name: refs/heads/main
visit_date: 2023-08-23T12:40:39.121779 | revision_date: 2021-09-27T23:21:09 | committer_date: 2021-09-27T23:21:09
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,625 | extension: py
content:
import uuid
from random import randint
class Producto:
def __init__(self,descripcion,codigoBarras,precio,proveedor):
self.id = uuid.uuid4()
self.descripcion = descripcion
self.clave = randint(1,200)
self.codigoBarras = codigoBarras
self.precio = precio
self.proveedor = proveedor
def __str__(self):
return '{0}, {1}, {2}, {3}, {4}, {5}'.format(self.id,self.descripcion,self.clave,self.codigoBarras,self.precio,self.proveedor)
class Carrito:
def __init__(self):
self.listadoProductos = []
self.usuario = ""
def cargarProducto(self,prod,cant):
self.listadoProductos.append([prod,cant])
def mostrarProductos(self):
i = 1
for Producto in self.listadoProductos:
print(str(i) + " - " + str(Producto[0].descripcion) + "\n")
i=i+1
class ListaProductos:
def __init__(self):
self.listadoProductos = []
def cargarProducto(self,prod):
self.listadoProductos.append(prod)
def mostrarProductos(self):
i = 0
for Producto in self.listadoProductos:
print(str(i) + " - " + str(Producto.descripcion) + "\n")
i=i+1
# Manzana = Producto("Fruta",1231241231,120,"Moño Azul")
# Carrito1 = Carrito()
# Carrito1.cargarProducto(Manzana,2)
# print(Carrito1.listadoProductos[0][0].descripcion)
# print(Carrito1.listadoProductos[0][1])
# print(Carrito1.listadoProductos)
menu = '''### MENÚ ###
- 1 Agregar Producto
- 2 Agregar al Carrito
- 3 Salir'''
opcion = True
listadoProductosObjeto = ListaProductos()
carritoProductosObjeto = Carrito()
while opcion == True :
print(menu)
op = int (input("Ingrese una Opción\n"))
if op == 1:
descripcion = input("Descripcion\n")
codigoBarras = int (input("Codigo de Barras\n"))
precio = int (input("Precio\n"))
proveedor = input("Proveedor\n")
objetoTransitorio = Producto(descripcion, codigoBarras, precio, proveedor)
listadoProductosObjeto.cargarProducto(objetoTransitorio)
print("Se agrego el Producto",objetoTransitorio)
#listadoProductosObjeto(Producto(descripcion,codigoBarras,precio,proveedor))
elif op == 2:
listadoProductosObjeto.mostrarProductos()
indice = int (input("Ingrese el numero del producto\n"))
cantidad = int (input("cantidad\n"))
productoTransitorio = listadoProductosObjeto.listadoProductos[indice]
carritoProductosObjeto.cargarProducto(productoTransitorio,cantidad)
carritoProductosObjeto.mostrarProductos()
elif op == 3:
opcion=False
authors: ["graciajorge.sist@gmail.com"] | author: graciajorge.sist@gmail.com
---
blob_id: 833b2113b3ae2c9ad9deecfba486cc67eee08b41 | directory_id: 21839bc2817a02d01180baff826b4ce5fe2789bd | content_id: 5a76c7eefbc615657b563714da3e8a042c18257f
path: /official/vision/beta/projects/yolo/modeling/backbones/darknet.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: TrellixVulnTeam/TF-OD-API_BICS | snapshot_id: 1240fbf7cfbed73fe8633870c4eb237289dbd899 | revision_id: 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | branch_name: refs/heads/main
visit_date: 2023-06-24T23:46:19.756540 | revision_date: 2021-07-26T05:27:12 | committer_date: 2021-07-26T05:27:12
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 22,207 | extension: py
content:
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Contains definitions of Darknet Backbone Networks.
The models are inspired by ResNet, and CSPNet
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu, Ping-Yang Chen,
Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
Darknets are used mainly for object detection in:
[1] Joseph Redmon, Ali Farhadi
YOLOv3: An Incremental Improvement. arXiv:1804.02767
[2] Alexey Bochkovskiy, Chien-Yao Wang, Hong-Yuan Mark Liao
YOLOv4: Optimal Speed and Accuracy of Object Detection. arXiv:2004.10934
"""
import collections
import tensorflow as tf
from official.modeling import hyperparams
from official.vision.beta.modeling.backbones import factory
from official.vision.beta.projects.yolo.modeling.layers import nn_blocks
class BlockConfig:
"""Class to store layer config to make code more readable."""
def __init__(self, layer, stack, reps, bottleneck, filters, pool_size,
kernel_size, strides, padding, activation, route, dilation_rate,
output_name, is_output):
"""Initializing method for BlockConfig.
Args:
layer: A `str` for layer name.
stack: A `str` for the type of layer ordering to use for this specific
level.
reps: An `int` for the number of times to repeat block.
      bottleneck: A `bool` for whether this stack has a bottleneck layer.
filters: An `int` for the output depth of the level.
pool_size: An `int` for the pool_size of max pool layers.
kernel_size: An `int` for convolution kernel size.
strides: A `Union[int, tuple]` that indicates convolution strides.
padding: An `int` for the padding to apply to layers in this stack.
activation: A `str` for the activation to use for this stack.
route: An `int` for the level to route from to get the next input.
      dilation_rate: An `int` for the scale used in dilated Darknet.
output_name: A `str` for the name to use for this output.
is_output: A `bool` for whether this layer is an output in the default
model.
"""
self.layer = layer
self.stack = stack
self.repetitions = reps
self.bottleneck = bottleneck
self.filters = filters
self.kernel_size = kernel_size
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.activation = activation
self.route = route
self.dilation_rate = dilation_rate
self.output_name = output_name
self.is_output = is_output
def build_block_specs(config):
specs = []
for layer in config:
specs.append(BlockConfig(*layer))
return specs
class LayerBuilder:
"""Layer builder class.
Class for quick look up of default layers used by darknet to
connect, introduce or exit a level. Used in place of an if condition
or switch to make adding new layers easier and to reduce redundant code.
"""
def __init__(self):
self._layer_dict = {
'ConvBN': (nn_blocks.ConvBN, self.conv_bn_config_todict),
'MaxPool': (tf.keras.layers.MaxPool2D, self.maxpool_config_todict)
}
def conv_bn_config_todict(self, config, kwargs):
dictvals = {
'filters': config.filters,
'kernel_size': config.kernel_size,
'strides': config.strides,
'padding': config.padding
}
dictvals.update(kwargs)
return dictvals
def darktiny_config_todict(self, config, kwargs):
dictvals = {'filters': config.filters, 'strides': config.strides}
dictvals.update(kwargs)
return dictvals
def maxpool_config_todict(self, config, kwargs):
return {
'pool_size': config.pool_size,
'strides': config.strides,
'padding': config.padding,
'name': kwargs['name']
}
def __call__(self, config, kwargs):
layer, get_param_dict = self._layer_dict[config.layer]
param_dict = get_param_dict(config, kwargs)
return layer(**param_dict)
# model configs
LISTNAMES = [
'default_layer_name', 'level_type', 'number_of_layers_in_level',
'bottleneck', 'filters', 'kernal_size', 'pool_size', 'strides', 'padding',
'default_activation', 'route', 'dilation', 'level/name', 'is_output'
]
CSPDARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 106,
'neck_split': 132
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'csp', 1, True, 64, None, None, None, None, 'mish', -1,
1, 1, False
],
[
'DarkRes', 'csp', 2, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 8, False, 256, None, None, None, None, 'mish', -1,
1, 3, True
],
[
'DarkRes', 'csp', 8, False, 512, None, None, None, None, 'mish', -1,
2, 4, True
],
[
'DarkRes', 'csp', 4, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
]
}
CSPADARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 100,
'neck_split': 135
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'residual', 1, True, 64, None, None, None, None, 'mish',
-1, 1, 1, False
],
[
'DarkRes', 'csp', 2, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 8, False, 256, None, None, None, None, 'mish', -1,
1, 3, True
],
[
'DarkRes', 'csp', 8, False, 512, None, None, None, None, 'mish', -1,
2, 4, True
],
[
'DarkRes', 'csp', 4, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
]
}
LARGECSP53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 100,
'neck_split': 135
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'csp', 1, True, 64, None, None, None, None, 'mish', -1,
1, 1, False
],
[
'DarkRes', 'csp', 3, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 15, False, 256, None, None, None, None, 'mish',
-1, 1, 3, True
],
[
'DarkRes', 'csp', 15, False, 512, None, None, None, None, 'mish',
-1, 2, 4, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 8, 6, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 16, 7, True
],
]
}
DARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 76
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'leaky', -1, 1, 0,
False
],
[
'DarkRes', 'residual', 1, True, 64, None, None, None, None, 'leaky',
-1, 1, 1, False
],
[
'DarkRes', 'residual', 2, False, 128, None, None, None, None,
'leaky', -1, 1, 2, False
],
[
'DarkRes', 'residual', 8, False, 256, None, None, None, None,
'leaky', -1, 1, 3, True
],
[
'DarkRes', 'residual', 8, False, 512, None, None, None, None,
'leaky', -1, 2, 4, True
],
[
'DarkRes', 'residual', 4, False, 1024, None, None, None, None,
'leaky', -1, 4, 5, True
],
]
}
CSPDARKNETTINY = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 28
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 2, 'same', 'leaky', -1, 1, 0,
False
],
[
'ConvBN', None, 1, False, 64, None, 3, 2, 'same', 'leaky', -1, 1, 1,
False
],
[
'CSPTiny', 'csp_tiny', 1, False, 64, None, 3, 2, 'same', 'leaky',
-1, 1, 2, False
],
[
'CSPTiny', 'csp_tiny', 1, False, 128, None, 3, 2, 'same', 'leaky',
-1, 1, 3, False
],
[
'CSPTiny', 'csp_tiny', 1, False, 256, None, 3, 2, 'same', 'leaky',
-1, 1, 4, True
],
[
'ConvBN', None, 1, False, 512, None, 3, 1, 'same', 'leaky', -1, 1,
5, True
],
]
}
DARKNETTINY = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 14
},
'backbone': [
[
'ConvBN', None, 1, False, 16, None, 3, 1, 'same', 'leaky', -1, 1, 0,
False
],
[
'DarkTiny', 'tiny', 1, True, 32, None, 3, 2, 'same', 'leaky', -1, 1,
1, False
],
[
'DarkTiny', 'tiny', 1, True, 64, None, 3, 2, 'same', 'leaky', -1, 1,
2, False
],
[
'DarkTiny', 'tiny', 1, False, 128, None, 3, 2, 'same', 'leaky', -1,
1, 3, False
],
[
'DarkTiny', 'tiny', 1, False, 256, None, 3, 2, 'same', 'leaky', -1,
1, 4, True
],
[
'DarkTiny', 'tiny', 1, False, 512, None, 3, 2, 'same', 'leaky', -1,
1, 5, False
],
[
'DarkTiny', 'tiny', 1, False, 1024, None, 3, 1, 'same', 'leaky', -1,
1, 5, True
],
]
}
BACKBONES = {
'darknettiny': DARKNETTINY,
'darknet53': DARKNET53,
'cspdarknet53': CSPDARKNET53,
'altered_cspdarknet53': CSPADARKNET53,
'cspdarknettiny': CSPDARKNETTINY,
'csp-large': LARGECSP53,
}
@tf.keras.utils.register_keras_serializable(package='yolo')
class Darknet(tf.keras.Model):
"""The Darknet backbone architecture."""
def __init__(
self,
model_id='darknet53',
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
min_level=None,
max_level=5,
width_scale=1.0,
depth_scale=1.0,
csp_level_mod=(),
activation=None,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
dilate=False,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
layer_specs, splits = Darknet.get_model_config(model_id)
self._model_name = model_id
self._splits = splits
self._input_shape = input_specs
self._registry = LayerBuilder()
# default layer look up
self._min_size = min_level
self._max_size = max_level
self._output_specs = None
self._csp_level_mod = set(csp_level_mod)
self._kernel_initializer = kernel_initializer
self._bias_regularizer = bias_regularizer
self._norm_momentum = norm_momentum
self._norm_epislon = norm_epsilon
self._use_sync_bn = use_sync_bn
self._activation = activation
self._kernel_regularizer = kernel_regularizer
self._dilate = dilate
self._width_scale = width_scale
self._depth_scale = depth_scale
self._default_dict = {
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epislon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
'dilation_rate': 1,
'name': None
}
inputs = tf.keras.layers.Input(shape=self._input_shape.shape[1:])
output = self._build_struct(layer_specs, inputs)
super().__init__(inputs=inputs, outputs=output, name=self._model_name)
@property
def input_specs(self):
return self._input_shape
@property
def output_specs(self):
return self._output_specs
@property
def splits(self):
return self._splits
def _build_struct(self, net, inputs):
endpoints = collections.OrderedDict()
stack_outputs = [inputs]
for i, config in enumerate(net):
if config.output_name > self._max_size:
break
if config.output_name in self._csp_level_mod:
config.stack = 'residual'
config.filters = int(config.filters * self._width_scale)
config.repetitions = int(config.repetitions * self._depth_scale)
if config.stack is None:
x = self._build_block(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'residual':
x = self._residual_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'csp':
x = self._csp_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'csp_tiny':
x_pass, x = self._csp_tiny_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x_pass)
elif config.stack == 'tiny':
x = self._tiny_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
if (config.is_output and self._min_size is None):
endpoints[str(config.output_name)] = x
elif (self._min_size is not None and
config.output_name >= self._min_size and
config.output_name <= self._max_size):
endpoints[str(config.output_name)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints.keys()}
return endpoints
def _get_activation(self, activation):
if self._activation is None:
return activation
return self._activation
def _csp_stack(self, inputs, config, name):
if config.bottleneck:
csp_filter_scale = 1
residual_filter_scale = 2
scale_filters = 1
else:
csp_filter_scale = 2
residual_filter_scale = 1
scale_filters = 2
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_csp_down'
if self._dilate:
self._default_dict['dilation_rate'] = config.dilation_rate
else:
self._default_dict['dilation_rate'] = 1
# swap/add dilation
x, x_route = nn_blocks.CSPRoute(
filters=config.filters,
filter_scale=csp_filter_scale,
downsample=True,
**self._default_dict)(
inputs)
dilated_reps = config.repetitions - self._default_dict['dilation_rate'] // 2
for i in range(dilated_reps):
self._default_dict['name'] = f'{name}_{i}'
x = nn_blocks.DarkResidual(
filters=config.filters // scale_filters,
filter_scale=residual_filter_scale,
**self._default_dict)(
x)
for i in range(dilated_reps, config.repetitions):
self._default_dict[
'dilation_rate'] = self._default_dict['dilation_rate'] // 2
self._default_dict[
'name'] = f"{name}_{i}_degridded_{self._default_dict['dilation_rate']}"
x = nn_blocks.DarkResidual(
filters=config.filters // scale_filters,
filter_scale=residual_filter_scale,
**self._default_dict)(
x)
self._default_dict['name'] = f'{name}_csp_connect'
output = nn_blocks.CSPConnect(
filters=config.filters,
filter_scale=csp_filter_scale,
**self._default_dict)([x, x_route])
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return output
def _csp_tiny_stack(self, inputs, config, name):
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_csp_tiny'
x, x_route = nn_blocks.CSPTiny(
filters=config.filters, **self._default_dict)(
inputs)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x, x_route
def _tiny_stack(self, inputs, config, name):
x = tf.keras.layers.MaxPool2D(
pool_size=2,
strides=config.strides,
padding='same',
data_format=None,
name=f'{name}_tiny/pool')(
inputs)
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_tiny/conv'
x = nn_blocks.ConvBN(
filters=config.filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**self._default_dict)(
x)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x
def _residual_stack(self, inputs, config, name):
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_residual_down'
if self._dilate:
self._default_dict['dilation_rate'] = config.dilation_rate
if config.repetitions < 8:
config.repetitions += 2
else:
self._default_dict['dilation_rate'] = 1
x = nn_blocks.DarkResidual(
filters=config.filters, downsample=True, **self._default_dict)(
inputs)
dilated_reps = config.repetitions - (
self._default_dict['dilation_rate'] // 2) - 1
for i in range(dilated_reps):
self._default_dict['name'] = f'{name}_{i}'
x = nn_blocks.DarkResidual(
filters=config.filters, **self._default_dict)(
x)
for i in range(dilated_reps, config.repetitions - 1):
self._default_dict[
'dilation_rate'] = self._default_dict['dilation_rate'] // 2
self._default_dict[
'name'] = f"{name}_{i}_degridded_{self._default_dict['dilation_rate']}"
x = nn_blocks.DarkResidual(
filters=config.filters, **self._default_dict)(
x)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
self._default_dict['dilation_rate'] = 1
return x
def _build_block(self, inputs, config, name):
x = inputs
i = 0
self._default_dict['activation'] = self._get_activation(config.activation)
while i < config.repetitions:
self._default_dict['name'] = f'{name}_{i}'
layer = self._registry(config, self._default_dict)
x = layer(x)
i += 1
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x
@staticmethod
def get_model_config(name):
name = name.lower()
backbone = BACKBONES[name]['backbone']
splits = BACKBONES[name]['splits']
return build_block_specs(backbone), splits
@property
def model_id(self):
return self._model_name
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def get_config(self):
layer_config = {
'model_id': self._model_name,
'min_level': self._min_size,
'max_level': self._max_size,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epislon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
}
return layer_config
@factory.register_backbone_builder('darknet')
def build_darknet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
"""Builds darknet."""
backbone_cfg = backbone_config.get()
model = Darknet(
model_id=backbone_cfg.model_id,
min_level=backbone_cfg.min_level,
max_level=backbone_cfg.max_level,
input_specs=input_specs,
dilate=backbone_cfg.dilate,
width_scale=backbone_cfg.width_scale,
depth_scale=backbone_cfg.depth_scale,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
model.summary()
return model
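# --- Hedged usage sketch (added for illustration; not part of the TF Model Garden file). ---
# Constructing the backbone directly and inspecting its endpoints. The 256 x 256 input
# and the helper name _demo_darknet53 are assumptions; the endpoint keys '3'..'5' follow
# from the is_output/level entries of the DARKNET53 config above.
def _demo_darknet53():
  backbone = Darknet(
      model_id='darknet53',
      input_specs=tf.keras.layers.InputSpec(shape=[None, 256, 256, 3]),
      max_level=5)
  feats = backbone(tf.zeros([1, 256, 256, 3]))
  for level, feat in feats.items():
    # expected strides of 8, 16 and 32, e.g. '3' -> (1, 32, 32, 256), '5' -> (1, 8, 8, 1024)
    print(level, feat.shape)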
authors: ["hjkim@multiiq.com"] | author: hjkim@multiiq.com
---
blob_id: 3a986721bc36d1ad9af86b993cfa8c6f23662c94 | directory_id: 3839caecee5d5e237055b9968598a58dc1007885 | content_id: eb6f4f2b815f21c06cb9aaf8ff832249fdbc7af6
path: /user/urls.py
detected_licenses: [] | license_type: no_license
repo_name: hwangseonu/Yazamoon | snapshot_id: 64869878e32a9366782c0f080c49e2f320720fb3 | revision_id: 3defce29ff4ce09f3aae592c2d72afc6858172cc | branch_name: refs/heads/master
visit_date: 2021-06-12T00:32:14.042680 | revision_date: 2021-05-09T11:08:52 | committer_date: 2021-05-09T11:08:52
github_id: 134,995,863 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-05-09T11:08:53 | gha_created_at: 2018-05-26T21:29:29 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 245 | extension: py
content:
from django.urls import path
from .views import *
urlpatterns = [
path('login', LoginView.as_view(), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('register', RegisterView.as_view(), name='register'),
]
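# --- Hedged usage note (added for illustration; not part of the original file). ---
# These routes only take effect once this urlconf is included from the project's root
# urls.py; the 'user/' prefix and the project layout are assumptions:
#
#     from django.urls import include, path
#     urlpatterns = [
#         path('user/', include('user.urls')),
#     ]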
authors: ["hwangseonu12@naver.com"] | author: hwangseonu12@naver.com
---
blob_id: 82e1baea688932692b1e89c3d367c760d949b168 | directory_id: 683b4174a86d1aea5fb1fec53e27dfca43169356 | content_id: 6c3664c44fe5952efd8c08da6ac0743acf94d457
path: /MPI/02.mpi_bcast.py
detected_licenses: [] | license_type: no_license
repo_name: naufalhilmiaji/pds-mpi-thread | snapshot_id: 4be2ce0225973aa60cfe8a64fe519859a9f1bcc9 | revision_id: 8e1bcbb8bc9f82fff39295528527c0be706264c0 | branch_name: refs/heads/master
visit_date: 2022-04-24T11:27:16.096416 | revision_date: 2020-04-28T23:25:28 | committer_date: 2020-04-28T23:25:28
github_id: 259,584,358 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 438 | extension: py
content:
# import mpi4py
from mpi4py import MPI
# create the communicator
comm = MPI.COMM_WORLD
# get this process's rank
rank = comm.Get_rank()
# get the total number of running processes
size = comm.Get_size()
pesan = 'HALO!'
data = comm.bcast(pesan, root=0)
# if my rank is 0 then I performed the broadcast
if rank == 0:
print('Broadcast data:', data+'\n')
# if my rank is not 0 then I received the message
else:
print('Received data: "'+ data+'"')
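# Hedged addition, not in the original script: comm.bcast is collective, so
# every rank ends up holding the value supplied by root=0. The script would
# typically be launched with several processes, e.g.
# `mpiexec -n 4 python 02.mpi_bcast.py` (the exact launcher depends on the
# local MPI installation).
assert data == pesan, 'every rank should hold the broadcast value'
print('rank', rank, 'of', size, 'holds:', data)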
|
[
"noreply@github.com"
] |
noreply@github.com
|
e34bfa5f3d2bc242c5e1c36d1f5abb5517de2055
|
29e881915cb0132035e223dd2b97e0cbf44c2d12
|
/alluvian/commands/wizard/users.py
|
c076295c3b637b748c6e12830c8eb493e410d2d3
|
[
"MIT"
] |
permissive
|
wrparker/alluvian-engine
|
37701d542e4485131fd178ded281c2af44f2b264
|
70a6227af3e977ecda2fc4a1752dd4703f206778
|
refs/heads/master
| 2023-06-21T22:56:11.323860
| 2020-09-20T21:45:57
| 2020-09-20T21:45:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 891
|
py
|
from alluvian.commands.mud_command import MudCommand
import alluvian.globals as globs
from players.level import Level
from beautifultable import BeautifulTable
class Users(MudCommand):
key = 'users'
aliases = []
level = Level.IMPL
def execute(self):
table = BeautifulTable()
table.columns.header = ['Num', 'Name', 'IPAddr']
table.set_style(BeautifulTable.STYLE_COMPACT)
table.columns.alignment = BeautifulTable.ALIGN_LEFT
for pid, pl in globs.sessions.items():
try:
table.rows.append([pid, pl.player.name, globs.mud.get_player_ip(pid)])
except AttributeError:
table.rows.append([pid, 'Unidentified', globs.mud.get_player_ip(pid)])
msg = f'{table}\r\n\r\n'
msg += f'{len(table.rows)} visible Sockets connected'
self.msg(msg)
|
[
"rparker@indeed.com"
] |
rparker@indeed.com
|
7ad1e1493d3ea5d4a3b73bb9d7556fba745c4024
|
11ea97128214750b403a150edfab70dc6207d970
|
/ia32doc/processors/c_processor.py
|
9652e5799613a984a0bf9827d571a769fdb01d7c
|
[
"MIT"
] |
permissive
|
fengjixuchui/ia32-doc
|
5dc8a18dcc8fdbcb083433d3ed4aa8b3f3d35005
|
2336ac5d0cd8475656d8dbb56940d9b17d07295a
|
refs/heads/master
| 2022-05-17T02:00:49.877215
| 2022-04-16T10:34:36
| 2022-04-16T10:34:36
| 167,521,447
| 0
| 0
|
MIT
| 2022-04-16T10:34:37
| 2019-01-25T09:28:59
|
C
|
UTF-8
|
Python
| false
| false
| 25,075
|
py
|
from typing import Union, Tuple
from .base import DocProcessor
from ..doc import DocBase, DocGroup, DocDefinition, DocEnum, DocEnumField,\
DocBitfield, DocBitfieldField, DocStruct, DocStructField
from ..doc import DOC_DEFINITION, DOC_STRUCT, DOC_BITFIELD, DOC_STRUCT_FIELD, DOC_ENUM_FIELD
from ..text import DocText
class DocCProcessor(DocProcessor):
def __init__(self):
super().__init__()
#
# Nesting level of union/struct typedefs.
#
self._typedef_nesting = 0
#
# Bitfield position of last bitfield field.
#
self._bitfield_position = None
#
# Number of "Reserved" bitfield fields in current bitfield.
#
self._bitfield_reserved_count = None
#
# Make C++ code.
#
self._output_cpp = False
def process_group(self, doc: DocGroup) -> None:
if self.opt.group_comments and doc.long_description or self.opt.group_defgroup:
self.print(f'/**')
self.print_details(doc)
if self.opt.group_defgroup:
self.print(f' * @{{')
self.print(f' */')
self.process(doc.fields)
if self.opt.group_defgroup:
self.print(f'/**')
self.print(f' * @}}')
self.print(f' */')
self.print(f'')
def process_definition(self, doc: DocDefinition) -> None:
#
# We need to set "override_name_letter_case" explicitly,
# because this method is also shared for DocEnumField.
#
name = self.make_name(doc, override_name_letter_case=self.opt.definition_name_letter_case)
if self.opt.definition_comments and doc.long_description:
#
# Do not print empty line for the first element.
#
if next(filter(lambda field: isinstance(field, DocDefinition), doc.parent.fields)) != doc:
self.print(f'')
self.print(f'/**')
self.print_details(doc)
self.print(f' */')
align = self.opt.align if self.opt.definition_no_indent else \
self.align_indent_adjusted
value = f'0x{doc.value:08X}' if isinstance(doc.value, int) else \
f'{doc.value}'
self.print(f'#define {name:<{align}} {value}')
self.process(doc.fields)
def process_enum(self, doc: DocEnum) -> None:
if self.opt.enum_as_define:
if self.opt.enum_comments and doc.long_description:
self.print(f'/**')
self.print_details(doc)
#
# Create defgroup for this group of definitions (enum).
#
if self.opt.group_defgroup:
self.print(f' * @{{')
self.print(f' */')
for field in doc.fields:
assert field.type in [ DOC_DEFINITION, DOC_ENUM_FIELD ]
definition_field: DocDefinition = field
#
# DocDefinition and DocEnumField has the same interface,
# so it can be hacked this way.
#
self.process_definition(definition_field)
if self.opt.group_defgroup:
self.print(f'/**')
self.print(f' * @}}')
self.print(f' */')
else:
self._typedef_nesting += 1
if self.opt.enum_comments and doc.long_description:
self.print(f'/**')
self.print_details(doc, treat_description_as_short=True)
self.print(f' */')
optional_curly_brace = ' {' if not self.opt.brace_on_next_line else ''
optional_typedef = ''
optional_name_begin = ''
optional_name_end = ''
if self._output_cpp:
optional_name_begin = f' {self.make_name(doc)}'
else:
optional_typedef = 'typedef ' if self._typedef_nesting == 1 else ''
optional_name_end = f' {self.make_name(doc)}'
self.print(f'{optional_typedef}enum{optional_name_begin}{optional_curly_brace}')
if self.opt.brace_on_next_line:
self.print(f'{{')
with self.indent:
for field in doc.fields:
assert field.type in [ DOC_DEFINITION, DOC_ENUM_FIELD ]
getattr(self, f'process_{field.type}')(field)
if self._typedef_nesting == 1:
self.print(f'}}{optional_name_end};')
else:
name = self.make_name(
doc,
standalone=True,
override_name_letter_case=self.opt.enum_field_name_letter_case
)
self.print(f'}} {name};')
self._typedef_nesting -= 1
self.print(f'')
def process_enum_field(self, doc: DocEnumField) -> None:
name = self.make_name(doc)
if self.opt.enum_field_comments and doc.long_description:
#
# Do not print empty line for the first element.
#
if next(filter(lambda field: isinstance(field, DocEnumField), doc.parent.fields)) != doc:
self.print(f'')
self.print(f'/**')
self.print_details(doc)
self.print(f' */')
value = f'0x{doc.value:08X}'
self.print(f'{name:<{self.opt.align}} = {value},')
self.process(doc.fields)
def process_struct(self, doc: DocStruct) -> None:
self._typedef_nesting += 1
if self.opt.struct_comments and doc.long_description:
self.print(f'/**')
self.print_details(doc, treat_description_as_short=True)
self.print(f' */')
if doc.tag == 'Packed':
self.print(f'#pragma pack(push, 1)')
has_name = doc.short_name or doc.long_name
optional_curly_brace = ' {' if not self.opt.brace_on_next_line else ''
optional_typedef = ''
optional_name_begin = ''
optional_name_end = ''
if self._output_cpp:
optional_name_begin = f' {self.make_name(doc)}'
else:
optional_typedef = 'typedef ' if self._typedef_nesting == 1 else ''
optional_name_end = f' {self.make_name(doc)}'
self.print(f'{optional_typedef}struct{optional_name_begin}{optional_curly_brace}')
if self.opt.brace_on_next_line:
self.print(f'{{')
with self.indent:
for field in doc.fields:
assert field.type in [ DOC_STRUCT, DOC_BITFIELD, DOC_STRUCT_FIELD ]
if isinstance(field, DocBitfield) and not self.opt.bitfield_create_struct:
self.print(f'{self.make_size_type(field.size)[0]} {self.make_name(field, standalone=True)};')
else:
getattr(self, f'process_{field.type}')(field)
if self._typedef_nesting == 1:
assert has_name
self.print(f'}}{optional_name_end};')
else:
if has_name:
name = self.make_name(
doc,
standalone=True,
override_name_letter_case=self.opt.struct_field_name_letter_case
)
self.print(f'}} {name};')
else:
self.print(f'}};')
if doc.tag == 'Packed':
self.print(f'#pragma pack(pop)')
self.print(f'')
self._typedef_nesting -= 1
def process_struct_field(self, doc: DocStructField) -> None:
if self.opt.struct_field_comments and doc.long_description:
#
# Do not print empty line for the first element.
#
if next(iter(doc.parent.fields)) != doc:
self.print(f'')
self.print(f'/**')
self.print_details(doc)
self.print(f' */')
size_type, size_type_array = self.make_size_type(doc.size)
self.print(f'{size_type} {self.make_name(doc)}{size_type_array};')
if doc.fields:
self.print(f'')
self.process(doc.fields)
def process_bitfield(self, doc: DocBitfield) -> None:
if self.opt.bitfield_create_struct:
self._typedef_nesting += 1
if self.opt.bitfield_comments and doc.long_description:
self.print(f'/**')
self.print_details(doc, treat_description_as_short=True)
self.print(f' */')
has_name = doc.short_name or doc.long_name
optional_curly_brace = ' {' if not self.opt.brace_on_next_line else ''
optional_typedef = ''
optional_name_begin = ''
optional_name_end = ''
#
# Bitfields at root level MUST have name.
#
if self._typedef_nesting == 1:
assert has_name
if self._output_cpp:
optional_name_begin = f' {self.make_name(doc)}'
else:
optional_typedef = 'typedef ' if self._typedef_nesting == 1 else ''
optional_name_end = f' {self.make_name(doc)}'
#
# Create union (only for named bitfields).
#
if has_name:
self.print(f'{optional_typedef}union{optional_name_begin}{optional_curly_brace}')
if self.opt.brace_on_next_line:
self.print(f'{{')
else:
#
# If the bitfield is unnamed, do not double-indent the struct.
#
self.indent.indent_next = 0
with self.indent:
self.print(f'struct{optional_curly_brace}')
if self.opt.brace_on_next_line:
self.print(f'{{')
with self.indent:
assert self._bitfield_position is None
assert self._bitfield_reserved_count is None
self._bitfield_position = 0
self._bitfield_reserved_count = 0
last_field = None
for field in doc.fields:
if isinstance(field, DocBitfieldField):
self.process_bitfield_field(field)
last_field = field
#
# Check if we have to create last "Reserved" field.
#
last_bit_from, last_bit_to = last_field.bit
if last_bit_to < doc.size and self.opt.bitfield_field_fill_with_reserved:
self._bitfield_reserved_count += 1
bit_length = doc.size - self._bitfield_position
long_name = f'{self.opt.bitfield_field_reserved_prefix}{self._bitfield_reserved_count}'
self.print(
f'{self.make_size_type(doc.size)[0]} {long_name:<{self.align_indent_adjusted}}: '
f'{bit_length};'
)
self._bitfield_position = None
self._bitfield_reserved_count = None
self.print(f'}};')
#
# Print "Flags" member (only for named bitfields).
#
if has_name:
self.print(f'')
self.print(f'{self.make_size_type(doc.size)[0]} {self.opt.bitfield_field_flags_name};')
#
# End of the union (only for named bitfields).
#
if has_name:
if self._typedef_nesting == 1:
self.print(f'}}{optional_name_end};')
else:
name = self.make_name(
doc,
standalone=True,
override_name_letter_case=self.opt.bitfield_field_name_letter_case
)
self.print(f'}} {name};')
self._typedef_nesting -= 1
else:
#
# Do not create unions.
#
for field in doc.fields:
if isinstance(field, DocBitfieldField):
self.process_bitfield_field(field)
self.print(f'')
def process_bitfield_field(self, doc: DocBitfieldField) -> None:
bit_from, bit_to = doc.bit
if self.opt.bitfield_create_struct:
#
# Handle "Reserved" fields.
#
if bit_from > self._bitfield_position:
self._bitfield_reserved_count += 1
bit_length = bit_from - self._bitfield_position
long_name = f'{self.opt.bitfield_field_reserved_prefix}{self._bitfield_reserved_count}'
self.print(
f'{self.make_size_type(doc.parent.size)[0]} {long_name:<{self.align_indent_adjusted}}: '
f'{bit_length};'
)
self._bitfield_position = bit_from
#
# Print bit-field.
#
bit_length = bit_to - self._bitfield_position
if self.opt.bitfield_field_comments and doc.long_description:
if self._bitfield_position > 0:
self.print(f'')
self.print(f'/**')
self.print_details(doc)
self.print(f' */')
self.print(
f'{self.make_size_type(doc.parent.size)[0]} {self.make_name(doc):<{self.align_indent_adjusted}}: '
f'{bit_length};'
)
#
# Print definitions for fields.
#
bit_shift = bit_to - bit_from
#
# Print definitions only for NAMED bitfields.
#
if doc.parent.short_name or doc.parent.long_name:
bitfield_field_with_define_any = any([
self.opt.bitfield_field_with_define_bit,
self.opt.bitfield_field_with_define_flag,
self.opt.bitfield_field_with_define_mask,
self.opt.bitfield_field_with_define_get
])
if bitfield_field_with_define_any:
part1 = self.make_name(doc.parent, override_name_letter_case=self.opt.definition_name_letter_case)
part2 = self.make_name(doc, override_name_letter_case=self.opt.definition_name_letter_case)
align = self.opt.align if self.opt.definition_no_indent else \
self.align_indent_adjusted
#
# !!! INCREDIBLY UGLY HACK !!!
# Remove _REGISTER suffix.
#
if 'name_with_suffix' in doc.parent._doc:
part1 = part1[0:(len(part1) - len(doc.parent._doc['name_with_suffix']) - 1)]
if self.opt.bitfield_field_with_define_bit:
definition = f'{part1}_{part2}{self.opt.bitfield_field_with_define_bit_suffix}'
self.print(f'#define {definition:<{align}} {bit_from}')
if self.opt.bitfield_field_with_define_flag:
definition = f'{part1}_{part2}{self.opt.bitfield_field_with_define_flag_suffix}'
self.print(f'#define {definition:<{align}} 0x{(((1 << bit_shift) - 1) << bit_from):02X}')
if self.opt.bitfield_field_with_define_mask:
definition = f'{part1}_{part2}{self.opt.bitfield_field_with_define_mask_suffix}'
self.print(f'#define {definition:<{align}} 0x{((1 << bit_shift) - 1):02X}')
if self.opt.bitfield_field_with_define_get:
definition = f'{part1}_{part2}({self.opt.bitfield_field_with_define_get_macro_argument_name})'
self.print(
f'#define {definition:<{align}} '
f'((({self.opt.bitfield_field_with_define_get_macro_argument_name}) >> {bit_from}) & '
f'0x{((1 << bit_shift) - 1):02X})'
)
self._bitfield_position = bit_to
self.process(doc.fields)
def process_struct(self, doc: DocStruct) -> None:
self._typedef_nesting += 1
if self.opt.struct_comments and doc.long_description:
self.print(f'/**')
self.print_details(doc, treat_description_as_short=True)
self.print(f' */')
if doc.tag == 'Packed':
self.print(f'#pragma pack(push, 1)')
has_name = doc.short_name or doc.long_name
optional_curly_brace = ' {' if not self.opt.brace_on_next_line else ''
optional_typedef = 'typedef ' if self._typedef_nesting == 1 else ''
self.print(f'{optional_typedef}struct{optional_curly_brace}')
if self.opt.brace_on_next_line:
self.print(f'{{')
with self.indent:
for field in doc.fields:
assert field.type in [ DOC_DEFINITION, DOC_STRUCT, DOC_BITFIELD, DOC_STRUCT_FIELD ]
if isinstance(field, DocBitfield) and not self.opt.bitfield_create_struct:
self.print(f'{self.make_size_type(field.size)[0]} {self.make_name(field, standalone=True)};')
else:
getattr(self, f'process_{field.type}')(field)
if self._typedef_nesting == 1:
assert has_name
self.print(f'}} {self.make_name(doc)};')
else:
if has_name:
name = self.make_name(
doc,
standalone=True,
override_name_letter_case=self.opt.struct_field_name_letter_case
)
self.print(f'}} {name};')
else:
self.print(f'}};')
if doc.tag == 'Packed':
self.print(f'#pragma pack(pop)')
self.print(f'')
self._typedef_nesting -= 1
def process_struct_field(self, doc: DocStructField) -> None:
if self.opt.struct_field_comments and doc.long_description:
#
# Do not print empty line for the first element.
#
if next(iter(doc.parent.fields)) != doc:
self.print(f'')
self.print(f'/**')
self.print_details(doc)
self.print(f' */')
size_type, size_type_array = self.make_size_type(doc.size)
self.print(f'{size_type} {self.make_name(doc)}{size_type_array};')
if doc.fields:
self.print(f'')
self.process(doc.fields)
# #
# ================================================================================================================ #
# #
def print_details(self, doc: DocBase, treat_description_as_short=False) -> None:
#
# Handle group-related comments.
# ------------------------------
#
print_defgroup = False
if isinstance(doc, DocGroup):
print_defgroup = self.opt.group_defgroup
if print_defgroup and doc.short_name:
group_id = self.make_name(doc, long=False, raw=True)
group_name = self.make_multiline_comment(doc.short_description, ' ')
self.print(f' * @defgroup {group_id} \\')
self.print(f' * {group_name}')
#
# Handle short/long descriptions.
# -------------------------------
#
print_short_description = getattr(self.opt, f'{doc.type}_short_description') and doc.short_description
print_long_description = getattr(self.opt, f'{doc.type}_long_description') and doc.long_description
print_access = getattr(self.opt, f'{doc.type}_access') and doc.access
if doc.short_description_raw == doc.long_description_raw:
if treat_description_as_short:
print_long_description = False
else:
print_short_description = False
#
# Do not print @brief when @defgroup has been printed.
#
if print_short_description and not print_defgroup:
access = f' <b>({doc.access})</b>' if print_access else ''
short_description = self.make_multiline_comment(doc.short_description, '@brief ')
self.print(f' * {short_description}{access}')
if print_long_description:
#
# Delimit short_description and long_description with empty line.
# Delimit @defgroup and long_description with empty line.
#
if print_short_description or print_defgroup:
self.print(f' *')
if isinstance(doc, DocBitfieldField) and self.opt.bitfield_field_long_description_with_bit_range:
bit_from, bit_to = doc.bit
bit_to -= 1
bit = f'[Bit {bit_from}] ' if bit_from == bit_to else \
f'[Bits {bit_to}:{bit_from}] '
long_description = self.make_multiline_comment(f'{bit}{doc.long_description}')
else:
long_description = self.make_multiline_comment(doc.long_description)
self.print(f' * {long_description}')
#
# Handle detailed comments.
# -------------------------
#
print_note = getattr(self.opt, f'{doc.type}_note') and doc.note
print_remarks = getattr(self.opt, f'{doc.type}_remarks') and doc.remarks
print_see = getattr(self.opt, f'{doc.type}_see') and doc.see
print_reference = getattr(self.opt, f'{doc.type}_reference') and doc.reference
#
# Delimit description (above) from details with empty line.
#
if any([print_note, print_remarks, print_see, print_reference]):
self.print(f' *')
if print_note:
note = self.make_multiline_comment(doc.note, '@note ')
self.print(f' * {note}')
if print_remarks:
remarks = self.make_multiline_comment(doc.remarks, '@remarks ')
self.print(f' * {remarks}')
#
# see and reference can be either list (of strings) or string.
#
if print_see:
if isinstance(doc.see, list):
for see in doc.see:
self.print(f' * @see {see}')
else:
self.print(f' * @see {doc.see}')
if print_reference:
if isinstance(doc.reference, list):
for reference in doc.reference:
self.print(f' * @see {reference} (reference)')
else:
self.print(f' * @see {doc.reference} (reference)')
def make_name(self, doc: DocBase,
long: bool=None, raw: bool=False, standalone: bool=False,
override_name_letter_case: str=None) -> str:
assert not (raw and standalone) # invalid combination
if long is None:
long = getattr(self.opt, f'{doc.type}_prefer_long_names')
if getattr(self.opt, f'{doc.type}_prefer_alternative_names') and doc.alternative_name:
result = doc.alternative_name
else:
if raw:
result = doc.long_name_raw if long else doc.short_name_raw
elif standalone:
result = doc.long_name_standalone if long else doc.short_name_standalone
else:
result = doc.long_name if long else doc.short_name
if override_name_letter_case is not None:
letter_case = override_name_letter_case
else:
letter_case = getattr(self.opt, f'{doc.type}_name_letter_case')
return DocText.convert_case(result, letter_case)
def make_size_type(self, size) -> Union[str, Tuple[str, str]]:
try:
if size in [ 8, 16, 32, 64 ]:
return getattr(self.opt, f'int_type_{size}'), ''
elif size % 8 == 0:
size_in_bytes = size // 8
return self.opt.int_type_8, f'[{size_in_bytes}]'
else:
raise Exception('Cannot represent size as type')
except:
import sys
print(size, file=sys.stderr)
raise
@staticmethod
def make_multiline_comment(text: str, prefix: str='', indent: int=1) -> str:
#
# Compute indent from the prefix (if provided).
#
if prefix:
indent = len(prefix) + 1 # + 1 space ' * {...}'
# ~~~~~~~~~~~~ ^ here
text = prefix + text
indent_text = ' ' * indent
lines = DocText.wrap(text, 120)
result = lines[0]
if len(lines) > 1:
result += f'\n'
result += f'\n'.join([ f' *{indent_text}{line}' for line in lines[1:] ])
return result
@property
def align_indent_adjusted(self) -> int:
return max(0, self.opt.align - self.indent.indent)
|
[
"w.benny@outlook.com"
] |
w.benny@outlook.com
|
61c08f3149c26a08411c2cf3f2177844c8a9a0c1
|
f76e1898476d05ab3007a9b08c9737f131a838c5
|
/projeto_banco/sqllite.py
|
438814e15550e3ee741bf3637361d60af1f2f897
|
[] |
no_license
|
bbnsdevelop/python_3_estudos
|
8f9f6ac51c751914bd48cd464a844691e13e23c8
|
d299177bf9ad0cb37577576b02776d31d768f9c9
|
refs/heads/main
| 2023-07-12T06:53:29.747184
| 2021-08-23T20:03:03
| 2021-08-23T20:03:03
| 335,139,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
#!/usr/bin/python3
# coding: utf-8
from sqlite3 import connect, ProgrammingError, Row
tabela_grupo = """
CREATE TABLE IF NOT EXISTS grupos (
id INTEGER PRIMARY KEY AUTOINCREMENT,
descricao VARCHAR(30)
)
"""
tabela_contatos = """
CREATE TABLE IF NOT EXISTS contatos (
id INTEGER PRIMARY KEY AUTOINCREMENT,
nome VARCHAR(50),
tel VARCHAR(40),
grupo_id INTEGER,
FOREIGN KEY (grupo_id) REFERENCES grupos(id)
)
"""
insert_grupos = 'INSERT INTO grupos (descricao) VALUES (?)'
select_grupos = 'SELECT id, descricao FROM grupos'
insert_contatos = 'INSERT INTO contatos (nome, tel, grupo_id) VALUES (?, ?, ?)'
select = """
SELECT
grupos.descricao AS grupo,
contatos.nome AS contato
FROM contatos
INNER JOIN grupos ON contatos.grupo_id = grupos.id
ORDER BY grupo, contato
"""
try:
conexao = connect(':memory:')
conexao.row_factory = Row
cursor = conexao.cursor()
cursor.execute(tabela_grupo)
cursor.execute(tabela_contatos)
cursor.executemany(insert_grupos, (('Casa',), ('Trabalho',)))
cursor.execute(select_grupos)
grupos = {row['descricao']: row['id'] for row in cursor.fetchall()}
contatos = (
('Arthur', '456', grupos['Casa']),
('Paulo', '789', grupos['Casa']),
('Ângelo', '000', grupos['Trabalho']),
('Eduardo', '987', None),
('Yuri', '654', grupos['Casa']),
('Leonardo', '321', grupos['Casa']),
)
cursor.executemany(insert_contatos, contatos)
cursor.execute(select)
for contato in cursor:
print(contato['contato'], contato['grupo'])
except ProgrammingError as e:
    print(f'Erro: {e}')  # ProgrammingError has no .msg attribute; print the exception itself
|
[
"bbnsdevelop@gmail.com"
] |
bbnsdevelop@gmail.com
|
d443767901ae0397302c5d783a2ee3b265d80304
|
0f0e724b73f1998bac96bea2c581271388192a2a
|
/Data_Analyst_basics/test.py
|
053d6897f7e065c0d6c9013cc6777830d2fdf503
|
[] |
no_license
|
hkmangla/Data-Analyst-Nanodegree
|
572dad5e1493e0e50edb6e0f9fd6a02c0b78dbba
|
04f17a19c7005f0cc59fb5cae92ae1b585faa72e
|
refs/heads/master
| 2020-12-24T20:14:52.906927
| 2016-09-30T18:06:55
| 2016-09-30T18:06:55
| 58,628,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,687
|
py
|
import unicodecsv
from datetime import datetime as dt
from collections import defaultdict
import numpy as np
with open('enrollments.csv', 'rb') as f:
reader = unicodecsv.DictReader(f)
enrollments = list(reader)
with open('daily_engagement.csv', 'rb') as f:
reader = unicodecsv.DictReader(f)
daily_engagement = list(reader)
with open('project_submissions.csv', 'rb') as f:
reader = unicodecsv.DictReader(f)
project_submissions = list(reader)
def parse_date(date):
if date == '':
return None
else:
return dt.strptime(date, '%Y-%m-%d')
def parse_maybe_int(i):
if i == '':
return None
else:
return int(i)
for enrollment in enrollments:
enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])
enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])
enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'
enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'
enrollment['join_date'] = parse_date(enrollment['join_date'])
for engagement_record in daily_engagement:
engagement_record['lessons_completed'] = int(float(engagement_record['lessons_completed']))
engagement_record['num_courses_visited'] = int(float(engagement_record['num_courses_visited']))
engagement_record['projects_completed'] = int(float(engagement_record['projects_completed']))
engagement_record['total_minutes_visited'] = float(engagement_record['total_minutes_visited'])
engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])
for submission in project_submissions:
submission['completion_date'] = parse_date(submission['completion_date'])
submission['creation_date'] = parse_date(submission['creation_date'])
def check_id(enrollments):
unique_id = []
enrollment_num_unique_students = 0
for i in enrollments:
if i['account_key'] not in unique_id:
unique_id.append(i['account_key'])
enrollment_num_unique_students += 1
return unique_id
enrollment_num_rows = len(enrollments)
engagement_num_rows = len(daily_engagement)
submission_num_rows = len(project_submissions)
enrollment_num_unique_students = len(check_id(enrollments))
engagement_num_unique_students = 0
unique_id = []
for i in daily_engagement:
if i['acct'] not in unique_id:
unique_id.append(i['acct'])
engagement_num_unique_students += 1
i['account_key'] = i['acct']
del i['acct']
submission_num_unique_students = len(check_id(project_submissions))
v = 0
y = unique_id
x = check_id(enrollments)
for item in enrollments:
iyt = item['account_key']
if iyt not in y and item['join_date'] != item['cancel_date']:
v += 1
udacity_test_account = set()
for enrollment in enrollments:
if enrollment['is_udacity']:
udacity_test_account.add(enrollment['account_key'])
def remove_udacity_account(data):
non_udacity = []
for datalist in data:
if datalist['account_key'] not in udacity_test_account:
non_udacity.append(datalist)
return non_udacity
non_udacity_enrollments = remove_udacity_account(enrollments)
non_udacity_engagements = remove_udacity_account(daily_engagement)
non_udacity_submissions = remove_udacity_account(project_submissions)
paid_students = {}
for enrollment in non_udacity_enrollments:
if not enrollment['is_canceled'] or enrollment['days_to_cancel'] > 7:
if enrollment['account_key'] not in paid_students or enrollment['join_date'] > paid_students[enrollment['account_key']]:
paid_students[enrollment['account_key']] = enrollment['join_date']
def within_one_week(join_date, engagement_date):
time_delta = engagement_date - join_date
return time_delta.days < 7 and time_delta.days >=0
paid_engagement_in_first_week = []
for data in daily_engagement:
if data['account_key'] in paid_students.keys():
if within_one_week(paid_students[data['account_key']],data['utc_date']):
paid_engagement_in_first_week.append(data)
def group_by_account(data,key):
enagaged_by_account = defaultdict(list)
for engaged in data:
account_key = engaged[key]
enagaged_by_account[account_key].append(engaged)
return enagaged_by_account
enagaged_by_account = group_by_account(paid_engagement_in_first_week,'account_key')
def total_num_of_by_account(enagaged_by_account,key):
total_minutes_by_account = {}
for account_key,engaged_record in enagaged_by_account.items():
total_minutes = 0
for i in engaged_record:
total_minutes += i[key]
total_minutes_by_account[account_key] = total_minutes
return total_minutes_by_account
total_minutes_by_account = total_num_of_by_account(enagaged_by_account,'total_minutes_visited')
tatal_lesson_completed_by_account = total_num_of_by_account(enagaged_by_account,'lessons_completed')
# total_num_of_day_visited = total_num_of_by_account(enagaged_by_account,'has_visited')
subway_project_lesson_keys = ['746169184', '3176718735']
passing_engagement = []
non_passing_engagement = []
passing_project_keys = set()
for submission in non_udacity_submissions:
if submission['lesson_key'] in subway_project_lesson_keys:
if submission['assigned_rating'] == 'PASSED' or submission['assigned_rating'] == 'DISTINCTION':
passing_project_keys.add(submission['account_key'])
# print len(passing_project_keys)
for engaged in paid_engagement_in_first_week:
if engaged['account_key'] in passing_project_keys:
passing_engagement.append(engaged)
else:
non_passing_engagement.append(engaged)
# print len(passing_engagement)
# print len(non_passing_engagement)
passing_engagement_by_account = group_by_account(passing_engagement,'account_key')
non_passing_engagement_by_account = group_by_account(non_passing_engagement,'account_key')
total_minutes_in_passing_engagement = total_num_of_by_account(passing_engagement_by_account,'total_minutes_visited')
total_minutes_in_non_passing_engagement = total_num_of_by_account(non_passing_engagement_by_account,'total_minutes_visited')
# total_minutes = total_minutes_in_passing_engagement.values()
# print np.mean(total_minutes)
# total_minutes = total_minutes_in_non_passing_engagement.values()
# print np.mean(total_minutes)
# total_minutes = total_minutes_by_account.values()
# max_minutes = np.max(total_minutes)
# for account_key,engaged_record in enagaged_by_account.items():
# if total_minutes_by_account[account_key] == max_minutes:
# print engaged_record
# print len(paid_engagement_in_first_week)
# print daily_engagement[0]['account_key']
# print enrollment_num_rows
# print engagement_num_rows
# print submission_num_rows
# print enrollment_num_unique_students
# print engagement_num_unique_students
# print submission_num_unique_students
# print enrollments[0]
# print daily_engagement[0]
# print project_submissions[0]
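# Hedged addition, not part of the original script: the commented-out lines
# above sketch the comparison these helpers were built for; written out for
# Python 3 it reads as follows.
print('passing students, mean minutes in week one:',
      np.mean(list(total_minutes_in_passing_engagement.values())))
print('non-passing students, mean minutes in week one:',
      np.mean(list(total_minutes_in_non_passing_engagement.values())))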
|
[
"hemantmangla78@gmail.com"
] |
hemantmangla78@gmail.com
|
83622cad6e919ac9bd61c4be424b88df3b873a66
|
658e16b3004ebae7520b1780a9b9c61f7b1a3b0c
|
/Scripts/tests/test_custom_loss_in_rl.py
|
f9a203329e5c95ff728580457d09926637cf0dba
|
[] |
no_license
|
flyinskybtx/CNP-BC
|
8298ae912520ebbac0d317d9d9781e48cb79df8b
|
0219121f914d00108362459432a83d156e8ccdb2
|
refs/heads/master
| 2023-06-09T04:47:22.937304
| 2023-05-26T10:05:15
| 2023-05-26T10:05:15
| 289,280,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,898
|
py
|
import random
import string
import numpy as np
import ray
from ray.rllib import rollout
from ray.rllib.agents import pg
from ray.rllib.models import ModelCatalog
from ray.tune import register_env
from tqdm import tqdm
from Envs.custom_cartpole_v1 import CustomCartPole
from Models.policy_model import PolicyFCModel
if __name__ == '__main__':
ray.shutdown(True)
ray.init(num_gpus=1, )
register_env('CustomCartPole-v1', lambda config: CustomCartPole(config))
# Model Free
model_name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
ModelCatalog.register_custom_model(model_name, PolicyFCModel)
config_rl = {
"train_batch_size": 200,
'num_workers': 0,
'log_level': 'INFO',
'framework': 'tf',
'env_config': {
'masscart': 1.0,
'masspole': 0.1,
'length': np.random.uniform(0.5, 1),
'force_mag': 10,
},
'model': {
'custom_model': model_name,
"custom_model_config": {
'hiddens': [32, 32, 16],
'offline_dataset': 'offline/cartpole/Cem_MPC'
},
},
}
results = {'mf': [], 'mpc-bc': []}
agent = pg.PGTrainer(config=config_rl, env='CustomCartPole-v1')
for i in tqdm(range(100)):
result_mf = agent.train()
print(f"\t RL Reward: "
f"{result_mf['episode_reward_max']:.4f} | "
f"{result_mf['episode_reward_mean']:.4f} | "
f"{result_mf['episode_reward_min']:.4f} |"
)
results['mf'].append(result_mf['episode_reward_mean'])
if i % 50 == 0:
checkpoint = agent.save()
print("checkpoint saved at", checkpoint)
if i % 50 == 0:
rollout.rollout(agent, env_name='CustomCartPole-v1', num_steps=50, num_episodes=1, no_render=False)
|
[
"flyinskybtx@sina.com"
] |
flyinskybtx@sina.com
|
19e5e99b4598f9270e0cc992301e841753fd2870
|
c2b386e1d28c58efbb9d847098a87032e2cbacca
|
/products_app/init.py
|
930a8691b7c54fa99f1d8508a131fb4977bb6b31
|
[] |
no_license
|
jmlm74/P11-Ameliorez-un-projet-existant-en-Python
|
e6468342554f5c4aa03bc0bb954aa7995e98e293
|
28cd84698bf272e279bbf6e1d15211ef2a3c6403
|
refs/heads/master
| 2022-12-11T02:57:59.563283
| 2020-09-10T16:02:27
| 2020-09-10T16:02:27
| 290,844,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
# URL and query parameters for the Open Food Facts API
url = 'https://fr-en.openfoodfacts.org/cgi/search.pl'
params_off = {'search_simple': 1,
'action': 'process',
'json': 1,
'page_size': 300,
'page': 1,
'tagtype_0': 'categories',
'tag_contains_0': 'contains',
'tag_0': 'cat',
'tagtype_1': 'countries',
'tag_contains_1': 'contains',
'tag_1': 'france',
'sort_by': 'unique_scans_n'
}
# categories to fetch
categories = ['biscuits',
'Crepes',
'desserts',
'sweetened-beverages', ]
# brands to fetch so that well-known products are included
brands = {'coca cola',
'ferrero',
'pepsi'}
# items per page for the paginator
NB_ITEMS_PAGE = 12
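# Hedged usage sketch, not part of the original module: the settings above are
# presumably fed to the Open Food Facts search endpoint roughly like this
# (the requests dependency and the 'products' response key are assumptions).
def fetch_category(category):
    import requests
    params = dict(params_off, tag_0=category)
    response = requests.get(url, params=params)
    response.raise_for_status()
    return response.json().get('products', [])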
|
[
"jmlm74@gmail.com"
] |
jmlm74@gmail.com
|
dda19d3760238be041683f318ac54c057e37c6f2
|
e049a58929f0a878b4b46f4b5d21db1cf5833863
|
/backend/app/schemas/video.py
|
57277fdf149ab05923208064769126b42ba8c9d6
|
[
"MIT"
] |
permissive
|
Asma-Alghamdi/CREstimator_website
|
791e8e15d8bb382ca63ea0e7260c02a1257d89e3
|
7c4dc07e9ed15cbfa4981ceba8e3115b2b9dabad
|
refs/heads/master
| 2023-07-07T05:11:13.425156
| 2021-08-12T16:30:23
| 2021-08-12T16:30:23
| 394,372,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
# Normal way
def videoEntity(item) -> dict:
return {
"name": item["name"],
"path": item["path"],
"Placename": item["Placename"],
"setting": item["setting"],
"country": item["country"],
"duration": item["duration"],
"date": item["date"],
"sendEmail": item["sendEmail"],
"publish": item["publish"],
"outputVideoPath": item["outputVideoPath"],
"contactRate": item["contactRate"],
"average": item["average"],
"totalPeople": item["totalPeople"],
"coverPic": item["coverPic"],
"figurePath": item["figurePath"],
"userId": item["userId"],
}
def videosEntity(entity) -> list:
return [videoEntity(item) for item in entity]
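# Minimal hedged usage sketch: the sample document below is made up and only
# shows that videoEntity re-keys a Mongo-style document while videosEntity
# maps it over an iterable of such documents.
_sample = {key: None for key in (
    "name", "path", "Placename", "setting", "country", "duration", "date",
    "sendEmail", "publish", "outputVideoPath", "contactRate", "average",
    "totalPeople", "coverPic", "figurePath", "userId")}
assert videosEntity([_sample]) == [videoEntity(_sample)]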
|
[
"aalghamdi.wo@gmail.com"
] |
aalghamdi.wo@gmail.com
|
90a04cffae6636dbc36a4055192bd72cc9f358fc
|
bfd33836d69dd05d6e7720216cef6d341c35147b
|
/User/TDat/fruitandgreen_project.py
|
a2e78d191c64653e701c1fe60dc0e25dafb17e89
|
[] |
no_license
|
ducdan/PYTHON-COURSE
|
5371b65685769e88c2e14a38b9c86a2bdfc82b79
|
cc5a26d0d8124087f711ee0ca354065df6ea4fcf
|
refs/heads/master
| 2020-05-25T15:00:00.465760
| 2017-06-15T11:49:54
| 2017-06-15T11:49:54
| 84,941,845
| 1
| 2
| null | 2017-06-15T11:44:32
| 2017-03-14T11:34:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
from flask import Flask,render_template,request,send_file
from DataModel import KHO, XNK
app = Flask(__name__,static_folder='vendors')
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/xemct')
def xemct():
return render_template('XemCT.html')
@app.route('/xemhangton')
def xemhangton():
return render_template('xemhangton.html')
@app.route('/xuatkho')
def xuatkho():
return render_template('xuatkho.html')
@app.route('/phieunhap')
def phieunhap():
return render_template('phieunhap.html')
@app.route('/phieuxuat')
def phieuxuat():
return render_template('phieuxuat.html')
@app.route('/nhap',methods=['POST'])
def nhap():
    firstname = request.form['firstname']
    lastname = request.form['lastname']
    print(firstname + " " + lastname)
    dic = {
        'first': firstname,
        'last': lastname
    }
return render_template('nhapkho.html',fist=dic)
@app.route('/xemct', methods=['GET', 'POST'])
def show_kho():
xnk = ""
MaKho = ""
LoaiSp = ""
if request.method == 'POST':
xnk = request.form['XNK']
Port_of_Discharge = xnk['']
Shipper = request.form['LoaiSP']
data = []
if ((xnk == "Tất cả") & (Port_of_Discharge == "") & (Shipper == "")):
data = KHO.query.all()
elif ((xnk == "Tất cả") & (Port_of_Discharge == "")):
pass
elif ((xnk == "Tất cả") & (Shipper == "")):
pass
elif ((xnk == "Tất cả")):
pass
elif ((Port_of_Discharge == "") & (Shipper == "")):
data = XNK.query.filter_by(XNK=Port_of_Discharge).all()
elif ((Port_of_Discharge == "")):
pass
elif ((Shipper == "")):
pass
else:
pass
return render_template("XemCT.html", data = data)
@app.route('/report')
def report():
    return send_file('report.pdf', as_attachment=True)
if __name__ == '__main__':
app.run(debug=True)
|
[
"why.not.me.20131995@gmail.com"
] |
why.not.me.20131995@gmail.com
|
6e0c4ad69b4b49971c03a2c5351156bcf88a2bb4
|
f9fee177fcc9c512550bb5216b4c227a8509518f
|
/Additional Linear Referencing Tools.pyt
|
ebc7a5dbd34a897fd89ede9f881de9133b919159
|
[
"Apache-2.0"
] |
permissive
|
M-Bryant/arcgis-additional-linear-referencing-toolbox
|
a45d6ace2e2bd5c619ad9f29f0d5c00db1ca29b2
|
12d1129b9adde506ecd9f50c81e486d4ebb506cf
|
refs/heads/master
| 2020-12-24T10:32:50.478404
| 2019-12-17T03:53:45
| 2019-12-17T03:53:45
| 73,145,171
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,617
|
pyt
|
"""
The Additional Linear Referencing Tools toolbox provides a collection of
geoprocessing tools for working with linear referencing systems.
The toolbox is organized into toolsets that reflect the general sequence of
tasks accomplished by the tools.
More information about linear referencing systems is available at
http://desktop.arcgis.com/en/arcmap/latest/manage-data/linear-referencing/what-is-linear-referencing.htm
"""
import sys
import os
# Tools are located in a subfolder called Scripts. Append to path
SCRIPTPATH = os.path.join(os.path.dirname(__file__), "Scripts")
sys.path.append(SCRIPTPATH)
# Do not compile .pyc files for the tool modules.
sys.dont_write_bytecode = True
# Import the tool
from create_point_event_table import CreatePointEventTable
from create_line_event_table import CreateLineEventTable
from create_route_by_length import CreateRouteByLength
from create_points_along_line import PointsAlongLine
from station_points_and_cross_sections import StationPointsAndCrossSections
del SCRIPTPATH
class Toolbox(object):
"""ArcGIS Python Toolbox - Additional Linear Referencing Tools"""
def __init__(self):
"""Define the toolbox (the name of the toolbox is the name of the
.pyt file)."""
self.label = 'Additional Linear Referencing Tools'
self.alias = 'alr'
# List of tool classes associated with this toolbox
self.tools = [CreatePointEventTable,
CreateLineEventTable,
CreateRouteByLength,
PointsAlongLine,
StationPointsAndCrossSections]
|
[
"Mark.Bryant@aecom.com"
] |
Mark.Bryant@aecom.com
|
ac82366a01d98b48b370d69f835e2fbf03cad04d
|
43efe19d743e16236cdb29a78cf70b0990facdd8
|
/Lxf/06模块/test22/hello.py
|
32f4375b3beda022eda364f83bc2272aaf988726
|
[] |
no_license
|
qiongloo/python-learning
|
17ecb71106f9c770b29920bbad65c504225949e5
|
03d0f635b2bf8038ecfd8f5064a51ade794a528b
|
refs/heads/master
| 2021-05-09T09:49:57.499089
| 2019-08-12T02:29:57
| 2019-08-12T02:29:57
| 119,462,064
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test22 module '
__author__ = 'Billy Yang'
import sys
def test():
args = sys.argv
if len(args)==1:
print('Hello, world!')
elif len(args)==2:
print('Hello, %s!' % args[1])
else:
print('Too many arguments!')
if __name__=='__main__':
test()
def _private_1(name):
return 'Hello, %s' % name
def _private_2(name):
return 'Hi, %s' % name
def greeting(name):
if len(name) > 3:
return _private_1(name)
else:
return _private_2(name)
print(test())
|
[
"e@qiongloo.com"
] |
e@qiongloo.com
|
ec51d97b1126b9b3ccbac302459e3ba7f14b9559
|
bb4f8edd2eb8bb12fc303ff9bacb31b62a312466
|
/firstnumber.py
|
5f7941b99470b9ee116c97ca5acea6353f6fca90
|
[] |
no_license
|
maninsa/Coursera_basics
|
99fcb813209a661e08a83ded33824928c5de12d8
|
0dce72e5f80cebc9c26dee58d285b77a4fba2ef3
|
refs/heads/main
| 2023-03-27T21:17:14.448451
| 2021-03-30T11:23:34
| 2021-03-30T11:23:34
| 352,972,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
#!/bin/python3.9
# Print the first digit of the number entered on the line
user_input = int(input('enter a number: '))
a = user_input
print(a // 10)
|
[
"noreply@github.com"
] |
noreply@github.com
|
3b8afbc42efa361f2ca62cc337f001e390465515
|
a64dd26503e8d8a878d021e308e79230344145b5
|
/2016/day_1/1-1.py
|
451366a024963350de72331833a0f8bebf9c31d3
|
[
"MIT"
] |
permissive
|
zigapk/adventofcode
|
4a93602afd8f4aeae942a6a42dec4facf4231970
|
693b03014ae1dc6c303e717b3d7e9b7658d6211b
|
refs/heads/master
| 2021-05-08T09:51:11.888319
| 2020-12-25T11:45:26
| 2020-12-25T11:45:26
| 76,105,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
asdf = input().split(", ")
smeri = [(1,0),(0,1),(-1,0),(0,-1)]
smer = 0
gor = 0
desno = 0
for item in asdf:
if item[0] == "R": smer = (smer + 1) % 4
else: smer = (smer - 1) % 4
gor += int(item[1:])*smeri[smer][0]
desno += int(item[1:])*smeri[smer][1]
print(abs(gor)+abs(desno))
|
[
"ziga.patacko@protonmail.com"
] |
ziga.patacko@protonmail.com
|
103462c39288b8fcdbe883ec1b1cccea5ab73dd5
|
277c29b460cef26db7d43fb80d10667f5fa321f6
|
/SIR_extended/init_spec_beta_gamma.py
|
aec3e23832a44abf6387b5b566cf86dea5f6677a
|
[] |
no_license
|
zuzancek/corona
|
75e0bd3678f1a01ec0109338067649abf1292bbe
|
da15613e231d4d61a0a6bb739b59454a65c0c36f
|
refs/heads/master
| 2021-07-16T14:56:27.052410
| 2021-05-24T21:34:08
| 2021-05-24T21:34:08
| 248,943,879
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,735
|
py
|
import sys
import numpy as np
from tqdm import tqdm_notebook
import pickle
import pandas as pd
import plotly
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import seaborn as sns
import time
import os
import init_common as x
import helpers as hp
from random import sample
from sklearn.utils import shuffle
sns.set(rc={'figure.figsize':(11, 4)})
R0_target = 2.2
N_nodes = 1000000
## 1. add distribution for recovery time / gamma
Trec_mean = 6.5 # serial interval
Trec_sig = 0.62
m0 = Trec_mean*(Trec_sig**2)
s0 = 1/(Trec_sig**2)
Trec_vec = np.random.gamma(m0,s0,N_nodes)
Trec_vec_list = Trec_vec.tolist()
Trec_mean = np.average(Trec_vec)
gamma_mean = 1/Trec_mean
gamma_vec = 1/Trec_vec
## use this -->>
gamma_vec_list = gamma_vec.tolist()
## 2.add distribution for R0
## 2.A self-isolated
isol_share = 0.7
N_nodes_isol = round(isol_share*N_nodes)
R0_scale = 100 #10**2
R0_isol_mean = 0.25*3.96# 1.43
R0_isol_vec = np.random.gamma(R0_scale*R0_isol_mean,1/R0_scale,N_nodes_isol)
## 2.B non-isolated
a0 = R0_isol_mean+np.std(R0_isol_vec)#2
a1 = 14#16
scale_nonisol = 2.3#1.65
N_nodes_nonisol = N_nodes-N_nodes_isol
R0_nonisol_vec = hp.power_law_pdf(np.random.uniform(0,1,N_nodes_nonisol),a0,a1,scale_nonisol)
## 2.C joint distribution
R0_vec = shuffle(np.transpose([*np.transpose(R0_isol_vec),*np.transpose(R0_nonisol_vec)]),random_state=0)
R0_vec_list = R0_vec.tolist()
R0_mean = np.mean(R0_vec)
R0_std = np.std(R0_vec)
## 3. extract Beta
beta_vec = R0_vec/Trec_vec
beta_vec_mean = np.mean(beta_vec)
## use this -->>
beta_vec_list = beta_vec.tolist()
def get_vectors(R0_scale=1):
beta_vec_new = R0_scale*beta_vec
beta_vec_list_new = beta_vec_new.tolist()
return beta_vec_list_new,Trec_vec_list
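# Hedged usage sketch, not part of the original script: downstream simulation
# code would presumably pull the per-node parameter lists like this,
# optionally rescaling R0.
if __name__ == '__main__':
    example_beta, example_Trec = get_vectors(R0_scale=1.1)
    print('target R0: %.2f, realised mean R0: %.3f' % (R0_target, R0_mean))
    print('mean beta after rescaling: %.4f' % np.mean(example_beta))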
|
[
"zuzana.mucka@gmail.com"
] |
zuzana.mucka@gmail.com
|
16f3dbf02322671414bc23c389a471151a3fcd0c
|
0ef8742a124fd89e3b2a650d7f3667ba4ad5ad54
|
/Sudoku/solution.py
|
2cb82178bd9d6e5f2b7b891f036f3a332024bd96
|
[] |
no_license
|
letyrodridc/Artificial-Intelligence-Udacity
|
13d286566f2c3cb90320181925ea6c638d71dfdd
|
3ea3c3714c6c85af1e0d6587c81644ef35d1c25d
|
refs/heads/master
| 2021-05-05T12:14:34.727082
| 2018-10-27T18:11:36
| 2018-10-27T18:11:36
| 118,210,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,842
|
py
|
'''
Udacity AIND
Project 1: Solve a Sudoku with AI
by Leticia Rodriguez
'''
from utils import *
assignments = []
#UDACITY
def assign_value(values, box, value):
"""
Assigns a value to a given box. If it updates the board record it.
Args:
values(dict): a dictionary of the form {'box_name': '123456789', ...}
box (str): box to be udated
value: new value for the box
Returns:
values updated with value in box
"""
values[box] = value
if len(value) == 1:
assignments.append(values.copy())
return values
#MY_SOLUTION
def naked_twins(values):
"""
Eliminate values using the naked twins strategy.
Args:
values(dict): a dictionary of the form {'box_name': '123456789', ...}
Returns:
the values dictionary with the naked twins eliminated from peers.
"""
# Find all instances of naked twins
all_twins = list()
# Are twins! I need 2 values, if > or < I avoid iteration
boxes_to_search = [box for box in boxes if len(values[box]) == 2]
# Uses a stack just not to add same tuple twice
while boxes_to_search:
box = boxes_to_search.pop()
value = values[box]
for p in peers[box]:
if p in boxes_to_search and values[p] == value:
# They have same value, are twins!
all_twins.append( (box,p) )
# Eliminate the naked twins as possibilities for their peers
for box, twin in all_twins:
value = values[box]
# It's interested just to update those boxes affected by both twins
# uses an intersection of peers of both boxes
for p in peers[box].intersection(peers[twin]):
if not values[p] == value:
for c in value:
values[p] = values[p].replace(c, '')
return values
#MY_SOLUTION
def grid_values(grid):
"""
Convert grid into a dict of {square: char} with '123456789' for empties.
Args:
grid(string) - A grid in string form.
Returns:
A grid in dictionary form
Keys: The boxes, e.g., 'A1'
Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.
"""
res = dict()
for i, c in enumerate(grid):
k = boxes[i]
if c=='.':
res[k] = digits
else:
res[k] = c
return res
#UDACITY
def display(values):
"""
Display the values as a 2-D grid.
Input: values - The sudoku in dictionary form
Output: None
"""
width = 1+max(len(values[s]) for s in boxes)
line = '+'.join(['-'*(width*3)]*3)
for r in rows:
print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
for c in cols))
if r in 'CF': print(line)
return
#MY_SOLUTION
def diagonal_peers(block):
"""
Returns a set with diagonal peers of block (both diagonals)
Input: block - string
Output: set of peers of block in the diagonal
"""
res = set()
if block in diagonal_units:
        res |= set(diagonal_units)  # union() returns a new set; update in place instead
if block in diagonal_units2:
        res |= set(diagonal_units2)
if res:
res.remove(block)
return res
#MY_SOLUTION
def diagonal_units_search(block):
"""
Returns a list of list that are diagonal_units if the block belongs to any of these
Input: block
Output: list of diagonal_units lists
"""
res = list()
if block in diagonal_units:
res.append(list(diagonal_units))
if block in diagonal_units2:
res.append(list(diagonal_units2))
return res
#MY_SOLUTION
def eliminate(values):
"""
Returns a dictionary with elimination technique applied.
Solution for Diagonal Sudoku. It also eliminates the value from diagonals.
Input: values - The sudoku in dictionary form
Output: dict - copy of values with elimination technique applied
"""
res = dict(values)
for block in values.keys():
block_value = values[block]
if len(block_value) == 1:
# Changed for Diagonal Sudoku. Searchs also in the peers in both diagonal.
for p in peers[block].union(diagonal_peers(block)):
assign_value(res, p, res[p].replace(block_value, ''))
return res
#MY_SOLUTION
def only_choice(values):
"""
Returns a dictionary with Only Choice technique applied.
Solution for Diagonal Sudoku. It also verify the values in diagonals.
Input: values - The sudoku in dictionary form
Output: dict - copy of values with Only Choice applied
"""
new_values = values.copy() # note: do not modify original values
boxes = [box for box in values.keys() if len(values[box]) > 1]
for box in boxes:
# For Diagonal Sudoku not only search in units but also in diagonals
box_units = units[box] + diagonal_units_search(box)
# Iterates over diferent box units: row, column, square and diagonals if it applies
for s_units in box_units:
# Create a set with all values found in the boxes of the unit
s_values = set()
for p in s_units:
if not box == p:
s_values = s_values | set(values[p])
# Calculate the choices that the current box have
choices = set(values[box]) - s_values
# If there is only one is an only_choice. Update it value
if len(choices) == 1:
assign_value(new_values,box,choices.pop())
return new_values
#MY_SOLUTION
def reduce_puzzle(values):
"""
Applies elimination and only choice techiniques until no more changes could be made
Input: values - The sudoku in dictionary form
Output: values (dict updated)
"""
stalled = False
while not stalled:
# Check how many boxes have a determined value
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
# Eliminate Strategy
values = eliminate(values)
# Only Choice Strategy
values = only_choice(values)
# Check how many boxes have a determined value, to compare
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
# If no new values were added, stop the loop.
stalled = solved_values_before == solved_values_after
# Sanity check, return False if there is a box with zero available values:
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
#MY_SOLUTION - DFS
def search(values):
"""
Solves a Sudoku problem in variant Diagonal Sudoku.
Input: values - The sudoku in dictionary form
Output: values - Sudoku solved
"""
values = reduce_puzzle(values)
# Verify if could't solved it and there is no solution for that instance
# See reduce_puzzle
not_solved = values is False
if not_solved:
return None
# Checks if all the boxes has one value --- so, solved true
solved = len([b for b in boxes if len(values[b]) == 1]) == len(boxes)
if solved:
return values
else:
# Chose one of the unfilled square s with the fewest possibilities
_,selected = min([(len(values[b]), b) for b in boxes if len(values[b]) > 1])
# Saves selected values
old_value = values[selected]
s = None
# Try each value of the selected
for v in old_value:
# Update box using one of the values of selected box
values[selected] = v
# Recursion
s = s or search(dict(values))
# Restore previous value
values[selected] = old_value
return s
#MY_SOLUTION
def solve(grid):
"""
Find the solution to a Sudoku grid.
Args:
grid(string): a string representing a sudoku grid.
Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
Returns:
The dictionary representation of the final sudoku grid. False if no solution exists.
"""
# Converts from sudoku string to sudoku dictionary
sudoku = grid_values(grid)
# Search solution
sudoku = search(sudoku)
return sudoku
if __name__ == '__main__':
diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
display(solve(diag_sudoku_grid))
try:
from visualize import visualize_assignments
visualize_assignments(assignments)
except SystemExit:
pass
except:
print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
|
[
"lrodrig@dc.uba.ar"
] |
lrodrig@dc.uba.ar
|
cf3dc94deb898b3147c1d529a1fbf335561c2e0b
|
639d6a00e3a8fab07ce07fec408eef6bc050d21d
|
/indice_pollution/db.py
|
8c93d5b392a7f26b2474e3b88b9e22891432315d
|
[
"MIT"
] |
permissive
|
betagouv/indice_pollution
|
e04634e0b9c6d4ce24ffdc4c19868599995c1bd5
|
b85e53ca22d420e3d685fc84843d2011c6a696e4
|
refs/heads/master
| 2023-02-10T20:25:13.321999
| 2023-02-06T10:57:09
| 2023-02-06T10:57:09
| 250,297,957
| 4
| 1
|
MIT
| 2023-01-25T09:25:45
| 2020-03-26T15:33:02
|
Python
|
UTF-8
|
Python
| false
| false
| 218
|
py
|
from sqlalchemy import MetaData
from sqlalchemy.orm import declarative_base
global engine, Session
metadata = MetaData(schema="indice_schema")
Base = declarative_base(metadata=metadata)
engine = None
session = None
|
[
"lara.vincent@gmail.com"
] |
lara.vincent@gmail.com
|
dac61de3894ea89b441f9876d43b4e8b8e7aabcc
|
a7587f813492163433202e244df2237c9993a1a1
|
/Store/migrations/0003_variation.py
|
192756496452ac5feb5ca11e93277167f0ed89b4
|
[] |
no_license
|
kamran1231/E-COM-WEBSITE-2021
|
3a10bc0059f4d29fc52ee029e4919d4f965174c6
|
32214468cf716cc312a63f6346b8c844f720abda
|
refs/heads/master
| 2023-06-01T03:18:03.137405
| 2021-07-04T14:20:16
| 2021-07-04T14:20:16
| 381,634,544
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
# Generated by Django 3.2.4 on 2021-07-02 18:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Store', '0002_alter_product_price'),
]
operations = [
migrations.CreateModel(
name='Variation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('variation_category', models.CharField(choices=[('color', 'color'), ('size', 'size')], max_length=100)),
('variation_value', models.CharField(max_length=100)),
('is_active', models.BooleanField(default=True)),
('created_date', models.DateTimeField(auto_now=True)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Store.product')),
],
),
]
|
[
"khanbrother805@gmail.com"
] |
khanbrother805@gmail.com
|
56efa079b2691582bee503f93198d1e99ed01d56
|
48cd431063b33f430251275a51307987ce2dae78
|
/JewelryBox.py
|
ccd580cbdad58c2b0acc563ee1c1f8a6bf08c833
|
[] |
no_license
|
edwardsong05/competitive-programming
|
005168a946f4af6bdd53671aab3ea00ffe401796
|
68bc9e38351c1c9c333d25e2cb1322c856d9bb0a
|
refs/heads/master
| 2020-04-06T20:35:21.247329
| 2019-09-19T20:25:22
| 2019-09-19T20:25:22
| 157,776,988
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
# problem on kattis
# https://open.kattis.com/problems/jewelrybox
# ternary search
def volume(x, y, h):
return (x-2*h)*(y-2*h)*h
T = int(input())
for i in range(T):
x, y = [int(i) for i in input().split()]
left = 0
right = min([x, y]) / 2
while right-left > 10**-6:
m1 = left + (right - left) / 3
m2 = right - (right - left) / 3
v1 = volume(x, y, m1)
v2 = volume(x, y, m2)
if v1 > v2:
right = m2
else:
left = m1
print(volume(x, y, left))
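# Hedged cross-check, not part of the original solution: the volume
# V(h) = (x - 2h) * (y - 2h) * h is unimodal on (0, min(x, y) / 2), and setting
# V'(h) = x*y - 4*(x + y)*h + 12*h**2 to zero gives a closed-form optimum that
# the ternary search above should match to within its 1e-6 tolerance.
def closed_form_height(x, y):
    return ((x + y) - (x * x - x * y + y * y) ** 0.5) / 6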
|
[
"noreply@github.com"
] |
noreply@github.com
|
72b698651d6f869623903874a9cb46cd307ac5e2
|
05218d01394294bb7ede726bf3dc6f0691e4299b
|
/machineLearning/mini_Project/mini_project_0401.py
|
21e36071cf323a4a3e1726e08d32fe4925ed6a43
|
[] |
no_license
|
bitacademy-howl/anaconda_workspace
|
156556c52342b461ffb8304dfb55a845fff5ae90
|
d9dc5007b6e95fa0bf7b95a457cafe68a0167992
|
refs/heads/master
| 2020-03-23T12:10:14.872385
| 2018-08-10T10:04:55
| 2018-08-10T10:04:55
| 141,539,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,058
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
import os
# training dataset provided by sklearn
data = load_boston()
print('=============================================================================')
print('================================= data type =================================')
print(type(data))
print('=============================================================================')
print('=============================================================================')
print(type(data.keys()), data.keys())
print('=============================================================================')
print('================================ description ================================')
print(data['DESCR'])
print('=============================================================================')
# the dataset that holds only the raw feature values
print('================================= data set ==================================')
X = data['data']
print(X)
print('=============================================================================')
# the data field itself does not contain the column names
print('=============================================================================')
header = data['feature_names']
print(header)
# the price is provided separately as the target, so it is merged back in when building the DataFrame
print('=============================================================================')
Y = data['target']
Y = Y.reshape(-1, 1)
print(type(Y), Y)
print('=============================================================================')
# the DataFrame that will actually be used: column headers not attached yet
df = pd.DataFrame(np.append(X, Y, axis=1))
print(df)
print('=============================================================================')
# append the PRICE column name to the feature headers
df.columns = np.append(header,'PRICE')
# attach the headers to the DataFrame
# inspect the DataFrame
print(df.head(5))
print(df.tail(5))
# describe() bundles all the summary statistics at once
result_desc = df.describe()
print(result_desc)
#######################################################################################################
# a quick aside: using these statistics,
# 1. draw a box plot
# 2. draw a distribution plot
# # 1. price distribution
# plt.hist(df['PRICE'],bins=100,color='green', density=True)
# plt.show()
# # 2.
# plt.boxplot([df['PRICE']],0)
# plt.show()
# keep experimenting with this and think it over later....
#######################################################################################################
# correlation between each pair of columns
corr_df = np.round(df.corr(),3)
print(corr_df)
# ,marker='o',s=10
pd.plotting.scatter_matrix(df,alpha=0.8, diagonal='kde')
# os.chdir(r'D:\1. stark\temp')
#
# df.to_csv('data.csv',index=True)
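# The unused imports above (train_test_split, LinearRegression) suggest a
# regression fit was the next intended step. A minimal sketch of that step
# (an assumption, not part of the original script), reusing X and Y loaded above:
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
model = LinearRegression().fit(X_train, y_train)
print('R^2 on the held-out test set:', model.score(X_test, y_test))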
|
[
"howl1118@gmail.com"
] |
howl1118@gmail.com
|
0599e1b5865e8d9987e0659e9e04bf93f58d70be
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/cloud/bigquery/v2/bigquery-v2-py/google/cloud/bigquery_v2/services/model_service/async_client.py
|
f663b4845089503f56d9cf414847565501df681b
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826
| 2021-06-16T20:42:38
| 2021-06-16T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,963
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery_v2.types import encryption_config
from google.cloud.bigquery_v2.types import model
from google.cloud.bigquery_v2.types import model as gcb_model
from google.cloud.bigquery_v2.types import model_reference
from google.cloud.bigquery_v2.types import standard_sql
from google.protobuf import wrappers_pb2 # type: ignore
from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
from .client import ModelServiceClient
class ModelServiceAsyncClient:
""""""
_client: ModelServiceClient
DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path)
common_folder_path = staticmethod(ModelServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path)
common_organization_path = staticmethod(ModelServiceClient.common_organization_path)
parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path)
common_project_path = staticmethod(ModelServiceClient.common_project_path)
parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path)
common_location_path = staticmethod(ModelServiceClient.common_location_path)
parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ModelServiceAsyncClient: The constructed client.
"""
return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ModelServiceAsyncClient: The constructed client.
"""
return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ModelServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ModelServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ModelServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the model service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ModelServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ModelServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def get_model(self,
request: model.GetModelRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
model_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> model.Model:
r"""Gets the specified model resource by model ID.
Args:
request (:class:`google.cloud.bigquery_v2.types.GetModelRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the requested
model.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the requested
model.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model_id (:class:`str`):
Required. Model ID of the requested
model.
This corresponds to the ``model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_v2.types.Model:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, model_id])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = model.GetModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if model_id is not None:
request.model_id = model_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_model,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_models(self,
request: model.ListModelsRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
max_results: wrappers_pb2.UInt32Value = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> model.ListModelsResponse:
r"""Lists all models in the specified dataset. Requires
the READER dataset role.
Args:
request (:class:`google.cloud.bigquery_v2.types.ListModelsRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the models to
list.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the models to
list.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
max_results (:class:`google.protobuf.wrappers_pb2.UInt32Value`):
The maximum number of results to
return in a single response page.
Leverage the page tokens to iterate
through the entire collection.
This corresponds to the ``max_results`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_v2.types.ListModelsResponse:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, max_results])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = model.ListModelsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if max_results is not None:
request.max_results = max_results
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_models,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def patch_model(self,
request: gcb_model.PatchModelRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
model_id: str = None,
model: gcb_model.Model = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcb_model.Model:
r"""Patch specific fields in the specified model.
Args:
request (:class:`google.cloud.bigquery_v2.types.PatchModelRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the model to
patch.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the model to
patch.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model_id (:class:`str`):
Required. Model ID of the model to
patch.
This corresponds to the ``model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (:class:`google.cloud.bigquery_v2.types.Model`):
Required. Patched model.
Follows RFC5789 patch semantics. Missing
fields are not updated. To clear a
field, explicitly set to default value.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_v2.types.Model:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, model_id, model])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = gcb_model.PatchModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if model_id is not None:
request.model_id = model_id
if model is not None:
request.model = model
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.patch_model,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_model(self,
request: model.DeleteModelRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
model_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the model specified by modelId from the
dataset.
Args:
request (:class:`google.cloud.bigquery_v2.types.DeleteModelRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the model to
delete.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the model to
delete.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model_id (:class:`str`):
Required. Model ID of the model to
delete.
This corresponds to the ``model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, model_id])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = model.DeleteModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if model_id is not None:
request.model_id = model_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_model,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-bigquery",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"ModelServiceAsyncClient",
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
88feb48981edab108611ba7b542047e3caf87f8d
|
ef5fde794c8bc24056f5b40579d840efa0693571
|
/ex9.py
|
1024755513c0a4272cd67a31c5a78c34aaa5067a
|
[] |
no_license
|
chongduoduo/pythonbasic
|
11d7d348fb5fa22a636dc34af9d3bb7f9898ce9f
|
fb39d0d332dd0f75301604358c2c11addeb0ecf5
|
refs/heads/master
| 2021-01-12T17:22:23.288132
| 2016-12-21T09:35:24
| 2016-12-21T09:35:24
| 71,550,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# Here's some new strange stuff, remember type it exactly.
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print "Here are they days: ", days
print "Here are the months: ", months
print """
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
"""
|
[
"mancao@186LMANCAO.ad.here.com"
] |
mancao@186LMANCAO.ad.here.com
|
fbeb57daaa30b0d192be33757876869f9e7acc75
|
156c486bc35b95162b73b0ce4e7a6dd9bca95302
|
/retval.py
|
8af9acd28841f848a53365755960a7b521357562
|
[] |
no_license
|
fangjian601/Loterry
|
98188e6d8c71f75f53163f7b37e34e70666b9684
|
1448a8786c9f2b7e84a2f4b8091dba7c24f882b4
|
refs/heads/master
| 2016-09-15T17:51:16.501974
| 2012-01-13T03:55:40
| 2012-01-13T03:55:40
| 3,161,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,634
|
py
|
import json
import logging
import traceback
logger = logging.getLogger("lottery")
def return_value(**kwargs):
global logger
try:
result = {}
if 'status' in kwargs:
result['status'] = kwargs['status']
else:
result['status'] = -1
if result['status'] == 0:
if 'val' in kwargs:
result['val'] = kwargs['val']
else:
result['val'] = None
elif result['status'] == -1:
result['err'] = {}
if 'event' in kwargs:
result['err']['event'] = kwargs['event']
else:
result['err']['event'] = None
if 'msg' in kwargs:
result['err']['msg'] = kwargs['msg']
else:
result['err']['msg'] = None
if not ('json' in kwargs):
kwargs['json'] = 1
if kwargs['json'] == 1:
return json.dumps(result)
else:
result_str = None
if result['status'] == 0:
result_str = '''{"status": "%d", "val": "''' %(result['status']) + result['val'] + '''"}'''
elif result['status'] == -1:
result_str = '''{"status": "%d", "err": {"msg": "%s", "event": "%s"}}''' %(result['status'], result['err']['msg'], result['err']['event'])
return result_str
except:
msg = traceback.format_exc()
logger.error(msg)
return '''{"status": -1, "err": {"msg": "return_value error", "event": "LIBS.RETVAL"}}'''
|
[
"superman601@126.com"
] |
superman601@126.com
|
49ba31cebb80516a9e15a3a44e6dbb8bf93243a8
|
36d52637d0947472a87d6f9cb5ab9eb346f61878
|
/venv/bin/django-admin.py
|
6093234e2abfa316be14a2878aecd1137d1c757b
|
[] |
no_license
|
diogoandrade1999/RestApiTPW
|
c8c14a4a2e1d442ef90867ccd304431a40226c3f
|
9af7ee8d3b712253f6f958474333d151d81acfe5
|
refs/heads/master
| 2022-03-29T19:49:45.479582
| 2019-12-18T15:13:24
| 2019-12-18T15:13:24
| 228,865,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
#!/home/diogo/Desktop/RestApiTPW/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"diogo.andrade@ua.pt"
] |
diogo.andrade@ua.pt
|
f742ee9c9d8e6f83bc86b8c21e11b729a4f4d129
|
6614bf94d54cf04aa4291b87123a180578d510f3
|
/setup.py
|
e5d214d55ab05ad82477ab4630963aa1dc1770c4
|
[] |
no_license
|
kemey188/rugis
|
0396df70fc1f86598a2dbb2704a76078c5b030af
|
da0b399a2dfeb90e1adc5acfd13fe3d81ef8e930
|
refs/heads/master
| 2018-09-25T05:34:17.795976
| 2018-07-18T02:57:41
| 2018-07-18T02:57:41
| 115,926,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
#!/usr/bin/env python
import os
import sys
from distutils.core import setup, Extension
os_macro = 'OS_MACOSX' if sys.platform == 'darwin' else 'OS_LINUX'
extension = Extension(
name='rugis',
sources=[
"gpsconv.cc",
"s2helper.cc",
"radar.cc",
"geo.cc",
"rugis_python.cc"
],
include_dirs=['/usr/lib','/usr/lib64','/usr/local/lib','3rdparty', '3rdparty/google','3rdparty/boost_1_58_0/include'],
libraries=['s2', 'crypto'],
library_dirs=['3rdparty/lib'],
runtime_library_dirs=['.', './rugis_libs','/usr/lib','/usr/lib64','/usr/local/lib'],
extra_compile_args=['-std=c++11', '-fno-wrapv', '-stdlib=libc++'],
define_macros=[('NDEBUG', '1'), (os_macro, None)]
)
setup(
name="rugis",
version='1.0',
description='preprocess module for project radar',
ext_modules=[extension])
|
[
"noreply@github.com"
] |
noreply@github.com
|
3f0ee9146eb77381bdd91251c6707c87efd8b730
|
19b5dc6cd9bc20fa57e6a448ff422438faf97a92
|
/app.py
|
bfa07413e10f300ba996796225869ef0e44195c2
|
[] |
no_license
|
marusia19/templates
|
da924694c86cfa3626a346043cd366833a43c9d4
|
eaddba6d85390f1adef5434fb0951489cc010ad8
|
refs/heads/master
| 2021-01-22T13:37:29.594582
| 2015-07-03T20:24:31
| 2015-07-03T20:24:31
| 38,508,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,960
|
py
|
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from pymongo import MongoClient, DESCENDING
from bson import ObjectId
import datetime
import login_system
import news_sorter
from flask_login import login_required, login_user, logout_user, current_user
import recommender
DATABASE_NAME = 'summertime_webness'
app = Flask(__name__)
@app.route("/")
def index():
filter_options = {}
tag = request.args.get('tag')
if tag:
filter_options['tags'] = {'$in': tag.split(',')}
news = app.db['news'].find(filter_options).sort([('time', DESCENDING)])
#sort news
news = news_sorter.sort_news(news)
return render_template("index.html", news=news)
@app.route("/news", methods=['POST', 'GET'])
@login_required
def news_handler():
if request.method == "GET":
        # Get news
news = list(app.db['news'].find({}))
recommended_news = recommender.recommend(current_user.data['name'], news)
return render_template("news.html", news=recommended_news)
else:
title = request.form.get('title', '')
text = request.form.get('text', '')
tags = request.form.get('tags', '')
tags = tags.split(',')
tags = [tag.strip().lower() for tag in tags]
# filter empty tags
# tags = [tag for tag in tags if tag]
new_tags= []
for tag in tags:
if tag:
new_tags.append(tag)
tags = new_tags
tags = list(set(tags))
new_post = {
'title': title,
'text': text,
'tags': tags,
'author': current_user.data['name'],
'time': datetime.datetime.utcnow(),
'likes': [],
'dislikes': []
}
app.db['news'].insert(new_post)
return redirect('/')
@app.route('/news/<post_id>', methods=['GET','POST'])
@login_required
def news_post(post_id):
if request.method == 'GET':
return redirect('/')
else:
action = request.form.get('action')
post = app.db['news'].find_one(ObjectId(post_id))
if action == 'like':
            likes = post['likes']
            likes.append(current_user.data['name'])
            likes = list(set(likes))
            post['likes'] = likes
app.db['news'].save(post)
return redirect('/')
@app.route("/users/<user_name>", methods=['GET','POST'])
def profile(user_name):
if request.method == "GET":
#user = app.db['users'][user_name]
user = app.db['users'].find({'name': user_name}).limit(1)[0]
return render_template("user.html", user=user)
else:
full_name = request.form.get('full_name', '')
skills = request.form.get('skills', '')
position = request.form.get('position', '')
photo = request.form.get('photo', '')
password = request.form.get('password', '')
# get user object
#user = app.db['users'][user_name]
user = app.db['users'].find({'name': user_name}).limit(1)[0]
user['full_name'] = full_name
user['position'] = position
user['photo'] = photo
user['skills'] = skills
user['password'] = password
app.db['users'].save(user)
return redirect("/users/%s" % user_name)
@app.route("/login", methods = ['GET', 'POST'])
def login():
if request.method == 'GET':
next_page = request.args.get('next', '/')
return render_template("login.html", next_page=next_page)
else:
user_name = request.form.get('name' ,'')
password = request.form.get('password', '')
next_page = request.form.get('next_page', '/')
        # get user from DB (find_one returns None when no user matches)
        user = app.db['users'].find_one({'name': user_name})
        # Did we find anyone in DB?
        if not user:
            return render_template("login.html", error=True)
# check password
if user and password == user['password']:
# if password is correct, login user
login_user(login_system.load_user(user_name))
return redirect(next_page)
else:
# if password is not correct, return error page
return render_template("login.html", error=True)
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect('/')
@app.route("/users/add")
@login_required
def add_user_page():
return render_template("add_user.html")
@app.route("/users/<user_name>/edit")
def edit_user(user_name):
user = app.db['users'].find({'name': user_name}).limit(1)[0]
return render_template("edit_user.html", user=user)
@app.route("/users", methods=['GET', 'POST'])
def users_handler():
if request.method == 'GET':
users = app.db['users'].find()
return render_template("users.html", users=users)
elif request.method == 'POST':
name = request.form.get('name', '')
full_name = request.form.get('full_name', '')
skills = request.form.get('skills', '')
position = request.form.get('position', '')
photo = request.form.get('photo', '')
password = request.form.get('password', '')
new_user = {
'name': name,
'full_name': full_name,
"skills": skills,
"position": position,
"photo": photo,
"password": password,
}
#app.db['users'][name] = new_user
app.db['users'].insert(new_user)
return redirect("/users/%s" % name)
else:
raise RuntimeError("Only POST and GET methods are supported")
if __name__ == "__main__":
app.db = MongoClient('wardoctor.nosoc.io', port=443)[DATABASE_NAME]
login_system.init_login_system(app)
app.run(debug=True)
# style="background-image:url(https://pp.vk.me/c618426/v618426104/12476/CtRpXTwpsKk.jpg)"
|
[
"sergeeva.maria.nik@gmail.com"
] |
sergeeva.maria.nik@gmail.com
|
a6743e5addcd58b500f621ff320a3c06ab63208f
|
a12155840b0f5b9c31d32353e9e5c28cf7cee161
|
/app/core/models.py
|
6f9a2f5f9e78afa49e3e91bac0d73d9e7d7eb24a
|
[
"MIT"
] |
permissive
|
obuqwe/recipe_app_api
|
fa152a9815d50ba9d7561b64253767b61ed4035c
|
d3424eb171f7dc77c7d6a42afb93f8368e9dc65e
|
refs/heads/master
| 2020-05-09T21:53:36.501530
| 2019-05-07T15:41:15
| 2019-05-07T15:41:15
| 181,451,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
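# Minimal usage sketch (an assumption, not part of the original module; it
# presumes this User model is registered as AUTH_USER_MODEL and Django is set up):
#
#   from django.contrib.auth import get_user_model
#   user = get_user_model().objects.create_user('test@example.com', 'secret')
#   admin = get_user_model().objects.create_superuser('admin@example.com', 'secret')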
|
[
"mesisleo10@gmail.com"
] |
mesisleo10@gmail.com
|
8fd88234986d81619c8a63de0ed1e13a8316b84d
|
9f1b345bb1852b00bba93ce3ab0b6d59973d2d97
|
/FTP_Project/tcp_client.py
|
90be2ccf6a202d17d138ec3958ed368358538609
|
[] |
no_license
|
chidalgo001/FTP-Client-Server
|
2de8874581bc84b220f1562d417eaba38691cd08
|
d721f947cd7c9d673984cf3971a95dd983428663
|
refs/heads/master
| 2020-12-30T16:27:34.005859
| 2017-05-11T14:03:30
| 2017-05-11T14:03:30
| 90,985,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,787
|
py
|
from socket import*
import os , sys
dir = os.getcwd()
HOME = dir #--- will hold the dir for the root of the program
ROOT = dir + "/ftpclient/ftproot/" #--- will hold the dir for the root of the ftpclient folder
RECV_BUFFER = 1024
PORT = 2039
next_data_port = 1
DATA_PORT_MIN = 12000
DATA_PORT_MAX = 12499
CMD_QUIT = "QUIT" #--- quits the server
CMD_HELP = "HELP" #--- displays help file
CMD_LOGIN = "LOGIN" #--- logs in into server
CMD_LOGOUT = "LOGOUT" #--- logs out current user
CMD_LS = "LS" #--- displays contents of CWD
CMD_DELETE = "DELETE" #--- deletes a specific file
CMD_PWD = "PWD" #--- diplays the current working directory
CMD_CON = "CONNECT" #--- connects to the server
CMD_CDUP = "CDUP" #--- moves up in the directory
CMD_CCD = "CCD" #--- changes the directory in the client side
CMD_SCD = "SCD" #--- changes the directory in the server side
CMD_RETR = "GET" #--- gets a file copy from server CWD into client CWD
CMD_STOR = "PUT" #--- places a file copy from client CWD into server CWD
CMD_TEST = "TEST"
CMD_PORT = "PORT"
def tcp_connection( addr , port , sock):
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
con = False
try:
sock.connect((addr , port ))
con = True
sock.send(addr.encode())
except ConnectionRefusedError:
print("Connection Error. Try again!")
con = False
return sock , con
def main():
dir = os.getcwd()
root = dir + "/ftpclient/ftproot/"
os.chdir(root)
logged = False # Will keep track if there is a user logged on
userName = '' # Will keep track of the user that is logged on
connection = False # Will keep track if there is a connection made
cmd = ''
print("Welcome to the TCP Client\n")
cSock = socket(AF_INET , SOCK_STREAM)
while (cmd != CMD_QUIT ):
command = input("\nFTP>> ")
tokens = command.split()
cmd , logged , cSock , userName , connection = evaluateCommands(tokens,logged,cSock,userName,connection)
print("\nThank you for using FTP Client")
def evaluateCommands( tokens , logged , sock , userName , connection):
cmd = tokens[0].upper()
if (cmd == CMD_CON):
host = gethostname()
host_address = gethostbyname(host)
sock , connection = tcp_connection( '127.0.0.1' , PORT , sock )
if (connection == True):
serverReply = sock.recv(RECV_BUFFER).decode()
print("Server: " + str(serverReply) )
if (connection == False):
cmd = ''
return "" , logged , sock , userName , connection
#------
if (cmd == CMD_QUIT):
logged = False
if( connection == True):
sock.send( "QUIT".encode() )
return "QUIT" , logged , sock , userName , connection
#------
if (cmd == CMD_HELP):
temp = os.getcwd()
os.chdir(HOME)
        try:
            with open("ftpserver/conf/help.txt", "r") as file:
                line = file.readline()
                while (line != ''):
                    print(line)
                    line = file.readline()
        except FileNotFoundError:
            print("File not found")
os.chdir(temp)
return "" , logged , sock , userName , connection
#-----
if (cmd == CMD_LOGIN):
reply = False
if (connection == True):
reply , logged , sock , userName =_login( tokens , logged , sock , userName )
else:
print("Need a connection to login.")
print("Try to connect first...")
return reply , logged , sock , userName , connection
#Logs out the current user
if (cmd == CMD_LOGOUT):
_logout(userName)
logged = False
userName = ""
sock.send("LOGOUT".encode())
return "" , logged , sock , userName , connection
# this will retuen the current directory in either the server or the client
# User can select.
if (cmd == CMD_PWD):
if ( len(tokens) == 2):
clientOrServer = tokens[1].lower()
if(clientOrServer == 'client'):
print("\nClient Current Working Dir: ")
print(os.getcwd())
return "" , logged , sock , userName , connection
if(clientOrServer == 'server' and connection == True):
print("\nServer Current Working Dir: ")
sock.send("PWD".encode())
reply = sock.recv(RECV_BUFFER).decode()
reply_tokens = reply.split()
if (reply_tokens[0] == "530"):
print("Need to be logged in ")
else:
print(reply)
return "" , logged , sock , userName , connection
else:
print("\n: cwd [ client ] || [ server ]. Please specify.")
print("Also, there must be a server conenction in order to execute 'cwd [ server ]'")
else:
print("\n : cwd [ client ] | [ server ]. Please specify.")
print("Also, there must be a server conenction in order to execute 'cwd [ server ]'")
return "" , logged , sock , userName , connection
if (cmd == CMD_CDUP):
if ( len(tokens) == 2):
clientOrServer = tokens[1].lower()
if(clientOrServer == 'client'):
os.chdir(ROOT)
print("\nClient Current Working Dir: ")
print(os.getcwd())
return "" , logged , sock , userName , connection
if(clientOrServer == 'server' and connection == True):
print("\nServer Current Working Dir: ")
sock.send("CDUP".encode())
cwd = sock.recv(RECV_BUFFER).decode()
print(cwd)
return "" , logged , sock , userName , connection
else:
print("\nCWD works on [ client ] or [ server ].")
print("Also, there must be a server conenction in order to get the CWD on [ server ]")
else:
print("\n : cwd [ client ] | [ server ]. Please specify.")
print("Also, there must be a server conenction in order to execute 'cwd [ server ]'")
return "" , logged , sock , userName , connection
if (cmd == CMD_LS):
if (logged == True):
if ( len(tokens) ==1 ):
ls = os.listdir(os.getcwd())
print("\nClient Dir: " + str(os.getcwd()))
print("\n" + str(ls))
return "" , logged , sock , userName , connection
else:
cli_ser = tokens[1].lower()
if(cli_ser == "client" ):
ls = os.listdir(os.getcwd())
print("\nClient Dir: " + str(os.getcwd()))
print("\n" + str(ls))
return "" , logged , sock , userName , connection
if(cli_ser == "server" ):
sock.send("LS".encode())
directory = sock.recv(RECV_BUFFER).decode()
print("\nServer Ls: ")
print("\n" + str(directory))
return "" , logged , sock , userName , connection
print("Need to log in to navigate dirs.")
return "" , logged , sock , userName , connection
#--- changes the dir for client
if (cmd == CMD_CCD):
input = tokens[1]
curdir = os.getcwd()
dir = curdir + input
os.chdir(dir)
print(os.getcwd())
return "" , logged , sock , userName , connection
if (cmd == CMD_RETR):
file_to_open = tokens[1]
if (connection == False and logged == False):
print("\nThere needs to be a connecion to the server to use this command.")
print("Please connect to the server")
return "" , logged , sock , userName , connection
data_channel = ftp_new_dataport(sock) #this should be connected when I reach here
d_channel , addr = data_channel.accept()
msg = "RETR " + file_to_open
sock.send( msg.encode() )
file2 = open( file_to_open , "w")
while True:
file = d_channel.recv(RECV_BUFFER).decode()
tokens = file.split()
code = tokens[0]
if(code == "150"):
print("ERROR: File not found...")
file2.close()
os.remove(str(file_to_open))
return "" , logged , sock , userName , connection
if(len(file) < RECV_BUFFER ):
file2.write(file)
break
else:
file2.write(file)
file2.close()
d_channel.close()
print("\n File [ " + file_to_open + " ] has been transfered to the current working directory")
print("from the remote server.\n")
return "" , logged , sock , userName , connection
if (cmd == CMD_STOR):
if (connection == False and logged == False):
print("\nThere needs to be a connecion to the server to use this command.")
print("Please connect to the server")
return "" , logged , sock , userName , connection
file_to_open = tokens[1]
try:
f = open( file_to_open , "r" )
f.close()
except FileNotFoundError:
msg = "FIle [ " + file_to_open + " ] not found in your current directory"
print(msg)
return "" , logged , sock , userName , connection
data_channel = ftp_new_dataport(sock) #this should be connected when I reach here
d_channel , addr = data_channel.accept()
msg = "STOR " + file_to_open
sock.send( msg.encode() )
with open(file_to_open, 'r') as file_to_send:
for data in file_to_send:
d_channel.sendall(data.encode())
file_to_send.close()
print("CLOSED FILE")
d_channel.close()
return "" , logged , sock , userName , connection
if (cmd == CMD_SCD):
if(len(tokens) == 2):
dir_to_change = tokens[1]
msg = "SCD " + dir_to_change
sock.send(msg.encode())
reply = sock.recv(RECV_BUFFER).decode()
print(reply)
else:
print("\nParameters for this command are SCD [ dir ] \nPlease try again.")
return "" , logged , sock , userName , connection
if (cmd == CMD_TEST):
temp = os.getcwd()
os.chdir(HOME)
print(os.getcwd())
test_file = open("tests/file1.txt" , 'r')
while (cmd != CMD_QUIT ):
command = test_file.readline()
tokens = command.split()
cmd , logged , sock , userName , connection = evaluateCommands(tokens,logged,sock,userName,connection)
test_file.close()
print("Finished Test run!")
os.chdir(temp)
return "" , logged , sock , userName , connection
#--- valid commands above this line
else:
print("Invalid Command.")
return "" , logged , sock , userName , connection
def _logout(userName):
if(userName == ""):
print("No user is logged on...")
else:
print("Loggin out " + userName + ".")
logged = False
def _login(tokens , logged , sock , userName ):
user = ''
pw = ''
if(userName != ""):
print("\nSession already in use...")
print("Logout to change users.")
return "" , logged , sock , userName
else:
if(len(tokens) == 1):
user = input("Please Enter UserName: ")
msg = "USER " + user
sock.send(msg.encode())
pw = input("Please Enter Password: ")
msg2 = "PASS " + pw
sock.send(msg2.encode())
print("\nAttempting to connect user [ " + user + " ]")
answer = sock.recv(RECV_BUFFER).decode()
ans_tokens = answer.split()
ans = ans_tokens[0]
if(ans == "230"):
userName = user
print("\nLogin Successful! Welcome, " + userName + ".")
user = userName
dir = ROOT + user + "/"
os.chdir(dir)
logged = True
else:
print("Invalid username or password...")
print("Try again!")
return "" , logged , sock , userName
if(len(tokens) == 3):
user = tokens[1]
pw = tokens[2]
print("\nAttempting to connect user [ " + user + " ]")
msg = "USER " + user
sock.send(msg.encode())
msg = "PASS " + pw
sock.send(msg.encode())
answer = sock.recv(RECV_BUFFER).decode()
ans_tokens = answer.split()
ans = ans_tokens[0]
if(ans == "230"):
userName = user
print("Login Successful! Welcome, " + userName + ".")
user = userName
dir = ROOT + user + "/"
os.chdir(dir)
logged = True
else:
print("Invalid username or password...")
print("Try again!")
return answer , logged , sock , userName
return "Invalid Login, try again" , logged , sock , userName
def ftp_new_dataport(ftp_socket):
global next_data_port
dport = next_data_port
    next_data_port = next_data_port + 1  # advance for the next call
dport = (DATA_PORT_MIN + dport) % DATA_PORT_MAX
print(("Preparing Data Port: 127.0.0.1 " + str(dport)))
data_socket = socket(AF_INET, SOCK_STREAM)
# reuse port
data_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
data_socket.bind(('127.0.0.1' , dport))
data_socket.listen( 1 )
'''
#the port requires the following
#PORT IP PORT
#however, it must be transmitted like this.
#PORT 192,168,1,2,17,24
#where the first four octet are the ip and the last two form a port number.
host_address_split = host_address.split('.')
high_dport = str(dport // 256) #get high part
low_dport = str(dport % 256) #similar to dport << 8 (left shift)
port_argument_list = host_address_split + [high_dport,low_dport]
port_arguments = ','.join(port_argument_list)
'''
cmd_port_send = CMD_PORT + ' ' + str(dport) + '\r\n'
print(cmd_port_send)
try:
ftp_socket.send(cmd_port_send.encode())
    except timeout:  # 'timeout' is provided by 'from socket import *'
print("Socket timeout. Port may have been used recently. wait and try again!")
return None
    except error:  # socket 'error' (alias of OSError), also from the star import
print("Socket error. Try again")
return None
msg = ftp_socket.recv(RECV_BUFFER).decode()
print(msg)
return data_socket
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
826ff29b8209c97f97229d3a9b5855b40d325524
|
1a166165ab8287d01cbb377a13efdb5eff5dfef0
|
/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py
|
efb21f39026ffdd1e919cf6d1b8d713df2b94c91
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
manoj0806/azure-sdk-for-python
|
7a14b202ff80f528abd068bf50334e91001a9686
|
aab999792db1132232b2f297c76800590a901142
|
refs/heads/master
| 2023-04-19T16:11:31.984930
| 2021-04-29T23:19:49
| 2021-04-29T23:19:49
| 363,025,016
| 1
| 0
|
MIT
| 2021-04-30T04:23:35
| 2021-04-30T04:23:35
| null |
UTF-8
|
Python
| false
| false
| 2,804
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
from .operations import ServiceOperations
from .operations import FileSystemOperations
from .operations import PathOperations
from . import models
class AzureDataLakeStorageRESTAPI(object):
"""Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
:ivar service: ServiceOperations operations
:vartype service: azure.storage.filedatalake.operations.ServiceOperations
:ivar file_system: FileSystemOperations operations
:vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
:ivar path: PathOperations operations
:vartype path: azure.storage.filedatalake.operations.PathOperations
    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
:type url: str
"""
def __init__(
self,
url, # type: str
**kwargs # type: Any
):
# type: (...) -> None
base_url = '{url}'
self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.service = ServiceOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_system = FileSystemOperations(
self._client, self._config, self._serialize, self._deserialize)
self.path = PathOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AzureDataLakeStorageRESTAPI
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
|
[
"noreply@github.com"
] |
noreply@github.com
|
9c4556f4339c51fcec4b3a12839e0de40db73964
|
8c8149fd09091368eadb83ffbf68f9236a5bc654
|
/wedding-gallery/app/main/__init__.py
|
3fd2c5175f68d03bca9617a74f91fb7f2c3bb141
|
[] |
no_license
|
supwr/anchor-loans-test-api
|
8fd1bb62cd20c8b1fbd105f45665f18949e636c7
|
1123d807b2ce260f522d99858cd4574ebe0d75e1
|
refs/heads/master
| 2020-05-18T16:47:16.185612
| 2019-05-02T07:42:35
| 2019-05-02T07:42:35
| 184,535,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
from flask import Flask, request, send_from_directory, render_template, abort
import json
from bson import json_util
from main.controller.users_controller import users
from main.controller.gallery_controller import gallery
from flask_jwt_extended import JWTManager
from main.database import mongo
import os
app = Flask(__name__, template_folder='view')
app.config['JWT_SECRET_KEY'] = 'd53nwH!8KADsu+Rk'
app.config['MONGO_DBNAME'] = 'wedding-gallery'
app.config['MONGO_URI'] = 'mongodb://anchor-loans-mongo/wedding-gallery'
app.config['UPLOAD_FOLDER'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'public')
app.config['ADMIN_ROLE'] = 'ADMIN'
app.config['GUEST_ROLE'] = 'GUEST'
jwt = JWTManager(app)
mongo.init_app(app)
@app.errorhandler(500)
def internal_error(error):
response = {
"message": error.description
}
return json.dumps(response, default=json_util.default), 500, {'Content-Type': 'application/json; charset=utf-8'}
@app.errorhandler(404)
def not_found(error):
response = {
"message": "Page not found"
}
return json.dumps(response, default=json_util.default), 404, {'Content-Type': 'application/json; charset=utf-8'}
@app.errorhandler(403)
def forbidden(error):
response = {
"message": "You are not allowed to perform this operation"
}
return json.dumps(response, default=json_util.default), 403, {'Content-Type': 'application/json; charset=utf-8'}
app.register_blueprint(users, url_prefix='/api/')
app.register_blueprint(gallery, url_prefix='/api/')
|
[
"marcelo.rodriguespires@gmail.com"
] |
marcelo.rodriguespires@gmail.com
|
f3a7f50f45d24de61d099e04f9c4ddfff584b706
|
068780d1035b349d846c81ed06d3f867368458a5
|
/myvariant/src/biothings/utils/dataload.py
|
8817330ca501c5458c3457db724d2fead6f4a530
|
[] |
no_license
|
cyrus0824/myvariant.info_new
|
123ac84b29e505a7c72c28ae6501afdbff392b55
|
45b52891da35d68fd3570aef6a5e1f20ddc9c1ad
|
refs/heads/master
| 2021-01-10T12:44:59.943184
| 2016-01-26T19:55:55
| 2016-01-26T19:55:55
| 48,048,980
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,443
|
py
|
from __future__ import print_function
#from __future__ import unicode_literals
import itertools
import csv
from utils.common import open_anyfile, is_str
"""
Utility functions for parsing flatfiles,
mapping to JSON, cleaning.
"""
# remove keys whos values are ".", "-", "", "NA", "none", " "
# and remove empty dictionaries
def dict_sweep(d, vals=[".", "-", "", "NA", "none", " ", "Not Available", "unknown"]):
"""
@param d: a dictionary
@param vals: a string or list of strings to sweep
"""
    for key, val in list(d.items()):
        if val in vals:
            del d[key]
        elif isinstance(val, list):
            # iterate over a copy so in-place removals do not skip elements
            for item in list(val):
if item in vals:
val.remove(item)
elif isinstance(item, dict):
dict_sweep(item, vals)
if len(val) == 0:
del d[key]
elif isinstance(val, dict):
dict_sweep(val, vals)
if len(val) == 0:
del d[key]
return d
def to_number(val):
"""convert an input string to int/float."""
if is_str(val):
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
pass
return val
def value_convert(d):
"""convert string numbers into integers or floats"""
for key, val in d.items():
if isinstance(val, dict):
value_convert(val)
elif isinstance(val, list):
d[key] = [to_number(x) for x in val]
elif isinstance(val, tuple):
d[key] = tuple([to_number(x) for x in val])
else:
d[key] = to_number(val)
return d
# if dict value is a list of length 1, unlist
def unlist(d):
for key, val in d.items():
if isinstance(val, list):
if len(val) == 1:
d[key] = val[0]
elif isinstance(val, dict):
unlist(val)
return d
# split fields by sep into comma separated lists, strip.
def list_split(d, sep):
for key, val in d.items():
if isinstance(val, dict):
list_split(val, sep)
try:
if len(val.split(sep)) > 1:
d[key] = val.rstrip().rstrip(sep).split(sep)
except (AttributeError):
pass
return d
def id_strip(id_list):
id_list = id_list.split("|")
ids = []
for id in id_list:
ids.append(id.rstrip().lstrip())
return ids
def merge_duplicate_rows(rows, db):
"""
@param rows: rows to be grouped by
@param db: database name, string
"""
rows = list(rows)
first_row = rows[0]
other_rows = rows[1:]
for row in other_rows:
for i in first_row[db]:
if i in row[db]:
if row[db][i] != first_row[db][i]:
aa = first_row[db][i]
if not isinstance(aa, list):
aa = [aa]
aa.append(row[db][i])
first_row[db][i] = aa
else:
continue
return first_row
def unique_ids(src_module):
i = src_module.load_data()
out = list(i)
id_list = [a['_id'] for a in out if a]
myset = set(id_list)
print(len(out), "Documents produced")
print(len(myset), "Unique IDs")
return out
def rec_handler(infile, block_end='\n', skip=0, include_block_end=False, as_list=False):
'''A generator to return a record (block of text)
at once from the infile. The record is separated by
one or more empty lines by default.
skip can be used to skip top n-th lines
if include_block_end is True, the line matching block_end will also be returned.
if as_list is True, return a list of lines in one record.
'''
rec_separator = lambda line: line == block_end
with open_anyfile(infile) as in_f:
if skip:
for i in range(skip):
in_f.readline()
for key, group in itertools.groupby(in_f, rec_separator):
if not key:
if include_block_end:
_g = itertools.chain(group, (block_end,))
yield (list(_g) if as_list else ''.join(_g))
def tabfile_feeder(datafile, header=1, sep='\t',
includefn=None,
# coerce_unicode=True, # no need here because importing unicode_literals at the top
assert_column_no=None):
'''a generator for each row in the file.'''
with open_anyfile(datafile) as in_f:
reader = csv.reader(in_f, delimiter=sep)
lineno = 0
try:
for i in range(header):
                next(reader)
lineno += 1
for ld in reader:
if assert_column_no:
if len(ld) != assert_column_no:
err = "Unexpected column number:" \
" got {}, should be {}".format(len(ld), assert_column_no)
raise ValueError(err)
if not includefn or includefn(ld):
lineno += 1
# if coerce_unicode:
# yield [unicode(x, encoding='utf-8', errors='replace') for x in ld]
# else:
# yield ld
yield ld
except ValueError:
print("Error at line number:", lineno)
raise
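# Minimal usage sketch of the cleaning helpers above (illustrative only,
# not part of the original module):
if __name__ == '__main__':
    doc = {'id': '42', 'score': '3.5', 'note': 'NA', 'tags': ['a'], 'nested': {'x': '-'}}
    doc = dict_sweep(doc)        # drops 'note' and the now-empty 'nested'
    doc = value_convert(doc)     # '42' -> 42, '3.5' -> 3.5
    doc = unlist(doc)            # ['a'] -> 'a'
    print(doc)                   # {'id': 42, 'score': 3.5, 'tags': 'a'}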
|
[
"cyrus.afrasiabi@gmail.com"
] |
cyrus.afrasiabi@gmail.com
|
06eb118e8879ca755ff7c592ecfb8c07b1333b91
|
553b34a101c54090e68f540d96369ac7d5774d95
|
/python/algo/src/minimum_cut.py
|
bf33b42a8714492e38de25c04a941877eafc0264
|
[
"MIT"
] |
permissive
|
topliceanu/learn
|
fd124e1885b5c0bfea8587510b5eab79da629099
|
1c5b1433c3d6bfd834df35dee08607fcbdd9f4e3
|
refs/heads/master
| 2022-07-16T19:50:40.939933
| 2022-06-12T15:40:20
| 2022-06-12T15:40:20
| 21,684,180
| 26
| 12
|
MIT
| 2020-03-26T20:51:35
| 2014-07-10T07:22:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
# -*- coding: utf-8 -*-
import random
from src.maximum_flow import ford_fulkerson_maximum_flow
def pick_random_edge(graph):
""" Returns a random edge from the given graph. """
edges = graph.get_edges()
return random.choice(edges)
def contract(graph, edge):
""" Composes a new vertex from the ends of the given edge.
All the resulting self-loop edges are removed.
Args:
        graph: a data structure containing all data and operations.
edge: a tuple of format (tail, head, value)
Returns:
The graph after contracting value.
"""
(tail, head, value) = graph.split_edge(edge)
super_vertex = '{start}_{end}'.format(start=tail, end=head)
# Remove individual vertices and add super-vertex.
graph.rename_vertex(tail, super_vertex)
graph.rename_vertex(head, super_vertex)
return graph
def randomized_cut(graph):
""" Finds a cut in a given graph using the random contraction algorithm
defined by David Karger in '93.
NOTE! This algorithm modifies the graph in place, so make sure you clone
it before compacting if you don't want your original graph modified.
Args:
        graph: a data structure containing all data and operations.
Returns:
The compacted graph.
"""
while len(graph.get_vertices()) != 2:
edge = pick_random_edge(graph)
contract(graph, edge)
return graph
def minimum_cut(graph, tries):
""" Finds the the minimum cut in the given graph after a running the
randomized cut algorithm a given number of tries.
Args:
        graph: a data structure containing all vertices, edges and supported
operations.
tries: int, number of times to try the randomized cut algorithm.
Returns:
cuts, list of cut edges which produce the minimum cut.
"""
min_cuts = []
for __ in xrange(tries):
g = graph.clone()
randomized_cut(g)
[left_super_vertex, right_super_vertex] = g.get_vertices()
left_vertices = set(left_super_vertex.split('_'))
right_vertices = set(right_super_vertex.split('_'))
cuts = []
for left_vertex in left_vertices:
right_neighbours = set(graph.neighbours(left_vertex))\
.intersection(right_vertices)
for right_vertex in right_neighbours:
cuts.append((left_vertex, right_vertex))
if (len(min_cuts) == 0 or len(min_cuts) > len(cuts)):
min_cuts = cuts
return min_cuts
def minimum_cut_using_maximum_flow(graph, start, end):
""" Solve the minimum cut problem by reducing it to maximum flow. """
# TODO
|
[
"alexandru.topliceanu@gmail.com"
] |
alexandru.topliceanu@gmail.com
|
134635314de03d47eab274af2e570ea7234fd720
|
0b1df575fcddee7abbce853bf2f38466cc0ced10
|
/Python/01_introduction/04_arithmetic_operators.py
|
0a4a2291db616ead79d264439a1a474a16d7e9bd
|
[
"Apache-2.0"
] |
permissive
|
droideck/hacker_rank
|
b85bf31abaafc67eb877e7a37384c436fe71432d
|
6f150ede01de07968b26a4bf89cec15fa39bcf9b
|
refs/heads/master
| 2021-01-18T22:19:30.711457
| 2016-11-17T10:05:34
| 2016-11-17T10:05:34
| 72,424,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
#!/bin/python3
a = int(input())
b = int(input())
if (a >= 1) and (a <= 10**10) and \
(b >= 1) and (b <= 10**10):
print(str(a + b))
print(str(a - b))
print(str(a * b))
|
[
"simon.pichugin@gmail.com"
] |
simon.pichugin@gmail.com
|
e52f6e4abfd00051cf9f6fc93ce637f9a796a083
|
514d20002899ee240508141f8e6124919c414ad2
|
/owhbv4/owhbv4_import_owhb2.py
|
bbe6cf23751ae90996fb3f37de09205127a6641b
|
[] |
no_license
|
pluzorminuz/pluz-owws
|
6b06dbf5dd6c2fd22e25d6ea88263b82583b6f9f
|
35dcd4205d5764e75964f10f1245ca5bd9e59350
|
refs/heads/main
| 2023-04-09T14:12:03.159884
| 2021-04-02T10:51:26
| 2021-04-02T10:51:26
| 321,260,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,682
|
py
|
import bpy
import decimal
D = decimal.Decimal
import math
import os
import fnmatch
THIS_BLENDFILE_DIR = bpy.path.abspath('//')
f5 = D('100000')
f4 = D('10000')
f0 = D('1')
class DataEntry:
def __init__(self, type, pos, rolled_pos, delay, delay_raw, dmg, slot, scale):
self.type = type
self.pos = pos
self.rolled_pos = rolled_pos
self.delay = delay
self.delay_raw = delay_raw
self.dmg = dmg
self.slot = slot
self.scale = scale
def __str__(self):
print()
print('Type:',self.type)
print('True Pos:', self.pos)
print('Rolled Pos:', self.rolled_pos)
print('Attack Delay:', self.delay)
print('Attack Delay (Raw):', self.delay_raw)
print('Damage Received:', self.dmg)
print('Slot:', self.slot)
print('Scale:', self.scale)
return ''
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(name))
        break  # don't recurse into subdirectories
return result
def new_collection(name='New Collection', parent=None):
# does the collection already exists?
if name in bpy.data.collections:
print('Collection [',name,'] already exists.',sep='')
return bpy.data.collections[name]
else:
# if no specified parent colletion, then use the master collection
new_coll = bpy.data.collections.new(name)
if parent == None:
parent_coll = bpy.context.scene.collection
print(parent_coll)
# if specified
else:
# try to check if the parent collection exists
# if parent is of type string
if isinstance(parent, str):
if parent in bpy.data.collections:
parent_coll = bpy.data.collections[parent]
else:
parent_coll = new_collection(parent)
# parent is a collection
else:
parent_coll = parent
parent_coll.children.link(new_coll)
return new_coll
def nearest_mult_of(num,mult):
round = D(str(num))
round = round / D(str(mult))
    round = round.quantize(D('1'), rounding=decimal.ROUND_HALF_EVEN)
round = round * D(str(mult))
return round
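# Quick illustrative check of nearest_mult_of (assumption: half-even rounding to
# the nearest multiple is the intended behaviour), e.g.:
#   nearest_mult_of(7.3, 0.5)  -> Decimal('7.50')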
def ow_coord_conv_decimal(string,factor):
temp = string.split('; ')
return (D(temp[0])/factor,-D(temp[2])/factor,D(temp[1])/factor)
def ow_coord_conv_decimal154(string,factor):
temp = string
temp = temp.replace('(','')
temp = temp.replace(')','')
temp = temp.split('; ')
return (D(temp[0])/factor,-D(temp[2])/factor,D(temp[1])/factor)
def ow_coord_array_conv154(line,factor): # given a raw OWWS RAW array (string), convert to an array of world coords
if line == '0':
return []
elif line == '[]':
return []
else:
array = []
temp = line
temp = temp.replace('[','')
temp = temp.replace(']','')
temp = temp.replace(');(','):(')
temp = temp.split(':')
for item in temp:
array.append(ow_coord_conv_decimal154(item,factor))
return array
def ow_coord_conv_decimal154_mod(string,factor,offset,mod):
temp = string
temp = temp.replace('(','')
temp = temp.replace(')','')
temp = temp.split('; ')
x = D(temp[0])/factor
y = -D(temp[2])/factor
z = D(temp[1])/factor
x_trans = x + offset
x_mult = x_trans / mod
x_mult = x_mult.quantize(D(1),rounding=ROUND_FLOOR)
x_new = x-(x_mult * mod)
return (x_new, y, z)
def ow_coord_array_conv154_mod(line,factor,offset,mod): # given a raw OWWS RAW array (string), convert to an array of world coords
if line == '0':
return []
elif line == '[]':
return []
else:
array = []
temp = line
temp = temp.replace('[','')
temp = temp.replace(']','')
temp = temp.replace(');(','):(')
temp = temp.split(':')
for item in temp:
array.append(ow_coord_conv_decimal154_mod(item,factor,offset,mod))
return array
def ow_facdir_conv(string,factor):
unit = ow_coord_conv_decimal(string,factor)
return unit
def add_vector(a,b):
return (a[0]+b[0],a[1]+b[1],a[2]+b[2])
def create_empty_dir(pos,dir,name,coll):
o = bpy.data.objects.new(name,None)
coll.objects.link(o)
o.empty_display_size = 2
o.empty_display_type = 'SINGLE_ARROW'
o.location = pos
o.rotation_euler = dir
def create_empty(pos,name='Empty',coll=bpy.context.scene.collection,type='PLAIN_AXES'):
o = bpy.data.objects.new(name,None)
coll.objects.link(o)
o.empty_display_size = 2
o.empty_display_type = type
o.location = pos
def create_path(ob_name,coll,coords):
curveData = bpy.data.curves.new(ob_name, type='CURVE')
curveData.dimensions = '3D'
curveData.resolution_u = 2
polyline = curveData.splines.new('BEZIER')
polyline.bezier_points.add(len(coords)-1)
for i, coord in enumerate(coords):
polyline.bezier_points[i].co = coord
polyline.bezier_points[i].handle_right = coord
polyline.bezier_points[i].handle_left = coord
curveOB = bpy.data.objects.new(ob_name, curveData)
coll.objects.link(curveOB)
def point_cloud(coords, coll=bpy.context.scene.collection, ob_name='Point Cloud', edges=[], faces=[]):
# Create new mesh and a new object
me = bpy.data.meshes.new(ob_name)
ob = bpy.data.objects.new(ob_name, me)
# Make a mesh from a list of vertices/edges/faces
me.from_pydata(coords, edges, faces)
# Display name and update the mesh
#ob.show_name = True
me.update()
coll.objects.link(ob)
return ob
def ow_coord_array_conv(line, factor):  # factor: Decimal divisor applied to each coordinate component
array = []
temp = line
temp = temp.replace('{','')
temp = temp.replace('}','')
temp = temp.split('); ')
for item in temp:
temp1 = item
temp1 = temp1.replace('(','')
temp1 = temp1.replace(')','')
array.append(ow_coord_conv_decimal(temp1,factor))
return array
def ow_integer_array_conv(line):
temp = line
temp = temp.replace('{','')
temp = temp.replace('}','')
temp = temp.split('; ')
array = [item for item in temp]
return array
def ow_integer_array_conv154(line):
if line == '0':
return []
elif line == '[]':
return []
else:
temp = line
temp = temp.replace('[','')
temp = temp.replace(']','')
temp = temp.split(';')
array = [item for item in temp]
return array
def ow_coord_conv_decimal_alf_mod(string,factor,offset,mod):
temp = string
temp = temp.replace('(','')
temp = temp.replace(')','')
temp = temp.split(', ')
x = D(temp[0])/factor
y = -D(temp[2])/factor
z = D(temp[1])/factor
x_trans = x + offset
x_mult = x_trans / mod
x_mult = x_mult.quantize(D('1'),rounding=ROUND_FLOOR)
x_new = x-(x_mult * mod)
return (x_new, y, z)
def ow_coord_conv_decimal_alf_multi(string,factor,mod):
temp = string
temp = temp.replace('(','')
temp = temp.replace(')','')
temp = temp.split(', ')
x = D(temp[0])/factor
y = -D(temp[2])/factor
z = D(temp[1])/factor
x_trans = x + (offset * D('0.5'))
x_mult = x_trans / mod
x_mult = x_mult.quantize(D('1'),rounding=ROUND_FLOOR)
x_new = x-(x_mult * mod)
return (x_new, y, z)
def ow_coord_conv_decimal_alf(string,factor):
temp = string
temp = temp.replace('(','')
temp = temp.replace(')','')
temp = temp.split(', ')
return (D(temp[0])/factor, -D(temp[2])/factor, D(temp[1])/factor)
def ow_coord_array_conv_alf_mod(input_array,factor,offset,mod): # given an ALF parsed array of coord, convert to an array of decimals
array = []
for item in input_array:
array.append(ow_coord_conv_decimal_alf_mod(item,factor,offset,mod))
return array
def floatToDecimal(input, factor):
return D(input) / factor
def owCoordToDecimalVec(input, factor):
temp = input
temp = temp.replace('(','')
temp = temp.replace(')','')
temp = temp.split('; ')
return (D(temp[0])/factor, -D(temp[2])/factor, D(temp[1])/factor)
def owConv_arrayOfFloat(input, factor):
if input == '0':
return []
elif input == '[]':
return []
else:
temp = input
temp = temp.replace('[','')
temp = temp.replace(']','')
temp = temp.split(';')
return [floatToDecimal(item, factor) for item in temp]
def owConv_arrayOfVectors(input, factor):
if input == '0':
return []
elif input == '[]':
return []
else:
temp = input
temp = temp.replace('[','')
temp = temp.replace(']','')
temp = temp.replace(');(','):(')
temp = temp.split(':')
return [owCoordToDecimalVec(item, factor) for item in temp]
def floatToInt(input):
return D(int(input))
def owConv_arrayOfInt(input):
if input == '0':
return []
elif input == '[]':
return []
else:
temp = input
temp = temp.replace('[','')
temp = temp.replace(']','')
temp = temp.split(';')
return [floatToInt(item) for item in temp]
def scalarMultVec(scalar, vec):
return (scalar*vec[0], scalar*vec[1], scalar*vec[2])
this_coll = bpy.context.scene.collection
#scan_index = 'doomfistuppercut_2a'
scan_index = 'extremeres'
offset = D('2.5')
mod = D('5')
filelist = find(scan_index + '.owhb2', THIS_BLENDFILE_DIR)
if filelist == []:
print('No file match! Exiting...')
else:
acc_file_len = 0
data = []
for file in filelist:
f = open(THIS_BLENDFILE_DIR + file,'r')
data.extend(f.readlines()[2:])
f.close()
print(file,len(data)-acc_file_len)
acc_file_len = len(data)
del acc_file_len
data = [line.split(',') for line in data]
# [0] timestamp
# [1] variable target
# [2] 0: l_hit_store array of (float * f5)
# [3] 1: r_hit_store array of (float * f5)
# [4] 2: thisscan_resolution_copy (float * f5)
# [5] 3: thisslice_x_copy vector of (float * f5)
# [6] 4: thisslice_y_bounds_copy array of 2 int's
# [7] 5: scan_y_batchsize int
# [8] 6: scan_axis array of 3 vectors * f0
# [9] 7: thisscan_extract_axis 1 vector * f0
# [10] 8: thisscan_limits_int array of 4 int's
# [19] 17: loop_i
# [20] 18: thisscan_limits_raw
# [21] 19: thisscan_resolution
# [22] 20: scan_origin
# [23] 21: thisscan_batch_y_start
# [24] 22: thisscan_cur_x
# [25] 23: thisscan_cur_y
# [26] 24: thisslice_x
print(data[0][8])
for i in range(len(data)):
data[i][2] = owConv_arrayOfFloat(data[i][2], f5)
data[i][3] = owConv_arrayOfFloat(data[i][3], f5)
data[i][4] = floatToDecimal(data[i][4], f5)
data[i][5] = owCoordToDecimalVec(data[i][5], f5)
data[i][6] = owConv_arrayOfInt(data[i][6])
data[i][7] = floatToInt(data[i][7])
data[i][8] = owConv_arrayOfVectors(data[i][8], f0)
data[i][9] = owCoordToDecimalVec(data[i][9], f0)
data[i][10] = owConv_arrayOfInt(data[i][10])
# to reconstruct the ray cast hit point
# x + y + z
# x = thisslice_x_copy (line[5])
# y = thisslice_y_bounds_copy[0] * loop_i * resolution (line[6] * i * line[4])
# z = thisscan_extract_axis * line[2][i]/[3][i] (line[9] * [line[2][i])
this_hb_data = []
for line in data:
this_x = line[5]
thisline_resolution = line[4]
thisline_extract_axis = line[9]
thisline_y_axis = line[8][1]
thisline_starting_y = line[6][0]
i = D('0')
for number in line[2]: # l hit store
this_y = scalarMultVec(thisline_resolution * (thisline_starting_y + i), thisline_y_axis)
this_z = scalarMultVec(number, thisline_extract_axis)
i += D('1')
this_hb_data.append( add_vector(this_x, add_vector(this_y,this_z)) )
i = D('0')
for number in line[3]: # r hit store
this_y = scalarMultVec(thisline_resolution * (thisline_starting_y + i), thisline_y_axis)
this_z = scalarMultVec(number, thisline_extract_axis)
i += D('1')
this_hb_data.append( add_vector(this_x, add_vector(this_y,this_z)) )
hit_pc = point_cloud(this_hb_data, this_coll, ob_name=scan_index+' Raw')
|
[
"63382500+pluzorminuz@users.noreply.github.com"
] |
63382500+pluzorminuz@users.noreply.github.com
|
fbea031940ce24c438b127acbfb5063daaa52c3b
|
792efdc5d6e43c9e5597371d1d8b675e527ae9ad
|
/server.py
|
66c1911e923baa28ca4b255ff7c7e85476ab8f5b
|
[] |
no_license
|
lindemanswork/bowling-game
|
57d0d45327d7392c78e5aee741302d20d0e46f02
|
de8fa64fbef387bcc6072b8876800bd8b1a15f27
|
refs/heads/master
| 2020-06-19T01:03:52.186180
| 2016-11-28T19:52:35
| 2016-11-28T19:52:35
| 74,930,135
| 0
| 0
| null | 2016-11-28T02:35:55
| 2016-11-28T02:35:55
| null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
from pymongo import MongoClient
def get_db():
client = MongoClient('mongodb://heroku_hxb7k9t8:4plphtk310q4t1f6cdbagftega@ds033106.mlab.com:33106/heroku_hxb7k9t8')
db = client.heroku_hxb7k9t8
return db
|
[
"spothorse9.lucy@gmail.com"
] |
spothorse9.lucy@gmail.com
|
3e3c445e3345d5edfc6acb24ebfa57aaa2674a73
|
a684cabb6db764055494e744752599c2d978f26b
|
/Lista 2/1j.py
|
b969a87cc78888a12e027889c8eacb938bf949d0
|
[] |
no_license
|
jarelio/FUP
|
c603c2264d160aca2a2e8a6dc7ba3af2baf6c04c
|
78555fe49e9046937483f7e4fcc6f1f135746d50
|
refs/heads/master
| 2020-04-17T04:53:04.097058
| 2019-01-17T18:06:38
| 2019-01-17T18:06:38
| 166,250,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
numero = float(input("Enter a number: "))
if (numero % 3 == 0):
    print("This number is a multiple of 3")
else:
    print("This number is not a multiple of 3")
|
[
"jareliofilho@gmail.com"
] |
jareliofilho@gmail.com
|
d7d665b31302f408522f3bbb16e092e476bc808b
|
a6d7b62e52506e2137710dc1970265395f418cf0
|
/base_entity/migrations/0005_auto_20170927_1137.py
|
572a618b5c92bdba171cc07a8e92c6628cb5e26e
|
[] |
no_license
|
wizcarder/wizcard-server
|
e5c192325b7313a9253c73c9600133432eb355c7
|
f69ba06f20a5be3701a9c0b9b040ef0fdf474266
|
refs/heads/master
| 2021-03-30T17:57:49.844868
| 2018-06-09T08:47:37
| 2018-06-09T08:47:37
| 27,246,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('base_entity', '0004_auto_20170923_2319'),
]
operations = [
migrations.CreateModel(
name='BaseEntityComponentsOwner',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_creator', models.BooleanField(default=True)),
],
),
migrations.RemoveField(
model_name='baseentitycomponentsuser',
name='base_entity_component',
),
migrations.RemoveField(
model_name='baseentitycomponentsuser',
name='user',
),
migrations.RemoveField(
model_name='baseentity',
name='engagements',
),
migrations.AddField(
model_name='baseentitycomponent',
name='engagements',
field=models.OneToOneField(related_name='engagements_baseentitycomponent_related', null=True, to='base_entity.EntityEngagementStats'),
),
migrations.AlterField(
model_name='baseentity',
name='created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='baseentity',
name='modified',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='baseentitycomponent',
name='entity_type',
field=models.CharField(default=b'EVT', max_length=3, choices=[(b'EVT', b'Event'), (b'CMP', b'Campaign'), (b'TBL', b'Table'), (b'WZC', b'Wizcard'), (b'SPK', b'Speaker'), (b'SPN', b'Sponsor'), (b'COW', b'Coowner'), (b'ATI', b'AttendeeInvitee'), (b'EXI', b'ExhibitorInvitee'), (b'MED', b'Media'), (b'COW', b'Coowner'), (b'AGN', b'Agenda'), (b'AGI', b'AgendaItem')]),
),
migrations.AlterField(
model_name='baseentitycomponent',
name='owners',
field=models.ManyToManyField(related_name='owners_baseentitycomponent_related', through='base_entity.BaseEntityComponentsOwner', to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='BaseEntityComponentsUser',
),
migrations.AddField(
model_name='baseentitycomponentsowner',
name='base_entity_component',
field=models.ForeignKey(to='base_entity.BaseEntityComponent'),
),
migrations.AddField(
model_name='baseentitycomponentsowner',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='baseentitycomponentsowner',
unique_together=set([('base_entity_component', 'owner')]),
),
]
|
[
"ubuntu@ip-172-31-28-47.ap-south-1.compute.internal"
] |
ubuntu@ip-172-31-28-47.ap-south-1.compute.internal
|
fe515d63f16e31fca3ec46d721b6100f81025ade
|
522f01770aa6dcc4cf628ced5ed9e248e8a7c528
|
/jin10/settings.py
|
849e9bf4850b43d902634fc2db47777168cab2c2
|
[] |
no_license
|
bopo/jin10
|
bb9ce60e53560508e684b886d9ec358aeac2b499
|
594e219920a3ab46639212d7e587389a609ea66c
|
refs/heads/master
| 2021-01-19T13:39:02.688693
| 2016-09-20T12:01:25
| 2016-09-20T12:01:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,139
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for jin10 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'jin10'
SPIDER_MODULES = ['jin10.spiders']
NEWSPIDER_MODULE = 'jin10.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'jin10 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
LOG_LEVEL = 'INFO'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'jin10.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'jin10.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'jin10.pipelines.Jin10Pipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"daulgas.tang@sachsen.cc"
] |
daulgas.tang@sachsen.cc
|
3bc48ad57dbf84c0d65a2c59a2f654b60f5b1089
|
a98bc512be9b9691200c6a0cc33a5fb7b4053c13
|
/com.ppc.Bot/devices/thermostat/thermostat_honeywell_lyric.py
|
4fc7ab8f7dd9ec9b7fd8517681898b5f9c38d9cf
|
[
"Apache-2.0"
] |
permissive
|
30s/botlab
|
c21682ed2c9aefc9cba688c6a8c136e9f969adc9
|
f7617147b65521a66ad88cdbc175176021a7a486
|
refs/heads/master
| 2020-04-04T18:39:00.776636
| 2018-10-04T04:56:32
| 2018-10-04T04:56:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,179
|
py
|
'''
Created on March 27, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
# Device Model
# https://presence.atlassian.net/wiki/display/devices/Thermostat
from devices.thermostat.thermostat import ThermostatDevice
# Set the decimal context precision (note: prec = 1 keeps only one significant digit).
from decimal import *
getcontext().prec = 1
class ThermostatHoneywellLyricDevice(ThermostatDevice):
"""Honeywell Lyric Thermostat Device"""
# List of Device Types this class is compatible with
DEVICE_TYPES = [4230]
# Minimum setpoint in Celsius
MIN_SETPOINT_C = 7.0
# Maximum setpoint in Celsius
MAX_SETPOINT_C = 29.0
def get_device_type_name(self, language):
"""
:return: the name of this device type in the given language, for example, "Entry Sensor"
"""
# NOTE: Device type name
return _("Honeywell Lyric Thermostat")
def set_system_mode(self, botengine, system_mode, reliably=False):
"""
Set the system mode
:param botengine:
:param system_mode:
:param reliably: True to keep retrying to get the command through
:return:
"""
        ThermostatDevice.set_system_mode(self, botengine, system_mode, reliably=reliably)
def set_cooling_setpoint(self, botengine, setpoint_celsius, reliably=False):
"""
Set the cooling setpoint
:param botengine: BotEngine environment
:param setpoint_celsius: Absolute setpoint in Celsius
:param reliably: True to keep retrying to get the command through
"""
        ThermostatDevice.set_cooling_setpoint(self, botengine, setpoint_celsius, reliably=reliably)
def set_heating_setpoint(self, botengine, setpoint_celsius, reliably=False):
"""
        Set the heating setpoint
        :param botengine: BotEngine environment
:param setpoint_celsius: Temperature in Celsius
:param reliably: True to keep retrying to get the command through
"""
        ThermostatDevice.set_heating_setpoint(self, botengine, setpoint_celsius, reliably=reliably)
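# Illustrative usage (a sketch only; the botengine instance and device construction are assumed
# and not shown here, so treat the call below as hypothetical):
#   device.set_heating_setpoint(botengine, 21.0, reliably=True)
# The class declares MIN_SETPOINT_C (7.0) and MAX_SETPOINT_C (29.0) as the supported Celsius
# range for this device type.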
|
[
"dmoss@peoplepowerco.com"
] |
dmoss@peoplepowerco.com
|
137eb82126f252ba550cbaa41ee96463bccc45fe
|
cde3ef1ee294b2bbeb44331debe5ba3f0c19bd9a
|
/getTrans.py
|
593025dd7f92cc216dae6902222eac4e702670be
|
[] |
no_license
|
wuyu1492/pair-HMM-project
|
21485642ad7d99743b4f974840dffebdf5a47bca
|
0b87eb8ea4b2a9c5961958ff5686b22eb45540ce
|
refs/heads/master
| 2021-01-20T05:37:44.121776
| 2017-09-24T16:56:21
| 2017-09-24T16:56:21
| 101,459,745
| 0
| 0
| null | 2017-08-26T05:02:22
| 2017-08-26T03:24:26
|
Python
|
UTF-8
|
Python
| false
| false
| 640
|
py
|
import numpy as np
"""
def getPrior(base, haplo, Qbase):
prior = np.zeros((len(base), len(haplo))) # initialize prior matrix
#print(prior.shape) # check prior shape
for i in range(0, len(base)):
for j in range(0, len(haplo)):
if base[i] == haplo[j]:
prior[i][j] = 1-Qbase[i]
else:
prior[i][j] = Qbase[i]
return prior
"""
def getTrans(Qi, Qd, Qg):
qi = np.array(Qi)
qd = np.array(Qd)
qg = np.array(Qg)
mm = 1.0 - (qi + qd)
im = 1.0 - qg
dm = im
mi = qi
ii = qg
md = qd
dd = qg
return mm, im, dm, mi, ii, md, dd
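# Quick sanity check (illustrative values only, not from the project's data): the outgoing
# transition probabilities of each pair-HMM state should sum to 1, since
#   match:  mm + mi + md = (1 - qi - qd) + qi + qd = 1
#   insert: im + ii      = (1 - qg) + qg          = 1
#   delete: dm + dd      = (1 - qg) + qg          = 1
if __name__ == "__main__":
    mm, im, dm, mi, ii, md, dd = getTrans(0.01, 0.02, 0.1)
    assert abs((mm + mi + md) - 1.0) < 1e-12
    assert abs((im + ii) - 1.0) < 1e-12
    assert abs((dm + dd) - 1.0) < 1e-12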
|
[
"noreply@github.com"
] |
noreply@github.com
|
3fdcf3ec812b8cd4991b70e219ec666356c0854c
|
7a63658141578f025fbe81e7c02784382d4ddfeb
|
/misc/approx_milestones.py
|
5f36a124a7e0371bda8a050020ffede2424108f0
|
[
"MIT"
] |
permissive
|
barrettotte/Issue-Utils
|
05c996fde590693c26f253842498a9154adae22a
|
ae359f2bd2245d2bbba84cd80af46572ce155062
|
refs/heads/master
| 2020-12-10T12:42:18.615180
| 2020-06-16T02:07:20
| 2020-06-16T02:07:20
| 233,598,052
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
# Attempt approximating the milestone an issue would have landed in
# based on the completion date and some leftover data I found in my GitLab boards.
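# Worked example (hypothetical dates, for illustration only): if the oldest closed card was
# completed on 2020-01-01 and another on 2020-01-10, then (9 days // 7) + 1 = 2, so the second
# card lands in milestone "Week 2", whose due date is computed as 2020-01-01 plus 2 weeks.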
import json
from datetime import datetime,date,timedelta
def main():
trello = []
date_fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
with open('export.json', 'r') as f:
trello = json.load(f)
# I only close issues on my 'Main' board, so hardcode index
closed = sorted([c for c in trello[0]['issues'] if not c['is_open']], key=lambda i: i['completed_date'])
lowest = datetime.strptime(closed[0]['completed_date'], date_fmt)
new_cards,milestones = [],[]
for card in trello[0]['issues']:
if not card['is_open']:
card_date = datetime.strptime(card['completed_date'], date_fmt)
milestone = ((card_date - lowest).days // 7) + 1
print("{} {} {} -> milestone {}".format(
card['identifier'],
card['name'][:25].rjust(25,' '),
card['completed_date'],
milestone
))
card['milestone_id'] = milestone
card['due_date'] = str(lowest + timedelta(weeks=milestone))
milestones.append({
'name': "Week {}".format(milestone),
'id': milestone
})
new_cards.append(card)
print("\nApproximated milestone for {} card(s)".format(len(closed)))
print("Oldest card: {}".format(lowest))
# get unique milestones and calculate due dates
unique_ms = sorted([dict(t) for t in {tuple(d.items()) for d in milestones}], key=lambda i: i['id'])
for i,ms in enumerate(unique_ms):
unique_ms[i]['due_date'] = str(lowest + timedelta(weeks=ms['id']))
trello[0]['milestones'] = unique_ms
trello[0]['issues'] = sorted(new_cards, key=lambda i: i['milestone_id'])
with open('export.json', 'w+') as f:
f.write(json.dumps(trello, indent=2))
if __name__ == "__main__":
    main()
|
[
"barrettotte@gmail.com"
] |
barrettotte@gmail.com
|
6cdd4ab3c27594dfd256c8dbc494d71ae2dfcf3f
|
6c641bb3be6949306dc7834de883a2e66a751eac
|
/analyze_historic_news_movement.py
|
9faf452fcf2b4cce8ea1d3018c3c3a514c0704b1
|
[] |
no_license
|
andrewstevens59/python_machine_learning_samples
|
3117287d7d3539a3a8d80b28876d8162bdae4879
|
3d20a4b6f19265558aa4f5d1e8ef3592a068c0d0
|
refs/heads/master
| 2021-08-11T15:51:00.673757
| 2021-08-10T08:14:17
| 2021-08-10T08:14:17
| 120,370,709
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46,077
|
py
|
import sys
import math
from random import *
import os.path
import pickle
from StringIO import StringIO
from sklearn.cluster import KMeans
from numpy import linalg as LA
from bayes_opt import BayesianOptimization
from datetime import timedelta
import execute_news_signals
from execute_news_signals import ModelType
import time
import datetime
import calendar
from dateutil import tz
import requests
import lxml.html as lh
import json
import copy
import re
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.datasets.samples_generator import make_blobs
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score
import gzip, cPickle
import string
import random as rand
import os
from sklearn.cluster import SpectralClustering
from sklearn.mixture import GaussianMixture
from sklearn.linear_model import LinearRegression
from sklearn import mixture
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn import ensemble
from sklearn.metrics import r2_score
from sklearn import svm
from uuid import getnode as get_mac
import socket
import paramiko
import enum
import mysql.connector
all_currency_pairs = [
"AUD_CAD", "CHF_JPY", "EUR_NZD", "GBP_JPY",
"AUD_CHF", "EUR_AUD", "GBP_NZD", "USD_CAD",
"AUD_JPY", "EUR_CAD", "GBP_USD", "USD_CHF",
"AUD_NZD", "EUR_CHF", "EUR_USD", "NZD_CAD",
"AUD_USD", "EUR_GBP", "GBP_AUD", "NZD_CHF",
"CAD_CHF", "EUR_JPY", "GBP_CAD", "NZD_JPY",
"CAD_JPY", "GBP_CHF", "NZD_USD", "USD_JPY"
]
def load_time_series(symbol, year, is_bid_file):
if get_mac() == 150538578859218:
prefix = '/Users/andrewstevens/Downloads/economic_calendar/'
else:
prefix = '/root/trading_data/'
from os import listdir
from os.path import isfile, join
onlyfiles = [f for f in listdir(prefix) if isfile(join(prefix, f))]
pair = symbol[0:3] + symbol[4:7]
for file in onlyfiles:
if pair in file and 'Candlestick_1_Hour_BID' in file:
break
if pair not in file:
return None
with open(prefix + file) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
from_zone = tz.gettz('America/New_York')
to_zone = tz.tzutc()
prices = []
times = []
volumes = []
content = content[1:]
if year != None:
start_time = calendar.timegm(datetime.datetime.strptime(str(year) + ".1.1 00:00:00", "%Y.%m.%d %H:%M:%S").timetuple())
end_time = calendar.timegm(datetime.datetime.strptime(str(year) + ".12.31 00:00:00", "%Y.%m.%d %H:%M:%S").timetuple())
for index in range(len(content)):
toks = content[index].split(',')
utc = datetime.datetime.strptime(toks[0], "%d.%m.%Y %H:%M:%S.%f")
time = calendar.timegm(utc.timetuple())
if year == None or (time >= start_time and time < end_time):
high = float(toks[2])
low = float(toks[3])
o_price = float(toks[1])
c_price = float(toks[4])
volume = float(toks[5])
if high != low or utc.weekday() in {4}:
prices.append(c_price)
times.append(time)
volumes.append(volume)
return prices, times, volumes
class Order:
def __init__(self):
self.pair = ""
self.dir = 0
self.open_price = 0
self.time = 0
self.readable_time = ""
self.amount = 0
self.id = 0
self.side = 0
self.pnl = 0
self.max_pnl = 0
self.open_predict = 0
self.tp_price = 0
self.sl_price = 0
self.hold_time = 0
self.is_invert = False
self.invert_num = 0
self.reduce_amount = 0
self.match_amount = 0
self.equity_factor = 0
def barrier_function(prev_releases, avg_probability_low_barrier, currency_pair, is_norm_prob, is_norm_base, auc_barrier_mult, is_low_barrier, max_barrier, currency_weights, max_release_time_delay):
avg_dir = 0
avg_count = 0
aucs = []
probs = []
found = False
time_delay_map = {}
description_map = {}
for release in prev_releases:
time_stamp = release[2]
relase_time = release[3]
currency = release[0]
barrier = release[4]
'''
if is_low_barrier:
prob = release[5] - avg_probability_low_barrier[barrier]
else:
key = str(barrier) + "_" + str(auc_barrier_mult)
if key in avg_probability_high_barrier:
prob = release[5] - avg_probability_high_barrier[key]
else:
prob = release[5]
'''
if currency_weights != None and currency in currency_weights:
currency_weight = currency_weights[currency]
else:
currency_weight = 1.0
if currency_weight < 0.01:
continue
if barrier in avg_probability_low_barrier:
prob = release[5] - avg_probability_low_barrier[barrier]
else:
prob = release[5]
auc = release[6]
description = release[7]
'''
key = str(time_stamp) + "_" + str(relase_time)
if key not in time_delay_map:
hours = calculate_time_diff(time_stamp, relase_time)
time_delay_map[key] = hours
else:
hours = time_delay_map[key]
if hours > max_release_time_delay:
continue
'''
if (currency != currency_pair[0:3] and currency != currency_pair[4:7] and is_relavent_currency == True):
continue
if barrier > max_barrier:
continue
if abs(prob - 0.5) < 0.5 - max(0, (auc - 0.5) * auc_barrier_mult) and is_low_barrier == False:
continue
if is_norm_prob:
if prob > 0.5:
prob = 1.0
else:
prob = 0.0
if auc > 0.51:
avg_dir += barrier * (prob - 0.5) * currency_weight
if description not in description_map:
description_map[description] = []
description_map[description].append(barrier * (prob - 0.5) * currency_weight)
if is_norm_base:
avg_count += abs(prob - 0.5) * currency_weight
else:
avg_count += (prob - 0.5) * currency_weight
found = True
exchange_rate = release[9]
if found:
for release in prev_releases:
auc = release[6]
prob = release[5]
aucs.append(0.5)
return avg_dir, avg_count, aucs, probs, description_map
def time_decay_function_regression(prev_releases, avg_probability_low_barrier, currency_pair, is_norm_prob, is_norm_base, auc_barrier_mult, is_low_barrier, max_barrier, currency_weights, max_release_time_delay):
avg_dir = 0
avg_count = 0
aucs = []
probs = []
found = False
for release in prev_releases:
time_stamp = release[2]
relase_time = release[3]
currency = release[0]
barrier = release[4]
prob = release[5]
auc = release[6]
if (currency != currency_pair[0:3] and currency != currency_pair[4:7] and is_relavent_currency == True):
continue
if barrier > max_barrier:
continue
if abs(prob) * auc_barrier_mult < auc and is_low_barrier == False:
continue
avg_dir += prob
avg_count += 1
exchange_rate = release[9]
if found:
for release in prev_releases:
auc = release[6]
prob = release[5]
aucs.append(0.5)
return avg_dir, avg_count, aucs, probs
def time_decay_function_binary(prev_releases, avg_probability_low_barrier, currency_pair, is_norm_prob, is_norm_base, auc_barrier_mult, is_low_barrier, max_barrier, currency_weights=None):
avg_dir = 0
avg_count = 0
aucs = []
probs = []
found = False
for release in prev_releases:
time_stamp = release[2]
relase_time = release[3]
currency = release[0]
barrier = release[4]
if is_low_barrier:
prob = release[5] - avg_probability_low_barrier[barrier]
else:
key = str(barrier) + "_" + str(auc_barrier_mult)
if key in avg_probability_high_barrier:
prob = release[5] - avg_probability_high_barrier[key]
else:
prob = release[5]
auc = release[6]
if (currency != currency_pair[0:3] and currency != currency_pair[4:7] and is_relavent_currency == True):
continue
if barrier > max_barrier:
continue
if abs(prob - 0.5) < 0.5 - max(0, (auc - 0.5) * auc_barrier_mult) and is_low_barrier == False:
continue
if is_norm_prob:
if prob > 0.5:
prob = 1.0
else:
prob = 0.0
if auc > 0.51:
avg_dir += 50 * (prob - 0.5)
avg_count += 1
found = True
exchange_rate = release[9]
if found:
for release in prev_releases:
auc = release[6]
prob = release[5]
aucs.append(0.5)
return avg_dir, avg_count, aucs, probs
def calculate_time_diff(now_time, ts):
date = datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
s = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
date = datetime.datetime.utcfromtimestamp(now_time).strftime('%Y-%m-%d %H:%M:%S')
e = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
_diff = (e - s)
while s < e:
max_hour = 24
if s.day == e.day:
max_hour = e.hour
if s.weekday() in {4}:
max_hour = 21
if s.weekday() in {4} and s.hour in {21, 22, 23}:
hours = 1
_diff -= timedelta(hours=hours)
elif s.weekday() in {5}:
hours = max_hour - s.hour
_diff -= timedelta(hours=hours)
elif s.weekday() in {6} and s.hour < 21:
hours = min(21, max_hour) - s.hour
_diff -= timedelta(hours=hours)
else:
hours = max_hour - s.hour
if hours == 0:
break
s += timedelta(hours=hours)
return (_diff.total_seconds() / (60 * 60))
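# Illustrative behaviour (not an exhaustive spec): the loop above strips out the closed-market
# window from Friday 21:00 to Sunday 21:00 UTC, so two timestamps 72 wall-clock hours apart that
# straddle a full weekend come back as roughly 24 tradable hours (72 - 48).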
def is_valid_trading_period(ts):
date = datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
s = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
if s.weekday() in {4} and s.hour in {21, 22, 23}:
return False
if s.weekday() in {5}:
return False
if s.weekday() in {6} and s.hour < 21:
return False
return True
def linreg(X, Y):
"""
return a,b in solution to y = ax + b such that root mean square distance between trend line and original points is minimized
"""
N = len(X)
Sx = Sy = Sxx = Syy = Sxy = 0.0
for x, y in zip(X, Y):
Sx = Sx + x
Sy = Sy + y
Sxx = Sxx + x*x
Syy = Syy + y*y
Sxy = Sxy + x*y
det = Sxx * N - Sx * Sx
return (Sxy * N - Sy * Sx)/det, (Sxx * Sy - Sx * Sxy)/det
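# Hedged sanity check (toy numbers, not market data): points lying exactly on y = 2x + 1,
# e.g. linreg([0, 1, 2, 3], [1, 3, 5, 7]), should return a slope/intercept of (2.0, 1.0).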
def find_std(price_df, curr_time, lag):
before_prices = price_df["prices"][price_df["times"] < curr_time].tail(lag).values.tolist()
a, b = linreg(range(len(before_prices)),before_prices)
return a, np.std([before_prices[index] - ((a * index) + b) for index in range(len(before_prices))])
def back_test(select_pair, adjust_factor, is_hedge, is_low_barrier, auc_barrier_mult,
is_norm_base, is_any_barrier, is_relavent_currency, max_barrier, is_norm_prob,
is_norm_signal, is_max_volatility, model_type, day_wait, avg_probability_high_barrier,
max_pip_barrier, reward_risk_ratio, max_order_size, min_trade_volatility,
currency_weights, max_pip_slippage, grad_mult, decay_frac):
orders = []
curr_trade_dir = None
ideal_position = 0
total_profit = 5000
min_profit = 0
max_profit = 0
pnl = 0
max_exposure = 0
prev_releases = []
curr_release_time = setup_rows[0][2]
prev_time_stamp = setup_rows[0][2]
last_order_time = 0
equity = []
equity_buffer = []
float_pnls = []
news_signal_frequency = []
pnls = []
pip_diffs = []
min_time = 99999999999999999
max_time = 0
max_equity = 0
hit_stop_loss_count = 0
for row in setup_rows:
currency = row[0]
currency_pair = row[1]
time_stamp = row[2]
if is_valid_trading_period(time_stamp) == False:
continue
min_time = min(min_time, time_stamp)
max_time = max(max_time, time_stamp)
if time_stamp != curr_release_time:
#growth_factor = ((total_profit + pnl) / 5000)
#growth_factor = 5000
growth_factor = 1.0
if select_pair[4:7] == "JPY":
pip_size = 0.01
else:
pip_size = 0.0001
curr_release_time = time_stamp
current_price = prev_releases[0][9]
prev_time_stamp = prev_releases[0][2]
grad, std1 = find_std(price_df, prev_time_stamp, 24 * 20)
grad /= pip_size
std1 /= pip_size
final_auc_barrier = auc_barrier_mult * (std1 / 100)
if is_max_volatility:
max_barrier = max(max_barrier, std1)
if model_type == ModelType.barrier:
avg_dir, avg_count, aucs, probs, description_map = barrier_function(prev_releases, avg_probability_high_barrier, currency_pair, is_norm_prob, is_norm_base, final_auc_barrier, is_low_barrier, max_barrier, currency_weights, 0)
elif model_type == ModelType.time_regression:
avg_dir, avg_count, aucs, probs = time_decay_function_regression(prev_releases, avg_probability_high_barrier, currency_pair, is_norm_prob, is_norm_base, final_auc_barrier, is_low_barrier, max_barrier, currency_weights, 0)
elif model_type == ModelType.time_classification:
avg_dir, avg_count, aucs, probs = time_decay_function_binary(prev_releases, avg_probability_high_barrier, currency_pair, is_norm_prob, is_norm_base, final_auc_barrier, is_low_barrier, max_barrier, currency_weights)
avg_dir = 0
avg_count = 0
for description in description_map:
avg_dir += np.mean(description_map[description])
avg_count += 1
prev_releases = []
pnl = 0
total_buy = 0
total_sell = 0
order_count = 0
total_amount = 0
for order in orders:
if order.dir == (current_price > order.open_price):
profit = (abs(order.open_price - current_price) - (pip_size * 5)) * order.amount
else:
profit = (-abs(order.open_price - current_price) - (pip_size * 5)) * order.amount
pip_diff = (profit / pip_size) / order.amount
if select_pair[4:7] == "JPY":
profit /= 100
pip_diffs.append(abs(pip_diff))
order.max_pnl = max(order.max_pnl, profit)
order.pnl = profit
total_amount += order.amount
pnl += profit
total_amount += order.amount
if order.dir:
total_buy += order.amount
else:
total_sell += order.amount
if len(orders) > 0:
float_pnls.append(pnl)
if len(float_pnls) > 1000:
float_pnls = float_pnls[1:]
'''
if (avg_dir > 0) == (grad > 0):
std1 += abs(grad) * grad_mult
'''
if abs(avg_count) > 0:# and (len(orders) == 0 or abs(exchange_rate - price_deltas[-1]) > pip_size * 10):
equity.append(total_profit + pnl)
if is_norm_signal:
signal = avg_dir / abs(avg_count)
else:
signal = avg_dir
signal = min(signal, max_order_size)
signal = max(signal, -max_order_size)
if abs(signal) > 0 and std1 > min_trade_volatility:
news_signal_frequency.append(prev_time_stamp)
while (prev_time_stamp - news_signal_frequency[0]) > 60 * 60 * 24 * 30:
news_signal_frequency = news_signal_frequency[1:]
news_signal_time_lapse = float(prev_time_stamp - news_signal_frequency[0])
news_signal_time_lapse /= (60 * 60 * 24 * 30)
max_equity = max(max_equity, total_profit + pnl)
if signal > 0:
amount = abs(signal) * ((900000 * 0.0001 * growth_factor) + (0)) * 0.5
else:
amount = -abs(signal) * ((900000 * 0.0001 * growth_factor) + (0)) * 0.5
if pnl > abs(np.mean(float_pnls)) * 1.2:
for order in orders:
pnls.append(order.pnl * (1.0 / order.growth_factor))
total_profit += pnl
orders = []
pnl = 0
curr_trade_dir = total_buy < total_sell
if len(orders) > 0:
ideal_position = (ideal_position * (1 - decay_frac)) + (amount * decay_frac)
delta_pos = ideal_position - (total_buy - total_sell)
delta_fraction = abs(delta_pos) / abs(total_buy - total_sell)
else:
ideal_position = amount
delta_pos = amount
delta_fraction = 1.0
amount = abs(delta_pos)
signal = delta_pos
total_amount += amount
if (delta_fraction > 0.1) and total_amount < (total_profit + pnl) * 50:
if is_hedge == False:
temp_orders = orders
new_orders = []
else:
temp_orders = []
new_orders = orders
for curr_order in temp_orders:
if (amount < 0) or ((signal > 0) == curr_order.trade_dir):
new_orders.append(curr_order)
continue
if amount >= curr_order.amount:
total_profit += curr_order.pnl
amount -= curr_order.amount
pnls.append(curr_order.pnl * (1.0 / order.growth_factor))
else:
total_profit += curr_order.pnl * (amount / curr_order.amount)
pnls.append(curr_order.pnl * (amount / curr_order.amount) * (1.0 / order.growth_factor))
curr_order.amount -= amount
new_orders.append(curr_order)
amount = -1
last_order_time = time_stamp
orders = new_orders
if amount > 0:
order = Order()
order.open_price = current_price
							order.dir = signal > 0  # store the direction on the order itself; the pnl checks below read order.dir
							order.trade_dir = order.dir
order.amount = amount
order.open_time = prev_time_stamp
order.growth_factor = growth_factor
curr_trade_dir = (signal > 0)
if order.trade_dir:
total_buy += amount
else:
total_sell += amount
orders.append(order)
max_exposure = max(max_exposure, total_amount / ((total_profit + pnl) * 50))
time_years = float(max_time - min_time) / (60 * 60 * 24 * 365)
max_profit = max(max_profit, total_profit + pnl)
min_profit = min(min_profit, total_profit + pnl - max_profit)
between_df = price_df[(price_df["times"] >= prev_time_stamp) & (price_df["times"] <= curr_release_time)]
between_prices = between_df["prices"].values.tolist()
between_times = between_df["times"].values.tolist()
total_buy = 0
total_sell = 0
for order in orders:
if order.dir:
total_buy += order.amount
else:
total_sell += order.amount
# go over previous prices before release
if len(between_prices) > 0:
prev_price = between_prices[0]
for between_price, between_time in zip(between_prices, between_times):
if len(orders) == 0:
break
price_gap = (between_price - prev_price) / pip_size
prev_price = between_price
date = datetime.datetime.utcfromtimestamp(between_time).strftime('%Y-%m-%d %H:%M:%S')
s = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
min_order_time = 99999999999999999
inst_profit = 0
for order in orders:
if order.dir == (between_price > order.open_price):
profit = (abs(order.open_price - between_price) - (pip_size * 5)) * order.amount
else:
profit = (-abs(order.open_price - between_price) - (pip_size * 5)) * order.amount
if select_pair[4:7] == "JPY":
profit /= 100
order.pnl = profit
min_order_time = min(min_order_time, order.open_time)
inst_profit += order.pnl
pnl = 0
new_orders = []
order_count = 0
for order in orders:
if between_time < order.open_time:
continue
if order.dir == (between_price > order.open_price):
profit = (abs(order.open_price - between_price) - (pip_size * 5)) * order.amount
else:
profit = (-abs(order.open_price - between_price) - (pip_size * 5)) * order.amount
pip_diff = (profit / pip_size) / order.amount
if select_pair[4:7] == "JPY":
profit /= 100
order.pnl = profit
max_time_diff = calculate_time_diff(between_time, min_order_time)
is_weekend = (s.weekday() in {4} and s.hour >= 20) or (s.weekday() in {5})
if (((pip_diff < -max_pip_barrier) or (pip_diff > max_pip_barrier * reward_risk_ratio))) or max_time_diff > (24 * day_wait) / len(orders) or (len(orders) - order_count >= 10) or (inst_profit < -50 * growth_factor * adjust_factor) or (inst_profit > 50 * growth_factor * adjust_factor * reward_risk_ratio):
total_profit += profit
order_count += 1
							limit_profit = max(-max_pip_barrier * order.amount, profit)
							limit_profit = min(max_pip_barrier * order.amount * reward_risk_ratio, limit_profit)
							limit_profit = max(-50 * growth_factor * adjust_factor * 2, limit_profit)
							limit_profit = min(50 * reward_risk_ratio * growth_factor * adjust_factor * 2, limit_profit)
pnls.append(limit_profit * (1.0 / order.growth_factor))
if profit < 0:
hit_stop_loss_count += 1
continue
pnl += profit
new_orders.append(order)
orders = new_orders
equity_buffer.append(total_profit + pnl)
# don't trade same direction as a loser next time
if len(orders) == 0 and max(total_buy, total_sell) > 0:
if pnl > 0:
curr_trade_dir = total_buy < total_sell
else:
curr_trade_dir = total_buy > total_sell
prev_releases.append(row)
print "Sharpe", np.mean(pnls) / np.std(pnls)
print "Samples", len(pnls) / time_years
if abs(time_years) > 0:
print time_stamp, total_profit + pnl, ((((total_profit + pnl - 5000) / 5000)) / time_years), time_years, len(orders), min_profit, max_exposure
if len(pnls) == 0:
return -100, [0], 0, 0, 0
return np.mean(pnls) / np.std(pnls), equity, float(hit_stop_loss_count) / len(pnls), np.mean(pnls), len(pnls) / time_years
def search1(setting, is_relavent_currency, select_pair):
pbounds = {
'adjust_factor': (setting["adjust_factor"], setting["adjust_factor"]),
'day_wait' : (setting["day_wait"], setting["day_wait"]),
'auc_barrier' : (setting["auc_barrier"], setting["auc_barrier"]),
'is_norm_signal' : (setting["is_norm_signal"], setting["is_norm_signal"]),
'max_barrier' : (50, 100),
'min_trade_volatility' : (setting["min_trade_volatility"], setting["min_trade_volatility"]),
'max_pip_barrier' : (setting["max_pip_barrier"], setting["max_pip_barrier"]),
'reward_risk_ratio' : (setting["reward_risk_ratio"], setting["reward_risk_ratio"]),
'max_order_size' : (setting["max_order_size"], setting["max_order_size"]),
'is_close_pos_trade' : (setting["is_close_pos_trade"], setting["is_close_pos_trade"]),
'max_pip_slippage' : (200, 200),
'AUD' : (0, 1),
'GBP' : (0, 1),
'CAD' : (0, 1),
'EUR' : (0, 1),
'NZD' : (0, 1),
'CHF' : (0, 1),
'USD' : (0, 1),
'JPY' : (0, 1),
}
if is_relavent_currency:
for currency in ['AUD', 'GBP', 'CAD', 'EUR', 'NZD', 'CHF', 'USD', 'JPY']:
if currency != select_pair[0:3] and currency != select_pair[4:7]:
pbounds[currency] = (1, 1)
return pbounds
def search2(setting):
pbounds = {
'adjust_factor': (setting["adjust_factor"], setting["adjust_factor"]),
'day_wait' : (0.5, setting["day_wait"]),
'auc_barrier' : (setting["auc_barrier"], setting["auc_barrier"]),
'is_norm_signal' : (setting["is_norm_signal"], setting["is_norm_signal"]),
'max_barrier' : (40, 100),
'min_trade_volatility' : (setting["min_trade_volatility"], setting["min_trade_volatility"]),
'max_pip_barrier' : (80, 220),
'reward_risk_ratio' : (setting["reward_risk_ratio"], setting["reward_risk_ratio"]),
'max_order_size' : (30, 180),
'is_close_pos_trade' : (setting["is_close_pos_trade"], setting["is_close_pos_trade"]),
'max_pip_slippage' : (200, 200),
}
return pbounds
def search3(setting):
if "min_trade_volatility" not in setting:
setting["min_trade_volatility"] = 50
if "decay_frac" not in setting:
setting["decay_frac"] = 0.5
pbounds = {
'adjust_factor': (setting["adjust_factor"], setting["adjust_factor"]),
'day_wait' : (500000, 500000),
'auc_barrier' : (0.1, 0.1),
'is_norm_signal' : (setting["is_norm_signal"], setting["is_norm_signal"]),
'max_barrier' : (setting["max_barrier"], setting["max_barrier"]),
'is_max_volatility' : (setting["is_max_volatility"], setting["is_max_volatility"]),
'max_pip_barrier' : (setting["max_pip_barrier"], setting["max_pip_barrier"]),
'reward_risk_ratio' : (setting["reward_risk_ratio"], setting["reward_risk_ratio"]),
'max_order_size' : (setting["max_order_size"], setting["max_order_size"]),
'min_trade_volatility' : (setting["min_trade_volatility"], setting["min_trade_volatility"]),
'max_pip_slippage' : (3.0, 3.0),
'grad_mult' : (1, 1),
'decay_frac' : (setting["decay_frac"], setting["decay_frac"])
}
return pbounds
def bayesian_optimization_output(setting, select_pair, is_relavent_currency, is_hedge,
is_low_barrier, is_any_barrier, model_type, cursor):
pbounds = search3(setting)
#pbounds = search1(setting, is_relavent_currency, select_pair)
all_sharpes = []
samples_set = []
def xgboost_hyper_param1(adjust_factor, day_wait, auc_barrier, is_norm_signal,
max_barrier, is_max_volatility, max_pip_barrier, reward_risk_ratio, max_order_size,
is_close_pos_trade, max_pip_slippage, AUD, GBP, CAD, EUR, NZD, CHF, USD, JPY):
currency_weights = {}
currency_weights['AUD'] = AUD
currency_weights['GBP'] = GBP
currency_weights['CAD'] = CAD
currency_weights['EUR'] = EUR
currency_weights['NZD'] = NZD
currency_weights['CHF'] = CHF
currency_weights['USD'] = USD
currency_weights['JPY'] = JPY
if AUD < 0.01:
AUD = 0
if GBP < 0.01:
GBP = 0
if CAD < 0.01:
CAD = 0
if EUR < 0.01:
EUR = 0
if NZD < 0.01:
NZD = 0
if CHF < 0.01:
CHF = 0
if USD < 0.01:
USD = 0
if JPY < 0.01:
JPY = 0
sharpe, equity_curve, stop_loss_ratio, mean_pnl, samples = back_test(select_pair, adjust_factor, is_hedge,
is_low_barrier, auc_barrier, True, False, is_relavent_currency,
max_barrier, False, is_norm_signal > 0.5,
is_max_volatility > 0.5, model_type, day_wait,
{}, max_pip_barrier, reward_risk_ratio, max_order_size,
min_trade_volatility, currency_weights, 200)
if samples < 10:
return -1
all_sharpes.append(sharpe)
samples_set.append(samples)
return sharpe * max(1, (equity_curve[-1] / 5000))
def xgboost_hyper_param(adjust_factor, day_wait, auc_barrier, is_norm_signal,
max_barrier, is_max_volatility, max_pip_barrier, reward_risk_ratio, max_order_size,
min_trade_volatility, max_pip_slippage, grad_mult, decay_frac):
if "currency_weights" in setting:
currency_weights = setting["currency_weights"]
else:
currency_weights = None
sharpe, equity_curve, stop_loss_ratio, mean_pnl, samples = back_test(select_pair, adjust_factor, is_hedge,
is_low_barrier, auc_barrier, True, False, is_relavent_currency,
max_barrier, False, is_norm_signal > 0.5,
is_max_volatility > 0.5, model_type, day_wait,
{}, max_pip_barrier, reward_risk_ratio, max_order_size,
min_trade_volatility, currency_weights, max_pip_slippage,
grad_mult, decay_frac)
if samples < 10:
return -1
all_sharpes.append(sharpe)
samples_set.append(samples)
return sharpe * max(1, (equity_curve[-1] / 5000))
optimizer = BayesianOptimization(
f=xgboost_hyper_param,
pbounds=pbounds,
)
optimizer.maximize(
init_points=4,
n_iter=8,
)
max_sharpe = max(all_sharpes)
samples = [sample for sharpe, sample in zip(all_sharpes, samples_set) if sharpe >= max_sharpe]
return optimizer.max['params'], optimizer.max['target'], max_sharpe, samples[0], np.mean(all_sharpes)
model_type = sys.argv[3]
if model_type == "barrier":
model_type = ModelType.barrier
elif model_type == "time_regression":
model_type = ModelType.time_regression
elif model_type == "time_classification":
model_type = ModelType.time_classification
def rank_data_size():
final_fitted_map = execute_news_signals.get_strategy_parameters(model_type)
#final_fitted_map = pickle.load(open(str(model_type) + "_final_fitted_map.pickle", "rb"))
for j in range(4):
ranking = {}
for i in [j]:
s3 = []
for pair in final_fitted_map:
if pair not in all_currency_pairs:
continue
if pair not in ranking:
ranking[pair] = []
ranking[pair].append(final_fitted_map[pair][i]['sharpe'] / final_fitted_map[pair][i]['samples'])
final_ranking = []
for pair in final_fitted_map:
if pair not in all_currency_pairs:
continue
if final_fitted_map[pair+"_sample_num"] > 1202353:
continue
ranking[pair] = np.mean(ranking[pair])
final_ranking.append([ranking[pair], pair])
final_ranking = sorted(final_ranking, key=lambda x: x[0], reverse=True)
print ([item[1] for item in final_ranking])
def check_sample_num():
for select_pair in all_currency_pairs:
cnx = mysql.connector.connect(user='andstv48', password='Password81',
host='mysql.newscaptial.com',
database='newscapital')
cursor = cnx.cursor()
query = ("SELECT count(*), min(time_stamp), max(time_stamp) FROM historic_news_barrier_probs where \
currency_pair = '" + select_pair + "' and model_key='" + sys.argv[2] + "' order by time_stamp \
")
cursor.execute(query)
for row1 in cursor:
duration = float(row1[2] - row1[1]) / (60 * 60 * 24 * 365)
print (select_pair, duration)
def check_sample_num_regression():
for select_pair in all_currency_pairs:
cnx = mysql.connector.connect(user='andstv48', password='Password81',
host='mysql.newscaptial.com',
database='newscapital')
cursor = cnx.cursor()
query = ("SELECT count(*), min(time_stamp), max(time_stamp) FROM historic_new_regression_probs where \
currency_pair = '" + select_pair + "' and model_key='R1' order by time_stamp \
")
cursor.execute(query)
for row1 in cursor:
count = row1[0]
if count > 0:
duration = float(row1[2] - row1[1]) / (60 * 60 * 24 * 365)
if count > 0:
print (select_pair, duration)
#final_fitted_map = execute_news_signals.get_strategy_parameters(model_type)
final_fitted_map = pickle.load(open(str(model_type) + "_final_fitted_map_currency.pickle", "rb"))
if model_type == ModelType.barrier:
select_pairs = all_currency_pairs
elif model_type == ModelType.time_regression:
select_pairs = ["NZD_USD", "GBP_USD", "GBP_CAD", "NZD_JPY", "AUD_CAD", "USD_CAD", "EUR_JPY", "GBP_AUD", "AUD_NZD", "AUD_USD", "AUD_JPY", "CHF_JPY", "EUR_GBP"]#EUR_NZD, GBP_NZD,EUR_CHF,NZD_CAD
else:
select_pairs = ["EUR_NZD", "GBP_NZD", "EUR_CHF", "NZD_CAD", "USD_CAD", "GBP_USD", "AUD_USD"]
all_sharpes = []
for select_pair in all_currency_pairs:
print (select_pair)
'''
if select_pair in final_fitted_map:
continue
'''
cnx = mysql.connector.connect(user='andstv48', password='Password81',
host='mysql.newscaptial.com',
database='newscapital')
cursor = cnx.cursor()
query = ("SELECT count(*) FROM historic_news_barrier_probs where \
currency_pair = '" + select_pair + "' and model_key='" + sys.argv[2] + "' order by time_stamp \
")
cursor.execute(query)
for row1 in cursor:
sample_num = row1[0]
'''
if sample_num == 0 or (select_pair + "_sample_num" in final_fitted_map and (sample_num == final_fitted_map[select_pair + "_sample_num"])):
continue
'''
prices, times, volumes = load_time_series(select_pair, None, True)
buy_price_df = pd.DataFrame()
buy_price_df['times'] = times
buy_price_df["price_buy"] = prices
buy_price_df["volume_buy"] = volumes
buy_price_df.set_index('times', inplace=True)
buy_price_df.fillna(method='ffill', inplace=True)
prices, times, volumes = load_time_series(select_pair, None, False)
sell_price_df = pd.DataFrame()
sell_price_df['times'] = times
sell_price_df["price_sell"] = prices
sell_price_df["volume_sell"] = volumes
sell_price_df.set_index('times', inplace=True)
sell_price_df.fillna(method='ffill', inplace=True)
price_df = buy_price_df.join(sell_price_df)
price_df["prices"] = price_df.apply(lambda x: (x["price_buy"] + x["price_sell"]) * 0.5, axis=1)
price_df.reset_index(inplace=True)
def find_max_cutoff(percentile):
prices = price_df["prices"].values.tolist()
deltas = [prices[index] - prices[index-1] for index in range(1, len(prices))]
gammas = [abs(deltas[index]) / abs(np.mean(deltas[index-24:index])) for index in range(24, len(deltas)) if abs(np.mean(deltas[index-24:index])) > 0]
return np.percentile(gammas, percentile)
query = ("SELECT avg(probability), barrier FROM historic_news_barrier_probs where \
currency_pair = '" + select_pair + "' and model_key='" + sys.argv[2] + "' group by barrier order by time_stamp \
")
cursor.execute(query)
'''
avg_probability_high_barrier = {}
for auc_barrier_mult in [1.0, 1.5, 2.0, 2.5]:
query = ("SELECT avg(probability), barrier FROM historic_news_barrier_probs where \
currency_pair = '" + select_pair + "' and model_key='" + sys.argv[2] + "' AND abs(probability - 0.5) > 0.5 - ((auc - 0.5) * " + str(auc_barrier_mult) + ") group by barrier order by time_stamp \
")
cursor.execute(query)
for row1 in cursor:
avg_probability_high_barrier[str(row1[1]) + "_" + str(auc_barrier_mult)] = row1[0] - 0.5
print ("high barrier bias", np.mean(avg_probability_high_barrier.values()), auc_barrier_mult)
print (avg_probability_high_barrier.keys())
'''
avg_probability_low_barrier = {}
for row1 in cursor:
avg_probability_low_barrier[row1[1]] = row1[0] - 0.5
print ("low barrier bias", np.mean(avg_probability_low_barrier.values()))
query = ("SELECT * FROM historic_news_barrier_probs where \
currency_pair = '" + select_pair + "' and model_key='" + sys.argv[2] + "' order by time_stamp \
")
cursor.execute(query)
print ("out")
setup_rows = []
for row1 in cursor:
setup_rows.append(row1)
print len(setup_rows), "tot"
# pickle.dump(setup_rows, open("rows" + select_pair + ".pickle", "wb"))
#setup_rows = pickle.load(open("rows" + select_pair + ".pickle", "rb"))
settings = []
aucs = execute_news_signals.get_strategy_parameters(ModelType.barrier)[select_pair]
setting_offset = 0
for is_low_barrier in [False]:
for is_hedge in [True, False]:
for is_relavent_currency in [False, True]:
print ("optimize original ", aucs[len(settings)]["target"])
setting = aucs[len(settings)]
if "currency_weights" in setting:
currency_weights = setting["currency_weights"]
else:
currency_weights = None
'''
if len(setup_rows) != final_fitted_map[select_pair+"_sample_num"]:
print ("recalulating optimal sharpe")
sharpe, equity_curve, stop_loss_ratio, mean_pnl, samples = back_test(select_pair, setting["adjust_factor"], is_hedge,
is_low_barrier, setting["auc_barrier"], True, False, is_relavent_currency,
setting["max_barrier"], False, setting["is_norm_signal"],
setting["is_reverse_trade"], model_type, setting["day_wait"],
{}, setting["max_pip_barrier"], setting["reward_risk_ratio"], setting["max_order_size"],
setting["is_close_pos_trade"], currency_weights, 100, 2000)
target = sharpe * max(1, (equity_curve[-1] / 5000))
setting["sharpe"] = sharpe
setting["target"] = target
setting["samples"] = samples
'''
params, target, sharpe, samples, mean_sharpe = bayesian_optimization_output(setting, select_pair, is_relavent_currency,
is_hedge, is_low_barrier, False, model_type, cursor)
if target > setting["target"] or (setting["samples"] < 10 and target > 0):
all_sharpes.append(target)
print ("Found Better")
new_setting = {
"is_low_barrier" : False,
"is_any_barrier" : False,
"is_hedge" : is_hedge,
"decay_frac" : params['decay_frac'],
"is_relavent_currency" : is_relavent_currency,
"adjust_factor" : params['adjust_factor'],
"max_barrier" : params['max_barrier'],
"auc_barrier" : params['auc_barrier'],
"day_wait" : params['day_wait'],
"is_norm_signal" : params['is_norm_signal'] > 0.5,
"is_max_volatility" : params['is_max_volatility'] > 0.5,
"currency_pair" : select_pair,
"max_pip_barrier" : params['max_pip_barrier'],
"reward_risk_ratio" : params['reward_risk_ratio'],
"max_order_size" : params['max_order_size'],
"min_trade_volatility" : params['min_trade_volatility'],
"samples" : samples,
"sharpe" : sharpe,
"target" : target,
"mean_sharpe" : mean_sharpe,
}
currency_weights = {}
for currency in ['AUD', 'GBP', 'CAD', 'EUR', 'NZD', 'CHF', 'USD', 'JPY']:
if currency in params:
currency_weights[currency] = params[currency]
if len(currency_weights) == 0 and "currency_weights" in setting:
currency_weights = setting["currency_weights"]
new_setting['currency_weights'] = currency_weights
settings.append(new_setting)
else:
all_sharpes.append(setting["sharpe"])
settings.append(setting)
print ("Mean Sharpe Overall", np.mean(all_sharpes))
final_fitted_map[select_pair] = settings
final_fitted_map[select_pair+"_sample_num"] = sample_num
pickle.dump(final_fitted_map, open(str(model_type) + "_final_fitted_map_currency.pickle", "wb"))
cursor.close()
'''
plt.title("Best Adjust: " + str(best_adjust_factor))
plt.plot(best_equity_curve)
plt.show()
'''
default_stops = {False : [], True : []}
default_auc_mults = []
default_norm_signals = []
default_wait_days = {False : [], True : []}
for pair in final_fitted_map:
if pair not in all_currency_pairs:
continue
print (pair)
for setting in final_fitted_map[pair]:
default_auc_mults.append(setting["auc_barrier"])
default_norm_signals.append(setting["is_norm_signal"])
default_stops[setting["is_hedge"]].append(setting["adjust_factor"])
default_wait_days[setting["is_hedge"]].append(setting["day_wait"])
print (len(default_norm_signals))
final_fitted_map["default_day_wait"] = {False : np.mean(default_wait_days[False]), True : np.mean(default_wait_days[True])}
final_fitted_map["default_adjust"] = {False : np.mean(default_stops[False]), True : np.mean(default_stops[True])}
final_fitted_map["default_auc_mult"] = np.mean(default_auc_mults)
final_fitted_map["default_is_norm_signal"] = sum(default_norm_signals) > sum([(v == False) for v in default_norm_signals])
print (final_fitted_map)
print ("Default Day Wait: ", final_fitted_map["default_day_wait"])
print ("Default Adjust: ", final_fitted_map["default_adjust"])
print ("Default AUC Mult: ", final_fitted_map["default_auc_mult"])
print ("Default default_is_norm_signal: ", final_fitted_map["default_is_norm_signal"])
'''
('Mean Sharpe Overall', 4748.545318017648)
('Mean Return Overall', 4748.545318017648)
'''
|
[
""
] | |
b384befc386278c815f59f3dda5b8684f78b7cb5
|
e2a5c001a4e18e61b1ae04db6d4da45c6a2af5ee
|
/Src/modulo_reduction.py
|
1f68021bfc0ec58518ca2b8490c4e9112b67a6fc
|
[
"MIT"
] |
permissive
|
rstodden/verbal-multiword-expression-st11
|
77c1c1920a18e98becabb42288054bb894d2178e
|
768c453fb0d28c3e237c1b22522a319c1b157c1f
|
refs/heads/master
| 2020-03-25T21:27:26.983110
| 2018-08-10T19:56:32
| 2018-08-10T19:56:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,635
|
py
|
from __future__ import division
from scipy.sparse import csr_matrix, save_npz
import numpy
import math
from collections import Counter
from param import XPParams
class ModuloReduction():
@staticmethod
def get_expected_cooc(total, count_feature, count_vector):
""" return expected value
"""
if total == 0 or count_feature == 0 or count_vector == 0:
return 0
return (count_vector*count_feature)/(total*total)
@staticmethod
def ppmi(count_cooc, expected_cooc):
""" return pointwise mutual information.
If count of observed or expected co-ooccurrences is 0 return 0.
"""
if count_cooc == 0 or expected_cooc == 0:
ppmi = 0
else:
ppmi = max(0, math.log(count_cooc/expected_cooc))
return ppmi
@staticmethod
def reduce_matrix_with_modulo(X, n_new_features, folder=None, training=False, calc_ppmi=False, new_y_number=None, twoD_feats=False):
data = [] # contains all values
rows = [] # contains all rownumbers of the data
cols = [] # contains all colnumbers of the data
#count_feature_dict = Counter()
#count_vector_dict = Counter()
count_feature_vec = numpy.zeros(n_new_features)
#count_vector_vec = []
        if twoD_feats and new_y_number is not None:
#output_matrix = numpy.zeros((X.shape[0], 1))
output_matrix = numpy.empty((X.shape[0], n_new_features, new_y_number))
for row_nr, vector in enumerate(X):
new_data = numpy.zeros((n_new_features, new_y_number))
for col_nr, value in zip(vector.indices, vector.data):
if value != 0:
new_dimension_1 = abs(col_nr % n_new_features)
new_dimension_2 = abs((col_nr * 7) % n_new_features)
new_dimension_3 = abs((col_nr * 17) % n_new_features)
new_dimension_4 = abs((col_nr * 37) % n_new_features)
new_dimension_5 = abs((col_nr * 47) % n_new_features)
new_y = abs(col_nr % new_y_number)
new_data[new_dimension_1, new_y] += value
new_data[new_dimension_2, new_y] += value
new_data[new_dimension_3, new_y] += value
new_data[new_dimension_4, new_y] += value
new_data[new_dimension_5, new_y] += value
#print(len(new_data[numpy.nonzero(new_data)]))
if n_new_features > 500:
new_dimension_6 = abs((col_nr * 67) % n_new_features)
new_dimension_7 = abs((col_nr * 97) % n_new_features)
new_dimension_8 = abs((col_nr * 107) % n_new_features)
new_dimension_9 = abs((col_nr * 127) % n_new_features)
new_dimension_10 = abs((col_nr * 137) % n_new_features)
new_data[new_dimension_6, new_y] += value
new_data[new_dimension_7, new_y] += value
new_data[new_dimension_8, new_y] += value
new_data[new_dimension_9, new_y] += value
new_data[new_dimension_10, new_y] += value
output_matrix[row_nr] = new_data
#print(output_matrix)
#output_matrix = csr_matrix(output_matrix)
else:
for row_nr, vector in enumerate(X):
new_data = numpy.zeros(n_new_features)
for col_nr, value in zip(vector.indices, vector.data):
#print(value)
if value != 0:
new_dimension_1 = abs(col_nr % n_new_features)
new_dimension_2 = abs((col_nr*7) % n_new_features)
new_dimension_3 = abs((col_nr * 17) % n_new_features)
new_dimension_4 = abs((col_nr * 37) % n_new_features)
new_dimension_5 = abs((col_nr * 47) % n_new_features)
new_data[new_dimension_1] += value
new_data[new_dimension_2] += value
new_data[new_dimension_3] += value
new_data[new_dimension_4] += value
new_data[new_dimension_5] += value
if n_new_features > 500:
new_dimension_6 = abs(col_nr*67 % n_new_features)
new_dimension_7 = abs((col_nr*97) % n_new_features)
new_dimension_8 = abs((col_nr * 107) % n_new_features)
new_dimension_9 = abs((col_nr * 127) % n_new_features)
new_dimension_10 = abs((col_nr * 137) % n_new_features)
new_data[new_dimension_6] += value
new_data[new_dimension_7] += value
new_data[new_dimension_8] += value
new_data[new_dimension_9] += value
new_data[new_dimension_10] += value
for nr_value, value in enumerate(new_data):
#print(value)
# add values to new_matrix if not zero
if value != 0:
data.append(value) # add new value if not 0
rows.append(row_nr) # add row number of current vector
cols.append(nr_value) # add index of column/feature
count_feature_vec[nr_value] += value # number 2
#count_vector_vec[row_nr] = sum(count_feature_vec)
output_matrix = csr_matrix((data, (rows, cols)), shape=(X.shape[0], n_new_features))
#print("mod", output_matrix.toarray())
if calc_ppmi:
#ppmi
if training:
                dim = n_new_features
output_matrix = output_matrix.toarray()
vectorOfSumAllVectors = numpy.zeros(dim)
sumAllComponents = 0
vecWeighted = numpy.zeros(shape=(output_matrix.shape[0], output_matrix.shape[1]))
for vector in output_matrix:
for i in range(dim):
vectorOfSumAllVectors[i] += vector[i]
for i in range(dim):
sumAllComponents += vectorOfSumAllVectors[i]
#print("sumAllComponents", sumAllComponents)
for n, vector in enumerate(output_matrix):
#if training == False:
#print(vector)
sumThisRow = sum(vector)
for i in range(dim):
pmi = 0
if vector[i] != 0:
pmi = max(0,math.log(vector[i]*sumAllComponents)-math.log(sumThisRow*vectorOfSumAllVectors[i]))
vecWeighted[n,i] = pmi
#if not training:
#print("vectorOfSumAllVectors", vectorOfSumAllVectors[i])
#print("sumThisRow", sumThisRow, "vector", vector[i])
#print("ppmi", vecWeighted)
XPParams.vectorOfSumAllVectors = vectorOfSumAllVectors
XPParams.sumAllComponents = sumAllComponents
else:
dim = n_new_features
output_matrix = output_matrix.toarray()
vectorOfSumAllVectors = XPParams.vectorOfSumAllVectors
sumAllComponents = XPParams.sumAllComponents
vecWeighted = numpy.zeros(shape=(output_matrix.shape[0], output_matrix.shape[1]))
for n, vector in enumerate(output_matrix):
# if training == False:
# print(vector)
sumThisRow = sum(vector)
for i in range(dim):
pmi = 0
if vector[i] != 0:
pmi = max(0, math.log(vector[i] * sumAllComponents) - math.log(
sumThisRow * vectorOfSumAllVectors[i]))
#print(pmi)
vecWeighted[n, i] = pmi
# if not training:
# print("vectorOfSumAllVectors", vectorOfSumAllVectors[i])
# print("sumThisRow", sumThisRow, "vector", vector[i])
#print("ppmi", vecWeighted)
return csr_matrix(vecWeighted)
else:
return output_matrix
# data_list_pmi = []
# rows_list_pmi = []
# cols_list_pmi = []
# """for row_nr, vector in enumerate(output_matrix):
# count_vector = sum(vector.data)
# for col_nr, value in zip(vector.indices, vector.data):
# expected_cooc = get_expected_cooc(sum(output_matrix.data), count_feature_dict[col_nr], count_vector_dict[row_nr])
# ppmi_value = ppmi(value, expected_cooc)
# data_list_pmi.append(ppmi_value) # add new value if not 0
# rows_list_pmi.append(row_nr) # add row number of current vector
# cols_list_pmi.append(col_nr) # add index of column/feature"""
#
# total = sum(output_matrix.data) # number 1
# output_matrix = output_matrix.toarray()
# for row_nr, vector in enumerate(output_matrix):
# count_vector = sum(vector)
#
# for col_nr, value in enumerate(vector):
# pmi=0
# if value != 0:
# pmi = max(0,math.log(value * total)-math.log(count_vector * count_feature_vec[col_nr]))
#
# # print pmi
#
# #expected_cooc = get_expected_cooc(sum(output_matrix.data), count_feature_dict[col_nr], count_vector_dict[row_nr])
# #ppmi_value = ppmi(value, expected_cooc)
# data_list_pmi.append(pmi) # add new value if not 0
# rows_list_pmi.append(row_nr) # add row number of current vector
# cols_list_pmi.append(col_nr) # add index of column/feature"""
# #print('vector', vector, row_nr, count_vector)
#
# output_matrix = csr_matrix((data_list_pmi, (rows_list_pmi, cols_list_pmi)), shape=(X.shape[0], n_new_features))
# return output_matrix
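# Usage sketch (hedged, not part of the original module): hashing a wide sparse
# matrix down to 64 columns with the modulo trick above. The input is synthetic;
# scipy must be installed and param.XPParams importable for the module to load.
#
#     from scipy.sparse import random as sparse_random
#     X_demo = sparse_random(4, 1000, density=0.01, format="csr")
#     X_small = ModuloReduction.reduce_matrix_with_modulo(X_demo, n_new_features=64)
#     print(X_small.shape)  # (4, 64); each original column feeds 5 hashed columns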
|
[
"regina.stodden@uni-duesseldorf.de"
] |
regina.stodden@uni-duesseldorf.de
|
fdac9cb8328b1522e3c118d25ef7845b4fcb9edd
|
9fe9a586e9c0ae659e2dfe091d4ad0795d92fb7e
|
/backend/newsnetworks/views.py
|
a4c61e5455c0b73a2a6e65314de6ad42162a62dc
|
[] |
no_license
|
JaeInK/Deep-Learning-Projects_Django-API
|
bd2ed34bd2212b83411c2b4d7b30f1731ecc38c3
|
87c98f8ef32594e080ddfb3ca3dc3be6fa74fe77
|
refs/heads/master
| 2020-03-09T19:34:58.323235
| 2018-04-10T16:18:38
| 2018-04-10T16:18:38
| 128,961,315
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,473
|
py
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from backend.settings import NEWS_DIR
# from .scripts import newsnetworks_web
import requests
import pandas as pd
import networkx as nx
import os
import sys
import json
import time
sys.path.append(os.path.join(NEWS_DIR,'scripts/'))
NET_DIC = {'child' : '소년법', 'coin': '가상화폐', 'clear': '적폐청산', 'cortax': '법인세', 'estate': '부동산', 'fulltime': '정규직', 'korea': '남북관계', 'macro': '거시경제', 'nuclear': '원자력발전소', 'wage': '최저임금'}
# Create your views here.
# @csrf_exempt
# def networks_backup(request):
# received_data = json.loads(request.body.decode('utf-8'))
# dataset = received_data['dataset']
# edge_th = received_data['edge_threshold']
# deg_th = received_data['degree_threshold']
# max_sub_flag = received_data['max_subgraph']
# net_name = NET_DIC[dataset]
#
# pickle_path = os.path.join(pwd + '/data/' + str(dataset) + '_extracted_with_polarity.p')
# st = time.time()
# df = pd.read_pickle(pickle_path)
# print("read_pickle opt elapsed: {} secons".format(time.time()-st))
#
# G = nnw.NewsNetwork()
# st = time.time()
# G.read_file(df)
# print("read_file opt elapsed: {} secons".format(time.time()-st))
# st = time.time()
# G.pre_processing(edge_threshold = edge_th, deg_threshold = deg_th)
# print("pre_processing opt elapsed: {} secons".format(time.time()-st))
# st = time.time()
# if max_sub_flag:
# max_sub = G.subgraph(max_subgraph = True)
# G.set_node_link_attrs(max_sub)
#
# else:
# G.set_node_link_attrs(edge_bold = 10)
# print("set_node_link_attrs opt elapsed: {} secons".format(time.time()-st))
# st = time.time()
# G_dict = G.get_node_link_data()
# print("get_node_link_data opt elapsed: {} secons".format(time.time()-st))
# st = time.time()
# network_info = G.get_network_attrs(net_name)
# print("get_network_attrs opt elapsed: {} secons".format(time.time()-st))
# G_dict.update(network_info)
#
# return JsonResponse(G_dict)
@csrf_exempt
def networks(request):
received_data = json.loads(request.body.decode('utf-8'))
dataset = received_data['dataset']
edge_th = received_data['edge_threshold']
deg_th = received_data['degree_threshold']
max_sub_flag = received_data['max_subgraph']
net_name = NET_DIC[dataset]
pickle_path = os.path.join('newsnetworks/data/' + str(dataset) + '.pickle')
st = time.time()
G = pd.read_pickle(pickle_path)
print("read_pickle opt elapsed: {} secons".format(time.time()-st))
G.pre_processing(edge_threshold = edge_th, deg_threshold = deg_th)
print("pre_processing opt elapsed: {} secons".format(time.time()-st))
st = time.time()
if max_sub_flag:
max_sub = G.subgraph(max_subgraph = True)
G.set_node_link_attrs(max_sub)
else:
G.set_node_link_attrs(edge_bold = 10)
print("set_node_link_attrs opt elapsed: {} secons".format(time.time()-st))
st = time.time()
G_dict = G.get_node_link_data()
print("get_node_link_data opt elapsed: {} secons".format(time.time()-st))
st = time.time()
network_info = G.get_network_attrs(net_name)
print("get_network_attrs opt elapsed: {} secons".format(time.time()-st))
G_dict.update(network_info)
return JsonResponse(G_dict)
|
[
"gjames5809@gmail.com"
] |
gjames5809@gmail.com
|
cf9c97731069aeb242a91dad70d726c9a8f4caba
|
e407cd1e873ef1a626a592ac22901a300f5be8f4
|
/.pycharm_helpers/python_stubs/-1840357896/_operator.py
|
6fba24788ffd6ca8c444f37cd0c640922bb2343e
|
[] |
no_license
|
rpesce/oktetoProject
|
65f77cfd2d92e6372f32e6e3dbfb8ce038d1b45d
|
7dbddf3d85b040755b15f4e647894353d4e5a3c5
|
refs/heads/master
| 2023-03-31T12:03:49.419915
| 2020-05-13T19:37:49
| 2020-05-13T19:37:49
| 263,726,526
| 0
| 0
| null | 2021-03-20T03:57:35
| 2020-05-13T19:38:54
|
Python
|
UTF-8
|
Python
| false
| false
| 12,775
|
py
|
# encoding: utf-8
# module _operator
# from (built-in)
# by generator 1.147
"""
Operator interface.
This module exports a set of functions implemented in C corresponding
to the intrinsic operators of Python. For example, operator.add(x, y)
is equivalent to the expression x+y. The function names are those
used for special methods; variants without leading and trailing
'__' are also provided for convenience.
"""
# no imports
# functions
def abs(a): # real signature unknown; restored from __doc__
""" Same as abs(a). """
pass
def add(*args, **kwargs): # real signature unknown
""" Same as a + b. """
pass
def and_(*args, **kwargs): # real signature unknown
""" Same as a & b. """
pass
def concat(*args, **kwargs): # real signature unknown
""" Same as a + b, for a and b sequences. """
pass
def contains(*args, **kwargs): # real signature unknown
""" Same as b in a (note reversed operands). """
pass
def countOf(*args, **kwargs): # real signature unknown
""" Return the number of times b occurs in a. """
pass
def delitem(*args, **kwargs): # real signature unknown
""" Same as del a[b]. """
pass
def eq(*args, **kwargs): # real signature unknown
""" Same as a == b. """
pass
def floordiv(*args, **kwargs): # real signature unknown
""" Same as a // b. """
pass
def ge(*args, **kwargs): # real signature unknown
""" Same as a >= b. """
pass
def getitem(*args, **kwargs): # real signature unknown
""" Same as a[b]. """
pass
def gt(*args, **kwargs): # real signature unknown
""" Same as a > b. """
pass
def iadd(*args, **kwargs): # real signature unknown
""" Same as a += b. """
pass
def iand(*args, **kwargs): # real signature unknown
""" Same as a &= b. """
pass
def iconcat(*args, **kwargs): # real signature unknown
""" Same as a += b, for a and b sequences. """
pass
def ifloordiv(*args, **kwargs): # real signature unknown
""" Same as a //= b. """
pass
def ilshift(*args, **kwargs): # real signature unknown
""" Same as a <<= b. """
pass
def imatmul(*args, **kwargs): # real signature unknown
""" Same as a @= b. """
pass
def imod(*args, **kwargs): # real signature unknown
""" Same as a %= b. """
pass
def imul(*args, **kwargs): # real signature unknown
""" Same as a *= b. """
pass
def index(*args, **kwargs): # real signature unknown
""" Same as a.__index__() """
pass
def indexOf(*args, **kwargs): # real signature unknown
""" Return the first index of b in a. """
pass
def inv(*args, **kwargs): # real signature unknown
""" Same as ~a. """
pass
def invert(*args, **kwargs): # real signature unknown
""" Same as ~a. """
pass
def ior(*args, **kwargs): # real signature unknown
""" Same as a |= b. """
pass
def ipow(*args, **kwargs): # real signature unknown
""" Same as a **= b. """
pass
def irshift(*args, **kwargs): # real signature unknown
""" Same as a >>= b. """
pass
def isub(*args, **kwargs): # real signature unknown
""" Same as a -= b. """
pass
def is_(*args, **kwargs): # real signature unknown
""" Same as a is b. """
pass
def is_not(*args, **kwargs): # real signature unknown
""" Same as a is not b. """
pass
def itruediv(*args, **kwargs): # real signature unknown
""" Same as a /= b. """
pass
def ixor(*args, **kwargs): # real signature unknown
""" Same as a ^= b. """
pass
def le(*args, **kwargs): # real signature unknown
""" Same as a <= b. """
pass
def length_hint(*args, **kwargs): # real signature unknown
"""
Return an estimate of the number of items in obj.
This is useful for presizing containers when building from an iterable.
If the object supports len(), the result will be exact.
Otherwise, it may over- or under-estimate by an arbitrary amount.
The result will be an integer >= 0.
"""
pass
def lshift(*args, **kwargs): # real signature unknown
""" Same as a << b. """
pass
def lt(*args, **kwargs): # real signature unknown
""" Same as a < b. """
pass
def matmul(*args, **kwargs): # real signature unknown
""" Same as a @ b. """
pass
def mod(*args, **kwargs): # real signature unknown
""" Same as a % b. """
pass
def mul(*args, **kwargs): # real signature unknown
""" Same as a * b. """
pass
def ne(*args, **kwargs): # real signature unknown
""" Same as a != b. """
pass
def neg(*args, **kwargs): # real signature unknown
""" Same as -a. """
pass
def not_(*args, **kwargs): # real signature unknown
""" Same as not a. """
pass
def or_(*args, **kwargs): # real signature unknown
""" Same as a | b. """
pass
def pos(*args, **kwargs): # real signature unknown
""" Same as +a. """
pass
def pow(*args, **kwargs): # real signature unknown
""" Same as a ** b. """
pass
def rshift(*args, **kwargs): # real signature unknown
""" Same as a >> b. """
pass
def setitem(*args, **kwargs): # real signature unknown
""" Same as a[b] = c. """
pass
def sub(*args, **kwargs): # real signature unknown
""" Same as a - b. """
pass
def truediv(*args, **kwargs): # real signature unknown
""" Same as a / b. """
pass
def truth(*args, **kwargs): # real signature unknown
""" Return True if a is true, False otherwise. """
pass
def xor(*args, **kwargs): # real signature unknown
""" Same as a ^ b. """
pass
def _compare_digest(*args, **kwargs): # real signature unknown
"""
Return 'a == b'.
This function uses an approach designed to prevent
timing analysis, making it appropriate for cryptography.
a and b must both be of the same type: either str (ASCII only),
or any bytes-like object.
Note: If a and b are of different lengths, or if an error occurs,
a timing attack could theoretically reveal information about the
types and lengths of a and b--but not their values.
"""
pass
# classes
class attrgetter(object):
"""
attrgetter(attr, ...) --> attrgetter object
Return a callable object that fetches the given attribute(s) from its operand.
After f = attrgetter('name'), the call f(r) returns r.name.
After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).
After h = attrgetter('name.first', 'name.last'), the call h(r) returns
(r.name.first, r.name.last).
"""
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, attr, *more): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
class itemgetter(object):
"""
itemgetter(item, ...) --> itemgetter object
Return a callable object that fetches the given item(s) from its operand.
After f = itemgetter(2), the call f(r) returns r[2].
After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3])
"""
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, item, *more): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
class methodcaller(object):
"""
methodcaller(name, ...) --> methodcaller object
Return a callable object that calls the given method on its operand.
After f = methodcaller('name'), the call f(r) returns r.name().
After g = methodcaller('name', 'date', foo=1), the call g(r) returns
r.name('date', foo=1).
"""
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, name, *more): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7f7005ada430>, 'find_spec': <classmethod object at 0x7f7005ada460>, 'find_module': <classmethod object at 0x7f7005ada490>, 'create_module': <classmethod object at 0x7f7005ada4c0>, 'exec_module': <classmethod object at 0x7f7005ada4f0>, 'get_code': <classmethod object at 0x7f7005ada580>, 'get_source': <classmethod object at 0x7f7005ada610>, 'is_package': <classmethod object at 0x7f7005ada6a0>, 'load_module': <classmethod object at 0x7f7005ada6d0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='_operator', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
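# Illustration (hedged, not part of the generated stub): the attrgetter /
# itemgetter / methodcaller docstrings above can be exercised against the real
# operator module; the sample data below is made up.
#
#     import operator
#     from collections import namedtuple
#     Point = namedtuple("Point", "x y")
#     pts = [Point(3, 1), Point(1, 2)]
#     get_x = operator.attrgetter("x")        # f(r) returns r.x
#     print(sorted(pts, key=get_x))           # sorted by the x attribute
#     pick = operator.itemgetter(1, 0)        # g(r) returns (r[1], r[0])
#     print(pick(["a", "b", "c"]))            # ('b', 'a')
#     upper = operator.methodcaller("upper")  # h(s) returns s.upper()
#     print(upper("chip"))                    # 'CHIP'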
|
[
"robertopescee@hotmail.com"
] |
robertopescee@hotmail.com
|
df9d5faf1848f56880a1bdaf4ed992a0c0b9543e
|
6aff8daa41526a8430267b1aecdfe0741b3115ac
|
/2-ModelComplexByArenaDispatcher/dispatcher/tests/api_samples.py
|
1b33c0e4678802fbcfe0626e6d956f02bcc2a632
|
[] |
no_license
|
LAD-PUCRS/Arena_SARS-BCG
|
91a299365fdacdb3d0d8785b7ecbfebb0b138618
|
5fda0ef67608cc462dd1acda375f112dc472b04b
|
refs/heads/main
| 2023-07-19T11:59:11.561216
| 2021-08-24T23:06:50
| 2021-08-24T23:06:50
| 397,739,185
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,936
|
py
|
import docker
import os
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# print("Reminder: Disabled Warnings")
# https://docs.docker.com/engine/api/sdk/examples/
# https://docker-py.readthedocs.io/en/stable/
# Instantiate a connection
# timeout (int) – Default timeout for API calls, in seconds.
# ssl_version (int) – A valid SSL version.
# assert_hostname (bool) – Verify the hostname of the server.
# environment (dict) – The environment to read environment variables from. Default: the value of os.environ
# credstore_env (dict) – Override environment variables when calling the credential store process.
# client = docker.from_env(timeout=5)
# client = docker.DockerClient(base_url='unix://var/run/docker.sock')
# for container in client.containers.list():
# print(help(container))
# get_archive(path, chunk_size=2097152, encode_stream=False)
# Retrieve a file or folder from the container in the form of a tar archive.
# https://docker-py.readthedocs.io/en/stable/containers.html
# Example: Dispatches 3 Containers and waits for all of them to complete their execution.
# def prepare_unsafe_host(address):
# client = docker.DockerClient(base_url='tcp://'+address+':2375')
# client.images.pull("alpine:latest") # Same as pull('alpine',tag='latest')
# return client
client_cert = "./client-cert.pem"
client_key = "./client-key.pem"
def connect_host(address,port="2375",cert=None,key=None):
tls_config=None
if cert != None and key != None:
tls_config = docker.tls.TLSConfig(client_cert=(cert, key))
if h == "127.0.0.1" or h == "localhost":
return docker.DockerClient(base_url='unix://var/run/docker.sock',tls=tls_config)
return docker.DockerClient(base_url='tcp://'+address+":"+port,tls=tls_config)
def prepare_host(host):
return 0
def send_job():
client.containers.run()
# auto_remove=
hosts = ["127.0.0.1"]
# hosts = [{'address':'10.0.0.2','client':None}]
# Lifecycle:
# Run Job
# Wait for it to complete
# Export Logs
# Export Zip
# Delete container
# mark as complete
job_id = 0
if not os.path.exists('./output'):
os.makedirs('./output')
for h in hosts:
# h['client'] = prepare_safe_host(h['address'],client_cert,client_key)
# print(h['client'])
client = connect_host(h)
# print(client.images.list())
print("Create Container")
container = client.containers.run("feliperubin/sample_app","python3 /app/job.py %d" % (job_id),detach=True,stdout=True,stderr=True)
print("Wait to Finish Execution")
while container.status != "exited":
time.sleep(1)
container.reload()
print("Store Logs")
with open('./output/job_%s_logs.txt' % (job_id),'w') as f:
f.write(container.logs().decode('utf-8'))
print("Downloading Data")
bits, stat = container.get_archive("/app/output_%s.txt" % (job_id))
with open('./output/job_%s_data.tar' % (job_id),'wb') as f:
for chunk in bits:
f.write(chunk)
print("Deleting Container")
container.remove()
job_id+=1
# container.logs()
# complete = []
# incomplete = []
# for i in range(0,3):
# container = client.containers.run("alpine",["echo","container %d completed" % (i)],detach=True)
# incomplete.append(container)
# while len(incomplete) > 0:
# for container in incomplete:
# if client.containers.get(container.id).status == "exited":
# complete.append(container)
# incomplete.remove(container)
# print("Container %s Exited!" % (container.id))
# print("Incomplete List Length: ",len(incomplete))
# time.sleep(1)
# image
# image (str) – The image to run.
# command (str or list) – The command to run in the container.
# auto_remove (bool) – enable auto-removal of the container on daemon side when the container’s process exits.
# cpu_count (int) – Number of usable CPUs (Windows only).
# cpu_percent (int) – Usable percentage of the available CPUs (Windows only).
# cpu_period (int) – The length of a CPU period in microseconds.
# cpuset_cpus (str) – CPUs in which to allow execution (0-3, 0,1).
# cpuset_mems (str) – Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
# detach (bool) – Run container in the background and return a Container object.
# device_cgroup_rules (list) – A list of cgroup rules to apply to the container.
# dns (list) – Set custom DNS servers.
# dns_opt (list) – Additional options to be added to the container’s resolv.conf file.
# dns_search (list) – DNS search domains.
# domainname (str or list) – Set custom DNS search domains.
# entrypoint (str or list) – The entrypoint for the container.
# environment (dict or list) – Environment variables to set inside the container, as a dictionary or a list of strings in the format ["SOMEVARIABLE=xxx"].
# healthcheck (dict) – Specify a test to perform to check that the container is healthy.
# hostname (str) – Optional hostname for the container.
# log_config (LogConfig) – Logging configuration.
# mac_address (str) – MAC address to assign to the container.
# mem_limit (int or str) – Memory limit. Accepts float values (which represent the memory limit of the created container in bytes) or a string with a units identification char (100000b, 1000k, 128m, 1g). If a string is specified without a units character, bytes are assumed as an intended unit.
# mem_reservation (int or str) – Memory soft limit.
# mem_swappiness (int) – Tune a container’s memory swappiness behavior. Accepts number between 0 and 100.
# memswap_limit (str or int) – Maximum amount of memory + swap a container is allowed to consume.
# name (str) – The name for this container.
# nano_cpus (int) – CPU quota in units of 1e-9 CPUs.
# remove (bool) – Remove the container when it has finished running. Default: False.
# stdout (bool) – Return logs from STDOUT when detach=False. Default: True.
# stderr (bool) – Return logs from STDERR when detach=False. Default: False.
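# Hedged sketch (not from the original script): a minimal dispatch using a few
# of the containers.run() options documented above. The image name and limits
# are placeholders, and a reachable local Docker daemon is assumed.
#
#     sketch_client = docker.from_env()
#     sketch_container = sketch_client.containers.run(
#         "alpine",                         # image
#         ["echo", "resource-limited job"],
#         detach=True,                      # return a Container object immediately
#         mem_limit="128m",                 # hard memory cap
#         cpuset_cpus="0",                  # pin execution to CPU 0
#         name="dispatcher-demo")
#     sketch_container.wait()
#     print(sketch_container.logs().decode("utf-8"))
#     sketch_container.remove()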
|
[
"rafael.belle@pucrs.br"
] |
rafael.belle@pucrs.br
|
d4b702f957ed4344c9bcab805a5053cb18e16353
|
e62dd824081de0f6b7b35a2731184d8b2c872b7d
|
/work/wallpaper/girl_report_proc.py
|
049eb62a0e6e7d81e1fa16d462a55d3130a22a06
|
[] |
no_license
|
coblan/py2
|
d0231211452e8a27e41e6a85ab736538d0a66113
|
ecf743e027e9f15925e43f05c0b8a86bb88946db
|
refs/heads/master
| 2021-01-24T08:28:41.288136
| 2018-06-07T17:54:05
| 2018-06-07T17:54:05
| 69,465,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
import json
import re
with open('two_month.json') as f:
js = json.load(f)
for img in js:
if img.get('url'):
mt = re.search('(.*)(\.\w+$)',img.get('url'))
img['thumb'] = mt.group(1)+'-305x543'+mt.group(2)
with open('two_month_normed.json','w') as f2:
json.dump(js,f2)
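# Worked example of the rewrite above with a made-up URL:
# 'http://example.com/img/cover.jpg' becomes 'http://example.com/img/cover-305x543.jpg'
# (regex group 1 is the base path, group 2 the '.jpg' extension).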
|
[
"coblan@163.com"
] |
coblan@163.com
|
06a67003d5b6ec09b973f26143121a05d9f0261f
|
d5468f9cc80f2de38e871e232fa85a1e9b61e9f5
|
/python練習/StudentCard_Composition/StuDateMain.py
|
ab50a8486f01eae88b438dfb0a7df0fd3c333f36
|
[] |
no_license
|
SamSSY/ObjectAndClass
|
1cf46368c6301b61eb0e2d942b92a7d191dc6d90
|
13a3db7349df38a77806009ddfbb1e748f4ee453
|
refs/heads/master
| 2023-06-02T21:07:25.463620
| 2021-06-23T13:14:55
| 2021-06-23T13:14:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
from Date import Date
from Student import Student
s1 = Student("John", Date(6, 1, 1999), 90)
s2 = Student("Marry", Date(10, 8, 1997), 80)
name = input()
month = int(input())
day = int(input())
year = int(input())
s1.setName(name)
s2.setDate(Date(month, day, year))
s1.toString()
s2.toString()
|
[
"45253893+hoopizs1452@users.noreply.github.com"
] |
45253893+hoopizs1452@users.noreply.github.com
|
b8746dc8705106040ad856d569a30ce2862d9e49
|
f58610d2b189668574058ccf54071e65b275efe7
|
/step4_train_submissions.py
|
6b430c7e6c969352b120c38a8578ad23f8216632
|
[] |
no_license
|
CyranoChen/deepluna
|
8a69419166ea71409fa9acc93536a3043231ebc7
|
93c8377910ff329f5ebb9526a5a2c7d2a95c4498
|
refs/heads/master
| 2021-01-01T06:56:06.610263
| 2017-07-18T03:59:15
| 2017-07-18T03:59:15
| 97,550,272
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,312
|
py
|
import settings
import helpers
import sys
import os
import glob
import pandas
import numpy
import SimpleITK
import ntpath
import math
P_TH = 0.3
MAX_NODULE_COUNT = 10000
MIN_DISTANCE = 18
def get_distance(x1,y1,z1,x2,y2,z2):
return math.sqrt((x1-x2)**2+(y1-y2)**2+(z1-z2)**2)
#change world coordination to voxel space
def voxel_to_world(voxel, origin, spacing):
voxel = voxel * spacing
voxel = voxel + origin
return voxel
def convert_csv_coord_to_world():
csv_path = settings.TIANCHI_NODULE_DETECTION_DIR + "predictions10_tianchi_val_fs_final/"
df = pandas.read_csv(csv_path+"all_predictions_candidates_falsepos.csv")
df_all_mhd = pandas.read_csv(settings.TIANCHI_RAW_SRC_DIR + "csv/val/all_mhd.csv")
print("df_all_mhd count: ", len(df_all_mhd))
rows = []
for row_index, row in df.iterrows():
patient_id = row["patient_id"]
print(patient_id)
mhd = df_all_mhd[df_all_mhd["patient_id"] == patient_id]
row["coord_x"] = float(row["coord_x"]*mhd["shape_x"]*mhd["spacing_x"])+float(mhd["origin_x"])
row["coord_y"] = float(row["coord_y"]*mhd["shape_y"]*mhd["spacing_y"])+float(mhd["origin_y"])
row["coord_z"] = float(row["coord_z"]*mhd["shape_z"]*mhd["spacing_z"])+float(mhd["origin_z"])
row["diameter_mm"] = round(float(row["diameter_mm"]) / float(mhd["spacing_x"]),4)
rows.append(row)
df = pandas.DataFrame(rows, columns=["patient_id", "anno_index", "coord_x", "coord_y", "coord_z", "diameter", "nodule_chance", "diameter_mm"])
df.to_csv(csv_path+"all_predictions_candidates_falsepos_world_coord.csv", index=False)
def convert_nodules_coord_to_world(mhd, dir_name, magnification=1):
patient_id = mhd["patient_id"]
print("patient_id: ", patient_id)
img_array_shape = [int(mhd["shape_x"]), int(mhd["shape_y"]), int(mhd["shape_z"])]
print("Img array shape: ", img_array_shape)
origin = [float(mhd["origin_x"]), float(mhd["origin_y"]), float(mhd["origin_z"])]
print("Origin (x,y,z): ", origin)
spacing = [float(mhd["spacing_x"]), float(mhd["spacing_y"]), float(mhd["spacing_z"])]
print("Spacing (x,y,z): ", spacing)
print("Direction: ", mhd["direction"])
# patient_img = helpers.load_patient_images(patient_id, settings.TIANCHI_EXTRACTED_IMAGE_DIR, "*_i.png", [])
patient_img_shape = [int(img_array_shape[0]*spacing[0]/float(magnification)),
int(img_array_shape[1]*spacing[1]/float(magnification)),
int(img_array_shape[2]*spacing[2]/float(magnification))]
print("patient_img shape: ", patient_img_shape)
csv_path = settings.TIANCHI_NODULE_DETECTION_DIR + dir_name + "/" + patient_id + ".csv"
print(csv_path)
rows = []
if not os.path.exists(csv_path):
return rows
# pred_df_list = []
pred_nodules_df = pandas.read_csv(csv_path)
# pred_df_list.append(pred_nodules_df)
# pred_nodules_df = pandas.concat(pred_df_list, ignore_index=True)
nodule_count = len(pred_nodules_df)
print("nodule_count: ", nodule_count)
nodule_count = 0
if len(pred_nodules_df) > 0:
pred_nodules_df = pred_nodules_df.sort_values(by="nodule_chance", ascending=False)
for row_index, row in pred_nodules_df.iterrows():
if float(row["nodule_chance"]) < P_TH:
continue
dia = float(row["diameter_mm"]) / spacing[0]
# if dia < 3.0:
# continue
p_x = float(row["coord_x"])*patient_img_shape[0]
p_y = float(row["coord_y"])*patient_img_shape[1]
p_z = float(row["coord_z"])*patient_img_shape[2]
x, y, z = [p_x+origin[0],p_y+origin[1],p_z+origin[2]]
# z, y, x = voxel_to_world([p_z, p_y, p_x], origin[::-1], spacing[::-1])
row["coord_z"] = z
row["coord_y"] = y
row["coord_x"] = x
row["diameter_mm"] = dia
row["patient_id"] = patient_id
rows.append(row)
nodule_count += 1
if nodule_count >= MAX_NODULE_COUNT:
break
print(nodule_count)
return rows
def convert_all_nodules_coord_to_world(csv_dir_name, magnification=1):
df_all_mhd = pandas.read_csv(settings.TIANCHI_RAW_SRC_DIR + "csv/val/all_mhd.csv")
print("df_all_mhd count: ", len(df_all_mhd))
all_predictions_world_coord_csv = []
for index, mhd in df_all_mhd.iterrows():
# if mhd["patient_id"] != "LKDS-00006":
# continue
rows = convert_nodules_coord_to_world(mhd, csv_dir_name, magnification)
all_predictions_world_coord_csv.extend(rows)
df = pandas.DataFrame(all_predictions_world_coord_csv, columns=["patient_id", "anno_index", "coord_x", "coord_y", "coord_z", "diameter", "nodule_chance", "diameter_mm"])
dst_dir = settings.TIANCHI_NODULE_DETECTION_DIR + csv_dir_name + "/"
df.to_csv(dst_dir + "all_predictions_world_coord.csv", index=False)
def combine_nodule_predictions(dirs, train_set=True, nodule_th=0.5, extensions=[""]):
print("Combining nodule predictions: ", "Train" if train_set else "Submission")
if train_set:
labels_df = pandas.read_csv("resources/val/seriesuids.csv")
else:
labels_df = pandas.read_csv("resources/test2/seriesuids.csv")
# mass_df = pandas.read_csv(settings.BASE_DIR + "masses_predictions.csv")
# mass_df.set_index(["patient_id"], inplace=True)
# meta_df = pandas.read_csv(settings.BASE_DIR + "patient_metadata.csv")
# meta_df.set_index(["patient_id"], inplace=True)
data_rows = []
for index, row in labels_df.iterrows():
patient_id = row["id"]
# mask = helpers.load_patient_images(patient_id, settings.EXTRACTED_IMAGE_DIR, "*_m.png")
print(len(data_rows), " : ", patient_id)
# if len(data_rows) > 19:
# break
# cancer_label = row["cancer"]
# mass_pred = int(mass_df.loc[patient_id]["prediction"])
# meta_row = meta_df.loc[patient_id]
# z_scale = meta_row["slice_thickness"]
# x_scale = meta_row["spacingx"]
# vendor_low = 1 if "1.2.276.0.28.3.145667764438817.42.13928" in meta_row["instance_id"] else 0
# vendor_high = 1 if "1.3.6.1.4.1.14519.5.2.1.3983.1600" in meta_row["instance_id"] else 0
# row_items = [cancer_label, 0, mass_pred, x_scale, z_scale, vendor_low, vendor_high] # mask.sum()
row_items = [] # mask.sum()
for magnification in [1, 1.5, 2]:
pred_df_list = []
for extension in extensions:
src_dir = settings.TIANCHI_NODULE_DETECTION_DIR + "predictions" + str(int(magnification * 10)) + extension + "/"
pred_nodules_df = pandas.read_csv(src_dir + patient_id + ".csv")
pred_nodules_df = pred_nodules_df[pred_nodules_df["diameter_mm"] > 0]
pred_nodules_df = pred_nodules_df[pred_nodules_df["nodule_chance"] > nodule_th]
pred_df_list.append(pred_nodules_df)
pred_nodules_df = pandas.concat(pred_df_list, ignore_index=True)
nodule_count = len(pred_nodules_df)
nodule_max = 0
nodule_median = 0
nodule_chance = 0
nodule_sum = 0
coord_z = 0
second_largest = 0
nodule_wmax = 0
count_rows = []
coord_y = 0
coord_x = 0
if len(pred_nodules_df) > 0:
                max_index = pred_nodules_df["diameter_mm"].idxmax()
                max_row = pred_nodules_df.loc[max_index]
nodule_max = round(max_row["diameter_mm"], 2)
nodule_chance = round(max_row["nodule_chance"], 2)
nodule_median = round(pred_nodules_df["diameter_mm"].median(), 2)
nodule_wmax = round(nodule_max * nodule_chance, 2)
coord_z = max_row["coord_z"]
coord_y = max_row["coord_y"]
coord_x = max_row["coord_x"]
rows = []
for row_index, row in pred_nodules_df.iterrows():
dist = helpers.get_distance(max_row, row)
if dist > 0.2:
nodule_mal = row["diameter_mm"]
if nodule_mal > second_largest:
second_largest = nodule_mal
rows.append(row)
count_rows = []
for row in rows:
ok = True
for count_row in count_rows:
dist = helpers.get_distance(count_row, row)
if dist < 0.2:
ok = False
if ok:
count_rows.append(row)
nodule_count = len(count_rows)
row_items += [nodule_max, nodule_chance, nodule_count, nodule_median, nodule_wmax, coord_z, second_largest, coord_y, coord_x]
row_items.append(patient_id)
data_rows.append(row_items)
# , "x_scale", "z_scale", "vendor_low", "vendor_high"
columns = []
for magnification in [1, 1.5, 2]:
str_mag = str(int(magnification * 10))
columns.append("nodule_max_" + str_mag) #
columns.append("nodule_chance_" + str_mag)
columns.append("nodule_count_" + str_mag)
columns.append("nodule_median_" + str_mag)
columns.append("nodule_wmax_" + str_mag)
columns.append("coord_z_" + str_mag)
columns.append("second_largest_" + str_mag)
columns.append("coord_y_" + str_mag)
columns.append("coord_x_" + str_mag)
columns.append("patient_id")
res_df = pandas.DataFrame(data_rows, columns=columns)
if not os.path.exists(settings.TIANCHI_NODULE_DETECTION_DIR + "submission/"):
os.mkdir(settings.TIANCHI_NODULE_DETECTION_DIR + "submission/")
target_path = settings.TIANCHI_NODULE_DETECTION_DIR + "submission/" + "submission" + extension + ".csv"
res_df.to_csv(target_path, index=False)
def filter_submission():
df_all_mhd = pandas.read_csv(settings.TIANCHI_RAW_SRC_DIR + "csv/test2/all_mhd.csv")
print("df_all_mhd count: ", len(df_all_mhd))
df_nodules = pandas.read_csv(settings.TIANCHI_NODULE_DETECTION_DIR + "predictions10_tianchi_test2_fs_final/all_predictions_world_coord_merge.csv")
rows = []
for index, mhd in df_all_mhd.iterrows():
patient_id = mhd["patient_id"]
print(patient_id)
df = df_nodules[df_nodules["seriesuid"] == patient_id]
count = 0
if len(df) > 0:
df = df.sort_values(by="probability", ascending=False)
for index, row in df.iterrows():
# if row["diameter_mm"] < 3:
# continue
if row["probability"] < P_TH:
continue
rows.append(row)
count +=1
if count >= 50:
break
print(count)
res_df = pandas.DataFrame(rows, columns=["seriesuid","coordX", "coordY", "coordZ","probability"])
target_path = settings.TIANCHI_NODULE_DETECTION_DIR + "predictions10_tianchi_test2_fs_final/all_predictions_world_coord_merge_filter.csv"
res_df.to_csv(target_path, index=False)
def merge_submission():
#read the annotations.csv that contains the nodules info
df_node = pandas.read_csv(settings.TIANCHI_NODULE_DETECTION_DIR + "predictions10_tianchi_test2_fs_final/all_predictions_world_coord.csv")
df_node = df_node.dropna()
seriesuids_csv = pandas.read_csv(settings.TIANCHI_RAW_SRC_DIR + "csv/test2/all_mhd.csv")
seriesuids = seriesuids_csv['patient_id'].values
x = []
y = []
z = []
p = []
user_id = []
uid_done = []
for seriesuid in seriesuids:
if seriesuid in uid_done:
continue
uid_done.append(seriesuid)
mini_node = df_node[df_node['patient_id'] == seriesuid]
print(seriesuid)
uid = mini_node["patient_id"].values
node_x = mini_node["coord_x"].values
node_y = mini_node["coord_y"].values
node_z = mini_node["coord_z"].values
probability = mini_node['nodule_chance'].values
print(len(node_x))
mat = numpy.zeros([len(node_x),len(node_x)])
for i in range(len(node_x)):
for j in range(len(node_x)):
mat[i,j]=get_distance(node_x[i],node_y[i],node_z[i],node_x[j],node_y[j],node_z[j])
if i == j:
mat[i,j] = 80
for i in range(len(node_x)):
num = 1
print("node index: ",i)
for j in range(len(node_x)):
if mat[i,j] < MIN_DISTANCE:
print("distance",mat[i,j])
num += 1
node_x[i] += node_x[j]
node_y[i] += node_y[j]
node_z[i] += node_z[j]
probability[i] += probability[j]
print(probability[i])
#if probability[j] > probability[i]:
# probability[i] = probability[j]
print('add one',j,node_x[j])
print('whole',node_x[i],num)
node_x[i] /= num
node_y[i] /= num
node_z[i] /= num
probability[i] /= num
user_id.append(uid[i])
x.append(node_x[i])
y.append(node_y[i])
z.append(node_z[i])
p.append(probability[i])
print(node_x[i])
#raw_input()
x1 = []
y1 = []
z1 = []
p1 = []
u = []
for i in range(len(x) -1):
if get_distance(x[i],y[i],z[i],x[i+1],y[i+1],z[i+1]) < 3:
x[i+1] = x[i]/2 + x[i+1]/2
y[i+1] = y[i]/2 + y[i+1]/2
z[i+1] = z[i]/2 + z[i+1]/2
p[i+1] = p[i]/2 + p[i+1]/2
else:
x1.append(x[i])
y1.append(y[i])
z1.append(z[i])
p1.append(p[i])
u.append(user_id[i])
dataframe = pandas.DataFrame({'seriesuid':u,'coordX':x1,'coordY':y1,'coordZ':z1,'probability':p1})
#dataframe = pd.DataFrame({'seriesuid':user_id,'coordX':x,'coordY':y,'coordZ':z,'probability':p})
dataframe.to_csv(settings.TIANCHI_NODULE_DETECTION_DIR + "predictions10_tianchi_test2_fs_final/all_predictions_world_coord_merge.csv",index=False)
if __name__ == "__main__":
if False:
combine_nodule_predictions(None, train_set=False, nodule_th=0.5, extensions=["_tianchi_test2_fs_final"])
if False:
convert_csv_coord_to_world()
if False:
for magnification in [1, 1.5, 2]:
convert_all_nodules_coord_to_world("predictions"+str(int(magnification*10))+"_tianchi_test2_fs_final", magnification)
convert_all_nodules_coord_to_world("predictions"+str(int(magnification*10))+"_tianchi_test2_fs_final", magnification)
if True:
convert_all_nodules_coord_to_world("predictions"+str(int(1*10))+"_tianchi_val_fs_final_new", 1)
if False:
filter_submission()
if False:
merge_submission()
# if False:
# for model_variant in ["_luna16_fs", "_luna_posnegndsb_v1", "_luna_posnegndsb_v2"]:
# print("Variant: ", model_variant)
# if True:
# combine_nodule_predictions(None, train_set=False, nodule_th=0.7, extensions=[model_variant])
# if True:
# train_xgboost_on_combined_nodules(fixed_holdout=False, submission=True, submission_is_fixed_holdout=False, extension=model_variant)
# train_xgboost_on_combined_nodules(fixed_holdout=True, extension=model_variant)
#
# combine_submissions(level=1, model_type="luna_posnegndsb")
# combine_submissions(level=1, model_type="luna16_fs")
# combine_submissions(level=1, model_type="daniel")
# combine_submissions(level=2)
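# Note (hedged reading of the flags in __main__): the intended order appears to
# be convert_all_nodules_coord_to_world() to map voxel coordinates to world
# coordinates, then merge_submission() to average candidate nodules closer than
# MIN_DISTANCE, then filter_submission() to keep at most 50 candidates per scan
# whose probability is at least P_TH.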
|
[
"cyrano@arsenalcn.com"
] |
cyrano@arsenalcn.com
|
ef5c629c1605df9a425e6499d917baca39dd8517
|
ab509b0565c3831f6d3c28a7e3a07252b6a4fe16
|
/CSR_Blue_Tools/global_settings.py
|
72f65d81e2a78476004e77b9663fd53abbd2ab96
|
[] |
no_license
|
JadenHuang/CSR_Blue_Tools
|
1763f5ff285973d0afbdb817526833f86c29634a
|
678919b10448bc4950c91488c29c453eeda4f589
|
refs/heads/master
| 2020-06-09T14:55:49.891538
| 2019-12-30T09:24:38
| 2019-12-30T09:24:38
| 193,455,503
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
# -*- coding: utf-8 -*-
from six import with_metaclass
from .helpers import Singleton
class g(with_metaclass(Singleton, object)):
#class g:
def __init__(self):
self.debug = 0
self.station = '00000'
self.serial = '000000000000'
self.module = 'MAIN'
self.CONFIG_FILE = "config/config.xml"
|
[
"jianxing217@126.com"
] |
jianxing217@126.com
|
dbf94606df33bddfeb17c93651cda4f7ba15eac2
|
f25317ae4ca52e766d9c9a0e6ecee951dbd436fd
|
/gonote/urls.py
|
b2a200fd093177c54f381095aa0ff722d655b830
|
[] |
no_license
|
uncle-T0ny/GoNote
|
bce76e86cfee7739e63cac6379ff4708936d6042
|
d8efb88b83d41a2403d825bd90c6b980384a4eca
|
refs/heads/master
| 2021-01-10T02:02:43.444288
| 2015-06-29T14:50:00
| 2015-06-29T14:50:00
| 36,979,148
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
from django.conf.urls import include, url, patterns
from django.contrib import admin
from document.views import add_delete_file, get_file
from folder.views import add_delete_folder, get_all_folders
from gonote import settings
from gonote.views import index, login_view, login, logout
from note.views import get_all_notes, get_note, add_delete_note, get_note_files
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', index),
(r'^admin/logout/$', logout),
url(r'^admin/', include(admin.site.urls)),
(r'^login-page/$', login_view),
(r'^login/$', login),
(r'^logout/$', logout),
url('', include('social.apps.django_app.urls', namespace='social')),
(r'^notes/$', get_all_notes),
(r'^note/(\d+)$', get_note),
(r'^note/(\d+)/files/$', get_note_files),
(r'^note/$', add_delete_note),
(r'^folders/$', get_all_folders),
(r'^folder/$', add_delete_folder),
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes':True}),
(r'^file/$', add_delete_file),
(r'^file/(\d+)$', get_file),
)
|
[
"r.uncleT0ny@gmail.com"
] |
r.uncleT0ny@gmail.com
|
0e849db000cb801bd9ded84acc0a9701ad7449f3
|
4b0cf8ca64b4623927e130e9af33c8e81b69a05a
|
/myapp/views.py
|
521328b5a70a9feeb7a0fdcc6964dfca788e2a5f
|
[] |
no_license
|
itminha123/mysite
|
6009b337363c1e1cb2d5259dc73ba6a27562591f
|
c577514993d3409e49fb31d705297942fd7f58b0
|
refs/heads/master
| 2020-03-23T19:08:24.129575
| 2018-07-23T03:18:41
| 2018-07-23T03:18:41
| 141,955,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# from gpiozero import LED
import json
bookmarks = [{'site': 'naver', 'url': 'http://www.naver.com'},
{'site': 'daum', 'url': 'http://www.daum.net'},
{'site': 'google', 'url': 'http://www.google.com'},
]
def get_bookmarks(request):
    global bookmarks
    site = request.GET.get('site')
    url = request.GET.get('url')
    if site:
        bookmark = {'site': site, 'url': url}
        bookmarks.append(bookmark)
    context = {'bookmarks': bookmarks}
    return HttpResponse(json.dumps(context), content_type='application/json')
def set_led(request):
led = request.GET.get('led')
# hw_led = LED(17)
# if led == 'on':
# hw_led.on()
# elif led == 'off':
# hw_led.off()
context = {'led':led}
return render(request, 'led.html', context)
def bookmark_list(request):
site = request.GET.get('site')
url = request.GET.get('url')
global bookmarks
if site:
bookmark = {'site': site, 'url': url}
bookmarks.append(bookmark)
context = {'bookmarks': bookmarks}
return render(request, 'bookmarkList.html', context)
def mem_ber_list(request):
members = ['Song', 'Lee', 'Kim']
context = {'members':members}
return render(request, 'memberList.html', context)
def home(request):
title = request.GET.get('title')
aaa_param = request.GET.get('aaa')
content = {'title': title,
'aaa':aaa_param,
}
return render(request, 'home.html', content)
# Create your views here.
|
[
"itminha123"
] |
itminha123
|
9036906fab7491c1ac445431dd9956327bdde9e6
|
eefc5f6e86c8c95c21ae64a67480882e4f0d5ea3
|
/hecuba/qthrift/__init__.py
|
b5c3c9ae2c06b8d4f5da70fae4fa3c73e52a815b
|
[
"Apache-2.0"
] |
permissive
|
him-28/hecuba
|
11de348f63085fe858039f9e42d61191ac229755
|
d9dbc1c5ad728612c87bd601d9b03369fceedc92
|
refs/heads/master
| 2021-07-20T17:19:02.782661
| 2017-10-27T08:24:46
| 2017-10-27T08:24:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
__all__ = ['ttypes', 'constants', 'QbeastMaster', 'QbeastWorker']
|
[
"cesare.cugnasco@gmail.com"
] |
cesare.cugnasco@gmail.com
|
a52b73942b4c31626805066da977628c224be70f
|
2fed1572162d09304972c5e0afd16c63d75d5858
|
/chip8/sound.py
|
7f985b4ce870334b212b1229c5aad1b7b6d5740c
|
[
"MIT"
] |
permissive
|
weibell/python-chip8-emu
|
f290af1b8207c4490bd960071ee762dded2379c5
|
c60934dbda4f9f972aa5ec912616545b8e518893
|
refs/heads/master
| 2022-11-25T07:33:53.583338
| 2020-07-15T20:08:58
| 2020-07-15T20:08:58
| 275,413,098
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = ""
import pygame
class Sound:
beep: pygame.mixer.Sound
is_playing: bool
def __init__(self):
pygame.mixer.init()
self.beep = pygame.mixer.Sound("chip8/beep.wav")
self.is_playing = False
def update(self, sound_timer: int):
if sound_timer > 0 and not self.is_playing:
self.beep.play()
self.is_playing = True
elif sound_timer == 0 and self.is_playing:
self.beep.stop()
self.is_playing = False
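# Usage sketch (hedged, not from the original repo): how an emulator loop might
# drive Sound.update() from the CHIP-8 sound timer. It assumes pygame is
# installed and the chip8/beep.wav asset referenced above exists.
#
#     snd = Sound()
#     snd.update(10)  # timer > 0 while idle       -> beep starts playing
#     snd.update(0)   # timer hit 0 while playing  -> beep stops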
|
[
"13382354+weibell@users.noreply.github.com"
] |
13382354+weibell@users.noreply.github.com
|
c96b1858c2de9c2efbfff3b280563c848dbc4c13
|
e8c521a1faa67a41cbb45b3e070de837a30effd5
|
/regExpressForNom.py
|
38b64b442dbced74a59f9127fbe46541ce5552e5
|
[] |
no_license
|
Mdayes27/assignment2
|
e15dd1e952587eb4522d143c601c010632bfa69d
|
c22f0d323d41d1b13463a07ae6e5f2f5fe7ed434
|
refs/heads/master
| 2020-07-31T15:05:33.740141
| 2019-09-24T17:37:49
| 2019-09-24T17:37:49
| 210,646,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
count = 0
with open('text2Read.py') as han:
    for line in han:
        line = line.rstrip()
        wds = line.split()
        for wd in wds:
            if wd.endswith("ing"):
                count += 1
            elif wd.endswith("or"):
                count += 1
            elif wd.endswith("ee"):
                count += 1
            elif wd.endswith("ion"):
                count += 1
print("The current count is ", count)
|
[
"mdayes22@byu.edu"
] |
mdayes22@byu.edu
|
9a03d1da14ebcf56ad107007b030340184f2e25f
|
592a070ae66808c86df4c526cdb0bd04072dabee
|
/dokugaku/6/6-3.py
|
0ecd4fd0fe1d12d9b6ad088cc415e97a0021ea69
|
[] |
no_license
|
masato-su/hangman
|
0d780557e1a63bc96043a911df42da86cb23222b
|
0d26d854de0850ab26e79220af31d509a4eada25
|
refs/heads/master
| 2020-05-02T21:32:06.400132
| 2019-03-28T15:16:17
| 2019-03-28T15:16:17
| 178,223,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
result = "aldous Huxley was born in 1894".title()
print(result)
|
[
"sudo1227@yahoo.co.jp"
] |
sudo1227@yahoo.co.jp
|
00b97f79e17b4330dbcba0d3ba429c29cf57b151
|
c81d010c3943ea5db6154a45ddb3f8d4870e00d0
|
/Text-Analysis/code.py
|
c916468b8daa273c78908f9d87f0a2e81d654756
|
[
"MIT"
] |
permissive
|
SatyapriyaChaudhari/ga-learner-dsmp-repo
|
971ad84fecaad395197363e57b50c8a800e508e5
|
a51711526705a563eb826449d46557c3f3ae8751
|
refs/heads/master
| 2020-04-08T09:19:57.292275
| 2019-05-01T06:46:07
| 2019-05-01T06:46:07
| 159,218,977
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,357
|
py
|
# --------------
# import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score ,confusion_matrix
# Code starts here
# load data
news = pd.read_csv(path)
# subset data
news = news[["TITLE", "CATEGORY"]]
# distribution of classes
dist = news.CATEGORY.value_counts()
# display class distribution
print("Distribution: \n", dist)
# display data
print(news.head())
# Code ends here
# --------------
# Code starts here
# stopwords
stop = set(stopwords.words('english'))
# retain only alphabets
#re.compile('[^a-zA-Z]')
news['TITLE'] = news['TITLE'].apply(lambda x: re.sub("[^a-zA-Z]", " ", x))
# convert to lowercase and tokenize
news['TITLE'] = news['TITLE'].apply(lambda x: x.lower().split())
# remove stopwords
news['TITLE'] = news['TITLE'].apply(lambda x: [i for i in x if i not in stop])
# join list elements
news['TITLE'] = news['TITLE'].apply(lambda x: " ".join(x))
# split into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(news["TITLE"], news["CATEGORY"], test_size = 0.2, random_state = 3)
# Code ends here
# --------------
# Code starts here
# initialize count vectorizer
count_vectorizer = CountVectorizer()
# initialize tfidf vectorizer
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))
# fit and transform with count vectorizer
X_train_count = count_vectorizer.fit_transform(X_train)
X_test_count = count_vectorizer.transform(X_test)
# fit and transform with tfidf vectorizer
X_train_tfidf = tfidf_vectorizer.fit_transform(X_train)
X_test_tfidf = tfidf_vectorizer.transform(X_test)
# Code ends here
# --------------
# Code starts here
# initialize multinomial naive bayes
nb_1 = MultinomialNB()
nb_2 = MultinomialNB()
# fit on count vectorizer training data
nb_1.fit(X_train_count, Y_train)
# fit on tfidf vectorizer training data
nb_2.fit(X_train_tfidf, Y_train)
# accuracy with count vectorizer
acc_count_nb = accuracy_score(nb_1.predict(X_test_count), Y_test)
# accuracy with tfidf vectorizer
acc_tfidf_nb = accuracy_score(nb_2.predict(X_test_tfidf), Y_test)
# display accuracies
print("Count Vectorizer Accuracy: ", acc_count_nb)
print("TFIDF Vectorizer Accuracy: ", acc_tfidf_nb)
# Code ends here
# --------------
import warnings
warnings.filterwarnings('ignore')
# initialize logistic regression
logreg_1 = OneVsRestClassifier(LogisticRegression(random_state=10))
logreg_2 = OneVsRestClassifier(LogisticRegression(random_state=10))
# fit on count vectorizer training data
logreg_1.fit(X_train_count, Y_train)
# fit on tfidf vectorizer training data
logreg_2.fit(X_train_tfidf, Y_train)
# accuracy with count vectorizer
acc_count_logreg = accuracy_score(logreg_1.predict(X_test_count), Y_test)
# accuracy with tfidf vectorizer
acc_tfidf_logreg = accuracy_score(logreg_2.predict(X_test_tfidf), Y_test)
# display accuracies
print("Count Vectorizer Accuracy: ", acc_count_logreg)
print("TFIDF Vectorizer Accuracy: ", acc_tfidf_logreg)
# Code ends here
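# Note: 'path' passed to pd.read_csv(path) above is not defined in this file;
# it is assumed to be injected by the exercise environment hosting the snippet.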
|
[
"satyapriya.chaudhari@gmail.com"
] |
satyapriya.chaudhari@gmail.com
|
d2aa1db756fe5df542ebf0bf94cea699dd96ca62
|
846724efe734db14610a40be99a6e0f77bb55732
|
/arrows/notes/management/commands/create_notes.py
|
8aee4967fdb7f188f7aa0ecdcce7d0a400e51dfc
|
[] |
no_license
|
HelenMaksimova/arrows
|
620ed1c8b00ef0ef9b751fbc9c26cd6db5ab36cc
|
d86c3f38350e665b280f2d9771e49ab647bc52b2
|
refs/heads/master
| 2023-08-24T17:02:40.955617
| 2021-10-02T22:45:03
| 2021-10-02T22:45:03
| 398,632,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
from django.core.management.base import BaseCommand
from users.models import ArrowsUser
from notes.models import Project, Note
from random import choice, randint
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('count', nargs='?', type=int, default=5)
def handle(self, *args, **options):
Note.objects.all().delete()
for idx in range(options.get('count')):
project = choice(Project.objects.all())
user = choice(ArrowsUser.objects.all())
text = 'text-text-text-' * randint(1, 20)
note = Note.objects.create(
project=project,
text=text,
created_by_user=user
)
print(f'Note with id {note.id} created')
print('Complete')
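# Illustrative invocation (assumed project setup, not part of the original file):
#   python manage.py create_notes 10   # deletes existing notes, then creates 10 random ones
# Without an argument the command falls back to the default count of 5.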
|
[
"alnorda@gmail.com"
] |
alnorda@gmail.com
|
560fc6848ea6ff069e9d4f0c2edcd412adf992e8
|
993cd331f65748fa7b55ed7dfeca909255aa38d0
|
/teting_dyn/urls.py
|
1a3e2f47bf6c37d9db7b79d563c2981bb7031636
|
[] |
no_license
|
pavelKhat/testing-dyn_forms
|
4d794ae0dbebadd41a4bb96ffecc0612f571f29d
|
002dade7e82eebb4b93b67f7cc43e4b293a1a149
|
refs/heads/master
| 2020-03-25T15:15:24.627572
| 2018-08-09T14:07:28
| 2018-08-09T14:07:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
"""teting_dyn URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('app1/', include('app1.urls')),
]
|
[
"pavelkhat@gmail.com"
] |
pavelkhat@gmail.com
|
f4dbb257bfe88751a5b86d2e942b52d4b7feea8b
|
8b1c17561bf34252b79f6c806981f7c6fb1c4221
|
/Algorithms-LeetCode/1-10/3. lengthoflongestsubstring.py
|
bb0885abcb38804cfae91588881f81bb413cdfff
|
[] |
no_license
|
danebista/LeetCode-Algorithms
|
52c5ce77ae43eb9bcb1ce23c06062f5ebdc3170e
|
994b14123f07cbd0ad48b8163cb68a65ec8679ff
|
refs/heads/master
| 2023-05-21T05:00:50.333685
| 2021-06-13T09:37:56
| 2021-06-13T09:37:56
| 364,216,221
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
"""
Input: s = "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
"""
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        # Sliding window: `sets` holds the characters of the current window
        # s[l..r]; shrink the window from the left whenever s[r] repeats.
        sets = set()
        l = 0
        result = 0
        for r in range(len(s)):
            while s[r] in sets:
                sets.remove(s[l])
                l += 1
            sets.add(s[r])
            result = max(result, r - l + 1)
        return result
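# Illustrative checks (examples assumed, not part of the original file):
#   Solution().lengthOfLongestSubstring("abcabcbb")  # -> 3 ("abc")
#   Solution().lengthOfLongestSubstring("pwwkew")    # -> 3 ("wke")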
|
[
"bista.dinank@gmail.com"
] |
bista.dinank@gmail.com
|
5a2f6e7eb2b719a8764503f9e5dbaecd6a39ae15
|
129d5f787569bd21c5acbd34889219493c3bd28f
|
/pytorch/pytorch-deeplab_v3_plus/DenseCRFLoss.py
|
73a0874ec937b0eb075bcb69d1e11901af38d085
|
[
"MIT"
] |
permissive
|
Ahmadreza-Jeddi/rloss
|
bbc83744e450221eb4489dde6a4ab22cf018fbaa
|
ad28a6f49230ff5d80f4fcd038ecb6c4bd96b1c4
|
refs/heads/master
| 2021-10-09T19:19:04.172812
| 2021-09-27T03:50:29
| 2021-09-27T03:50:29
| 242,904,034
| 7
| 1
|
MIT
| 2020-02-25T03:50:19
| 2020-02-25T03:50:18
| null |
UTF-8
|
Python
| false
| false
| 2,810
|
py
|
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import sys
sys.path.append("../wrapper/bilateralfilter/build/lib.linux-x86_64-3.6")
from bilateralfilter import bilateralfilter, bilateralfilter_batch
from dataloaders.custom_transforms import denormalizeimage
import time
from multiprocessing import Pool
import multiprocessing
from itertools import repeat
import pickle
class DenseCRFLossFunction(Function):
@staticmethod
def forward(ctx, images, segmentations, sigma_rgb, sigma_xy, ROIs):
ctx.save_for_backward(segmentations)
ctx.N, ctx.K, ctx.H, ctx.W = segmentations.shape
ROIs = ROIs.unsqueeze_(1).repeat(1,ctx.K,1,1)
segmentations = torch.mul(segmentations.cuda(), ROIs.cuda())
ctx.ROIs = ROIs
densecrf_loss = 0.0
images = images.numpy().flatten()
segmentations = segmentations.cpu().numpy().flatten()
AS = np.zeros(segmentations.shape, dtype=np.float32)
bilateralfilter_batch(images, segmentations, AS, ctx.N, ctx.K, ctx.H, ctx.W, sigma_rgb, sigma_xy)
densecrf_loss -= np.dot(segmentations, AS)
# averaged by the number of images
densecrf_loss /= ctx.N
ctx.AS = np.reshape(AS, (ctx.N, ctx.K, ctx.H, ctx.W))
return Variable(torch.tensor([densecrf_loss]), requires_grad=True)
@staticmethod
def backward(ctx, grad_output):
grad_segmentation = -2*grad_output*torch.from_numpy(ctx.AS)/ctx.N
grad_segmentation=grad_segmentation.cuda()
grad_segmentation = torch.mul(grad_segmentation, ctx.ROIs.cuda())
return None, grad_segmentation, None, None, None
class DenseCRFLoss(nn.Module):
def __init__(self, weight, sigma_rgb, sigma_xy, scale_factor):
super(DenseCRFLoss, self).__init__()
self.weight = weight
self.sigma_rgb = sigma_rgb
self.sigma_xy = sigma_xy
self.scale_factor = scale_factor
def forward(self, images, segmentations, ROIs):
""" scale imag by scale_factor """
scaled_images = F.interpolate(images,scale_factor=self.scale_factor)
scaled_segs = F.interpolate(segmentations,scale_factor=self.scale_factor,mode='bilinear',align_corners=False)
scaled_ROIs = F.interpolate(ROIs.unsqueeze(1),scale_factor=self.scale_factor).squeeze(1)
return self.weight*DenseCRFLossFunction.apply(
scaled_images, scaled_segs, self.sigma_rgb, self.sigma_xy*self.scale_factor, scaled_ROIs)
def extra_repr(self):
return 'sigma_rgb={}, sigma_xy={}, weight={}, scale_factor={}'.format(
self.sigma_rgb, self.sigma_xy, self.weight, self.scale_factor
)
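# Illustrative usage (assumed shapes and hyper-parameters, not part of the original file):
#   criterion = DenseCRFLoss(weight=2e-9, sigma_rgb=15.0, sigma_xy=100.0, scale_factor=0.5)
#   loss = criterion(images, probs, ROIs)
# where images is an N x 3 x H x W tensor kept on the CPU (forward() calls .numpy() on it),
# probs is the N x K x H x W softmax output of the segmentation network, and ROIs is an
# N x H x W mask of valid pixels.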
|
[
"m62tang@rsg-pc286.localnet"
] |
m62tang@rsg-pc286.localnet
|
789f9bbf33252c5f8444b39894175a96e85ff453
|
f70dd2356260e84a3baebc68f0c49327ff1cb005
|
/outliers/enron_outliers.py
|
90715800719ce2049ecf8890cd425cf8f7ca34aa
|
[] |
no_license
|
kmather73/ud120-ML-Project
|
04f91d4de859200d454e8701b0461bd3cef5a34f
|
b58452ae5d4105707731de56c567c60578b28514
|
refs/heads/master
| 2020-09-17T22:26:23.663962
| 2016-08-19T02:55:12
| 2016-08-19T02:55:12
| 66,047,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
#!/usr/bin/python
import pickle
import sys
import matplotlib.pyplot
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
### read in data dictionary, convert to numpy array
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
data_dict.pop( "TOTAL", 0 )
features = ["salary", "bonus"]
data = featureFormat(data_dict, features)
### your code below
for point in data:
salary = point[0]
bonus = point[1]
matplotlib.pyplot.scatter( salary, bonus )
matplotlib.pyplot.xlabel("salary")
matplotlib.pyplot.ylabel("bonus")
matplotlib.pyplot.show()
### print anyone with a salary above $1M, or with a known salary and a bonus above $7M
for k in data_dict:
    salary = data_dict[k]["salary"]
    bonus = data_dict[k]["bonus"]
    if salary != "NaN" and salary > 1e6:
        print k
    elif salary != "NaN" and bonus != "NaN" and bonus > 0.7 * 1e7:
        print k
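### Note (added): this exercise is written for Python 2 (print statements, text-mode
### pickle); under Python 3 the prints become print(k) and the pickle file must be
### opened in "rb" mode.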
|
[
"kmather73@gmail.com"
] |
kmather73@gmail.com
|
942d41e63481adf5d1b9a7bfb848e7106e9eabad
|
18642e63abd4e5b935c5c60d8451857220dad2ca
|
/portfolio/models.py
|
cb0f68f1bd458cd8cb4381deb3be1f7be0adebed
|
[] |
no_license
|
SravaniKV/Assign1p2
|
8cb24e8dfd4ca97c0b1fbf61ecec300326b8deeb
|
56ee638db6815c3707f596cc8158c78e8049926c
|
refs/heads/master
| 2021-06-30T08:17:13.198594
| 2017-09-18T21:53:45
| 2017-09-18T21:53:45
| 104,000,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,654
|
py
|
#from django.db import models
# Create your models here.
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from yahoo_finance import Share
class Customer(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=200)
cust_number = models.IntegerField(blank=False, null=False)
city = models.CharField(max_length=50)
state = models.CharField(max_length=50)
zipcode = models.CharField(max_length=10)
email = models.EmailField(max_length=200)
cell_phone = models.CharField(max_length=50)
created_date = models.DateTimeField(
default=timezone.now)
updated_date = models.DateTimeField(auto_now_add=True)
def created(self):
self.created_date = timezone.now()
self.save()
def updated(self):
self.updated_date = timezone.now()
self.save()
def __str__(self):
return str(self.cust_number)
class Investment(models.Model):
customer = models.ForeignKey(Customer, related_name='investments')
category = models.CharField(max_length=50)
description = models.CharField(max_length=200)
acquired_value = models.DecimalField(max_digits=10, decimal_places=2)
acquired_date = models.DateField(default=timezone.now)
recent_value = models.DecimalField(max_digits=10, decimal_places=2)
recent_date = models.DateField(default=timezone.now, blank=True, null=True)
def created(self):
self.acquired_date = timezone.now()
self.save()
def updated(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.customer)
def results_by_investment(self):
return self.recent_value - self.acquired_value
class Stock(models.Model):
customer = models.ForeignKey(Customer, related_name='stocks')
symbol = models.CharField(max_length=10)
name = models.CharField(max_length=50)
shares = models.DecimalField (max_digits=10, decimal_places=1)
purchase_price = models.DecimalField(max_digits=10, decimal_places=2)
purchase_date = models.DateField(default=timezone.now, blank=True, null=True)
def created(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.customer)
def initial_stock_value(self):
return self.shares * self.purchase_price
def current_stock_price(self):
symbol_f=self.symbol
data=Share(symbol_f)
share_value=(data.get_open())
return share_value
def current_stock_value(self):
symbol_f=self.symbol
data=Share(symbol_f)
share_value=(data.get_open())
return float(share_value) * float(self.shares)
class MutualFunds(models.Model):
customer = models.ForeignKey(Customer, related_name='mutualfunds')
category = models.CharField(max_length=50)
description = models.CharField(max_length=200)
units=models.DecimalField(max_digits=10,decimal_places=0)
acquired_value = models.DecimalField(max_digits=10, decimal_places=2)
acquired_date = models.DateField(default=timezone.now)
recent_value = models.DecimalField(max_digits=10, decimal_places=2)
recent_date = models.DateField(default=timezone.now, blank=True, null=True)
def created(self):
self.acquired_date = timezone.now()
self.save()
def updated(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.customer)
def results_by_mutualfunds(self):
return self.recent_value - self.acquired_value
|
[
"venkatasravanikaka@unomaha.edu"
] |
venkatasravanikaka@unomaha.edu
|
f83dfa728113a399cb36710ce43bfd39835c8799
|
9e716a625f7732359c34efc6e18b2a6b7664d413
|
/green_earth_app/admin.py
|
b393c8f0b8bce6becd8b14906210d1555013cf9d
|
[] |
no_license
|
greenearthinc/mobileapp-prototype
|
1fe2e4bf18f7258a393c377e9d0b46bec5cf319d
|
22299fd01a1e7cdfa38ea40b845ebfd12eea5bc6
|
refs/heads/master
| 2020-07-11T11:32:56.993491
| 2019-08-26T17:40:17
| 2019-08-26T17:40:17
| 204,528,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,306
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from sensor_models import iotCore
from sensor_models import airCluster, waterCluster, lightCluster, robot, channel
from user_models import serviceAccount, workerAccount
from access_models import coreAccess
from support_files import generate_key
import random
# Register your models here.
from django.contrib.auth.models import User
from support_files import publish_information
def create_service_token(modeladmin, request, queryset):
for q in queryset:
username = "core_id_"+str(q.core_id)
try:
user= User.objects.get(username=username)
except:
key = generate_key()
user = User.objects.create_user(username, '', key)
user.save()
new_account = serviceAccount()
new_account.user = user
new_account.access_token = key
new_account.save()
def run_nutrient_motors(modeladmin, request, queryset):
publish_information("water_nutr", 0, 1) # 0 is empty value
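# Note (added): simulate_farms below wipes the existing channel/water/air/light/robot
# objects for each selected core and rebuilds a simulated 8x8 grid of channels (A1..H8),
# each with its own water and light cluster, one shared air cluster, and a single robot,
# with randomised status codes.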
def simulate_farms(modeladmin, request, queryset):
for q in queryset:
id = q.core_id
channel.objects.filter(core=q).delete()
waterCluster.objects.filter(core=q).delete()
airCluster.objects.filter(core=q).delete()
lightCluster.objects.filter(core=q).delete()
robot.objects.filter(core=q).delete()
letter_combo = ["A", "B", "C", "D", "E", "F", "G", "H"]
unique_value = 1
air = airCluster()
air.joint_id = str(q.core_id) + "-" + str(1)
air.core = q
air.cluster_id = 1
air.humidity = 50
air.temperature = 26.0
air.tvoc = 200
air.co2 = 10
air.pessure = 101.3
air.save()
i =0
while i < len(letter_combo):
j = 1
while j <= 8:
name = letter_combo[i] + str(j)
new_channel = channel()
new_channel.name = name
new_channel.unique_id = str(q.core_id) + "-" + name
new_channel.core = q
water = waterCluster()
water.joint_id = str(q.core_id) + "-" + str(unique_value)
water.core = q
water.cluster_id = str(unique_value)
water.temperature = 25.0
water.conductivity = 0.6
water.ph = 7
v = int(random.random()*10)
if v > 7:
water.status_code = 1
water.save()
light = lightCluster()
light.joint_id = str(q.core_id) + "-" + str(unique_value)
light.core = q
light.cluster_id = str(unique_value)
v = int(random.random()*10)
if v < 7:
light.status_code = 1
light.state = "ON"
else:
light.state = "OFF"
light.status_code = 0
light.spectrum = 590
light.save()
j += 1
unique_value += 1
new_channel.water = water
new_channel.light = light
new_channel.air = air
new_channel.save()
i += 1
cluster = robot()
cluster.joint_id = str(q.core_id) + "-" + str(1)
cluster.core = q
cluster.cluster_id = 1
cluster.status = "Inspecting B6"
cluster.save()
@admin.register(channel)
class adminChannel(admin.ModelAdmin):
pass
@admin.register(iotCore)
class adminIotCore(admin.ModelAdmin):
actions = [create_service_token, simulate_farms, run_nutrient_motors]
@admin.register(airCluster)
class adminAirCluster(admin.ModelAdmin):
pass
@admin.register(waterCluster)
class adminWaterCluster(admin.ModelAdmin):
pass
@admin.register(lightCluster)
class adminLightCluster(admin.ModelAdmin):
pass
@admin.register(robot)
class adminRobot(admin.ModelAdmin):
pass
@admin.register(serviceAccount)
class adminServiceAccount(admin.ModelAdmin):
pass
@admin.register(workerAccount)
class adminWorkerAccount(admin.ModelAdmin):
pass
@admin.register(coreAccess)
class adminCoreAccess(admin.ModelAdmin):
pass
|
[
"noreply@github.com"
] |
noreply@github.com
|
1d0b6383c33973c35589ec4404e85d7a6c72e8e8
|
8130c34d546c323d6d5d2ca6b4a67330af08828f
|
/.history/menu_app/models_20210105152309.py
|
ede59bfeb80ad64987ea4b2b9d2f75c6e48ba8a7
|
[] |
no_license
|
lienusrob/final
|
ba2dad086fc97b21b537ef12df834dfadd222943
|
f2726e31f1d51450e4aed8c74021c33679957b28
|
refs/heads/master
| 2023-02-15T01:36:54.463034
| 2021-01-07T12:47:05
| 2021-01-07T12:47:05
| 327,279,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,265
|
py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
import random
import string
from datetime import date, datetime
class ToppingsCategory(models.Model):
name = models.CharField(max_length=100)
type = models.CharField(max_length=100)
description = models.TextField(max_length=100, blank=True, null=True, default='')
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=100)
price = models.DecimalField(max_digits = 4, decimal_places=2, default=0)
category = models.ForeignKey(ToppingsCategory, on_delete = models.PROTECT, default=None)
def __str__(self):
return self.name
class ItemsCategory(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class MenuItem(models.Model):
name = models.CharField(max_length=22)
price = models.DecimalField(max_digits = 4, decimal_places=2)
category = models.ForeignKey(ItemsCategory, on_delete = models.PROTECT)
detail = models.TextField(max_length=1000, default = ' ')
# toppings = models.ManyToManyField(Topping, blank=True)
#image = models.ImageField(default=None, upload_to='', null=True, blank=True)
def __str__(self):
return self.name
class Extras(models.Model):
requests = models.TextField(max_length=400,)
def __str__(self):
return self.name
class Cart (models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
current = models.BooleanField(default=True)
date_ordered = models.DateTimeField(auto_now_add= True )
class CartItem (models.Model):
add_item = models.ForeignKey(MenuItem, on_delete= models.CASCADE)
quantity = models.IntegerField(default=0)
cart = models.ForeignKey(Cart, on_delete= models.CASCADE)
def __str__(self):
return self.add_item.name
#remove dont need
class OrderItem(models.Model):
item = models.ForeignKey(MenuItem, on_delete=models.SET_NULL, null=True)
price = models.DecimalField(max_digits = 4, decimal_places=2, default=0)
order_item_order = models.ForeignKey('menu_app.Order', on_delete=models.CASCADE, null=True)
#toppings = models.ManyToManyField(Topping, blank=True)
def __str__(self):
return self.item.name
def get_item_price(self):
self.price = sum(topping.price for topping in self.toppings.all()) + self.item.price
def get_all_topping_categories(self):
categories = []
for topping in self.toppings.all():
if not topping.category in categories:
categories.append(topping.category)
return categories
class Orders (models.Model):
cart = models.ForeignKey(Cart, on_delete=models.CASCADE)
placed = models.BooleanField(default=False)
def __str__ (self):
return self.cart.user.username
#old need to remove
class Order(models.Model):
customer = models.ForeignKey(User, on_delete = models.CASCADE)
date_ordered = models.DateTimeField(default=timezone.now)
items = models.ManyToManyField(MenuItem)
order_items = models.ManyToManyField(OrderItem)
total = models.DecimalField(max_digits = 6, decimal_places=2, null=True)
is_ordered = models.BooleanField(default=False)
pickup_time = models.DateTimeField(default=timezone.now)
special_instructions = models.TextField(max_length=256, blank=True)
def __str__(self):
return f'Order #{self.id} - {self.customer.username}'
# # url to redirect to when submitting order form
# def get_absolute_url(self):
# return reverse('orders:order_detail', kwargs={'pk':self.pk})
# returns the sum of each item price in order and assigns it to self.total
def get_order_total(self):
self.total = sum(order_item.price for order_item in self.order_items.all())
def get_cart_items(self):
return self.items.all()
    @staticmethod
    def generate_order_id():
date_str = date.today().strftime('%Y%m%d')[2:] + str(datetime.now().second)
rand_str = "".join([random.choice(string.digits) for count in range(3)])
return date_str + rand_str
# class Meta():
# ordering = ['-date_ordered']
|
[
"lienus.rob@hotmail.de"
] |
lienus.rob@hotmail.de
|
eb4fe4d2438140d7da3760ede1e8840e63849c66
|
5ad84a7ec7269e63eecedc33072b5d7877530d60
|
/lib/exec.py
|
7aa57e49b853b8677d7bbe512055308c037e1c7f
|
[] |
no_license
|
walkrain/python_base
|
d144da3df6cea02de76be11d55b5c87374a096bc
|
8e15ffecd42ba656be883fd80cc781958ba0e932
|
refs/heads/master
| 2020-05-27T19:15:22.415146
| 2015-05-19T00:20:33
| 2015-05-19T00:20:33
| 35,705,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
#!/usr/bin/python
# Filename:exec.py
exec 'print "hello world"'
print eval('2*3')
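# Note (added): this file targets Python 2. Rough Python 3 equivalents would be:
#   exec('print("hello world")')
#   print(eval('2*3'))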
|
[
"walkrain@126.com"
] |
walkrain@126.com
|
36c1449fb9b44dcf2b01c472c97533b9f70ca5c6
|
f851122e51b760f454ba35a47223f373852b7b82
|
/main.py
|
c30a3e1497c258fc42a8dd0a17de56de9b03f435
|
[] |
no_license
|
JustinGuerra/ThreadCheckerBot
|
eb95b7ea05daf316588bacf7a59fc3209195d454
|
2df761121255d223e02ec9c502b047cc2968b00a
|
refs/heads/master
| 2020-04-09T03:20:08.675795
| 2018-12-01T19:04:12
| 2018-12-01T19:04:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,768
|
py
|
import praw
import operator
import time
import datetime
from praw.models import Comment
total_comments_processed = 0
def main():
reddit = praw.Reddit("ThreadCheckBot")
#subreddit = reddit.subreddit("news")
#submissions = subreddit.hot(limit=10)
# for submission in submissions:
#check_thread(reddit, submission)
check_thread(reddit, reddit.submission(id='a1y03b'))
def check_thread(reddit, submission):
start_time = time.time()
print("Checking Thread.")
print("Thread Title: " + submission.title)
print("Thread Id: " + submission.id)
print("Grabbing all comments in thread...")
all_comments_in_thread = get_all_comments(reddit, submission.id)
print("Total Comments: " + str(len(all_comments_in_thread)))
print("Getting all users in thread...")
all_users_in_thread = get_all_unique_users(all_comments_in_thread)
print("Total unique Users: " + str(len(all_users_in_thread)))
top_subreddits = calculate_top_subreddits(all_users_in_thread)
sorted_top_subreddits = sorted(
top_subreddits.items(), key=operator.itemgetter(1))
    for subreddit, percentage in sorted_top_subreddits:
        if subreddit != submission.subreddit and percentage >= 15:
            print("Current thread has suspicious activity.")
            print("Thread has over 15 percent of users from a specific subreddit")
            print(subreddit.display_name + ": " +
                  str(percentage) + "%")
print("Thread link: " + submission.permalink)
runtime_in_seconds = int(round(time.time() - start_time))
minutes, seconds = divmod(runtime_in_seconds, 60)
hours, minutes = divmod(minutes, 60)
print("Processed Thread in " + str(hours) + "h " +
str(minutes) + "m " + str(seconds) + "s")
def calculate_top_subreddits(users):
dict_of_primary_subreddits = {}
print("Fetching top subreddits")
user_counter = 0
for user in users:
if user:
start_time = time.time()
if type(user).__name__ == "Redditor":
top_subreddit = fetch_subreddit_in_position(
user.comments.new(limit=None), 0)
if top_subreddit in dict_of_primary_subreddits:
dict_of_primary_subreddits[top_subreddit] += 1
else:
dict_of_primary_subreddits[top_subreddit] = 1
user_counter += 1
print("Processed user " + str(user_counter) +
" of " + str(len(users)))
dict_of_subreddit_percentages = {}
for subreddit in dict_of_primary_subreddits:
percentage = (
dict_of_primary_subreddits[subreddit] / len(users)) * 100
dict_of_subreddit_percentages[subreddit] = percentage
    return dict_of_subreddit_percentages
def fetch_subreddit_in_position(comments, position):
subreddit_numbers = {}
global total_comments_processed
comments_processed_for_user = 0
start_time = time.time()
# Check each comment and grab the subreddit and increase tally for repeated subreddits
for comment in comments:
if comment:
if type(comment).__name__ == "Comment":
comment_date = datetime.datetime.utcfromtimestamp(
comment.created_utc)
compare_date = datetime.datetime.now() - datetime.timedelta(days=365)
if(comment_date >= compare_date):
if comment.subreddit in subreddit_numbers:
subreddit_numbers[comment.subreddit] += 1
else:
subreddit_numbers[comment.subreddit] = 1
comments_processed_for_user += 1
total_comments_processed += 1
sorted_subreddits = sorted(
subreddit_numbers.items(), key=operator.itemgetter(1))
timeTook = time.time() - start_time
print("Took " + str(timeTook) + " to process user")
return sorted_subreddits[position][0]
def get_all_unique_users(comments):
users = list()
for comment in comments:
if type(comment).__name__ == "Comment":
if comment.author not in users:
users.append(comment.author)
return users
def get_sub_comments(comment, allComments):
allComments.append(comment)
if not hasattr(comment, "replies"):
replies = comment.comments()
else:
replies = comment.replies
for child in replies:
get_sub_comments(child, allComments)
def get_all_comments(r, submissionId):
submission = r.submission(submissionId)
comments = submission.comments
commentsList = []
for comment in comments:
get_sub_comments(comment, commentsList)
return commentsList
if __name__ == "__main__":
main()
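# Note (added, assumption): praw.Reddit("ThreadCheckBot") expects a matching
# [ThreadCheckBot] section in praw.ini supplying client_id, client_secret and
# user_agent; without that configuration the script cannot authenticate.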
|
[
"danieljacobhix@gmail.com"
] |
danieljacobhix@gmail.com
|
31291fea928eb8e023f65781c71fa4432037efea
|
ba1eff6535027c16b9e1d399b96e7853bc1514dc
|
/tests/test_16_userinfo_endpoint.py
|
03ec0337b9ec1fd4207b1850726eb13b7fc2b0da
|
[
"Apache-2.0"
] |
permissive
|
sklemer1/oidcendpoint
|
09d06e4cf21113f74a78734cdd06c964aaed3c7d
|
bc2cd9222bd05aec7b7ba5c7c7f593c2143357f3
|
refs/heads/master
| 2020-03-30T12:24:20.500373
| 2018-10-04T13:42:31
| 2018-10-04T13:42:31
| 151,222,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,738
|
py
|
import json
import os
import pytest
import time
from oidcmsg.key_jar import build_keyjar
from oidcmsg.oidc import AccessTokenRequest
from oidcmsg.oidc import AuthorizationRequest
from oidcendpoint.client_authn import verify_client
from oidcendpoint.oidc import userinfo
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.provider_config import ProviderConfiguration
from oidcendpoint.oidc.registration import Registration
from oidcendpoint.oidc.token import AccessToken
from oidcendpoint.authn_event import AuthnEvent
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from oidcendpoint.user_info import UserInfo
KEYDEFS = [
{"type": "RSA", "key": '', "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]}
]
KEYJAR = build_keyjar(KEYDEFS)[1]
RESPONSE_TYPES_SUPPORTED = [
["code"], ["token"], ["id_token"], ["code", "token"], ["code", "id_token"],
["id_token", "token"], ["code", "token", "id_token"], ['none']]
CAPABILITIES = {
"response_types_supported": [" ".join(x) for x in RESPONSE_TYPES_SUPPORTED],
"token_endpoint_auth_methods_supported": [
"client_secret_post", "client_secret_basic",
"client_secret_jwt", "private_key_jwt"],
"response_modes_supported": ['query', 'fragment', 'form_post'],
"subject_types_supported": ["public", "pairwise"],
"grant_types_supported": [
"authorization_code", "implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer", "refresh_token"],
"claim_types_supported": ["normal", "aggregated", "distributed"],
"claims_parameter_supported": True,
"request_parameter_supported": True,
"request_uri_parameter_supported": True,
}
AUTH_REQ = AuthorizationRequest(client_id='client_1',
redirect_uri='https://example.com/cb',
scope=['openid'],
state='STATE',
response_type='code')
TOKEN_REQ = AccessTokenRequest(client_id='client_1',
redirect_uri='https://example.com/cb',
state='STATE',
grant_type='authorization_code',
client_secret='hemligt')
TOKEN_REQ_DICT = TOKEN_REQ.to_dict()
BASEDIR = os.path.abspath(os.path.dirname(__file__))
def full_path(local_file):
return os.path.join(BASEDIR, local_file)
USERINFO = UserInfo(json.loads(open(full_path('users.json')).read()))
def setup_session(endpoint_context, areq):
authn_event = AuthnEvent(uid="uid", salt='salt',
authn_info=INTERNETPROTOCOLPASSWORD,
time_stamp=time.time())
sid = endpoint_context.sdb.create_authz_session(authn_event, areq,
client_id='client_id')
endpoint_context.sdb.do_sub(sid, '')
return sid
class TestEndpoint(object):
@pytest.fixture(autouse=True)
def create_endpoint(self):
self.endpoint = userinfo.UserInfo(KEYJAR)
conf = {
"issuer": "https://example.com/",
"password": "mycket hemligt",
"token_expires_in": 600,
"grant_expires_in": 300,
"refresh_token_expires_in": 86400,
"verify_ssl": False,
"capabilities": CAPABILITIES,
"jwks": {
'url_path': '{}/jwks.json',
'local_path': 'static/jwks.json',
'private_path': 'own/jwks.json'
},
'endpoint': {
'provider_config': {
'path': '{}/.well-known/openid-configuration',
'class': ProviderConfiguration,
'kwargs': {}
},
'registration': {
'path': '{}/registration',
'class': Registration,
'kwargs': {}
},
'authorization': {
'path': '{}/authorization',
'class': Authorization,
'kwargs': {}
},
'token': {
'path': '{}/token',
'class': AccessToken,
'kwargs': {}
},
'userinfo': {
'path': '{}/userinfo',
'class': userinfo.UserInfo,
'kwargs': {'db_file': 'users.json'}
}
},
'client_authn': verify_client,
"authentication": [{
'acr': INTERNETPROTOCOLPASSWORD,
'name': 'NoAuthn',
'kwargs': {'user': 'diana'}
}],
'template_dir': 'template'
}
endpoint_context = EndpointContext(conf, keyjar=KEYJAR)
endpoint_context.cdb['client_1'] = {
"client_secret": 'hemligt',
"redirect_uris": [("https://example.com/cb", None)],
"client_salt": "salted",
'token_endpoint_auth_method': 'client_secret_post',
'response_types': ['code', 'token', 'code id_token', 'id_token']
}
self.endpoint = userinfo.UserInfo(endpoint_context)
def test_init(self):
assert self.endpoint
def test_parse(self):
session_id = setup_session(self.endpoint.endpoint_context, AUTH_REQ)
_dic = self.endpoint.endpoint_context.sdb.upgrade_to_token(
key=session_id)
_req = self.endpoint.parse_request(
{}, auth="Bearer {}".format(_dic['access_token']))
assert set(_req.keys()) == {'client_id', 'access_token'}
|
[
"roland@catalogix.se"
] |
roland@catalogix.se
|
15df76003269acd982a7ce23366d7b3a40f22baf
|
122e1c1129b4488aa89c59c6d14e166f22d437ad
|
/CanchaAlToque/views.py
|
d7963d1a63897462d832cc0a43cc12b8bd214825
|
[] |
no_license
|
jolu7432/ProjectoCanchaHero
|
100710ab31ec5ac49c7779a251eb30d24df5cb56
|
9f21535b2ff930a882989dfa113c429a63453a31
|
refs/heads/master
| 2021-01-01T04:25:24.482723
| 2016-04-16T21:20:56
| 2016-04-16T21:20:56
| 56,405,066
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
@csrf_exempt
def index(request):
return render_to_response('index.html', context_instance=RequestContext(request))
|
[
"jorgeluis.tanos@exitco.biz"
] |
jorgeluis.tanos@exitco.biz
|
d0a5b868d53ddcc88c056248836618359d2a331f
|
71030516166767491cf181f70665534083c36b13
|
/JPype-0.5.4.2/src/python/jpype/_pykeywords.py
|
d64b47282de2824701beb2e10cb490ff9ae1729d
|
[] |
no_license
|
ct1104/fileservices
|
2e7a473785e464c9fdcc43ccc6e12c311235fb1a
|
7e993cb18933370ab83de7c9189c1d96371b7cd2
|
refs/heads/master
| 2020-03-17T01:35:35.308602
| 2018-05-19T07:32:29
| 2018-05-19T07:32:29
| 133,159,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import sets
KEYWORDS = sets.Set( (
"del", "for", "is", "raise",
"assert", "elif", "from", "lambda", "return",
"break", "else", "global", "not", "try",
"class", "except", "if", "or", "while",
"continue", "exec", "import", "pass", "yield",
"def", "finally", "in", "print", "as", "None"
))
|
[
"ct@guangdongjizhi.com"
] |
ct@guangdongjizhi.com
|
46dfc22eb40865a48197d7f5ac0164222f4d45bc
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/fabric/nodetopolicy.py
|
73f41a28e62353875c97e455cded44de58073546
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,662
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class NodeToPolicy(Mo):
meta = ClassMeta("cobra.model.fabric.NodeToPolicy")
meta.isAbstract = True
meta.moClassName = "fabricNodeToPolicy"
meta.moClassName = "fabricNodeToPolicy"
meta.rnFormat = ""
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Super Class for Relation from Node to Fabric Policies Deployed on Node"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fabric.CreatedBy")
meta.childNamesAndRnPrefix.append(("cobra.model.fabric.CreatedBy", "source-"))
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.concreteSubClasses.add("cobra.model.fabric.RsModulePolProfile")
meta.concreteSubClasses.add("cobra.model.infra.RsToInterfacePolProfile")
meta.concreteSubClasses.add("cobra.model.infra.RsToInterfaceSpPolProfile")
meta.concreteSubClasses.add("cobra.model.infra.RsBndlGrp")
meta.concreteSubClasses.add("cobra.model.fabric.RsPodPolGroup")
meta.concreteSubClasses.add("cobra.model.infra.RsVpcBndlGrp")
meta.concreteSubClasses.add("cobra.model.fabric.RsInterfacePolProfile")
meta.concreteSubClasses.add("cobra.model.infra.RsModulePolProfile")
meta.concreteSubClasses.add("cobra.model.fabric.RsNodeOverride")
meta.concreteSubClasses.add("cobra.model.infra.RsToVsanEncapInstDef")
meta.concreteSubClasses.add("cobra.model.fabric.RsCtrlrPolGroup")
meta.concreteSubClasses.add("cobra.model.infra.RsInfraNodeOverride")
meta.concreteSubClasses.add("cobra.model.fabric.RsNodePolGroup")
meta.concreteSubClasses.add("cobra.model.infra.RsToVsanAttr")
meta.concreteSubClasses.add("cobra.model.infra.RsToEncapInstDef")
meta.concreteSubClasses.add("cobra.model.infra.RsNodePolGroup")
meta.concreteSubClasses.add("cobra.model.infra.RsFexGrp")
meta.concreteSubClasses.add("cobra.model.infra.RsInterfacePolProfile")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "deplSt", "deplSt", 15582, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("delivered", "delivered", 1)
prop._addConstant("node-not-ready", "node-not-ready", 1073741824)
prop._addConstant("none", "none", 0)
prop._addConstant("not-registered-for-atg", "node-cannot-deploy-epg", 64)
prop._addConstant("not-registered-for-fabric-ctrls", "node-not-controller", 16)
prop._addConstant("not-registered-for-fabric-leafs", "node-not-leaf-for-fabric-policies", 4)
prop._addConstant("not-registered-for-fabric-node-group", "node-not-registered-for-node-group-policies", 32)
prop._addConstant("not-registered-for-fabric-oleafs", "node-not-capable-of-deploying-fabric-node-leaf-override", 2048)
prop._addConstant("not-registered-for-fabric-ospines", "node-not-capable-of-deploying-fabric-node-spine-override", 4096)
prop._addConstant("not-registered-for-fabric-pods", "node-has-not-joined-pod", 8)
prop._addConstant("not-registered-for-fabric-spines", "node-not-spine", 2)
prop._addConstant("not-registered-for-infra-leafs", "node-not-leaf-for-infra-policies", 128)
prop._addConstant("not-registered-for-infra-oleafs", "node-not-capable-of-deploying-infra-node-leaf-override", 512)
prop._addConstant("not-registered-for-infra-ospines", "node-not-capable-of-deploying-infra-node-spine-override", 1024)
prop._addConstant("not-registered-for-infra-spines", "node-not-spine-for-infra-policies", 256)
prop._addConstant("pod-misconfig", "node-belongs-to-different-pod", 8192)
prop._addConstant("policy-deployment-failed", "policy-deployment-failed", 2147483648)
meta.props.add("deplSt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 101, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
7dde14eff394ecd809ab6474ad19919b9767a399
|
68666fa5932e3fb6f6f26155d02e932e7ff6de3c
|
/ECMA_python3.5/myMedicalModel/eval_attention.py
|
a3acdbf98cffba581587b427bdbde81aa29a4287
|
[] |
no_license
|
yaopanyaopan/Chinese_medical
|
76f40200bd9f8969e80f6bc9644e352cde2daf63
|
df3fe00247a7b9e988f4f0baa0d371f29ae12b0e
|
refs/heads/master
| 2022-02-19T02:06:24.218300
| 2019-07-23T02:45:04
| 2019-07-23T02:45:04
| 198,338,635
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,586
|
py
|
# coding=utf-8
import re
import os
import numpy as np
import tensorflow as tf
import tflearn
from sklearn.utils import shuffle
from myMedicalModel.myModelNolstm_HH import SelfAttentive
from reader import load_csv, VocabDict
import encode_window
import diceEval
import data_process
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
from myMedicalModel import processAllList
'''
parse
'''
tf.app.flags.DEFINE_integer('num_epochs',20, 'number of epochs to train')
tf.app.flags.DEFINE_integer('batch_size', 1, 'batch size to train in one step')
tf.app.flags.DEFINE_integer('labels', 2, 'number of label classes')
tf.app.flags.DEFINE_integer('word_pad_length', 20, 'word pad length for training')
tf.app.flags.DEFINE_integer('decay_step', 100, 'decay steps')
tf.app.flags.DEFINE_float('learn_rate', 1e-3, 'learn rate for training optimization')
tf.app.flags.DEFINE_boolean('shuffle', True, 'shuffle data FLAG')
tf.app.flags.DEFINE_boolean('train', True, 'train mode FLAG')
tf.app.flags.DEFINE_boolean('visualize', True, 'visualize FLAG')
tf.app.flags.DEFINE_boolean('penalization', False, 'penalization FLAG')
tf.app.flags.DEFINE_boolean('usePreVector', False, 'preVector FLAG')
FLAGS = tf.app.flags.FLAGS
num_epochs = FLAGS.num_epochs
batch_size = FLAGS.batch_size
tag_size = FLAGS.labels
word_pad_length = FLAGS.word_pad_length
lr = FLAGS.learn_rate
usePreVector=FLAGS.usePreVector
word_vecs=None
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+", re.UNICODE)
def token_parse(iterator):
for value in iterator:
return TOKENIZER_RE.findall(value)
tokenizer = tflearn.data_utils.VocabularyProcessor(word_pad_length, tokenizer_fn=lambda tokens: [token_parse(x) for x in tokens])
label_dict = VocabDict()
def plotMetrics(step_losses,epoch_lossacc,preName,preNum):
pic_path = '../myMedicalModel/forTest/'
steps = np.arange(0, len(step_losses), 1)
plot_pic(
title="Training epoch Loss",
x_content=steps,
y_content=step_losses,
xlabel="Epochs",
ylabel="Loss ",
xlim=(0, steps[-1]),
path=pic_path + "train_loss_"+preName+"_100_0.001_"+preNum+".svg"
)
plot_pic(
title="Training Epoch Acc",
x_content=steps,
y_content=epoch_lossacc,
xlabel="Epochs",
ylabel="Acc ",
xlim=(0, steps[-1]),
path=pic_path + "train_acc_"+preName+"_100_0.001_"+preNum+".svg"
)
def plot_pic(title, x_content, y_content, xlabel, ylabel, xlim, path):
print(" - [Info] Plotting metrics into picture " + path)
plt.rcParams['font.sans-serif'] = ['Arial']
plt.rcParams['axes.unicode_minus'] = True
plt.figure(figsize=(10, 5))
plt.grid(linestyle="--")
plt.xlim(xlim)
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.title(title, fontsize=14, fontweight='bold')
plt.plot(x_content, y_content)
plt.xlabel(xlabel, fontsize=13, fontweight='bold')
plt.ylabel(ylabel, fontsize=13, fontweight='bold')
plt.savefig(path, format='svg')
plt.clf()
def string_parser(arr, fit):
if fit == False:
return list(tokenizer.transform(arr))
else:
return list(tokenizer.fit_transform(arr))
preNum='1'
preName='QRJD' #69
medical_avg_count=[]
x_label=[]
allList=[]
model = SelfAttentive()
with tf.Session() as sess:
#load train data
print('load train data')
words, tags = load_csv('../data/trainTCM/TCM_train_%s.csv'%preName, target_columns=[0], columns_to_ignore=None,
target_dict=label_dict,usePreVector=usePreVector)
# print('zz',words)
vocab_list={}
    # zsy: decide whether to use pre-trained word vectors - start
if usePreVector== True:
input_iter = encode_window.create_document_iter(words)
vocab = encode_window.encode_dictionary(input_iter)
vocab_list = vocab.vocabulary_._mapping
word_vecs = encode_window.load_bin_vec("../medicalVector/model/medicalCorpus_50d.model", vocab_list)
word_input = encode_window.encode_word(words, vocab,word_pad_length)
else:
words = string_parser(words, fit=True)
if FLAGS.shuffle == True:
words, tags = shuffle(words, tags)
word_input = tflearn.data_utils.pad_sequences(words, maxlen=word_pad_length)
# print('vvv', word_input)
    # zsy: decide whether to use pre-trained word vectors - end
# build graph
model.build_graph(n=word_pad_length,usePreVector=usePreVector,vectors=word_vecs)
# Downstream Application
with tf.variable_scope('DownstreamApplication'):
global_step = tf.Variable(0, trainable=False, name='global_step')
learn_rate = tf.train.exponential_decay(lr, global_step, FLAGS.decay_step, 0.95, staircase=True)
labels = tf.placeholder('float32', shape=[None, tag_size])
net = tflearn.fully_connected(model.M, 50, activation='relu')
logits = tflearn.fully_connected(net, tag_size, activation=None)
loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits), axis=1)
loss = tf.reduce_mean(loss)
params = tf.trainable_variables()
#clipped_gradients = [tf.clip_by_value(x, -0.5, 0.5) for x in gradients]
optimizer = tf.train.AdamOptimizer(learn_rate)
# optimizer = tf.train.GradientDescentOptimizer(learn_rate)
grad_and_vars = tf.gradients(loss, params)
opt = optimizer.apply_gradients(zip( grad_and_vars, params), global_step=global_step)
# Start Training
sess.run(tf.global_variables_initializer())
total = len(word_input)
step_print = int((total/batch_size) / 20)
epoch_losslist=[]
epoch_lossacc = []
if FLAGS.train == True:
print('start training')
for epoch_num in range(num_epochs):
epoch_loss = 0
step_loss = 0
allnum = 0
epoch_acc = 0
for i in range(int(total/batch_size)):
batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
train_ops = [opt, loss, learn_rate, global_step,logits]
if FLAGS.usePreVector== True:
result = sess.run(train_ops, feed_dict={model.input_pl: batch_input, labels: batch_tags})
else:
result = sess.run(train_ops, feed_dict={model.input_pl: batch_input, labels: batch_tags})
arr = result[4][0].tolist()
# print(arr.index(max(arr)),batch_tags[0].index(max(batch_tags[0])))
if arr.index(max(arr))==batch_tags[0].index(max(batch_tags[0])):
epoch_acc+=1
allnum+=1
step_loss += result[1]
epoch_loss += result[1]
print('***')
print('epoch {%s}: (global_step: {%s}), Average Loss: {%s})'%(epoch_num,result[3],(epoch_loss/(total/batch_size))))
print('***\n')
epoch_losslist.append(epoch_loss/(total/batch_size))
epoch_lossacc.append(float(epoch_acc)/allnum)
saver = tf.train.Saver()
saver.save(sess, '../myMedicalModel/forTest/0617_model_noLstmHH_r1_%s_epoches%s_num%s.ckpt'%(preName,FLAGS.num_epochs,preNum))
else:
saver = tf.train.Saver()
saver.restore(sess, '../myMedicalModel/forTest/0617_model_noLstmHH_r1_%s_epoches%s_num%s.ckpt'%(preName,FLAGS.num_epochs,preNum))
# plotMetrics(epoch_losslist,epoch_lossacc,preName,preNum)
allDice=[]
lenList=[]
evalCount = 0
for a in range(1,51):
a=float(a)/100
x_label.append(round(a,2))
print('start testing')
words, tags = load_csv('../data/aprioriData/TCM_train_%s.csv' % preName, target_columns=[0], columns_to_ignore=None,
target_dict=label_dict)
words_with_index = string_parser(words, fit=True)
word_input = tflearn.data_utils.pad_sequences(words_with_index, maxlen=word_pad_length)
total = len(word_input)
evalNum = total-1
rs = 0.
#load evalData start
evalData=[]
evalCav='../data/evalData/%s_evaluate.csv'%preName
evalList=data_process.read_csv(evalCav)
for item in evalList:
evalData.append(item)
# load evalData end
if FLAGS.visualize == True:
f = open('../myMedicalModel/forTest/%s_visualizeTCM_%s_noLSTM_HH_epoches%s_r1_num%s.html'%(preName,preName,FLAGS.num_epochs,preNum), 'w')
f.write('<html style="margin:0;padding:0;"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><body style="margin:0;padding:0;">\n')
for i in range(int(total/batch_size)):
batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
result = sess.run([logits, model.B,model.Q], feed_dict={model.input_pl: batch_input, labels: batch_tags})
    # arr stores the predicted probabilities
arr = result[0]
if not np.argmax(arr[0]):
preClass=True
else:
preClass = False
for j in range(len(batch_tags)):
if np.argmax(batch_tags[j])==0:
if np.argmax(arr[j]) == np.argmax(batch_tags[j]):
evalCount+=1
rs+=np.sum(np.argmax(arr[j]) == np.argmax(batch_tags[j]))
medicalList=[]
if FLAGS.visualize == True:
f.write('<div style="margin:15px;">\n')
    # result[1][0] stores the attention factor for each herb in the prescription;
    # individual values are read out as result[1][0][k][j]
for k in range(len(result[1][0])):
f.write('\t<p> —— 测试方剂 %s (类标:%s ; 预测类标:%s):—— </p>\n'%(i, tags[i],preClass))
f.write('<p style="margin:10px;font-family:SimHei">\n')
ww = TOKENIZER_RE.findall(words[i*batch_size][0])
for j in range(word_pad_length):
if result[1][0][k][j] <a:
result[1][0][k][j]=0
alpha = "{:.2f}".format(result[1][0][k][j])
if len(ww) <= j:
w = " "
else:
w = ww[j]
if result[1][0][k][j]>=a:
medicalList.append(w)
f.write('\t<span style="margin-left:3px;background-color:rgba(255,0,0,%s)">%s</span>\n'%(alpha,w))
f.write('</p>\n')
if i < evalNum:
if preClass == True:
print('配伍评估药组:', medicalList)
allDice.append(diceEval.evalMedicalDice(medicalList, evalData))
allList.append(medicalList)
lenList.append(len(medicalList))
else:
allDice.append(0)
if i < evalNum:
f.write('\t<b>配伍评估药组: %s ,dice = %s</b>\n' % (','.join(medicalList),allDice[i]))
f.write('</div>\n')
if FLAGS.visualize == True:
f.write('\t<p>Test accuracy: %s</p>\n' % (rs / total))
f.write('\t<p>该功效下%s个经典方剂(即测试集前%s个方剂) accuracy :%s</p>\n' % (evalNum,evalNum,evalCount / evalNum))
f.write('\t<p>该功效下%s个经典方剂 avg-dice : %s</p>\n' % (evalNum,sum(allDice)/evalNum))
f.write('</body></html>')
f.close()
print('Test accuracy(all test data): %s'%(rs/total))
print('该功效下%s个经典方剂(即测试集前%s个方剂)的accuracy评估:%s' % (evalNum,evalNum,evalCount / evalNum))
print('allDice,evalCount',allDice,evalCount)
sumValue=0
for i in allDice[:evalCount]:
sumValue+=i
print('avg-dice:%s'%(sumValue/evalCount))
print('平均药味数:%s' % (sum(lenList) / evalCount))
medical_avg_count.append(sum(lenList) / evalCount)
# print('lenList',lenList)
# print('medical_avg_count',medical_avg_count)
sess.close()
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)  # step two
plt.figure()
plt.plot(x_label,medical_avg_count)
plt.xlabel("自注意力阈值",fontproperties=font)
plt.ylabel("平均配伍药味数",fontproperties=font)
plt.title("不同阈值下平均药味数统计",fontproperties=font)
plt.savefig("自注意力因子阈值设置0.1_0.5.jpg")
plt.figure()
plt.plot(x_label[:10],medical_avg_count[:10])
plt.xlabel("自注意力阈值",fontproperties=font)
plt.ylabel("平均配伍药味数",fontproperties=font)
plt.title("不同阈值下平均药味数统计",fontproperties=font)
plt.savefig("自注意力因子阈值设置0.01_0.1.jpg")
# processAllList.processAll(allList,'forTest')
data_process.write_list_in_csv('a.csv',medical_avg_count)
data_process.write_list_in_csv('b.csv',x_label)
|
[
"2968186105@qq.com"
] |
2968186105@qq.com
|
88dbf7980b4c0dacaebc3668465441c1d408b2c5
|
116a2ae77f946858d97d9d950c7b360c603104d7
|
/boa/gen/scope.py
|
190dc118cd4caf77b703d939cbd053f2ace851da
|
[] |
no_license
|
maxov/boa
|
641aa5503cbc58ea06c16260bcefc3ff9fa2f0f8
|
2bc35e2a5aee3d87ab3f05d234b6c732024d1d1f
|
refs/heads/master
| 2021-05-27T20:09:13.009161
| 2014-07-12T19:09:10
| 2014-07-12T19:09:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,322
|
py
|
from boa.gen.constants import BOA_MODULE_CONSTANT_NAME
class Binding(object):
# represents a binding of a name to a value
def __init__(self, name, val):
# create a binding object
self.name = name
self.val = val
class Scope(object):
# represents a scope with bindings
def __init__(self):
# create a scope object
self.bindings = []
def binding(self, name, val):
# add a bindings
binding = self.get_binding(name)
if binding is not None:
binding.val = val
else:
self.add_binding(Binding(name, val))
def get_binding(self, name):
# do I have this binding?
for x in self.bindings:
if x.name is name:
return x
return None
def add_binding(self, binding):
# add a binding
self.bindings.append(binding)
def refer(self, name):
# get what a binding should be to referred as
binding = self.get_binding(name)
if binding:
return binding.name
#def declarations(self):
# get the declarations
#return 'var ' + ', '.join(x.name for x in self.bindings) if len(self.bindings) > 0 else ''
class ModuleScope(Scope):
# scope on the module level, or as python calls it, global level
def refer(self, name):
# a binding is referred to by the 'module' object
binding = self.get_binding(name)
if binding:
return BOA_MODULE_CONSTANT_NAME + '.' + binding.name
else:
return BOA_MODULE_CONSTANT_NAME + '.' + name
class LocalScope(Scope):
# scope that has a parent scope or inherited scope
def __init__(self, parent):
# initialize with the parent scope
super(LocalScope, self).__init__()
self.parent = parent
def get_binding(self, name):
# do I have this binding?
for x in self.bindings:
if x.name is name:
return x
# check the parent's binding
return self.parent.get_binding(name)
def refer(self, name):
# a binding is referred to by the local scope object
binding = self.get_binding(name)
if binding:
return binding.name
else:
return self.parent.refer(name)
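# Illustrative usage (hypothetical, not part of the original file):
#   module = ModuleScope()
#   module.binding('x', some_value)
#   module.refer('x')           # prefixes the name with the module constant
#   local = LocalScope(module)
#   local.refer('x')            # found via the parent scope's bindings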
|
[
"max@ovsankin.com"
] |
max@ovsankin.com
|
5b5b935227c53eccc8bd5e5cdbd56cb97c222c33
|
9dd729a238a97966d3c49af06f61450bd89dc00d
|
/accounts/tests/test_view_signup.py
|
bc38aafed2fba618e58cbff9d9fef1ebcc795b2e
|
[] |
no_license
|
sigfrido64/asc-siw
|
5a449c4d73fb913019a75021d03a024110364f86
|
619cd82375ba3fa0829f62361fc850ae48635a1f
|
refs/heads/master
| 2022-07-22T23:10:51.705175
| 2019-02-10T22:00:45
| 2019-02-10T22:00:45
| 120,227,961
| 0
| 0
| null | 2022-07-06T19:26:38
| 2018-02-04T22:10:53
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
# coding=utf-8
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import resolve, reverse
from ..forms import SignUpForm
from ..views import signup
class SignUpTests(TestCase):
fixtures = ['af']
def setUp(self):
url = reverse('signup')
self.response = self.client.get(url)
def test_signup_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_signup_url_resolves_signup_view(self):
view = resolve('/signup/')
self.assertEquals(view.func, signup)
def test_csrf(self):
self.assertContains(self.response, 'csrfmiddlewaretoken')
def test_contains_form(self):
form = self.response.context.get('form')
self.assertIsInstance(form, SignUpForm)
def test_form_inputs(self):
"""
The view must contain five inputs: csrf, username, email, password1, password2
"""
self.assertContains(self.response, '<input', 5)
self.assertContains(self.response, 'type="text"', 1)
self.assertContains(self.response, 'type="email"', 1)
self.assertContains(self.response, 'type="password"', 2)
class SuccessfulSignUpTests(TestCase):
fixtures = ['af']
def setUp(self):
url = reverse('signup')
data = {
'username': 'john',
'email': 'john@doe.com',
'password1': 'abcdef123456',
'password2': 'abcdef123456'
}
self.response = self.client.post(url, data)
self.home_url = reverse('home')
def test_redirection(self):
"""
A valid form submission should redirect the user to the home page
"""
self.assertRedirects(self.response, self.home_url)
def test_user_creation(self):
self.assertTrue(User.objects.exists())
def test_user_authentication(self):
"""
Create a new request to an arbitrary page.
The resulting response should now have an `user` to its context, after a successful sign up.
"""
response = self.client.get(self.home_url)
user = response.context.get('user')
self.assertTrue(user.is_authenticated)
class InvalidSignUpTests(TestCase):
fixtures = ['af']
def setUp(self):
url = reverse('signup')
self.response = self.client.post(url, {}) # submit an empty dictionary
def test_signup_status_code(self):
"""
An invalid form submission should return to the same page
"""
self.assertEquals(self.response.status_code, 200)
def test_form_errors(self):
form = self.response.context.get('form')
self.assertTrue(form.errors)
def test_dont_create_user(self):
self.assertFalse(User.objects.exists())
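# Illustrative (assumed app layout, not part of the original file):
#   python manage.py test accounts.tests.test_view_signup
# runs only these sign-up view tests; the 'af' fixture referenced above must exist
# in one of the project's fixture directories.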
|
[
"sigfrido.pilone@gmail.com"
] |
sigfrido.pilone@gmail.com
|
2b7b3f26a0a071871daa6d9510b94e09410413ec
|
5e2605c10d7ab7838d6a5caa09bf61381a70e055
|
/.venv/bin/pip
|
5a188b5c5f641c3fae63657357ed75ee726348b0
|
[] |
no_license
|
sleekEagle/pcr_cloud
|
08d7eefce4836c1807a4cc806e61e8c9de9950bc
|
8a3f876e06d23e21979c25455d4adcb1880295df
|
refs/heads/master
| 2023-02-22T13:40:16.718241
| 2022-11-03T15:14:59
| 2022-11-03T15:14:59
| 217,382,660
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
#!/home/sleekeagle/works/pcr/repo/pcr_cloud/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"lahirunuwanisme@gmail.com"
] |
lahirunuwanisme@gmail.com
|
|
b1afda3e5255fc90f7617880094b5585b6f6f29f
|
20717dd660bfbaeb88311ea4d3527338cfc4d086
|
/checkout/models.py
|
df2f5f51923be694ffa5fd24998186846ccd5381
|
[] |
no_license
|
Code-Institute-Submissions/auto-tints
|
8f6f8ddba8d3ec933078a4a3527c31ca6cf4dc92
|
fdde47397a12c04aacde24a806283d9dee1225be
|
refs/heads/main
| 2023-09-01T06:03:40.963429
| 2021-10-27T02:23:42
| 2021-10-27T02:23:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,694
|
py
|
import uuid
from django.db import models
from django.db.models import Sum
from django.conf import settings
from products.models import Product
from django_countries.fields import CountryField
from pages.models import UserProfile
class Order(models.Model):
order_number = models.CharField(max_length=32, null=False, editable=False)
user_profile = models.ForeignKey(UserProfile, on_delete=models.SET_NULL,
null=True, blank=True,
related_name='orders')
full_name = models.CharField(max_length=50, null=False, blank=False)
email = models.EmailField(max_length=254, null=False, blank=False)
phone_number = models.CharField(max_length=20, null=False, blank=False)
country = CountryField(blank_label="Country *", null=False, blank=False)
postcode = models.CharField(max_length=20, blank=True, null=True)
town_or_city = models.CharField(max_length=40, null=False, blank=False)
street_address1 = models.CharField(max_length=80, null=False, blank=False)
street_address2 = models.CharField(max_length=80, blank=True, null=True)
county = models.CharField(max_length=80, blank=True, null=True)
date = models.DateTimeField(auto_now_add=True)
delivery_cost = models.DecimalField(max_digits=6, decimal_places=2,
null=False, default=0)
order_total = models.DecimalField(max_digits=10, decimal_places=2,
null=False, default=0)
grand_total = models.DecimalField(max_digits=10, decimal_places=2,
null=False, default=0)
original_cart = models.TextField(null=False, blank=False, default='')
stripe_pid = models.CharField(max_length=254, null=False,
blank=False, default='')
def _generate_order_number(self):
"""
Generate a unique order number
"""
return uuid.uuid4().hex.upper()
def update_total(self):
"""
Update grand total each time a new item is added.
"""
self.order_total = self.lineitems.aggregate(
Sum('lineitem_total'))['lineitem_total__sum'] or 0
self.grand_total = self.order_total + settings.DELIVERY_CHARGE
self.delivery_cost = settings.DELIVERY_CHARGE
self.save()
def save(self, *args, **kwargs):
"""
Override the original save method to set the order number
if it hasn't been set already.
"""
if not self.order_number:
self.order_number = self._generate_order_number()
super().save(*args, **kwargs)
def __str__(self):
return self.order_number
class OrderLineItem(models.Model):
order = models.ForeignKey(Order, null=False, blank=False,
on_delete=models.CASCADE,
related_name='lineitems')
product = models.ForeignKey(Product, null=False,
blank=False, on_delete=models.CASCADE)
quantity = models.IntegerField(null=False, blank=False, default=0)
lineitem_total = models.DecimalField(max_digits=6, decimal_places=2,
null=False, blank=False,
editable=False)
def save(self, *args, **kwargs):
"""
Override the original save method to calculate
total correctly if ordering multiple items.
"""
self.lineitem_total = self.product.price * self.quantity
super().save(*args, **kwargs)
def __str__(self):
return f'SKU {self.product.sku} on order {self.order.order_number}'
|
[
"jakubmrowicki96@gmail.com"
] |
jakubmrowicki96@gmail.com
|
b07204cd7cc358a55e848dff2fb18b8ff76f36b9
|
06eef71f28a395cbb9b2a6b3bb6ba8138318d054
|
/projectorGeom.py
|
18f3c0b1374365a8392ac20e5133d6a63aa83077
|
[] |
no_license
|
Rookieokky/3d-display
|
c4408e64d2c42f8203aa5c54d4551d8a85bbf685
|
49dad5a539531b0f126fde95cc6e97d07b014a59
|
refs/heads/master
| 2021-05-30T15:54:09.315334
| 2014-07-01T04:18:44
| 2014-07-01T04:18:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
def computeGrid(scn, showGrid, size):
import pointInside, makeCube, gridGeom, bpy, mathutils, imp
imp.reload(pointInside)
imp.reload(makeCube)
imp.reload(gridGeom)
xmax = size[0]
ymax = size[1]
zmax = size[2]
table= [ [ 0 for i in range(xmax) ] for j in range(ymax) ]
# rol, col
for d1 in range(ymax):
for d2 in range(xmax):
# perform calculations
x = d2 - ((xmax - 1)/2)
y = d1 - ((ymax - 1)/2)
z = gridGeom.helicoid_half(x, y, zmax)
pt = mathutils.Vector((x, y, z))
table[d1][d2] = 0
for obj in bpy.data.objects:
if obj.name != 'Lamp' and obj.name != 'Camera' and not 'grid' in obj.name:
if pointInside.pointInsideMesh(obj, pt) == 1:
table[d1][d2] = 1
break
if showGrid:
name = "grid" + str(d1) + "." + str(d2)
grid_obj = makeCube.createCube(scn, (x, y, z), 0.5, name)
# change color
if table[d1][d2] == 1:
mat = bpy.data.materials.new('visuals')
mat.diffuse_color = (1.0, 0.3, 0.0)
if len(grid_obj.data.materials) < 1:
grid_obj.data.materials.append(mat)
else:
grid_obj.data.materials[0] = mat
return table
if __name__ == '__main__':
import bpy
scn = bpy.context.scene
t = computeGrid(scn, True, (10,10,10))
print('results of grid:')
print(t)
|
[
"babraham@appnexus.com"
] |
babraham@appnexus.com
|
d498464e187eab0a0e5b7f34d9495f1fab2c8587
|
3b10be6f883547f44a67983e2b04be6d7808e007
|
/bin/evaluate-timelines
|
52cc720d01ce9fd4e999841bfb9dddd079f98449
|
[
"MIT"
] |
permissive
|
natsudalkr/tilse
|
62f8ef9b4ab03202b8b603a62edaff80d291c97f
|
8b4fdb6077663c57f0923d4e68820ba3d73dafe7
|
refs/heads/master
| 2021-01-18T15:38:37.729296
| 2017-03-20T16:13:45
| 2017-03-20T16:13:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,982
|
#!/usr/bin/env python
import argparse
import logging
import os
import codecs
from collections import defaultdict
from tilse.data import timelines
from tilse.evaluation import metrictests, rouge
def get_scores(metric_desc, pred_tl, groundtruth, evaluator):
if metric == "concat":
return evaluator.evaluate_concat(pred_tl, groundtruth)
elif metric == "agreement":
return evaluator.evaluate_agreement(pred_tl, groundtruth)
elif metric == "align_date_costs":
return evaluator.evaluate_align_date_costs(pred_tl, groundtruth)
elif metric == "align_date_content_costs":
return evaluator.evaluate_align_date_content_costs(pred_tl, groundtruth)
elif metric == "align_date_content_costs_many_to_one":
return evaluator.evaluate_align_date_content_costs_many_to_one(pred_tl, groundtruth)
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
parser = argparse.ArgumentParser(description="Evaluate timelines")
parser.add_argument('-p', dest="predicted", type=str, help='Predicted timelines',
required=True)
parser.add_argument('-r', dest="reference", type=str, help='Reference timelines',
required=True, nargs="*")
parser.add_argument('-m', dest="metric", type=str, help='Metric to use',
required=True)
args = parser.parse_args()
predicted = timelines.Timeline.from_file(codecs.open(args.predicted, "r", "utf-8", "replace"))
reference = args.reference
metric = args.metric
temp_ref_tls = []
for filename in args.reference:
temp_ref_tls.append(
timelines.Timeline.from_file(codecs.open(filename, "r", "utf-8", "replace"))
)
reference_timelines = timelines.GroundTruth(temp_ref_tls)
evaluator = rouge.TimelineRougeEvaluator(measures=["rouge_1", "rouge_2"])
scores = get_scores(metric, predicted, reference_timelines, evaluator)
print(scores)
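# Example invocation (illustrative only; the file names are made up):
#   ./evaluate-timelines -p predicted.txt -r ref_a.txt ref_b.txt -m concat
# where -m takes any of the metric names handled in get_scores above.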
|
[
"sebastian.martschat@gmail.com"
] |
sebastian.martschat@gmail.com
|
|
81e637c137eb35264303fc69b8323a2a2287261a
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/6/o8-.py
|
9c6d0d0413aeff42ae26f9f44f8d70275e890256
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'o8-':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
6bd2dd8a1dc0c5152bff3d6838c8a6ca248dba0b
|
96e93d15c859b96d8627d32d5d1b8378cec63693
|
/database/migrations/20200628_01_cm3XQ-add-users.py
|
73a85452e62b9c6b603d64933f44a854633e9d98
|
[] |
no_license
|
lekha/jeopardy
|
ebff871883b5956e6536f7b0c734c4d0bf0611af
|
e29694da68b7f73002279ede396b4dcb88db8119
|
refs/heads/master
| 2023-01-24T04:11:39.074244
| 2020-08-05T11:37:40
| 2020-08-05T11:38:05
| 131,525,151
| 0
| 0
| null | 2023-01-06T09:49:37
| 2018-04-29T19:55:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,890
|
py
|
"""
Add users
"""
from yoyo import step
__depends__ = {'20200621_02_tnF6J-add-team-and-game-models'}
create_user_metadata = """
CREATE TABLE user_metadata_anonymous (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
created_ts DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
updated_ts DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
expire_ts DATETIME(6) NOT NULL
)
CHARACTER SET utf8mb4;
CREATE TABLE user_metadata_google (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
created_ts DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
updated_ts DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
subject VARCHAR(255) NOT NULL UNIQUE,
email VARCHAR(255) NOT NULL,
given_name VARCHAR(255),
issuer VARCHAR(255) NOT NULL,
family_name VARCHAR(255),
name VARCHAR(255),
locale VARCHAR(255),
picture VARCHAR(255)
)
CHARACTER SET utf8mb4;
"""
drop_user_metadata = """
DROP TABLE user_metadata_google;
DROP TABLE user_metadata_anonymous;
"""
create_users = """
CREATE TABLE users (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
created_ts DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
updated_ts DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
display_name VARCHAR(255) NOT NULL,
is_active BOOL NOT NULL DEFAULT 1,
auth_provider ENUM("none", "google") NOT NULL,
anonymous_metadata_id BIGINT,
google_metadata_id BIGINT,
CONSTRAINT fk_user_anonymous_metadata FOREIGN KEY (anonymous_metadata_id) REFERENCES user_metadata_anonymous (id) ON DELETE RESTRICT,
CONSTRAINT fk_user_google_metadata FOREIGN KEY (google_metadata_id) REFERENCES user_metadata_google (id) ON DELETE RESTRICT
)
CHARACTER SET utf8mb4;
"""
drop_users = "DROP TABLE users;"
point_foreign_keys_to_users = """
ALTER TABLE games
DROP FOREIGN KEY fk_games_players,
ADD CONSTRAINT fk_games_users FOREIGN KEY (owner_id) REFERENCES users (id) ON DELETE CASCADE;
DROP TABLE players;
"""
point_foreign_keys_to_players = """
ALTER TABLE games
DROP FOREIGN KEY fk_games_users;
CREATE TABLE players (
id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,
created_ts DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
updated_ts DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
name VARCHAR(255) NOT NULL
)
CHARACTER SET utf8mb4;
INSERT INTO players (id, created_ts, updated_ts, name)
SELECT id, created_ts, updated_ts, display_name
FROM users;
ALTER TABLE games
ADD CONSTRAINT fk_games_players FOREIGN KEY (owner_id) REFERENCES players (id) ON DELETE CASCADE;
"""
steps = [
step(create_user_metadata, drop_user_metadata),
step(create_users, drop_users),
step(point_foreign_keys_to_users, point_foreign_keys_to_players),
]
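# Note (illustrative, not part of the original migration): yoyo runs each
# step(apply_sql, rollback_sql) pair above in order on `yoyo apply` and undoes
# them in reverse order on `yoyo rollback`.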
|
[
"305097+lekha@users.noreply.github.com"
] |
305097+lekha@users.noreply.github.com
|
48f6bf7eed3e7ed029e76a1561da9c2b9fd6b645
|
4488e3c26de4291da447d8251c491b43cb810f7c
|
/account_banking_payment_export/model/payment_mode.py
|
798c8ed20daab08128d6d0b68c1d1b223e11f9d5
|
[] |
no_license
|
smart-solution/odoo-crm-80
|
b19592ce6e374c9c7b0a3198498930ffb1283018
|
85dfd0cc37f81bcba24d2a0091094708a262fe2c
|
refs/heads/master
| 2016-09-06T06:04:35.191924
| 2015-07-14T12:48:28
| 2015-07-14T12:48:28
| 33,174,511
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,285
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class payment_mode(orm.Model):
''' Restoring the payment type from version 5,
used to select the export wizard (if any) '''
_inherit = "payment.mode"
def suitable_bank_types(self, cr, uid, payment_mode_id=None, context=None):
""" Reinstates functional code for suitable bank type filtering.
        Current code in account_payment is dysfunctional.
"""
res = []
payment_mode = self.browse(
cr, uid, payment_mode_id, context)
if (payment_mode and payment_mode.type and
payment_mode.type.suitable_bank_types):
res = [t.code for t in payment_mode.type.suitable_bank_types]
return res
_columns = {
'type': fields.many2one(
'payment.mode.type', 'Payment type',
required=True,
help='Select the Payment Type for the Payment Mode.'
),
'payment_order_type': fields.related(
'type', 'payment_order_type', readonly=True, type='selection',
selection=[('payment', 'Payment'), ('debit', 'Direct debit')],
string="Payment Order Type"),
}
|
[
"fabian.semal@smartsolution.be"
] |
fabian.semal@smartsolution.be
|
0da8f2ee1dac97eab30cc8d96672dac2ef752180
|
6253cb645eaa09c19645750946fbf1f0b0bc4fc7
|
/visualize.py
|
63a382f4eafd1c61bd21c3a5bbfb7491a587ff81
|
[] |
no_license
|
Alevs2R/trajectory_planning
|
fb1c0da39b5b2cfcb7606302d36e2298bd026d5f
|
410c4e784efbeb518ba6cf565b39901dba79cf02
|
refs/heads/master
| 2020-04-05T16:44:26.590239
| 2018-11-26T13:06:44
| 2018-11-26T13:06:44
| 157,026,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,317
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from kinematics import forw_kin
from robot_configuration import L
def motion_plot(pos, v, acc):
time = np.arange(0, v.shape[1] * 0.01 - 0.005, 0.01)
plt.figure(1)
plt.ylabel('position, rad')
plt.xlabel('time, s')
plt.title('joint positions')
for i in range(0, v.shape[0]):
plt.plot(time, pos[i, :], 'C'+str(i+1))
plt.savefig('plots/joint_positions.png')
plt.show()
plt.figure(2)
    plt.ylabel('velocity, rad/s')
plt.xlabel('time, s')
plt.title('joint velocities')
for i in range(0, v.shape[0]):
plt.plot(time, v[i, :], 'C'+str(i+1))
plt.savefig('plots/joint velocities.png')
plt.show()
plt.figure(3)
    plt.ylabel('acceleration, rad/s^2')
    plt.xlabel('time, s')
plt.title('joint accelerations')
cartesian_pos = np.zeros(shape=(3, pos.shape[1]))
for i in range(0, v.shape[0]):
plt.plot(time, acc[i, :], 'C'+str(i+1))
plt.savefig('plots/joint_accelerations.png')
plt.show()
for i in range(0, pos.shape[1]):
cartesian_pos[:, i] = forw_kin(pos[:, i].flatten(), L)
# plt.figure(4)
# ax = plt.axes(projection='3d')
# ax.set_xlabel('X axis')
# ax.set_ylabel('Y axis')
# ax.set_zlabel('Z axis')
# ax.plot3D(cartesian_pos[0, :].flatten(), cartesian_pos[1, :].flatten(), cartesian_pos[2:, ].flatten(), 'gray')
# plt.show()
plt.figure(4)
plt.ylabel('position, m')
plt.xlabel('time, s')
plt.title('X cartesian axis')
plt.plot(time, cartesian_pos[0])
plt.ylim([-3, 10])
plt.savefig('plots/x_position.png')
plt.show()
plt.figure(5)
plt.ylabel('position, m')
plt.xlabel('time, s')
plt.title('Y cartesian axis')
plt.plot(time, cartesian_pos[1])
plt.ylim([-3, 7])
plt.savefig('plots/y_position.png')
plt.show()
plt.figure(6)
plt.ylabel('position, m')
plt.xlabel('time, s')
plt.title('Z cartesian axis')
plt.plot(time, cartesian_pos[2])
plt.ylim([-7, 2])
plt.savefig('plots/z_position.png')
plt.show()
|
[
"a.evlampev@dooglys.com"
] |
a.evlampev@dooglys.com
|
68a04c7e9e3f2d829fdc061ddb6ed9582d837f10
|
2879edc657c192002ae13fcdd3296b5127d1e9a2
|
/dp_eu_gdpr/models/eu_gdpr_log.py
|
525908b2a9f03769b92b73ab375830d007be58d5
|
[] |
no_license
|
safiyaayoub/gdpr
|
9525892893afb49b97f70a032d5a6845a25a1b66
|
d640a4c4786ca461d7a8e384420134ce0be1971d
|
refs/heads/master
| 2022-11-07T16:00:58.096341
| 2020-06-23T13:49:28
| 2020-06-23T13:49:28
| 274,417,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
# Copyright 2018-Today datenpol gmbh (<https://www.datenpol.at/>)
# License OPL-1 or later (https://www.odoo.com/documentation/user/13.0/legal/licenses/licenses.html#licenses).
from odoo import fields, models, _
class GDPRLog(models.Model):
_name = 'eu.gdpr_log'
_description = 'GDPR Log'
_order = 'create_date DESC'
name = fields.Char('Name', compute='_compute_name')
    date = fields.Datetime('Creation date', required=True, default=fields.Datetime.now)
user_id = fields.Many2one('res.users', 'User', required=True, default=lambda self: self._uid)
operation = fields.Char('Operation')
object = fields.Char('Object')
dataset = fields.Char('Data Set')
partner = fields.Char('Partner', help='Person who executed the operation (e. g. Customer)')
note = fields.Char('Notes')
def _compute_name(self):
        for record in self:
            record.name = _('EU-GDPR Logentry #%s') % record.id
|
[
"noreply@github.com"
] |
noreply@github.com
|
1858a3d914607191dbfee1f4dbec29d465bc1d45
|
07d6366ff64b98896d7cda1df9e0fe955339d5ab
|
/__main__.py
|
60ffdd669ba0dc79f5c8c4e768b531c778589fe0
|
[] |
no_license
|
lwxted/takemehome
|
0e2980194c28332979c5110ac6ad46550422cfd3
|
5da25cb4db76d4bd8ce341ee0cfcf39fb25ad214
|
refs/heads/master
| 2021-01-21T05:00:38.042011
| 2016-06-22T22:20:13
| 2016-06-22T22:20:13
| 40,530,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import importlib
import sys
from takemehome.util.color_print import cprint
def main(process_name, process_args):
cprint.p('Importing module {}...'.format(process_name))
process_module = importlib.import_module('takemehome.processes.{}'.format(process_name))
process_class = getattr(process_module, process_name)
if process_class:
try:
process_class.main(*process_args)
except Exception, e:
print process_class.usage()
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: invocation_routine <process_name> <process_args>'
exit()
main(sys.argv[1], sys.argv[2:])
|
[
"tedli@dropbox.com"
] |
tedli@dropbox.com
|
43ca51fd48fed5140fdc3d3cd9190c260124a9b0
|
7298cc9d339852fac5afc598c30ac60c3f752af8
|
/python/tictactoe.py
|
1ed8cc57bf4fb9ff09fb7e77227942b834992da0
|
[
"MIT"
] |
permissive
|
benshanahan1/coderepo
|
c94d6ade7a29324877633cf0591d12c775bff57a
|
d2389a764fa8936f638e556779dabd8b8febe390
|
refs/heads/master
| 2020-12-30T23:59:45.403106
| 2018-03-05T15:52:53
| 2018-03-05T15:52:53
| 80,560,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,880
|
py
|
#!/usr/bin/env python
# Simple Two-Player / Computer AI TicTacToe Game in Python.
# Code by Benjamin Shanahan, 7 July 2015.
#
# Note that there is currently no checking for a TIE-GAME condition.
class TicTacToe(object):
def __init__(self, p1_name, p2_name):
# Define defaults for resetting the game when done
self.board_default = [["1", "2", "3"], ["4", "5", "6"], ["7", "8", "9"]]
self.last_move_p_number_default = 2
self.last_move_loc_default = None
self.reset()
self.p1 = Player(p1_name, "x", 1, False)
self.p2 = Player(p2_name, "o", 2, False)
def draw(self):
for idx, row in enumerate(self.board):
print(" %s | %s | %s " % tuple(row))
if idx < 2:
dash = "-" * 3
plus = "+"
print((dash + plus)*2 + dash)
else:
pass
def move(self, p, loc):
# Find the coordinates of given loc in board and fill that space in based on player (p)
for r, row in enumerate(self.board):
for c, val in enumerate(row):
                if val == loc:
                    if (val != self.p1.token) and (val != self.p2.token):
self.board[r][c] = self._player_num_to_token(p)
self.last_move_p_number = p
self.last_move_loc = loc
return True
else:
return False
return False
def check(self):
all_p1 = [self.p1.token]*3
all_p2 = [self.p2.token]*3
# Check rows
for row in self.board:
if row == all_p1:
return self.p1
elif row == all_p2:
return self.p2
# Check columns
        for col in range(3):
            res = [self.board[0][col], self.board[1][col], self.board[2][col]]
if res == all_p1:
return self.p1
elif res == all_p2:
return self.p2
# Check diagonals
        diag1 = [self.board[0][0], self.board[1][1], self.board[2][2]]
        diag2 = [self.board[2][0], self.board[1][1], self.board[0][2]]
if diag1 == all_p1:
return self.p1
elif diag1 == all_p2:
return self.p2
elif diag2 == all_p1:
return self.p1
elif diag2 == all_p2:
return self.p2
return False # no win yet
def current_player(self):
return self.p1 if self.last_move_p_number is self.p2.number else self.p2
def previous_player(self):
return self.p2 if self.last_move_p_number is self.p2.number else self.p1
def reset(self):
from copy import deepcopy # create unique instance of list (list() is insufficient for multidimensional lists)
self.board = deepcopy(self.board_default)
self.last_move_p_number = deepcopy(self.last_move_p_number_default)
self.last_move_loc = deepcopy(self.last_move_loc_default)
# Private methods
def _player_num_to_token(self, p):
return self.p1.token if p is self.p1.number else self.p2.token
def _player_name_to_num(self, p):
return self.p1.number if p is self.p1.name else self.p2.number
class Player(object):
def __init__(self, name, token, number, is_ai=False):
self.name = name.lower().capitalize()
self.token = token
self.number = number
self.is_ai = is_ai
self.last_move_loc = None
self.wins = 0
self.losses = 0
def __str__(self):
return "Player %s has %d wins and %d losses." % (self.name, self.wins, self.losses)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.name == other.name
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def name(self):
return self.name
def set_last_move_loc(self, loc):
self.last_move_loc = loc
def win(self):
self.wins = self.wins + 1
def lose(self):
self.losses = self.losses + 1
class Engine(object):
def __init__(self):
self.isplaying = True
self.intro()
def intro(self):
print("Welcome to TicTacToe!")
p1_name = input("Player 1, please type your name: ")
p2_name = input("Player 2, please type your name: ")
self.game = TicTacToe(p1_name, p2_name)
print("%s will be '%s' and %s will be '%s'." % (self.game.p1.name, self.game.p1.token, self.game.p2.name, self.game.p2.token))
def start_game(self):
self._start_loop()
def _start_loop(self):
while self.isplaying:
self.game.draw()
choice = input("%s, Where do you want to move? " % self.game.current_player().name)
if self.game.move(self.game.current_player().number, choice):
if self.game.check(): # check if this player has won the game
self.game.draw()
self.game.previous_player().win()
self.game.current_player().lose()
print("%s wins!" % self.game.previous_player().name)
again = input("Would you like to play again (y/n)? ")
if again.lower() != "y":
self.isplaying = False
else:
self.game.reset() # reset game without affecting player information
else:
print("Invalid move, please choose another.")
print("%s won %d times and %s won %d times!" % (self.game.previous_player().name, self.game.previous_player().wins, self.game.current_player().name, self.game.current_player().wins))
print("Thanks for playing!")
# Start Game Engine
engine = Engine()
engine.start_game()
|
[
"benshanahan1@gmail.com"
] |
benshanahan1@gmail.com
|
27c4b2a9bc058d641cab0c1ac907d55c0eeae4a2
|
5172b86d1731faa79931bbde708127beca803307
|
/semantic/classifier/vectorizer.py
|
94f8b713b4593211428f413ffc7670eb30f7b784
|
[] |
no_license
|
amunra94/personal_web_site
|
1a7fda68b6bd21b4d9e6b9fdf195995abe6e1572
|
1d3d9e1f5c48793dde045524b6dbfbbac02847aa
|
refs/heads/master
| 2023-03-15T16:01:09.966065
| 2021-03-01T19:44:02
| 2021-03-01T19:44:02
| 343,096,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
from sklearn.feature_extraction.text import HashingVectorizer
import re
import os
import pickle
_curr_dir = os.path.dirname(__file__)
STOP_WORDS = pickle.load(open(os.path.join(_curr_dir,'pkl_objects','stopwords.pckl'), 'rb'))
def tokenizer(text):
text = re.sub('<[^>]*>', '', text)
emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
text = re.sub('[\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')
tokenized = [word for word in text.split() if word not in STOP_WORDS]
return tokenized
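# Worked example (assuming STOP_WORDS holds the usual English stop words such as
# "this", "is" and "a"):
#   tokenizer('<p>This is :) a test</p>')  ->  ['test', ':)']
# HTML tags are stripped first, emoticons are preserved, and stop words are dropped.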
_vect = HashingVectorizer(decode_error='ignore',
n_features=2**21,
preprocessor=None,
tokenizer=tokenizer)
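# Usage sketch (illustrative): X = _vect.transform(['an example document']) returns a
# sparse 1 x 2**21 feature matrix suitable for out-of-core / streaming classifiers.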
|
[
"root@arudnitskiy.ru"
] |
root@arudnitskiy.ru
|
1d626c9dbdb41c344f8870b691bab05f897edafa
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/dogma/items/fittableDogmaItem.py
|
8e70cbc858bb571171c14d42eeafc1040058e7eb
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529
| 2016-10-19T08:56:26
| 2016-10-19T08:56:26
| 71,334,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,769
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\dogma\items\fittableDogmaItem.py
from dogma.dogmaLogging import *
from baseDogmaItem import BaseDogmaItem
from ccpProfile import TimedFunction
import weakref
class FittableDogmaItem(BaseDogmaItem):
def __init__(self, *args, **kwargs):
self._location = None
self.lastStopTime = None
BaseDogmaItem.__init__(self, *args, **kwargs)
@property
def location(self):
if self._location:
return self._location()
@location.setter
def location(self, location):
if location is None:
self._location = None
else:
self._location = weakref.ref(location)
@property
def ownerID(self):
if self.location:
return self.location.ownerID
@ownerID.setter
def ownerID(self, ownerID):
if self.location and self.location.ownerID != ownerID:
self.dogmaLocation.LogError('Setting ownerID on a FittableDogmaItem to something that disagrees with its location!', self.location.ownerID, ownerID)
@TimedFunction('FittableDogmaItem::Unload')
def Unload(self):
BaseDogmaItem.Unload(self)
if self.location:
try:
locationFittedItems = self.location.fittedItems
except AttributeError:
return
if self.itemID in locationFittedItems:
del locationFittedItems[self.itemID]
elif self.itemID in self.dogmaLocation.itemsMissingLocation:
del self.dogmaLocation.itemsMissingLocation[self.itemID]
def SetLastStopTime(self, lastStopTime):
self.lastStopTime = lastStopTime
def IsActive(self):
for effectID in self.activeEffects:
if effectID == const.effectOnline:
continue
effect = self.dogmaLocation.GetEffect(effectID)
if effect.effectCategory in (const.dgmEffActivation, const.dgmEffTarget):
return True
return False
@TimedFunction('FittableDogmaItem::SetLocation')
def SetLocation(self, locationID, location, flagID):
if location is None:
self.dogmaLocation.LogError('FittableDogmaItem.SetLocation :: Location dogma item is None')
return
if not self.IsValidFittingLocation(location):
self.dogmaLocation.LogError('FittableDogmaItem.SetLocation :: Invalid fitting location')
return
oldData = self.GetLocationInfo()
self.location = location
self.flagID = flagID
location.RegisterFittedItem(self, flagID)
return oldData
def IsValidFittingLocation(self, location):
return False
def UnsetLocation(self, locationDogmaItem):
locationDogmaItem.UnregisterFittedItem(self)
def GetShipID(self):
if self.location:
return self.location.itemID
def GetPilot(self):
if self.location:
return self.location.GetPilot()
def GetOtherID(self):
otherID = None
if self.location:
otherID = self.location.subLocations.get(self.flagID, None)
if otherID is None:
other = self.dogmaLocation.GetChargeNonDB(self.location.itemID, self.flagID)
if other is not None:
otherID = other.itemID
return otherID
def SerializeForPropagation(self):
retVal = BaseDogmaItem.SerializeForPropagation(self)
retVal.lastStopTime = self.lastStopTime
return retVal
def UnpackPropagationData(self, propData, charID, shipID):
BaseDogmaItem.UnpackPropagationData(self, propData, charID, shipID)
self.SetLastStopTime(propData.lastStopTime)
|
[
"le02005@163.com"
] |
le02005@163.com
|
486e379a1a9bebb38a6243372da39082c59dbe52
|
c3b00e74b51d3b7c7a46a63b8bf2b1d1c98d5f70
|
/algorithms/fibonacci_sequence.py
|
a3913a30f4fdf52083c2c7b9767aeea11f59cc09
|
[] |
no_license
|
ajanzadeh/python_interview
|
8057fe2fc9f4da7bc7f96a20a808c279b2fd9110
|
91194e540467261604f72eae99ad0cdaf487eb11
|
refs/heads/master
| 2022-09-16T00:32:06.354326
| 2020-06-04T22:16:27
| 2020-06-04T22:16:27
| 260,756,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
# 0, 1, 1, 2, 3, 5, 8, 13, 21, ...
def sequence(a,b,n):
arr = []
arr.append(a)
arr.append(b)
for i in range(0,n):
c = a+b
arr.append(c)
a = b
b = c
c = 0
resutl = " ,".join(map(str,arr))
return resutl
print(sequence(0,1,20))
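# For reference, sequence(0, 1, 5) evaluates to "0 ,1 ,1 ,2 ,3 ,5 ,8": the two seed
# values followed by n further Fibonacci terms.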
|
[
"arash@zenops.co.uk"
] |
arash@zenops.co.uk
|
7c7405d5b792cd6f20e89b0b56489b366c8baecf
|
ba730380c8406b234202a6a19a9e5f01f6b66d25
|
/django/crud2/articles/views.py
|
4a9f35242edeef84e1211c795529a801b810b62b
|
[] |
no_license
|
ssabum/note
|
3b0fd891ab7053997c7978298635e599b42a7659
|
47354aa55a87813dab66f2ff7a930f5313bffe7a
|
refs/heads/master
| 2023-06-19T03:03:02.398976
| 2021-07-09T15:09:42
| 2021-07-09T15:09:42
| 331,743,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,151
|
py
|
from django.shortcuts import render, redirect
from .models import Article
# Create your views here.
# READ
def index(request):
    # Retrieve every article
    # articles = Article.objects.all()[::-1]  # reversed in Python instead of in the DB
    articles = Article.objects.order_by('-updated_at')  # sorted at the DB level, most recently updated first
context = {
'articles': articles,
}
return render(request, 'articles/index.html', context)
# CREATE
def new(request):
return render(request, 'articles/new.html')
# CREATE
def create(request):
    # Extract the user data submitted in the POST request
    title = request.POST.get('title')
    content = request.POST.get('content')
    # Create an instance based on the Article model class
    article = Article(title=title, content=content)
    # Save it to the DB
article.save()
# return render(request, 'articles/index.html')
# return redirect('articles:index')
return redirect('articles:detail', article.pk)
# READ
def detail(request, pk):
article = Article.objects.get(pk=pk)
context = {
'article': article,
}
return render(request, 'articles/detail.html', context)
# DELETE
# Triggered by: /articles/index/<article number>/delete
# Therefore deletion must only be performed for POST requests
def delete(request, pk):
    # Load the record to delete
    article = Article.objects.get(pk=pk)
    if request.method == 'POST':
        # Delete it
        article.delete()
        # Redirect to the main page
return redirect('articles:index')
else:
return redirect('articles:detail', article.pk)
# UPDATE
def edit(request, pk):
article = Article.objects.get(pk=pk)
context = {
'article':article,
}
return render(request, 'articles/edit.html', context)
def update(request, pk):
    # Load the article to update
    article = Article.objects.get(pk=pk)
    # Extract the data supplied by the user
    article.title = request.POST.get('title')
    article.content = request.POST.get('content')
    # Save it to the DB
article.save()
return redirect('articles:detail', article.pk)
|
[
"qqq960909@likelion.org"
] |
qqq960909@likelion.org
|
1977596321f6c2fbccea688545e4eed977f15378
|
85a1c8f96b602d8b2646e315524231d39b643045
|
/hello_world.py
|
1224e4520b4eda8478b56a97bc4556ff8780ab0d
|
[] |
no_license
|
ics3ur-1-2020/ICS3U-Unit-1-03-Python
|
eb05c72b667a464dfdc1685b95910360cef2427e
|
06390c9d7591b42adfc579934d3ecc4a0afc2a42
|
refs/heads/main
| 2023-01-23T09:56:27.253541
| 2020-11-20T16:48:05
| 2020-11-20T16:48:05
| 314,608,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
#!/usr/bin/env python3
# Created by: Mr. Coxall
# Created on: June 2019
# This is the "Hello, World!" program, but with nice style
def main():
# this function prints to console
print("Hello, World!")
if __name__ == "__main__":
main()
|
[
"ubuntu@ip-172-31-55-204.ec2.internal"
] |
ubuntu@ip-172-31-55-204.ec2.internal
|
ed74b4fbe497a9c5a4850fc2f58b830b5aef33aa
|
87f6f561408c5fb9dd93a4e95b39336a8fff843d
|
/kakuro.py
|
bd57b5ada1dd0bbc94e2365e58c3002d09f63929
|
[] |
no_license
|
prolomova/kakuro
|
ee6d07b2e188df6fdd335a39e34fd51be0f2e9c1
|
a74c4a5b4d8353bffe7f8207def7f9818d5e2f95
|
refs/heads/master
| 2020-04-07T03:28:52.904556
| 2018-11-17T19:31:40
| 2018-11-17T19:31:40
| 158,016,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,254
|
py
|
#!/usr/bin/env python3
"""
Implementation of a Kakuro puzzle solver
"""
import sys
import re
import argparse
import slover
def main(args):
'''
    Main entry point of the solver
'''
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--count', required=False, type=int,
help='This argument is a number of answers', default=1)
parser.add_argument('-f', '--filename', required=False, type=str,
help='This argument is a file name, without '
'it you have to input data')
parser.add_argument('-g', '--groups', nargs='+', required=False,
                        help='This argument lists the coordinates of '
'cells whose values are the same, '
'groups must be separated by "/", x and y '
'values must be separated by ",", '
'coordinate values must be separated by spaces')
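    # Illustrative note: '-g 1,2 1,3 / 2,2 2,3' describes two groups of linked
    # cells, {(1, 2), (1, 3)} and {(2, 2), (2, 3)} (format inferred from the
    # parsing loop below).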
namespace = parser.parse_args(args)
groups = []
if namespace.groups is not None:
groups.append([])
cell = 0
is_splited = False
for item in namespace.groups:
if item == ',':
continue
elif is_splited:
groups[-1].append((cell, int(item)))
is_splited = False
elif re.search(re.compile(r'\d+,\d+'), item):
coord = item.split(',')
groups[-1].append((int(coord[0]), int(coord[1])))
elif re.search(re.compile(r'\d+,'), item):
cell = int(item[:-1])
is_splited = True
elif re.search(re.compile(r'\d+'), item):
cell = int(item)
is_splited = True
elif item == '/':
groups.append([])
count = namespace.count
strings = read_data(namespace.filename)
try:
board = Board(strings)
except FormatException as err:
print(err, file=sys.stderr)
sys.exit(1)
ans = list(map(str, slover.start(board, count, groups)))
if ans:
for answer in ans:
print(answer)
print()
sys.exit(0)
sys.exit(1)
def read_data(fname):
"""
    Read the input data from a file or from stdin
"""
strings = []
open_file = None
if fname is not None:
open_file = open(fname)
else:
open_file = sys.stdin
with open_file as file:
while True:
line = file.readline()
if not line.rstrip():
break
else:
if line.strip() != "":
strings.append(line.strip())
return strings
class FormatException(Exception):
"""
    Exception raised when the input data has an invalid format
"""
def __init__(self, msg, line):
super(FormatException, self).__init__(msg, line)
self.type = msg
self.line = line
def __str__(self):
return "{}: line {}".format(self.type, self.line)
class Cell:
"""
    Class describing a single cell of the board
"""
def __init__(self, sum_vertical, sum_horizontal,
cur_sum_vertical, cur_sum_horizontal):
self.cur_sum_horizontal = cur_sum_horizontal
self.sum_horizontal = sum_horizontal
self.sum_vertical = sum_vertical
self.cur_sum_vertical = cur_sum_vertical
self.value = 0
def change_value(self, value, sum_horizontal, sum_vertical):
"""
        Change the value of the cell
"""
self.cur_sum_horizontal = sum_horizontal + value - self.value
self.cur_sum_vertical = sum_vertical + value - self.value
self.value = value
def __str__(self):
return str(self.value)
class Board:
"""
    Class implementing the game board
"""
CELL_FORMAT = re.compile(r'(\d+|X)/(\d+|X)')
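    # Illustrative reading of the cell syntax, inferred from read() below: a token
    # such as "17/24" is a clue cell with horizontal sum 17 and vertical sum 24,
    # "X" marks a blocked cell, and "0" marks an empty cell to be filled in.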
def __init__(self, file):
self.board = self.read(self.CELL_FORMAT, file)
@staticmethod
def read(cell_format, f_data):
"""
        Parse raw input lines into the board representation
"""
def check_format(sum_board, is_vertical):
for i in range(height):
curr_sum = -1
count = 0
for j in range(width):
cell = sum_board[i][j]
if is_vertical:
cell = sum_board[j][i]
if cell != '0':
if curr_sum > (9 + 10 - count) * count / 2:
raise FormatException(
"Too large value of the sum", str(i + 1))
if cell != 'X':
curr_sum = int(cell)
if is_vertical:
sum_board[j][i] = 'X'
else:
sum_board[i][j] = 'X'
elif cell == '0':
count += 1
if curr_sum <= 0:
raise FormatException(
"Insufficient value of the sum", str(i + 1))
if is_vertical:
sum_board[j][i] = curr_sum
else:
sum_board[i][j] = curr_sum
height = 0
width = len(f_data[0].split(','))
vertical_sum_board = []
horizontal_sum_board = []
for line in f_data:
line_data = [i.strip() for i in line.split(',')
if (i.strip() != '')]
horizontal_sum_board.append([])
vertical_sum_board.append([])
for cell in line_data:
if cell in ('', '\n'):
continue
found_cell = re.search(cell_format, cell)
if found_cell is not None:
horizontal_sum_board[height].append(found_cell.group(1))
vertical_sum_board[height].append(found_cell.group(2))
elif cell in ('X', '0'):
horizontal_sum_board[height].append(cell)
vertical_sum_board[height].append(cell)
else:
raise FormatException("Wrong char", height + 1)
height += 1
check_format(horizontal_sum_board, False)
check_format(vertical_sum_board, True)
result = []
for i in range(height):
result.append([])
for j in range(width):
if vertical_sum_board[i][j] == 'X':
result[i].append('X')
else:
result[i].append(Cell(vertical_sum_board[i][j],
horizontal_sum_board[i][j], 0, 0))
return result
def __str__(self):
lines = [' '.join(map(str, self.board[i]))
for i in range(len(self.board))]
return '\n'.join(lines)
if __name__ == '__main__':
main(sys.argv[1:])
|
[
"prolomova2012@yandex.ru"
] |
prolomova2012@yandex.ru
|
8bf8ae5a5303dec52b8c6726f0af144bdcc6f9a9
|
679de86de84c04d11627a306b2199cf9f8114b65
|
/mnist/start.py
|
823e4fe89602b424ccb4f3af8a53e2b07b03d2a5
|
[] |
no_license
|
a2lin/dnn
|
e0a5e52012b8cefb927b62f085c8930137719cf3
|
74ba7dc28d4a84bf3a48ca1dc596c2390e0c8b5c
|
refs/heads/master
| 2021-01-06T20:40:04.003789
| 2017-08-07T05:38:22
| 2017-08-07T05:38:22
| 99,540,732
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 122
|
py
|
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
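# Follow-up sketch (not in the original file): the returned object exposes the usual
# train/validation/test splits of the old TF tutorial helpers, e.g.
#   batch_xs, batch_ys = mnist.train.next_batch(100)  # 100 flattened images + one-hot labels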
|
[
"lin.alexander.2011@gmail.com"
] |
lin.alexander.2011@gmail.com
|