ansible/roles/kubernetes/tests/test_master.py | dominikl/deployment
import testinfra.utils.ansible_runner
from re import match
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'.molecule/ansible_inventory').get_hosts('kubernetes-master')
def test_get_nodes(Command, Sudo):
with Sudo():
out = Command.check_output(
'kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes')
# First line contains column headings
lines = sorted(out.strip().split('\n')[1:])
assert len(lines) == 2
assert match(r'kubernetes-master\s+Ready\s+', lines[0])
assert match(r'kubernetes-worker\s+Ready\s+', lines[1])
python/blackjack/game.py | JohnPaton/blackjack
from .deck import Deck
from .player import Player, Dealer
class Game():
"""A game of blackjack.
Attributes:
dealer (Dealer): The dealer
deck (Deck): The deck
player (Player): The player
Methods:
bust: return true if either player's score is over 21
final_view: the view of all the cards on the table
setup_players: initialize dealer and player with two cards each
standing: true if both players are standing
play: play a round using the deck, shuffling if necessary
"""
init_cards = 2
def __init__(self):
self.deck = Deck().shuffle()
self.setup_players()
def setup_players(self):
"""Initialize dealer and player, draw two cards for each"""
self.dealer = Dealer(self.deck)
self.player = Player(self.deck)
for i in range(self.init_cards):
self.player.draw()
self.dealer.draw()
def __str__(self):
# can only see dealer's first card
dealer_view = [str(self.dealer.cards[0])]\
+ (len(self.dealer.cards)-1)*['??']
player_view = [str(c) for c in self.player.cards]
s = """
Dealer: {}
Player: {}
""".format(' '.join(dealer_view),
' '.join(player_view))
return s
def final_view(self):
"""View of all the cards."""
dealer_view = [str(c) for c in self.dealer.cards]
player_view = [str(c) for c in self.player.cards]
s = """
Dealer: {}
Player: {}
""".format(' '.join(dealer_view),
' '.join(player_view))
return s
def bust(self):
"""True if either player is bust."""
return self.player.score() > 21 or self.dealer.score() > 21
def standing(self):
"""True if both players are standing."""
return self.player.standing and self.dealer.standing
def play(self):
"""Play a round of blackjack.
The deck is shuffled when there are two cards left.
Returns:
str: the winner of the round ("Player" or "Dealer")
"""
dealer = self.dealer
player = self.player
while not self.bust() and not self.standing():
# show the table
print(str(self))
# shuffle if needs be
if len(self.deck.cards) <= 2:
self.deck.shuffle(reset=True)
if not player.standing: # no turn if already standing
player.turn()
if not self.bust(): # dealer turn if player isn't bust
dealer.turn()
print(self.final_view(), '\n') # cards on the table
print('Dealer score:', dealer.score())
print('Player score:', player.score())
if dealer.score() > 21:
winner = 'Player'
elif player.score() == 21 and len(player.cards) == 2: # blackjack
winner = 'Player'
elif player.score() > dealer.score() and player.score() <= 21:
winner = 'Player'
else:
winner = 'Dealer'
print(winner, 'wins!')
# reset the players
if len(self.deck.cards) <= 4:
self.deck.shuffle(reset=True)
self.setup_players()
return winner
def blackjack():
"""Play a simplified game of blackjack against the dealer."""
print('Welcome to Blackjack!')
# keep track of total games won
player_score = 0
dealer_score = 0
response = ''
game = Game()
while response != 'n': # default to play another round
winner = game.play()
# update scores
if winner == 'Player':
player_score += 1
else:
dealer_score += 1
print()
print('Totals: Player {}, Dealer {}'
.format(player_score, dealer_score))
response = input('Play again ([y]/n)? ').lower()
print('See you next time!')
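# Added entry point (a hedged sketch, not in the original file): this module
# uses relative imports, so with the package layout above (python/blackjack/)
# run it as a module, e.g. `python -m blackjack.game`.
if __name__ == '__main__':
    blackjack()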
Florence/MeshGeneration/CustomMesher.py | romeric/florence
import os, sys
import numpy as np
from copy import deepcopy
from warnings import warn
from .Mesh import Mesh
from .GeometricPath import *
from Florence.Tensor import totuple, unique2d
__all__ = ['HarvesterPatch', 'SubdivisionArc', 'SubdivisionCircle', 'QuadBall',
'QuadBallSphericalArc']
"""
A series of custom meshes
"""
def HarvesterPatch(ndisc=20, nradial=4, show_plot=False):
"""Creates a custom mesh for an energy harvester patch. [Not to be modified]
ndisc: [int] number of discretisations in the circumferential direction
nradial: [int] number of discretisations in the radial direction for the different
components of the harvester
"""
center = np.array([30.6979,20.5])
p1 = np.array([30.,20.])
p2 = np.array([30.,21.])
p1line = p1 - center
p2line = p2 - center
radius = np.linalg.norm(p1line)
pp = np.array([center[0],center[1]+radius])
y_line = pp - center
start_angle = -np.pi/2. - np.arccos(np.linalg.norm(y_line*p1line)/np.linalg.norm(y_line)/np.linalg.norm(p1line))
end_angle = np.pi/2. + np.arccos(np.linalg.norm(y_line*p1line)/np.linalg.norm(y_line)/np.linalg.norm(p1line))
points = np.array([p1,p2,center])
# nradial = 4
mesh = Mesh()
mesh.Arc(element_type="quad", radius=radius, start_angle=start_angle,
end_angle=end_angle, nrad=nradial, ncirc=ndisc, center=(center[0],center[1]), refinement=True)
mesh1 = Mesh()
mesh1.Triangle(element_type="quad",npoints=nradial, c1=totuple(center), c2=totuple(p1), c3=totuple(p2))
mesh += mesh1
mesh_patch = Mesh()
mesh_patch.HollowArc(ncirc=ndisc, nrad=nradial, center=(-7.818181,44.22727272),
start_angle=np.arctan(44.22727272/-7.818181), end_angle=np.arctan(-24.22727272/37.818181),
element_type="quad", inner_radius=43.9129782, outer_radius=44.9129782)
mesh3 = Mesh()
mesh3.Triangle(element_type="quad",npoints=nradial, c2=totuple(p1), c3=totuple(p2), c1=(mesh_patch.points[0,0], mesh_patch.points[0,1]))
mesh += mesh3
mesh += mesh_patch
mesh.Extrude(nlong=ndisc,length=40)
if show_plot:
mesh.SimplePlot()
return mesh
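# Hedged usage sketch (arguments are the defaults shown above):
# mesh = HarvesterPatch(ndisc=20, nradial=4, show_plot=True)
# show_plot=True calls mesh.SimplePlot() before the mesh is returned.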
def CurvedPlate(ncirc=2, nlong=20, show_plot=False):
"""Creates custom mesh for plate with curved edges
ncirc discretisation around circular fillets
nlong discretisation along the length - X
"""
mesh_arc = Mesh()
mesh_arc.Arc(element_type="quad",nrad=ncirc,ncirc=ncirc, radius=5)
mesh_arc1 = deepcopy(mesh_arc)
mesh_arc1.points[:,1] += 15
mesh_arc1.points[:,0] += 95
mesh_arc2 = deepcopy(mesh_arc)
mesh_arc2.points[:,1] +=15
mesh_arc2.points[:,0] *= -1.
mesh_arc2.points[:,0] += 5.
mesh_plate1 = Mesh()
mesh_plate1.Rectangle(element_type="quad",lower_left_point=(5,15),upper_right_point=(95,20),ny=ncirc, nx=nlong)
mesh_plate2 = deepcopy(mesh_plate1)
mesh_plate2.points[:,1] -= 5.
mesh_square1 = Mesh()
mesh_square1.Square(element_type="quad",lower_left_point=(0,10), side_length=5,nx=ncirc,ny=ncirc)
mesh_square2 = deepcopy(mesh_square1)
mesh_square2.points[:,0] += 95
mesh = mesh_plate1 + mesh_plate2 + mesh_arc1 + mesh_arc2 + mesh_square1 + mesh_square2
mesh.Extrude(length=0.5,nlong=1)
mesh2 = deepcopy(mesh)
mesh2.points[:,2] += 0.5
mesh += mesh2
if show_plot:
mesh.SimplePlot()
return mesh
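# Hedged usage sketch: the plate above spans x = 0..100 with 5-unit fillets
# and is extruded twice by 0.5 to a total thickness of 1.0.
# mesh = CurvedPlate(ncirc=2, nlong=20, show_plot=True)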
def SubdivisionArc(center=(0.,0.), radius=1., nrad=16, ncirc=40,
start_angle=0., end_angle=np.pi/2., element_type="tri", refinement=False, refinement_level=2):
"""Creates a mesh on circle using midpoint subdivision.
This function is internally called from Mesh.Circle if
'midpoint_subdivision' algorithm is selected
"""
if start_angle != 0. or end_angle != np.pi/2.:
raise ValueError("Subdivision based arc only produces meshes for a quarter-circle arc for now")
r = float(radius)
h_r = float(radius)/2.
nx = int(ncirc/4.)
ny = int(nrad/2.)
if nx < 3:
warn("Number of division in circumferential direction too low")
mesh = Mesh()
mesh.Rectangle(element_type="quad", lower_left_point=(-1.,-1.),
upper_right_point=(1.,1.), nx=nx, ny=ny)
uv = np.array([
[-1.,-1],
[1.,-1],
[1.,1],
[-1.,1],
])
t = np.pi/4.
end_points = np.array([
[0.,h_r*np.sin(t)],
[h_r*np.cos(t),h_r*np.sin(t)],
[r*np.cos(t),r*np.sin(t)],
[0.,radius],
])
edge_points = mesh.points[np.unique(mesh.edges),:]
new_end_points = []
new_end_points.append(end_points[0,:])
new_end_points.append(end_points[1,:])
new_end_points.append(end_points[2,:])
tt = np.linspace(np.pi/4,np.pi/2,nx)
x = r*np.cos(tt)
y = r*np.sin(tt)
interp_p = np.vstack((x,y)).T
for i in range(1,len(x)-1):
new_end_points.append([x[i], y[i]])
new_end_points.append(end_points[3,:])
new_end_points = np.array(new_end_points)
new_uv = []
new_uv.append(uv[0,:])
new_uv.append(uv[1,:])
new_uv.append(uv[2,:])
L = 0.
for i in range(1,interp_p.shape[0]):
L += np.linalg.norm(interp_p[i,:] - interp_p[i-1,:])
interp_uv = []
last_uv = uv[2,:]
for i in range(1,interp_p.shape[0]-1):
val = (uv[3,:] - uv[2,:])*np.linalg.norm(interp_p[i,:] - interp_p[i-1,:])/L + last_uv
last_uv = np.copy(val)
interp_uv.append(val)
interp_uv = np.array(interp_uv)
new_uv = np.array(new_uv)
if interp_uv.shape[0] !=0:
new_uv = np.vstack((new_uv,interp_uv))
new_uv = np.vstack((new_uv,uv[3,:]))
from Florence.FunctionSpace import MeanValueCoordinateMapping
new_points = np.zeros_like(mesh.points)
# All nodes barring the ones lying on the arc
for i in range(mesh.nnode - nx - 1):
point = MeanValueCoordinateMapping(mesh.points[i,:], new_uv, new_end_points)
new_points[i,:] = point
# The nodes on the arc are not exactly on the arc
# so they need to be snapped/clipped
tt = np.linspace(np.pi/4,np.pi/2,nx+1)[::-1]
x = r*np.cos(tt)
y = r*np.sin(tt)
new_points[mesh.nnode-nx-1:,:] = np.vstack((x,y)).T
mesh.points = new_points
rmesh = deepcopy(mesh)
rmesh.points = mesh.Rotate(angle=-np.pi/2., copy=True)
rmesh.points[:,1] *= -1.
mesh += rmesh
mesh.LaplacianSmoothing(niter=10)
qmesh = Mesh()
qmesh.Rectangle(element_type="quad", lower_left_point=(0.0,0.0),
upper_right_point=(h_r*np.cos(t),h_r*np.sin(t)),
nx=nx,
ny=nx)
mesh += qmesh
# mesh.LaplacianSmoothing(niter=20)
NodeSliderSmootherArc(mesh, niter=20)
mesh.points[:,0] += center[0]
mesh.points[:,1] += center[1]
if refinement:
mesh.Refine(level=refinement_level)
if element_type == "tri":
sys.stdout = open(os.devnull, "w")
mesh.ConvertQuadsToTris()
sys.stdout = sys.__stdout__
return mesh
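# Hedged usage sketch (defaults as above; produces the quarter-circle quad
# mesh that the midpoint-subdivision path of the Mesh class relies on):
# mesh = SubdivisionArc(radius=1., nrad=16, ncirc=40, element_type="quad")
# mesh.SimplePlot()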
def SubdivisionCircle(center=(0.,0.), radius=1., nrad=16, ncirc=40,
element_type="tri", refinement=False, refinement_level=2):
"""Creates a mesh on circle using midpoint subdivision.
This function is internally called from Mesh.Circle if
'midpoint_subdivision' algorithm is selected
"""
r = float(radius)
h_r = float(radius)/2.
nx = int(ncirc/4.)
ny = int(nrad/2.)
if nx < 3:
warn("Number of division in circumferential direction too low")
mesh = Mesh()
mesh.Rectangle(element_type="quad", lower_left_point=(-1.,-1.),
upper_right_point=(1.,1.), nx=nx, ny=ny)
uv = np.array([
[-1.,-1],
[1.,-1],
[1.,1],
[-1.,1],
])
t = np.pi/4
end_points = np.array([
[-h_r*np.cos(t),h_r*np.sin(t)],
[h_r*np.cos(t),h_r*np.sin(t)],
[r*np.cos(t),r*np.sin(t)],
[-r*np.cos(t),r*np.sin(t)],
])
edge_points = mesh.points[np.unique(mesh.edges),:]
new_end_points = []
new_end_points.append(end_points[0,:])
new_end_points.append(end_points[1,:])
new_end_points.append(end_points[2,:])
tt = np.linspace(np.pi/4,3*np.pi/4,nx)
x = r*np.cos(tt)
y = r*np.sin(tt)
interp_p = np.vstack((x,y)).T
for i in range(1,len(x)-1):
new_end_points.append([x[i], y[i]])
new_end_points.append(end_points[3,:])
new_end_points = np.array(new_end_points)
new_uv = []
new_uv.append(uv[0,:])
new_uv.append(uv[1,:])
new_uv.append(uv[2,:])
L = 0.
for i in range(1,interp_p.shape[0]):
L += np.linalg.norm(interp_p[i,:] - interp_p[i-1,:])
interp_uv = []
last_uv = uv[2,:]
for i in range(1,interp_p.shape[0]-1):
val = (uv[3,:] - uv[2,:])*np.linalg.norm(interp_p[i,:] - interp_p[i-1,:])/L + last_uv
last_uv = np.copy(val)
interp_uv.append(val)
interp_uv = np.array(interp_uv)
new_uv = np.array(new_uv)
if interp_uv.shape[0] !=0:
new_uv = np.vstack((new_uv,interp_uv))
new_uv = np.vstack((new_uv,uv[3,:]))
from Florence.FunctionSpace import MeanValueCoordinateMapping
new_points = np.zeros_like(mesh.points)
for i in range(mesh.nnode):
point = MeanValueCoordinateMapping(mesh.points[i,:], new_uv, new_end_points)
new_points[i,:] = point
mesh.points = new_points
rmesh = deepcopy(mesh)
rmesh.points = mesh.Rotate(angle=np.pi/2., copy=True)
mesh += rmesh
rmesh.points = rmesh.Rotate(angle=np.pi/2., copy=True)
mesh += rmesh
rmesh.points = rmesh.Rotate(angle=np.pi/2., copy=True)
mesh += rmesh
mesh.LaplacianSmoothing(niter=10)
qmesh = Mesh()
qmesh.Rectangle(element_type="quad", lower_left_point=(-h_r*np.cos(t),-h_r*np.sin(t)),
upper_right_point=(h_r*np.cos(t),h_r*np.sin(t)),
nx=nx,
ny=nx)
mesh += qmesh
mesh.LaplacianSmoothing(niter=20)
mesh.points[:,0] += center[0]
mesh.points[:,1] += center[1]
if refinement:
mesh.Refine(level=refinement_level)
if element_type == "tri":
sys.stdout = open(os.devnull, "w")
mesh.ConvertQuadsToTris()
sys.stdout = sys.__stdout__
return mesh
def QuadBall(center=(0.,0.,0.), radius=1., n=10, element_type="hex"):
"""Creates a fully hexahedral mesh on sphere using midpoint subdivision algorithm
by creating a cube and spherifying it using PostMesh's projection schemes
inputs:
n: [int] number of divsion in every direction.
Given that this implementation is based on
high order bases different divisions in
different directions is not possible
"""
try:
from Florence import Mesh, BoundaryCondition, DisplacementFormulation, FEMSolver, LinearSolver
from Florence import LinearElastic, NeoHookean
from Florence.Tensor import prime_number_factorisation
except ImportError:
raise ImportError("This function needs Florence's core support")
n = int(n)
if n > 50:
# Values beyond this result in >1M DoFs due to internal prime factorisation splitting
raise ValueError("The value of n={} (division in each direction) is too high".format(str(n)))
if not isinstance(center,tuple):
raise ValueError("The center of the sphere should be given in a tuple with three elements (x,y,z)")
if len(center) != 3:
raise ValueError("The center of the sphere should be given in a tuple with three elements (x,y,z)")
if n == 2 or n==3 or n==5 or n==7:
ps = [n]
else:
def factorise_all(n):
if n < 2:
n = 2
factors = prime_number_factorisation(n)
if len(factors) == 1 and n > 2:
n += 1
factors = prime_number_factorisation(n)
return factors
factors = factorise_all(n)
ps = []
for factor in factors:
ps +=factorise_all(factor)
# Do high ps first
ps = np.sort(ps)[::-1].tolist()
niter = len(ps)
# IGS file for sphere with radius 1000.
sphere_igs_file_content = SphereIGS()
with open("sphere_cad_file.igs", "w") as f:
f.write(sphere_igs_file_content)
sys.stdout = open(os.devnull, "w")
ndim = 3
scale = 1000.
condition = 1.e020
mesh = Mesh()
material = LinearElastic(ndim, mu=1., lamb=4.)
# Keep the solver iterative for low memory consumption. All boundary points are Dirichlet BCs
# so they will be exact anyway
solver = LinearSolver(linear_solver="iterative", linear_solver_type="cg2",
dont_switch_solver=True, iterative_solver_tolerance=1e-9)
for it in range(niter):
if it == 0:
mesh.Parallelepiped(element_type="hex", nx=1, ny=1, nz=1, lower_left_rear_point=(-0.5,-0.5,-0.5),
upper_right_front_point=(0.5,0.5,0.5))
mesh.GetHighOrderMesh(p=ps[it], equally_spaced=True)
boundary_condition = BoundaryCondition()
boundary_condition.SetCADProjectionParameters(
"sphere_cad_file.igs",
scale=scale,condition=condition, project_on_curves=True, solve_for_planar_faces=True,
modify_linear_mesh_on_projection=True, fix_dof_elsewhere=False
)
boundary_condition.GetProjectionCriteria(mesh)
formulation = DisplacementFormulation(mesh)
fem_solver = FEMSolver(
number_of_load_increments=1,
analysis_nature="linear",
force_not_computing_mesh_qualities=True,
report_log_level=0,
optimise=True)
solution = fem_solver.Solve(formulation=formulation, mesh=mesh,
material=material, boundary_condition=boundary_condition, solver=solver)
mesh.points += solution.sol[:,:,-1]
mesh = mesh.ConvertToLinearMesh()
os.remove("sphere_cad_file.igs")
if not np.isclose(radius,1):
mesh.points *= radius
mesh.points[:,0] += center[0]
mesh.points[:,1] += center[1]
mesh.points[:,2] += center[2]
if element_type == "tet":
mesh.ConvertHexesToTets()
sys.stdout = sys.__stdout__
return mesh
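# Hedged usage sketch: each call writes (and removes) a temporary
# "sphere_cad_file.igs" in the working directory, and requires Florence's
# core FEM support per the import guard above.
# ball = QuadBall(center=(0., 0., 0.), radius=1., n=10, element_type="hex")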
def QuadBallSurface(center=(0.,0.,0.), radius=1., n=10, element_type="quad"):
"""Creates a surface quad mesh on sphere using midpoint subdivision algorithm
by creating a cube and spherifying it using PostMesh's projection schemes.
Unlike the volume QuadBall method there is no restriction on number of divisions
here as no system of equations is solved
inputs:
n: [int] number of divsion in every direction.
Given that this implementation is based on
high order bases different divisions in
different directions is not possible
"""
try:
from Florence import Mesh, BoundaryCondition, DisplacementFormulation, FEMSolver, LinearSolver
from Florence import LinearElastic, NeoHookean
from Florence.Tensor import prime_number_factorisation
except ImportError:
raise ImportError("This function needs Florence's core support")
n = int(n)
if not isinstance(center,tuple):
raise ValueError("The center of the sphere should be given in a tuple with three elements (x,y,z)")
if len(center) != 3:
raise ValueError("The center of the sphere should be given in a tuple with three elements (x,y,z)")
if n == 2 or n==3 or n==5 or n==7:
ps = [n]
else:
def factorise_all(n):
if n < 2:
n = 2
factors = prime_number_factorisation(n)
if len(factors) == 1 and n > 2:
n += 1
factors = prime_number_factorisation(n)
return factors
factors = factorise_all(n)
ps = []
for factor in factors:
ps +=factorise_all(factor)
# Do high ps first
ps = np.sort(ps)[::-1].tolist()
niter = len(ps)
sphere_igs_file_content = SphereIGS()
with open("sphere_cad_file.igs", "w") as f:
f.write(sphere_igs_file_content)
sys.stdout = open(os.devnull, "w")
ndim = 3
scale = 1000.
condition = 1.e020
mesh = Mesh()
material = LinearElastic(ndim, mu=1., lamb=4.)
for it in range(niter):
if it == 0:
mesh.Parallelepiped(element_type="hex", nx=1, ny=1, nz=1, lower_left_rear_point=(-0.5,-0.5,-0.5),
upper_right_front_point=(0.5,0.5,0.5))
mesh = mesh.CreateSurface2DMeshfrom3DMesh()
mesh.GetHighOrderMesh(p=ps[it], equally_spaced=True)
mesh = mesh.CreateDummy3DMeshfrom2DMesh()
formulation = DisplacementFormulation(mesh)
else:
mesh.GetHighOrderMesh(p=ps[it], equally_spaced=True)
mesh = mesh.CreateDummy3DMeshfrom2DMesh()
boundary_condition = BoundaryCondition()
boundary_condition.SetCADProjectionParameters(
"sphere_cad_file.igs",
scale=scale,condition=condition,
project_on_curves=True,
solve_for_planar_faces=True,
modify_linear_mesh_on_projection=True,
fix_dof_elsewhere=False
)
boundary_condition.GetProjectionCriteria(mesh)
nodesDBC, Dirichlet = boundary_condition.PostMeshWrapper(formulation, mesh, None, None, FEMSolver())
mesh.points[nodesDBC.ravel(),:] += Dirichlet
mesh = mesh.CreateSurface2DMeshfrom3DMesh()
mesh = mesh.ConvertToLinearMesh()
os.remove("sphere_cad_file.igs")
if not np.isclose(radius,1):
mesh.points *= radius
mesh.points[:,0] += center[0]
mesh.points[:,1] += center[1]
mesh.points[:,2] += center[2]
if element_type == "tri":
mesh.ConvertQuadsToTris()
sys.stdout = sys.__stdout__
return mesh
def QuadBallSphericalArc(center=(0.,0.,0.), inner_radius=9., outer_radius=10., n=10, nthick=1,
element_type="hex", cut_threshold=None, portion=1./8.):
"""Similar to QuadBall but hollow and creates only 1/8th or 1/4th or 1/2th of the sphere.
Starting and ending angles are not supported. Radial division (nthick: to be consistent
with SphericalArc method of Mesh class) is supported
input:
cut_threshold [float] cutting threshold for element removal since this function is based
QuadBall. Ideal value is zero, so prescribe a value as close to zero
as possible, however that might not always be possible as the cut
might take remove some wanted elements [default = -0.01]
portion [float] portion of the sphere to take. Can only be 1/8., 1/4., 1/2.
"""
assert inner_radius < outer_radius
mm = QuadBallSurface(n=n, element_type=element_type)
offset = outer_radius*2.
if cut_threshold is None:
cut_threshold = -0.01
if portion == 1./8.:
mm.RemoveElements(np.array([ [ cut_threshold, cut_threshold, cut_threshold], [ offset, offset, offset]]))
elif portion == 1./4.:
mm.RemoveElements(np.array([ [ cut_threshold, cut_threshold, -offset], [ offset, offset, offset]]))
elif portion == 1./2.:
mm.RemoveElements(np.array([ [ cut_threshold, -offset, -offset], [ offset, offset, offset]]))
else:
raise ValueError("The value of portion can only be 1/8., 1/4. or 1/2.")
radii = np.linspace(inner_radius, outer_radius, nthick+1)
mesh = Mesh()
mesh.element_type = "hex"
mesh.nelem = 0
mesh.nnode = 0
for i in range(nthick):
mm1, mm2 = deepcopy(mm), deepcopy(mm)
if not np.isclose(radii[i],1):
mm1.points *= radii[i]
if not np.isclose(radii[i+1],1):
mm2.points *= radii[i+1]
if i == 0:
elements = np.hstack((mm1.elements, mm1.nnode + mm2.elements)).astype(np.int64)
mesh.elements = np.copy(elements)
mesh.points = np.vstack((mm1.points, mm2.points))
else:
elements = np.hstack((mesh.elements[(i-1)*mm2.nelem:i*mm2.nelem,4:],
mesh.nnode + mm2.elements)).astype(np.int64)
mesh.elements = np.vstack((mesh.elements, elements))
mesh.points = np.vstack((mesh.points, mm2.points))
mesh.nelem = mesh.elements.shape[0]
mesh.nnode = mesh.points.shape[0]
mesh.elements = np.ascontiguousarray(mesh.elements, dtype=np.int64)
mesh.nelem = mesh.elements.shape[0]
mesh.nnode = mesh.points.shape[0]
mesh.GetBoundaryFaces()
mesh.GetBoundaryEdges()
mesh.points[:,0] += center[0]
mesh.points[:,1] += center[1]
mesh.points[:,2] += center[2]
return mesh
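# Hedged usage sketch: an octant (portion=1./8.) of a hollow sphere with a
# single radial layer between inner_radius and outer_radius.
# shell = QuadBallSphericalArc(inner_radius=9., outer_radius=10., n=10,
#                              nthick=1, portion=1./8.)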
def Torus(show_plot=False):
"""Custom mesh for torus
"""
raise NotImplementedError("Not fully implemented yet")
# MAKE TORUS WORK
from copy import deepcopy
from numpy.linalg import norm
mesh = Mesh()
mesh.Circle(element_type="quad", ncirc=2, nrad=2)
tmesh = deepcopy(mesh)
arc = GeometricArc(start=(10,10,8),end=(10,10,-8))
# arc.GeometricArc()
nlong = 10
points = mesh.Extrude(path=arc, nlong=nlong)
# mesh.SimplePlot()
# print points
# elem_nodes = tmesh.elements[0,:]
# p1 = tmesh.points[elem_nodes[0],:]
# p2 = tmesh.points[elem_nodes[1],:]
# p3 = tmesh.points[elem_nodes[2],:]
# p4 = tmesh.points[elem_nodes[3],:]
# E1 = np.append(p2 - p1, 0.0)
# E2 = np.append(p4 - p1, 0.0)
# E3 = np.array([0,0,1.])
# E1 /= norm(E1)
# E2 /= norm(E2)
# # print E1,E2,E3
# elem_nodes = mesh.elements[0,:]
# p1 = mesh.points[elem_nodes[0],:]
# p2 = mesh.points[elem_nodes[1],:]
# p3 = mesh.points[elem_nodes[2],:]
# p4 = mesh.points[elem_nodes[3],:]
# p5 = mesh.points[elem_nodes[4],:]
# e1 = p2 - p1
# e2 = p4 - p1
# e3 = p5 - p1
# e1 /= norm(e1)
# e2 /= norm(e2)
# e3 /= norm(e3)
# # print e1,e2,e3
# # TRANSFORMATION MATRIX
# Q = np.array([
# [np.einsum('i,i',e1,E1), np.einsum('i,i',e1,E2), np.einsum('i,i',e1,E3)],
# [np.einsum('i,i',e2,E1), np.einsum('i,i',e2,E2), np.einsum('i,i',e2,E3)],
# [np.einsum('i,i',e3,E1), np.einsum('i,i',e3,E2), np.einsum('i,i',e3,E3)]
# ])
# mesh.points = np.dot(mesh.points,Q.T)
# points = np.dot(points,Q)
# E1 = np.array([1,0,0.])
E3 = np.array([0.,0.,1.])
nnode_2D = tmesh.points.shape[0]
for i in range(nlong+1):
# e1 = points[i,:][None,:]/norm(points[i,:])
# Q = np.dot(E1[:,None],e1)
# vpoints = np.dot(points,Q)
e3 = points[i+1,:] - points[i,:]; e3 /= norm(e3)
Q = np.dot(e3[:,None],E3[None,:])
# print Q
# print np.dot(Q,points[i,:][:,None])
vpoints = np.dot(points,Q)
# print current_points
mesh.points[nnode_2D*i:nnode_2D*(i+1),:2] = tmesh.points + points[i,:2]
mesh.points[nnode_2D*i:nnode_2D*(i+1), 2] = vpoints[i,2]
# print Q
# print tmesh.points
# mesh = Mesh.HexahedralProjection()
if show_plot:
mesh.SimplePlot()
return mesh
def NodeSliderSmootherArc(mesh, niter=10):
"""This is less than half-baked node slider smoother that only works
for arc type meshes
"""
if mesh.element_type != "quad":
raise RuntimeError("Only implemented for quads")
un_edges = np.unique(mesh.edges)
points = mesh.points[un_edges,:]
radius = mesh.Bounds[1,1]
# For all x==0
idx = np.where(np.isclose(mesh.points[:,0], 0.0)==True)[0]
idx_sort = np.lexsort((mesh.points[idx,1],mesh.points[idx,0]))
mesh.points[idx[idx_sort],1] = np.linspace(0.,radius, idx_sort.shape[0])
# For all y==0
idx = np.where(np.isclose(mesh.points[:,1], 0.0)==True)[0]
idx_sort = np.lexsort((mesh.points[idx,0],mesh.points[idx,1]))
mesh.points[idx[idx_sort],0] = np.linspace(0.,radius, idx_sort.shape[0])
mesh.LaplacianSmoothing(niter)
# -----------------------------------------------------------------------------------------
def SphereIGS():
# IGS file for sphere with radius 1000.
sphere_igs_file_content ="""
S0000001
,,31HOpen CASCADE IGES processor 6.7,13HFilename.iges, G0000001
16HOpen CASCADE 6.7,31HOpen CASCADE IGES processor 6.7,32,308,15,308,15,G0000002
,1.,6,1HM,1,0.00001,15H20150628.043945,1E-07,1.007104,5Hroman,,11,0, G0000003
15H20150628.043945,; G0000004
186 1 0 0 0 0 0 000000000D0000001
186 0 0 1 0 0D0000002
514 2 0 0 0 0 0 000010000D0000003
514 0 0 1 1 0D0000004
510 3 0 0 0 0 0 000010000D0000005
510 0 0 1 1 0D0000006
196 4 0 0 0 0 0 000010000D0000007
196 0 0 1 1 0D0000008
116 5 0 0 0 0 0 000010400D0000009
116 0 0 1 0 0D0000010
123 6 0 0 0 0 0 000010200D0000011
123 0 0 1 0 0D0000012
123 7 0 0 0 0 0 000010200D0000013
123 0 0 1 0 0D0000014
508 8 0 0 0 0 0 000010000D0000015
508 0 0 2 1 0D0000016
502 10 0 0 0 0 0 000010000D0000017
502 0 0 2 1 0D0000018
110 12 0 0 0 0 0 000010000D0000019
110 0 0 1 0 0D0000020
504 13 0 0 0 0 0 000010001D0000021
504 0 0 1 1 0D0000022
100 14 0 0 0 0 25 000010000D0000023
100 0 0 1 0 0D0000024
124 15 0 0 0 0 0 000000000D0000025
124 0 0 2 0 0D0000026
110 17 0 0 0 0 0 000010000D0000027
110 0 0 1 0 0D0000028
110 18 0 0 0 0 0 000010000D0000029
110 0 0 1 0 0D0000030
110 19 0 0 0 0 0 000010000D0000031
110 0 0 1 0 0D0000032
186,3,1,0; 0000001P0000001
514,1,5,1; 0000003P0000002
510,7,1,1,15; 0000005P0000003
196,9,1.,11,13; 0000007P0000004
116,0.,0.,0.,0; 0000009P0000005
123,0.,0.,1.; 0000011P0000006
123,1.,0.,-0.; 0000013P0000007
508,4,1,17,1,0,1,0,19,0,21,1,0,1,0,27,1,17,2,1,1,0,29,0,21,1,1, 0000015P0000008
1,0,31; 0000015P0000009
502,2,6.123233996E-17,-1.499759783E-32,1.,6.123233996E-17, 0000017P0000010
-1.499759783E-32,-1.; 0000017P0000011
110,360.,90.,0.,0.,90.,0.; 0000019P0000012
504,1,23,17,2,17,1; 0000021P0000013
100,0.,0.,0.,-1.836970199E-16,-1.,3.061616998E-16,1.; 0000023P0000014
124,1.,0.,-2.449293598E-16,0.,-2.449293598E-16,0.,-1.,0.,0.,1., 0000025P0000015
0.,0.; 0000025P0000016
110,0.,90.,-0.,0.,-90.,-0.; 0000027P0000017
110,0.,-90.,0.,360.,-90.,0.; 0000029P0000018
110,360.,-90.,0.,360.,90.,0.; 0000031P0000019
S 1G 4D 32P 19 T0000001
"""
return sphere_igs_file_content
lingvo/core/spectrum_augmenter_on_device.py | Singed-jj/lingvo
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lingvo layers that are used for spectrum augmentation on-device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lingvo.compat as tf
from lingvo.core import spectrum_augmenter
class SpectrumAugmenterOnDevice(spectrum_augmenter.SpectrumAugmenter):
"""Performs data augmentation as according to the SpecAug paper.
This implementation uses portable replacements for the tf.einsum ops.
https://arxiv.org/pdf/1904.08779.pdf
"""
def EinsumBBmBm(self, a, b, name=None):
"""Portable replacement for tf.einsum('b,bm->bm', a, b)."""
return tf.math.multiply(tf.expand_dims(a, axis=-1), b, name=name)
def EinsumBmtBmBt(self, a, b, name=None):
"""Portable replacement for tf.einsum('bmt,bm->bt', a, b)."""
return tf.linalg.matvec(a, b, transpose_a=True, name=name)
def EinsumBxycByBxyc(self, a, b, name=None):
"""Portable replacement for tf.einsum('bxyc,by->bxyc', a, b)."""
expanded_b = tf.expand_dims(tf.expand_dims(b, axis=1), axis=3)
return tf.math.multiply(a, expanded_b, name=name)
def EinsumBxycBxBxyc(self, a, b, name=None):
"""Portable replacement for tf.einsum('bxyc,bx->bxyc', a, b)."""
expanded_b = tf.expand_dims(tf.expand_dims(b, axis=2), axis=3)
return tf.math.multiply(a, expanded_b, name=name)
def EinsumBxyBxBxy(self, a, b, name=None):
"""Portable replacement for tf.einsum('bxy,bx->bxy', a, b)."""
return tf.math.multiply(a, tf.expand_dims(b, axis=2), name=name)
def EinsumBxycBzxBzyc(self, a, b, name=None):
"""Portable replacement for tf.einsum('bxyc,bzx->bzyc', a, b)."""
expanded_a = tf.expand_dims(a, axis=1)
expanded_b = tf.expand_dims(tf.expand_dims(b, axis=-1), axis=-1)
return tf.reduce_sum(
tf.math.multiply(expanded_a, expanded_b, name=name), axis=2)
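# Hedged sanity check (an addition, not part of lingvo): each portable op can
# be verified against tf.einsum directly, e.g. for EinsumBmtBmBt:
#   import numpy as np
#   a = tf.constant(np.random.rand(2, 3, 4), tf.float32)  # shape [b, m, t]
#   v = tf.constant(np.random.rand(2, 3), tf.float32)     # shape [b, m]
#   ref = tf.einsum('bmt,bm->bt', a, v)
#   out = tf.linalg.matvec(a, v, transpose_a=True)
#   np.testing.assert_allclose(ref.numpy(), out.numpy(), rtol=1e-5)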
Day_3/Day_3_Cards_of_the_Same_Suit.py | Kasun-se-2018-023/HackerRank_10_days_statistics_course
No need to use combinations or permutations. There are 13 cards of each suit. Draw one card: it can be anything, with probability 1.
Now there are 51 cards left, and 12 of them are of the same suit as the first card you drew. So the chance that the second card matches the first is 12/51.
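# A quick numeric check of the claim above (an added sketch, not in the original file):
from fractions import Fraction
p_second_matches = Fraction(12, 51)  # 12 same-suit cards remain among the 51 left
print(p_second_matches, float(p_second_matches))  # 4/17, about 0.2353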
commands/hello.py | el-ideal-ideas/MocaCommands
#!/usr/bin/env python3
# Please make sure this file can be executed.
# chmod +x hello.py
print('Hello World!!')
capreolus/tests/common_fixtures.py | bpiwowar/capreolus-xpm
import os
import pytest
from capreolus.collection import COLLECTIONS, Collection
from capreolus.index.anserini import AnseriniIndex
from capreolus.utils.common import Anserini
@pytest.fixture(scope="function")
def trec_index(request, tmpdir):
"""
Build an index based on sample data and create an AnseriniIndex instance based on it
"""
indir = os.path.join(COLLECTIONS["dummy"].basepath, "dummy")
outdir = os.path.join(tmpdir, "index")
anserini_fat_jar = Anserini.get_fat_jar()
cmd = f"java -classpath {anserini_fat_jar} -Xms512M -Xmx31G -Dapp.name=IndexCollection io.anserini.index.IndexCollection -collection TrecCollection -generator JsoupGenerator -threads 1 -input {indir} -index {outdir} -storeTransformedDocs"
os.system(cmd)
collection = Collection(dummy_collection_config())
anserini_index = AnseriniIndex(collection, outdir, os.path.join(tmpdir, "index_cache"))
anserini_index.open()
return anserini_index
@pytest.fixture(scope="module")
def dummy_collection_config():
collection_path = COLLECTIONS["dummy"].basepath
return {
"name": "dummy",
"topics": {"type": "trec", "path": os.path.join(collection_path, "topics.dummy.txt")},
"qrels": {"type": "trec", "path": os.path.join(collection_path, "qrels.dummy.txt")},
"documents": {"type": "trec", "path": os.path.join(collection_path, "dummy")},
}
test.py | EuphoriaYan/Chinese-ancient-book-recognition-HSK
import tensorflow as tf
from PIL import Image
import numpy as np
import os
from util import check_or_makedirs
im = Image.open("1.jpg")
print(im.mode, im.size)
np_im = np.array(im)
tf_im = tf.constant(np_im)
print(tf_im.dtype)
img = tf.image.grayscale_to_rgb(tf_im[:, :, tf.newaxis])
# scale image to fixed size
fixed_size = tf.constant([640, 640], dtype=tf.float32)  # multiple of 16
raw_shape = tf.cast(tf.shape(img)[:2], tf.float32)
scale_ratio = tf.reduce_min(fixed_size / raw_shape)
new_size = tf.cast(raw_shape * scale_ratio, dtype=tf.int32)
img = tf.image.resize(img, size=new_size)
delta = tf.cast(fixed_size, tf.int32) - new_size
dh, dw = delta[0], delta[1]
img = tf.pad(img, paddings=[[0, dh], [0, dw], [0, 0]], mode='CONSTANT', constant_values=255)  # pad to fixed_size; white background, black text
# image = tf.image.random_brightness(img, max_delta=0.5)
# image = tf.image.random_contrast(image, lower=0.5, upper=2.)
# image = tf.image.random_hue(image, max_delta=0.4)
# image = tf.image.random_jpeg_quality(image, min_jpeg_quality=20, max_jpeg_quality=80)
# image = tf.image.random_saturation(image, lower=0.5, upper=5)
# check_or_makedirs(os.path.join("..", "summary"))
# summary_writer = tf.summary.create_file_writer(os.path.join("..", "summary"))
# with summary_writer.as_default():
# print(np_im.dtype)
# tf.summary.image("image", np_im.reshape((1, 897, 708, 1)).astype("float32")/255, step=0)
# summary_writer.flush()
noise = tf.random.normal(img.shape, mean=0.0, stddev=30.0)
img = img + noise
img = tf.where(img < 0, 0, img)
img = tf.where(img > 255, 255, img)
img = tf.cast(img, tf.uint8)
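# (Added note: the two tf.where calls above are equivalent to the more
# idiomatic tf.clip_by_value(img, 0., 255.) applied before the uint8 cast.)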
for i in range(100):
print(i, img.dtype)
# ****************************
delta = -1 + i * 2 / 100
im = tf.image.adjust_brightness(img, delta=delta)
print(im.dtype)
np_im = im.numpy().astype(np.uint8)
p_im = Image.fromarray(np_im)
check_or_makedirs(os.path.join("..", "tf_image", "brightness"))
im_path = os.path.join("..", "tf_image", "brightness", "delta_" + str(delta) + ".jpg")
p_im.save(im_path, format="jpeg")
# ****************************
contrast_factor = 0.3 + i * 1.5 / 100
im = tf.image.adjust_contrast(img, contrast_factor=contrast_factor)
print(im.dtype)
np_im = im.numpy().astype(np.uint8)
p_im = Image.fromarray(np_im)
check_or_makedirs(os.path.join("..", "tf_image", "contrast"))
im_path = os.path.join("..", "tf_image", "contrast", "contrast_factor_" + str(contrast_factor) + ".jpg")
p_im.save(im_path, format="jpeg")
# ****************************
delta = -1 + i * 2 / 100
im = tf.image.adjust_hue(img, delta=delta)
print(im.dtype)
np_im = im.numpy().astype(np.uint8)
p_im = Image.fromarray(np_im)
check_or_makedirs(os.path.join("..", "tf_image", "hue"))
im_path = os.path.join("..", "tf_image", "hue", "delta_" + str(delta) + ".jpg")
p_im.save(im_path, format="jpeg")
# ****************************
jpeg_quality = 0 + i
im = tf.image.adjust_jpeg_quality(img, jpeg_quality=jpeg_quality)
print(im.dtype)
np_im = (im.numpy() * 255).astype(np.uint8)
# print(np_im)
p_im = Image.fromarray(np_im)
check_or_makedirs(os.path.join("..", "tf_image", "jpeg_quality"))
im_path = os.path.join("..", "tf_image", "jpeg_quality", "quality_" + str(jpeg_quality) + ".jpg")
p_im.save(im_path, format="jpeg")
# ****************************
saturation_factor = 0 + i * 100 / 100
im = tf.image.adjust_saturation(img, saturation_factor=saturation_factor)
print(im.dtype)
np_im = im.numpy().astype(np.uint8)
p_im = Image.fromarray(np_im)
check_or_makedirs(os.path.join("..", "tf_image", "saturation"))
im_path = os.path.join("..", "tf_image", "saturation", "saturation_factor_" + str(saturation_factor) + ".jpg")
p_im.save(im_path, format="jpeg")
AGD_ST/search/datasets.py | Erfun76/AGD
import glob
import random
import os
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
class ImageDataset(Dataset):
def __init__(self, root, transforms_=None, unaligned=False, mode='train', portion=None):
self.transform = transforms.Compose(transforms_)
self.unaligned = unaligned
self._portion = portion
self.files_A_total = sorted(glob.glob(os.path.join(root, '%s/A' % mode) + '/*.jpg'))
self.files_B_total = sorted(glob.glob(os.path.join(root, '%s/B' % mode) + '/*.jpg'))
if self._portion is not None:
num_files_A = len(self.files_A_total)
num_files_B = len(self.files_B_total)
if self._portion > 0:
split_A = int(np.floor(self._portion * num_files_A))
self.files_A = self.files_A_total[:split_A]
split_B = int(np.floor(self._portion * num_files_B))
self.files_B = self.files_B_total[:split_B]
elif self._portion < 0:
split_A = int(np.floor((1 + self._portion) * num_files_A))
self.files_A = self.files_A_total[split_A:]
split_B = int(np.floor((1 + self._portion) * num_files_B))
self.files_B = self.files_B_total[split_B:]
else:
self.files_A = self.files_A_total
self.files_B = self.files_B_total
def __getitem__(self, index):
item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))
if self.unaligned:
item_B = self.transform(Image.open(self.files_B[random.randint(0, len(self.files_B) - 1)]).convert('RGB'))
else:
item_B = self.transform(Image.open(self.files_B[index % len(self.files_B)]).convert('RGB'))
return {'A': item_A, 'B': item_B}
def __len__(self):
# return max(len(self.files_A), len(self.files_B))
return len(self.files_A)
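# Hedged usage sketch (paths and transforms are illustrative):
# transforms_ = [transforms.Resize(256), transforms.ToTensor()]
# train_set = ImageDataset('datasets/horse2zebra', transforms_=transforms_,
#                          unaligned=True, mode='train', portion=0.8)   # first 80%
# val_set = ImageDataset('datasets/horse2zebra', transforms_=transforms_,
#                        unaligned=False, mode='train', portion=-0.2)   # last 20%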
class PairedImageDataset(Dataset):
def __init__(self, dataset_dir, soft_data_dir, mode='train', portion=None, transforms_=None):
'''
Construct a paired dataset: real images from dataset_dir ('%s/A' % mode, *.jpg)
matched one-to-one with soft outputs from soft_data_dir (*.png).
dataset_dir: str. dataset root; soft_data_dir: str. directory of the paired soft outputs
'''
self.transform = transforms.Compose(transforms_)
self._portion = portion
path_A = os.path.join(dataset_dir, '%s/A' % mode)
path_B = os.path.join(soft_data_dir)
self.files_A_total = sorted(glob.glob(path_A + '/*.jpg'))
self.files_B_total = sorted(glob.glob(path_B + '/*.png'))
assert len(self.files_A_total) == len(self.files_B_total)
if self._portion is not None:
num_files = len(self.files_A_total)
if self._portion > 0:
split = int(np.floor(self._portion * num_files))
self.files_A = self.files_A_total[:split]
self.files_B = self.files_B_total[:split]
elif self._portion < 0:
split = int(np.floor((1 + self._portion) * num_files))
self.files_A = self.files_A_total[split:]
self.files_B = self.files_B_total[split:]
else:
self.files_A = self.files_A_total
self.files_B = self.files_B_total
print('files_A:', len(self.files_A))
print('files_B:', len(self.files_B))
def __getitem__(self, index):
if np.random.rand() < 0.5:
flip = True
else:
flip = False
img_A = Image.open(self.files_A[index % len(self.files_A)])
img_A = img_A.convert("RGB")
if flip:
img_A= np.asarray(img_A) # PIL.Image to np.ndarray
img_A = np.flip(img_A, axis=1) # data augmentation: horizontal flip
img_A = Image.fromarray(np.uint8(img_A)) # np.ndarray to PIL.Image
item_A = self.transform(img_A)
img_B = Image.open(self.files_B[index % len(self.files_B)])
img_B = img_B.convert("RGB")
if flip:
img_B= np.asarray(img_B) # PIL.Image to np.ndarray
img_B = np.flip(img_B, axis=1) # data augmentation: horizontal flip
img_B = Image.fromarray(np.uint8(img_B)) # np.ndarray to PIL.Image
item_B = self.transform(img_B)
return {'A': item_A, 'B': item_B}
def __len__(self):
return len(self.files_A)
experiments/basic/beaming.py | vuillaut/agnpy
import numpy as np
import math
import astropy.units as u
import astropy.constants as const
from astropy.coordinates import Distance
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import sys
sys.path.append("/home/jsitarek/zdalne/agnpy/agnpy/")
from agnpy.emission_regions import Blob
from agnpy.synchrotron import Synchrotron
nu = np.logspace(8, 27, 100) * u.Hz # for SED calculations
spectrum_norm = 1.0 * u.Unit("erg cm-3")
parameters = {
"p1": 1.5,
"p2": 2.5,
"gamma_b": 1.0e3,
"gamma_min": 1,
"gamma_max": 1.0e6,
}
spectrum_dict = {"type": "BrokenPowerLaw", "parameters": parameters}
delta_D = 1.01
Gamma = 1.01
B = 1.0 * u.G
r_b = 1.0e15 * u.cm
# no beaming
blob0 = Blob(r_b, 0.01, delta_D, Gamma, B, spectrum_norm, spectrum_dict, xi=0.01)
synch0 = Synchrotron(blob0, ssa=True)
synch0_sed = synch0.sed_flux(nu)
# beaming
delta_D = 20
Gamma = 15
blob1 = Blob(r_b, 0.01, delta_D, Gamma, B, spectrum_norm, spectrum_dict, xi=0.01)
synch1 = Synchrotron(blob1, ssa=True)
synch1_sed = synch1.sed_flux(nu)
# doing beaming by hand: dN/dOmega dt depsilon scales like D^2, and E^2 in SED scales with another D^2
synch0_sed_scale = synch0_sed * delta_D ** 4
nu_scale = nu * delta_D
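# (Added note: equivalently, the beamed SED satisfies nu_obs = delta_D * nu and
#  (nu F_nu)_obs = delta_D**4 * (nu F_nu), which is exactly the scaling applied
#  to synch0_sed and nu above.)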
plt.rc("figure", figsize=(7.5, 5.5))
plt.rc("font", size=12)
plt.rc("axes", grid=True)
plt.rc("grid", ls=":")
sed_x_label = r"$\nu\,/\,Hz$"
sed_y_label = r"$\nu F_{\nu}\,/\,(\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1})$"
plt.loglog(nu, synch0_sed, color="k", ls=":", lw=1, label="No beaming") #
plt.loglog(nu, synch1_sed, color="r", ls=":", lw=1, label="Beaming") #
plt.loglog(
nu_scale, synch0_sed_scale * 1.1, color="b", ls=":", lw=1, label="scaled"
) # 1.1 so both curves show up
plt.ylim(1e-15, 1e-7)
plt.xlim(1e8, 1e27)
plt.xscale("log")
plt.yscale("log")
plt.xlabel(sed_x_label)
plt.ylabel(sed_y_label)
plt.legend()
plt.show()
healthForecaster.py | rashecl/healthForecaster
import numpy as np
from sas7bdat import SAS7BDAT
import glob
import pandas as pd
from sklearn import preprocessing
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import utils, model_selection, metrics, linear_model, neighbors, ensemble
def convertAllCHSData(year = [], onlySubjectsWBiomarkers = 0):
if onlySubjectsWBiomarkers:
print('Only obtaining data for subjects/households with biomarker data.')
dataDirs = glob.glob('./data/Master*')
for dir in dataDirs:
SASfiles = glob.glob(dir + '/*.sas7bdat')
for SASfile in SASfiles:
convertSASfile(SASfile, year, onlySubjectsWBiomarkers)
def convertSASfile(inputFullPath, year = [], onlySubjectsWBiomarkers = 0):
print('Converting ' + inputFullPath)
df = SAS2DataFrame(inputFullPath, year = year)
outputName = inputFullPath.split('/')[-1].split('.')[0]
outputDir = '/'.join(inputFullPath.split('/')[0:-2])
if year:
outputFullPath = outputDir + '/' + outputName
outputFullPath = outputFullPath + '_' + str(year) + 'only' + '.csv'
else:
outputFullPath = outputDir + '/' + outputName + '.csv'
if onlySubjectsWBiomarkers:
subjectsWithBiomarkers = pd.read_csv('./data/subjectsWithBiomarkers.csv')
tmp = set(df.columns)
identifyingFields = list(tmp.intersection(set(subjectsWithBiomarkers.columns)))
if not identifyingFields:
print('No identifying fields found.')
return
elif identifyingFields.count('idind'):
selFactor = 'idind'
selidinds = list(set(df[selFactor]).intersection(set(subjectsWithBiomarkers[selFactor])))
selIdxs = [a in selidinds for a in df[selFactor]]
df = df[selIdxs]
elif identifyingFields.count('hhid'):
selFactor = 'hhid'
selidinds = list(set(df[selFactor]).intersection(set(subjectsWithBiomarkers[selFactor])))
selIdxs = [a in selidinds for a in df[selFactor]]
df = df[selIdxs]
elif identifyingFields.count('commid'):
selFactor = 'commid'
selidinds = list(set(df[selFactor]).intersection(set(subjectsWithBiomarkers[selFactor])))
selIdxs = [a in selidinds for a in df[selFactor]]
df = df[selIdxs]
print(str(df.shape[0]) + ' valid rows')
df.to_csv(outputFullPath)
return
def SAS2DataFrame(inputFullPath, year = []):
with SAS7BDAT(inputFullPath, skip_header=False) as reader:
df = reader.to_data_frame()
df.columns = [col.lower() for col in df.columns]
if year and 'wave' in df.columns:
df = df[df['wave'] == year]
return df
def getSurveyData():
''' Gets relevant survey data for dHealth project
i.e. survey data for subjects that have biomarker data
'''
surveyPath = './data/Master_ID_201908/surveys_pub_12.sas7bdat'
surveyData = SAS2DataFrame(surveyPath)
surveyData = surveyData[(surveyData['biomaker'] == 1) & (surveyData['wave'] == 2009)]
return surveyData
def getBiomarkerData():
surveyData = getSurveyData()
biomarkerPath = './data/Master_Biomarker_2009/biomarker_09.sas7bdat'
biomarkerData = SAS2DataFrame(biomarkerPath)
ids1 = set(biomarkerData.idind)
ids2 = set(surveyData.idind)
excludeIds = list(ids1.difference(ids2))
for id in excludeIds:
tmp = list(biomarkerData.idind)
idx = tmp.index(id)
biomarkerData = biomarkerData.drop(idx)
return biomarkerData
def createSubjectsWithBiomarkersCSV():
surveyData = getSurveyData()
surveyData = surveyData.iloc[:,[0,1,5,3]]
surveyData.columns = ['idind', 'hhid', 'commid', 'Age']
surveyData.to_csv('./data/subjectsWithBiomarkers.csv')
# createSubjectsWithBiomarkersCSV()
featureMap = pd.read_csv('featureTableMap.csv')
subjects = pd.read_csv('./data/subjectsWithBiomarkers.csv',usecols = ['idind','Age']) # Could add others too'hhid','commid'
def createGenderCSV():
print('Extracting gender data...')
subjects = pd.read_csv('./data/subjectsWithBiomarkers.csv',usecols = ['idind','hhid','commid'])
subjects = subjects.astype({'idind': 'int',
'hhid': 'int',
'commid': 'int'})
def getGender(subjectIdx, idind_1, idind_2, sex_1, sex_2):
gender = np.nan
if subjects.idind[subjectIdx] in idind_1:
idx = idind_1.index(subjects.idind[subjectIdx])
gender = int(sex_1[idx])
elif subjects.idind[subjectIdx] in idind_2:
idx = idind_2.index(subjects.idind[subjectIdx])
gender = int(sex_2[idx])
else:
gender = np.nan
if gender == 1:
gender = int(1)
elif gender == 2:
gender = 0
if subjectIdx % 500 == 0:
print(str(round(100 * subjectIdx / len(subjects), 1)) + '% complete')
return gender
relations = pd.read_csv('./data/relationmast_pub_00_2009only.csv')
idind_1 = list(relations.idind_1)
idind_2 = list(relations.idind_2)
sex_1 = list(relations.sex_1)
sex_2 = list(relations.sex_2)
gender = [getGender(i, idind_1, idind_2, sex_1, sex_2) for i in range(len(subjects))]
d = {'idind': subjects.idind, 'Sex': gender}
df = pd.DataFrame(data=d)
df.to_csv('./data/gender.csv')
def createSleep_ScreenTimeCSV():
sleep_screenTime = pd.read_csv('./data/pact_12_2009only.csv',usecols = ['idind', 'u324', 'u339','u340_mn', 'u341_mn','u508', 'u509_mn','u510_mn','u345','u346_mn', 'u347_mn'])
sleep_screenTime.columns = ['idind', 'Hours_of_sleep', 'watchTV','TVhours_week','TVhours_weekend','goesOnline','online_week','online_weekend', 'play_videoGames', 'videoGames_week', 'videoGames_weekend']
sleep_screenTime = sleep_screenTime.replace({'watchTV':{9:1,np.nan:1}, 'goesOnline':{9:0,np.nan:0}, 'play_videoGames':{9:0,np.nan:0}, 'Hours_of_sleep':{-9: np.nan}})
sleep_screenTime = sleep_screenTime.fillna(sleep_screenTime.median())
sleep_screenTime_subjects= list(sleep_screenTime.idind)
def getDailyScreenTime(subjectIdx):
weeklyScreenTime = 0
if subjects.idind[subjectIdx] in sleep_screenTime_subjects:
idx = sleep_screenTime_subjects.index(subjects.idind[subjectIdx])
else:
return np.nan
if sleep_screenTime.watchTV[idx]:
weeklyScreenTime = weeklyScreenTime + sleep_screenTime.TVhours_week[idx] + sleep_screenTime.TVhours_weekend[idx]
else:
pass
if sleep_screenTime.goesOnline[idx]:
weeklyScreenTime = weeklyScreenTime + sleep_screenTime.online_week[idx] + sleep_screenTime.online_weekend[idx]
else:
pass
if sleep_screenTime.play_videoGames[idx]:
weeklyScreenTime = weeklyScreenTime + sleep_screenTime.videoGames_week[idx] + sleep_screenTime.videoGames_weekend[idx]
else:
pass
return np.round(weeklyScreenTime/7)
def getDailySleepTime(subjectIdx):
if subjects.idind[subjectIdx] in sleep_screenTime_subjects:
idx = sleep_screenTime_subjects.index(subjects.idind[subjectIdx])
else:
return np.nan
return sleep_screenTime.Hours_of_sleep[idx]
subjects = pd.read_csv('./data/subjectsWithBiomarkers.csv',usecols = ['idind'])
Daily_screen_time = [getDailyScreenTime(i) for i in range(len(subjects))]
Hours_of_sleep = [getDailySleepTime(i) for i in range(len(subjects))]
d = {'idind': subjects.idind, 'Daily_screen_time': Daily_screen_time, 'Hours_of_sleep': Hours_of_sleep}
df = pd.DataFrame(data=d)
df.to_csv('./data/sleep_screentime.csv')
return df
# Define these variables for default inputs for the functions below:
def preprocessRawChinaHealthStudyData():
createSubjectsWithBiomarkersCSV()
convertAllCHSData(year = 2009, onlySubjectsWBiomarkers = 1)
createGenderCSV()
createSleep_ScreenTimeCSV()
def getAndMergeTables(subjects = subjects, tableNum = 1):
newDF = pd.read_csv('./data/'+featureMap['tablename'][tableNum],usecols = eval(featureMap['varnames'][tableNum]))
newDF.columns = eval(featureMap['newnames'][tableNum])
try:
replaceDict = eval(featureMap['replacements'][tableNum])
print('This should not work for surveys')
newDF.replace(replaceDict, inplace = True)
except:
print('Could not replace values or none exists.')
subjects = pd.merge(subjects,newDF,how='left', on ='idind')
print(list(newDF.columns))
print(subjects.columns)
return subjects
def createDataTable():
subjects = pd.read_csv('./data/subjectsWithBiomarkers.csv',usecols = ['idind','Age'])
print('Adding demographic info')
for i in range(1,4):
print('Adding ' + featureMap['tablename'][i])
subjects = getAndMergeTables(subjects = subjects, tableNum = i)
print('One-hot-encoding medical conditions...')
# One-hot-encode medical conditions:
medicalConditions = subjects['Medical_condition'].fillna('noReport')
medicalConditions = medicalConditions.fillna('noReport')
medicalConditions = pd.DataFrame(medicalConditions)
enc = preprocessing.OneHotEncoder(categories = "auto")
enc.fit(medicalConditions)
data = enc.transform(medicalConditions).toarray()
columnNames = enc.categories_[0]
medicalConditions = pd.DataFrame(data,columns=columnNames)
# Replace old medical condition column to one-hot-encoded vars:
subjects.drop('Medical_condition', axis=1, inplace=True)
subjects=pd.concat([subjects,medicalConditions], axis=1, ignore_index=False)
# Add physical exam:
print('Adding lifestyle features...')
i = 4
print('Adding ' + featureMap['tablename'][i])
subjects = getAndMergeTables(subjects = subjects, tableNum = i)
# Add lifestyle features:
print('Adding lifestyle features...')
for i in range(5,featureMap.shape[0]-1):
print('Adding ' + featureMap['tablename'][i])
subjects = getAndMergeTables(subjects = subjects, tableNum = i)
print('Adding reponse variables...')
# Add the response variables (biomarker levels):
i = featureMap.shape[0]-1
print('Adding ' + featureMap['tablename'][i])
subjects = getAndMergeTables(subjects = subjects, tableNum = i)
# Median impute missing data:
subjects = subjects.fillna(subjects.median())
#Change data types:
subjects = subjects.astype({'idind': 'int',
'Sex': 'int',
'Urban': 'int',
'Activity_level': 'int'})
return subjects
def shuffleAndSplit(featureMatrix, targetMatrix, test_size=.2, n_splits=5):
# Shuffle datasets:
X,Y = utils.shuffle(featureMatrix,targetMatrix, random_state = 0)
# Split X and y into training and test sets (80% Train : 20% Test):
X_Train, X_Test, Y_Train, Y_Test = model_selection.train_test_split(
X, Y, random_state = 0, test_size = test_size)
cv=model_selection.KFold(n_splits = n_splits, shuffle = False)
return X_Train, X_Test, Y_Train, Y_Test, cv
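# Hedged usage sketch:
# X_Train, X_Test, Y_Train, Y_Test, cv = shuffleAndSplit(X, Y, test_size=.2, n_splits=5)
# for train_idx, val_idx in cv.split(X_Train):
#     pass  # fit on X_Train[train_idx], validate on X_Train[val_idx]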
def showDataSplits(Y_Train, Y_Test, cv):
''' Helper function to show how the data was split
'''
fig, ax = plt.subplots(figsize = (12,3))
plt.xlim(0,len(Y_Train)+len(Y_Test))
plt.ylim(0,cv.n_splits+1.5)
ax.set_title('Training and Validation splits \n (after shuffling)')
plt.xlabel('Dataset indicies')
yticklabels= [];
offset = -.4
i = 0
for train_idxs, cval_idxs in cv.split(Y_Train):
# training data:
i += 1
start = (min(train_idxs),i+offset)
width = max(train_idxs)-min(train_idxs)
if i == 1:
ax.add_patch(mpl.patches.Rectangle(start, width = width, height = .8, color = 'c', label = 'CV_train'))
ax.add_patch(mpl.patches.Rectangle(start, width = width, height = .8, color = 'c'))
# cross-validation data:
start = (min(cval_idxs),i+offset)
width = max(cval_idxs)-min(cval_idxs)
if i == 1:
ax.add_patch(mpl.patches.Rectangle(start, width = width, height = .8, color = 'orange', label = 'CV_validation'))
ax.add_patch(mpl.patches.Rectangle(start, width = width, height = .8, color = 'orange'))
yticklabels.append('Cross validation_' + str(i))
start = (0,cv.n_splits+1+offset)
width = len(Y_Train)
ax.add_patch(mpl.patches.Rectangle(start, width = width, height = .8, color = 'g', label = 'Train'))
start = (len(Y_Train),cv.n_splits+1+offset)
width = len(Y_Train)
ax.add_patch(mpl.patches.Rectangle(start, width = width, height = .8, color = 'r', label = 'Test'))
yticklabels.append('Final test')
#Format plot
plt.yticks(np.arange(1,cv.n_splits+2),yticklabels)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()
def createHealthForecasterModels():
import pickle
## Aggregate relevant data for ML:
data = createDataTable()
fixedFactors = ['Age', 'Sex', 'Urban', 'ENT', 'OBGYN', 'Old_age_midLife_syndrome', 'alcohol_poisoning',
'dermatological', 'digestive', 'endocrine', 'heart', 'hematological', 'infectious_parasitic', 'injury',
'muscular_rheumatological', 'neurological', 'noDiagnosis', 'noReport', 'other', 'pyschiatric', 'respiratory',
'sexualDysfunction', 'tumor', 'unknown', 'urinary', 'High_BP', 'Diabetes', 'Heart_attack', 'Internal_bleeding',
'Pregnant','Height']
fixedFactorIdxs = [list(data.columns).index(varName) for varName in fixedFactors]
lifestyleFactors = ['Smoker', 'Cups_water_daily', 'Alcohol_frequency', 'Weight', 'Kcal', 'Carbs', 'Fat', 'Protein', 'Activity_level', 'Daily_screen_time', 'Hours_of_sleep']
lifestyleFactorIdxs = [list(data.columns).index(varName) for varName in lifestyleFactors]
responseVariables = ['Insulin','Triglycerides','HDL_C', 'LDL_C','Urea', 'Uric_acid', 'APO_A', 'Lipoprotein_A','High_sensitivity_CRP', 'Creatinine',
'APO_B', 'Mg', 'Ferritin', 'Hemoglobin', 'White_blood_cell',
'Red_blood_cell', 'Platelet', 'Glucose_field','HbA1c', 'Total_protein','Albumin', 'Glucose',
'Total_cholestorol', 'Alanine_AT', 'Transferrin', 'Transferrin_receptor','Systol', 'Diastol']
responseVariableIdxs = [list(data.columns).index(varName) for varName in responseVariables]
fatRelatedIdxs = [responseVariables.index('APO_A'),
responseVariables.index('Lipoprotein_A'),
responseVariables.index('HDL_C'),
responseVariables.index('LDL_C'),
responseVariables.index('APO_B'),
responseVariables.index('Triglycerides'),
responseVariables.index('Total_cholestorol')]
gluRelatedIdxs = [responseVariables.index('Insulin'),
responseVariables.index('HbA1c'),
responseVariables.index('Glucose')]
inputFeatures = fixedFactors + lifestyleFactors
X = data[inputFeatures].to_numpy()
Y = data[responseVariables].to_numpy()
# Y_zscore = (Y-np.mean(Y,axis=0))/np.std(Y,axis=0)
# X_Train, X_Test, Y_Train, Y_Test, cv = shuffleAndSplit(X, Y, test_size=.2, n_splits=5)
# X_Train, X_Test, Y_Train_zscore, Y_Test_zscore, cv = shuffleAndSplit(X, Y_zscore, test_size=.2, n_splits=5)
## Create a second model to predict weight:
# fixedFactors2 = ['age', 'sex', 'urban', 'ENT', 'OBGYN', 'Old_age_midLife_syndrome', 'alcohol_poisoning',
# 'dermatological', 'digestive', 'endocrine', 'heart', 'hematological', 'infectious_parasitic', 'injury',
# 'muscular_rheumatological', 'neurological', 'noDiagnosis', 'noReport', 'other', 'pyschiatric', 'respiratory',
# 'sexualDysfunction', 'tumor', 'unknown', 'urinary', 'highBP', 'diabetes', 'heart_attack', 'internal_bleeding',
# 'pregnant','height']
# fixedFactorIdxs2 = [list(data.columns).index(varName) for varName in fixedFactors]
# lifestyleFactors2 = ['smoker', 'cups_water_daily', 'Alcohol_frequency', 'kcal', 'carbo', 'fat', 'protn', 'Activity_level', 'Daily_screen_time', 'Hours_of_sleep']
# lifestyleFactorIdxs2 = [list(data.columns).index(varName) for varName in lifestyleFactors]
# responseVariables2 = ['weight']
# responseVariableIdxs2 = [list(data.columns).index(varName) for varName in responseVariables2]
# inputFeatures2 = fixedFactors2+lifestyleFactors2
# X2 = data[fixedFactors2 + lifestyleFactors2].to_numpy()
# Y2 = data[responseVariables2].to_numpy()
# X_Train2, X_Test2, Y_Train2, Y_Test2, cv = shuffleAndSplit(X2, Y2, test_size=.2, n_splits=5)
models = dict(ols=linear_model.LinearRegression(),
lasso=linear_model.Lasso(alpha=0.75),
ridge=linear_model.Ridge(alpha=0.75),
elastic=linear_model.ElasticNet(alpha=0.1, l1_ratio=0.75),
randomForest = ensemble.RandomForestRegressor(random_state=0,
max_features = 'auto',
min_samples_leaf = 50, #max_depth = 3,
n_estimators = 200)
)
# Also define models to predict z_score Target Matrix
# models_zscore = dict(ols=linear_model.LinearRegression(),
# lasso=linear_model.Lasso(alpha=.5),
# ridge=linear_model.Ridge(alpha=.5),
# elastic=linear_model.ElasticNet(alpha=.5, l1_ratio=0.5),
# randomForest = ensemble.RandomForestRegressor(random_state=0,
# max_features = 'auto',
# min_samples_leaf = 10,
# n_estimators = 200)
# weightModel = dict(ols=linear_model.LinearRegression(),
# lasso=linear_model.Lasso(alpha=.5),
# ridge=linear_model.Ridge(alpha=.5),
# elastic=linear_model.ElasticNet(alpha=.5, l1_ratio=0.5),
# randomForest = ensemble.RandomForestRegressor(random_state=0,
# max_features = 'auto',
# min_samples_leaf = 10,
# n_estimators = 200))
# print('Training trainedWeightBPModels')
# trainedWeightModels = {}
# for name, mdl in weightModel.items():
# print('Training ' + str(name) + '...')
# trainedWeightModels.update({name : mdl.fit(X2,Y2.ravel())})
# print('finished')
# Train models
print('Training trainedModels')
trainedModels = {}
for name, mdl in models.items():
print('Training ' + str(name) + '...')
trainedModels.update({name : mdl.fit(X,Y)})
print('finished')
# pickle.dump([trainedModels, trainedWeightModels, inputFeatures, responseVariables, inputFeatures2, responseVariables2], open("models.p", "wb"))
pickle.dump([trainedModels, inputFeatures, responseVariables, data], open("models.p", "wb"))
# return trainedModels, trainedWeightModels, inputFeatures, responseVariables, inputFeatures2, responseVariables2
return trainedModels, inputFeatures, responseVariables
def parseInputs(inputDict,inputFeatures):
# inputValues = np.zeros(len(inputFeatures))
currentValues = np.zeros(len(inputFeatures))
futureValues = np.zeros(len(inputFeatures))
# Age
currentValues[inputFeatures.index('Age')] = inputDict['Age']
futureValues[inputFeatures.index('Age')] = inputDict['Age']
# Sex
if inputDict['Sex'] == 'M':
currentValues[inputFeatures.index('Sex')] = 1
futureValues[inputFeatures.index('Sex')] = 1
else:
currentValues[inputFeatures.index('Sex')] = 0
futureValues[inputFeatures.index('Sex')] = 0
# Location:
if inputDict['Location'] == 'Urban':
currentValues[inputFeatures.index('Urban')] = 1
futureValues[inputFeatures.index('Urban')] = 1
else:
currentValues[inputFeatures.index('Urban')] = 0
futureValues[inputFeatures.index('Urban')] = 0
# Physical exam/Medical Conditions:
currentValues[inputFeatures.index('Height')] = inputDict['Height']*2.54
futureValues[inputFeatures.index('Height')] = inputDict['Height']*2.54
currentValues[inputFeatures.index(inputDict['Medical_condition'])] = 1
futureValues[inputFeatures.index(inputDict['Medical_condition'])] = 1
if inputDict['Pregnant']:
currentValues[inputFeatures.index('Pregnant')] = 1
futureValues[inputFeatures.index('Pregnant')] = 1
if inputDict['Diabetes']:
currentValues[inputFeatures.index('Diabetes')] = 1
futureValues[inputFeatures.index('Diabetes')] = 1
if inputDict['High_BP']:
currentValues[inputFeatures.index('High_BP')] = 1
futureValues[inputFeatures.index('High_BP')] = 1
if inputDict['Heart_attack']:
currentValues[inputFeatures.index('Heart_attack')] = 1
futureValues[inputFeatures.index('Heart_attack')] = 1
if inputDict['Internal_bleeding']:
currentValues[inputFeatures.index('Internal_bleeding')] = 1
futureValues[inputFeatures.index('Internal_bleeding')] = 1
# currentValues = futureValues = inputValues # This may have done some weird cloning thing?
### Current lifestyle
# Habits:
if inputDict['currAlcohol_frequency'] == 'daily':
currentValues[inputFeatures.index('Alcohol_frequency')] = 1
elif inputDict['currAlcohol_frequency'] == '3-4 times a week':
currentValues[inputFeatures.index('Alcohol_frequency')] = 2
elif inputDict['currAlcohol_frequency'] == 'Once or twice a week':
currentValues[inputFeatures.index('Alcohol_frequency')] = 3
elif inputDict['currAlcohol_frequency'] == 'Once or twice a month':
currentValues[inputFeatures.index('Alcohol_frequency')] = 4
elif inputDict['currAlcohol_frequency'] == 'No more than once a month':
currentValues[inputFeatures.index('Alcohol_frequency')] = 5
else:
currentValues[inputFeatures.index('Alcohol_frequency')] = 3
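# (Sketch: the branching above amounts to a lookup table such as
#  {'daily': 1, '3-4 times a week': 2, 'Once or twice a week': 3,
#   'Once or twice a month': 4, 'No more than once a month': 5}
#  with .get(value, 3) as the fallback; the intervention block below
#  applies the same mapping to the 'int' inputs.)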
currentValues[inputFeatures.index('Cups_water_daily')] = inputDict['currCups_water_daily']
if inputDict['currSmoker']:
currentValues[inputFeatures.index('Smoker')] = 1
# Diet/Weight:
currentValues[inputFeatures.index('Kcal')] = inputDict['currCarbo']*4 + inputDict['currProtn']*4 + inputDict['currFat']*9 #currKcal
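# (4 kcal/g for carbohydrate and protein, 9 kcal/g for fat -- the standard
#  Atwater factors; the same formula is reused for the intervention Kcal below)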
currentValues[inputFeatures.index('Carbs')] = inputDict['currCarbo']
currentValues[inputFeatures.index('Fat')] = inputDict['currFat']
currentValues[inputFeatures.index('Protein')] = inputDict['currProtn']
# Activity
currentValues[inputFeatures.index('Activity_level')] = inputDict['currActivityLevel']
currentValues[inputFeatures.index('Daily_screen_time')] = inputDict['currDailyScreenTime']
currentValues[inputFeatures.index('Hours_of_sleep')] = inputDict['currHours_of_sleep']
if 'Weight' in inputFeatures:
currentValues[inputFeatures.index('Weight')] = inputDict['currWeight']/2.205
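# (/2.205 converts pounds to kilograms, matching the *2.54 inch-to-centimetre
#  conversion applied to height above)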
### Lifestyle intervention
# Habits:
if inputDict['intAlcohol_frequency'] == 'daily':
futureValues[inputFeatures.index('Alcohol_frequency')] = 1
elif inputDict['intAlcohol_frequency'] == '3-4 times a week':
futureValues[inputFeatures.index('Alcohol_frequency')] = 2
elif inputDict['intAlcohol_frequency'] == 'Once or twice a week':
futureValues[inputFeatures.index('Alcohol_frequency')] = 3
elif inputDict['intAlcohol_frequency'] == 'Once or twice a month':
futureValues[inputFeatures.index('Alcohol_frequency')] = 4
elif inputDict['intAlcohol_frequency'] == 'No more than once a month':
futureValues[inputFeatures.index('Alcohol_frequency')] = 5
else:
futureValues[inputFeatures.index('Alcohol_frequency')] = 3
futureValues[inputFeatures.index('Cups_water_daily')] = inputDict['intCups_water_daily']
if inputDict['intSmoker']:
futureValues[inputFeatures.index('Smoker')] = 1
# Diet/Weight:
futureValues[inputFeatures.index('Kcal')] = inputDict['intCarbo']*4 + inputDict['intProtn']*4 + inputDict['intFat']*9 #currKcal
futureValues[inputFeatures.index('Carbs')] = inputDict['intCarbo']
futureValues[inputFeatures.index('Fat')] = inputDict['intFat']
futureValues[inputFeatures.index('Protein')] = inputDict['intProtn']
# Activity
futureValues[inputFeatures.index('Activity_level')] = inputDict['intActivityLevel']
futureValues[inputFeatures.index('Daily_screen_time')] = inputDict['intDailyScreenTime']
futureValues[inputFeatures.index('Hours_of_sleep')] = inputDict['intHours_of_sleep']
if 'Weight' in inputFeatures:
futureValues[inputFeatures.index('Weight')] = inputDict['intWeight']/2.205
return currentValues, futureValues
def plotSubjectModelPrediction(trainedModels, X, Y, responseVariables, modelName = 'randomForest', subjectIdx = 3):
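# NOTE: the hard-coded range(0, 26) below assumes exactly 26 response variables;
# range(len(responseVariables)) would track the actual count.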
import matplotlib.pyplot as plt
f, ax = plt.subplots(figsize=(11, 5))
y_predict = trainedModels[modelName].predict(X[subjectIdx,:].reshape(1, -1))
plt.scatter(range(0,26), Y[subjectIdx,:].T,color = 'b',label = 'actual')
plt.scatter(range(0,26), y_predict.T,color = 'r',label = 'prediction')
plt.xticks(range(0,26))
plt.xticks(rotation='vertical')
ax.set_xticklabels(responseVariables)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()
## Helper functions
def update_progress(numerator, denominator=1, taskName = 'Progress'):
from IPython.display import clear_output
bar_length = 20
if isinstance(numerator, int):
numerator = float(numerator)
if not isinstance(numerator, float):
numerator = 0
if numerator/denominator < 0:
numerator = 0
if numerator/denominator >= 1:
numerator = denominator
block = int(round(bar_length * (numerator/denominator)))
clear_output(wait = True)
text = taskName + ": [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), (numerator/denominator) * 100)
print(text)
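# e.g. update_progress(7, 20, 'Training') prints: Training: [#######-------------] 35.0%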
| 2.71875 | 3 |
pytagcloud/lang/counter.py | cocode/PyTagCloud | 0 | 12770962 | <filename>pytagcloud/lang/counter.py
# -*- coding: utf-8 -*-
import re
from pytagcloud.lang.stopwords import StopWords
from operator import itemgetter
from collections import defaultdict
def get_tag_counts(text):
"""
Search tags in a given text. The language detection is based on stop lists.
This implementation is inspired by https://github.com/jdf/cue.language. Thanks <NAME>.
"""
words = map(lambda x:x.lower(), re.findall(r"[\w']+", text, re.UNICODE))
# words above is an iterator, which would be consumed by guess(words),
# so convert it to a list first (as it would have been in Python 2)
words = [word for word in words]
s = StopWords()
s.load_language(s.guess(words))
counted = defaultdict(int)
for word in words:
if not s.is_stop_word(word) and len(word) > 1:
counted[word] += 1
return sorted(counted.items(), key=itemgetter(1), reverse=True)
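# Example (illustrative; stop words such as 'the' are dropped and single-letter
# tokens ignored):
#   get_tag_counts("The fox jumps. The fox sleeps.") -> [('fox', 2), ('jumps', 1), ('sleeps', 1)]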
| 3.53125 | 4 |
pyclustering/nnet/__init__.py | JosephChataignon/pyclustering | 1,013 | 12770963 | <reponame>JosephChataignon/pyclustering<gh_stars>1000+
"""!
@brief Neural and oscillatory network module. Consists of models of bio-inspired networks.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
from enum import IntEnum
class initial_type(IntEnum):
"""!
@brief Enumerator of types of oscillator output initialization.
"""
## Output of oscillators are random in line with gaussian distribution.
RANDOM_GAUSSIAN = 0
## Output of oscillators are equidistant from each other (uniformly distributed, not randomly).
EQUIPARTITION = 1
class solve_type(IntEnum):
"""!
@brief Enumerator of solver types that are used for network simulation.
"""
## Forward Euler first-order method.
FAST = 0 # Usual calculation: x(k + 1) = x(k) + f(x(k)).
## Classic fourth-order Runge-Kutta method (fixed step).
RK4 = 1
## Runge-Kutta-Fehlberg method with order 4 and 5 (float step)."
RKF45 = 2
class conn_type(IntEnum):
"""!
@brief Enumerator of connection types between oscillators.
"""
## No connection between oscillators.
NONE = 0
## All oscillators have connection with each other.
ALL_TO_ALL = 1
## Connections between oscillators represent grid where one oscillator can be connected with four neighbor oscillators: right, upper, left, lower.
GRID_FOUR = 2
## Connections between oscillators represent grid where one oscillator can be connected with eight neighbor oscillators: right, right-upper, upper, upper-left, left, left-lower, lower, lower-right.
GRID_EIGHT = 3
## Connections between oscillators represent bidirectional list.
LIST_BIDIR = 4
## Connections are defined by user or by network during simulation.
DYNAMIC = 5
class conn_represent(IntEnum):
"""!
@brief Enumerator of internal network connection representation between oscillators.
"""
## Each oscillator has list of his neighbors.
LIST = 0
## Connections are represented my matrix connection NxN, where N is number of oscillators.
MATRIX = 1
class network:
"""!
@brief Common network description that consists of information about oscillators and connection between them.
"""
_num_osc = 0
_osc_conn = None
_conn_represent = None
__conn_type = None
__height = 0
__width = 0
@property
def height(self):
"""!
@brief Height of the network grid (defined by the amount of oscillators in each column); this value is zero in case of a non-grid structure.
@note This property returns a valid value only for a network with grid structure.
"""
return self.__height
@property
def width(self):
"""!
@brief Width of the network grid, this value is zero in case of non-grid structure.
@note This property returns a valid value only for a network with grid structure.
"""
return self.__width
@property
def structure(self):
"""!
@brief Type of network structure that is used for connecting oscillators.
"""
return self.__conn_type
def __init__(self, num_osc, type_conn = conn_type.ALL_TO_ALL, conn_repr = conn_represent.MATRIX, height = None, width = None):
"""!
@brief Constructor of the network.
@param[in] num_osc (uint): Number of oscillators in the network that defines size of the network.
@param[in] type_conn (conn_type): Type of connections that are used in the network between oscillators.
@param[in] conn_repr (conn_represent): Type of representation of connections.
@param[in] height (uint): Number of oscillators in column of the network, this argument is used
only for network with grid structure (GRID_FOUR, GRID_EIGHT), for other types this argument is ignored.
@param[in] width (uint): Number of oscillators in a row of the network, this argument is used only
for network with grid structure (GRID_FOUR, GRID_EIGHT), for other types this argument is ignored.
"""
self._num_osc = num_osc
self._conn_represent = conn_repr
self.__conn_type = type_conn
if conn_repr is None:
self._conn_represent = conn_represent.MATRIX
if (type_conn == conn_type.GRID_EIGHT) or (type_conn == conn_type.GRID_FOUR):
if (height is not None) and (width is not None):
self.__height = height
self.__width = width
else:
side_size = self._num_osc ** 0.5
if (side_size - math.floor(side_size) > 0):
raise NameError("Invalid number of oscillators '" + str(num_osc) + "' in the network in case of grid structure (the square root of the number of oscillators must be an integer).");
self.__height = int(side_size)
self.__width = self.__height
if self.__height * self.__width != self._num_osc:
raise NameError('Width (' + str(self.__width) + ') x Height (' + str(self.__height) + ') must be equal to Size (' + str(self._num_osc) + ') in case of grid structure');
self._create_structure(type_conn)
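# Usage sketch: network(9, conn_type.GRID_FOUR) builds a 3x3 grid, while
# network(10, conn_type.GRID_FOUR) raises NameError because 10 has no integer square root.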
def __len__(self):
"""!
@brief Returns size of the network that is defined by amount of oscillators.
"""
return self._num_osc;
def __create_connection(self, index1, index2):
if (self._conn_represent == conn_represent.MATRIX):
self._osc_conn[index1][index2] = True;
else:
self._osc_conn[index1].append(index2);
def __create_all_to_all_connections(self):
"""!
@brief Creates connections between all oscillators.
"""
if (self._conn_represent == conn_represent.MATRIX):
for index in range(0, self._num_osc, 1):
self._osc_conn.append([True] * self._num_osc);
self._osc_conn[index][index] = False;
elif (self._conn_represent == conn_represent.LIST):
for index in range(0, self._num_osc, 1):
self._osc_conn.append([neigh for neigh in range(0, self._num_osc, 1) if index != neigh]);
def __create_grid_four_connections(self):
"""!
@brief Creates network with connections that make up four grid structure.
@details Each oscillator may be connected with four neighbors in line with 'grid' structure: right, upper, left, lower.
"""
side_size = self.__width;
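# Illustrative 3x3 grid indexing: oscillator 4 is coupled with 1, 3, 5 and 7.
#   0 1 2
#   3 4 5
#   6 7 8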
if (self._conn_represent == conn_represent.MATRIX):
self._osc_conn = [[0] * self._num_osc for index in range(0, self._num_osc, 1)];
elif (self._conn_represent == conn_represent.LIST):
self._osc_conn = [[] for index in range(0, self._num_osc, 1)];
else:
raise NameError("Unknown type of representation of connections");
for index in range(0, self._num_osc, 1):
upper_index = index - side_size;
lower_index = index + side_size;
left_index = index - 1;
right_index = index + 1;
node_row_index = math.ceil(index / side_size);
if (upper_index >= 0):
self.__create_connection(index, upper_index);
if (lower_index < self._num_osc):
self.__create_connection(index, lower_index);
if ( (left_index >= 0) and (math.ceil(left_index / side_size) == node_row_index) ):
self.__create_connection(index, left_index);
if ( (right_index < self._num_osc) and (math.ceil(right_index / side_size) == node_row_index) ):
self.__create_connection(index, right_index);
def __create_grid_eight_connections(self):
"""!
@brief Creates network with connections that make up eight grid structure.
@details Each oscillator may be connected with eight neighbors in line with grid structure: right, right-upper, upper, upper-left, left, left-lower, lower, lower-right.
"""
self.__create_grid_four_connections(); # create connection with right, upper, left, lower.
side_size = self.__width;
for index in range(0, self._num_osc, 1):
upper_left_index = index - side_size - 1;
upper_right_index = index - side_size + 1;
lower_left_index = index + side_size - 1;
lower_right_index = index + side_size + 1;
node_row_index = math.floor(index / side_size);
upper_row_index = node_row_index - 1;
lower_row_index = node_row_index + 1;
if ( (upper_left_index >= 0) and (math.floor(upper_left_index / side_size) == upper_row_index) ):
self.__create_connection(index, upper_left_index);
if ( (upper_right_index >= 0) and (math.floor(upper_right_index / side_size) == upper_row_index) ):
self.__create_connection(index, upper_right_index);
if ( (lower_left_index < self._num_osc) and (math.floor(lower_left_index / side_size) == lower_row_index) ):
self.__create_connection(index, lower_left_index);
if ( (lower_right_index < self._num_osc) and (math.floor(lower_right_index / side_size) == lower_row_index) ):
self.__create_connection(index, lower_right_index);
def __create_list_bidir_connections(self):
"""!
@brief Creates network as bidirectional list.
@details Each oscillator may be connected with two neighbors in line with the classical list structure: right, left.
"""
if (self._conn_represent == conn_represent.MATRIX):
for index in range(0, self._num_osc, 1):
self._osc_conn.append([0] * self._num_osc);
self._osc_conn[index][index] = False;
if (index > 0):
self._osc_conn[index][index - 1] = True;
if (index < (self._num_osc - 1)):
self._osc_conn[index][index + 1] = True;
elif (self._conn_represent == conn_represent.LIST):
for index in range(self._num_osc):
self._osc_conn.append([]);
if (index > 0):
self._osc_conn[index].append(index - 1);
if (index < (self._num_osc - 1)):
self._osc_conn[index].append(index + 1);
def __create_none_connections(self):
"""!
@brief Creates network without connections.
"""
if (self._conn_represent == conn_represent.MATRIX):
for _ in range(0, self._num_osc, 1):
self._osc_conn.append([False] * self._num_osc);
elif (self._conn_represent == conn_represent.LIST):
self._osc_conn = [[] for _ in range(0, self._num_osc, 1)];
def __create_dynamic_connection(self):
"""!
@brief Prepare storage for dynamic connections.
"""
if (self._conn_represent == conn_represent.MATRIX):
for _ in range(0, self._num_osc, 1):
self._osc_conn.append([False] * self._num_osc);
elif (self._conn_represent == conn_represent.LIST):
self._osc_conn = [[] for _ in range(0, self._num_osc, 1)];
def _create_structure(self, type_conn = conn_type.ALL_TO_ALL):
"""!
@brief Creates connections in line with the representation of matrix connections [NumOsc x NumOsc].
@param[in] type_conn (conn_type): Connection type (all-to-all, bidirectional list, grid structure, etc.) that is used by the network.
"""
self._osc_conn = list();
if (type_conn == conn_type.NONE):
self.__create_none_connections();
elif (type_conn == conn_type.ALL_TO_ALL):
self.__create_all_to_all_connections();
elif (type_conn == conn_type.GRID_FOUR):
self.__create_grid_four_connections();
elif (type_conn == conn_type.GRID_EIGHT):
self.__create_grid_eight_connections();
elif (type_conn == conn_type.LIST_BIDIR):
self.__create_list_bidir_connections();
elif (type_conn == conn_type.DYNAMIC):
self.__create_dynamic_connection();
else:
raise NameError('The unknown type of connections');
def has_connection(self, i, j):
"""!
@brief Returns True if there is connection between i and j oscillators and False - if connection doesn't exist.
@param[in] i (uint): index of an oscillator in the network.
@param[in] j (uint): index of an oscillator in the network.
"""
if (self._conn_represent == conn_represent.MATRIX):
return (self._osc_conn[i][j]);
elif (self._conn_represent == conn_represent.LIST):
for neigh_index in range(0, len(self._osc_conn[i]), 1):
if (self._osc_conn[i][neigh_index] == j):
return True;
return False;
else:
raise NameError("Unknown type of representation of coupling");
def set_connection(self, i, j):
"""!
@brief Couples two specified oscillators in the network with dynamic connections.
@param[in] i (uint): index of an oscillator that should be coupled with oscillator 'j' in the network.
@param[in] j (uint): index of an oscillator that should be coupled with oscillator 'i' in the network.
@note This method can be used only in case of DYNAMIC connections, otherwise it raises an exception.
"""
if (self.structure != conn_type.DYNAMIC):
raise NameError("Connection between oscillators can be changed only in case of dynamic type.");
if (self._conn_represent == conn_represent.MATRIX):
self._osc_conn[i][j] = True;
self._osc_conn[j][i] = True;
else:
self._osc_conn[i].append(j);
self._osc_conn[j].append(i);
def get_neighbors(self, index):
"""!
@brief Finds neighbors of the oscillator with specified index.
@param[in] index (uint): index of oscillator for which neighbors should be found in the network.
@return (list) Indexes of neighbors of the specified oscillator.
"""
if (self._conn_represent == conn_represent.LIST):
return self._osc_conn[index]; # connections are represented by list.
elif (self._conn_represent == conn_represent.MATRIX):
return [neigh_index for neigh_index in range(self._num_osc) if self._osc_conn[index][neigh_index] == True];
else:
raise NameError("Unknown type of representation of connections");
| 3 | 3 |
leprikon/migrations/0031_registration_questions_agreements.py | leprikon-cz/leprikon | 4 | 12770964 | <reponame>leprikon-cz/leprikon<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-09-01 22:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def set_registration_questions_agreements(apps, schema_editor):
leprikon_site = apps.get_model('leprikon', 'LeprikonSite').objects.filter(id=settings.SITE_ID).first()
site_agreements = set(leprikon_site.registration_agreements.all()) if leprikon_site else set()
for subject_type in apps.get_model('leprikon', 'SubjectType').objects.all():
subject_type_questions = set(subject_type.questions.all())
subject_type_agreements = site_agreements.union(subject_type.registration_agreements.all())
for subject in subject_type.subjects.prefetch_related('questions', 'registration_agreements'):
questions = subject_type_questions.union(subject.questions.all())
agreements = subject_type_agreements.union(subject.registration_agreements.all())
for registration in subject.registrations.all():
registration.questions.set(questions)
registration.agreements.set(agreements)
class Migration(migrations.Migration):
dependencies = [
('leprikon', '0030_agreements'),
]
operations = [
migrations.AddField(
model_name='agreement',
name='active',
field=models.BooleanField(default=True, verbose_name='active'),
),
migrations.AddField(
model_name='question',
name='active',
field=models.BooleanField(default=True, verbose_name='active'),
),
migrations.AddField(
model_name='subjectregistration',
name='agreements',
field=models.ManyToManyField(editable=False, related_name='registrations', to='leprikon.Agreement'),
),
migrations.AddField(
model_name='subjectregistration',
name='questions',
field=models.ManyToManyField(editable=False, related_name='registrations', to='leprikon.Question'),
),
migrations.RunPython(set_registration_questions_agreements),
]
| 1.5625 | 2 |
underscore/constant_finder.py | doboy/Underscore | 7 | 12770965 | <gh_stars>1-10
# Copyright (c) 2013 <NAME>, http://huan.do
import ast
from assignment_manager import AssignmentManager
from utils import value_of
class ConstantFinder(ast.NodeVisitor):
def __init__(self, env):
self.env = env
self.assignment_manager = AssignmentManager()
def visit_Constant(self, node):
if not hasattr(node, 'isdoc'):
value = value_of(node)
return self.add_constant(node, value)
visit_Num = visit_Str = visit_Constant
def visit_FunctionDef(self, node):
if (isinstance(node.body[0], ast.Expr) and
isinstance(node.body[0].value, ast.Str)):
node.body[0].value.isdoc = True
self.generic_visit(node)
visit_ClassDef = visit_FunctionDef
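# (The isdoc flag set above prevents visit_Constant from hoisting docstrings
#  into shared constant declarations.)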
def add_constant(self, node, value):
if value not in self.env.constants and not hasattr(node, 'isdoc'):
decl = self.env.generate_new_decl()
self.assignment_manager.add_assignment(decl.name, node)
self.env.constants[value] = decl
| 2.46875 | 2 |
load_data.py | yodacatmeow/VGG16-SNU-B36-50 | 2 | 12770966 | # Public python modules
import numpy as np
import pandas as pd
import pickle
import feature
from os import path
# If categories of test data = categories of the training data
class load():
def __init__(self, data_path, batch_size):
self.pointer = 0
self.dataframe = pickle.load(open(data_path,"rb"))
self.batch_size = batch_size
self.n_batch = int(len(self.dataframe) / self.batch_size) # The number of batches
self.n_class = len(set(self.dataframe['category'].values)) # The number of classes
# Batch
def batch(self, dataframe):
x_data = []
y_data = []
# get patches from the saved data (in here, "/dataset/train.p" OR "/dataset/valid.p") and append
for i, row in dataframe.iterrows():
# Select dataframe[row, 'patch']
patch = row['patch']
# Append "patch" to "x_data"
x_data.append(np.float32(patch))
# One-hot encoding
cl = row['category']
y = np.zeros(self.n_class)
y[cl] = 1
y_data.append(y)
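# e.g. with n_class = 4 and cl = 2, y becomes [0., 0., 1., 0.]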
return x_data, y_data
#print("x:", x_data)
#print("y:", y_data)
# Mini-batch (via batch(self, dataframe) )
def next_batch(self):
start_pos = self.pointer * self.batch_size
batch_df = self.dataframe.iloc[start_pos:start_pos + self.batch_size]
self.pointer = (self.pointer + 1) % self.n_batch # Move pointer for the next mini-batch
return self.batch(batch_df)
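# Note: n_batch = floor(len(dataframe) / batch_size), so with 100 rows and
# batch_size = 32 the pointer cycles 0, 1, 2 and the last 4 rows are never served.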
# If categories of test data ~= categories of the training data
class load2():
def __init__(self, data_path, batch_size):
self.pointer = 0
self.dataframe = pickle.load(open(data_path,"rb"))
self.batch_size = batch_size
self.n_batch = int(len(self.dataframe) / self.batch_size)
self.n_class = len(set(self.dataframe['category'].values))
# Batch
def batch(self, dataframe):
x_data = []
y_data = []
# get patches from the saved data (in here, "/dataset/train.p" OR "/dataset/valid.p") and append
for i, row in dataframe.iterrows():
# Select dataframe[row, 'patch']
patch = row['patch']
# Append "patch" to "x_data"
x_data.append(np.float32(patch))
# Append category
cl = row['track_id']
y_data.append(cl)
return x_data, y_data
# Mini-batch (via batch(self, dataframe) )
def next_batch(self):
start_pos = self.pointer * self.batch_size
batch_df = self.dataframe.iloc[start_pos:start_pos + self.batch_size]
self.pointer = (self.pointer + 1) % self.n_batch # Move pointer for the next mini-batch
return self.batch(batch_df)
if __name__ == "__main__":
import gen_data
| 2.90625 | 3 |
covidbot/local/alerts.py | igorecarrasco/covidbot | 1 | 12770967 | from random import choices
from typing import Callable
import humanize
from .covid import Covid
from .graph import Graph
from .image import Image
from .testing import Testing
from .twitter import Twitter
class Alerts(Covid, Graph, Image, Testing, Twitter):
def __init__(self):
super().__init__()
@property
def chosen_data(self) -> Callable:
"""
Chooses at random with weighted distribution whether to get
data for the whole world or any specific country. We want
to post country-level data more often.
"""
chosen: Callable = choices(
[
self.world_data,
self.random_country_data,
self.random_country_graph,
self.random_image,
self.random_country_tests,
self.random_country_group_graph,
],
weights=[0.2, 0.1, 0.25, 0.05, 0.15, 0.25],
k=1,
)
return chosen[0]()
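# (random.choices treats weights as relative, so they need not sum to 1;
#  e.g. choices([f, g], weights=[3, 1], k=1)[0] picks f ~75% of the time.)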
def generate(self):
"""
Generates the alert.
Data for a given country looks like this:
{'country': 'Malta', 'cases': 21, 'todayCases': 3, 'deaths': 0, 'todayDeaths': 0, 'recovered': 2, 'critical': 0}
Data for the world looks like:
{'cases': 162386, 'deaths': 5984, 'recovered': 75967}
"""
data = self.chosen_data
if data.get("image"):
self.__image(data)
elif data.get("tests"):
self.__tests(data)
elif data.get("graph"):
self.__graph(data)
elif data.get("graph_group"):
self.__graph_group(data)
elif not data.get("country"):
self.__world(data)
elif data.get("cases") == 0:
self.__no_cases(data)
elif data.get("cases") == data.get("todayCases"):
self.__first_batch(data)
elif data.get("deaths") == data.get("todayDeaths") and data.get("deaths") != 0:
self.__first_deaths(data)
else:
self.__country(data)
def __image(self, data):
img_path = data["img_path"]
media_id = self.upload_image(img_path)
self.post(
f"Guidance from the World Health Organization (WHO)", media_ids=[media_id],
)
def __graph(self, data):
cases = data["cases"]
country = data["country"]
img_path = data["img_path"]
media_id = self.upload_image(img_path)
self.post(
f"Evolution of number of cases for {country.replace('*', '')}, with a total confirmed of {humanize.intcomma(cases)}",
media_ids=[media_id],
)
def __graph_group(self, data):
countries = data["countries"]
img_path = data["img_path"]
media_id = self.upload_image(img_path)
self.post(
f"Evolution of cases in {', '.join(countries)}, since 100th confirmed case.",
media_ids=[media_id],
)
def __world(self, data):
cases = data["cases"]
deaths = data["deaths"]
rate = round(deaths / cases * 100, 2)
self.post(
f"Latest worldwide COVID-19 data: {humanize.intcomma(cases)} cases, {humanize.intcomma(deaths)} deaths.\n\nA {rate}% fatality rate."
)
def __country(self, data):
cases = data["cases"]
deaths = data["deaths"]
today_cases = data["todayCases"]
today_deaths = data["todayDeaths"]
rate = round(deaths / cases * 100, 2)
self.post(
f"Latest COVID-19 data for {data['country']}: {humanize.intcomma(cases)} case{'s' if cases > 1 else ''}, of those {humanize.intcomma(today_cases)} today; {humanize.intcomma(deaths)} death{'s' if deaths > 1 else ''}, of those {humanize.intcomma(today_deaths)} today.\n\nA {rate}% fatality rate."
)
def __first_batch(self, data):
cases = data["cases"]
deaths = data["deaths"]
self.post(
f"First case{'s' if cases > 1 else ''} of COVID-19 confirmed in {data['country']}: {humanize.intcomma(cases)} case{'s' if cases > 1 else ''}, with {humanize.intcomma(deaths)} death{'s' if deaths > 1 else ''} reported."
)
def __first_deaths(self, data):
cases = data["cases"]
deaths = data["deaths"]
rate = round(deaths / cases * 100, 2)
self.post(
f"First death{'s' if deaths > 1 else ''} by COVID-19 reported in {data['country']}: {humanize.intcomma(deaths)} {'people have' if deaths > 1 else 'person has'} died out of {humanize.intcomma(cases)} confirmed cases.\n\nA {rate}% fatality rate."
)
def __no_cases(self, data):
self.post(
f"Latest COVID-19 data: {data['country']} still reports no infections or deaths."
)
def __tests(self, data):
try: # lets try to enrich this with other statistics from the country in question
if len(data["country"]) <= 2:
raise ValueError("Likely a state. Skipping...")
country_data = self.country(data["country"])
cases = country_data["cases"]
today_cases = country_data["todayCases"]
deaths = country_data["deaths"]
today_deaths = country_data["todayDeaths"]
except Exception as e: # if anything blows up here and we can't find the country by FuzzyMatching, no biggie
print(str(e))
country_data = None
message = (
f"Total COVID-19 tests performed in {data['country']}: {data['tests']}."
)
if country_data:
message = (
message
+ f" {humanize.intcomma(cases)} case{'s' if cases > 1 else ''}, of those {humanize.intcomma(today_cases)} today; {humanize.intcomma(deaths)} death{'s' if deaths > 1 else ''}, of those {humanize.intcomma(today_deaths)} today."
)
self.post(message)
| 3.171875 | 3 |
migrations/versions/2870937c5fa_.py | daghan/MarkDownBlog | 25 | 12770968 | <gh_stars>10-100
"""empty message
Revision ID: 2870937c5fa
Revises: <PASSWORD>
Create Date: 2015-02-06 14:21:20.794958
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
from migrations.utils import drop_column_sqlite
def upgrade():
op.add_column('user', sa.Column('blog_bg', sa.String(length=200), nullable=True))
op.add_column('user', sa.Column('blog_bg_everywhere', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('blog_bg_override', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('blog_bg_public', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('blog_bg_repeat', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('blog_image_rounded', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('last_login', sa.DateTime(), nullable=True))
drop_column_sqlite('user', 'blog_round_image')
def downgrade():
op.add_column('user', sa.Column('blog_round_image', sa.BOOLEAN(), nullable=True))
drop_column_sqlite('user', 'last_login')
drop_column_sqlite('user', 'blog_image_rounded')
drop_column_sqlite('user', 'blog_bg_repeat')
drop_column_sqlite('user', 'blog_bg_public')
drop_column_sqlite('user', 'blog_bg_override')
drop_column_sqlite('user', 'blog_bg_everywhere')
drop_column_sqlite('user', 'blog_bg')
| 1.71875 | 2 |
hpecp/client.py | pramaku/hpecp-python-library | 0 | 12770969 | """
This module is the main module that users of this library will interact with.
"""
from __future__ import absolute_import
from six import raise_from
from .logger import Logger
from .tenant import TenantController
from .config import ConfigController
from .gateway import GatewayController
from .k8s_worker import K8sWorkerController
from .k8s_cluster import K8sClusterController
from .license import LicenseController
from .lock import LockController
from .exceptions import ContainerPlatformClientException, APIException, APIItemNotFoundException, APIItemConflictException
import re
import os
import requests
import json
import configparser
import sys
try:
basestring
except NameError:
basestring = str
class ContainerPlatformClient(object):
"""The ContainerPlatformClient object is the central object that users of this library work with.
Parameters:
username : str
HPECP username
password : str
HPECP password
api_host : str
HPECP api_host
api_port : int
HPECP api_port
use_ssl : bool:
Connect to HPECP using SSL: True|False
verify_ssl : bool|str
See https://requests.readthedocs.io/en/master/user/advanced/#ssl-cert-verification
warn_ssl : bool
Disable ssl warnings
Returns:
ContainerPlatformClient:
An instance of ContainerPlatformClient
Notes:
Instantiating the ContainerPlatformClient does not make any connection to the HPE Container Platform API. The
initial connection would be made by calling the method :py:meth:`create_session`.
See also:
:py:meth:`create_from_config_file` for an alternative way to create a ContainerPlatformClient instance
:py:meth:`create_from_env` for an alternative way to create a ContainerPlatformClient instance
"""
@classmethod
def create_from_config_file(cls, config_file="~/.hpecp.conf", profile=None):
"""Create a ContainerPlatformClient object from a configuration file.
Parameters:
config_file : str
The configuration filename and path
profile : str
If the configuration file has multiple profile sections, you can select the profile to use.
Returns:
ContainerPlatformClient:
An instance of ContainerPlatformClient is returned.
Example config_file::
[default]
api_host = 127.0.0.1
api_port = 8080
use_ssl = True
verify_ssl = False
warn_ssl = False
[demoserver]
username = admin
password = <PASSWORD>
"""
if profile is None:
profile = 'default'
if config_file.startswith('~'):
file_path = config_file[1:]
file_path = file_path.lstrip('/')
config_file = os.path.join(os.path.expanduser("~"), file_path)
if not os.path.exists(config_file):
raise ContainerPlatformClientException(
"Could not find configuration file '{}'".format(config_file))
config = configparser.ConfigParser()
config.read(config_file)
assert profile in config, "'{}' section not found in '{}'".format(profile, config_file)
assert 'username' in config[profile] or 'username' in config['default'], "'username' not found in section '{}' or in the default section".format(profile)
assert 'password' in config[profile] or 'password' in config['default'], "'password' not found in section '{}' or in the default section".format(profile)
assert 'api_host' in config[profile] or 'api_host' in config['default'], "'api_host' not found in section '{}' or in the default section".format(profile)
assert 'api_port' in config[profile] or 'api_port' in config['default'], "'api_port' not found in section '{}' or in the default section".format(profile)
assert 'use_ssl' in config[profile] or 'use_ssl' in config['default'], "'use_ssl' not found in section '{}' or in the default section".format(profile)
assert 'verify_ssl' in config[profile] or 'verify_ssl' in config['default'], "'verify_ssl' not found in section '{}' or in the default section".format(profile)
assert 'warn_ssl' in config[profile] or 'warn_ssl' in config['default'], "'warn_ssl' not found in section '{}' or in the default section".format(profile)
def get_config_value(key, profile):
if key in config[profile]:
return config[profile][key]
else:
return config['default'][key]
username = str(get_config_value('username', profile))
password = str(get_config_value('password', profile))
api_host = str(get_config_value('api_host', profile))
api_port = int(get_config_value('api_port', profile))
use_ssl = str(get_config_value('use_ssl', profile))
verify_ssl = str(get_config_value('verify_ssl', profile))
warn_ssl = str(get_config_value('warn_ssl', profile))
if use_ssl == 'False':
use_ssl = False
else:
use_ssl = True
# verify_ssl could be a path
if verify_ssl == 'False':
verify_ssl = False
if warn_ssl == 'False':
warn_ssl = False
else:
warn_ssl = True
return cls(username, password, api_host, api_port, use_ssl, verify_ssl, warn_ssl)
@classmethod
def create_from_env(cls):
"""Create an instance of ContainerPlatformClient from environment variables:
Variables::
HPECP_USERNAME
HPECP_PASSWORD
HPECP_API_HOST
HPECP_API_PORT
HPECP_USE_SSL
HPECP_VERIFY_SSL
HPECP_warn_ssl
See ContainerPlatformClient :py:class:`constructor <ContainerPlatformClient>` for the parameter definitions.
"""
if 'HPECP_USERNAME' in os.environ:
HPECP_USERNAME = os.environ['HPECP_USERNAME']
if 'HPECP_PASSWORD' in os.environ:
HPECP_PASSWORD = os.environ['HPECP_PASSWORD']
if 'HPECP_API_HOST' in os.environ:
HPECP_API_HOST = os.environ['HPECP_API_HOST']
if 'HPECP_API_PORT' in os.environ:
HPECP_API_PORT = os.environ['HPECP_API_PORT']
if 'HPECP_USE_SSL' in os.environ:
HPECP_USE_SSL = os.environ['HPECP_USE_SSL']
if 'HPECP_VERIFY_SSL' in os.environ:
HPECP_VERIFY_SSL = os.environ['HPECP_VERIFY_SSL']
if 'HPECP_warn_ssl' in os.environ:
HPECP_warn_ssl = os.environ['HPECP_warn_ssl']
return cls(
username=HPECP_USERNAME,
password=<PASSWORD>,
api_host=HPECP_API_HOST,
api_port=HPECP_API_PORT,
use_ssl=HPECP_USE_SSL,
verify_ssl=HPECP_VERIFY_SSL,
warn_ssl=HPECP_warn_ssl
)
def __init__(self,
username = None,
password = <PASSWORD>,
api_host = None,
api_port = 8080,
use_ssl = True,
verify_ssl = True,
warn_ssl = False
):
"""Doc string is defined at the top of the class"""
self._log = Logger().get_logger(self.__class__.__name__)
# TODO add other fields, except password
self._log.debug("ContainerPlatformClient() created with username['{}']".format(username))
assert isinstance(username, basestring), "'username' parameter must be of type string"
assert isinstance(password, basestring), "'password' parameter must be of type string"
assert isinstance(api_host, basestring), "'api_host' parameter must be of type string"
assert isinstance(api_port, int), "'api_port' parameter must be of type int"
assert isinstance(use_ssl, bool), "'use_ssl' parameter must be of type bool"
assert isinstance(verify_ssl, bool) or \
(isinstance(verify_ssl, basestring) and
os.access(verify_ssl, os.R_OK)), "'verify_ssl' parameter must be of type bool or point to a file"
assert isinstance(warn_ssl, bool), "'warn_ssl' parameter must be of type bool"
self.username = username
self.password = password
self.api_host = api_host
self.api_port = api_port
self.use_ssl = use_ssl
self.verify_ssl = verify_ssl
self.warn_ssl = warn_ssl
if self.use_ssl:
scheme = 'https'
else:
scheme = 'http'
self.base_url = "{}://{}:{}".format(scheme, self.api_host, self.api_port)
# register endpoint modules - see @property definitions at end of file for each module
self._tenant = TenantController(self)
self._config = ConfigController(self)
self._gateway = GatewayController(self)
self._k8s_worker = K8sWorkerController(self)
self._k8s_cluster = K8sClusterController(self)
self._license = LicenseController(self)
self._lock = LockController(self)
def create_session(self):
"""Create a session with the HPE CP controller defined in the object :py:class:`ContainerPlatformClient`.
Returns:
ContainerPlatformClient:
An instance of ContainerPlatformClient is returned.
Raises:
APIException
for connection error to the HPE CP controller
requests.exceptions.RequestException
for exceptions that are not a connection error
"""
url = self.base_url + "/api/v1/login"
auth = { "name": self.username, "password": <PASSWORD> }
if self.warn_ssl is False:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
response = None
try:
self.log.debug('REQ: {} : {} {}'.format('Login', 'post', url))
response = requests.post(url, json=auth, verify=self.verify_ssl)
response.raise_for_status()
except requests.exceptions.ConnectionError as e:
self.log.debug('RES: {} : {} {} {}'.format('Login', 'post', url, str(e)))
raise_from(APIException(
message='Could not connect to controller - set LOG_LEVEL=DEBUG to see more detail.',
request_method='post',
request_url=url
), None)
except requests.exceptions.RequestException as e:
if response is not None:
self.log.error('Auth Response: ' + response.text)
else:
self.log.error(e)
raise
self.session_headers = response.headers
self.session_id = response.headers['location']
return self
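# Typical flow (sketch, mirroring the property docstrings below):
#   client = ContainerPlatformClient.create_from_config_file(profile='default')
#   client.create_session()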
def _request_headers(self):
headers = {
'accept': 'application/json',
'X-BDS-SESSION': self.session_id,
'cache-control': 'no-cache',
'content-type': 'application/json'
}
return headers
def _request(self, url, http_method='get', data={}, description='', create_auth_headers=True, additional_headers={}):
if create_auth_headers:
headers = self._request_headers()
else:
headers = {}
all_headers = {}
all_headers.update(headers)
all_headers.update(additional_headers)
url = self.base_url + url
if self.warn_ssl is False:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
if http_method == 'get':
self.log.debug('REQ: {} : {} {}'.format(description, http_method, url))
response = requests.get(url, headers=all_headers, verify=self.verify_ssl)
elif http_method == 'put':
self.log.debug('REQ: {} : {} {} {}'.format(description, http_method, url, data))
response = requests.put(url, headers=all_headers, data=json.dumps(data), verify=self.verify_ssl)
elif http_method == 'post':
self.log.debug('REQ: {} : {} {} {}'.format(description, http_method, url, data))
response = requests.post(url, headers=all_headers, data=json.dumps(data), verify=self.verify_ssl)
elif http_method == 'delete':
self.log.debug('REQ: {} : {} {}'.format(description, http_method, url))
response = requests.delete(url, headers=all_headers, verify=self.verify_ssl)
response.raise_for_status()
except requests.exceptions.RequestException:
if response is not None:
try:
response_info = response.json()
except ValueError:
response_info = response.text
else:
response_info = ''
if response.status_code == 404:
# This is expected for some method calls so do not log as an error
self.log.debug('{} : {} {} REQ: {}'.format(description, http_method, url, json.dumps(data)))
raise APIItemNotFoundException(message=response_info, request_method=http_method, request_url=url, request_data=json.dumps(data))
if response.status_code == 409:
# This is expected for some method calls so do not log as an error
self.log.debug('{} : {} {} REQ: {}'.format(description, http_method, url, json.dumps(data)))
raise APIItemConflictException(message=response_info, request_method=http_method, request_url=url, request_data=json.dumps(data))
else:
self.log.exception('{} : {} {} REQ: {}'.format(description, http_method, url, json.dumps(data)))
raise APIException(message=response_info, request_method=http_method, request_url=url, request_data=json.dumps(data))
try:
self.log.debug('RES: {} : {} {} : {} {}'.format(description, http_method, url, response.status_code, json.dumps(response.json())))
except ValueError:
self.log.debug('RES: {} : {} {} : {} {}'.format(description, http_method, url, response.status_code, response.text))
return response
@property
def tenant(self):
"""
This attribute is a reference to an object of type `.tenant.TenantController`.
See the class :py:class:`.tenant.TenantController` for the methods available.
Example::
client = ContainerPlatformClient(...)
client.create_session()
client.tenant.list()
This example calls the method :py:meth:`list() <.tenant.TenantController.list>` in :py:class:`.tenant.TenantController`.
"""
return self._tenant
@property
def config(self):
"""
This attribute is a reference to an object of type `.config.ConfigController`.
See the class :py:class:`.config.ConfigController` for the methods available.
Example::
client = ContainerPlatformClient(...)
client.create_session()
client.config.auth(
{
"external_identity_server": {
"bind_pwd":"<PASSWORD>@",
"user_attribute":"sAMAccountName",
"bind_type":"search_bind",
"bind_dn":"cn=Administrator,CN=Users,DC=samdom,DC=example,DC=com",
"host":"10.1.0.77",
"security_protocol":"ldaps",
"base_dn":"CN=Users,DC=samdom,DC=example,DC=com",
"verify_peer": False,
"type":"Active Directory",
"port":636
}
}
)
This example calls the method :py:meth:`auth() <.config.ConfigController.auth>` in :py:class:`.config.ConfigController`.
"""
return self._config
@property
def k8s_cluster(self):
"""
This attribute is a reference to an object of type `.k8s_cluster.K8sClusterController`.
See the class :py:class:`.k8s_cluster.K8sClusterController` for the methods available.
Example::
client = ContainerPlatformClient(...)
client.create_session()
client.k8s_cluster.list()
This example calls the method :py:meth:`list() <.k8s_cluster.K8sClusterController.list>` in :py:class:`.k8s_cluster.K8sClusterController`.
"""
return self._k8s_cluster
@property
def k8s_worker(self):
"""
This attribute is a reference to an object of type `.k8s_worker.K8sWorkerController`.
See the class :py:class:`.k8s_worker.K8sWorkerController` for the methods available.
Example::
client = ContainerPlatformClient(...)
client.create_session()
client.k8s_worker.list()
This example calls the method :py:meth:`list() <.k8s_worker.K8sWorkerController.list>` in :py:class:`.k8s_worker.K8sWorkerController`.
"""
return self._k8s_worker
@property
def gateway(self):
"""
This attribute is a reference to an object of type `.gateway.GatewayController`.
See the class :py:class:`.gateway.GatewayController` for the methods available.
Example::
client = ContainerPlatformClient(...)
client.create_session()
client.gateway.list()
This example calls the method :py:meth:`list() <.gateway.GatewayController.list>` in :py:class:`.gateway.GatewayController`.
"""
return self._gateway
@property
def license(self):
"""
This attribute is a reference to an object of type `.license.LicenseController`.
See the class :py:class:`.license.LicenseController` for the methods available.
Example::
client = ContainerPlatformClient(...)
client.create_session()
client.license.list()
This example calls the method :py:meth:`list() <.license.LicenseController.list>` in :py:class:`.license.LicenseController`.
"""
return self._license
@property
def lock(self):
"""
This attribute is a reference to an object of type `.lock.LockController`.
See the class :py:class:`.lock.LockController` for the methods available.
Example::
client = ContainerPlatformClient(...)
client.create_session()
client.lock.get()
This example calls the method :py:meth:`get() <.lock.LockController.list>` in :py:class:`.lock.LockController`.
"""
return self._lock
@property
def log(self):
"""
This attribute is a reference to :py:class:`.logger.Logger`. The log function can be called from controller objects
via the `client` parameter passed in during instantiation of the controller.
Example::
class K8sClusterController:
...
def __init__(self, client):
self.client = client
def some_method(self):
...
self.client.log.error("Some Error")
"""
return self._log
| 2 | 2 |
partsdb/tools/__init__.py | HaseloffLab/PartsDB | 2 | 12770970 | <filename>partsdb/tools/__init__.py
from cutGenes import prepareLibrary
from compRev import compRev
from getUtrCoordinates import getUtrCoordinates
from annotateBlastp import annotateBlastp
from saveSeqs import saveSequences
| 1.109375 | 1 |
formate_black/__init__.py | python-formate/formate-black | 0 | 12770971 | #!/usr/bin/env python3
#
# __init__.py
"""
Use black with formate.
"""
#
# Copyright © 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# stdlib
import itertools
from collections.abc import Mapping
from typing import Optional
# 3rd party
import black
from black import TargetVersion
from black.lines import Line
from domdf_python_tools.paths import PathPlus
from domdf_python_tools.typing import PathLike
from domdf_python_tools.words import TAB
from formate.config import wants_filename, wants_global_config
__author__: str = "<NAME>"
__copyright__: str = "2021 <NAME>"
__license__: str = "MIT License"
__version__: str = "0.1.0"
__email__: str = "<EMAIL>"
__all__ = ["black_hook"]
@wants_filename
@wants_global_config
def black_hook(
source: str,
formate_filename: PathLike,
formate_global_config: Optional[Mapping] = None,
**kwargs,
) -> str:
r"""
Call `black <https://pypi.org/project/black/>`_, using the given keyword arguments as its configuration.
:param source: The source to reformat.
:param formate_global_config: The global configuration dictionary. Optional.
:param \*\*kwargs:
:returns: The reformatted source.
"""
black_mode_cls = black.Mode
if "use_tabs" in kwargs:
if kwargs.pop("use_tabs"):
black_mode_cls = TabsMode
elif formate_global_config:
if formate_global_config.get("indent") == TAB:
black_mode_cls = TabsMode
if "line_length" not in kwargs and formate_global_config:
if "line_length" in (formate_global_config or {}):
kwargs["line_length"] = formate_global_config["line_length"]
kwargs["is_pyi"] = PathPlus(formate_filename).suffix == ".pyi"
if "target_versions" in kwargs:
kwargs["target_versions"] = {TargetVersion[val.upper()] for val in kwargs["target_versions"]}
else:
kwargs["target_versions"] = set()
if "target_version" in kwargs:
kwargs["target_versions"].add(TargetVersion[kwargs.pop("target_version").upper()])
return black.format_str(source, mode=black_mode_cls(**kwargs))
# The following adapted from black itself
# https://github.com/psf/black
# MIT Licensed
# Copyright (c) 2018 <NAME>
def line_str(self: Line) -> str:
"""
Render the line.
"""
if not self:
return '\n'
if getattr(self.mode, "use_tabs", False):
indent = '\t' * self.depth
else:
indent = " " * self.depth
leaves = iter(self.leaves)
first = next(leaves)
res = f"{first.prefix}{indent}{first.value}"
for leaf in leaves:
res += str(leaf)
for comment in itertools.chain.from_iterable(self.comments.values()):
res += str(comment)
return res + '\n'
Line.__str__ = line_str # type: ignore
class TabsMode(black.Mode):
use_tabs = True
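# Usage sketch (relies on the Line.__str__ patch above; black.Mode's dataclass
# fields are assumed unchanged):
#   black.format_str("x = 1\n", mode=TabsMode(line_length=120))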
| 2.046875 | 2 |
tryagain/try04.py | jpsura/pyapi | 0 | 12770972 | <filename>tryagain/try04.py
#!/usr/bin/env python3
import uuid
# simulate a job id with the uuid package
ticket = uuid.uuid1()
try:
print("please type the config file to load")
configfile = input("filename: ")
with open(configfile, 'r') as configfileobj:
switchconfig = configfileobj.read()
except:
x = "error with obtaining config file info"
else:
x = "File Found"
finally:
with open("try04.log", "a") as zlog:
print("\n\nWriting results of service to log file...")
print(ticket, " - ", x, file=zlog)
| 2.625 | 3 |
example/app.py | storborg/aiohttp_themes | 1 | 12770973 | <reponame>storborg/aiohttp_themes
import sys
import aiohttp
import aiohttp.web
import aiohttp_themes
from aiohttp_themes.theme import Theme
from aiohttp_themes.asset import SASSAsset, RequireJSAsset
@aiohttp_themes.template('index.html')
async def hello_view(request):
name = request.match_info.get('name', 'Anonymous')
return {'name': name}
class LightTheme(Theme):
key = 'light'
assets = {
'main.css': SASSAsset('scss/main.scss'),
'main.js': RequireJSAsset('js/main.js'),
}
def init(debug):
app = aiohttp.web.Application()
app.router.add_route('GET', '/{name}', hello_view)
app.router.add_route('GET', '/', hello_view)
aiohttp_themes.setup(app,
themes=[LightTheme],
debug=debug,
theme_strategy='light',
compiled_asset_dir='/tmp/compiled')
return app
def serve(debug):
app = init(debug)
aiohttp.web.run_app(app)
def compile():
app = init(debug=False)
aiohttp_themes.compile(app, compiled_asset_dir='/tmp/compiled')
if __name__ == '__main__':
if (len(sys.argv) > 1) and (sys.argv[1] == 'compile'):
print("Compiling...")
compile()
elif (len(sys.argv) > 1) and (sys.argv[1] == 'production'):
serve(debug=False)
else:
serve(debug=True)
| 2.125 | 2 |
juneberry/config/experiment_outline.py | sei-nmvanhoudnos/Juneberry | 0 | 12770974 | <filename>juneberry/config/experiment_outline.py
#! /usr/bin/env python3
# ==========================================================================================================================================================
# Copyright 2021 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS"
# BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER
# INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED
# FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM
# FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD (SEI)-style license, please see license.txt
# or contact <EMAIL> for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see
# Copyright notice for non-US Government use and distribution.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Pytorch (https://github.com/pytorch/pytorch/blob/master/LICENSE) Copyright 2016 facebook, inc..
# 2. NumPY (https://github.com/numpy/numpy/blob/master/LICENSE.txt) Copyright 2020 Numpy developers.
# 3. Matplotlib (https://matplotlib.org/3.1.1/users/license.html) Copyright 2013 Matplotlib Development Team.
# 4. pillow (https://github.com/python-pillow/Pillow/blob/master/LICENSE) Copyright 2020 <NAME> and contributors.
# 5. SKlearn (https://github.com/scikit-learn/sklearn-docbuilder/blob/master/LICENSE) Copyright 2013 scikit-learn
# developers.
# 6. torchsummary (https://github.com/TylerYep/torch-summary/blob/master/LICENSE) Copyright 2020 <NAME>.
# 7. adversarial robust toolbox (https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/main/LICENSE)
# Copyright 2018 the adversarial robustness toolbox authors.
# 8. pytest (https://docs.pytest.org/en/stable/license.html) Copyright 2020 <NAME> and others.
# 9. pylint (https://github.com/PyCQA/pylint/blob/master/COPYING) Copyright 1991 Free Software Foundation, Inc..
# 10. python (https://docs.python.org/3/license.html#psf-license) Copyright 2001 python software foundation.
#
# DM20-1149
#
# ==========================================================================================================================================================
import sys
import logging
import juneberry.filesystem as jbfs
FORMAT_VERSION = '1.0.0'
class ExperimentOutline:
def __init__(self, experiment_name, config):
self.valid = True
self.experiment_name = experiment_name
self.config = config.copy()
# Check for REQUIRED items
for param in ['baselineConfig', 'variables']:
if param not in config:
logging.error(f"Failed to find {param} in experiment outline!")
self.valid = False
# Pull out the values
self.baseline_config = config.get('baselineConfig', None)
self.variables = config.get('variables', None)
self.tests = config.get('tests', None)
self.reports = config.get('reports', None)
self.format_version = config.get('formatVersion', None)
# Check formatVersion
jbfs.version_check("EXPERIMENT OUTLINE", self.format_version, FORMAT_VERSION, True)
# Verify variables aren't constants
self.check_experiment_variables()
def analyze_experiment_variables(self):
"""
This method identifies the experiment variables and calculates the number of possible combinations.
:return: Nothing.
"""
logging.info(f"Identified {len(self.variables)} variables:")
combinations = 1
for variable in self.variables:
if type(variable['values']) is str:
count = 1
logging.info(f" {count:3d} random value for {variable['configField']}")
else:
count = len(variable['values'])
logging.info(f" {count:3d} possibilities for {variable['configField']}")
combinations *= count
logging.info(f"{combinations:5d} unique configurations in the outline file for '{self.experiment_name}'.")
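# e.g. two list variables with 3 and 4 values plus one random-value variable
# yield 3 * 4 * 1 = 12 unique configurations.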
def check_experiment_variables(self):
"""
This method verifies that each variable in the experiment has more than one possibility. If a variable
has only one possibility, then it should not be a variable.
:return: Nothing.
"""
for variable in self.variables:
if type(variable['values']) is list:
count = len(variable['values'])
if count < 2:
self.valid = False
logging.error(f"Insufficient possibilities for '{variable}' in '{self.experiment_name}'. "
f"this variable from the experiment outline or add more possibilities.")
sys.exit(-1)
| 1.320313 | 1 |
full-problems/minimizeStringVal.py | vikas-t/DS-Algo | 0 | 12770975 |
#!/usr/bin/python3
# https://practice.geeksforgeeks.org/problems/minimize-string-value/0
import sys
def findMax(sc, n):
mx = -1*sys.maxsize
mxi = None
for i in range(n):
if sc[i] > mx:
mx = sc[i]
mxi = i
return mxi
def sol(s, k):
"""
Keep finding the number with max frequency and decrease the frequency by 1
Check the max heap solution as solved in gameWithStrings.py
"""
c = {}
for char in s:
c[char] = c[char] + 1 if char in c else 1
sc = sorted(list(c.values()))
x = -1
while k > 0 and sc[x] > 0:
k -= 1
sc[x] -= 1
x = findMax(sc, len(sc))
res = 0
for x in sc:
res = res + x**2
return res
print(sol("sjybldbefsarcbynecdyggxxpklorellnmpapqfwkhop", 13)) | 3.109375 | 3 |
lib/exabgp/bgp/message/message.py | bopopescu/exabgp_priv | 2 | 12770976 |
# encoding: utf-8
"""
message.py
Created by <NAME> on 2010-01-15.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from struct import pack
# ================================================================== BGP Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + +
# | Marker |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Length | Type |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class Message (Exception):
# we need to define TYPE inside __init__ of the subclasses
# otherwise we can not dynamically create different UnknownMessage
# TYPE = None
MARKER = chr(0xff)*16
HEADER_LEN = 19
MAX_LEN = 4096
registered_message = {}
# This is redefined by the Notify class, Exception is never used
klass_notify = Exception
klass_unknown = Exception
class CODE (int):
__slots__ = []
NOP = 0x00 # . 0 - internal
OPEN = 0x01 # . 1
UPDATE = 0x02 # . 2
NOTIFICATION = 0x03 # . 3
KEEPALIVE = 0x04 # . 4
ROUTE_REFRESH = 0x05 # . 5
OPERATIONAL = 0x06 # . 6 # Not IANA assigned yet
names = {
NOP: 'NOP',
OPEN: 'OPEN',
UPDATE: 'UPDATE',
NOTIFICATION: 'NOTIFICATION',
KEEPALIVE: 'KEEPALIVE',
ROUTE_REFRESH: 'ROUTE_REFRESH',
OPERATIONAL: 'OPERATIONAL',
}
def __str__ (self):
return self.names.get(self,'unknown message %s' % hex(self))
def __repr__ (self):
return str(self)
@staticmethod
def name (message_id):
return Message.CODE.names.get(message_id,'unknown message %s' % hex(message_id))
Length = {
CODE.OPEN: lambda _: _ >= 29, # noqa
CODE.UPDATE: lambda _: _ >= 23, # noqa
CODE.NOTIFICATION: lambda _: _ >= 21, # noqa
CODE.KEEPALIVE: lambda _: _ == 19, # noqa
CODE.ROUTE_REFRESH: lambda _: _ == 23, # noqa
}
def __init__ (self):
self._name = None
@staticmethod
def string (code):
if code is None:
return 'invalid'
if code == Message.CODE.OPEN:
return 'open'
if code == Message.CODE.UPDATE:
return 'update'
if code == Message.CODE.NOTIFICATION:
return 'notification'
if code == Message.CODE.KEEPALIVE:
return 'keepalive'
if code == Message.CODE.ROUTE_REFRESH:
return 'route refresh'
if code == Message.CODE.OPERATIONAL:
return 'operational'
return 'unknown'
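	# Frame every outgoing message with the fixed 19-byte header: a 16-byte
	# all-ones marker, a 2-byte big-endian total length and a 1-byte type,
	# matching the layout diagram at the top of this file.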
def _message (self, message):
message_len = pack('!H',19+len(message))
return "%s%s%s%s" % (self.MARKER,message_len,self.TYPE,message)
def message (self):
raise NotImplementedError('message not implemented in subclasses')
@staticmethod
def register_message (klass, message=None):
what = klass.TYPE if message is None else message
if what in Message.registered_message:
raise RuntimeError('only one class can be registered per message')
Message.registered_message[ord(what)] = klass
@classmethod
def klass (cls, what):
if what in cls.registered_message:
return cls.registered_message[what]
raise cls.klass_notify(2,4,'can not handle message %s' % what)
@classmethod
def unpack (cls, message, data, negotiated):
if message in cls.registered_message:
return cls.klass(message).unpack_message(data,negotiated)
return cls.klass_unknown(message,data,negotiated)
| 2.03125 | 2 |
Exercicios/Extras/Arrow.py | RicardoMart922/estudo_Python | 0 | 12770977 | def seta(n):
for i in range(n):
if i == n - 1:
print((2 * n) * '*', end='')
print((i + 1) * '*')
else:
print((2 * n) * ' ', end='')
print((i + 1) * '*')
for j in range(n - 1, 0, -1):
print((2 * n) * ' ',end='')
print(j * '*')
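# Example -- seta(3) prints a right-pointing arrow:
#       *
#       **
# *********
#       **
#       *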
seta(10)
| 3.765625 | 4 |
autokey/CapsCtrl/caps_g.py | TeX2e/dotfiles | 1 | 12770978 | keyboard.send_keys("<ctrl>+g")
| 1.140625 | 1 |
sandbox/src/htmlTabularParser.py | sniemi/SamPy | 5 | 12770979 |
'''
Created on Sep 2, 2009
@author: niemi
'''
import BeautifulSoup
import urllib2
url = 'http://www.stsci.edu/institute/org/ins/cos_stis/projects/UserSupport/BOP/cos_spec_limits.html'
url2 = 'http://www.stsci.edu/institute/org/ins/cos_stis/projects/UserSupport/BOP/cos_ima_limits.html'
g = open('html_table.txt','w')
#change this
data = urllib2.urlopen(url2).read()
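# Keep only the page-content block so the site's navigation and footer
# markup never reach BeautifulSoup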
data = data[data.find('<!-- start PAGE CONTENT -->'): data.find('<!-- end PAGE CONTENT -->')]
soup = BeautifulSoup.BeautifulSoup(data)
t = soup.findAll('table')
for table in t:
g.write('\nNew Table:\n')
rows = table.findAll('tr')
for tr in rows:
cols = tr.findAll('td')
hdr = tr.findAll('th')
for th in hdr:
try:
g.write(th.find(text=True))
g.write(' ')
except: pass
for td in cols:
try:
g.write(td.find(text=True))
g.write(' ')
except: pass
g.write("\n")
g.close()
| 2.921875 | 3 |
leonardo/module/web/widgets/tables.py | timgates42/django-leonardo | 102 | 12770980 |
import floppyforms as forms
from django.forms.models import modelformset_factory
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.tables.formset import FormsetDataTable, FormsetRow
from leonardo.module.web.models import WidgetDimension
class Slider(forms.RangeInput):
min = 1
max = 12
step = 1
template_name = 'floppyforms/slider.html'
class OffsetSlider(Slider):
min = 0
class HeightSlider(OffsetSlider):
max = 24
class WidgetDimensionForm(forms.ModelForm):
width = forms.CharField(widget=Slider(), initial=12)
height = forms.CharField(widget=HeightSlider(), initial=0)
offset = forms.CharField(widget=OffsetSlider(), initial=0)
def __init__(self, *args, **kw):
super(WidgetDimensionForm, self).__init__(*args, **kw)
self.fields['size'].initial = 'xs'
class Meta:
model = WidgetDimension
exclude = tuple()
WidgetDimensionFormset = modelformset_factory(
WidgetDimension, form=WidgetDimensionForm, can_delete=True, extra=1)
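# The formset above allows row deletion (can_delete=True) and renders one
# extra blank form (extra=1) for adding a new dimension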
class CustomFormsetRow(FormsetRow):
def __init__(self, column, datum, form):
self.form = form
super(CustomFormsetRow, self).__init__(column, datum, form)
# add initial
if not datum and column.data:
try:
previous = column.data[0]
self.form.fields['widget_type'].initial = previous.widget_type
self.form.fields['widget_id'].initial = previous.widget_id
self.form.fields['id'].initial = previous.id + 1
except Exception:
pass
class WidgetDimensionTable(FormsetDataTable):
formset_class = WidgetDimensionFormset
def get_formset(self):
"""Provide the formset corresponding to this DataTable.
Use this to validate the formset and to get the submitted data back.
"""
if self.widget:
queryset = self.widget.dimensions
else:
queryset = WidgetDimension.objects.none()
if self._formset is None:
self._formset = self.formset_class(
self.request.POST or None,
initial=self._get_formset_data(),
prefix=self._meta.name,
queryset=queryset)
return self._formset
def __init__(self, *args, **kwargs):
self._meta.row_class = CustomFormsetRow
self.widget = kwargs.pop('widget', None)
super(WidgetDimensionTable, self).__init__(*args, **kwargs)
widget_id = tables.Column('widget_id', hidden=True)
widget_type = tables.Column('widget_type', hidden=True)
size = tables.Column('size', verbose_name=_('Size'))
    width = tables.Column('width', verbose_name=_('Width'))
height = tables.Column('height', verbose_name=_('Height'))
offset = tables.Column('offset', verbose_name=_('Offset'))
name = 'dimensions'
class Meta:
name = 'dimensions'
table_name = 'Dimensions'
| 2.171875 | 2 |
swcpm/main.py | Davis-Software/SWC_packagemanager | 1 | 12770981 | from swcpm import click, swc_pm
from .run import run_command
from .info import info_command
from .wget import wget_command
from .install import install_command
from .update import update_command
from .remove import remove_command
########################################################################################################################
@swc_pm.command("debug", short_help="Debugs the application.")
def debug():
"""Debugs the application."""
cmd_list = [run_command, info_command, wget_command, install_command, update_command, remove_command]
for cmd in cmd_list:
click.echo("Found command module: " + cmd.__name__)
if __name__ == '__main__':
swc_pm()
| 2.1875 | 2 |
SplunkforPaloAltoNetworks/bin/panUserUpdate.py | moshekaplan/Splunk-Apps | 34 | 12770982 | # Copyright (c) 2015, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: <NAME> <<EMAIL>>
"""Update users on the firewall from logs in Splunk
About this script
-----------------
User-ID is a mechanism in the firewall that maps users to IP addresses.
These User to IP mappings can be updated using many methods including from
Active Directory or by sending syslogs to a firewall from a Radius or other
authentication server.
Many organizations send authentication logs to Splunk, so it is natural for
Splunk to communicate these authentication events to the firewalls so their
User to IP mappings are always up-to-date.
There are two methods to synchronize authentication events from Splunk to the firewall:
Method 1: Forward logs from Splunk to the User-ID firewall.
Method 2: Use this script to update the firewall using its API.
Method 1 is preferred because it is more efficient. However, Method 2 is
useful in cases where the user and the IP are not in the same logs. Splunk
can correlate the user to the IP before passing the mapping to the firewall
via API.
This script supports connection to a firewall or to Panorama.
"""
#########################################################
# Do NOT modify anything below this line unless you are
# certain of the ramifications of the changes
#########################################################
import sys # for system params and sys.exit()
import os
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(libpath, 'lib')]
import common
import environment
logger = common.logging.getLogger().getChild('panUserUpdate')
try:
if environment.run_by_splunk():
import splunk.Intersplunk # so you can interact with Splunk
import splunk.entity as entity # for splunk config info
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(libpath, 'lib')]
sys.path[:0] = [os.path.join(libpath, 'lib', 'pan-python', 'lib')]
sys.path[:0] = [os.path.join(libpath, 'lib', 'pandevice')]
import pandevice
from pandevice.panorama import Panorama
from pandevice.firewall import Firewall
import pan.xapi
from common import log
except Exception as e:
# Handle exception to produce logs to python.log
common.exit_with_error(e)
def main_splunk():
# Get arguments
args, kwargs = splunk.Intersplunk.getKeywordsAndOptions()
# Enable debugging by passing 'debug=yes' as an argument of
# the command on the Splunk searchbar.
debug = common.check_debug(kwargs)
# kwargs contains important parameters.
# parameters from splunk searchbar include:
# action
# device
# panorama
# serial
# vsys
# user_field
# ip_field
# timeout
# debug
# Verify required args were passed to command
log(debug, "Determining if required arguments are present")
if 'device' not in kwargs and 'panorama' not in kwargs:
common.exit_with_error("Missing required command argument: device or panorama", 3)
if 'panorama' in kwargs and 'serial' not in kwargs:
common.exit_with_error("Found 'panorama' arguments, but missing 'serial' argument", 3)
# Assign defaults to fields that aren't specified
action = kwargs['action'] if 'action' in kwargs else "login"
vsys = kwargs['vsys'] if 'vsys' in kwargs else "vsys1"
ip_field = kwargs['ip_field'] if 'ip_field' in kwargs else "src_ip"
user_field = kwargs['user_field'] if 'user_field' in kwargs else "user"
timeout = kwargs['timeout'] if 'timeout' in kwargs else None
# Determine if device hostname or serial was provided as argument or should be pulled from entries
log(debug, "Determining how firewalls should be contacted based on arguments")
use_panorama = False
hostname = None
serial = None
if "device" in kwargs:
hostname = kwargs['device']
elif "panorama" in kwargs:
use_panorama = True
hostname = kwargs['panorama']
serial = kwargs['serial']
else:
common.exit_with_error("Missing required command argument: device or panorama", 3)
log(debug, "Use Panorama: %s" % use_panorama)
log(debug, "VSys: %s" % vsys)
log(debug, "Hostname: %s" % hostname)
if use_panorama and serial is not None:
log(debug, "Device Serial: %s" % serial)
# Results contains the data from the search results and settings
# contains the sessionKey that we can use to talk to Splunk
results, unused1, settings = splunk.Intersplunk.getOrganizedResults()
# Get the sessionKey
sessionKey = settings['sessionKey']
log(debug, "Begin get API key")
# Get the API key from the Splunk store or from the device at hostname if no apikey is stored
apikey = common.apikey(sessionKey, hostname, debug)
# Create the connection to the firewall or Panorama
if use_panorama:
# For Panorama, create the Panorama object, and the firewall object
panorama = Panorama(hostname, api_key=apikey)
firewall = Firewall(serial=serial, vsys=vsys)
panorama.add(firewall)
firewall.userid.batch_start()
else:
# No Panorama, so just create the firewall object
firewall = Firewall(hostname, api_key=apikey, vsys=vsys)
firewall.userid.batch_start()
# Collect all the ip addresses and users into firewall batch requests
for result in results:
## Find the user (if a user_field was specified)
try:
this_user = result[user_field]
except KeyError as e:
result['status'] = "ERROR: Unable to determine user from field: %s" % user_field
continue
## Find the IP
try:
this_ip = result[ip_field]
        except KeyError as e:
            result['status'] = "ERROR: Unable to determine ip from field: %s" % ip_field
            continue
## Create a request in the batch user-id update for the firewall
## No API call to the firewall happens until all batch requests are created.
if action == "login":
log(debug, "Login event on firewall %s: %s - %s" % (firewall, this_ip, this_user))
firewall.userid.login(this_user, this_ip, timeout=timeout)
else:
log(debug, "Logout event on firewall %s: %s - %s" % (firewall, this_ip, this_user))
firewall.userid.logout(this_user, this_ip)
result['status'] = "Submitted successfully"
## Make the API calls to the User-ID API of each firewall
try:
firewall.userid.batch_end()
except pan.xapi.PanXapiError as e:
common.exit_with_error(str(e))
except Exception as e:
common.exit_with_error(str(e))
# output results
splunk.Intersplunk.outputResults(results)
def main_cli():
raise NotImplementedError
if __name__ == "__main__":
if environment.run_by_splunk():
try:
main_splunk()
except Exception as e:
common.exit_with_error(e)
else:
main_cli()
| 1.789063 | 2 |
tests/samples/config_copy.py | ron-huberfeld/lms | 85 | 12770983 | # flake8: noqa
import os
WTF_CSRF_ENABLED = False # On production, delete this line!
SECRET_KEY = ''
SERVER_ADDRESS = os.getenv('SERVER_ADDRESS', '127.0.0.1:80')
FEATURE_FLAG_CHECK_IDENTICAL_CODE_ON = os.getenv(
'FEATURE_FLAG_CHECK_IDENTICAL_CODE_ON', False,
)
USERS_CSV = 'users.csv'
# Babel config
LANGUAGES = {
'en': 'English',
'he': 'Hebrew',
}
| 1.382813 | 1 |
vivarium_cell/composites/injected_glc_phosphorylation.py | vivarium-collective/vivarium-cell | 7 | 12770984 | """
================================================
Toy Injected Glucose Phosphorylation Compartment
================================================
This is a toy example referenced in the documentation.
"""
from vivarium.core.experiment import Experiment
from vivarium.core.process import Composite
from vivarium.library.pretty import format_dict
from vivarium_cell.processes.glucose_phosphorylation import GlucosePhosphorylation
from vivarium_cell.processes.injector import Injector
class InjectedGlcPhosphorylation(Composite):
defaults = {
'glucose_phosphorylation': {
'k_cat': 1e-2,
},
'injector': {
'substrate_rate_map': {
'GLC': 1e-4,
'ATP': 1e-3,
},
},
}
def __init__(self, config):
super(InjectedGlcPhosphorylation, self).__init__(config)
def generate_processes(self, config):
injector = Injector(self.config['injector'])
glucose_phosphorylation = GlucosePhosphorylation(
self.config['glucose_phosphorylation'])
return {
'injector': injector,
'glucose_phosphorylation': glucose_phosphorylation,
}
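    # Both processes are wired to the same ('cell',) store below, so
    # molecules added by the injector are immediately visible to the
    # phosphorylation process.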
def generate_topology(self, config):
return {
'injector': {
'internal': ('cell', ),
},
'glucose_phosphorylation': {
'cytoplasm': ('cell', ),
'nucleoside_phosphates': ('cell', ),
'global': ('global', ),
},
}
| 2.421875 | 2 |
ppo_robot.py | nakamotoo/lifelong_rl | 0 | 12770985 |
from experiment_utils.launch_experiment import launch_experiment
from experiment_configs.configs.ppo_her.ppo_her_config import get_config
# from experiment_configs.configs.pg.ppo_config import get_config
from experiment_configs.algorithms.batch import get_algorithm
import os
num_epochs = 4
horizon = int(2000)
policy_layer_size = 512
policy_num_layer = 2
layer_division = 2
use_desired_goal = False
# initial_distance, z_distance, sparse, goal_distance
reward_type = "z_distance"
reward_scale = 3
# PickAndPlace, PartialPickAndPlace
# Push, PartialPush
# Slide, PartialSlide
ENV_NAME = 'PickAndPlace'
experiment_kwargs = dict(
exp_name='ppo-oracle-{}-p{}-num{}-div{}-{}-o{}-3e-5'.format(ENV_NAME, str(policy_layer_size), str(policy_num_layer), str(layer_division), str(reward_type), str(reward_scale)),
num_seeds=1,
instance_type='c4.4xlarge',
use_gpu=True,
)
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICES"]='0'
variant = dict(
algorithm='PPO',
# collector_type='batch',
collector_type='lstm_memory',
env_name=ENV_NAME,
env_kwargs=dict(
terminates=False,
use_desired_goal = use_desired_goal,
reward_type = reward_type
),
replay_buffer_size=horizon,
policy_kwargs=dict(
layer_size=policy_layer_size,
layer_num = policy_num_layer,
layer_division = layer_division
),
trainer_kwargs = dict(
reward_bounds=(-50, 50),
reward_scale=reward_scale, # increasing reward scale helps learning signal
),
value_kwargs=dict(
layer_size=policy_layer_size,
),
policy_trainer_kwargs=dict(
discount=0.99,
gae_lambda=0.97,
ppo_epsilon=0.1,
policy_lr=3e-5,
value_lr=3e-5,
target_kl=0.01,
num_epochs=num_epochs,
policy_batch_size=200,
value_batch_size=200,
normalize_advantages=True,
),
algorithm_kwargs=dict(
num_epochs=10000,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1,
num_expl_steps_per_train_loop=horizon,
min_num_steps_before_training=0,
max_path_length=100,
save_snapshot_freq=50,
),
)
sweep_values = {
}
launch_experiment(
get_config=get_config,
get_algorithm=get_algorithm,
variant=variant,
sweep_values=sweep_values,
**experiment_kwargs
)
| 1.921875 | 2 |
api-prototype/database/run_dev.py | LandRegistry/land-charges-discovery | 2 | 12770986 |
from service.server import app
import os
app.run( host="0.0.0.0", debug=True )
| 1.21875 | 1 |
Python/main.py | minjibyeongho/KOSA-Pytorch | 2 | 12770987 | #https://docs.python.org/ko/3/library/__main__.html
#main.py
#from module import *
import module
if __name__ == "__main__":
print(__name__)
#hello()
module.hello() | 2.171875 | 2 |
1/sonarsweep.py | ejpo/AoC-2021 | 0 | 12770988 | import sys
import os
def readDepths(filePath):
with open(filePath) as f:
depths = f.readlines()
return depths
#Process Individual depths
def processDepthReadings(depthReadings):
previousDepth = -1
depthIncreases = 0
for depth in depthReadings:
depth = int(depth)
if previousDepth == -1 :
previousDepth = depth
elif previousDepth < depth :
previousDepth = depth
depthIncreases += 1
else:
previousDepth = depth
print(depthIncreases)
#Process Sum of Readings
def processDepthReadingsSum(depthReadings):
previousSum = -1
sumIncreases = 0
for i in range(len(depthReadings)):
if i < len(depthReadings) - 2:
sum = int(depthReadings[i]) + int(depthReadings[i+1]) + int(depthReadings[i+2])
if sum > previousSum and previousSum != -1:
sumIncreases += 1
previousSum = sum
else:
break
print(sumIncreases)
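# Note: adjacent three-measurement windows share two readings, so
# sum(a[i+1:i+4]) > sum(a[i:i+3]) exactly when a[i+3] > a[i]; the window
# sums above could be replaced by comparing readings three positions apart.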
def main() -> int:
#Check Args
if len(sys.argv) <= 1:
print("Please specify the path for the file of depths and optionally set the second argument to 1 to process a moving sum of the next 3 depth readings")
return 0
elif not os.path.isfile(sys.argv[1]):
print ("File path provided is not a file or does not exist")
return 1
else:
depthReadings = readDepths(sys.argv[1])
#Process Depth Readings
if len(sys.argv) > 2 and int(sys.argv[2]) == 1:
processDepthReadingsSum(depthReadings)
else:
processDepthReadings(depthReadings)
#Return OK to Sys.Exit
return 0
if __name__ == '__main__':
sys.exit(main()) | 3.8125 | 4 |
cache.py | Omar-Saleh/TomasuloProcessor | 1 | 12770989 | import math
class Cache(object):
"""docstring for cache"""
def __init__(self, size, length, associativity, cycle_time, writing_policy,parent ):
#super(cache, self).__init__()
#self.arg = arg
self.index = int(math.log(size / (length * associativity),2))
self.offset = int(math.log(length,2))
self.tag = 16 - (self.index + self.offset)
self.num_of_sets = int( size / (length * associativity))
self.set_size = associativity
self.writing_policy = writing_policy
#self.hit_cycle_time = hit_cycle_time
#self.miss_cycle_time = miss_cycle_time
self.cycle_time = cycle_time
self.entries = []
self.hits = 0
self.misses = 0
for i in range(self.num_of_sets):
self.entries.append({})
self.child = None
self.parent = parent
if(parent != None):
parent.child = self
# print(self.num_of_sets)
def hit_ratio(self):
return (self.hits / (self.hits + self.misses))*100
def __repr__(self):
return "Index Bits: %s Offset Bits: %s Tag Bits: %s" % (self.index , self.offset , self.tag)
#testing
#a = Cache(2048,8,4,4,"wb",None)
#b = cache(4,4,4,4,"ahmed",a)
#print(b.parent)
#
# print(a)
# mask = 0
# for i in range(1 , a.index + 1):
# mask |= (1 << (16 - (a.tag + i)))
# print(bin(mask)) | 3.21875 | 3 |
ABC/abc051-abc100/abc059/a.py | KATO-Hiro/AtCoder | 2 | 12770990 |
'''input
atcoder beginner contest
ABC
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem A
if __name__ == '__main__':
a, b, c = input().split()
result = a[0] + b[0] + c[0]
print(result.upper())
| 3.109375 | 3 |
setup.py | XDeschuyteneer/live | 0 | 12770991 | # -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='livedata-subscribetags',
version='0.1.0',
description='Sample script to subscribe to changes of tag\'s value ',
long_description=readme,
author='HMS Industrial Netwoks S.A.',
author_email='<EMAIL>',
url='https://developer.ewon.biz/content/apiv2',
license=license,
packages=find_packages(exclude=('tests', 'docs')),
scripts=['APIv2/example.py'],
entry_points={
'console_scripts': [
'livedata-subscribetags=APIv2.example:launch',
],
},
install_requires=[
'stomp.py',
'websocket-client',
],
dependency_links=['git+https://github.com/gschizas/websocket-client.git@patch-1#egg=websocket-client-0'],
)
| 1.335938 | 1 |
check_fr24feed.py | anbucher/check_fr24feed | 0 | 12770992 |
#! /usr/bin/env python3
# -*- coding: utf-8; py-indent-offset: 4 -*-
#
# Author: <NAME>
# Contact: icinga (at) buchermail (dot) de
#
# License: The Unlicense, see LICENSE file.
# https://github.com/anbucher/check_fr24feed.git
"""Have a look at the check's README for further details.
"""
import argparse
import sys
import json
import datetime
import requests
from requests.structures import CaseInsensitiveDict
from traceback import format_exc
__author__ = '<NAME>'
__version__ = '2022031701'
DESCRIPTION = """This plugin lets you track if a fr24feeder is connected"""
# Sample URL: https://{feeder_ip}:8754/monitor.json
DEFAULT_PORT = '8754'
DEFAULT_WARN = 600 # seconds
DEFAULT_CRIT = 3600 # seconds
## Define states
# STATE_OK = 0: The plugin was able to check the service and it appeared
# to be functioning properly.
# STATE_WARN = 1: The plugin was able to check the service, but it
# appeared to be above some "warning" threshold or did not appear to be
# working properly.
# STATE_CRIT = 2: The plugin detected that either the service was not
# running or it was above some "critical" threshold.
# STATE_UNKNOWN = 3: Invalid command line arguments were supplied to the
# plugin or low-level failures internal to the plugin (such as unable to
# fork, or open a tcp socket) that prevent it from performing the
# specified operation. Higher-level errors (such as name resolution
# errors, socket timeouts, etc) are outside of the control of plugins and
# should generally NOT be reported as UNKNOWN states.
# Author of state definition
# __author__ = 'Linuxfabrik GmbH, Zurich/Switzerland'
# __version__ = '2020043001'
STATE_OK = 0
STATE_WARN = 1
STATE_CRIT = 2
STATE_UNKNOWN = 3
#STATE_DEPENDENT = 4
########### common functions ###########
# useful functions - Copyright by https://git.linuxfabrik.ch/linuxfabrik/lib/-/blob/master/base3.py
def get_perfdata(label, value, uom, warn, crit, min, max):
"""Returns 'label'=value[UOM];[warn];[crit];[min];[max]
"""
msg = "'{}'={}".format(label, value)
if uom is not None:
msg += uom
msg += ';'
if warn is not None:
msg += str(warn)
msg += ';'
if crit is not None:
msg += str(crit)
msg += ';'
if min is not None:
msg += str(min)
msg += ';'
if max is not None:
msg += str(max)
msg += ' '
return msg
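# Example: get_perfdata('sum_tracked', 12, None, None, None, 0, None)
# returns "'sum_tracked'=12;;;0; " in Nagios plugin perfdata notation.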
def oao(msg, state=STATE_OK, perfdata='', always_ok=False):
"""Over and Out (OaO)
Print the stripped plugin message. If perfdata is given, attach it
by `|` and print it stripped. Exit with `state`, or with STATE_OK (0) if
`always_ok` is set to `True`.
"""
if perfdata:
print(msg.strip() + '|' + perfdata.strip())
else:
print(msg.strip())
if always_ok:
sys.exit(0)
sys.exit(state)
def coe(result, state=STATE_UNKNOWN):
"""Continue or Exit (CoE)
This is useful if calling complex library functions in your checks
`main()` function. Don't use this in functions.
If a more complex library function, for example `lib.url3.fetch()` fails, it
    returns `(False, 'the reason why I failed')`, otherwise `(True,
    'this is my result')`. This forces you to do some error handling.
To keep things simple, use `result = lib.base3.coe(lib.url.fetch(...))`.
If `fetch()` fails, your plugin will exit with STATE_UNKNOWN (default) and
print the original error message. Otherwise your script just goes on.
The use case in `main()` - without `coe`:
>>> success, html = lib.url3.fetch(URL)
>>> if not success:
>>> print(html) # contains the error message here
    >>> exit(STATE_UNKNOWN)
Or simply:
>>> html = lib.base3.coe(lib.url.fetch(URL))
Parameters
----------
result : tuple
The result from a function call.
result[0] = expects the function return code (True on success)
result[1] = expects the function result (could be of any type)
state : int
If result[0] is False, exit with this state.
Default: 3 (which is STATE_UNKNOWN)
Returns
-------
any type
The result of the inner function call (result[1]).
"""
if result[0]:
# success
return result[1]
print(result[1])
sys.exit(state)
########### specific check functions ###########
def parse_args():
"""Parse command line arguments using argparse.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'-V', '--version',
action='version',
version='%(prog)s: v{} by {}'.format(__version__, __author__)
)
parser.add_argument(
'--always-ok',
help='Always returns OK.',
dest='ALWAYS_OK',
action='store_true',
default=False,
)
parser.add_argument(
'--host',
help='Host IP address of your feeder.',
dest='HOST_IP',
required=True,
default=False,
)
parser.add_argument(
'--port',
help='Monitor Port of your feeder. Default: %(default)s',
dest='HOST_PORT',
default=DEFAULT_PORT,
)
parser.add_argument(
'-c', '--critical',
help='Set the critical threshold seconds since last connection update. Default: %(default)s',
dest='CRIT',
type=int,
default=DEFAULT_CRIT,
)
parser.add_argument(
'-w', '--warning',
help='Set the warning threshold seconds since last connection update. Default: %(default)s',
dest='WARN',
type=int,
default=DEFAULT_WARN,
)
return parser.parse_args()
def run_monitor_check(path):
"""Check FR24 feeder.
"""
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
# Get data from monitor.json
try:
j = requests.get(path, headers=headers)
json_str = j.json()
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
msg = template.format(type(ex).__name__, ex.args)
return(False, msg)
# FAKE request
# f = open("sample_data/monitor.json")
# json_str = json.load(f)
try:
return (True, json_str)
except:
return(False, 'ValueError: No JSON object could be decoded')
def get_sec_last_status(data):
"""Read out seconds since last status update.
"""
# Get current datetime
now = datetime.datetime.utcnow()
# Check date difference
try:
### timeFormat: 2022-03-17 12:39:31
datetimeunix = int(data['feed_last_ac_sent_time'])
# lastsentTime = datetime.datetime.strptime(datetimestring, '%Y-%m-%d %H:%M:%S')
lastsentTime = datetime.datetime.utcfromtimestamp(datetimeunix)
# calculate time difference
diffInSecs = (abs(now - lastsentTime ).days * 24 * 60 * 60) + abs(now - lastsentTime ).seconds
return (True, diffInSecs)
except:
return (False, 'ValueError: Last Status could not be parsed')
def get_metrics(data):
try:
metrics = {
'adsb_tracked': data['feed_num_ac_adsb_tracked'],
'non_adsb_tracked': data['feed_num_ac_non_adsb_tracked'],
'sum_tracked': data['feed_num_ac_tracked']
}
return (True, metrics)
except:
return (False, 'ValueError: Metrics could not be parsed')
def get_status(data):
try:
status = {
'feed_status': data['feed_status'] ,
'last_rx_connect_status': data['last_rx_connect_status'],
'feed_last_connected_time': datetime.datetime.utcfromtimestamp(int(data['feed_last_connected_time'])).strftime("%Y-%m-%d %H:%M:%S")
}
return (True, status)
except:
return (False, 'ValueError: Status could not be parsed')
def main():
"""The main function. Hier spielt die Musik.
"""
# parse the command line, exit with UNKNOWN if it fails
try:
args = parse_args()
except SystemExit:
sys.exit(STATE_UNKNOWN)
# init output vars
msg = ''
state = STATE_OK
perfdata = ''
# Build url
path = 'http://' + args.HOST_IP + ':' + args.HOST_PORT + '/monitor.json'
response = coe(run_monitor_check(path))
diffSecs = coe(get_sec_last_status(response))
metrics = coe(get_metrics(response))
status = coe(get_status(response))
# # Add metrics to perfdata
perfdata += get_perfdata('adsb_tracked', metrics['adsb_tracked'], None, None, None, 0, None)
perfdata += get_perfdata('non_adsb_tracked', metrics['non_adsb_tracked'], None, None, None, 0, None)
perfdata += get_perfdata('sum_tracked', metrics['sum_tracked'], None, None, None, 0, None)
# check warn and crit thresholds
try:
if diffSecs > args.CRIT:
msg += 'CRIT threshold reached: ' + str(diffSecs)
state = STATE_CRIT
else:
if diffSecs > args.WARN:
msg += 'WARN threshold reached: ' + str(diffSecs)
state = STATE_WARN
else:
msg = 'Feeder: OK - ' + str(diffSecs) + 's since last upload'
                msg += '\nStatus: {} since {}'.format(
                    status['feed_status'], status['feed_last_connected_time'])
state = STATE_OK
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
msg = template.format(type(ex).__name__, ex.args)
state = STATE_UNKNOWN
oao(msg, state, perfdata)
if __name__ == '__main__':
try:
main()
except Exception: # pylint: disable=W0703
"""See you (cu)
Prints a Stacktrace (replacing "<" and ">" to be printable in Web-GUIs), and exits with
STATE_UNKNOWN.
"""
print(format_exc().replace("<", "'").replace(">", "'"))
sys.exit(STATE_UNKNOWN)
| 1.875 | 2 |
projects/judge_map/generate_json.py | freelawproject/cleanup-scripts | 0 | 12770993 | import pprint
template = {
    "type": "FeatureCollection",
    "features": [
    {
        "type": "Feature",
        "geometry": {
            "type": "Point",
            "coordinates": [-111.6782379150, 39.32373809814]  # GeoJSON order: longitude, then latitude
}
},
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [-74.00714111328,40.71455001831]
}
}]}
feat = {
    "type": "Feature",
    "geometry": {
        "type": "Point",
        "coordinates": [-111.6782379150, 39.32373809814]  # GeoJSON order: longitude, then latitude
}
}
with open("locations_with_long_lat.txt", "r") as f:  # despite the name, each row ends with latitude then longitude
locations = f.readlines()
spots = []
for location in locations:
    try:
        row = location.split("\t")
        # Keep the row only if its last column parses as a number
        float(row[-1].strip())
        spots.append({"longitude": row[-1].strip(), "latitude": row[-2].strip()})
    except Exception:
        # Skip rows with missing or non-numeric coordinates
        pass
pprint.pprint(spots)
# TODO: write the assembled GeoJSON template out to a file
| 2.859375 | 3 |
desafios/desafio#07.py | thiagocanabarro/PythonProjects | 0 | 12770994 |
# Write a program that reads a student's two test grades, then computes and shows their average
n1 = float(input("Enter your first test grade: "))
n2 = float(input("Enter your second test grade: "))
me = (n1 + n2) / 2
print("The arithmetic mean of your grades is:", me)
| 3.859375 | 4 |
serializers/protobuf_serializer/structure_pb2.py | whiteRa2bit/serializers | 0 | 12770995 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: structure.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='structure.proto',
package='protoblog',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0fstructure.proto\x12\tprotoblog\"\xbb\x01\n\x06Struct\x12\x11\n\tPackageID\x18\x01 \x01(\x05\x12\x10\n\x08PersonID\x18\x02 \x01(\x05\x12\x0c\n\x04Name\x18\x03 \x01(\t\x12\x33\n\tInventory\x18\x04 \x03(\x0b\x32 .protoblog.Struct.InventoryEntry\x12\x17\n\x0f\x43urrentLocation\x18\x05 \x01(\t\x1a\x30\n\x0eInventoryEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x62\x06proto3'
)
_STRUCT_INVENTORYENTRY = _descriptor.Descriptor(
name='InventoryEntry',
full_name='protoblog.Struct.InventoryEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='protoblog.Struct.InventoryEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='protoblog.Struct.InventoryEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=170,
serialized_end=218,
)
_STRUCT = _descriptor.Descriptor(
name='Struct',
full_name='protoblog.Struct',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='PackageID', full_name='protoblog.Struct.PackageID', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='PersonID', full_name='protoblog.Struct.PersonID', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Name', full_name='protoblog.Struct.Name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Inventory', full_name='protoblog.Struct.Inventory', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='CurrentLocation', full_name='protoblog.Struct.CurrentLocation', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_STRUCT_INVENTORYENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=218,
)
_STRUCT_INVENTORYENTRY.containing_type = _STRUCT
_STRUCT.fields_by_name['Inventory'].message_type = _STRUCT_INVENTORYENTRY
DESCRIPTOR.message_types_by_name['Struct'] = _STRUCT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Struct = _reflection.GeneratedProtocolMessageType('Struct', (_message.Message,), {
'InventoryEntry' : _reflection.GeneratedProtocolMessageType('InventoryEntry', (_message.Message,), {
'DESCRIPTOR' : _STRUCT_INVENTORYENTRY,
'__module__' : 'structure_pb2'
# @@protoc_insertion_point(class_scope:protoblog.Struct.InventoryEntry)
})
,
'DESCRIPTOR' : _STRUCT,
'__module__' : 'structure_pb2'
# @@protoc_insertion_point(class_scope:protoblog.Struct)
})
_sym_db.RegisterMessage(Struct)
_sym_db.RegisterMessage(Struct.InventoryEntry)
_STRUCT_INVENTORYENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 1.304688 | 1 |
static/data/heatmap/edit.py | noahbkim/finances | 1 | 12770996 |
import json
import os
| 0.902344 | 1 |
flow/envs/bay_bridge/__init__.py | pcmoritz/flow | 16 | 12770997 | from flow.envs.bay_bridge.base import BayBridgeEnv
__all__ = ["BayBridgeEnv"]
| 1.070313 | 1 |
tests/test_models/test_fields.py | fossabot/postmodel | 15 | 12770998 |
import pytest
from datetime import datetime, date, timedelta
import json
import uuid
from tests.testmodels import (
IntFieldsModel,
DataVersionFieldsModel,
DatetimeFieldsModel,
DateFieldsModel,
TimeDeltaFieldsModel,
JSONFieldsModel,
UUIDFieldsModel
)
from postmodel.models.fields import (
CharField,
DecimalField,
DatetimeField,
UUIDField
)
from postmodel.exceptions import ConfigurationError
def test_int_field():
model = IntFieldsModel(id=1, intnum=10)
assert model.intnum_null == None
fields_map = model._meta.fields_map
intnum_field = fields_map['intnum']
assert intnum_field.required == True
assert intnum_field.to_db_value(None) == None
assert intnum_field.to_db_value(23) == 23
assert intnum_field.to_db_value('33') == 33
assert intnum_field.to_python_value(None) == None
assert intnum_field.to_python_value(344) == 344
assert intnum_field.to_python_value('876') == 876
assert fields_map['intnum_null'].required == False
def test_wrong_config():
with pytest.raises(ConfigurationError):
CharField(max_length = 0)
with pytest.raises(ConfigurationError):
DecimalField(max_digits = 0, decimal_places=10)
with pytest.raises(ConfigurationError):
DecimalField(max_digits = 128, decimal_places = -1)
with pytest.raises(ConfigurationError):
DatetimeField(auto_now=True, auto_now_add=True)
def test_dataversion_field():
model = DataVersionFieldsModel(id=1)
assert model.data_ver == 0
fields_map = model._meta.fields_map
data_ver = fields_map['data_ver']
data_ver.auto_value(model)
assert model.data_ver == 1
def test_datatime_field():
model = DatetimeFieldsModel(id=1, datetime=datetime.utcnow())
fields_map = model._meta.fields_map
field = fields_map['datetime']
fields_map['datetime_null'].auto_value(model)
assert model.datetime_null == None
assert field.to_python_value(None) == None
dt = datetime.now()
assert field.to_python_value(dt) == dt
assert field.to_python_value('2020-01-06 12:12:12') == datetime(2020, 1, 6, 12, 12, 12)
assert model.datetime_auto == None
assert model.datetime_add == None
dt = datetime.utcnow()
fields_map['datetime_auto'].auto_value(model)
fields_map['datetime_add'].auto_value(model)
assert model.datetime_auto > dt
assert model.datetime_add > dt
def test_date_field():
model = DateFieldsModel(id=1, date=date.today())
fields_map = model._meta.fields_map
field = fields_map['date']
assert field.to_python_value(None) == None
dt = date.today()
assert field.to_python_value(dt) == dt
assert field.to_python_value('2020-01-06') == date(2020, 1, 6)
def test_timedelta_field():
model = TimeDeltaFieldsModel(id=1, timedelta=1000000)
fields_map = model._meta.fields_map
field = fields_map['timedelta']
assert field.to_python_value(None) == None
assert field.to_python_value(timedelta(20, 0, 0)) == timedelta(20, 0, 0)
assert field.to_python_value(1000) == timedelta(microseconds=1000)
assert field.to_db_value(None) == None
assert field.to_db_value(timedelta(days=1)) == 86400*1000000
def test_json_field():
model = JSONFieldsModel(id=1, data={'fookey': 'hello', 'key2': 124})
fields_map = model._meta.fields_map
field = fields_map['data']
assert field.to_python_value(None) == None
assert field.to_python_value(['a', 'b']) == ['a', 'b']
assert field.to_python_value('["abc", 123, "cde"]') == ["abc", 123, "cde"]
assert field.to_db_value(None) == None
assert json.loads(field.to_db_value({'fookey': 'world', 'key2': 223})) == {'fookey': 'world', 'key2': 223}
def test_uuid_field():
f = UUIDField(pk=True)
assert f.default == uuid.uuid4
model = UUIDFieldsModel(data='123e4567-e89b-12d3-a456-426655440000')
fields_map = model._meta.fields_map
field = fields_map['data']
v = uuid.uuid4()
assert field.to_python_value(None) == None
assert field.to_python_value(v) == uuid.UUID(str(v))
assert field.to_python_value(str(v)) == v
assert field.to_db_value(None) == None
assert field.to_db_value(v) == str(v)
| 2.28125 | 2 |
balancer/balancer.py | jawilk/balancer-exchange-python | 6 | 12770999 |
import json
from web3 import Web3
from utils import load_abi
def initialize_tokens(tokens):
all_tokens = []
for token in tokens:
all_tokens.append(Token(
contract_address = Web3.toChecksumAddress(token['address']),
balance = token['balance'],
decimals = token['decimals'],
symbol = token['symbol'],
denorm_weight = token['denormWeight'],
))
return all_tokens
class Token:
''''
tokens': [{'address': '0xa3bed4e1c75d00fa6f4e5e6922db7261b5e9acd2', 'balance': '566201.286846114414239124', 'decimals': 18, 'denormWeight': '8', 'id': '0x003a70265a3662342010823bea15dc84c6f7ed54-0xa3bed4e1c75d00fa6f4e5e6922db7261b5e9acd2', 'symbol': 'MTA'}, {'address': '0xe2f2a5c287993345a840db3b0845fbc70f5935a5', 'balance': '783270.361777465361029266', 'decimals': 18, 'denormWeight': '2', 'id': '0x003a70265a3662342010823bea15dc84c6f7ed54-0xe2f2a5c287993345a840db3b0845fbc70f5935a5', 'symbol': 'mUSD'}]
'''
def __init__(
self,
contract_address,
balance,
decimals,
symbol,
denorm_weight,
):
self.contract_address = contract_address
self.balance = balance
self.decimals = decimals
self.symbol = symbol
self.denorm_weight = denorm_weight
class Pool:
'''
https://docs.balancer.finance/smart-contracts/api
{'pools': [{'finalized': True, 'id': '0x003a70265a3662342010823bea15dc84c6f7ed54', 'publicSwap': True, 'swapFee': '0.001', 'tokens': [{'address': '0xa3bed4e1c75d00fa6f4e5e6922db7261b5e9acd2', 'balance': '566201.286846114414239124', 'decimals': 18, 'denormWeight': '8', 'id': '0x003a70265a3662342010823bea15dc84c6f7ed54-0xa3bed4e1c75d00fa6f4e5e6922db7261b5e9acd2', 'symbol': 'MTA'}, {'address': '0xe2f2a5c287993345a840db3b0845fbc70f5935a5', 'balance': '783270.361777465361029266', 'decimals': 18, 'denormWeight': '2', 'id': '0x003a70265a3662342010823bea15dc84c6f7ed54-0xe2f2a5c287993345a840db3b0845fbc70f5935a5', 'symbol': 'mUSD'}], 'tokensList': ['0xe2f2a5c287993345a840db3b0845fbc70f5935a5', '0xa3bed4e1c75d00fa6f4e5e6922db7261b5e9acd2'], 'totalWeight': '10'}]}
'''
ABI_PATH = 'abi/BPool.abi'
def __init__(
self,
w3,
contract_address,
finalized=None,
public_swap=None,
swap_fee=None,
total_weight=None,
tokens_list=None,
tokens=None,
):
self.contract_address = contract_address
self.contract_abi = load_abi(self.ABI_PATH)
self.contract = w3.eth.contract(
address=self.contract_address,
abi=self.contract_abi,
)
# Pool properties
self.properties = {
'isFinalized': finalized,
'isPublicSwap': public_swap,
'getSwapFee': swap_fee,
'getTotalDenormalizedWeight': total_weight,
'tokens_list': tokens_list,
'getFinalTokens': initialize_tokens(tokens) if tokens else None,
}
def _set_value(self, prop, *argv):
'''Fetch static information only once on demand'''
if argv:
self.properties[prop] = self.properties[prop] if self.properties.get(prop) else self.contract.get_function_by_name(prop)(*argv).call()
return self.properties[prop]
self.properties[prop] = self.properties[prop] if self.properties.get(prop) else self.contract.get_function_by_name(prop)().call()
return self.properties[prop]
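    # Caveat: results are memoised by property name alone, so for
    # argument-dependent lookups (allowance, balance_of) only the first
    # call's arguments ever reach the chain; later calls hit the cache.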
def bone(self):
return self._set_value('BONE')
def bpow_precision(self):
return self._set_value('BPOW_PRECISION')
def exit_fee(self):
return self._set_value('EXIT_FEE')
def init_pool_supply(self):
return self._set_value('INIT_POOL_SUPPLY')
def max_bound_tokens(self):
return self._set_value('MAX_BOUND_TOKENS')
def max_bpow_base(self):
return self._set_value('MAX_BPOW_BASE')
def max_fee(self):
return self._set_value('MAX_FEE')
def max_in_ratio(self):
return self._set_value('MAX_IN_RATIO')
def max_out_ratio(self):
return self._set_value('MAX_OUT_RATIO')
def max_total_weight(self):
return self._set_value('MAX_TOTAL_WEIGHT')
def max_weight(self):
return self._set_value('MAX_WEIGHT')
def min_balance(self):
        return self._set_value('MIN_BALANCE')
def min_bound_tokens(self):
return self._set_value('MIN_BOUND_TOKENS')
def min_bpow_base(self):
return self._set_value('MIN_BPOW_BASE')
def min_fee(self):
return self._set_value('MIN_FEE')
def min_weight(self):
return self._set_value('MIN_WEIGHT')
def allowance(self, src_address, dst_address):
        return self._set_value('allowance', src_address, dst_address)
def balance_of(self, address):
        return self._set_value('balanceOf', address)
def decimals(self):
return self._set_value('decimals')
def color(self):
return self._set_value('getColor')
def controller(self):
return self._set_value('getController')
def final_tokens(self):
return self._set_value('getFinalTokens')
def swap_fee(self):
return self._set_value('getSwapFee')
def total_denormalized_weight(self):
return self._set_value('getTotalDenormalizedWeight')
def is_finalized(self):
return self._set_value('isFinalized')
def is_public_swap(self):
return self._set_value('isPublicSwap')
def name(self):
return self._set_value('name')
def symbol(self):
return self._set_value('symbol')
def total_supply(self):
return self._set_value('totalSupply')
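    # The calc_* helpers below mirror BPool's on-chain math. Per the
    # Balancer whitepaper, e.g. out-given-in is
    #   aO = bO * (1 - (bI / (bI + aI * (1 - fee))) ** (wI / wO))
    # with b*/w* the token balances/weights and aI the amount sold.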
def calc_in_given_out(self, *argv):
'''argv:
tokenBalanceIn
tokenWeightIn
tokenBalanceOut
tokenWeightOut
tokenAmountOut
swapFee
'''
return self.contract.functions.calcInGivenOut(*argv).call()
def calc_out_given_in(self, *argv):
'''argv:
tokenBalanceIn
tokenWeightIn
tokenBalanceOut
tokenWeightOut
tokenAmountIn
swapFee
'''
return self.contract.functions.calcOutGivenIn(*argv).call()
def calc_pool_in_given_single_out(self, *argv):
'''argv:
tokenBalanceOut
tokenWeightOut
poolSupply
totalWeight
tokenAmountOut
swapFee
'''
return self.contract.functions.calcPoolInGivenSingleOut(*argv).call()
def calc_pool_out_given_single_in(self, *argv):
'''argv:
tokenBalanceIn
tokenWeightIn
poolSupply
totalWeight
tokenAmountIn
swapFee
'''
return self.contract.functions.calcPoolOutGivenSingleIn(*argv).call()
def calc_single_in_given_pool_out(self, *argv):
'''argv:
tokenBalanceIn
tokenWeightIn
poolSupply
totalWeight
tokenAmountOut
swapFee
'''
return self.contract.functions.calcSingleInGivenPoolOut(*argv).call()
def calc_single_out_given_pool_in(self, *argv):
'''argv:
tokenBalanceOut
tokenWeightOut
poolSupply
totalWeight
poolAmountIn
swapFee
'''
        return self.contract.functions.calcSingleOutGivenPoolIn(*argv).call()
def cal_spot_price(self, *argv):
'''argv:
tokenBalanceIn
tokenWeightIn
tokenBalanceOut
tokenWeightOut
swapFee
'''
return self.contract.functions.calcSpotPrice(*argv).call()
def get_balance(self, address):
return self.contract.functions.getBalance(address).call()
def get_denormalized_weight(self, token_address):
return self.contract.functions.getDenormalizedWeight(token_address).call()
def get_normalized_weight(self, token_address):
return self.contract.functions.getNormalizedWeight(token_address).call() #/ 10**16
def get_num_tokens(self):
return self.contract.functions.getNumTokens().call()
def get_spot_price(self, token_in_address, token_out_address):
return self.contract.functions.getSpotPrice(token_in_address, token_out_address).call()
def get_spot_price_sans_fee(self, token_in_address, token_out_address):
return self.contract.functions.getSpotPriceSansFee(token_in_address, token_out_address).call()
def is_bound(self, token_address):
return self.contract.functions.isBound(token_address).call()
| 2.3125 | 2 |
qt__pyqt__pyside__pyqode/pyqt5__QComboBox.py | DazEB2/SimplePyScripts | 117 | 12771000 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.Qt import *
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.tb_result = QTextBrowser()
self.cb_pets = QComboBox()
self.cb_pets.currentIndexChanged.connect(self._on_pet_changed)
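        # NB: the signal is connected before items are added, so
        # _on_pet_changed fires once when the first item lands at index 0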
        self.cb_pets.addItem('Dogs', userData='dogs')
        self.cb_pets.addItem('Cats', userData='cats')
layout = QVBoxLayout()
layout.addWidget(self.cb_pets)
layout.addWidget(self.tb_result)
self.setLayout(layout)
def _on_pet_changed(self, index):
# print(index) # 0
        # print(self.cb_pets.itemText(index)) # Dogs
# print(self.cb_pets.itemData(index)) # dogs
# print()
# print(self.cb_pets.currentIndex()) # 0
        # print(self.cb_pets.currentText()) # Dogs
# print(self.cb_pets.currentData()) # dogs
data = self.cb_pets.itemData(index)
if data == 'cats':
            text = "You like cats"
elif data == 'dogs':
            text = "You like dogs"
else:
text = ''
self.tb_result.setHtml(text)
if __name__ == '__main__':
app = QApplication([])
mw = MainWindow()
mw.show()
app.exec()
| 2.359375 | 2 |
python/SerialPortUI.py | ptracton/wb_platform | 0 | 12771001 | #! /usr/bin/env python3
"""
UI class for Serial Port hardware. This will have an instantiation of a
Serial port.
"""
#
# The GUI libraries since we build some GUI components here
#
import PyQt5
import PyQt5.QtCore
import PyQt5.QtWidgets
import SerialPort
class SerialPortUI(PyQt5.QtCore.QObject):
connectButtonSignal = PyQt5.QtCore.pyqtSignal()
def __init__(self, parent=None, name="Serial Port", port="/dev/ttyUSB0",
baud_rate="115200", bits=8, parity=None, stop_bits=1):
super(SerialPortUI, self).__init__()
#
# Serial Port
#
self.serial_port = SerialPort.SerialPort(port, baud_rate,
bits, parity,
stop_bits)
#
# GUI components
#
self.SerialPortName = PyQt5.QtWidgets.QLabel(name)
self.SerialPortComboBox = PyQt5.QtWidgets.QComboBox()
self.SerialPortComboBox.addItems(self.serial_port.get_list_of_ports())
baud_rate_list = ["115200", "57600", "38400", "9600"]
self.BaudRateSelected = baud_rate_list[0]
self.BaudRateComboBox = PyQt5.QtWidgets.QComboBox()
self.BaudRateComboBox.addItems(baud_rate_list)
self.SerialPortLayout = PyQt5.QtWidgets.QHBoxLayout()
self.SerialConnectButton = PyQt5.QtWidgets.QPushButton("Connect")
self.SerialDisConnectButton = PyQt5.QtWidgets.QPushButton("Disconnect")
self.SerialPortLayout.addWidget(self.SerialPortName)
self.SerialPortLayout.addWidget(PyQt5.QtWidgets.QLabel("Select Port"))
self.SerialPortLayout.addWidget(self.SerialPortComboBox)
self.SerialPortLayout.addWidget(
PyQt5.QtWidgets.QLabel("Select Baud Rate"))
self.SerialPortLayout.addWidget(self.BaudRateComboBox)
self.SerialPortLayout.addWidget(self.SerialConnectButton)
self.SerialPortLayout.addWidget(self.SerialDisConnectButton)
#
# Serial port configs based on GUI selection (defaults)
#
# self.serial_port.setBaudrate(self.BaudRateSelected)
# self.serial_port.setPort("/dev/ttyUSB0")
self.SerialConnectButton.clicked.connect(self.connectClicked)
self.SerialDisConnectButton.clicked.connect(self.disconnectClicked)
pass
def getLayout(self):
"""
Return our layout for easy GUI integration
"""
return self.SerialPortLayout
def connectClicked(self):
print("Connect Clicked")
print("BaudRate {}".format(self.BaudRateComboBox.currentText()))
print("Port {}".format(self.SerialPortComboBox.currentText()))
if self.serial_port.is_open:
self.serial_port.close()
self.serial_port.setPort(self.SerialPortComboBox.currentText())
self.serial_port.baudrate = self.BaudRateComboBox.currentText()
try:
self.serial_port.open()
        except Exception as exc:
            print("FAILED TO OPEN PORT {}: {}".format(
                self.SerialPortComboBox.currentText(), exc))
if self.serial_port.is_open:
self.SerialPortComboBox.setEnabled(False)
self.BaudRateComboBox.setEnabled(False)
self.connectButtonSignal.emit()
pass
def disconnectClicked(self):
print("Disconnect Clicked")
self.serial_port.close()
self.SerialPortComboBox.setEnabled(True)
self.BaudRateComboBox.setEnabled(True)
pass
if __name__ == "__main__":
import sys
class TestUI(PyQt5.QtWidgets.QDialog):
def __init__(self, parent=None):
super(TestUI, self).__init__(parent)
layOut = PyQt5.QtWidgets.QHBoxLayout()
self.serial_port_ui = SerialPortUI()
layOut.addLayout(self.serial_port_ui.getLayout())
self.setLayout(layOut)
pass
app = PyQt5.QtWidgets.QApplication(sys.argv)
GUI = TestUI()
GUI.show()
app.exec_()
| 3.03125 | 3 |
rw_calibration/rw_calibration.py | mrossi93/rw_calibration | 0 | 12771002 | """Main module."""
import itertools as it
import numpy as np
def read_data(filepath, sep=" "):
"""This function reads file containing Points Coordinates
Arguments:
filepath (str) -- Path to the file to be read
Keyword Arguments:
sep (str) -- Separator for columns in file (default: " ")
Returns:
(list) -- List of points read from input file, in the format [x,y,z]
"""
with open(filepath, "r") as file:
raw_lines = file.readlines()
points = []
for raw_line in raw_lines:
coordinates = raw_line.split(sep)[1:4]
for i in range(3):
coordinates[i] = float(coordinates[i])
points.append(coordinates)
return points
def rototranslation(points):
"""This function generates a rototranslator starting from three
non-collinear points
Arguments:
points (numpy.array) -- Three non-collinear points in a 3x3
numpy array [x,y,z]
Returns:
[numpy.array] -- Rototranslation matrix (4x4 numpy array)
"""
origin = points[0, :]
x = points[1, :] - points[0, :]
x_versor = np.divide(x, np.linalg.norm(x))
y_1 = points[1, :] - points[0, :]
y_2 = points[2, :] - points[0, :]
y = np.cross(y_1, y_2)
y_versor = np.divide(y, np.linalg.norm(y))
z_versor = np.cross(x_versor, y_versor)
rototranslator = np.array(
[
np.append(x_versor, 0.0),
np.append(y_versor, 0.0),
np.append(z_versor, 0.0),
np.append(origin, 1.0),
]
).T
return rototranslator
def calibrate(points_G, points_R):
"""This function performs the actual Robot to World Calibration.
It computes every possibile combination between three non-collinear points,
computes the correspoding rototranslator and then average the mean
rototranslator. Everything is expressed in mm.
Arguments:
points_G (numpy.array) -- Points in World Coordinates
points_R (numpy.array) -- Points in Robot Coordinates
Raises:
Exception: Number of points in Robot and World Coordinates
file is not correspoding.
Returns:
[dict] -- Dictionary containing the computed rototranslator
and some informations about the error (mean and standard
deviation).
"""
# Remove offset from data
if len(points_G) != len(points_R):
raise Exception(
"""
Number of points must match in robot and world files!
Found {} points in World file and {} points in Robot file""".format(
len(points_G), len(points_R)
)
)
num_points = len(points_G)
offset_G_x = -80 # coherence
offset_G_y = 0 # No offset on y axis
offset_G_z = 250 # coherence
offset_G = [offset_G_x, offset_G_y, offset_G_z]
offset_R_x = 80 - 80 # from TCP to SMR + coherence
offset_R_y = 0 # No offset on y axis
offset_R_z = (
20 + 25 + 250
) # pointer basement along z + SMR along z + coherence
offset_R = [offset_R_x, offset_R_y, offset_R_z]
# Remove offset
points_G = np.array(points_G)
points_R = np.array(points_R)
points_G[:, :] = points_G[:, :] - offset_G
points_R[:, :] = points_R[:, :] - offset_R
# Generate creation dataset and control dataset
creation_perc = 0.3
num_creation_points = round(num_points * creation_perc)
num_star_points = round(num_points * (1 - creation_perc))
# At least three points are needed in creation set
if num_creation_points <= 2:
num_creation_points = 3
num_star_points = num_points - num_creation_points
if num_creation_points + num_star_points != num_points:
num_star_points = num_star_points - 1
index_creation = np.round(
np.linspace(0, num_points - 1, num_creation_points)
)
index_creation = [int(i) for i in index_creation]
index_star = [i for i in range(num_points) if i not in index_creation]
points_G_creation = points_G[index_creation, :]
points_R_creation = points_R[index_creation, :]
points_star_G_real = points_G[index_star, :]
points_star_R = points_R[index_star, :]
# Mean Rototranslation Method
index_perm = list(
it.permutations(range(num_creation_points), 3)
    )  # permutations without repetitions
creation_perm_G = np.zeros([len(index_perm), 9])
creation_perm_R = np.zeros([len(index_perm), 9])
for i in range(len(index_perm)):
creation_perm_G[i, :3] = points_G_creation[index_perm[i][0], :3]
creation_perm_G[i, 3:6] = points_G_creation[index_perm[i][1], :3]
creation_perm_G[i, 6:] = points_G_creation[index_perm[i][2], :3]
creation_perm_R[i, :3] = points_R_creation[index_perm[i][0], :3]
creation_perm_R[i, 3:6] = points_R_creation[index_perm[i][1], :3]
creation_perm_R[i, 6:] = points_R_creation[index_perm[i][2], :3]
LG_T = np.zeros([4, 4, len(index_perm)])
LR_T = np.zeros([4, 4, len(index_perm)])
RL_T = np.zeros([4, 4, len(index_perm)])
RG_T_temp = np.zeros([4, 4, len(index_perm)])
# for each permutation, generate the rototranslator
for i in range(len(index_perm)):
points_G_current = np.array(
[
creation_perm_G[i, :3],
creation_perm_G[i, 3:6],
creation_perm_G[i, 6:],
]
)
points_R_current = np.array(
[
creation_perm_R[i, :3],
creation_perm_R[i, 3:6],
creation_perm_R[i, 6:],
]
)
LG_T[:, :, i] = rototranslation(points_G_current)
LR_T[:, :, i] = rototranslation(points_R_current)
RL_T[:, :, i] = np.linalg.inv(LR_T[:, :, i])
RG_T_temp[:, :, i] = np.matmul(LG_T[:, :, i], RL_T[:, :, i])
RG_T = np.mean(RG_T_temp, axis=2) # Mean rototranslator
    # Validate the mean rototranslator on the held-out (star) points
points_star_R = np.append(
points_star_R, np.ones([len(points_star_R), 1]), axis=1
) # homogeneous
points_star_G_real = np.append(
points_star_G_real, np.ones([len(points_star_G_real), 1]), axis=1
) # homogeneous
# estimation starting from T and robot data
points_star_G_estimated = np.matmul(RG_T, points_star_R.T).T
# comparison between real and estimated
error = abs(points_star_G_real - points_star_G_estimated)
error_mean = np.mean(error, axis=0)[:3]
error_std_dev = np.std(error, axis=0)[:3]
results = {
"Rototranslator": RG_T,
"Error Mean": error_mean,
"Error Std Dev": error_std_dev,
}
return results
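# Hedged usage sketch (file names and the column layout expected by
# read_data above are assumptions; adapt to your own measurement files):
if __name__ == "__main__":
    points_world = read_data("points_world.txt")
    points_robot = read_data("points_robot.txt")
    results = calibrate(points_world, points_robot)
    print("Robot-to-World rototranslator:\n", results["Rototranslator"])
    print("Mean absolute error [mm]:", results["Error Mean"])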
| 4.0625 | 4 |
intro_exercise.py | kefa4520/ds_week1 | 0 | 12771003 | # Lines starting with # are comments and are not run by Python.
"""
Multi-line comments are possible with triple quotes like this.
"""
# import pandas and matplotlib
# Load the pandas library as pd
import pandas as pd
# Load the matplotlib library as plt
import matplotlib.pyplot as plt
# load the numpy library as np (called "aliasing")
import numpy as np
# This data comes from the UCI ML repository:
# https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
# It is the daily number of users from a bike share program
df = pd.read_csv('day.csv')
# shows a preview of the data
df.head()
# shows some basic stats of the data
df.describe()
# Use the examples in the jupyter notebook to help you here.
# calculate the mean and standard deviation of the daily data counts (the 'cnt' column)
# mean
df['cnt'].mean()
# standard deviation
df['cnt'].std()
# plot the counts ('cnt' column)
df['cnt'].plot()
# Carry out other EDA and analysis if you wish for more practice.
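# A hedged extra-EDA sketch (column names follow the standard UCI day.csv
# schema, where 'cnt' holds the daily ride count):
df['cnt'].hist(bins=30)   # distribution of daily counts
plt.title('Distribution of daily ride counts')
plt.show()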
# You can also carry this out in a Jupyter Notebook. | 2.875 | 3 |
ardublocklyserver/uploader.py | bijij/ardublockly | 0 | 12771004 | <filename>ardublocklyserver/uploader.py
import subprocess
import os
class uploader:
# constants
uploaderName = "avrdude.exe"
compilerName = "arduino-builder.exe"
def LoadSketchWithDefaults(self, sketchName, builderConfigFilePath):
self.sketchName = sketchName
self.builderConfigFilePath = builderConfigFilePath
def LoadSketch(self, sketchName, hardwarePath, tools, hardwareTools, libraryPath, outputDirectory, qualifiedArduinoName = "arduino:avr:uno"):
output = subprocess.check_output([os.path.join("buildingTools",self.compilerName),
"-hardware", os.path.join("buildingTools",hardwarePath),
"-tools", os.path.join("buildingTools",tools),
"-tools", os.path.join("buildingTools",hardwareTools),
"-libraries", os.path.join("buildingTools",libraryPath),
"-fqbn", qualifiedArduinoName,
"-build-path", outputDirectory,
sketchName
])
return(output)
def UploadSketch(self, hexName, arduinoArchitecture, arduinoPort, uploaderConfigFilePath = "avrdude.conf", verbosity = 0):
hexProcessing = "flash:w:" + hexName + ":i"
        cmd = [os.path.join("buildingTools", self.uploaderName)]
        # each -v must be its own argv entry; appending " -v" to the
        # executable path would make subprocess look for a program
        # literally named "avrdude.exe -v"
        cmd.extend(["-v"] * verbosity)
        output = subprocess.check_output(cmd + [
"-p", arduinoArchitecture,
"-c", "arduino",
"-C", os.path.join("buildingTools",uploaderConfigFilePath),
"-P", arduinoPort,
"-U", hexProcessing
])
return(output)
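    # Hedged usage sketch (all paths, the board name and the port below are
    # hypothetical; they depend on your local Arduino toolchain layout):
    #     up = uploader()
    #     up.LoadSketch("Blink.ino", "hardware", "tools-builder",
    #                   "hardware/tools/avr", "libraries", "build_out")
    #     up.UploadSketch("build_out/Blink.ino.hex", "atmega328p", "COM3")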
| 2.4375 | 2 |
hw4/mv4.py | iaxat/MMIS3500 | 0 | 12771005 | file = open("AAPL.txt", "r")
lines = file.readlines()
prices = []
for line in lines:
price = float(line)
prices.append(price)
print(price)
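# Hedged extension: the file name (mv4) suggests a moving average may be the
# goal; assuming a 4-day window, one straightforward version is:
window = 4
moving_avgs = [sum(prices[i:i + window]) / window
               for i in range(len(prices) - window + 1)]
print(moving_avgs)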
print(prices) | 3.40625 | 3 |
file handling/Count the Number of Blank Spaces in a Text File.py | ZephyrAveryl777/Python-Programs | 6 | 12771006 | <filename>file handling/Count the Number of Blank Spaces in a Text File.py
'''
Problem Description:
--------------------
The program reads a file and counts
the number of blank spaces in a text file.
'''
print(__doc__)
print('-'*25)
fileName=input('Enter file name: ')
k = 0
with open(fileName, 'r') as f:
    for line in f:
        # split() strips all whitespace, so the original inner loops could
        # never see a space; count the characters of each line directly
        for letter in line:
            if letter == ' ':
                k += 1
print("Blank space occurs in text file '%s' %d times" % (fileName, k)) | 3.953125 | 4 |
src/data/dicom_conversion.py | DonDzundza/hecktor | 0 | 12771007 | <gh_stars>0
import warnings
from os.path import join
from datetime import time, datetime
import numpy as np
from skimage.draw import polygon
import SimpleITK as sitk
import pydicom as pdcm
from pydicom.tag import Tag
def convert_dicom_to_nifty(input_filepaths,
output_folder,
modality='CT',
sitk_writer=None,
rtstruct_file=None,
labels_rtstruct=['GTVt'],
patient_weight_from_ct=None,
extension='.nii',
dtype_image=np.float32,
dtype_mask=np.uint8):
"""Function to convert the dicom files contained in input_filepaths to one
NIFTI image.
Args:
input_filepaths (list): list of the dicom paths
output_folder (str): path to the output folder where to store the
NIFTI file.
modality (str, optional): The modality of the DICOM, it is used to
obtain the correct physical values
(Hounsfield unit for the CT and SUV for the
PT). Defaults to 'CT'.
sitk_writer (sitk.WriteImage(), optional): The SimpleITK object used
to write an array to the
NIFTI format. Defaults to
None.
rtstruct_file (str, optional): Path to the RTSTRUCT file associated
with the current image. Defaults to
None.
labels_rtstruct (list, optional): List of label to extract from the
RTSTRUCT. Defaults to ['GTVt'].
patient_weight_from_ct (float, optional): If the patient's weight is
missing from the PT DICOM
it can be provided through
this argument. Defaults to
None.
extension (str, optional): The extension in which to save the NIFTI.
Defaults to '.nii'.
dtype_image (numpy.dtype, optional): The dtype in which to save the
image. Defaults to np.float32.
dtype_mask (numpy.dtype, optional): The dtype in which to save the
segmentation. Defaults to np.uint8.
Raises:
        MissingWeightException: Raised when the patient's weight, needed
                                to compute the SUV, is missing from the PT.
RuntimeError: Error to alert when one or more slices are missing
ValueError: Raised when a modality or a unit (for the PT) is not
handled.
Returns:
numpy.array: The numpy image, used to compute the bounding boxes
"""
slices = [pdcm.read_file(dcm) for dcm in input_filepaths]
slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
if modality == 'PT':
if slices[0].PatientWeight is None:
if hasattr(slices[0], 'PatientsWeight'):
patient_weight = float(slices[0].PatientsWeight)
elif patient_weight_from_ct is not None:
patient_weight = patient_weight_from_ct
else:
raise MissingWeightException(
'Cannot compute SUV the weight is missing')
else:
patient_weight = float(slices[0].PatientWeight)
    # Check that all the slices come from the same series
    serie_uid = slices[0].SeriesInstanceUID
    if not all(s.SeriesInstanceUID == serie_uid for s in slices):
        raise RuntimeError('A slice comes from another series')
axial_positions = np.asarray([k.ImagePositionPatient[2] for k in slices])
# Compute redundant slice positions
ind2rm = [
ind for ind in range(len(axial_positions))
if axial_positions[ind] == axial_positions[ind - 1]
]
# Check if there is redundancy in slice positions and remove them
if len(ind2rm) > 0:
slices = [k for i, k in enumerate(slices) if i not in ind2rm]
axial_positions = np.asarray(
[k.ImagePositionPatient[2] for k in slices])
slice_spacing = (slices[1].ImagePositionPatient[2] -
slices[0].ImagePositionPatient[2])
pixel_spacing = np.asarray([
slices[0].PixelSpacing[0],
slices[0].PixelSpacing[1],
slice_spacing,
])
if modality == 'CT':
np_image = get_physical_values_ct(slices, dtype=dtype_image)
elif modality == 'PT':
np_image = get_physical_values_pt(slices,
patient_weight,
dtype=dtype_image)
else:
raise ValueError('The modality {} is not supported'.format(modality))
position_final_slice = (
len(slices) - 1) * slice_spacing + slices[0].ImagePositionPatient[2]
# Test whether some slices are missing
if not is_approx_equal(position_final_slice,
float(slices[-1].ImagePositionPatient[2])):
if (position_final_slice - axial_positions[-1]) / slice_spacing < 1.5:
# If only one slice is missing
diff = np.asarray([
not is_approx_equal(
float(axial_positions[ind]) -
float(axial_positions[ind - 1]) - slice_spacing, 0)
for ind in range(1, len(axial_positions))
])
ind2interp = int(np.where(diff)[0])
new_slice = (np_image[:, :, ind2interp] +
np_image[:, :, ind2interp + 1]) * 0.5
new_slice = new_slice[..., np.newaxis]
np_image = np.concatenate(
(np_image[..., :ind2interp], new_slice, np_image[...,
ind2interp:]),
axis=2)
warnings.warn(
"One slice is missing, we replaced it by linear interpolation")
else:
# if more than one slice are missing
raise RuntimeError('Multiple slices are missing')
image_position_patient = [float(k) for k in slices[0].ImagePositionPatient]
sitk_image = get_sitk_volume_from_np(np_image, pixel_spacing,
image_position_patient)
output_filepath = join(
output_folder,
correct_patient_name(str(slices[0].PatientName)) + '_' +
modality.lower() + extension)
sitk_writer.SetFileName(output_filepath)
sitk_writer.Execute(sitk_image)
if rtstruct_file is not None:
masks = get_masks(rtstruct_file,
labels=labels_rtstruct,
image_position_patient=image_position_patient,
axial_positions=axial_positions,
pixel_spacing=pixel_spacing,
shape=np_image.shape,
dtype=dtype_image)
for label, np_mask in masks:
output_filepath_mask = output_filepath.split(
'.')[0] + '_' + label.lower() + extension
sitk_mask = get_sitk_volume_from_np(np_mask, pixel_spacing,
image_position_patient)
sitk_writer.SetFileName(output_filepath_mask)
sitk_writer.Execute(sitk_mask)
return np.transpose(np_image,
(1, 0, 2)), pixel_spacing, image_position_patient
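# Hedged usage sketch (paths are hypothetical; the writer object matches the
# SetFileName/Execute calls used above):
#     import glob
#     writer = sitk.ImageFileWriter()
#     ct_files = sorted(glob.glob('/data/patient01/CT/*.dcm'))
#     convert_dicom_to_nifty(ct_files, '/data/out', modality='CT',
#                            sitk_writer=writer,
#                            rtstruct_file='/data/patient01/RS.dcm')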
def get_sitk_volume_from_np(np_image, pixel_spacing, image_position_patient):
trans = (2, 0, 1)
sitk_image = sitk.GetImageFromArray(np.transpose(np_image, trans))
sitk_image.SetSpacing(pixel_spacing)
sitk_image.SetOrigin(image_position_patient)
return sitk_image
def is_approx_equal(x, y, tolerance=0.05):
return abs(x - y) <= tolerance
class MissingWeightException(RuntimeError):
pass
def correct_patient_name(patient_name):
output = patient_name.replace("HN-CHUS-", "CHUS")
output = output.replace("HN-CHUM-", "CHUM")
output = output.replace("HN-HGJ-", "CHGJ")
output = output.replace("HN-HMR-", "CHMR")
output = output.replace("HN-CHUV-", "CHUV")
return output
def get_masks(rtstruct_file,
labels=['GTVt', 'GTVn'],
image_position_patient=None,
axial_positions=None,
pixel_spacing=None,
shape=None,
dtype=np.int8):
contours = read_structure(rtstruct_file, labels=labels)
return get_mask_from_contour(contours,
image_position_patient,
axial_positions,
pixel_spacing,
shape,
dtype=dtype)
def read_structure(rtstruct_file, labels=['GTVt', 'GTVn']):
structure = pdcm.read_file(rtstruct_file)
contours = []
for i, roi_seq in enumerate(structure.StructureSetROISequence):
contour = {}
for label in labels:
if roi_seq.ROIName == label:
contour['color'] = structure.ROIContourSequence[
i].ROIDisplayColor
contour['number'] = structure.ROIContourSequence[
i].ReferencedROINumber
contour['name'] = roi_seq.ROIName
assert contour['number'] == roi_seq.ROINumber
contour['contours'] = [
s.ContourData
for s in structure.ROIContourSequence[i].ContourSequence
]
contours.append(contour)
return contours
def get_mask_from_contour(contours,
image_position_patient,
axial_positions,
pixel_spacing,
shape,
dtype=np.uint8):
z = np.asarray(axial_positions)
pos_r = image_position_patient[1]
spacing_r = pixel_spacing[1]
pos_c = image_position_patient[0]
spacing_c = pixel_spacing[0]
output = []
for con in contours:
mask = np.zeros(shape, dtype=dtype)
for current in con['contours']:
nodes = np.array(current).reshape((-1, 3))
assert np.amax(np.abs(np.diff(nodes[:, 2]))) == 0
z_index = np.where((nodes[0, 2] - 0.001 < z)
& (z < nodes[0, 2] + 0.001))[0][0]
r = (nodes[:, 1] - pos_r) / spacing_r
c = (nodes[:, 0] - pos_c) / spacing_c
rr, cc = polygon(r, c)
if len(rr) > 0 and len(cc) > 0:
if np.max(rr) > 512 or np.max(cc) > 512:
raise Exception("The RTSTRUCT file is compromised")
mask[rr, cc, z_index] = 1
output.append((con['name'], mask))
return output
def get_physical_values_ct(slices, dtype=np.float32):
image = list()
for s in slices:
image.append(
float(s.RescaleSlope) * s.pixel_array + float(s.RescaleIntercept))
return np.stack(image, axis=-1).astype(dtype)
def get_physical_values_pt_old(slices):
units = slices[0].Units
s = slices[0]
if units == 'BQML':
datetime_acquisition = datetime.strptime(
s[Tag(0x00080022)].value + s[Tag(0x00080032)].value.split('.')[0],
"%Y%m%d%H%M%S")
datetime_serie = datetime.strptime(
s[Tag(0x00080021)].value + s[Tag(0x00080031)].value.split('.')[0],
"%Y%m%d%H%M%S")
if datetime_serie < datetime_acquisition and datetime_serie > datetime(
1950, 1, 1):
pass
else:
pass
return get_suv_from_bqml(slices)
elif units == 'CNTS':
return get_suv_philips(slices)
else:
raise ValueError('The {} units is not handled'.format(units))
def get_physical_values_pt(slices, patient_weight, dtype=np.float32):
s = slices[0]
units = s.Units
if units == 'BQML':
acquisition_datetime = datetime.strptime(
s[Tag(0x00080022)].value + s[Tag(0x00080032)].value.split('.')[0],
"%Y%m%d%H%M%S")
serie_datetime = datetime.strptime(
s[Tag(0x00080021)].value + s[Tag(0x00080031)].value.split('.')[0],
"%Y%m%d%H%M%S")
try:
if (serie_datetime <= acquisition_datetime) and (
serie_datetime > datetime(1950, 1, 1)):
scan_datetime = serie_datetime
else:
scan_datetime_value = s[Tag(0x0009100d)].value
if isinstance(scan_datetime_value, bytes):
scan_datetime_str = scan_datetime_value.decode(
"utf-8").split('.')[0]
elif isinstance(scan_datetime_value, str):
scan_datetime_str = scan_datetime_value.split('.')[0]
else:
raise ValueError(
"The value of scandatetime is not handled")
scan_datetime = datetime.strptime(scan_datetime_str,
"%Y%m%d%H%M%S")
start_time_str = s.RadiopharmaceuticalInformationSequence[
0].RadiopharmaceuticalStartTime
start_time = time(int(start_time_str[0:2]),
int(start_time_str[2:4]),
int(start_time_str[4:6]))
start_datetime = datetime.combine(scan_datetime.date(), start_time)
decay_time = (scan_datetime - start_datetime).total_seconds()
except KeyError:
warnings.warn("Estimation of time decay for SUV"
" computation from average parameters")
decay_time = 1.75 * 3600 # From Martin's code
return get_suv_from_bqml(slices,
decay_time,
patient_weight,
dtype=dtype)
elif units == 'CNTS':
return get_suv_philips(slices, dtype=dtype)
else:
raise ValueError('The {} units is not handled'.format(units))
def get_suv_philips(slices, dtype=np.float32):
image = list()
suv_scale_factor_tag = Tag(0x70531000)
for s in slices:
im = (float(s.RescaleSlope) * s.pixel_array +
float(s.RescaleIntercept)) * float(s[suv_scale_factor_tag].value)
image.append(im)
return np.stack(image, axis=-1).astype(dtype)
def get_suv_from_bqml(slices, decay_time, patient_weight, dtype=np.float32):
# Get SUV from raw PET
image = list()
for s in slices:
pet = float(s.RescaleSlope) * s.pixel_array + float(s.RescaleIntercept)
half_life = float(
s.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)
total_dose = float(
s.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
decay = 2**(-decay_time / half_life)
actual_activity = total_dose * decay
im = pet * patient_weight * 1000 / actual_activity
image.append(im)
return np.stack(image, axis=-1).astype(dtype)
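# Hedged sanity check of the body-weight SUV formula above, with made-up
# numbers: a voxel of 5000 Bq/ml in a 70 kg patient with a decay-corrected
# dose of 3.5e8 Bq gives SUV = 5000 * 70 * 1000 / 3.5e8 = 1.0, i.e. uptake
# equal to a uniform spread of the tracer over the whole body.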
| 2.265625 | 2 |
deep_audio_features/utils/load_dataset.py | nikosmichas/deep_audio_features | 0 | 12771008 | import os
import numpy as np
import glob
from sklearn.model_selection import StratifiedShuffleSplit
import sys  # os is already imported above
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.bin import config
import wave
import contextlib
def load(folders=None, test_val=[0.2, 0.2], test=True, validation=True):
"""Loads a dataset from some folders.
Arguments
----------
folders {list} : A list of folders containing all samples.
test_val {list} : A list containing the percentages for test and validation split.
test {boolean} : If False only train samples and labels are returned.
validation {boolean} : If False only train and test samples and
labels are returned.
Returns
--------
X_train {list} : All filenames for train.
y_train {list} : Labels for train.
X_test {list} : Filenames for test.
    y_test {list} : Labels for test.
if `validation` is `True` also returns the following:
X_valid {list} : Filenames for validation.
y_valid {list} : Labels for validation.
"""
if folders is None:
raise AssertionError()
filenames = []
labels = []
# Match filenames with labels
for folder in folders:
for f in glob.iglob(os.path.join(folder, '*.wav')):
filenames.append(f)
labels.append(folder)
# Convert labels to int
folder2idx, idx2folder = folders_mapping(folders=folders)
labels = list(map(lambda x: folder2idx[x], labels))
# Split
if test is False and validation is False:
# Use this data only to train
return filenames, labels
# Get percentages
test_p, val_p = test_val
X_train_, y_train_ = [], []
X_test, y_test = [], []
X_train, y_train = [], []
X_val, y_val = [], []
# First split
sss = StratifiedShuffleSplit(n_splits=1, test_size=test_p, random_state=0)
train_idx, test_idx = next(
sss.split(filenames, labels))
# Train
for idx in train_idx:
X_train_.append(filenames[idx])
y_train_.append(labels[idx])
# Test
for idx in test_idx:
X_test.append(filenames[idx])
y_test.append(labels[idx])
# If validation split is not needed return
if validation is False:
return X_train_, y_train_, X_test, y_test
    # If validation is True, split again
sss = StratifiedShuffleSplit(n_splits=1, test_size=val_p, random_state=0)
train_idx, val_idx = next(sss.split(X_train_, y_train_))
# Train after both splits
for idx in train_idx:
X_train.append(X_train_[idx])
y_train.append(y_train_[idx])
# validation
for idx in val_idx:
X_val.append(X_train_[idx])
y_val.append(y_train_[idx])
return X_train, y_train, X_test, y_test, X_val, y_val
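# Hedged usage sketch (folder names are hypothetical; each folder is one
# class holding its .wav files, matching the glob above):
#     X_tr, y_tr, X_te, y_te, X_va, y_va = load(
#         folders=['data/dog', 'data/cat'], test_val=[0.2, 0.2])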
def compute_max_seq_len(reload=False, X=None, folders=None):
"""Return max sequence length for all files."""
# TAKE THE WINDOW STEPS
if reload is True:
if folders is None:
raise AssertionError()
# Get all sample labels
X_train, _, X_test, _, X_val, _ = load(folders=folders)
X = X_train+X_test+X_val
# DEFAULT
else:
if X is None:
raise AssertionError()
# Calculate and print max sequence number
print(config.HOP_LENGTH, config.WINDOW_LENGTH)
lengths = []
for f in X:
with contextlib.closing(wave.open(f, 'r')) as fp:
frames = fp.getnframes()
fs = fp.getframerate()
duration = frames / float(fs)
length = int((duration -
(config.WINDOW_LENGTH - config.HOP_LENGTH)) / \
(config.HOP_LENGTH) + 1)
lengths.append(length)
max_seq = np.max(lengths)
print(f"Max sequence length in dataset: {max_seq}")
return max_seq
def folders_mapping(folders):
"""Return a mapping from folder to class and a mapping from class to folder."""
folder2idx = {}
idx2folder = {}
for idx, folder in enumerate(folders):
folder2idx[folder] = idx
idx2folder[idx] = folder
return folder2idx, idx2folder
def get_categories_population_dictionary(labels, n_classes=9):
"""Return a mapping (category) -> Population."""
mapping = {i: 0 for i in range(0, n_classes)}
# Iterate each file and map
for l in labels:
if l >= n_classes:
continue
mapping[l] += 1
return mapping
| 2.640625 | 3 |
algorithm/search/frequency_queries/frequency_queries.py | delaanthonio/hackerrank | 1 | 12771009 | #!/usr/bin/env python3
"""
:problem: https://www.hackerrank.com/challenges/frequency-queries/problem
"""
from typing import List, Tuple
from collections import Counter
def process_queries(queries: List[Tuple[int, int]]) -> List[int]:
"""Execute queries and report whether a value with a given count exists."""
values = Counter()
counts = Counter()
results = []
for query, val in queries:
if query == 1:
counts[values[val]] -= 1
counts[values[val] + 1] += 1
values[val] += 1
elif query == 2:
if val in values and values[val]:
counts[values[val]] -= 1
counts[values[val] - 1] += 1
values[val] -= 1
else:
results.append(int(bool(counts[val])))
return results
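# Hedged worked example (the HackerRank sample case): for the queries
# [(1, 5), (1, 6), (3, 2), (1, 10), (1, 10), (1, 6), (2, 5), (3, 2)]
# process_queries returns [0, 1] -- no value has frequency 2 at the first
# type-3 query, but 10 (and 6) do at the second.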
def main():
q = int(input())
queries = []
for _ in range(q):
query, val = [int(x) for x in input().split()]
queries.append((query, val))
results = process_queries(queries)
print(*results, sep='\n')
if __name__ == '__main__':
main()
| 3.953125 | 4 |
sequence_search/consumer/settings/__init__.py | RNAcentral/sequence_search | 2 | 12771010 | """
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
"""
Any setting defined here can be overridden by:
    Setting the appropriate environment variable, e.g. to override FOOBAR, `export FOOBAR="whatever"`
    (the substitution function below reads the unprefixed variable name).
This is useful in production for secrets you do not wish to save in code and
also plays nicely with docker(-compose). Settings will attempt to convert environment variables to match the
type of the value here. See also activate.settings.sh.
Or, passing the custom setting as a keyword argument when initialising settings (useful when testing)
"""
# consumer folder, where media, static, templates and other subfolders are located
PROJECT_ROOT = pathlib.Path(__file__).parent.parent
# minimum query sequence length
MIN_LENGTH = 10
# maximum query sequence length
MAX_LENGTH = 7000
# results expiration time
EXPIRATION = 60 * 60 * 24 * 7 # seconds
# maximum time to run nhmmer
MAX_RUN_TIME = 5 * 60 # seconds
ENVIRONMENT = os.getenv('ENVIRONMENT', 'LOCAL')
# add settings from environment-specific files
if ENVIRONMENT == "LOCAL":
from .local import *
elif ENVIRONMENT == "TEST":
from .test import *
elif ENVIRONMENT == "DOCKER-COMPOSE":
from .docker_compose import *
elif ENVIRONMENT == "PRODUCTION":
from .production import *
# hostname to listen on
HOST = '0.0.0.0'
# TCP port for the server to listen on
PORT = 8000
def substitute_environment_variables():
"""
Substitute environment variables into settings.
This function is stolen from the default project, generated by
aiohttp-devtools 'adev start' command.
"""
for attr_name in globals():
env_var = os.getenv(attr_name, None)
if attr_name.startswith('_') or attr_name.upper() != attr_name:
continue
elif env_var is not None:
# convert environment variable to the same type as the variable in settings
original_type = type(globals()[attr_name])
if issubclass(original_type, bool):
env_var = env_var.upper() in ('1', 'TRUE')
elif issubclass(original_type, int):
env_var = int(env_var)
elif issubclass(original_type, float):
env_var = float(env_var)
elif issubclass(original_type, pathlib.Path):
env_var = pathlib.Path(env_var)
elif issubclass(original_type, bytes):
env_var = env_var.encode()
globals()[attr_name] = env_var
substitute_environment_variables()
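# Hedged example of the override mechanism above: running
#     export MAX_LENGTH=10000
# before starting the consumer raises the query-length cap, because
# MAX_LENGTH is an int here and the env value is converted with int().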
| 1.851563 | 2 |
tf2/train.py | lihebi/AdvAE | 1 | 12771011 | <filename>tf2/train.py
import tensorflow as tf
from tensorflow.keras.layers import Input, Flatten, Dense, Activation
from tensorflow.keras.layers import Conv2D, ReLU, MaxPool2D, Softmax, Reshape
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
import numpy as np
from cleverhans.future.tf2.attacks import fast_gradient_method, projected_gradient_descent, spsa
from tqdm import tqdm
import time
import datetime
import functools
import tensorflow.keras.backend as K
from data_utils import load_mnist, sample_and_view, load_mnist_ds, load_cifar10_ds
from model import get_Madry_model, get_LeNet5, dense_AE, CNN_AE, train_AE, train_CNN, test_AE
from utils import tfinit, clear_tqdm  # clear_tqdm is assumed to live in utils, next to tfinit
import sys
sys.path.append('./tf_models')
from official.vision.image_classification import resnet_cifar_model
def keras_train(model, opt, ds, steps_per_epoch, train_epochs):
model.compile(
loss='categorical_crossentropy',
optimizer=opt,
metrics=(['categorical_accuracy']))
model.fit(ds,
epochs=train_epochs,
              steps_per_epoch=steps_per_epoch,
# callbacks=callbacks,
# validation_steps=num_eval_steps,
# validation_data=validation_data,
# validation_freq=flags_obj.epochs_between_evals,
verbose=1)
def test_keras_train():
batch_size = 128
ds, eval_ds, train_steps, test_steps = load_cifar10_ds(batch_size)
    model = resnet_cifar_model.resnet20(training=None)
    # the optimizer was missing here; SGD+momentum mirrors cifar10_train below
    opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
    # keras training
    keras_train(model, opt, ds, train_steps, 100)
def train_metric_step(step, metrics):
print('[step {}] loss: {:.5f}, acc: {:.5f}'
.format(step,
metrics['train_loss'].result(),
metrics['train_acc'].result()))
# about 0.0035
tf.summary.scalar('loss', metrics['train_loss'].result(), step=step)
tf.summary.scalar('accuracy', metrics['train_acc'].result(), step=step)
tf.summary.scalar('time', metrics['train_time'].result(), step=step)
metrics['train_loss'].reset_states()
metrics['train_acc'].reset_states()
def test_metric_step(step, metrics):
tf.summary.scalar('loss', metrics['test_loss'].result(), step=step)
tf.summary.scalar('accuracy', metrics['test_acc'].result(), step=step)
# used only to monitor the time wasted
tf.summary.scalar('time', metrics['test_time'].result(), step=step)
print('[test in step {}] loss: {}, acc: {}'.format(step,
metrics['test_loss'].result(),
metrics['test_acc'].result()))
metrics['test_loss'].reset_states()
metrics['test_acc'].reset_states()
def create_ckpt(model, opt, ID):
# FIXME keyword arguments names are random?
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, model=model)
# use logname as path
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts/' + ID, max_to_keep=3)
# FIXME sanity check for optimizer state restore
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
else:
print("Initializing from scratch.")
return ckpt, manager
def create_metrics():
# FIXME I need both clean and adversarial accuracy, for both train and test
metrics = {
'train_loss': tf.keras.metrics.Mean('train_loss', dtype=tf.float32),
'train_acc': tf.keras.metrics.CategoricalAccuracy('train_accuracy'),
'train_time': tf.keras.metrics.Sum('train time', dtype=tf.float32),
'test_loss': tf.keras.metrics.Mean('test_loss', dtype=tf.float32),
'test_acc': tf.keras.metrics.CategoricalAccuracy('test_accuracy'),
'test_time': tf.keras.metrics.Sum('test time', dtype=tf.float32)}
return metrics
def create_summary_writers(ID):
train_log_dir = 'logs/gradient_tape/' + ID + '/train'
test_log_dir = 'logs/gradient_tape/' + ID + '/test'
# This will append instead of overwrite
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
return train_summary_writer, test_summary_writer
def FGSM(model, x, y, ε=0.3, clip_min=0., clip_max=1.):
adv = fast_gradient_method(model, x,
ε, np.inf,
clip_min=clip_min,
clip_max=clip_max,
y=np.argmax(y, 1))
return adv
def PGD(model, x, y, ε=0.3, step_size=0.01, iters=40, clip_min=0., clip_max=1.):
# FIXME rand_init rand_minmax
# FIXME parameters
return projected_gradient_descent(model, x, ε, step_size, iters,
np.inf,
clip_min=clip_min,
clip_max=clip_max,
y=np.argmax(y, 1),
# FIXME this is throwing errors
sanity_checks=False)
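# Hedged usage sketch: craft adversarial examples for one MNIST batch
# (assumes x in [0, 1] and one-hot y, matching the defaults above):
#     adv = PGD(model, x, y)          # eps=0.3, 40 steps of size 0.01
#     adv_logits = model(adv)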
def clean_train_step(model, params, opt, loss_fn, x, y, metrics):
t = time.time()
with tf.GradientTape() as tape:
with K.learning_phase_scope(1):
logits = model(x)
loss = loss_fn(y, logits)
gradients = tape.gradient(loss, params)
opt.apply_gradients(zip(gradients, params))
metrics['train_time'].update_state(time.time() - t)
metrics['train_acc'].update_state(y, logits)
metrics['train_loss'].update_state(loss)
def clean_test_step(model, loss_fn, x, y, metrics):
t = time.time()
with K.learning_phase_scope(0):
logits = model(x)
loss = loss_fn(y, logits)
metrics['test_loss'].update_state(loss)
metrics['test_acc'].update_state(y, logits)
metrics['test_time'].update_state(time.time() - t)
def get_adv_train_step(attack_fn):
def step_fn(model, params, opt,
loss_fn, x, y, metrics):
t = time.time()
# FIXME Do not collect statistics during attack? Or maybe do, because we're
# using the same minibatch
with K.learning_phase_scope(0):
adv = attack_fn(model, x, y)
with tf.GradientTape() as tape:
with K.learning_phase_scope(1):
adv_logits = model(adv)
nat_logits = model(x)
adv_loss = loss_fn(y, adv_logits)
nat_loss = loss_fn(y, nat_logits)
λ = 1
loss = adv_loss + λ * nat_loss
gradients = tape.gradient(loss, params)
opt.apply_gradients(zip(gradients, params))
metrics['train_time'].update_state(time.time() - t)
# NOTE: This is adv loss/acc
metrics['train_loss'].update_state(adv_loss)
metrics['train_acc'].update_state(y, adv_logits)
return step_fn
def get_adv_test_step(attack_fn):
def step_fn(model, loss_fn, x, y, metrics):
t = time.time()
with K.learning_phase_scope(0):
adv = attack_fn(model, x, y)
adv_logits = model(adv)
adv_loss = loss_fn(y, adv_logits)
# NOTE: this is adv loss/acc
metrics['test_loss'].update_state(adv_loss)
metrics['test_acc'].update_state(y, adv_logits)
metrics['test_time'].update_state(time.time() - t)
return step_fn
def free_train_stepS(model, params, opt, loss_fn, x, y, metrics):
# CAUTION this updates opt model m=30 times, so technically it is m
# steps. However, the running time cost is one step
#
# FIXME parameters
m = 30
step_size = 0.01
ε = 0.3
# initial
δ = 0
adv = x
for i in range(m):
with tf.GradientTape(persistent=True) as tape:
tape.watch(adv)
with K.learning_phase_scope(1):
logits = model(adv)
                loss = loss_fn(y, logits)  # (y_true, y_pred) argument order
# FIXME this gradient computation might be slow. Consider using tf.function to wrap it
# update model
gradients = tape.gradient(loss, model.trainable_variables)
opt.apply_gradients(zip(gradients, model.trainable_variables))
# update adv
x_grads = tape.gradient(loss, adv)
# FIXME this should not be step_size
δ += tf.sign(x_grads) * ε
# δ += tf.sign(x_grads) * step_size
δ = tf.clip_by_value(δ, -ε, ε)
# this will create new tensors
adv = tf.clip_by_value(x + δ, 0, 1)
metrics['train_loss'].update_state(loss)
# CAUTION FIXME since we do not have a attack here, we don't have
# training adv accuracy. We can only get validation accuracy
metrics['train_acc'].update_state(y, logits)
# FIXME I should not have to del this explicitly
del tape
def custom_train(model, params, opt,
train_step_fn, test_step_fn,
ds, test_ds,
ID, config):
"""
FIXME params = model.trainable_variables
TODO optimizer
"""
assert isinstance(ds, tf.data.Dataset)
assert isinstance(test_ds, tf.data.Dataset)
# FIXME opt should be created elsewhere
# opt = Adam(0.001)
ckpt, manager = create_ckpt(model, opt, ID)
train_summary_writer, test_summary_writer = create_summary_writers(ID)
metrics = create_metrics()
loss_fn = tf.keras.losses.CategoricalCrossentropy()
for x, y in ds:
if int(ckpt.step) > config['max_steps']:
print('Finished {} steps larger than max step {}. Returning ..'
.format(ckpt.step, config['max_steps']))
return
train_step_fn(model, params, opt, loss_fn, x, y, metrics)
ckpt.step.assign_add(1)
if int(ckpt.step) % config['num_train_summary_step'] == 0:
with train_summary_writer.as_default():
train_metric_step(int(ckpt.step), metrics)
if int(ckpt.step) % config['num_test_summary_step'] == 0:
# about 20 seconds
print('performing test phase ..')
# TODO since test_ds runs only once, add tqdm support
for x,y in test_ds:
test_step_fn(model, loss_fn, x, y, metrics)
with test_summary_writer.as_default():
test_metric_step(int(ckpt.step), metrics)
# save check point at the end, to make sure the above branches are
# always performed completely. If interrupted, it will get re-performed.
#
# FIXME if interrupted, summary steps might be re-run, and will have overlap
if int(ckpt.step) % config['num_ckpt_step'] == 0:
# FIXME the dataset status is not saved
save_path = manager.save()
print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
def mnist_train(model, ID, config):
batch_size = config['batch_size']
ds, eval_ds, train_steps, test_steps = load_mnist_ds(batch_size)
# FIXME get from config, or related to batch_size
opt = Adam(1e-3)
custom_train(model, model.trainable_variables, opt,
clean_train_step, clean_test_step,
ds, eval_ds,
ID, config)
def mnist_advtrain(model, ID, config):
batch_size = config['batch_size']
ds, eval_ds, train_steps, test_steps = load_mnist_ds(batch_size)
# FIXME 1e-3 seems to work as well this time
opt = Adam(1e-4)
attack_fn = functools.partial(PGD, ε=0.3, step_size=0.01, iters=40, clip_min=0., clip_max=1.)
custom_train(model, model.trainable_variables, opt,
get_adv_train_step(attack_fn), get_adv_test_step(attack_fn),
ds, eval_ds,
ID, config)
def cifar10_train(model, ID, config):
batch_size = config['batch_size']
ds, eval_ds, train_steps, test_steps = load_cifar10_ds(batch_size)
# LR_SCHEDULE = [(0.1, 91), (0.01, 136), (0.001, 182)]
# lr_schedule = [(0, 0.1), (40000, 0.01), (60000, 0.001)]
# lr_schedule = [(0, 0.1), (10000, 0.01), (20000, 0.001)]
#
boundaries = [10000, 20000]
values = [0.1, 0.01, 0.001]
learning_rate_fn = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
# Later, whenever we perform an optimization step, we pass in the step.
# step = tf.Variable(0, trainable=False)
# learning_rate = learning_rate_fn(step)
# opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
opt = tf.keras.optimizers.SGD(learning_rate=learning_rate_fn, momentum=0.9)
custom_train(model, model.trainable_variables, opt,
clean_train_step, clean_test_step,
ds, eval_ds,
ID, config)
def cifar10_advtrain(model, ID, config):
batch_size = config['batch_size']
ds, eval_ds, train_steps, test_steps = load_cifar10_ds(batch_size)
# Madry:
# "step_size_schedule": [[0, 0.1], [40000, 0.01], [60000, 0.001]],
boundaries = [10000, 20000]
values = [0.1, 0.01, 0.001]
learning_rate_fn = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
opt = tf.keras.optimizers.SGD(learning_rate=learning_rate_fn, momentum=0.9)
attack_fn = functools.partial(PGD, ε=8., step_size=2., iters=7, clip_min=0., clip_max=255.)
custom_train(model, model.trainable_variables, opt,
get_adv_train_step(attack_fn), get_adv_test_step(attack_fn),
ds, eval_ds,
ID, config)
def evaluate_model(model, ds):
# Prepare the training dataset.
batch_size = 64
# FIXME size of dataset
tqdm_total = len(ds[0]) // batch_size
ds = tf.data.Dataset.from_tensor_slices(ds)
ds = ds.shuffle(buffer_size=1024).batch(batch_size)
acc_metric = tf.keras.metrics.CategoricalAccuracy()
clear_tqdm()
for step, (x, y) in enumerate(tqdm(ds, total=tqdm_total)):
# in evaluation mode, set training=False
logits = model(x, training=False)
acc_metric.update_state(y, logits)
if step % 200 == 0:
acc = acc_metric.result()
print('Acc so far: %s' % (float(acc),))
acc = acc_metric.result()
# FIXME I don't need to reset
acc_metric.reset_states()
print('Acc of all data: %s' % (float(acc),))
def evaluate_attack(model, attack_fn, ds):
x, y = ds
print('attacking 10 samples ..')
x_adv = attack_fn(model, x[:10], y[:10])
print('sample:')
sample_and_view(x_adv)
print('labels:', np.argmax(model(x_adv), 1))
# print('evaluating all models')
# evaluate_model(model, (x_adv, y))
batch_size = 64
tqdm_total = len(ds[0]) // batch_size
ds = tf.data.Dataset.from_tensor_slices(ds)
ds = ds.shuffle(buffer_size=1024).batch(batch_size)
acc_metric = tf.keras.metrics.CategoricalAccuracy()
clear_tqdm()
for step, (x, y) in enumerate(tqdm(ds, total=tqdm_total)):
adv = attack_fn(model, x, y)
logits = model(adv)
acc_metric.update_state(y, logits)
if step % 20 == 0:
acc = acc_metric.result()
print('Acc so far: %s' % (float(acc),))
acc = acc_metric.result()
# FIXME I don't need to reset
acc_metric.reset_states()
print('Acc of all data: %s' % (float(acc),))
def do_evaluate_attack(model, dl):
    print('evaluating clean model ..')
evaluate_attack(model, lambda m,x,y: x, dl)
print('evaluating FGSM ..')
evaluate_attack(model, FGSM, dl)
print('evaluating PGD ..')
evaluate_attack(model, PGD, dl)
def get_default_config():
return {
# 'lr': 1e-3,
# 'mixing_fn': lambda nat, adv: 0,
'max_steps': 5000,
# CAUTION: batch size will affect the scale of number of steps
'batch_size': 50,
# FIXME: setting this to 1000 because dataset state is not restored.
# 'num_ckpt_step': 1000,
'num_ckpt_step': 100,
'num_train_summary_step': 20,
# NOTE: running full loop of test data evaluation
'num_test_summary_step': 200}
def test_mnist():
tfinit()
model = get_Madry_model()
model.build((None,28,28,1))
config = get_default_config()
config['batch_size'] = 50
# ID should be related to:
# - batch size
# - learning rate. However, learning rate has schedule, so ...
# - seed
mnist_train(model, 'mnist_clean', config)
mnist_advtrain(model, 'mnist_adv', config)
# evaluate_model(model, eval_ds)
def test_resnet():
tfinit()
model = resnet_cifar_model.resnet20(training=None)
model.build((None,32,32,3))
config = get_default_config()
config['batch_size'] = 128
config['num_test_summary_step'] = 500
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
cifar10_train(model, 'cifar10_clean-{}'.format(current_time), config)
cifar10_advtrain(model, 'cifar10_adv-{}'.format(current_time), config)
# evaluate_model(model, eval_ds)
| 2.59375 | 3 |
players/__init__.py | gdahia/ufba-ai-minesweeper | 0 | 12771012 | <filename>players/__init__.py
import players.player
import players.logical
import players.user
| 1.390625 | 1 |
neorl/core.py | ssimonc/NeoRL | 50 | 12771013 | <reponame>ssimonc/NeoRL
import gym
from .utils import get_json, sample_dataset, LOCAL_JSON_FILE_PATH, DATA_PATH
class EnvData(gym.Env):
def get_dataset(self, task_name_version: str = None, data_type: str = "high", train_num: int = 100,
need_val: bool = True, val_ratio: float = 0.1, path: str = DATA_PATH, use_data_reward: bool = True):
"""
Get dataset from given env.
:param task_name_version: The name and version (if applicable) of the task,
default is the same as `task` while making env
:param data_type: Which type of policy is used to collect data. It should
be one of ["high", "medium", "low"], default to `high`
        :param train_num: The number of trajectories in the training data. Note that
            the number should be less than 10,000, default to `100`
:param need_val: Whether needs to download validation data, default to `True`
:param val_ratio: The ratio of validation data to training data, default to `0.1`
        :param path: The directory to load data from or download it to, default `./data/`
:param use_data_reward: Whether uses default data reward. If false, a customized
reward function should be provided by users while making env
:return train_samples, val_samples
"""
# EXPERT = ["Expert", "expert", "E", "e"]
HIGH = ["High", "high", "H", "h"]
MEDIUM = ["Medium", "medium", "M", "m"]
LOW = ["Low", "low", "L", "l"]
if data_type in HIGH:
data_type = "high"
elif data_type in MEDIUM:
data_type = "medium"
elif data_type in LOW:
data_type = "low"
# elif data_type in EXPERT:
# data_type = "expert"
else:
raise Exception(f"Please check `data_type`, {data_type} is not supported!")
task_name_version = self._name if task_name_version is None else task_name_version
data_json = get_json(LOCAL_JSON_FILE_PATH)
train_samples = sample_dataset(task_name_version, path, train_num, data_json, data_type, use_data_reward,
self._reward_func, "train")
val_samples = None
if need_val:
val_samples = sample_dataset(task_name_version, path, int(train_num * val_ratio), data_json, data_type,
use_data_reward, self._reward_func, "val")
return train_samples, val_samples
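    # Hedged usage sketch (the task name is an example; use an environment
    # you have actually created with neorl.make):
    #     env = neorl.make("HalfCheetah-v3")
    #     train_data, val_data = env.get_dataset(data_type="medium",
    #                                            train_num=100)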
def set_reward_func(self, reward_func):
"""
Users can call this func to set customized reward func.
"""
self._reward_func = reward_func
def get_reward_func(self):
"""
Users can call this func to get customized reward func.
"""
return self._reward_func
def set_name(self, name):
"""
Set name for envs.
"""
self._name = name
def set_done_func(self, done_func):
"""
Users can call this func to set done func.
"""
self._done_func = done_func
def get_done_func(self):
"""
Users can call this func to get done func.
"""
return self._done_func
| 2.703125 | 3 |
Lib/site-packages/altendpy/tests/test_timeit.py | fochoao/cpython | 0 | 12771014 | import altendpy.timeit
def test_runner():
short = altendpy.timeit.Example(statement="time.sleep(0.1)")
long = altendpy.timeit.Example(statement="time.sleep(0.2)")
runner = altendpy.timeit.Runner(
examples=(short, long),
repeats=1,
interleave=True,
cycles=10,
)
runner.run()
assert 1.8 < runner.results[long] / runner.results[short] < 2.2
| 2.421875 | 2 |
models.py | jiwei-dot/VAE | 0 | 12771015 | <gh_stars>0
import torch
import torch.nn as nn
class Conv_BN_ReLU(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super(Conv_BN_ReLU, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.LeakyReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
out = self.relu(x)
return out
class TransposeConv_BN_ReLU(nn.Module):
def __init__(self, in_channels, out_channels, output_padding, stride):
super(TransposeConv_BN_ReLU, self).__init__()
self.tconv = nn.ConvTranspose2d(in_channels, out_channels, 3, stride, 1, output_padding,
bias=False)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.LeakyReLU()
def forward(self, x):
x = self.tconv(x)
x = self.bn(x)
out = self.relu(x)
return out
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.block1 = Conv_BN_ReLU(1, 32, 1)
self.block2 = Conv_BN_ReLU(32, 64, 2)
self.block3 = Conv_BN_ReLU(64, 64, 2)
self.block4 = Conv_BN_ReLU(64, 64, 1)
self.fc1 = nn.Linear(3136, 4)
self.fc2 = nn.Linear(3136, 4)
def forward(self, x):
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = x.view(x.size(0), -1)
mu = self.fc1(x)
log_var = self.fc2(x)
return mu, log_var
class Sample(nn.Module):
    def forward(self, mu, log_var):
        # reparameterization trick: epsilon must be standard normal
        # (randn_like), not uniform, for the Gaussian VAE
        epsilon = torch.randn_like(mu)
        return epsilon * torch.exp(log_var / 2) + mu
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.fc = nn.Linear(4, 3136)
self.block1 = TransposeConv_BN_ReLU(64, 64, 0, 1)
self.block2 = TransposeConv_BN_ReLU(64, 64, 1, 2)
self.block3 = TransposeConv_BN_ReLU(64, 32, 1, 2)
self.tconv = nn.ConvTranspose2d(32, 1, 3, 1, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.fc(x)
x = x.reshape(-1, 64, 7, 7)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.tconv(x)
out = self.sigmoid(x)
return out
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.encoder = Encoder()
self.sample = Sample()
self.decoder = Decoder()
def forward(self, x):
mu, log_var = self.encoder(x)
x = self.sample(mu, log_var)
out = self.decoder(x)
return out, mu, log_var
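# Hedged training-loss sketch: the usual VAE objective is reconstruction
# loss plus the closed-form KL divergence to N(0, I). With F being
# torch.nn.functional and (out, mu, log_var) from VAE.forward:
#     recon = F.binary_cross_entropy(out, x, reduction='sum')
#     kl = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
#     loss = recon + kl   # weighting the KL term is a design choice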
if __name__ == '__main__':
    x = torch.randn(32, 1, 28, 28)
    vae = VAE()
    out, mu, log_var = vae(x)  # forward returns a tuple, not a tensor
    print(out.shape, mu.shape, log_var.shape) | 2.640625 | 3 |
Scripts/builders/monkeypatch_tempdir_cleanup.py | onecent1101/3p-package-source | 4 | 12771016 | #
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import platform
if platform.system() == 'Windows':
from tempfile import TemporaryDirectory
from pathlib import Path
import os
import stat
realTempdirCleanup = TemporaryDirectory.cleanup
def cleanup(self):
"""
Make files writable before removing them
    On Windows with Python < 3.8, TemporaryDirectory() will fail to clean up files that are read-only. Git marks
files in the object store as read-only, so running git clone in a tempdir will fail. This wrapper marks files as
writable before the cleanup runs.
"""
for (dirpath, dirnames, filenames) in os.walk(self.name):
for filename in filenames:
(Path(dirpath) / filename).chmod(stat.S_IWRITE)
realTempdirCleanup(self)
TemporaryDirectory.cleanup = cleanup
| 2.1875 | 2 |
src/pynn/bin/train_lm_lstm.py | enesyugan/yapay-nn | 0 | 12771017 | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License")
import os
import argparse
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from pynn.util import save_object_param
from pynn.net.lm_lstm import SeqLM
from pynn.bin import print_model, train_language_model
parser = argparse.ArgumentParser(description='pynn')
parser.add_argument('--train-seq', help='path to train seq', required=True)
parser.add_argument('--valid-seq', help='path to validation seq', required=True)
parser.add_argument('--n-classes', type=int, required=True)
parser.add_argument('--n-layer', type=int, default=2)
parser.add_argument('--d-model', type=int, default=512)
parser.add_argument('--d-emb', type=int, default=0)
parser.add_argument('--d-project', type=int, default=0)
parser.add_argument('--shared-emb', help='sharing embedding', action='store_true')
parser.add_argument('--no-sek', help='without start and end tokens', action='store_true')
parser.add_argument('--label-smooth', type=float, default=0.1)
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--dropconnect', type=float, default=0.)
parser.add_argument('--emb-drop', type=float, default=0.)
parser.add_argument('--model-path', help='model saving path', default='model')
parser.add_argument('--n-epoch', type=int, default=50)
parser.add_argument('--n-save', type=int, default=5)
parser.add_argument('--n-warmup', help='warm-up steps', type=int, default=6000)
parser.add_argument('--n-const', help='constant steps', type=int, default=0)
parser.add_argument('--n-print', help='inputs per update', type=int, default=40000)
parser.add_argument('--b-input', help='inputs per load', type=int, default=0)
parser.add_argument('--b-sample', help='maximum samples per batch', type=int, default=64)
parser.add_argument('--b-update', help='characters per update', type=int, default=12000)
parser.add_argument('--b-sync', help='steps per update', type=int, default=0)
parser.add_argument('--lr', help='learning rate', type=float, default=0.001)
parser.add_argument('--grad-norm', help='divide gradient by updated tokens', action='store_true')
parser.add_argument('--fp16', help='fp16 or not', action='store_true')
def create_model(args, device):
params = {
'n_vocab': args.n_classes,
'd_model': args.d_model,
'n_layer': args.n_layer,
'd_emb': args.d_emb,
'd_project': args.d_project,
'shared_emb': args.shared_emb,
'dropout': args.dropout,
'dropconnect': args.dropconnect,
'emb_drop': args.emb_drop}
model = SeqLM(**params)
save_object_param(model, params, args.model_path+'/model.cfg')
return model
def train(device, args):
model = create_model(args, device)
print_model(model)
train_language_model(model, args, device)
def train_distributed(device, gpus, args):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
dist.init_process_group("nccl", rank=device, world_size=gpus)
torch.manual_seed(0)
model = create_model(args, device)
if device == 0: print_model(model)
train_language_model(model, args, device, gpus)
dist.destroy_process_group()
if __name__ == '__main__':
args = parser.parse_args()
print(args)
if torch.cuda.device_count() > 1:
gpus = torch.cuda.device_count()
print('Training with distributed data parallel. Number of devices: %d' % gpus)
mp.spawn(train_distributed, nprocs=gpus, args=(gpus, args), join=True)
else:
device = 0 if torch.cuda.is_available() else torch.device('cpu')
train(device, args)
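# Hedged invocation sketch (paths, vocabulary size and model sizes below are
# hypothetical):
#     python train_lm_lstm.py --train-seq data/train.seq \
#         --valid-seq data/valid.seq --n-classes 4003 \
#         --d-model 1024 --n-layer 4 --model-path model/lm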
| 1.960938 | 2 |
Ex009.py | Fernando-Rodrigo/Exercicios | 1 | 12771018 | n = int(input('Digite um número para calcular a tabuada '))
m = 1
print('{} x {:2} = {}'.format(n, 1, n*1))
print('{} x {:2} = {}'.format(n, 2, n*2))
print('{} x {:2} = {}'.format(n, 3, n*3))
print('{} x {:2} = {}'.format(n, 4, n*4))
print('{} x {:2} = {}'.format(n, 5, n*5))
print('{} x {:2} = {}'.format(n, 6, n*6))
print('{} x {:2} = {}'.format(n, 7, n*7))
print('{} x {:2} = {}'.format(n, 8, n*8))
print('{} x {:2} = {}'.format(n, 9, n*9))
print('{} x {:2} = {}'.format(n, 10, n*10))
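# Hedged alternative: once loops are covered, the ten prints above collapse to:
#     for c in range(1, 11):
#         print('{} x {:2} = {}'.format(n, c, n * c))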
# When :2 is used inside the braces, the content is right-aligned in a field
# two characters wide, as is the case with the numbers here.
# Do not confuse it with :.2f, which limits the number of decimal places
# shown after the decimal point. | 3.84375 | 4 |
app/models.py | YuliyaSim/blogwebsite | 0 | 12771019 | <reponame>YuliyaSim/blogwebsite
from app import db
class Blogpost(db.Model):
post_id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(64))
subtitle = db.Column(db.String(64))
author = db.Column(db.String(64))
date_posted = db.Column(db.DateTime)
content = db.Column(db.Text)
    # def __repr__(self):
    #     return f'<Blogpost: {self.title}>'
| 2.6875 | 3 |
pyHPConfBackup.py | efflicto/pyHPConfigBackup | 2 | 12771020 | <filename>pyHPConfBackup.py
import os
import zipfile
import glob
import datetime
import telnetlib
import time
print("Starting backup...")
hosts = ["192.168.1.1"]
user = "UserName"
passw = "PassWord"
loc = "/mnt/backup"
date = str(datetime.date.today())
hosts_ok =0
count =0
print("Removing all old backup files")
for fl in glob.glob("/srv/tftp/*.cfg"):
os.remove(fl)
print("Done.")
print("Backing up all given hosts")
for n in hosts:
tn = telnetlib.Telnet(n)
tn.read_until("Username: ")
tn.write(user + "\r\n")
tn.read_until("Password: ")
tn.write(passw + "\r\n")
tn.read_until("*",2)
tn.write("backup startup-configuration to YOURTFTPSERVERHERE "+n+".cfg\r\n")
read = tn.read_until("*",2)
    if "00009K" in read:
        hosts_ok = hosts_ok + 1
    else:
        print("Error on host: " + n)
count=count+1
print("Hosts complete: "+str(count)+"/"+str(len(hosts)))
print("Zipping up this stuff...")
target_dir = '/srv/tftp'
zipf = zipfile.ZipFile(loc + "/" + date + '.zip', 'w', zipfile.ZIP_DEFLATED)
rootlen = len(target_dir) + 1
for base, dirs, files in os.walk(target_dir):
    for file in files:
        fn = os.path.join(base, file)
        zipf.write(fn, fn[rootlen:])
zipf.close()  # close explicitly so the zip's central directory is written
print("Writing this zipfile to: " + loc)
print("Deleting zip files older than 10 days")
now = time.time()
cutoff = now - (7 * 86400)
files = os.listdir(loc)
for xfile in files:
    # join properly: "loc + xfile" silently builds a wrong path when loc
    # has no trailing slash
    fpath = os.path.join(loc, xfile)
    if os.path.isfile(fpath):
        t = os.stat(fpath)
        c = t.st_ctime
        if c < cutoff:
            os.remove(fpath)
print("Done.")
print("Config backup completed: "+str(hosts_ok)+"/"+str(len(hosts))+" hosts ok.") | 2.859375 | 3 |
distribution/build/lib/ALeA/dependencies/interfaceFAAL/__init__.py | MKilani/ALeA | 0 | 12771021 | from py4j.java_gateway import JavaGateway, GatewayParameters
from subprocess import *
def interfaceFAAL(word_A, word_B, addition_app):
aligments = addition_app.addition(word_A, word_B)
alignmentsResultsItem = {}
alignmentsResultsItem["bestAlignGlobal"] = aligments[0].getGlobalSimilarityScore()
alignmentsResultsItem["bestAlignCorrected"] = aligments[0].getCorrectedGlobalSimilarityScore()
alignmentsResultsItem["wordWithDiacritics_1"] = aligments[0].getWord1_WithDiacritics().replace("\t", " ")
alignmentsResultsItem["wordWithoutDiacritics_1"] = aligments[0].getWord1_WithoutDiacritics().replace("\t", " ")
alignmentsResultsItem["wordWithDiacritics_2"] = aligments[0].getWord2_WithDiacritics().replace("\t", " ")
alignmentsResultsItem["wordWithoutDiacritics_2"] = aligments[0].getWord2_WithoutDiacritics().replace("\t", " ")
#gateway.shutdown()
#gateway.shutdown_callback_server()
return alignmentsResultsItem | 2.328125 | 2 |
driftbase/api/servers.py | directivegames/drift-base | 1 | 12771022 | import datetime
import logging
import uuid
import marshmallow as ma
from flask import url_for, g, jsonify
from flask.views import MethodView
from flask_smorest import Blueprint, abort
import http.client as http_client
from drift.core.extensions.jwt import current_user, requires_roles
from drift.core.extensions.urlregistry import Endpoints
from driftbase.config import get_server_heartbeat_config
from driftbase.models.db import (
Machine, Server, Match, ServerDaemonCommand
)
log = logging.getLogger(__name__)
bp = Blueprint("servers", __name__, url_prefix="/servers", description="Battle server processes")
endpoints = Endpoints()
def drift_init_extension(app, api, **kwargs):
api.register_blueprint(bp)
endpoints.init_app(app)
def utcnow():
return datetime.datetime.utcnow()
class ServersGetArgsSchema(ma.Schema):
machine_id = ma.fields.Integer()
rows = ma.fields.Integer()
class ServersPostRequestSchema(ma.Schema):
machine_id = ma.fields.Integer()
version = ma.fields.String()
public_ip = ma.fields.IPv4()
port = ma.fields.Integer()
command_line = ma.fields.String()
command_line_custom = ma.fields.String()
pid = ma.fields.Integer()
status = ma.fields.String()
image_name = ma.fields.String()
instance_name = ma.fields.String()
branch = ma.fields.String()
commit_id = ma.fields.String()
process_info = ma.fields.Dict()
details = ma.fields.Dict()
repository = ma.fields.String()
ref = ma.fields.String()
build = ma.fields.String()
build_number = ma.fields.Integer()
target_platform = ma.fields.String()
build_info = ma.fields.Dict()
placement = ma.fields.String()
class ServersPostResponseSchema(ma.Schema):
server_id = ma.fields.Integer(required=True)
machine_id = ma.fields.Integer(required=True)
url = ma.fields.Url(required=True)
machine_url = ma.fields.Url(required=True)
heartbeat_url = ma.fields.Url(required=True)
commands_url = ma.fields.Url(required=True)
token = ma.fields.String(required=True)
next_heartbeat_seconds = ma.fields.Number(required=True)
heartbeat_timeout = ma.fields.Str(required=True)
class ServerPutRequestSchema(ma.Schema):
status = ma.fields.String(required=True)
machine_id = ma.fields.Integer()
version = ma.fields.String()
public_ip = ma.fields.IPv4()
port = ma.fields.Integer()
command_line = ma.fields.String()
command_line_custom = ma.fields.String()
pid = ma.fields.Integer()
image_name = ma.fields.String()
error = ma.fields.String()
branch = ma.fields.String()
commit_id = ma.fields.String()
process_info = ma.fields.Dict()
details = ma.fields.Dict()
repository = ma.fields.String()
ref = ma.fields.String()
build = ma.fields.String()
build_number = ma.fields.Integer()
target_platform = ma.fields.String()
build_info = ma.fields.Dict()
class ServerPutResponseSchema(ma.Schema):
server_id = ma.fields.Integer(required=True)
machine_id = ma.fields.Integer(required=True)
url = ma.fields.Url(required=True)
machine_url = ma.fields.Url(required=True)
heartbeat_url = ma.fields.Url(required=True)
class ServerHeartbeatPutResponseSchema(ma.Schema):
last_heartbeat = ma.fields.DateTime(metadata=dict(description="Timestamp of the previous heartbeat"))
this_heartbeat = ma.fields.DateTime(metadata=dict(description="Timestamp of this heartbeat"))
next_heartbeat = ma.fields.DateTime(metadata=dict(description="Timestamp when the next heartbeat is expected"))
next_heartbeat_seconds = ma.fields.Integer(metadata=dict(description="Number of seconds until the next heartbeat is expected"))
heartbeat_timeout = ma.fields.DateTime(
metadata=dict(description="Timestamp when the server times out if no heartbeat is received"))
heartbeat_timeout_seconds = ma.fields.Integer(
metadata=dict(description="Number of seconds until the server times out if no heartbeat is received"))
@bp.route('', endpoint='list')
class ServersAPI(MethodView):
@requires_roles("service")
@bp.arguments(ServersGetArgsSchema, location='query')
def get(self, args):
"""
Get a list of the last 100 battle servers that have been registered in
the system.
"""
num_rows = args.get("rows") or 100
query = g.db.query(Server)
if args.get("machine_id"):
query = query.filter(Server.machine_id == args.get("machine_id"))
query = query.order_by(-Server.server_id)
query = query.limit(num_rows)
rows = query.all()
ret = []
for row in rows:
record = row.as_dict()
record["url"] = url_for("servers.entry", server_id=row.server_id, _external=True)
ret.append(record)
return jsonify(ret)
@requires_roles("service")
@bp.arguments(ServersPostRequestSchema)
@bp.response(http_client.CREATED, ServersPostResponseSchema)
def post(self, args):
"""
The daemon process (and server, for local development) post here
to register the server instance with the backend. You need to
register the server before you can register a battle.
"""
machine_id = args.get("machine_id")
log.info("registering a server on machine_id %s, realm %s and public_ip %s",
machine_id, args.get("realm"), args.get("public_ip"))
        # If we don't already have a machine we make one just-in-time on the realm "local".
        # This supports local development, where no external daemon is running and the server
        # itself does this registration without a prior registration on the machines endpoint.
if not machine_id:
realm = "local"
instance_name = args.get("instance_name")
placement = args.get("placement") or "<unknown placement>"
if not instance_name:
abort(http_client.BAD_REQUEST, description="You need to supply an instance_name")
machine = g.db.query(Machine).filter(Machine.realm == realm,
Machine.instance_name == instance_name,
Machine.placement == placement).first()
if machine:
machine_id = machine.machine_id
log.info("machine_id %s found for server", machine_id)
else:
machine = Machine(realm=realm, instance_name=instance_name,
placement=placement, server_count=0)
g.db.add(machine)
g.db.flush()
machine_id = machine.machine_id
log.info("Created machine_id %s for server instance \"%s\"",
machine_id, instance_name)
else:
machine = g.db.query(Machine).get(machine_id)
if not machine:
abort(http_client.NOT_FOUND, description="Machine %s was not found" % machine_id)
token = str(uuid.uuid4()).replace("-", "")[:20]
        def get_or_null(ip):
            return str(ip) if ip else None
server = Server(machine_id=machine_id,
version=args.get("version"),
public_ip=get_or_null(args.get("public_ip")),
port=args.get("port"),
command_line=args.get("command_line"),
command_line_custom=args.get("command_line_custom"),
pid=args.get("pid"),
status=args.get("status"),
image_name=args.get("image_name"),
branch=args.get("branch"),
commit_id=args.get("commit_id"),
process_info=args.get("process_info"),
details=args.get("details"),
repository=args.get("repository"),
ref=args.get("ref"),
build=args.get("build"),
build_number=args.get("build_number"),
target_platform=args.get("target_platform"),
build_info=args.get("build_info"),
token=token
)
g.db.add(server)
machine.server_count += 1
machine.server_date = utcnow()
g.db.commit()
server_id = server.server_id
resource_url = url_for("servers.entry", server_id=server_id, _external=True)
machine_url = url_for("machines.entry", machine_id=machine_id, _external=True)
heartbeat_url = url_for("servers.heartbeat", server_id=server_id, _external=True)
commands_url = url_for("servers.commands", server_id=server_id, _external=True)
response_header = {
"Location": resource_url,
}
log.info("Server %s has been registered on machine_id %s", server_id, machine_id)
heartbeat_period, heartbeat_timeout = get_server_heartbeat_config()
return {"server_id": server_id,
"url": resource_url,
"machine_id": machine_id,
"machine_url": machine_url,
"heartbeat_url": heartbeat_url,
"commands_url": commands_url,
"token": token,
"next_heartbeat_seconds": heartbeat_period,
"heartbeat_timeout": utcnow() + datetime.timedelta(seconds=heartbeat_timeout),
}, None, response_header
@bp.route('/<int:server_id>', endpoint='entry')
class ServerAPI(MethodView):
"""
Interface to battle servers instances. A battle server instance is
a single run of a battle server executable. The battle server will
have a single battle on it. You should never have a battle resource
without an associated battle server resource.
"""
@requires_roles("service")
def get(self, server_id):
"""
Get information about a single battle server instance.
Returns information from the machine and the associated
battle if found.
"""
server = g.db.query(Server).get(server_id)
if not server:
log.warning("Requested a non-existant battle server: %s", server_id)
abort(http_client.NOT_FOUND, description="Server not found")
machine_id = server.machine_id
record = server.as_dict()
record["url"] = url_for("servers.entry", server_id=server_id, _external=True)
record["heartbeat_url"] = url_for("servers.heartbeat", server_id=server_id, _external=True)
record["commands_url"] = url_for("servers.commands", server_id=server_id, _external=True)
record["machine_url"] = None
if machine_id:
machine = g.db.query(Machine).get(machine_id)
if machine:
record["machine_url"] = url_for("machines.entry", machine_id=machine_id,
_external=True)
matches = []
rows = g.db.query(Match).filter(Match.server_id == server_id).all()
for row in rows:
match_id = row.match_id
match = {"match_id": match_id,
"url": url_for("matches.entry", match_id=match_id, _external=True),
"num_players": row.num_players,
}
matches.append(match)
record["matches"] = matches
commands = []
rows = g.db.query(ServerDaemonCommand).filter(ServerDaemonCommand.server_id == server_id,
ServerDaemonCommand.status == "pending").all()
for row in rows:
command = {"command_id": row.command_id,
"command": row.command,
"arguments": row.arguments,
"create_date": row.create_date,
"url": url_for("servers.command", server_id=server_id,
command_id=row.command_id, _external=True)
}
commands.append(command)
record["pending_commands"] = commands
log.debug("Returning info for battle server %s", server_id)
return jsonify(record)
@requires_roles("service")
@bp.arguments(ServerPutRequestSchema)
@bp.response(http_client.OK, ServerPutResponseSchema)
def put(self, args, server_id):
"""
        The battle server management (celery) process calls this to update
        the status of a specific running battle server task.
"""
log.info("Updating battle server %s", server_id)
server = g.db.query(Server).get(server_id)
if not server:
abort(http_client.NOT_FOUND)
if args.get("status"):
log.info("Changing status of battle server %s from '%s' to '%s'",
server_id, server.status, args["status"])
public_ip = args.pop("public_ip", None)
if public_ip:
server.public_ip = str(public_ip)
for arg in args:
setattr(server, arg, args[arg])
g.db.commit()
machine_id = server.machine_id
machine_url = None
if machine_id:
machine_url = url_for("machines.entry", machine_id=machine_id, _external=True)
return {"server_id": server_id,
"url": url_for("servers.entry", server_id=server_id, _external=True),
"machine_id": machine_id,
"machine_url": machine_url,
"heartbeat_url": url_for("servers.heartbeat", server_id=server_id, _external=True),
}
@bp.route('/<int:server_id>/heartbeat', endpoint='heartbeat')
class ServerHeartbeatAPI(MethodView):
"""
Thin heartbeat API
"""
@requires_roles("service")
@bp.response(http_client.OK, ServerHeartbeatPutResponseSchema)
def put(self, server_id):
"""
Battle server heartbeat
"""
log.debug("%s is heart beating battle server %s",
current_user.get("user_name", "unknown"), server_id)
server = g.db.query(Server).get(server_id)
if not server:
abort(http_client.NOT_FOUND, description="Server not found")
heartbeat_period, heartbeat_timeout = get_server_heartbeat_config()
now = utcnow()
last_heartbeat = server.heartbeat_date
if last_heartbeat + datetime.timedelta(seconds=heartbeat_timeout) < now:
msg = "Heartbeat timeout. Last heartbeat was at {} and now we are at {}" \
.format(last_heartbeat, now)
log.info(msg)
abort(http_client.NOT_FOUND, message=msg)
server.heartbeat_count += 1
server.heartbeat_date = now
g.db.commit()
return {
"last_heartbeat": last_heartbeat,
"this_heartbeat": server.heartbeat_date,
"next_heartbeat": server.heartbeat_date + datetime.timedelta(seconds=heartbeat_period),
"next_heartbeat_seconds": heartbeat_period,
"heartbeat_timeout": now + datetime.timedelta(seconds=heartbeat_timeout),
"heartbeat_timeout_seconds": heartbeat_timeout,
}
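# Hedged usage sketch: a daemon keeps its registration alive by PUT-ing to
# this endpoint before `heartbeat_timeout` elapses; the URL shape below is
# illustrative and authentication is whatever drift's JWT layer expects:
#   curl -X PUT https://<host>/servers/42/heartbeat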
class ServerCommandsPostSchema(ma.Schema):
command = ma.fields.String(required=True)
arguments = ma.fields.Dict()
details = ma.fields.Dict()
@bp.route('/<int:server_id>/commands', endpoint='commands')
class ServerCommandsAPI(MethodView):
"""
Commands for the battle server daemon
"""
@requires_roles("service")
@bp.arguments(ServerCommandsPostSchema)
def post(self, args, server_id):
"""
Add a new command for the daemon to execute
"""
server = g.db.query(Server).get(server_id)
if not server:
abort(http_client.NOT_FOUND)
status = "pending"
command = ServerDaemonCommand(server_id=server_id,
command=args["command"],
arguments=args.get("arguments"),
details=args.get("details"),
status=status,
)
g.db.add(command)
g.db.commit()
resource_url = url_for("servers.command", server_id=server_id,
command_id=command.command_id, _external=True)
return jsonify({"command_id": command.command_id,
"url": resource_url,
"status": status,
}), http_client.CREATED, None
@requires_roles("service")
def get(self, server_id):
rows = g.db.query(ServerDaemonCommand) \
.filter(ServerDaemonCommand.server_id == server_id) \
.all()
ret = []
for r in rows:
command = r.as_dict()
command["url"] = url_for("servers.command",
server_id=server_id,
command_id=r.command_id,
_external=True)
ret.append(command)
return jsonify(ret)
class ServerCommandPatchSchema(ma.Schema):
status = ma.fields.String(required=True)
details = ma.fields.Dict()
@bp.route('/<int:server_id>/commands/<int:command_id>', endpoint='command')
class ServerCommandAPI(MethodView):
@requires_roles("service")
@bp.arguments(ServerCommandPatchSchema)
def patch(self, args, server_id, command_id):
return self._patch(args, server_id, command_id)
@requires_roles("service")
@bp.arguments(ServerCommandPatchSchema)
def put(self, args, server_id, command_id):
return self._patch(args, server_id, command_id)
    def _patch(self, args, server_id, command_id):
        """
        Update the status (and optionally the details) of an existing daemon command
        """
        server = g.db.query(Server).get(server_id)
        if not server:
            abort(http_client.NOT_FOUND)
        row = g.db.query(ServerDaemonCommand).get(command_id)
        if not row:
            abort(http_client.NOT_FOUND)
        row.status = args["status"]
row.status_date = utcnow()
if "details" in args:
row.details = args["details"]
g.db.commit()
ret = row.as_dict()
ret["url"] = url_for("servers.command", server_id=server_id, command_id=row.command_id,
_external=True)
return jsonify(ret)
@requires_roles("service")
def get(self, server_id, command_id):
        row = g.db.query(ServerDaemonCommand).get(command_id)
        if not row:
            abort(http_client.NOT_FOUND)
        ret = row.as_dict()
ret["url"] = url_for("servers.command", server_id=server_id, command_id=row.command_id,
_external=True)
return jsonify(ret)
@endpoints.register
def endpoint_info(*args):
ret = {"servers": url_for("servers.list", _external=True), }
return ret
| 1.84375 | 2 |
pymtl3/passes/sverilog/translation/test/SVTranslator_L2_cases_test.py | hsqforfun/pymtl3 | 1 | 12771023 | <gh_stars>1-10
#=========================================================================
# SVTranslator_L2_cases_test.py
#=========================================================================
"""Test the SystemVerilog translator."""
from pymtl3.datatypes import Bits1, Bits32, Bits96, bitstruct, concat
from pymtl3.dsl import Component, InPort, OutPort, Wire, connect
from pymtl3.passes.rtlir.util.test_utility import do_test
from pymtl3.passes.sverilog.translation.structural.test.SVStructuralTranslatorL1_test import (
check_eq,
)
from pymtl3.passes.sverilog.translation.SVTranslator import SVTranslator
def local_do_test( m ):
m.elaborate()
tr = SVTranslator( m )
tr.translate( m )
check_eq( tr.hierarchy.src, m._ref_src )
#-------------------------------------------------------------------------
# Behavioral
#-------------------------------------------------------------------------
def test_if( do_test ):
class A( Component ):
def construct( s ):
s.in_1 = InPort( Bits32 )
s.in_2 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def upblk():
if Bits1(1):
s.out = s.in_1
else:
s.out = s.in_2
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_1,
input logic [31:0] in_2,
output logic [31:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// if Bits1(1):
// s.out = s.in_1
// else:
// s.out = s.in_2
always_comb begin : upblk
if ( 1'd1 ) begin
out = in_1;
end
else
out = in_2;
end
endmodule
"""
a._ref_src_yosys = a._ref_src
do_test( a )
def test_if_dangling_else_inner( do_test ):
class A( Component ):
def construct( s ):
s.in_1 = InPort( Bits32 )
s.in_2 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def upblk():
if Bits1(1):
if Bits1(0):
s.out = s.in_1
else:
s.out = s.in_2
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_1,
input logic [31:0] in_2,
output logic [31:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// if Bits1(1):
// if Bits1(0):
// s.out = s.in_1
// else:
// s.out = s.in_2
always_comb begin : upblk
if ( 1'd1 ) begin
if ( 1'd0 ) begin
out = in_1;
end
else
out = in_2;
end
end
endmodule
"""
a._ref_src_yosys = a._ref_src
do_test( a )
def test_if_dangling_else_outter( do_test ):
class A( Component ):
def construct( s ):
s.in_1 = InPort( Bits32 )
s.in_2 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def upblk():
if Bits1(1):
if Bits1(0):
s.out = s.in_1
else:
s.out = s.in_2
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_1,
input logic [31:0] in_2,
output logic [31:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// if Bits1(1):
// if Bits1(0):
// s.out = s.in_1
// else:
// s.out = s.in_2
always_comb begin : upblk
if ( 1'd1 ) begin
if ( 1'd0 ) begin
out = in_1;
end
end
else
out = in_2;
end
endmodule
"""
a._ref_src_yosys = a._ref_src
do_test( a )
def test_if_branches( do_test ):
class A( Component ):
def construct( s ):
s.in_1 = InPort( Bits32 )
s.in_2 = InPort( Bits32 )
s.in_3 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def upblk():
if Bits1(1):
s.out = s.in_1
elif Bits1(0):
s.out = s.in_2
else:
s.out = s.in_3
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_1,
input logic [31:0] in_2,
input logic [31:0] in_3,
output logic [31:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// if Bits1(1):
// s.out = s.in_1
// elif Bits1(0):
// s.out = s.in_2
// else:
// s.out = s.in_3
always_comb begin : upblk
if ( 1'd1 ) begin
out = in_1;
end
else if ( 1'd0 ) begin
out = in_2;
end
else
out = in_3;
end
endmodule
"""
a._ref_src_yosys = a._ref_src
do_test( a )
def test_nested_if( do_test ):
class A( Component ):
def construct( s ):
s.in_1 = InPort( Bits32 )
s.in_2 = InPort( Bits32 )
s.in_3 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def upblk():
if Bits1(1):
if Bits1(0):
s.out = s.in_1
else:
s.out = s.in_2
elif Bits1(0):
if Bits1(1):
s.out = s.in_2
else:
s.out = s.in_3
else:
if Bits1(1):
s.out = s.in_3
else:
s.out = s.in_1
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_1,
input logic [31:0] in_2,
input logic [31:0] in_3,
output logic [31:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// if Bits1(1):
// if Bits1(0):
// s.out = s.in_1
// else:
// s.out = s.in_2
// elif Bits1(0):
// if Bits1(1):
// s.out = s.in_2
// else:
// s.out = s.in_3
// else:
// if Bits1(1):
// s.out = s.in_3
// else:
// s.out = s.in_1
always_comb begin : upblk
if ( 1'd1 ) begin
if ( 1'd0 ) begin
out = in_1;
end
else
out = in_2;
end
else if ( 1'd0 ) begin
if ( 1'd1 ) begin
out = in_2;
end
else
out = in_3;
end
else if ( 1'd1 ) begin
out = in_3;
end
else
out = in_1;
end
endmodule
"""
a._ref_src_yosys = a._ref_src
do_test( a )
def test_for_range_upper( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ InPort( Bits32 ) for _ in range(2) ]
s.out = [ OutPort( Bits32 ) for _ in range(2) ]
@s.update
def upblk():
for i in range(2):
s.out[i] = s.in_[i]
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_ [0:1],
output logic [31:0] out [0:1],
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(2):
// s.out[i] = s.in_[i]
always_comb begin : upblk
for ( int i = 0; i < 2; i += 1 )
out[i] = in_[i];
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___0,
input logic [31:0] in___1,
output logic [31:0] out__0,
output logic [31:0] out__1,
input logic [0:0] reset
);
logic [31:0] in_ [0:1];
logic [31:0] out [0:1];
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(2):
// s.out[i] = s.in_[i]
integer __loopvar__upblk_i;
always_comb begin : upblk
for ( __loopvar__upblk_i = 0; __loopvar__upblk_i < 2; __loopvar__upblk_i = __loopvar__upblk_i + 1 )
out[__loopvar__upblk_i] = in_[__loopvar__upblk_i];
end
assign in_[0] = in___0;
assign in_[1] = in___1;
assign out__0 = out[0];
assign out__1 = out[1];
endmodule
"""
do_test( a )
def test_for_range_lower_upper( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ InPort( Bits32 ) for _ in range(2) ]
s.out = [ OutPort( Bits32 ) for _ in range(2) ]
@s.update
def upblk():
for i in range(1, 2):
s.out[i] = s.in_[i]
s.out[0] = s.in_[0]
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_ [0:1],
output logic [31:0] out [0:1],
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(1, 2):
// s.out[i] = s.in_[i]
// s.out[0] = s.in_[0]
always_comb begin : upblk
for ( int i = 1; i < 2; i += 1 )
out[i] = in_[i];
out[0] = in_[0];
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___0,
input logic [31:0] in___1,
output logic [31:0] out__0,
output logic [31:0] out__1,
input logic [0:0] reset
);
logic [31:0] in_ [0:1];
logic [31:0] out [0:1];
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(1, 2):
// s.out[i] = s.in_[i]
// s.out[0] = s.in_[0]
integer __loopvar__upblk_i;
always_comb begin : upblk
for ( __loopvar__upblk_i = 1; __loopvar__upblk_i < 2; __loopvar__upblk_i = __loopvar__upblk_i + 1 )
out[__loopvar__upblk_i] = in_[__loopvar__upblk_i];
out[0] = in_[0];
end
assign in_[0] = in___0;
assign in_[1] = in___1;
assign out__0 = out[0];
assign out__1 = out[1];
endmodule
"""
do_test( a )
def test_for_range_lower_upper_step( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ InPort( Bits32 ) for _ in range(5) ]
s.out = [ OutPort( Bits32 ) for _ in range(5) ]
@s.update
def upblk():
for i in range(0, 5, 2):
s.out[i] = s.in_[i]
for i in range(1, 5, 2):
s.out[i] = s.in_[i]
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_ [0:4],
output logic [31:0] out [0:4],
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(0, 5, 2):
// s.out[i] = s.in_[i]
// for i in range(1, 5, 2):
// s.out[i] = s.in_[i]
always_comb begin : upblk
for ( int i = 0; i < 5; i += 2 )
out[i] = in_[i];
for ( int i = 1; i < 5; i += 2 )
out[i] = in_[i];
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___0,
input logic [31:0] in___1,
input logic [31:0] in___2,
input logic [31:0] in___3,
input logic [31:0] in___4,
output logic [31:0] out__0,
output logic [31:0] out__1,
output logic [31:0] out__2,
output logic [31:0] out__3,
output logic [31:0] out__4,
input logic [0:0] reset
);
logic [31:0] in_ [0:4];
logic [31:0] out [0:4];
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(0, 5, 2):
// s.out[i] = s.in_[i]
// for i in range(1, 5, 2):
// s.out[i] = s.in_[i]
integer __loopvar__upblk_i;
always_comb begin : upblk
for ( __loopvar__upblk_i = 0; __loopvar__upblk_i < 5; __loopvar__upblk_i = __loopvar__upblk_i + 2 )
out[__loopvar__upblk_i] = in_[__loopvar__upblk_i];
for ( __loopvar__upblk_i = 1; __loopvar__upblk_i < 5; __loopvar__upblk_i = __loopvar__upblk_i + 2 )
out[__loopvar__upblk_i] = in_[__loopvar__upblk_i];
end
assign in_[0] = in___0;
assign in_[1] = in___1;
assign in_[2] = in___2;
assign in_[3] = in___3;
assign in_[4] = in___4;
assign out__0 = out[0];
assign out__1 = out[1];
assign out__2 = out[2];
assign out__3 = out[3];
assign out__4 = out[4];
endmodule
"""
do_test( a )
def test_if_exp_for( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ InPort( Bits32 ) for _ in range(5) ]
s.out = [ OutPort( Bits32 ) for _ in range(5) ]
@s.update
def upblk():
for i in range(5):
s.out[i] = s.in_[i] if i == 1 else s.in_[0]
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_ [0:4],
output logic [31:0] out [0:4],
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(5):
// s.out[i] = s.in_[i] if i == 1 else s.in_[0]
always_comb begin : upblk
for ( int i = 0; i < 5; i += 1 )
out[i] = ( i == 1 ) ? in_[i] : in_[0];
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___0,
input logic [31:0] in___1,
input logic [31:0] in___2,
input logic [31:0] in___3,
input logic [31:0] in___4,
output logic [31:0] out__0,
output logic [31:0] out__1,
output logic [31:0] out__2,
output logic [31:0] out__3,
output logic [31:0] out__4,
input logic [0:0] reset
);
logic [31:0] in_ [0:4];
logic [31:0] out [0:4];
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(5):
// s.out[i] = s.in_[i] if i == 1 else s.in_[0]
integer __loopvar__upblk_i;
always_comb begin : upblk
for ( __loopvar__upblk_i = 0; __loopvar__upblk_i < 5; __loopvar__upblk_i = __loopvar__upblk_i + 1 )
out[__loopvar__upblk_i] = ( __loopvar__upblk_i == 1 ) ? in_[__loopvar__upblk_i] : in_[0];
end
assign in_[0] = in___0;
assign in_[1] = in___1;
assign in_[2] = in___2;
assign in_[3] = in___3;
assign in_[4] = in___4;
assign out__0 = out[0];
assign out__1 = out[1];
assign out__2 = out[2];
assign out__3 = out[3];
assign out__4 = out[4];
endmodule
"""
do_test( a )
def test_if_exp_unary_op( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ InPort( Bits32 ) for _ in range(5) ]
s.out = [ OutPort( Bits32 ) for _ in range(5) ]
@s.update
def upblk():
for i in range(5):
s.out[i] = (~s.in_[i]) if i == 1 else s.in_[0]
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_ [0:4],
output logic [31:0] out [0:4],
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(5):
// s.out[i] = (~s.in_[i]) if i == 1 else s.in_[0]
always_comb begin : upblk
for ( int i = 0; i < 5; i += 1 )
out[i] = ( i == 1 ) ? ~in_[i] : in_[0];
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___0,
input logic [31:0] in___1,
input logic [31:0] in___2,
input logic [31:0] in___3,
input logic [31:0] in___4,
output logic [31:0] out__0,
output logic [31:0] out__1,
output logic [31:0] out__2,
output logic [31:0] out__3,
output logic [31:0] out__4,
input logic [0:0] reset
);
logic [31:0] in_ [0:4];
logic [31:0] out [0:4];
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(5):
// s.out[i] = (~s.in_[i]) if i == 1 else s.in_[0]
integer __loopvar__upblk_i;
always_comb begin : upblk
for ( __loopvar__upblk_i = 0; __loopvar__upblk_i < 5; __loopvar__upblk_i = __loopvar__upblk_i + 1 )
out[__loopvar__upblk_i] = ( __loopvar__upblk_i == 1 ) ? ~in_[__loopvar__upblk_i] : in_[0];
end
assign in_[0] = in___0;
assign in_[1] = in___1;
assign in_[2] = in___2;
assign in_[3] = in___3;
assign in_[4] = in___4;
assign out__0 = out[0];
assign out__1 = out[1];
assign out__2 = out[2];
assign out__3 = out[3];
assign out__4 = out[4];
endmodule
"""
do_test( a )
def test_if_bool_op( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ InPort( Bits32 ) for _ in range(5) ]
s.out = [ OutPort( Bits32 ) for _ in range(5) ]
@s.update
def upblk():
for i in range(5):
if s.in_[i] and (s.in_[i+1] if i<5 else s.in_[4]):
s.out[i] = s.in_[i]
else:
s.out[i] = Bits32(0)
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_ [0:4],
output logic [31:0] out [0:4],
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(5):
// if s.in_[i] and (s.in_[i+1] if i<5 else s.in_[4]):
// s.out[i] = s.in_[i]
// else:
// s.out[i] = Bits32(0)
always_comb begin : upblk
for ( int i = 0; i < 5; i += 1 )
if ( in_[i] && ( ( i < 5 ) ? in_[i + 1] : in_[4] ) ) begin
out[i] = in_[i];
end
else
out[i] = 32'd0;
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___0,
input logic [31:0] in___1,
input logic [31:0] in___2,
input logic [31:0] in___3,
input logic [31:0] in___4,
output logic [31:0] out__0,
output logic [31:0] out__1,
output logic [31:0] out__2,
output logic [31:0] out__3,
output logic [31:0] out__4,
input logic [0:0] reset
);
logic [31:0] in_ [0:4];
logic [31:0] out [0:4];
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(5):
// if s.in_[i] and (s.in_[i+1] if i<5 else s.in_[4]):
// s.out[i] = s.in_[i]
// else:
// s.out[i] = Bits32(0)
integer __loopvar__upblk_i;
always_comb begin : upblk
for ( __loopvar__upblk_i = 0; __loopvar__upblk_i < 5; __loopvar__upblk_i = __loopvar__upblk_i + 1 )
if ( in_[__loopvar__upblk_i] && ( ( __loopvar__upblk_i < 5 ) ? in_[__loopvar__upblk_i + 1] : in_[4] ) ) begin
out[__loopvar__upblk_i] = in_[__loopvar__upblk_i];
end
else
out[__loopvar__upblk_i] = 32'd0;
end
assign in_[0] = in___0;
assign in_[1] = in___1;
assign in_[2] = in___2;
assign in_[3] = in___3;
assign in_[4] = in___4;
assign out__0 = out[0];
assign out__1 = out[1];
assign out__2 = out[2];
assign out__3 = out[3];
assign out__4 = out[4];
endmodule
"""
do_test( a )
def test_tmpvar( do_test ):
class A( Component ):
def construct( s ):
s.in_ = [ InPort( Bits32 ) for _ in range(5) ]
s.out = [ OutPort( Bits32 ) for _ in range(5) ]
@s.update
def upblk():
for i in range(5):
if s.in_[i] and (s.in_[i+1] if i<5 else s.in_[4]):
tmpvar = s.in_[i]
else:
tmpvar = Bits32(0)
s.out[i] = tmpvar
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_ [0:4],
output logic [31:0] out [0:4],
input logic [0:0] reset
);
logic [31:0] __tmpvar__upblk_tmpvar;
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(5):
// if s.in_[i] and (s.in_[i+1] if i<5 else s.in_[4]):
// tmpvar = s.in_[i]
// else:
// tmpvar = Bits32(0)
// s.out[i] = tmpvar
always_comb begin : upblk
for ( int i = 0; i < 5; i += 1 ) begin
if ( in_[i] && ( ( i < 5 ) ? in_[i + 1] : in_[4] ) ) begin
__tmpvar__upblk_tmpvar = in_[i];
end
else
__tmpvar__upblk_tmpvar = 32'd0;
out[i] = __tmpvar__upblk_tmpvar;
end
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___0,
input logic [31:0] in___1,
input logic [31:0] in___2,
input logic [31:0] in___3,
input logic [31:0] in___4,
output logic [31:0] out__0,
output logic [31:0] out__1,
output logic [31:0] out__2,
output logic [31:0] out__3,
output logic [31:0] out__4,
input logic [0:0] reset
);
logic [31:0] in_ [0:4];
logic [31:0] out [0:4];
logic [31:0] __tmpvar__upblk_tmpvar;
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// for i in range(5):
// if s.in_[i] and (s.in_[i+1] if i<5 else s.in_[4]):
// tmpvar = s.in_[i]
// else:
// tmpvar = Bits32(0)
// s.out[i] = tmpvar
integer __loopvar__upblk_i;
always_comb begin : upblk
for ( __loopvar__upblk_i = 0; __loopvar__upblk_i < 5; __loopvar__upblk_i = __loopvar__upblk_i + 1 ) begin
if ( in_[__loopvar__upblk_i] && ( ( __loopvar__upblk_i < 5 ) ? in_[__loopvar__upblk_i + 1] : in_[4] ) ) begin
__tmpvar__upblk_tmpvar = in_[__loopvar__upblk_i];
end
else
__tmpvar__upblk_tmpvar = 32'd0;
out[__loopvar__upblk_i] = __tmpvar__upblk_tmpvar;
end
end
assign in_[0] = in___0;
assign in_[1] = in___1;
assign in_[2] = in___2;
assign in_[3] = in___3;
assign in_[4] = in___4;
assign out__0 = out[0];
assign out__1 = out[1];
assign out__2 = out[2];
assign out__3 = out[3];
assign out__4 = out[4];
endmodule
"""
do_test( a )
def test_struct( do_test ):
@bitstruct
class B:
foo: Bits32
class A( Component ):
def construct( s ):
s.in_ = InPort( B )
s.out = OutPort( Bits32 )
@s.update
def upblk():
s.out = s.in_.foo
a = A()
a._ref_src = \
"""
typedef struct packed {
logic [31:0] foo;
} B;
module A
(
input logic [0:0] clk,
input B in_,
output logic [31:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// s.out = s.in_.foo
always_comb begin : upblk
out = in_.foo;
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___foo,
output logic [31:0] out,
input logic [0:0] reset
);
logic [31:0] in_;
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// s.out = s.in_.foo
always_comb begin : upblk
out = in___foo;
end
assign in_[31:0] = in___foo;
endmodule
"""
do_test( a )
def test_packed_array_concat( do_test ):
@bitstruct
class B:
bar: [ Bits32 ] * 2
foo: Bits32
class A( Component ):
def construct( s ):
s.in_ = InPort( B )
s.out = OutPort( Bits96 )
@s.update
def upblk():
s.out = concat( s.in_.bar[0], s.in_.bar[1], s.in_.foo )
a = A()
a._ref_src = \
"""
typedef struct packed {
logic [1:0][31:0] bar;
logic [31:0] foo;
} B;
module A
(
input logic [0:0] clk,
input B in_,
output logic [95:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// s.out = concat( s.in_.bar[0], s.in_.bar[1], s.in_.foo )
always_comb begin : upblk
out = { in_.bar[0], in_.bar[1], in_.foo };
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___bar__0,
input logic [31:0] in___bar__1,
input logic [31:0] in___foo,
output logic [95:0] out,
input logic [0:0] reset
);
logic [31:0] in___bar [0:1];
logic [95:0] in_;
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// s.out = concat( s.in_.bar[0], s.in_.bar[1], s.in_.foo )
always_comb begin : upblk
out = { in___bar[0], in___bar[1], in___foo };
end
assign in___bar[0] = in___bar__0;
assign in___bar[1] = in___bar__1;
assign in_[95:64] = in___bar__1;
assign in_[63:32] = in___bar__0;
assign in_[31:0] = in___foo;
endmodule
"""
do_test( a )
def test_nested_struct( do_test ):
@bitstruct
class C:
woof: Bits32
@bitstruct
class B:
bar: [ Bits32 ]*2
c: C
foo: Bits32
class A( Component ):
def construct( s ):
s.in_ = InPort( B )
s.out = OutPort( Bits96 )
@s.update
def upblk():
s.out = concat( s.in_.bar[0], s.in_.c.woof, s.in_.foo )
a = A()
a._ref_src = \
"""
typedef struct packed {
logic [31:0] woof;
} C;
typedef struct packed {
logic [1:0][31:0] bar;
C c;
logic [31:0] foo;
} B;
module A
(
input logic [0:0] clk,
input B in_,
output logic [95:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// s.out = concat( s.in_.bar[0], s.in_.c.woof, s.in_.foo )
always_comb begin : upblk
out = { in_.bar[0], in_.c.woof, in_.foo };
end
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___bar__0,
input logic [31:0] in___bar__1,
input logic [31:0] in___c__woof,
input logic [31:0] in___foo,
output logic [95:0] out,
input logic [0:0] reset
);
logic [31:0] in___bar [0:1];
logic [31:0] in___c;
logic [127:0] in_;
// PYMTL SOURCE:
//
// @s.update
// def upblk():
// s.out = concat( s.in_.bar[0], s.in_.c.woof, s.in_.foo )
always_comb begin : upblk
out = { in___bar[0], in___c__woof, in___foo };
end
assign in___bar[0] = in___bar__0;
assign in___bar[1] = in___bar__1;
assign in___c[31:0] = in___c__woof;
assign in_[127:96] = in___bar__1;
assign in_[95:64] = in___bar__0;
assign in_[63:32] = in___c__woof;
assign in_[31:0] = in___foo;
endmodule
"""
do_test( a )
def test_lambda_connect( do_test ):
class A( Component ):
def construct( s ):
s.in_ = InPort( Bits32 )
s.out = OutPort( Bits32 )
s.out //= lambda: s.in_ + Bits32(42)
a = A()
a._ref_src = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in_,
output logic [31:0] out,
input logic [0:0] reset
);
// PYMTL SOURCE:
//
// This upblk was generated from a lambda function defined in file \
// .../pymtl3/passes/sverilog/translation/test/SVTranslator_L2_cases_test.py, line 1157:
// s.out //= lambda: s.in_ + Bits32(42)
// def _lambda__s_out(): s.out = s.in_ + Bits32(42)
always_comb begin : _lambda__s_out
out = in_ + 32'd42;
end
endmodule
"""
a._ref_src_yosys = a._ref_src
do_test( a )
#-------------------------------------------------------------------------
# Structural
#-------------------------------------------------------------------------
def test_struct_port( do_test ):
@bitstruct
class B:
foo: Bits32
class A( Component ):
def construct( s ):
s.in_ = InPort( B )
s.out = OutPort( Bits32 )
connect( s.out, s.in_.foo )
a = A()
a._ref_src = \
"""
typedef struct packed {
logic [31:0] foo;
} B;
module A
(
input logic [0:0] clk,
input B in_,
output logic [31:0] out,
input logic [0:0] reset
);
assign out = in_.foo;
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___foo,
output logic [31:0] out,
input logic [0:0] reset
);
logic [31:0] in_;
assign in_[31:0] = in___foo;
assign out = in___foo;
endmodule
"""
do_test( a )
def test_nested_struct_port( do_test ):
@bitstruct
class C:
bar: Bits32
@bitstruct
class B:
foo: Bits32
c: C
class A( Component ):
def construct( s ):
s.in_ = InPort( B )
s.out_foo = OutPort( Bits32 )
s.out_bar = OutPort( Bits32 )
connect( s.out_foo, s.in_.foo )
connect( s.out_bar, s.in_.c.bar )
a = A()
a._ref_src = \
"""
typedef struct packed {
logic [31:0] bar;
} C;
typedef struct packed {
logic [31:0] foo;
C c;
} B;
module A
(
input logic [0:0] clk,
input B in_,
output logic [31:0] out_bar,
output logic [31:0] out_foo,
input logic [0:0] reset
);
assign out_foo = in_.foo;
assign out_bar = in_.c.bar;
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___foo,
input logic [31:0] in___c__bar,
output logic [31:0] out_bar,
output logic [31:0] out_foo,
input logic [0:0] reset
);
logic [31:0] in___c;
logic [63:0] in_;
assign in___c[31:0] = in___c__bar;
assign in_[63:32] = in___foo;
assign in_[31:0] = in___c__bar;
assign out_foo = in___foo;
assign out_bar = in___c__bar;
endmodule
"""
do_test( a )
def test_packed_array( do_test ):
@bitstruct
class B:
foo: [ Bits32 ] * 2
class A( Component ):
def construct( s ):
s.in_ = InPort( B )
s.out = [ OutPort( Bits32 ) for _ in range(2) ]
connect( s.out[0], s.in_.foo[0] )
connect( s.out[1], s.in_.foo[1] )
a = A()
a._ref_src = \
"""
typedef struct packed {
logic [1:0][31:0] foo;
} B;
module A
(
input logic [0:0] clk,
input B in_,
output logic [31:0] out [0:1],
input logic [0:0] reset
);
assign out[0] = in_.foo[0];
assign out[1] = in_.foo[1];
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___foo__0,
input logic [31:0] in___foo__1,
output logic [31:0] out__0,
output logic [31:0] out__1,
input logic [0:0] reset
);
logic [31:0] in___foo [0:1];
logic [63:0] in_;
logic [31:0] out [0:1];
assign in___foo[0] = in___foo__0;
assign in___foo[1] = in___foo__1;
assign in_[63:32] = in___foo__1;
assign in_[31:0] = in___foo__0;
assign out__0 = out[0];
assign out__1 = out[1];
assign out[0] = in___foo[0];
assign out[1] = in___foo[1];
endmodule
"""
do_test( a )
def test_struct_packed_array( do_test ):
@bitstruct
class C:
bar: Bits32
@bitstruct
class B:
c: [ C ] * 2
class A( Component ):
def construct( s ):
s.in_ = InPort( B )
s.out = [ OutPort( Bits32 ) for _ in range(2) ]
connect( s.out[0], s.in_.c[0].bar )
connect( s.out[1], s.in_.c[1].bar )
a = A()
a._ref_src = \
"""
typedef struct packed {
logic [31:0] bar;
} C;
typedef struct packed {
C [1:0] c;
} B;
module A
(
input logic [0:0] clk,
input B in_,
output logic [31:0] out [0:1],
input logic [0:0] reset
);
assign out[0] = in_.c[0].bar;
assign out[1] = in_.c[1].bar;
endmodule
"""
a._ref_src_yosys = \
"""
module A
(
input logic [0:0] clk,
input logic [31:0] in___c__0__bar,
input logic [31:0] in___c__1__bar,
output logic [31:0] out__0,
output logic [31:0] out__1,
input logic [0:0] reset
);
logic [31:0] in___c__bar [0:1];
logic [31:0] in___c [0:1];
logic [63:0] in_;
logic [31:0] out [0:1];
assign in___c__bar[0] = in___c__0__bar;
assign in___c[0][31:0] = in___c__0__bar;
assign in___c__bar[1] = in___c__1__bar;
assign in___c[1][31:0] = in___c__1__bar;
assign in_[63:32] = in___c__1__bar;
assign in_[31:0] = in___c__0__bar;
assign out__0 = out[0];
assign out__1 = out[1];
assign out[0] = in___c__bar[0];
assign out[1] = in___c__bar[1];
endmodule
"""
do_test( a )
def test_long_component_name( do_test ):
@bitstruct
class ThisIsABitStructWithSuperLongName:
bar: Bits32
class A( Component ):
def construct( s, T1, T2, T3, T4, T5, T6, T7 ):
s.in_ = InPort( Bits32 )
s.wire_ = Wire( Bits32 )
s.out = OutPort( Bits32 )
connect( s.in_, s.wire_ )
connect( s.wire_, s.out )
args = [ThisIsABitStructWithSuperLongName]*7
a = A(*args)
a._ref_src = \
"""
module A__a840bd1c84c05ea2
(
input logic [0:0] clk,
input logic [31:0] in_,
output logic [31:0] out,
input logic [0:0] reset
);
logic [31:0] wire_;
assign wire_ = in_;
assign out = wire_;
endmodule
"""
a._ref_src_yosys = a._ref_src
do_test( a )
| 2.21875 | 2 |
examples/do_noise.py | jackthgu/rnnoise | 0 | 12771024 | import os
stream = os.popen('echo Returned output')
output = stream.read()
stream.close()
print(output)
| 2.09375 | 2 |
mcsf/commands/up.py | beremaran/mcsf | 0 | 12771025 | import time
import logging
from mcsf.commands.base import Command
from mcsf.services.backup import BackupService
from mcsf.services.json_storage import JsonStorage
from mcsf.services.ssh import SshService
from mcsf.services.vultr import VultrService
class UpCommand(Command):
def __init__(self):
self.json_storage = JsonStorage()
def handle(self, args):
alias = args.alias
storage_key = 'SERVER_{}'.format(alias)
if not self.json_storage.has('SSHKEYID'):
logging.error('Please configure MCSF first.')
exit(1)
vultr = VultrService()
if self.json_storage.has(storage_key):
logging.error('This alias is in use.')
exit(2)
logging.info('Creating new server ...')
sub_id = vultr.start_new_server()
self.json_storage.set(storage_key, sub_id)
        logging.info('Waiting for the server to come online ...')
server = {}
while True:
try:
server = vultr.get_server_info(sub_id)
time.sleep(5)
if server['main_ip'] == '0.0.0.0':
continue
except KeyError:
continue
break
logging.info('Connecting to server ...')
ssh = SshService(server['main_ip'])
backup_service = BackupService(alias, ssh)
logging.info('Installing Java Runtime Environment ...')
ssh.exec('apt-get update')
ssh.exec('apt-get install -y default-jre')
logging.info('Installing unzip ...')
ssh.exec('apt-get install -y zip unzip')
if backup_service.has_backup():
logging.info('Restoring backup ...')
backup_service.restore()
else:
logging.info('Downloading Minecraft server ...')
ssh.exec('wget https://launcher.mojang.com/v1/objects/3dc3d84a581f14691199cf6831b71ed1296a9fdf/server.jar')
logging.info('Running the server first time ...')
ssh.exec('java -Xmx1024M -Xms1024M -jar server.jar nogui')
logging.info('Accepting EULA ...')
ssh.exec("sed -i 's/false/true/g' eula.txt")
logging.info('Installation completed.')
logging.info('Starting Minecraft server ...')
ssh.exec('nohup java -Xmx1024M -Xms1024M -jar server.jar nogui &')
logging.info('Connect to server:')
logging.info('{}:{}'.format(server['main_ip'], 25565))
logging.info('Please wait while server is initializing!')
| 2.296875 | 2 |
_modules/keystonev3/domains.py | tommycz1/salt-formula-keystone | 3 | 12771026 | <filename>_modules/keystonev3/domains.py
from keystonev3.common import send
from keystonev3.arg_converter import get_by_name_or_uuid_multiple
@send('post')
def domain_create(name, **kwargs):
url = '/domains'
json = {
'domain': kwargs,
}
json['domain']['name'] = name
return url, json
@get_by_name_or_uuid_multiple([('domain', 'domain_id')])
@send('get')
def domain_get_details(domain_id, **kwargs):
url = '/domains/{}'.format(domain_id)
return url, None
@get_by_name_or_uuid_multiple([('domain', 'domain_id')])
@send('patch')
def domain_update(domain_id, **kwargs):
url = '/domains/{}'.format(domain_id)
json = {
'domain': kwargs,
}
return url, json
@get_by_name_or_uuid_multiple([('domain', 'domain_id')])
@send('delete')
def domain_delete(domain_id, **kwargs):
url = '/domains/{}'.format(domain_id)
return url, None
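# Hedged usage sketch: once synced as a Salt execution module, these
# functions would typically be invoked along the lines of (module path and
# argument names are illustrative):
#   salt '*' keystonev3.domain_create name=mydomain description='Test domain'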
| 1.984375 | 2 |
nagios_graphite/__init__.py | SegFaultAX/nagios_graphite | 3 | 12771027 | <reponame>SegFaultAX/nagios_graphite<filename>nagios_graphite/__init__.py
# -*- coding: utf-8 -*-
"""Nagios command for Graphite metrics"""
from nagios_graphite import metadata
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
| 1.203125 | 1 |
rainforest/common/wgs84_ch1903.py | gugerlir/rainforest | 3 | 12771028 | #-*- coding: utf-8 -*-
import numpy as np
class GPSConverter(object):
'''
GPS Converter class which is able to perform convertions between the
CH1903 and WGS84 system.
'''
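    # The expressions below are the swisstopo "approximate" series expansions
    # for converting between CH1903/LV03 and WGS84; they are accurate to
    # roughly one meter, which is typically sufficient for gridded products.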
# Convert CH y/x/h to WGS height
def CHtoWGSheight(self, y, x, h):
        # Auxiliary values (% Bern)
y_aux = (y - 600000) / 1000000
x_aux = (x - 200000) / 1000000
h = (h + 49.55) - (12.60 * y_aux) - (22.64 * x_aux)
return h
# Convert CH y/x to WGS lat
def CHtoWGSlat(self, y, x):
        # Auxiliary values (% Bern)
y_aux = (y - 600000) / 1000000
x_aux = (x - 200000) / 1000000
lat = (16.9023892 + (3.238272 * x_aux)) + \
- (0.270978 * pow(y_aux, 2)) + \
- (0.002528 * pow(x_aux, 2)) + \
- (0.0447 * pow(y_aux, 2) * x_aux) + \
- (0.0140 * pow(x_aux, 3))
# Unit 10000" to 1" and convert seconds to degrees (dec)
lat = (lat * 100) / 36
return lat
# Convert CH y/x to WGS long
def CHtoWGSlng(self, y, x):
        # Auxiliary values (% Bern)
y_aux = (y - 600000) / 1000000
x_aux = (x - 200000) / 1000000
lng = (2.6779094 + (4.728982 * y_aux) + \
+ (0.791484 * y_aux * x_aux) + \
+ (0.1306 * y_aux * pow(x_aux, 2))) + \
- (0.0436 * pow(y_aux, 3))
# Unit 10000" to 1" and convert seconds to degrees (dec)
lng = (lng * 100) / 36
return lng
# Convert decimal angle (° dec) to sexagesimal angle (dd.mmss,ss)
def DecToSexAngle(self, dec):
degree = dec.astype(int)
minute = (np.floor((dec - degree) * 60)).astype(int)
second = (((dec - degree) * 60) - minute) * 60
return degree + ((minute).astype(float) / 100) + (second / 10000)
# Convert sexagesimal angle (dd.mmss,ss) to seconds
    def SexAngleToSeconds(self, dms):
        # Truncate to whole degrees/minutes before extracting the next unit.
        degree = np.floor(dms)
        minute = np.floor((dms - degree) * 100)
        second = (((dms - degree) * 100) - minute) * 100
        return second + (minute * 60) + (degree * 3600)
# Convert sexagesimal angle (dd.mmss) to decimal angle (degrees)
    def SexToDecAngle(self, dms):
        # Truncate to whole degrees/minutes before extracting the next unit.
        degree = np.floor(dms)
        minute = np.floor((dms - degree) * 100)
        second = (((dms - degree) * 100) - minute) * 100
        return degree + (minute / 60) + (second / 3600)
# Convert WGS lat/long (° dec) and height to CH h
def WGStoCHh(self, lat, lng, h):
lat = self.DecToSexAngle(lat)
lng = self.DecToSexAngle(lng)
lat = self.SexAngleToSeconds(lat)
lng = self.SexAngleToSeconds(lng)
        # Auxiliary values (% Bern)
lat_aux = (lat - 169028.66) / 10000
lng_aux = (lng - 26782.5) / 10000
h = (h - 49.55) + (2.73 * lng_aux) + (6.94 * lat_aux)
return h
# Convert WGS lat/long (° dec) to CH x
def WGStoCHx(self, lat, lng):
lat = self.DecToSexAngle(lat)
lng = self.DecToSexAngle(lng)
lat = self.SexAngleToSeconds(lat)
lng = self.SexAngleToSeconds(lng)
        # Auxiliary values (% Bern)
lat_aux = (lat - 169028.66) / 10000
lng_aux = (lng - 26782.5) / 10000
x = ((200147.07 + (308807.95 * lat_aux) + \
+ (3745.25 * lng_aux**2)) + \
+ (76.63 * lat_aux**2)) + \
- (194.56 *lng_aux**2 * lat_aux) + \
+ (119.79 * lat_aux**3)
return x
# Convert WGS lat/long (° dec) to CH y
def WGStoCHy(self, lat, lng):
lat = self.DecToSexAngle(lat)
lng = self.DecToSexAngle(lng)
lat = self.SexAngleToSeconds(lat)
lng = self.SexAngleToSeconds(lng)
        # Auxiliary values (% Bern)
lat_aux = (lat - 169028.66) / 10000
lng_aux = (lng - 26782.5) / 10000
y = (600072.37 + (211455.93 * lng_aux)) + \
- (10938.51 * lng_aux * lat_aux) + \
- (0.36 * lng_aux * lat_aux**2) + \
            - (44.54 * lng_aux**3)
return y
def LV03toWGS84(self, east, north, height):
'''
        Convert LV03 to WGS84. Returns a list containing latitude, longitude,
        and height.
'''
d = []
d.append(self.CHtoWGSlat(east, north))
d.append(self.CHtoWGSlng(east, north))
d.append(self.CHtoWGSheight(east, north, height))
return d
def WGS84toLV03(self, latitude, longitude, ellHeight):
'''
        Convert WGS84 to LV03. Returns a list containing east, north,
        and height.
'''
d = []
d.append(self.WGStoCHy(latitude, longitude))
d.append(self.WGStoCHx(latitude, longitude))
d.append(self.WGStoCHh(latitude, longitude, ellHeight))
return d
converter = GPSConverter()
| 3.390625 | 3 |
mne/viz/tests/test_figure.py | stevemats/mne-python | 1,953 | 12771029 | # Authors: <NAME> <<EMAIL>>
#
# License: Simplified BSD
import pytest
from mne.viz._mpl_figure import _psd_figure
from mne.viz._figure import _get_browser
def test_browse_figure_constructor():
"""Test error handling in MNEBrowseFigure constructor."""
with pytest.raises(TypeError, match='an instance of Raw, Epochs, or ICA'):
_get_browser(inst='foo')
def test_psd_figure_constructor():
"""Test error handling in MNELineFigure constructor."""
with pytest.raises(TypeError, match='an instance of Raw or Epochs, got'):
_psd_figure('foo', *((None,) * 20))
| 1.945313 | 2 |
docsie_universal_importer/providers/dropbox/urls.py | LikaloLLC/docsie-universal-doc-importer | 0 | 12771030 | <filename>docsie_universal_importer/providers/dropbox/urls.py
from docsie_universal_importer.providers.base.urls import default_urlpatterns
from .import_provider import DropboxOAuth2Provider
urlpatterns = default_urlpatterns(DropboxOAuth2Provider)
| 1.75 | 2 |
argus/engine/__init__.py | NickVeld/argus | 0 | 12771031 | <filename>argus/engine/__init__.py
from argus.engine.engine import (
EventEnum,
Events,
Engine,
State,
)
| 1.257813 | 1 |
ansible/roles/exit-scanner/files/ipscan.py | torproject-git/metrics-cloud | 1 | 12771032 | <gh_stars>1-10
#!/usr/bin/env python2
# Copyright 2013-2017 <NAME> <<EMAIL>>
#
# This file is part of exitmap.
#
# exitmap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# exitmap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with exitmap. If not, see <http://www.gnu.org/licenses/>.
"""
Module to detect false negatives for <https://check.torproject.org>.
"""
import sys
import json
import logging
try:
import urllib2
except ImportError:
import urllib.request as urllib2
from util import exiturl
import stem.descriptor.server_descriptor as descriptor
log = logging.getLogger(__name__)
# exitmap needs this variable to figure out which relays can exit to the given
# destination(s).
destinations = [("check.torproject.org", 443)]
def fetch_page(exit_desc):
"""
Fetch check.torproject.org and see if we are using Tor.
"""
data = None
url = exiturl(exit_desc.fingerprint)
try:
data = urllib2.urlopen("https://check.torproject.org/api/ip",
timeout=10).read()
except Exception as err:
log.debug("urllib2.urlopen says: %s" % err)
return
if not data:
return
try:
check_answer = json.loads(data)
except ValueError as err:
log.warning("Couldn't parse JSON over relay %s: %s" % (url, data))
return
check_answer["DescPublished"] = exit_desc.published.isoformat()
check_answer["Fingerprint"] = exit_desc.fingerprint
log.info(json.dumps(check_answer))
def probe(exit_desc, run_python_over_tor, run_cmd_over_tor, **kwargs):
"""
Probe the given exit relay and look for check.tp.o false negatives.
"""
run_python_over_tor(fetch_page, exit_desc)
def main():
"""
Entry point when invoked over the command line.
"""
desc = descriptor.ServerDescriptor("")
desc.fingerprint = "bogus"
desc.address = "0.0.0.0"
fetch_page(desc)
return 0
if __name__ == "__main__":
sys.exit(main())
| 2.515625 | 3 |
build/lib/peewee_utils/main.py | negadive/peewee-utils | 1 | 12771033 | from peewee import ForeignKeyField
_clone_set = lambda s: set(s) if s else set()
def prefetch_to_dict(model, recurse=True, backrefs=True, max_depth=None, exclude=None, __seen=None, __parent=None):
"""
Convert a model instance (and any related objects) to a dictionary, without querying.
:param bool recurse: Whether foreign-keys should be recursed.
:param bool backrefs: Whether lists of related objects should be recursed.
:param exclude: A list (or set) of field instances that should be
excluded from the dictionary.
:param int max_depth: Maximum depth to recurse, value <= 0 means no max.
"""
data = {}
max_depth = -1 if max_depth is None else max_depth
if max_depth == 0:
recurse = False
backrefs = False
if __parent is None:
__parent = model
elif __parent == model:
return __parent._pk
exclude = _clone_set(exclude)
__seen = _clone_set(__seen)
exclude |= __seen
model_class = type(model)
for field in model._meta.sorted_fields:
field_data = model.__data__.get(field.name)
if isinstance(field, ForeignKeyField):
if field_data is not None and recurse:
if (
field.name in model.__rel__
):
__seen.add(field)
rel_obj = getattr(model, field.name)
field_data = prefetch_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
exclude=exclude,
max_depth=max_depth - 1,
__seen=__seen,
__parent=__parent,
)
else:
field_data = None
data[field.name] = field_data
# backref
if backrefs:
for foreign_key in model._meta.backrefs.keys():
related_query = getattr(model, foreign_key.backref)
if not isinstance(related_query, list):
continue
descriptor = getattr(model_class, foreign_key.backref)
if descriptor in exclude or foreign_key in exclude:
continue
accum = []
exclude.add(foreign_key)
for item in related_query:
accum.append(
prefetch_to_dict(
item,
recurse=recurse,
backrefs=backrefs,
exclude=exclude,
max_depth=max_depth - 1,
__parent=__parent,
)
)
data[foreign_key.backref] = accum
return data
| 2.578125 | 3 |
non_essentials/t2response.py | deapplegate/wtgpipeline | 1 | 12771034 | <filename>non_essentials/t2response.py
for detector in ['t2ka','t2kb']:
f = open(detector + '.response').readlines()
wave_0,trans_0,x_0,y_0 = [float(x) for x in f[0][:-1].split(' ')]
wave_1,trans_1,x_1,y_1 = [float(x) for x in f[1][:-1].split(' ')]
waves = []
resps = []
for l in f[2:]:
x,y = [float(p) for p in l[:-1].split(' ')]
waves.append( 10. * ((x-x_0)/(x_1-x_0)*(wave_1-wave_0) + wave_0) )
resps.append( 0.01 * (trans_1 + (y_1-y)/(y_0-y_1)*(trans_1-trans_0)) )
    outfile = open(detector + '.txt', 'w')
    for w, r in zip(waves, resps):
        outfile.write(str(w) + ' ' + str(r) + '\n')
    outfile.close()
import pylab
pylab.plot(waves,resps)
pylab.show()
| 2.703125 | 3 |
iliad/integrators/stateful/softabs_euler.py | JamesBrofos/Iliad | 1 | 12771035 | import copy
from typing import Tuple
import numpy as np
from odyssey.distribution import Distribution
from iliad.integrators.info import SoftAbsLeapfrogInfo
from iliad.integrators.states import SoftAbsLeapfrogState
from iliad.integrators.terminal import cond
from iliad.integrators.fields import riemannian, softabs
def momentum_step(
val: Tuple[np.ndarray, np.ndarray, int],
step_size: float,
state: SoftAbsLeapfrogState,
) -> Tuple[np.ndarray, np.ndarray, int]:
"""Computes the update to the momentum variable using the equations of motion
determined by the SoftAbs metric.
Args:
val: A tuple containing the current guess for the fixed point of the
momentum, the difference between the momentum at this fixed point
iteration and the last, and the number of fixed point iterations
considered so far.
step_size: The integration step-size.
state: The current state of the SoftAbs metric system.
Returns:
pm: The updated momentum variable.
delta: The difference between the updated momentum variable and the
guess.
num_iters: The number of fixed point iterations attempted so far.
"""
pmcand, _, num_iters = val
f = softabs.force(pmcand,
state.grad_log_posterior,
state.jac_hessian,
state.hessian_eigenvals,
state.softabs_eigenvals,
state.softabs_inv_eigenvals,
state.hessian_eigenvecs,
state.alpha)
pm = state.momentum + step_size * f
delta = pm - pmcand
num_iters += 1
return pm, delta, num_iters
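# The callers below iterate `momentum_step` to solve the implicit update
#     p' = p + eps * f(q, p')
# by fixed-point iteration, where f is the SoftAbs force above; iteration
# stops once the largest component of `delta` drops below the convergence
# threshold or the maximum number of iterations is reached.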
def position_step(
val: Tuple[np.ndarray, np.ndarray, int],
step_size: float,
distr: Distribution,
state: SoftAbsLeapfrogState,
) -> Tuple[np.ndarray, np.ndarray, int]:
"""Computes the update to the position variable using the equations of motion
determined by the SoftAbs metric.
Args:
val: A tuple containing the current guess for the fixed point of the
position, the difference between the position at this fixed point
iteration and the last, and the number of fixed point iterations
considered so far.
step_size: The integration step-size.
distr: The distribution that guides the time evolution of the Euclidean
Hamiltonian trajectory.
state: The current state of the SoftAbs metric system.
Returns:
        qn: The updated position variable.
        delta: The difference between the updated position variable and the
            guess.
num_iters: The number of fixed point iterations attempted so far.
"""
qncand, _, num_iters = val
H = distr.hessian(qncand)
l, U, lt, inv_lt, metric, inv_metric = softabs.decomposition(H, state.alpha)
newvel = inv_metric@state.momentum
qn = state.position + step_size * newvel
delta = qn - qncand
num_iters += 1
return qn, delta, num_iters
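# Likewise, `euler_a_single_step` iterates `position_step` to solve the
# implicit position update q' = q + eps * G(q')^{-1} p, re-evaluating the
# SoftAbs metric G at every candidate position q'.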
def euler_a_single_step(
distr: Distribution,
state: SoftAbsLeapfrogState,
info: SoftAbsLeapfrogInfo,
step_size: float,
thresh: float,
max_iters: int,
) -> Tuple[SoftAbsLeapfrogState, SoftAbsLeapfrogInfo]:
"""The Euler-A integrator is a symplectic map that integrates Hamilton's
equations of motion for a general non-separable
Hamiltonian. It updates the position implicitly and then computes an
explicit update to the momentum variable.
Args:
distr: The distribution that guides the time evolution of the Euclidean
Hamiltonian trajectory.
state: An object containing the position and momentum variables of the
state in phase space, and possibly previously computed log-posterior,
metrics, and gradients.
info: An object that keeps track of the number of fixed point iterations
and whether or not integration has been successful.
step_size: Integration step-size.
thresh: Convergence tolerance for fixed point iterations.
max_iters: Maximum number of fixed point iterations.
Returns:
state: An augmented state object with the updated position and momentum
and values for the log-posterior and metric and their gradients.
info: An augmented information object with the updated number of fixed
point iterations and boolean indicator for successful integration.
"""
# Unpack the position and momentum.
qo, po = state.position, state.momentum
num_dims = len(qo)
# Precompute the initial difference vector, which is set to be an array of
# infinite values.
delta = np.inf*np.ones(num_dims)
# Fixed point iteration to solve the implicit update to the position.
val = (qo + step_size*state.velocity, delta, 0)
while cond(val, thresh, max_iters):
val = position_step(val, step_size, distr, state)
qn, delta, num_iters = val
success = np.max(np.abs(delta)) < thresh
# Update the state with the new position and compute the updated momentum.
state.position = qn
state.update(distr)
state.momentum += step_size*state.force
info.num_iters_pos += num_iters
info.success &= success
return state, info
def euler_b_single_step(
distr: Distribution,
state: SoftAbsLeapfrogState,
info: SoftAbsLeapfrogInfo,
step_size: float,
thresh: float,
max_iters: int,
) -> Tuple[SoftAbsLeapfrogState, SoftAbsLeapfrogInfo]:
"""The Euler-B integrator is a symplectic map that integrates Hamilton's
equations of motion for a general non-separable Hamiltonian. It updates the
momentum implicitly and then computes an explicit update to the position
variable.
Args:
distr: The distribution that guides the time evolution of the Euclidean
Hamiltonian trajectory.
state: An object containing the position and momentum variables of the
state in phase space, and possibly previously computed log-posterior,
metrics, and gradients.
info: An object that keeps track of the number of fixed point iterations
and whether or not integration has been successful.
step_size: Integration step-size.
thresh: Convergence tolerance for fixed point iterations.
max_iters: Maximum number of fixed point iterations.
Returns:
state: An augmented state object with the updated position and momentum
and values for the log-posterior and metric and their gradients.
info: An augmented information object with the updated number of fixed
point iterations and boolean indicator for successful integration.
"""
# Unpack the position and momentum.
qo, po = state.position, state.momentum
num_dims = len(qo)
# Precompute the initial difference vector, which is set to be an array of
# infinite values.
delta = np.inf*np.ones(num_dims)
# Fixed point iteration to solve the implicit update to the momentum.
val = (po + step_size*state.force, delta, 0)
while cond(val, thresh, max_iters):
val = momentum_step(val, step_size, state)
pn, delta, num_iters = val
vn = state.inv_metric@pn
success = np.max(np.abs(delta)) < thresh
# Update the state's new position.
state.momentum = pn
state.velocity = vn
state.position += step_size*vn
state.update(distr)
info.num_iters_mom += num_iters
info.success &= success
return state, info
def softabs_euler_a(
state: SoftAbsLeapfrogState,
step_size: float,
num_steps: int,
distr: Distribution,
thresh: float,
max_iters: int,
) -> Tuple[SoftAbsLeapfrogState, SoftAbsLeapfrogInfo]:
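    """Integrate the SoftAbs-metric Hamiltonian dynamics with `num_steps`
    iterations of the Euler-A method, then refresh the velocity, metric
    square root, and metric log-determinant of the final state.
    """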
state = copy.copy(state)
info = SoftAbsLeapfrogInfo()
for i in range(num_steps):
state, info = euler_a_single_step(
distr,
state,
info,
step_size,
thresh,
max_iters,
)
L = np.linalg.cholesky(state.metric)
state.velocity = state.inv_metric.dot(state.momentum)
state.sqrtm_metric = L
state.logdet_metric = 2.0*np.sum(np.log(np.diag(L)))
return state, info
def softabs_euler_b(
state: SoftAbsLeapfrogState,
step_size: float,
num_steps: int,
distr: Distribution,
thresh: float,
max_iters: int,
) -> Tuple[SoftAbsLeapfrogState, SoftAbsLeapfrogInfo]:
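    """Integrate the SoftAbs-metric Hamiltonian dynamics with `num_steps`
    iterations of the Euler-B method, then refresh the velocity, metric
    square root, and metric log-determinant of the final state.
    """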
state = copy.copy(state)
info = SoftAbsLeapfrogInfo()
for i in range(num_steps):
state, info = euler_b_single_step(
distr,
state,
info,
step_size,
thresh,
max_iters,
)
L = np.linalg.cholesky(state.metric)
state.velocity = state.inv_metric.dot(state.momentum)
state.sqrtm_metric = L
state.logdet_metric = 2.0*np.sum(np.log(np.diag(L)))
return state, info
| 2.65625 | 3 |
2020/solutions/day_01.py | Noettore/AdventOfCode | 0 | 12771036 | <gh_stars>0
"""AOC Day 1"""
import pathlib
import time
def read_input(input_path: str) -> tuple:
"""take input file path and return appropriate data structure"""
with open(input_path, 'r') as input_file:
entries = list()
is_present = [False]*2020
for entry in input_file.readlines():
entries.append(int(entry))
is_present[int(entry)-1] = True
return (entries, is_present)
def part1(entries: list, is_present: list) -> int:
"""part1 solver take a list of int and a list of bool and return an int"""
for x in entries:
complement = 2020 - x
if complement > 0 and is_present[complement-1]:
return x * complement
return None
def part2(entries: list, is_present: list) -> int:
"""part2 solver take a list of int and a list of bool and return an int"""
for x, i in enumerate(entries):
for y in entries[i:]:
complement = 2020 - x - y
if complement > 0 and is_present[complement-1]:
return x * y * complement
return None
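# Hedged self-check (not part of the original solution) using the sample
# entries from the puzzle statement: 1721 + 299 = 2020 and 979 + 366 + 675 = 2020.
def _example() -> None:
    sample = [1721, 979, 366, 299, 675, 1456]
    present = [False] * 2020
    for entry in sample:
        present[entry - 1] = True
    assert part1(sample, present) == 514579
    assert part2(sample, present) == 241861950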
def main():
"""main function"""
input_path = str(pathlib.Path(__file__).resolve().parent.parent) + "/inputs/" + str(pathlib.Path(__file__).stem)
entries, is_present = read_input(input_path)
start_time = time.time()
print("Part 1: %d" % part1(entries, is_present))
print("Part 2: %d" % part2(entries, is_present))
end_time = time.time()
print("Execution time: %f" % (end_time-start_time))
if __name__ == "__main__":
main()
| 3.484375 | 3 |
src/mail.py | harrydaihaolin/Stock-Trading-Server | 0 | 12771037 | import logging
logging = logging.getLogger()
import constants
from mailchimp3 import MailChimp
import web_template
from string import Template
client = MailChimp(mc_api=constants.MAILCHIMPAPI, mc_user=constants.MAILCHIMPUSENAME)
campaign_name="trading_alert"
from_name="<NAME>"
reply_to="<EMAIL>"
audience_id="4e7840abaf"
def getAudiencesId():
try:
return client.lists.all(get_all=True, fields="lists.name,lists.id")
except Exception as e:
        logger.error(e)
def campaign_creation_function(campaign_name, audience_id, from_name, reply_to, client=client):
data = {
"recipients" :
{
"list_id": audience_id
},
"settings":
{
"subject_line": campaign_name,
"from_name": from_name,
"reply_to": reply_to
},
"type": "regular"
}
new_campaign = client.campaigns.create(data=data)
return new_campaign
def customized_template(html_code, campaign_id, client=client):
string_template = Template(html_code).safe_substitute()
try:
client.campaigns.content.update(
campaign_id=campaign_id,
data={'message': 'Campaign message', 'html': string_template}
)
except Exception as error:
        logger.error(error)
def send_mail(client=client):
campaign = campaign_creation_function(campaign_name, audience_id, from_name, reply_to)
campaign_id = campaign['id']
try:
customized_template(web_template.html_code, campaign_id)
client.campaigns.actions.send(campaign_id=campaign_id)
except Exception as error:
        logger.error(error)
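if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): builds the
    # campaign from the module-level settings above and sends it immediately.
    send_mail()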
| 2.484375 | 2 |
examples/tensorflow/model.py | nicolaspi/config-state | 2 | 12771038 | from abc import abstractmethod
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import tensorflow as tf
from config_state import builder
from config_state import ConfigField
from config_state import ConfigState
from config_state import DeferredConf
from config_state import register
from config_state import stateproperty
@builder
class Model(ConfigState):
input_shape: Tuple[int] = ConfigField(...,
"Input shape of the model",
type=tuple)
output_units: Optional[int] = ConfigField(...,
"Model's output units count",
type=int)
def __init__(self, config):
super().__init__(config)
self._keras_model = None
@abstractmethod
def _build_keras_model(self) -> tf.keras.Model:
"""Build the keras model"""
@property
def keras_model(self) -> tf.keras.Model:
if self._keras_model is None and not isinstance(
self.input_shape, DeferredConf) and not isinstance(
self.output_units, DeferredConf):
self._keras_model = self._build_keras_model()
return self._keras_model
@stateproperty
def weights(self):
return self.keras_model.get_weights()
@weights.setter
def weights(self, weights):
self.keras_model.set_weights(weights)
@register
class MultiLayerPerceptron(Model):
structure: List[int] = ConfigField([128], "hidden structure of the MLP")
dropout_rate: float = ConfigField(
0.0, "Dropout rate applied on the last "
"hidden layer.")
def _build_keras_model(self) -> tf.keras.Model:
layers = [tf.keras.layers.Flatten(input_shape=self.input_shape)]
for units in self.structure:
layers.append(tf.keras.layers.Dense(units, activation='relu'))
if self.dropout_rate > 0.0:
layers.append(tf.keras.layers.Dropout(self.dropout_rate))
if self.output_units is not None:
layers.append(tf.keras.layers.Dense(self.output_units))
return tf.keras.models.Sequential(layers)
@register
class CNN(Model):
structure: List[Union[int, str]] = ConfigField([32, 'max', 64, 'max', 64],
"Convolutional structure. "
"Conv2D layers units "
"are integers, pooling "
"layers type are str among "
"'max' or 'average'.")
def _build_keras_model(self) -> tf.keras.Model:
layers = [tf.keras.layers.InputLayer(input_shape=self.input_shape)]
for layer in self.structure:
if isinstance(layer, int):
layers.append(tf.keras.layers.Conv2D(layer, (3, 3), activation='relu'))
elif layer == 'max':
layers.append(tf.keras.layers.MaxPooling2D((2, 2)))
elif layer == 'average':
layers.append(tf.keras.layers.AveragePooling2D((2, 2)))
else:
raise ValueError(f"Unknown layer spec {layer}.")
layers.append(tf.keras.layers.Flatten())
if self.output_units is not None:
layers.append(tf.keras.layers.Dense(self.output_units))
return tf.keras.models.Sequential(layers)
@register
class Ensembler(Model):
model: Model = ConfigField(type=Model, doc="The model to be ensembled")
ensemble_size: int = ConfigField(2, "Size of the ensemble", force_type=True)
input_shape = ConfigField(model.input_shape)
output_units = ConfigField(model.output_units)
def _build_keras_model(self) -> tf.keras.Model:
models = [
self.model._build_keras_model() for _ in range(self.ensemble_size)
]
input = tf.keras.layers.InputLayer(input_shape=self.input_shape).output
inputs = tf.keras.layers.Lambda(self.lambda_splitter)(input)
outputs = []
for sub_input, model in zip(inputs, models):
sub_output = model(sub_input)
outputs.append(sub_output)
output = tf.keras.layers.Lambda(self.lambda_merger)(outputs)
return tf.keras.Model(inputs=input, outputs=output)
@tf.function
def lambda_splitter(self, input: tf.Tensor, training: bool = False):
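    # During training the incoming batch is split evenly across the ensemble
    # members; during inference every member sees the full batch and
    # lambda_merger averages the member outputs.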
outputs = []
slice_size = tf.cast(tf.shape(input)[0] // self.ensemble_size, tf.int64)
if training:
tf.assert_equal(tf.math.mod(tf.shape(input)[0], self.ensemble_size), 0)
for i in range(self.ensemble_size):
if training:
outputs.append(input[i * slice_size:(i + 1) * slice_size, :, :])
else:
outputs.append(input)
return outputs
@tf.function
def lambda_merger(self, inputs, training=False):
if training:
output = tf.concat(inputs, axis=0)
else:
# Average during inference
output = tf.add_n(inputs) / tf.cast(len(inputs), inputs[0].dtype)
return output
| 2.484375 | 2 |
code/day3.py | MikeD89/Advent2020 | 0 | 12771039 | <reponame>MikeD89/Advent2020<filename>code/day3.py
from utils import utils
def processItem(line):
tree = '#'
retVal = []
line = line.strip()
for x in line:
retVal.append(x == tree)
return retVal
def calcSlope(data, xStep, yStep):
count = 0
x = 0
for y in range(0, len(data), yStep):
row = data[y]
width = len(row)
if data[y][x % width]:
count += 1
x += xStep
return count
def partOne(data):
return calcSlope(data, 3, 1)
def partTwo(data):
return \
calcSlope(data, 1, 1) * \
calcSlope(data, 3, 1) * \
calcSlope(data, 5, 1) * \
calcSlope(data, 7, 1) * \
calcSlope(data, 1, 2)
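# Hedged self-check (not part of the original solution) using the sample map
# from the puzzle statement: slope (3, 1) hits 7 trees, the slope product is 336.
def _example():
    sample = [
        "..##.......",
        "#...#...#..",
        ".#....#..#.",
        "..#.#...#.#",
        ".#...##..#.",
        "..#.##.....",
        ".#.#.#....#",
        ".#........#",
        "#.##...#...",
        "#...##....#",
        ".#..#...#.#",
    ]
    grid = [processItem(line) for line in sample]
    assert partOne(grid) == 7
    assert partTwo(grid) == 336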
if __name__ == "__main__":
# Load Data
data = utils.load_data("day3.txt")
# Process it
parsedData = []
for line in data:
parsedData.append(processItem(line))
# Do puzzle
print("---- Day 3 ----")
print("Part 1: " + str(partOne(parsedData)))
print("Part 2: " + str(partTwo(parsedData)))
| 3.484375 | 3 |
tests/test_scrape_selector.py | gubschk/CDEWIP | 0 | 12771040 | # -*- coding: utf-8 -*-
"""
test_scrape_selector
~~~~~~~~~~~~~~~~~~~~
Test the HTML/XML Selector.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import unittest
from chemdataextractor.scrape.selector import Selector
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
HTML = '''
<html>
<body>
<div><h1>Heading</h1></div>
<div><a href="page">Link</a></div>
<div><div>Nested</div></div>
</body>
</html>
'''
class TestSelector(unittest.TestCase):
def test_html_xpath(self):
selector = Selector.from_text(HTML)
self.assertEqual(len(selector.xpath('.//div')), 4)
self.assertEqual(selector.xpath('.//a').extract(), ['Link'])
self.assertEqual(selector.xpath('.//a').extract(raw=True), ['<a href="page">Link</a>'])
self.assertEqual(selector.xpath('.//a/text()').extract(), ['Link'])
self.assertEqual(selector.xpath('.//a/@href').extract(), ['page'])
self.assertEqual(selector.xpath('/html/body/div/h1/text()').extract(), ['Heading'])
def test_html_css(self):
selector = Selector.from_text(HTML)
self.assertEqual(len(selector.css('div')), 4)
self.assertEqual(selector.css('a').extract(), ['Link'])
self.assertEqual(selector.css('a').extract(raw=True), ['<a href="page">Link</a>'])
self.assertEqual(selector.css('a::text').extract(), ['Link'])
self.assertEqual(selector.css('a::attr(href)').extract(), ['page'])
self.assertEqual(selector.css('html>body>div>h1::text').extract(), ['Heading'])
if __name__ == '__main__':
unittest.main()
| 2.65625 | 3 |
alphamind/benchmarks/data/winsorize.py | rongliang-tech/alpha-mind | 186 | 12771041 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.data.winsorize import winsorize_normal
def benchmark_winsorize_normal(n_samples: int, n_features: int, n_loops: int) -> None:
print("-" * 60)
print("Starting winsorize normal benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
num_stds = 2
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
_ = winsorize_normal(x, num_stds)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
def impl(x):
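        # clamp each column of x to [mean - num_stds * std, mean + num_stds * std]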
std_values = x.std(axis=0)
mean_value = x.mean(axis=0)
lower_bound = mean_value - num_stds * std_values
upper_bound = mean_value + num_stds * std_values
res = np.where(x > upper_bound, upper_bound, x)
res = np.where(res < lower_bound, lower_bound, res)
return res
start = dt.datetime.now()
for _ in range(n_loops):
_ = impl(x)
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_winsorize_normal_with_group(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting winsorize normal with group-by values benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
num_stds = 2
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = winsorize_normal(x, num_stds, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
def impl(x):
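        # same reference clamp as above, applied per group via the transform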
std_values = x.std(axis=0)
mean_value = x.mean(axis=0)
lower_bound = mean_value - num_stds * std_values
upper_bound = mean_value + num_stds * std_values
res = np.where(x > upper_bound, upper_bound, x)
res = np.where(res < lower_bound, lower_bound, res)
return res
start = dt.datetime.now()
for _ in range(n_loops):
_ = pd.DataFrame(x).groupby(groups).transform(impl)
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_winsorize_normal(3000, 10, 1000)
benchmark_winsorize_normal_with_group(3000, 10, 1000, 30)
| 2.796875 | 3 |
ibidem/advent_of_code/y2020/dec03.py | mortenlj/advent_of_code | 0 | 12771042 | from ibidem.advent_of_code.board import Board
from ibidem.advent_of_code.util import get_input_name
PART1_SLOPE = (3, 1)
PART2_SLOPES = (
(1, 1),
(3, 1),
(5, 1),
(7, 1),
(1, 2),
)
def load():
with open(get_input_name(3, 2020)) as fobj:
return Board.from_string(fobj.read())
def part1():
slope = PART1_SLOPE
return count_slope(slope)
def part2():
result = 1
for slope in PART2_SLOPES:
result *= count_slope(slope)
print(f"Part 2 result: {result}")
def count_slope(slope):
board = load()
x = y = 0
count = 0
while y < board.size_y:
c = board.get(x, y)
if c == "#":
count += 1
board.set(x, y, "X")
else:
board.set(x, y, "O")
x += slope[0]
if x >= board.size_x:
x = x - board.size_x
y += slope[1]
print(f"Counted {count} trees for slope {slope}")
return count
if __name__ == "__main__":
    print(f"Part 1 result: {part1()}")
part2()
| 3.578125 | 4 |
urlShortner/urls.py | erfanhs/Tuky | 3 | 12771043 | <filename>urlShortner/urls.py<gh_stars>1-10
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.views.generic import TemplateView
from main.views import *
urlpatterns = [
# admin panel
path('admin/', admin.site.urls),
# site routes
path('', index, name='home'),
path('registration/', registration, name='registration'),
path('settings/', settings_, name='configs'),
path('report/', report, name='report'),
path('stats/<str:url_id>', stats, name='stats'),
path('verify/<str:verify_id>', verify),
path('<str:url_id>/', handle_link),
# include RESTful API routes
path('api/v1/', include('Api.urls'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 1.890625 | 2 |
src/LPB/LPBVisitor.py | CptSpookz/LPB | 1 | 12771044 | <filename>src/LPB/LPBVisitor.py
# Generated from LPB.g4 by ANTLR 4.7.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .LPBParser import LPBParser
else:
from LPBParser import LPBParser
# This class defines a complete generic visitor for a parse tree produced by LPBParser.
class LPBVisitor(ParseTreeVisitor):
# Visit a parse tree produced by LPBParser#programa.
def visitPrograma(self, ctx:LPBParser.ProgramaContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#decl_imovel.
def visitDecl_imovel(self, ctx:LPBParser.Decl_imovelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#decl_casa.
def visitDecl_casa(self, ctx:LPBParser.Decl_casaContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#decl_apartamento.
def visitDecl_apartamento(self, ctx:LPBParser.Decl_apartamentoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#corpo.
def visitCorpo(self, ctx:LPBParser.CorpoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#decl_andar.
def visitDecl_andar(self, ctx:LPBParser.Decl_andarContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#decl_planta.
def visitDecl_planta(self, ctx:LPBParser.Decl_plantaContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#decl_comodos.
def visitDecl_comodos(self, ctx:LPBParser.Decl_comodosContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#decl_moveis.
def visitDecl_moveis(self, ctx:LPBParser.Decl_moveisContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#id_bloco.
def visitId_bloco(self, ctx:LPBParser.Id_blocoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#var_comodo.
def visitVar_comodo(self, ctx:LPBParser.Var_comodoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#tipo_comodo.
def visitTipo_comodo(self, ctx:LPBParser.Tipo_comodoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#tipo_movel.
def visitTipo_movel(self, ctx:LPBParser.Tipo_movelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by LPBParser#dimensao.
def visitDimensao(self, ctx:LPBParser.DimensaoContext):
return self.visitChildren(ctx)
del LPBParser
| 2.203125 | 2 |
_project/testovani/apps.py | SucheG/cayman-pullup.cz | 0 | 12771045 | from django.apps import AppConfig
class TestovaniConfig(AppConfig):
name = 'testovani'
| 1.070313 | 1 |
utils/perms.py | np-overflow/bytehackz-discord-bot | 0 | 12771046 | <gh_stars>0
from dis_snek.models.application_commands import Permission
from utils.config import BOT_DEV_ROLE, ADMIN_ROLE, PARTICIPANT_ROLE, GUILD
NOT_EVERYBODY = Permission(
id=PARTICIPANT_ROLE,
guild_id=GUILD,
type=1,
permission=False
)
ADMIN_ONLY = Permission(
id=ADMIN_ROLE,
guild_id=GUILD,
type=1,
permission=True
)
BOT_DEV_ONLY = Permission(
id=BOT_DEV_ROLE,
guild_id=GUILD,
type=1,
permission=True
)
| 1.695313 | 2 |
examples/00-mapdl-examples/contact_elements.py | RGPATCHI/pymapdl | 194 | 12771047 | """
.. _ref_contact_example:
Contact Element Example
~~~~~~~~~~~~~~~~~~~~~~~
This example demonstrates how to create contact elements for general
contact.
Begin by launching MAPDL.
"""
from ansys.mapdl import core as pymapdl
mapdl = pymapdl.launch_mapdl()
###############################################################################
# Enter the pre-processor, create a block and mesh it with tetrahedral
# elements.
#
mapdl.prep7()
vnum0 = mapdl.block(0, 1, 0, 1, 0, 0.5)
mapdl.et(1, 187)
mapdl.esize(0.1)
mapdl.vmesh(vnum0)
mapdl.eplot()
###############################################################################
# Next, create a second volume block above the existing block and mesh
# it with quadratic hexahedral elements. Ensure that these blocks do
# not touch by starting it slightly higher than the existing block.
#
# Note how these two blocks do not touch and the mesh is non-conformal.
mapdl.esize(0.09)
mapdl.et(2, 186)
mapdl.type(2)
vnum1 = mapdl.block(0, 1, 0, 1, 0.50001, 1)
mapdl.vmesh(vnum1)
mapdl.eplot()
###############################################################################
# Select all the elements at the intersection between the two blocks
# and generate contact elements.
mapdl.nsel("s", "loc", "z", 0.5, 0.50001)
mapdl.esln("s")
output = mapdl.gcgen("NEW", splitkey="SPLIT", selopt="SELECT")
print(output)
###############################################################################
# Plot the contact element pairs. Note from the command output above
# that the section IDs are 5 and 6.
#
# Here, we plot the element mesh as a wire-frame to show that the
# contact pairs overlap.
mapdl.esel("S", "SEC", vmin=5, vmax=6)
mapdl.eplot(style="wireframe", line_width=3)
| 3.171875 | 3 |
cogs/backend.py | evergreenbots/evergreen | 0 | 12771048 | <filename>cogs/backend.py<gh_stars>0
import discord
import base64
import paramiko
import asyncio
import httpx
from apikeys import ssh_secret, ssh_pass, ssh_user, ssh_ip, amp_pass, amp_url_base, amp_user
from discord.ext import commands
class Backend(commands.Cog, name="Server sided stuff"):
HEADERS = {'Accept': 'application/json, text/javascript',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
def __init__(self, client):
self.client = client
@commands.has_permissions(administrator=True)
@commands.command(hidden=True)
@commands.cooldown(1, 600, type=commands.BucketType.guild)
async def backup(self, ctx):
        key = paramiko.ECDSAKey(data=base64.b64decode(ssh_secret))
        try:
            client = paramiko.SSHClient()
            client.get_host_keys().add(ssh_ip, key.get_name(), key)
            client.connect(ssh_ip, username=ssh_user,
                           password=ssh_pass)
await ctx.send("Backup has started.")
client.exec_command('./scripts/backup_infinitycraft.sh')
await asyncio.sleep(180)
            # may need to bump up later or find a way to see when it's done
        except Exception:
await ctx.send("Backup failed to start or connect to host server.")
client.close()
@commands.has_permissions(administrator=True)
@commands.group(invoke_without_command=True, hidden=True)
@commands.cooldown(1, 5, type=commands.BucketType.user)
async def console(self, ctx):
pass
@commands.has_permissions(administrator=True)
@console.command()
@commands.cooldown(1, 5, type=commands.BucketType.user)
async def send(self, ctx, *, command):
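        # Log in to the AMP panel first to obtain a session ID, then reuse it
        # for the authenticated SendConsoleMessage call below.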
url = amp_url_base + 'Login'
data = str({"username": f"{amp_user}", "password": f"{<PASSWORD>}",
"token": "", "rememberMe": "true", "SESSIONID": ""})
async with httpx.AsyncClient() as client:
r = await client.post(url, headers=self.HEADERS, data=data)
url = amp_url_base + 'SendConsoleMessage'
data = str({"message": f"""{command}""",
"SESSIONID": f"{r.json()['sessionID']}"})
            r = await client.post(url, headers=self.HEADERS, data=data)
@commands.has_permissions(administrator=True)
@console.command()
@commands.cooldown(1, 5, type=commands.BucketType.user)
async def asmarvin(self, ctx, *, message):
url = amp_url_base + 'Login'
color = 'white'
        # Build a Minecraft /tellraw command that speaks as "Marvin" in chat.
        as_marvin_msg = (
            'tellraw @a [{"text": "[", "color": "gold"}, '
            '{"text": "Marvin", "color": "green"}, '
            '{"text": "]", "color": "gold"}, '
            '{"text": ": ", "color": "white"}, '
            f'{{"text": "{message}", "color": "{color}"}}]'
        )
        data = str({"username": f"{amp_user}", "password": f"{amp_pass}",
                    "token": "", "rememberMe": "true", "SESSIONID": ""})
async with httpx.AsyncClient() as client:
            r = await client.post(url, headers=self.HEADERS, data=data)
url = amp_url_base + 'SendConsoleMessage'
data = str({"message": f"""{as_marvin_msg}""",
"SESSIONID": f"{r.json()['sessionID']}"})
            r = await client.post(url, headers=self.HEADERS, data=data)
@backup.error
async def backup_error(self, ctx, error):
embed = discord.Embed(
title=f" Try again in {int(error.retry_after)} seconds.", colour=0xd95454)
embed.set_author(name=f"You are on a cooldown for this command!")
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(embed=embed)
@asmarvin.error
async def asmarvin_error(self, ctx, error):
embed = discord.Embed(
title="Try: m.console asmarvin [content]", colour=0xd95454)
embed.set_author(name=f"{error}")
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send(embed=embed)
elif isinstance(error, commands.BadArgument):
await ctx.send(embed=embed)
embed = discord.Embed(
title=f" Try again in {int(error.retry_after)} seconds.", colour=0xd95454)
embed.set_author(name=f"You are on a cooldown for this command!")
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(embed=embed)
def setup(client):
client.add_cog(Backend(client))
print('@COG: Backend Cog loaded \n---------------------')
| 2.265625 | 2 |
qhana/backend/maxCutSolver.py | UST-QuAntiL/qhana | 1 | 12771049 | <reponame>UST-QuAntiL/qhana
from abc import ABCMeta, abstractmethod
import networkx as nx
"""
Represents an abstract MaxCutSolver class.
"""
class MaxCutSolver(metaclass=ABCMeta):
"""
Instantiates the MaxCutSolver with the graph.
"""
def __init__(self, graph: nx.Graph) -> None:
self.graph = graph
return
"""
Solves the max cut problem and returns the
maximum cut in the format (cutValue, [(node1, node2), ...]),
i.e. the cut value and the list of edges that
correspond to the cut.
"""
@abstractmethod
def solve(self):
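# Hedged illustration (not part of the original file): a brute-force subclass
# that satisfies the solve() contract above. It enumerates every bipartition,
# so it is exponential in the node count and only meant for tiny graphs.
class BruteForceMaxCutSolver(MaxCutSolver):
    def solve(self):
        from itertools import combinations

        nodes = list(self.graph.nodes())
        best_value, best_edges = 0, []
        for r in range(len(nodes) + 1):
            for subset in combinations(nodes, r):
                side = set(subset)
                # edges crossing the (side, rest) partition form the cut
                cut = [(u, v) for u, v in self.graph.edges()
                       if (u in side) != (v in side)]
                value = sum(self.graph[u][v].get("weight", 1) for u, v in cut)
                if value > best_value:
                    best_value, best_edges = value, cut
        return (best_value, best_edges)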
| 2.984375 | 3 |
tests/test_dataloader.py | NiWaRe/deepee | 16 | 12771050 | <filename>tests/test_dataloader.py
from deepee import UniformDataLoader
from torch.utils.data import Dataset
import torch
class SimpleDataset(Dataset):
def __init__(self):
super().__init__()
self.data = torch.arange(1, 100, 1, dtype=torch.int)
def __getitem__(self, idx: int) -> torch.Tensor:
return self.data[idx]
def __len__(self) -> int:
return len(self.data)
ds = SimpleDataset()
dl = UniformDataLoader(ds, 50)
def test_dataloader():
for item in dl:
assert (
len(set(item)) == 50
) # always returns correct batch size and never the same item twice
| 2.796875 | 3 |