def simplerec(number):
    # Recursively print FizzBuzz output from `number` up to high_num.
    if number > high_num:
        return high_num
    if number % 15 == 0:
        print("Fizz Buzz")
    elif number % 5 == 0:
        print("Buzz")
    elif number % 3 == 0:
        print("Fizz")
    else:
        print(number)
    simplerec(number + 1)

low_num = 1
high_num = 100
simplerec(low_num)
|
lst = input("Enter a sentence:\n ").split()
len_list = []
for i in lst:
    len_list.append(len(i))
# Bubble-sort both lists in parallel so each word stays aligned with its length
for i in range(0, len(lst)):
    for j in range(0, len(lst) - i - 1):
        if len_list[j] > len_list[j + 1]:
            len_list[j], len_list[j + 1] = len_list[j + 1], len_list[j]
            lst[j], lst[j + 1] = lst[j + 1], lst[j]
mx = max(len_list)
m = 0
n = 0
# After sorting, the last length strictly below the maximum is the second largest
for i in len_list:
    if i < mx:
        m = i
        n = len_list.index(m)
print("The second largest word(s) is/are :\n")
for i in range(len(len_list)):
    if len_list[i] == m:
        print(lst[i])
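# Illustrative run (made-up input): for "I love writing python scripts" the word
# lengths are [1, 4, 7, 6, 7]; the maximum is 7 and the largest length below it
# is 6, so the program prints "python".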
|
from __future__ import division
import os
import os.path
import sys
import time
class DULED(object):
SIDES = ['left', 'right']
def __init__(self, path):
self._path = path
self.roll()
def run(self):
while True:
used = self.percent_used()
print "%0.0f%% used" % used
self.setpct(used)
time.sleep(10)
def percent_used(self):
try:
stat = os.statvfs(self._path)
except OSError:
self.blink()
raise
return (1 - (stat.f_bavail / stat.f_blocks)) * 100
def setpct(self, percent):
if percent > 95:
self.blink()
return
for i in xrange(0, 8):
cutoff = (i / 8) * 100
if percent > cutoff:
self._ledon(i)
else:
self._ledoff(i)
def blink(self):
for i in xrange(0, 8):
self._ledblink(i)
def roll(self):
for i in xrange(0, 8):
self._ledon(i)
time.sleep(0.250)
for i in xrange(0, 8):
self._ledoff(i)
time.sleep(0.250)
def _ledpath(self, led):
if not 0 <= led <= 7:
raise ValueError("0 <= led <= 7")
side = DULED.SIDES[led // 4]
led = led % 4
return "/sys/class/leds/status:white:%s%d" % (side, led)
def _ledwrite(self, led, state, target="trigger"):
path = self._ledpath(led)
path = os.path.join(path, target)
open(path, "w").write(state)
def _ledon(self, led):
self._ledwrite(led, 'default-on')
def _ledoff(self, led):
self._ledwrite(led, 'none')
def _ledblink(self, led):
self._ledwrite(led, 'timer')
self._ledwrite(led, '250', 'delay_on')
self._ledwrite(led, '250', 'delay_off')
def main(argv):
if len(argv) < 2:
sys.stderr.write("Usage: %s <filesystem>\n" % argv[0])
sys.exit(1)
du = DULED(argv[1])
du.run()
if __name__ == "__main__":
main(sys.argv)
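# Hypothetical invocation sketch (the filename and mount point are placeholders,
# not from the original; requires a device exposing the
# /sys/class/leds/status:white:{left,right}[0-3] LEDs and a Python 2 runtime):
#   sudo python duled.py /srv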
|
#!/usr/bin/env python3
#
# This script shows how to set up a D/Ne SPI scenario in an ITER-like setting.
# The injection can either be separated into one stage with pure D and one stage with pure Ne,
# or be made as a single stage injection with a similar total amount of particles.
#
################################################################################################
import numpy as np
import scipy as sp
import sys
from scipy import integrate
from scipy.special import kn
sys.path.append('../../py/')
from DREAM.DREAMSettings import DREAMSettings
from DREAM.DREAMOutput import DREAMOutput
from DREAM import runiface
import DREAM.Settings.Equations.IonSpecies as Ions
import DREAM.Settings.Solver as Solver
import DREAM.Settings.CollisionHandler as Collisions
import DREAM.Settings.Equations.ElectricField as Efield
import DREAM.Settings.Equations.RunawayElectrons as RE
import DREAM.Settings.Equations.HotElectronDistribution as FHot
import DREAM.Settings.Equations.ColdElectronTemperature as T_cold
import DREAM.Settings.TimeStepper as TimeStep
import DREAM.Settings.Equations.SPI as SPI
import DREAM.Settings.TransportSettings as Transport
import DREAM.Settings.RadialGrid as RGrid
from DREAM.Settings.Equations.ElectricField import ElectricField
from DREAM.Settings.Equations.ColdElectronTemperature import ColdElectronTemperature
# Makes sure this script generates exactly the same result every time the same settings are used
np.random.seed(1)
ds = DREAMSettings()
#######################################
# Set numerical simulation parameters #
#######################################
# Choose which parts of the disruption should be simulated
run_init=True # Includes a single-step run to extract the conductivity and
# another one to set the efield according to the wanted current profile
run_injection_init=True # Accelerate the distribution function to carry the right current
run_injection=True # D or D/Ne injection
run_CQ=True # Second Ne injection (if any), beginning of the CQ
# Specify number of restarts to do during the CQ
nCQ_restart_start=2 # Number of CQ restart to start from
nCQ_restart=1 # How many CQ restarts to run
# Temperature and electron distribution settings
T_selfconsistent = True
hotTailGrid_enabled = False
use_fluid_runaways = True
use_heat_transport=True
use_f_hot_transport=False
transport_CQ_only=False
dBOverB=1e-3/np.sqrt(2)
# Time steps during the various restarts
Tmax_init = 1e-11 # simulation time in seconds
Nt_init = 2 # number of time steps
Tmax_init2 = 3e-3
Nt_init2 = 300
Tmax_injection = 3.4e-3
Nt_injection = 1360
# For single stage
# Tmax_injection = 6e-3
# Nt_injection = 3000
Tmax_CQ = 17e-3
Nt_CQ = 3000
Tmax_CQ_restart = 129.6e-3
Nt_CQ_restart = 40000
# Grid parameters
Nr = 11 # number of radial grid points
Np = 120 # number of momentum grid points
Nxi = 5 # number of pitch grid points
pMax = 3 # maximum momentum in m_e*c
times = [0] # times at which parameters are given
radius = [0, 2] # span of the radial grid
dr=(radius[1]-radius[0])/(Nr+1)
radialgrid = np.linspace(radius[0]+dr/2,radius[-1]-dr/2,Nr)
radius_wall = 2.15 # location of the wall
B0 = 5.3 # magnetic field strength in Tesla
# Set up radial grid
R0 = 6.2
kappa = 1.0
"""
ds.radialgrid.setType(RGrid.TYPE_ANALYTIC_TOROIDAL)
mu0 = sp.constants.mu_0
# R0_set = np.inf
R0_set = R0
a = radius[-1]
rref = np.linspace(0, a, 20)
#IpRef = 15e6 # reference plasma current which generates the poloidal magnetic field (assumed uniform profile)
#psiref = -mu0 * IpRef * (1-(rref/a)**2) * a
psiref = np.zeros(rref.size) # Ignore the poloidal magnetic field component for simplicity
rDelta = np.linspace(0, radius_wall, 20)
Delta = np.linspace(0.2*radius_wall, 0, rDelta.size)
rdelta = np.linspace(0, radius_wall, 20)
delta = np.linspace(0, 0.0, rdelta.size)
ds.radialgrid.setShaping(psi=psiref, rpsi=rref, GOverR0=B0/R0, kappa=kappa, delta=delta, rdelta=rdelta, Delta=Delta, rDelta=rDelta)
ds.radialgrid.setMinorRadius(a)
ds.radialgrid.setWallRadius(radius_wall)
ds.radialgrid.setMajorRadius(R0_set)
ds.radialgrid.setNr(Nr)
"""
# Set up cylindrical radial grid
ds.radialgrid.setB0(B0)
ds.radialgrid.setMinorRadius(radius[-1])
ds.radialgrid.setNr(Nr)
ds.radialgrid.setWallRadius(radius_wall)
Delta = [0,0]
#ds.radialgrid.visualize_analytic(nr=8, ntheta=40)
#######################################
# Set physical simulation parameters #
#######################################
# Set E_field
E_initial = 0.00032 # initial electric field in V/m
E_wall = 0.0 # boundary electric field in V/m
inverse_wall_time = 2 # s^{-1}
efield = E_initial*np.ones((len(times), len(radius)))
ds.eqsys.E_field.setPrescribedData(efield=efield, times=times, radius=radius)
ds.eqsys.E_field.setBoundaryCondition()
# Set runaway generation rates
if use_fluid_runaways:
ds.eqsys.n_re.setCompton(RE.COMPTON_RATE_ITER_DMS)
ds.eqsys.n_re.setAvalanche(RE.AVALANCHE_MODE_FLUID_HESSLOW)
ds.eqsys.n_re.setDreicer(RE.DREICER_RATE_NEURAL_NETWORK)
ds.eqsys.n_re.setTritium(True)
# Set temperature profile
T_initial = 20e3 # initial temperature in eV
temp_prof=(1-0.99*(radialgrid/radialgrid[-1])**2).reshape(1,-1)
temperature = T_initial*temp_prof
ds.eqsys.T_cold.setPrescribedData(temperature=temperature, times=times, radius=radialgrid)
# Settings for the first SPI (presumably consisting mostly of deuterium)
nShardD=3843 # Number of shards
NinjD=2.5e24 # Number of atoms
alpha_maxD=0.17 # Divergence angle
abs_vp_meanD=800 # Mean shard speed
abs_vp_diffD=0.2*abs_vp_meanD # Width of the uniform shard speed distribution
molarFractionNe=0 # Molar fraction of neon (the rest is deuterium)
# The shard velocities are set to zero for now,
# and will be changed later when the injections are supposed to start
if molarFractionNe>0:
ds.eqsys.spi.setParamsVallhagenMSc(nShard=nShardD, Ninj=NinjD, Zs=[1,10], isotopes=[2,0], opacity_modes=[Ions.ION_OPACITY_MODE_GROUND_STATE_OPAQUE, Ions.ION_OPACITY_TRANSPARENT], molarFractions=[1-molarFractionNe,molarFractionNe], ionNames=['D_inj_mix','Ne_inj_mix'], abs_vp_mean=0, abs_vp_diff=0, alpha_max=alpha_maxD, shatterPoint=np.array([radius_wall+Delta[-1],0,0]))
else:
ds.eqsys.spi.setParamsVallhagenMSc(nShard=nShardD, Ninj=NinjD, Zs=[1], isotopes=[2], opacity_modes=[Ions.ION_OPACITY_MODE_GROUND_STATE_OPAQUE], molarFractions=[1], ionNames=['D_inj'], abs_vp_mean=0, abs_vp_diff=0, alpha_max=alpha_maxD, shatterPoint=np.array([radius_wall+Delta[-1],0,0]))
# Settings for the second Neon SPI
nShardNe=50
NinjNe=1e24
alpha_maxNe=0.17
abs_vp_meanNe=200
abs_vp_diffNe=0.2*abs_vp_meanNe
if nShardNe>0:
ds.eqsys.spi.setParamsVallhagenMSc(nShard=nShardNe, Ninj=NinjNe, Zs=[10], isotopes=[0], molarFractions=[1], ionNames=['Ne_inj'], abs_vp_mean=0, abs_vp_diff=0, alpha_max=alpha_maxNe, shatterPoint=np.array([radius_wall+Delta[-1],0,0]))
# Set geometrical parameters used to rescale VpVol when calculating the size of the flux surfaces
if ds.radialgrid.type == RGrid.TYPE_CYLINDRICAL:
ds.eqsys.spi.setVpVolNormFactor(R0*kappa)
elif np.isinf(R0_set) and ds.radialgrid.type == RGrid.TYPE_ANALYTIC_TOROIDAL:
ds.eqsys.spi.setVpVolNormFactor(R0)
# Set models for advancing the shard motion and ablation
ds.eqsys.spi.setVelocity(SPI.VELOCITY_MODE_PRESCRIBED) # Constant prescribed velocities
ds.eqsys.spi.setAblation(SPI.ABLATION_MODE_FLUID_NGS) # Parks NGS formula based on T_cold
# ds.eqsys.spi.setDeposition(SPI.DEPOSITION_MODE_LOCAL_GAUSSIAN) # Use a gaussian deposition kernel of width rcl
ds.eqsys.spi.setDeposition(SPI.DEPOSITION_MODE_LOCAL) # Delta function deposition kernel
# ds.eqsys.spi.setHeatAbsorbtion(SPI.HEAT_ABSORBTION_MODE_LOCAL_FLUID_NGS) # Remove all heat flowing through a disc
# of radius rcl from the background plasma. When assuming a local and immediate
# deposition, the heat absorbed by the ablated material is immediately returned,
# so this term should then be switched off.
# ds.eqsys.spi.setCloudRadiusMode(SPI.CLOUD_RADIUS_MODE_PRESCRIBED_CONSTANT)
# Size of the neutral cloud (only relevant when using gaussian deposition or heat absorption)
rcl=0.01
ds.eqsys.spi.setRclPrescribedConstant(rcl)
# Set background ions
n_D = 1e20
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_DYNAMIC_FULLY_IONIZED, n=0.5*n_D, opacity_mode=Ions.ION_OPACITY_MODE_GROUND_STATE_OPAQUE)
ds.eqsys.n_i.addIon(name='T', Z=1, iontype=Ions.IONS_DYNAMIC_FULLY_IONIZED, n=0.5*n_D, tritium=True, opacity_mode=Ions.ION_OPACITY_MODE_GROUND_STATE_OPAQUE)
# set collision settings
ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_FULL
ds.collisions.collfreq_type = Collisions.COLLFREQ_TYPE_PARTIALLY_SCREENED
#ds.collisions.bremsstrahlung_mode = Collisions.BREMSSTRAHLUNG_MODE_NEGLECT
ds.collisions.bremsstrahlung_mode = Collisions.BREMSSTRAHLUNG_MODE_STOPPING_POWER
#ds.collisions.lnlambda = Collisions.LNLAMBDA_CONSTANT
ds.collisions.lnlambda = Collisions.LNLAMBDA_ENERGY_DEPENDENT
ds.collisions.pstar_mode = Collisions.PSTAR_MODE_COLLISIONAL
# Kinetic grid settings
if not hotTailGrid_enabled:
ds.hottailgrid.setEnabled(False)
else:
ds.hottailgrid.setNxi(Nxi)
ds.hottailgrid.setNp(Np)
ds.hottailgrid.setPmax(pMax)
ds.hottailgrid.setBiuniformGrid(psep=0.07,npsep=70)
nfree_initial, rn0 = ds.eqsys.n_i.getFreeElectronDensity()
ds.eqsys.f_hot.setInitialProfiles(rn0=rn0, n0=nfree_initial, rT0=radialgrid, T0=temperature.flatten())
ds.eqsys.f_hot.setBoundaryCondition(bc=FHot.BC_F_0)
ds.eqsys.f_hot.setAdvectionInterpolationMethod(ad_int=FHot.AD_INTERP_TCDF)
#ds.eqsys.f_hot.enableIonJacobian(False)
# Disable runaway grid
ds.runawaygrid.setEnabled(False)
######################
# Run the simulation #
######################
# Use the nonlinear solver
ds.solver.setType(Solver.NONLINEAR)
ds.solver.setLinearSolver(linsolv=Solver.LINEAR_SOLVER_LU)
ds.solver.setMaxIterations(maxiter = 500)
ds.solver.setTolerance(reltol=0.001)
#ds.solver.setVerbose(True)
ds.other.include('fluid', 'scalar')
filename_ending='nShardD'+str(nShardD)+'NinjD'+str(NinjD)+'nShardNe'+str(nShardNe)+'NinjNe'+str(NinjNe)+'vpD'+str(abs_vp_meanD)+'vpNe'+str(abs_vp_meanNe)+'LyOpaque_hottail'+str(hotTailGrid_enabled)+'heat_transport'+str(use_heat_transport)+'f_hot_transport'+str(use_f_hot_transport)+'dBB'+str(dBOverB)
folder_name='finite_wall_time/with_runaways/'
#if ds.radialgrid.type == RGrid.TYPE_ANALYTIC_TOROIDAL:
# filename_ending = filename_ending + 'analyticB'
#filename_ending = filename_ending +'kappa'+ str(kappa)
#filename_ending = filename_ending +'delta_edge'+ str(delta[-1])
#filename_ending = filename_ending +'Delta_core'+ str(Delta[0])
#filename_ending = filename_ending + 'toroidal'
filename_ending = filename_ending + 'inverse_wall_time'+str(inverse_wall_time)
# Set time stepper
ds.timestep.setTmax(Tmax_init)
ds.timestep.setNt(Nt_init)
if run_init:
# Save settings to HDF5 file
ds.save(folder_name+'init_settings'+filename_ending+'.h5')
runiface(ds, folder_name+'output_init'+filename_ending+'.h5', quiet=False)
#######################
# RESTART set current #
#######################
do=DREAMOutput(folder_name+'output_init'+filename_ending+'.h5')
conductivity=do.other.fluid.conductivity.getData()
jprof=(1-(1-0.001**(1/0.41))*(radialgrid/radialgrid[-1])**2)**0.41
I0=15e6
j0=I0/np.trapz(2*np.pi*radialgrid*jprof,radialgrid)
efield=j0*jprof/conductivity[-1,:]
ds.eqsys.E_field.setPrescribedData(efield=efield, radius=radialgrid)
if run_init:
# Save settings to HDF5 file
ds.save(folder_name+'init_settings_'+filename_ending+'.h5')
runiface(ds, folder_name+'output_init_'+filename_ending+'.h5', quiet=False)
##########################
# RESTART injection init #
##########################
# Used to accelerate the distribution to carry the right current
ds2 = DREAMSettings(ds)
ds2.fromOutput(folder_name+'output_init_'+filename_ending+'.h5')
ds2.timestep.setTmax(Tmax_init2)
ds2.timestep.setNt(Nt_init2)
if run_injection_init:
ds2.save(folder_name+'init2_restart_settings_'+filename_ending+'.h5')
runiface(ds2, folder_name+'output_init2_'+filename_ending+'.h5', quiet=False)
#####################
# RESTART injection #
#####################
# Here we actually make the first injection
ds3 = DREAMSettings(ds2)
# From now on, the temperature and electric field should be calculated self-consistently
if T_selfconsistent:
ds3.eqsys.T_cold.setType(ttype=T_cold.TYPE_SELFCONSISTENT)
#ds3.eqsys.T_cold.setRecombinationRadiation(T_cold.RECOMBINATION_RADIATION_INCLUDED)
ds3.eqsys.E_field.setType(Efield.TYPE_SELFCONSISTENT)
#ds3.eqsys.E_field.setBoundaryCondition(bctype = Efield.BC_TYPE_PRESCRIBED, inverse_wall_time = 0, V_loop_wall_R0 = E_wall*2*np.pi)
ds3.eqsys.E_field.setBoundaryCondition(bctype = Efield.BC_TYPE_SELFCONSISTENT, inverse_wall_time = inverse_wall_time, R0=R0)
ds3.fromOutput(folder_name+'output_init2_'+filename_ending+'.h5',ignore=['v_p'])
# Now make the shards from the first injection start moving
ds3.eqsys.spi.setShardVelocitiesUniform(nShard=None,abs_vp_mean=abs_vp_meanD,abs_vp_diff=abs_vp_diffD,alpha_max=alpha_maxD,nDim=2,add=False, shards=slice(0,nShardD))
# If the first injection contains any impurities, the pressure and current will be
# significantly perturbed already during this injection stage, and then we
# turn on the transport when the shards reach the plasma boundary
if use_heat_transport and molarFractionNe>0:
print('Turn on transport already during first injection')
dBB_mat=np.sqrt(R0)*dBOverB*np.vstack((np.zeros((2,Nr)),np.ones((2,Nr))))
t_edge=(radius_wall-radius[1])/np.max(-ds3.eqsys.spi.vp)
print(t_edge)
ds3.eqsys.T_cold.transport.setMagneticPerturbation(dBB=dBB_mat,r=radialgrid,t=[0,t_edge-1e-8,t_edge,1])
ds3.eqsys.T_cold.transport.setBoundaryCondition(Transport.BC_F_0)
if use_f_hot_transport and hotTailGrid_enabled:
ds3.eqsys.n_re.transport.prescribeDiffusion(drr=sp.constants.pi*sp.constants.c*R0*dBOverB**2*np.vstack((np.zeros((2,Nr)),np.ones((2,Nr)))),r=radialgrid,t=[0,0.00015999,0.00016,1])
ds3.eqsys.n_re.transport.setBoundaryCondition(Transport.BC_F_0)
ds3.eqsys.f_hot.transport.setMagneticPerturbation(dBB=np.sqrt(R0)*dBOverB*np.ones(radialgrid.shape).reshape(1,-1),r=radialgrid,t=[0])
ds3.eqsys.f_hot.transport.setBoundaryCondition(Transport.BC_F_0)
ds3.timestep.setTmax(Tmax_injection)
ds3.timestep.setNt(Nt_injection)
ds3.timestep.setNumberOfSaveSteps(int(Tmax_injection/1e-4))
if run_injection:
ds3.save(folder_name+'injection_restart_settings_'+filename_ending+'.h5')
runiface(ds3, folder_name+'output_restart_injection_'+filename_ending+'.h5', quiet=False)
##############
# Restart CQ #
##############
ds4 = DREAMSettings(ds3)
ds4.fromOutput(folder_name+'output_restart_injection_'+filename_ending+'.h5',ignore=['x_p','v_p'])
ds4.timestep.setTmax(Tmax_CQ)
ds4.timestep.setNt(Nt_CQ)
ds4.timestep.setNumberOfSaveSteps(int(Tmax_CQ/1e-4))
# Turn on transport, if any
if use_heat_transport:
ds4.eqsys.T_cold.transport.setMagneticPerturbation(dBB=np.sqrt(R0)*dBOverB*np.ones(radialgrid.shape).reshape(1,-1),r=radialgrid,t=[0])
ds4.eqsys.T_cold.transport.setBoundaryCondition(Transport.BC_F_0)
if use_f_hot_transport and hotTailGrid_enabled:
ds4.eqsys.n_re.transport.prescribeDiffusion(drr=sp.constants.pi*sp.constants.c*R0*dBOverB**2*np.ones(radialgrid.shape).reshape(1,-1),r=radialgrid,t=[0])
ds4.eqsys.n_re.transport.setBoundaryCondition(Transport.BC_F_0)
ds4.eqsys.f_hot.transport.setMagneticPerturbation(dBB=np.sqrt(R0)*dBOverB*np.ones(radialgrid.shape).reshape(1,-1),r=radialgrid,t=[0])
ds4.eqsys.f_hot.transport.setBoundaryCondition(Transport.BC_F_0)
if run_CQ:
ds4.save(folder_name+'CQ_restart_settings_'+filename_ending+'.h5')
# Set the shards of the second injection into motion and advance them until
# the fastest shards reach the plasma edge
do4=DREAMOutput(folder_name+'output_restart_injection_'+filename_ending+'.h5')
ds4.eqsys.spi.vp=do4.eqsys.v_p.data[-1,:].flatten()
ds4.eqsys.spi.xp=do4.eqsys.x_p.data[-1,:].flatten()
if(nShardNe>0):
ds4.eqsys.spi.setShardVelocitiesUniform(nShard=None,abs_vp_mean=abs_vp_meanNe,abs_vp_diff=abs_vp_diffNe,alpha_max=alpha_maxNe,nDim=3,add=False, shards=slice(-nShardNe,None))
t_edge=(radius_wall-radius[1])/np.max(-ds4.eqsys.spi.vp[-3*nShardNe::3])
ds4.eqsys.spi.xp[-3*nShardNe:]=ds4.eqsys.spi.xp[-3*nShardNe:]+ds4.eqsys.spi.vp[-3*nShardNe:]*t_edge
runiface(ds4, folder_name+'output_restart_CQ_'+filename_ending+'.h5', quiet=False)
#################
# Restart CQ 2+ #
#################
ds5 = DREAMSettings(ds4)
for iCQ in range(nCQ_restart_start-2,nCQ_restart):
if iCQ==0:
ds5.fromOutput(folder_name+'output_restart_CQ_'+filename_ending+'.h5')
else:
ds5.fromOutput(folder_name+'output_restart_CQ'+str(iCQ+2)+'_'+filename_ending+'.h5')
ds5.timestep.setTmax(Tmax_CQ_restart)
ds5.timestep.setNt(Nt_CQ_restart)
ds5.timestep.setNumberOfSaveSteps(int(Tmax_CQ_restart/1e-4))
if iCQ==0:
do5=DREAMOutput(folder_name+'output_restart_CQ_'+filename_ending+'.h5')
else:
do5=DREAMOutput(folder_name+'output_restart_CQ'+str(iCQ+2)+'_'+filename_ending+'.h5')
ds5.eqsys.spi.setInitialData(rp=do5.eqsys.Y_p.calcRadii(t=-1).flatten(),xp=do5.eqsys.x_p.data[-1,:].flatten(),vp=do5.eqsys.v_p.data[-1,:].flatten())
ds5.save(folder_name+'CQ'+str(iCQ+3)+'_restart_settings_'+filename_ending+'.h5')
runiface(ds5, folder_name+'output_restart_CQ'+str(iCQ+3)+'_'+filename_ending+'.h5', quiet=False)
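# A hedged post-processing sketch (commented out; assumes run_CQ above was True so the
# corresponding output file exists, and only touches DREAMOutput fields already used in
# this script):
# do_cq = DREAMOutput(folder_name+'output_restart_CQ_'+filename_ending+'.h5')
# print(do_cq.eqsys.Y_p.calcRadii(t=-1))   # shard radii at the end of the CQ stage
# print(do_cq.eqsys.x_p.data[-1, :])       # shard positions at the end of the CQ stage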
|
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
from math import ceil
def load_data(ds_name, use_node_labels):
node2graph = {}
Gs = []
with open("datasets/%s/%s_graph_indicator.txt"%(ds_name,ds_name), "r") as f:
c = 1
for line in f:
node2graph[c] = int(line[:-1])
if not node2graph[c] == len(Gs):
Gs.append(nx.Graph())
Gs[-1].add_node(c)
c += 1
with open("datasets/%s/%s_A.txt"%(ds_name,ds_name), "r") as f:
for line in f:
edge = line[:-1].split(",")
edge[1] = edge[1].replace(" ", "")
Gs[node2graph[int(edge[0])]-1].add_edge(int(edge[0]), int(edge[1]))
if use_node_labels:
with open("datasets/%s/%s_node_labels.txt"%(ds_name,ds_name), "r") as f:
c = 1
for line in f:
node_label = int(line[:-1])
Gs[node2graph[c]-1].node[c]['label'] = node_label
c += 1
labels = []
with open("datasets/%s/%s_graph_labels.txt"%(ds_name,ds_name), "r") as f:
for line in f:
labels.append(int(line[:-1]))
labels = np.array(labels, dtype = np.float)
return Gs, labels
def process_node_labels(Gs):
node_labels = dict()
for G in Gs:
for node in G.nodes():
if G.node[node]["label"] not in node_labels:
node_labels[G.node[node]["label"]] = len(node_labels)
n_node_labels = len(node_labels)
for G in Gs:
for node in G.nodes():
G.node[node]["label"] = node_labels[G.node[node]["label"]]
return Gs, n_node_labels
def generate_batches(Gs, use_node_labels, n_feat, y, batch_size, radius, device, shuffle=False):
N = len(Gs)
if shuffle:
index = np.random.permutation(N)
else:
index = np.array(range(N), dtype=np.int32)
n_batches = ceil(N/batch_size)
adj_lst = list()
features_lst = list()
idx_lst = list()
y_lst = list()
for i in range(0, N, batch_size):
n_nodes = 0
for j in range(i, min(i+batch_size, N)):
n_nodes += Gs[index[j]].number_of_nodes()
y_batch = np.zeros(min(i+batch_size, N)-i)
idx_batch = np.zeros(min(i+batch_size, N)-i+1, dtype=np.int64)
idx_batch[0] = 0
idx_node = np.zeros(n_nodes, dtype=np.int64)
edges_batch = list()
for _ in range(radius*2):
edges_batch.append(list())
tuple_to_idx = list()
features_batch = list()
for _ in range(radius+1):
tuple_to_idx.append(dict())
features_batch.append(list())
for j in range(i, min(i+batch_size, N)):
n = Gs[index[j]].number_of_nodes()
feat = dict()
if use_node_labels:
for node in Gs[index[j]].nodes():
v = np.zeros(n_feat)
v[Gs[index[j]].node[node]["label"]] = 1
feat[node] = v
else:
for node in Gs[index[j]].nodes():
feat[node] = [Gs[index[j]].degree(node)]
for k,n1 in enumerate(Gs[index[j]].nodes()):
idx_node[idx_batch[j-i]+k] = j-i
ego = nx.ego_graph(Gs[index[j]], n1, radius=radius)
dists = nx.single_source_shortest_path_length(ego, n1)
for n2 in ego.nodes():
tuple_to_idx[dists[n2]][(n1,n2)] = len(tuple_to_idx[dists[n2]])
features_batch[dists[n2]].append(feat[n2])
for n2 in ego.nodes():
for n3 in ego.neighbors(n2):
if dists[n3] > dists[n2]:
edges_batch[2*dists[n2]].append((tuple_to_idx[dists[n2]][(n1,n2)], tuple_to_idx[dists[n2]+1][(n1,n3)]))
elif dists[n3] == dists[n2]:
edges_batch[2*dists[n2]-1].append((tuple_to_idx[dists[n2]][(n1,n2)], tuple_to_idx[dists[n2]][(n1,n3)]))
idx_batch[j-i+1] = idx_batch[j-i] + n
y_batch[j-i] = y[index[j]]
adj_batch = list()
for i in range(2*radius):
if i%2 == 0:
edges = np.vstack(edges_batch[i])
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:,0], edges[:,1])), shape=(len(features_batch[i//2]), len(features_batch[i//2+1])), dtype=np.float32)
else:
if len(edges_batch[i]) == 0:
adj = sp.coo_matrix((len(features_batch[i//2+1]), len(features_batch[i//2+1])), dtype=np.float32)
else:
edges = np.vstack(edges_batch[i])
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:,0], edges[:,1])), shape=(len(features_batch[i//2+1]), len(features_batch[i//2+1])), dtype=np.float32)
adj_batch.append(sparse_mx_to_torch_sparse_tensor(adj).to(device))
for i in range(radius+1):
features_batch[i] = torch.FloatTensor(features_batch[i]).to(device)
adj_lst.append(adj_batch)
features_lst.append(features_batch)
idx_lst.append(torch.LongTensor(idx_node).to(device))
y_lst.append(torch.LongTensor(y_batch).to(device))
return adj_lst, features_lst, idx_lst, y_lst
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
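if __name__ == "__main__":
    # A minimal self-check of the helpers above (not part of the original training
    # pipeline; the tensors below are made up purely for illustration).
    mx = sp.coo_matrix(np.array([[0.0, 1.0], [2.0, 0.0]], dtype=np.float32))
    dense = sparse_mx_to_torch_sparse_tensor(mx).to_dense()
    print(dense)  # should reproduce the 2x2 matrix above
    output = torch.tensor([[0.2, 0.8], [0.9, 0.1]])
    labels = torch.tensor([1, 1])
    print(accuracy(output, labels))  # one of two predictions is correct -> 0.5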
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Hmm the CtrlReturn* does not need to
be a mask, but the CtrlLoop* does.
Maybe split.
"""
CtrlReturnMiss = 0x1 << 1
CtrlReturnLeft = 0x1 << 2
CtrlReturnRight = 0x1 << 3
CtrlReturnFlipRight = 0x1 << 4
CtrlLoopLeft = 0x1 << 5
CtrlLoopRight = 0x1 << 6
_ctrl_index = {
CtrlReturnMiss : 1,
CtrlReturnLeft : 2,
CtrlReturnRight : 3,
CtrlReturnFlipRight : 4,
CtrlLoopLeft : 5,
CtrlLoopRight : 6,
}
_ctrl_color = {
0:'c',
1:'k',
2:'m',
3:'b',
4:'r',
5:'y',
6:'r'
}
def ctrl_index(ctrl):
return _ctrl_index[ctrl]
def desc_ctrl(ctrl):
s = ""
if ctrl & CtrlReturnMiss: s+= "CtrlReturnMiss "
if ctrl & CtrlReturnLeft: s+= "CtrlReturnLeft "
if ctrl & CtrlReturnRight: s+= "CtrlReturnRight "
if ctrl & CtrlReturnFlipRight: s+= "CtrlReturnFlipRight "
if ctrl & CtrlLoopLeft: s+= "CtrlLoopLeft "
if ctrl & CtrlLoopRight: s+= "CtrlLoopRight "
return s
def desc_ctrl_cu(seqcu, label=""):
try:
ret = "\n".join([label]+[" %s : %2d %5d : %6d : %s " % (_ctrl_color[index], index, 0x1 << int(index), count, desc_ctrl(0x1 << int(index))) for index, count in seqcu])
except KeyError:
ret = repr(seqcu)
pass
return ret
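if __name__ == '__main__':
    # Small illustration of the flag helpers above (the combination is arbitrary).
    ctrl = CtrlReturnLeft | CtrlLoopRight
    print(desc_ctrl(ctrl))           # -> "CtrlReturnLeft CtrlLoopRight "
    print(ctrl_index(CtrlLoopLeft))  # -> 5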
|
#
# JiWER - Jitsi Word Error Rate
#
# Copyright @ 2018 - present 8x8, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file implements methods for calculating a number of similarity error
measures between a ground-truth sentence and a hypothesis sentence, which are
commonly used to measure the performance for an automatic speech recognition
(ASR) system.
The following measures are implemented:
- Word Error Rate (WER), which is where this library got its name from. This
has long been (and arguably still is) the de facto standard for computing
ASR performance.
- Match Error Rate (MER)
- Word Information Lost (WIL)
- Word Information Preserved (WIP)
"""
import Levenshtein
from typing import List, Mapping, Tuple, Union
import jiwer.transforms as tr
__all__ = ["wer", "mer", "wil", "wip", "compute_measures", "ops"]
################################################################################
# Implementation of the WER method, exposed publicly
_default_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.SentencesToListOfWords(),
tr.RemoveEmptyStrings(),
]
)
_standardize_transform = tr.Compose(
[
tr.ToLowerCase(),
tr.ExpandCommonEnglishContractions(),
tr.RemoveKaldiNonWords(),
tr.RemoveWhiteSpace(replace_by_space=True),
]
)
def ops(
truth: Union[str, List[str]],
hypothesis: Union[str, List[str]],
truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
**kwargs
):
"""
    Return the Levenshtein edit operations between truth and hypothesis.
    See `compute_measures` for details on the arguments.
"""
all_ops = get_operations(
truth, hypothesis, truth_transform, hypothesis_transform, **kwargs
)
return all_ops
def wer(
truth: Union[str, List[str]],
hypothesis: Union[str, List[str]],
truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
**kwargs
) -> float:
"""
Calculate word error rate (WER) between a set of ground-truth sentences and
a set of hypothesis sentences.
See `compute_measures` for details on the arguments.
:return: WER as a floating point number
"""
measures = compute_measures(
truth, hypothesis, truth_transform, hypothesis_transform, **kwargs
)
return measures["wer"]
def mer(
truth: Union[str, List[str]],
hypothesis: Union[str, List[str]],
truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
**kwargs
) -> float:
"""
Calculate match error rate (MER) between a set of ground-truth sentences and
a set of hypothesis sentences.
See `compute_measures` for details on the arguments.
:return: MER as a floating point number
"""
measures = compute_measures(
truth, hypothesis, truth_transform, hypothesis_transform, **kwargs
)
return measures["mer"]
def wip(
truth: Union[str, List[str]],
hypothesis: Union[str, List[str]],
truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
**kwargs
) -> float:
"""
Calculate Word Information Preserved (WIP) between a set of ground-truth
sentences and a set of hypothesis sentences.
See `compute_measures` for details on the arguments.
:return: WIP as a floating point number
"""
measures = compute_measures(
truth, hypothesis, truth_transform, hypothesis_transform, **kwargs
)
return measures["wip"]
def wil(
truth: Union[str, List[str]],
hypothesis: Union[str, List[str]],
truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
**kwargs
) -> float:
"""
Calculate Word Information Lost (WIL) between a set of ground-truth sentences
and a set of hypothesis sentences.
See `compute_measures` for details on the arguments.
:return: WIL as a floating point number
"""
measures = compute_measures(
truth, hypothesis, truth_transform, hypothesis_transform, **kwargs
)
return measures["wil"]
def compute_measures(
truth: Union[str, List[str]],
hypothesis: Union[str, List[str]],
truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
**kwargs
) -> Mapping[str, float]:
"""
Calculate error measures between a set of ground-truth sentences and a set of
hypothesis sentences.
The set of sentences can be given as a string or a list of strings. A string
input is assumed to be a single sentence. A list of strings is assumed to be
multiple sentences. Each word in a sentence is separated by one or more spaces.
A sentence is not expected to end with a specific token (such as a `.`). If
the ASR does delimit sentences it is expected that these tokens are filtered out.
The optional `transforms` arguments can be used to apply pre-processing to
respectively the ground truth and hypotheses input. Note that the transform
should ALWAYS include `SentencesToListOfWords`, as that is the expected input.
:param truth: the ground-truth sentence(s) as a string or list of strings
:param hypothesis: the hypothesis sentence(s) as a string or list of strings
:param truth_transform: the transformation to apply on the truths input
:param hypothesis_transform: the transformation to apply on the hypothesis input
:return: a dict with WER, MER, WIP and WIL measures as floating point numbers
"""
# deal with old API
if "standardize" in kwargs:
truth = _standardize_transform(truth)
hypothesis = _standardize_transform(hypothesis)
if "words_to_filter" in kwargs:
t = tr.RemoveSpecificWords(kwargs["words_to_filter"])
truth = t(truth)
hypothesis = t(hypothesis)
# Preprocess truth and hypothesis
truth, hypothesis = _preprocess(
truth, hypothesis, truth_transform, hypothesis_transform
)
# Get the operation counts (#hits, #substitutions, #deletions, #insertions)
H, S, D, I = _get_operation_counts(truth, hypothesis)
# Compute Word Error Rate
wer = float(S + D + I) / float(H + S + D)
# Compute Match Error Rate
mer = float(S + D + I) / float(H + S + D + I)
# Compute Word Information Preserved
wip = (float(H) / len(truth)) * (float(H) / len(hypothesis)) if hypothesis else 0
# Compute Word Information Lost
wil = 1 - wip
return {
"wer": wer,
"mer": mer,
"wil": wil,
"wip": wip,
}
def get_operations(
truth: Union[str, List[str]],
hypothesis: Union[str, List[str]],
truth_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = _default_transform,
**kwargs
):
"""
    Return the edit operations needed to transform the truth into the hypothesis.
    See `compute_measures` for details on the arguments.
"""
# deal with old API
if "standardize" in kwargs:
truth = _standardize_transform(truth)
hypothesis = _standardize_transform(hypothesis)
if "words_to_filter" in kwargs:
t = tr.RemoveSpecificWords(kwargs["words_to_filter"])
truth = t(truth)
hypothesis = t(hypothesis)
# Preprocess truth and hypothesis
truth, hypothesis = _preprocess(
truth, hypothesis, truth_transform, hypothesis_transform
)
# Get the operation counts (#hits, #substitutions, #deletions, #insertions)
operations = _get_editops(truth, hypothesis)
return operations
################################################################################
# Implementation of helper methods
def _preprocess(
truth: Union[str, List[str]],
hypothesis: Union[str, List[str]],
truth_transform: Union[tr.Compose, tr.AbstractTransform],
hypothesis_transform: Union[tr.Compose, tr.AbstractTransform],
) -> Tuple[str, str]:
"""
Pre-process the truth and hypothesis into a form that Levenshtein can handle.
:param truth: the ground-truth sentence(s) as a string or list of strings
:param hypothesis: the hypothesis sentence(s) as a string or list of strings
:param truth_transform: the transformation to apply on the truths input
:param hypothesis_transform: the transformation to apply on the hypothesis input
:return: the preprocessed truth and hypothesis
"""
# Apply transforms. By default, it collapses input to a list of words
truth = truth_transform(truth)
hypothesis = hypothesis_transform(hypothesis)
# raise an error if the ground truth is empty
if len(truth) == 0:
raise ValueError("the ground truth cannot be empty")
# tokenize each word into an integer
vocabulary = set(truth + hypothesis)
word2char = dict(zip(vocabulary, range(len(vocabulary))))
truth_chars = [chr(word2char[w]) for w in truth]
hypothesis_chars = [chr(word2char[w]) for w in hypothesis]
truth_str = "".join(truth_chars)
hypothesis_str = "".join(hypothesis_chars)
return truth_str, hypothesis_str
def _get_operation_counts(
source_string: str, destination_string: str
) -> Tuple[int, int, int, int]:
"""
Check how many edit operations (delete, insert, replace) are required to
transform the source string into the destination string. The number of hits
can be given by subtracting the number of deletes and substitutions from the
total length of the source string.
:param source_string: the source string to transform into the destination string
:param destination_string: the destination to transform the source string into
:return: a tuple of #hits, #substitutions, #deletions, #insertions
"""
editops = Levenshtein.editops(source_string, destination_string)
substitutions = sum(1 if op[0] == "replace" else 0 for op in editops)
deletions = sum(1 if op[0] == "delete" else 0 for op in editops)
insertions = sum(1 if op[0] == "insert" else 0 for op in editops)
hits = len(source_string) - (substitutions + deletions)
return hits, substitutions, deletions, insertions
def _get_editops(
source_string: str, destination_string: str
):
editops = Levenshtein.editops(source_string, destination_string)
# type(editops)
# substitutions = sum(1 if op[0] == "replace" else 0 for op in editops)
# deletions = sum(1 if op[0] == "delete" else 0 for op in editops)
# insertions = sum(1 if op[0] == "insert" else 0 for op in editops)
# hits = len(source_string) - (substitutions + deletions)
return editops
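if __name__ == "__main__":
    # A minimal worked example of the measures above (the sentences are made up;
    # assumes the jiwer package and python-Levenshtein are installed).
    truth = "the quick brown fox"
    hypothesis = "the quick brown dog"
    print(compute_measures(truth, hypothesis))
    # One substitution out of four reference words, so wer == mer == 0.25 here.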
|
# Number theory.
# The key is to think about the relationships between divisors.
# Warm-up: enumerate the divisors of every number from 1 to n.
n = 10000
divides = [[] for _ in range(n + 1)]  # divides[j] collects the divisors of j
for i in range(1, n + 1):
    for j in range(i, n + 1, i):
        divides[j].append(i)
from collections import defaultdict
from math import gcd
from typing import List

class Solution:
    def countPairs(self, nums: List[int], k: int) -> int:
        divisors = []
        d = 1
        while d * d <= k:  # pre-compute all divisors of k
            if k % d == 0:
                divisors.append(d)
                if d * d < k:
                    divisors.append(k // d)
            d += 1
        ans = 0
        cnt = defaultdict(int)
        for v in nums:
            # count earlier numbers divisible by the smallest factor still needed
            ans += cnt[k // gcd(v, k)]
            for d in divisors:
                if v % d == 0:
                    cnt[d] += 1
        return ans
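if __name__ == "__main__":
    # Quick check with made-up input: among [1, 2, 3, 4] with k = 2, the pairs whose
    # product is divisible by 2 are (1,2), (1,4), (2,3), (2,4), (3,4) -> 5.
    print(Solution().countPairs([1, 2, 3, 4], 2))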
|
# Generated by Django 2.2.4 on 2019-08-31 00:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='video',
name='filepath',
field=models.FileField(upload_to=''),
),
migrations.AlterField(
model_name='video',
name='thumbnail',
field=models.FileField(upload_to=''),
),
]
|
import pandas as pd
import pysam
import argparse
import pdb
def parse_args():
parser=argparse.ArgumentParser(description="compute GC content in sliding windows across a reference genome")
parser.add_argument("--chrom_sizes")
parser.add_argument("--ref_fasta")
parser.add_argument("--out_prefix")
parser.add_argument("--region_size",type=int,default=1000)
parser.add_argument("--stride",type=int,default=50)
parser.add_argument("--output_format",choices=['tsv','hdf5'],help="store output track as either a .tsv or an .hdf5 file")
return parser.parse_args()
def main():
args=parse_args()
ref=pysam.FastaFile(args.ref_fasta)
chrom_sizes=pd.read_csv(args.chrom_sizes,header=None,sep='\t')
region_dict=dict()
for index,row in chrom_sizes.iterrows():
chrom=row[0]
print(chrom)
chrom_size=row[1]
for bin_start in range(0,chrom_size,args.stride):
if bin_start%1000000==0:
print(str(bin_start))
bin_end=bin_start+args.region_size
seq=ref.fetch(chrom,bin_start,bin_end).upper()
g=seq.count('G')
c=seq.count('C')
gc=g+c
fract=round(gc/args.region_size,2)
region_dict[tuple([chrom,bin_start,bin_end])]=fract
#generate pandas df from dict
print("making df")
df=pd.DataFrame.from_dict(region_dict,orient='index')
print("made df")
new_index=pd.MultiIndex.from_tuples(df.index, names=('CHR', 'START','END'))
df = pd.DataFrame(df[0], new_index)
if args.output_format=="tsv":
df.to_csv(args.out_prefix+".tsv",sep='\t', header=True, index=True, index_label=['CHROM','START','END'])
else:
assert args.output_format=="hdf5"
df.to_hdf(args.out_prefix+".hdf5",key='data',mode='w',append=False,format='table',min_itemsize=30)
if __name__=="__main__":
main()
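# Hypothetical invocation sketch (script and file names below are placeholders,
# not from the original; the arguments match the argparse definitions above):
#   python get_gc_content.py --chrom_sizes hg38.chrom.sizes --ref_fasta hg38.fa \
#       --out_prefix gc_hg38 --region_size 1000 --stride 50 --output_format tsv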
|
from django.shortcuts import (
render, redirect
)
from django.http import HttpResponse
from django.views import View
from django.contrib import messages
from account.forms import (
UserCreationForm, PasswordChangeForm
)
from django.contrib.auth import (
authenticate, login, logout, update_session_auth_hash
)
from django.contrib.auth.views import logout_then_login
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from .forms import (
CustomerLoginForm, ReviewForm
)
from .models import Review
from mainapp.models import Product
# Create your views here.
class ReviewView(View):
def get(self, request, *args, **kwargs):
product_id = kwargs.get('product_id', 0)
product = Product.objects.filter(product_id = product_id)[0]
review = Review.objects.filter(product_id = product_id)
if request.user.is_authenticated:
form = ReviewForm({'product':product})
return render(request, "customer/addreview.html", {'form':form, 'product':product})
else:
form = ReviewForm({'review_content':"You are not authorised to access this form."})
return render(request, "customer/addreview.html", {'form':form, 'product':product})
    def post(self, request, *args, **kwargs):
        # Look up the product first so it is available even when the form is invalid
        product_id = kwargs.get('product_id', 0)
        product = Product.objects.filter(product_id=product_id)[0]
        form = ReviewForm(request.POST)
        if form.is_valid():
            review = form.save(commit=False)
            review.user = request.user
            review.product = product
            review.save()
            return redirect(f'/productDetail/{product.product_id}')
        return render(request, "customer/addreview.html", {'form': form, 'product': product})
class ReplywView(View):
def get(self, request, *args, **kwargs):
previous_review_id = kwargs.get('review_id', 0)
previous_review = Review.objects.filter(review_id = previous_review_id)[0]
if request.user.is_authenticated:
form = ReviewForm()
return render(request, "customer/addreply.html", {'form':form, 'previous_review':previous_review})
else:
form = ReviewForm({'review_content':"You are not authorised to access this form."})
return render(request, "customer/addreply.html", {'form':form, 'previous_review':previous_review})
    def post(self, request, *args, **kwargs):
        # Look up the parent review first so it is available even when the form is invalid
        previous_review_id = kwargs.get('review_id', 0)
        previous_review = Review.objects.filter(review_id=previous_review_id)[0]
        form = ReviewForm(request.POST)
        if form.is_valid():
            review = form.save(commit=False)
            review.user = request.user
            review.previous_review = previous_review
            review.save()
            product = self._traverse_back_to_product(previous_review)
            return redirect(f'/productDetail/{product.product_id}')
        return render(request, "customer/addreply.html", {'form': form, 'previous_review': previous_review})
def _traverse_back_to_product(self, previous_review):
if previous_review.product:
return previous_review.product
return self._traverse_back_to_product(previous_review.previous_review)
class ChangePasswordView(View):
def get(self, request, *args, **kwargs):
form = PasswordChangeForm()
return render(request, "customer/change_password.html", {'form':form})
def post(self, request, *args, **kwargs):
form = PasswordChangeForm(request.POST, Request=request)
if form.is_valid():
useremail = request.user.email
password = form.cleaned_data.get("password")
user = authenticate(request, email=useremail, password=password)
if user is not None:
user.set_password(form.cleaned_data["password1"])
user.save()
update_session_auth_hash(request, user) # Important!
messages.success(request, 'Your password was successfully updated!')
return redirect('/')
return render(request, "customer/change_password.html", {'form':form})
class RegisterView(View):
def get(self, request, *args, **kwargs):
if request.user.is_authenticated:
messages.success(request,f"You have been already logged in using user {request.user.username}")
return redirect('/')
form = UserCreationForm()
return render(request, "customer/register.html", {'form': form})
def post(self, request, *args, **kwargs):
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
user_name = user.username
messages.success(request, f'{user_name} is registered successfully.')
user = authenticate(request, email=user.email, password=form.cleaned_data.get("password1"))
if user is not None:
login(request, user)
messages.success(request, f'{user_name} logged in successfully.')
return redirect('/')
return render(request, "customer/register.html", {'form': form})
class LoginView(View):
def get(self, request, *args, **kwargs):
if request.user.is_authenticated:
messages.success(request,f"You have been already logged in using user {request.user.username}")
return redirect('/')
form = CustomerLoginForm()
return render(request, "customer/login.html", {"form":form})
def post(self, request, *args, **kwargs):
form = CustomerLoginForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
user = authenticate(request, email=email, password=password)
if user is not None:
login(request, user)
messages.success(request, f"user loged in successfuly {user.username}")
return redirect('/')
else:
messages.error(request, "Invalid User name or password.")
return render(request, "customer/login.html", {"form":form})
@login_required(login_url='/customer/login/')
def logout_view(request):
return logout_then_login(request, login_url='/customer/login/')
|
"""Installs and configures Treadmill locally.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import click
from treadmill import bootstrap
from treadmill import cli
from treadmill import context
_LOGGER = logging.getLogger(__name__)
def init():
"""Return top level command handler."""
@click.command()
@click.option('--gssapi', help='use gssapi auth.', is_flag=True,
default=False)
@click.option('-p', '--rootpw',
help='password hash, generated by slappass -s <pwd>.')
@click.option('-o', '--owner', help='root user.', required=True)
@click.option('--env', help='Treadmill environment',
required=True, envvar='TREADMILL_ENV')
@click.option('-s', '--suffix',
help='suffix (e.g dc=example,dc=com).',
required=False)
@click.option('-u', '--uri', help='uri, e.g: ldap://...:20389',
required=True)
@click.option('-m', '--masters', help='list of masters.',
type=cli.LIST)
@click.option('--first-time/--no-first-time', is_flag=True, default=False)
@click.option('--run/--no-run', is_flag=True, default=False)
@click.pass_context
def openldap(ctx, gssapi, rootpw, owner, suffix, uri, masters,
first_time, run, env):
"""Installs Treadmill Openldap server."""
dst_dir = ctx.obj['PARAMS']['dir']
profile = ctx.obj['PARAMS'].get('profile')
run_script = None
if run:
run_script = os.path.join(dst_dir, 'bin', 'run.sh')
ctx.obj['PARAMS']['env'] = env
ctx.obj['PARAMS']['treadmillid'] = owner
if uri:
ctx.obj['PARAMS']['uri'] = uri
if rootpw:
ctx.obj['PARAMS']['rootpw'] = rootpw
if gssapi:
ctx.obj['PARAMS']['gssapi'] = gssapi
ctx.obj['PARAMS']['rootpw'] = ''
if masters:
ctx.obj['PARAMS']['masters'] = masters
else:
ctx.obj['PARAMS']['masters'] = []
if first_time:
ctx.obj['PARAMS']['first_time'] = first_time
if suffix:
ctx.obj['PARAMS']['suffix'] = suffix
else:
ctx.obj['PARAMS']['suffix'] = context.GLOBAL.ldap_suffix
bootstrap.install(
'openldap',
dst_dir,
ctx.obj['PARAMS'],
run=run_script,
profile=profile,
)
return openldap
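# Hypothetical invocation sketch (the top-level command group that mounts this
# handler, and the --dir it provides, are not shown in this file, so that part is
# a placeholder; the remaining options match the click definitions above):
#   <treadmill install command> --dir /var/tmp/treadmill-openldap openldap \
#       --owner treadmld --env dev -u ldap://ldap-host:20389 \
#       -s dc=example,dc=com --first-time --run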
|
#!/usr/bin/env python
"""
v0.1 Tool used for initiating an IPython (v0.9.1+) cluster using
multiple nodes. This actually uses ipcontroller and ipengine
rather than the obsolete ipcluster.
NOTE: if installing ipython controller (and engines) on a new computer,
- need to rm ~/.ipython/security/ipcontroller-engine.furl
prior to running ipython_cluster_setup.py
NOTE: Requires prior setup of ssh tunneling for:
- from controller computer to engine client computers
- from engine client computers to controller computer
TO SETUP a remote, ssh tunneled ipengine node:
- install OpenSSL for python
apt-get install python-openssl
- Make a passwordless ssh tunnel to the ipengine client
so that you can scp (or let this script do it by uncommenting the relevant line)
~/.ipython/ipcontroller-engine.furl to the ipengine client.
- SETUP an ssh tunnel on the ipengine client for controller/engine (worms2)
ssh -L 23610:localhost:23611 pteluser@lyra.berkeley.edu
- SETUP an ssh tunnel on the ipcontroller computer for controller/engine (transx)
ssh -R 23611:localhost:23612 pteluser@lyra.berkeley.edu
- EDIT the ipcontroller-engine.furl on the ipengine client computer
so that the 2nd IP is 127.0.0.1 and the portforwared port. Eg:
e.g.: ipcontroller computer ipcontroller-engine.furl is:
pb://3r___rcx@127.0.0.1:23612,192.168.1.25:23612/ec___yi
and once edited on ipengine client computer, looks like:
pb://3r___rcx@127.0.0.1:23612,127.0.0.1:23610/ec___yi
- Also ensure that TCP environ var setting will work during mec.execute():
os.environ['TCP_SEX_BIN']=os.path.expandvars('$HOME/bin/sex')
os.environ['TCP_WCSTOOLS_DIR']=os.path.expandvars('$HOME/src/install/wcstools-3.6.4/bin/')
os.environ['TCP_DIR']=os.path.expandvars('$HOME/src/TCP/')
os.environ['TCP_DATA_DIR']=os.path.expandvars('$HOME/scratch/TCP_scratch/')
- NOW you are ready to run ipython_cluster_setup.py, which will
kill and start the ipcontroller, start ipengines,
and run a TaskClient test.
TODO: Eventually have this script run by testsuite.py?
TODO: Is ipcontroller doing some logging?
- Is this essentially a memory leak/growth?
TODO: EC2 connection?
"""
import os, sys
import time
import threading # used for scp/ssh tasks.
from IPython.kernel import client
class Setup_System:
""" Setup Ipython controller and engines.
"""
def __init__(self, pars={}):
self.pars = pars
def kill_existing_controller(self):
"""
"""
# Default is SIGTERM
exec_str = "pkill -f .*ipcontroller.*"
#KILLS SCREEN NOOOO! #exec_str = "pkill -f .*ipengine.*"
os.system(exec_str)
def initialize_controller(self):
""" initialize controller on local machine.
"""
# -y -x flags turn off engine and client security, which is ok since SSH tunnels are used.
#exec_str = "ipcontroller -y -x --client-port=%d --engine-port=%d &" % \
exec_str = "ipcontroller --client-port=%d --engine-port=%d &" % \
(self.pars['client_port'],
self.pars['engine_port'])
os.system(exec_str)
def scp_furls_to_clients(self):
""" scp controller furl file to task client machines.
Also modifies the generated_enginefurl to have the correct ports for port-forward use.
"""
lines = open(self.pars['generated_engine_furl_fpath']).readlines()
new_line = lines[0].replace('192.168.1.25:%d' % (self.pars['engine_port']),\
'127.0.0.1:%d' % (self.pars['tunneled_engine_client_port']))
new_furl_fp = open(self.pars['modified_engine_furl_fpath'],'w')
new_furl_fp.write(new_line)
new_furl_fp.close()
thread_list = []
for client_dict in self.pars['client_defs']:
if ('__local__' in client_dict['name']):
continue # skip the scp since generated on same machine
elif ('__trans' in client_dict['name']):
exec_str = "scp -C -P %d %s %s@%s:%s/ipcontroller-engine.furl" % (client_dict['ssh_port'],
self.pars['generated_engine_furl_fpath'],
client_dict['username'],
client_dict['hostname'],
client_dict['furl_dirpath'])
t = threading.Thread(target=os.system, args=[exec_str])
t.start()
thread_list.append(t)
else:
exec_str = "scp -C -P %d %s %s@%s:%s/ipcontroller-engine.furl" % (client_dict['ssh_port'],
self.pars['modified_engine_furl_fpath'],
client_dict['username'],
client_dict['hostname'],
client_dict['furl_dirpath'])
t = threading.Thread(target=os.system, args=[exec_str])
t.start()
thread_list.append(t)
for t in thread_list:
print "scp/ssh thread (%s) waiting..." % (client_dict['hostname'])
t.join(10.0) # wait 10 seconds for scp/ssh
if t.isAlive():
print "! Thread (%s) has not returned! (dead host?)" % (client_dict['hostname'])
def kill_engines_on_taskclients(self):
""" pkill any existing ipengines on local and remote
ipengine client machines.
"""
thread_list = []
#DISABLED: try doing this 2x:
for i in xrange(1):
for client_dict in self.pars['client_defs']:
# Comment this out if I want to KILL engines on trans[123]
#if ('__trans' in client_dict['name']):
# continue # KLUDGE dont kill ipengines on trans[123] computers
#exec_str = "ssh -fn -p %d %s@%s pkill -9 -f .*bin.ipengine" % ( \
# client_dict['ssh_port'],
# client_dict['username'],
# client_dict['hostname'])
home_dirpath = client_dict['furl_dirpath'][:client_dict['furl_dirpath'].find('.ipython')]
exec_str = "ssh -fn -p %d %s@%s %ssrc/TCP/Algorithms/ipengine_kill.py" % ( \
client_dict['ssh_port'],
client_dict['username'],
client_dict['hostname'],
home_dirpath)
print exec_str
t = threading.Thread(target=os.system, args=[exec_str])
t.start()
thread_list.append(t)
for t in thread_list:
print "scp/ssh thread (%s) waiting..." % (client_dict['hostname'])
t.join(10.0) # wait 10 seconds for scp/ssh
if t.isAlive():
print "! Thread (%s) has not returned! (dead host?)" % (client_dict['hostname'])
def start_engines_on_taskclients(self):
""" Start ipengine clients on local and remote machines
"""
# IF CURRENT IMPLEMENTATION POSES PROBLEMS:
# Maybe have a daemon on the remote machine which can be given
# a config file & restarted (which kills existing
# engines), and the daemon will spawn the engines
# without need of a continuous ssh session from
# ipython_cluster_setup.py host.
# - This would allow for the ~dozen engines to be easily spawned
thread_list = []
for client_dict in self.pars['client_defs']:
for i in xrange(client_dict['n_engines']):
if ('__local__' in client_dict['name']):
exec_str = "ipengine &"
os.system(exec_str)
else:
# here we spawn a remote ssh session.
### NOTE: nice -19 works, but since I run on my own machines, I don't do:
#exec_str = "ssh -fn -p %d %s@%s nice -19 ipengine &" % ( \
if client_dict.has_key('nice'):
exec_str = "ssh -fn -p %d %s@%s nice -%d ipengine &" % ( \
client_dict['ssh_port'],
client_dict['username'],
client_dict['hostname'],
client_dict['nice'])
else:
exec_str = "ssh -fn -p %d %s@%s ipengine &" % ( \
client_dict['ssh_port'],
client_dict['username'],
client_dict['hostname'])
print exec_str
t = threading.Thread(target=os.system, args=[exec_str])
t.start()
thread_list.append(t)
for t in thread_list:
print "scp/ssh thread (%s) waiting..." % (client_dict['hostname'])
t.join(20.0) # wait 20 seconds for scp/ssh
if t.isAlive():
print "! Thread (%s) has not returned! (dead host?)" % (client_dict['hostname'])
def main(self):
""" Main function.
"""
flag_done = False
while not flag_done:
try:
self.kill_existing_controller()
time.sleep(5)
self.initialize_controller()
time.sleep(20) # we need to wait for the ipcontroller to generate new .furl files
self.scp_furls_to_clients()
self.kill_engines_on_taskclients()
#sys.exit()
time.sleep(5)
self.start_engines_on_taskclients()
flag_done = True
except:
print "Setup_System.main() Except. sleeping(20)"
time.sleep(20) # wait a couple seconds, probably an incomplete file scp or .mec() initialization failure.
class Test_System:
""" Run a test case of Ipython parallelization.
"""
def __init__(self, pars={}):
self.pars = pars
def main(self):
""" Main function for Testing.
"""
# This tests the Multi-engine interface:
mec = client.MultiEngineClient()
exec_str = """import os
os.environ['TCP_SEX_BIN']=os.path.expandvars('$HOME/bin/sex')
os.environ['TCP_WCSTOOLS_DIR']=os.path.expandvars('$HOME/src/install/wcstools-3.6.4/bin/')
os.environ['TCP_DIR']=os.path.expandvars('$HOME/src/TCP/')
os.environ['TCP_DATA_DIR']=os.path.expandvars('$HOME/scratch/TCP_scratch/')
os.environ['CLASSPATH']=os.path.expandvars('$HOME/src/install/weka-3-5-7/weka.jar')
"""
#if os.path.exists(os.path.expandvars("$HOME/.ipython/custom_configs")): execfile(os.path.expandvars("$HOME/.ipython/custom_configs"))
mec.execute(exec_str)
# This tests the task client interface:
tc = client.TaskClient()
task_list = []
n_iters_total = 8
n_iters_per_clear = 10
for i in xrange(n_iters_total):
task_str = """cat = os.getpid()""" # os.getpid() # os.environ
taskid = tc.run(client.StringTask(task_str, pull="cat"))
task_list.append(taskid)
### NOTE: This can be used to thin down the ipcontroller memory storage of
### finished tasks, but afterwards you cannot retrieve values (below):
#if (i % n_iters_per_clear == 0):
# tc.clear()
print '!!! NUMBER OF TASKS STILL SCHEDULED: ', tc.queue_status()['scheduled']
for i,taskid in enumerate(task_list):
### NOTE: The following retrieval doesnt work if
### tc.clear() was called earlier:
task_result = tc.get_task_result(taskid, block=True)
print task_result['cat']
print 'done'
print tc.queue_status()
#del tc
#time.sleep(1)
if __name__ == '__main__':
if len(sys.argv) > 1:
# We can assume we were given a python file to exec() which contains a pars={} dict.
# TODO: read pars from file.
f = open(os.path.abspath(os.path.expandvars(sys.argv[1])))# Import the standard Parameter file
exec f
f.close()
else:
pars = { \
'client_port':10113, # Controller listen port for python taskclient connections
'engine_port':23612, # Controller listen port for engine connections
'tunneled_engine_client_port':23610, # port used on the engine client to get back to ipcontroller
'generated_engine_furl_fpath':'/home/pteluser/.ipython/security/ipcontroller-engine.furl',
'modified_engine_furl_fpath':'/tmp/temp_engine_furl_fpath',
'engine-cert-file':'', # this is shared by all engines. ?Maybe it is manually copied over once, initially? Although this .pem is not identical to the ipcontroller computer.
'client_defs':[ \
{'name':'__local__',
'hostname':'127.0.0.1',
'furl_dirpath':'/home/pteluser/.ipython/security',
'username':'pteluser',
'ssh_port':22,
'n_engines':10},
],
}
SetupSystem = Setup_System(pars=pars)
SetupSystem.main()
# FOR TESTING:
time.sleep(20) # (70) # We give some time for controller to initialize (sgn02 requires > 30secs)
TestSystem = Test_System()
TestSystem.main()
time.sleep(0.01) # This seems to fix a traceback where Ipython/Twisted trips on itself while shutting down
"""
{'name':'__local__',
'hostname':'127.0.0.1',
'furl_dirpath':'/home/pteluser/.ipython/security',
'username':'pteluser',
'ssh_port':22,
'n_engines':8},
{'name':'__trans_betsy__',
'hostname':'192.168.1.85',
'furl_dirpath':'/home/pteluser/.ipython/security',
'username':'pteluser',
'ssh_port':22,
'n_engines':8},
{'name':'__trans1__',
'hostname':'192.168.1.45',
'furl_dirpath':'/home/pteluser/.ipython/security',
'username':'pteluser',
'ssh_port':22,
'n_engines':2},
{'name':'__trans2__',
'hostname':'192.168.1.55',
'furl_dirpath':'/home/pteluser/.ipython/security',
'username':'pteluser',
'ssh_port':22,
'n_engines':2},
{'name':'__trans3__',
'hostname':'192.168.1.65',
'furl_dirpath':'/home/pteluser/.ipython/security',
'username':'pteluser',
'ssh_port':22,
'n_engines':2},
{'name':'__worms2__',
'hostname':'localhost',
'furl_dirpath':'/home/starr/.ipython/security',
'username':'starr',
'ssh_port':32151,
'n_engines':6},
{'name':'__cch1__',
'hostname':'localhost',
'furl_dirpath':'/home/dstarr/.ipython/security',
'username':'dstarr',
'nice':19,
'ssh_port':32161,
'n_engines':1},
{'name':'__sgn02__',
'hostname':'localhost',
'furl_dirpath':'/global/homes/d/dstarr/datatran/.ipython/security',
'username':'dstarr',
'ssh_port':32141,
'n_engines':0}, # 6
"""
|
import turtle
def draw_square(some_turtle):
for i in range(1,5):
some_turtle.forward(100)
some_turtle.right(90)
def draw_triangle(some_turtle):
for i in range(1,4):
some_turtle.forward(100)
some_turtle.right(120)
def draw_art():
window = turtle.Screen()
window.bgcolor('green')
n = 0
while n < 360:
brad = turtle.Turtle()
brad.right(n)
draw_triangle(brad)
n+= 10
window.exitonclick()
draw_art()
|
from django.conf.urls import patterns, url, include
from django.views.generic import ListView
from ideacalculator.views import getideas
urlpatterns = patterns('',
(r'^getidea/$', getideas),
)
|
# Generated by Django 3.0.3 on 2020-06-04 20:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0014_dimensaomodel_profundidade_media'),
]
operations = [
migrations.RemoveField(
model_name='dimensaomodel',
name='construcao',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='contra_piso',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='escavacao',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='instalacao_vinil',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='isomanta_m2',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='perfil_fixo_m',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='preco',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='produto',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='remocao_terra',
),
migrations.RemoveField(
model_name='dimensaomodel',
name='vinil_m2',
),
]
|
import sqlite3
from fastapi import APIRouter, HTTPException
from models.customers import Customer
router = APIRouter()
@router.on_event("startup")
async def startup():
router.db_connection = sqlite3.connect("chinook.db")
router.db_connection.row_factory = sqlite3.Row
@router.on_event("shutdown")
async def shutdown():
router.db_connection.close()
@router.put("/customers/{customer_id}")
async def customers(customer_id, customer: Customer = {}):
cursor = router.db_connection.cursor()
selected_customer = cursor.execute(
"SELECT * FROM customers WHERE customerid = ?", (customer_id,),
).fetchone()
if not selected_customer:
raise HTTPException(status_code=404, detail={"error": "Not Found"})
column_names = [value[0] for value in customer if value[1] is not None]
values = [value[1] for value in customer if value[1] is not None]
columns = " = ?,".join(column_names) + "= ?"
cursor.execute(
f"UPDATE customers SET {columns} WHERE customerid = ?", (*values, customer_id,),
)
router.db_connection.commit()
return selected_customer
@router.get("/sales")
async def sales(category: str):
if category == "customers":
cursor = router.db_connection.cursor()
sales = cursor.execute(
"""
SELECT customerid, email, phone, ROUND(SUM(total),2) AS Sum FROM
invoices JOIN customers USING(customerid) GROUP BY customerid
ORDER BY Sum DESC, customerid
"""
).fetchall()
return sales
if category == "genres":
cursor = router.db_connection.cursor()
genres = cursor.execute(
"""
SELECT g.name, Sum(quantity) AS Sum FROM invoice_items
JOIN tracks USING(trackid)
JOIN genres g USING(genreid)
GROUP BY genreid
ORDER BY Sum DESC, g.name"""
).fetchall()
return genres
raise HTTPException(status_code=404, detail={"error": "Not Found"})
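# Illustrative requests, assuming this router is mounted at the application root:
#   PUT /customers/1 with a JSON body such as {"phone": "+1 555-0100"} updates only the
#   fields that are not None and returns the record as it was before the update.
#   GET /sales?category=customers returns per-customer invoice totals; any category
#   other than "customers" or "genres" yields a 404.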
|
from django.conf.urls import url
from players import views
urlpatterns = [
url(r'^(?P<season>\d{4}-\d{2})/$',
views.PlayerListView.as_view(), name='ranking'),
url(r'^(?P<season>\d{4}-\d{2})/vote/$',
views.PlayerVoteModalView.as_view(), name='vote_modal'),
url(r'^vote_save/(?P<signed_data>[\w:=_-]+)/$',
views.PlayerVoteSaveView.as_view(), name='vote_save'),
]
|
import os
import re
import math
import sys
from fnmatch import fnmatch
pattern = "*.txt"
word_list = []
spam = 1
ham = 0
dict_spam = {}
dict_ham = {}
lw = []
learning_rate = sys.argv[1]
iterations = sys.argv[2]
cwd = os.getcwd()
with open('stopwords.txt') as f:
stop_words = f.read().splitlines()
def get_words(x,y):
for line in x:
for word in line.split():
word = word.lower()
y += filter(None,re.split(r'\W+|_', word))
def word_frequency(doc,list,ws):
temp_w = sorted(set(ws))
for i in temp_w:
count = 0
for j in ws:
if i == j:
count = count + 1
list.append(words(i,count))
def extractwords(dir,dict):
os.chdir(cwd)
global word_list
for path, subdirs, files in os.walk(dir):
os.chdir(dir)
for name in files:
words_obj = []
words = []
if fnmatch(name, pattern):
with open(name,'r') as file:
get_words(file,words)
if(sys.argv[3] == 'yes'):
filtered_word_list = words[:]
for word in words:
if word in stop_words:
filtered_word_list.remove(word)
words = filtered_word_list
word_frequency(file,words_obj,words)
word_list = word_list + words
dict[name] = words_obj
class Vocabulary(object):
def __init__(self, word, weight):
self.word = word
self.weight = weight
class words(object):
def __init__(self, word, count):
self.word = word
self.count = count
extractwords('train/spam',dict_spam)
extractwords('train/ham',dict_ham)
V = sorted(set(word_list))
print len(V)
def initialize_weights(V):
for i in V:
lw.append(Vocabulary(i,0))
initialize_weights(V)
def match(words):
for i in range(len(lw)):
if words == lw[i].word:
return lw[i].weight
return 0
def learn_weights_helper(list):
sum = 0
for i in range(len(list)):
w = match(list[i].word)
sum = sum + w * list[i].count
if sum >= 0:
return 1
else:
return 0
def update_weights(val,cs,ls):
error = cs - val
for i in range(len(ls)):
for j in range(len(lw)):
if ls[i].word == lw[j].word:
lw[j].weight = float(lw[j].weight) + (float(learning_rate) * error * ls[i].count)
def learn_weights(dict,exp):
for key in dict.keys():
ls = dict[key]
prediction = learn_weights_helper(ls)
update_weights(prediction,exp,ls)
for i in range(int(iterations)):
print "learning weights - iteration... ",i
learn_weights(dict_spam,spam)
learn_weights(dict_ham,ham)
def apply_p_helper(dict,exp):
global count
for key in dict.keys():
ls = dict[key]
prediction = learn_weights_helper(ls)
if prediction == exp:
count = count + 1
count = 0
def apply_p():
print "applying...."
test_spam = {}
test_ham = {}
os.chdir(cwd)
extractwords('test/spam',test_spam)
extractwords('test/ham',test_ham)
apply_p_helper(test_spam,spam)
apply_p_helper(test_ham,ham)
total_docs = len(test_spam) + len(test_ham)
accuracy = float(count)/float(total_docs) * 100
print total_docs,count,accuracy
apply_p()
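# Invocation sketch (the script name below is illustrative): the three positional
# arguments are the learning rate, the number of training iterations, and 'yes'/'no'
# for stop-word filtering, e.g.
#   python perceptron.py 0.05 20 yes
# The script expects train/spam, train/ham, test/spam and test/ham directories of
# *.txt files plus a stopwords.txt file in the working directory.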
|
from django.contrib import admin
from home.models import Post, Comment, LikeDislike
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(LikeDislike)
|
from read_input import *
from itertools import zip_longest
from pprint import pprint
def left_factoring(nonterminal_list,production_list):
left_part = [] # left part of productions list
right_part = [] # right part of productions list
remove_nonterminal_index_list = [] #index of productions to be removed
new_productions = [] #new productions to be added,will contain object
new_productions_final = [] #all new productions to be added,will contain object
new_nonterminal_list = [] #new nonterminal to be added
new_nonterminal_list_final = [] #all new nonterminal to be added
for production in production_list:
for left,right in production.items():
left_part.append(left)
right_part.append(right)
for nonterminal in nonterminal_list:
nonterminal_index_list = [i for i,x in enumerate(left_part) if x == nonterminal and right_part[i] != ""] #list of indexes of production that derive from same nonterminal and that do not derive to epsilon/null string
if len(nonterminal_index_list) != 1:
right_part_list = [right_part[index] for index in nonterminal_index_list]
# print(nonterminal,right_part_list)
prefixes_list = find_prefixes(right_part_list)
# print(prefixes_list)
if (len(prefixes_list) != len(right_part_list)):
remove_nonterminal_index_list += nonterminal_index_list
prefix_sufixes_dict = find_prefix_suffixes(right_part_list, prefixes_list)
# print("prefix_sufixes_dict",nonterminal,prefix_sufixes_dict)
new_nonterminal_list, new_productions = create_new_productions_and_nonterminal(nonterminal, prefix_sufixes_dict)
# print("new_nonterminal_list before",new_nonterminal_list)
# print("new_productions before",new_productions)
new_nonterminal_list, new_productions = left_factoring(new_nonterminal_list,new_productions) #recursion
# print("new_nonterminal_list after",new_nonterminal_list)
# print("new_productions after",new_productions)
new_nonterminal_list_final += new_nonterminal_list
new_productions_final += new_productions
# print("Index of nonterminal to remove",remove_nonterminal_index_list)
# print("Productions",production_list)
production_list = [elem for index,elem in enumerate(production_list) if index not in remove_nonterminal_index_list]
# print("Productions",production_list)
# print("New Productions",new_productions_final)
production_list += new_productions_final
# print("New nonterminals",new_nonterminal_list_final)
nonterminal_list += new_nonterminal_list_final
# print("Nonterminals",nonterminal_list)
# print("Productions",production_list)
return nonterminal_list,production_list
def find_prefixes(strings):
zipped = zip_longest(*strings, fillvalue='')
for index, letters in enumerate(zipped):
if index == 0:
prefixes = letters # assumes there will always be a prefix
else:
poss_prefixes = [prefix + letters[i] for i, prefix in enumerate(prefixes)]
prefixes = [prefix if poss_prefixes.count(prefix) == letters.count(prefix) or poss_prefixes.count(prefix)==prefixes.count(prefix[:-1]) else prefixes[i] for i, prefix in enumerate(poss_prefixes)]
return list(set(prefixes))
def find_prefix_suffixes(strings, prefixes):
prefix_suffix = {}
for s in strings:
for prefix in prefixes:
if s.startswith(prefix):
if prefix in prefix_suffix:
prefix_suffix[prefix].append(s.replace(prefix, '', 1))
break
else:
prefix_suffix[prefix] = list([s.replace(prefix, '', 1)])
break
return prefix_suffix
def create_new_productions_and_nonterminal(nonterminal, prefix_sufixes_dict):
new_productions = []
new_nonterminal_list = []
counter = 0
for key, value_list in prefix_sufixes_dict.items():
if len(value_list) == 1:
if value_list[0] == "":
new_productions.append({nonterminal:key})
else:
new_productions.append({nonterminal:key+value_list[0]})
else:
counter+=1
new_nonterminal = nonterminal+str(counter)
new_productions.append({nonterminal:key+new_nonterminal})
for value in value_list:
new_productions.append({new_nonterminal:value})
new_nonterminal_list.append(new_nonterminal)
return new_nonterminal_list, new_productions
input_dict["Grammar"]["Nonterminal"],input_dict["Grammar"]["Productions"] = left_factoring(input_dict["Grammar"]["Nonterminal"],input_dict["Grammar"]["Productions"])
print("\nData after left factoring\n")
pprint(input_dict)
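# Worked example (assuming read_input represents each production as a {left: right} dict):
# left_factoring(['S'], [{'S': 'aB'}, {'S': 'aC'}]) factors out the common prefix 'a'
# into a new nonterminal and returns (['S', 'S1'], [{'S': 'aS1'}, {'S1': 'B'}, {'S1': 'C'}]).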
|
class Hero:
def __init__(self, name, level):
self.name = name
self.level = level
class Creature:
def __init__(self, name, the_level):
self.name = name
self.level = the_level
        self.health1 = the_level * 7  # assign to the instance; a bare local here would be discarded
def __repr__(self):
return "{}, Level {}".format(
self.name, self.level
)
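# Illustrative usage (not part of the original snippet): thanks to __repr__ above,
# printing a Creature shows its name and level.
toad = Creature("Toad", 1)
print(toad)  # -> Toad, Level 1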
|
#!/usr/bin/python3.4
# -*-coding:Utf-8
def aff_float(fl) :
""" This function take a float in param and return a string with the troncature of this float with 3 decimal"""
if type(fl) is not float :
raise TypeError("Le paramètre doit être un float")
else :
flottant = str(fl)
entier, virgule = flottant.split(".")
return ",".join([entier, virgule[:3]])
|
# MQTT Library Import
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import psutil, datetime
# Generate initial CPU Utilization Counters
psutil.cpu_percent();
# Generate CPU Stats
cpuCount = psutil.cpu_count();
print "CPU Count: ",cpuCount;
# Generate Memory Stats
virtualmemoryStats = psutil.virtual_memory();
totalVM = virtualmemoryStats[0];
availVM = virtualmemoryStats[1];
print "Total Memory: ",totalVM
print "Availible Memory: ",availVM
# Root Parition Space
rootStats = psutil.disk_usage('/')
rootTotal = rootStats[0];
rootUsed = rootStats[1];
rootFree = rootStats[2];
rootPercent = rootStats[3];
print "Root Parition: ",rootTotal," total - ",rootUsed," used - ",rootFree," free - ",rootPercent," % free"
# Determine Uptime
lastBoot = datetime.datetime.fromtimestamp(psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")
print "Last Boot Time/Date: ",lastBoot
# Generating CPU Utilization Usage (Can hang the program)
cpuUtil = psutil.cpu_percent(interval=5, percpu=True)
print "CPU Utilization: ",cpuUtil[0],"% Proc 1 ",cpuUtil[1],"% Proc 2 ",cpuUtil[2],"$ Proc 3 ",cpuUtil[3],"% Proc 4"
#mqttc = mqtt.Client(client_id="chomper")
#mqttc.connect("octokong", 1883)
#mqttc.publish("uptime", uptime())
#mqttc.loop(2)
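# A minimal publishing sketch, assuming a broker reachable at "octokong" as in the
# commented-out client code above (left commented so this script still only reports stats):
#   publish.single("system/cpu_count", payload=str(cpuCount), hostname="octokong", port=1883)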
|
import pandas
import pandasql
import json
import requests
import pprint
def add_full_name(path_to_csv, path_to_new_csv):
#Assume you will be reading in a csv file with the same columns that the
#Lahman baseball data set has -- most importantly, there are columns
#called 'nameFirst' and 'nameLast'.
#1) Write a function that reads a csv
#located at "path_to_csv" into a pandas dataframe and adds a new column
#called 'nameFull' with a player's full name.
#
#For example:
# for Hank Aaron, nameFull would be 'Hank Aaron',
#
#2) Write the data in the pandas dataFrame to a new csv file located at
#path_to_new_csv
#WRITE YOUR CODE HERE
df = pandas.read_csv(path_to_csv)
df['nameFull'] = df['nameFirst'] + ' ' + df['nameLast']
df.to_csv(path_to_new_csv)
if __name__ == "__main__":
# For local use only
# If you are running this on your own machine add the path to the
# Lahman baseball csv and a path for the new csv.
# The dataset can be downloaded from this website: http://www.seanlahman.com/baseball-archive/statistics
# We are using the file Master.csv
path_to_csv = ""
path_to_new_csv = ""
add_full_name(path_to_csv, path_to_new_csv)
def select_first_50(filename):
# Read in our aadhaar_data csv to a pandas dataframe. Afterwards, we rename the columns
# by replacing spaces with underscores and setting all characters to lowercase, so the
# column names more closely resemble columns names one might find in a table.
aadhaar_data = pandas.read_csv(filename)
aadhaar_data.rename(columns = lambda x: x.replace(' ', '_').lower(), inplace=True)
# Select out the first 50 values for "registrar" and "enrolment_agency"
# in the aadhaar_data table using SQL syntax.
#
# Note that "enrolment_agency" is spelled with one l. Also, the order
# of the select does matter. Make sure you select registrar then enrolment agency
# in your query.
#
# You can download a copy of the aadhaar data that we are passing
# into this exercise below:
# https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/aadhaar_data.csv
q = "SELECT registrar, enrolment_agency FROM aadhaar_data limit 50"
#Execute your SQL command against the pandas frame
aadhaar_solution = pandasql.sqldf(q.lower(), locals())
return aadhaar_solution
def aggregate_query(filename):
# Read in our aadhaar_data csv to a pandas dataframe. Afterwards, we rename the columns
# by replacing spaces with underscores and setting all characters to lowercase, so the
# column names more closely resemble columns names one might find in a table.
aadhaar_data = pandas.read_csv(filename)
aadhaar_data.rename(columns = lambda x: x.replace(' ', '_').lower(), inplace=True)
# Write a query that will select from the aadhaar_data table how many men and how
# many women over the age of 50 have had aadhaar generated for them in each district.
# aadhaar_generated is a column in the Aadhaar Data that denotes the number who have had
# aadhaar generated in each row of the table.
#
# Note that in this quiz, the SQL query keywords are case sensitive.
# For example, if you want to do a sum make sure you type 'sum' rather than 'SUM'.
#
# The possible columns to select from aadhaar data are:
# 1) registrar
# 2) enrolment_agency
# 3) state
# 4) district
# 5) sub_district
# 6) pin_code
# 7) gender
# 8) age
# 9) aadhaar_generated
# 10) enrolment_rejected
# 11) residents_providing_email,
# 12) residents_providing_mobile_number
#
# You can download a copy of the aadhaar data that we are passing
# into this exercise below:
# https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/aadhaar_data.csv
q = "SELECT gender, district, sum(aadhaar_generated) from aadhaar_data where age > 50 GROUP BY gender, district"
# Execute your SQL command against the pandas frame
aadhaar_solution = pandasql.sqldf(q.lower(), locals())
return aadhaar_solution
def api_get_request(url):
# In this exercise, you want to call the last.fm API to get a list of the
# top artists in Spain. The grader will supply the URL as an argument to
# the function; you do not need to construct the address or call this
# function in your grader submission.
#
# Once you've done this, return the name of the number 1 top artist in
# Spain.
data = requests.get(url).text
data = json.loads(data)
pp = pprint.PrettyPrinter()
pp.pprint(data)
toparts = data['topartists']
attr = toparts['artist'][0]
name = attr['name']
print(name)
return name # return the top artist in Spain
|
import json
import os
import sys
import boto3
import botocore
from garage import config
from garage.misc import console
def setup_iam():
iam_client = boto3.client(
"iam",
aws_access_key_id=AWS_ACCESS_KEY,
aws_secret_access_key=AWS_ACCESS_SECRET,
)
iam = boto3.resource(
'iam',
aws_access_key_id=AWS_ACCESS_KEY,
aws_secret_access_key=AWS_ACCESS_SECRET)
# delete existing role if it exists
try:
existing_role = iam.Role('garage')
existing_role.load()
# if role exists, delete and recreate
if not config.query_yes_no(
("There is an existing role named garage. "
"Proceed to delete everything garage-related and recreate?"),
default="no"):
sys.exit()
print("Listing instance profiles...")
inst_profiles = existing_role.instance_profiles.all()
for prof in inst_profiles:
for role in prof.roles:
print("Removing role %s from instance profile %s" %
(role.name, prof.name))
prof.remove_role(RoleName=role.name)
print("Deleting instance profile %s" % prof.name)
prof.delete()
for policy in existing_role.policies.all():
print("Deleting inline policy %s" % policy.name)
policy.delete()
for policy in existing_role.attached_policies.all():
print("Detaching policy %s" % policy.arn)
existing_role.detach_policy(PolicyArn=policy.arn)
print("Deleting role")
existing_role.delete()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
pass
else:
raise e
print("Creating role garage")
iam_client.create_role(
Path='/',
RoleName='garage',
AssumeRolePolicyDocument=json.dumps({
'Version':
'2012-10-17',
'Statement': [{
'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {
'Service': 'ec2.amazonaws.com'
}
}]
}))
role = iam.Role('garage')
print("Attaching policies")
role.attach_policy(PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess')
role.attach_policy(
PolicyArn='arn:aws:iam::aws:policy/ResourceGroupsandTagEditorFullAccess'
)
print("Creating inline policies")
iam_client.put_role_policy(
RoleName=role.name,
PolicyName='CreateTags',
PolicyDocument=json.dumps({
"Version":
"2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": ["ec2:CreateTags"],
"Resource": ["*"]
}]
}))
iam_client.put_role_policy(
RoleName=role.name,
PolicyName='TerminateInstances',
PolicyDocument=json.dumps({
"Version":
"2012-10-17",
"Statement": [{
"Sid": "Stmt1458019101000",
"Effect": "Allow",
"Action": ["ec2:TerminateInstances"],
"Resource": ["*"]
}]
}))
print("Creating instance profile garage")
iam_client.create_instance_profile(InstanceProfileName='garage', Path='/')
print("Adding role garage to instance profile garage")
iam_client.add_role_to_instance_profile(
InstanceProfileName='garage', RoleName='garage')
def setup_s3():
print("Creating S3 bucket at s3://%s" % S3_BUCKET_NAME)
s3_client = boto3.client(
"s3",
aws_access_key_id=AWS_ACCESS_KEY,
aws_secret_access_key=AWS_ACCESS_SECRET,
)
try:
s3_client.create_bucket(
ACL='private',
Bucket=S3_BUCKET_NAME,
)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'BucketAlreadyExists':
raise ValueError(
"Bucket %s already exists. Please reconfigure S3_BUCKET_NAME" %
S3_BUCKET_NAME) from e
elif e.response['Error']['Code'] == 'BucketAlreadyOwnedByYou':
print("Bucket already created by you")
else:
raise e
print("S3 bucket created")
def setup_ec2():
for region in ["us-east-1", "us-west-1", "us-west-2"]:
print("Setting up region %s" % region)
ec2 = boto3.resource(
"ec2",
region_name=region,
aws_access_key_id=AWS_ACCESS_KEY,
aws_secret_access_key=AWS_ACCESS_SECRET,
)
ec2_client = boto3.client(
"ec2",
region_name=region,
aws_access_key_id=AWS_ACCESS_KEY,
aws_secret_access_key=AWS_ACCESS_SECRET,
)
existing_vpcs = list(ec2.vpcs.all())
assert len(existing_vpcs) >= 1
vpc = existing_vpcs[0]
print("Creating security group in VPC %s" % str(vpc.id))
try:
security_group = vpc.create_security_group(
GroupName='garage-sg', Description='Security group for garage')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidGroup.Duplicate':
sgs = list(
vpc.security_groups.filter(GroupNames=['garage-sg']))
security_group = sgs[0]
else:
raise e
ALL_REGION_AWS_SECURITY_GROUP_IDS[region] = [security_group.id]
ec2_client.create_tags(
Resources=[security_group.id],
Tags=[{
'Key': 'Name',
'Value': 'garage-sg'
}])
try:
security_group.authorize_ingress(
FromPort=22, ToPort=22, IpProtocol='tcp', CidrIp='0.0.0.0/0')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidPermission.Duplicate':
pass
else:
raise e
print("Security group created with id %s" % str(security_group.id))
key_name = 'garage-%s' % region
try:
print("Trying to create key pair with name %s" % key_name)
key_pair = ec2_client.create_key_pair(KeyName=key_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidKeyPair.Duplicate':
if not config.query_yes_no(
("Key pair with name %s exists. "
"Proceed to delete and recreate?") % key_name, "no"):
sys.exit()
print("Deleting existing key pair with name %s" % key_name)
ec2_client.delete_key_pair(KeyName=key_name)
print("Recreating key pair with name %s" % key_name)
key_pair = ec2_client.create_key_pair(KeyName=key_name)
else:
raise e
key_pair_folder_path = os.path.join(config.PROJECT_PATH, "private",
"key_pairs")
file_name = os.path.join(key_pair_folder_path, "%s.pem" % key_name)
print("Saving keypair file")
console.mkdir_p(key_pair_folder_path)
with os.fdopen(
os.open(file_name, os.O_WRONLY | os.O_CREAT, 0o600),
'w') as handle:
handle.write(key_pair['KeyMaterial'] + '\n')
# adding pem file to ssh
os.system("ssh-add %s" % file_name)
ALL_REGION_AWS_KEY_NAMES[region] = key_name
def setup():
setup_s3()
setup_iam()
setup_ec2()
if __name__ == "__main__":
from garage.config_personal import *
setup()
|
def readfile(name, fabric):
for s in open(name):
a = s.split('@')
id = a[0]
b = a[1].split(':')
from_edge = b[0].split(',')
from_left = int(from_edge[0])
from_top = int(from_edge[1])
d= b[1].split('x')
width = int(d[0])
height = int(d[1])
top_left = (from_left, from_top)
bottom_right = (from_left+width, from_top+height)
for x in range(top_left[0], bottom_right[0]):
for y in range(top_left[1], bottom_right[1]):
if fabric[x][y] == '.':
fabric[x][y] = id
else:
fabric[x][y] = 'x'
return fabric
def fabric(size):
fabric = []
for y in range(size):
fabric.append( ['.'] * size)
return fabric
count=0
for a in readfile('aoc3.txt', fabric(1000)):
for b in a:
if b == 'x':
count +=1
print("Result ", count)
|
import logging
import json
import os
from pathlib import Path
from flask import Flask, request
from flask_cors import CORS
from flask_restplus import Api, Resource
from flask_restplus import abort
from enigma_docker_common.config import Config
from enigma_docker_common.logger import get_logger
env_defaults = {'K8S': './config/k8s_config.json',
'TESTNET': './config/testnet_config.json',
'MAINNET': './config/mainnet_config.json',
'COMPOSE': './config/compose_config.json'}
config = Config(config_file=env_defaults[os.getenv('ENIGMA_ENV', 'COMPOSE')])
logger = get_logger('km.server')
logging.getLogger("urllib3.connectionpool").setLevel(logging.ERROR)
logging.getLogger("werkzeug").setLevel(logging.ERROR)
application = Flask(__name__)
CORS(application)
api = Api(app=application, version='1.0')
ns = api.namespace('km', description='Contract operations')
@ns.route("/address")
class GetAddress(Resource):
""" returns a list of tracked addresses for a chain/network. If parameters are empty, will return
all addresses """
@ns.param('name', 'Key management address filename -- by default right now can only be principal-sign-addr.txt', 'query')
def get(self):
filename = request.args.get('name')
try:
if filename not in config["KM_FILENAME"]:
logger.error(f'Tried to retrieve file which was not in allowed file names: {filename}')
return abort(404)
contract_filename = f'{config["KEYPAIR_DIRECTORY"]}{filename}'
with open(contract_filename) as f:
return f.read()
except FileNotFoundError as e:
logger.error(f'File not found: {e}')
return abort(404)
except json.JSONDecodeError as e:
logger.error(f'Error decoding config file. Is it valid JSON? {e}')
return abort(500)
def run(port):
logger.debug("using port:"+str(port))
application.run(host='0.0.0.0', port=port, debug=False)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=8081, type=int, help='port to listen on')
args = parser.parse_args()
run(args.port)
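# Illustrative request, assuming the defaults mentioned in the docstring above:
#   GET /km/address?name=principal-sign-addr.txt
# returns the contents of that file from KEYPAIR_DIRECTORY, 404 if the name is not
# whitelisted in KM_FILENAME or the file is missing, and 500 on a config decode error.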
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
#from sklearn.preprocessing import StandardScaler
#Input dataset
data = pd.read_csv('Position_Salaries.csv')
X = data.iloc[:,1:2].values
y = data.iloc[:,2].values
#Splitting the dataset into the Training set and Test set
#X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=42)
'''
#Feature Scaling
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
'''
lin_reg = LinearRegression()
lin_reg.fit(X,y)
print(lin_reg.predict([[6.5]]))
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly,y)
X_grid = np.arange(min(X),max(X),0.1)
X_grid = X_grid.reshape((len(X_grid),1))
plt.scatter(X, y,color = 'r',s=15)
plt.plot(X_grid,lin_reg_2.predict(poly_reg.fit_transform(X_grid)),color = 'b')
plt.title('Polynomial Regression')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
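# For comparison with the plain linear prediction printed above, the fitted
# polynomial model can be queried for the same position level:
print(lin_reg_2.predict(poly_reg.fit_transform([[6.5]])))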
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import re
from typing import Any, Mapping
import pytest
from pants.backend.visibility.glob import PathGlob, PathGlobAnchorMode, TargetGlob
from pants.engine.addresses import Address
from pants.engine.internals.target_adaptor import TargetAdaptor
@pytest.mark.parametrize(
"base, pattern_text, expected",
[
(
"base",
"foo",
PathGlob(
raw="foo",
anchor_mode=PathGlobAnchorMode.FLOATING,
glob=re.compile(r"/?\bfoo$"),
uplvl=0,
),
),
(
"base",
".",
PathGlob(
raw="", anchor_mode=PathGlobAnchorMode.INVOKED_PATH, glob=re.compile("$"), uplvl=0
),
),
(
"base",
"./foo",
PathGlob(
raw="foo",
anchor_mode=PathGlobAnchorMode.INVOKED_PATH,
glob=re.compile("foo$"),
uplvl=0,
),
),
(
"base",
("../foo/../bar", "../bar"),
PathGlob(
raw="bar",
anchor_mode=PathGlobAnchorMode.INVOKED_PATH,
glob=re.compile("bar$"),
uplvl=1,
),
),
(
"base",
("/foo", "base/foo"),
PathGlob(
raw="base/foo",
anchor_mode=PathGlobAnchorMode.DECLARED_PATH,
glob=re.compile("base/foo$"),
uplvl=0,
),
),
(
"base/sub",
("/../bar", "base/bar"),
PathGlob(
raw="base/bar",
anchor_mode=PathGlobAnchorMode.DECLARED_PATH,
glob=re.compile("base/bar$"),
uplvl=0,
),
),
(
"base",
("/foo/../baz", "base/baz"),
PathGlob(
raw="base/baz",
anchor_mode=PathGlobAnchorMode.DECLARED_PATH,
glob=re.compile("base/baz$"),
uplvl=0,
),
),
(
"base",
"//foo",
PathGlob(
raw="foo",
anchor_mode=PathGlobAnchorMode.PROJECT_ROOT,
glob=re.compile(r"foo$"),
uplvl=0,
),
),
(
"base",
"foo/**/bar",
PathGlob(
raw="foo/**/bar",
anchor_mode=PathGlobAnchorMode.FLOATING,
glob=re.compile(r"/?\bfoo(/.*)?/bar$"),
uplvl=0,
),
),
(
"base",
("foo/../bar", "bar"),
PathGlob(
raw="bar",
anchor_mode=PathGlobAnchorMode.FLOATING,
glob=re.compile(r"/?\bbar$"),
uplvl=0,
),
),
(
"base",
"my_file.ext",
PathGlob(
raw="my_file.ext",
anchor_mode=PathGlobAnchorMode.FLOATING,
glob=re.compile(r"/?\bmy_file\.ext$"),
uplvl=0,
),
),
(
"base",
"*my_file.ext",
PathGlob(
raw="*my_file.ext",
anchor_mode=PathGlobAnchorMode.FLOATING,
glob=re.compile(r"[^/]*my_file\.ext$"),
uplvl=0,
),
),
(
"base",
".ext",
PathGlob(
raw=".ext",
anchor_mode=PathGlobAnchorMode.FLOATING,
glob=re.compile(r"\.ext$"),
uplvl=0,
),
),
(
"base",
"**/path",
PathGlob(
raw="**/path",
anchor_mode=PathGlobAnchorMode.FLOATING,
glob=re.compile(r"/?\bpath$"),
uplvl=0,
),
),
],
)
def test_pathglob_parse(base: str, pattern_text: str | tuple[str, str], expected: PathGlob) -> None:
if isinstance(pattern_text, tuple):
pattern, text = pattern_text
else:
pattern, text = (pattern_text,) * 2
actual = PathGlob.parse(pattern, base)
assert expected.anchor_mode == actual.anchor_mode
assert expected.glob.pattern == actual.glob.pattern
assert text == str(actual)
assert expected == actual
@pytest.mark.parametrize(
"glob, tests",
[
(
PathGlob.parse("./foo/bar", "base"),
(
# path, base, expected
("tests/foo", "src", None),
("src/foo", "src", "foo"),
("src/foo", "src/a", None),
),
),
(
PathGlob.parse("../foo/bar", "base"),
(
# path, base, expected
("src/foo/bar", "src/qux", "foo/bar"),
),
),
],
)
def test_pathglob_match_path(
glob: PathGlob, tests: tuple[tuple[str, str, str | None], ...]
) -> None:
for path, base, expected in tests:
assert expected == glob._match_path(path, base)
@pytest.mark.parametrize(
"glob, tests",
[
(
PathGlob.parse("//foo/bar", "base"),
(
("tests/foo", "src", False),
("src/foo", "src", False),
("foo/bar", "src/a", True),
("foo/bar/baz", "src/a", False),
),
),
(
PathGlob.parse("/foo/bar", "base"),
(
("foo/bar", "src", False),
("base/foo/bar", "src", True),
("src/foo/bar", "src", False),
),
),
(
PathGlob.parse("./foo/bar", "base"),
(
("foo/bar", "src", False),
("base/foo/bar", "src", False),
("src/foo/bar", "src", True),
),
),
(
PathGlob.parse(".", "base"),
(
("foo/bar", "src", False),
("base/foo/bar", "src", False),
("src/foo/bar", "src", False),
("src/proj", "src/proj", True),
),
),
(
PathGlob.parse("./foo", "base"),
(
("foo/bar", "src", False),
("base/foo/bar", "src", False),
("src/foo/bar", "src", False),
("src/foo", "src", True),
),
),
(
PathGlob.parse("foo/bar", "base"),
(
("foo/bar", "src", True),
("base/foo/bar", "src", True),
("src/foo/bar", "src", True),
("foo/bar/baz", "src", False),
),
),
],
)
def test_pathglob_match(glob: PathGlob, tests: tuple[tuple[str, str, bool], ...]) -> None:
for path, base, expected in tests:
assert expected == glob.match(path, base)
@pytest.mark.parametrize(
"target_spec, expected",
[
({"path": ""}, "!*"),
("[]", "!*"),
(dict(type="resources"), "<resources>"),
(dict(type="file", path="glob/*/this.ext"), "<file>[glob/*/this.ext]"),
(dict(path="glob/*/this.ext"), "glob/*/this.ext"),
(dict(tags=["tagged"]), "(tagged)"),
(dict(tags=["tag-a", "tag-b , b", "c"]), "(tag-a, 'tag-b , b', c)"),
(dict(type="file*", tags=["foo", "bar"], path="baz.txt"), "<file*>[baz.txt](foo, bar)"),
("<resources>", "<resources>"),
("<file>[glob/*/this.ext]", "<file>[glob/*/this.ext]"),
("glob/*/this.ext", "glob/*/this.ext"),
("(tag-a)", "(tag-a)"),
("(tag-a , tag-b)", "(tag-a, tag-b)"),
("<file*>(foo, bar)[baz.txt]", "<file*>[baz.txt](foo, bar)"),
(":name", ":name"),
(dict(name="name"), ":name"),
(dict(path="src/*", name="name"), "src/*:name"),
(dict(type="target", path="src", name="name"), "<target>[src:name]"),
],
)
def test_target_glob_parse_spec(target_spec: str | Mapping[str, Any], expected: str) -> None:
assert expected == str(TargetGlob.parse(target_spec, "base"))
@pytest.mark.parametrize(
"expected, target_spec",
[
(True, "*"),
(True, "<file>"),
(True, "(tag-c)"),
(True, "(tag-*)"),
(False, "(tag-b)"),
(True, "[file.ext]"),
(False, "[files.ext]"),
(True, "//src/*"),
(True, "<file>(tag-a, tag-c)[src/file.ext]"),
(False, "<file>(tag-a, tag-b)[src/file.ext]"),
(False, "<resource>"),
(False, ":name"),
(True, ":src"),
(True, ":*"),
(False, "other.txt:src"),
(True, "file.ext:src"),
(True, "src/file.ext:src"),
],
)
def test_targetglob_match(expected: bool, target_spec: str) -> None:
path = "src/file.ext"
adaptor = TargetAdaptor(
"file", None, tags=["tag-a", "tag-c"], __description_of_origin__="BUILD:1"
)
address = Address(os.path.dirname(path), relative_file_path=os.path.basename(path))
assert expected == TargetGlob.parse(target_spec, "src").match(address, adaptor, "src")
@pytest.mark.parametrize(
"address, path",
[
(Address("src", relative_file_path="file"), "src/file"),
(Address("src", target_name="name"), "src"),
(Address("src", target_name="gen", generated_name="name"), "src/gen#name"),
(Address("", relative_file_path="file"), "file"),
(Address("", target_name="name"), ""),
(Address("", target_name="gen", generated_name="name"), "gen#name"),
],
)
def test_address_path(address: Address, path: str) -> None:
assert TargetGlob.address_path(address) == path
|
val, par, impar = [], [], []
while True:
cont = ' '
val.append(int(input('Digite um número: ')))
while cont not in 'ns':
cont = str(input('Deseja continuar? [S/N] ')).lower()[0]
if cont == 'n': break
for i in val:
if i%2 == 0:
par.append(i)
else:
impar.append(i)
print(f'Lista digitada: {val}')
print(f'Lista dos pares: {par}')
print(f'Lista dos ímpares: {impar}')
|
#Module for handling the configuration and submission of jobs via condor
import subprocess
import os
import time
class CondorJob:
def __init__(self, **kwargs):
self.workingdir=kwargs.get('workingdir')
self.universe=kwargs.get('universe')
self.executable=kwargs.get('executable')
self.arguments=kwargs.get('arguments')
self.requirements=kwargs.get('requirements')
self.log=kwargs.get('log')
self.output=kwargs.get('output')
self.error=kwargs.get('error')
if 'subid' in kwargs.keys():
self.subid=kwargs.get('subid')
else:
self.subid = ''
self.image_size = ''
if 'image_size' in kwargs.keys():
self.image_size = kwargs.get('image_size')
        self.environment = kwargs.get('environment', '')  # default to empty so write_submit_file() always has a value to emit
if not self.workingdir.endswith('/'):
self.workingdir=self.workingdir+'/'
self.exitstatus=None
self.write_submit_file()
self.status='configured'
#Method for writing the condor submit file
def write_submit_file(self):
self.submit_filename = self.workingdir + 'condor_' + self.subid + '.sub'
with open(self.submit_filename,'w') as sf:
sf.write("Universe = " + self.universe + '\n')
sf.write("Executable = " + self.executable + '\n')
sf.write("Arguments = " + self.arguments + '\n')
sf.write("Requirements = " + self.requirements + '\n')
sf.write("Environment = " + self.environment + '\n')
sf.write("GetEnv = True" + '\n')
if self.image_size != '':
sf.write("image_size = " + self.image_size + '\n')
sf.write("want_graceful_removal = True" + '\n')
sf.write("max_retries = 100" + '\n')
sf.write('\n')
sf.write("Output = " + self.workingdir + self.output + '\n')
sf.write("Error = " + self.workingdir + self.error + '\n')
sf.write("Log = " + self.workingdir + self.log + '\n')
sf.write('\n')
sf.write("Queue")
#Method for getting information on the job from the first two lines of the condor log file
def get_job_id(self):
log_filename= self.workingdir + self.log
loginfo_lines=[]
try:
lf = open(log_filename, 'r')
except OSError:
            raise CondorLogError('Condor log file cannot be opened for reading. I need a log file to function!')
else:
loginfo_lines=lf.readlines()
lf.close()
jobid_tmp = loginfo_lines[0].split('(')[1].split(')')[0]
jobid_num = jobid_tmp.split('.')[0]
jobid_subidx = jobid_tmp.split('.')[2]
jobid_subidx = jobid_subidx[:2].lstrip('0') + jobid_subidx[2]
self.jobid = jobid_num + '.' + jobid_subidx
def submit(self):
#check for old logfile and remove if found.
log_filename = self.workingdir + self.log
if os.path.isfile(log_filename):
os.remove(log_filename)
sp=subprocess.Popen(['condor_submit', self.submit_filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
sub_out=sp.stdout.read().decode('utf-8')
#Make sure the job actually submitted...
while(sp.poll() == None):
time.sleep(0.05)
if sp.poll() != 0:
err_str = 'Condor submission failed:\n {0}'.format(sub_out)
raise CondorSubmissionError(err_str)
#errors = []
#for line in sub_out:
# err_idx = line.lower().find('error')
# if err_idx != -1:
# errors.append(line)
#if len(errors) != 0:
# err_str=""
# for e in errors:
# err_str = err_str + e + '\n'
# raise CondorSubmissionError(err_str)
self.get_job_id()
self.status='submitted'
def execute(self):
self.submit()
#checks the status of the job using the logfile
def update_status(self):
log_filename = self.workingdir + self.log
with open(log_filename) as lf:
for line in lf.readlines():
line = line.lower()
if line.find('job terminated') != -1 and self.status in ['submitted', 'executing']:
self.status='terminated'
elif line.find('aborted') != -1 and self.status in ['submitted', 'executing']:
self.status='terminated'
self.exitstatus = '1'
elif line.find('job executing') != -1 and self.status == 'submitted':
self.status='executing'
#Check the exit status of the job if it has terminated
if line.find('return value ') != -1 and self.status == 'terminated':
self.exitstatus=line.split('return value ')[1][0]
def get_status(self):
self.update_status()
return self.status
def kill(self):
subprocess.run(['condor_rm', self.jobid])
self.update_status()
class CondorSubmissionError(Exception):
pass
class CondorLogError(Exception):
pass
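# Illustrative usage (all paths and values below are placeholders, not part of this module):
#   job = CondorJob(workingdir='/scratch/run1', universe='vanilla',
#                   executable='/usr/bin/python', arguments='analysis.py',
#                   requirements='(OpSys == "LINUX")', environment='OMP_NUM_THREADS=1',
#                   log='job.log', output='job.out', error='job.err', subid='0')
#   job.submit()
#   while job.get_status() != 'terminated':
#       time.sleep(10)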
|
from backpack.core.derivatives.batchnorm1d import BatchNorm1dDerivatives
from backpack.extensions.curvmatprod.ggnmp.ggnmpbase import GGNMPBase
class GGNMPBatchNorm1d(GGNMPBase):
def __init__(self):
super().__init__(
derivatives=BatchNorm1dDerivatives(), params=["weight", "bias"]
)
def weight(self, ext, module, g_inp, g_out, backproped):
h_out_mat_prod = backproped
def weight_ggnmp(mat):
result = self.derivatives.weight_jac_mat_prod(module, g_inp, g_out, mat)
result = h_out_mat_prod(result)
result = self.derivatives.weight_jac_t_mat_prod(
module, g_inp, g_out, result
)
return result
return weight_ggnmp
def bias(self, ext, module, g_inp, g_out, backproped):
h_out_mat_prod = backproped
def bias_ggnmp(mat):
result = self.derivatives.bias_jac_mat_prod(module, g_inp, g_out, mat)
result = h_out_mat_prod(result)
result = self.derivatives.bias_jac_t_mat_prod(module, g_inp, g_out, result)
return result
return bias_ggnmp
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-18 08:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('resources', '0002_auto_20170903_0845'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('service_name', models.CharField(max_length=32, verbose_name='业务线的名字')),
('module_letter', models.CharField(db_index=True, max_length=10, verbose_name='业务线字母简称')),
('op_interface', models.CharField(max_length=150, verbose_name='运维对接人')),
('dev_interface', models.CharField(max_length=150, verbose_name='业务对接人')),
('pid', models.IntegerField(db_index=True, verbose_name='上级业务线id')),
],
options={
'db_table': 'resources_my_product',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Server',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('supplier', models.IntegerField(null=True, verbose_name='供应商')),
('manufacturers', models.CharField(max_length=50, null=True, verbose_name='生产厂商')),
('manufacture_date', models.DateField(null=True, verbose_name='生产日期')),
('server_type', models.CharField(max_length=20, null=True, verbose_name='服务器类型')),
('sn', models.CharField(db_index=True, max_length=60, null=True, verbose_name='SN码')),
('os', models.CharField(max_length=50, null=True, verbose_name='操作系统')),
('hostname', models.CharField(db_index=True, max_length=50, null=True, verbose_name='主机名')),
('inner_ip', models.CharField(max_length=32, null=True, unique=True, verbose_name='管理ip')),
('mac_address', models.CharField(max_length=50, null=True, verbose_name='mac地址')),
('ip_info', models.CharField(max_length=255, null=True, verbose_name='ip信息')),
('server_cpu', models.CharField(max_length=250, null=True, verbose_name='cpu信息')),
('server_disk', models.CharField(max_length=100, null=True, verbose_name='磁盘信息')),
('server_mem', models.CharField(max_length=100, null=True, verbose_name='内存信息')),
('status', models.CharField(db_index=True, max_length=100, null=True, verbose_name='服务器状态')),
('remark', models.TextField(null=True)),
('service_id', models.IntegerField(db_index=True, null=True)),
('server_purpose', models.IntegerField(db_index=True, null=True)),
('check_update_time', models.DateTimeField(auto_now=True, null=True)),
('vm_status', models.IntegerField(db_index=True, null=True, verbose_name='虚拟机状态')),
('uuid', models.CharField(db_index=True, max_length=100, null=True, verbose_name='虚拟机uuid')),
('idc', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='resources.Idc')),
],
options={
'db_table': 'resources_my_server',
'ordering': ['id'],
},
),
]
|
from collections import namedtuple
from .base import BasePlugin
Plugin = namedtuple("Plugin", ("name", "package", "class_name"))
PLUGINS = {
"base": Plugin(".base", __name__, "BasePlugin"),
"flask": Plugin(".flask_plugin", __name__, "FlaskPlugin"),
"quart": Plugin(".quart_plugin", __name__, "QuartPlugin"),
"falcon": Plugin(".falcon_plugin", __name__, "FalconPlugin"),
"falcon-asgi": Plugin(".falcon_plugin", __name__, "FalconAsgiPlugin"),
"starlette": Plugin(".starlette_plugin", __name__, "StarlettePlugin"),
}
__all__ = ["BasePlugin", "PLUGINS", "Plugin"]
|
def scramble(s1,s2):
    for letter in set(s2):  # using set(s2) limits the loop to the unique letters of s2 (at most 26) instead of checking every character of s2
if s1.count(letter) < s2.count(letter):
return False
return True
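# Illustrative checks (not part of the original snippet):
print(scramble('rkqodlw', 'world'))  # True  - every letter of 'world' is available in s1
print(scramble('katas', 'steak'))    # False - 'e' never occurs in s1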
|
import warnings
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Lambda
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, MaxPooling2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import concatenate, add
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import convert_all_kernels_in_model
from tensorflow.keras.utils import get_file
from tensorflow.keras.utils import get_source_inputs
import tensorflow.keras.backend as K
'''
from ResNeXt3D_blocks import __initial_conv_block
from ResNeXt3D_blocks import __initial_conv_block_imagenet
from ResNeXt3D_blocks import __grouped_convolution_block
from ResNeXt3D_blocks import __bottleneck_block
'''
def __create_res_next(nb_classes, img_input, include_top, depth=29, cardinality=8, width=4,
weight_decay=5e-4, pooling=None):
''' Creates a ResNeXt model with specified parameters
Args:
nb_classes: Number of output classes
img_input: Input tensor or layer
include_top: Flag to include the last dense layer
        depth: Depth of the network. Can be a positive integer or a list
Compute N = (n - 2) / 9.
For a depth of 56, n = 56, N = (56 - 2) / 9 = 6
For a depth of 101, n = 101, N = (101 - 2) / 9 = 11
cardinality: the size of the set of transformations.
Increasing cardinality improves classification accuracy,
width: Width of the network.
weight_decay: weight_decay (l2 norm)
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
Returns: a Keras Model
'''
if type(depth) is list or type(depth) is tuple:
# If a list is provided, defer to user how many blocks are present
N = list(depth)
else:
# Otherwise, default to 3 blocks each of default number of group convolution blocks
N = [(depth - 2) // 9 for _ in range(3)]
filters = cardinality * width
filters_list = []
for i in range(len(N)):
filters_list.append(filters)
filters *= 2 # double the size of the filters
x = __initial_conv_block(img_input, weight_decay)
# block 1 (no pooling)
for i in range(N[0]):
x = __bottleneck_block(x, filters_list[0], cardinality, strides=1, weight_decay=weight_decay)
N = N[1:] # remove the first block from block definition list
filters_list = filters_list[1:] # remove the first filter from the filter list
# block 2 to N
for block_idx, n_i in enumerate(N):
for i in range(n_i):
if i == 0:
x = __bottleneck_block(x, filters_list[block_idx], cardinality, strides=2,
weight_decay=weight_decay)
else:
x = __bottleneck_block(x, filters_list[block_idx], cardinality, strides=1,
weight_decay=weight_decay)
if include_top:
x = GlobalAveragePooling2D()(x)
x = Dense(nb_classes, use_bias=False, kernel_regularizer=l2(weight_decay),
kernel_initializer='he_normal', activation='softmax')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
return x
def __create_res_next_imagenet(nb_classes, img_input, include_top, depth, cardinality=32, width=4,
weight_decay=5e-4, pooling=None):
''' Creates a ResNeXt model with specified parameters
Args:
nb_classes: Number of output classes
img_input: Input tensor or layer
include_top: Flag to include the last dense layer
depth: Depth of the network. List of integers.
        cardinality: the size of the set of transformations.
                     Increasing cardinality improves classification accuracy,
width: Width of the network.
weight_decay: weight_decay (l2 norm)
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
Returns: a Keras Model
'''
if type(depth) is list or type(depth) is tuple:
# If a list is provided, defer to user how many blocks are present
N = list(depth)
else:
# Otherwise, default to 3 blocks each of default number of group convolution blocks
N = [(depth - 2) // 9 for _ in range(3)]
filters = cardinality * width
filters_list = []
for i in range(len(N)):
filters_list.append(filters)
filters *= 2 # double the size of the filters
x = __initial_conv_block_imagenet(img_input, weight_decay)
# block 1 (no pooling)
for i in range(N[0]):
x = __bottleneck_block(x, filters_list[0], cardinality, strides=1, weight_decay=weight_decay)
N = N[1:] # remove the first block from block definition list
filters_list = filters_list[1:] # remove the first filter from the filter list
# block 2 to N
for block_idx, n_i in enumerate(N):
for i in range(n_i):
if i == 0:
x = __bottleneck_block(x, filters_list[block_idx], cardinality, strides=2,
weight_decay=weight_decay)
else:
x = __bottleneck_block(x, filters_list[block_idx], cardinality, strides=1,
weight_decay=weight_decay)
if include_top:
x = GlobalAveragePooling2D()(x)
x = Dense(nb_classes, use_bias=False, kernel_regularizer=l2(weight_decay),
kernel_initializer='he_normal', activation='softmax')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
return x
|
from django.db import models
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.hashcompat import md5_constructor
from django.contrib.auth.models import User
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from utils.models import ImageField, RenameFilesModel, DateMixin
from datetime import datetime
import os.path
EXTENSIONS = frozenset(('.bmp', '.gif', '.jpg', '.png'))
FORMAT_EXT = {
'BMP': '.bmp',
'GIF': '.gif',
'JPEG': '.jpg',
'PNG': '.png'
}
def get_temp_path(instance, filename):
ext = FORMAT_EXT[instance.file.format]
timestamp = datetime.now().isoformat()
key = md5_constructor(timestamp).hexdigest()[:12]
return 'uploads/temp/%s%s' % (key, ext)
class TemporaryImageManager(models.Manager):
def get_query_set(self):
query = super(TemporaryImageManager, self).get_query_set()
return query.filter(is_temporary=True)
class Image(DateMixin):
width = models.IntegerField(null=True, blank=True)
height = models.IntegerField(null=True, blank=True)
format = models.CharField(max_length=8, null=True, blank=True)
file = ImageField(
upload_to=get_temp_path,
width_field='width',
height_field='height',
format_field='format'
)
title = models.CharField(max_length=75, null=True, blank=True)
author = models.ForeignKey(User, related_name='images')
is_temporary = models.BooleanField(default=True)
objects = models.Manager()
temporary = TemporaryImageManager()
def finalize(self):
file_name = force_unicode(self.file)
name, ext = os.path.splitext(file_name)
final_name = os.path.join('uploads/images', '%s%s' % (self.pk, ext))
if file_name != final_name:
self.file.storage.delete(final_name)
self.file.storage.save(final_name, self.file)
self.file.storage.delete(file_name)
self.file = final_name
self.is_temporary = False
self.save()
def __unicode__(self):
return force_unicode(self.file)
class Markup(DateMixin):
CONTENT_LEXER = get_lexer_by_name('html', stripall=True)
CONTENT_FORMATTER = HtmlFormatter(linenos=False, cssclass='syntax')
title = models.CharField(max_length=75, null=True, blank=True)
description = models.TextField(blank=True, default="")
content = models.TextField()
author = models.ForeignKey(User, related_name='snippets')
def get_highlighted_content(self):
return highlight(self.content, self.CONTENT_LEXER, self.CONTENT_FORMATTER)
def __unicode__(self):
if self.title:
return u"#%s: %s" % (self.id, self.title)
else:
return u"#%s" % self.id
class MediaType(models.Model):
name = models.CharField(max_length=30)
description = models.TextField(blank=True, default="")
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Style(DateMixin):
CONTENT_LEXER = get_lexer_by_name('css', stripall=True)
CONTENT_FORMATTER = HtmlFormatter(linenos=False, cssclass='syntax')
title = models.CharField(max_length=75, null=True, blank=True)
description = models.TextField(blank=True, default="")
content = models.TextField()
media_types = models.ManyToManyField(MediaType, blank=True, related_name='styles')
author = models.ForeignKey(User, related_name='styles')
@property
def media(self):
return ','.join(map(force_unicode, self.media_types.all()))
def get_highlighted_content(self):
return highlight(self.content, self.CONTENT_LEXER, self.CONTENT_FORMATTER)
def __unicode__(self):
if self.title:
return u"#%s: %s" % (self.id, self.title)
else:
return u"#%s" % self.id
|
n1 = int(input('Digite a primeira nota: '))
n2 = int(input('Digite a segunda nota: '))
m = (n1+n2)/2
if(m < 5):
print('Reprovado!')
elif(m >= 5 and m < 7):
print('Recuperação!')
else:
print('Aprovado!')
|
from django.urls import path
from .views import (
TagsList,
TagDetails
)
urlpatterns = [
path('', TagsList.as_view()),
path('<int:pk>/', TagDetails.as_view())
]
|
from django.db import migrations
from ..services import *
from django.conf import settings
def initialize_client(apps, schema_editor):
client_data = {'email': settings.EMAIL_TEST, 'client_name':'client1', 'address':'HN', 'name': 'client1_name'}
client = create_client(data=client_data)
class Migration(migrations.Migration):
dependencies = [
('clients', '0001_initial'),
]
operations = [
migrations.RunPython(initialize_client),
]
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import sys
import time
from contextlib import contextmanager
from threading import Lock
from typing import Dict, Tuple
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, ExitCode
from pants.bin.local_pants_runner import LocalPantsRunner
from pants.engine.env_vars import CompleteEnvironmentVars
from pants.engine.internals.native_engine import PySessionCancellationLatch
from pants.init.logging import stdio_destination
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.pantsd.pants_daemon_core import PantsDaemonCore
logger = logging.getLogger(__name__)
class ExclusiveRequestTimeout(Exception):
"""Represents a timeout while waiting for another request to complete."""
class DaemonPantsRunner:
"""A RawFdRunner (callable) that will be called for each client request to Pantsd."""
def __init__(self, core: PantsDaemonCore) -> None:
super().__init__()
self._core = core
self._run_lock = Lock()
@staticmethod
def _send_stderr(stderr_fileno: int, msg: str) -> None:
"""Used to send stderr on a raw filehandle _before_ stdio replacement.
TODO: This method will be removed as part of #7654.
"""
with os.fdopen(stderr_fileno, mode="w", closefd=False) as stderr:
print(msg, file=stderr, flush=True)
@contextmanager
def _one_run_at_a_time(
self, stderr_fileno: int, cancellation_latch: PySessionCancellationLatch, timeout: float
):
"""Acquires exclusive access within the daemon.
Periodically prints a message on the given stderr_fileno while exclusive access cannot be
acquired.
TODO: This method will be removed as part of #7654, so it currently polls the lock and
cancellation latch rather than waiting for both of them asynchronously, which would be a bit
cleaner.
"""
render_timeout = 5
should_poll_forever = timeout <= 0
start = time.time()
render_deadline = start + render_timeout
deadline = None if should_poll_forever else start + timeout
def should_keep_polling(now):
return not cancellation_latch.is_cancelled() and (not deadline or deadline > now)
acquired = self._run_lock.acquire(blocking=False)
if not acquired:
# If we don't acquire immediately, send an explanation.
length = "forever" if should_poll_forever else f"up to {timeout} seconds"
self._send_stderr(
stderr_fileno,
f"Another pants invocation is running. Will wait {length} for it to finish before giving up.\n"
"If you don't want to wait for the first run to finish, please press Ctrl-C and run "
"this command with PANTS_CONCURRENT=True in the environment.\n",
)
while True:
now = time.time()
if acquired:
try:
yield
break
finally:
self._run_lock.release()
elif should_keep_polling(now):
if now > render_deadline:
self._send_stderr(
stderr_fileno,
f"Waiting for invocation to finish (waited for {int(now - start)}s so far)...\n",
)
render_deadline = now + render_timeout
acquired = self._run_lock.acquire(blocking=True, timeout=0.1)
else:
raise ExclusiveRequestTimeout(
"Timed out while waiting for another pants invocation to finish."
)
def single_daemonized_run(
self,
args: Tuple[str, ...],
env: Dict[str, str],
working_dir: str,
cancellation_latch: PySessionCancellationLatch,
) -> ExitCode:
"""Run a single daemonized run of Pants.
All aspects of the `sys` global should already have been replaced in `__call__`, so this
method should not need any special handling for the fact that it's running in a proxied
environment.
"""
try:
logger.debug("Connected to pantsd")
logger.debug(f"work dir: {working_dir}")
# Capture the client's start time, which we propagate here in order to get an accurate
# view of total time.
env_start_time = env.get("PANTSD_RUNTRACKER_CLIENT_START_TIME", None)
if not env_start_time:
# NB: We warn rather than erroring here because it eases use of non-Pants nailgun
# clients for testing.
logger.warning(
"No start time was reported by the client! Metrics may be inaccurate."
)
start_time = float(env_start_time) if env_start_time else time.time()
options_bootstrapper = OptionsBootstrapper.create(
env=env, args=args, allow_pantsrc=True
)
# Run using the pre-warmed Session.
complete_env = CompleteEnvironmentVars(env)
scheduler, options_initializer = self._core.prepare(options_bootstrapper, complete_env)
runner = LocalPantsRunner.create(
complete_env,
working_dir,
options_bootstrapper,
scheduler=scheduler,
options_initializer=options_initializer,
cancellation_latch=cancellation_latch,
)
return runner.run(start_time)
except Exception as e:
logger.exception(e)
return PANTS_FAILED_EXIT_CODE
except KeyboardInterrupt:
print("Interrupted by user.\n", file=sys.stderr)
return PANTS_FAILED_EXIT_CODE
def __call__(
self,
command: str,
args: Tuple[str, ...],
env: Dict[str, str],
working_dir: str,
cancellation_latch: PySessionCancellationLatch,
stdin_fileno: int,
stdout_fileno: int,
stderr_fileno: int,
) -> ExitCode:
request_timeout = float(env.get("PANTSD_REQUEST_TIMEOUT_LIMIT", -1))
# NB: Order matters: we acquire a lock before mutating either `sys.std*`, `os.environ`, etc.
with self._one_run_at_a_time(
stderr_fileno,
cancellation_latch=cancellation_latch,
timeout=request_timeout,
):
# NB: `single_daemonized_run` implements exception handling, so only the most primitive
# errors will escape this function, where they will be logged by the server.
logger.info(f"handling request: `{' '.join(args)}`")
try:
with stdio_destination(
stdin_fileno=stdin_fileno,
stdout_fileno=stdout_fileno,
stderr_fileno=stderr_fileno,
):
return self.single_daemonized_run(
((command,) + args), env, working_dir, cancellation_latch
)
finally:
logger.info(f"request completed: `{' '.join(args)}`")
|
# -*- coding: utf-8 -*-
"""
SVL 2016
TP1 Formal methods with contracts.py
Authors: Honore Nintunze, Antonin Durey
Classes
"""
class Disque:
"""
inv[self.taille]:
self.taille > 0
"""
def __init__(self,taille):
self.taille = taille
class Tour:
"""
    A tower holding the disks in order
inv[self.disques]:
# array is ordered
forall([self.disques[i-1].taille > self.disques[i].taille for i in range(1, len(self.disques))])
"""
def __init__(self, disques):
"""
post:
len(self.disques) == len(disques)
"""
self.disques = disques
def pop(self):
"""
        Returns the top disk
post[self.disques]:
len(self.disques) == len(__old__.self.disques) - 1
__return__ == __old__.self.disques[-1]
"""
return self.disques.pop()
def push(self,disque):
"""
        Places a disk on top of the tower
pre[self.disques]:
len(self.disques) == 0 or disque.taille < self.disques[-1].taille
post[self.disques]:
self.disques[-1].taille == disque.taille
len(self.disques) == len(__old__.self.disques) + 1
"""
self.disques.append(disque)
class Hanoi:
"""
    The Towers of Hanoi game
"""
def __init__(self,tours):
"""
        A Hanoi game initialised with a single tower holding all the disks
pre:
len(filter(lambda t: len(t.disques) > 0,tours)) == 1
post:
len(self.tours) == len(tours)
"""
self.tours = tours
def deplacer(self,source,dest):
"""
        Move a disk from one tower to another
pre[self.tours]:
source >= 0 and source < len(self.tours)
dest >= 0 and dest < len(self.tours)
post[self.tours]:
__old__.self.tours[source].disques[-1] == self.tours[dest].disques[-1]
"""
self.tours[dest].push(self.tours[source].pop())
def jeu(self,n, D, A, I):
"""
        Runs the Towers of Hanoi game
"""
        if n > 0:
            self.jeu(n-1, D, I, A)
            self.deplacer(D, A)
            self.jeu(n-1, I, A, D)
import contract
contract.checkmod(__name__)
if __name__ == "__main__":
    # Initialise a game with 3 disks
    tour0 = Tour([Disque(i) for i in range(3, 0, -1)]) # if another tower also holds disks the contract no longer passes
tour0.pop()
    tour0.push(Disque(1)) # with 2 or 0 the contract no longer passes
tour1 = Tour([])
tour2 = Tour([])
hanoi = Hanoi([tour0, tour1,tour2])
hanoi.deplacer(0, 1)
# hanoi.jeu(len(tour0),0,2,1)
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^confirm/$', views.payment_confirm, name='w1-payment-confirm'),
]
|
def twoForLoops(n):
n = n * 2
counter = 1
for x in range(n):
m = min(n - counter, counter)
counter += 1
for y in range(m):
print("*", end="")
print("")
def twoWhileLoops(n):
n = n * 2
counter = 1
x = 0
y = 0
while x < n:
m = min(n - counter, counter)
counter += 1
x += 1
while y < m:
y += 1
print("*", end="")
y = 0
print("")
def inverseTwoForloop(n):
n = n * 2
counter = 1
for x in range(n):
m = min(n - counter, counter)
counter += 1
string = ""
for y in range(m):
string += '*'
print('{:>{}}'.format(string, n//2))
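# Added sketch: calling the three variants above with a small n to compare their output;
# nothing here is assumed beyond the function names defined in this file.
if __name__ == "__main__":
    twoForLoops(3)
    twoWhileLoops(3)
    inverseTwoForloop(3)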
|
#-*- coding: utf-8 -*-
import operator
from math import log
import pprint
# Load the data
def loadData(filename):
    # open the data file
    fr = open(filename)
    # read the lines, split them and store the rows in the lenses list
    lenses = [inst.strip().split('\t') for inst in fr.readlines()]
    # build the feature label list
    lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
    return lenses, lensesLabels
# Compute the Shannon entropy
def calcShannonEnt(dataSet):
    # size of the data set
    numEntries = len(dataSet)
    labelCounts = {}
    # walk through the data set
    for featVec in dataSet:
        # extract the class, here the contact-lens type the doctor recommends
        currentLabel = featVec[-1]
        # count how often this lens type appears in the data set
        if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    # initialise the Shannon entropy
    shannonEnt = 0.0
    # walk the lens-type counts and accumulate the entropy
    for key in labelCounts:
        # the Shannon entropy formula (see p. 35 of the book)
        prob = float(labelCounts[key])/numEntries
        shannonEnt -= prob * log(prob,2) #log base 2
    return shannonEnt
# Split the data set
def splitDataSet(dataSet, axis, value):
    retDataSet = []
    # walk through the data set
    for featVec in dataSet:
        # check whether the value of the splitting feature is the one we want to keep
        if featVec[axis] == value:
            # rebuild the current row featVec without the splitting feature
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet
# Find the best way to split the data set
def chooseBestFeatureToSplit(dataSet):
    # number of features available for splitting; the last column is the class, not a feature
    numFeatures = len(dataSet[0]) - 1
    # entropy of the whole data set
    baseEntropy = calcShannonEnt(dataSet)
    # initialise
    bestInfoGain = 0.0; bestFeature = -1
    # iterate over all features
    for i in range(numFeatures):
        # collect every value of this feature
        featList = [example[i] for example in dataSet]
        # deduplicate
        uniqueVals = set(featList)
        newEntropy = 0.0
        # try splitting the data set on each value of this feature
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            # compute the entropy of the split subset and accumulate it
            prob = len(subDataSet)/float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        # information gain
        infoGain = baseEntropy - newEntropy
        # keep the best information gain
        if (infoGain > bestInfoGain):
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
# Build the decision tree
def createTree(dataSet,labels):
    # collect every class label in the data set
    classList = [example[-1] for example in dataSet]
    # if every row has the same class, stop splitting and return that class label
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # if every feature has already been used up
    if len(dataSet[0]) == 1:
        # return the most frequent class label
        classCount={}
        for vote in classList:
            if vote not in classCount.keys(): classCount[vote] = 0
            classCount[vote] += 1
        sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
        return sortedClassCount[0][0]
    # pick the best feature to split on
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    # initialise the tree with the best feature found for the current data set
    myTree = {bestFeatLabel:{}}
    del(labels[bestFeat])
    # collect every value of that feature
    featValues = [example[bestFeat] for example in dataSet]
    # deduplicate; one branch is created per unique feature value
    uniqueVals = set(featValues)
    # split the data set and build the branches by recursing on each feature value's subset
    for value in uniqueVals:
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value),subLabels)
    return myTree
dataFile = '/home/shiyanlou/mylab4/lenses.txt'
lenses, lensesLabels = loadData(dataFile)
tree = createTree(lenses, lensesLabels)
pprint.pprint(tree)
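# Added sketch: one way the fitted tree could be used to classify a single example.
# createTree consumes the labels list it is given, so fresh labels are reloaded first;
# classify itself is a standard ID3-style tree walk, not part of the original script.
def classify(inputTree, featLabels, testVec):
    firstFeat = list(inputTree.keys())[0]
    branches = inputTree[firstFeat]
    featIndex = featLabels.index(firstFeat)
    subtree = branches.get(testVec[featIndex])
    if isinstance(subtree, dict):
        return classify(subtree, featLabels, testVec)
    return subtree
_, freshLabels = loadData(dataFile)
print(classify(tree, freshLabels, lenses[0][:-1]))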
|
#!/usr/bin/python
import re
import unittest
import glue2.EntryTest
EntryTest = glue2.EntryTest.EntryTest
class EntityTest(unittest.TestCase):
def setUp(self):
self.good_entry = {
'dn' : ['EntityId=test,o=GLUE2'],
'objectClass' : ['GLUE2Entity'],
'GLUE2EntityId' : ['test'],
}
def test_good_entryTest(self):
'''Test a good entry.'''
entry = self.good_entry
suite = unittest.TestSuite()
test_names = unittest.TestLoader().getTestCaseNames(EntryTest)
for test_name in test_names:
suite.addTest(EntryTest(test_name, entry))
self.assertTrue(unittest.TextTestRunner().run(suite).wasSuccessful())
def test_bad_object_class(self):
'''Test a bad object class'''
entry = self.good_entry
entry['objectClass'] = ['BadObject']
suite = unittest.TestSuite()
test_names = unittest.TestLoader().getTestCaseNames(EntryTest)
for test_name in test_names:
suite.addTest(EntryTest(test_name, entry))
self.assertFalse(unittest.TextTestRunner().run(suite).wasSuccessful())
self.assertEqual(len(unittest.TextTestRunner().run(suite).failures),1)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(EntityTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
# -*- coding: utf-8 -*-
import sys
import pygame
import nucleo
from pygame.locals import *
WIDTH = 700
HEIGHT = 700
def load_image(filename):
"""Carga la imagen de la ruta que se pasa como argumento"""
try:
image = pygame.image.load(filename)
    except pygame.error as err:
        raise SystemExit(err)
image = image.convert()
return image
def cargarImagenes():
"""Carga una serie de imagenes necesarias para representar el 'juego'"""
azul = load_image("images/azul.png")
cabazul = load_image("images/cabazul.png")
azulchoque = load_image("images/azulchoque.png")
azulvacio = load_image("images/azulvacio.png")
rojo = load_image("images/rojo.png")
cabrojo = load_image("images/cabrojo.png")
rojochoque = load_image("images/rojochoque.png")
rojovacio = load_image("images/rojovacio.png")
choquetotal = load_image("images/choquetotal.png")
error = load_image("images/verde.png")
    return (azul, cabazul, azulchoque, azulvacio, rojo, cabrojo, rojochoque,
            rojovacio, choquetotal, error)
def texto(texto):
"""Presenta texto en la pantalla"""
color = (0, 0, 0)
fuente = pygame.font.Font("images/DroidSans.ttf", 25)
salida = pygame.font.Font.render(fuente, texto, 1, color)
return salida
def dibujaMatriz(matriz, screen, images):
"""Esta funcion se encarga de dibujar en pantalla la matriz pasada
como argumento.
En 'images' se deberá pasar una tupla de imagenes para tal fin."""
for y in range(102):
for x in range(102):
ordenadas = 44+6*y
abscisas = 44+6*x
if matriz[y][x] == 3:
screen.blit(images[1], (abscisas, ordenadas))
elif matriz[y][x] == 4:
screen.blit(images[0], (abscisas, ordenadas))
elif matriz[y][x] == 13:
screen.blit(images[5], (abscisas, ordenadas))
elif matriz[y][x] == 15:
screen.blit(images[4], (abscisas, ordenadas))
elif matriz[y][x] == 18:
screen.blit(images[2], (abscisas, ordenadas))
elif matriz[y][x] == 34:
screen.blit(images[3], (abscisas, ordenadas))
elif matriz[y][x] == 17:
screen.blit(images[6], (abscisas, ordenadas))
elif matriz[y][x] == 44:
screen.blit(images[7], (abscisas, ordenadas))
elif matriz[y][x] == 16:
screen.blit(images[8], (abscisas, ordenadas))
elif matriz[y][x] == 7:
screen.blit(images[2], (abscisas, ordenadas))
elif matriz[y][x] == 28:
screen.blit(images[6], (abscisas, ordenadas))
elif not(matriz[y][x] == 0):
screen.blit(images[9], (abscisas, ordenadas))
def mostrarResultado(resultado, screen):
"""Muestra un texto con el resultado."""
coordenadas = (250, 300)
textoAux = texto("Error")
if resultado[0] == 1:
textoAux = texto("Jugador 1 gana")
elif resultado[0] == 2:
textoAux = texto("Jugador 2 gana")
elif resultado[0] == 3:
if resultado[1] == 10:
textoAux = texto("¡Ostia terrible!")
else:
textoAux = texto("Empate")
screen.blit(textoAux, coordenadas)
def visualizarMatriz(matriz):
"""Este metodo ofrece una version grafica de la matriz pasada
como argumento"""
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Instantanea")
clock = pygame.time.Clock()
background = load_image('images/fondo.png')
images = cargarImagenes()
time = clock.tick(60)
screen.blit(background, (50, 50))
dibujaMatriz(matriz, screen, images)
pygame.display.flip()
while True:
keys = pygame.key.get_pressed()
for eventos in pygame.event.get():
if eventos.type == QUIT:
sys.exit(0)
def main(uno, dos):
"""Como su propio nombre indica, la funcion principal. inicia un juego
entre los jugadores pasados como argumento.
NOTA: Para reiniciar el juego, hay que pulsar 'Backspace'"""
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Pruebas Tron")
clock = pygame.time.Clock()
background = load_image('images/fondo.png')
images = cargarImagenes()
matriz = nucleo.creaMatriz(102)
matriz[50][25] = 3
matriz[50][75] = 13
while True:
time = clock.tick(60)
keys = pygame.key.get_pressed()
for eventos in pygame.event.get():
if eventos.type == QUIT:
sys.exit(0)
resultado = nucleo.resultado(matriz)
if keys[K_BACKSPACE]:
matriz = nucleo.creaMatriz(102)
matriz[50][25] = 3
matriz[50][75] = 13
if resultado[0] == 0:
direct = uno.actualizar(matriz)
matriz = nucleo.actualizaMatriz(matriz, 1, direct)
direct = dos.actualizar(matriz)
matriz = nucleo.actualizaMatriz(matriz, 2, direct)
screen.blit(background, (50, 50))
dibujaMatriz(matriz, screen, images)
if not resultado[0] == 0:
mostrarResultado(resultado, screen)
pygame.display.flip()
|
from django import forms
from apps.users.models import UserProfile
import logging
logger = logging.getLogger(__name__)
class CommissionForm(forms.Form):
"""Form to handle the commission"""
handyman = forms.ModelMultipleChoiceField(
queryset=UserProfile.objects.filter(user_type=1, is_active=True))
def __init__(self, *args, **kwargs):
super(CommissionForm, self).__init__(*args, **kwargs)
self.fields['handyman'].widget.attrs.update(
{'class': 'form-control'})
|
#produce an SQLite database that contains a User, Course, and Member table
#and populate the tables from the data file.
import json
import sqlite3
conn = sqlite3.connect('rosterdb.sqlite')
cur = conn.cursor()
cur.executescript('''
DROP TABLE IF EXISTS User;
DROP TABLE IF EXISTS Course;
DROP TABLE IF EXISTS Member
''')
cur.executescript('''
CREATE TABLE User(
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT
);
CREATE TABLE Course(
id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT
);
CREATE TABLE Member(
course_id INTEGER,
user_id INTEGER,
role INTEGER,
PRIMARY KEY(course_id, user_id)
)
''')
file = open('roster_data.json').read()
data = json.loads(file) #parse the json file
for entry in data:
#read through each entry of the data, and get name, title, and role
name = entry[0]
title = entry[1]
role = entry[2]
cur.execute('INSERT OR IGNORE INTO Course(title) VALUES (?)', (title,))
cur.execute('SELECT id FROM Course WHERE title = ?', (title,))
course_id = cur.fetchone()[0]
cur.execute('INSERT OR IGNORE INTO User(name) VALUES (?)', (name,))
cur.execute('SELECT id FROM User WHERE name = ?', (name,))
user_id = cur.fetchone()[0]
cur.execute('''INSERT INTO Member(course_id,user_id,role)
VALUES (?,?,?)''',(course_id,user_id,role))
conn.commit()
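# Added sketch: a quick verification query over the freshly populated tables.
cur.execute('''SELECT User.name, Course.title, Member.role FROM Member
               JOIN User ON Member.user_id = User.id
               JOIN Course ON Member.course_id = Course.id LIMIT 5''')
print(cur.fetchall())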
|
def unique(n):
return [i for c,i in enumerate(n) if n[0:c].count(i) == 0 or c == 0]
'''
Remove Duplicates
You are to write a function called unique that takes an array of integers and
returns the array with duplicates removed. It must return the values in the
same order as first seen in the given array. Thus no sorting should be done,
if 52 appears before 10 in the given array then it should also be that 52 appears
before 10 in the returned array.
Assumptions
All values given are integers (they can be positive or negative).
You are given an array but it may be empty.
The array may have duplicates or it may not.
You cannot use the uniq method on Arrays (don't even try it), or the nub function from Data.List.
Example
print unique([1, 5, 2, 0, 2, -3, 1, 10])
[1, 5, 2, 0, -3, 10]
print unique([])
[]
print unique([5, 2, 1, 3])
[5, 2, 1, 3]
'''
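# Added sketch: sanity checks mirroring the examples in the docstring above.
if __name__ == '__main__':
    assert unique([1, 5, 2, 0, 2, -3, 1, 10]) == [1, 5, 2, 0, -3, 10]
    assert unique([]) == []
    assert unique([5, 2, 1, 3]) == [5, 2, 1, 3]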
|
import paho.mqtt.client as mqtt
from pymongo import MongoClient
broker = "192.168.10.15"
port = 1883
dbClient = MongoClient('localhost', 27017)
db = dbClient.cpu_useage
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
client.subscribe("rpi/useage")
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload))
#useage = {}
#posts = db.posts
#post = {"useage": (msg.payload)}
#result = posts.insert_one(post)
#post_id
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(broker, port)
client.loop_forever()
|
"""
make_fig3.py
Reproduces Figure 3 in O'Shaughnessy et al., 'Generative causal
explanations of black-box classifiers,' Proc. NeurIPS 2020, adapted here to a
global explanation for a CNN classifier trained on CIFAR classes 3 and 5.
"""
import numpy as np
import scipy.io as sio
import os
import torch
import util
import plotting
from GCE import GenerativeCausalExplainer
# --- parameters ---
gray = False
dataset = 'cifar'
data_classes = [3, 5]
# classifier
classifier_path = 'C:/Users/Dylan/Desktop/FACT/src/pretrained_models/cifar_35_classifier'
# vae
K = 1
L = 16
train_steps = 3000
Nalpha = 25
Nbeta = 70
lam = 0.01
batch_size = 128
lr = 1e-3
# other
randseed = 0
gce_path = 'C:/Users/Dylan/Desktop/FACT/src/outputs/cifar_35_gce_K1_L16_lambda001'
retrain_gce = False # train explanatory VAE from scratch
save_gce = False # save/overwrite pretrained explanatory VAE at gce_path
# --- initialize ---
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if randseed is not None:
np.random.seed(randseed)
torch.manual_seed(randseed)
ylabels = range(0, len(data_classes))
# --- load data ---
if dataset == 'mnist':
from load_mnist import load_mnist_classSelect
X, Y, tridx = load_mnist_classSelect('train', data_classes, ylabels)
vaX, vaY, vaidx = load_mnist_classSelect('val', data_classes, ylabels)
ntrain, nrow, ncol, c_dim = X.shape
x_dim = nrow*ncol
elif dataset == 'cifar':
from load_cifar import load_cifar_classSelect
X, Y, _ = load_cifar_classSelect('train', data_classes, ylabels, gray=gray)
vaX, vaY, _ = load_cifar_classSelect('val', data_classes, ylabels, gray=gray)
X, vaX = X / 255, vaX / 255
ntrain, nrow, ncol, c_dim = X.shape
x_dim = nrow * ncol
# --- load classifier ---
from models.CNN_classifier import CNN
classifier = CNN(len(data_classes), c_dim).to(device)
checkpoint = torch.load('%s/model.pt' % classifier_path, map_location=device)
classifier.load_state_dict(checkpoint['model_state_dict_classifier'])
# --- train/load GCE ---
from models.CVAEImageNet import Decoder, Encoder
if retrain_gce:
encoder = Encoder(K+L, c_dim, x_dim).to(device)
decoder = Decoder(K+L, c_dim, x_dim).to(device)
encoder.apply(util.weights_init_normal)
decoder.apply(util.weights_init_normal)
gce = GenerativeCausalExplainer(classifier, decoder, encoder, device, save_dir=gce_path)
traininfo = gce.train(X, K, L,
steps=train_steps,
Nalpha=Nalpha,
Nbeta=Nbeta,
lam=lam,
batch_size=batch_size,
lr=lr)
if save_gce:
if not os.path.exists(gce_path):
os.makedirs(gce_path)
torch.save(gce, os.path.join(gce_path,'model.pt'))
sio.savemat(os.path.join(gce_path, 'training-info.mat'), {
'data_classes' : data_classes, 'classifier_path' : classifier_path,
'K' : K, 'L' : L, 'train_step' : train_steps, 'Nalpha' : Nalpha,
'Nbeta' : Nbeta, 'lam' : lam, 'batch_size' : batch_size, 'lr' : lr,
'randseed' : randseed, 'traininfo' : traininfo})
else: # load pretrained model
gce = torch.load(os.path.join(gce_path, 'model.pt'), map_location=device)
# --- compute final information flow ---
I = gce.informationFlow()
Is = gce.informationFlow_singledim(range(0, K+L))
print('Information flow of K=%d causal factors on classifier output:' % K)
print(Is[:K])
print('Information flow of L=%d noncausal factors on classifier output:' % L)
print(Is[K:])
# --- generate explanation and create figure ---
sample_ind = np.concatenate((np.where(vaY == 0)[0][:4],
np.where(vaY == 1)[0][:4]))
x = torch.from_numpy(vaX[sample_ind])
zs_sweep = [-3., -2., -1., 0., 1., 2., 3.]
Xhats, yhats = gce.explain(x, zs_sweep)
plotting.plotExplanation(1. - Xhats, yhats, save_path='/Fig3CIFAR')
|
import threading
import time
valor = 100
valor1 = 5
def soma(num, num1):
result = num + num1
print("soma:", result)
def sub(num, num1):
result = num - num1
print("subtração:", result)
def div(num, num1):
result = num / num1
print("divisão:", result)
s = threading.Thread(target=soma, args=(valor, valor1))
ss = threading.Thread(target=sub, args=(valor, valor1))
d = threading.Thread(target=div, args=(valor, valor1))
s.start()
ss.start()
d.start()
|
from time import sleep
from vcenter import get_all_host_info, get_all_vm_info
from sqlit import insert_host_info, insert_vm_info
import logging
logging.basicConfig(level=logging.DEBUG,filename="log.txt",format="%(asctime)s;%(levelname)s;%(message)s")
while True:
try:
count = 0
host_info = get_all_host_info()
logging.info(host_info)
insert_host_info(host_info)
while True:
vm_info = get_all_vm_info()
logging.info(vm_info)
insert_vm_info(vm_info)
count = count +1
if count%12 == 0:
break
sleep(300)
except Exception as err:
logging.exception(err)
|
# -*- coding:utf-8 -*-
from threading import Thread
from time import sleep  # sleep() lets a thread pause
# To create a thread in Python, the class must behave like a thread,
# which is done by inheriting from Thread
class CookBook(Thread):
def __init__(self):
Thread.__init__(self)
self.message = "Hello Parallel Python Cookbook!!\n"
def print_message(self):
"""
        Print the message
:return:
"""
print (self.message)
def run(self):
"""
        Print the message 10 times
:return:
"""
print ("Thread starting\n")
x = 0
while (x < 10):
self.print_message()
sleep(2)
x += 1
print ("Thread Ended\n")
print "Process Started" # 开启主进程
hello_Python = CookBook() # 创建CB类的一个实例
hello_Python.start() # 打印消息,启动线程
print "Process Ended" # 终止主进程
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Put the rotating square and the ball from the previous
exercises into the same program.
That is, the program must be an animation with a rotating
square that grows constantly and a ball "rolling" across
the screen and bouncing off the edges, also at a constant
speed.
'''
from htdp_pt_br.universe import *
FREQUENCIA = 200
LARGURA = 600
ALTURA = 500
tela = criar_tela_base(LARGURA,ALTURA)
'''constants for the ball (Bola)'''
RAIO = 20
BOLA = circulo(RAIO,Cor('green'))
BORDA_DIREITA = LARGURA - RAIO
BORDA_ESQUERDA = RAIO
BORDA_BAIXO = ALTURA - RAIO
BORDA_CIMA = RAIO
'''
Bola (ball)
x is an int between BORDA_DIREITA and BORDA_ESQUERDA
y is an int between BORDA_CIMA and BORDA_BAIXO
'''
Bola = definir_estrutura("Bola", "x,y,ax,ay")
Bola.x = LARGURA // 2
Bola.y = ALTURA // 2
Bola.ax = 3
Bola.ay = 3
'''constants for the square (Quadrado)'''
variacao_tamanho = 2
variacao_angulo = 2
TAMANHO_INICIAL = LARGURA//10
ANGULO_INICIAL = 1
'''
Quadrado (square)
tamanho = int (> 0)
angulo = int (between 0 and 359)
'''
Quadrado = definir_estrutura("Quadrado", "tamanho,angulo")
Quadrado.tamanho = TAMANHO_INICIAL
Quadrado.angulo = ANGULO_INICIAL
QUADRADO_X = LARGURA//2
QUADRADO_Y = ALTURA//2
'''
Mundo (world) contains
Bola
Quadrado
'''
Mundo = definir_estrutura("Mundo","Bola,Quadrado")
def gira(Quadrado):
'''
    updates the Quadrado.angulo attribute
:param Quadrado: Quadrado
:return: Quadrado
'''
if Quadrado.angulo > 360:
Quadrado.angulo = ANGULO_INICIAL
return Quadrado
elif (Quadrado.angulo % 360) == 0:
Quadrado.angulo = ANGULO_INICIAL
return Quadrado
else:
Quadrado.angulo = Quadrado.angulo + variacao_angulo
return Quadrado
def aumentar(Quadrado):
'''
    updates the Quadrado.tamanho attribute
:param Quadrado: Quadrado
:return: Quadrado
'''
Quadrado.tamanho = Quadrado.tamanho + variacao_tamanho
return Quadrado
def desenha(mundo):
'''
    :param mundo: Mundo
    :return: the drawing surface (tela)
'''
img_quadrado = quadrado(mundo.Quadrado.tamanho, Cor('black'))
img_quadrado = girar(img_quadrado, mundo.Quadrado.angulo)
colocar_imagem(img_quadrado, tela, LARGURA // 2, ALTURA // 2)
colocar_imagem(BOLA, tela, mundo.Bola.x, mundo.Bola.y)
return tela
def tecla(mundo,tecla):
'''
    resets to the initial state when SPACE is pressed
    :param mundo: Mundo
    :param tecla: key
    :return: Mundo
'''
    mesmabola = Bola(mundo.Bola.x, mundo.Bola.y, mundo.Bola.ax, mundo.Bola.ay)
    if tecla == pg.K_SPACE:
        novoquadrado = Quadrado(TAMANHO_INICIAL, ANGULO_INICIAL)
        return Mundo(mesmabola, novoquadrado)
    else:
        return mundo
#TODO
def mover(mundo):
    '''
    :param mundo: Mundo
    :return: Mundo
    '''
    novoax = mundo.Bola.ax
    novoay = mundo.Bola.ay
    proximo_x = (mundo.Bola.x + mundo.Bola.ax)
    proximo_y = (mundo.Bola.y + mundo.Bola.ay)
    tamanho_q = mundo.Quadrado.tamanho // 2
    # horizontal borders
    if (proximo_x) > BORDA_DIREITA or (proximo_x) < BORDA_ESQUERDA:
        novoax = mundo.Bola.ax * (-1)
    # vertical borders
    if (proximo_y) > BORDA_BAIXO or (proximo_y) < BORDA_CIMA:
        novoay = mundo.Bola.ay * (-1)
    # collision with the square
    if (QUADRADO_X + tamanho_q) > proximo_x > (QUADRADO_X - tamanho_q) and (QUADRADO_X + tamanho_q) > proximo_y > (QUADRADO_X - tamanho_q):
        novoax = mundo.Bola.ax * (-1)
    if (QUADRADO_X + tamanho_q) > proximo_y > (QUADRADO_X - tamanho_q) and (QUADRADO_X + tamanho_q) > proximo_x > (QUADRADO_X - tamanho_q):
        novoay = mundo.Bola.ay * (-1)
    novoquadrado = gira(aumentar(mundo.Quadrado))
    novox = mundo.Bola.x + novoax
    novoy = mundo.Bola.y + novoay
    novomundo = Mundo(Bola(novox, novoy, novoax, novoay), novoquadrado)
    return novomundo
|
from common.run_method import RunMethod
import allure
@allure.step("极师通/作业/学生作业查阅情况详情")
def homework_readStatusOfStudentDetails_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/学生作业查阅情况详情"
url = f"/service-profile/homework/readStatusOfStudentDetails"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/草稿列表")
def homework_draft_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/草稿列表"
url = f"/service-profile/homework/draft"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/布置或保存草稿")
def homework_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/布置或保存草稿"
url = f"/service-profile/homework"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/详情")
def homework_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/详情"
url = f"/service-profile/homework"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/查阅情况")
def homework_consultSituation_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/查阅情况"
url = f"/service-profile/homework/consultSituation"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/删除作业")
def homework_delete(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/删除作业"
url = f"/service-profile/homework"
res = RunMethod.run_request("DELETE", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/提醒学生查看作业")
def homework_remind_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/提醒学生查看作业"
url = f"/service-profile/homework/remind"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/已发布列表")
def homework_published_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/已发布列表"
url = f"/service-profile/homework/published"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/发布作业")
def homework_publishHomework_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/发布作业"
url = f"/service-profile/homework/publishHomework"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/作业列表")
def homework_homeworkList_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/作业列表"
url = f"/service-profile/homework/homeworkList"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/班级列表")
def homework_myClasses_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/班级列表"
url = f"/service-profile/homework/myClasses"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/学生作业查阅情况")
def homework_readStatusOfStudent_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/学生作业查阅情况"
url = f"/service-profile/homework/readStatusOfStudent"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/作业详情")
def homework_homeworkDetails_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/作业详情"
url = f"/service-profile/homework/homeworkDetails"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通/作业/删除作业")
def homework_delHomework_delete(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通/作业/删除作业"
url = f"/service-profile/homework/delHomework"
res = RunMethod.run_request("DELETE", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极师通撤回作业")
def homework_recall_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极师通撤回作业"
url = f"/service-profile/homework/recall"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
|
import re
import requests
from bs4 import BeautifulSoup
def get_movie_id(url):
# e.g. "https://tw.rd.yahoo.com/referurl/movie/thisweek/info/*https://tw.movies.yahoo.com/movieinfo_main.html/id=6707"
# -> match.group(0): "/id=6707"
pattern = '/id=\d+'
match = re.search(pattern, url)
if match is None:
return url
else:
return match.group(0).replace('/id=', '')
def get_movies():
Y_MOVIE_URL = 'https://tw.movies.yahoo.com/movie_thisweek.html'
dom = requests.get(Y_MOVIE_URL)
soup = BeautifulSoup(dom.text, 'html.parser')
movies = []
rows = soup.select('.release_list li')
#rows = soup.select('#content_l li')
Y_INTRO_URL = 'https://tw.movies.yahoo.com/movieinfo_main.html' # 詳細資訊
for row in rows:
movie = dict()
movie['ch_name'] = row.select('.release_movie_name .gabtn')[0].text.strip()
movie['eng_name'] = row.select('.en .gabtn')[0].text.strip()
#movie['movie_id'] = get_movie_id(row.select('.release_movie_name .gabtn')[0]['href'])
movie['poster_url'] = row.select('img')[0]['src']
#movie['release_date'] = get_date(row.select('.release_movie_time')[0].text)
movie['intro'] = row.select('.release_text')[0].text.strip().replace(u'...詳全文', '').replace('\n', '')[0:15] + '...'
#movie['info_url'] = row.select('.release_movie_name .gabtn')[0]['href']
#movie['info_url'] = Y_INTRO_URL + '/id=' + get_movie_id(row.select('.release_movie_name .gabtn')[0]['href'])
movie['info_url'] = get_movie_id(row.select('.release_movie_name .gabtn')[0]['href'])
movies.append(movie)
return movies
|
"""
@description: library of data-related helper functions
"""
"""
import
"""
import numpy as np
import cv2
import torch
import os
def encode_gray_label(labels):
"""
    Convert the grey values of a label image into class ids.
    Note: entries with ignoreInEval=True are all treated as class 0.
    @param labels: greyscale label image
"""
encoded_labels = np.zeros_like(labels)
    # everything not explicitly converted below belongs to class 0
# 1
encoded_labels[labels == 200] = 1
encoded_labels[labels == 204] = 1
encoded_labels[labels == 209] = 1
# 2
encoded_labels[labels == 201] = 2
encoded_labels[labels == 203] = 2
# 3
encoded_labels[labels == 217] = 3
# 4
encoded_labels[labels == 210] = 4
# 5
encoded_labels[labels == 214] = 5
# 6
encoded_labels[labels == 220] = 6
encoded_labels[labels == 221] = 6
encoded_labels[labels == 222] = 6
encoded_labels[labels == 224] = 6
encoded_labels[labels == 225] = 6
encoded_labels[labels == 226] = 6
# 7
encoded_labels[labels == 205] = 7
encoded_labels[labels == 227] = 7
encoded_labels[labels == 250] = 7
return encoded_labels
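# Added sketch (doctest-style): the grey-value -> class-id mapping above on a tiny array.
# >>> encode_gray_label(np.array([[200, 217], [210, 0]]))
# array([[1, 3],
#        [4, 0]])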
def decode_gray_label(labels):
"""
    Map class ids back to grey values.
@params labels: shape=(h, w)
"""
decoded_labels = np.zeros_like(labels, dtype=np.int8)
# 1
decoded_labels[labels == 1] = 204
# 2
decoded_labels[labels == 2] = 203
# 3
decoded_labels[labels == 3] = 217
# 4
decoded_labels[labels == 4] = 210
# 5
decoded_labels[labels == 5] = 214
# 6
decoded_labels[labels == 6] = 224
# 7
decoded_labels[labels == 7] = 227
return decoded_labels
def decode_color_label(labels):
"""
    Map class ids back to RGB values.
@params labels: shape=(h, w)
"""
decoded_labels = np.zeros((3, labels.shape[0], labels.shape[1]), dtype=np.int8)
# 1
decoded_labels[0][labels == 1] = 220
decoded_labels[1][labels == 1] = 20
decoded_labels[2][labels == 1] = 60
# 2
decoded_labels[0][labels == 2] = 119
decoded_labels[1][labels == 2] = 11
decoded_labels[2][labels == 2] = 32
# 3
decoded_labels[0][labels == 3] = 220
decoded_labels[1][labels == 3] = 220
decoded_labels[2][labels == 3] = 0
# 4
decoded_labels[0][labels == 4] = 128
decoded_labels[1][labels == 4] = 64
decoded_labels[2][labels == 4] = 128
# 5
decoded_labels[0][labels == 5] = 190
decoded_labels[1][labels == 5] = 153
decoded_labels[2][labels == 5] = 153
# 6
decoded_labels[0][labels == 6] = 180
decoded_labels[1][labels == 6] = 165
decoded_labels[2][labels == 6] = 180
# 7
decoded_labels[0][labels == 7] = 178
decoded_labels[1][labels == 7] = 132
decoded_labels[2][labels == 7] = 190
return decoded_labels
def crop_resize_data(image, labels, out_size, height_crop_offset):
"""
@param out_size: (w, h)
"""
roi_image = image[height_crop_offset:] # crop
roi_image = cv2.resize(roi_image, out_size, interpolation=cv2.INTER_LINEAR) # resize
if labels is not None:
roi_label = labels[height_crop_offset:]
        roi_label = cv2.resize(roi_label, out_size, interpolation=cv2.INTER_NEAREST) # labels must use nearest-neighbour, since each pixel value is a class id
else:
roi_label = None
return roi_image, roi_label
def train_data_generator(image_list, label_list, batch_size, out_size, height_crop_offset):
"""
    Training data generator.
    :@param image_list: absolute paths of the image files
    :@param label_list: absolute paths of the label files
    :@param batch_size: how many images per batch
    :@param image_size: output image size
    :@param crop_offset: how much of the original image to cut off along the height
"""
    indices = np.arange(0, len(image_list))  # indices
out_images = []
out_labels = []
out_images_filename = []
    while True:  # generate forever
np.random.shuffle(indices)
for i in indices:
try:
image = cv2.imread(image_list[i])
labels = cv2.imread(label_list[i], cv2.IMREAD_GRAYSCALE)
except:
continue
# crop & resize
image, labels = crop_resize_data(image, labels, out_size, height_crop_offset)
# encode
labels = encode_gray_label(labels)
out_images.append(image)
out_labels.append(labels)
out_images_filename.append(image_list[i])
if len(out_images) == batch_size:
out_images = np.array(out_images, dtype=np.float32)
out_labels = np.array(out_labels, dtype=np.int64)
                # convert to RGB
                out_images = out_images[:, :, :, ::-1]
                # change dims to (n, c, h, w)
                out_images = out_images.transpose(0, 3, 1, 2)
                # normalise to -1 ~ 1
out_images = out_images*2/255 - 1
yield torch.from_numpy(out_images), torch.from_numpy(out_labels).long(), out_images_filename
out_images = []
out_labels = []
out_images_filename = []
def test_data_generator(images_root, batch_size, out_size, height_crop_offset):
"""
    Test data generator.
    :@param image_root: directory holding the test images
    :@param batch_size: maximum number of images per batch
    :@param image_size: output image size
    :@param crop_offset: how much of the original image to cut off along the height
"""
    # walk the test image directory
out_images = []
out_images_filename = []
for file in os.listdir(images_root):
if not file.endswith('.jpg'):
continue
try:
image = cv2.imread(os.path.join(images_root, file))
except:
continue
# crop & resize
image, _ = crop_resize_data(image, None, out_size, height_crop_offset)
out_images.append(image)
out_images_filename.append(file)
if len(out_images) == batch_size:
out_images = np.array(out_images, dtype=np.float32)
            # convert to RGB
            out_images = out_images[:, :, :, ::-1]
            # change dims to (n, c, h, w)
            out_images = out_images.transpose(0, 3, 1, 2)
            # normalise to -1 ~ 1
out_images = out_images*2/255 - 1
yield torch.from_numpy(out_images), out_images_filename
out_images = []
out_images_filename = []
def decodePredicts(predicts, out_size, height_pad_offset, mode='color'):
"""
    Turn the inference results back into images.
    @param predicts: shape=(n, c, h, w)
    @param out_size: the size to restore to, (w, h)
    @param height_pad_offset: how much padding to put back along the height
    @param mode: color | gray
    """
    # take the argmax over the class dimension
predicts = np.argmax(predicts, axis=1)
# reshape to (n, -1)
n, h, w = predicts.shape
predicts = predicts.reshape((n, -1))
if mode == 'color':
predicts = decode_color_label(predicts)
predicts = predicts.reshape((3, n, h, w))
predicts = predicts.transpose((1, 2, 3, 0)) # to (n, h, w, c)
c = 3
elif mode == 'gray':
predicts = decode_gray_label(predicts)
        predicts = predicts.reshape((n, 1, h, w))
        predicts = predicts.transpose((0, 2, 3, 1)) # to (n, h, w, c)
        c = 1
    else:
        raise ValueError('mode supports: color / gray')
    # resize & pad (must use nearest-neighbour)
dsize = (out_size[0], out_size[1]-height_pad_offset)
outs = []
for i in range(n):
out = np.zeros((out_size[1], out_size[0], c), dtype=np.uint8)
out[height_pad_offset:] = cv2.resize(predicts[i], dsize, interpolation=cv2.INTER_NEAREST) # label
outs.append(out)
return outs
|
"""
Provides a touchstone to the project root for resolution
of project-relative paths.
"""
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
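# Added sketch: resolving a project-relative path via the touchstone above;
# the 'data/input.csv' path is purely illustrative.
if __name__ == "__main__":
    print(os.path.join(ROOT_DIR, "data", "input.csv"))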
|
# Generated by Django 2.0.9 on 2018-12-18 20:02
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('empresas', '0003_acao'),
]
operations = [
migrations.AlterField(
model_name='acao',
name='data',
field=models.DateTimeField(default=datetime.datetime(2018, 12, 18, 20, 2, 41, 383835)),
),
]
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import setuptools
__author__ = 'ingbyr'
setuptools.setup(
name='BUPTNetLogin',
version='0.0.9',
author='ingbyr',
author_email='dev@ingbyr.com',
url='http://www.ingbyr.com',
description='Command line tool to login the BUPT net',
packages=['BUPTLogin'],
install_requires=[
'beautifulsoup4',
'lxml'
],
include_package_data=True,
entry_points={
'console_scripts': [
'bnl = BUPTLogin.login:do_login',
'bnlo = BUPTLogin.logout:logout'
]
},
)
|
import speech_recognition as sr
import pyttsx3
engine = pyttsx3.init()
rate = engine.getProperty('rate')
print (rate)
r = sr.Recognizer()
with sr.Microphone() as source: # use the default microphone as the audio source
audio = r.listen(source) # listen for the first phrase and extract it into audio data
try:
print("You said " + r.recognize_google(audio)) # recognize speech using Google Speech Recognition - ONLINE
print("You said " + r.recognize_sphinx(audio)) # recognize speech using CMUsphinx Speech Recognition - OFFLINE
except LookupError: # speech is unintelligible
print("Could not understand audio")
engine.setProperty('rate', 150)
if r.recognize_sphinx(audio) == "brown bear what do you see":
engine.say("I see a yellow bird looking at me")
engine.runAndWait()
# pip install pocketsphinx pyaudio pyttsx3
# https://stackoverflow.com/questions/12239080/getting-started-with-speech-recognition-and-python
|
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
class LSTMClassifier(nn.Module):
# class torch.nn.Module
# 官方文档
# 所有网络的基类
# 你的模型也应该继承这个类。
# Model description
# model = LSTMC.LSTMClassifier(embedding_dim=embedding_dim,hidden_dim=hidden_dim,
# vocab_size=len(corpus.dictionary),label_size=nlabel, batch_size=batch_size, use_gpu=use_gpu)
def __init__(self, embedding_dim, hidden_dim, vocab_size, label_size, batch_size, use_gpu):
super(LSTMClassifier, self).__init__() # _init__()确保父类被正确的初始化了:
self.hidden_dim = hidden_dim
self.batch_size = batch_size
self.use_gpu = use_gpu
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
# create model, regulate size
# Input should be module containing 23590 tensors of size 100
# 模块的输入是一个下标的列表,输出是对应的词嵌入。
# 官方文档
# class torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2, scale_grad_by_freq=False, sparse=False)
# 参数:
# num_embeddings(int) - 嵌入字典的大小
# embedding_dim(int) - 每个嵌入向量的大小
# padding_idx(int, optional) - 如果提供的话,输出遇到此下标时用零填充
# max_norm(float, optional) - 如果提供的话,会重新归一化词嵌入,使它们的范数小于提供的值
# norm_type(float, optional) - 对于max_norm选项计算p范数时的p
# scale_grad_by_freq(boolean, optional) - 如果提供的话,会根据字典中单词频率缩放梯度
# 变量:
# weight(Tensor) - 形状为(num_embeddings, embedding_dim)的模块中可学习的权值
# 形状:
# 输入: LongTensor(N, W), N = mini - batch, W = 每个mini - batch中提取的下标数
# 输出: (N, W, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim)
# Applies a multi - layer long short - term memory(LSTM) RNN to an input sequence.
# input layer size is 100, hidden layer size is 50
# 层数为单层可能是这里的 num_layer 没有设定 ?????????????
# 官方文档 https://pytorch.org/docs/stable/nn.html
# 参数说明:
# input_size – 输入的特征维度
# hidden_size – 隐状态的特征维度
# num_layers – 层数(和时序展开要区分开)
# bias – 如果为False,那么LSTM将不会使用bias weights b_ih and b_hh 默认为True。
# batch_first – 如果为True,那么输入和输出Tensor的形状为(batch, seq, feature)
# dropout – 如果非零的话,将会在RNN的输出上加个dropout,最后一层除外。
# bidirectional – 如果为True,将会变成一个双向RNN,默认为False。
self.hidden2label = nn.Linear(hidden_dim, label_size)
# x1 = nn.Linear(hidden_dim, label_size).weight.shape torch.Size([8, 50])
# x2 = nn.Linear(hidden_dim, label_size).bias.shape torch.Size([8])
# 输入应该是一个 8 * 50 的torch
# 有点难理解这里的数据结构 ???????????????????
# Create linear layer
# Applies a linear transformation to the incoming data: y = xA ^ T + b, x is a matrix
# in_features - 每个输入样本的大小
# out_features - 每个输出样本的大小
# bias - 若设置为False,这层不会学习偏置。默认值:True
# 形状:
# 输入:
# vector(N, in_features)
# vector(N, in_features)
# 输出:
# (N, out_features)
# (N, out_features)
# 变量:
# weight - 形状为(out_features x in_features)的模块中可学习的权值
# bias - 形状为(out_features) 的模块中可学习的偏置
self.hidden = self.init_hidden() # 返回保存着batch中每个元素的初始化隐状态的Tensor
# 返回batch中每个元素的初始化细胞状态的Tensor
def init_hidden(self):
if self.use_gpu:
h0 = Variable(torch.zeros(1, self.batch_size, self.hidden_dim).cuda())
c0 = Variable(torch.zeros(1, self.batch_size, self.hidden_dim).cuda())
else:
# 在Torch中的Variable就是一个存放会变化的值的地理位置.里面的值会不停的变化.值是Tensor如果用一个 Variable进行计算, 那返回的也是一个同类型的
# Variable
# Create two size 5 * 50 tensors, filling with 0 这里的5应该是LSTM对应的5个状态还是? ???????
# 一开始那个1意味着小数点后一位
# print(torch.zeros(1, 3, 5))
# tensor([[[0., 0., 0., 0., 0.],
# [0., 0., 0., 0., 0.],
# [0., 0., 0., 0., 0.]]])
h0 = Variable(torch.zeros(1, self.batch_size, self.hidden_dim))
c0 = Variable(torch.zeros(1, self.batch_size, self.hidden_dim))
# LSTM输入: input, (h_0, c_0)
# input(seq_len, batch, input_size): 包含输入序列特征的Tensor。也可以是packed variable ,详见[pack_padded_sequence](
# torch.nn.utils.rnn.pack_padded_sequence(input, lengths, batch_first=False[source])
# h_0(num_layers * num_directions, batch, hidden_size): 保存着batch中每个元素的初始化隐状态的Tensor
# c_0(num_layers * num_directions, batch, hidden_size): 保存着batch中每个元素的初始化细胞状态的Tensor
return (h0, c0)
def forward(self, sentence):
# 之前创建的Embedding层是用VOCAB SIZE,然后再FORWARD里面用的是SENTENCE Size, 有点搞不懂 ?????????
# self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
# create model, regulate size
# Sentence should be module containing 23590 tensors of size 100
#输出: (N, W, embedding_dim
embeds = self.word_embeddings(sentence)
# Input : Sentence tensor size 32 *5 print (sentence.size()) torch.Size([32, 5])
# Output: Embeds tensor 32* 5* 10 print (embeds.size()) torch.Size([32, 5, 100])t
# x = embeds
x = embeds.view(len(sentence), self.batch_size, -1)
#直接使用也是一样的
# 好像本来就是32,5,100的tensor,转化结果前后应该是一样的 ????????????
# Input : Embeds.torch.Size([32, 5, 100])
# Output: x.torch.Size([32,5,100]) print (x.shape) torch.Size([32, 5, 100])
# -1 means no sure about the size of one row
# View() method can regroup the tensor into different size , but does not change content.
# e.g. a = torch.arange(1, 17) # a's shape is (16,)
# a.view(4, 4) # output below
# 1 2 3 4
# 5 6 7 8
# 9 10 11 12
# 13 14 15 16
# [torch.FloatTensor of size 4x4]
lstm_out, self.hidden = self.lstm(x, self.hidden)
# 右边相当于把X作为输入,h0和c0作为初始状态变量 == input, (h_0, c_0) h0和 c0 是两个5* 50 tensors,初始值都为0.0,然后不断迭代
# 怎么看出循环过程 ???????????
# 左边是LSTM输出 output, (h_n, c_n) 对于LSTM,size()为 torch.Size([32, 5, 50]),最后一步为torch.Size([32, 4, 50])
# output(seq_len, batch, hidden_size * num_directions):
# print (lstm_out.size())
# 保存RNN最后一层的输出的Tensor。
# 如果输入是torch.nn.utils.rnn.PackedSequence,那么输出也是torch.nn.utils.rnn.PackedSequence。
# h_n(num_layers * num_directions, batch, hidden_size): Tensor,保存着RNN最后一个时间步的隐状态。
# c_n(num_layers * num_directions, batch, hidden_size): Tensor,保存着RNN最后一个时间步的细胞状态
y = self.hidden2label(lstm_out[-1])
# print (lstm_out[-1].shape)
# Input: The last output of LSTM_Out (-1 means last output) print ( lstm_out[-1].shape)---torch.Size([5, 50])
# Output: print( y.shape) 一开始都是torch.Size([5, 8]),最后一个是 torch.Size([4, 8]) ????????????????????????
# 在 t 时刻,LSTM 的输入有三个:当前时刻网络的输入值 x_t、上一时刻 LSTM 的输出值 h_t-1、以及上一时刻的单元状态 c_t-1;
# LSTM 的输出有两个:当前时刻 LSTM 输出值 h_t、和当前时刻的单元状态 c_t. 应该是对应着5个状态和8个label的得分,然后最后一次结束有一个状态没有了,所以是4,8 ??????
# X的值最后是torch.Size([32, 4, 100])
# print (x.shape)
# print("This is X")
# print(len(sentence))
# print("This is len(sentence)")
# print(lstm_out[-1].shape)
# print("This is Lstm_out[-1]")
# print(y.shape)
# print("A")
# print(y.size())
# print ("This is Y")
return y # 最后返回的是一个4.8的tensor torch.Size([5, 8]) torch.Size([4, 8])
# 编写前向过程
# '''def forward(self, inputs):
# embeds = self.embeddings(inputs).view((1, -1)) # Input is voc vecture and project to Embed layer
# out = F.relu(self.linear1(embeds)) # Calculate Hidden layer output, Relu activation function
# out = self.linear2(out) # Self.linear2 is output layer
# log_probs = F.log_softmax(out) # Calculate forward process
# ''''''return log_probs
#
# # 第二个前向
# 预处理文本转成稠密向量
# embeds=self.embedding((inputs))
# #根据文本的稠密向量训练网络
# out,self.hidden=self.lstm(embeds.view(len(inputs),1,-1),self.hidden)
# #做出预测
# tag_space=self.out2tag(out.view(len(inputs),-1))
# tags=F.log_softmax(tag_space,dim=1)
# return tags
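# Added sketch: instantiating the classifier with hypothetical sizes and pushing a dummy
# batch through it; the shapes follow the comments above (seq_len=32, batch_size=5).
if __name__ == "__main__":
    model = LSTMClassifier(embedding_dim=100, hidden_dim=50, vocab_size=1000,
                           label_size=8, batch_size=5, use_gpu=False)
    dummy_batch = torch.randint(0, 1000, (32, 5))   # (seq_len, batch) of word indices
    scores = model(dummy_batch)
    print(scores.shape)   # expected torch.Size([5, 8])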
|
"""Configuration module"""
# noqa
# System Imports
# Framework / Library Imports
# Application Imports
# Local Imports
import env_creds as creds
APP_VERSION = '0.0.1'
APP_DATE = '2020-11-02 1900'
APP_NODE = creds.APP_NODE
API_PREFIX = creds.API_PREFIX
DEBUG = creds.DEBUG
# RabbitMQ Queue Configuration
RABBITMQ = {
"server": creds.RABBIT_SERVER,
"port": creds.RABBIT_PORT,
"user": creds.RABBIT_USER,
"password": creds.RABBIT_PASS
}
|
def is_unique_string(string):
try:
iterator = iter(string)
except TypeError:
return False
chars = {}
for char in iterator:
if chars.get(char) != None:
return False
chars[char] = True
return True
assert is_unique_string(())
assert is_unique_string({})
assert is_unique_string('')
assert is_unique_string('a')
assert is_unique_string('ab')
assert is_unique_string('abc')
assert is_unique_string('123')
assert is_unique_string('123!')
assert not is_unique_string('!abcc')
assert not is_unique_string(None)
assert not is_unique_string(123)
|
#!/usr/bin/env python3
import os
import requests
from lxml import etree
from ..lib import utils
'''
desc: save the data into a file
'''
def save_linux_data(file_name, data):
with open(file_name, "w", encoding='utf-8') as f:
f.write(data)
def replace_bad_character(ori_str):
new_str = ori_str.replace("Â", "")
new_str = new_str.replace("¶", "")
new_str = new_str.replace("â", "'")
new_str = new_str.replace("â", "'") # ‘
new_str = new_str.replace("â", "'") # ’
new_str = new_str.replace("â", "'") # “
new_str = new_str.replace("â", "'") # ”
new_str = new_str.replace("â", "...") # …
new_str = new_str.replace(" ", " ") #
return new_str
'''
desc: get infomation of linux
get per-function infomation in Linux Core API page
- basic description, Parameters, Description, Note
concern: may have some absent situations
using lxml to filter xpath
'''
def get_linux_info(content):
info = ""
sections = content.xpath("div[@class='section']")
if len(sections) > 0:
for section in sections:
info += get_linux_info(section)
elif len(content.xpath("section")) > 0:
for section in content.xpath("section"):
info += get_linux_info(section)
else:
for element in content:
if "h" == element.tag[0]:
continue
if 'class' in element.attrib:
if element.attrib['class'] in ['function', 'c function']:
info += "\n" + "=" * 20 + "\n"
elif element.attrib['class'] == 'c macro':
continue
if 'class' in element.attrib and element.attrib['class'] == 'simple':
info += replace_bad_character(element.text) if element.text != None else ""
for e in element:
info += replace_bad_character(e.xpath("string(.)")) + "\n"
else:
info += replace_bad_character(element.xpath("string(.)")) + "\n"
if element.tag == "p":
info += "\n"
return info
'''
doc_dir - the storage directory of data
Now the default version is set to v5.15.
See the versions at https://www.kernel.org/doc/html/.
Note: This is an empirical implementation for the version <= v6.1, which
may not support the future version because the websites are updating.
'''
def handle_linux(doc_dir):
print("===============================================")
print("==== Handling linux info =====")
print("==== From linux Core API page =====")
dir = os.path.join(doc_dir, "linux")
utils.mkdir(dir)
source_linux = requests.get("https://www.kernel.org/doc/html/v5.15/core-api/kernel-api.html")
parser = etree.HTMLParser(encoding='utf-8')
html_elements = etree.HTML(source_linux.text, parser=parser).xpath("//*[@id='the-linux-kernel-api']")
saved_data = ""
for content in html_elements:
saved_data += get_linux_info(content)
source_linux = requests.get("https://www.kernel.org/doc/html/v5.15/core-api/mm-api.html")
parser = etree.HTMLParser(encoding='utf-8')
html_elements = etree.HTML(source_linux.text, parser=parser).xpath("//*[@id='memory-management-apis']")
for content in html_elements:
saved_data += get_linux_info(content)
save_linux_data(os.path.join(dir, "linux_api.txt"), saved_data)
print("===============================================")
|
# Generated by Django 2.2 on 2022-06-10 02:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('basketball', '0065_auto_20180722_2036'),
]
operations = [
migrations.AlterModelOptions(
name='award',
options={'ordering': ('-date',)},
),
migrations.AddField(
model_name='dailystatline',
name='fta',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='dailystatline',
name='ftm',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='recordstatline',
name='fta',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='recordstatline',
name='ftm',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='seasonstatline',
name='fta',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='seasonstatline',
name='ftm',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='statline',
name='fta',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='statline',
name='ftm',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='dailystatline',
name='points_to_win',
field=models.CharField(choices=[('11', '11'), ('30', '30'), ('other', 'Other')], default='other', max_length=30),
),
migrations.AlterField(
model_name='game',
name='points_to_win',
field=models.CharField(choices=[('11', '11'), ('30', '30'), ('other', 'Other')], default='other', max_length=30),
),
migrations.AlterField(
model_name='playbyplay',
name='primary_play',
field=models.CharField(choices=[('fgm', 'FGM'), ('fga', 'FGA'), ('threepm', '3PM'), ('threepa', '3PA'), ('ftm', 'FTM'), ('fta', 'FTA'), ('blk', 'BLK'), ('to', 'TO'), ('pf', 'FOUL'), ('sub_out', 'OUT'), ('misc', 'Misc')], max_length=30),
),
migrations.AlterField(
model_name='player',
name='is_active',
field=models.BooleanField(blank=True, default=True, help_text='Determine if a player should be selectable when creating games'),
),
migrations.AlterField(
model_name='recordstatline',
name='points_to_win',
field=models.CharField(choices=[('11', '11'), ('30', '30'), ('other', 'Other')], default='other', max_length=30),
),
migrations.AlterField(
model_name='seasonper100statline',
name='points_to_win',
field=models.CharField(choices=[('11', '11'), ('30', '30'), ('other', 'Other')], default='other', max_length=30),
),
migrations.AlterField(
model_name='seasonstatline',
name='points_to_win',
field=models.CharField(choices=[('11', '11'), ('30', '30'), ('other', 'Other')], default='other', max_length=30),
),
]
|
#coding:gb2312
# Append content to a file
filename = 'write.txt'
with open(filename,'a') as f:
f.write("\nPython 是一门解释性语言。")
|
"""读取源数据,定义数据加载函数
"""
import numpy as np
def data_gen(data, lookback, delay, min_index, max_index,
shuffle=False, batch_size=128, step=6):
"""生成历史和预测数据集
算法执行过程:首先在合理区间里找出一系列时间基准值保存在 rows 中,
根据 shuffle 的取值以及上次取值结束位置(保存在 i 中),
这个合理区间可能是:(min_index + lookback, max_index),
或者 (i, i + batch_size), (i, max_index)。
对 rows 的每个元素 row,以 row - lookback 为左边界、
row 为右边界、step 为间隔,生成一系列历史数据采样点,
将这些时间点上所有 14 个测量值被放入 samples 中,
将 row + delay 时间点上的温度值作为标签(预测值,与算法给出的预测结果比较)
放入 target 里。
本例中,一个批量取 128 个样本,一个样本有一个基准时间,
向前推10天,在这个区间内等间隔取240个点(每1小时采样一次),
每个点上取源数据中所有14个物理测量值,形成一个 240 x 14 的矩阵作为预测依据,
再取基准时间后1天的温度作为预测目标。
Parameters:
data (numpy.ndarray): 本例中为 420551 行,14 列
lookback (int): 历史数据长度,本例中为 1440,即 10 天(原始数据间隔为10分钟);
delay: 从基准时间(lookback 的结束时间点)向后推 delay 时间,预测目标是这个时间点上的温度值
min_index (int): 历史区间的左边界
max_index (int): 历史区间的右边界
batch_size (int): 样本个数
step: 采样点时间间隔,本例中 6 表示每 6 个点采样一次,即采用间隔为 1 小时
Returns:
tuple: 包含(历史 预测)二元组,
第一部分是形状为 (batch_size, lookback/step, feature_number),
本例中为 (128, 240, 14) 的 numpy.ndarray,
第二部分(预测)是一个长度为 batch_size 的一元 ndarray,
本例中形状为 (128,)。
"""
if max_index is None:
        max_index = len(data) - delay - 1  # keep the target index from running past the right end of the array
    i = min_index + lookback  # keep the history lookback (stored in indices) from running off the left end
while 1:
if shuffle:
rows = np.random.randint(min_index + lookback,
max_index, size=batch_size)
            # each element of rows uses itself as the right edge to generate one
            # history sequence and one target; they are independent of each other,
            # so they may be drawn in any order.
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i, min(i + batch_size, max_index))
i += len(rows)
        samples = np.zeros((len(rows), lookback // step, data.shape[-1]))  # data.shape[-1] is the feature count excluding the timestamp, 14
targets = np.zeros((len(rows),))
for j, row in enumerate(rows):
indices = range(rows[j] - lookback, rows[j], step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples, targets
fname = './jena_climate_2009_2016.csv'
with open(fname, 'r') as f:
rawlines = f.readlines()
striped = map(lambda x: x.strip(), rawlines)
header = next(striped).split(',')
lines = list(striped)
print('Header:', header)
print('Number of data lines:', len(lines))
float_data = np.zeros((len(lines), len(header) - 1))  # -1 because the timestamp column is not included
for i, line in enumerate(lines):
values = [float(x) for x in line.split(',')[1:]]
float_data[i] = values
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
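# A minimal usage sketch (not part of the original file): building training and
# validation generators over the normalized float_data. The index boundaries,
# the delay of one day (144 records at 10-minute intervals) and the other
# values below are illustrative choices consistent with the docstring, not
# values taken from the original script.
lookback, step, delay, batch_size = 1440, 6, 144, 128
train_gen = data_gen(float_data, lookback=lookback, delay=delay,
                     min_index=0, max_index=200000,
                     shuffle=True, step=step, batch_size=batch_size)
val_gen = data_gen(float_data, lookback=lookback, delay=delay,
                   min_index=200001, max_index=300000,
                   step=step, batch_size=batch_size)
samples, targets = next(train_gen)
print(samples.shape, targets.shape)  # expected: (128, 240, 14) (128,)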
|
from tkinter import *
root = Tk()
with open("file.txt", "r") as f:
Label(root, text=f.read()).pack()
root.mainloop()
|
from JapaneseTokenizer.mecab_wrapper import MecabWrapper
from JapaneseTokenizer.juman_wrapper import JumanWrapper
from JapaneseTokenizer.kytea_wrapper import KyteaWrapper
from JapaneseTokenizer.datamodels import TokenizedSenetence
from JapaneseTokenizer.datamodels import FilteredObject
|
from script.base_api.service_finance.app import *
from script.base_api.service_finance.web import *
from script.base_api.service_finance.finance import *
from script.base_api.service_finance.handCash import *
from script.base_api.service_finance.v2 import *
from script.base_api.service_finance.applet import *
|
import random
def guess(x):
    random_number = random.randint(1, x)
    guess = 0
    while guess != random_number:
        guess = int(input(f'Guess a number between 1 and {x}: '))
        if guess < random_number:
            print("Sorry, guess again, too low")
        elif guess > random_number:
            print("Sorry, guess again, too high")
        elif guess == random_number:
            print(f'Congrats, right number {random_number}')
guess(10)
|
import numpy as np
import pytest
import osmo_camera.tiff.save as module
class TestGuardImageFitsIn32Bits:
def setup_method(self):
self.test_image = np.zeros(shape=(2, 2, 3))
@pytest.mark.parametrize(
"name, in_range_value",
[
("zero", 0),
("value within range", 1),
("value near min", -64),
("value near max", 63.9999),
],
)
def test_does_not_raise_if_in_range(self, name, in_range_value):
self.test_image[0][0][0] = in_range_value
module._guard_rgb_image_fits_in_padded_range(self.test_image)
@pytest.mark.parametrize(
"name, out_of_range_value",
[
("value just below min", -64.1),
("value well below min", -10000),
("value just above max", 64),
("value well above max", 10000),
],
)
def test_raises_if_out_of_range(self, name, out_of_range_value):
self.test_image[0][0][0] = out_of_range_value
with pytest.raises(module.DataTruncationError):
module._guard_rgb_image_fits_in_padded_range(self.test_image)
|
from hamcrest import assert_that, equal_to
from ..utils.geometry import RectSize
from ..utils.image import ScreenshotFromPngBytes
class Window(object):
def __init__(self, browser):
self._browser = browser
@property
@RectSize.wrapped
def outer_size(self):
size = self._browser.get_window_size()
return (size['width'], size['height'])
@property
@RectSize.wrapped
def inner_size(self):
return self._browser.execute_script(
'return [window.innerWidth, window.innerHeight];')
@property
def virtual_pixel_ratio(self):
return self._browser.execute_script('return window.devicePixelRatio;')
@property
def physical_pixel_ratio(self):
return self.virtual_pixel_ratio
@property
def address_bar_height(self):
if hasattr(self._browser, 'address_bar_height'):
return self._browser.address_bar_height
else:
return 0
@property
def bar_shadow_height(self):
if hasattr(self._browser, 'bar_shadow_height'):
return self._browser.bar_shadow_height
else:
return 0
def get_screenshot(self):
png_bytes = self._browser.get_screenshot_as_png()
return ScreenshotFromPngBytes(png_bytes)
class ResizableWindow(Window):
@Window.outer_size.setter
def outer_size(self, value):
self._browser.set_window_size(*value)
assert_that(self.outer_size, equal_to(value), 'Outer Window Size')
@Window.inner_size.setter
def inner_size(self, value):
size_adjustment = value - self.inner_size
try:
self.outer_size += size_adjustment
except AssertionError:
pass
assert_that(self.inner_size, equal_to(value), 'Inner Window Size')
def maximize(self):
self._browser.maximize_window()
|
#Sean Kim
#Unit 1 What's Your Order?
print ("Welcome to the Respass Deli!")
print ("What type of sandwich would you like?")
sandwich = input ()
print ("What size fries would you like (small/medium/large)?")
fries = input ()
print ("What type of soda would you like?")
soda = input ()
print ("You have ordered: \n" + "\t" + sandwich.title() +" Sandwich\n" + "\t" +
fries.title() + " Fries \n" + "\t" + soda.title() +" to Drink\n"
"Enjoy")
|
# Generated by Django 2.1 on 2018-10-02 13:15
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0028_price'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, verbose_name='Наименование')),
('desc', tinymce.models.HTMLField()),
('date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterField(
model_name='price',
name='file',
field=models.FileField(blank=True, null=True, upload_to='price', verbose_name='Файл'),
),
]
|
from __future__ import print_function
import sys
from operator import add
from pyspark import SparkContext
from csv import reader
import re
def check_y_coord_cd(input):
if len(input) == 0:
return 'NULL\tNULL\tNULL'
try:
x = int(input)
return 'INT\tY-COORDINATE\tVALID' if x >= 110618 and x<= 278254 else 'INT\tY-COORDINATE\tINVALID/OUTLIER'
except ValueError as err:
return 'INT\tY-COORDINATE\tINVALID/OUTLIER'
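# Illustrative self-checks (not part of the original file) showing the
# tab-separated output format produced by the validator:
assert check_y_coord_cd('') == 'NULL\tNULL\tNULL'
assert check_y_coord_cd('200000') == 'INT\tY-COORDINATE\tVALID'
assert check_y_coord_cd('abc') == 'INT\tY-COORDINATE\tINVALID/OUTLIER'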
if __name__ == "__main__":
sc = SparkContext()
lines = sc.textFile(sys.argv[1], 1)
lines = lines.mapPartitions(lambda x: reader(x)).filter(lambda x: x[0] != 'CMPLNT_NUM')
    results = lines.map(lambda x: check_y_coord_cd(x[20]))
results.saveAsTextFile('check_y_coord_cd.out')
sc.stop()
|
from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
def test_layout_and_styling(self):
        # For a simple layout and styling test, check that the user area sits near the top right corner
# Visitor goes to the home page
self.browser.get(self.server_url)
        # He notices the user area is at the top right
user_area = self.browser.find_element_by_id('navbar-tray')
self.assertAlmostEqual(user_area.location['y'], 0, delta=20)
|
brian = "Hello life"
# Assign your variables below, each on its own line!
caesar = "Graham"
praline = "John"
viking = "Teresa"
# Put your variables above this line
print caesar
print praline
print viking
"""
The string "PYTHON" has six characters,
numbered 0 to 5, as shown below:
+---+---+---+---+---+---+
| P | Y | T | H | O | N |
+---+---+---+---+---+---+
0 1 2 3 4 5
So if you wanted "Y", you could just type
"PYTHON"[1] (always start counting from 0!)
"""
fifth_letter = "MONTY"[4]
print fifth_letter
parrot = "Norwegian Blue"
print len(parrot)
print parrot.lower()
pi = 3.14
print str(pi)
print "Spam "+"and "+"eggs"
print "The value of pi is around " + str(3.14)
string_1 = "Camelot"
string_2 = "place"
print "Let's not go to %s. 'Tis a silly %s." % (string_1, string_2)
name = raw_input("What is your name?")
quest = raw_input("What is your quest?")
color = raw_input("What is your favorite color?")
print "Ah, so your name is %s, your quest is %s, " \
"and your favorite color is %s." % (name, quest, color)
|
# -*- coding: utf-8 -*-
import re, os, time, sys
import urllib, urllib2, urlparse
import xbmc, xbmcplugin, xbmcgui, xbmcaddon
from resources.lib import kokolib, hellotools
addon = xbmcaddon.Addon()
addonname = addon.getAddonInfo('name')
addon_handle = int(sys.argv[1])
sysaddon = sys.argv[0]
xbmcplugin.setContent(addon_handle, 'movies')
COOKIEPATH = unicode(addon.getAddonInfo('path') + os.path.sep + "cookies",'utf-8')
cookieFile = COOKIEPATH + os.path.sep + 'koko.cookie'
baseUrl = 'http://kokosik1207.pl'
mainMenu = [
["[COLOR=blue]Ostatnio dodane[/COLOR]",'/newposts/',"FilmList"],
["Seriale",'/seriale/',"FilmList"],
["Kolekcje filmowe",'/kolekcje-filmowe',"FilmList"],
["Filmy na prosbe",'/filmy-na-prosbe',"FilmList"],
["Gatunek",'g',"SubCategory"],
["Wersja",'w',"SubCategory"],
["Rok",'r',"SubCategory"],
["[COLOR=green]Szukaj[/COLOR]","0","Search"]
]
gatMenu = [
["Akcja",'/akcja',"FilmList"],
["Animowane",'/animowane',"FilmList"],
["Biograficzne",'/biograficzne',"FilmList"],
["Dokumentalne",'/dokument',"FilmList"],
["Dramat","/dramat","FilmList"],
["Familijny","/familijny","FilmList"],
["Fantasy","/fantasy","FilmList"],
["Historyczny","/historyczny","FilmList"],
["Horror","/horror","FilmList"],
["Katastroficzny","/katastroficzny","FilmList"],
["Komedia","/komedia","FilmList"],
["Kryminal","/kryminal","FilmList"],
["Przygodowy","/przygodowy","FilmList"],
["Psychologiczny","/psychologiczny","FilmList"],
["Romans","/romans","FilmList"],
["Sci-Fi","/sci-fi","FilmList"],
["Sensacyjny","/sensacyjny","FilmList"],
["Thriller","/thriller","FilmList"],
["Western","/western","FilmList"]
]
werMenu = [
["Film polski",'/xfsearch/polski',"FilmList"],
["Napisy PL",'/xfsearch/napisy+pl',"FilmList"],
["Lektor PL",'/xfsearch/lektor+pl',"FilmList"],
["Lektor amatorski",'/xfsearch/lektor+amatorski',"FilmList"],
["Dubbing PL",'/xfsearch/dubbing',"FilmList"],
["Inne",'/xfsearch/inne',"FilmList"]
]
rokMenu = [
["2017",'/xfsearch/2017',"FilmList"],
["2016",'/xfsearch/2016',"FilmList"],
["2015",'/xfsearch/2015',"FilmList"],
["2014",'/xfsearch/2014',"FilmList"],
["2013","/xfsearch/2013","FilmList"],
["2012","/xfsearch/2012","FilmList"],
["2011","/xfsearch/2011","FilmList"],
["2010","/xfsearch/2010","FilmList"]
]
def addMenu(name,url,mode,folder,contextmenu='',info='',icon='',isPlayable = False, img = ''):
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=icon)
liz.setInfo( type="video", infoLabels = info )
if img:
liz.setArt({ "poster": img }) # "fanart": "https://image.tmdb.org/t/p/w185/weUSwMdQIa3NaXVzwUoIIcAi85d.jpg"
if isPlayable:
liz.setProperty('IsPlayable', 'True')
if contextmenu:
liz.addContextMenuItems(contextmenu)
u=sysaddon+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&icon="+urllib.quote_plus(icon)+"&content="
ok=xbmcplugin.addDirectoryItem(handle=addon_handle,url=u,listitem=liz,isFolder=folder)
return ok
def buildMenu(menuList):
for i in menuList:
addMenu(i[0],i[1],i[2],True)
def subCategory(url):
if(url == 'g'):
buildMenu(gatMenu)
if(url == 'w'):
buildMenu(werMenu)
if(url == 'r'):
buildMenu(rokMenu)
xbmcplugin.endOfDirectory(addon_handle)
def filmList(url):
html = hellotools.httpRequest({ 'url':url, 'use_cookie': True, 'cookiefile': cookieFile, 'save_cookie': False, 'load_cookie': True, 'return_data': True })
movieList = kokolib.getTitleList(html)
pageing = kokolib.getNextPage(html)
for mm in movieList:
info = { "genre":mm[2], "plot": "[COLOR=blue]" + mm[2] + "[/COLOR]. " + mm[3], "title": mm[1] }
addMenu(mm[1],mm[0],'MovieDetails',True, '', info, '', False, mm[4])
if pageing:
addMenu('[COLOR=blue]> Następna strona >[/COLOR] ' + pageing[1], pageing[0],'FilmList', True, '', {"plot":pageing[0]})
xbmcplugin.endOfDirectory(addon_handle)
# play2 works
def play2(stream_url):
listitem = xbmcgui.ListItem("play2")
xbmc.Player().play(stream_url, listitem)
# alternative implementation using urlresolver (kept disabled; the resolver proved unreliable)
#def findAndPlayStream(url):
# try:
# import urlresolver
# movieUrl = urlresolver.resolve(url)
# if isinstance(movieUrl, basestring):
# play2(movieUrl)
# else:
# xbmcgui.Dialog().ok(addonname, 'Brak filmu', 'Info: %s' % movieUrl)
# except urlresolver.resolver.ResolverError as e:
# xbmcgui.Dialog().ok(addonname, 'ResolverError: %s' % e)
# except:
# xbmcgui.Dialog().ok(addonname, 'KokoError: %s' % str(sys.exc_info()[0]), str(sys.exc_info()[1]), str(sys.exc_info()[2])) #traceback.format_exc()
def findAndPlayStream(url):
movieUrl = kokolib.findMovieUrl(url)
if movieUrl[0]:
play2(movieUrl[1])
else:
xbmcgui.Dialog().ok(addonname, movieUrl[1])
def checkHasCredentials():
if not addon.getSetting('kokosik.user') or not addon.getSetting('kokosik.pass'):
xbmcgui.Dialog().ok(addonname, 'Zaloguj sie', 'Aby ogladac filmy musisz byc zalogowany')
addon.openSettings()
return False
return True
def isLoggedin(html):
return html.find('index.php?action=logout') > 0
def login():
query_data = { 'url': 'http://kokosik1207.pl/', 'use_cookie': True, 'cookiefile': cookieFile, 'save_cookie': True, 'load_cookie': False, 'use_post': True, 'return_data': True }
loginData = { 'login_name': addon.getSetting('kokosik.user'), 'login_password': addon.getSetting('kokosik.pass'), 'login':'submit' }
html = hellotools.httpRequest(query_data, loginData)
    loggedin = isLoggedin(html)
    if not loggedin:
        xbmcgui.Dialog().ok(addonname, 'Podane login lub haslo sa nieprawidlowe', addon.getSetting('kokosik.user') + ":" + addon.getSetting('kokosik.pass'))
    return loggedin
def movieDetails(url):
query_data2 = { 'url': url, 'use_cookie': True, 'cookiefile': cookieFile, 'save_cookie': False, 'load_cookie': True, 'return_data': True }
html = hellotools.httpRequest(query_data2)
loggedin = isLoggedin(html)
if(not loggedin):
checkHasCredentials()
loggedin = login()
if(loggedin):
        html = hellotools.httpRequest(query_data2)
lst = kokolib.listVideoProviders(html)
movie = kokolib.getTitleList(html)[0]
for z in lst:
info = { "plot":str(movie[1]) + "\n" + movie[3] + "\n\n" + z[3], "title":movie[1], "rating":"4" }
addMenu(z[0] + " " + z[1] + " "+ z[2],z[3],'FindAndPlay', True, '', info, '', False, movie[4])
xbmcplugin.endOfDirectory(addon_handle)
def inputSearchText(text=''):
textnew = None
kb = xbmc.Keyboard(text)
kb.doModal()
if (kb.isConfirmed()):
textnew = kb.getText()
return textnew
def search(key):
query_data = { 'url': 'http://kokosik1207.pl/', 'use_cookie': True, 'cookiefile': cookieFile, 'save_cookie': False, 'load_cookie': True, 'use_post': True, 'return_data': True }
postData = { 'do': 'search', 'subaction':'search', 'story':key, 'x':'0', 'y':'0' }
html = hellotools.httpRequest(query_data, postData)
data = kokolib.parseSearchHtml(html)
for mm in data:
info = { "genre":mm[1], "plot": "[COLOR=blue]" + mm[1] + "[/COLOR]. " + mm[3], "title": mm[0] }
addMenu(mm[0],mm[2],'MovieDetails',True, '', info)
#pageing = kokolib.getNextPage(html)
#if pageing:
# addMenu('[COLOR=blue]> Następna strona >[/COLOR] ' + pageing[1], pageing[0],'FilmList', True, '', {"plot":pageing[0]})
xbmcplugin.endOfDirectory(addon_handle)
## MAIN LOOP
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
if not os.path.isdir(COOKIEPATH):
os.mkdir(COOKIEPATH)
url = params.get('url') or None
name = params.get('name') or None
mode = params.get('mode') or None
content = params.get('content') or ''
icon = params.get('icon') or None
premium = params.get('premium') or ''
param1 = params.get('param1') or None
param2 = params.get('param2') or None
param3 = params.get('param3') or None
######################################
if mode==None:
buildMenu(mainMenu)
xbmcplugin.endOfDirectory(addon_handle)
checkHasCredentials()
elif mode=='FilmList':
if 'http' not in url:
url = baseUrl + url
filmList(url)
elif mode=='MovieDetails':
movieDetails(url)
elif mode=='FindAndPlay':
findAndPlayStream(url)
elif mode=='SubCategory':
subCategory(url)
elif mode=='Tools':
toolsMenu()
elif mode=='Search':
key = inputSearchText()
if key:
search(key)
#key=repPolChars(key)
#addHistoryItem(my_addon_name, key)
#xbmc.executebuiltin('Container.Refresh')
elif mode=='SearchFromList':
searchFromList(url)
|
number = 19
swap = [1, 1, 2]
fib = []
if number == 1:
fib.append(1)
print(fib)
elif number == 2:
fib.append(1)
fib.append(1)
print(fib)
else:
fib.append(1)
fib.append(1)
fib.append(2)
for i in range(0, number-3):
swap[0] = swap[1]
swap[1] = swap[2]
swap[2] = swap[0] + swap[1]
fib.append(swap[2])
print(fib)
|
"""
计算句子相似度
输入参数:
2个句子 s1,s2
返回参数:
句子相似度
"""
import numpy as np
from scipy.linalg import norm
from sklearn.feature_extraction.text import CountVectorizer
def tf_similarity(s1, s2):
def add_space(s):
return ' '.join(list(s))
    # insert spaces between the characters
s1, s2 = add_space(s1), add_space(s2)
    # convert to a term-frequency (TF) matrix
cv = CountVectorizer(tokenizer=lambda s: s.split())
corpus = [s1, s2]
vectors = cv.fit_transform(corpus).toarray()
    # compute the cosine similarity of the TF vectors
return np.dot(vectors[0], vectors[1]) / (norm(vectors[0]) * norm(vectors[1]))
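# A minimal usage sketch (the sample sentences are placeholders, not taken from
# the original file). add_space splits the input into single characters, so the
# measure works for text without whitespace as well as for short strings.
if __name__ == '__main__':
    print(tf_similarity('hello world', 'hello there'))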
|
number = "+18684732335"
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from .events import CoffeeOrdered
from .events import CoffeeServed
from .events import CoffeeFinished
__all__ = [
'CoffeeOrdered',
'CoffeeServed',
'CoffeeFinished',
]
|
from django import forms
from .models import Hamyar
class HamyarForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = Hamyar
fields = ['country', 'city', 'address',
'postal_code', 'phone_number']
def __init__(self, *args, **kwargs):
super(HamyarForm, self).__init__(*args, **kwargs)
# self.fields['first_name'].label = "نام"
# self.fields['last_name'].label = "نام خانوادگی"
# self.fields['email'].label = "پست الکترونیکی"
# self.fields['password'].label = "رمز عبور"
# self.fields['national_id'].label = "کد ملی"
# self.fields['country'].label = "کشور"
# self.fields['city'].label = 'شهر'
# self.fields['address'].label = 'آدرس'
# self.fields['postal_code'].label = 'کد پستی'
# self.fields['phone_number'].label = 'شماره تلفن'
# self.fields['first_name'].widget.attrs[
# 'style'] = 'width:400px; height:20px; border-radius=5px; direction:rtl; float:left;'
# self.fields['email'].widget.attrs['style'] = 'width:400px; height:20px; border-radius=5px; float:left;'
# self.fields['last_name'].widget.attrs[
# 'style'] = 'width:400px; height:20px; border-radius=5px; direction:rtl; float:left;'
# self.fields['country'].widget.attrs[
# 'style'] = 'width:400px; height20px; border-radius=5px; direction:rtl; float:left;'
|
# -*- coding: utf-8 -*-
import psycopg2
class Requests:
def __init__(self):
self.registerQuery = 'select id,dni,student_number,name,lastname,email,reason,password,hash,confirmed from account_requests.requests'
def convertToDict(self, d):
r = {
'id':d[0],
'dni':d[1],
'studentNumber':d[2],
'name':d[3],
'lastname':d[4],
'email':d[5],
'reason':d[6],
'password':d[7],
'hash':d[8],
'confirmed':d[9]
}
return r
def listRequests(self, con):
cur = con.cursor()
        cur.execute(self.registerQuery)
        data = cur.fetchall()
        if data is None:
            return []
        # convert each row of the response into a dictionary
rdata = []
for d in data:
rdata.append(self.convertToDict(d))
return rdata
def findRequest(self, con, id):
cur = con.cursor()
        cur.execute(self.registerQuery + ' where id = %s', (id,))
        d = cur.fetchone()
        if d is None:
            return None
        # convert the response row into a dictionary
rdata = self.convertToDict(d)
return rdata
def findRequestByHash(self, con, hash):
cur = con.cursor()
        cur.execute(self.registerQuery + ' where hash = %s', (hash,))
        d = cur.fetchone()
        if d is None:
            return None
        # convert the response row into a dictionary
rdata = self.convertToDict(d)
return rdata
def confirmRequest(self, con, rid):
cur = con.cursor()
cur.execute('update account_requests.requests set confirmed = true where id = %s', (rid,))
def createRequest(self, con, req):
rreq = (req['id'],req['dni'],req['studentNumber'],req['name'],req['lastname'],req['email'],req['reason'],req['password'],req['hash'])
cur = con.cursor()
cur.execute('insert into account_requests.requests (id,dni,student_number,name,lastname,email,reason,password,hash) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)', rreq)
def removeRequest(self, con, rid):
cur = con.cursor()
cur.execute('delete from account_requests.requests where id = %s', (rid,))
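# A minimal usage sketch (not part of the original file). The connection
# parameters below (database, user, password, host) are placeholders; the
# account_requests.requests table is assumed to exist as used above.
if __name__ == '__main__':
    con = psycopg2.connect(dbname='accounts', user='postgres',
                           password='secret', host='localhost')
    try:
        for r in Requests().listRequests(con):
            print(r['id'], r['email'], r['confirmed'])
    finally:
        con.close()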
|
from crawler import Crawler
class FindWordCrawler(Crawler):
def __init__(self, word_to_find):
super().__init__()
self.word_to_find = word_to_find
self.word_found = False
def process_data(self, data, url):
print(self.visited_urls)
for line in data:
if self.word_to_find in line:
self.word_found = True
print("Word found on page: {}, line: {}".format(url, line.strip()))
break
def is_ended(self):
return self.word_found
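# A minimal usage sketch (not part of the original file). The Crawler base
# class is assumed, as in the sibling CountWordCrawler snippet, to expose a
# crawl(start_url) entry point; the URL below is a placeholder.
if __name__ == "__main__":
    crawler = FindWordCrawler("python")
    crawler.crawl("your_address_goes_here")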
|
from urllib.parse import urlparse
from crawler import Crawler
class CountWordCrawler(Crawler):
def __init__(self, word_to_count, visit_limit=100):
super().__init__()
self.word_to_count = word_to_count
self.word_counter = 0
self.pages_left = visit_limit
        self.base_url_netloc = ""
def init_crawler(self, base_url):
self.base_url_netloc = urlparse(base_url).netloc
def process_data(self, data, url):
for line in data:
self.word_counter += line.strip().split(' ').count(self.word_to_count)
def filter_links(self, links):
filtered_links = []
for link in links:
parsed_link = urlparse(link)
if parsed_link.netloc == self.base_url_netloc:
filtered_links.append(link)
return filtered_links
def update(self):
self.pages_left -= 1
def is_ended(self):
return self.pages_left <= 0
def value_to_return(self):
return self.word_counter
if __name__ == "__main__":
crawler = CountWordCrawler("word")
    word_occurrences = crawler.crawl("your_address_goes_here")
    print("Word occurrences: {}".format(word_occurrences))
|
# Copyright 2009-2010, BlueDynamics Alliance - http://bluedynamics.com
from zope.interface import implements
from zope.catalog.catalog import Catalog
from zope.catalog.field import FieldIndex
from zope.catalog.text import TextIndex
from zope.catalog.keyword import KeywordIndex
from cornerstone.soup.interfaces import ICatalogFactory
from cornerstone.soup.ting import TingIndex
class MyCatalogFactory(object):
"""ICatalogFactory implementation used for testing.
"""
implements(ICatalogFactory)
def create_catalog(self):
catalog = Catalog()
catalog[u'user'] = FieldIndex(field_name='user', field_callable=False)
catalog[u'text'] = TextIndex(field_name='text', field_callable=False)
catalog[u'keywords'] = KeywordIndex(field_name='keywords',
field_callable=False)
return catalog
def __call__(self):
self.catalog = self.create_catalog()
return self.catalog
class TingCatalogFactory(object):
"""ICatalogFactory implementation for testing textindex NG integration to
soup.
"""
implements(ICatalogFactory)
def create_catalog(self):
catalog = Catalog()
catalog[u'ting'] = TingIndex(field_name=('foo', 'bar', 'baz'),
field_callable=False)
return catalog
def __call__(self):
self.catalog = self.create_catalog()
return self.catalog
class SortCatalogFactory(object):
"""ICatalogFactory implementation for testing sorting in soup.
"""
implements(ICatalogFactory)
def __call__(self):
catalog = Catalog()
catalog[u'date'] = FieldIndex(field_name='date',
field_callable=False)
return catalog
|
import torch
from torch import nn
import torch.nn.functional as F
class DeConv1(nn.Module):
    # transposed convolution mirroring conv1
def __init__(self, in_channel, out_channel, kernel_size=(7, 7), stride=2, padding=3):
super(DeConv1, self).__init__()
self.deconv = nn.ConvTranspose2d(in_channel, out_channel, kernel_size=kernel_size, stride=stride,
padding=padding)
self.init()
def init(self, pth_path='runs/cifar10_resnet18_experiment_1/56_epoch_para.pkl'):
resnet_state_dict = torch.load(pth_path)
self.deconv.weight.data = resnet_state_dict['conv1.0.weight']
def forward(self, featuremap):
return self.deconv(featuremap)
class DeResBlock(nn.Module):
def __init__(self,in_channel,out_channel,stride):
super(DeResBlock,self).__init__()
self.relu = nn.ReLU(inplace=True)
self.deshortcut = nn.ConvTranspose2d(in_channel,out_channel,kernel_size=1,stride=stride,padding=0,bias=False)
self.deleft = nn.Sequential(
nn.ConvTranspose2d(in_channel,in_channel,kernel_size=3,stride=1,padding=1,bias=False),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channel,out_channel,kernel_size=3,stride=stride,padding=1,bias=False)
)
def forward(self, x):
x = self.relu(x)
# x = self.deshortcut(x)
x = self.deleft(x)
return x
class DeConv4(nn.Module):
    # transposed-convolution decoder for the deeper layers
def __init__(self):
super(DeConv4,self).__init__()
# self.delayer4 = DeResBlock(512,256,stride=2)
self.delayer3 = DeResBlock(256,128, stride=2)
self.delayer2 = DeResBlock(128,64,stride=2)
self.delayer1 = DeResBlock(64,64,stride=2)
self.derelu = nn.ReLU(inplace=True)
self.conv1 = DeConv1(64,3,kernel_size=(7,7),stride=2,padding=3)
self.init()
def forward(self, x):
# x = self.delayer4(x)
x = self.delayer3(x)
x = self.delayer2(x)
x = self.delayer1(x)
x = self.derelu(x)
x = self.conv1(x)
return x
def init(self,pth_path='runs/cifar10_resnet18_experiment_1/56_epoch_para.pkl'):
resnet_state_dict = torch.load(pth_path)
self.conv1.deconv.weight.data = resnet_state_dict['conv1.0.weight']
self.delayer1.deshortcut.weight.data = resnet_state_dict['layer1.0.shortcut.0.weight']
self.delayer2.deshortcut.weight.data = resnet_state_dict['layer2.0.shortcut.0.weight']
self.delayer3.deshortcut.weight.data = resnet_state_dict['layer3.0.shortcut.0.weight']
# self.delayer4.deshortcut.weight.data = resnet_state_dict['layer4.0.shortcut.0.weight']
self.delayer1.deleft[0].weight.data = resnet_state_dict['layer1.0.left.3.weight']
self.delayer1.deleft[2].weight.data = resnet_state_dict['layer1.0.left.0.weight']
self.delayer2.deleft[0].weight.data = resnet_state_dict['layer2.0.left.3.weight']
self.delayer2.deleft[2].weight.data = resnet_state_dict['layer2.0.left.0.weight']
self.delayer3.deleft[0].weight.data = resnet_state_dict['layer3.0.left.3.weight']
self.delayer3.deleft[2].weight.data = resnet_state_dict['layer3.0.left.0.weight']
# self.delayer4.deleft[0].weight.data = resnet_state_dict['layer4.0.left.3.weight']
# self.delayer4.deleft[2].weight.data = resnet_state_dict['layer4.0.left.0.weight']
def plot_reconstruction(conv1_feature, conv4_feature, device):
conv1_deconv = DeConv1(64, 3, kernel_size=(7, 7), stride=2, padding=3)
conv1_deconv.to(device)
conv1_reconstruction = conv1_deconv(conv1_feature)
print(conv1_reconstruction.shape)
# print(conv1_reconstruction)
conv4_deconv = DeConv4()
conv4_deconv.to(device)
conv4_deconstruction = conv4_deconv(conv4_feature)
print(conv4_deconstruction.shape)
return conv1_reconstruction, conv4_deconstruction
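# A minimal usage sketch (not part of the original file). The feature-map
# shapes below are illustrative assumptions (a 64-channel conv1 output and a
# 256-channel deeper feature map); the checkpoint path hard-coded in the
# modules above must exist for their init() calls to succeed.
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    conv1_feature = torch.randn(1, 64, 16, 16, device=device)
    conv4_feature = torch.randn(1, 256, 4, 4, device=device)
    plot_reconstruction(conv1_feature, conv4_feature, device)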
|
from tkinter import *
from tkinter import ttk
import customerquery as query
import cx_Oracle
class Customer:
def __init__(self, root):
self.root = root
self.root.title("Customer")
self.root.geometry("1000x450+0+0")
self.root.config(bg="grey")
# =============== Left Frame ==================
frame1 = Frame(self.root, bg="white")
frame1.place(x=0, y=0, width=500, height=450)
title = Label(frame1, text = "LIBRARY", font=("times new roman", 20, "bold"), bg="white", fg="green").place(x=10, y=30)
#=========== GenreLabel ===================
MODES = [
("Romance", "Romance"),
("Action", "Action"),
("Comedy", "Comedy"),
("Adventure", "Adventure"),
("All", "All")
]
genre_lbl = Label(frame1, text="Genre", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=10, y=70)
self.genre = StringVar()
self.genre.set("All")
print("THIS IS {}".format(self.genre))
x=30
y= 100
for text, mode in MODES:
Radiobutton(frame1, text=text, variable =self.genre, value=mode, bg="white", font=("times new roman", 14)).place(x=x, y=y)
y+=22
#============= Movie,book selection =======
type_lbl = Label(frame1, text="Item Type", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=10, y=214)
self.type_select = StringVar()
self.type_select.set("BOTH")
self.r1= Radiobutton(frame1, text="Books", variable=self.type_select, value="BOOKS", bg="white", font=("times new roman", 14)).place(x=30, y=242)
self.r2= Radiobutton(frame1, text="Videos", variable=self.type_select, value="VIDEOS", bg="white", font=("times new roman", 14)).place(x=30,y=264)
self.r3= Radiobutton(frame1, text="Both", variable=self.type_select, value="BOTH", bg="white", font=("times new roman", 14)).place(x=30,y=285)
#===========
availability_lbl = Label(frame1, text="Availability", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=10, y=307)
self.availability = IntVar()
Radiobutton(frame1, text="Available", variable=self.availability, value=1, bg="white", font=("times new roman", 14)).place(x=30, y=328)
Radiobutton(frame1, text="Unavailable", variable=self.availability, value=2, bg="white", font=("times new roman", 14)).place(x=30,y=350)
Radiobutton(frame1, text="All", variable=self.availability, value=3, bg="white", font=("times new roman", 14)).place(x=30,y=372)
#================ Book Title ============
book_title_lbl = Label(frame1, text="Item Title", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=160, y=70)
self.txt_user = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.txt_user.place(x=160, y=100, width = 250, height=30)
#=========== Author
author_lbl = Label(frame1, text="Author", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=160, y=140)
self.txt_author = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.txt_author.place(x=160, y=170, width = 250, height=30)
#========= Item Number
item_number_lbl = Label(frame1, text="Item Number", font=("times new roman", 15, "bold"), bg="white", fg="gray").place(x=160, y=210)
self.txt_author = Entry(frame1, font=("times new roman", 15), bg = "lightgray")
self.txt_author.place(x=160, y=240, width = 250, height=30)
#============ Buttons
reset_selection= Button(frame1, text="Reset Selection",fg="white", bg="#0B547C", font=("Arial", 15, "bold"), command = self.reset).place(x=160, y=290, width = 250, height=30)
search= Button(frame1, text="Search Library",fg="white", bg="#0B547C", font=("Arial", 15, "bold"), command=self.search).place(x=160, y=340, width = 250, height=30)
cols = ('Item #', 'Title', 'Genre', 'Availability', 'Rating')
self.listBox = ttk.Treeview(root, columns=cols, show='headings')
for col in cols:
self.listBox.heading(col, text=col)
self.listBox.column(col, minwidth=0, width=100, stretch=NO)
self.listBox.place(x=510, y=100, width=480)
def reset(self):
self.genre.set("All")
self.type_select.set("BOTH")
def search(self):
#print(self.genre.get(), self.type_select.get(), self.txt_user.get(), self.txt_author.get())
dsnStr = cx_Oracle.makedsn("oracle.scs.ryerson.ca", "1521", "orcl")
db = cx_Oracle.connect(user="j65gill", password="09260785", dsn=dsnStr)
cursor = db.cursor()
# genre_search = """
# SELECT l.ITEMID, l.GENRE, l.AVAILABILITYSTATUS, l.RATING
# FROM LIBRARYITEM L
# WHERE GENRE = '{}'
# """.format(genre)
if self.genre and self.type_select:
print(self.genre.get(), self.type_select.get())
if self.genre.get()!="All" and self.type_select.get()=="BOTH":
print(self.genre.get(), self.type_select.get())
print(self.type_select)
#for i in self.listBox.get
self.remove_all()
genre = self.genre.get()
cursor.execute(query.genre_search.format(genre))
tables = cursor.fetchall()
# for t in tables:
# print(t[1])
for table in tables:
self.listBox.insert("", "end", values=(table[0],table[1], table[2], table[3], table[4]))
elif self.type_select.get()!="BOTH" and self.genre.get()=="All":
print(self.genre.get(), self.type_select.get())
self.remove_all()
type = self.type_select.get()
print(type)
cursor.execute(query.type_book.format(type))
tables = cursor.fetchall()
for table in tables:
self.listBox.insert("", "end", values=(table[0],table[1], table[2], table[3], table[4]))
elif self.type_select.get()=="BOTH" and self.genre.get()=="All":
self.remove_all()
cursor.execute(query.all_books)
tables = cursor.fetchall()
for table in tables:
self.listBox.insert("", "end", values=(table[0],table[1], table[2], table[3], table[4]))
else:
self.remove_all()
genre = self.genre.get()
type = self.type_select.get()
cursor.execute(query.genre_and_type.format(type, genre))
tables = cursor.fetchall()
for table in tables:
self.listBox.insert("", "end", values=(table[0],table[1], table[2], table[3], table[4]))
# elif self.genre and self.type_select==None:
# print(self.type_select)
# #for i in self.listBox.get
# self.remove_all()
# genre = self.genre.get()
# cursor.execute(query.genre_search.format(genre))
# tables = cursor.fetchall()
# # for t in tables:
# # print(t[1])
# for table in tables:
# self.listBox.insert("", "end", values=(table[0],table[1], table[2], table[3], table[4]))
#
# elif self.type_select and self.genre==None:
# self.remove_all()
# type = self.type_select.get()
# print(type)
# cursor.execute(query.type_book.format(type))
# tables = cursor.fetchall()
# for table in tables:
# self.listBox.insert("", "end", values=(table[0],table[1], table[2], table[3], table[4]))
#
#
def remove_all(self):
for record in self.listBox.get_children():
self.listBox.delete(record)
root = Tk()
obj = Customer(root)
root.mainloop()
|
#!/usr/bin/env python3
# Import the ZMQ module
import zmq
# Import the Thread, Lock and Event objects from the threading module
from threading import Thread, Lock, Event
# Import the uuid4 function from the UUID module
from uuid import uuid4
# Import the system method from the OS module
from os import system, name
# Import the Pause method from the Signal module
from signal import pause
# Import the Time method from the Time module
from time import time
# Import the ArgParse module
import argparse
# Receive timeout in milliseconds (30*10000 ms = 300 seconds)
RECV_DELAY = 30*10000
# Clear terminal screen
def cls():
# Perform action based on the current platform
system('cls' if name == 'nt' else 'clear')
def parse_cli_args():
# Instantiate ArgumentParser object
parser = argparse.ArgumentParser(description='End-to-End Hyperstrator')
parser.add_argument(
'--cn_ip',
type=str,
default='134.226.55.122',
required=False,
help='CN orchestrator IP')
parser.add_argument(
'--cn_port',
type=int,
default=2300,
required=False,
help='CN orchestrator port')
parser.add_argument(
'--tn_ip',
type=str,
default='134.226.55.106',
required=False,
help='TN orchestrator IP')
parser.add_argument(
'--tn_port',
type=int,
default=2200,
required=False,
help='TN orchestrator port')
parser.add_argument(
'--ran_ip',
type=str,
default='134.226.55.90',
required=False,
help='RAN orchestrator IP')
parser.add_argument(
'--ran_port',
type=int,
default=2100,
required=False,
help='RAN orchestrator port')
parser.add_argument(
'-c', '--skip_cn',
required=False,
action='store_true',
help='Skip the CN segment')
parser.add_argument(
'-t', '--skip_tn',
required=False,
action='store_true',
help='Skip the TN segment')
parser.add_argument(
'-r', '--skip_ran',
required=False,
action='store_true',
help='Skip the RAN segment')
# Parse CLI arguments
arg_dict = vars(parser.parse_args())
return arg_dict
class orch_base(object):
def __init__(self, **kwargs):
# Extract parameters from keyword arguments
self.name = kwargs.get("name", "")
self.type = kwargs.get('type', '')
self.host_key = kwargs.get("host_key", "")
self.port_key = kwargs.get("port_key", "")
self.default_host = kwargs.get("default_host", "127.0.0.1")
self.default_port = kwargs.get("default_port", "3000")
self.request_key = kwargs.get("request_key", "")
self.reply_key = kwargs.get("reply_key", "")
# Parse keyword arguments
self._parse_kwargs(**kwargs)
# Connect to the server
self._server_connect(**kwargs)
# Extract message headers from keyword arguments
def _parse_kwargs(self, **kwargs):
# Get the error message header from keyword arguments
self.error_msg = kwargs.get("error_msg", "msg_err")
self.info_msg = kwargs.get("info_msg", "bo_nsi")
self.info_ack = "_".join([self.info_msg.split('_')[-1], "ack"])
self.info_nack = "_".join([self.info_msg.split('_')[-1], "nack"])
self.create_msg = kwargs.get("create_msg", "bo_crs")
self.create_ack = "_".join([self.create_msg.split('_')[-1], "ack"])
self.create_nack = "_".join([self.create_msg.split('_')[-1], "nack"])
self.request_msg = kwargs.get("request_msg", "bo_rrs")
self.request_ack = "_".join([self.request_msg.split('_')[-1], "ack"])
self.request_nack = "_".join([self.request_msg.split('_')[-1], "nack"])
self.update_msg = kwargs.get("update_msg", "bo_urs")
self.update_ack = "_".join([self.update_msg.split('_')[-1], "ack"])
self.update_nack = "_".join([self.update_msg.split('_')[-1], "nack"])
self.delete_msg = kwargs.get("delete_msg", "bo_drs")
self.delete_ack = "_".join([self.delete_msg.split('_')[-1], "ack"])
self.delete_nack = "_".join([self.delete_msg.split('_')[-1], "nack"])
def _server_connect(self, **kwargs):
# Default Server host
host = kwargs.get(self.host_key, self.default_host)
# Default Server port
port = kwargs.get(self.port_key, self.default_port)
# Create a ZMQ context
self.context = zmq.Context()
        # Specify the type of ZMQ socket
self.socket = self.context.socket(zmq.REQ)
# Connect ZMQ socket to host:port
self.socket.connect("tcp://" + host + ":" + str(port))
        # Time out reception after RECV_DELAY milliseconds
self.socket.setsockopt(zmq.RCVTIMEO, RECV_DELAY)
# # Allow multiple requests and replies
self.socket.setsockopt(zmq.REQ_RELAXED, 1)
# # Add IDs to ZMQ messages
self.socket.setsockopt(zmq.REQ_CORRELATE, 1)
def _send_msg(self, ack, nack, **kwargs):
# Send request to the orchestrator
self.socket.send_json({self.request_key: kwargs})
try:
# Wait for command
msg = self.socket.recv_json().get(self.reply_key, None)
# If nothing was received during the timeout
except zmq.Again:
# Try again
return False, "Connection timeout to " + self.name + " Orchestrator"
# If the message is not valid
if msg is None:
# Return proper error
return False, "Received invalid message: " + str(msg)
# The orchestrator couldn't decode message
elif self.error_msg in msg:
# Return message and error code
return False, msg[self.error_msg]
# If the request succeeded
elif ack in msg:
# Return host and port
return True, msg[ack]
# If the create slice request failed
elif nack in msg:
# Return the failure message
return False, msg[nack]
else:
return False, "Missing ACK or NACK: " + str(msg)
def network_info(self, **kwargs):
# Send Info message
success, msg = self._send_msg(self.info_ack, self.info_nack,
**{self.info_msg: kwargs})
# If the message failed
if not success:
# Inform the hyperstrator about the failure
print('\t', 'Failed requesting information about the ' + self.name)
return False, msg
# Otherwise, it succeeded
else:
# Inform the hyperstrator about the success
print('\t', 'Succeeded requesting information about the ' + \
self.name)
return True, msg
def create_slice(self, **kwargs):
# Send Creation message
success, msg = self._send_msg(self.create_ack, self.create_nack,
**{self.create_msg: kwargs})
# If the slice allocation failed
if not success:
# Inform the hyperstrator about the failure
print('\t', 'Failed creating a ' + self.type + ' Slice in ' + \
self.name)
return False, msg
# Otherwise, it succeeded
else:
# Inform the hyperstrator about the success
print('\t', 'Succeeded creating a ' + self.type + ' Slice in ' + \
self.name)
return True, msg
def request_slice(self, **kwargs):
# Send request message
success, msg = self._send_msg(self.request_ack, self.request_nack,
**{self.request_msg: kwargs})
# If the slice request failed
if not success:
# Inform the hyperstrator about the failure
print('\t', 'Failed requesting a ' + self.type + ' Slice in ' + \
self.name)
return False, msg
# Otherwise, it succeeded
else:
# Inform the hyperstrator about the success
print('\t', 'Succeeded requesting a ' + self.type + ' Slice in ' + self.name)
return True, msg
    def update_slice(self, **kwargs):
# Send update message
success, msg = self._send_msg(self.update_ack, self.update_nack,
**{self.update_msg: kwargs})
# If the slice update failed
if not success:
# Inform the hyperstrator about the failure
print('\t', 'Failed updating a ' + self.type + ' Slice in ' + \
self.name)
return False, msg
# Otherwise, it succeeded
else:
# Inform the hyperstrator about the success
print('\t', 'Succeeded updating a ' + self.type + ' Slice in ' + \
self.name)
return True, msg
def delete_slice(self, **kwargs):
# Send removal message
success, msg = self._send_msg(self.delete_ack, self.delete_nack,
**{self.delete_msg: kwargs})
# If the slice removal failed
if not success:
# Inform the hyperstrator about the failure
print('\t', 'Failed deleting a ' + self.type + ' Slice in ' + \
self.name)
return False, msg
# Otherwise, it succeeded
else:
# Inform the hyperstrator about the success
print('\t', 'Succeeded deleting a ' + self.type + ' Slice in ' + \
self.name)
return True, msg
class hyperstrator_server(Thread):
def __init__(self, **kwargs):
# Initialize the parent class
Thread.__init__(self)
        # Flag to exit gracefully
self.shutdown_flag = Event()
# Parse keyword arguments
self._parse_kwargs(**kwargs)
# Bind the server
self._server_bind(**kwargs)
# Container to hold the list of current services
self.s_ids = []
# Create an instance of the CN orchestrator handler
self.cn_orch = orch_base(
name="Core Network",
host_key="cn_host",
port_key="cn_port",
default_host=kwargs.get('cn_ip', '134.226.55.122'),
default_port=str(kwargs.get('cn_port', 2300)),
info_msg="ns_cn",
create_msg="cn_cc",
request_msg="cn_rc",
update_msg="cn_uc",
delete_msg="cn_dc",
request_key="cn_req",
reply_key="cn_rep")
# Create an instance of the TN orchestrator handler
self.tn_orch = orch_base(
name="Transport Network",
host_key="tn_host",
port_key="tn_port",
default_host=kwargs.get('tn_ip', '134.226.55.106'),
default_port=str(kwargs.get('tn_port', 2200)),
info_msg="ns_tn",
create_msg="tn_cc",
request_msg="tn_rc",
update_msg="tn_uc",
delete_msg="tn_dc",
request_key="tn_req",
reply_key="tn_rep")
# Create an instance of the RAN orchestrator handler
self.ran_orch = orch_base(
name="Radio Access Network",
host_key="ran_host",
port_key="ran_port",
default_host=kwargs.get('ran_ip', '134.226.55.90'),
default_port=str(kwargs.get('ran_port', 2100)),
info_msg="ns_rn",
create_msg="rn_cc",
request_msg="rn_rc",
update_msg="rn_uc",
delete_msg="rn_dc",
request_key="rn_req",
reply_key="rn_rep")
# Make printing easier. TODO: Implement real logging
def _log(self, *args, head=False):
print("-" if head else '\t' ,*args)
# Extract message headers from keyword arguments
def _parse_kwargs(self, **kwargs):
# Get the error message header from keyword arguments
self.error_msg = kwargs.get('error_msg', 'msg_err')
# Get the network info message from keyword arguments
self.info_msg = kwargs.get('info_msg', 'ns_ni')
# Get the network info acknowledgment from keyword arguments
self.info_ack = "_".join([self.info_msg.split('_')[-1], "ack"])
# Get the network info not acknowledgment from keyword arguments
self.info_nack = "_".join([self.info_msg.split('_')[-1], "nack"])
# Get the create service message from keyword arguments
self.create_msg = kwargs.get('create_msg', 'sr_cs')
# Get the create service acknowledgment from keyword arguments
self.create_ack = "_".join([self.create_msg.split('_')[-1], "ack"])
# Get the create service not acknowledgment from keyword arguments
self.create_nack = "_".join([self.create_msg.split('_')[-1], "nack"])
# Get the request service message from keyword arguments
self.request_msg = kwargs.get('request_msg', 'sr_rs')
# Get the request service acknowledgment from keyword arguments
self.request_ack = "_".join([self.request_msg.split('_')[-1], "ack"])
# Get the request service not acknowledgment from keyword arguments
self.request_nack = "_".join([self.request_msg.split('_')[-1], "nack"])
# Get the update service message from keyword arguments
self.update_msg = kwargs.get('update_msg', 'sr_us')
# Get the update service acknowledgment from keyword arguments
self.update_ack = "_".join([self.update_msg.split('_')[-1], "ack"])
# Get the update service not acknowledgment from keyword arguments
self.update_nack = "_".join([self.update_msg.split('_')[-1], "nack"])
# Get the delete service message from keyword arguments
self.delete_msg = kwargs.get('delete_msg', 'sr_ds')
# Get the delete service acknowledgment from keyword arguments
self.delete_ack = "_".join([self.delete_msg.split('_')[-1], "ack"])
# Get the delete service not acknowledgment from keyword arguments
self.delete_nack = "_".join([self.delete_msg.split('_')[-1], "nack"])
# Debug flags
self.skip_radio = kwargs.get('skip_ran', True)
self.skip_transport= kwargs.get('skip_tn', True)
self.skip_core = kwargs.get('skip_cn', True)
# Bind server to socket
def _server_bind(self, **kwargs):
# Default HS Server host
host = kwargs.get('host', '0.0.0.0')
# Default HS Server port
port = kwargs.get('port', 1000)
# Create a ZMQ context
self.context = zmq.Context()
# Specify the type of ZMQ socket
self.socket = self.context.socket(zmq.REP)
# Bind ZMQ socket to host:port
self.socket.bind("tcp://" + host + ":" + str(port))
        # Time out reception after RECV_DELAY milliseconds
self.socket.setsockopt(zmq.RCVTIMEO, RECV_DELAY)
def _send_msg(self, message_type, message):
# Send a message with a header
self.socket.send_json({message_type: message})
def run(self):
self._log('Started Hyperstrator', head=True)
# Run while thread is active
while not self.shutdown_flag.is_set():
try:
# Wait for transaction
transactions = self.socket.recv_json()
# If nothing was received during the timeout
except zmq.Again:
# Try again
continue
# Received a command
else:
# Info transaction, network segments
info_transaction = transactions.get(self.info_msg, None)
# If the message worked
if info_transaction is not None:
self._network_info(info_transaction)
# Service transaction, new service
create_transaction = transactions.get(self.create_msg, None)
# If the message worked
if create_transaction is not None:
self._create_service(create_transaction)
# Service transaction, request service
request_transaction = transactions.get(self.request_msg, None)
# If the flag exists
if request_transaction is not None:
self._request_service(request_transaction)
# Service transaction, update service
update_transaction = transactions.get(self.update_msg, None)
# If the flag exists
if update_transaction is not None:
self._update_service(update_transaction)
# Service transaction, delete service
delete_transaction = transactions.get(self.delete_msg, None)
# If the flag exists
if delete_transaction is not None:
self._delete_service(delete_transaction)
# Check for unknown messages
unknown_msg = [x for x in transactions if x not in
[self.create_msg, self.request_msg,
self.update_msg, self.delete_msg,
self.info_msg] ]
# If there is at least an existing unknown message
if unknown_msg:
self._log('Unknown message', head=True)
self._log('Message:', unknown_msg[0])
msg = {self.error_msg: "Unknown message:" + \
str(unknown_msg[0])}
# Send message
self._send_msg(self.error_msg, msg)
# Terminate zmq
self.socket.close()
self.context.term()
# Method for stopping the server thread nicely
def safe_shutdown(self):
self._log('Exiting', head=True)
self.shutdown_flag.set()
self.join()
def _network_info(self, info_transaction):
# Start time counter
st = time()
self._log('Network Information Transaction', head=True)
self._log('Gather information about:',
str(info_transaction['s_ns'])[1:-1])
# Container to hold information about network segments
network_info = {}
# If doing the CN
if 'cn' in info_transaction['s_ns'] or not info_transaction['s_ns']:
if not self.skip_core:
self._log('Send message to CN orchestrator')
# Send message to the CN orchestrator
core_success, core_msg = self.cn_orch.network_info(
**{'s_ns': info_transaction['s_ns']})
# If there was an error at the CN orchestrator
if not core_success:
self._log('Failed requesting CN info')
# Inform the user about the failure
self._send_msg(self.info_nack, core_msg)
# Finish here
return
# Fill in the network segment info
network_info['cn'] = core_msg.get('cn',
"Not reported by CN")
# If debugging
else:
# Fill in the network info with a stub
network_info.update({'cn': 'stub'})
# If doing the TN
if 'tn' in info_transaction['s_ns'] or not info_transaction['s_ns']:
if not self.skip_transport:
self._log('Send message to TN orchestrator')
# Send message to the TN orchestrator
transport_success, transport_msg = self.tn_orch.network_info(
**{'s_ns': info_transaction['s_ns']})
# If there was an error at the TN orchestrator
if not transport_success:
self._log('Failed requesting TN info')
# Inform the user about the failure
self._send_msg(self.info_nack, transport_msg)
# Finish here
return
# Fill in the network segment info
network_info['tn'] = transport_msg.get('tn',
"Not reported by TN")
# If debugging
else:
# Fill in the network info with a stub
network_info.update({'tn': 'stub'})
# If doing the RAN
if 'ran' in info_transaction['s_ns'] or not info_transaction['s_ns']:
if not self.skip_radio:
self._log('Send message to RAN orchestrator')
# Send message to the CN orchestrator
radio_success, radio_msg = self.ran_orch.network_info(
**{'s_ns': info_transaction['s_ns']})
# If there was an error at the RAN orchestrator
if not radio_success:
self._log('Failed requesting RAN info')
# Inform the user about the failure
self._send_msg(self.info_nack, radio_msg)
# Finish here
return
# Fill in the network segment info
network_info['ran'] = radio_msg.get('ran',
"Not reported by RAN")
# If debugging
else:
# Fill in the network info with a stub
network_info.update({'ran': 'stub'})
# Inform the user about the slice information
self._send_msg(self.info_ack, network_info)
# Measure elapsed time
self._log('Get time:', (time() - st)*1000, 'ms')
def _create_service(self, create_transaction):
# Start time counter
st = time()
self._log('Create Service Transaction', head=True)
# Create a Service ID
s_id = str(uuid4())
self._log('Service ID:', s_id)
# If allocating CN slices
if not self.skip_core:
self._log('Send message to the CN orchestrator')
# Otherwise, send message to the CN orchestrator
core_success, core_msg = self.cn_orch.create_slice(
**{
's_id': s_id,
'service': create_transaction['service'],
'application': create_transaction['application'],
'requirements': create_transaction['requirements']
})
# If the core allocation failed
if not core_success or 'source' not in core_msg:
self._log('Failed creating Core Slice')
# Inform the user about the failure
self._send_msg(
self.create_nack,
core_msg if not core_success else
"Malformatted message from CN orchestrator.")
# Measured elapsed time
self._log('Failed core, took:',
(time() - st)*1000, 'ms')
# Finish the main loop here
return
# Otherwise, the CN allocation succeeded
self._log('Succeeded creating a CN slice')
# In case of tests
else:
self._log('Skipping CN')
# Use a fake source IP
core_msg = {'s_id': s_id, 'source': '30.0.7.1'}
# If allocating RAN slices
if not self.skip_radio:
self._log('Send message to the RAN orchestrator')
# Otherwise, send message to the CN orchestrator
radio_success, radio_msg = self.ran_orch.create_slice(
**{
's_id': s_id,
'service': create_transaction['service'],
'application': create_transaction['application'],
'requirements': create_transaction['requirements']
})
# If the radio allocation failed
if not radio_success or 'destination' not in radio_msg:
self._log('Failed creating Radio Slice')
# Inform the user about the failure
self._send_msg(
self.create_nack,
radio_msg if not radio_success else
"Malformatted message from RAN orchestrator.")
# Measured elapsed time
self._log('Failed radio, took:',
(time() - st)*1000, 'ms')
# Clear up existing network segment slices
if not self.skip_core:
core_success, core_msg = self.cn_orch.delete_slice(
**{'s_id': s_id})
# If the core delete failed
if not core_success:
self._log('Failed cleaning up Core Slice')
# Finish the main loop here
return
# Otherwise, the RAN allocation succeeded
self._log('Succeeded creating a RAN slice')
# In case of tests
else:
self._log('Skipping RAN')
# Use a fake source IP
#radio_msg = {'s_id': s_id, 'destination': '10.30.0.179'}
radio_msg = {'s_id': s_id, 'destination': '10.0.0.160'}
# If allocating TN slices
if not self.skip_transport:
self._log('Send message to the TN orchestrator')
# Send UUID and requirements to the TN orchestrator
transport_success, transport_msg = \
self.tn_orch.create_slice(
**{
's_id': s_id,
'requirements': create_transaction['requirements'],
'source': core_msg['source'],
'destination': radio_msg['destination']
})
# If the transport allocation failed
if not transport_success:
self._log('Failed creating Transport Slice')
# Inform the user about the failure
self._send_msg(
self.create_nack,
transport_msg if not transport_success else
"Malformatted message from TN orchestrator.")
# Measured elapsed time
self._log('Failed transport, took:',
(time() - st)*1000, 'ms')
# Clear up existing network segment slices
if not self.skip_core:
core_success, core_msg = self.cn_orch.delete_slice(
**{'s_id': s_id})
# If the core delete failed
if not core_success:
self._log('Failed cleaning up Core Slice')
# Clear up existing network segment slices
if not self.skip_radio:
radio_success, radio_msg = self.ran_orch.delete_slice(
**{'s_id': s_id})
# If the radio delete failed
if not radio_success:
self._log('Failed cleaning up Radio Slice')
# Finish here
return
# Otherwise, the transport allocation succeeded
self._log('Succeeded creating a TN Slice')
# In case of tests
else:
self._log('Skipping TN')
# Use a fake return message
transport_msg = {'s_id': s_id}
# Append it to the list of service IDs
self.s_ids.append(s_id)
# Inform the user about the configuration success
self._send_msg(self.create_ack, {
's_id': s_id
})
self._log('Creation time:', (time() - st)*1000, 'ms')
def _request_service(self, request_transaction):
# Start time counter
st = time()
self._log('Request Service Transaction', head=True)
# If missing the slice ID:
if 's_id' not in request_transaction:
self._log("Missing Service ID.")
# Send message
self._send_msg(self.request_nack, "Missing Service ID")
# Leave if clause
return
# If there is an S_ID but it doesn't exist
elif request_transaction['s_id'] and \
(request_transaction['s_id'] not in self.s_ids):
self._log('Service ID does not exist')
# Send message
self._send_msg(self.request_nack,
'The service does not exist: ' + \
request_transaction['s_id'])
# Leave if clause
return
# If gathering information about a slice
if request_transaction['s_id']:
self._log('Service ID:', request_transaction['s_id'])
# If set to gather information about all slices
else:
self._log('Gather information about all Service IDs')
# Container to hold information about the slices
slice_info = dict((s_id, {}) for s_id in self.s_ids) \
if not request_transaction['s_id'] else \
{request_transaction['s_id']: {}}
# If doing the CN
if not self.skip_core:
self._log('Send message to CN orchestrator')
# Otherwise, send message to the CN orchestrator
core_success, core_msg = self.cn_orch.request_slice(
**{'s_id': request_transaction['s_id']})
# If there was an error at the CN orchestrator
if not core_success:
self._log('Failed requesting Core Slice')
# Inform the user about the failure
self._send_msg(self.request_nack, core_msg)
# Finish here
return
# Fill in the slice info
for s_id in slice_info:
slice_info[s_id]['cn'] = \
core_msg.get(s_id, "Not reported by CN")
# If debugging
else:
# Fill in the slice info with a stub
for s_id in slice_info:
slice_info[s_id].update({'cn': 'stub'})
# If doing the TN
if not self.skip_transport:
self._log('Send message to TN orchestrator')
# Otherwise, send message to the TN orchestrator
transport_success, transport_msg = \
self.tn_orch.request_slice(
**{'s_id': request_transaction['s_id']})
# If there was an error at the TN orchestrator
if not transport_success:
self._log('Failed requesting Transport Slice')
# Inform the user about the failure
self._send_msg(self.request_nack, transport_msg)
# Finish here
return
# Fill in the slice info
for s_id in slice_info:
slice_info[s_id]['tn'] = \
transport_msg.get(s_id, "Not reported by TN")
# If debugging
else:
# Fill in the slice info with a stub
for s_id in slice_info:
slice_info[s_id].update({'tn': 'stub'})
# If doing the RAN
if not self.skip_radio:
self._log('Send message to RAN orchestrator')
# Otherwise, send message to the RAN orchestrator
radio_success, radio_msg = \
self.ran_orch.request_slice(
**{'s_id': request_transaction['s_id']})
# If there was an error at the RAN orchestrator
if not radio_success:
self._log('Failed requesting Radio Slice')
# Inform the user about the failure
self._send_msg(self.request_nack, radio_msg)
# Finish here
return
# Fill in the slice info
for s_id in slice_info:
slice_info[s_id]['ran'] = \
radio_msg.get(s_id, "Not reported by RAN")
# If debugging
else:
# Fill in the slice info with a stub
for s_id in slice_info:
slice_info[s_id].update({'ran': 'stub'})
# Inform the user about the slice information
self._send_msg(self.request_ack, slice_info)
# Measure elapsed time
self._log('Get time:', (time() - st)*1000, 'ms')
def _update_service(self, update_transaction):
self._log('Update Service Transaction', head=True)
self._log("Not implemented yet.")
return
def _delete_service(self, delete_transaction):
# Start time counter
st = time()
self._log('Delete Service Transaction', head=True)
# If missing the slice ID:
if 's_id' not in delete_transaction or not \
delete_transaction['s_id']:
self._log("Missing Service ID.")
# Send message
self._send_msg(self.delete_nack, "Missing Service ID")
# Leave if clause
return
# If this service doesn't exist
elif delete_transaction['s_id'] not in self.s_ids:
self._log('Service ID does not exist')
# Send message
self._send_msg(self.delete_nack,
'The service does not exist: ' + \
delete_transaction['s_id'])
# Leave if clause
return
self._log('Service ID:', delete_transaction['s_id'])
# If doing the CN
if not self.skip_core:
self._log('Send message to CN orchestrator')
            # Send the delete request to the CN orchestrator
core_success, core_msg = self.cn_orch.delete_slice(
**{'s_id': delete_transaction['s_id']})
            # If the core deletion failed
if not core_success:
self._log('Failed deleting Core Slice')
# Inform the user about the failure
self._send_msg(self.delete_nack, core_msg)
# Finish here
return
# In case of testing
else:
self._log('Skipping CN')
# If doing the RAN
if not self.skip_radio:
self._log('Send message to RAN orchestrator')
            # Send the delete request to the RAN orchestrator
radio_success, radio_msg = self.ran_orch.delete_slice(
**{'s_id': delete_transaction['s_id']})
            # If the radio deletion failed
if not radio_success:
self._log('Failed deleting Radio Slice')
# Inform the user about the failure
self._send_msg(self.delete_nack, radio_msg)
# Finish here
return
# In case of testing
else:
self._log('Skipping RAN')
# If doing the TN
if not self.skip_transport:
self._log('Send message to the TN orchestrator')
            # Send the delete request to the TN orchestrator
transport_success, transport_msg = \
self.tn_orch.delete_slice(
**{'s_id': delete_transaction['s_id']})
            # If the TN deletion failed
if not transport_success:
self._log('Failed deleting Transport Slice')
# Inform the user about the failure
self._send_msg(self.delete_nack, transport_msg)
# Finish here
return
# In case of testing
else:
self._log('Skipping TN')
        # Remove it from the list of service IDs
self.s_ids.remove(delete_transaction['s_id'])
# Inform the user about the removal success
self._send_msg(self.delete_ack,
{'s_id': delete_transaction['s_id']})
if __name__ == "__main__":
# Clear screen
cls()
# Parse CLI arguments
kwargs = parse_cli_args()
# Handle keyboard interrupt (SIGINT)
try:
# Instantiate the Hyperstrator Server
hyperstrator_thread = hyperstrator_server(
host='127.0.0.1',
port=1100,
error_msg='msg_err',
info_msg='ns_ni',
create_msg='sr_cs',
request_msg='sr_rs',
update_msg='sr_us',
delete_msg='sr_ds',
**kwargs
)
# Start the Hyperstrator Thread
hyperstrator_thread.start()
# Pause the main thread
pause()
except KeyboardInterrupt:
# Terminate the Hyperstrator
hyperstrator_thread.safe_shutdown()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-08 16:03:08
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
a = "外部定义的变量"
list1 = ['外部定义的变量']
def func():
print(a)
print(list1)
def func1():
global a
    # By declaring global inside the function we state that we will modify the global variable,
    # so changes made here alter the outer variable directly, no return value needed
a = a + "hhhhh"
list1.append(a)
print(a)
print(list1)
def func2():
    # Without a global declaration, the names assigned inside the function are purely local,
    # so whatever names we use here have nothing to do with the outer variables
a = "莫哈哈"
list1 = [1,2,3,4]
print(a)
print(list1)
func()   # prints "外部定义的变量"
func1()  # prints "外部定义的变量hhhhh"
func2()  # prints "莫哈哈"
|
#Author: Xing Cui
#NetID: xc918
#Date: 12/3
import unittest
from unittest import TestCase
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import data_cleanser as ds
from data_cleanser import *
from assignment10_functions import *
from visualization import *
class hw10_unittest(unittest.TestCase):
"""This class is testing for assigment10.
"""
def setUp(self):
pass
def test_test_grades(self):
        self.assertEqual(1, test_grades(['C', 'B', 'A']))
self.assertEqual(-1, test_grades(['A', 'B', 'C']))
self.assertEqual(0, test_grades(['B', 'B', 'B']))
if __name__ == "__main__":
unittest.main()
|
"""
Useful utilities.
"""
import logging
import re
import unidecode
from kyoukai.asphalt import HTTPRequestContext
logger = logging.getLogger("OWAPI")
HOUR_REGEX = re.compile(r"([0-9]*) hours?")
MINUTE_REGEX = re.compile(r"([0-9]*) minutes?")
SECOND_REGEX = re.compile(r"([0-9]*\.?[0-9]*) seconds?")
PERCENT_REGEX = re.compile(r"([0-9]{1,3})\s?\%")
async def with_cache(ctx: HTTPRequestContext, func, *args, expires: int = None, cache_404=False):
"""
Run a coroutine with cache.
Stores the result in redis.
Unless we don't have redis.
"""
if expires is None:
expires = 300
if ctx.app.config["owapi_cache_time"] is not None:
expires = ctx.app.config["owapi_cache_time"]
if not ctx.app.config["owapi_use_redis"]:
# no caching without redis, just call the function
logger.info("Loading `{}` with disabled cache".format(repr(args)))
result = await func(ctx, *args)
return result
else:
import aioredis
assert isinstance(ctx.redis, aioredis.Redis)
built = func.__name__ + repr(args)
# Check for the key.
# Uses a simple func name + repr(args) as the key to use.
got = await ctx.redis.get(built)
if got and got != "None":
if await ctx.redis.ttl(built) == -1:
logger.info("Caching `{}` for `{}` seconds".format(built, expires))
await ctx.redis.expire(built, expires)
logger.info("Cache hit for `{}`".format(built))
return got.decode()
logger.info("Cache miss for `{}`".format(built))
# Call the function.
result = await func(ctx, *args)
if result is None and not cache_404:
# return None, no caching for 404s.
return None
# Store the result as cached.
to_set = result if result else "None"
logger.info("Storing {} with expiration {}".format(built, expires))
await ctx.redis.set(built, to_set, expire=expires)
if to_set == "None":
return None
return result
def int_or_string(val: str):
"""
    Loads a value from MO into either a float or string value.
    String is returned if we can't turn it into a float.
"""
new_s = val.replace(",", "")
try:
return float(new_s)
except ValueError:
return val
def parse_time(val: str) -> float:
"""
    Parse the time out into hours.
"""
unit = val.split(" ")[1]
if "minute" in unit:
# Calculate the hour.
mins = int(val.split(" ")[0])
hours = round(mins / 60, 3)
return hours
else:
hours = val.split(" ")[0]
return float(hours)
def try_extract(value):
"""
    Attempt to extract a meaningful numeric value from a stat string.
"""
if value.lower() in ("--", "null"):
return 0
get_float = int_or_string(value)
# If it's changed, return the new int value.
if get_float != value:
return get_float
# Next, try and get a time out of it.
matched = HOUR_REGEX.match(value)
if matched:
val = matched.groups()[0]
val = float(val)
return val
matched = MINUTE_REGEX.match(value)
if matched:
val = matched.groups()[0]
val = float(val)
val /= 60
return val
matched = SECOND_REGEX.match(value)
if matched:
val = matched.groups()[0]
val = float(val)
val = val / 60 / 60
return val
matched = PERCENT_REGEX.match(value)
if matched:
val = matched.groups()[0]
val = float(val)
val = val / 100
return val
# Check if there's an ':' in it.
if ":" in value:
sp = value.split(":")
# If it's only two, it's mm:ss.
# Formula is (minutes + (seconds / 60)) / 60
if len(sp) == 2:
mins, seconds = map(int, sp)
mins += seconds / 60
hours = mins / 60
return hours
# If it's three, it's hh:mm:ss.
# Formula is hours + ((minutes + (seconds / 60)) / 60).
elif len(sp) == 3:
try:
hours, mins, seconds = map(int, sp)
except ValueError:
# weird thousands values
if "," in sp[0]:
sp[0] = sp[0].replace(",", "")
else:
raise
hours, mins, seconds = map(int, sp)
mins += seconds / 60
hours += mins / 60
return hours
else:
# Just return the value.
return value
def sanitize_string(string):
"""
Convert an arbitrary string into the format used for our json keys
"""
space_converted = re.sub(r"[-\s]", "_", unidecode.unidecode(string).lower())
removed_nonalphanumeric = re.sub(r"\W", "", space_converted)
underscore_normalized = re.sub(r"_{2,}", "_", removed_nonalphanumeric)
    return underscore_normalized.replace("soldier_76", "soldier76")  # backwards compatibility
def correct_plural_name(name: str, value):
"""
Convert the argument for Plural Bug
"""
one = name[name.find("_one_") + 5 : name.find("_other_")]
other = name[name.find("_other_") + 7 : name.rfind(one) + len(one) + 1]
if value == 1:
return name[: name.find("count_")] + one + name[name.find(other) + len(other) :]
return name[: name.find("count_")] + other + name[name.find(other) + len(other) :]
|
from __future__ import print_function
import boto3
import sys
import time
import threading
from multiprocessing import Queue
from lab_config import boto_args
queue = Queue()
def parallel_scan(tableName, totalsegments, threadsegment):
dynamodb = boto3.resource(**boto_args)
table = dynamodb.Table(tableName)
totalbytessent = 0
pageSize = 10000
fe = "responsecode <> :f"
eav = {":f": 200}
response = table.scan(
FilterExpression=fe,
ExpressionAttributeValues=eav,
Limit=pageSize,
TotalSegments=totalsegments,
Segment=threadsegment,
ProjectionExpression='bytessent'
)
for i in response['Items']:
totalbytessent += i['bytessent']
while 'LastEvaluatedKey' in response:
response = table.scan(
FilterExpression=fe,
ExpressionAttributeValues=eav,
Limit=pageSize,
TotalSegments=totalsegments,
Segment=threadsegment,
ExclusiveStartKey=response['LastEvaluatedKey'],
ProjectionExpression='bytessent')
for i in response['Items']:
totalbytessent += i['bytessent']
queue.put(totalbytessent)
if __name__ == "__main__":
args = sys.argv[1:]
tablename = args[0]
total_segments = int(args[1])
print('Scanning 1 million rows of table %s to get the total of bytes sent' %(tablename))
begin_time = time.time()
# BUGFIX https://github.com/boto/boto3/issues/1592
boto3.resource(**boto_args)
#
thread_list = []
for i in range(total_segments):
thread = threading.Thread(target=parallel_scan, args=(tablename, total_segments, i))
thread.start()
thread_list.append(thread)
time.sleep(.1)
for thread in thread_list:
thread.join()
totalbytessent = 0
for i in range(total_segments):
totalbytessent = totalbytessent + queue.get()
print('Total bytessent %s in %s seconds' %(totalbytessent, time.time() - begin_time))
|
Variables are a way to store and save data for use
This is called assignment. You are assigning a value to a variable
Declaring Variables
Do not need to use var
Cannot start with a number
Cannot declare with special characters
Written in snake case
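A quick sketch of those naming rules; the variable names are just illustrative:
# Valid names: snake case, starting with a letter
user_name = "Ada"
total_count = 3
# These would be invalid (SyntaxError) if uncommented:
# 2nd_place = "nope"    # cannot start with a number
# total-count = 3       # special characters like - are not allowed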
Data Types
Strings
Strings are immutable. Once they are declared they cannot be changed. You can add strings together to create a new string but you cannot mutate an already existing one
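For example (a minimal sketch; greeting is an arbitrary name):
greeting = "hello"
# greeting[0] = "H"                 # TypeError: strings are immutable
new_greeting = greeting + " world"  # adding strings builds a brand new string
print(greeting)                     # hello (unchanged)
print(new_greeting)                 # hello world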
Numbers
Floats
decimals
Integers
whole number
Booleans
True and False
0 evaluates to False
Supports PEMDAS
==, !=, <=, >=, <, >
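A small sketch tying those together (price and quantity are made-up names):
price = 19.99                   # float
quantity = 3                    # integer
total = price * quantity + 1    # multiplication before addition (PEMDAS)
print(total)                    # 60.97
print(total >= 60)              # True
print(quantity != 3)            # False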
if/else statements
If an expression you passed in is True do something
Else do something else
Acknowledge:
You don't need parentheses around the expression
Put a colon after the expression you want to evaluate
if to elif to else
Indent (tab) to show which block each line of code belongs to
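A minimal if/elif/else sketch following those rules:
age = 20
if age >= 21:
    print("of age")
elif age >= 18:
    print("almost")
else:
    print("not yet")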
If you wanted to store more data, use a list:
Can be assigned to variables
Can hold different data types at once
The values are indexed for us starting at zero
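A small list sketch (mixed is an arbitrary name):
mixed = ["apple", 42, 3.14, True]   # different data types in one list
print(mixed[0])   # apple  (indexing starts at zero)
print(mixed[3])   # True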
Functions and Statements
We declare our functions with the word def for define
Functions follow the same naming principles as declaring variables
Snake case
Do not start with numbers or special characters
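A minimal function sketch; greet_user is a hypothetical name:
def greet_user(name):
    # declared with def, named in snake case
    return "Hello, " + name

print(greet_user("Ada"))   # Hello, Ada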
LOOPS
One of the core functionalities of programming is loops
Loops are used to run a code block (a piece of code) over and over again until a condition is met
In a for loop (see the sketch below), item is a placeholder for the current element in the list. The word item could be any name you want it to be.
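A sketch of such a for loop (fruits and item are example names):
fruits = ["apple", "banana", "cherry"]
for item in fruits:    # item takes each element of the list in turn
    print(item)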
Here's one more example using the range() function
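A possible version of that example:
for number in range(5):   # range(5) produces 0, 1, 2, 3, 4
    print(number)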
WHILE LOOP:
Like the For Loop we use this to run a code block over and over again.
The main difference is instead of passing an iterable data structure, we will be running the code block until a condition is no longer true.
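A small while-loop sketch; the block keeps running until the condition stops being true:
count = 0
while count < 3:    # runs while the condition is true
    print(count)
    count += 1      # without this the condition would never become false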
|