text stringlengths 8 6.05M |
|---|
import numpy as np
import re
from math import log2
# Computational-basis qubit states as 1x2 row vectors: q[0] = |0>, q[1] = |1>.
q = [np.array([[1, 0]]), np.array([[0, 1]])]
def parse_string(value):
    """Parse a numeric string.

    Returns a ``(number, bit_length)`` tuple:
    * ``'~'`` prefix forces decimal interpretation -> ``(int, None)``;
    * digits only in {0, 1} are read as binary -> ``(int, len)``;
    * anything else is read as decimal -> ``(int, None)``.
    """
    if value[0] == '~':
        return int(value[1:]), None
    if re.search(r'[2-9]', value):
        return int(value), None
    return int(value, base=2), len(value)
def generate_qubit(value, tensor=None):
    """Build the row-vector basis state for integer ``value``.

    Each bit of ``value`` contributes one qubit via Kronecker products
    (most-significant bit first).  If ``tensor`` is given, the state is
    left-padded with |0> qubits until its width reaches 2**tensor.
    """
    if value in (0, 1):
        qubit = q[value]
    else:
        qubit = np.kron(generate_qubit(value >> 1), q[value & 1])
    if tensor is not None:
        while tensor > log2(qubit.shape[1]):
            qubit = np.kron(q[0], qubit)
    return qubit
|
def sum_series1(i):
    """Return the i-th harmonic number H(i) = 1 + 1/2 + ... + 1/i.

    Implemented iteratively: the original recursive version hit the
    interpreter recursion limit for large ``i`` and recursed forever
    (until RecursionError) for ``i < 1``.  For ``i < 1`` this returns
    0 (the empty sum).
    """
    return sum(1.0 / k for k in range(1, i + 1))

print(sum_series1(5))
|
from setuptools import find_packages, setup

# Read the long description from the README so PyPI shows the same docs.
# Explicit encoding avoids platform-dependent decoding of the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="webapp",
    version="0.0.1",
    author="Ben",
    description="Raspberry-Pi webapp project",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    # NOTE: the package previously listed itself ('webapp') here, which
    # makes pip try to resolve the package from PyPI while installing it
    # from source; only genuine third-party dependencies belong here.
    install_requires=[
        'flask',
    ],
)
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
# The original `import statsmodels as ssm` fails at ssm.add_constant /
# ssm.OLS: those live in the statsmodels.api namespace.
import statsmodels.api as sm

# Load the 2021 World Happiness Report data.  Raw string so the
# backslash in the Windows-style path is not treated as an escape.
df = pd.read_csv(r"data\world-happiness-report-2021.csv")

features = ["Logged GDP per capita", "Social support", "Healthy life expectancy",
            "Freedom to make life choices", "Generosity", "Perceptions of corruption"]
dependent = df["Ladder score"]
independent = df[features]

# Ordinary least squares fit of the ladder score on the six predictors.
# (The original also had a dead `LinearRegression(...)` expression whose
# result was discarded; removed.)
model = LinearRegression()
model.fit(independent, dependent)
print("R2: ", model.score(independent, dependent))
print("Intercept: ", model.intercept_)
print("coefficients: ", model.coef_)

# Same regression via statsmodels for the full summary table.  The
# constant column must actually be passed to OLS (the original computed
# `x` but then fitted on `independent`, dropping the intercept).
x = sm.add_constant(independent)
model = sm.OLS(dependent, x).fit()
print(model.summary())
def f(x):
    """Exponential decay with a half-life of 5.27: f(0) = 10, f(5.27) = 5."""
    import math
    half_life = 5.27
    return 10 * math.e ** (math.log(0.5) / half_life * x)
def radiationExposure(start, stop, step):
    '''
    Compute the total radiation exposure between the start and stop
    times via a left Riemann sum: each rectangle has width ``step``
    and height ``f(t)`` at the rectangle's left edge.  Calls the
    function f (defined for you in the grading script).
    start: integer, the time at which exposure begins
    stop: integer, the time at which exposure ends
    step: float, the width of each rectangle; assumed to partition
          the interval evenly.
    returns: float, the amount of radiation exposed to between the
             start and stop times.
    '''
    total = 0
    t = start
    while t < stop:
        total += f(t) * step
        t += step
    return total
# Exercise radiationExposure over the same intervals and step sizes,
# printed in the same order as before.
for begin, end, width in [
    (0, 5, 1), (5, 11, 1), (12, 16, 1), (0, 4, 0.25),
    (5, 10, 0.25), (0, 3, 0.1), (14, 20, 0.1), (48, 72, 0.4),
]:
    print(radiationExposure(begin, end, width))
|
from mlpnn.Import.Data import Data
class Samples(object):
    """Wraps a Data file and splits it into train/test portions by `ratio`."""

    def __init__(self, file, ratio=1.0, shuffle_data=False):
        # ratio: fraction of the samples used for training; the rest is test.
        self.ratio = ratio
        self.data = Data(file, shuffle_data)

    def _split_index(self, seq):
        # Index separating the training prefix from the test suffix.
        return round(self.ratio * len(seq))

    def input_neurons(self):
        return self.data.samples_count() - 1

    def output_neurons(self):
        return self.data.labels_count()

    def train_data(self):
        rows = self.data.data()
        return rows[:self._split_index(rows)]

    def train_labels(self):
        labels = self.data.labels()
        return labels[:self._split_index(labels)]

    def test_data(self):
        rows = self.data.data()
        return rows[self._split_index(rows):]

    def test_labels(self):
        labels = self.data.labels()
        return labels[self._split_index(labels):]
|
# Find nonnegative counts (x, i, y) with x + i + y == N and
# 2*x + 3*i + 4*y == M (algebra: y = (M-3i)/2 - (N-i), x = (N-i) - y
# gives exactly 2x + 3i + 4y == M).  Prints "-1 -1 -1" if impossible.
N, M = map( int, input().split())
ans = "-1 -1 -1"
# Try every feasible count i of weight-3 items.
for i in range(M//3+1):
    # Remainder must split evenly between the weight-2 and weight-4 items.
    # (The `M >= i*3` guard is always true given the range bound above.)
    if (M-i*3)%2 == 0 and M >= i*3:
        y = (M-i*3)//2 - (N-i)
        x = (N-i) - y
        if x >= 0 and y >= 0:
            ans = str(x) + " " + str(i) + " " + str(y)
            break
print( ans)
|
# -*- coding: utf-8 -*-
from matplotlib.pylab import *
from collections import defaultdict
# Group (n, time) measurement pairs by complexity label from data.txt.
# NOTE(review): n and time stay strings here; matplotlib plots string
# y-values by category order -- confirm the file holds numeric text.
data = defaultdict(lambda:[])
for line in open("data.txt").readlines():
    if not line.strip(): continue
    (label, n, time) = line.strip().split(",")
    data[label].append((n, time))
n = 0
clf()
# Marker/linestyle per series.  NOTE: `type` shadows the builtin.
type = ["o-", "*--", "s-", "x-."]
labels = ["N^3", "N^2", "NlgN", "N"]
for label, t in zip(labels, type):
    val = data[label]
    # Track the longest series so the axes below can be sized to it.
    n = max(n, len(val))
    # Heavier line for dashed/dash-dot styles; per-marker sizes.
    lw = 1.2 if len(t) == 2 else 2
    ms = 8
    if t[0] == "x": ms = 10
    if t[0] == "*": ms = 12
    mew = 0 if t[0] != "x" else 2
    plot(range(len(val)), [v[1] for v in val], t, color="k", lw=lw,
         ms=ms, label=label, mew=mew)
def txt(n):
    """Format a count with K/M suffixes, e.g. 2048 -> '2.0K'.

    The original returned str(n) unconditionally on its first line,
    making the K/M branches unreachable; the threshold checks must
    run first.
    """
    if n >= 1024 * 1024:
        return "%.1lfM" % (n / (1024. * 1024))
    if n >= 1024:
        return "%.1lfK" % (n / (1024.))
    return str(n)
# Tick labels are the array sizes 10*2^i, abbreviated by txt().
# NOTE(review): xrange is Python 2 only; this script will not run on
# Python 3 without changing it to range.
xticks(xrange(n), [txt(10*(2**i)) for i in xrange(n)], rotation=30)
subplots_adjust(0.05,0.2,0.95,0.95)
ylim(-0.5,7)
xlim(-0.5,n+0.5)
grid(True)
xlabel("Size of array (N)")
ylabel("Running time (second)")
# Emphasize the x axis.
axhline(0,lw=1,color='k')
legend(loc="best")
show()
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
'''
Pentagonal numbers are generated by the formula, Pn=n(3n−1)/2.
The first ten pentagonal numbers are:
1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ...
It can be seen that P4 + P7 = 22 + 70 = 92 = P8.
However, their difference, 70 − 22 = 48, is not pentagonal.
Find the pair of pentagonal numbers, Pj and Pk,
for which their sum and difference are pentagonal and
D = |Pk − Pj| is minimised; what is the value of D?
'''
import math
import timeit
def is_pentagonal(pn):
    """Return True if pn is a pentagonal number.

    pn is pentagonal iff 24*pn + 1 is a perfect square whose root r
    satisfies (1 + r) % 6 == 0.  Uses an exact integer square root
    (float sqrt as a first estimate, then corrected) instead of the
    original float-only test, which loses precision for large pn and
    raised ValueError for negative arguments; negatives now return
    False.
    """
    if pn < 1:
        return False
    d = 24 * pn + 1
    r = int(math.sqrt(d))
    # Correct any float rounding in the estimate.
    while r * r > d:
        r -= 1
    while (r + 1) * (r + 1) <= d:
        r += 1
    return r * r == d and (1 + r) % 6 == 0
def calc():
    """Return D = Pk - Pj for the first pair of pentagonal numbers whose
    sum and difference are both pentagonal (Project Euler 44)."""
    pentagonals = []
    n = 0
    while True:
        n += 1
        pn = n * (3 * n - 1) / 2
        # Check the new pentagonal against all earlier ones, largest first.
        for smaller in reversed(pentagonals):
            if is_pentagonal(pn + smaller) and is_pentagonal(pn - smaller):
                return pn - smaller
        pentagonals.append(pn)
if __name__ == '__main__':
    # Python 2 print statements; the timeit setup imports this module
    # under the name `problem_044` to time a single run of calc().
    print calc()
    print timeit.Timer('problem_044.calc()', 'import problem_044').timeit(1)
|
#########################################
##
## JetRecConfig
##
## This file is a prototype module for a jet configuration system compatible
## with RootCore.
## The system is based on a hierarchy of keywords which describe top-level full configuration
## down to individual tool configuration
## An example of a keyword hierarchy could be:
##
## 'AntiKt4EMTopo' # a top-level keyword. Refers to :
## ('emtopoInputs', 'calib+cut5' ) # a pair (keyword for input list, keyword for modifier list). They refer to
##
## 'emtopoInputs' # a input list keyword. Refers to :
## [ 'emtopo' ] # a list of keywords for input tools
##
## 'calib' # a modifier list keyword. Refers to
## ['calib'] # a list of keywords for input tools
## 'cut5' # a modifier list keyword. Refers to
## ['ptMin5GeV', 'sort'] # an other list of keywords for input tools
## # (then 'calib+cut5' is interpreted as ['calib','ptMin5GeV', 'sort']
##
## 'emtopo' # a keyword for a (input) tool configuration. Refers to :
## (PseudoJetGetter, dict(InputContainer="CaloCalTopoClusters",Label="EMTopo",SkipNegativeEnergy=True, OutputContainer="EMTopoPseudoJetVec") )
## # this pair (class, dict_of_properties) fully specifies a tool configuration
##
## 'ptMin5GeV' # a keyword for a (modifier) tool configuration. Refers to :
## ( JetFilterTool, dict(PtMin= 5000) )
## #
## ... etc ....
##
## Together with this hierarchy, some helper functions can interpret the keywords and return the corresponding
## configured tool.
## As examples :
##
## # re-create AntiKt10LCTopoJets exactly as the standard ones (just changing the name) :
## jetConfig.jetFindingSequence('AntiKt10LCTopo', outputName="AntiKt10LCTopoJets2", jetTool=jetTool)
##
## # re-create AntiKt10LCTopoJets exactly as the standard ones but a special list of modifiers
## jetConfig.jetFindingSequence('AntiKt10LCTopo', modifierList=['ptMin50GeV','sort','width'],outputName="AntiKt10LCTopoJets2", jetTool=jetTool)
##
##
##
##
import JSSTutorial.RootCoreConfigInit
from ROOT import JetFromPseudojet, JetFinder, JetPseudojetRetriever
def buildJetInputTruthParticles(tool=None):
    """Create (or configure the given) CopyTruthJetParticles tool that
    copies truth particles into the 'JetInputTruthParticles' container."""
    from ROOT import CopyTruthParticles, CopyTruthJetParticles, MCTruthClassifier
    if tool is None:
        tool = CopyTruthJetParticles("truthpartcopy")
    # Properties are (re)applied even when a tool instance is passed in.
    tool.OutputName = "JetInputTruthParticles"
    tool.BarCodeFromMetadata = False # !!! not sure what this implies !
    tool.MCTruthClassifier = MCTruthClassifier("jettruthclassifier")
    return tool
def buildJetTrackVtxAssoc(tool=None):
    """Create (or configure the given) track-vertex association tool
    producing the 'JetTrackVtxAssoc' map from primary vertices."""
    # For now we call our own buildTightTrackVertexAssociationTool()
    # (TrackSelecToolHelper.h) because there is no dictionary for
    # TightTrackVertexAssociationTool.
    from ROOT import TrackVertexAssociationTool, buildTightTrackVertexAssociationTool
    cpTVa = buildTightTrackVertexAssociationTool("jetTighTVAtool")
    if tool is None:
        tool = TrackVertexAssociationTool("tvassoc")
    tool.TrackParticleContainer = "InDetTrackParticles"
    tool.TrackVertexAssociation = "JetTrackVtxAssoc"
    tool.VertexContainer = "PrimaryVertices"
    tool.TrackVertexAssoTool = cpTVa
    return tool
def buildJetTrackSelection(tool=None):
    """Create (or configure the given) JetTrackSelectionTool applying the
    'Loose' track selection to InDetTrackParticles."""
    # For now we call our own buildTrackSelectionTool() (TrackSelecToolHelper.h)
    # because there is no dictionary for InDet__InDetTrackSelectionTool.
    # FIX: JetTrackSelectionTool must be imported here as well -- the
    # original imported only buildTrackSelectionTool, so the constructor
    # call below raised a NameError whenever tool was None.
    from ROOT import buildTrackSelectionTool, JetTrackSelectionTool
    inDetSel = buildTrackSelectionTool("TrackSelForJet", "Loose")
    if tool is None:
        tool = JetTrackSelectionTool("trackselloose_trackjets")
    tool.InputContainer = "InDetTrackParticles"
    tool.OutputContainer = "JetSelectedTracks_LooseTrackJets"
    tool.Selector = inDetSel
    return tool
###############################################3
## Below is config experimentations.
###############################################3
class JetConfigException(Exception):
    """Raised when a jet configuration alias or specification cannot be resolved."""
    pass
from collections import namedtuple
# Lightweight bundle of per-configuration info passed to sub-tool builders.
JetConfigContext = namedtuple( 'JetConfigContext', 'algName, alg, R, input, dataType, output' )
class JetConfigurator(object):
globalOptions = dict(dataType = 'FullS')
## ********************************************************
## Dictionnary definitions
## ********************************************************
## knownJetBuilders = { 'top_level_alias' : ( alias_for_input, alias_for_modifier) }
## where alias_for_xxx is either a string or a list of alias
## knownJetBuilders thus maps top aliase to a full jet alg configuration (in the form of alias to input and modifiers).
knownJetBuilders = dict( )
## ------------------------
## knownJetGroomers maps a string (groomer alias) to (klass, dict_of_properties, namebuilding_function)
knownJetGroomers = dict( )
## ------------------------
## map alias to list of alias = {'alias_for_input' : ['alias1', 'alias2', ...] }
## where 'aliasX' is an entry in knownInputTools (i.e is an alias to a tool configuration)
knownInputLists = dict()
## The map of known/standard PseudoJetGetter tools : 'alias' : ( class , dict_of_properties )
knownInputTools = dict( )
## ------------------------
## map alias to list of alias = {'alias_for_modifier' : ['alias1', 'alias2', ...] }
## where 'aliasX' is an entry in knownModifierTools (i.e is an alias to a tool configuration)
knownModifierList = dict( )
## The map of known/standard modifier tools : 'alias' : ( class , dict_of_properties )
## (class can also be a function, see )
knownModifierTools = dict( )
## ------------------------
## define default options for calibrations.
## calibOptions format is in the form
## ('JetAlgName', 'dataType') : ('calib_config_file','calib_sequence')
calibOptions = { }
##
## All standard content of these dictionnaries is done in JetRecDefaultTools.py
##
## ********************************************************
## top level tools
## ********************************************************
def jetFindingSequence(self, topAlias, inputList=None, finder=None, modifierList=None, jetTool=None, outputName=None, doArea=True ):
"""jetFindingSequence returns a JetRecTool (or configure jetTool if given) to run a full jet finding sequence.
topAlias will be used to retrieve the full configuration from the alias dictionnary knownJetBuilders.
If given, the other arguments will be use to overwrite default config as retrieved from knownJetBuilders.
topAlias : str, a key in knownJetBuilders in the form 'AlgRadInputSuffix' like 'CamKt11PV0TrackJets' (i.e Alg=Cam ,Rad=11, Input=PV0Track)
optional arguments :
inputList : str (key in knownInputLists) or a list of (key in knownInputTools or configured PseudoJetGetter instance)
modifierList :str (key in knownModifierList) or list of (key in knownModifierTools or configured JetModifier instance)
finder : a configured JetFinder instance
jetTool : a JetRecTool instance : will be configured by this function
outputName : name of final JetContainer. if None, will be build from topAlias
Examples :
# re-create AntiKt10LCTopoJets exactly as the standard ones :
jetConfig.jetFindingSequence('AntiKt10LCTopoJets2', jetTool=jetTool)
# re-create AntiKt10LCTopoJets exactly as the standard ones but a special list of modifiers
jetConfig.jetFindingSequence('AntiKt10LCTopoJets2', modifierList=['ptMin50GeV','sort','width'],jetTool=jetTool)
# create AntiKt12LCTopoJets. As this key is not in knownJetBuilders, specify aliases for input and modifiers.
jetConfig.jetFindingSequence('AntiKt12LCTopoJets', inputList='lctopoInputs', modifierList='cut50+substr',jetTool=jetTool)
# create AntiKt12LCTopoJets. same as above, but no ghosts (as implied by the 'lctopo' input list alias)
jetConfig.jetFindingSequence('AntiKt12LCTopoJets', inputList=['lctopo'], modifierList='cut50+substr',jetTool=jetTool)
# -----> in this case the 'lctopo' is in the list, so it refers to the PseudoJetGetter tool directly
"""
alg, R, input = interpretJetName(topAlias)
algName = buildJetAlgName( alg, R)+ input
if outputName is None : outputName = algName+"Jets"
# context is used to pass around usefull information to sub-tools configuration
context = JetConfigContext(algName, alg, R, input, self.globalOptions['dataType'], outputName)
if jetTool is None:
jetTool = JetRecTool(outputName )
else:
jetTool.setName( outputName )
inputAlias, modifAlias = self.knownJetBuilders.get( topAlias, (None,None) )
# prepare the inputs --------------
if inputList is None:
if inputAlias is None:
print "JetConfigurator.jetFindingSequence ERROR can't retrieve input tools for ", topAlias , " interpreted as ",alg, r, input
raise JetConfigException("Bad input specification")
else:
inputAlias = inputList # consider the user given inputList as an alias
# interpret the inputAlias :
inputList, inputAliasList = self.getInputList( inputAlias ,context=context)
# prepare the modifiers --------------
if modifierList is None :
if modifAlias is None:
print "JetConfigurator.jetFindingSequence ERROR can't retrieve modifier tools for ", topAlias , " interpreted as ",alg, r, input
raise JetConfigException("Bad modif specification")
else:
modifAlias = modifierList
modifierList, modifAliasList = self.getModifList( modifAlias, context)
# prepare the finder --------------
if finder is None:
finder = self.getJetFinderTool(algName=algName, context=context, doArea=doArea )
jetTool.PseudoJetGetters = inputList
jetTool.JetFinder = finder
jetTool.JetModifiers = modifierList
jetTool.OutputContainer = outputName
print " *********************************** "
print " JetConfigurator : Configured jet finder for ", topAlias, " -> ", outputName
print " --> alg name : ",algName.ljust(20) , '(',alg,R,input,')'
print " --> inputs : ", str(inputAlias).ljust(20), '=',inputAliasList
print " --> modifiers : ", str(modifAlias).ljust(20), '=', modifAliasList
print " *********************************** "
return jetTool
def jetGroomingSequence(self, inputJets, groomAlias, modifierList=None, jetTool=None, outputJets=None, **userParams ):
""" """
# retrieve class, parameters and name from the dict :
groomerKlass, groomerParams, nameBuildingFunc = self.knownJetGroomers.get( groomAlias, (None,None,None) )
if groomerKlass is None :
print "JetConfigurator.jetGroomingSequence ERROR can't retrieve groomer for ", groomAlias
raise JetConfigException("Bad groomer specification")
# take user parameters into account
if userParams != {}:
groomerParams = dict(groomerParams) # copy
groomerParams.update(userParams)
algName = nameBuildingFunc(**groomerParams)
if outputJets is None:
alg,R,input = interpretJetName(inputJets)
outputJets = inputJets.replace(input, input+algName)
context = JetConfigContext(algName, groomAlias, -1, inputJets, self.globalOptions['dataType'], outputJets)
if jetTool is None:
jetTool = JetRecTool(algName )
else:
jetTool.setName( algName )
if modifierList is None :
# use the same as for input :
inputAlias, modifAlias = self.knownJetBuilders.get( inputJets, (None,None) )
if modifAlias is None :
print "ERROR JetConfigurator.jetGroomingSequence : can't guess a modifier list from ",inputJets
raise JetConfigException("Bad modifier specification")
else:
modifAlias = modifierList
modifierList, modifAliasList = self.getModifList( modifAlias, context)
# needed for technical reasons
jetBuilder=JetFromPseudojet(outputJets+"jetbuild", Attributes = [] )
jetTool.JetGroomer = groomerKlass( algName, JetBuilder=jetBuilder,**groomerParams)
jetTool.InputContainer = inputJets
jetTool.OutputContainer = outputJets
jetTool.JetPseudojetRetriever = JetPseudojetRetriever("pjretriever")
jetTool.JetModifiers = modifierList
print " *********************************** "
print " JetConfigurator : Configured jet groomer for ", groomAlias, " from ", inputJets, "to", outputJets
print " --> groom class : ", groomerKlass
print " --> groom params: ", groomerParams
print " --> modifiers : ", modifAlias.ljust(20), '=', modifAliasList
print " *********************************** "
return jetTool
## ********************************************************
## Jet finding
## ********************************************************
def getJetFinderTool(self, alg="AntiKt", R=0.4, input="LCTopo", algName=None, doArea=True, context=None, **userProp):
"""returns a configured JetFinder tool.
The JetFinderTool is configured according to the input arguments.
if algName is not None, it is interpreted and OVERWRITES alg,R and input.
"""
# Use some default properties
defaultProps = dict( PtMin = 5000, GhostArea = 0.01 if doArea else 0., RandomOption = 1, )
if algName is None:
algName = buildJetAlgName(alg,R)
else:
alg, R, input = interpretJetName( algName )
if context:
toolName = context.output+'.Finder'
else:
toolName = algName+"Finder"
# Technically we need this tool to translate fastjet to xAOD::Jet
jetFromPJ = JetFromPseudojet(toolName.replace('Finder',"jetbuild"),
Attributes = ["ActiveArea", "ActiveAreaFourVector"] if doArea else [] )
defaultProps['JetBuilder'] = jetFromPJ
# overwrite with userProp
defaultProps.update(userProp)
defaultProps.update( JetAlgorithm = alg, JetRadius = R )
if alg.startswith('Var'):
# add Variable R jet params.
# for now assuming large-R jet usage so fixed param :
defaultProps.update(VariableRMinRadius=0.2, VariableRMassScale=600000,JetRadius=1.0,
JetAlgorithm = dict(VarA='AntiKt',VarK='Kt',VarC='CamKt')[alg] )
# pass all the options to the constructor :
finder = JetFinder(toolName, **defaultProps)
# incase of track jet, the finder is a bit more complex
# we used a dedicated one which will build jets per vertex
if "Track" in input:
from ROOT import JetByVertexFinder
vertexIndex = 0 # configure it to use PV0 (we could interpret input)
vfinder = JetByVertexFinder(toolName + "ByVertex",
JetFinder = finder,
Vertex = vertexIndex)
finder = vfinder
return finder
## ********************************************************
## Inputs
## ********************************************************
def addKnownInput(self, alias, klass, **properties):
if alias in self.knownInputTools:
print "ERROR in PseudoJetInput::addKnownInput can't add already existing ",alias
return
self.knownInputTools[alias] = (klass, properties)
def getInputTool(self, alias,context=None, **userProp):
tool = self.aliasToTool( alias, self.knownInputTools, context = context, **userProp)
if tool is None:
print "ERROR. JetConfigurator.getInputTool unknown input ",alias
print "available are ",self.knownInputTools.keys()
return None
if "OutputContainer" not in userProp:
tool.OutputContainer = tool.InputContainer+"_pseudojet"
if tool.GhostScale > 0.0:
tool.OutputContainer = tool.InputContainer+"_gpseudojet"
return tool
def getInputToolFromAlgName(self, algname, **userProp):
alg, R, input = interpretJetName(algName)
if 'Track' in input:
input='Track' # (because 'PV0Track' is the same input as 'Track')
return self.getInputTool(input.lower(), **userProp)
def getInputList(self, alias, context=None):
"""Interpret the given alias and returns (toolList, aliasList).
input :
alias : str (key in knownInputLists) or a list of (key in knownInputTools or configured PseudoJetGetter instance)
returns
- toolList : a list of configured instances
- aliasList : a list of strings (alias or tool names)
"""
aliasList, failed = self.aliasToListOfAliases(alias, self.knownInputLists)
if aliasList is None:
print "ERROR JetConfigurator.getInputList unknown alias", failed
print " --> add it to JetConfigurator.knownInputLists ? "
return
toolList = []
aliasList_str = []
for a in aliasList:
print a
if isinstance(a, str):
t = self.getInputTool(a, context=context)
else: # assume a is alaready a tool:
t=a
a = t.name()
toolList.append( t )
aliasList_str.append( a )
return toolList, aliasList_str
## ********************************************************
## Jet Modifiers
## ********************************************************
def getModifTool(self, alias, context=None, **userProp ):
tool = self.aliasToTool( alias, self.knownModifierTools, context = context, **userProp)
if tool is None:
print "ERROR. JetConfigurator.getModifTool unknown modifer ",alias
return None
return tool
def getModifList(self, alias, context=None):
aliasList, failed = self.aliasToListOfAliases(alias, self.knownModifierList)
if aliasList is None:
print "ERROR JetConfigurator.getModifList unknown alias", failed
print " --> add it to JetConfigurator.knownModifierList ? "
return
toolList = []
aliasList_str = []
for a in aliasList:
if isinstance(a, str):
t = self.getModifTool(a, context=context)
else: # assume a is alaready a tool:
t=a
a=t.name()
aliasList_str.append( a )
toolList.append( t )
return toolList, aliasList_str
## ********************************************************
## Jet Calibration
## ********************************************************
def getCalibTool(self, algName="AntiKt4EMTopo", dataType='FullS', context=None, **userProp):
if context is not None:
# take algName and dataType from context and ignore other algs
algName, dataType = context.algName, context.dataType
if (algName,dataType) not in self.calibOptions:
print "ERROR JetConfigurator.getCalibTool can't retrieve calib config for ",algName, dataType
return None
confFile, seq = self.calibOptions[ (algName,dataType) ]
tool=JetCalibrationTool(algName+"calib", IsData= (dataType=='data') , ConfigFile=confFile, CalibSequence=seq, JetCollection=algName)
return tool
## ********************************************************
## Helpers
## ********************************************************
def aliasToListOfAliases(self, alias, aliasDict):
""" Given alias (a string or a list of strings), returns a list of aliases as defined by those mapped in aliasDict.
The following forms are allowed for alias :
* 'alias' --> aliasDict['alias']
* ['alias0','alias1',...] --> ['alias0','alias1',...] (same list)
* 'aliasX+aliasY+aliasZ' --> aliasDict['aliasX']+aliasDict['aliasY']+aliasDict['aliasZ']
"""
if isinstance(alias, list):
return alias, ''
aL = alias.split('+')
finalAliases = []
for a in aL :
a_list = aliasDict.get(a,None)
if a_list is None:
return None, a # return no list and the offender
finalAliases += a_list
return finalAliases, ''
def aliasToTool(self, alias, aliasDict, context=None, **userProp ):
klass, defaultProp = aliasDict.get(alias, (None,None) )
tname = alias
if klass is None:
return None
if userProp != {} : # copy default and update
finalProp = dict(defaultProp)
finalProp.update(userProp)
else:
finalProp = defaultProp
if 'context' in finalProp: # then klass is actually a function which needs a context
finalProp['context'] = context
if context:
tname = context.output+'.'+alias
modif = klass(tname, **finalProp)
return modif
def dumpGlobalOptions(self):
print "*******************************"
print "JetConfiguration global options :"
for k,v in self.globalOptions.iteritems():
print " %-40s : %s"%(k,str(v))
print "*******************************"
# Module-level singleton configurator; importing JetRecDefaultTools
# populates the known* dictionaries with the standard tool definitions.
jetConfig = JetConfigurator()
import JetRecDefaultTools
## **************************************************************************
## Helper functions
##
## **************************************************************************
def buildJetAlgName(finder, mainParam):
    """Return the algorithm name, e.g. ('AntiKt', 0.4) -> 'AntiKt4'."""
    radius_tag = int(mainParam * 10)
    return "%s%d" % (finder, radius_tag)
def buildJetContName(finder, mainParam, input):
    """Full container name, e.g. ('AntiKt', 0.4, 'LCTopo') -> 'AntiKt4LCTopoJets'."""
    # could be more elaborate...
    return "%s%sJets" % (buildJetAlgName(finder, mainParam), input)
def interpretJetName(jetcollName, finder = None,input=None, mainParam=None):
    """Split a jet collection name like 'AntiKt4LCTopoJets' into
    (finder, mainParam, input) = ('AntiKt', 0.4, 'LCTopo').
    Any component passed as an argument is kept as-is.
    Returns None (implicitly) on failure, after printing an error."""
    # first step : guess the finder, input , mainParam, if needed
    if finder is None:
        # 'CamKt' must be tried before 'Kt' (startswith ordering matters).
        for a in [ 'AntiKt','CamKt','Kt', 'Cone','SISCone','CMSCone','VarA','VarK','VarC']:
            if jetcollName.startswith(a):
                finder = a #dict(VarA='AntiKt',VarK='Kt',VarC='CamKt').get(a,a)
                break
        if finder is None:
            print "interpretJetName Error could not guess jet finder type in ", jetcollName
            return
    if mainParam is None:
        # get the 2 chars following finder :
        # NOTE(review): mp[1] raises IndexError if only one char follows
        # the finder name -- confirm callers always pass longer names.
        mp = jetcollName[len(finder):len(finder)+2]
        mp = mp[0] if not mp[1] in '0123456789' else mp
        try :
            mainParam = float(mp)/10.
        except ValueError :
            print "interpretJetName Error could not guess main parameter in ",jetcollName
            return
    if input is None:
        # NOTE(review): `mp` is undefined here when mainParam was passed in.
        prefix=finder+mp
        end = jetcollName.find('Jet')
        if end==-1: end =len(jetcollName)
        input = jetcollName[len(prefix):end]
        # NOTE(review): dead branch -- `input` was just assigned a string
        # above so it is never None here, and `knownInput` is undefined.
        if input is None:
            print "interpretJetName ERROR could not guess input type in ",jetcollName
            print "  Known input :", knownInput
            return
    return finder, mainParam, input
def globalOptionBuilder( **args ):
    """Serialize keyword options into a pickle string (Python 2 cPickle)."""
    from cPickle import dumps
    return dumps(args)
|
import json
class QuestionController():
    """In-memory question store exposing simple CRUD-style accessors."""

    def getallquestions(self):
        """Return every known question/answer pair as a JSON array string."""
        allquestions = (
            {
                'question': "What is your name?",
                "answer": "Priscilla Kyei Danso",
            },
            {
                'question': "How old are you?",
                "answer": "Why do you care?",
            },
            {
                'question': "Do you love God?",
                "answer": "Definitely!",
            },
            {
                'question': "Do you have a boyfriend?",
                "answer": "Still searching! No i found one!",
            },
        )
        return json.dumps(allquestions)

    def getquestionwithid(self):
        """Return a single question/answer pair.

        The original built the dict and then fell off the end of the
        function (returning None); the missing ``return`` is restored.
        """
        getquestionid = {
            'question' : "What is your name?",
            "answer": "Priscilla Kyei Danso",
        }
        return getquestionid

    def postquestions(self):
        """Return an empty question/answer template for a new posting."""
        getquestionposted = {
            'question': ' ',
            'answer' : ' '
        }
        return getquestionposted

    def updatequestion(self):
        # Not implemented yet.
        pass

    def deletequestion(self):
        # Not implemented yet.
        pass
|
r"""*CLI module for* ``sphobjinv``.
``sphobjinv`` is a toolkit for manipulation and inspection of
Sphinx |objects.inv| files.
.. note::
This module is NOT part of the public API for ``sphobjinv``.
Its entire contents should be considered implementation detail.
**Author**
Brian Skinn (bskinn@alum.mit.edu)
**File Created**
17 May 2016
**Copyright**
\(c) Brian Skinn 2016-2020
**Source Repository**
http://www.github.com/bskinn/sphobjinv
**Documentation**
http://sphobjinv.readthedocs.io
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
"""
import argparse as ap
import os
import sys
from json.decoder import JSONDecodeError
from sphobjinv import __version__
from sphobjinv.fileops import readjson, writebytes, writejson
from sphobjinv.inventory import Inventory as Inv
from sphobjinv.zlib import compress
# ### Version arg and helpers
#: Optional argument name for use with the base
#: argument parser, to show version &c. info, and exit
VERSION = "version"
#: Version &c. output blurb
#: (adjacent string literals are concatenated implicitly)
VER_TXT = (
    "\nsphobjinv v{0}\n\n".format(__version__) + "Copyright (c) Brian Skinn 2016-2020\n"
    "License: The MIT License\n\n"
    "Bug reports & feature requests:"
    " https://github.com/bskinn/sphobjinv\n"
    "Documentation:"
    " http://sphobjinv.readthedocs.io\n"
)
# ### Subparser selectors and argparse param for storing subparser name
#: Subparser name for inventory file conversions; stored in
#: :data:`SUBPARSER_NAME` when selected
CONVERT = "convert"
#: Subparser name for inventory object suggestions; stored in
#: :data:`SUBPARSER_NAME` when selected
SUGGEST = "suggest"
#: Param for storing subparser name
#: (:data:`CONVERT` or :data:`SUGGEST`)
SUBPARSER_NAME = "sprs_name"
# ### Common URL argument for both subparsers
#: Optional argument name for use with both :data:`CONVERT` and
#: :data:`SUGGEST` subparsers, indicating that
#: :data:`INFILE` is to be treated as a URL
#: rather than a local file path
URL = "url"
# ### Conversion subparser: 'mode' param and choices
#: Positional argument name for use with :data:`CONVERT` subparser,
#: indicating output file format
#: (:data:`ZLIB`, :data:`PLAIN` or :data:`JSON`)
MODE = "mode"
#: Argument value for :data:`CONVERT` :data:`MODE`,
#: to output a :mod:`zlib`-compressed inventory
ZLIB = "zlib"
#: Argument value for :data:`CONVERT` :data:`MODE`,
#: to output a plaintext inventory
PLAIN = "plain"
#: Argument value for :data:`CONVERT` :data:`MODE`,
#: to output an inventory as JSON
JSON = "json"
# ### Source/destination params
#: Required positional argument name for use with both :data:`CONVERT` and
#: :data:`SUGGEST` subparsers, holding the path
#: (or URL, if :data:`URL` is specified)
#: to the input file
INFILE = "infile"
#: Optional positional argument name
#: for use with the :data:`CONVERT` subparser,
#: holding the path to the output file
#: (:data:`DEF_BASENAME` and the appropriate item from :data:`DEF_OUT_EXT`
#: are used if this argument is not provided)
OUTFILE = "outfile"
# ### Convert subparser optional params
#: Optional argument name for use with the :data:`CONVERT` subparser,
#: indicating to suppress console output
QUIET = "quiet"
#: Optional argument name for use with the :data:`CONVERT` subparser,
#: indicating to expand URI and display name
#: abbreviations in the generated output file
EXPAND = "expand"
#: Optional argument name for use with the :data:`CONVERT` subparser,
#: indicating to contract URIs and display names
#: to abbreviated forms in the generated output file
CONTRACT = "contract"
#: Optional argument name for use with the :data:`CONVERT` subparser,
#: indicating to overwrite any existing output
#: file without prompting
OVERWRITE = "overwrite"
# ### Suggest subparser params
#: Positional argument name for use with the :data:`SUGGEST` subparser,
#: holding the search term for |fuzzywuzzy|_ text matching
SEARCH = "search"
#: Optional argument name for use with the :data:`SUGGEST` subparser,
#: taking the minimum desired |fuzzywuzzy|_ match quality
#: as one required argument
THRESH = "thresh"
#: Optional argument name for use with the :data:`SUGGEST` subparser,
#: indicating to print the location index of each returned object
#: within :data:`INFILE` along with the object domain/role/name
#: (may be specified with :data:`SCORE`)
INDEX = "index"
#: Optional argument name for use with the :data:`SUGGEST` subparser,
#: indicating to print the |fuzzywuzzy|_ score of each returned object
#: within :data:`INFILE` along with the object domain/role/name
#: (may be specified with :data:`INDEX`)
SCORE = "score"
#: Optional argument name for use with the :data:`SUGGEST` subparser,
#: indicating to print all returned objects, regardless of the
#: number returned, without asking for confirmation
ALL = "all"
# ### Helper strings
#: Help text for the :data:`CONVERT` subparser
#: (adjacent string literals are concatenated implicitly)
HELP_CO_PARSER = (
    "Convert intersphinx inventory to zlib-compressed, " "plaintext, or JSON formats."
)
#: Help text for the :data:`SUGGEST` subparser
HELP_SU_PARSER = "Fuzzy-search intersphinx inventory " "for desired object(s)."
#: Help text for default extensions for the various conversion types
HELP_CONV_EXTS = "'.inv/.txt/.json'"
# ### Defaults for an unspecified OUTFILE
#: Default base name for an unspecified :data:`OUTFILE`
DEF_BASENAME = "objects"
#: Default extensions for an unspecified :data:`OUTFILE`
DEF_OUT_EXT = {ZLIB: ".inv", PLAIN: ".txt", JSON: ".json"}
# ### Useful constants
#: Number of returned objects from a :data:`SUGGEST` subparser invocation
#: above which user will be prompted for confirmation to print the results
#: (unless :data:`ALL` is specified)
SUGGEST_CONFIRM_LENGTH = 30
#: Default match threshold for :option:`sphobjinv suggest --thresh`
DEF_THRESH = 75
def selective_print(thing, params):
    """Print `thing` unless quiet mode is in effect.

    Quiet mode is indicated by the value under the :data:`QUIET` key
    of `params`, and applies only when the active subparser is
    "convert" (quiet mode is not implemented for
    ":doc:`suggest </cli/suggest>`").

    Parameters
    ----------
    thing
        *any* -- Object to be printed
    params
        |dict| -- Parameters/values mapping from the active subparser
    """
    in_convert_mode = params[SUBPARSER_NAME][:2] == "co"
    # Suppress output only when BOTH convert mode and --quiet are active
    if not (in_convert_mode and params[QUIET]):
        print(thing)
def err_format(exc):
    r"""Pretty-format an exception.

    Parameters
    ----------
    exc
        :class:`Exception` -- Exception instance to pretty-format

    Returns
    -------
    pretty_exc
        |str| -- Exception type and message formatted as
        |cour|\ '{type}: {message}'\ |/cour|
    """
    exc_name = type(exc).__name__
    return exc_name + ": " + str(exc)
def yesno_prompt(prompt):
    r"""Query user at `stdin` until a yes/no reply is received.

    Uses :func:`input`, so this will hang if used programmatically
    unless `stdin` is suitably mocked.

    Only a case-insensitive 'y' or 'n' terminates the loop; any
    other response repeats the query *ad infinitum*. `prompt` is
    displayed as-is -- it is NOT augmented to describe the
    accepted values.

    Parameters
    ----------
    prompt
        |str| -- Prompt to display to user that
        requests a 'Y' or 'N' response

    Returns
    -------
    resp
        |str| -- User response (one of 'y', 'Y', 'n', 'N')
    """
    resp = ""
    while resp.lower() not in ("y", "n"):
        resp = input(prompt)  # noqa: S322
    return resp
def getparser():
    """Generate argument parser.

    All flag and argument names are derived from the module-level
    constant strings (:data:`VERSION`, :data:`CONVERT`, etc.) so the
    parser stays consistent with the keys used to read the parsed
    namespace elsewhere in this module.

    Returns
    -------
    prs
        :class:`~argparse.ArgumentParser` -- Parser for commandline usage
        of ``sphobjinv``
    """
    prs = ap.ArgumentParser(
        description="Format conversion for "
        "and introspection of "
        "intersphinx "
        "'objects.inv' files."
    )
    # Short flag is the first letter of the long flag (e.g. -v/--version)
    prs.add_argument(
        "-" + VERSION[0],
        "--" + VERSION,
        help="Print package version & other info",
        action="store_true",
    )
    sprs = prs.add_subparsers(
        title="Subcommands",
        dest=SUBPARSER_NAME,
        metavar="{{{0},{1}}}".format(CONVERT, SUGGEST),
        help="Execution mode. Type "
        "'sphobjinv [mode] -h' "
        "for more information "
        "on available options. "
        "Mode names can be abbreviated "
        "to their first two letters.",
    )
    # Enforce subparser as optional. No effect for 3.4 to 3.7;
    # briefly required a/o 3.7.0b4 due to change in default behavior, per:
    # https://bugs.python.org/issue33109. 3.6 behavior restored for
    # 3.7 release.
    sprs.required = False
    # Two-letter aliases ('co', 'su') registered explicitly for each mode
    spr_convert = sprs.add_parser(
        CONVERT, aliases=[CONVERT[:2]], help=HELP_CO_PARSER, description=HELP_CO_PARSER
    )
    spr_suggest = sprs.add_parser(
        SUGGEST, aliases=[SUGGEST[:2]], help=HELP_SU_PARSER, description=HELP_SU_PARSER
    )
    # ### Args for conversion subparser
    spr_convert.add_argument(
        MODE, help="Conversion output format", choices=(ZLIB, PLAIN, JSON)
    )
    spr_convert.add_argument(INFILE, help="Path to file to be converted")
    spr_convert.add_argument(
        OUTFILE,
        help="Path to desired output file. "
        "Defaults to same directory and main "
        "file name as input file but with extension "
        + HELP_CONV_EXTS
        + ", as appropriate for the output format. "
        "A bare path is accepted here, "
        "using the default output file names.",
        nargs="?",
        default=None,
    )
    # Mutually exclusive group for --expand/--contract
    gp_expcont = spr_convert.add_argument_group(title="URI/display name " "conversions")
    meg_expcont = gp_expcont.add_mutually_exclusive_group()
    meg_expcont.add_argument(
        "-e",
        "--" + EXPAND,
        help="Expand all URI and display name " "abbreviations",
        action="store_true",
    )
    meg_expcont.add_argument(
        "-c",
        "--" + CONTRACT,
        help="Contract all URI and display name " "abbreviations",
        action="store_true",
    )
    # Clobber argument
    spr_convert.add_argument(
        "-" + OVERWRITE[0],
        "--" + OVERWRITE,
        help="Overwrite output files without prompting",
        action="store_true",
    )
    # stdout suppressor option (e.g., for scripting)
    spr_convert.add_argument(
        "-" + QUIET[0],
        "--" + QUIET,
        help="Suppress printing of status messages "
        "and overwrite output files "
        "without prompting",
        action="store_true",
    )
    # Flag to treat infile as a URL
    spr_convert.add_argument(
        "-" + URL[0],
        "--" + URL,
        help="Treat 'infile' as a URL for download",
        action="store_true",
    )
    # ### Args for suggest subparser
    spr_suggest.add_argument(INFILE, help="Path to inventory file to be searched")
    spr_suggest.add_argument(SEARCH, help="Search term for object suggestions")
    spr_suggest.add_argument(
        "-" + ALL[0],
        "--" + ALL,
        help="Display all results "
        "regardless of the number returned "
        "without prompting for confirmation.",
        action="store_true",
    )
    spr_suggest.add_argument(
        "-" + INDEX[0],
        "--" + INDEX,
        help="Include Inventory.objects list indices " "with the search results",
        action="store_true",
    )
    spr_suggest.add_argument(
        "-" + SCORE[0],
        "--" + SCORE,
        help="Include fuzzywuzzy scores " "with the search results",
        action="store_true",
    )
    spr_suggest.add_argument(
        "-" + THRESH[0],
        "--" + THRESH,
        help="Match quality threshold, integer 0-100, "
        "default 75. Default is suitable when "
        "'search' is exactly a known object name. "
        "A value of 30-50 gives better results "
        "for approximate matches.",
        default=DEF_THRESH,
        type=int,
        choices=range(101),
        metavar="{0-100}",
    )
    # --url is offered by both subparsers, so it is registered twice
    spr_suggest.add_argument(
        "-" + URL[0],
        "--" + URL,
        help="Treat 'infile' as a URL for download",
        action="store_true",
    )
    return prs
def resolve_inpath(in_path):
    """Resolve the input file, handling invalid values.

    Currently, only checks for existence and not-directory.

    Parameters
    ----------
    in_path
        |str| -- Path to desired input file

    Returns
    -------
    abs_path
        |str| -- Absolute path to indicated file

    Raises
    ------
    :exc:`FileNotFoundError`
        If a file is not found at the given path
    """
    # Guard clause: the path MUST name an existing, regular file
    if os.path.isfile(in_path):
        return os.path.abspath(in_path)
    raise FileNotFoundError("Indicated path is not a valid file")
def resolve_outpath(out_path, in_path, params):
    r"""Resolve the output location, handling mode-specific defaults.

    If the output path or basename are not specified, they are
    taken as the same as the input file. If the extension is
    unspecified, it is taken as the appropriate mode-specific value
    from :data:`DEF_OUT_EXT`.

    If :data:`URL` is passed, the input directory
    is taken to be :func:`os.getcwd` and the input basename
    is taken as :data:`DEF_BASENAME`.

    Parameters
    ----------
    out_path
        |str| or |None| -- Output location provided by the user,
        or |None| if omitted
    in_path
        |str| -- For a local input file, its absolute path.
        For a URL, the (possibly truncated) URL text.
    params
        |dict| -- Parameters/values mapping from the active subparser

    Returns
    -------
    out_path
        |str| -- Absolute path to the target output file
    """
    mode = params[MODE]

    # Input folder & basename: cwd + default name in URL mode,
    # otherwise split from the resolved input path
    if params[URL]:
        src_dir, src_name = os.getcwd(), DEF_BASENAME
    else:
        src_dir, src_name = os.path.split(in_path)

    # Mode-appropriate default output filename
    default_name = os.path.splitext(src_name)[0] + DEF_OUT_EXT[mode]

    if not out_path:
        # Nothing specified; use all defaults
        return os.path.join(src_dir, default_name)

    # A bare directory means "default filename inside that directory"
    if os.path.isdir(out_path):
        dest_dir, dest_name = out_path, None
    else:
        dest_dir, dest_name = os.path.split(out_path)

    # Fall back to the input folder and/or the default filename
    # for whichever component was left unspecified
    return os.path.join(dest_dir or src_dir, dest_name or default_name)
def import_infile(in_path):
    """Attempt import of indicated file.

    Convenience function wrapping attempts to load an
    |Inventory| from a local path.

    Parameters
    ----------
    in_path
        |str| -- Path to input file

    Returns
    -------
    inv
        |Inventory| or |None| -- If instantiation with the file at
        `in_path` succeeds, the resulting |Inventory| instance;
        otherwise, |None|
    """
    # First, general import: handles zlib and plaintext inventories
    try:
        return Inv(in_path)
    except AttributeError:
        pass  # Fall through to the JSON attempt

    # Second, JSON; a decode failure means the format is unrecognized
    try:
        return Inv(readjson(in_path))
    except JSONDecodeError:
        return None
def write_plaintext(inv, path, *, expand=False, contract=False):
    """Write an |Inventory| to plaintext.

    Newlines are inserted in an OS-aware manner,
    based on the value of :data:`os.linesep`.

    Calling with both `expand` and `contract` as |True| is invalid.

    Parameters
    ----------
    inv
        |Inventory| -- Objects inventory to be written as plaintext
    path
        |str| -- Path to output file
    expand
        |bool| *(optional)* -- Generate output with any
        :data:`~sphobjinv.data.SuperDataObj.uri` or
        :data:`~sphobjinv.data.SuperDataObj.dispname`
        abbreviations expanded
    contract
        |bool| *(optional)* -- Generate output with abbreviated
        :data:`~sphobjinv.data.SuperDataObj.uri` and
        :data:`~sphobjinv.data.SuperDataObj.dispname` values

    Raises
    ------
    ValueError
        If both `expand` and `contract` are |True|
    """
    raw_bytes = inv.data_file(expand=expand, contract=contract)
    # Normalize newlines to the host OS convention before writing
    os_newline = os.linesep.encode("utf-8")
    writebytes(path, raw_bytes.replace(b"\n", os_newline))
def write_zlib(inv, path, *, expand=False, contract=False):
    """Write an |Inventory| to zlib-compressed format.

    Calling with both `expand` and `contract` as |True| is invalid.

    Parameters
    ----------
    inv
        |Inventory| -- Objects inventory to be written zlib-compressed
    path
        |str| -- Path to output file
    expand
        |bool| *(optional)* -- Generate output with any
        :data:`~sphobjinv.data.SuperDataObj.uri` or
        :data:`~sphobjinv.data.SuperDataObj.dispname`
        abbreviations expanded
    contract
        |bool| *(optional)* -- Generate output with abbreviated
        :data:`~sphobjinv.data.SuperDataObj.uri` and
        :data:`~sphobjinv.data.SuperDataObj.dispname` values

    Raises
    ------
    ValueError
        If both `expand` and `contract` are |True|
    """
    raw_bytes = inv.data_file(expand=expand, contract=contract)
    # Compress the serialized inventory payload before writing
    writebytes(path, compress(raw_bytes))
def write_json(inv, path, *, expand=False, contract=False):
    """Write an |Inventory| to JSON.

    Writes output via
    :func:`fileops.writejson() <sphobjinv.fileops.writejson>`.

    Calling with both `expand` and `contract` as |True| is invalid.

    Parameters
    ----------
    inv
        |Inventory| -- Objects inventory to be written as JSON
    path
        |str| -- Path to output file
    expand
        |bool| *(optional)* -- Generate output with any
        :data:`~sphobjinv.data.SuperDataObj.uri` or
        :data:`~sphobjinv.data.SuperDataObj.dispname`
        abbreviations expanded
    contract
        |bool| *(optional)* -- Generate output with abbreviated
        :data:`~sphobjinv.data.SuperDataObj.uri` and
        :data:`~sphobjinv.data.SuperDataObj.dispname` values

    Raises
    ------
    ValueError
        If both `expand` and `contract` are |True|
    """
    json_dict = inv.json_dict(expand=expand, contract=contract)
    writejson(path, json_dict)
def do_convert(inv, in_path, params):
    r"""Carry out the conversion operation, including writing output.

    If :data:`OVERWRITE` is passed and the output file
    (the default location, or as passed to :data:`OUTFILE`)
    exists, it will be overwritten without a prompt. Otherwise,
    the user will be queried if it is desired to overwrite
    the existing file.

    If :data:`QUIET` is passed, nothing will be
    printed to |cour|\ stdout\ |/cour|
    (potentially useful for scripting),
    and any existing output file will be overwritten
    without prompting.

    Parameters
    ----------
    inv
        |Inventory| -- Inventory object to be output in the format
        indicated by :data:`MODE`.
    in_path
        |str| -- For a local input file, its absolute path.
        For a URL, the (possibly truncated) URL text.
    params
        |dict| -- Parameters/values mapping from the active subparser
    """
    mode = params[MODE]
    # Work up the output location
    try:
        out_path = resolve_outpath(params[OUTFILE], in_path, params)
    except Exception as e: # pragma: no cover
        # This may not actually be reachable except in exceptional situations
        selective_print("\nError while constructing output file path:", params)
        selective_print(err_format(e), params)
        sys.exit(1)
    # If exists, confirm overwrite; clobber if QUIET
    if os.path.isfile(out_path) and not params[QUIET] and not params[OVERWRITE]:
        resp = yesno_prompt("File exists. Overwrite (Y/N)? ")
        if resp.lower() == "n":
            print("\nExiting...")
            sys.exit(0)
    # Write the output file. The modes are mutually exclusive
    # (argparse 'choices'), so at most one branch executes even
    # though plain 'if's are used rather than an elif chain.
    try:
        if mode == ZLIB:
            write_zlib(inv, out_path, expand=params[EXPAND], contract=params[CONTRACT])
        if mode == PLAIN:
            write_plaintext(
                inv, out_path, expand=params[EXPAND], contract=params[CONTRACT]
            )
        if mode == JSON:
            write_json(inv, out_path, expand=params[EXPAND], contract=params[CONTRACT])
    except Exception as e:
        selective_print("\nError during write of output file:", params)
        selective_print(err_format(e), params)
        sys.exit(1)
    # Report success, if not QUIET
    selective_print(
        "Conversion completed.\n"
        "'{0}' converted to '{1}' ({2}).".format(in_path, out_path, mode),
        params,
    )
def do_suggest(inv, params):
    r"""Perform the suggest call and output the results.

    Results are printed one per line.

    If neither :data:`INDEX` nor :data:`SCORE` is specified,
    the results are output without a header.
    If either or both are specified,
    the results are output in a lightweight tabular format.

    If the number of results exceeds
    :data:`SUGGEST_CONFIRM_LENGTH`,
    the user will be queried whether to display
    all of the returned results
    unless :data:`ALL` is specified.

    No |cour|\ -\\-quiet\ |/cour| option is available here, since
    a silent mode for suggestion output is nonsensical.

    Parameters
    ----------
    inv
        |Inventory| -- Inventory object to be output in the format
        indicated by :data:`MODE`.
    params
        |dict| -- Parameters/values mapping from the active subparser
    """
    with_index = params[INDEX]
    with_score = params[SCORE]
    results = inv.suggest(
        params[SEARCH],
        thresh=params[THRESH],
        with_index=with_index,
        with_score=with_score,
    )
    if len(results) == 0:
        print("No results found.")
        return
    # Confirm before dumping a very long result list (unless --all)
    if len(results) > SUGGEST_CONFIRM_LENGTH and not params[ALL]:
        resp = yesno_prompt("Display all {0} results ".format(len(results)) + "(Y/N)? ")
        if resp.lower() == "n":
            print("\nExiting...")
            sys.exit(0)
    # Field widths in output
    score_width = 7
    index_width = 7
    # With extra columns, results are tuples whose first element is
    # the name; otherwise each result is the bare name string
    if with_index or with_score:
        rst_width = max(len(_[0]) for _ in results)
    else:
        rst_width = max(len(_) for _ in results)
    # Pad so the header rule extends past the longest name
    rst_width += 2
    if with_index:
        if with_score:
            fmt = "{{0: <{0}}} {{1: ^{1}}} {{2: ^{2}}}".format(
                rst_width, score_width, index_width
            )
            print("")
            print(fmt.format(" Name", "Score", "Index"))
            print(fmt.format("-" * rst_width, "-" * score_width, "-" * index_width))
            print("\n".join(fmt.format(*_) for _ in results))
        else:
            fmt = "{{0: <{0}}} {{1: ^{1}}}".format(rst_width, index_width)
            print("")
            print(fmt.format(" Name", "Index"))
            print(fmt.format("-" * rst_width, "-" * index_width))
            print("\n".join(fmt.format(*_) for _ in results))
    else:
        if with_score:
            fmt = "{{0: <{0}}} {{1: ^{1}}}".format(rst_width, score_width)
            print("")
            print(fmt.format(" Name", "Score"))
            print(fmt.format("-" * rst_width, "-" * score_width))
            print("\n".join(fmt.format(*_) for _ in results))
        else:
            print("\n".join(str(_) for _ in results))
def inv_local(params):
    """Create |Inventory| from local source.

    Uses :func:`resolve_inpath` to sanity-check and/or convert
    :data:`INFILE`.

    Calls :func:`sys.exit` internally in error-exit situations.

    Parameters
    ----------
    params
        |dict| -- Parameters/values mapping from the active subparser

    Returns
    -------
    inv
        |Inventory| -- Object representation of the inventory
        at :data:`INFILE`
    in_path
        |str| -- Input file path as resolved/checked by
        :func:`resolve_inpath`
    """
    # Sanity-check and absolutize the input file path
    try:
        resolved_path = resolve_inpath(params[INFILE])
    except Exception as exc:
        selective_print("\nError while parsing input file path:", params)
        selective_print(err_format(exc), params)
        sys.exit(1)

    # import_infile signals an unrecognized format by returning None
    loaded_inv = import_infile(resolved_path)
    if loaded_inv is None:
        selective_print("\nError: Unrecognized file format", params)
        sys.exit(1)

    return loaded_inv, resolved_path
def inv_url(params):
    """Create |Inventory| from file downloaded from URL.

    Initially, treats :data:`INFILE` as a download URL to be passed to
    the `url` initialization argument
    of :class:`~sphobjinv.inventory.Inventory`.

    If an inventory is not found at that exact URL, progressively
    searches the directory tree of the URL for |objects.inv|.

    Calls :func:`sys.exit` internally in error-exit situations.

    Parameters
    ----------
    params
        |dict| -- Parameters/values mapping from the active subparser

    Returns
    -------
    inv
        |Inventory| -- Object representation of the inventory
        at :data:`INFILE`
    ret_path
        |str| -- URL from :data:`INFILE` used to construct `inv`.
        If URL is longer than 45 characters, the central portion is elided.
    """
    # Deferred imports: keep module import cheap when --url is unused
    from urllib.error import HTTPError, URLError
    from sphobjinv.error import VersionError
    from sphobjinv.fileops import urlwalk
    from sphobjinv.inventory import Inventory
    in_file = params[INFILE]
    # Disallow --url mode on local files
    if in_file.startswith("file:/"):
        selective_print("\nError: URL mode on local file is invalid", params)
        sys.exit(1)
    # Need to initialize the inventory variable
    inv = None
    # Try URL as provided
    try:
        inv = Inventory(url=in_file)
    except (HTTPError, ValueError, VersionError, URLError):
        selective_print("No inventory at provided URL.", params)
    else:
        selective_print("Remote inventory found.", params)
        url = in_file
    # Keep searching if inv not found yet; 'url' retains the last
    # attempted value after the loop, which is the one that succeeded
    # when 'break' fires
    if not inv:
        for url in urlwalk(in_file):
            selective_print('Attempting "{0}" ...'.format(url), params)
            try:
                inv = Inventory(url=url)
            except (ValueError, HTTPError):
                pass
            else:
                selective_print("Remote inventory found.", params)
                break
    # Cosmetic line break
    selective_print(" ", params)
    # Success or no?
    if not inv:
        selective_print("No inventory found!", params)
        sys.exit(1)
    # Elide the middle of long URLs for display purposes
    if len(url) > 45:
        ret_path = url[:20] + "[...]" + url[-20:]
    else: # pragma: no cover
        ret_path = url
    return inv, ret_path
def main():
    r"""Handle command line invocation.

    Parses command line arguments,
    handling the no-arguments and
    :data:`VERSION` cases.

    Creates the |Inventory| from the indicated source
    and method.

    Invokes :func:`do_convert` or :func:`do_suggest`
    per the subparser name stored in :data:`SUBPARSER_NAME`.
    """
    # If no args passed, stick in '-h' so argparse prints usage and exits
    if len(sys.argv) == 1:
        sys.argv.append("-h")
    # Parse commandline arguments
    prs = getparser()
    ns, args_left = prs.parse_known_args()
    params = vars(ns)
    # Print version &c. and exit if indicated
    if params[VERSION]:
        print(VER_TXT)
        sys.exit(0)
    # Regardless of mode, insert extra blank line
    # for cosmetics
    selective_print(" ", params)
    # Generate the input Inventory based on --url or not.
    # These inventory-load functions should call
    # sys.exit(n) internally in error-exit situations
    if params[URL]:
        inv, in_path = inv_url(params)
    else:
        inv, in_path = inv_local(params)
    # Perform action based upon mode; two-letter prefix comparison
    # accommodates the abbreviated subparser aliases ('co', 'su')
    if params[SUBPARSER_NAME][:2] == CONVERT[:2]:
        do_convert(inv, in_path, params)
    elif params[SUBPARSER_NAME][:2] == SUGGEST[:2]:
        do_suggest(inv, params)
    # Clean exit
    sys.exit(0)
# Script entry point when executed directly rather than imported
if __name__ == "__main__": # pragma: no cover
    main()
# ---
"""
xlwings - Make Excel fly with Python!
Homepage and documentation: http://xlwings.org
See also: http://zoomeranalytics.com
Copyright (C) 2014-2015, Zoomer Analytics LLC.
All rights reserved.
License: BSD 3-clause (see LICENSE.txt for details)
"""
import os
import sys
import re
import numbers
import itertools
import inspect
import collections
import tempfile
import shutil
from . import xlplatform, string_types, time_types, xrange, map, ShapeAlreadyExists
from .constants import ChartType
# Optional imports: each soft dependency degrades to a None sentinel
# when unavailable, so feature code can test for availability at
# call time instead of failing at import time.
try:
    import numpy as np
except ImportError:
    np = None
try:
    import pandas as pd
except ImportError:
    pd = None
try:
    from matplotlib.backends.backend_agg import FigureCanvas
except ImportError:
    FigureCanvas = None
try:
    from PIL import Image
except ImportError:
    Image = None
class Application(object):
    """
    Application is dependent on the Workbook since there might be different application instances on Windows.

    All operations are thin delegations to the ``xlplatform`` layer;
    this class only holds references to the owning Workbook and its
    platform-level application object.
    """
    def __init__(self, wkb):
        # wkb: the xlwings Workbook whose Excel instance this wraps
        self.wkb = wkb
        self.xl_app = wkb.xl_app
    @property
    def version(self):
        """
        Returns Excel's version string.

        .. versionadded:: 0.5.0
        """
        return xlplatform.get_app_version_string(self.wkb.xl_workbook)
    def quit(self):
        """
        Quits the application without saving any workbooks.

        .. versionadded:: 0.3.3
        """
        xlplatform.quit_app(self.xl_app)
    @property
    def screen_updating(self):
        """
        True if screen updating is turned on. Read/write Boolean.

        .. versionadded:: 0.3.3
        """
        return xlplatform.get_screen_updating(self.xl_app)
    @screen_updating.setter
    def screen_updating(self, value):
        xlplatform.set_screen_updating(self.xl_app, value)
    @property
    def visible(self):
        """
        Gets or sets the visibility of Excel to ``True`` or ``False``. This property can also be
        conveniently set during instantiation of a new Workbook: ``Workbook(app_visible=False)``

        .. versionadded:: 0.3.3
        """
        return xlplatform.get_visible(self.xl_app)
    @visible.setter
    def visible(self, value):
        xlplatform.set_visible(self.xl_app, value)
    @property
    def calculation(self):
        """
        Returns or sets a Calculation value that represents the calculation mode.

        Example
        -------
        >>> from xlwings import Workbook, Application
        >>> from xlwings.constants import Calculation
        >>> wb = Workbook()
        >>> Application(wkb=wb).calculation = Calculation.xlCalculationManual

        .. versionadded:: 0.3.3
        """
        return xlplatform.get_calculation(self.xl_app)
    @calculation.setter
    def calculation(self, value):
        xlplatform.set_calculation(self.xl_app, value)
    def calculate(self):
        """
        Calculates all open Workbooks

        .. versionadded:: 0.3.6
        """
        xlplatform.calculate(self.xl_app)
class Workbook(object):
    """
    ``Workbook`` connects an Excel Workbook with Python. You can create a new connection from Python with

    * a new workbook: ``wb = Workbook()``
    * the active workbook: ``wb = Workbook.active()``
    * an unsaved workbook: ``wb = Workbook('Book1')``
    * a saved (open) workbook by name (incl. xlsx etc): ``wb = Workbook('MyWorkbook.xlsx')``
    * a saved (open or closed) workbook by path: ``wb = Workbook(r'C:\\path\\to\\file.xlsx')``

    Keyword Arguments
    -----------------
    fullname : str, default None
        Full path or name (incl. xlsx, xlsm etc.) of existing workbook or name of an unsaved workbook.
    xl_workbook : pywin32 or appscript Workbook object, default None
        This enables to turn existing Workbook objects of the underlying libraries into xlwings objects
    app_visible : boolean, default True
        The resulting Workbook will be visible by default. To open it without showing a window,
        set ``app_visible=False``. Or, to not alter the visibility (e.g., if Excel is already running),
        set ``app_visible=None``. Note that this property acts on the whole Excel instance, not just the
        specific Workbook.
    app_target : str, default None
        Mac-only, use the full path to the Excel application,
        e.g. ``/Applications/Microsoft Office 2011/Microsoft Excel`` or ``/Applications/Microsoft Excel``
        On Windows, if you want to change the version of Excel that xlwings talks to, go to ``Control Panel >
        Programs and Features`` and ``Repair`` the Office version that you want as default.

    To create a connection when the Python function is called from Excel, use:
    ``wb = Workbook.caller()``
    """
    def __init__(self, fullname=None, xl_workbook=None, app_visible=True, app_target=None):
        if xl_workbook:
            # Wrap an existing platform-level workbook object directly
            self.xl_workbook = xl_workbook
            self.xl_app = xlplatform.get_app(self.xl_workbook, app_target)
        elif fullname:
            self.fullname = fullname
            if not os.path.isfile(fullname) or xlplatform.is_file_open(self.fullname):
                # Connect to unsaved Workbook (e.g. 'Workbook1') or to an opened Workbook
                self.xl_app, self.xl_workbook = xlplatform.get_open_workbook(self.fullname, app_target)
            else:
                # Open Excel and the Workbook
                self.xl_app, self.xl_workbook = xlplatform.open_workbook(self.fullname, app_target)
        else:
            # Open Excel if necessary and create a new workbook
            self.xl_app, self.xl_workbook = xlplatform.new_workbook(app_target)
        self.name = xlplatform.get_workbook_name(self.xl_workbook)
        self.active_sheet = Sheet.active(wkb=self)
        if fullname is None:
            self.fullname = xlplatform.get_fullname(self.xl_workbook)
        # Make the most recently created Workbook the default when creating Range objects directly
        xlplatform.set_xl_workbook_current(self.xl_workbook)
        # app_visible=None means "leave Excel's visibility alone"
        if app_visible is not None:
            xlplatform.set_visible(self.xl_app, app_visible)
    @classmethod
    def active(cls, app_target=None):
        """
        Returns the Workbook that is currently active or has been active last. On Windows,
        this works across all instances.

        .. versionadded:: 0.4.1
        """
        xl_workbook = xlplatform.get_active_workbook(app_target=app_target)
        return cls(xl_workbook=xl_workbook, app_target=app_target)
    @classmethod
    def caller(cls):
        """
        Creates a connection when the Python function is called from Excel:
        ``wb = Workbook.caller()``

        Always pack the ``Workbook`` call into the function being called from Excel, e.g.:

        .. code-block:: python

            def my_macro():
                wb = Workbook.caller()
                Range('A1').value = 1

        To be able to easily invoke such code from Python for debugging, use ``Workbook.set_mock_caller()``.

        .. versionadded:: 0.3.0
        """
        if hasattr(Workbook, '_mock_file'):
            # Use mocking Workbook, see Workbook.set_mock_caller()
            _, xl_workbook = xlplatform.get_open_workbook(Workbook._mock_file)
            return cls(xl_workbook=xl_workbook)
        elif len(sys.argv) > 2 and sys.argv[2] == 'from_xl':
            # Connect to the workbook from which this code has been invoked;
            # argv layout is established by the VBA caller side
            fullname = sys.argv[1].lower()
            if sys.platform.startswith('win'):
                xl_app, xl_workbook = xlplatform.get_open_workbook(fullname, hwnd=sys.argv[4])
                return cls(xl_workbook=xl_workbook)
            else:
                xl_app, xl_workbook = xlplatform.get_open_workbook(fullname, app_target=sys.argv[3])
                return cls(xl_workbook=xl_workbook, app_target=sys.argv[3])
        elif xlplatform.get_xl_workbook_current():
            # Called through ExcelPython connection
            return cls(xl_workbook=xlplatform.get_xl_workbook_current())
        else:
            raise Exception('Workbook.caller() must not be called directly. Call through Excel or set a mock caller '
                            'first with Workbook.set_mock_caller().')
    @staticmethod
    def set_mock_caller(fullpath):
        """
        Sets the Excel file which is used to mock ``Workbook.caller()`` when the code is called from within Python.

        Examples
        --------
        ::

            # This code runs unchanged from Excel and Python directly
            import os
            from xlwings import Workbook, Range

            def my_macro():
                wb = Workbook.caller()
                Range('A1').value = 'Hello xlwings!'

            if __name__ == '__main__':
                # Mock the calling Excel file
                Workbook.set_mock_caller(r'C:\\path\\to\\file.xlsx')
                my_macro()

        .. versionadded:: 0.3.1
        """
        Workbook._mock_file = fullpath
    @classmethod
    def current(cls):
        """
        Returns the current Workbook object, i.e. the default Workbook used by ``Sheet``, ``Range`` and ``Chart`` if not
        specified otherwise. On Windows, in case there are various instances of Excel running, opening an existing or
        creating a new Workbook through ``Workbook()`` is acting on the same instance of Excel as this Workbook. Use
        like this: ``Workbook.current()``.

        .. versionadded:: 0.2.2
        """
        return cls(xl_workbook=xlplatform.get_xl_workbook_current(), app_visible=None)
    def set_current(self):
        """
        This makes the Workbook the default that ``Sheet``, ``Range`` and ``Chart`` use if not specified
        otherwise. On Windows, in case there are various instances of Excel running, opening an existing or creating a
        new Workbook through ``Workbook()`` is acting on the same instance of Excel as this Workbook.

        .. versionadded:: 0.2.2
        """
        xlplatform.set_xl_workbook_current(self.xl_workbook)
    def get_selection(self, asarray=False, atleast_2d=False):
        """
        Returns the currently selected cells from Excel as ``Range`` object.

        Keyword Arguments
        -----------------
        asarray : boolean, default False
            returns a NumPy array where empty cells are shown as nan
        atleast_2d : boolean, default False
            Returns 2d lists/arrays even if the Range is a Row or Column.

        Returns
        -------
        Range object
        """
        return Range(xlplatform.get_selection_address(self.xl_app), wkb=self, asarray=asarray, atleast_2d=atleast_2d)
    def close(self):
        """
        Closes the Workbook without saving it.

        .. versionadded:: 0.1.1
        """
        xlplatform.close_workbook(self.xl_workbook)
    def save(self, path=None):
        """
        Saves the Workbook. If a path is being provided, this works like SaveAs() in Excel. If no path is specified and
        if the file hasn't been saved previously, it's being saved in the current working directory with the current
        filename. Existing files are overwritten without prompting.

        Arguments
        ---------
        path : str, default None
            Full path to the workbook

        Example
        -------
        >>> from xlwings import Workbook
        >>> wb = Workbook()
        >>> wb.save()
        >>> wb.save(r'C:\\path\\to\\new_file_name.xlsx')

        .. versionadded:: 0.3.1
        """
        xlplatform.save_workbook(self.xl_workbook, path)
    @staticmethod
    def get_xl_workbook(wkb):
        """
        Returns the ``xl_workbook_current`` if ``wkb`` is ``None``, otherwise the ``xl_workbook`` of ``wkb``. On Windows,
        ``xl_workbook`` is a pywin32 COM object, on Mac it's an appscript object.

        Arguments
        ---------
        wkb : Workbook or None
            Workbook object
        """
        if wkb is None and xlplatform.get_xl_workbook_current() is None:
            raise NameError('You must first instantiate a Workbook object.')
        elif wkb is None:
            xl_workbook = xlplatform.get_xl_workbook_current()
        else:
            xl_workbook = wkb.xl_workbook
        return xl_workbook
    @staticmethod
    def open_template():
        """
        Creates a new Excel file with the xlwings VBA module already included. This method must be called from an
        interactive Python shell::

        >>> Workbook.open_template()

        .. versionadded:: 0.3.3
        """
        this_dir = os.path.abspath(os.path.dirname(inspect.getfile(inspect.currentframe())))
        template_file = 'xlwings_template.xltm'
        # Remove a stale Office lock file ('~$...') if one is present
        try:
            os.remove(os.path.join(this_dir, '~$' + template_file))
        except OSError:
            pass
        xlplatform.open_template(os.path.realpath(os.path.join(this_dir, template_file)))
    @property
    def names(self):
        """
        A collection of all the (platform-specific) name objects in the application or workbook.
        Each name object represents a defined name for a range of cells (built-in or custom ones).

        .. versionadded:: 0.4.0
        """
        names = NamesDict(self.xl_workbook)
        xlplatform.set_names(self.xl_workbook, names)
        return names
    def __repr__(self):
        return "<Workbook '{0}'>".format(self.name)
class Sheet(object):
"""
Represents a Sheet of the current Workbook. Either call it with the Sheet name or index::
Sheet('Sheet1')
Sheet(1)
Arguments
---------
sheet : str or int
Sheet name or index
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
.. versionadded:: 0.2.3
"""
def __init__(self, sheet, wkb=None):
self.xl_workbook = Workbook.get_xl_workbook(wkb)
self.sheet = sheet
self.xl_sheet = xlplatform.get_xl_sheet(self.xl_workbook, self.sheet)
def activate(self):
"""Activates the sheet."""
xlplatform.activate_sheet(self.xl_workbook, self.sheet)
def autofit(self, axis=None):
"""
Autofits the width of either columns, rows or both on a whole Sheet.
Arguments
---------
axis : string, default None
- To autofit rows, use one of the following: ``rows`` or ``r``
- To autofit columns, use one of the following: ``columns`` or ``c``
- To autofit rows and columns, provide no arguments
Examples
--------
::
# Autofit columns
Sheet('Sheet1').autofit('c')
# Autofit rows
Sheet('Sheet1').autofit('r')
# Autofit columns and rows
Range('Sheet1').autofit()
.. versionadded:: 0.2.3
"""
xlplatform.autofit_sheet(self, axis)
def clear_contents(self):
"""Clears the content of the whole sheet but leaves the formatting."""
xlplatform.clear_contents_worksheet(self.xl_workbook, self.sheet)
def clear(self):
"""Clears the content and formatting of the whole sheet."""
xlplatform.clear_worksheet(self.xl_workbook, self.sheet)
@property
def name(self):
"""Get or set the name of the Sheet."""
return xlplatform.get_worksheet_name(self.xl_sheet)
@name.setter
def name(self, value):
xlplatform.set_worksheet_name(self.xl_sheet, value)
@property
def index(self):
"""Returns the index of the Sheet."""
return xlplatform.get_worksheet_index(self.xl_sheet)
@classmethod
def active(cls, wkb=None):
"""Returns the active Sheet. Use like so: ``Sheet.active()``"""
xl_workbook = Workbook.get_xl_workbook(wkb)
return cls(xlplatform.get_worksheet_name(xlplatform.get_active_sheet(xl_workbook)), wkb)
@classmethod
def add(cls, name=None, before=None, after=None, wkb=None):
    """
    Creates a new worksheet: the new worksheet becomes the active sheet. If neither ``before`` nor
    ``after`` is specified, the new Sheet will be placed at the end.

    Arguments
    ---------
    name : str, default None
        Sheet name, defaults to Excel standard name

    before : str or int, default None
        Sheet name or index

    after : str or int, default None
        Sheet name or index

    Returns
    -------
    Sheet object

    Examples
    --------
    >>> Sheet.add()  # Place at end with default name
    >>> Sheet.add('NewSheet', before='Sheet1')  # Include name and position
    >>> new_sheet = Sheet.add(after=3)
    >>> new_sheet.index
    4

    .. versionadded:: 0.2.3
    """
    xl_workbook = Workbook.get_xl_workbook(wkb)

    # Resolve the insertion anchor: with no anchor given, append after the
    # last existing sheet; otherwise turn the given name/index into a Sheet.
    if before is None and after is None:
        after = Sheet(Sheet.count(wkb=wkb), wkb=wkb)
    elif before:
        before = Sheet(before, wkb=wkb)
    elif after:
        after = Sheet(after, wkb=wkb)

    if not name:
        # No explicit name: let Excel pick its standard name.
        xl_sheet = xlplatform.add_sheet(xl_workbook, before, after)
        return cls(xlplatform.get_worksheet_name(xl_sheet), wkb)

    # Explicit name: reject case-insensitive duplicates before creating.
    existing = (sheet.name.lower() for sheet in Sheet.all(wkb=wkb))
    if name.lower() in existing:
        raise Exception('That sheet name is already taken.')
    xl_sheet = xlplatform.add_sheet(xl_workbook, before, after)
    xlplatform.set_worksheet_name(xl_sheet, name)
    return cls(name, wkb)
@staticmethod
def count(wkb=None):
    """
    Counts the number of Sheets.

    Keyword Arguments
    -----------------
    wkb : Workbook object, default Workbook.current()
        Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.

    Examples
    --------
    >>> Sheet.count()
    3

    .. versionadded:: 0.2.3
    """
    xl_workbook = Workbook.get_xl_workbook(wkb)
    return xlplatform.count_worksheets(xl_workbook)
@staticmethod
def all(wkb=None):
    """
    Returns a list with all Sheet objects.

    Keyword Arguments
    -----------------
    wkb : Workbook object, default Workbook.current()
        Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.

    Examples
    --------
    >>> Sheet.all()
    [<Sheet 'Sheet1' of Workbook 'Book1'>, <Sheet 'Sheet2' of Workbook 'Book1'>]
    >>> [i.name.lower() for i in Sheet.all()]
    ['sheet1', 'sheet2']
    >>> [i.autofit() for i in Sheet.all()]

    .. versionadded:: 0.2.3
    """
    xl_workbook = Workbook.get_xl_workbook(wkb)
    # Worksheet indices are 1-based in Excel.
    n_sheets = xlplatform.count_worksheets(xl_workbook)
    return [Sheet(idx, wkb=wkb) for idx in range(1, n_sheets + 1)]
def delete(self):
    """
    Deletes the Sheet.

    .. versionadded: 0.6.0
    """
    xlplatform.delete_sheet(self)
def __repr__(self):
    # e.g. <Sheet 'Sheet1' of Workbook 'Book1'>
    return "<Sheet '{0}' of Workbook '{1}'>".format(self.name, xlplatform.get_workbook_name(self.xl_workbook))
class Range(object):
    """
    A Range object can be instantiated with the following arguments::

        Range('A1')          Range('Sheet1', 'A1')          Range(1, 'A1')
        Range('A1:C3')       Range('Sheet1', 'A1:C3')       Range(1, 'A1:C3')
        Range((1,2))         Range('Sheet1', (1,2))         Range(1, (1,2))
        Range((1,1), (3,3))  Range('Sheet1', (1,1), (3,3))  Range(1, (1,1), (3,3))
        Range('NamedRange')  Range('Sheet1', 'NamedRange')  Range(1, 'NamedRange')

    The Sheet can also be provided as Sheet object::

        sh = Sheet(1)
        Range(sh, 'A1')

    If no worksheet name is provided as first argument, it will take the Range from the active sheet.

    You usually want to go for ``Range(...).value`` to get the values (as list of lists).

    Arguments
    ---------
    *args :
        Definition of sheet (optional) and Range in the above described combinations.

    Keyword Arguments
    -----------------
    asarray : boolean, default False
        Returns a NumPy array (atleast_1d) where empty cells are transformed into nan.

    index : boolean, default True
        Includes the index when setting a Pandas DataFrame or Series.

    header : boolean, default True
        Includes the column headers when setting a Pandas DataFrame.

    atleast_2d : boolean, default False
        Returns 2d lists/arrays even if the Range is a Row or Column.

    wkb : Workbook object, default Workbook.current()
        Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
    """
    def __init__(self, *args, **kwargs):
        # Arguments: dispatch on number/types of the positional args. Each
        # branch either records an A1-style/named address in ``range_address``
        # (resolved to indices further below) or sets the 1-based corner
        # indices row1/col1/row2/col2 directly from tuples.
        if len(args) == 1 and isinstance(args[0], string_types):
            sheet_name_or_index = None
            range_address = args[0]
        elif len(args) == 1 and isinstance(args[0], tuple):
            # Single (row, col) tuple: a one-cell Range on the active sheet.
            sheet_name_or_index = None
            range_address = None
            self.row1 = args[0][0]
            self.col1 = args[0][1]
            self.row2 = self.row1
            self.col2 = self.col1
        elif (len(args) == 2
              and isinstance(args[0], (numbers.Number, string_types, Sheet))
              and isinstance(args[1], string_types)):
            # Sheet + address string.
            if isinstance(args[0], Sheet):
                sheet_name_or_index = args[0].index
            else:
                sheet_name_or_index = args[0]
            range_address = args[1]
        elif (len(args) == 2
              and isinstance(args[0], (numbers.Number, string_types, Sheet))
              and isinstance(args[1], tuple)):
            # Sheet + single (row, col) tuple.
            if isinstance(args[0], Sheet):
                sheet_name_or_index = args[0].index
            else:
                sheet_name_or_index = args[0]
            range_address = None
            self.row1 = args[1][0]
            self.col1 = args[1][1]
            self.row2 = self.row1
            self.col2 = self.col1
        elif len(args) == 2 and isinstance(args[0], tuple):
            # Two corner tuples on the active sheet.
            sheet_name_or_index = None
            range_address = None
            self.row1 = args[0][0]
            self.col1 = args[0][1]
            self.row2 = args[1][0]
            self.col2 = args[1][1]
        elif len(args) == 3:
            # Sheet + two corner tuples.
            if isinstance(args[0], Sheet):
                sheet_name_or_index = args[0].index
            else:
                sheet_name_or_index = args[0]
            range_address = None
            self.row1 = args[1][0]
            self.col1 = args[1][1]
            self.row2 = args[2][0]
            self.col2 = args[2][1]

        # Keyword Arguments
        self.kwargs = kwargs
        self.workbook = kwargs.get('wkb', None)
        if self.workbook is None and xlplatform.get_xl_workbook_current() is None:
            raise NameError('You must first instantiate a Workbook object.')
        elif self.workbook is None:
            self.xl_workbook = xlplatform.get_xl_workbook_current()
        else:
            self.xl_workbook = self.workbook.xl_workbook
        self.index = kwargs.get('index', True)  # Set DataFrame with index
        self.header = kwargs.get('header', True)  # Set DataFrame with header
        self.asarray = kwargs.get('asarray', False)  # Return Data as NumPy Array
        self.strict = kwargs.get('strict', False)  # Stop table/horizontal/vertical at empty cells that contain formulas
        self.atleast_2d = kwargs.get('atleast_2d', False)  # Force data to be list of list or a 2d numpy array

        # Get sheet
        if sheet_name_or_index:
            self.xl_sheet = xlplatform.get_worksheet(self.xl_workbook, sheet_name_or_index)
        else:
            self.xl_sheet = xlplatform.get_active_sheet(self.xl_workbook)

        # Get xl_range object: resolve an address string (or named range)
        # into the 1-based corner indices.
        if range_address:
            self.row1 = xlplatform.get_first_row(self.xl_sheet, range_address)
            self.col1 = xlplatform.get_first_column(self.xl_sheet, range_address)
            self.row2 = self.row1 + xlplatform.count_rows(self.xl_sheet, range_address) - 1
            self.col2 = self.col1 + xlplatform.count_columns(self.xl_sheet, range_address) - 1

        if 0 in (self.row1, self.col1, self.row2, self.col2):
            raise IndexError("Attempted to access 0-based Range. xlwings/Excel Ranges are 1-based.")
        self.xl_range = xlplatform.get_range_from_indices(self.xl_sheet, self.row1, self.col1, self.row2, self.col2)

    def __iter__(self):
        # Iterator object that returns cell Ranges: (1, 1), (1, 2) etc.
        return map(lambda cell: Range(xlplatform.get_worksheet_name(self.xl_sheet), cell, **self.kwargs),
                   itertools.product(xrange(self.row1, self.row2 + 1), xrange(self.col1, self.col2 + 1)))

    def is_cell(self):
        """
        Returns ``True`` if the Range consists of a single Cell otherwise ``False``.

        .. versionadded:: 0.1.1
        """
        if self.row1 == self.row2 and self.col1 == self.col2:
            return True
        else:
            return False

    def is_row(self):
        """
        Returns ``True`` if the Range consists of a single Row otherwise ``False``.

        .. versionadded:: 0.1.1
        """
        if self.row1 == self.row2 and self.col1 != self.col2:
            return True
        else:
            return False

    def is_column(self):
        """
        Returns ``True`` if the Range consists of a single Column otherwise ``False``.

        .. versionadded:: 0.1.1
        """
        if self.row1 != self.row2 and self.col1 == self.col2:
            return True
        else:
            return False

    def is_table(self):
        """
        Returns ``True`` if the Range consists of a 2d array otherwise ``False``.

        .. versionadded:: 0.1.1
        """
        if self.row1 != self.row2 and self.col1 != self.col2:
            return True
        else:
            return False

    @property
    def shape(self):
        """
        Tuple of Range dimensions.

        .. versionadded:: 0.3.0
        """
        return self.row2 - self.row1 + 1, self.col2 - self.col1 + 1

    @property
    def size(self):
        """
        Number of elements in the Range.

        .. versionadded:: 0.3.0
        """
        return self.shape[0] * self.shape[1]

    def __len__(self):
        # Length is the number of rows, mirroring len() of a list of lists.
        return self.row2 - self.row1 + 1

    @property
    def value(self):
        """
        Gets and sets the values for the given Range.

        Returns
        -------
        list or numpy array
            Empty cells are set to ``None``. If ``asarray=True``,
            a numpy array is returned where empty cells are set to ``nan``.
        """
        # TODO: refactor
        if self.is_cell():
            # Clean_xl_data requires and returns a list of list
            data = xlplatform.clean_xl_data([[xlplatform.get_value_from_range(self.xl_range)]])
            if not self.atleast_2d:
                data = data[0][0]
        elif self.is_row():
            data = xlplatform.clean_xl_data(xlplatform.get_value_from_range(self.xl_range))
            if not self.atleast_2d:
                data = data[0]
        elif self.is_column():
            data = xlplatform.clean_xl_data(xlplatform.get_value_from_range(self.xl_range))
            if not self.atleast_2d:
                # Flatten the n x 1 list of lists into a flat list.
                data = [item for sublist in data for item in sublist]
        else:  # 2d Range, leave as list of list
            data = xlplatform.clean_xl_data(xlplatform.get_value_from_range(self.xl_range))

        # Return as NumPy Array
        if self.asarray:
            # replace None (empty cells) with nan as None produces arrays with dtype=object
            # TODO: easier like this: np.array(my_list, dtype=np.float)
            if data is None:
                data = np.nan
            if (self.is_column() or self.is_row()) and not self.atleast_2d:
                data = [np.nan if x is None else x for x in data]
            elif self.is_table() or self.atleast_2d:
                data = [[np.nan if x is None else x for x in i] for i in data]
            return np.atleast_1d(np.array(data))
        return data

    @value.setter
    def value(self, data):
        # Pandas DataFrame: Turn into NumPy object array with or without Index and Headers
        if pd and isinstance(data, pd.DataFrame):
            if self.index:
                if data.index.name in data.columns:
                    # Prevents column name collision when resetting the index
                    data.index.rename(None, inplace=True)
                data = data.reset_index()

            if self.header:
                if isinstance(data.columns, pd.MultiIndex):
                    # Ensure dtype=object because otherwise it may get assigned a string type which sometimes makes
                    # vstacking return a string array. This would cause values to be truncated and we can't easily
                    # transform np.nan in string form. Python 3 requires zip wrapped in list.
                    columns = np.array(list(zip(*data.columns.tolist())), dtype=object)
                else:
                    columns = np.empty((data.columns.shape[0],), dtype=object)
                    columns[:] = np.array([data.columns.tolist()])
                data = np.vstack((columns, data.values))
            else:
                data = data.values

        # Pandas Series
        if pd and isinstance(data, pd.Series):
            if self.index:
                data = data.reset_index().values
            else:
                # Make it a column vector so it writes vertically.
                data = data.values[:, np.newaxis]

        # NumPy array: nan have to be transformed to None, otherwise Excel shows them as 65535.
        # See: http://visualstudiomagazine.com/articles/2008/07/01/return-double-values-in-excel.aspx
        # Also, turn into list (Python 3 can't handle arrays directly)
        if np and isinstance(data, np.ndarray):
            try:
                data = np.where(np.isnan(data), None, data)
                data = data.tolist()
            except TypeError:
                # isnan doesn't work on arrays of dtype=object
                if pd:
                    data[pd.isnull(data)] = None
                    data = data.tolist()
                else:
                    # expensive way of replacing nan with None in object arrays in case Pandas is not available
                    data = [[None if isinstance(c, float) and np.isnan(c) else c for c in row] for row in data]

        # Simple Lists: Turn into list of lists (np.nan is part of numbers.Number)
        if isinstance(data, list) and (isinstance(data[0], (numbers.Number, string_types, time_types))
                                       or data[0] is None):
            data = [data]

        # Get dimensions and prepare data for Excel
        # TODO: refactor
        if isinstance(data, (numbers.Number, string_types, time_types)) or data is None:
            # Single cells
            row2 = self.row2
            col2 = self.col2
            data = xlplatform.prepare_xl_data([[data]])[0][0]
            try:
                # scalar np.nan need to be turned into None, otherwise Excel shows it as 65535 (same as for NumPy array)
                if np and np.isnan(data):
                    data = None
            except (TypeError, NotImplementedError):
                # raised if data is not a np.nan.
                # NumPy < 1.7.0 raises NotImplementedError, >= 1.7.0 raises TypeError
                pass
        else:
            # List of List: target range is sized from the data, anchored at
            # (row1, col1) — i.e. the Range's own row2/col2 are ignored here.
            row2 = self.row1 + len(data) - 1
            col2 = self.col1 + len(data[0]) - 1
            data = xlplatform.prepare_xl_data(data)

        xlplatform.set_value(xlplatform.get_range_from_indices(self.xl_sheet,
                                                               self.row1, self.col1, row2, col2), data)

    @property
    def formula(self):
        """
        Gets or sets the formula for the given Range.
        """
        return xlplatform.get_formula(self.xl_range)

    @formula.setter
    def formula(self, value):
        xlplatform.set_formula(self.xl_range, value)

    @property
    def table(self):
        """
        Returns a contiguous Range starting with the indicated cell as top-left corner and going down and right as
        long as no empty cell is hit.

        Keyword Arguments
        -----------------
        strict : boolean, default False
            ``True`` stops the table at empty cells even if they contain a formula. Less efficient than if set to
            ``False``.

        Returns
        -------
        Range object

        Examples
        --------
        To get the values of a contiguous range or clear its contents use::

            Range('A1').table.value
            Range('A1').table.clear_contents()
        """
        # Bottom-right corner is the intersection of the vertical and
        # horizontal expansions from the top-left cell.
        row2 = Range(xlplatform.get_worksheet_name(self.xl_sheet),
                     (self.row1, self.col1), **self.kwargs).vertical.row2
        col2 = Range(xlplatform.get_worksheet_name(self.xl_sheet),
                     (self.row1, self.col1), **self.kwargs).horizontal.col2
        return Range(xlplatform.get_worksheet_name(self.xl_sheet),
                     (self.row1, self.col1), (row2, col2), **self.kwargs)

    @property
    def vertical(self):
        """
        Returns a contiguous Range starting with the indicated cell and going down as long as no empty cell is hit.
        This corresponds to ``Ctrl-Shift-DownArrow`` in Excel.

        Arguments
        ---------
        strict : bool, default False
            ``True`` stops the table at empty cells even if they contain a formula. Less efficient than if set to
            ``False``.

        Returns
        -------
        Range object

        Examples
        --------
        To get the values of a contiguous range or clear its contents use::

            Range('A1').vertical.value
            Range('A1').vertical.clear_contents()
        """
        # A single cell is a special case as End(xlDown) jumps over adjacent empty cells
        if xlplatform.get_value_from_index(self.xl_sheet, self.row1 + 1, self.col1) in [None, ""]:
            row2 = self.row1
        else:
            row2 = xlplatform.get_row_index_end_down(self.xl_sheet, self.row1, self.col1)

        # Strict stops at cells that contain a formula but show an empty value
        if self.strict:
            # Re-scan cell by cell (slower, but treats ""-valued formula
            # cells as empty).
            row2 = self.row1
            while xlplatform.get_value_from_index(self.xl_sheet, row2 + 1, self.col1) not in [None, ""]:
                row2 += 1

        col2 = self.col2

        return Range(xlplatform.get_worksheet_name(self.xl_sheet),
                     (self.row1, self.col1), (row2, col2), **self.kwargs)

    @property
    def horizontal(self):
        """
        Returns a contiguous Range starting with the indicated cell and going right as long as no empty cell is hit.

        Keyword Arguments
        -----------------
        strict : bool, default False
            ``True`` stops the table at empty cells even if they contain a formula. Less efficient than if set to
            ``False``.

        Returns
        -------
        Range object

        Examples
        --------
        To get the values of a contiguous Range or clear its contents use::

            Range('A1').horizontal.value
            Range('A1').horizontal.clear_contents()
        """
        # A single cell is a special case as End(xlToRight) jumps over adjacent empty cells
        if xlplatform.get_value_from_index(self.xl_sheet, self.row1, self.col1 + 1) in [None, ""]:
            col2 = self.col1
        else:
            col2 = xlplatform.get_column_index_end_right(self.xl_sheet, self.row1, self.col1)

        # Strict: stops at cells that contain a formula but show an empty value
        if self.strict:
            col2 = self.col1
            while xlplatform.get_value_from_index(self.xl_sheet, self.row1, col2 + 1) not in [None, ""]:
                col2 += 1

        row2 = self.row2

        return Range(xlplatform.get_worksheet_name(self.xl_sheet),
                     (self.row1, self.col1), (row2, col2), **self.kwargs)

    @property
    def current_region(self):
        """
        This property returns a Range object representing a range bounded by (but not including) any
        combination of blank rows and blank columns or the edges of the worksheet. It corresponds to ``Ctrl-*`` on
        Windows and ``Shift-Ctrl-Space`` on Mac.

        Returns
        -------
        Range object
        """
        address = xlplatform.get_current_region_address(self.xl_sheet, self.row1, self.col1)
        return Range(xlplatform.get_worksheet_name(self.xl_sheet), address, **self.kwargs)

    @property
    def number_format(self):
        """
        Gets and sets the number_format of a Range.

        Examples
        --------
        >>> Range('A1').number_format
        'General'
        >>> Range('A1:C3').number_format = '0.00%'
        >>> Range('A1:C3').number_format
        '0.00%'

        .. versionadded:: 0.2.3
        """
        return xlplatform.get_number_format(self)

    @number_format.setter
    def number_format(self, value):
        xlplatform.set_number_format(self, value)

    def clear(self):
        """
        Clears the content and the formatting of a Range.
        """
        xlplatform.clear_range(self.xl_range)

    def clear_contents(self):
        """
        Clears the content of a Range but leaves the formatting.
        """
        xlplatform.clear_contents_range(self.xl_range)

    @property
    def column_width(self):
        """
        Gets or sets the width, in characters, of a Range.
        One unit of column width is equal to the width of one character in the Normal style.
        For proportional fonts, the width of the character 0 (zero) is used.

        If all columns in the Range have the same width, returns the width.
        If columns in the Range have different widths, returns None.

        column_width must be in the range:
        0 <= column_width <= 255

        Note: If the Range is outside the used range of the Worksheet, and columns in the Range have different widths,
        returns the width of the first column.

        Returns
        -------
        float

        .. versionadded:: 0.4.0
        """
        return xlplatform.get_column_width(self.xl_range)

    @column_width.setter
    def column_width(self, value):
        xlplatform.set_column_width(self.xl_range, value)

    @property
    def row_height(self):
        """
        Gets or sets the height, in points, of a Range.
        If all rows in the Range have the same height, returns the height.
        If rows in the Range have different heights, returns None.

        row_height must be in the range:
        0 <= row_height <= 409.5

        Note: If the Range is outside the used range of the Worksheet, and rows in the Range have different heights,
        returns the height of the first row.

        Returns
        -------
        float

        .. versionadded:: 0.4.0
        """
        return xlplatform.get_row_height(self.xl_range)

    @row_height.setter
    def row_height(self, value):
        xlplatform.set_row_height(self.xl_range, value)

    @property
    def width(self):
        """
        Returns the width, in points, of a Range. Read-only.

        Returns
        -------
        float

        .. versionadded:: 0.4.0
        """
        return xlplatform.get_width(self.xl_range)

    @property
    def height(self):
        """
        Returns the height, in points, of a Range. Read-only.

        Returns
        -------
        float

        .. versionadded:: 0.4.0
        """
        return xlplatform.get_height(self.xl_range)

    @property
    def left(self):
        """
        Returns the distance, in points, from the left edge of column A to the left edge of the range. Read-only.

        Returns
        -------
        float

        .. versionadded:: 0.6.0
        """
        return xlplatform.get_left(self.xl_range)

    @property
    def top(self):
        """
        Returns the distance, in points, from the top edge of row 1 to the top edge of the range. Read-only.

        Returns
        -------
        float

        .. versionadded:: 0.6.0
        """
        return xlplatform.get_top(self.xl_range)

    def autofit(self, axis=None):
        """
        Autofits the width of either columns, rows or both.

        Arguments
        ---------
        axis : string or integer, default None
            - To autofit rows, use one of the following: ``rows`` or ``r``
            - To autofit columns, use one of the following: ``columns`` or ``c``
            - To autofit rows and columns, provide no arguments

        Examples
        --------
        ::

            # Autofit column A
            Range('A:A').autofit('c')
            # Autofit row 1
            Range('1:1').autofit('r')
            # Autofit columns and rows, taking into account Range('A1:E4')
            Range('A1:E4').autofit()
            # AutoFit rows, taking into account Range('A1:E4')
            Range('A1:E4').autofit('rows')

        .. versionadded:: 0.2.2
        """
        xlplatform.autofit(self, axis)

    def get_address(self, row_absolute=True, column_absolute=True, include_sheetname=False, external=False):
        """
        Returns the address of the range in the specified format.

        Arguments
        ---------
        row_absolute : bool, default True
            Set to True to return the row part of the reference as an absolute reference.

        column_absolute : bool, default True
            Set to True to return the column part of the reference as an absolute reference.

        include_sheetname : bool, default False
            Set to True to include the Sheet name in the address. Ignored if external=True.

        external : bool, default False
            Set to True to return an external reference with workbook and worksheet name.

        Returns
        -------
        str

        Examples
        --------
        ::

            >>> Range((1,1)).get_address()
            '$A$1'
            >>> Range((1,1)).get_address(False, False)
            'A1'
            >>> Range('Sheet1', (1,1), (3,3)).get_address(True, False, True)
            'Sheet1!A$1:C$3'
            >>> Range('Sheet1', (1,1), (3,3)).get_address(True, False, external=True)
            '[Workbook1]Sheet1!A$1:C$3'

        .. versionadded:: 0.2.3
        """
        if include_sheetname and not external:
            # TODO: when the Workbook name contains spaces but not the Worksheet name, it will still be surrounded
            # by '' when include_sheetname=True. Also, should probably changed to regex
            # Ask the platform for the external form, then strip the
            # '[Workbook]' prefix to keep only 'Sheet!Address'.
            temp_str = xlplatform.get_address(self.xl_range, row_absolute, column_absolute, True)

            if temp_str.find("[") > -1:
                results_address = temp_str[temp_str.rfind("]") + 1:]
                if results_address.find("'") > -1:
                    results_address = "'" + results_address
                return results_address
            else:
                return temp_str
        else:
            return xlplatform.get_address(self.xl_range, row_absolute, column_absolute, external)

    def __repr__(self):
        return "<Range on Sheet '{0}' of Workbook '{1}'>".format(xlplatform.get_worksheet_name(self.xl_sheet),
                                                                 xlplatform.get_workbook_name(self.xl_workbook))

    @property
    def hyperlink(self):
        """
        Returns the hyperlink address of the specified Range (single Cell only)

        Examples
        --------
        >>> Range('A1').value
        'www.xlwings.org'
        >>> Range('A1').hyperlink
        'http://www.xlwings.org'

        .. versionadded:: 0.3.0
        """
        if self.formula.lower().startswith('='):
            # If it's a formula, extract the URL from the formula string
            formula = self.formula
            try:
                return re.compile(r'\"(.+?)\"').search(formula).group(1)
            except AttributeError:
                raise Exception("The cell doesn't seem to contain a hyperlink!")
        else:
            # If it has been set programmatically
            return xlplatform.get_hyperlink_address(self.xl_range)

    def add_hyperlink(self, address, text_to_display=None, screen_tip=None):
        """
        Adds a hyperlink to the specified Range (single Cell)

        Arguments
        ---------
        address : str
            The address of the hyperlink.

        text_to_display : str, default None
            The text to be displayed for the hyperlink. Defaults to the hyperlink address.

        screen_tip: str, default None
            The screen tip to be displayed when the mouse pointer is paused over the hyperlink.
            Default is set to '<address> - Click once to follow. Click and hold to select this cell.'

        .. versionadded:: 0.3.0
        """
        if text_to_display is None:
            text_to_display = address
        if address[:4] == 'www.':
            # Bare 'www.' addresses need a scheme to be clickable.
            address = 'http://' + address
        if screen_tip is None:
            screen_tip = address + ' - Click once to follow. Click and hold to select this cell.'
        xlplatform.set_hyperlink(self.xl_range, address, text_to_display, screen_tip)

    @property
    def color(self):
        """
        Gets and sets the background color of the specified Range.

        To set the color, either use an RGB tuple ``(0, 0, 0)`` or a color constant.
        To remove the background, set the color to ``None``, see Examples.

        Returns
        -------
        RGB : tuple

        Examples
        --------
        >>> Range('A1').color = (255,255,255)
        >>> from xlwings import RgbColor
        >>> Range('A2').color = RgbColor.rgbAqua
        >>> Range('A2').color
        (0, 255, 255)
        >>> Range('A2').color = None
        >>> Range('A2').color is None
        True

        .. versionadded:: 0.3.0
        """
        return xlplatform.get_color(self.xl_range)

    @color.setter
    def color(self, color_or_rgb):
        xlplatform.set_color(self.xl_range, color_or_rgb)

    def resize(self, row_size=None, column_size=None):
        """
        Resizes the specified Range

        Arguments
        ---------
        row_size: int > 0
            The number of rows in the new range (if None, the number of rows in the range is unchanged).

        column_size: int > 0
            The number of columns in the new range (if None, the number of columns in the range is unchanged).

        Returns
        -------
        Range : Range object

        .. versionadded:: 0.3.0
        """
        if row_size is not None:
            assert row_size > 0
            row2 = self.row1 + row_size - 1
        else:
            row2 = self.row2
        if column_size is not None:
            assert column_size > 0
            col2 = self.col1 + column_size - 1
        else:
            col2 = self.col2

        return Range(xlplatform.get_worksheet_name(self.xl_sheet), (self.row1, self.col1), (row2, col2), **self.kwargs)

    def offset(self, row_offset=None, column_offset=None):
        """
        Returns a Range object that represents a Range that's offset from the specified range.

        Returns
        -------
        Range : Range object

        .. versionadded:: 0.3.0
        """
        if row_offset:
            row1 = self.row1 + row_offset
            row2 = self.row2 + row_offset
        else:
            row1, row2 = self.row1, self.row2

        if column_offset:
            col1 = self.col1 + column_offset
            col2 = self.col2 + column_offset
        else:
            col1, col2 = self.col1, self.col2

        return Range(xlplatform.get_worksheet_name(self.xl_sheet), (row1, col1), (row2, col2), **self.kwargs)

    @property
    def column(self):
        """
        Returns the number of the first column in the in the specified range. Read-only.

        Returns
        -------
        Integer

        .. versionadded:: 0.3.5
        """
        return self.col1

    @property
    def row(self):
        """
        Returns the number of the first row in the in the specified range. Read-only.

        Returns
        -------
        Integer

        .. versionadded:: 0.3.5
        """
        return self.row1

    @property
    def last_cell(self):
        """
        Returns the bottom right cell of the specified range. Read-only.

        Returns
        -------
        Range object

        Example
        -------
        >>> rng = Range('A1').table
        >>> rng.last_cell.row, rng.last_cell.column
        (4, 5)

        .. versionadded:: 0.3.5
        """
        return Range(xlplatform.get_worksheet_name(self.xl_sheet),
                     (self.row2, self.col2), **self.kwargs)

    @property
    def name(self):
        """
        Sets or gets the name of a Range.

        To delete a named Range, use ``del wb.names['NamedRange']`` if ``wb`` is
        your Workbook object.

        .. versionadded:: 0.4.0
        """
        return xlplatform.get_named_range(self)

    @name.setter
    def name(self, value):
        xlplatform.set_named_range(self, value)
class Shape(object):
    """
    A Shape object represents an existing Excel shape and can be instantiated with the following arguments::

        Shape(1)            Shape('Sheet1', 1)          Shape(1, 1)
        Shape('Shape 1')    Shape('Sheet1', 'Shape 1')  Shape(1, 'Shape 1')

    The Sheet can also be provided as Sheet object::

        sh = Sheet(1)
        Shape(sh, 'Shape 1')

    If no Worksheet is provided as first argument, it will take the Shape from the active Sheet.

    Arguments
    ---------
    *args
        Definition of Sheet (optional) and shape in the above described combinations.

    Keyword Arguments
    -----------------
    wkb : Workbook object, default Workbook.current()
        Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.

    .. versionadded:: 0.5.0
    """
    def __init__(self, *args, **kwargs):
        # Use current Workbook if none provided
        self.wkb = kwargs.get('wkb', None)
        self.xl_workbook = Workbook.get_xl_workbook(self.wkb)

        # Arguments: shape only (resolved against the active sheet), or
        # sheet + shape, where the shape may be given by name or index.
        if len(args) == 1:
            self.sheet_name_or_index = xlplatform.get_worksheet_name(xlplatform.get_active_sheet(self.xl_workbook))
            self.name_or_index = args[0]
        elif len(args) == 2:
            if isinstance(args[0], Sheet):
                self.sheet_name_or_index = args[0].index
            else:
                self.sheet_name_or_index = args[0]
            self.name_or_index = args[1]

        self.xl_shape = xlplatform.get_shape(self)
        # NOTE(review): this assignment goes through the ``name`` property
        # setter below, i.e. it re-applies the shape's current name and
        # refreshes self.xl_shape rather than merely caching the name.
        self.name = xlplatform.get_shape_name(self)

    @property
    def name(self):
        """
        Returns or sets a String value representing the name of the object.

        .. versionadded:: 0.5.0
        """
        return xlplatform.get_shape_name(self)

    @name.setter
    def name(self, value):
        self.xl_shape = xlplatform.set_shape_name(self.xl_workbook, self.sheet_name_or_index, self.xl_shape, value)

    @property
    def left(self):
        """
        Returns or sets a value that represents the distance, in points, from the left edge of the object to the
        left edge of column A.

        .. versionadded:: 0.5.0
        """
        return xlplatform.get_shape_left(self)

    @left.setter
    def left(self, value):
        xlplatform.set_shape_left(self, value)

    @property
    def top(self):
        """
        Returns or sets a value that represents the distance, in points, from the top edge of the topmost shape
        in the shape range to the top edge of the worksheet.

        .. versionadded:: 0.5.0
        """
        return xlplatform.get_shape_top(self)

    @top.setter
    def top(self, value):
        xlplatform.set_shape_top(self, value)

    @property
    def width(self):
        """
        Returns or sets a value that represents the width, in points, of the object.

        .. versionadded:: 0.5.0
        """
        return xlplatform.get_shape_width(self)

    @width.setter
    def width(self, value):
        xlplatform.set_shape_width(self, value)

    @property
    def height(self):
        """
        Returns or sets a value that represents the height, in points, of the object.

        .. versionadded:: 0.5.0
        """
        return xlplatform.get_shape_height(self)

    @height.setter
    def height(self, value):
        xlplatform.set_shape_height(self, value)

    def delete(self):
        """
        Deletes the object.

        .. versionadded:: 0.5.0
        """
        xlplatform.delete_shape(self)

    def activate(self):
        """
        Activates the object.

        .. versionadded:: 0.5.0
        """
        xlplatform.activate_shape(self.xl_shape)
class Chart(Shape):
"""
A Chart object represents an existing Excel chart and can be instantiated with the following arguments::
Chart(1) Chart('Sheet1', 1) Chart(1, 1)
Chart('Chart 1') Chart('Sheet1', 'Chart 1') Chart(1, 'Chart 1')
The Sheet can also be provided as Sheet object::
sh = Sheet(1)
Chart(sh, 'Chart 1')
If no Worksheet is provided as first argument, it will take the Chart from the active Sheet.
To insert a new Chart into Excel, create it as follows::
Chart.add()
Arguments
---------
*args
Definition of Sheet (optional) and chart in the above described combinations.
Keyword Arguments
-----------------
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
Example
-------
>>> from xlwings import Workbook, Range, Chart, ChartType
>>> wb = Workbook()
>>> Range('A1').value = [['Foo1', 'Foo2'], [1, 2]]
>>> chart = Chart.add(source_data=Range('A1').table, chart_type=ChartType.xlLine)
>>> chart.name
'Chart1'
>>> chart.chart_type = ChartType.xl3DArea
"""
def __init__(self, *args, **kwargs):
super(Chart, self).__init__(*args, **kwargs)
# Get xl_chart object
self.xl_chart = xlplatform.get_chart_object(self.xl_workbook, self.sheet_name_or_index, self.name_or_index)
self.index = xlplatform.get_chart_index(self.xl_chart)
# Chart Type
chart_type = kwargs.get('chart_type')
if chart_type:
self.chart_type = chart_type
# Source Data
source_data = kwargs.get('source_data')
if source_data:
self.set_source_data(source_data)
@classmethod
def add(cls, sheet=None, left=0, top=0, width=355, height=211, **kwargs):
"""
Inserts a new Chart into Excel.
Arguments
---------
sheet : str or int or xlwings.Sheet, default None
Name or index of the Sheet or Sheet object, defaults to the active Sheet
left : float, default 0
left position in points
top : float, default 0
top position in points
width : float, default 375
width in points
height : float, default 225
height in points
Keyword Arguments
-----------------
chart_type : xlwings.ChartType member, default xlColumnClustered
Excel chart type. E.g. xlwings.ChartType.xlLine
name : str, default None
Excel chart name. Defaults to Excel standard name if not provided, e.g. 'Chart 1'
source_data : Range
e.g. Range('A1').table
wkb : Workbook object, default Workbook.current()
Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
"""
wkb = kwargs.get('wkb', None)
xl_workbook = Workbook.get_xl_workbook(wkb)
chart_type = kwargs.get('chart_type', ChartType.xlColumnClustered)
name = kwargs.get('name')
source_data = kwargs.get('source_data')
if isinstance(sheet, Sheet):
sheet = sheet.index
if sheet is None:
sheet = xlplatform.get_worksheet_index(xlplatform.get_active_sheet(xl_workbook))
xl_chart = xlplatform.add_chart(xl_workbook, sheet, left, top, width, height)
if name:
xlplatform.set_chart_name(xl_chart, name)
else:
name = xlplatform.get_chart_name(xl_chart)
return cls(sheet, name, wkb=wkb, chart_type=chart_type, source_data=source_data)
@property
def chart_type(self):
"""
Gets and sets the chart type of a chart.
.. versionadded:: 0.1.1
"""
return xlplatform.get_chart_type(self.xl_chart)
    @chart_type.setter
    def chart_type(self, value):
        # value is expected to be a ChartType member (see the getter's docstring).
        xlplatform.set_chart_type(self.xl_chart, value)
    def set_source_data(self, source):
        """
        Sets the source for the chart.
        Arguments
        ---------
        source : Range
            Range object, e.g. ``Range('A1')``
        """
        # Only the underlying platform range handle is passed on.
        xlplatform.set_source_data_chart(self.xl_chart, source.xl_range)
    def __repr__(self):
        # Identify the chart by its own name, its sheet's name and its workbook's name.
        return "<Chart '{0}' on Sheet '{1}' of Workbook '{2}'>".format(self.name,
                                                                       Sheet(self.sheet_name_or_index).name,
                                                                       xlplatform.get_workbook_name(self.xl_workbook))
class Picture(Shape):
    """
    A Picture object represents an existing Excel Picture and can be instantiated with the following arguments::
        Picture(1)              Picture('Sheet1', 1)            Picture(1, 1)
        Picture('Picture 1')    Picture('Sheet1', 'Picture 1')  Picture(1, 'Picture 1')
    The Sheet can also be provided as Sheet object::
        sh = Sheet(1)
        Picture(sh, 'Picture 1')
    If no Worksheet is provided as first argument, it will take the Picture from the active Sheet.
    Arguments
    ---------
    *args
        Definition of Sheet (optional) and picture in the above described combinations.
    Keyword Arguments
    -----------------
    wkb : Workbook object, default Workbook.current()
        Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
    .. versionadded:: 0.5.0
    """
    def __init__(self, *args, **kwargs):
        # Shape.__init__ resolves sheet/name from *args; afterwards the
        # platform-specific picture handle and its index are cached.
        super(Picture, self).__init__(*args, **kwargs)
        self.xl_picture = xlplatform.get_picture(self)
        self.index = xlplatform.get_picture_index(self)

    @classmethod
    def add(cls, filename, sheet=None, name=None, link_to_file=False, save_with_document=True,
            left=0, top=0, width=None, height=None, wkb=None):
        """
        Inserts a picture into Excel.
        Arguments
        ---------
        filename : str
            The full path to the file.
        Keyword Arguments
        -----------------
        sheet : str or int or xlwings.Sheet, default None
            Name or index of the Sheet or ``xlwings.Sheet`` object, defaults to the active Sheet
        name : str, default None
            Excel picture name. Defaults to Excel standard name if not provided, e.g. 'Picture 1'
        left : float, default 0
            Left position in points.
        top : float, default 0
            Top position in points.
        width : float, default None
            Width in points. If PIL/Pillow is installed, it defaults to the width of the picture.
            Otherwise it defaults to 100 points.
        height : float, default None
            Height in points. If PIL/Pillow is installed, it defaults to the height of the picture.
            Otherwise it defaults to 100 points.
        wkb : Workbook object, default Workbook.current()
            Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
        .. versionadded:: 0.5.0
        """
        xl_workbook = Workbook.get_xl_workbook(wkb)
        if isinstance(sheet, Sheet):
            sheet = sheet.index
        if sheet is None:
            sheet = xlplatform.get_worksheet_index(xlplatform.get_active_sheet(xl_workbook))
        if name:
            # Fail early: Excel would otherwise silently create a second shape.
            if name in xlplatform.get_shapes_names(xl_workbook, sheet):
                raise ShapeAlreadyExists('A shape with this name already exists.')
        if sys.platform.startswith('darwin') and xlplatform.get_major_app_version_number(xl_workbook) >= 15:
            # Office 2016 for Mac is sandboxed. This path seems to work without the need of granting access explicitly
            xlwings_picture = os.path.expanduser("~") + '/Library/Containers/com.microsoft.Excel/Data/xlwings_picture.png'
            shutil.copy2(filename, xlwings_picture)
            filename = xlwings_picture
        # Image dimensions: read from the file via PIL when available
        # (Image is presumably None when PIL/Pillow is not installed — see the
        # module's import section).
        im_width, im_height = None, None
        if width is None or height is None:
            if Image:
                im = Image.open(filename)
                im_width, im_height = im.size
        if width is None:
            if im_width is not None:
                width = im_width
            else:
                width = 100
        if height is None:
            if im_height is not None:
                height = im_height
            else:
                height = 100
        xl_picture = xlplatform.add_picture(xl_workbook, sheet, filename, link_to_file, save_with_document,
                                            left, top, width, height)
        if sys.platform.startswith('darwin') and xlplatform.get_major_app_version_number(xl_workbook) >= 15:
            # Clean up the sandbox-copied temp image created above.
            os.remove(xlwings_picture)
        if name is None:
            name = xlplatform.get_picture_name(xl_picture)
        else:
            xlplatform.set_shape_name(xl_workbook, sheet, xl_picture, name)
        return cls(sheet, name, wkb=wkb)

    def update(self, filename):
        """
        Replaces an existing picture with a new one, taking over the attributes of the existing picture.
        Arguments
        ---------
        filename : str
            Path to the picture.
        .. versionadded:: 0.5.0
        """
        # Remember position/size/name, delete the old shape, re-add in place.
        wkb = self.wkb
        name = self.name
        left, top, width, height = self.left, self.top, self.width, self.height
        sheet_name_or_index = self.sheet_name_or_index
        xlplatform.delete_shape(self)
        # TODO: link_to_file, save_with_document
        Picture.add(filename, sheet=sheet_name_or_index, left=left, top=top, width=width, height=height,
                    name=name, wkb=wkb)
class Plot(object):
    """
    Plot allows to easily display Matplotlib figures as pictures in Excel.
    Arguments
    ---------
    figure : matplotlib.figure.Figure
        Matplotlib figure
    Example
    -------
    Get a matplotlib ``figure`` object:
    * via PyPlot interface::
        import matplotlib.pyplot as plt
        fig = plt.figure()
        plt.plot([1, 2, 3, 4, 5])
    * via object oriented interface::
        from matplotlib.figure import Figure
        fig = Figure(figsize=(8, 6))
        ax = fig.add_subplot(111)
        ax.plot([1, 2, 3, 4, 5])
    * via Pandas::
        import pandas as pd
        import numpy as np
        df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
        ax = df.plot(kind='bar')
        fig = ax.get_figure()
    Then show it in Excel as picture::
        plot = Plot(fig)
        plot.show('Plot1')
    .. versionadded:: 0.5.0
    """
    def __init__(self, figure):
        # The figure is only rendered when show() is called.
        self.figure = figure

    def show(self, name, sheet=None, left=0, top=0, width=None, height=None, wkb=None):
        """
        Inserts the matplotlib figure as picture into Excel if a picture with that name doesn't exist yet.
        Otherwise it replaces the picture, taking over its position and size.
        Arguments
        ---------
        name : str
            Name of the picture in Excel
        Keyword Arguments
        -----------------
        sheet : str or int or xlwings.Sheet, default None
            Name or index of the Sheet or ``xlwings.Sheet`` object, defaults to the active Sheet
        left : float, default 0
            Left position in points. Only has an effect if the picture doesn't exist yet in Excel.
        top : float, default 0
            Top position in points. Only has an effect if the picture doesn't exist yet in Excel.
        width : float, default None
            Width in points, defaults to the width of the matplotlib figure.
            Only has an effect if the picture doesn't exist yet in Excel.
        height : float, default None
            Height in points, defaults to the height of the matplotlib figure.
            Only has an effect if the picture doesn't exist yet in Excel.
        wkb : Workbook object, default Workbook.current()
            Defaults to the Workbook that was instantiated last or set via ``Workbook.set_current()``.
        .. versionadded:: 0.5.0
        """
        xl_workbook = Workbook.get_xl_workbook(wkb)
        if isinstance(sheet, Sheet):
            sheet = sheet.index
        if sheet is None:
            sheet = xlplatform.get_worksheet_index(xlplatform.get_active_sheet(xl_workbook))
        if sys.platform.startswith('darwin') and xlplatform.get_major_app_version_number(xl_workbook) >= 15:
            # Office 2016 for Mac is sandboxed. This path seems to work without the need of granting access explicitly
            filename = os.path.expanduser("~") + '/Library/Containers/com.microsoft.Excel/Data/xlwings_plot.png'
        else:
            temp_dir = os.path.realpath(tempfile.gettempdir())
            filename = os.path.join(temp_dir, 'xlwings_plot.png')
        # Render the figure to a PNG that Excel can import.
        canvas = FigureCanvas(self.figure)
        canvas.draw()
        self.figure.savefig(filename, format='png', bbox_inches='tight')
        if width is None:
            # bbox.bounds is (x0, y0, width, height) — take width.
            width = self.figure.bbox.bounds[2:][0]
        if height is None:
            height = self.figure.bbox.bounds[2:][1]
        try:
            return Picture.add(sheet=sheet, filename=filename, left=left, top=top, width=width,
                               height=height, name=name, wkb=wkb)
        except ShapeAlreadyExists:
            # Picture of that name exists: replace its image, keep its geometry.
            pic = Picture(sheet, name, wkb=wkb)
            pic.update(filename)
            return pic
        finally:
            # The PNG is only a transfer vehicle; always remove it.
            os.remove(filename)
class NamesDict(collections.MutableMapping):
    """
    Implements the Workbook.Names collection.
    Currently only used to be able to do ``del wb.names['NamedRange']``
    """
    # NOTE(review): collections.MutableMapping is an alias removed in Python
    # 3.10 (moved to collections.abc) — confirm the supported Python range.
    def __init__(self, xl_workbook, *args, **kwargs):
        self.xl_workbook = xl_workbook
        self.store = dict()
        # MutableMapping.update routes through __setitem__ below.
        self.update(dict(*args, **kwargs))

    def __getitem__(self, key):
        return self.store[self.__keytransform__(key)]

    def __setitem__(self, key, value):
        self.store[self.__keytransform__(key)] = value

    def __delitem__(self, key):
        # Deletes the name in the Excel workbook itself.
        # NOTE(review): the local `store` cache is not updated here — confirm
        # that is intentional (the docstring suggests only `del` is supported).
        xlplatform.delete_name(self.xl_workbook, key)

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def __keytransform__(self, key):
        # Identity transform; hook point for subclasses.
        return key
|
"""Kiran the Discow Bot."""
import asyncio
import os
import re
import subprocess
import tempfile
import traceback
import discord
from discord.ext import commands
# import sympy
# from sympy.parsing import sympy_parser
from dotenv import load_dotenv
from gtts import gTTS
import c4board
load_dotenv()  # pull configuration (e.g. KIRAN_TOKEN) from a local .env file
# One compiled case-insensitive pattern per line of bad_words.txt.
with open("bad_words.txt") as bad_words_file:
    BAD_WORDS = [
        re.compile(line, re.IGNORECASE) for line in bad_words_file.read().splitlines()
    ]
# Matches channel names that look like a "wall of shame" with any separators.
SHAME_CHANNEL_PATTERN = re.compile(r".*wall.*of.*shame.*", re.DOTALL | re.IGNORECASE)
# Voice playback needs the opus codec; the soname here is Linux-specific.
discord.opus.load_opus("libopus.so.0")
intents = discord.Intents.default()
intents.members = True  # privileged members intent — presumably needed for member lookups; confirm
bot = commands.Bot(intents=intents, command_prefix="!")
async def send_block(destination, content):
    """Send a block of text, splitting into multiple code blocks if necessary."""
    pages = commands.Paginator()
    try:
        pages.add_line(content)
    except RuntimeError:
        # Too long for one entry: feed the paginator line by line instead.
        for single_line in content.splitlines():
            pages.add_line(single_line)
    for chunk in pages.pages:
        await destination.send(chunk)
@bot.event
async def on_ready():
    """Indicate that we have successfully logged in."""
    print(f"Logged in as {bot.user}")
# In-memory task store: maps a discord.Guild to its list of task strings.
tasks = {}
@bot.command()
async def hello(ctx):
    """Say hello."""
    greeting = "Hello, {}!".format(ctx.author.display_name)
    await ctx.send(greeting)
@bot.group()
async def task(ctx):
    """Manage tasks."""
    # Runs before every subcommand: make sure this guild has a task list.
    tasks.setdefault(ctx.guild, [])
@task.command()
async def add(ctx, *, new_task: commands.clean_content):
    """Add a new task."""
    tasks[ctx.guild].append(new_task)
    await ctx.send("Added task " + new_task)
    mentioned = ctx.message.mentions
    if mentioned:
        ping = " ".join(user.mention for user in mentioned)
        await ctx.send(ping + " You have a new task!")
@task.command(name="list")
async def list_(ctx):
"""List tasks."""
if len(tasks[ctx.guild]) == 0:
await ctx.send("There are no tasks. Yay!")
else:
await ctx.send(
"\n".join(f"{i + 1}. {task}" for i, task in enumerate(tasks[ctx.guild]))
)
@task.command()
async def remove(ctx, task_index: int):
    """Remove task specified by its index."""
    # Reject non-positive indices explicitly: with a plain list.pop,
    # "!task remove 0" would delete the *last* task via Python's
    # negative indexing instead of reporting an error.
    if task_index < 1:
        await ctx.send("No such task")
        return
    try:
        tsk = tasks[ctx.guild].pop(task_index - 1)
    except IndexError:
        await ctx.send("No such task")
    else:
        await ctx.send("Deleted task " + tsk)
@task.command()
async def clear(ctx):
    """Remove all tasks."""
    del tasks[ctx.guild][:]
    await ctx.send("Cleared tasks")
@bot.command()
async def say(ctx, *, message):
    """Echo the given message."""
    # NOTE(review): unlike the task/speak commands, `message` is not converted
    # with commands.clean_content, so raw mentions pass through — confirm intended.
    await ctx.send(message)
@bot.command()
async def dance(ctx):
    """Send a dancing cow GIF."""
    # dance.gif is expected next to this script in the working directory.
    await ctx.send(file=discord.File("dance.gif"))
@bot.command()
async def skateboard(ctx):
    """Send a skateboarding cow GIF."""
    # skateboard.gif is expected next to this script in the working directory.
    await ctx.send(file=discord.File("skateboard.gif"))
# @bot.command(name='sp')
# async def eval_sympy(ctx, *, expression):
# """Evaluate a SymPy math expression."""
# try:
# result = sympy_parser.parse_expr(
# expression,
# transformations=sympy_parser.standard_transformations +
# (sympy_parser.implicit_multiplication_application,
# sympy_parser.rationalize, sympy_parser.convert_xor))
# except:
# await send_block(ctx, traceback.format_exc())
# else:
# await send_block(ctx, sympy.pretty(result))
async def _joinvoice(voice_client, channel):
    """Connect to *channel*, or move an existing voice client over to it."""
    if voice_client is None:
        await channel.connect()
        return
    # Stop any current playback before switching channels.
    if voice_client.is_playing():
        voice_client.stop()
    await voice_client.move_to(channel)
async def _speak(ctx, lang, tld, message):
    """Join the caller's voice channel and speak *message* via Google TTS.

    lang is a two-letter language code; tld selects the Google domain
    (and therefore the accent) used by gTTS.
    """
    if not ctx.author.voice:
        await ctx.send("You must be in a voice channel in order to use this command.")
        return
    await _joinvoice(ctx.voice_client, ctx.author.voice.channel)
    # The MP3 is buffered through an anonymous temp file and piped to ffmpeg.
    temp_file = tempfile.TemporaryFile()
    tts = gTTS(message, lang=lang, tld=tld)
    tts.write_to_fp(temp_file)
    temp_file.seek(0)
    source = discord.FFmpegPCMAudio(temp_file, pipe=True)
    ctx.voice_client.play(source)
@bot.command()
async def speak(ctx, *, message: commands.clean_content):
    """Speak the given message."""
    # English, default ("com") accent.
    await _speak(ctx, "en", "com", message)
@bot.command()
async def speaklang(ctx, language, *, message: commands.clean_content):
    """Same as !speak but allows you to set the language.
    Use two-letter language codes from https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes.
    """
    await _speak(ctx, language, "com", message)
@bot.command()
async def speakaccent(ctx, tld, *, message: commands.clean_content):
    """Same as !speak but allows you to specify the accent.
    See https://gtts.readthedocs.io/en/latest/module.html#localized-accents for possible values.
    """
    await _speak(ctx, "en", tld, message)
@bot.command()
async def speaklangaccent(ctx, language, tld, *, message: commands.clean_content):
    """Same as !speak but allows you to specify the language and accent.
    See the help for !speaklang and !speakaccent for more info.
    """
    await _speak(ctx, language, tld, message)
@bot.command(aliases=["dc"])
async def disconnect(ctx):
"""Disconnect from voice channel."""
if ctx.voice_client is not None:
await ctx.voice_client.disconnect()
@bot.command()
async def fun(ctx, victim: discord.Member = None):
    """Mystery command."""
    # Joins the victim's (or, without an argument, the caller's) voice channel
    # and plays fun.opus there.
    if victim is None:
        victim = ctx.author
    if not victim.voice:
        await ctx.send(
            "You must be in a voice channel in order to use this command."
            if victim == ctx.author
            else "The victim must be in a voice channel in order for this command to work."
        )
        return
    await _joinvoice(ctx.voice_client, victim.voice.channel)
    # codec="copy" streams the already-opus-encoded file without re-encoding.
    source = discord.FFmpegOpusAudio("fun.opus", codec="copy")
    ctx.voice_client.play(source)
with open("cowsay_manual.txt") as cowsay_manual_file:
COWSAY_MANUAL = cowsay_manual_file.read()
@bot.command(help=COWSAY_MANUAL)
async def cowsay(ctx, *args):
    """The original cowsay command."""
    # Arguments go straight to the cowsay binary. No shell is involved, so no
    # shell injection; stderr is folded into stdout so usage errors echo back.
    # NOTE(review): users can still pass options like -f with arbitrary paths —
    # confirm that is acceptable.
    proc = await asyncio.create_subprocess_exec(
        "cowsay",
        *args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    await send_block(ctx, (await proc.communicate())[0].decode())
@bot.command()
async def cowthink(ctx, *args):
    """Variation of cowsay.
    https://manpages.debian.org/buster/cowsay/cowsay.6.en.html
    """
    # Same mechanics as !cowsay, just the cowthink binary.
    proc = await asyncio.create_subprocess_exec(
        "cowthink",
        *args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    await send_block(ctx, (await proc.communicate())[0].decode())
async def cowsay_block(block):
    """Wrap a block of text with cowsay.

    The -n flag disables cowsay's word wrapping so pre-formatted text keeps
    its layout.
    """
    # Fix: stdin must be a pipe for communicate(input=...) to work — with the
    # default stdin=None, asyncio's communicate() cannot feed `block` to the
    # process and fails.
    proc = await asyncio.create_subprocess_exec(
        "cowsay", "-n",
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return (await proc.communicate(block.encode()))[0].decode()
# @bot.command()
# async def cowsaysp(ctx, *, expression):
# """Evaluate a SymPy math expression and cowsay the result."""
# try:
# result = sympy_parser.parse_expr(
# expression,
# transformations=sympy_parser.standard_transformations +
# (sympy_parser.implicit_multiplication_application,
# sympy_parser.rationalize, sympy_parser.convert_xor))
# except:
# await send_block(ctx, cowsay_block(traceback.format_exc()))
# else:
# await send_block(ctx, cowsay_block(sympy.pretty(result)))
@bot.command()
async def c4(ctx):  # pylint: disable=invalid-name
    """Play Four in a Row."""
    board = c4board.C4Board()
    msg = await ctx.send(board)

    async def add_reactions():
        # One keycap-digit reaction per column acts as the move button.
        for i in range(c4board.BOARD_WIDTH):
            await msg.add_reaction(
                str(i) + "\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}"
            )
    asyncio.create_task(add_reactions())

    def check(payload):
        # Accept only digit-keycap reactions on our own message, ignoring the
        # reactions the bot adds itself.
        if payload.message_id != msg.id:
            return False
        if payload.event_type == "REACTION_ADD" and payload.user_id == bot.user.id:
            return False
        emoji = str(payload.emoji)
        try:
            return (
                len(emoji) == 3
                and int(emoji[0]) < c4board.BOARD_WIDTH
                and emoji[1:]
                == "\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}"
            )
        except ValueError:
            return False

    # Both adding and removing a reaction count as a move, so listen for both.
    pending = {
        asyncio.create_task(bot.wait_for("raw_reaction_add", check=check)),
        asyncio.create_task(bot.wait_for("raw_reaction_remove", check=check)),
    }
    try:
        while True:
            done, pending = await asyncio.wait(
                pending, timeout=300, return_when=asyncio.FIRST_COMPLETED
            )
            if not done:
                # Five minutes without a move ends the game.
                return
            for done_task in done:
                payload = done_task.result()
                move_result = board.move(int(str(payload.emoji)[0]))
                if move_result != c4board.MoveResult.INVALID:
                    await msg.edit(content=board)
                if move_result == c4board.MoveResult.YELLOW_WIN:
                    await ctx.send("Yellow won!")
                    return
                if move_result == c4board.MoveResult.RED_WIN:
                    await ctx.send("Red won!")
                    return
                if move_result == c4board.MoveResult.DRAW:
                    await ctx.send("It's a draw!")
                    return
                # Re-arm the listener for whichever event type just fired.
                if payload.event_type == "REACTION_ADD":
                    pending.add(
                        asyncio.create_task(
                            bot.wait_for("raw_reaction_add", check=check)
                        )
                    )
                else:
                    pending.add(
                        asyncio.create_task(
                            bot.wait_for("raw_reaction_remove", check=check)
                        )
                    )
    finally:
        # Leave no orphaned wait_for tasks behind, however the game ended.
        for pending_task in pending:
            pending_task.cancel()
@bot.event
async def on_error(ctx, error):
    """Send errors to the text channel."""
    # NOTE(review): discord.py invokes the on_error *event* as
    # on_error(event_name, *args, **kwargs), not with (ctx, error); also the
    # etype/value/tb keywords were removed from traceback.format_exception in
    # Python 3.10. Confirm this handler actually fires as intended.
    await send_block(
        ctx,
        "".join(
            traceback.format_exception(
                etype=type(error), value=error, tb=error.__traceback__
            )
        ),
    )
@bot.event
async def on_command_error(ctx, error):
    """Send command errors to the text channel."""
    # Pass the exception parts positionally: the etype/value/tb keyword
    # parameters were removed from traceback.format_exception in Python 3.10,
    # so the keyword form raises a TypeError on modern interpreters.
    await send_block(
        ctx,
        "".join(
            traceback.format_exception(type(error), error, error.__traceback__)
        ),
    )
@bot.event
async def on_message(message):
    """Check for bad words and speak things in the muted channel."""

    async def bad_word_check():
        # Shame the author in a "#wall-of-shame"-style channel when one exists,
        # otherwise right where the message was posted.
        if any(
            bad_word.search(message.clean_content) is not None for bad_word in BAD_WORDS
        ):
            shame_channel = message.channel
            try:
                for channel in message.guild.text_channels:
                    if SHAME_CHANNEL_PATTERN.fullmatch(channel.name):
                        shame_channel = channel
                        break
            except AttributeError:
                pass  # Message has no guild
            await shame_channel.send(
                "{} SAID A BAD WORD".format(message.author.display_name.upper())
            )

    async def speak_muted():
        # Read messages aloud for users sitting in a text channel whose name
        # contains "muted", skipping DMs and bot commands.
        if (
            not isinstance(message.channel, discord.DMChannel)
            and "muted" in message.channel.name.lower()
            and message.author.voice
            and not message.content.startswith("!")
        ):
            await _joinvoice(message.guild.voice_client, message.author.voice.channel)
            temp_file = tempfile.TemporaryFile()
            # Prefix the speech with the author's first "word" of display name.
            tts = gTTS(
                re.split(r"\W+", message.author.display_name, maxsplit=1)[0]
                + " said: "
                + message.clean_content
            )
            tts.write_to_fp(temp_file)
            temp_file.seek(0)
            source = discord.FFmpegPCMAudio(temp_file, pipe=True)
            message.guild.voice_client.play(source)

    # process_commands must still run, or no bot command would ever fire.
    await asyncio.gather(bad_word_check(), speak_muted(), bot.process_commands(message))
# Entry point: the bot token comes from the KIRAN_TOKEN environment variable
# (typically supplied via the .env file loaded above).
bot.run(os.environ["KIRAN_TOKEN"])
|
from .visulization import Visulizer
|
import json

# Test variables to store: one example of each basic Python value kind.
varInt = 16
varReal = 5.0
varString = "Test"
varBool = True
varList = [1,2,3,4,5]
varList2 = [[1,2,3,4,5],[6,7,8,9,0]]
varTuple = (1,2,3)
varDic = {1:'s',3:'4',2:'a'}
varDic2 = {1:5,3:6,2:7}
'''
# https://docs.python.org/3/library/pickle.html#comparison-with-json
12.1.1.2. Comparison with json
There are fundamental differences between the pickle protocols and JSON (JavaScript Object Notation):
•JSON is a text serialization format (it outputs unicode text, although most of the time it is then encoded to utf-8), while pickle is a binary serialization format;
•JSON is human-readable, while pickle is not;
•JSON is interoperable and widely used outside of the Python ecosystem, while pickle is Python-specific;
•JSON, by default, can only represent a subset of the Python built-in types, and no custom classes; pickle can represent an extremely large number of Python types (many of them automatically, by clever usage of Python’s introspection facilities; complex cases can be tackled by implementing specific object APIs).
'''
# Do not use pickle — too many security concerns (see comparison above).
'''test variable file structures, a prototype for actual data structures to use to represent needed data'''
# --- Single-file prototype ------------------------------------------------
fileName = "Test.tmp"
fileSize = 64*1024*1024
fileHash = 2**512
numberOfPieces = 128
pieceSize = 512*1024
pieceHashs = {}
for i in range(0,512):
    pieceHashs[i] = 2**512
# --- Multi-file prototype (rebinds the piece variables above) -------------
files = {"Path1.tmp":{"SIZE":64*1024*1024, "START":0, "END":64*1024*1024, "HASH": 2**512},
         "Path2.tmp":{"SIZE":64*1024*1024, "START":64*1024*1024, "END":2*64*1024*1024, "HASH": 2**512}
         }
pieceSize = 512*1024
numberOfPieces = 256
pieceHashs = []
for i in range(0,256):
    pieceHashs.append(2**512)
infoFile = {"FILES": files, "PIECESIZE": pieceSize, "NUMBEROFPIECES": numberOfPieces, "PIECEHASHS": pieceHashs}
# Note: all string dictionary keys that are used for the program are capitalized (file names excepted)
jsonFile = json.dumps(infoFile, sort_keys=True, indent=1) #Note: json is sorted, and indentation is used
print(jsonFile)
# Round-trip check: parse the serialized text back into Python objects.
temp = json.loads(jsonFile)
print(temp)
|
import reptile.data
from orun.data.datasource import DataSource, Param
class ReportConnection:
    """
    Default report db connection
    """
    def datasource_factory(self, **kwargs):
        """
        Create a datasource instance compatible with reptile engine
        :param kwargs: forwarded to Query (e.g. name, sql)
        :return: a new Query instance
        """
        return Query(**kwargs)


# Module-level singleton used as the fallback connection for reports.
default_connection = ReportConnection()
class Query(DataSource, reptile.data.DataSource):
    """SQL datasource bridging orun's DataSource with the reptile report engine."""
    def __init__(self, name=None, sql=None):
        # Initialize both bases explicitly; they do not cooperate via super().
        reptile.data.DataSource.__init__(self)
        DataSource.__init__(self, sql=sql)
        self.name = name

    def load(self, structure: dict):
        """Populate name/sql from a serialized datasource definition."""
        self.name = structure['name']
        self.sql = structure['sql']

    def execute(self, params=None):
        """Run the query and return rows as a list of {field_name: value} dicts."""
        rows = self._prepare(params)
        fields = [f[0] for f in self.fields]
        return [dict(zip(fields, row)) for row in rows]

    def open(self):
        # Lazily execute on first open; no-op when already opened or sql is empty.
        if not self._opened and self.sql:
            super().open()
            self._data = self.execute()

    def __getattr__(self, item):
        # Column access by attribute: query.some_field -> list of that column's values.
        # NOTE(review): if `_data` has not been set yet, this lookup recurses
        # into __getattr__ — presumably open() always runs first; confirm.
        return [obj[item] for obj in self._data]

    def __iter__(self):
        return iter(self.data)
|
from django.shortcuts import get_object_or_404, render
from django.urls import reverse_lazy, reverse
from django.views.generic import CreateView, UpdateView, DeleteView, TemplateView, View
from django.http import HttpResponse
import json
from planner.models import Garden, Bed
class GardenView(TemplateView):
    """Render the list of beds belonging to one garden."""
    template_name = 'planner/bed_list.html'

    def get(self, request, *args, **kwargs):
        garden = get_object_or_404(Garden, pk=kwargs['garden_id'])
        # Keep only actual Bed instances among the garden's surfaces.
        beds = [surface for surface in garden.bed_set.all() if isinstance(surface, Bed)]
        return render(request, self.template_name, context={'beds': beds})
class BedCreateView(CreateView):
    """Create a Bed inside the garden given by the `garden_id` URL kwarg."""
    model = Bed
    fields = ['name', 'length', 'width', 'comment', 'soil_type', 'exposition']
    template_name = 'planner/modals/bed_create_with_details_form.html'

    def get_success_url(self):
        # Back to the owning garden's bed list after a successful create.
        return reverse_lazy('planner:garden_view', kwargs={'garden_id': self.kwargs['garden_id']})

    def form_valid(self, form):
        # Attach the parent garden before the object is persisted.
        # NOTE(review): form.save + super().form_valid saves the form twice —
        # setting form.instance.garden before super() would avoid that; confirm.
        new_bed = form.save(commit=False)
        new_bed.garden = Garden.objects.get(pk=self.kwargs["garden_id"])
        new_bed.save()
        return super().form_valid(form)
class BedUpdateView(UpdateView):
    """Edit an existing Bed's attributes."""
    model = Bed
    fields = ['name', 'length', 'width', 'comment', 'soil_type', 'exposition']
    template_name = 'planner/modals/bed_update_with_details_form.html'

    def get_success_url(self):
        # Back to the owning garden's bed list after a successful update.
        return reverse_lazy('planner:garden_view', kwargs={'garden_id': self.kwargs['garden_id']})
class BedDelete(DeleteView):
    """Confirm-and-delete view for a Bed."""
    model = Bed
    template_name = 'planner/modals/bed_confirm_delete.html'

    def get_success_url(self):
        # Back to the owning garden's bed list after deletion.
        return reverse_lazy('planner:garden_view', kwargs={'garden_id': self.kwargs['garden_id']})
class SaveBedPosition(View):
    """Persist the x/y position of each bed sent in a JSON payload."""

    def post(self, request, **kwargs):
        payload = json.loads(request.body.decode('utf-8'))
        for entry in payload:
            bed = Bed.objects.get(pk=entry.get('id'))
            bed.x = entry.get('x')
            bed.y = entry.get('y')
            bed.save()
        return HttpResponse()
|
from twisted.trial.unittest import TestCase
from .. import c_zlib
class CZlibTest(TestCase):
    """Round-trip tests for the c_zlib wrapper."""
    def testRoundTrip(self):
        # Compress and decompress with a preset dictionary; data must survive.
        dictionary = 'foobar'
        compressed = c_zlib.compress('foobar', level=9, dictionary=dictionary)
        decompressed = c_zlib.decompress(compressed, dictionary=dictionary)
        self.assertEqual('foobar', decompressed)
|
from utils import (
get_guard_periods,
lines_to_records,
read_input
)
def get_sleepiest_guard(guard_periods):
    """Return the guard id with the largest total time asleep.

    guard_periods maps guard_id -> iterable of per-minute sleep counts.
    Replaces the previous "sort every key and take the last" approach with a
    single O(n) max(); ties resolve to the first key encountered rather than
    the last, which is equally arbitrary.
    """
    return max(guard_periods, key=lambda guard_id: sum(guard_periods[guard_id]))
if __name__ == '__main__':
    # Find the guard whose total sleep is largest, then the minute that guard
    # is most often asleep; print the product of guard id and minute.
    records = lines_to_records(read_input())
    guard_periods = get_guard_periods(records)
    sleepiest_guard = get_sleepiest_guard(guard_periods)
    sleepiest_minute = guard_periods[sleepiest_guard].index(
        max(guard_periods[sleepiest_guard])
    )
    print(sleepiest_guard * sleepiest_minute)
|
import requests
import json
class GetAddress:
    """Look up country and region for a UK postcode via api.postcodes.io."""

    def __init__(self, postcode: str):
        """Fetch postcode data (one network GET per instance).

        Raises requests.HTTPError for unknown postcodes / server errors and
        requests.Timeout if the API does not answer within 10 seconds.
        """
        postcode = postcode.replace(" ", "")
        # A timeout stops the constructor hanging forever on network issues;
        # raise_for_status surfaces an unknown postcode (HTTP 404) clearly
        # instead of failing later with a KeyError on "result".
        info = requests.get(
            "https://api.postcodes.io/postcodes/" + postcode, timeout=10
        )
        info.raise_for_status()
        result = info.json()["result"]
        self.country = result["country"]
        self.region = result["region"]
|
"""Cluster Mass Module
abstract class to compute cluster mass function.
========================================
The implemented functions use PyCCL library as backend.
"""
from __future__ import annotations
from typing import final, List, Tuple, Optional
from abc import abstractmethod
import numpy as np
import sacc
from ..updatable import Updatable
from ..parameters import ParamsMap
class ClusterMassArgument:
    """Cluster Mass argument class."""

    def __init__(self, logMl: float, logMu: float):
        if logMl > logMu:
            raise ValueError("logMl must be smaller than logMu")
        self.logMl: float = logMl
        self.logMu: float = logMu
        self.logM: Optional[float] = None
        self.dirac_delta: bool = False
        if logMl == logMu:
            # Degenerate bin: the mass distribution collapses to a single point.
            self.dirac_delta = True
            self.logM = logMl

    def is_dirac_delta(self) -> bool:
        """Check if the argument is a dirac delta."""
        return self.dirac_delta

    def get_logM(self) -> float:
        """Return the logM value if the argument is a dirac delta."""
        if self.logM is None:
            raise ValueError("Argument is not a Dirac delta")
        return self.logM

    @property
    @abstractmethod
    def dim(self) -> int:
        """Return the dimension of the argument."""

    @abstractmethod
    def get_logM_bounds(self) -> Tuple[float, float]:
        """Return the bounds of the cluster mass argument."""

    @abstractmethod
    def get_proxy_bounds(self) -> List[Tuple[float, float]]:
        """Return the bounds of the cluster mass proxy argument."""

    @abstractmethod
    def p(self, logM: float, z: float, *proxy_args) -> float:
        """Return the probability of the argument."""
class ClusterMass(Updatable):
    """Cluster Mass module."""
    # Template-method layout: _update/_reset are @final and delegate to the
    # _update_cluster_mass/_reset_cluster_mass hooks that subclasses provide.

    @abstractmethod
    def read(self, sacc_data: sacc.Sacc):
        """Abstract method to read the data for this source from the SACC file."""

    def _update_cluster_mass(self, params: ParamsMap):
        """Method to update the ClusterMass from the given ParamsMap.
        Subclasses that need to do more than update their contained
        :python:`Updatable` instance variables should implement this method."""

    @abstractmethod
    def _reset_cluster_mass(self):
        """Abstract method to reset the ClusterMass."""

    @final
    def _update(self, params: ParamsMap):
        """Implementation of Updatable interface method `_update`."""
        self._update_cluster_mass(params)

    @final
    def _reset(self) -> None:
        """Implementation of the Updatable interface method `_reset`.
        This calls the abstract method `_reset_cluster_mass`, which must be implemented
        by all subclasses."""
        self._reset_cluster_mass()

    @abstractmethod
    def gen_bins_by_array(self, logM_obs_bins: np.ndarray) -> List[ClusterMassArgument]:
        """Generate bins by an array of bin edges."""

    @abstractmethod
    def gen_bin_from_tracer(self, tracer: sacc.BaseTracer) -> ClusterMassArgument:
        """Return the bin for the given tracer."""
|
# -*- coding: utf-8 -*-
import codecs
import os
def word_split(words):
    """Expand hyphenated entries: 'a-b' contributes 'a' and 'b'."""
    expanded = []
    for token in words:
        # str.split('-') returns [token] unchanged when no hyphen is present.
        expanded.extend(token.split('-'))
    return expanded
def read_file(file_path):
    """Read a UTF-8 text file and return its space/hyphen-split words."""
    f = codecs.open(file_path,'r',"utf-8")
    lines = f.readlines()
    word_list= []
    for line in lines:
        line = line.strip()
        words = line.split(' ')
        words = word_split(words)
        word_list.extend(words)
    # NOTE(review): the file handle is never closed — consider a with-block.
    return word_list
def get_file_from_folder(folder_path):
    """Collect the full paths of every file under folder_path, recursively."""
    collected = []
    for root, _dirs, filenames in os.walk(folder_path):
        collected.extend(os.path.join(root, filename) for filename in filenames)
    return collected
def read_files(file_paths):
    """Concatenate the word lists extracted from all the given files."""
    collected = []
    for file_path in file_paths:
        collected.extend(read_file(file_path))
    return collected
def format_word(word):
    """Strip every character that is not an ASCII letter or '-', then lowercase."""
    allowed = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-'
    cleaned = ''.join(ch for ch in word if ch in allowed)
    return cleaned.lower()
def format_words(words):
    """Normalize every word via format_word, dropping the ones that end up empty."""
    return [cleaned for cleaned in (format_word(word) for word in words) if cleaned]
def statistics_words(words):
    """Count the occurrences of each word, returning {word: count}.

    Fix: dict.has_key() was removed in Python 3; dict.get() with a default
    works on both Python 2 and 3.
    """
    s_word_dict = {}
    for word in words:
        s_word_dict[word] = s_word_dict.get(word, 0) + 1
    return s_word_dict
def print_to_csv(vocabulary_map, to_file_path):
    """Write "word,count" lines to to_file_path, most frequent first.

    Fixes in this revision:
    - the original returned `nfile.write` before `nfile.close()`, leaving the
      close unreachable and the handle leaked; a with-block closes it reliably;
    - sorted() was called but its result discarded, so rows came out in
      arbitrary dict order; they are now actually written in descending
      count order.
    """
    with open(to_file_path, 'w+') as out:
        ordered = sorted(vocabulary_map.items(), key=lambda kv: kv[1], reverse=True)
        for word, count in ordered:
            out.write("%s,%d\n" % (word, count))
def main():
    # Pipeline: read every file under data2/, normalize the words, count
    # frequencies, and dump the result as CSV. (This file is Python 2 —
    # note the print statements.)
    words=read_files(get_file_from_folder('data2'))
    print '获取未格式化单词%d个' %(len(words))
    f_words=format_words(words)
    print '获取已格式化单词%d个' %(len(f_words))
    word_dict=statistics_words(f_words)
    # NOTE(review): the output filename 'output/sudo python test.csv' looks
    # like a copy/paste accident — confirm the intended path.
    print_to_csv(word_dict,'output/sudo python test.csv')

if __name__ == '__main__':
    main()
|
import sys
import os
from Crypto.Hash import SHA256
from Crypto.Signature import pss
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Util.Padding import pad
from typing import Tuple
def sign_buffer(data: bytes, priv_key: bytes) -> bytes:
    """
    Sign *data* with RSA-PSS over SHA-256 and return the raw signature bytes.

    priv_key = content of PEM file
    """
    # 01. h = hash(data)
    sha256 = SHA256.new()
    sha256.update(data)
    # IMPORTANT: don't apply digest() — pss.sign() consumes the hash object itself.
    # 02. rsa = create RSA context
    rsa_priv_key = RSA.import_key(priv_key)
    rsa_pss = pss.new(rsa_priv_key)
    # 03. sign RSA-2048-PSS with priv_key (h)
    return rsa_pss.sign(sha256)
def protect_buffer(data: bytes, pub_key: bytes) -> bytes:
    """
    Hybrid-encrypt *data* for the holder of *pub_key* (PEM content).

    Returned layout: RSA-OAEP(kc || iv) followed by AES-256-CBC ciphertext.
    """
    # 01. generate symmetric key kc (AES.key_size[2] == 32 bytes -> AES-256)
    kc = get_random_bytes(AES.key_size[2])
    # 02. encrypt `data` with AES-256-CBC under a fresh random IV -> encrypted_data
    iv = get_random_bytes(AES.block_size)  # 16 bytes == 128 bits
    aes = AES.new(kc, AES.MODE_CBC, iv)
    padded_data = pad(data, AES.block_size)
    encrypted_data = aes.encrypt(padded_data)
    # 03. encrypt `kc` (256 bits) + iv (128 bits) with RSA-OAEP -> wrap_key
    # Consistency fix: use import_key() like sign_buffer() does; importKey()
    # is the legacy PyCrypto-era alias for the same function.
    rsa_pub_key = RSA.import_key(pub_key)
    rsa = PKCS1_OAEP.new(rsa_pub_key)
    wrap_key = rsa.encrypt(kc + iv)
    # 04. return wrap_key || encrypted_data
    return wrap_key + encrypted_data
def main(argv):
    """CLI: hybrid-encrypt and sign <input_file> into <output_file>.

    argv: [prog, public_key_receiver, private_key_sender, input_file, output_file]
    The output file holds the protected buffer followed by its RSA-PSS signature.
    """
    # 00. check arguments
    if len(argv) != 5:
        print("usage: {0} <public_key_receiver> <private_key_sender> <input_file> <output_file>".format(argv[0]))
        sys.exit(1)
    public_key_receiver = argv[1]
    private_key_sender = argv[2]
    input_file_path = argv[3]
    output_file_path = argv[4]
    # 01. read input file.
    # Fix: a missing input file used to fall through silently and encrypt an
    # empty buffer; it is now an explicit error, like the empty-file case.
    if not os.path.exists(input_file_path):
        print("error: input file not found")
        sys.exit(1)
    if os.path.getsize(input_file_path) == 0:
        print("error: file is empty")
        sys.exit(1)
    with open(input_file_path, "rb") as f_in:
        plain_data = f_in.read()
    # 02. read the PEM keys (with-blocks close the handles, unlike bare open().read())
    with open(public_key_receiver) as f_pub:
        rsa_enc_pub_pem = f_pub.read()
    with open(private_key_sender) as f_priv:
        rsa_sign_priv_pem = f_priv.read()
    # 03. protect plain_data
    encrypted_data = protect_buffer(plain_data, rsa_enc_pub_pem)
    # 04. sign the ciphertext (encrypt-then-sign)
    signature = sign_buffer(encrypted_data, rsa_sign_priv_pem)
    # 05. write ciphertext followed by signature
    with open(output_file_path, "wb") as f_out:
        f_out.write(encrypted_data)
        f_out.write(signature)
    print("protection done !")
|
import re
# Menu for choosing which expression to translate to three-address code.
print("===================================================================")
print("================== SELECCIONE UNA OPCION ==========================")
print("===================================================================")
print("1: X = 2 + 5 * y")
print("2: X = a / a + b * b")
print("3: X = (a + 2) / 3 + b")
print("4: X = (a + 2) / (3 - b)")
print("5: X = 2 * y - ((4 * y) + z)")
x=int(input("Seleccione una opcion: "))
# The cases below are implemented with plain lists, without regex; because of
# that they only handle single-digit numbers and single-character identifiers.
#===================================================================================================================
if x==1:
#CASO UNO SIN USO DE REGEX, CON LISTAS.
p = []
vs = []
valor =open("Ejemplo 1.txt").read()#LEEMOS EL ARCHIVO
suma = -1
for i in valor:#RECORREMOS LA CADENA INGRESADA
if i != " ":#SI LA CADENA ES DIFERENTE A UN CONJUNTO VACIO
p.append(i)#AÑADIMOS LA CADENA INGRESADA A LA LISTA P
#==========================================================================
temporalCero = ""
for i in p: # MULTIPLICACION O DIVISION
suma +=1
if i =="*" or i=="/":
# TEMPORALCERO = VARIABLE | OPERANDO 1 | VARIABLE
temporalCero = "_t0 = " + p[suma-1] + " " + p[suma] + " " + p[suma+1] #LA LISTA P VAN DESGLOZANDO LA EXPRESION EN PARTES
p.remove(p[suma]) # SE ELIMINA "*"
p.remove(p[suma-1]) #SE ELIMINA EL "5"
p.remove(p[suma-1]) #SE ELIMINA EL "Y"
print(temporalCero)
#==========================================================================
temporalUno = ""
for i in p:
if i == "+" or i == "-": # SUMA O RESTA
if p[-1] == "+" or p[-1]=="-":
temporalUno = "_t1 = "+ p[-2] + " "+ p[-1] + " " +"_t0"
else:
temporalUno = "_t1 = "+ p[-1] + " "+ p[-2] + " " +"_t0"
p.remove(p[-1])
p.remove(p[2])
print(temporalUno)
#==========================================================================
igualdad = ""
for i in p:
if i == "x" or "X":
igualdad = p[0] +" "+ p[1] + " _t1"
print(igualdad)
#===================================================================================================================
elif x==2:
p = []
vs = []
valor = open("Ejemplo 2.txt").read()
suma = -1
for i in valor:
if i != " ":
p.append(i)
#==========================================================================
temporalCero = ""
for i in p: # MULTIPLICACION O DIVISION
suma +=1
if i =="*":
#STRING TEMPORAL CERO
# TEMPORALCERO = VARIABLE | OPERANDO 1 | VARIABLE
temporalCero = "_t0 = " + p[suma-1] + " " + p[suma] + " " + p[suma+1]
p.remove(p[suma-1])
p.remove(p[suma])
p.remove(p[suma-1])
break
else:
if i == "/":
temporalCero = "_t0 = " + p[suma-1] + " " + p[suma] + " " + p[suma+1]
p.remove(p[suma-1])
p.remove(p[suma])
p.remove(p[suma-1])
break
print(temporalCero)
#==========================================================================
temporalUno = ""
for i in p: # MULTIPLICACION O DIVISION
if p[3] =="+":
if p[suma-4]=="/" or p[suma-4]=="*":
temporalUno = "_t1 = " + p[suma-5] + " " + p[suma-4] + " " + p[suma-3]
elif p[suma-4] != "/" or p[suma-4] !="*":
if p[suma-4] == "+" or p[suma-4]=="-":
temporalUno = "_t1 = " + p[suma-3] + " " + p[suma-2] + "_t0"
elif p[3] !="+":
if p[suma+1]=="/" or p[suma+1]=="*":
#STRING TEMPORAL CERO
# TEMPORALCERO = VARIABLE | OPERANDO 1 | VARIABLE
temporalUno = "_t1 = " + p[suma] + " " + p[suma+1] + " " + p[suma+2]
elif p[suma+1] != "/" or p[suma+1] !="*":
if p[suma+1] == "+" or p[suma+1]=="-":
temporalUno = "_t1 = _t0 " + p[suma-1] + " " + p[suma]
print(temporalUno)
#==========================================================================
temporalDos = ""
for i in p: # MULTIPLICACION O DIVISION
if p[3] =="+":
if p[suma-4]=="/" or p[suma-4]=="*":
temporalDos = "_t2 = " + temporalCero[0:3] + " " + p[suma-4] + " " + temporalUno[0:3]
elif p[suma-4] != "/" or p[suma-4] !="*":
if p[suma-4] == "+" or p[suma-4]=="-":
temporalDos = "_t2 = " + p[suma-5] + " " + p[suma-4] + "_t1"
elif p[3] !="+":
if p[suma+1]=="/" or p[suma+1]=="*":
#STRING TEMPORAL CERO
# TEMPORALCERO = VARIABLE | OPERANDO 1 | VARIABLE
temporalDos = "_t2 = " + temporalCero[0:3] + " " + p[suma-1] + " " + temporalUno[0:3]
elif p[suma+1] != "/" or p[suma+1] !="*":
if p[suma+1] == "+" or p[suma+1]=="-":
temporalDos = "_t1 = _t0 " + p[suma+1] + " " + p[suma+2]
print(temporalDos)
#==========================================================================
igualdad = ""
for i in p:
if i == "x" or "X":
igualdad = p[0] +" "+ p[1] + " _t2"
print(igualdad)
#======================================================================================================
elif x==3:
p = []
vs = []
valor = open("Ejemplo 3.txt").read()
suma = -1
for i in valor:
if i != " ":
p.append(i)
#=============================================================================
temporalCero = ""
for i in p:
suma +=1
if i =="(" or i == ")":
#STRING TEMPORAL CERO
# TEMPORALCERO = VARIABLE | OPERANDO 1 | VARIABLE
temporalCero = "_t0 = " + p[suma-4] + " " + p[suma-3] + " " + p[suma-2] + " " + p[suma-1] + " " + p[suma]
#=============================================================================
temporalUno = ""
for i in p:
if i == "*" or i == "/":
if p[4] == "(" :
temporalUno = "_t1 =" + " _t0 " + p[suma-5] + " " + p[suma-6] + " "
else:
temporalUno = "_t1 = " + " _t0 " + p[suma-3] + " " + p[suma-2] + " "
#=============================================================================
temporalDos = ""
for i in p:
if i == "+" or i == "-":
if p[4] == "(" :
temporalDos = "_t2 = " + " _t1 " + p[suma-7] + " " + p[suma-8] + " "
else:
temporalDos = "_t2 = " + " _t1 " + p[suma-1] + " " + p[suma] + " "
print(temporalCero)
print(temporalUno)
print(temporalDos)
#=============================================================================
igualdad = ""
for i in p:
if i == "x" or "X":
igualdad = p[0] + " " + p[1] + "_t2"
print(igualdad)
#===================================================================================================================
elif x==4:
p = []
vs = []
valor = open("Ejemplo 4.txt").read()
suma = -1
for i in valor:
if i != " ":
p.append(i)
igualdad = ""
for i in p:
suma +=1
if i == "x" or "X":
igualdad = p[suma-12] + " " + p[suma-11] + " _t3"
p.remove(p[suma-11])
p.remove(p[suma-12])
#=============================================================================
temporalCero = ""
for i in p: # MULTIPLICACION O DIVISION
suma +=1
if i =="*" or i=="/":
#STRING TEMPORAL CERO
# TEMPORALCERO = VARIABLE | OPERANDO 1 | VARIABLE
temporalCero = "_t0 = " + p[suma-15] + " " + p[suma-14] + " " + p[suma-13] + " " + p[suma-12] + " " + p[suma-11]
p.remove(p[suma-11])
p.remove(p[suma-12])
p.remove(p[suma-13])
p.remove(p[suma-14])
p.remove(p[suma-15])
break
else:
if i == "+" or i=="-":
temporalCero = "_t0 = " + p[suma-15] + " " + p[suma-14] + " " + p[suma-13] + " " + p[suma-12] + " " + p[suma-11]
p.remove(p[suma-11])
p.remove(p[suma-12])
p.remove(p[suma-13])
p.remove(p[suma-14])
p.remove(p[suma-15])
break
print(temporalCero)
#============================================================================================================================
temporalUno = ""
for i in p: # MULTIPLICACION O DIVISION
suma +=1
if i =="*" or i=="/":
#STRING TEMPORAL CERO
# TEMPORALCERO = VARIABLE | OPERANDO 1 | VARIABLE
try:
temporalUno = "_t1 = " + p[suma-15] + " " + p[suma-14] + " " + p[suma-13] + " " + p[suma-12] + " " + p[suma-11]
p.remove(p[suma-11])
p.remove(p[suma-12])
p.remove(p[suma-13])
p.remove(p[suma-14])
p.remove(p[suma-15])
except:
IndexError: temporalUno = 'null'
print("===============================================================================")
print("ERROR")
print("caso invalido")
print("===============================================================================")
break
else:
if i == "+" or i=="-":
try:
temporalUno = "_t1 = " + p[suma-15] + " " + p[suma-14] + " " + p[suma-13] + " " + p[suma-12] + " " + p[suma-11]
p.remove(p[suma-11])
p.remove(p[suma-12])
p.remove(p[suma-13])
p.remove(p[suma-14])
p.remove(p[suma-15])
except:
IndexError: temporalUno = 'null'
print("===============================================================================")
print("ERROR")
print("caso invalido")
print("===============================================================================")
break
print(temporalUno)
#================================================================================
temporalDos = ""
for i in p: # MULTIPLICACION O DIVISION
suma +=1
if i =="*" or i=="/":
#STRING TEMPORAL CERO
# TEMPORALCERO = VARIABLE | OPERANDO 1 | VARIABLE
temporalDos = "_t2 = " + temporalCero[0:3] + " " + p[suma-18] + " " + temporalUno[0:3]
break
else:
if i == "+" or i=="-":
temporalDos = "_t2 = " + temporalCero[0:3] + " " + p[suma-18] + " " + temporalUno[0:3]
break
print(temporalDos)
print(igualdad)
#================================================================================================================
elif x==5:
p = []
S = []
TC= []
valor = open("Ejemplo 5.txt").read()
suma = -1
suma2 = -1
for i in valor:
if i != " ":
p.append(i)
igualdad = ""
for i in p:
if i == "x" or "X":
igualdad = p[0] + " " + p[1] + " _t3"
p.remove("x" or "X")
p.remove("=")
#================================================================================
for i in p:
suma +=1
if i =="(" or i ==")":
if i =="(" or i ==")":
p.remove(p[suma-13])
TC.append(p[suma-12])
p.remove(p[suma-12])
TC.append(p[suma-11])
p.remove(p[suma-11])
TC.append(p[suma-10])
p.remove(p[suma-10])
TC.append(p[suma-9])
p.remove(p[suma-9])
TC.append(p[suma-8])
p.remove(p[suma-8])
TC.append(p[suma-7])
p.remove(p[suma-7])
TC.append(p[suma-6])
p.remove(p[suma-6])
p.remove(p[suma-5])
break
#================================================================================
temporalCero = ""
for x in TC:
suma +=1
if x =="(" or x ==")":
if TC[0]=="(":
temporalCero = "_t0 = " + TC[suma2-6] + " " + TC[suma2-5] + " " + TC[suma2-4] + " " + TC[suma2-3] + " " + TC[suma2-2]
else:
temporalCero = "_t0 = " + TC[suma2-4] + " " + TC[suma2-3] + " " + TC[suma2-2] + " " + TC[suma2-1] + " " + TC[suma2]
print(temporalCero)
#================================================================================
temporalUno = ""
for x in TC:
suma2 +=1
if x =="+" or x =="-" or x =="*" or x =="/":
if TC[0]=="(":
temporalUno = "_t1 = t0 " + TC[suma2-7] + " " + TC[suma2-6]
else:
temporalUno = "_t1 = t0 " + TC[suma2-3] + " " + TC[suma2-4]
print(temporalUno)
#================================================================================
temporalDos = ""
for i in p:
suma +=1
if p[0]=="-" or p[0]=="+" or p[0]=="*" or p[0]=="/":
temporalDos = "_t2 = " + p[0] + " " + p[2] + " " + p[3]
break
else:
temporalDos = "_t2 = " + " " +p[0] + " " + p[2] + " " + p[3]
break
print(temporalDos)
#================================================================================
temporalTres = ""
for i in p:
suma +=1
if p[0]=="-" or p[0]=="+" or p[0]=="*" or p[0]=="/":
temporalTres = "_t3 = " + temporalDos[0:3] + " " + p[1] + " " + temporalUno[0:3]
break
else:
temporalTres = "_t3 = " + temporalDos[0:3] + " " + p[1] + " " + temporalUno[0:3]
break
print(temporalTres)
#================================================================================
print(igualdad) |
from django.test import TestCase
from backend.user_service.user.infra.adapter.user_create_command_handler \
import UserCreateCommandHandler
from backend.common.command.user_create_command \
import UserCreateCommand
class UserCreateCommandHandlerTestCase(TestCase):
    """Unit tests for ``UserCreateCommandHandler.handle``."""

    def test_when_message_has_no_email_or_password_raise_exception(self):
        # A command carrying neither email nor password must be rejected
        # before the application service is ever touched.
        handler = UserCreateCommandHandler(user_application_service=None)
        command = UserCreateCommand(email=None, password=None, user_type=None)
        with self.assertRaises(ValueError):
            handler.handle(command)

    def test_when_message_valid_field_then_call_register(self):
        EMAIL = 'test@gmail.com'
        PASSWORD = 1234
        USER_TYPE = 'RIDER'
        CAR_TYPE = None
        PLATE_NO = None
        testcase = self

        class MockUserApplicationService:
            # Stands in for the real application service and verifies the
            # arguments the handler forwards to register().
            def register(self, email, password, user_type, car_type, plate_no):
                testcase.assertEqual(email, EMAIL)
                testcase.assertEqual(password, PASSWORD)
                testcase.assertEqual(user_type, USER_TYPE)
                testcase.assertEqual(car_type, CAR_TYPE)
                testcase.assertEqual(plate_no, PLATE_NO)

        handler = UserCreateCommandHandler(
            user_application_service=MockUserApplicationService())
        handler.handle(UserCreateCommand(
            email=EMAIL, password=PASSWORD, user_type=USER_TYPE,
            car_type=CAR_TYPE, plate_no=PLATE_NO))
|
###################################################################
# File Name: train.py
# Author: Zhongdao Wang
# mail: wcd17@mails.tsinghua.edu.cn
# Created Time: Thu 06 Sep 2018 10:08:49 PM CST
###################################################################
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import os.path as osp
import sys
import time
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.backends import cudnn
from torch.utils.data import DataLoader
import model
from feeder.feeder import Feeder
from utils import to_numpy
from utils.meters import AverageMeter
from utils.serialization import load_checkpoint
from utils.utils import bcubed
from utils.graph import graph_propagation, graph_propagation_soft, graph_propagation_naive
from sklearn.metrics import normalized_mutual_info_score, precision_score, recall_score
def single_remove(Y, pred):
    """Drop nodes that belong to singleton predicted clusters.

    Parameters
    ----------
    Y : array-like
        Ground-truth labels, aligned element-wise with ``pred``.
    pred : array-like
        Predicted cluster ids.

    Returns
    -------
    (np.ndarray, np.ndarray)
        ``Y`` and ``pred`` restricted to nodes whose predicted cluster has
        more than one member.

    Improvements over the original:
    * one vectorized pass via ``np.unique(..., return_counts=True)`` instead
      of a Python loop over every unique label (O(n*k) -> O(n log n));
    * the all-singleton edge case now returns empty arrays instead of
      crashing (the original built ``np.asarray([])``, a float64 array,
      which is invalid as an index).
    """
    Y = np.asarray(Y)
    pred = np.asarray(pred)
    uniques, counts = np.unique(pred, return_counts=True)
    multi_member = uniques[counts > 1]
    remain_idcs = np.flatnonzero(np.isin(pred, multi_member))
    return Y[remain_idcs], pred[remain_idcs]
def main(args):
    """Evaluate a trained GCN link-prediction model and cluster the graph.

    Loads the validation feature/graph/label set, restores the checkpointed
    model, scores candidate edges with ``validate``, propagates the scored
    edges into clusters, and prints BCubed precision/recall/F-score plus NMI
    before and after removing singleton clusters.  Requires a CUDA device.
    """
    # Fix RNG seeds for reproducibility; let cudnn autotune kernels.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    valset = Feeder(args.val_feat_path,
                    args.val_knn_graph_path,
                    args.val_label_path,
                    args.seed,
                    args.k_at_hop,
                    args.active_connection,
                    train=False)
    valloader = DataLoader(
        valset, batch_size=args.batch_size,
        num_workers=args.workers, shuffle=False, pin_memory=True)
    # Restore model weights from the checkpoint and move to GPU.
    ckpt = load_checkpoint(args.checkpoint)
    net = model.gcn()
    net.load_state_dict(ckpt['state_dict'])
    net = net.cuda()
    # Build an adjacency dict per node from the k-NN graph; neighbors[0]
    # appears to be the node itself and is skipped.  NOTE(review):
    # knn_graph_dict is built but never used below — confirm it is needed.
    knn_graph = valset.knn_graph
    knn_graph_dict = list()
    for neighbors in knn_graph:
        knn_graph_dict.append(dict())
        for n in neighbors[1:]:
            knn_graph_dict[-1][n] = []
    criterion = nn.CrossEntropyLoss().cuda()
    # Score all candidate edges and cache them to disk for later reuse.
    edges, scores = validate(valloader, net, criterion)
    np.save('edges', edges)
    np.save('scores', scores)
    #edges=np.load('edges.npy')
    #scores = np.load('scores.npy')
    # Propagate scored edges into clusters, then turn clusters into labels.
    clusters = graph_propagation(edges, scores, max_sz=900, step=0.6, pool='avg' )
    final_pred = clusters2labels(clusters, len(valset))
    labels = valset.labels
    print('------------------------------------')
    print('Number of nodes: ', len(labels))
    print('Precision   Recall  F-Sore NMI')
    p,r,f = bcubed(labels, final_pred)
    nmi = normalized_mutual_info_score(final_pred, labels)
    print(('{:.4f}    '*4).format(p,r,f, nmi))
    # Re-evaluate after dropping singleton clusters.
    labels, final_pred = single_remove(labels, final_pred)
    print('------------------------------------')
    print('After removing singleton culsters, number of nodes: ', len(labels))
    print('Precision   Recall  F-Sore NMI')
    p,r,f = bcubed(labels, final_pred)
    nmi = normalized_mutual_info_score(final_pred, labels)
    print(('{:.4f}    '*4).format(p,r,f, nmi))
def clusters2labels(clusters, n_nodes):
    """Convert a list of clusters into a dense per-node label array.

    Each element of ``clusters`` is an iterable of node objects exposing a
    ``.name`` attribute holding the node index.  Every node receives the
    index of the cluster it belongs to; the assertion guarantees complete
    coverage (no node left at the -1 placeholder).
    """
    labels = np.full((n_nodes,), -1.0)
    for cluster_id, cluster in enumerate(clusters):
        for node in cluster:
            labels[node.name] = cluster_id
    assert np.sum(labels < 0) < 1
    return labels
def make_labels(gtmat):
    # Flatten the ground-truth matrix into a 1-D label vector.
    # torch .view(-1) shares storage with gtmat (requires a contiguous tensor).
    return gtmat.view(-1)
def validate(loader, net, crit):
    """Run the network over ``loader`` and collect per-edge link scores.

    Returns
    -------
    (np.ndarray, np.ndarray)
        ``edges``  — array of ``[center_node, neighbor_node]`` global-id pairs;
        ``scores`` — matching softmax class-1 probability for each edge.

    NOTE(review): reads the module-level ``args`` (print_freq, k_at_hop)
    created in the ``__main__`` block, so this function only works when the
    script is executed as a program.  Requires CUDA.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accs = AverageMeter()
    precisions = AverageMeter()
    recalls = AverageMeter()
    net.eval()
    end = time.time()
    edges = list()
    scores = list()
    for i, ((feat, adj, cid, h1id, node_list), gtmat) in enumerate(loader):
        data_time.update(time.time() - end)
        feat, adj, cid, h1id, gtmat = map(lambda x: x.cuda(),
                                          (feat, adj, cid, h1id, gtmat))
        pred = net(feat, adj, h1id)
        labels = make_labels(gtmat).long()
        loss = crit(pred, labels)
        # Convert logits to probabilities for edge scoring below.
        pred = F.softmax(pred, dim=1)
        p,r, acc = accuracy(pred, labels)
        # Track running statistics for the periodic progress log.
        losses.update(loss.item(),feat.size(0))
        accs.update(acc.item(),feat.size(0))
        precisions.update(p, feat.size(0))
        recalls.update(r,feat.size(0))
        batch_time.update(time.time()- end)
        end = time.time()
        if i % args.print_freq == 0:
            print('[{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {losses.val:.3f} ({losses.avg:.3f})\n'
                  'Accuracy {accs.val:.3f} ({accs.avg:.3f})\t'
                  'Precison {precisions.val:.3f} ({precisions.avg:.3f})\t'
                  'Recall {recalls.val:.3f} ({recalls.avg:.3f})'.format(
                      i, len(loader), batch_time=batch_time,
                      data_time=data_time, losses=losses, accs=accs,
                      precisions=precisions, recalls=recalls))
        # Map hop-1 predictions back to global node ids and record one
        # (edge, score) pair per center/neighbor combination.
        node_list = node_list.long().squeeze().numpy()
        bs = feat.size(0)
        for b in range(bs):
            cidb = cid[b].int().item()
            nl = node_list[b]
            for j,n in enumerate(h1id[b]):
                n = n.item()
                edges.append([nl[cidb], nl[n]])
                # pred rows are grouped per sample: k_at_hop[0] rows each.
                scores.append(pred[b*args.k_at_hop[0]+j,1].item())
    edges = np.asarray(edges)
    scores = np.asarray(scores)
    return edges, scores
def accuracy(pred, label):
    """Compute precision, recall and mean accuracy of hard predictions.

    ``pred`` holds per-class scores; the arg-max over dim 1 gives the hard
    class decision, which is compared against ``label``.
    Returns ``(precision, recall, accuracy)`` with accuracy as a torch scalar.
    """
    hard_pred = torch.argmax(pred, dim=1).long()
    acc = (hard_pred == label).float().mean()
    pred_np = to_numpy(hard_pred)
    label_np = to_numpy(label)
    precision = precision_score(label_np, pred_np)
    recall = recall_score(label_np, pred_np)
    return precision, recall, acc
if __name__ == '__main__':
    # CLI entry point: build the argument parser and run the evaluation.
    parser = argparse.ArgumentParser()
    # misc
    working_dir = osp.dirname(osp.abspath(__file__))
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument('--workers', default=16, type=int)
    parser.add_argument('--print_freq', default=40, type=int)
    # Optimization args — lr/momentum/weight_decay/epochs are not referenced
    # in this evaluation script (presumably shared with the training CLI).
    parser.add_argument('--lr', type=float, default=1e-5)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--batch_size', type=int, default=32)
    # k_at_hop: neighborhood sizes per hop; active_connection: edges kept per node.
    parser.add_argument('--k-at-hop', type=int, nargs='+', default=[20,5])
    parser.add_argument('--active_connection', type=int, default=5)
    # Validation args: paths to features, k-NN graph, and labels.
    parser.add_argument('--val_feat_path', type=str, metavar='PATH',
                        default=osp.join(working_dir, '../facedata/1024.fea.npy'))
    parser.add_argument('--val_knn_graph_path', type=str, metavar='PATH',
                        default=osp.join(working_dir, '../facedata/knn.graph.1024.bf.npy'))
    parser.add_argument('--val_label_path', type=str, metavar='PATH',
                        default=osp.join(working_dir, '../facedata/1024.labels.npy'))
    # Test args
    parser.add_argument('--checkpoint', type=str, metavar='PATH', default='./logs/logs/best.ckpt')
    args = parser.parse_args()
    main(args)
|
# Generated by Django 3.2 on 2021-05-19 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``User`` table.

    Auto-generated by Django 3.2 on 2021-05-19; field options (choices,
    defaults, verbose names) mirror the model definition at generation time.
    Do not edit field definitions by hand — generate a new migration instead.
    """
    initial = True
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.CharField(max_length=50, unique=True, verbose_name='id')),
                ('name', models.CharField(max_length=50, verbose_name='姓名')),
                ('gender', models.CharField(choices=[('m', '男'), ('f', '女')], default='m', max_length=10, verbose_name='性别')),
                ('department', models.CharField(default='技术部', max_length=30, verbose_name='部门')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='邮箱')),
                ('password', models.CharField(max_length=30, verbose_name='密码')),
                ('image', models.FileField(blank=True, null=True, upload_to='goods', verbose_name='图片')),
                ('identity', models.CharField(choices=[('user', '用户'), ('admin', '管理员'), ('staff', '后勤')], default='user', max_length=10, verbose_name='身份')),
                ('level', models.IntegerField(default=0)),
                ('c_time', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
                'db_table': 'User',
                'ordering': ['c_time'],
            },
        ),
    ]
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from apps.user.models import CustomUser
class Experience(models.Model):
    """
    Work-experience entry belonging to a user.

    (The previous docstring said "Address Model", which did not match the
    fields below; corrected to describe the stored data.)
    """
    class Meta:
        db_table = 'experience'
        verbose_name = _('experience')
        verbose_name_plural = _("experience's")

    # Owning user; experience rows are deleted together with the user.
    user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
    job_title = models.CharField(_("job title"), max_length=255, blank=False)
    company_name = models.CharField(_("company name"), max_length=255, blank=False)
    job_description = models.TextField(_("job role"), blank=True)
    # NOTE(review): blank=True without null=True on a DateField means an empty
    # form value cannot be stored as NULL in the database — confirm intent.
    date_started = models.DateField(_("date started"), blank=True)
    date_ended = models.DateField(_("date ended"), blank=True)
    # Bookkeeping timestamps, managed automatically by Django.
    date_created = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f'Experience: {self.id} - {self.user}'
|
from about_action import AboutAction
from edit_preferences_action import EditPreferencesAction
from exit_action import ExitAction
|
###################################################################################
# Title : KSE526 project baseline
# Author : hs_min
# Date : 2020.11.25
###################################################################################
#%%
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import RNN, GRU, BatchNormalization, Dropout, TimeDistributed, Softmax, Dot, Bidirectional, Layer, Conv1D, MaxPooling1D, Flatten, RepeatVector, LSTM, Attention, Concatenate, Dense
import tensorflow.keras.backend as K
#%%
###################################################################
# Loss
###################################################################
def root_mean_squared_error(y_true, y_pred):
    """RMSE loss: sqrt(mean((y_true - y_pred)^2))."""
    squared_diff = tf.square(y_true - y_pred)
    return tf.sqrt(tf.reduce_mean(squared_diff))
def weighted_root_mean_squared_error(y_true, y_pred):#, w):
    """RMSE plus an asymmetric penalty on under-prediction.

    NOTE(review): the penalty term ``mask * w * (y_true - y_pred)`` is
    element-wise, so this returns the scalar RMSE broadcast against a
    per-element tensor rather than a scalar loss — confirm whether a
    reduce_mean over the penalty term was intended.
    """
    w = 0.2  # fixed penalty weight for under-prediction
    # mask is 1 where the model under-predicts (y_pred < y_true), else 0
    mask = tf.cast(tf.less(y_pred, y_true), dtype=tf.float64)
    return tf.sqrt(tf.reduce_mean(tf.square(y_true-y_pred))) + mask * w * (y_true-y_pred)
def last_time_step_rmse(y_true, y_pred):
    """RMSE restricted to the final time step of each sequence."""
    final_true = y_true[:, -1]
    final_pred = y_pred[:, -1]
    return root_mean_squared_error(final_true, final_pred)
###################################################################
# Model
###################################################################
class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau-style) attention.

    score = V(tanh(W1·values + W2·query)); a softmax over the time axis
    yields attention weights, which pool ``values`` into a context vector.
    """
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = Dense(units)
        self.W2 = Dense(units)
        self.V = Dense(1)

    def call(self, values, query):  # note: key and value are the same tensor
        # query shape == (batch_size, hidden size)
        # hidden_with_time_axis shape == (batch_size, 1, hidden size)
        # Add a time axis so the addition below broadcasts over max_length.
        hidden_with_time_axis = tf.expand_dims(query, 1)
        # score shape == (batch_size, max_length, 1)
        # we get 1 at the last axis because we are applying score to self.V
        # the shape of the tensor before applying self.V is (batch_size, max_length, units)
        score = self.V(tf.nn.tanh(
            self.W1(values) + self.W2(hidden_with_time_axis)))
        # attention_weights shape == (batch_size, max_length, 1)
        attention_weights = tf.nn.softmax(score, axis=1)
        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
#%%
class MyLayer(Layer):
    """Placeholder Layer subclass — defines no weights or call() yet.

    The ``config`` argument is accepted for interface symmetry with the
    other classes in this module but is currently unused.
    """
    def __init__(self, config):
        super(MyLayer, self).__init__()
class MyModel(Model):
    """Placeholder Model subclass — defines no layers or call() yet.

    The ``config`` argument is accepted for interface symmetry with the
    other classes in this module but is currently unused.
    """
    def __init__(self, config):
        super(MyModel, self).__init__()
class CNNBiLSTMATTN_noAuxs(Model):
    """CNN + Bi-LSTM + Bahdanau-attention model, BOTH auxiliary inputs unused.

    Ablation variant: only inputs[0] contributes to the prediction.  The
    auxiliary layers are still constructed (and executed in call()), but
    their outputs are not concatenated into the output head — see the
    commented-out concat in call().  Output: sigmoid vector of
    config.label_width values.
    """
    def __init__(self, config):
        super(CNNBiLSTMATTN_noAuxs, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.attn_units = config.attn_units
        # Main branch: Conv1D x2 -> max-pool -> Bi-LSTM -> attention.
        self.conv1d1 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.conv1d2 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.mp1d = MaxPooling1D(pool_size = 2)
        # lstm1 is built but not used in call() (historical encoder stage).
        self.lstm1 = Bidirectional(LSTM(self.lstm_units, dropout=0.1,
                                        return_sequences = True, return_state= False,
                                        recurrent_initializer='glorot_uniform'))
        self.lstm2 = Bidirectional(LSTM(self.lstm_units, dropout=0.1,
                                        return_sequences = True, return_state=True,
                                        recurrent_initializer='glorot_uniform'))
        self.concat = Concatenate()
        self.attention = BahdanauAttention(self.lstm_units)
        self.fcn1 = Dense(50)
        # Auxiliary branch layers (executed but excluded from the head).
        self.conv1d3 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.aux_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.5,
                                           return_sequences=True, return_state = True))
        self.aux_attention = BahdanauAttention(self.lstm_units)
        self.aux_fcn1 = Dense(20)
        self.aux_fnc2 = TimeDistributed(Dense(20))
        self.aux_flatten = Flatten()
        # Output head.
        self.fcn3 = Dense(10)
        self.fcn4 = Dense(self.n_outputs, activation='sigmoid')

    def call(self, inputs):
        x = self.conv1d1(inputs[0])
        x = self.conv1d2(x)
        x = self.mp1d(x)
        # Bi-LSTM over pooled conv features; the final forward/backward
        # hidden states form the attention query.
        decoder_lstm, forward_h, forward_c, backward_h, backward_c = self.lstm2(x)
        state_h = self.concat([forward_h, backward_h])  # hidden state
        context_vector, attention_weights = self.attention(decoder_lstm, state_h)
        x = self.fcn1(context_vector)
        # Auxiliary branches are computed but their results are discarded
        # below (this is the no-aux ablation).
        x_aux1 = self.conv1d3(inputs[1])
        aux_lstm, aux_forward_h, aux_forward_c, aux_backward_h, aux_backward_c = self.aux_lstm(x_aux1)
        aux_state_h = self.concat([aux_forward_h, aux_backward_h])  # hidden state
        aux_context_vector, aux_attention_weights = self.aux_attention(aux_lstm, aux_state_h)
        x_aux1 = self.aux_fcn1(aux_context_vector)
        x_aux2 = self.aux_fnc2(inputs[2])
        x_aux2 = self.aux_flatten(x_aux2)
        # x = self.concat([x, x_aux1, x_aux2])  # aux outputs intentionally not merged
        x = self.fcn3(x)
        x = self.fcn4(x)
        return x
class CNNBiLSTMATTN_noAUX1(Model):
    """CNN + Bi-LSTM + Bahdanau-attention model, auxiliary input 1 unused.

    Ablation variant: the main branch (inputs[0]) and auxiliary branch 2
    (inputs[2]) feed the output head; auxiliary branch 1 is executed but
    excluded from the final concat.  Output: sigmoid vector of
    config.label_width values.
    """
    def __init__(self, config):
        super(CNNBiLSTMATTN_noAUX1, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.attn_units = config.attn_units
        # Main branch: Conv1D x2 -> max-pool -> Bi-LSTM -> attention.
        self.conv1d1 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.conv1d2 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.mp1d = MaxPooling1D(pool_size = 2)
        # lstm1 is built but not used in call() (historical encoder stage).
        self.lstm1 = Bidirectional(LSTM(self.lstm_units, dropout=0.1,
                                        return_sequences = True, return_state= False,
                                        recurrent_initializer='glorot_uniform'))
        self.lstm2 = Bidirectional(LSTM(self.lstm_units, dropout=0.1,
                                        return_sequences = True, return_state=True,
                                        recurrent_initializer='glorot_uniform'))
        self.concat = Concatenate()
        self.attention = BahdanauAttention(self.lstm_units)
        self.fcn1 = Dense(50)
        # Auxiliary branch layers.
        self.conv1d3 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.aux_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.5,
                                           return_sequences=True, return_state = True))
        self.aux_attention = BahdanauAttention(self.lstm_units)
        self.aux_fcn1 = Dense(20)
        self.aux_fnc2 = TimeDistributed(Dense(20))
        self.aux_flatten = Flatten()
        # Output head.
        self.fcn3 = Dense(10)
        self.fcn4 = Dense(self.n_outputs, activation='sigmoid')

    def call(self, inputs):
        x = self.conv1d1(inputs[0])
        x = self.conv1d2(x)
        x = self.mp1d(x)
        # Bi-LSTM over pooled conv features; the final forward/backward
        # hidden states form the attention query.
        decoder_lstm, forward_h, forward_c, backward_h, backward_c = self.lstm2(x)
        state_h = self.concat([forward_h, backward_h])  # hidden state
        context_vector, attention_weights = self.attention(decoder_lstm, state_h)
        x = self.fcn1(context_vector)
        # Auxiliary branch 1 is computed but excluded from the concat below.
        x_aux1 = self.conv1d3(inputs[1])
        aux_lstm, aux_forward_h, aux_forward_c, aux_backward_h, aux_backward_c = self.aux_lstm(x_aux1)
        aux_state_h = self.concat([aux_forward_h, aux_backward_h])  # hidden state
        aux_context_vector, aux_attention_weights = self.aux_attention(aux_lstm, aux_state_h)
        x_aux1 = self.aux_fcn1(aux_context_vector)
        x_aux2 = self.aux_fnc2(inputs[2])
        x_aux2 = self.aux_flatten(x_aux2)
        x = self.concat([x, x_aux2])  # x_aux1 intentionally left out
        x = self.fcn3(x)
        x = self.fcn4(x)
        return x
class CNNBiLSTMATTN_noAUX2(Model):
    """CNN + Bi-LSTM + Bahdanau-attention model, auxiliary input 2 unused.

    Ablation variant: the main branch (inputs[0]) and auxiliary branch 1
    (inputs[1]) feed the output head; auxiliary branch 2 is executed but
    excluded from the final concat.  Output: sigmoid vector of
    config.label_width values.
    """
    def __init__(self, config):
        super(CNNBiLSTMATTN_noAUX2, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.attn_units = config.attn_units
        # Main branch: Conv1D x2 -> max-pool -> Bi-LSTM -> attention.
        self.conv1d1 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.conv1d2 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.mp1d = MaxPooling1D(pool_size = 2)
        # lstm1 is built but not used in call() (historical encoder stage).
        self.lstm1 = Bidirectional(LSTM(self.lstm_units, dropout=0.1,
                                        return_sequences = True, return_state= False,
                                        recurrent_initializer='glorot_uniform'))
        self.lstm2 = Bidirectional(LSTM(self.lstm_units, dropout=0.1,
                                        return_sequences = True, return_state=True,
                                        recurrent_initializer='glorot_uniform'))
        self.concat = Concatenate()
        self.attention = BahdanauAttention(self.lstm_units)
        self.fcn1 = Dense(50)
        # Auxiliary branch layers.
        self.conv1d3 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.aux_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.5,
                                           return_sequences=True, return_state = True))
        self.aux_attention = BahdanauAttention(self.lstm_units)
        self.aux_fcn1 = Dense(20)
        self.aux_fnc2 = TimeDistributed(Dense(20))
        self.aux_flatten = Flatten()
        # Output head.
        self.fcn3 = Dense(10)
        self.fcn4 = Dense(self.n_outputs, activation='sigmoid')

    def call(self, inputs):
        x = self.conv1d1(inputs[0])
        x = self.conv1d2(x)
        x = self.mp1d(x)
        # Bi-LSTM over pooled conv features; the final forward/backward
        # hidden states form the attention query.
        decoder_lstm, forward_h, forward_c, backward_h, backward_c = self.lstm2(x)
        state_h = self.concat([forward_h, backward_h])  # hidden state
        context_vector, attention_weights = self.attention(decoder_lstm, state_h)
        x = self.fcn1(context_vector)
        x_aux1 = self.conv1d3(inputs[1])
        aux_lstm, aux_forward_h, aux_forward_c, aux_backward_h, aux_backward_c = self.aux_lstm(x_aux1)
        aux_state_h = self.concat([aux_forward_h, aux_backward_h])  # hidden state
        aux_context_vector, aux_attention_weights = self.aux_attention(aux_lstm, aux_state_h)
        x_aux1 = self.aux_fcn1(aux_context_vector)
        # Auxiliary branch 2 is computed but excluded from the concat below.
        x_aux2 = self.aux_fnc2(inputs[2])
        x_aux2 = self.aux_flatten(x_aux2)
        x = self.concat([x, x_aux1])  # x_aux2 intentionally left out
        x = self.fcn3(x)
        x = self.fcn4(x)
        return x
# %%
class CNNBiLSTMATTN(Model):
    """Conv1D -> Bi-LSTM -> Bahdanau-attention forecaster with optional aux inputs.

    inputs is a 3-tuple:
        inputs[0] — main time-series window (conv + LSTM + attention branch);
        inputs[1] — auxiliary sequence 1 (Conv1D + TimeDistributed Dense);
        inputs[2] — auxiliary sequence 2 (TimeDistributed Dense).
    config.is_x_aux1 / config.is_x_aux2 choose which auxiliary branches are
    concatenated before the output head.  Output: sigmoid vector of
    config.label_width values.
    """
    def __init__(self, config):
        super(CNNBiLSTMATTN, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.attn_units = config.attn_units
        # Main branch: Conv1D x2 -> max-pool -> Bi-LSTM -> attention.
        self.conv1d1 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.conv1d2 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.mp1d = MaxPooling1D(pool_size = 2)
        # lstm1 and rv are built but not used in call() (historical stages).
        self.lstm1 = Bidirectional(LSTM(self.lstm_units, dropout=0.1,
                                        return_sequences = True, return_state= False,
                                        recurrent_initializer='glorot_uniform'))
        self.flatten = Flatten()
        self.rv = RepeatVector(self.n_outputs)
        self.lstm2 = Bidirectional(LSTM(self.lstm_units, dropout=0.1,
                                        return_sequences = True, return_state=True,
                                        recurrent_initializer='glorot_uniform'))
        self.concat = Concatenate()
        self.attention = BahdanauAttention(self.lstm_units)
        self.fcn1 = Dense(50)
        # Auxiliary branch 1: Conv1D + per-step Dense.
        self.conv1d3 = Conv1D(filters = self.filters,
                              kernel_size = self.kernel_size,
                              activation = self.activation)
        self.aux_fcn1 = TimeDistributed(Dense(20))
        self.aux_flatten1 = Flatten()
        # Auxiliary branch 2: per-step Dense only.
        self.aux_fnc2 = TimeDistributed(Dense(20))
        self.aux_flatten2 = Flatten()
        # Output head.
        self.fcn3 = Dense(10)
        self.fcn4 = Dense(self.n_outputs, activation='sigmoid')
        self.is_x_aux1 = config.is_x_aux1
        self.is_x_aux2 = config.is_x_aux2

    def call(self, inputs):
        x = self.conv1d1(inputs[0])
        x = self.conv1d2(x)
        x = self.mp1d(x)
        # Bi-LSTM over pooled conv features; the final forward/backward
        # hidden states form the attention query over the conv features.
        decoder_lstm, forward_h, forward_c, backward_h, backward_c = self.lstm2(x)
        state_h = self.concat([forward_h, backward_h])  # hidden state
        context_vector, attention_weights = self.attention(x, state_h)
        x = self.fcn1(context_vector)
        x_aux1 = self.conv1d3(inputs[1])
        # BUG FIX: the original invoked self.aux_lstm(x_aux1) here, but
        # aux_lstm is never created in __init__ (the assignment is commented
        # out), so call() crashed with AttributeError.  The LSTM outputs were
        # unused anyway (x_aux1 was immediately reassigned), so the dead call
        # is removed.
        x_aux1 = self.aux_fcn1(x_aux1)
        x_aux1 = self.aux_flatten1(x_aux1)
        x_aux2 = self.aux_fnc2(inputs[2])
        x_aux2 = self.aux_flatten2(x_aux2)
        if self.is_x_aux1 :
            print("aux1",inputs[1].shape)
            x = self.concat([x, x_aux1])
        if self.is_x_aux2 :
            print("aux2",inputs[2].shape)
            x = self.concat([x, x_aux2])
        x = self.fcn3(x)
        x = self.fcn4(x)
        return x
#%%
class BiLSTMATTN(Model):
    """Two stacked bidirectional LSTMs with Bahdanau attention plus two
    auxiliary input branches.

    inputs[0]: main sequence; attended using the second BiLSTM's final
               hidden states as the query over the first BiLSTM's sequence.
    inputs[1]: auxiliary sequence summarised by a plain LSTM.
    inputs[2]: auxiliary sequence projected per-timestep, then flattened.
    Output:    Dense(label_width) with sigmoid activation.
    """

    def __init__(self, config):
        super(BiLSTMATTN, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.attn_units = config.attn_units
        # First BiLSTM emits only the sequence; the second also returns its
        # final states, which drive the attention query.
        self.lstm1 = Bidirectional(LSTM(self.lstm_units, dropout=0.1, return_sequences=True, return_state=False, recurrent_initializer='glorot_uniform'))
        self.lstm2 = Bidirectional(LSTM(self.lstm_units, dropout=0.1, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform'))
        self.concat = Concatenate()
        self.attention = BahdanauAttention(self.lstm_units)
        self.fcn1 = Dense(50)
        self.aux_lstm = LSTM(self.lstm_units, dropout=0.2, return_sequences=False)
        self.aux_fcn1 = Dense(20)
        self.aux_fnc2 = TimeDistributed(Dense(20))
        self.aux_flatten = Flatten()
        self.fcn3 = Dense(10)
        self.fcn4 = Dense(self.n_outputs, activation='sigmoid')

    def call(self, inputs):
        main_in, aux_in1, aux_in2 = inputs[0], inputs[1], inputs[2]
        enc_seq = self.lstm1(main_in)
        dec_seq, fwd_h, fwd_c, bwd_h, bwd_c = self.lstm2(enc_seq)
        # Query: both directions' final hidden state, concatenated.
        query = self.concat([fwd_h, bwd_h])
        context, attn_weights = self.attention(enc_seq, query)
        main_feat = self.fcn1(context)
        aux_feat1 = self.aux_fcn1(self.aux_lstm(aux_in1))
        aux_feat2 = self.aux_flatten(self.aux_fnc2(aux_in2))
        merged = self.concat([main_feat, aux_feat1, aux_feat2])
        return self.fcn4(self.fcn3(merged))
#%%
#%%
class BiLSTMATTNre(Model):
    """Encoder/decoder BiLSTM with Bahdanau attention: the attended context
    vector is repeated across the output horizon and concatenated with
    inputs[2] per step to form the decoder input; an auxiliary LSTM over
    inputs[1] joins before the classification head."""
    def __init__(self, config):
        super(BiLSTMATTNre, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.attn_units = config.attn_units
        # Encoder returns both the sequence and its final states.
        self.encoder_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.1, return_sequences = True, return_state= True, recurrent_initializer='glorot_uniform'))
        self.rv = RepeatVector(self.n_outputs)
        # Decoder only emits its final output (no sequence, no states).
        self.decoder_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.1, return_sequences = False, return_state=False, recurrent_initializer='glorot_uniform'))
        self.concat = Concatenate(axis=-1)
        self.attention = BahdanauAttention(self.lstm_units)
        self.fcn0 = TimeDistributed(Dense(1))
        self.flatten = Flatten()
        self.fcn1 = Dense(50)  # created but not used in call()
        self.aux_lstm = LSTM(self.lstm_units, dropout=0.5, return_sequences=False)
        self.aux_fcn1 = Dense(20)
        self.aux_fnc2 = TimeDistributed(Dense(20))  # unused in call()
        self.aux_flatten = Flatten()                # unused in call()
        self.fcn3 = Dense(10)
        self.fcn4 = Dense(self.n_outputs, activation='sigmoid')
    def call(self, inputs):
        encoder_lstm, forward_h, forward_c, backward_h, backward_c = self.encoder_lstm(inputs[0])
        # Combined final hidden state of both directions.
        state_h = self.concat([forward_h, backward_h])
        # NOTE(review): argument order here is (state_h, encoder_lstm) -- the
        # reverse of the other models in this file, which call
        # attention(sequence, state_h). Confirm BahdanauAttention's expected
        # (values, query) ordering before trusting this branch.
        context_vector, attention_weights = self.attention(state_h, encoder_lstm)
        # Repeat the context once per output step, join with inputs[2].
        context_vector = self.rv(context_vector)
        decoder_input = self.concat([context_vector, inputs[2]])
        decoder_lstm = self.decoder_lstm(decoder_input)
        decoder_lstm = self.fcn0(decoder_lstm)
        decoder_lstm = self.flatten(decoder_lstm)
        # Auxiliary branch: summarise inputs[1] with a single LSTM.
        x_aux1 = self.aux_lstm(inputs[1])
        x_aux1 = self.aux_fcn1(x_aux1)
        x = self.concat([decoder_lstm, x_aux1])
        x = self.fcn3(x)
        x = self.fcn4(x)
        return x
#%%
class LSTMaux(Model):
    """LSTM encoder/decoder with a TimeDistributed auxiliary branch.

    inputs[0]: main sequence (encoder -> repeated last state -> decoder).
    inputs[1]: auxiliary sequence, projected per-timestep then flattened.
    Output: Dense(label_width), linear activation.
    """
    def __init__(self, config):
        super(LSTMaux, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.lstm_encoder = LSTM(units=self.lstm_units, dropout=0.5, return_sequences=True, return_state=True)
        # BUGFIX: call() uses self.rv but no RepeatVector was ever created,
        # so the first forward pass raised AttributeError.
        self.rv = RepeatVector(self.n_outputs)
        self.lstm_decoder = LSTM(units=self.lstm_units, dropout=0.5, return_sequences=True, return_state=False)
        self.flatten = Flatten()
        self.aux_dense = TimeDistributed(Dense(1))
        self.aux_concat = Concatenate()
        self.outputs = Dense(self.n_outputs)

    def call(self, inputs):
        # Encoder returns the full sequence plus its final (h, c) state.
        encoder_stack_h, encoder_last_h, encoder_last_c = self.lstm_encoder(inputs[0])
        # Repeat the final hidden state once per output step.
        decoder_input = self.rv(encoder_last_h)
        decoder_stack_h = self.lstm_decoder(decoder_input, initial_state=[encoder_last_h, encoder_last_c])
        decoder_flatten = self.flatten(decoder_stack_h)
        # Auxiliary branch: per-step linear projection of inputs[1].
        aux_input = self.aux_dense(inputs[1])
        aux_flatten = self.flatten(aux_input)
        aux_concat = self.aux_concat([decoder_flatten, aux_flatten])
        outputs = self.outputs(aux_concat)
        return outputs
#%%
class LSTMATTN(Model):
    """LSTM encoder/decoder with dot-product attention built from Dot +
    Softmax layers (scores = decoder . encoder^T over the feature axis)."""

    def __init__(self, config):
        super(LSTMATTN, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.lstm_encoder = LSTM(units=self.lstm_units, return_sequences=True, return_state=True)
        self.rv = RepeatVector(self.n_outputs)
        self.lstm_decoder = LSTM(units=self.lstm_units, return_sequences=True, return_state=False)
        # score[t, s] = <decoder step t, encoder step s>
        self.attention = Dot(axes=[2, 2])
        self.softmax = Softmax()
        # context = attention weights applied over encoder steps
        self.context = Dot(axes=[2, 1])
        self.concat = Concatenate()
        self.flatten = Flatten()
        self.fcn = Dense(30)
        self.outputs = Dense(self.n_outputs)

    def call(self, inputs):
        enc_seq, enc_h, enc_c = self.lstm_encoder(inputs[0])
        # Decoder is primed with the encoder's final hidden state, repeated
        # once per output step, and initialised from the encoder state.
        dec_seq = self.lstm_decoder(self.rv(enc_h), initial_state=[enc_h, enc_c])
        weights = self.softmax(self.attention([dec_seq, enc_seq]))
        ctx = self.context([weights, enc_seq])
        combined = self.concat([ctx, dec_seq])
        return self.outputs(self.fcn(self.flatten(combined)))
#%%
class CNNLSTMATTN(Model):
    """Conv1D front-end + RepeatVector + BiLSTM with a self-attention head.

    inputs: single main sequence tensor.
    Output: reshaped to (batch, label_width).
    """
    def __init__(self, config
                 ):
        super(CNNLSTMATTN, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.conv1d1 = Conv1D(filters=self.filters,
                              kernel_size=self.kernel_size,
                              activation=self.activation)
        self.conv1d2 = Conv1D(filters=self.filters,
                              kernel_size=self.kernel_size,
                              activation=self.activation)
        self.mp1d = MaxPooling1D(pool_size=2)
        self.flatten = Flatten()
        self.rv = RepeatVector(self.n_outputs)
        # BUGFIX: return_state was True, but call() bound the result to a
        # single variable, so `x` became a [sequence, fh, fc, bh, bc] list
        # and the Attention layer below received lists of tensors. The
        # states were never used, so only the sequence is returned now.
        self.lstm_out = Bidirectional(LSTM(units=self.lstm_units, return_sequences=True, return_state=False))
        self.attention = Attention()
        self.concat = Concatenate()
        self.td2 = Dense(self.n_outputs)

    def call(self, inputs):
        x = self.conv1d1(inputs)
        x = self.conv1d2(x)
        x = self.mp1d(x)
        x = self.flatten(x)
        x = self.rv(x)
        x = self.lstm_out(x)
        # Self-attention: query and value are the same sequence.
        x = self.attention([x, x])
        x = self.td2(x)
        # Collapse to (batch, n_outputs).
        return tf.reshape(x, shape=(-1, self.n_outputs))
#%%
class CNNLSTM(Model):
    """Conv1D front-end, RepeatVector + LSTM, and a Dense output head."""

    def __init__(self, config):
        super(CNNLSTM, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.conv1d1 = Conv1D(filters=self.filters, kernel_size=self.kernel_size, activation=self.activation)
        self.mp1d = MaxPooling1D(pool_size=2)
        self.flatten = Flatten()
        self.rv = RepeatVector(self.n_outputs)
        self.lstm = LSTM(units=self.lstm_units, return_sequences=False)
        self.fcn = Dense(self.n_outputs)

    def call(self, inputs):
        # conv -> pool -> flatten -> repeat -> LSTM -> dense
        features = self.conv1d1(inputs)
        features = self.mp1d(features)
        features = self.flatten(features)
        repeated = self.rv(features)
        summary = self.lstm(repeated)
        out = self.fcn(summary)
        # Force an explicit (batch, n_outputs) shape.
        return tf.keras.backend.reshape(out, shape=(-1, self.n_outputs))
#%%
class CNNs(Model):
    """Minimal CNN baseline: one Conv1D, flatten, Dense head."""

    def __init__(self, config):
        super(CNNs, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.activation = config.activation
        self.lstm_units = config.lstm_units
        self.conv1d1 = Conv1D(filters=self.filters, kernel_size=self.kernel_size, activation=self.activation)
        self.flatten = Flatten()
        self.d = Dense(self.n_outputs)

    def call(self, inputs):
        features = self.flatten(self.conv1d1(inputs))
        out = self.d(features)
        # Force an explicit (batch, n_outputs) shape.
        return tf.keras.backend.reshape(out, shape=(-1, self.n_outputs))
#%%
class LSTMs(Model):
    """Single-LSTM baseline: last hidden output fed to a Dense head."""

    def __init__(self, config):
        super(LSTMs, self).__init__()
        self.n_outputs = config.label_width
        self.filters = config.filters
        self.kernel_size = config.kernel_size
        self.lstm_units = config.lstm_units
        # Only the final output is needed: no sequence, no states.
        self.lstm_encoder = LSTM(units=self.lstm_units, return_sequences=False, return_state=False)
        self.outputs = Dense(self.n_outputs)

    def call(self, inputs):
        hidden = self.lstm_encoder(inputs)
        return self.outputs(hidden)
# %%
# class BiLSTMATTN(Model):
# def __init__(self, config):
# super(BiLSTMATTN, self).__init__()
# self.n_outputs = config.label_width
# self.filters = config.filters
# self.kernel_size = config.kernel_size
# self.activation = config.activation
# self.lstm_units = config.lstm_units
# self.attn_units = config.attn_units
# self.encoder_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.1, return_sequences = True, return_state= True, recurrent_initializer='glorot_uniform'))
# # self.rv = RepeatVector(self.n_outputs)
# self.decoder_lstm = Bidirectional(LSTM(self.lstm_units, dropout=0.1, return_sequences = True, return_state=True, recurrent_initializer='glorot_uniform'))
# self.concat = Concatenate()
# self.attention = BahdanauAttention(self.attn_units)
# self.fcn1 = Dense(50)#, activation='relu')
# self.aux_lstm = LSTM(self.lstm_units, dropout=0.5, return_sequences=False)
# self.aux_fcn1 = Dense(20)
# self.aux_fnc2 = TimeDistributed(Dense(20))
# self.aux_flatten = Flatten()
# self.fcn3 = Dense(10)
# self.fcn4 = Dense(self.n_outputs, activation='sigmoid')
# def call(self, inputs):
# encoder_lstm, forward_h, forward_c, backward_h, backward_c = self.encoder_lstm(inputs[0])
# # encoder_lstm, forward_h, backward_h = self.encoder_lstm(inputs[0])
# # state_h = self.concat([forward_h, backward_h])
# # decoder_input = self.rv(state_h)
# decoder_lstm, forward_h, forward_c, backward_h, backward_c = self.decoder_lstm(encoder_lstm)
# # decoder_lstm, forward_h, backward_h = self.decoder_lstm(encoder_lstm)
# state_h = self.concat([forward_h, backward_h]) # 은닉 상태
# # state_c = self.concat([forward_c, backward_c])
# context_vector, attention_weights = self.attention(encoder_lstm, state_h)
# x = self.fcn1(context_vector)
# # x = self.dropout(x)
# x_aux1 = self.aux_lstm(inputs[1])
# x_aux1 = self.aux_fcn1(x_aux1)
# x_aux2 = self.aux_fnc2(inputs[2])
# x_aux2 = self.aux_flatten(x_aux2)
# x = self.concat([x, x_aux1, x_aux2])
# x = self.fcn3(x)
# x = self.fcn4(x)
# return x |
#!/bin/python
"""Handheld-console interpreter (Advent-of-Code day-8 style): parse an
acc/jmp/nop program from the file named on the command line and run it until
some line is about to execute a second time, then report the accumulator.

Fixes over the original: Python 2 print statements converted to Python 3
(the file was a SyntaxError under py3), the input file is closed via a
context manager, and a line that fails to parse now aborts cleanly instead
of falling through to an AttributeError on lineRE.group().
"""
import sys
import re


def parse_program(lines):
    """Parse 'acc/jmp/nop (+|-)N' lines into (lineNo, op, signed_arg) tuples."""
    program = []
    lineNo = 1
    for line in lines:
        line = line.rstrip()
        lineRE = re.match(r"(acc|jmp|nop) (\+|-)(\d+)", line)
        if not lineRE:
            print("SHIT")
            sys.exit(1)  # original fell through and crashed on .group()
        argument = int(lineRE.group(3))
        if lineRE.group(2) == '-':
            argument *= -1
        program.append((lineNo, lineRE.group(1), argument))
        lineNo += 1
    return program


def run_until_repeat(program):
    """Execute until a line is about to run twice; return (line, accumulator)
    at that point, printing a trace line after every executed instruction."""
    accumulator = 0
    programCounter = 0
    runLines = []
    while True:
        (line, inst, arg) = program[programCounter]
        if line in runLines:
            print("Revisting %d: acc: %d" % (line, accumulator))
            return line, accumulator
        if inst == "acc":
            accumulator += arg
            programCounter += 1
        elif inst == "jmp":
            programCounter += arg
        elif inst == "nop":
            programCounter += 1
        else:
            print("Bugger")
        runLines.append(line)
        print(line, inst, arg, programCounter, accumulator)


def main():
    with open(sys.argv[1], "r") as infile:
        program = parse_program(infile)
    print(program)
    run_until_repeat(program)


if __name__ == '__main__':
    main()
#encoding:utf-8
import matplotlib.pyplot as plt

# Plot y = x^2 for x in 1..5 with a thick line.
input_values=[1,2,3,4,5]
squares = [1, 4, 9, 16, 25]
plt.plot(input_values, squares, linewidth = 5)
# Set the chart title and label the axes.
plt.title("Square Numbers", fontsize=24)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
# Set the size of the tick-mark labels.
plt.tick_params(axis='both', labelsize=14)
plt.show()
|
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
import time
from openerp import SUPERUSER_ID
from openerp import pooler, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round
import openerp.addons.decimal_precision as dp
class account_move_line(osv.osv):
    """Extends account.move.line with an onchange that proposes the analytic
    account linked to the selected G/L account."""
    _inherit = "account.move.line"

    def onchange_account_id_analytic(self, cr, uid, ids, account_id, context=None):
        """Onchange for the account field.

        Returns {'value': {'analytic_account_id': id}} with the account's
        configured analytic_id when one exists; True otherwise (preserving
        the original fallthrough behaviour); {} when no account is selected.
        """
        if not account_id:
            # Nothing selected yet -- no change to push back to the client.
            return {}
        # BUGFIX: parameterised query instead of building the SQL with
        # string interpolation (fragile and injection-prone); the Python 2
        # debug `print account_id` was also removed.
        cr.execute('SELECT analytic_id FROM account_account WHERE id = %s',
                   (int(account_id),))
        for record in cr.fetchall():
            if record:
                return {
                    'value': {
                        'analytic_account_id': record[0]
                    }
                }
            else:
                return True


account_move_line()
# def dollarize(fcount):
# fcount = round(fcount, 2)
# sfcount = format(fcount, ',')
# if fcount < 0:
# sfcount = sfcount.split('-')[1]
# return '-' + '$' + sfcount
# else:
# return '$' + sfcount
class MoneyFmt(object):
    """Wrap a numeric amount and render it as a dollar string.

    MoneyFmt(12345.6789)  -> '$12,345.68'
    MoneyFmt(-12345.6789) -> '-$12,345.68'
    """

    def __init__(self, fcount):
        self.fcount = float(fcount)

    def update(self, fcount=None):
        """Replace the stored amount; report (but swallow) bad input.

        BUGFIX: float('abc') raises ValueError, which the original
        `except (TypeError)` missed and let propagate.
        """
        if fcount is not None:
            try:
                self.fcount = float(fcount)
            except (TypeError, ValueError) as e:
                print(e)

    def __nonzero__(self):
        # BUGFIX: the truth protocol must return a bool/int; returning the
        # raw float raised TypeError under Python 2's bool().
        return bool(self.fcount)

    # Python 3 spells the same protocol __bool__.
    __bool__ = __nonzero__

    def __repr__(self):
        # BUGFIX: __repr__ must return a string; returning the float raised
        # "TypeError: __repr__ returned non-string".
        return 'MoneyFmt(%r)' % self.fcount

    def __str__(self):
        return self.dollarize()

    def dollarize(self):
        """Return the amount as '$1,234.57' ('-$1,234.57' when negative).

        Unlike the original, this no longer mutates self.fcount as a side
        effect of formatting.
        """
        rounded = round(self.fcount, 2)
        sfcount = format(rounded, ',')
        if rounded < 0:
            # format() puts the '-' in front; move it before the '$'.
            return '-' + '$' + sfcount.split('-')[1]
        else:
            return '$' + sfcount
# Demo: construct a formatter and print its dollarized rendering.
a = MoneyFmt(12345.6789)
print(a)
|
string = input()
string2 = ''
for i in string:
if ((i >= 'a' and i <= 'z') or (i >= 'A' and i <= 'Z')):
if (i not in string2):
string2 += i
print(len(string2))
|
#import sys
#input = sys.stdin.readline
def main():
N = int( input())
ans = 0
for i in range(1,N):
ans += (N-1)//i
print(ans)D.
if __name__ == '__main__':
main()
|
from auxilaryFunctions import np
from auxilaryFunctions import calIntegralImage
from auxilaryFunctions import Grey_img,integralImage,Image,np,io,get_integral_image
from classifiers import getLayers
import time,math
from stages import *
def computerFeatureFunc(box, featureChosen, integralImg):
    """Evaluate one scaled Haar-like feature inside a detection window.

    box: (boxSize, areaPos_i, areaPos_j). boxSize appears to be the window
        AREA, since its side length is recovered as sqrt(boxSize) below --
        TODO confirm against the caller (detect_face builds square windows).
    featureChosen: feature descriptor -- [0] pattern id (1=1x2, 2=1x3,
        3=2x1, 4=3x1, 5=2x2 checkerboard), [1]/[2] row/col offset in the
        24x24 training-sample coordinate system, [3..6] pattern-dependent
        segment sizes combined into the feature's width and height.
    integralImg: integral image of the frame being scanned.

    Returns the (white area sum - black area sum), rescaled back to the
    24x24 training-sample scale so thresholds trained at that scale apply.
    """
    # scaling features
    boxSize = box[0]
    # @ TODO the calFeatures file
    pattern = featureChosen[0]
    areaPos_i = box[1]
    areaPos_j = box[2]
    # Features were trained on 24x24 samples; scale maps sample coords to
    # the current window size.
    sampleSize = 24
    scale = np.sqrt(boxSize) / sampleSize
    # scaling the i and j of the feature (+0.5 rounds to nearest int)
    i = featureChosen[1]
    j = featureChosen[2]
    i = int(scale*i + 0.5)
    j = int(scale*j + 0.5)
    # abs_i and abs_j will be used to calculate the integral image result;
    # they indicate the feature position inside the real frame
    abs_i = areaPos_i + i
    abs_j = areaPos_j + j
    # getting the haar feature width and height:
    # we will check on the feature pattern to get the width
    width = featureChosen[4] + featureChosen[5] + featureChosen[6] if pattern <= 2 else featureChosen[6]
    width += featureChosen[5] if pattern == 5 else 0 # as feature five width is at 5,6
    # we will check on the feature pattern to get the height
    height = featureChosen[3] if pattern <= 2 else featureChosen[3] + featureChosen[4]
    # feature five height is at 3,4 while feature three and four their heights is at 3,4,5
    height += featureChosen[5] if (pattern > 2 and pattern < 5) else 0
    # original (unscaled) area of the feature, used to normalise the result
    originArea = width*height
    # scaling the width and the height of the feature
    width = int(scale*width + 0.5)
    height = int(scale*height + 0.5)
    # scaling the feature pattern one i.e. 1x2 feature
    if(pattern == 1):
        '''
        the height of the feature may exceeds the box's size - i as
        boxSize - i is the maximum side the feature's height can hold
        '''
        height = height if height < (np.sqrt(boxSize) - i) else (np.sqrt(boxSize) - i)
        '''
        the width of the feature may exceeds the box's size - j as
        boxSize - j is the maximum side the feature's width can hold
        '''
        # we should make sure that width is divisible by 2 after scaling
        width = width if width % 2 == 0 else width + 1
        while (width > np.sqrt(boxSize) - j):
            width -= 2
        # the increment slice which would indicate the size of the white and black areas
        increment = int(width / 2)
        # then calculate the integral image:
        # summation of the white area
        white = calIntegralImage(integralImg,abs_i,abs_j,increment,height)
        # summation of the black area
        black = calIntegralImage(integralImg,abs_i,abs_j+increment,increment,height)
        featureResult = (white-black)
        # rescale the feature to its original scale
        # (multiplying by originArea/(w*h) is equivalent to the division
        # used by the other branches)
        reScale = originArea/(width*height)
        featureResult = featureResult * reScale
        return featureResult
    # scaling the feature pattern two i.e. 1x3 feature
    elif(pattern == 2):
        '''
        the height of the feature may exceeds the box's size - i as
        boxSize - i is the maximum side the feature's height can hold
        '''
        height = height if height < (np.sqrt(boxSize) - i) else (np.sqrt(boxSize) - i)
        # we should make sure that width is divisible by 3 after scaling
        width = width if width % 3 == 0 else ((width + 2 if width % 3 == 1 else width + 1))
        '''
        the width of the feature may exceeds the box's size - j as
        boxSize - j is the maximum side the feature's width can hold
        '''
        while (width > np.sqrt(boxSize) - j):
            width -= 3
        # the increment slice which would indicate the size of the white and black areas
        increment = int(width / 3)
        # then calculate the integral image:
        # summation of the first white area
        white = calIntegralImage(integralImg,abs_i,abs_j,increment,height)
        # summation of the black area
        black = calIntegralImage(integralImg,abs_i,abs_j+increment,increment,height)
        # summation of the second and the first white area
        white = white + calIntegralImage(integralImg,abs_i,abs_j+2*increment,increment,height)
        featureResult = (white-black)
        # rescale the feature to its original scale
        reScale = (width*height)/originArea
        featureResult /= reScale
        return featureResult
    # scaling the feature pattern three i.e. 2x1 feature
    elif(pattern == 3):
        '''
        the width of the feature may exceeds the box's size - j as
        boxSize - j is the maximum side the feature's width can hold
        '''
        width = width if width < (np.sqrt(boxSize) - j) else (np.sqrt(boxSize) - j)
        '''p
        the height of the feature may exceeds the box's size - i as
        boxSize - i is the maximum side the feature's height can hold
        '''
        # we should make sure that height is divisible by 2 after scaling
        height = height if height % 2 == 0 else height + 1
        while (height > np.sqrt(boxSize) - i):
            height -= 2
        # the increment slice which would indicate the size of the white and black areas
        increment = int(height / 2)
        # then calculate the integral image:
        # summation of the white area
        white = calIntegralImage(integralImg,abs_i,abs_j,width,increment)
        # summation of the black area
        black = calIntegralImage(integralImg,abs_i+increment,abs_j,width,increment)
        featureResult = (white-black)
        # rescale the feature to its original scale
        reScale = (width*height)/originArea
        featureResult /= reScale
        return featureResult
    # scaling the feature pattern four i.e. 3x1 feature
    elif(pattern == 4):
        '''
        the width of the feature may exceeds the box's size - j as
        boxSize - j is the maximum side the feature's width can hold
        '''
        width = width if (width < (np.sqrt(boxSize) - j)) else (np.sqrt(boxSize) - j)
        '''
        the height of the feature may exceeds the box's size - i as
        boxSize - i is the maximum side the feature's height can hold
        '''
        # we should make sure that height is divisible by 3 after scaling
        height = height if height % 3 == 0 else ((height + 2 if height % 3 == 1 else height + 1))
        while (height > np.sqrt(boxSize) - i):
            height -= 3
        # the increment slice which would indicate the size of the white and black areas
        increment = int(height / 3)
        # then calculate the integral image:
        # summation of the first white area
        white = calIntegralImage(integralImg,abs_i,abs_j,width,increment)
        # summation of the black area
        black = calIntegralImage(integralImg,abs_i+increment,abs_j,width,increment)
        # summation of the second and the first white area
        white = white + calIntegralImage(integralImg,abs_i+2*increment,abs_j,width,increment)
        featureResult = (white-black)
        # rescale the feature to its original scale
        reScale = (width*height)/originArea
        featureResult /= reScale
        return featureResult
    # scaling the feature pattern five i.e. 2x2 feature
    else:
        '''
        the width of the feature may exceeds the box's size - j as
        boxSize - j is the maximum side the feature's width can hold
        '''
        # we should make sure that width is divisible by 2 after scaling
        width = width if width % 2 == 0 else width + 1
        while (width > np.sqrt(boxSize) - j):
            width -= 2
        '''
        the height of the feature may exceeds the box's size - i as
        boxSize - i is the maximum side the feature's height can hold
        '''
        # we should make sure that height is divisible by 2 after scaling
        height = height if height % 2 == 0 else height + 1
        while (height > np.sqrt(boxSize) - i):
            height -= 2
        # the increment slices which would indicate the size of the white and black areas
        incrementH = int(height / 2) # increment Height
        incrementW = int(width / 2) # increment Width
        # then calculate the integral image:
        # summation of the first white area
        white = calIntegralImage(integralImg,abs_i,abs_j,incrementW,incrementH)
        # summation of the first and the second white areas
        white = white + calIntegralImage(integralImg,abs_i+incrementH,abs_j+incrementW,incrementW,incrementH)
        # summation of the black area
        black = calIntegralImage(integralImg,abs_i+incrementH,abs_j,incrementW,incrementH)
        # summation of the second and the first black area
        black = black + calIntegralImage(integralImg,abs_i,abs_j+incrementW,incrementW,incrementH)
        featureResult = (white-black)
        # rescale the feature to its original scale
        reScale = (width*height)/originArea
        featureResult /= reScale
        return featureResult
    #return rects
# Sliding-window scan parameters used by detect_face.
stepSizeW = 20  # horizontal stride of the scan window, in pixels
stepSizeH = 20  # vertical stride of the scan window, in pixels
minSize = 24    # smallest window side length (matches the 24x24 training samples)
sizeStep = 6    # increment of the window side length between scan passes
class Rect:
    """Axis-aligned rectangle given by its top-left and bottom-right
    (x, y) corner tuples."""

    # BUGFIX: the constructor was misspelled `__init_` (one underscore
    # short), so Python never treated it as the initialiser and
    # `Rect(startBox, endBox)` raised "TypeError: Rect() takes no arguments".
    def __init__(self, startBox, endBox):
        self.startBox = startBox
        self.endBox = endBox
def detect_face(frame, frameWidth, frameHeight):
    """Scan the frame with square windows of growing size and return the
    list of [Rect] windows accepted by the cascade classifier.

    frame: input image; it is greyscaled, normalised and converted to an
    integral image before scanning.
    """
    image = Grey_img(frame)
    norm_image = normalizeImages(image)
    iimage = get_integral_image(norm_image)
    rects = []
    # Windows are square, so the largest side is bounded by the smaller
    # frame dimension.
    minFrame = frameWidth if frameWidth < frameHeight else frameHeight
    # BUGFIX: range() does not accept a `step=` keyword argument (the
    # original raised TypeError), and the inner ranges were missing their
    # start value. All three are now plain positional (start, stop, step).
    for b in range(minSize, minFrame, sizeStep):
        for w in range(0, frameWidth - b, stepSizeW):
            for h in range(0, frameHeight - b, stepSizeH):
                startBox = (w, h)
                endBox = (w + b, h + b)
                rect_object = Rect(startBox, endBox)
                if (cascade(rect_object, iimage)):
                    rects.append([rect_object])
    return rects
|
# modified config_10gbe_core function from katcp_wrapper.py from the corr library to include a subnet hack
def config_10gbe_core(self, device_name, mac, ip, port, arp_table, gateway=1):
    """Hard-code a 10GbE core's configuration with blind writes.

    No read-back verification is performed, since several of these
    registers are updated by the fabric itself as traffic arrives.

    @param self This object.
    @param device_name String: name of the device.
    @param mac integer: MAC address, 48 bits.
    @param ip integer: IP address, 32 bits.
    @param port integer: port of fabric interface (16 bits).
    @param arp_table list of integers: MAC addresses (48 bits ea).
    """
    # Core register map (byte offsets):
    #   0x00-0x07 MAC          0x0c-0x0f gateway      0x10-0x13 IP
    #   0x18-0x1b buffer sizes 0x20 soft reset        0x21 fabric enable
    #   0x22-0x23 fabric port  0x24-0x27 XAUI status  0x28-0x2b PHY config
    #   0x1000 CPU TX buffer   0x2000 CPU RX buffer   0x3000 ARP tables
    header = struct.pack('>QLLLLLLBBH',
                         mac, 0, gateway, ip, 0, 0, 0, 0, 1, port)
    # Subnet hack: a /22 netmask blind-written at offset 0x38.
    netmask = struct.pack('>L', 0xfffffc00)
    arp_entries = struct.pack('>256Q', *arp_table)
    self.blindwrite(device_name, header, offset=0)
    self.blindwrite(device_name, netmask, offset=0x38)
    self.write(device_name, arp_entries, offset=0x3000)
|
# Board/UI configuration -- a 6x7 grid, which looks like Connect Four
# (TODO confirm against the game loop that consumes these).
NUM_OF_ROWS = 6
NUM_OF_COLS = 7
DEPTH = 4  # presumably the game-tree search depth -- verify in the AI code
# RGB colour tuples.
BLUE = (61, 164, 171)
YELLOW = (246, 205, 97)
RED = (254, 138, 113)
BLACK = (74, 78, 77)
# Pixel sizes: board cell edge and disc radius (presumably; confirm in renderer).
SQUARE = 100
CIRCLE = 45
|
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
def cluster_images(container):
    """Cluster (positive, negative) score pairs with 2-means and save a
    scatter plot of positive score vs rating, coloured by cluster, to
    'positive_ratings.pdf'.

    container: iterable of records where index 1 is the positive score,
    index 2 the negative score and index 3 the rating -- assumed from the
    indexing here; TODO confirm against the producer of `container`.
    """
    positive = np.asarray([element[1] for element in container])
    ratings = np.asarray([element[3] for element in container])
    # The original also built a `negative` array (element[2] alone) that was
    # never used -- removed.
    combined = np.asarray([[element[1], element[2]] for element in container])
    # Fixed random_state keeps cluster labelling reproducible between runs.
    y_pred = KMeans(n_clusters=2, random_state=0).fit_predict(combined)
    plt.figure(figsize=(12, 12))
    plt.scatter(positive, ratings, c=y_pred)
    plt.title("positive vs ratings ")
    plt.savefig('positive_ratings.pdf')
    plt.clf()
|
# from collector.Collector import Collector
# c = Collector()
# data = c.collect()
# print("\n\n\nFound data...\n\n\n")
# for i in data:
# print(i.content.text)
# print(i.label)
# print()
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn import metrics
from keras.models import Sequential
from keras import layers
from keras import regularizers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.optimizers import SGD
from sklearn import model_selection, naive_bayes, svm
from keras import callbacks
from preprocessor.RemoveNoise import RemoveNoise
from preprocessor.LowerCase import LowerCase
from preprocessor.StopWords import StopWords
from preprocessor.Stemming import Stemming
def plot_history(history, title):
    """Save side-by-side accuracy/loss curves of a Keras training run.

    history: a Keras History object. The 'acc'/'val_acc' keys imply an older
    Keras metric naming -- newer versions report 'accuracy'/'val_accuracy'
    (TODO confirm against the installed Keras version).
    title: figure title; also used as the output file name ("<title>.png").
    """
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    # 1-based epoch numbers on the x axis.
    x = range(1, len(acc) + 1)
    plt.figure()
    plt.title(title)
    # Left panel: training vs validation accuracy.
    plt.subplot(1, 2, 1)
    plt.plot(x, acc, 'b', label='Acurácia de treinamento')
    plt.plot(x, val_acc, 'r', label='Acurácia de validação')
    plt.title('Acurácia')
    plt.legend()
    # Right panel: training vs validation loss.
    plt.subplot(1, 2, 2)
    plt.plot(x, loss, 'b', label='Erro de treinamento')
    plt.plot(x, val_loss, 'r', label='Erro de validação')
    plt.title('Erro')
    plt.legend()
    plt.savefig(title+".png")
def print_metrics(model, X, y, type = "rna"):
    """Print the confusion matrix, accuracy, precision, recall and F1 for
    `model` evaluated on (X, y), treating label 1 as "fake".

    type: "rna" -> model.predict returns a 2-D array (one probability per
    row, taken from column 0); anything else (e.g. "svm") -> predictions
    are a flat 1-D array.
    """
    pred = model.predict(X)
    y_pred = []
    y_true = []
    # Round each predicted probability to a hard 0/1 label.
    # The original also tallied true/false fake/real counters here, but
    # their only consumers were commented-out prints -- dead code, removed.
    for i in range(len(pred)):
        val = pred[i][0] if type == "rna" else pred[i]
        y_pred.append(int(round(val)))
        y_true.append(y[i])
    print("Confusion matrix:")
    print(pd.DataFrame(metrics.confusion_matrix(y_true, y_pred, labels=[1, 0]), index=['true:fake', 'true:real'], columns=['pred:fake', 'pred:real']))
    print("\nAccuracy:", metrics.accuracy_score(y_true, y_pred))
    print("Precision:", metrics.precision_score(y_true, y_pred))
    print("Recall:", metrics.recall_score(y_true, y_pred))
    print("F1 score:", metrics.f1_score(y_true, y_pred))
    print("\n==================================================================================\n\n")
def train_test_ds(x, y, test_size=0.20):
    """Stratified train/test split of samples x and labels y.

    Returns [X_train, X_test, y_train, y_test], mirroring
    sklearn.model_selection.train_test_split but as a list.
    """
    # The original rebound x/y to throwaway locals (`sentences = x; y = y`)
    # and carried a duplicate commented-out split call -- both removed.
    X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=test_size, stratify=y)
    return [X_train, X_test, y_train, y_test]
def ann_classifier(X_train, X_test, y_train, y_test, neuronRate = 1, epochs = 3, layers_qtd = 1, batch = 64):
    """Train a dense feed-forward classifier on bag-of-words counts and
    return its test accuracy. Also plots history and prints metrics.

    neuronRate: hidden width as a fraction of the vocabulary size.
    layers_qtd: number of L2-regularised hidden layers.
    batch: minibatch size. BUGFIX: this parameter was accepted but ignored
    -- model.fit() hard-coded batch_size=64. It is now honoured (the
    default is still 64, so existing callers behave identically).
    """
    vectorizer = CountVectorizer()
    vectorizer.fit(X_train)
    X_train = vectorizer.transform(X_train)
    X_test = vectorizer.transform(X_test)
    input_dim = X_train.shape[1]
    # Hidden-layer width: vocabulary size scaled by neuronRate.
    L = int(input_dim * neuronRate)
    model = Sequential()
    model.add(layers.Dense(input_dim, input_dim=input_dim, activation='relu'))
    for _ in range(layers_qtd):
        model.add(layers.Dense(L, input_dim=L, kernel_regularizer=regularizers.l2(0.01), activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    history = model.fit(X_train, y_train, epochs=epochs, verbose=True,
                        validation_data=(X_test, y_test), batch_size=batch)
    plot_history(history, "Rede neural")
    loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
    print("\n\nRNA RESULTS\n")
    print_metrics(model, X_test, y_test)
    return accuracy
def cnn_classifier(X_train, X_test, y_train, y_test, n_filters = 50, epochs = 3, dim = 100):
    """Train a 1-D CNN text classifier (embedding -> conv -> pool -> dense)
    and return its test accuracy. Also prints metrics via print_metrics.

    n_filters: number of Conv1D filters; dim: embedding dimension.
    """
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(X_train)
    X_train = tokenizer.texts_to_sequences(X_train)
    X_test = tokenizer.texts_to_sequences(X_test)
    # Pad/truncate everything to the length of the longest TRAINING sample.
    maxSampleSize = 0
    for sample in X_train:
        if len(sample) > maxSampleSize:
            maxSampleSize = len(sample)
    print(maxSampleSize)
    vocab_size = len(tokenizer.word_index) + 1
    maxlen = maxSampleSize
    embedding_dim = dim  # unused; the Embedding layer below uses `dim` directly
    X_train = pad_sequences(X_train, padding='post', truncating='post', maxlen=maxlen)
    X_test = pad_sequences(X_test, padding='post', truncating='post', maxlen=maxlen)
    model = Sequential()
    model.add(layers.Embedding(input_dim=vocab_size, output_dim=dim, input_length=maxlen))
    model.add(layers.Conv1D(n_filters, kernel_size=5, padding='valid', activation='relu', strides=1))
    model.add(layers.MaxPool1D())
    model.add(layers.Flatten())
    model.add(layers.Dense(1000, input_dim=1000, kernel_regularizer=regularizers.l2(0.01), activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()
    # Stop early once validation loss has not improved for 2 epochs.
    es_callback = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    history = model.fit(X_train, y_train, epochs=epochs, verbose=True, validation_data=(X_test, y_test), batch_size=64, callbacks=[es_callback])
    loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
    print("\n\nCNN RESULTS\n")
    print_metrics(model, X_test, y_test)
    return accuracy
def svm_classifier(X_train, X_test, y_train, y_test, c, gamma):
    """Train an RBF-kernel SVM on bag-of-words counts and return accuracy.

    Args:
        X_train, X_test: iterables of raw text samples.
        y_train, y_test: binary labels.
        c: SVC regularisation parameter C.
        gamma: RBF kernel coefficient.

    Returns:
        Test accuracy in percent (0-100).  The original computed this value
        but never returned it, even though the (commented) caller assigns
        `res = svm_classifier(...)`.
    """
    vectorizer = CountVectorizer()
    vectorizer.fit(X_train)
    X_train = vectorizer.transform(X_train)
    X_test = vectorizer.transform(X_test)
    clf = svm.SVC(C=c, kernel='rbf', gamma=gamma, random_state=0)
    clf.fit(X_train, y_train)
    # predict the labels on validation dataset
    predictions_SVM = clf.predict(X_test)
    acc = metrics.accuracy_score(predictions_SVM, y_test)*100
    # Only dump the full metric report for reasonably good runs.
    if acc > 70:
        print_metrics(clf, X_test, y_test, "svm")
    return acc
def svm_classifier(X_train, X_test, y_train, y_test, c=1.0, gamma='scale'):
    """Train an RBF-kernel SVM on bag-of-words counts and return accuracy.

    NOTE(review): this second definition shadows the earlier
    `svm_classifier` in the same module — confirm which one is intended.

    Bug fixes versus the original: `c`, `gamma` and `SVM` were undefined
    names (NameError at runtime); they are now defaulted parameters and the
    fitted classifier is referenced consistently as `clf`.

    Returns:
        Test accuracy in percent (0-100).
    """
    vectorizer = CountVectorizer()
    vectorizer.fit(X_train)
    X_train = vectorizer.transform(X_train)
    X_test = vectorizer.transform(X_test)
    clf = svm.SVC(C=c, kernel='rbf', gamma=gamma, random_state=0)
    clf.fit(X_train, y_train)
    # predict the labels on validation dataset
    predictions_SVM = clf.predict(X_test)
    acc = metrics.accuracy_score(predictions_SVM, y_test)*100
    if acc > 70:
        print_metrics(clf, X_test, y_test, "svm")
    return acc
# Load the two Brazilian fact-checking corpora (tab-separated: text, label)
# and merge them into one DataFrame.
lupa = pd.read_csv('./data/CollectorLupa', sep='\t', names=['text', 'label'])
aosfatos = pd.read_csv('./data/CollectorAosfatos', sep='\t', names=['text', 'label'])
dataframe = lupa.append(aosfatos)  # NOTE(review): DataFrame.append is deprecated in newer pandas; pd.concat is the replacement
# Project-local text-normalisation steps applied in place.
removeNoise = RemoveNoise()
lowerCase = LowerCase()
stopWords = StopWords()
stemming = Stemming()  # NOTE(review): instantiated but never executed below — confirm stemming is intentionally skipped
removeNoise.execute(dataframe, True, True)
lowerCase.execute(dataframe, True)
stopWords.execute(dataframe, True)
# Map labels to binary classes: "verdadeiro" (true) -> 0, "falso" (fake) -> 1,
# counting each class; any other label is marked for deletion.
true = 0
false = 0
todelete = []  # NOTE(review): indexes collected here are never dropped from `dataframe`
x = []
y = []
df = pd.DataFrame(columns=['text', 'label'])  # NOTE(review): never used afterwards
for index, row in dataframe.iterrows():
    if row['label'].lower() == "verdadeiro":
        true += 1
        row['label'] = 0
        x.append(row['text'])
        y.append(row['label'])
    elif row['label'].lower() == "falso":
        row['label'] = 1
        false += 1
        x.append(row['text'])
        y.append(row['label'])
    else:
        # Unrecognised label: remember the row index (unused, see above).
        todelete.append(index)
# 80/20 split via the project helper; by the usage below, data unpacks as
# (X_train, X_test, y_train, y_test) — TODO confirm against train_test_ds.
data = train_test_ds(x, y, test_size=0.2)
# Class balance of the test split (0 = true, 1 = fake).
test_true = 0
test_false = 0
for i in range (len(data[3])):
    val = data[3][i]
    if (val == 0):
        test_true += 1
    else:
        test_false += 1
# Class balance of the training split.
train_true = 0
train_false = 0
for i in range (len(data[2])):
    val = data[2][i]
    if (val == 0):
        train_true += 1
    else:
        train_false += 1
print("TRAIN FALSE: ", train_false)
print("TRAIN TRUE", train_true)
print("TEST FALSE: ", test_false)
print("TEST TRUE", test_true)
# for i in range(10, 21):
# gamma = i * 0.01
# for j in range(1, 11):
# c = j * 0.1
# print(gamma, c)
# res = svm_classifier(data[0], data[1], data[2], data[3], c, gamma)
# #print("%.2f" % round(res,2) + ";", end = '')
# #print()
#ann_classifier(data[0], data[1], data[2], data[3], 0.5, 10, 1, 64)
# Grid-search the ANN: hidden-layer count (4..5) x batch size (16..128 step
# 16), averaging `qtd` runs per cell into resultMatrix.
resultMatrix = []
for i in range(4, 6):
    layers_qtd = i
    lineResult = []
    for j in range(16, 129, 16):
        batch = j
        sum = 0  # NOTE(review): shadows the builtin `sum` from here on
        qtd = 3
        print("Test", layers_qtd, batch)
        for q in range(0, qtd):
            # args: (X_train, X_test, y_train, y_test, dropout?, epochs, layers, batch) — TODO confirm signature
            sum += ann_classifier(data[0], data[1], data[2], data[3], 0.5, 3, layers_qtd, batch)
        lineResult.append(sum/qtd)
    resultMatrix.append(lineResult)
# resultMatrix = []
# for i in range(170, 171, 20):
# filters = i
# lineResult = []
# for j in range(30, 31, 30):
# dim = j
# sum = 0
# qtd = 3
# print("Test", filters, dim)
# for q in range(0, qtd):
# sum += cnn_classifier(data[0], data[1], data[2], data[3], filters, 10, dim)
# lineResult.append(sum/qtd)
# resultMatrix.append(lineResult)
# Dump the grid as semicolon-separated accuracies, one row per layer count
# (matches the result tables pasted in the comments below).
for i in resultMatrix:
    for j in i:
        print("%.4f" % round(j, 4), ";", end = '')
    print()
# Confusion matrix:
# pred:fake pred:real
# true:fake 28 21
# true:real 9 53
# Accuracy: 0.7297297297297297
# Precision: 0.7567567567567568
# Recall: 0.5714285714285714
# F1 score: 0.6511627906976745
# 0.6997 ;0.7027 ;0.7027 ;0.6907 ;0.6967 ;0.6847 ;0.6907 ;0.6817 ;
# 0.6757 ;0.6727 ;0.6817 ;0.6817 ;0.6697 ;0.6757 ;0.6817 ;0.6577 ;
# 0.6727 ;0.6727 ;0.6847 ;0.6787 ;0.6877 ;0.7027 ;0.6877 ;0.7027 ;
# 0.6547 ;0.6186 ;0.6396 ;0.6426 ;0.6517 ;0.6336 ;0.6426 ;0.6396 ;
# 0.6456 ;0.6486 ;0.6096 ;0.6366 ;0.6456 ;0.6426 ;0.6517 ;0.6366 ;
# 0.6276 ;0.6336 ;0.6246 ;0.5976 ;0.6156 ;0.6366 ;0.6126 ;0.6336 ;
# 0.5586 ;0.5586 ;0.5586 ;0.5646 ;0.5706 ;0.5586 ;0.5586 ;0.5586 ;
# 0.5586 ;0.5586 ;0.5706 ;0.6096 ;0.6096 ;0.6156 ;0.6036 ;
# 0.5586 ;0.5586 ;0.6006 ;0.6156 ;0.5856 ;0.5916 ;0.6156 ;
# 0.5586 ;0.5586 ;0.6006 ;0.6336 ;0.6156 ;0.6066 ;0.6216 ;
# 0.5586 ;0.5586 ;0.6186 ;0.6006 ;0.6126 ;0.5826 ;0.6276 ;
# 0.5586 ;0.5586 ;0.6006 ;0.5826 ;0.5826 ;0.6186 ;0.6306 ;
# 0.5586 ;0.5616 ;0.6096 ;0.5886 ;0.5976 ;0.6066 ;0.6607 ;
# 0.5586 ;0.5586 ;0.6276 ;0.5826 ;0.5856 ;0.5976 ;0.6306 ;
# 0.5586 ;0.5586 ;0.5766 ;0.6006 ;0.5946 ;0.6306 ;0.6697 ;
# 10 a 20 epocas
# 0.6757 ;0.6787 ;0.6817 ;0.6907 ;0.6967 ;0.7087 ;0.6937 ;0.6847 ;0.6937 ;0.6877 ;0.6787 ;
# 0.6907 ;0.6787 ;0.6757 ;0.6937 ;0.6697 ;0.6847 ;0.6967 ;0.6937 ;0.6877 ;0.6937 ;0.6937 ;
# 0.6877 ;0.6787 ;0.6937 ;0.6877 ;0.6787 ;0.6967 ;0.6727 ;0.6817 ;0.6937 ;0.6907 ;0.6847 ;
# 0.6697 ;0.6907 ;0.7027 ;0.6937 ;0.6757 ;0.6847 ;0.6757 ;0.6757 ;0.6847 ;0.6607 ;0.6727 ;
# 15 22 epocas
# 0.6937 ;0.6877 ;0.6937 ;0.6727 ;0.6997 ;0.6877 ;0.6937 ;
# 0.7057 ;0.6847 ;0.6697 ;0.6607 ;0.6847 ;0.7027 ;0.6937 ;
# 0.6787 ;0.7057 ;0.7027 ;0.6907 ;0.7177 ;0.6937 ;0.6517 ;
# 0.7027 ;0.6877 ;0.6997 ;0.6877 ;0.6937 ;0.6937 ;0.6727 ;
# dim 50 301
# 0.6937 ;0.7027 ;0.6877 ;0.7177 ;0.6637 ;0.6787 ;0.6787 ;
# 0.6817 ;0.7087 ;0.6997 ;0.6877 ;0.7087 ;0.6757 ;0.6727 ;
# 0.6667 ;0.6847 ;0.6697 ;0.6757 ;0.6997 ;0.6757 ;0.6967 ;
# 0.6967 ;0.7027 ;0.6877 ;0.6907 ;0.6967 ;0.6727 ;0.6577 ;
# 0.6937 ;0.7027 ;0.6907 ;0.6877 ;0.6727 ;0.6967 ;0.6937 ;
# 0.6817 ;0.6847 ;0.7237 ;0.6937 ;0.6937 ;0.6877 ;0.6907 ; |
from sklearn.datasets import load_breast_cancer
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import scale
import pandas as pd
# Unsupervised demo: 2-means clustering of the (standardised) breast-cancer
# features, compared afterwards against the true benign/malignant labels.
bc = load_breast_cancer()
print(bc)
x = scale(bc.data)  # zero-mean / unit-variance per feature
print(x)
y = bc.target
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.2)
model = KMeans(n_clusters=2, random_state=0)
model.fit(x_train) # we don't have to pass y as it is all about unsupervised learning
predictions = model.predict(x_test)
labels = model.labels_  # cluster id of each *training* sample
print('labels', labels)
print('predictions', predictions)
# NOTE(review): KMeans cluster ids are arbitrary — cluster 0/1 may be swapped
# relative to the true classes, so this "accuracy" can read as 1 - accuracy.
print('accuracy', accuracy_score(y_test, predictions))
print('actual', y_test)
print(pd.crosstab(y_train,labels))  # train labels vs clusters contingency table
from django.contrib import admin
from django.urls import path,include
from home import views
# Route table for the notes app: pages, CRUD on individual notes (by id),
# search, auth flows (login/signup/logout) and a JSON dump of all notes.
urlpatterns = [
    path('',views.index,name="home"),
    path("notes/",views.about, name="notes"),
    path('delete/<int:id>',views.delete,name= "delete"),
    path('update/<int:id>',views.update,name = "update"),
    path('edit/<int:id>',views.edit,name = "edit"),
    path('back/',views.back,name="back"),
    path('search/',views.search,name="search"),
    path('login_page',views.handleLogin,name="handleLogin"),
    path('signup_page',views.handleSignup,name="handleSignup"),
    path('signup',views.signupUser,name="signupUser"),
    path('logout',views.logoutUser,name="logoutUser"),
    path('login',views.login,name="login"),
    path('allnotesjson',views.notesjson,name='notejson'),
    # path('api_updatenote',views.api_updatenote,name="api_updatenote")
]
|
#!/usr/bin/env python3
# coding=utf-8
# Order-alert mailer: reads SMTP settings from conf.ini and sends HTML mail.
import smtplib
import logging
import configparser
from email.mime.text import MIMEText
from email.utils import formataddr
LOGGER = logging.getLogger(__name__)
# All connection settings live in the [EMAIL] section of conf.ini.
conf = configparser.ConfigParser()
conf.read("conf.ini")
def send_email(msg):
    """Send *msg* as an HTML e-mail using the [EMAIL] settings in conf.ini.

    Args:
        msg: HTML body of the message.

    Raises:
        smtplib.SMTPException: re-raised after logging when sending fails.
    """
    mail_host = conf.get("EMAIL", "mail_host")
    rec_user = conf.get("EMAIL", "rec_user")
    mail_pass = conf.get("EMAIL", "mail_pass")
    sender = conf.get("EMAIL", "sender")
    message = MIMEText(msg, _subtype='html', _charset='utf-8')
    message['From'] = formataddr(["Python3", sender])
    message['To'] = formataddr(["路成督", rec_user])
    message['Subject'] = "**** 订单数据预警 from Python ****"
    smtp_obj = None
    try:
        smtp_obj = smtplib.SMTP(mail_host, 25)
        smtp_obj.login(sender, mail_pass)
        smtp_obj.sendmail(sender, [rec_user, ], message.as_string())
        LOGGER.info("邮件发送成功")
    except smtplib.SMTPException as e:
        LOGGER.error("Error: 无法发送邮件, {}".format(e))
        raise
    finally:
        # Fix: the original only quit() on success, leaking the connection
        # whenever login/sendmail raised.
        if smtp_obj is not None:
            try:
                smtp_obj.quit()
            except smtplib.SMTPException:
                smtp_obj.close()
if __name__ == '__main__':
    # Manual smoke test: send a trivial message using conf.ini settings.
    send_email("123")
|
from dataclasses import field
from datetime import datetime

import pytz as pytz
from marshmallow_dataclass import dataclass

from src.core.datastructures.base import BaseDataStruct
@dataclass
class CoinPrice(BaseDataStruct):
    """Snapshot of a coin quote at a UTC timestamp."""

    # Fix: the old class-level default was evaluated once at import time, so
    # every instance shared the module-load timestamp.  default_factory
    # re-evaluates it per instance.
    time: datetime = field(default_factory=lambda: pytz.utc.localize(datetime.utcnow()))
    currency: str = ""
    quote: float = 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse,JsonResponse
import os
from django.conf import settings
from models import *
from django.core.paginator import *
import json
from django.views.decorators.cache import cache_page
from django.core.cache import cache
from task import *
def index_1(request):
    """Render the booktest index_1 page."""
    template_name = 'booktest/index_1.html'
    return render(request, template_name)
def myExp(request):
    # int('abc') raises ValueError on purpose — presumably a view for
    # exercising the exception/500 handling path; the return is unreachable.
    a1=int('abc')
    return HttpResponse('hello')
def uploadPic(request):
    """Show the picture-upload form."""
    upload_template = 'booktest/uploadPic.html'
    return render(request, upload_template)
def uploadHandle(request):
    """Save the uploaded picture under MEDIA_ROOT and echo it back as <img>."""
    pic1=request.FILES['pic1']
    picName=os.path.join(settings.MEDIA_ROOT,pic1.name)
    # Fix: open in binary mode — image chunks are bytes; text mode ('w')
    # would corrupt the file on platforms with newline translation.
    with open(picName,'wb') as pic:
        for c in pic1.chunks():
            pic.write(c)
    return HttpResponse('<img src="/static/media/%s"/>'%pic1.name)
def herolist(request,pindex):
    """Render page `pindex` of all heroes, five per page ('' means page 1)."""
    if pindex=='':
        pindex='1'
    heroes = HeroInfo.objects.all()
    paginator = Paginator(heroes, 5)
    current_page = paginator.page(int(pindex))
    return render(request, 'booktest/herolist.html', {'page': current_page})
def index(request):
    """Render the booktest landing page."""
    template_name = 'booktest/index.html'
    return render(request, template_name)
def pro(request):
    """Return all top-level areas (provinces) as JSON: [[id, title], ...]."""
    provinces = AreaInfo.objects.filter(parea__isnull=True)
    pairs = [[province.id, province.title] for province in provinces]
    return JsonResponse({'data': pairs})
def city(request,id):
    """Return the child areas of area `id` as JSON: [{id, title}, ...]."""
    children = AreaInfo.objects.filter(parea_id=id)
    records = [{'id': child.id, 'title': child.title} for child in children]
    return JsonResponse({'data': records})
def htmlEditor(request):
    """Show the rich-text (HTML) editor page."""
    editor_template = 'booktest/htmlEditor.html'
    return render(request, editor_template)
def htmlEditorHandle(request):
    """Persist submitted editor HTML as a new Test1 row and render it back."""
    html=request.POST['hcontent']
    # A fresh row is inserted on every submit (updating pk=1 was an earlier,
    # now-abandoned approach).
    record = Test1()
    record.content = html
    record.save()
    return render(request,'booktest/htmlShow.html', {'content': html})
#@cache_page(60*10)
def cache1(request):
    # Scratch view used while experimenting with Django's cache framework;
    # the commented lines are the per-view-cache and low-level-cache trials.
    #return HttpResponse('hell1')
    #return HttpResponse('hell2')
    #cache.set('key1','value1',600)
    #print(cache.get('key1'))
    #return render(request, 'booktest/cache1.html')
    # Current behaviour: wipe the entire cache.
    cache.clear()
    return HttpResponse('ok')
def mysearch(request):
    """Render the search page."""
    search_template = 'booktest/mysearch.html'
    return render(request, search_template)
def celeryTest(request):
    """Kick off the celery `show` task and acknowledge immediately."""
    show()
    return HttpResponse('ok')
from flask import Flask, render_template, url_for, request
from util import json_response
import util
import data_handler
app = Flask(__name__)
# Joel: joel123
# Adam: adam123
# Alex: alex123
# Gergő: gergo123
@app.route("/")
def index():
    """Serve the single-page app shell."""
    page = 'index.html'
    return render_template(page)
@app.route("/get-boards")
@json_response
def get_boards():
    """Return every row of the `boards` table."""
    boards = data_handler.get_all_from_table('boards')
    return boards
@app.route("/get-cards")
@json_response
def get_all_cards():
    """Return every row of the `cards` table."""
    cards = data_handler.get_all_from_table('cards')
    return cards
@app.route("/get-statuses")
@json_response
def get_statuses():
    """Return every row of the `statuses` table."""
    statuses = data_handler.get_all_from_table('statuses')
    return statuses
@app.route('/create-new-board', methods=['GET', 'POST'])
@json_response
def create_new_board():
    """Create a public board with its default statuses; return the new board."""
    data_handler.create_new_board()
    newest_board = data_handler.get_last_board()
    data_handler.create_status(newest_board[0]['id'])
    return newest_board
@app.route('/create-private-board', methods=['GET', 'POST'])
@json_response
def create_private_board():
    """Create a board owned by the posted user id; return the new board."""
    payload = request.get_json()
    owner_id = int(payload['owner'])
    data_handler.create_new_board(owner_id)
    newest_board = data_handler.get_last_board()
    data_handler.create_status(newest_board[0]['id'])
    return newest_board
@app.route("/create-card", methods=["GET", "POST"])
@json_response
def create_card():
    """Add a card to the given board/status, then return every card."""
    payload = request.get_json()
    data_handler.create_card(payload["board_id"], payload["status_id"])
    return data_handler.get_all_from_table('cards')
@app.route("/delete-card", methods=["GET", "POST"])
@json_response
def delete_card():
    """Delete the card whose id is posted as the JSON body."""
    posted_id = request.get_json()
    return data_handler.delete_card(posted_id)
@app.route("/rename", methods=['GET', 'POST'])
@json_response
def rename():
    """Rename the board identified by the posted id."""
    payload = request.get_json()
    return data_handler.rename_board(payload["title"], payload["id"])
@app.route('/drag&drop', methods=['GET', 'POST'])
@json_response
def drag_and_drop():
    """Move a card from one status column to another."""
    payload = request.get_json()
    return data_handler.update_status(payload['new_id'], payload['old_id'])
@app.route("/rename-status", methods=['GET', 'POST'])
@json_response
def rename_status():
    """Rename the status column identified by the posted id."""
    payload = request.get_json()
    return data_handler.rename_status(payload["title"], payload["id"])
@app.route("/rename-card", methods=['GET', 'POST'])
@json_response
def rename_card():
    """Rename the card identified by the posted id."""
    payload = request.get_json()
    return data_handler.rename_card(payload["title"], payload["id"])
@app.route("/create-status", methods=["GET", "POST"])
@json_response  # Fix: every sibling endpoint serialises through @json_response; this one was missing it
def create_status():
    """Add a status column to the posted board and return the new status row."""
    data = request.get_json()
    data_handler.add_status(data["board_id"])
    return data_handler.get_last_status()[0]
@app.route("/delete-board", methods=["GET", "POST"])
@json_response
def delete_board():
    """Delete the board whose id is posted as the JSON body."""
    posted_id = request.get_json()
    return data_handler.delete_board(posted_id)
@app.route("/board-open-close", methods=["GET", "POST"])
@json_response
def board_open_close():
    """Persist a board's expanded/collapsed flag."""
    payload = request.get_json()
    return data_handler.change_board_open_close(payload['boolean'], payload['id'])
@app.route('/check_username', methods=['GET', 'POST'])
@json_response
def check_username():
    """Report whether the posted username already exists."""
    payload = request.get_json()
    return data_handler.check_user_data('username', payload['username'])
@app.route('/check_email', methods=['GET', 'POST'])
@json_response
def check_email():
    """Report whether the posted e-mail address already exists."""
    payload = request.get_json()
    return data_handler.check_user_data('email_address', payload['email'])
@app.route('/check_passwords', methods=['GET', 'POST'])
@json_response
def check_passwords():
    # Compares the two sign-up password fields.
    # NOTE(review): this returns 'True' when the passwords do NOT match —
    # i.e. 'True' apparently means "mismatch/error". Confirm the front-end
    # relies on this inverted meaning before changing it.
    data = request.get_json()
    psw = util.hash_password(data['psw'])
    if not util.verify_password(data['pswAgain'], psw):
        return 'True'
    else:
        return 'False'
@app.route('/save_data', methods=['GET', 'POST'])
@json_response
def save_data():
    """Register a new user with a hashed password."""
    payload = request.get_json()
    hashed = util.hash_password(payload['password'])
    data_handler.save_data(payload['username'], payload['email'], hashed)
    return 'done'
@app.route('/check_login', methods=['GET', 'POST'])
@json_response
def check_login():
    """Validate credentials; return the user's id on success, 'False' otherwise."""
    payload = request.get_json()
    # Guard clause: unknown username short-circuits to failure.
    if data_handler.check_user_data('username', payload['username']) != 'True':
        return 'False'
    stored = data_handler.password_by_username(payload['username'])
    if util.verify_password(payload['password'], stored[0]['password']):
        return stored[0]['id']
    return 'False'
def main():
    """Register the favicon redirect, then run the dev server on port 5000."""
    # Fix: app.run() blocks until shutdown, so the favicon URL rule used to
    # be registered only AFTER the server had already stopped. Register it
    # before starting the server.
    with app.app_context():
        app.add_url_rule('/favicon.ico', redirect_to=url_for('static', filename='favicon/favicon.ico'))
    app.run(debug=True, port=5000)


if __name__ == '__main__':
    main()
|
from harkpython import harkbasenode
class HarkDebug(harkbasenode.HarkBaseNode):
    """HARK network node that dumps its INPUT every frame and emits a constant float."""

    def __init__(self):
        print("-!!!!HarkDebug!!!!-" * 3)
        # Single float output named OUTPUT, as required by the HARK wrapper.
        self.outputNames = ("OUTPUT",)
        self.outputTypes = ("prim_float",)
        # Running frame counter.
        self.c = 0

    def calculate(self):
        """Emit 1 on OUTPUT and log INPUT's type/value plus the frame number."""
        self.outputValues["OUTPUT"] = 1
        print("=" * 14 + str(type(self.INPUT)) + "=" * 14)
        print(self.INPUT)
        print("frame no." + str(self.c))
        self.c += 1
        print("")
|
#__author: "Jing Xu"
#date: 2018/1/23
import json
# Demo: serialise a dict to a file and read it back through the json module.
dict1 = {'name':'alex','age':'18'}
data = json.dumps( dict1 )
with open('JSON_text','w') as f:
    f.write(data)
# Round-trip: read the raw text back and parse it (json.load(f1) would do
# both steps at once).
with open('JSON_text','r') as f1:
    data1 = json.loads( f1.read() )
print(data1['name'])
def foo():
    """Demo function; exists to show functions are not JSON serializable."""
    print("ok")
# data2 = json.dumps( foo ) # Object of type 'function' is not JSON serializable |
import os
import pandas as pd
class BlockData():
    """Loads a downloaded block CSV and bulk-writes its rows to the database."""

    def __init__(self, download_folder, file_name, db):
        """Remember the CSV path (download_folder/file_name) and the DB handle."""
        self.file = os.path.join(download_folder, file_name)
        self.db = db

    def parse_data(self):
        """Parse the CSV and write its rows to the 'block' collection.

        Files whose first Date cell is the sentinel "NO RECORDS" are
        skipped, as are header-only (empty) files, which previously raised.
        """
        data = pd.read_csv(self.file)
        if data.empty:  # header-only download: nothing to store
            return
        if not data["Date"][0] == "NO RECORDS":
            save_data = data.to_dict(orient='records')
            self.db.write_many_to_collection('block', save_data)
|
class Hero:
    """A game hero with health/mana pools and optional weapon and spell."""

    def __init__(self, name, title, health, mana, mana_regen, weapon = None, spell = None):
        self.name = name
        self.title = title
        self.health = health
        # Fix: mana, weapon and spell were accepted but never stored by the
        # original __init__.
        self.mana = mana
        self.mana_regen = mana_regen
        self.weapon = weapon
        self.spell = spell
        # Full pools at creation time.
        self.max_health = health
        self.max_mana = mana

    def known_as(self):
        """Return the hero's styled name, e.g. 'Bron the Dragonslayer'."""
        return "{} the {}".format(self.name, self.title)

    def get_health(self):
        """Return current health."""
        return self.health
|
from math import factorial
# Count arithmetic-progression subsequences of the input list, modulo 1e9+9.
arr = []
n = int(input().strip())
for i in range(n):
    arr.append(int(input().strip()))
# Every subsequence of length <= 2 is trivially an AP:
# 1 (empty) + n (singletons) + C(n, 2) (pairs).
ans = 1 + n + (factorial(n) // (factorial(2)*factorial(n-2)))
# For each anchor pair (base_i, delta_i) with common difference D, count
# extensions of the progression through the sorted tail of the array.
for base_i in range(n):
    for delta_i in range(base_i+1, n):
        D = arr[delta_i] - arr[base_i]
        search_val = arr[delta_i] + D
        sorted_sub = sorted(arr[delta_i+1:])
        sorted_end = len(sorted_sub)  # Fix: `sorted_end` was never defined (NameError)
        inv_i = 0
        while inv_i < sorted_end:
            if sorted_sub[inv_i] == search_val:
                # Consume the run of equal values, then look for the next term.
                while inv_i < sorted_end and sorted_sub[inv_i] == search_val:
                    ans += 1
                    inv_i += 1
                search_val += D
            elif search_val < 0 or search_val > 200000 or sorted_sub[inv_i] < search_val:
                # Values outside range, or already past the target: stop early.
                break
            inv_i += 1
print(ans % (10**9 + 9))
# -*- coding: utf-8 -*-
import hashlib
import os
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql.cursors
import redis
import requests
from pymongo import MongoClient
from .items import IndonesiaNewsContentItem, ShortWordLink, ImgLink, NewsLink
from .items import NewsLinkItem, NewsContentItem, TradeName, SongName, MovieName, KoreanNewsContentItem
from language.spiders.vietnam_news_vtv_content import VietnamNewsVtvContentSpider
from language.spiders.vietnam_news_thanhnien_content import VietnamNewsThanhnienContentSpider
from language.spiders.vietnam_news_dang_content import VietnamNewsDangContentSpider
from language.spiders.vietnam_news_ken_content import VietnamNewsKenContentSpider
from language.spiders.vietnam_news_net_content import VietnamNewsNetContentSpider
from language.spiders.vietnam_news_tuo_content import VietnamNewsTuoContentSpider
from language.spiders.vietnam_news_conomy_content import VietnamNewsConomyContentSpider
class LanguagePipeline(object):
    """Scrapy item pipeline that fans items out to Redis, MongoDB and MySQL.

    Redis holds URL-fingerprint hashes (dedup) and work queues; MongoDB
    stores per-site Vietnamese news articles; MySQL stores links, names and
    article text.
    """

    def __init__(self):
        # Redis connection (fingerprints + queues)
        pool = redis.ConnectionPool(host='47.105.132.57', port=6379, db=0, password='')
        self.r = redis.Redis(connection_pool=pool)
        # MongoDB connection (news-content collections)
        self.client = MongoClient("47.105.132.57:27017")
        # MySQL connection
        # NOTE(review): credentials are hard-coded; consider moving them to
        # scrapy settings / environment variables.
        self.connect = pymysql.connect(
            # host='123.56.11.156',  # database host
            # user='sjtUser',  # database user
            # passwd='sjtUser!1234',  # database password
            # db='malaysia',  # database name
            host='47.105.132.57',  # database host
            user='root',  # database user
            passwd='Yang_123_456',  # database password
            db='spiderframe',  # database name
            port=3306,  # database port
            charset='utf8',  # connection encoding
            use_unicode=True)
        # Cursor shared by every INSERT below
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        """Dispatch *item* to the appropriate store based on its concrete type."""
        if isinstance(item, NewsLinkItem):
            md5_url = self.md5_(item['url'])
            # Fix: hash_exist()/hash_() take the Redis hash name as their
            # first argument; the original passed only the digest, which
            # raised TypeError. 'fingerprint' matches the hash used by the
            # other link branches — TODO(review): confirm the intended name.
            sta = self.hash_exist('fingerprint', md5_url)
            if not sta:
                self.hash_('fingerprint', md5_url)
                self.r.lpush('news_link', item['url'])
                self.cursor.execute("""insert into news_link(category, url) value (%s, %s)""",
                                    (item['category'], item['url']))
                self.connect.commit()
            else:
                print("指纹重复")
        elif isinstance(item, NewsContentItem):
            if item['content']:
                # self.cursor.execute("""insert into vietnam_news_nhandan_content(url, content) value (%s, %s)""",
                #                     (item['url'], item['content']))
                # self.connect.commit()
                url_id = self.md5_(item['url'])
                item["id"] = url_id
                # Upsert the article into the Mongo collection of its source
                # site, keyed on the URL digest.
                if isinstance(spider, VietnamNewsVtvContentSpider):
                    self.client.vietnam.vietnam_news_vtv_content.update({'id': item['id']}, item, True)
                elif isinstance(spider, VietnamNewsThanhnienContentSpider):
                    self.client.vietnam.vietnam_news_thanhnien_content.update({'id': item['id']}, item, True)
                elif isinstance(spider, VietnamNewsDangContentSpider):
                    self.client.vietnam.vietnam_news_dang_content.update({'id': item['id']}, item, True)
                elif isinstance(spider, VietnamNewsKenContentSpider):
                    self.client.vietnam.vietnam_news_ken_content.update({'id': item['id']}, item, True)
                elif isinstance(spider, VietnamNewsNetContentSpider):
                    self.client.vietnam.vietnam_news_net_content.update({'id': item['id']}, item, True)
                elif isinstance(spider, VietnamNewsTuoContentSpider):
                    self.client.vietnam.vietnam_news_tuo_content.update({'id': item['id']}, item, True)
                elif isinstance(spider, VietnamNewsConomyContentSpider):
                    self.client.vietnam.vietnam_news_conomy_content.update({'id': item['id']}, item, True)
            else:
                # Empty content: push the URL back onto the spider's queue.
                self.r.rpush(spider.name, item['url'])
        elif isinstance(item, TradeName):
            self.cursor.execute("""insert into vietnam_shopee_name(category, content) value (%s, %s)""",
                                (item['category'], item['content']))
            self.connect.commit()
        elif isinstance(item, SongName):
            self.cursor.execute("""insert into vietnam_song_name(song_name, singer_name) value (%s, %s)""",
                                (item['song_name'], item['singer_name']))
            self.connect.commit()
        elif isinstance(item, MovieName):
            self.cursor.execute("""insert into vietnam_movie_name(first_name, second_name) value (%s, %s)""",
                                (item['first_name'], item['second_name']))
            self.connect.commit()
        elif isinstance(item, KoreanNewsContentItem):
            md5_url = self.md5_(item['url'])
            # Fix: supply the Redis hash name (was missing -> TypeError).
            # TODO(review): confirm 'fingerprint' is the intended hash here.
            sta = self.hash_exist('fingerprint', md5_url)
            if not sta:
                self.hash_('fingerprint', md5_url)
                self.cursor.execute("""insert into korean_news_text(news_link, news_text) value (%s, %s)""",
                                    (item['url'], item['content']))
                self.connect.commit()
            else:
                print("指纹重复")
        elif isinstance(item, IndonesiaNewsContentItem):
            db_name = 'indonesia_news_fingerprint'
            md5_url = self.md5_(item['url'])
            sta = self.hash_exist(db_name, md5_url)
            if not sta:
                self.hash_(db_name, md5_url)
                self.cursor.execute("""insert into indonesia_news_text(news_link, news_text) value (%s, %s)""",
                                    (item['url'], item['content']))
                self.connect.commit()
            else:
                print("指纹重复")
        elif isinstance(item, ShortWordLink):
            db_name = 'fingerprint'
            md5_url = self.md5_(item['url'])
            sta = self.hash_exist(db_name, md5_url)
            if not sta:
                self.hash_(db_name, md5_url)
                self.r.lpush('malaysia_goods_name', item['url'])
            else:
                print("指纹重复")
        elif isinstance(item, ImgLink):
            db_name = 'fingerprint'
            md5_url = self.md5_(item['url'])
            sta = self.hash_exist(db_name, md5_url)
            if not sta:
                self.hash_(db_name, md5_url)
                self.cursor.execute("""insert into Img(img_name, url) value (%s, %s)""",
                                    (md5_url, item['url']))
                self.connect.commit()
                # Download the image and store it under the digest name.
                content = requests.get(item["url"]).content
                folder = r"D:\datatang\language\language\files\car"
                if not os.path.exists(folder):
                    os.mkdir(folder)
                with open('{}\{}.jpg'.format(folder, md5_url), 'wb') as f:
                    f.write(content)
        elif isinstance(item, NewsLink):
            if not item.get("url"):
                # Link extraction failed: record the originating URL.
                self.r.lpush("link_error", item["ori_url"])
            else:
                db_name = 'fingerprint'
                md5_url = self.md5_(item['url'])
                sta = self.hash_exist(db_name, md5_url)
                if not sta:
                    self.hash_(db_name, md5_url)
                    self.r.rpush(spider.name, item['url'])
                else:
                    print("指纹重复")
        else:
            pass
        return item

    def md5_(self, str):
        """Return the hex MD5 digest of *str*.

        (The parameter name shadows the builtin; kept for interface stability.)
        """
        md5 = hashlib.md5()
        data = str
        md5.update(data.encode('utf-8'))
        return md5.hexdigest()

    def hash_(self, db_name, str):
        """Record fingerprint *str* in the Redis hash *db_name*."""
        return self.r.hset(name=db_name, key=str, value=1)

    def hash_exist(self, db_name, str):
        """Return True if fingerprint *str* already exists in Redis hash *db_name*."""
        return self.r.hexists(name=db_name, key=str)
|
# Builds a biped skeleton in Maya: spine chain, left arm + full left hand,
# left leg, orients the chains, then mirrors arm and leg to the right side
# and parents everything under the root/chest joints.
import maya.cmds as cmds
#create joints (body chain: root -> stomach -> chest -> neck -> head)
cmds.joint(p=(-0.063,102.695,0),n=('root_jnt'))
cmds.joint(p=(-0.188,111.843,0),n=('stomach_jnt'))
cmds.joint(p=(0.188,129.763,0),n=('chest_jnt'))
cmds.joint(p=(-0.063,143.799,0),n=('neck_jnt'))
cmds.joint(p=(0.188,161.969,0),n=('head_jnt'))
#deselect so the next joint starts a new chain
cmds.select(clear=True)
#create joints (arm chain); r=True positions are relative to the parent joint
cmds.joint(p=(6.203,138.535,0),n=('L_collarBone_jnt'))
cmds.joint(p=(12.93,137.929,0),n=('L_shoulder_jnt'))
cmds.joint(p=(12.437,-19.609,-1.657),n=('L_elbow_jnt'), r=True)
cmds.joint(p=(12.408,-14.055,8.077),n=('L_wrist_jnt'),r=True)
#create joints (hand)
cmds.joint(p=(4.238,-4.376,4.094),n=('L_palm_jnt'),r=True)
#thumbJoints
cmds.joint(p=(-5.243,0,2.574),n=('L_Thumb1_jnt'),r=True)
cmds.joint(p=(-0.363,-2.258,2.081),n=('L_Thumb2_jnt'),r=True)
cmds.joint(p=(-0.546,-1.847,1.756),n=('L_Thumb3_jnt'),r=True)
#deselect
cmds.select(clear=True)
#indexJoints
cmds.joint(p=(-0.584,-2.815,3.243),n=('L_Index1_jnt'),r=True)
cmds.joint(p=(0.184,-2.193,1.191),n=('L_Index2_jnt'),r=True)
cmds.joint(p=(0.318,-2.048,0.591),n=('L_Index3_jnt'),r=True)
cmds.joint(p=(0.222,-1.984,1.329),n=('L_Index4_jnt'),r=True)
#parent index1 to palm
cmds.parent('L_Index1_jnt','L_palm_jnt', r=True)
#deselect
cmds.select(clear=True)
#middleJoints
cmds.joint(p=(0.573,-3.412,1.466),n=('L_Middle1_jnt'),r=True)
cmds.joint(p=(0.631,-2.068,1.612),n=('L_Middle2_jnt'),r=True)
cmds.joint(p=(-0.027,-2.546,0.314),n=('L_Middle3_jnt'),r=True)
cmds.joint(p=(0.093,-1.48,0.427),n=('L_Middle4_jnt'),r=True)
#parent Middle1 to palm
cmds.parent('L_Middle1_jnt','L_palm_jnt', r=True)
#deselect
cmds.select(clear=True)
#ringJoints
cmds.joint(p=(1.896,-2.481,0.4531),n=('L_Ring1_jnt'),r=True)
cmds.joint(p=(0.961,-3.413,0.658),n=('L_Ring2_jnt'),r=True)
cmds.joint(p=(0.137,-2.309,0.434),n=('L_Ring3_jnt'),r=True)
cmds.joint(p=(-0.131,-1.728,0.185),n=('L_Ring4_jnt'),r=True)
#parent Ring1 to palm
cmds.parent('L_Ring1_jnt','L_palm_jnt', r=True)
#deselect
cmds.select(clear=True)
#pinkyJoints
cmds.joint(p=(1.807,-2.608,-2.075),n=('L_Pinky1_jnt'),r=True)
cmds.joint(p=(0.299,-2.683,0.581),n=('L_Pinky2_jnt'),r=True)
cmds.joint(p=(0.385,-2.063,0.288),n=('L_Pinky3_jnt'),r=True)
cmds.joint(p=(0.329,-1.935,0.196),n=('L_Pinky4_jnt'),r=True)
#parent Pinky1_jnt to palm
cmds.parent('L_Pinky1_jnt','L_palm_jnt', r=True)
#deselect
cmds.select(clear=True)
#create joints (leg chain: hip -> knee -> ankle -> ball -> toe)
cmds.joint(p=(8.068,97.25,0),n=('L_hip_jnt'))
cmds.joint(p=(4.206,-39.767,4.584),n=('L_knee_jnt'),r=True)
cmds.joint(p=(3.906,-43.079,-5.7),n=('L_ankle_jnt'),r=True)
cmds.joint(p=(0.403,-10.532,8.135),n=('L_ballOfFoot_jnt'),r=True)
cmds.joint(p=(-0.121,-2.019,4.19),n=('L_toe_jnt'),r=True)
#deselect
cmds.select(clear=True)
#orient joints: X down the bone, Z as the secondary (down) axis, children too
cmds.joint('L_hip_jnt',edit=True,orientJoint='xyz',secondaryAxisOrient='zdown',ch=True)
cmds.joint('root_jnt',edit=True,orientJoint='xyz',secondaryAxisOrient='zdown',ch=True)
cmds.joint('L_collarBone_jnt',edit=True,orientJoint='xyz',secondaryAxisOrient='zdown',ch=True)
#orient last joints to world
#TODO(review): end joints are not yet world-oriented — step left unfinished
#mirror leg chain across YZ, renaming L_ prefixes to R_
cmds.mirrorJoint('L_hip_jnt',mirrorYZ=True,mirrorBehavior=True,searchReplace=('L_', 'R_') )
#mirror Arm chain
cmds.mirrorJoint('L_collarBone_jnt',mirrorYZ=True,mirrorBehavior=True,searchReplace=('L_', 'R_') )
#parent collarbone to chest
cmds.parent('L_collarBone_jnt','chest_jnt', r=False)
cmds.parent('R_collarBone_jnt','chest_jnt', r=False)
#parent hips to root
cmds.parent('L_hip_jnt','root_jnt', r=False)
cmds.parent('R_hip_jnt','root_jnt', r=False)
|
import boto3
import time

# End-to-end Amazon Machine Learning run: create an S3-backed data source,
# train a binary model on it, evaluate it, then kick off a batch prediction.
# The hard-coded ids below must be bumped on every run (AWS ids are
# write-once); consider deriving them from one shared run suffix.
# # s3 = boto3.resource('s3')
# s3.create_bucket(Bucket='dee-bucket-test', CreateBucketConfiguration={'LocationConstraint': 'us-west-1'})
# # bucket name has to be unique and all lowercase.
# s3.meta.client.upload_file('C:/Users/sothea/Documents/ASU EmergenTech Hackathon/Amazon AWS Cloud/banking.csv', 'dee-bucket-test', 'train1.csv')
# s3.meta.client.upload_file('C:/Users/sothea/Documents/ASU EmergenTech Hackathon/Amazon AWS Cloud/banking-batch.csv', 'dee-bucket-test', 'banking-batch.csv')
# s3.meta.client.upload_file('C:/Users/sothea/Documents/ASU EmergenTech Hackathon/Amazon AWS Cloud/banking-data.schema', 'dee-bucket-test', 'banking-data.schema')
client = boto3.client('machinelearning')

print("creating DataSource")
# Data source: CSV + schema already uploaded to S3.
response_data = client.create_data_source_from_s3(
    DataSourceId='train-data-source-3',
    DataSpec={
        'DataLocationS3': 's3://train1-bucket-2/train1.csv',
        'DataSchemaLocationS3': 's3://train1-bucket-2/train1-data.schema'
    },
    ComputeStatistics=True
)
print(response_data)

print("Creating MLModel")
time.sleep(5)  # brief pause so the data source registration settles
response_ml_model = client.create_ml_model(
    MLModelId='train-model-3',
    MLModelName='train1model',
    MLModelType='BINARY',
    TrainingDataSourceId='train-data-source-3'
)
print(response_ml_model)
print("ML Model Creation")

# Evaluate the model.
# NOTE(review): evaluating on the training data source measures training
# fit, not generalisation — a held-out data source would be better.
response_eval = client.create_evaluation(
    EvaluationId='ml-evaluation6',
    MLModelId='train-model-3',
    EvaluationDataSourceId='train-data-source-3'
)
print(response_eval)
print("ML Model Evaluation")

# # Creating a Real-time Prediction Request
# response = client.create_realtime_endpoint(
#     MLModelId='string'
# )
# response_predict = client.predict(
#     MLModelId='unique-model-id-1',
#     Record={'string': 'string'},
#     PredictEndpoint="machinelearning.us-east-1.amazonaws.com"
# )
response_batch_predict = client.create_batch_prediction(
    BatchPredictionId='unique-model-id-7',
    MLModelId='train-model-3',
    BatchPredictionDataSourceId='train-data-source-3',
    OutputUri='s3://train1-bucket-2/'
)
print(response_batch_predict)
print("ML Model Batch Prediction")

# Fetch the batch prediction just created.
# Fix: the original queried 'unique-model-id-6' (the previous run's id)
# instead of the '-7' created above.
response_store_predict = client.get_batch_prediction(
    BatchPredictionId='unique-model-id-7'
)
from _typeshed import SupportsItemAccess
from datetime import datetime, timedelta
from typing import Any
from wtforms.csrf.core import CSRF, CSRFTokenField
from wtforms.form import BaseForm
from wtforms.meta import DefaultMeta
class SessionCSRF(CSRF):
    """Type stub for wtforms' session-backed CSRF implementation."""
    # Presumably the strftime/strptime format for serialising token expiry — confirm against wtforms source
    TIME_FORMAT: str
    # Meta of the form being protected; assigned during setup_form
    form_meta: DefaultMeta
    def setup_form(self, form): ...
    def generate_csrf_token(self, csrf_token_field: CSRFTokenField) -> str: ...
    def validate_csrf_token(self, form: BaseForm, field: CSRFTokenField) -> None: ...
    # Current time hook used when timestamping/validating tokens
    def now(self) -> datetime: ...
    @property
    def time_limit(self) -> timedelta: ...
    @property
    def session(self) -> SupportsItemAccess[str, Any]: ...
|
import sys
import json
import boto3
from random import randint
from pprint import pprint
import requests
import discord
from discord.ext import commands
from discord_commands import get_message, get_thumbnail_url, get_attachment_link
from anagrams import recursiveAnagrams
# OLD
# @client.event
# async def on_ready():
# return await client.change_presence(game=discord.Game(name='with time zones'))
# @client.command()
# async def weather(*args):
# location = " ".join(args)
# weather_data = get_weather(location)
# return await client.say(weather_data)
# @client.command(pass_context=True)
# async def remind(ctx, *args):
# request = "remind " + " ".join(args)
# result = engine.parse(request)
# intent = None
# dt_string = None
# if not result['slots']:
# return await client.say("I can't **** understand **** your accent ****")
# for s in result['slots']:
# if s['slotName'] == 'intent':
# intent = s['value']['value']
# if s['slotName'] == 'time':
# dt_string = s['value']['value']
# if not intent:
# return await client.say("It isn't clear to me what to remind you about.")
# if not dt_string:
# return await client.say("I know what you want to be reminded of, but not what time to remind you.")
# dt = parser.parse(dt_string)
# new_dt = dt.astimezone(tz=None)
# user_id = ctx.message.author.id
# channel_id = ctx.message.channel.id
# set_reminder(intent, new_dt, user_id, channel_id)
# output_string_format = "%I:%M %p on %a, %b %d"
# output_time = datetime.datetime.strftime(dt, output_string_format)
# output_string = "<@{}>, I will remind me you to `{}` at `{} UTC`".format(user_id, intent, output_time)
# return await client.say(output_string)
# Pull the Discord bot token from AWS Secrets Manager.
secrets_client = boto3.client('secretsmanager', region_name='us-west-2')
# Use dev token if we're testing on windows machine
token_secret_name = "discordBotTokenDev" if sys.platform == "win32" else "discordBotToken"
token_response = secrets_client.get_secret_value(SecretId=token_secret_name)
# SecretString is a JSON object mapping the secret name to the token value.
token_response_dict = json.loads(token_response['SecretString'])
discord_token = token_response_dict[token_secret_name]
# All commands are invoked with the '-' prefix.
bot = commands.Bot(command_prefix='-', description="haldibot.")
@bot.event
async def on_ready():
    """Set the bot's presence once the gateway connection is ready."""
    presence = discord.Activity(type=discord.ActivityType.playing, name='hooky')
    await bot.change_presence(activity=presence)
@bot.command()
async def status(ctx, *args):
    """Change the bot's presence.

    Usage: -status <playing|watching|listening|streaming> <name...>
    Unrecognized activity types fall back to ActivityType.unknown.
    """
    # Guard: the original raised IndexError on args[0] when called with no
    # arguments; reset to an empty presence instead.
    if not args:
        await bot.change_presence(activity=None)
        return
    status_switch = {
        "playing": discord.ActivityType.playing,
        "watching": discord.ActivityType.watching,
        "listening": discord.ActivityType.listening,
        "streaming": discord.ActivityType.streaming
    }
    selected_type = status_switch.get(args[0], discord.ActivityType.unknown)
    selected_name = " ".join(args[1:])
    await bot.change_presence(activity=discord.Activity(type=selected_type, name=selected_name))
@bot.command()
async def echo(ctx, *args):
    """Repeat the invoker's arguments back into the channel."""
    await ctx.send(" ".join(args))
@bot.command()
async def hello(ctx):
    """Greet the invoking user with a mention."""
    user_id = ctx.message.author.id
    await ctx.send(f"Hello, <@{user_id}>! Have a nice day.")
@bot.command()
async def ping(ctx):
    """Liveness check: reply with 'pong'."""
    reply = 'pong'
    await ctx.send(reply)
@bot.command()
async def sentiment(ctx, *args):
    """Run AWS Comprehend sentiment detection on the referenced message."""
    text = await get_message(ctx)
    comprehend = boto3.client('comprehend', region_name='us-west-2')
    result = comprehend.detect_sentiment(Text=text, LanguageCode="en")
    detected = result['Sentiment']
    # SentimentScore keys are title-cased ("Positive", ...) while the
    # Sentiment value is upper-cased, hence the .title() bridge.
    confidence = int(float(result['SentimentScore'][detected.title()]) * 100)
    await ctx.send(str(f"I am {confidence}% sure that your tone was {detected}"))
@bot.command()
async def eightball(ctx):
    """Answer with a random Magic 8-Ball response."""
    answers = (
        'It is certain.',
        'It is decidedly so',
        'Without a doubt.',
        'Yes - definitely',
        'You may rely on it.',
        'As I see it, yes.',
        'Most likely',
        'Outlook good.',
        'Yes',
        'Signs point to yes.',
        'Reply hazy, try again.',
        'Ask again later.',
        'Better not tell you now.',
        'Cannot predict now.',
        'Concentrate and ask again.',
        "Don't count on it.",
        'My reply is no.',
        'My sources say no.',
        'Outlook not so good.',
        'Very doubtful.'
    )
    pick = answers[randint(0, len(answers) - 1)]
    await ctx.send(str(pick))
@bot.command()
async def haldigram(ctx, *args):
    """Find anagrams ("haldigrams") of the given word(s).

    All arguments are concatenated (no separator) and fed to
    recursiveAnagrams; for large result sets, 9 random picks are shown.
    """
    # Idiomatic join instead of the original manual += concatenation loop.
    word = "".join(args)
    print(word)
    results = recursiveAnagrams(word)
    if not results:
        await ctx.send("forgive me for i cannot find a haldigram of that")
    elif len(results) >= 10:
        await ctx.send(f"Got {len(results)} results, here's a few: ")
        # NOTE: picks are independent randint draws, so duplicates are
        # possible (kept from the original behavior).
        randomSelected = []
        for x in range(0, 9):
            randomSelected.append(results[randint(0, len(results) - 1)])
        message_string = "```" + "\n".join(randomSelected) + "```"
        await ctx.send(message_string)
    elif len(results) > 1:
        await ctx.send(f"Got {len(results)} results:")
        message_string = "\n".join(results)
        await ctx.send(message_string)
    else:
        # Exactly one result: the empty case was already handled above, so
        # the original's duplicate trailing else branch was unreachable.
        await ctx.send(results[0])
@bot.command()
async def stonks(ctx, *args):
    """Report a stock quote from the IEX Cloud API as an embed.

    Defaults to Slack ("work") when no symbol is given; otherwise uses the
    first four characters of the first argument.
    """
    token_secret_name = 'stockAPIKey'
    stock_token_response = secrets_client.get_secret_value(SecretId=token_secret_name)
    stock_token_response_dict = json.loads(stock_token_response['SecretString'])
    stock_token = stock_token_response_dict[token_secret_name]
    # Default is Slack stock, if none is specified
    # Otherwise, get the first four characters in the provided string
    symbol = "work"
    if args:
        symbol = args[0][0:4]
    url = f"https://cloud.iexapis.com/stable/stock/{symbol}/quote?token={stock_token}"
    # The original used a bare `except:` (swallowing even KeyboardInterrupt)
    # and read the response keys outside the try, so an unknown-symbol
    # response could still crash.  Catch network/JSON/missing-field errors
    # explicitly and keep all response parsing inside the try.
    try:
        response = requests.get(url).json()
        company = response['companyName']
        company_symbol = response['symbol']
        price = response['latestPrice']
        change = round(response['change'], 2)
        change_percent = round(response['changePercent'], 2)
    except (requests.RequestException, ValueError, KeyError, TypeError):
        await ctx.send("Couldn't find that company.")
        return
    # Zero or negative change renders red; positive renders green.
    color = discord.Colour.green() if change > 0 else discord.Colour.red()
    # API gives a minus sign for negative change, but no plus for positive
    # So determine which it was, and ensure we store the sign outside of the dollar sign
    # in the final output
    sign = "-" if change < 0 else "+"
    change_string = str(abs(change))
    embed = discord.Embed(title="stonks!", color=color)
    embed.add_field(name="Company Name", value=f"{company} ({company_symbol})", inline=False)
    embed.add_field(name="Current Value", value=f"${price}", inline=True)
    embed.add_field(name="Change", value=f"{sign}${change_string} ({change_percent}%)", inline=True)
    if symbol == "work":
        # Only do this if slack is specified - determines the value of 2.51 owned shares
        total = round(2.51 * price, 2)
        embed.add_field(name="Value of Your Shares", value=f"${total}", inline=False)
    await ctx.send(embed=embed)
@bot.command()
async def image(ctx, *args):
    """Label the referenced image using AWS Rekognition."""
    image_url = await get_thumbnail_url(ctx)
    if not image_url:
        await ctx.send("No image found")
        return
    r = requests.get(image_url, stream=True)
    # The original silently did nothing on a failed download; report it and
    # use an early return instead of nesting the whole body in an `if`.
    if r.status_code != 200:
        await ctx.send("Couldn't download that image")
        return
    r.raw.decode_content = True
    rekognition = boto3.client('rekognition', region_name='us-west-2')
    result = rekognition.detect_labels(Image={'Bytes': r.raw.data}, MaxLabels=10)
    labels = result['Labels']
    embed = discord.Embed(title="Label Results:", color=discord.Colour.blue())  # TODO need better color
    confidence_percent = lambda confidence: str(round(confidence, 2)) + "%"
    for l in labels:
        embed.add_field(name=l['Name'], value=confidence_percent(l['Confidence']), inline=False)
    await ctx.send(embed=embed)
# @bot.command()
# async def article(ctx, *args):
# article_url = await get_attachment_link(ctx)
# if not article_url:
# await ctx.send("No article found")
# return
# # Newspaper library stuff
# # Experimental - i might just throw this away if it sucks
# config = Config()
# config.MAX_SUMMARY_SENT = 3
# article = Article(url=article_url, config=config)
# article.download()
# article.parse()
# article.nlp()
# await ctx.send(article.summary)
# @bot.command()
# async def meme(ctx, *args):
# image_url = await get_thumbnail_url(ctx)
# if not image_url:
# await ctx.send("No image found")
# return
# r = requests.get(image_url, stream=True)
# r.raw.decode_content = True
# upload_meme(r.raw, args)
# @bot.command()
# async def getmeme(ctx, *args):
# key = "d98f59b1-ba84-4aae-9ea7-e32f79204b10"
# result = await download_meme(key, ctx)
bot.run(discord_token) |
import time
from threading import Thread
def myfun():
    """Simulate one second of blocking work, then print the result (2)."""
    time.sleep(1)
    total = 1 + 1
    print(total)
# --- Sequential baseline: run myfun five times back to back (~5 s). ---
t1 = time.time()
for i in range(5):
    myfun()
t2 = time.time()
print(t2-t1)
# --- Threaded version: the sleeps overlap, so total time is ~1 s. ---
ths = []
for _ in range(5):
    th = Thread(target=myfun)
    th.start()
    ths.append(th)
for th in ths:
    th.join()
t3 = time.time()
print(t3-t2)
# Generated by Django 3.2 on 2021-04-14 11:02
from django.db import migrations
class Migration(migrations.Migration):
    """Order Profile instances by their related user by default."""

    dependencies = [
        ('app', '0003_alter_role_options'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='profile',
            options={'ordering': ['user']},
        ),
    ]
|
from selenium import webdriver
import unittest
class GetCurrentPageUrlByChrome(unittest.TestCase):
    """Open Baidu in Chrome and verify the browser reports the same URL."""

    def setUp(self):
        # A fresh Chrome session for each test.
        self.driver = webdriver.Chrome()

    def test_getCurrenPageUrl(self):
        url = "https://www.baidu.com/"
        self.driver.get(url)
        self.driver.maximize_window()
        # Read the URL of the currently loaded page.
        currentPageURL = self.driver.current_url
        print(currentPageURL)
        # Assert the current URL is "https://www.baidu.com/"
        # (the assertion failure message is a runtime string, kept as-is:
        # it means "the current URL is incorrect").
        self.assertEqual(currentPageURL, "https://www.baidu.com/", "当前网址不正确")

    def tearDown(self):
        self.driver.quit()
if __name__ == '__main__':
    unittest.main()
# NOTE: write out the URL in full, otherwise the test is likely to fail
from freqtrade.strategy.interface import IStrategy
from pandas import DataFrame
#from technical.indicators import accumulation_distribution
from technical.util import resample_to_interval, resampled_merge
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
import numpy
from technical.indicators import ichimoku
class Ichimoku_v34(IStrategy):
    """
    Ichimoku breakout strategy: buy when the close from two candles ago
    crosses above the cloud (senkou span A or B) while sitting above both
    spans; sell when the close from three candles ago crosses below kijun.
    """
    # ROI of 100 (i.e. 10000%) effectively disables ROI-based exits;
    # exits come from the sell signal only.
    minimal_roi = {
        "0": 100
    }
    stoploss = -1 #-0.35
    ticker_interval = '4h' #3m
    # startup_candle_count: int = 2
    # trailing stoploss
    #trailing_stop = True
    #trailing_stop_positive = 0.40 #0.35
    #trailing_stop_positive_offset = 0.50
    #trailing_only_offset_is_reached = False
    def informative_pairs(self):
        # No additional informative pairs are needed.
        return []
    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # NOTE: 'laggin_span' is the parameter name as spelled by the
        # technical library, not a typo introduced here.
        ichi = ichimoku(dataframe, conversion_line_period=20, base_line_periods=60, laggin_span=120, displacement=30)
        # dataframe['chikou_span'] = ichi['chikou_span']
        dataframe['tenkan'] = ichi['tenkan_sen']
        dataframe['kijun'] = ichi['kijun_sen']
        dataframe['senkou_a'] = ichi['senkou_span_a']
        dataframe['senkou_b'] = ichi['senkou_span_b']
        dataframe['cloud_green'] = ichi['cloud_green']
        dataframe['cloud_red'] = ichi['cloud_red']
        return dataframe
    def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Buy when close (shifted 2) crosses above senkou A while above
        # both cloud spans ...
        dataframe.loc[
            (
                (qtpylib.crossed_above(dataframe['close'].shift(2), dataframe['senkou_a'])) &
                (dataframe['close'].shift(2) > dataframe['senkou_a']) &
                (dataframe['close'].shift(2) > dataframe['senkou_b'])
            ),
            'buy'] = 1
        # ... or crosses above senkou B under the same conditions.
        dataframe.loc[
            (
                (qtpylib.crossed_above(dataframe['close'].shift(2), dataframe['senkou_b'])) &
                (dataframe['close'].shift(2) > dataframe['senkou_a']) &
                (dataframe['close'].shift(2 ) > dataframe['senkou_b'])
            ),
            'buy'] = 1
        return dataframe
    def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Sell when close (shifted 3 — the buy side uses shift(2); the
        # asymmetry looks deliberate but is worth confirming) crosses below
        # kijun and the current close is also below kijun.
        dataframe.loc[
            (
                (qtpylib.crossed_below(dataframe['close'].shift(3), dataframe['kijun'])) &
                (dataframe['close'] < dataframe['kijun'])
            ),
            'sell'] = 1
        return dataframe
|
import pytest
from freezegun import freeze_time
from onegov.election_day.layouts import ElectionLayout
from tests.onegov.election_day.common import login
from tests.onegov.election_day.common import MAJORZ_HEADER
from tests.onegov.election_day.common import upload_majorz_election
from tests.onegov.election_day.common import upload_party_results
from tests.onegov.election_day.common import upload_proporz_election
from webtest import TestApp as Client
from webtest.forms import Upload
def round_(n, z):
    """Return n out of z as a percentage, rounded to two decimal places."""
    pct = 100 * n / z
    return round(pct, 2)
def test_view_election_redirect(election_day_app_gr):
    """The bare election URL redirects to the default tab per election type."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client)
    upload_proporz_election(client)
    # Majorz elections land on the candidates view ...
    response = client.get('/election/majorz-election')
    assert response.status == '302 Found'
    assert 'majorz-election/candidates' in response.headers['Location']
    # ... proporz elections on the lists view.
    response = client.get('/election/proporz-election')
    assert response.status == '302 Found'
    assert 'proporz-election/lists' in response.headers['Location']
def test_view_election_candidates(election_day_app_gr):
    """Candidates views: main page, bar-chart data, embedded chart/table."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    # Majorz election
    upload_majorz_election(client, status='final')
    # ... main
    candidates = client.get('/election/majorz-election/candidates')
    assert all((expected in candidates for expected in (
        "Engler Stefan", "20", "Schmid Martin", "18"
    )))
    # ... bar chart data (with filters); invalid/empty limits mean "no limit"
    for suffix in ('', '?limit=', '?limit=a', '?limit=0'):
        candidates = client.get(
            f'/election/majorz-election/candidates-data{suffix}'
        )
        assert {r['text']: r['value'] for r in candidates.json['results']} == {
            'Engler Stefan': 20, 'Schmid Martin': 18
        }
    candidates = client.get(
        '/election/majorz-election/candidates-data?limit=1'
    )
    assert {r['text']: r['value'] for r in candidates.json['results']} == {
        'Engler Stefan': 20
    }
    candidates = client.get(
        '/election/majorz-election/candidates-data?entity=Vaz/Obervaz'
    )
    assert {r['text']: r['value'] for r in candidates.json['results']} == {
        'Engler Stefan': 20, 'Schmid Martin': 18
    }
    # ... embedded chart (with filters)
    chart = client.get('/election/majorz-election/candidates-chart')
    assert '/election/majorz-election/candidates' in chart
    chart = client.get(
        '/election/majorz-election/candidates-chart?entity=Filisur'
    )
    assert 'entity=Filisur' in chart
    # ... embedded table (with filters)
    table = client.get('/election/majorz-election/candidates-table')
    assert 'data-text="20"' in table
    table = client.get(
        '/election/majorz-election/candidates-table?entity=Vaz/Obervaz'
    )
    assert 'data-text="20"' in table
    table = client.get(
        '/election/majorz-election/candidates-table?entity=Filisur'
    )
    assert 'data-text=' not in table
    # Proporz election
    upload_proporz_election(client, status='final')
    # ... main
    candidates = client.get('/election/proporz-election/candidates')
    assert all((expected in candidates for expected in (
        "Caluori Corina", "1", "Casanova Angela", "0"
    )))
    # ... bar chart data (with filters)
    for suffix in ('', '?limit=', '?limit=a', '?limit=0'):
        candidates = client.get(
            f'/election/proporz-election/candidates-data{suffix}'
        )
        assert candidates.json['results'] == []
    candidates = client.get(
        '/election/proporz-election/candidates-data?elected=False&limit=1'
    )
    assert {r['text']: r['value'] for r in candidates.json['results']} == {
        'Caluori Corina': 2
    }
    # NOTE(review): this request hits the *majorz* election inside the
    # proporz section — possibly a copy-paste leftover; the asserted values
    # do match the majorz results, so the test passes either way. Confirm
    # whether the proporz URL was intended.
    candidates = client.get(
        '/election/majorz-election/candidates-data?elected=False&'
        'entity=Vaz/Obervaz'
    )
    assert {r['text']: r['value'] for r in candidates.json['results']} == {
        'Engler Stefan': 20, 'Schmid Martin': 18
    }
    # ... embedded chart (with filters)
    chart = client.get('/election/proporz-election/candidates-chart')
    assert '/election/proporz-election/candidates' in chart
    chart = client.get(
        '/election/proporz-election/candidates-chart?entity=Filisur'
    )
    assert 'entity=Filisur' in chart
    # ... embedded table (with filters)
    table = client.get('/election/proporz-election/candidates-table')
    assert 'data-text="2"' in table
    table = client.get(
        '/election/proporz-election/candidates-table?entity=Vaz/Obervaz'
    )
    assert 'data-text="2"' in table
    table = client.get(
        '/election/proporz-election/candidates-table?entity=Filisur'
    )
    assert 'data-text=' not in table
def test_view_election_candidate_by_entity(election_day_app_gr):
    """Per-entity candidate views: option labels and per-entity JSON data."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client, status='final')
    upload_proporz_election(client, status='final')
    for url in (
        '/election/majorz-election/candidate-by-entity',
        '/election/majorz-election/candidate-by-entity-chart'
    ):
        view = client.get(url)
        # Final elections mark elected candidates with "(gewählt)".
        assert '/by-entity">Engler Stefan (gewählt)</option>' in view
        assert '/by-entity">Schmid Martin (gewählt)</option>' in view
        data = {
            option.text.split(' ')[0]: client.get(option.attrib['value']).json
            for option in view.pyquery('option')
        }
        # 3506 is the BFS number of Vaz/Obervaz.
        assert data['Engler']['3506']['counted'] is True
        assert data['Engler']['3506']['percentage'] == round_(20, 21)
        assert data['Schmid']['3506']['counted'] is True
        assert data['Schmid']['3506']['percentage'] == round_(18, 21)
    for url in (
        '/election/proporz-election/candidate-by-entity',
        '/election/proporz-election/candidate-by-entity-chart'
    ):
        view = client.get(url)
        assert '/by-entity">Caluori Corina</option>' in view
        assert '/by-entity">Casanova Angela</option' in view
        data = {
            option.text.split(' ')[0]: client.get(option.attrib['value']).json
            for option in view.pyquery('option')
        }
        assert data['Caluori']['3506']['counted'] is True
        assert data['Caluori']['3506']['percentage'] == round_(2, 14)
        assert data['Casanova']['3506']['counted'] is True
        assert data['Casanova']['3506']['percentage'] == 0.0
    # test for incomplete majorz: no "(gewählt)" suffix before final status
    upload_majorz_election(client, status='unknown')
    upload_proporz_election(client, status='final')
    for url in (
        '/election/majorz-election/candidate-by-entity',
        '/election/majorz-election/candidate-by-entity-chart'
    ):
        view = client.get(url)
        assert '/by-entity">Engler Stefan</option>' in view
        assert '/by-entity">Schmid Martin</option>' in view
    # test for incomplete proporz
def test_view_election_candidate_by_district(election_day_app_gr):
    """Per-district candidate views: uncounted districts report zero."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client, status='final')
    upload_proporz_election(client, status='final')
    for url in (
        '/election/majorz-election/candidate-by-district',
        '/election/majorz-election/candidate-by-district-chart'
    ):
        view = client.get(url)
        assert '/by-district">Engler Stefan (gewählt)</option>' in view
        assert '/by-district">Schmid Martin (gewählt)</option>' in view
        data = {
            option.text.split(' ')[0]: client.get(option.attrib['value']).json
            for option in view.pyquery('option')
        }
        # The Bernina district (entities 3561, 3551) is not counted yet.
        assert set(data['Engler']['Bernina']['entities']) == {3561, 3551}
        assert data['Engler']['Bernina']['counted'] is False
        assert data['Engler']['Bernina']['percentage'] == 0.0
        assert set(data['Schmid']['Bernina']['entities']) == {3561, 3551}
        assert data['Schmid']['Bernina']['counted'] is False
        assert data['Schmid']['Bernina']['percentage'] == 0.0
    for url in (
        '/election/proporz-election/candidate-by-district',
        '/election/proporz-election/candidate-by-district-chart'
    ):
        view = client.get(url)
        assert '/by-district">Caluori Corina</option>' in view
        assert '/by-district">Casanova Angela</option' in view
        data = {
            option.text.split(' ')[0]: client.get(option.attrib['value']).json
            for option in view.pyquery('option')
        }
        assert set(data['Caluori']['Bernina']['entities']) == {3561, 3551}
        assert data['Caluori']['Bernina']['counted'] is False
        assert data['Caluori']['Bernina']['percentage'] == 0.0
        assert set(data['Casanova']['Bernina']['entities']) == {3561, 3551}
        assert data['Casanova']['Bernina']['counted'] is False
        assert data['Casanova']['Bernina']['percentage'] == 0.0
def test_view_election_statistics(election_day_app_gr):
    """The statistics tab shows turnout figures and the counted status."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client)
    upload_proporz_election(client)
    statistics = client.get('/election/majorz-election/statistics')
    assert all((expected in statistics for expected in (
        "1 von 101", "Grüsch", "56", "25", "21", "41", "Noch nicht ausgezählt"
    )))
    statistics = client.get('/election/proporz-election/statistics')
    assert all((expected in statistics for expected in (
        "1 von 101", "Grüsch", "56", "32", "31", "153", "Noch nicht ausgezählt"
    )))
def test_view_election_lists(election_day_app_gr):
    """Lists views: absent for majorz, populated for proporz."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    # Majorz election: has no lists at all
    upload_majorz_election(client)
    # ... main
    main = client.get('/election/majorz-election/lists')
    assert '<h3>Listen</h3>' not in main
    # ... bar chart data
    data = client.get('/election/majorz-election/lists-data')
    assert data.json['results'] == []
    # ... embedded chart
    chart = client.get('/election/majorz-election/lists-chart')
    assert chart.status_code == 200
    assert '/election/majorz-election/lists' in chart
    # .... embedded table
    table = client.get('/election/majorz-election/lists-table')
    assert 'data-text=' not in table
    # Proporz election
    upload_proporz_election(client)
    # ... main
    main = client.get('/election/proporz-election/lists')
    assert '<h3>Listen</h3>' in main
    # ... bar chart data (with filters); invalid/empty limits mean "no limit"
    for suffix in ('', '?limit=', '?limit=a', '?limit=0'):
        data = client.get(f'/election/proporz-election/lists-data{suffix}')
        assert {r['text']: r['value'] for r in data.json['results']} == {
            'FDP': 8,
            'CVP': 6
        }
    data = client.get('/election/proporz-election/lists-data?limit=1')
    assert {r['text']: r['value'] for r in data.json['results']} == {
        'FDP': 8,
    }
    data = client.get(
        '/election/proporz-election/lists-data?entity=Vaz/Obervaz'
    )
    assert data.json['results']
    data = client.get('/election/proporz-election/lists-data?entity=Filisur')
    assert not data.json['results']
    # ... embedded chart (with filters)
    chart = client.get('/election/proporz-election/lists-chart')
    assert chart.status_code == 200
    assert '/election/proporz-election/lists-data' in chart
    chart = client.get('/election/proporz-election/lists-chart?entity=Filisur')
    assert 'entity=Filisur' in chart
    # ... embedded table (with filters)
    table = client.get('/election/proporz-election/lists-table')
    assert 'data-text="8"' in table
    table = client.get(
        '/election/proporz-election/lists-table?entity=Vaz/Obervaz'
    )
    assert 'data-text="8"' in table
    table = client.get('/election/proporz-election/lists-table?entity=Filisur')
    assert 'data-text=' not in table
def test_view_election_list_by_entity(election_day_app_gr):
    """Per-entity list views: empty for majorz, per-list data for proporz."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client)
    upload_proporz_election(client)
    # Majorz elections have no lists, hence no select options.
    url = '/election/majorz-election'
    assert '</option>' not in client.get(f'{url}/list-by-entity')
    assert '</option>' not in client.get(f'{url}/list-by-entity-chart')
    for url in (
        '/election/proporz-election/list-by-entity',
        '/election/proporz-election/list-by-entity-chart'
    ):
        view = client.get(url)
        assert '/by-entity">CVP</option>' in view
        assert '/by-entity">FDP</option' in view
        data = {
            option.text: client.get(option.attrib['value']).json
            for option in view.pyquery('option')
        }
        # 3506 is the BFS number of Vaz/Obervaz.
        assert data['CVP']['3506']['counted'] is True
        assert data['CVP']['3506']['percentage'] == round_(6, 14)
        assert data['FDP']['3506']['counted'] is True
        assert data['FDP']['3506']['percentage'] == round_(8, 14)
def test_view_election_list_by_district(election_day_app_gr):
    """Per-district list views: empty for majorz, zeroes for uncounted."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client)
    upload_proporz_election(client)
    # Majorz elections have no lists, hence no select options.
    url = '/election/majorz-election'
    assert '</option>' not in client.get(f'{url}/list-by-district')
    assert '</option>' not in client.get(f'{url}/list-by-district-chart')
    for url in (
        '/election/proporz-election/list-by-district',
        '/election/proporz-election/list-by-district-chart'
    ):
        view = client.get(url)
        assert '/by-district">CVP</option>' in view
        assert '/by-district">FDP</option' in view
        data = {
            option.text: client.get(option.attrib['value']).json
            for option in view.pyquery('option')
        }
        # The Bernina district (entities 3561, 3551) is not counted yet.
        assert set(data['CVP']['Bernina']['entities']) == {3561, 3551}
        assert data['CVP']['Bernina']['counted'] is False
        assert data['CVP']['Bernina']['percentage'] == 0.0
        assert set(data['FDP']['Bernina']['entities']) == {3561, 3551}
        assert data['FDP']['Bernina']['counted'] is False
        assert data['FDP']['Bernina']['percentage'] == 0.0
def test_view_election_party_strengths(election_day_app_gr):
    """Party strengths: charts/tables, CSV/JSON exports, history, i18n."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    # Majorz election: has no party results
    upload_majorz_election(client)
    main = client.get('/election/majorz-election/party-strengths')
    assert '<h4>Parteistärken</h4>' not in main
    parties = client.get('/election/majorz-election/party-strengths-data')
    assert parties.json['results'] == []
    chart = client.get('/election/majorz-election/party-strengths-chart')
    assert chart.status_code == 200
    assert '/election/majorz-election/party-strengths' in chart
    # Proporz election
    upload_proporz_election(client)
    upload_party_results(client)
    main = client.get('/election/proporz-election/party-strengths')
    assert '<h4>Parteistärken</h4>' in main
    parties = client.get('/election/proporz-election/party-strengths-data')
    parties = parties.json
    assert parties['groups'] == ['BDP', 'CVP', 'FDP']
    assert parties['labels'] == ['2022']
    assert parties['maximum']['back'] == 100
    assert parties['maximum']['front'] == 5
    assert parties['results']
    chart = client.get('/election/proporz-election/party-strengths-chart')
    assert chart.status_code == 200
    assert '/election/proporz-election/party-strengths-data' in chart
    assert 'panel_2022' in client.get(
        '/election/proporz-election/party-strengths-table'
    )
    assert 'panel_2022' in client.get(
        '/election/proporz-election/party-strengths-table?year=2022'
    )
    assert 'panel_2022' not in client.get(
        '/election/proporz-election/party-strengths-table?year=2018'
    )
    # CSV export of the party results
    export = client.get('/election/proporz-election/data-parties-csv').text
    lines = [l for l in export.split('\r\n') if l]
    assert lines == [
        'domain,domain_segment,year,id,'
        'name,name_de_CH,name_fr_CH,name_it_CH,name_rm_CH,'
        'total_votes,color,mandates,votes,'
        'voters_count,voters_count_percentage,panachage_votes_from_1,'
        'panachage_votes_from_2,panachage_votes_from_3,'
        'panachage_votes_from_999',
        'federation,,2022,1,BDP,BDP,,,,11270,#efb52c,'
        '1,60387,603.01,41.73,,11,12,100',
        'federation,,2022,2,CVP,CVP,,,,11270,#ff6300,'
        '1,49117,491.02,33.98,21,,22,200',
        'federation,,2022,3,FDP,FDP,,,,11270,,'
        '0,35134,351.04,24.29,31,32,,300',
    ]
    # JSON export of the party results
    export = client.get('/election/proporz-election/data-parties-json').json
    assert export == [
        {
            'color': '#efb52c',
            'domain': 'federation',
            'domain_segment': None,
            'id': '1',
            'mandates': 1,
            'name': 'BDP',
            'name_de_CH': 'BDP',
            'name_fr_CH': None,
            'name_it_CH': None,
            'name_rm_CH': None,
            'panachage_votes_from_1': None,
            'panachage_votes_from_2': 11,
            'panachage_votes_from_3': 12,
            'panachage_votes_from_999': 100,
            'total_votes': 11270,
            'voters_count': 603.01,
            'voters_count_percentage': 41.73,
            'votes': 60387,
            'year': 2022
        },
        {
            'color': '#ff6300',
            'domain': 'federation',
            'domain_segment': None,
            'id': '2',
            'mandates': 1,
            'name': 'CVP',
            'name_de_CH': 'CVP',
            'name_fr_CH': None,
            'name_it_CH': None,
            'name_rm_CH': None,
            'panachage_votes_from_1': 21,
            'panachage_votes_from_2': None,
            'panachage_votes_from_3': 22,
            'panachage_votes_from_999': 200,
            'total_votes': 11270,
            'voters_count': 491.02,
            'voters_count_percentage': 33.98,
            'votes': 49117,
            'year': 2022
        },
        {
            'color': None,
            'domain': 'federation',
            'domain_segment': None,
            'id': '3',
            'mandates': 0,
            'name': 'FDP',
            'name_de_CH': 'FDP',
            'name_fr_CH': None,
            'name_it_CH': None,
            'name_rm_CH': None,
            'panachage_votes_from_1': 31,
            'panachage_votes_from_2': 32,
            'panachage_votes_from_3': None,
            'panachage_votes_from_999': 300,
            'total_votes': 11270,
            'voters_count': 351.04,
            'voters_count_percentage': 24.29,
            'votes': 35134,
            'year': 2022
        }
    ]
    # Historical data with translations
    csv_parties = (
        'year,name,name_fr_ch,id,total_votes,color,mandates,'
        'votes,voters_count,voters_count_percentage\r\n'
        '2022,BDP,,1,60000,#efb52c,1,10000,100,16.67\r\n'
        '2022,Die Mitte,Le Centre,2,60000,#ff6300,1,30000,300,50\r\n'
        '2022,FDP,,3,60000,#4068c8,0,20000,200,33.33\r\n'
        '2018,BDP,,1,40000,#efb52c,1,1000,10,2.5\r\n'
        '2018,CVP,PDC,2,40000,#ff6300,1,15000,150.7,37.67\r\n'
        '2018,FDP,,3,40000,#4068c8,1,10000,100,25.0\r\n'
    ).encode('utf-8')
    upload = client.get('/election/proporz-election/upload-party-results')
    upload.form['parties'] = Upload('parties.csv', csv_parties, 'text/plain')
    upload = upload.form.submit()
    assert "erfolgreich hochgeladen" in upload
    parties = client.get('/election/proporz-election/party-strengths-data')
    parties = parties.json
    assert parties['groups'] == ['BDP', 'Die Mitte', 'FDP']
    assert parties['labels'] == ['2018', '2022']
    assert parties['maximum']['back'] == 100
    assert parties['maximum']['front'] == 5
    assert parties['results']
    parties = {
        '{}-{}'.format(party['item'], party['group']): party
        for party in parties['results']
    }
    assert parties['2018-BDP']['color'] == '#efb52c'
    assert parties['2022-BDP']['color'] == '#efb52c'
    assert parties['2018-Die Mitte']['color'] == '#ff6300'
    assert parties['2022-Die Mitte']['color'] == '#ff6300'
    assert parties['2018-FDP']['color'] == '#4068c8'
    assert parties['2022-FDP']['color'] == '#4068c8'
    # Only the latest year is "active"
    assert parties['2018-BDP']['active'] is False
    assert parties['2018-Die Mitte']['active'] is False
    assert parties['2018-FDP']['active'] is False
    assert parties['2022-BDP']['active'] is True
    assert parties['2022-Die Mitte']['active'] is True
    assert parties['2022-FDP']['active'] is True
    # front = mandates, back = vote share in percent
    assert parties['2018-BDP']['value']['front'] == 1
    assert parties['2018-Die Mitte']['value']['front'] == 1
    assert parties['2018-FDP']['value']['front'] == 1
    assert parties['2022-BDP']['value']['front'] == 1
    assert parties['2022-Die Mitte']['value']['front'] == 1
    assert parties['2022-FDP']['value']['front'] == 0
    assert parties['2018-BDP']['value']['back'] == 2.5
    assert parties['2018-Die Mitte']['value']['back'] == 37.5
    assert parties['2018-FDP']['value']['back'] == 25
    assert parties['2022-BDP']['value']['back'] == 16.7
    assert parties['2022-Die Mitte']['value']['back'] == 50
    assert parties['2022-FDP']['value']['back'] == 33.3
    results = client.get('/election/proporz-election/party-strengths').text
    assert '2.5%' in results
    assert '16.7%' in results
    assert '14.2%' in results
    assert '37.5%' in results
    assert '50.0%' in results
    assert '12.5%' in results
    assert '25.0%' in results
    assert '33.3%' in results
    assert '8.3%' in results
    # with exact voters counts
    edit = client.get('/election/proporz-election/edit')
    edit.form['voters_counts'] = True
    edit.form['exact_voters_counts'] = True
    edit.form.submit()
    assert '>10.00<' in client.get(
        '/election/proporz-election/party-strengths'
    )
    data = client.get('/election/proporz-election/party-strengths-data').json
    assert data['results'][0]['value']['back'] == 16.67
    data = client.get('/election/proporz-election/json').json
    assert data['parties']['2']['2018']['voters_count']['total'] == 150.7
    # with rounded voters counts
    edit = client.get('/election/proporz-election/edit')
    edit.form['exact_voters_counts'] = False
    edit.form.submit()
    assert '>10<' in client.get('/election/proporz-election/party-strengths')
    data = client.get('/election/proporz-election/party-strengths-data').json
    assert data['results'][0]['value']['back'] == 16.67
    data = client.get('/election/proporz-election/json').json
    assert data['parties']['2']['2018']['voters_count']['total'] == 151
    # translations: French names fall back to German where not provided
    client.get('/locale/fr_CH')
    parties = client.get('/election/proporz-election/party-strengths-data')
    parties = parties.json
    assert parties['groups'] == ['BDP', 'Le Centre', 'FDP']
    results = client.get('/election/proporz-election/party-strengths').text
    assert 'Le Centre' in results
    assert 'PDC' in results
    assert 'BDP' in results
def test_view_election_connections(election_day_app_gr):
    """List connections (sankey): absent for majorz, nodes/links for proporz."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client)
    main = client.get('/election/majorz-election/connections')
    assert '<h4>Listenverbindungen</h4>' not in main
    assert client.get('/election/majorz-election/connections-data').json == {}
    chart = client.get('/election/majorz-election/connections-chart')
    assert '/election/majorz-election/connections-data' in chart
    # Fixme: Add an incomplete election and test
    # if connections_data is not there
    upload_proporz_election(client)
    main = client.get('/election/proporz-election/connections')
    assert '<h4>Listenverbindungen</h4>' in main
    data = client.get('/election/proporz-election/connections-data').json
    nodes = [node['name'] for node in data['nodes']]
    assert 'FDP' in nodes
    assert 'CVP' in nodes
    links = [
        '{}:{}'.format(link['source'], link['value']) for link in data['links']
    ]
    assert '{}:8'.format(nodes.index('FDP')) in links
    assert '{}:6'.format(nodes.index('CVP')) in links
    chart = client.get('/election/proporz-election/connections-chart')
    assert '/election/proporz-election/connections-data' in chart
def test_view_election_lists_panachage_majorz(election_day_app_gr):
    """Lists panachage is absent for majorz elections (no lists)."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client)
    main = client.get('/election/majorz-election/lists-panachage')
    assert '<h4>Panaschierstatistik</h4>' not in main
    assert client.get(
        '/election/majorz-election/lists-panachage-data'
    ).json == {}
    chart = client.get('/election/majorz-election/lists-panachage-chart')
    assert chart.status_code == 200
    assert '/election/majorz-election/lists-panachage-data' in chart
def test_view_election_lists_panachage_proporz(election_day_app_gr):
    """Lists panachage for proporz: sankey nodes and link thicknesses."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_proporz_election(client)
    main = client.get('/election/proporz-election/lists-panachage')
    assert '<h4>Panaschierstatistik</h4>' in main
    data = client.get('/election/proporz-election/lists-panachage-data').json
    nodes = [node['name'] for node in data['nodes']]
    assert 'Blankoliste' in nodes
    assert 'FDP' in nodes
    assert 'CVP' in nodes
    # value is the thickness of the line
    links = sorted([(r['target'], r['value']) for r in data['links']])
    # List 1 gets 1 vote from list 2
    # List 2 gets 2 votes from list 1
    # 4 represents target index of list 2 in nodes on the right side
    # 3 represents target index of list 1 in nodes on the right side
    assert links == [(3, 1), (4, 2)]
def test_view_election_parties_panachage(election_day_app_gr):
    """Majorz elections expose no parties panachage; proporz ones do."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)

    upload_majorz_election(client)

    majorz_base = '/election/majorz-election/parties-panachage'
    assert '<h4>Panaschierstatistik</h4>' not in client.get(majorz_base)
    assert client.get(majorz_base + '-data').json == {}
    chart = client.get(majorz_base + '-chart')
    assert chart.status_code == 200
    assert majorz_base + '-data' in chart

    upload_proporz_election(client)
    upload_party_results(client)

    proporz_base = '/election/proporz-election/parties-panachage'
    assert '<h4>Panaschierstatistik</h4>' in client.get(proporz_base)

    data = client.get(proporz_base + '-data').json
    names = {node['name'] for node in data['nodes']}
    assert {'Blankoliste', 'BDP', 'CVP', 'FDP'} <= names

    colors = {node['color'] for node in data['nodes']}
    assert '#efb52c' in colors
    assert '#ff6300' in colors

    values = [link['value'] for link in data['links']]
    for expected in (11, 12, 100, 21, 22, 200, 31, 32, 300):
        assert expected in values
def test_view_election_json(election_day_app_gr):
    """The JSON views are CORS-enabled and contain the uploaded results."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)

    upload_majorz_election(client)
    upload_proporz_election(client)

    cases = (
        ('/election/majorz-election/json',
         ("Engler", "Stefan", "20", "Schmid", "Martin", "18")),
        ('/election/proporz-election/json',
         ("Casanova", "Angela", "56", "Caluori", "Corina", "32", "CVP", "FDP")),
    )
    for url, fragments in cases:
        response = client.get(url)
        assert response.headers['Access-Control-Allow-Origin'] == '*'
        body = str(response.json)
        for fragment in fragments:
            assert fragment in body
def test_view_election_summary(election_day_app_gr):
    """The summary endpoint is CORS-enabled and reports progress/turnout."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)

    # Freeze the clock so last_modified is deterministic.
    with freeze_time("2014-01-01 12:00"):
        upload_majorz_election(client)
        upload_proporz_election(client)

    expected = {
        'majorz-election': {
            'completed': False,
            'date': '2022-01-01',
            'domain': 'federation',
            'elected': [['Stefan', 'Engler'], ['Martin', 'Schmid']],
            'last_modified': '2014-01-01T12:00:00+00:00',
            'progress': {'counted': 1, 'total': 101},
            'title': {'de_CH': 'Majorz Election'},
            'type': 'election',
            'url': 'http://localhost/election/majorz-election',
            'turnout': 44.642857142857146
        },
        'proporz-election': {
            'completed': False,
            'date': '2022-01-01',
            'domain': 'federation',
            'elected': [],
            'last_modified': '2014-01-01T12:00:00+00:00',
            'progress': {'counted': 1, 'total': 101},
            'title': {'de_CH': 'Proporz Election'},
            'type': 'election',
            'url': 'http://localhost/election/proporz-election',
            'turnout': 57.14285714285714
        },
    }
    for election, summary in expected.items():
        response = client.get(f'/election/{election}/summary')
        assert response.headers['Access-Control-Allow-Origin'] == '*'
        assert response.json == summary
def test_view_election_data(election_day_app_gr):
    """Raw data exports are served inline with the right type and filename."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)

    upload_majorz_election(client)
    upload_proporz_election(client)

    cases = (
        ('majorz-election', 'json', 'application/json; charset=utf-8',
         ("3506", "Engler", "20")),
        ('majorz-election', 'csv', 'text/csv; charset=UTF-8',
         ("3506", "Engler", "20")),
        ('proporz-election', 'json', 'application/json; charset=utf-8',
         ("FDP", "Caluori", "56")),
        ('proporz-election', 'csv', 'text/csv; charset=UTF-8',
         ("FDP", "Caluori", "56")),
    )
    for election, fmt, content_type, fragments in cases:
        data = client.get(f'/election/{election}/data-{fmt}')
        assert data.headers['Content-Type'] == content_type
        assert data.headers['Content-Disposition'] == \
            f'inline; filename={election}.{fmt}'
        for fragment in fragments:
            assert fragment in data
def test_view_election_tacit(election_day_app_gr):
    """Tacit elections list elected candidates but show no turnout."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)

    form = client.get('/manage/elections/new-election').form
    form['election_de'] = 'Tacit Election'
    form['date'] = '2022-01-01'
    form['mandates'] = 2
    form['election_type'] = 'majorz'
    form['domain'] = 'federation'
    form['tacit'] = True
    form.submit()

    csv = (
        MAJORZ_HEADER
        + "final,,3506,True,56,0,0,0,0,0,1,True,Engler,Stefan,0,\n"
        + "final,,3506,True,56,0,0,0,0,0,2,True,Schmid,Martin,0,\n"
    ).encode('utf-8')

    upload = client.get('/election/tacit-election/upload').follow()
    upload.form['file_format'] = 'internal'
    upload.form['results'] = Upload('data.csv', csv, 'text/plain')
    upload = upload.form.submit()
    assert "Ihre Resultate wurden erfolgreich hochgeladen" in upload

    candidates = client.get('/election/tacit-election/candidates')
    assert "Engler Stefan" in candidates
    assert "Schmid Martin" in candidates
    assert "Wahlbeteiligung" not in candidates
def test_view_election_relations(election_day_app_gr):
    """Related elections are cross-linked on candidates/statistics/data."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)

    # Create two elections; the second declares the first as related.
    for title, date, related in (
        ('First Election', '2022-01-01', False),
        ('Second Election', '2022-01-02', True),
    ):
        form = client.get('/manage/elections/new-election').form
        form['election_de'] = title
        form['date'] = date
        form['mandates'] = 2
        form['election_type'] = 'majorz'
        form['domain'] = 'federation'
        if related:
            form['related_elections_historical'] = ['first-election']
            form['related_elections_other'] = ['first-election']
        form.submit()

    csv = (
        MAJORZ_HEADER
        + "final,,3506,True,56,0,0,0,0,0,1,True,Engler,Stefan,0,\n"
        + "final,,3506,True,56,0,0,0,0,0,2,True,Schmid,Martin,0,\n"
    ).encode('utf-8')

    for count in ('first', 'second'):
        upload = client.get(f'/election/{count}-election/upload').follow()
        upload.form['file_format'] = 'internal'
        upload.form['results'] = Upload('data.csv', csv, 'text/plain')
        upload = upload.form.submit()
        assert "Ihre Resultate wurden erfolgreich hochgeladen" in upload

    for page in ('candidates', 'statistics', 'data'):
        result = client.get(f'/election/first-election/{page}')
        assert '<h2>Zugehörige Wahlen</h2>' in result
        assert 'http://localhost/election/second-election' in result
        assert 'Second Election' in result

        result = client.get(f'/election/second-election/{page}')
        assert '<h2>Zugehörige Wahlen</h2>' in result
        assert 'http://localhost/election/first-election' in result
        assert 'First Election' in result
@pytest.mark.parametrize('tab_name', ElectionLayout.tabs_with_embedded_tables)
def test_views_election_embedded_widgets(election_day_app_gr, tab_name):
    """Every embeddable table view of an election is reachable."""
    client = Client(election_day_app_gr)
    client.get('/locale/de_CH').follow()
    login(client)
    upload_majorz_election(client)
    client.get('/election/majorz-election/{}-table'.format(tab_name))
|
import torch
from torchvision import datasets, transforms
def read_data(data_path="", batch_size=1):
    """Return (train_loader, test_loader) for MNIST.

    :param data_path: directory where the MNIST files live (downloaded on
        first use for the training split).
    :param batch_size: batch size for both loaders.
    """
    # Both splits use the identical preprocessing, so define it once
    # (previously duplicated verbatim for train and test).
    transform = transforms.Compose([
        transforms.ToTensor(),
        # Canonical MNIST mean/std normalization constants.
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_path, train=True, download=True,
                       transform=transform),
        batch_size, shuffle=True, num_workers=1)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_path, train=False, transform=transform),
        batch_size, shuffle=True, num_workers=1)
    return train_loader, test_loader
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Move the integer ``sequence_id`` field from Activity to Wbs."""

    dependencies = [
        ('pmtool', '0016_auto_20150212_1046'),
    ]

    operations = [
        # Drop the ordering column from Activity ...
        migrations.RemoveField(
            model_name='activity',
            name='sequence_id',
        ),
        # ... and recreate it on Wbs; existing rows get the default 0.
        migrations.AddField(
            model_name='wbs',
            name='sequence_id',
            field=models.IntegerField(default=0, verbose_name=b'Sequence Id'),
            preserve_default=True,
        ),
    ]
|
# login/views.py
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from mainWindow.models import House
from . import models
from .forms import UserForm
from .forms import RegisterForm
from .forms import ChangeForm
from .models import User
# Author: Wang Haoping.  Created: 2019-08-27.  Last updated: 2019-09-10.
def index(request):
    """Landing page: redirect to the page of the city selected in the form.

    Reads ``city_Info`` from the POST body.  Unknown cities and plain GET
    requests fall back to rendering the generic ``index1.html``.
    """
    # City name (as submitted by the form) -> that city's landing page.
    # Replaces a 13-branch if/elif chain with a single dict lookup.
    city_pages = {
        '南京': 'indexnj.html',
        '成都': 'indexcd.html',
        '长沙': 'indexcs.html',
        '广州': 'indexgz.html',
        '杭州': 'indexhz.html',
        '南昌': 'indexnc.html',
        '上海': 'indexsh.html',
        '苏州': 'indexsu.html',
        '深圳': 'indexsz.html',
        '天津': 'indextj.html',
        '太原': 'indexty.html',
        '武汉': 'indexwh.html',
        '北京': 'indexbj.html',
    }
    city_info = request.POST.get('city_Info')
    if request.method == "POST" and city_info in city_pages:
        return redirect(city_pages[city_info])
    # Template context is passed via locals(), so keep variable names stable.
    return render(request, "index1.html", locals())
def login(request):
    """Log a user in by checking name/password against the User table.

    Already-logged-in sessions are bounced to the home page.  On success
    the session records the login flag, user id and user name.
    """
    if request.session.get('is_login', None):
        return redirect('/')
    if request.method == "POST":
        login_form = UserForm(request.POST)
        message = "请检查填写的内容!"
        if login_form.is_valid():
            username = login_form.cleaned_data['username']
            password = login_form.cleaned_data['password']
            try:
                user = models.User.objects.get(name=username)
            except models.User.DoesNotExist:
                # Previously a bare ``except:`` which also swallowed any
                # unrelated error raised after the lookup succeeded.
                message = "用户不存在!"
            else:
                # NOTE(review): passwords are stored and compared in plain
                # text -- consider django.contrib.auth.hashers.
                if user.password == password:
                    request.session['is_login'] = True
                    request.session['user_id'] = user.id
                    request.session['user_name'] = user.name
                    return redirect('/')
                message = "密码不正确!"
        # Template context is passed via locals(); keep names stable.
        return render(request, 'login/login.html', locals())
    login_form = UserForm()
    return render(request, 'login/login.html', locals())
def register(request):
    """Create a new account after validating password match and uniqueness.

    Logged-in sessions may not register again.  On success redirects to
    the login page; on any validation failure re-renders the form with a
    message.
    """
    if request.session.get('is_login', None):
        # No registering while logged in.  You may change this rule!
        return redirect("/")
    if request.method == "POST":
        register_form = RegisterForm(request.POST)
        message = "请检查填写的内容!"
        if register_form.is_valid():
            username = register_form.cleaned_data['username']
            password1 = register_form.cleaned_data['password1']
            password2 = register_form.cleaned_data['password2']
            email = register_form.cleaned_data['email']
            sex = register_form.cleaned_data['sex']
            phone = register_form.cleaned_data['phone']
            # Guard clauses instead of nested else-branches.
            if password1 != password2:
                message = "两次输入的密码不同!"
                return render(request, 'login/register.html', locals())
            same_name_user = models.User.objects.filter(name=username)
            if same_name_user:  # user name must be unique
                message = '用户已经存在,请重新选择用户名!'
                return render(request, 'login/register.html', locals())
            same_email_user = models.User.objects.filter(email=email)
            if same_email_user:  # e-mail address must be unique
                message = '该邮箱地址已被注册,请使用别的邮箱!'
                return render(request, 'login/register.html', locals())
            # Single INSERT instead of saving an empty row first and then
            # updating every field (the old create() + save() pattern).
            new_user = models.User.objects.create(
                name=username,
                password=password1,  # NOTE(review): stored in plain text
                email=email,
                sex=sex,
                phone=phone,
            )
            return redirect('/login/')  # continue at the login page
    register_form = RegisterForm()
    return render(request, 'login/register.html', locals())
def logout(request):
    """Clear the whole session (if any) and return to the home page."""
    if request.session.get('is_login', None):
        # flush() drops everything stored for this session at once
        # (the login flag, user_id and user_name keys included).
        request.session.flush()
    return redirect("/")
def information(request):
    """Profile page of the logged-in user; context is passed via locals()."""
    # Session must contain 'user_id' (set by login); KeyError otherwise.
    user = models.User.objects.get(id = request.session['user_id'])
    username = user.name  # referenced by the template through locals()
    return render(request,'login/information.html',locals())
def edit(request):
    """Let the logged-in user change their e-mail address and phone number.

    A changed e-mail must still be unique; the phone number is updated
    unconditionally.  On success redirects to the profile page.
    """
    user = models.User.objects.get(id = request.session['user_id'])
    if request.method == "POST":
        change_form = ChangeForm(request.POST)
        if change_form.is_valid():
            em = change_form.cleaned_data['new_email']
            # Only a changed address needs the uniqueness check.
            if em != user.email:
                same_email_user = models.User.objects.filter(email=em)
                if same_email_user:
                    message = '该邮箱地址已被注册,请使用别的邮箱!'
                    return render(request, 'login/edit.html', locals())
            # Previously both branches assigned user.email = em separately;
            # a single assignment after the check is equivalent.
            user.email = em
            ph = change_form.cleaned_data['new_phone']
            user.phone = ph
            user.save()
            return redirect('/information/')
    change_form = ChangeForm(initial={"new_email":user.email,"new_phone":user.phone})
    return render(request, 'login/edit.html', locals())
def Hcompare(request):
    """Comparison page: count the listed houses of each property type.

    The counts s1..s5 and the querysets roomType1..5 are handed to the
    template through locals(), so those names must stay stable.
    """
    roomType1 = House.objects.filter(wkind="住宅")
    roomType2 = House.objects.filter(wkind="别墅")
    roomType3 = House.objects.filter(wkind="写字楼")
    roomType4 = House.objects.filter(wkind="底商")
    roomType5 = House.objects.filter(wkind="酒店式公寓")
    # COUNT(*) in the database instead of fetching every row and counting
    # them one by one in Python.
    s1 = roomType1.count()
    s2 = roomType2.count()
    s3 = roomType3.count()
    s4 = roomType4.count()
    s5 = roomType5.count()
    return render(request,'compare.html',locals())
def userInfo(request):
    # Static page; locals() passes only ``request`` to the template.
    return render(request,'userInfo.html',locals())
|
import json
import requests
def get_weather_data():
    """Fetch current London weather from the OpenWeatherMap sample API.

    :return: the decoded JSON payload as a dict.
    :raises requests.HTTPError: if the server answers with an error status.
    """
    url="https://samples.openweathermap.org/data/2.5/weather?q=London,uk&appid=b6907d289e10d714a6e88b30761fae22"
    # A timeout keeps the script from hanging forever on a dead network;
    # raise_for_status surfaces HTTP errors instead of letting the caller
    # crash later on a malformed/partial payload.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    json_data = response.json()
    return json_data
if __name__ == '__main__':
    # Fetch once, then pull the fields out before printing.
    weather_data = get_weather_data()
    wind = weather_data['wind']
    print(f"Weather in {weather_data['name']}")
    print(f"Wind speed {wind['speed']}")
    print(f"Wind direction {wind['deg']}°")
|
import os, re
def main():
    """Read the puzzle input next to this script and print the answer."""
    # Resolve relative to the script so it works from any CWD.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # ``with`` closes the handle (the old bare open() leaked it) and the
    # local is renamed so it no longer shadows the builtin input().
    with open("day20_input.txt") as f:
        lines = f.read().splitlines()
    print(solve(lines))
class Particle:
    """A particle with per-axis integer position, velocity and acceleration."""

    def __init__(self, position, velocity, acceleration):
        self.position = position
        self.velocity = velocity
        self.acceleration = acceleration

    def move(self):
        """Advance one tick: acceleration feeds velocity, velocity feeds position."""
        # Mutate the lists in place, axis by axis.
        for axis, accel in enumerate(self.acceleration):
            self.velocity[axis] += accel
        for axis, speed in enumerate(self.velocity):
            self.position[axis] += speed

    def distance(self):
        """Manhattan distance from the origin."""
        return sum(abs(component) for component in self.position)
# Input lines look like: p=<-4897,3080,2133>, v=<-58,-15,-78>, a=<17,-7,0>
# Raw string: "\d" in a plain literal is an invalid escape (DeprecationWarning).
PATTERN = r"-?\d+"
def solve(input, cycles=1000):
    """Simulate all particles for ``cycles`` ticks and return the index of
    the one that ends up closest (Manhattan distance) to the origin.

    :param input: raw puzzle lines, one particle per line.
    :param cycles: number of simulation steps to run.
    """
    # Compile once (raw string) instead of re-scanning with a plain literal
    # on every line.
    number = re.compile(r"-?\d+")
    particles = []
    for line in input:
        coords = [int(x) for x in number.findall(line)]
        pos, vel, acc = coords[:3], coords[3:6], coords[6:]
        particles.append(Particle(pos, vel, acc))
    for _ in range(cycles):
        for p in particles:
            p.move()
    # min() yields a Particle, not a distance -- name it accordingly
    # (previously called min_dist).
    closest = min(particles, key=lambda p: p.distance())
    return particles.index(closest)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
from django.apps import AppConfig
class SocialsAppConfig(AppConfig):
    """App config for the ``socials`` Django app."""
    name = "socials"

    def ready(self):
        # Imported purely for its side effect: registering the post_save
        # signal handler.  The imported name is intentionally unused.
        from socials.signals.post_save import post_save_keyword
|
import requests
import random
import time
import json
import pandas as pd
# Base URL that announcement PDF paths (adjunctUrl) are appended to.
download_path = 'http://static.cninfo.com.cn/'
# Local directory the downloaded annual reports are written to.
saving_path = 'D:/中信证券暑期/2020年报'
# Pool of User-Agent strings; one is picked at random per request to
# reduce the chance of being rate-limited.
User_Agent = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0"
]  # collection of User-Agent strings
# Default headers for the announcement query endpoint; 'User-Agent' is
# filled in per request (see single_page).
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
           "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
           "Accept-Encoding": "gzip, deflate",
           "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-HK;q=0.6,zh-TW;q=0.5",
           'Host': 'www.cninfo.com.cn',
           'Origin': 'http://www.cninfo.com.cn',
           'Referer': 'http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice',
           'X-Requested-With': 'XMLHttpRequest'
           }
### cninfo requires an "orgId" field to return data; the POST body uses the
### form 'stock': '<security code>,<orgId>;'
def get_orgid(Namelist):
    """Look up the cninfo ``orgId`` for every company name in ``Namelist``.

    :param Namelist: iterable of company (short) names to search for.
    :return: the orgIds, deduplicated while preserving first-seen order.
    """
    orglist = []
    url = 'http://www.cninfo.com.cn/new/information/topSearch/detailOfQuery'
    hd = {
        'Host': 'www.cninfo.com.cn',
        'Origin': 'http://www.cninfo.com.cn',
        'Pragma': 'no-cache',
        'Accept-Encoding': 'gzip,deflate',
        'Connection': 'keep-alive',
        'Content-Length': '70',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'application/json,text/plain,*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'}
    for name in Namelist:
        data = {'keyWord': name,
                'maxSecNum': 10,
                'maxListNum': 5,
                }
        # Timeout so one dead request cannot hang the whole batch.
        r = requests.post(url, headers=hd, data=data, timeout=30)
        # First hit of the search suggestions carries the orgId we need.
        org_id = r.json()['keyBoardList'][0]['orgId']
        orglist.append(org_id)
    # Order-preserving dedup in one pass; the old list(set(...)) followed by
    # sort(key=orglist.index) was O(n^2).
    return list(dict.fromkeys(orglist))
def single_page(page, stock):
    """Query one page of announcement metadata from cninfo.

    :param page: 1-based page number of the result listing.
    :param stock: string of '<security code>,<orgId>;' pairs.
    :return: list of announcement dicts from the JSON response.
    """
    query_path = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
    headers['User-Agent'] = random.choice(User_Agent)  # rotate UA per request
    print(stock)
    query = {'pageNum': page,  # page number
             'pageSize': 30,
             'tabName': 'fulltext',
             'column': 'szse',
             'stock': stock,
             'searchkey': '',
             'secid': '',
             'plate': '',
             'category': 'category_ndbg_szsh;',  # annual reports
             'trade': '',  # industry filter (unused)
             'seDate': '2020-11-27~2021-05-28'  # date range
             }
    # Renamed from ``namelist`` (it is a Response, not a list of names) and
    # added a timeout; the local also no longer shadows the function's name.
    response = requests.post(query_path, headers=headers, data=query, timeout=30)
    time.sleep(5)  # throttle to avoid being blocked
    announcements = response.json()['announcements']
    print(len(announcements))
    return announcements  # announcement metadata from the JSON payload
def saving(single_page):  # download the annual reports
    """Download every 2020 annual-report PDF listed in ``single_page``.

    :param single_page: list of announcement dicts as returned by
        ``single_page()``.
    """
    headers = {'Host': 'static.cninfo.com.cn',
               'Connection': 'keep-alive',
               'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36 Edg/90.0.818.66',
               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
               'Cookie': 'routeId=.uc1'
               }
    for i in single_page:
        title = i['announcementTitle']
        # Keep only the 2020 annual report and its updated/revised variants.
        if ('2020年年度报告(更新后)' in title) or ('2020年年度报告' in title) or ('2020年年度报告(修订版)' in title):
            download = download_path + i["adjunctUrl"]
            name = i["secCode"] + '_' + i['secName'] + '_' + i['announcementTitle'] + '.pdf'
            file_path = saving_path + '/' + name
            print(file_path)
            time.sleep(random.random() * 2)  # jitter between downloads
            headers['User-Agent'] = random.choice(User_Agent)
            r = requests.get(download, headers=headers, timeout=60)
            time.sleep(15)
            print(r.status_code)
            # ``with`` guarantees the handle is closed even if write fails
            # (the old open/write/close leaked it on error).
            with open(file_path, "wb") as f:
                f.write(r.content)
if __name__ == '__main__':
    # Read the excel of security codes + short names.
    Sec = pd.read_excel('C:/Users/zikan/Desktop/dict.xlsx',dtype = {'code':'object'}) # read excel: security code + short name
    Seclist = list(Sec['code']) # security codes as a list
    Namelist = list(Sec['name'])
    # Resolve each company name to its cninfo orgId and persist the mapping.
    org_list = get_orgid(Namelist)
    Sec['orgid'] = org_list
    Sec.to_excel('C:/Users/zikan/Desktop/dict.xlsx',sheet_name='sheet-2',index=False) # index=False: do not write the index column
    stock = ''
    ## Iterate row by row, building the '<code>,<orgId>;' query string.
    for rows in Sec.iterrows():
        t = str(rows[1]['code'])+','+str(rows[1]['orgid'])+';'
        stock = stock+t
    # Fetch up to 4 result pages, retrying each page once on failure.
    # NOTE(review): if both attempts for a page fail, ``page_data`` keeps the
    # previous page's value (duplicate downloads) -- or raises NameError on
    # the first page.  Confirm this is acceptable.
    for p in range(4):
        page = p+1
        try:
            page_data = single_page(page,stock)
        except:
            print(page, 'page error, retrying')
            try:
                page_data = single_page(page,stock)
            except:
                print(page, 'page error')
        saving(page_data)
|
from django import template
register = template.Library()
@register.filter
def get_key_value(some_dict, key):
    """
    Template filter: look up ``key`` in a dictionary.

    :param some_dict: Dictionary object
    :param key: key to look up in some_dict
    :return: the stored value, or '' when the key is absent
    """
    try:
        return some_dict[key]
    except KeyError:
        return ''
|
from adapters.adapter_with_battery import AdapterWithBattery
from devices.sensor.temperature import TemperatureSensor
class TemperatureSensorAdapter(AdapterWithBattery):
    """Battery-powered adapter exposing a single temperature sensor."""

    def __init__(self, devices):
        super().__init__(devices)
        # Registers a TemperatureSensor; presumably the arguments are
        # (devices, id, attribute, topic) -- confirm against TemperatureSensor.
        self.devices.append(TemperatureSensor(devices, 'temp', 'temperature', 'temperature'))
|
from django.shortcuts import render,HttpResponseRedirect
from faculty.models import Leave
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from faculty.models import LoadShift,Leave, OD
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.conf import settings
from django.contrib.auth.models import User
from guest.models import Reservation
import datetime
from datetime import timedelta
from EmailManager.views import send_async_mail
# Create your views here.
@login_required
def index(request):
    """HOD landing page: superusers go straight to the leave approvals."""
    if request.user.is_superuser:
        return HttpResponseRedirect('/hod/approveleaves')
    # Everyone else gets the generic unauthorized page.
    return render(request, "error.html", {
        "error_code" : "401",
        "error_message" : "UNAUTHORIZED"
    })
@login_required
def get_leaves(request):
    """List pending leave requests and handle approve/reject submissions.

    Superuser only.  A POST carries ``leave_id`` plus either ``_approve``
    or ``_reject``; the decision is saved and the requesting faculty
    member is e-mailed.  The page then shows the still-undecided leaves
    together with their shifted loads.
    """
    if request.user.is_superuser:
        if request.method == 'POST':
            leave = Leave.objects.get(pk = request.POST.get('leave_id'))
            # default duration
            duration = 'full'
            # Classify the span against the institute's fixed full/half-day
            # times; anything else is shown verbatim as "HH:MM TO HH:MM".
            if(leave.leave_start_time.strftime("%H:%M") == '10:30' and leave.leave_end_time.strftime("%H:%M") == '17:15'):
                duration = 'full'
            elif(leave.leave_start_time.strftime("%H:%M") == '10:30' and leave.leave_end_time.strftime("%H:%M") == '13:15'):
                duration = 'first_half'
            elif(leave.leave_start_time.strftime("%H:%M") == '13:15' and leave.leave_end_time.strftime("%H:%M") == '17:15'):
                duration = 'second_half'
            else:
                duration = leave.leave_start_time.strftime("%H:%M") + " TO " + leave.leave_end_time.strftime("%H:%M")
            subject = 'Leave Notification'
            message_data = {
                'leave' : leave,
                'duration' : duration
            }
            if '_reject' in request.POST:
                leave.approved_status = False
                leave.save()
            elif '_approve' in request.POST:
                leave.approved_status = True
                leave.save()
            # NOTE(review): this notification is sent for rejections as well
            # as approvals (it sits outside the if/elif above) -- confirm
            # 'approve_leave.html' covers both outcomes.
            subject = 'Leave Notification'
            email_from = settings.EMAIL_HOST_USER
            recipient_list = []
            recipient_list.append(leave.leave_taken_by.email)
            html_content = render_to_string('email/faculty/approve_leave.html', message_data) # render with dynamic value
            text_content = strip_tags(html_content)
            msg = EmailMultiAlternatives(subject, text_content, email_from, recipient_list)
            msg.attach_alternative(html_content, "text/html")
            send_async_mail(msg)
            # return render(request,'email/faculty/approve_leave.html', message_data)
        # Pending = no decision yet (approved_status is still None).
        leaves = Leave.objects.filter(approved_status = None)
        leave_loads_pairs = list()
        for leave in leaves:
            loads_data = list()
            loads = LoadShift.objects.filter(leave = leave)
            for load in loads:
                loads_data.append(load)
            leave_loads_pairs.append((leave,loads_data))
        context_data = {
            'leave_loads_pairs' : leave_loads_pairs
        }
        # print(context_data)
        return render(request, 'hod/leaves.html', context_data)
    html_error_data = {
        "error_code" : "401",
        "error_message" : "UNAUTHORIZED"
    }
    return render(request,"error.html",html_error_data)
@login_required
def leave_history(request):
    """Show already-decided leaves; a POST can still flip a decision.

    Superuser only.  Mirrors ``get_leaves`` but lists leaves whose
    ``approved_status`` is already set (excludes None) and renders the
    history template.
    """
    if request.user.is_superuser:
        if request.method == 'POST':
            leave = Leave.objects.get(pk = request.POST.get('leave_id'))
            # default duration
            duration = 'full'
            # Same full/half-day classification as in get_leaves.
            if(leave.leave_start_time.strftime("%H:%M") == '10:30' and leave.leave_end_time.strftime("%H:%M") == '17:15'):
                duration = 'full'
            elif(leave.leave_start_time.strftime("%H:%M") == '10:30' and leave.leave_end_time.strftime("%H:%M") == '13:15'):
                duration = 'first_half'
            elif(leave.leave_start_time.strftime("%H:%M") == '13:15' and leave.leave_end_time.strftime("%H:%M") == '17:15'):
                duration = 'second_half'
            else:
                duration = leave.leave_start_time.strftime("%H:%M") + " TO " + leave.leave_end_time.strftime("%H:%M")
            subject = 'Leave Notification'
            message_data = {
                'leave' : leave,
                'duration' : duration
            }
            if '_reject' in request.POST:
                leave.approved_status = False
                leave.save()
            elif '_approve' in request.POST:
                leave.approved_status = True
                leave.save()
            #-------------------TO REQUESTING FACULTY-------------------
            # NOTE(review): sent for rejections as well as approvals.
            email_from = settings.EMAIL_HOST_USER
            recipient_list = []
            recipient_list.append(leave.leave_taken_by.email)
            html_content = render_to_string('email/faculty/approve_leave.html', message_data) # render with dynamic value
            text_content = strip_tags(html_content)
            msg = EmailMultiAlternatives(subject, text_content, email_from, recipient_list)
            msg.attach_alternative(html_content, "text/html")
            send_async_mail(msg)
            # return render(request,'email/faculty/approve_leave.html', message_data)
        # History = leaves that already carry a decision.
        leaves = Leave.objects.exclude(approved_status = None)
        leave_loads_pairs = list()
        for leave in leaves:
            loads_data = list()
            loads = LoadShift.objects.filter(leave = leave)
            for load in loads:
                loads_data.append(load)
            leave_loads_pairs.append((leave,loads_data))
        context_data = {
            'leave_loads_pairs' : leave_loads_pairs
        }
        return render(request,"hod/leave_history.html",context_data)
    html_error_data = {
        "error_code" : "401",
        "error_message" : "UNAUTHORIZED"
    }
    return render(request,"error.html",html_error_data)
@login_required
def get_ods(request):
    """List pending OD requests and process approve/reject submissions.

    Superuser only.  Rejecting deletes the OD outright; approving flags
    it.  Either way the requesting faculty member is notified by e-mail.
    """
    if not request.user.is_superuser:
        return render(request, "error.html", {
            "error_code" : "401",
            "error_message" : "UNAUTHORIZED"
        })
    if request.method == 'POST':
        od = OD.objects.get(pk=request.POST.get('od_id'))
        # Built before a possible delete() so the template still sees the
        # in-memory field values.
        message_data = {
            'od' : od,
        }
        if '_reject' in request.POST:
            od.delete()
        elif '_approve' in request.POST:
            od.approved_status = True
            od.save()
        #-------------------TO REQUESTING FACULTY-------------------
        html_content = render_to_string('email/faculty/approve_od.html', message_data) # render with dynamic value
        msg = EmailMultiAlternatives(
            'OD Notification',
            strip_tags(html_content),
            settings.EMAIL_HOST_USER,
            [od.taken_by.email],
        )
        msg.attach_alternative(html_content, "text/html")
        send_async_mail(msg)
        # return render(request,'email/faculty/approve_od.html', message_data)
    # Pair each still-undecided OD with its shifted loads.
    od_loads_pairs = [
        (od, list(LoadShift.objects.filter(od=od)))
        for od in OD.objects.filter(approved_status=None)
    ]
    return render(request, 'hod/approve_ods.html', {
        "od_loads_pairs" : od_loads_pairs,
    })
def daterange(start_date, end_date):
    """Yield one date per whole day from start_date (inclusive) to
    end_date (exclusive)."""
    total_days = int((end_date - start_date).days)
    for offset in range(total_days):
        yield start_date + timedelta(offset)
@login_required
def events(request):
    """Calendar feed of room reservations (superuser only).

    POST returns the reservations as JSON calendar events -- approved ones
    green, pending ones orange, one event per reserved day.  GET renders
    the calendar page.
    """
    if not request.user.is_superuser:
        return JsonResponse({
            'status' : 'false',
            'message' : 'UNAUTHORIZED'
        }, status=500)
    if request.method == 'POST':
        events = []
        for reservation in Reservation.objects.all():
            color = "green" if reservation.approved_status else "orange"
            label = reservation.purpose + " (" + reservation.start_time.strftime('%I:%M %p') + " to " + reservation.end_time.strftime('%I:%M %p') + ")"
            for date in daterange(reservation.start_date, reservation.end_date):
                events.append({
                    'eventName' : label,
                    'calendar' : 'Other',
                    'color' : color,
                    'date' : date.strftime('%d/%m/%Y')
                })
        # e.g. { eventName: 'IOT Seminar', calendar: 'Other', color: 'green', date: '15/08/2019'}
        return JsonResponse({
            'status' : 'success',
            'events' : events
        })
    return render(request, "guest/view_events.html", {})
@login_required
def room_reservations(request):
    """Approve or reject pending room reservations (superuser only).

    A POST carries ``event_id`` plus ``_approve`` or ``_reject``; the
    decision is stored and the requester is e-mailed.  The page lists the
    still-undecided reservations.
    """
    if not request.user.is_superuser:
        return render(request, "error.html", {
            "error_code" : "401",
            "error_message" : "UNAUTHORIZED"
        })
    if request.method == 'POST':
        event = Reservation.objects.get(pk=request.POST.get('event_id'))
        if '_reject' in request.POST:
            event.approved_status = False
            event.save()
        elif '_approve' in request.POST:
            event.approved_status = True
            event.save()
        html_content = render_to_string(
            'email/guest/event_approve_notification.html',
            {'reservation' : event},
        ) # render with dynamic value
        msg = EmailMultiAlternatives(
            'Room reservation Update',
            strip_tags(html_content),
            settings.EMAIL_HOST_USER,
            [event.email],
        )
        msg.attach_alternative(html_content, "text/html")
        send_async_mail(msg)
        # return render(request,'email/guest/event_approve_notification.html', message_data)
    return render(request, "hod/room_reservations.html", {
        "events" : Reservation.objects.filter(approved_status = None),
    })
# Standard imports
import ROOT
import pickle
import array
# TopEFT
from TopEFT.Tools.WeightInfo import WeightInfo
# RootTools
from RootTools.core.standard import *
# Scanned coupling ranges of the reweighting points:
# rw_cpQM -10 ... 30
# rw_cpt -20 ... 20
sample = Sample.fromFiles("ttZ_current_scan", ["/afs/hephy.at/data/rschoefbeck02/TopEFT/skims/gen/v2/fwlite_ttZ_ll_LO_currentplane_highStat_scan/fwlite_ttZ_ll_LO_currentplane_highStat_scan_0.root"])
# Load weight info (Python 2: ``file`` is the builtin file constructor).
weight_info = pickle.load(file('/afs/hephy.at/data/rschoefbeck02/TopEFT/results/gridpacks/ttZ0j_rwgt_patch_currentplane_highStat_slc6_amd64_gcc630_CMSSW_9_3_0_tarball.pkl'))
w = WeightInfo("/afs/hephy.at/data/rschoefbeck02/TopEFT/results/gridpacks/ttZ0j_rwgt_patch_currentplane_highStat_slc6_amd64_gcc630_CMSSW_9_3_0_tarball.pkl")
# Decode keys like "cpQM_m10p0_cpt_20p0" ('p' -> '.', 'm' -> '-') into a
# (cpQM, cpt) coupling tuple -> weight-index mapping.
weight_dict = { tuple( map(float, k.replace('p','.').replace('m','-').split('_')[1::2])): v for k,v in weight_info.iteritems()}
# Collect, per coupling name, the sorted list of scanned values that occur
# in the weight-info keys.
values = {}
for key in weight_info.keys():
    names = key.split('_')[::2]
    numbers = [float(x) for x in key.replace('p', '.').replace('m', '-').split('_')[1::2]]
    assert len(names) == len(numbers)
    for name, number in zip(names, numbers):
        bucket = values.setdefault(name, [])
        if number not in bucket:
            bucket.append(number)
    for name in names:
        values[name].sort()
# Branches to read from the gen-level tree: the per-event weight vector
# plus the reconstructed Z kinematics.
variables = [
    "nrw/I", "p[C/F]", "np/I",
    "Z_pt/F", "Z_eta/F", "Z_phi/F", "Z_mass/F", "Z_cosThetaStar/F", "Z_daughterPdg/I"
]
# One (w, cpQM, cpt) entry per reweighting point in the gridpack.
weight_vector = VectorTreeVariable.fromString("rw[w/F,cpQM/F,cpt/F]", nMax = len(weight_info.keys()) )
r = sample.treeReader( variables = map( TreeVariable.fromString, variables ) + [weight_vector] )
maxEvents = 30  # stop after this many events
counter = 0
# First graph draws the axes ('AP*'); subsequent ones overlay ('P*').
first = True
c1 = ROOT.TCanvas()
r.start()
tg = {}
# Per event: graph the event weight as a function of cpt (at cpQM = 0) and
# overlay a 2nd-order polynomial fit over the scanned cpt range.
while r.run():
    counter += 1
    tg[counter] = ROOT.TGraph(len(values['cpt']), array.array('d', values['cpt'] ), array.array('d', [ r.event.rw_w[weight_dict[(0, v)]] for v in values['cpt'] ] ) )
    tg[counter].Fit("pol2","","", min(values['cpt']), max(values['cpt']))
    tg[counter].Draw('AP*' if first else 'P*')
    tg[counter].GetYaxis().SetRangeUser(0,50*10**-6)
    tg[counter].SetLineWidth(1)
    first = False
    if counter == maxEvents: break
# NOTE(review): this formula is constructed but never evaluated or drawn --
# presumably left over from debugging.
f = ROOT.TTreeFormula( "f%i"%counter, w.weight_string(2), sample.chain)
c1.Print("/afs/hephy.at/user/r/rschoefbeck/www/etc/ew.png")
#[ weight_dict[(0, v)] for v in values['cpt'] ]
|
from sqlalchemy import Column, Integer, String, DateTime, Sequence
from sqlalchemy.ext.declarative import declarative_base
__author__ = 'cloudbeer'
Base = declarative_base()
class User(Base):
    """Account record: credentials, contact info, status and timestamps."""
    __tablename__ = 'user'
    id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
    email = Column(String, nullable=False)
    nick = Column(String)
    password = Column(String, nullable=False)
    salt = Column(String)  # per-user salt for the password hash, presumably
    login_time = Column(DateTime)
    status = Column(Integer, default=1)  # 1 appears to mean active -- confirm
    create_date = Column(DateTime)

    def __init__(self, id=None, email=None, nick=None, password=None, salt=None, login_time=None, status=None,
                 create_date=None):
        # Explicit constructor mirroring every column.
        self.id = id
        self.email = email
        self.nick = nick
        self.password = password
        self.salt = salt
        self.login_time = login_time
        self.status = status
        self.create_date = create_date

    def __repr__(self):
        return "<User('%s','%s', '%s')>" % (self.id, self.email, self.nick)
class Template(Base):
    """Reusable content template with popularity / ranking metadata."""
    __tablename__ = "template"
    id = Column(Integer, Sequence('template_id_seq'), primary_key=True)
    title = Column(String, nullable=False)
    user_id = Column(Integer, default=0)
    content = Column(String)
    status = Column(Integer, default=1)
    type = Column(Integer, default=1)
    popular = Column(Integer, default=0)
    rank = Column(Integer, default=0)
    create_date = Column(DateTime)

    def __init__(self, id=None, title=None, user_id=None, content=None, status=None, type=None, popular=None, rank=None, create_date=None):
        # Copy each constructor argument onto the matching mapped attribute.
        for attr, value in (('id', id), ('title', title), ('user_id', user_id),
                            ('content', content), ('type', type),
                            ('popular', popular), ('rank', rank),
                            ('status', status), ('create_date', create_date)):
            setattr(self, attr, value)

    def __repr__(self):
        fields = (self.id, self.title, self.popular, self.rank)
        return "<Template('%s','%s', '%s', '%s')>" % fields
class Project(Base):
    """A user-owned project with free-form content."""
    __tablename__ = "project"
    id = Column(Integer, Sequence('project_id_seq'), primary_key=True)
    title = Column(String, nullable=False)
    user_id = Column(Integer, default=0)
    content = Column(String)
    status = Column(Integer, default=1)
    create_date = Column(DateTime)

    def __init__(self, id=None, title=None, user_id=None, content=None, status=None, create_date=None):
        self.id = id
        self.title = title
        self.user_id = user_id
        self.content = content
        self.status = status
        self.create_date = create_date

    def __repr__(self):
        # Bug fix: repr previously claimed "<User(...)>" (copy-paste from User).
        return "<Project('%s','%s')>" % (self.id, self.title)
import unittest
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from codon_tools.lookup_tables import opt_codons_E_coli, reverse_genetic_code
class TestLookupTables(unittest.TestCase):
    """Sanity checks for the codon lookup tables."""

    def test_reverse_genetic_code(self):
        """Every codon translates back to its amino acid and appears only once."""
        seen = set()
        for amino_acid, codon_list in reverse_genetic_code.items():
            for codon in codon_list:
                # Translating the codon must yield the amino acid it is listed under.
                self.assertEqual(amino_acid, Seq(codon, generic_dna).translate())
                # Each codon may be claimed by at most one amino acid.
                self.assertTrue(codon not in seen)
                seen.add(codon)

    def test_opt_codons_E_coli(self):
        """Table matches the known optimal codons for E. coli."""
        expected_by_aa = { 'A':['GCT'], 'R':['CGT', 'CGC'], 'N':['AAC'], 'D':['GAC'], 'C':['TGC'], 'Q':['CAG'], 'E':['GAA'], 'G':['GGT','GGC'], 'H':['CAC'], 'I':['ATC'], 'L':['CTG'], 'F':['TTC'], 'P':['CCG'], 'S':['TCT','TCC'], 'T':['ACT','ACC'], 'Y':['TAC'], 'V':['GTT','GTA'] }
        for amino_acid, codon_list in opt_codons_E_coli.items():
            # Compare as sets so ordering differences are irrelevant.
            self.assertEqual(set(codon_list), set(expected_by_aa[amino_acid]))
|
print('Welcome to the tip Calculator!')
total_bill = float(input('What was the total bill? $'))
tip_percentage = int(input('What persentage of tip you would like to give? 10, 12, or 15? '))
people_number = int(input('How many people to splitt the bill? '))
personal_bill = (total_bill + (total_bill * (tip_percentage/100))) / people_number
print(f'\nEach person sould pay: ${round(personal_bill,2)}') |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from PIL import Image
# Create your models here.
class AccountManager(BaseUserManager):
    """Model manager for the custom user: helpers for creating regular,
    staff and super users."""

    def create_user(self, email, username, password=None, is_manager=False):
        """Create and persist a regular user.

        Raises:
            ValueError: if email, username or password is missing.
        """
        if not email:
            raise ValueError('Users must have an email')
        if not username:
            raise ValueError('Users must have an userName')
        if not password:
            raise ValueError('password is required to create user')
        user = self.model(email=self.normalize_email(email), username=username)
        user.set_password(password)
        user.is_manager = is_manager
        user.save(using=self._db)
        return user

    def create_superuser(self, email, username, password=None, is_manager=False):
        """Create a user with admin, staff and superuser flags enabled."""
        user = self.create_user(email=self.normalize_email(email), username=username, password=password, is_manager=is_manager)
        user.is_admin = True
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user

    def create_staffuser(self, email, username, password=None, is_manager=False):
        """Create a user with the staff flag enabled."""
        user = self.create_user(email, username, password, is_manager)
        user.is_staff = True
        # Bug fix: was `user.save(using=True)` — `using` must be the database
        # alias (self._db), not a boolean.
        user.save(using=self._db)
        return user
class customUser(AbstractBaseUser):
    """Custom user model keyed by email, with a manager flag and an
    automatically resized profile picture."""
    email = models.EmailField(verbose_name='email', max_length=60, unique=True)
    username = models.CharField(max_length=30, unique=True)
    date_joined = models.DateTimeField(verbose_name='date joined', default=timezone.now)
    is_manager = models.BooleanField(default=False)
    profile_pic = models.ImageField(default='default.jpg', upload_to='profile_pics')
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    objects = AccountManager()
    # Users authenticate with their email; username is still collected.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __str__(self):
        return self.email
    def has_perm(self, perm, obj=None):
        # Admins implicitly hold every permission; others hold none.
        return self.is_admin
    def has_module_perms(self, app_label):
        # Every user may see every app's modules.
        return True
    def save(self, *args, **kwargs):
        """Persist the user, then shrink the profile picture to <= 300x300."""
        super().save(*args, **kwargs)
        # NOTE(review): assumes the image is stored on local disk and the file
        # exists — Image.open raises otherwise; confirm the storage backend.
        img = Image.open(self.profile_pic.path)
        if img.height > 300 or img.width > 300:
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.profile_pic.path)
|
from .processor import Processor, FilteredProcessor
from .color_mean import ColorMeanProcessor
from .chrom import ChromProcessor
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2019 Gert Kanter.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Gert Kanter
import rospy
import re
import os
import psutil
import signal
import testit_msgs.srv
import testit_msgs.msg
import std_msgs.msg
class TestItSut(object):
    """ROS helper node exposing TestIt coverage flushing.

    Depending on the ``~mode`` parameter the flush is offered either as a
    ROS service ("srv", default) or as a topic subscriber/publisher pair.
    """
    def __init__(self):
        # "srv" (default) = service mode, anything else = topic mode.
        self.mode = rospy.get_param("~mode", "srv")
        if self.mode == "srv":
            # Service mode
            rospy.loginfo("TestIt SUT in SERVICE mode")
            self.flush_service = rospy.Service("/testit/flush_coverage", testit_msgs.srv.Coverage, self.handle_flush_service)
        else:
            # Topic mode
            rospy.loginfo("TestIt SUT in TOPIC mode")
            self.flush_subscriber = rospy.Subscriber("/testit/flush_coverage", std_msgs.msg.UInt32, self.handle_flush_topic)
            self.flush_publisher = rospy.Publisher("/testit/flush_data", testit_msgs.msg.FlushData, queue_size=10)
        # The property setters below validate that these parameters are strings.
        self.node_workspace = rospy.get_param("~node_workspace", "")
        self.coverage_directories = rospy.get_param("~coverage_directories", "")
        self.host = rospy.get_param("~host", "")
    @property
    def host(self):
        # Host identifier reported with flushed coverage; warns when unset.
        if self._host is None or len(self._host) == 0:
            rospy.logwarn("SUT host identifier is not defined!")
            return ""
        else:
            return self._host
    @host.setter
    def host(self, value):
        if value is not None and type(value) == str:
            self._host = value
        else:
            raise ValueError("host value must be string!")
    @property
    def node_workspace(self):
        # NOTE(review): this getter returns "" (never None) when unset, so the
        # `is not None` check in flush() is always true — confirm intent.
        if self._node_workspace is None or len(self._node_workspace) == 0:
            rospy.logwarn("Catkin workspace for tested packages is not defined (parameter 'node_workspace', this should be a string e.g., '/catkin_ws')")
            return ""
        else:
            return self._node_workspace
    @node_workspace.setter
    def node_workspace(self, value):
        if value is not None and type(value) == str:
            self._node_workspace = value
        else:
            raise ValueError("node_workspace value must be string!")
    @property
    def coverage_directories(self):
        # Directories scanned recursively for ".coverage" files by flush().
        if self._coverage_directories is None or len(self._coverage_directories) == 0:
            rospy.logwarn("Coverage recording log file directories are not defined (parameter 'coverage_directories', this should be a semicolon-separated string e.g., '/catkin_ws/build;/root/.ros')")
            return []
        else:
            return self._coverage_directories
    @coverage_directories.setter
    def coverage_directories(self, value):
        if value is not None and type(value) == str:
            self._coverage_directories = value.split(";")
        else:
            raise ValueError("coverage_directories value must be string!")
    def get_coverage(self):
        """Flush coverage, then convert self.coverage into FileCoverage messages."""
        file_coverages = []
        # NOTE(review): flush()'s boolean result is collected but ignored — confirm.
        success = self.flush()
        if self.coverage is not None:
            for file_coverage in self.coverage.keys():
                coverage = testit_msgs.msg.FileCoverage()
                coverage.filename = file_coverage
                coverage.lines = self.coverage[file_coverage]
                file_coverages.append(coverage)
        return file_coverages
    def handle_flush_topic(self, data):
        # Received request to send coverage data - send it via topic
        message = testit_msgs.msg.FlushData()
        message.host_id = self.host
        message.seq = data.data
        message.coverage = self.get_coverage()
        self.flush_publisher.publish(message)
    def handle_flush_service(self, req):
        """Service callback: return current coverage (the request carries no data)."""
        rospy.logdebug("Coverage results requested")
        result = True
        return testit_msgs.srv.CoverageResponse(result, self.get_coverage())
    def process_coverage(self, filename):
        """Parse a coverage.py data file and return its 'lines' mapping.

        Returns an empty dict on any read/parse error.
        """
        rospy.loginfo("process_coverage(" + str(filename) + ")")
        header = "!coverage.py: This is a private format, don't read it directly!"
        data = []
        try:
            # The loop body either returns on its first pass or raises into
            # the except handler below.
            while True:
                with open(filename) as f:
                    data = f.readlines()
                replaced = data[0].replace(header, '')
                # Keep only up to the first "}}" terminator when several appear.
                terminators = [m.start() for m in re.finditer('}}', replaced)]
                if len(terminators) > 1:
                    replaced = replaced[:terminators[0]+2]
                # HACK: eval() of file content — only acceptable because the file
                # is produced locally by coverage.py; never use on untrusted input.
                lines = eval(replaced)
                return lines['lines']
        except Exception as e:
            rospy.logerr(e)
            return {}
    def flush(self):
        """Signal instrumented processes to dump coverage, then collect it.

        Returns True when collection ran, False otherwise.
        """
        rospy.loginfo("Flushing...")
        if self.node_workspace is not None:
            # Remove *.gcda and .coverage files
            # Send SIGUSR1 to packages under test
            pids = psutil.pids()
            for pid in pids:
                # NOTE(review): psutil.Process can raise NoSuchProcess for
                # short-lived pids; only AccessDenied is handled — confirm.
                p = psutil.Process(pid)
                try:
                    cmdline = " ".join(p.cmdline())
                    if cmdline.find(" " + self.node_workspace) >= 0 and cmdline.find("/opt/ros/") == -1 and not cmdline.startswith("/bin/bash"):
                        if cmdline.find("testit_sut") == -1:
                            # Don't send SIGUSR1 to self
                            #rospy.loginfo("Sending SIGUSR1 to " + str(p.pid) + "(" + str(cmdline) + ")")
                            os.kill(p.pid, signal.SIGUSR1)
                except psutil.AccessDenied:
                    # Some processes might be inaccessible
                    pass
            # Process all *.gcda and .coverage files
            self.coverage = {}
            for coverage_directory in self.coverage_directories:
                rospy.loginfo("Looking into " + coverage_directory)
                for directory, dirnames, filenames in os.walk(coverage_directory):
                    for filename in filenames:
                        if filename == ".coverage":
                            self.coverage.update(self.process_coverage(str(directory) + "/" + filename))
            return True
        return False
if __name__ == "__main__":
    # Standard ROS entry point: initialize the node, construct the SUT helper,
    # and spin until shutdown.
    rospy.init_node('testit_sut', anonymous=True)
    testit_sut = TestItSut()
    rospy.loginfo("TestIt SUT services started...")
    rospy.spin()
    rospy.loginfo("Shut down everything!")
|
import discord
from discord.ext import commands
import urllib.request
import os
# Discord bot with "." command prefix.
bot = commands.Bot(command_prefix =".", description = "yolooooooooooooooooooooooooooooo")
@bot.event
async def on_ready():
    # Fired once the bot has connected and is ready to receive events.
    print("prêt!")
@bot.command()
async def coucou(ctx):
    # Liveness check: replies with a fixed test message.
    await ctx.send("yo c est le test")
@bot.command()
@commands.has_permissions(kick_members = True)
async def clear(ctx, nombre):
    """Delete the last ``nombre`` messages plus the invoking command itself."""
    try:
        count = int(nombre)
        # +1 so the clear command message itself is removed too.
        history = await ctx.channel.history(limit=count + 1).flatten()
        for old_message in history:
            await old_message.delete()
        await ctx.send("Cleared **" + str(count) + "** message(s).")
    except:
        await ctx.send("fuck off, its not a number")
@bot.command()
async def test(ctx, *heroes):
    # Debug command: join the variadic arguments and echo them to stdout.
    heroes = " ".join(heroes)
    print(heroes)
@bot.command()
async def info(ctx, *heroes):
    """Scrape heroesprofile.com for a hero's stats and post them as an embed.

    The hero name is taken from the command arguments; win rate, popularity,
    pick rate, ban rate and games played are extracted from the first matching
    table cells of the scraped page.
    """
    heroes = " ".join(heroes)
    heroes = heroes.lower()
    heroes = heroes.capitalize()
    with urllib.request.urlopen("https://www.heroesprofile.com/Global/Hero/") as response:
        texte = response.read()
    poste_string = str(texte)
    splitted = poste_string.split()
    access = False
    # Escape characters the scraped page encodes differently.
    # Bug fix: the second replace previously restarted from `heroes`,
    # silently discarding the apostrophe escaping done by the first replace.
    heroess = heroes.replace("'", "\\'").replace("ù", "\\xc3\\xba")
    print(heroes)
    # Special cases where the page token differs from the capitalized name.
    if (heroes == "Li-ming"):
        heroess = "Ming"
    if (heroes == "D.va"):
        heroess = "D.Va"
    if (heroes == "Sgt.hammer"):
        heroess = "Sgt."

    def _cell(word):
        # Extract the text between the first '>' and the following '<'.
        return word.split('>')[1].split('<')[0]

    # NOTE(review): 'access'/'i' are computed but never gate the extraction
    # below — kept to match the original flow; confirm intent.
    i = 0
    winrate = ""
    popularity = ""
    pick_rate = ""
    ban_rate = ""
    game_played = ""
    seen = {"win": 0, "pop": 0, "pick": 0, "ban": 0, "games": 0}
    for word in splitted:
        if (heroess in word) or (access):
            i += 1
            if (i >= 8):
                access = True
        # Only the first occurrence of each stat cell is kept.
        if ("win_rate_cell" in word):
            seen["win"] += 1
            if seen["win"] == 1:
                winrate = _cell(word)
        if ("popularity_cell" in word):
            seen["pop"] += 1
            if seen["pop"] == 1:
                popularity = _cell(word)
        if ("pick_rate_cell" in word):
            seen["pick"] += 1
            if seen["pick"] == 1:
                pick_rate = _cell(word)
        if ("ban_rate" in word):
            seen["ban"] += 1
            if seen["ban"] == 1:
                ban_rate = _cell(word)
        if ("games_played_cell" in word):
            seen["games"] += 1
            if seen["games"] == 1:
                game_played = _cell(word)
    # Hero portrait hosted on the heroes-images GitHub repository.
    image = "https://raw.githubusercontent.com/HeroesToolChest/heroes-images/master/heroesimages/heroportraits/storm_ui_glues_draft_portrait_"
    image2 = heroes.lower()
    image3 = ".png"
    image = image + image2 + image3
    embed = discord.Embed(title = heroes, color=0x2C75FF)
    embed.set_thumbnail(url= image)
    embed.add_field(name = "Winrate", value = winrate, inline = False)
    embed.add_field(name = "Popularity", value = popularity, inline = False)
    embed.add_field(name = "Pick rate", value = pick_rate, inline = False)
    embed.add_field(name = "Ban rate", value = ban_rate, inline = False)
    embed.add_field(name = "Game played", value = game_played, inline = False)
    await ctx.channel.send(embed=embed)
# Read the bot token from the environment and start the event loop (blocking).
token = os.getenv('TOKEN')
bot.run(token)
import os
import copy
import sys
import run
import evaluator
from config import KNOWLEDGE_NET_DIR
# Select the evaluation split from the first CLI argument (default: "dev").
which = sys.argv[1] if len(sys.argv) > 1 else "dev"
if which == "dev":
    # Dev evaluation runs against fold 4 of the training data.
    filename = "train.json"
    fold = 4
elif which == "test":
    filename = "test-no-facts.json"
    fold = 5
else:
    sys.exit('Invalid evaluation set')
gold_dataset, properties = evaluator.readKnowledgenetFile(os.path.join(KNOWLEDGE_NET_DIR, filename), fold)
# Predict on a deep copy so the gold annotations are never mutated.
dataset = copy.deepcopy(gold_dataset)
cont = 0
for document in dataset.values():
    cont +=1
    print ("Documentid: " + str(document.documentId) + "\t" + str(cont) + " of " + str(len(dataset.values())))
    instances = run.generate_candidates(document.documentText)
    for passage in document.passages:
        annotated_properties = set(map(lambda x: x.propertyId, passage.exhaustivelyAnnotatedProperties))
        # On dev, skip passages without exhaustively annotated properties.
        if which == "dev" and len(annotated_properties) == 0:
            continue
        passage_instances = list(filter(lambda x: x.is_in_span(passage.passageStart, passage.passageEnd), instances))
        if which == "dev":
            run.classify_instances(passage_instances, annotated_properties)
        else:
            run.classify_instances(passage_instances)
        # Replace gold facts with the classifier's positive predictions.
        passage.facts = []
        for fact in passage_instances:
            for predicate_id, label in fact.labels.items():
                # NOTE(review): 'predicate' is constructed but never used — confirm.
                predicate = evaluator.KNDProperty(predicate_id, None, None)
                if not label:
                    continue
                passage.facts.append(evaluator.KNFact(None, predicate_id,
                    fact.subject_entity.start_char, fact.subject_entity.end_char,
                    fact.object_entity.start_char, fact.object_entity.end_char,
                    fact.get_subject_uri(predicate_id), fact.get_object_uri(predicate_id), str(fact.subject_entity), str(fact.object_entity), None, None))
# Evaluate
def print_evaluation(eval_type):
    """Evaluate predictions vs. gold for one matching strategy and write reports.

    Args:
        eval_type: matching strategy, e.g. "span_overlap", "span_exact" or "uri".
    """
    # Fresh copies so each evaluation strategy starts from unmodified data.
    gold = copy.deepcopy(gold_dataset)
    prediction = copy.deepcopy(dataset)
    if eval_type == "uri":
        # URI evaluation only considers facts that carry URIs.
        gold, goldProperties = evaluator.filterForURIEvaluation(gold)
        prediction, _ = evaluator.filterForURIEvaluation(prediction)
    else:
        goldProperties = properties
    confusionMatrix, analysis = evaluator.evaluate(gold, prediction, eval_type, goldProperties)
    # Print results
    print("RESULTS FOR",eval_type)
    # NOTE(review): 'evals' is accumulated but not printed here; the micro
    # evaluation prints internally (True flag) — confirm.
    evals = evaluator.microEvaluation(confusionMatrix, True)
    evals.extend(evaluator.macroEvaluation(confusionMatrix))
    evaluator.writeAnalysisFile(analysis, 'tmp', eval_type)
    evaluator.writeHtmlFile(analysis, 'tmp', eval_type, goldProperties)
# Run all three matching strategies.
print_evaluation("span_overlap")
print_evaluation("uri")
print_evaluation("span_exact")
|
from rich import print
def banner():
    """Print the ASCII-art banner from design/banner.txt in cyan (rich markup)."""
    with open('design/banner.txt') as banner_file:
        art = banner_file.read()
    print(f"[cyan]{art}[/]")
# -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class HrPayslip(models.Model):
    """Payslip extension that reimburses approved employee expenses through
    a dedicated "other input" line on the payslip."""
    _inherit = 'hr.payslip'
    expense_sheet_ids = fields.One2many(
        'hr.expense.sheet', 'payslip_id', string='Expenses',
        help="Expenses to reimburse to employee.",
        states={'draft': [('readonly', False)], 'verify': [('readonly', False)]})
    expenses_count = fields.Integer(compute='_compute_expenses_count')
    @api.depends('expense_sheet_ids.expense_line_ids', 'expense_sheet_ids.payslip_id')
    def _compute_expenses_count(self):
        # Count all expense lines across the linked expense sheets.
        for payslip in self:
            payslip.expenses_count = len(payslip.mapped('expense_sheet_ids.expense_line_ids'))
    @api.onchange('input_line_ids')
    def _onchange_input_line_ids(self):
        # If the expense input line was removed, detach the expense sheets.
        expense_type = self.env.ref('hr_payroll_expense.expense_other_input', raise_if_not_found=False)
        if not self.input_line_ids.filtered(lambda line: line.input_type_id == expense_type):
            self.expense_sheet_ids.write({'payslip_id': False})
    @api.onchange('employee_id', 'struct_id', 'contract_id', 'date_from', 'date_to')
    def _onchange_employee(self):
        res = super()._onchange_employee()
        if self.state == 'draft':
            # Attach every approved, not-yet-reimbursed sheet the employee
            # paid from their own account and flagged for payslip refund.
            self.expense_sheet_ids = self.env['hr.expense.sheet'].search([
                ('employee_id', '=', self.employee_id.id),
                ('state', '=', 'approve'),
                ('payment_mode', '=', 'own_account'),
                ('refund_in_payslip', '=', True),
                ('payslip_id', '=', False)])
            self._onchange_expense_sheet_ids()
        return res
    @api.onchange('expense_sheet_ids')
    def _onchange_expense_sheet_ids(self):
        expense_type = self.env.ref('hr_payroll_expense.expense_other_input', raise_if_not_found=False)
        if not expense_type:
            return
        total = sum(sheet.total_amount for sheet in self.expense_sheet_ids)
        if not total:
            return
        # Rebuild input lines: keep non-expense lines, then append a single
        # expense line carrying the summed reimbursement amount.
        lines_to_keep = self.input_line_ids.filtered(lambda x: x.input_type_id != expense_type)
        input_lines_vals = [(5, 0, 0)] + [(4, line.id, False) for line in lines_to_keep]
        input_lines_vals.append((0, 0, {
            'amount': total,
            'input_type_id': expense_type
        }))
        self.update({'input_line_ids': input_lines_vals})
    def action_payslip_done(self):
        """On validation, create accounting moves for the linked expense
        sheets and mark them as paid."""
        res = super(HrPayslip, self).action_payslip_done()
        for expense in self.expense_sheet_ids:
            expense.action_sheet_move_create()
            expense.set_to_paid()
        return res
    def open_expenses(self):
        """Return an act_window showing the reimbursed expense lines."""
        self.ensure_one()
        return {
            'type': 'ir.actions.act_window',
            'name': _('Reimbursed Expenses'),
            'res_model': 'hr.expense',
            'view_mode': 'tree,form',
            'domain': [('id', 'in', self.mapped('expense_sheet_ids.expense_line_ids').ids)],
        }
|
# coding: utf-8
def sieve(n):
    """Return all primes <= n via the sieve of Eratosthenes.

    Args:
        n: Inclusive upper bound. Values below 2 yield an empty list.

    Returns:
        Sorted list of all primes p with 2 <= p <= n.
    """
    if n < 2:
        # Guard: the original code indexed primes[1] and crashed for n < 1;
        # there are no primes below 2 anyway.
        return []
    primes = [True] * (n + 1)
    primes[0] = primes[1] = False
    i = 2
    # Only factors up to sqrt(n) need sieving: every composite <= n has a
    # prime factor <= sqrt(n). (Original looped up to n/2.)
    while i * i <= n:
        if primes[i]:
            # Start at i*i: smaller multiples were struck by smaller primes.
            # Slice assignment is faster than an explicit inner loop.
            primes[i * i:n + 1:i] = [False] * len(range(i * i, n + 1, i))
        i += 1
    return [x for x in range(n + 1) if primes[x]]
if __name__ == "__main__":
    # Demo: print the primes up to 200.
    print(sieve(200))
|
from django.core.management.base import BaseCommand
from django.conf import settings
import os
# import pwd, grp
class Command(BaseCommand):
    """Management command that loosens permissions on the sqlite database
    file and its directory (preparation for backup / web-server access)."""

    help = "backs up the current sqllite db on an s3 bucket"

    def handle(self, *args, **options):
        """Locate the default sqlite database and make it writable."""
        # get database
        db_file = os.path.abspath(settings.DATABASES['default']['NAME'])
        sub_folder = os.path.dirname(db_file)
        # set file permissions
        # Bug fix: `0777` is a Python 2-only octal literal (a syntax error on
        # Python 3); `0o777` is the same value on both.
        # NOTE(review): world-writable is a security risk — consider chown to
        # www-data (commented below) plus 0o775/0o664 instead.
        os.chmod(db_file, 0o777)
        os.chmod(sub_folder, 0o777)
        # uid = pwd.getpwnam("www-data")[2]
        # gid = grp.getgrnam("www-data")[2]
        # os.chown(sub_folder, uid, gid)
|
print("hi i am abhinav") |
"""
Download a file from iamresponding.com. Then, annotate the model with new data.
Finally, email the generated report.
"""
import json
import argparse
from apiclient.discovery import build
from httplib2 import Http
from cvac.fetch_data import download
from cvac.misc_io import get_newest_file, wait_for_file_to_finish
from cvac.annotate import annotate_file
from cvac.email import create_message_with_attachment, send_message, get_creds
def main():
    """
    Entry point for report.py
    """
    # parse arguments
    parser = argparse.ArgumentParser(
        description='Download, annotate, and send report')
    parser.add_argument('--startdate', dest='startdate', type=str,
                        help='Initial date for report (MM/DD/YYYY)')
    parser.add_argument('--enddate', dest='enddate', type=str,
                        help='Final date for report (MM/DD/YYYY)')
    parser.add_argument('--config', type=str, dest='config', default='config.json',
                        help='Where to read config file from')
    parser.add_argument('--dontmail', dest='sendmail', action='store_false',
                        help="Don't send an email")
    parser.add_argument('--source', dest='report_source',
                        help='Instead of downloading, use this file')
    args = parser.parse_args()
    # Option validation: start/end dates must come as a pair, and --source
    # is mutually exclusive with the date range.
    if args.startdate and args.enddate is None:
        parser.error("--startdate requires --enddate")
    elif args.enddate and args.startdate is None:
        parser.error("--enddate requires --startdate")
    elif args.report_source and args.startdate and args.enddate:
        parser.error("--source and --startdate/--enddate are mutually exclusive")
    elif args.report_source is None and (args.startdate is None and args.enddate is None):
        parser.error("must use either --source or --startdate/--enddate")
    if args.report_source:
        # NOTE(review): args.infile is set but never read afterwards — confirm.
        args.infile = args.report_source
    with open(args.config) as file:
        config = json.load(file)
    startdate = args.startdate
    enddate = args.enddate
    if args.report_source:
        filename = args.report_source
    else:
        # Pull the report from iamresponding.com into the download directory.
        download(config['IAR_username'],
                 config['IAR_password'],
                 startdate,
                 enddate)
        # get new file
        filename = get_newest_file(config['download_dir'])
        print(filename)
        # wait for file to finish
        wait_for_file_to_finish(filename)
    # annotate the file
    annotate_file(filename, config['outfile'], config['model_path'])
    if args.sendmail:
        # get authorization for gmail api
        creds = get_creds(config['tokenfile'], config['credsfile'])
        service = build('gmail', 'v1', http=creds.authorize(Http()))
        # NOTE(review): when --source is used, startdate/enddate are None and
        # the subject reads "Report None to None" — confirm desired.
        message = create_message_with_attachment(
            config['email_from'],
            config['email_to'],
            ('Report {} to {}'.format(startdate, enddate)),
            "Attached is report xls file.",
            config['outfile'])
        send_message(service, 'me', message)
if __name__ == '__main__':
    main()
|
# coding: utf-8
"""
LoRa App Server REST API
For more information about the usage of the LoRa App Server (REST) API, see [https://docs.loraserver.io/lora-app-server/api/](https://docs.loraserver.io/lora-app-server/api/). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.device_service_api import DeviceServiceApi # noqa: E501
from swagger_client.rest import ApiException
class TestDeviceServiceApi(unittest.TestCase):
    """DeviceServiceApi unit test stubs"""
    # NOTE: auto-generated by swagger-codegen; each test_* method is a stub
    # (body `pass`) awaiting a real implementation against a live API server.
    def setUp(self):
        self.api = swagger_client.api.device_service_api.DeviceServiceApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_activate(self):
        """Test case for activate
        Activate (re)activates the device (only when ABP is set to true).  # noqa: E501
        """
        pass
    def test_create(self):
        """Test case for create
        Create creates the given device.  # noqa: E501
        """
        pass
    def test_create_keys(self):
        """Test case for create_keys
        CreateKeys creates the given device-keys.  # noqa: E501
        """
        pass
    def test_deactivate(self):
        """Test case for deactivate
        Deactivate de-activates the device.  # noqa: E501
        """
        pass
    def test_delete(self):
        """Test case for delete
        Delete deletes the device matching the given DevEUI.  # noqa: E501
        """
        pass
    def test_delete_keys(self):
        """Test case for delete_keys
        DeleteKeys deletes the device-keys for the given DevEUI.  # noqa: E501
        """
        pass
    def test_get(self):
        """Test case for get
        Get returns the device matching the given DevEUI.  # noqa: E501
        """
        pass
    def test_get_activation(self):
        """Test case for get_activation
        GetActivation returns the current activation details of the device (OTAA and ABP).  # noqa: E501
        """
        pass
    def test_get_keys(self):
        """Test case for get_keys
        GetKeys returns the device-keys for the given DevEUI.  # noqa: E501
        """
        pass
    def test_get_random_dev_addr(self):
        """Test case for get_random_dev_addr
        GetRandomDevAddr returns a random DevAddr taking the NwkID prefix into account.  # noqa: E501
        """
        pass
    def test_list(self):
        """Test case for list
        List returns the available devices.  # noqa: E501
        """
        pass
    def test_stream_event_logs(self):
        """Test case for stream_event_logs
        StreamEventLogs stream the device events (uplink payloads, ACKs, joins, errors).  * This endpoint is intended for debugging only. * This endpoint does not work from a web-browser.  # noqa: E501
        """
        pass
    def test_stream_frame_logs(self):
        """Test case for stream_frame_logs
        StreamFrameLogs streams the uplink and downlink frame-logs for the given DevEUI.  * These are the raw LoRaWAN frames and this endpoint is intended for debugging only. * This endpoint does not work from a web-browser.  # noqa: E501
        """
        pass
    def test_update(self):
        """Test case for update
        Update updates the device matching the given DevEUI.  # noqa: E501
        """
        pass
    def test_update_keys(self):
        """Test case for update_keys
        UpdateKeys updates the device-keys.  # noqa: E501
        """
        pass
if __name__ == '__main__':
    unittest.main()
|
import logging
from django.conf import settings
from share import exceptions
from share.models import RegulatorLog
from share.util.extensions import Extensions
logger = logging.getLogger(__name__)
class RegulatorConfigError(exceptions.ShareException):
    """Raised when a regulator step configuration entry is malformed or fails to load."""
    pass
class InfiniteRegulationError(exceptions.ShareException):
    """Raised when regulator steps keep changing the graph past Steps.MAX_RUNS passes."""
    pass
class Regulator:
    """Runs configured regulator steps (source-specific first, then defaults)
    over a graph, persisting collected step logs against the ingest job."""
    VERSION = 1
    def __init__(
        self,
        ingest_job=None,
        source_config=None,
        regulator_config=None,
        validate=True,
    ):
        # At most one of ingest_job / source_config may be supplied; a job's
        # suid carries its source_config when only the job is given.
        if ingest_job and source_config:
            raise ValueError('Regulator: Provide ingest_job or source_config, not both')
        self.job = ingest_job
        self._logs = []
        if ingest_job and not source_config:
            source_config = ingest_job.suid.source_config
        # Source-specific steps run before the global default steps.
        self._custom_steps = Steps(
            self,
            source_config.regulator_steps if source_config else None,
            validate=validate,
        )
        self._default_steps = Steps(
            self,
            regulator_config or settings.SHARE_REGULATOR_CONFIG,
            validate=validate
        )
    def regulate(self, graph):
        """Run all steps on ``graph``; always persist collected logs, even on error."""
        try:
            self._custom_steps.run(graph)
            self._default_steps.run(graph)
        finally:
            if self.job and self._logs:
                for log in self._logs:
                    log.ingest_job = self.job
                RegulatorLog.objects.bulk_create(self._logs)
class Steps:
    """Ordered sets of regulator steps (node / graph / validate) loaded
    from a regulator config dict via entry-point extensions."""
    # Safety valve: maximum node+graph passes before assuming the steps will
    # keep mutating the graph forever.
    MAX_RUNS = 31
    node_steps = ()
    graph_steps = ()
    validate_steps = ()
    def __init__(self, regulator, regulator_config, node=True, graph=True, validate=True):
        self.regulator = regulator
        self.regulator_config = regulator_config
        if not regulator_config:
            return
        if node:
            self.node_steps = self._load_steps(regulator_config.get('NODE_STEPS'), 'share.regulate.steps.node')
        if graph:
            self.graph_steps = self._load_steps(regulator_config.get('GRAPH_STEPS'), 'share.regulate.steps.graph')
        if validate:
            self.validate_steps = self._load_steps(regulator_config.get('VALIDATE_STEPS'), 'share.regulate.steps.validate')
    def run(self, graph):
        """Run node+graph steps until the graph stops changing, then validate.

        Raises:
            InfiniteRegulationError: after MAX_RUNS passes without quiescing.
        """
        runs = 0
        while True:
            self._run_steps(graph, self.node_steps)
            graph.changed = False
            self._run_steps(graph, self.graph_steps)
            if not graph.changed:
                break
            runs += 1
            if runs >= self.MAX_RUNS:
                raise InfiniteRegulationError('Regulator config: {}'.format(self.regulator_config))
        self._run_steps(graph, self.validate_steps)
    def _run_steps(self, graph, steps):
        # Collect each step's logs on the owning regulator even if it raises.
        for step in steps:
            try:
                step.run(graph)
            finally:
                if step.logs:
                    self.regulator._logs.extend(step.logs)
    def _load_steps(self, step_configs, namespace):
        # Each config entry is either "name" or a ("name", {settings}) pair.
        try:
            steps = []
            for step in (step_configs or []):
                if isinstance(step, str):
                    steps.append(self._load_step(namespace, step))
                elif isinstance(step, (list, tuple)) and len(step) == 2:
                    steps.append(self._load_step(namespace, step[0], step[1]))
                else:
                    raise RegulatorConfigError('Each step must be a string or (name, settings) pair. Got: {}'.format(step))
            return tuple(steps)
        except Exception:
            # NOTE(review): the original cause is not chained ("raise ... from"),
            # so the underlying error is hidden — confirm intent.
            raise RegulatorConfigError('Error loading regulator step config for namespace {}'.format(namespace))
    def _load_step(self, namespace, name, settings=None):
        """Instantiate and return a regulator step for the given config.
        Params:
            namespace: Name of the step's entry point group in setup.py
            name: Name of the step's entry point in setup.py
            [settings]: Optional dictionary, passed as keyword arguments when initializing the step
        """
        return Extensions.get(namespace, name)(**(settings or {}))
|
import os
# Relevance items for questions Q3-Q16 (moral-foundations style: "when you
# decide whether something is right or wrong, how relevant is ...?").
questions = ["Whether or not someone's action showed love for his or her country",
            "Whether or not someone showed a lack of respect for authority",
            "Whether or not someone violated standards of purity and decency",
            "Whether or not someone was good at math",
            "Whether or not someone cared for someone weak or vulnerable",
            "Whether or not someone acted unfairly",
            "Whether or not someone did something to betray his or her group",
            "Whether or not someone conformed to the traditions of society" ,
            "Whether or not someone did something disgusting",
            "Whether or not someone was cruel",
            "Whether or not someone was denied his or her rights",
            "Whether or not someone showed a lack of loyalty",
            "Whether or not an action caused chaos or disorder",
            "Whether or not someone acted in a way that God would approve of"]
# Agreement items for questions Q17-Q32 (statements rated on a 0-5 scale).
questions2 = ["Compassion for those who are suffering is the most crucial virtue.",
            "When the government makes laws, the number one principle should be ensuring that everyone is treated fairly.",
            "I am proud of my country's history.",
            "Respect for authority is something all children need to learn.",
            "People should not do things that are disgusting, even if no one is harmed. ",
            "It is better to do good than to do bad.",
            "One of the worst things a person could do is hurt a defenseless animal.",
            "Justice is the most important requirement for a society.",
            "People should be loyal to their family members, even when they have done something wrong. ",
            "Men and women each have different roles to play in society.",
            "I would call some acts wrong on the grounds that they are unnatural.",
            "It can never be right to kill a human being.",
            "I think it's morally wrong that rich children inherit a lot of money while poor children inherit nothing.",
            "It is more important to be a team player than to express oneself.",
            "If I were a soldier and disagreed with my commanding officer's orders, I would obey anyway because that is my duty.",
            "Chastity is an important and valuable virtue."]
def part1():
    """Print HTML slider blocks for questions Q3-Q16 (relevance items).

    NOTE: the print call below relies on backslash line-continuations
    *inside* a string literal, so the continuation lines' exact leading
    whitespace is part of the emitted HTML — do not re-indent them.
    """
    for i in range(3, 17):
        print("<div class = \"question\">\n\tQ" + str(i) +": " + questions[i-3] + ": <strong><span id=\"demo" +str(i) + "\" , class = \"demo\"></span> </strong>\
\n\t\t<div class=\"slidecontainer\">\n\
            <input type=\"range\" min=\"0\" max=\"5\" value=\"3\" class=\"slider\" id=\"myRange\" oninput=\"myFunction(this, \'demo" + str(i) + "\')\">\
\n\t\t</div>\
\n</div>")
        # Blank lines between question blocks for readability of the output.
        print("")
        print("")
def part2():
    """Print HTML slider widgets for questions Q17..Q32.

    Emits one <div class="question"> block per entry of the module-level
    `questions2` list, each containing a 0-5 range input whose oninput
    handler calls myFunction2(this, 'demoN').
    """
    # NOTE(review): same markup issues as part1 — duplicate id="myRange"
    # and a stray comma inside the <span> tag; confirm before changing.
    for i in range(17, 33):
        print("</br></br></br><div class = \"question\">\n\tQ" + str(i) +": " + questions2[i-17] + ": <strong><span id=\"demo" +str(i) + "\" , class = \"demo\"></span> </strong>\
\n\t\t<div class=\"slidecontainer\">\n\
<input type=\"range\" min=\"0\" max=\"5\" value=\"3\" class=\"slider\" id=\"myRange\" oninput=\"myFunction2(this, \'demo" + str(i) + "\')\">\
\n\t\t</div>\
\n</div>")
    print("")
    print("")
part2() |
import os
import errno
def file_picker():
    """Prompt the user for a path and open that file for reading.

    Returns:
        The open file object, or the string "unusable input" when the
        path does not exist.  Any other IOError is re-raised.
    """
    # FIX: raw_input() is Python 2 only and raises NameError on
    # Python 3 (the rest of this file uses print() calls); input()
    # already returns str, so the redundant str() cast is gone too.
    file_path = input("Please enter the file to check: ")
    print("Attempting to open file: " + file_path)
    try:
        file = open(file_path, "r")  # open file for 'r' READing
    except IOError as e:
        if e.errno == errno.ENOENT:
            # Missing file: report a sentinel instead of crashing.
            return "unusable input"
        raise
    else:
        return file
import io
import contextlib

def check_profanity(file):
    """Scan an open text file for profane words and report each hit.

    Prints the 0-based line and word position of every profanity found
    (after stripping trailing punctuation), or a congratulation message
    when the text is clean.  Returns None.
    """
    list_profanity = ['shit', 'fuck', 'ass']
    clean = True  # flips to False on the first hit
    # FIX: the original iterated iter(file.readline, b'') — a text-mode
    # readline returns '' at EOF, never b'', so the sentinel never
    # matched and the loop spun forever.  Iterating the file object
    # yields its lines directly.  (Also dropped an unused `data` dict.)
    for LineNumber, Line in enumerate(file):
        for WordNumber, Word in enumerate(Line.split()):
            Word = Word.strip(".,;:")  # ignore surrounding punctuation
            if Word.lower() in list_profanity:
                print("LINE: "+str(LineNumber)+
                      " WORD: "+str(WordNumber)+
                      " <"+Word+
                      "> is most profane!!")
                clean = False
    if clean:
        print("Good Job! You responded without swearing!")
def main():
    """Entry point: pick a file interactively, then scan it for profanity."""
    # NOTE(review): file_picker may return the string "unusable input"
    # instead of a file object; check_profanity would then iterate the
    # string's characters — confirm whether that path is intended.
    file=file_picker()
    check_profanity(file)
main() |
#-*-coding: utf-8 -*-
# Loop-debugging demo: `task` is incremented once per iteration, but the
# `task == 10` branch decrements it back, so from the 10th iteration
# onward the counter oscillates between 9 and 10 instead of counting up.
# (Original comments, translated: when a loop variable is being changed
# to a wrong value mid-loop, debugging with Alt+F9 lets you step one
# loop iteration at a time.)
# FIX: removed a stray trailing "|" artifact after the final print that
# made the script a syntax error.
print("반복문 디버그")
task = 0
for i in range(1, 101):
    print("반복문 실행")
    task += 1
    print("%d번째 업무 실행" % task)
    print("해당 업무 종료\n")
    # Deliberate "bug" for the debugging exercise.
    if task == 10:
        task -= 1
print("반복문 종료")
from pathfinder.algorithms import (
a_star_search,
breadth_first_search,
dijkstra_search,
reconstruct_path,
)
from pathfinder.grids import SquareGrid, WeightedGrid
from pathfinder.views import ascii_drawer
# Demo script: render BFS, Dijkstra and A* runs over ASCII-drawn grids.

# --- Unweighted 30x15 grid: breadth-first search -----------------------
maze = SquareGrid(30, 15)
# Four rectangular wall regions, each given as (x-range, y-range).
maze.walls = [
    (x, y)
    for xs, ys in (
        (range(3, 5), range(3, 12)),
        (range(13, 15), range(4, 15)),
        (range(21, 23), range(0, 5)),
        (range(21, 26), range(5, 7)),
    )
    for x in xs
    for y in ys
]
start = (8, 7)
goal = (17, 2)

print(ascii_drawer.draw_grid(maze))
print()
parents = breadth_first_search(maze, start, goal)
print(ascii_drawer.draw_grid(maze, point_to=parents, start=start, goal=goal))
print()

# --- Weighted 10x10 grid: Dijkstra, then A* ----------------------------
terrain = WeightedGrid(10, 10)
terrain.walls = [(x, y) for x in range(1, 4) for y in range(7, 9)]
# Cells with traversal cost 5 (a "forest" band through the middle).
costly = [(3, y) for y in (4, 5)]
costly += [(x, y) for x in (4, 5) for y in range(1, 9)]
costly += [(6, y) for y in range(2, 8)]
costly += [(7, y) for y in range(3, 6)]
terrain.weights = {loc: 5 for loc in costly}
start = (1, 4)
goal = (7, 8)

came, cost = dijkstra_search(terrain, start, goal)
print(ascii_drawer.draw_grid(terrain, tile_width=3, point_to=came, start=start, goal=goal))
print()
print(ascii_drawer.draw_grid(terrain, tile_width=3, number=cost, start=start, goal=goal))
print()
print(ascii_drawer.draw_grid(terrain, tile_width=3, path=reconstruct_path(came, start, goal)))
print()

came, cost = a_star_search(terrain, start, goal)
print(ascii_drawer.draw_grid(terrain, tile_width=3, point_to=came, start=start, goal=goal))
print()
print(ascii_drawer.draw_grid(terrain, tile_width=3, number=cost, start=start, goal=goal))
print()
print(ascii_drawer.draw_grid(terrain, tile_width=3, path=reconstruct_path(came, start, goal)))
print()
|
# Read two integers from one input line and print them in swapped order,
# using the classic XOR trick (x ^ y ^ y == x) instead of a temporary.
left, right = input().split()
left, right = int(left), int(right)
mask = left ^ right            # holds left XOR right
first = mask ^ right           # recovers the original left value
second = mask ^ first          # recovers the original right value
print(second, first)
|
# Minimal path sum from the top-left to the bottom-right of the matrix
# in matrix.txt, moving only right or down (Project Euler style).
with open('matrix.txt', 'r') as f:
    # FIX: map() is a lazy iterator on Python 3 and cannot be indexed;
    # materialise each row as a list.
    matrix = [list(map(int, line.strip('\n').split(','))) for line in f]

# dp[i][j] = cheapest path sum that reaches cell (i, j).
dp = [[None for _ in range(len(matrix[0]))] for _ in range(len(matrix))]
# First row and first column have a single straight-line path each.
for j in range(len(matrix[0])):
    dp[0][j] = sum(matrix[0][:j + 1])
for i in range(1, len(matrix)):
    dp[i][0] = sum(matrix[k][0] for k in range(i + 1))
for i in range(1, len(matrix)):
    for j in range(1, len(matrix[0])):
        # Best predecessor is the cheaper of the top and left neighbours.
        dp[i][j] = matrix[i][j] + min(dp[i - 1][j], dp[i][j - 1])
# FIX: Python 2 print statement converted to a Python 3 function call
# (the block otherwise targets Python 3 file-handling idioms).
print(dp[-1][-1])
# -*- coding: utf-8 -*-
from rest_framework import serializers
from api.models import Employee
from rest_framework.validators import UniqueValidator
class EmployeeSerializer(serializers.ModelSerializer):
    """Serializer to map the Model instance into JSON format."""
    # Explicit field declarations override the model-derived defaults so
    # that length constraints are enforced at the API layer.
    name = serializers.CharField(min_length=3, max_length=200)
    # UniqueValidator rejects payloads whose email already exists in the
    # Employee table.
    email = serializers.EmailField(validators=[UniqueValidator(queryset=Employee.objects.all())])
    department = serializers.CharField(min_length=2, max_length=200)
    class Meta:
        """Meta class to map serializer's fields with the model fields."""
        model = Employee
        fields = ('name', 'email', 'department')
|
"""
This file downloads the latest COVID-19 case data for Myanmar.
"""
import pandas as pd
from autumn.settings import INPUT_DATA_PATH
from pathlib import Path
# Wrap the project-configured path in a Path so "/" joining works below.
INPUT_DATA_PATH = Path(INPUT_DATA_PATH)
# Destination CSV for the downloaded Myanmar case data.
COVID_MMR_TESTING_CSV = INPUT_DATA_PATH / "covid_mmr" / "cases.csv"
# Google Sheets export URL serving the raw case data as CSV.
URL = "https://docs.google.com/spreadsheets/d/1VeUof9_-s0bsndo8tLsCwnAhkUUZgsdV-r980gumMPA/export?format=csv&id=1VeUof9_-s0bsndo8tLsCwnAhkUUZgsdV-r980gumMPA"
def fetch_covid_mmr_data():
    """Download the latest Myanmar COVID data and cache it as a local CSV."""
    # NOTE(review): to_csv without index=False also writes the DataFrame
    # index as an extra unnamed column — confirm downstream readers
    # expect that before changing.
    mmr_df = pd.read_csv(URL)
    mmr_df.to_csv(COVID_MMR_TESTING_CSV)
|
# -*- coding:utf-8 -*-
import unittest
import mock
class UsingMockDatetimeTest(unittest.TestCase):
    """Demonstrates where to patch when mocking datetime.datetime.

    Shows that patching "datetime.datetime" does not affect a module
    that imported the name directly, while patching the name inside
    that module ("mydatetime.datetime") does.
    """
    def _callFUT(self):
        # "call Function Under Test": delegate to the project's
        # mydatetime.now() helper.
        import mydatetime
        return mydatetime.now()
    def test(self):
        with mock.patch("datetime.datetime") as M:
            M.now.return_value = 10
            self.assertEqual(self._callFUT(), 10)
    def test2(self):
        with mock.patch("datetime.datetime") as M:
            M.now.return_value = 11
            # (translated) The patch applied in `test` ends up staying
            # alive here, so this still returns 10.
            self.assertEqual(self._callFUT(), 10)
        # (translated) Patching mydatetime.datetime apparently is the
        # correct approach.
        with mock.patch("mydatetime.datetime") as M:
            M.now.return_value = 11
            self.assertEqual(self._callFUT(), 11)
# Run the demonstration tests when the file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
import math
import numpy as np
class OffloadSVM:
    """Export a fitted linear SVM (plus optional scaler) as Arduino C code.

    Wraps a scikit-learn linear SVM (SVC with kernel='linear', or
    LinearSVC) and optionally a scaler, and generates a self-contained
    Arduino sketch that evaluates the decision rule w.x >= c on-device
    and prints the predicted class label over serial.
    """
    def __init__(self, model, scaler):
        """Capture the model's weights, intercept and scaling parameters.

        Raises TypeError when given an SVC with a non-linear kernel,
        since only a linear decision function can be emitted as a plain
        dot product.
        """
        # Scaler applied before the SVM, or a falsy value for raw input.
        self.scaler = scaler
        # The two class labels, in the order scikit-learn stores them.
        self.class0 = model.__dict__['classes_'][0]
        self.class1 = model.__dict__['classes_'][1]
        if model.__repr__().split('(')[0] == 'SVC' and model.__dict__['kernel'] != 'linear':
            raise TypeError("Only linear SVM is supported! Pass kernel = 'linear' to SVC or use LinearSVC.")
        self.w = model.coef_[0]
        # Rewrite w.x + intercept >= 0 as w.x >= c with c = -intercept.
        self.c = -1*model.intercept_[0]
        self.dim = len(self.w)
        if scaler:
            # assumes a StandardScaler-like object exposing mean_ and
            # scale_ — TODO confirm for other scaler types.
            self.u = scaler.mean_
            # Reciprocal of scale so the device multiplies, never divides.
            self.p = np.reciprocal(scaler.scale_)
    def get_params(self):
        """Return the weight vector and decision threshold as a dict."""
        return {'Weight_Vector':self.w, 'Negative_Intercept_Constant':self.c}
    def get_svm_params_string(self):
        """Return the weight vector as a comma-separated C initializer."""
        str_w = ', '.join([str(x) for x in self.w])
        return str_w
    def get_scaling_params_string(self):
        """Return (means, reciprocal scales) as comma-separated C initializers."""
        str_u = ', '.join([str(x) for x in self.u])
        str_p = ', '.join([str(x) for x in self.p])
        return str_u, str_p
    def unscaled_svm_arduino_code(self):
        """Return an Arduino sketch classifying raw (unscaled) features."""
        # NOTE(review): "Retrive" typo below is inside the generated
        # sketch text (a runtime string), so it is left untouched here.
        str_w = self.get_svm_params_string()
        code = f"""double w[] = {{{str_w}}};
double c = {str(self.c)};
void setup() {{
Serial.begin(9600);
}}
void loop() {{
//Data Section: To Be Coded Manually
float data[{str(self.dim)}]; //This is your feature vector. Retrive your data into this array.
//ML Inference Section
double temp = 0.0;
for(int i=0; i<{str(self.dim)}; i++)
{{
temp += (data[i] * w[i]);
}}
if(temp >= c)
{{
//Do something for class label {str(self.class1)}.
Serial.println("{str(self.class1)}");
}}
else
{{
//Do something for class label {str(self.class0)}.
Serial.println("{str(self.class0)}");
}}
delay(1000);
}}"""
        return code
    def scaled_svm_arduino_code(self):
        """Return an Arduino sketch that standardises features before classifying.

        The device computes (data[i]-u[i]) * p[i], i.e. (x - mean) / scale
        with the division precomputed as a reciprocal multiply.
        """
        str_w = self.get_svm_params_string()
        str_u, str_p = self.get_scaling_params_string()
        code = f"""double w[] = {{{str_w}}};
double u[] = {{{str_u}}};
double p[] = {{{str_p}}};
double c = {str(self.c)};
void setup() {{
Serial.begin(9600);
}}
void loop() {{
//Data Section: To Be Coded Manually
float data[{str(self.dim)}]; //This is your feature vector. Retrive your data into this array.
//ML Inference Section
double temp = 0.0;
for(int i=0; i<{str(self.dim)}; i++)
{{
temp += (data[i]-u[i]) * p[i] * w[i];
}}
if(temp >= c)
{{
//Do something for class label {str(self.class1)}.
Serial.println("{str(self.class1)}");
}}
else
{{
//Do something for class label {str(self.class0)}.
Serial.println("{str(self.class0)}");
}}
delay(1000);
}}"""
        return code
    def get_arduino_code(self):
        """Return the appropriate sketch: scaled when a scaler was supplied."""
        if self.scaler:
            return self.scaled_svm_arduino_code()
        return self.unscaled_svm_arduino_code()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.