repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
BeginToCodeSnaps/PythonSnaps | EG3-06 Ding.py | <reponame>BeginToCodeSnaps/PythonSnaps<gh_stars>1-10
# EG3-06 Ding
# Plays a short sound effect via the book's 'snaps' helper library.
import snaps
# 'ding.wav' must be available where snaps looks for sound assets.
snaps.play_sound('ding.wav')
|
BeginToCodeSnaps/PythonSnaps | EG3-04 hello world.py | # EG3-04 hello world
# Shows a greeting using the book's 'snaps' helper library.
import snaps
snaps.display_message('hello world')
|
BeginToCodeSnaps/PythonSnaps | EG4-04 Pizza Order Calculator.py | # EG4-04 Pizza Order Calculator
# Ask how many students are attending; input() always returns a string.
students_text = input('How many students: ')
# Convert to int — raises ValueError on non-numeric input.
students_int = int(students_text)
# Assumes 1.5 students are fed per pizza; the result may be fractional.
pizza_count = students_int/1.5
print('You will need', pizza_count, 'pizzas')
|
BeginToCodeSnaps/PythonSnaps | EG4-01 Self Timer.py | # EG4-01 Self Timer
import time
import random

# Explain the game rules to the players.
print('''Welcome to Self Timer
Everybody stand up
Stay standing until you think the time has ended.
Then sit down.
Anyone still standing when the time ends loses.
The last person to sit down before the time ended will win.''')

# Pick a secret duration between 5 and 20 seconds (inclusive).
stand_time = random.randint(5, 20)
print('Stay standing for', stand_time, 'seconds.')
# Block until the secret time has elapsed, then announce the end.
time.sleep(stand_time)
print('****TIME UP****')
|
jackchi/google-python-exercises | copyspecial/copyspecial.py | <reponame>jackchi/google-python-exercises
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
spFiles = {} # global dictionary for keeping one copy of file
def copy_special_path(dir):
    """
    Return the list of paths in directory 'dir' whose filename contains
    a "special" pattern of the form __word__ (e.g. xyz__hello__.txt).

    Exits the program (status 1) if the directory cannot be listed, or if
    the same special filename was already seen in a previously scanned
    directory (tracked in the module-level spFiles dict).
    """
    rSP = re.compile(r'__\w+__')  # special path pattern 'e.g: __word__.txt'
    try:
        # find all files in an existing 'dir' directory that matches the special path regular expression match
        files = []
        print ".............................\nChecking Directory: %s" % (os.path.abspath(dir))
        for f in os.listdir(dir):  # both files and directories
            path = os.path.join(dir, f)
            if (os.path.isfile(path) and re.search(rSP, f)):  # only include matched special path and files
                if(f in spFiles):  # discard and error out duplicate file names
                    print "DUPLICATE FILES FOUND!"
                    print "File %s already exists in %s/%s" % (path, spFiles[f], f)
                    sys.exit(1)
                else:
                    files.append(path)
                    spFiles[f] = os.path.abspath(dir)  # remember which dir claimed this name
        print "Found %d files:" % len(files)
        for v in files:
            print "%s" % v
    except OSError, e:  # Python 2 syntax; e.g. 'dir' does not exist
        print e
        sys.exit(1)
    except Exception, e:
        raise
        # NOTE(review): unreachable — 'raise' above always exits this branch.
        sys.exit(1)
    return files
def zip_to(paths, zippath):
    """
    Zip every special file found under each directory in 'paths' into the
    archive 'zippath' using the external 'zip' command (-j drops directory
    prefixes). Exits with status 1 if the zip command fails.
    """
    files = []
    for path in paths:
        files += copy_special_path(path)
    # Build the shell command. NOTE(review): filenames containing spaces
    # are not quoted here, so such paths would break the command.
    cmd = 'zip -j %s %s' % (zippath, ' '.join(files))
    print 'Executing Command: %s' % (cmd)
    (status, output) = commands.getstatusoutput(cmd)  # Python 2 'commands' module
    if status:  # non-zero exit status means the zip command failed
        sys.stderr.write(output)
        sys.exit(1)
    print output
    return
def copy_to(paths, dir):
    """
    Given a list of files 'paths' copy to 'dir'
    Create 'dir' if it doesn't exist
    """
    for path in paths:
        files = copy_special_path(path)
        for f in files:
            # NOTE(review): existence is re-checked per file; os.mkdir fails
            # if 'dir' has missing parent directories (os.makedirs would be
            # safer) — confirm intent.
            if os.path.exists(os.path.abspath(dir)):
                shutil.copy(f, dir)
            else:
                os.mkdir(dir)
                shutil.copy(f, dir)
        print ".....................................\nCopied %d files to %s" % (len(files), os.path.abspath(dir))
    return
def main():
    # This basic command line argument parsing code is provided.
    # Add code to call your functions below.

    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
        sys.exit(1)

    # todir and tozip are either set from command line
    # or left as the empty string.
    # The args array is left just containing the dirs.
    todir = ''
    if args[0] == '--todir':
        todir = args[1]
        del args[0:2]
        copy_to(args, todir)

    tozip = ''
    # NOTE(review): if '--todir' consumed the remaining args, args[0] here
    # raises IndexError — the len(args) == 0 check below comes too late.
    if args[0] == '--tozip':
        tozip = args[1]
        del args[0:2]
        zip_to(args, tozip)

    if len(args) == 0:
        print "error: must specify one or more dirs"
        sys.exit(1)

    # With no --todir/--tozip flag, just list the special files per dir.
    if (todir == '' and tozip == ''):
        for arg in args:
            copy_special_path(arg)

if __name__ == "__main__":
    main()
|
jackchi/google-python-exercises | repeat.py | #!/usr/bin/python -tt
import sys
# Defines a "repeat" function that takes 2 arguments.
def repeat(s, exclaim):
    """Return the string s repeated three times.

    When exclaim is truthy, '!!!' is appended to the tripled string.
    """
    tripled = s * 3
    return tripled + '!!!' if exclaim else tripled
def main():
    # Demonstrate repeat() with and without the exclamation suffix.
    print repeat('Yay', False)  ## YayYayYay
    print repeat('Woo Hoo', True)  ## Woo HooWoo HooWoo Hoo!!!
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main() |
jackchi/google-python-exercises | crack/reverse.py | <gh_stars>0
#!/usr/bin/python
# Simple Algorithm To Reverse A String
import sys
def main():
    """Print and return the reverse of the single string argument."""
    # Require exactly one user argument (argv[0] is the script itself).
    if len(sys.argv) != 2:
        print 'usage: ./reverse.py %s' % ('"string"')
        sys.exit(1)
    index = {}  # NOTE(review): unused — likely copied from unique.py.
    str = sys.argv[1]  # shadows the builtin 'str'
    rev = ""
    # Prepend each character, building the reversed string left-to-right.
    for c in str:
        rev = c + rev
    print rev
    return rev

if __name__ == '__main__':
    main()
|
jackchi/google-python-exercises | babynames/babynames.py | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it (Done)
-Extract the names and rank numbers and just print them (Done)
-Get the names data into a dict and print it (Done)
-Build the [year, 'name rank', ... ] list and print it (Done)
-Fix main() to use the extract_names list (Done)
"""
def extract_names(filename):
    """
    Given a file name for baby.html, returns a list starting with the year string
    followed by the name-rank strings in alphabetical order.
    ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]

    Returns None (after printing a warning) when no year header is found.
    """
    # Names Hashtable: name -> rank string (later rows overwrite earlier ones)
    rankDb = {}
    rankList = []
    # Compile regular expressions for the year header and one table row.
    rYear = re.compile(r'Popularity in (\d\d\d\d)')
    rName = re.compile(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>')
    # Read the whole HTML file into one string.
    f = open(filename, 'r')
    fileStr = f.read()
    # Parse year and name ranks
    year = re.search(rYear, fileStr)
    ranks = re.findall(rName, fileStr)
    # Insert ranks into Db: each row is (rank, boy name, girl name).
    for rank in ranks:
        rankDb[rank[1]] = rank[0]
        rankDb[rank[2]] = rank[0]

    def rankHelper(s):
        # Sort key: s is a (name, rank) pair; s[0] is the name.
        return s[0]

    # Construct the return year or skip if file isn't the right format
    if year:
        rankList.append(year.group(1))
    else:
        # NOTE(review): this early return leaks the open file handle 'f'.
        print "skipping over %s: Year Not Found" % filename
        return
    # Sort alphabetically by name and append "name rank" strings.
    for k, v in sorted(rankDb.items(), key=rankHelper):
        rankList.append("%s %s" % (k, v))
    f.close()
    return rankList
def main():
    # This command-line parsing code is provided.
    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        print 'usage: [--summaryfile] file [file ...]'
        sys.exit(1)

    # Notice the summary flag and remove it from args if it is present.
    summary = False
    if args[0] == '--summaryfile':
        summary = True
        del args[0]

    # For each filename, extract the names, then either print the text
    # output or write it to a <filename>.summary file.
    for filename in args:
        result = extract_names(filename)
        if result:  # extract_names returns None for malformed files
            if summary:
                summaryfile = open('%s.summary' % filename, 'w')
                summaryfile.write('\n'.join(result) + '\n')
                summaryfile.close()
            else:
                print '\n'.join(result) + '\n'

if __name__ == '__main__':
    main()
|
jackchi/google-python-exercises | crack/unique.py | #!/usr/bin/python
import sys
def main():
    """Report whether the single string argument has all-unique characters."""
    if len(sys.argv) != 2:
        print 'usage: ./unique.py %s' % ('"string"')
        sys.exit(1)
    index = {}  # characters seen so far (dict used as a set)
    str = sys.argv[1]  # shadows the builtin 'str'
    for c in str:
        if c in index:
            # First repeated character: report it and exit non-zero.
            print "\"%s\" not unique: %c repeated" % (str, c)
            sys.exit(1)
        else:
            index[c] = 1
    print "\"%s\" is unique" % (str)
    return

if __name__ == '__main__':
    main()
|
FabianSaulRubilarAlvarez/mat281_portfolio_template | project/Project/preprocesamiento.py | hacer la mejor predicción de cada imagen. Para ello es necesario realizar los pasos clásicos de un proyecto de _Machine Learning_, como estadística descriptiva, visualización y preprocesamiento.
|
LaRiffle/axa_challenge | Main.py | import pandas as pd
from fonction_py.preprocess import *
from fonction_py.train import *
from fonction_py.tools import *
from fonction_py.robin import *
from fonction_py.tim import *
from sklearn import linear_model
import numpy as np
import matplotlib.pyplot as plt
import time
#A FAIRE :)
#preprocess : Preprocessing : choisir les colonnes et créer les nouvelles,
#preprocessFINAL : Preprocessing de submission.txt pour qu'il soit exactement comme la sortie du preprocessing
#
#1 modele :
# option : PCA -> réduire et choisir la dimension la meilleur,
# prevision : regression lineaire (tout pourri)
# gradient descent
# tree
#2eme modele :
# etude en fonction de ASS_ASSIGNMENT : voir lesquels servent a qqc l'impact de l'annee
#
start_time = time.time()
print("go")
fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ]  # columns to read from the CSV
selectAss = 'Téléphonie'  # which ASS_ASSIGNMENT category to work on
c = pd.DataFrame()
# All assignment categories to score ('Evenements', 'Gestion Amex' excluded).
listass= ['CAT', 'CMS', 'Crises', 'Domicile', 'Gestion', 'Gestion - Accueil Telephonique','Gestion Assurances', 'Gestion Clients', 'Gestion DZ', 'Gestion Relation Clienteles', 'Gestion Renault', 'Japon', 'Manager', 'Mécanicien', 'Médical', 'Nuit', 'Prestataires', 'RENAULT', 'RTC', 'Regulation Medicale', 'SAP', 'Services', 'Tech. Axa', 'Tech. Inter', 'Tech. Total', 'Téléphonie']
#setFields = set(pd.read_csv("data/fields.txt", sep=";")['0'].values)
i=0
res = []
start_time = time.time()
for selectAss in listass:
    i = i+1
    # Progress: category name and percentage of the list processed so far.
    print(selectAss+' ' +str(np.round(i*100/len(listass))))
    x=pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)  # read the training data
    x,y = preprocess(x,selectAss)  # add the engineered features
    #res.append(robin(x,y))
    res.append(linearLinexpMinimization(x,y))
    print("--- %s seconds ---" % str((time.time() - start_time)))
# One score entry per category, indexed by category name.
res = pd.DataFrame(res, index=listass)
#res.columns = listModel
res.to_csv("restestTel.csv", sep=";", decimal=",")
####################################################### final write-out (disabled)
# xTest, souvenir = preprocessFINAL(x,selectAss)
# souvenir['prediction']= faire(x,y,xTest)
# ###################################################################### ROBIN'S TEST
# resultat=pd.merge(resultat, souvenir, how='left',on=['DATE', 'ASS_ASSIGNMENT'])
# resultat['fait'] = ~pd.isnull(resultat['prediction_y']) | resultat['fait']
# resultat=resultat.fillna(0)
# resultat['prediction'] = resultat['prediction_x']+resultat['prediction_y']
# del resultat['prediction_x']
# del resultat['prediction_y']
#del resultat['fait']
#resultat.to_csv("vraipred.txt", sep="\t", index =False)
###########################################################
print("--- %s seconds ---" % (time.time() - start_time))
|
LaRiffle/axa_challenge | fonction_py/boosting.py | <reponame>LaRiffle/axa_challenge
from fonction_py.tools import *
from fonction_py.preprocess import *
from scipy.optimize import minimize
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import decomposition
import time
from sklearn.ensemble import GradientBoostingRegressor
# Gradient-boosting model for the 'Téléphonie' category, trained on the
# engineered feature set and merged back into the submission skeleton.
clf = GradientBoostingRegressor(loss='huber', alpha=0.9,
                                n_estimators=100, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)
fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ]  # columns to read
data = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)  # training data
resultat = pd.read_csv("data/submission.txt", sep="\t")  # submission skeleton
xTrain, yTrain = preprocess(data.copy(), 'Téléphonie')  # add engineered features
xTest, xTrain, souvenir = preprocessFINAL(xTrain, 'Téléphonie')
clf.fit(xTrain, yTrain)
yPred = clf.predict(xTest)
# NOTE(review): yPred is computed but never stored into 'souvenir' before
# the merge below — confirm whether souvenir['prediction'] = yPred is missing.
resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT'])
resultat = resultat.fillna(0)  # rows with no prediction default to 0
resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']
del resultat['prediction_x']
del resultat['prediction_y']
# Bug fix: removed `pd.DataFrame(res).to_csv("reslist.csv", ...)` — 'res'
# was never defined in this script, so it raised NameError before the
# final submission file could be written.
resultat.to_csv("boosting.txt", sep="\t", index=False)
LaRiffle/axa_challenge | fonction_py/tim.py | from fonction_py.tools import *
from fonction_py.preprocess import *
from scipy.optimize import minimize
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import decomposition
import time
# NOTE: predict 'Gestion Renault' as 0 from February 2011 onwards
#
#
def fun_to_min(x, xTrain, yTrain):
    """LinExp loss of the affine model xTrain·x[:-1] + x[-1] against yTrain.

    The parameter vector x packs the weights in x[:-1] and the bias in x[-1].
    """
    weights, bias = x[:-1], x[-1]
    predictions = np.dot(xTrain, np.transpose(weights)) + bias
    return LinExp(predictions, yTrain)
def linearLinexpMinimization(x, y):
    """
    Fit an affine model on a 65-component PCA projection of x by directly
    minimising the LinExp loss (via fun_to_min), then evaluate it on a
    held-out 20% split and plot the error histogram. Evaluation only —
    returns None.
    """
    xTrain, xTest, yTrain, yTest = faireSplitting(x, y, 0.8)  # 80/20 split
    del x
    del y
    print("ok")
    print("AVEC")
    # Project the training features onto the top 65 principal components.
    pca = decomposition.PCA(n_components=65)
    pca.fit(xTrain)
    PCAxTrain = pca.transform(xTrain)
    nbLines, nbFeatures = PCAxTrain.shape
    # Minimise fun_to_min over [weights..., bias], starting from zeros.
    res = minimize(fun_to_min, np.zeros(nbFeatures+1), args=(PCAxTrain, yTrain))
    PCAxTest = pca.transform(xTest)
    x = res.x  # x has length nbFeatures+1
    a = x[:-1]  # a.T is a column of length nbFeatures
    b = x[-1]  # b is a scalar
    print("a : \n", a)
    print("b : \n", b)
    # Predict on the projected test set and round to whole call counts.
    pred = np.dot(PCAxTest, np.transpose(a)) + b
    pred = np.round(pred)
    check(pred, yTest)
    bins = np.linspace(-10, 10, 40)
    plt.hist(pred-yTest, bins, normed=1)
def telephoniePred(x, y, xTest):
    """
    Train the PCA(65) + LinExp-minimised affine model on (x, y) and return
    rounded predictions for xTest.
    """
    pca = decomposition.PCA(n_components=65)
    pca.fit(x)
    PCAxTrain = pca.transform(x)
    nbLines, nbFeatures = PCAxTrain.shape
    # Optimise [weights..., bias] against the LinExp loss.
    res = minimize(fun_to_min, np.zeros(nbFeatures+1), args=(PCAxTrain, y))
    PCAxTest = pca.transform(xTest)
    x = res.x  # 'x' is reused here for the fitted parameter vector
    a = x[:-1]
    b = x[-1]
    pred = np.dot(PCAxTest, np.transpose(a)) + b
    pred = np.round(pred)
    return pred
def submit():
    """
    Build the full submission: for every assignment category, train the
    PCA+LinExp model (telephoniePred), predict, and merge the predictions
    back into the submission skeleton. Writes vraipred.txt and returns the
    final DataFrame.
    """
    start_time = time.time()
    fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ]  # columns to read
    data = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)  # training data
    resultat = pd.read_csv("data/submission.txt", sep="\t")  # submission skeleton
    categoryList = ['CAT','CMS','Crises','Domicile','Gestion','Gestion - Accueil Telephonique','Gestion Assurances','Gestion Clients','Gestion DZ','Gestion Relation Clienteles','Gestion Renault','Japon','Manager','Mécanicien','Médical','Nuit','Prestataires','RENAULT','RTC','Regulation Medicale','SAP','Services','Tech. Axa','Tech. Inter','Tech. Total','Téléphonie']
    for category in categoryList :
        start_time = time.time()
        print(category)
        xTrain, yTrain = preprocess(data.copy(), category)  # add engineered features
        xTest, xTrain, souvenir = preprocessFINAL(xTrain, category)
        prediction = telephoniePred(xTrain, yTrain, xTest)
        prediction = np.round(prediction).astype(int)  # whole call counts
        souvenir['prediction'] = prediction
        end_time = time.time()
        print('prediction\'s length : ', len(prediction))
        print('Time : ', end_time - start_time)
        # Merge this category's predictions onto their submission rows.
        resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT'])
        print('DONE')
        resultat = resultat.fillna(0)  # rows not predicted yet default to 0
        resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']
        del resultat['prediction_x']
        del resultat['prediction_y']
    # NOTE(review): 'res' is not defined anywhere in this function — this
    # line raises NameError; confirm which results list was intended.
    pd.DataFrame(res).to_csv("reslist.csv", sep=";", decimal=",")
    resultat.to_csv("vraipred.txt", sep="\t", index =False)
    return resultat
|
LaRiffle/axa_challenge | fonction_py/train2.py | <reponame>LaRiffle/axa_challenge
from fonction_py.tools import *
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn import svm
from sklearn import decomposition
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import uniform as sp_randint
from sklearn import datasets
from sklearn.linear_model import Ridge
import time
def faireTout():
    """
    Train one model per assignment category (from faireListModel), predict
    the submission period for each, and merge everything into vraipred.txt.
    Returns the final submission DataFrame.
    """
    fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ]  # columns to read
    c = pd.DataFrame()
    listmodel = faireListModel()
    #'Evenements', 'Gestion Amex'
    #setFields = set(pd.read_csv("data/fields.txt", sep=";")['0'].values)
    resultat = pd.read_csv("data/submission.txt", sep="\t")
    resultat['fait'] = False
    i=0
    res = []
    start_time = time.time()
    model = listmodel[25]
    data = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)  # training data
    # NOTE(review): this re-read discards the 'fait' column added above.
    resultat = pd.read_csv("data/submission.txt", sep="\t")
    res = []
    for model in listmodel:
        i = i+1
        print(model[0])  # assignment category name
        x, y = preprocess(data.copy(), model[0])  # add engineered features
        model[1].fit(x, y)  # train this category's model
        (xTest, souvenir) = preprocessFINAL(x, model[0])
        pred = model[1].predict(xTest)
        pred[pred>max(y)*1.05] = max(y)*1.05  # never predict above 105% of observed max
        pred[pred<0] = 0  # no negative call counts
        pred = np.round(pred)
        souvenir['prediction'] = pred
        # Put each prediction back on its submission row.
        resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT'])
        resultat = resultat.fillna(0)
        resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']
        del resultat['prediction_x']
        del resultat['prediction_y']
    pd.DataFrame(res).to_csv("reslist.csv", sep=";", decimal=",")
    resultat.to_csv("vraipred.txt", sep="\t", index =False)
    return resultat
def faireListModel():
    """Return (assignment_name, regressor) pairs, one per category.

    Most categories use a RandomForestRegressor that differs only in
    bootstrap / max_depth / max_features / n_estimators; every other
    constructor argument is spelled out identically for all of them, so
    they are built through a single local helper.
    """
    def rf(bootstrap, max_depth, max_features, n_estimators):
        # Same explicit argument set as the original per-category literals.
        return RandomForestRegressor(bootstrap=bootstrap, criterion='mse',
                                     max_depth=max_depth, max_features=max_features,
                                     max_leaf_nodes=None, min_samples_leaf=1,
                                     min_samples_split=2, min_weight_fraction_leaf=0.0,
                                     n_estimators=n_estimators, n_jobs=1,
                                     oob_score=False, random_state=None,
                                     verbose=0, warm_start=False)

    return [
        ('CAT', linear_model.LinearRegression()),
        ('CMS', rf(False, 5, 30, 10)),
        ('Crises', linear_model.LinearRegression()),
        ('Domicile', rf(True, 30, 30, 90)),
        ('Gestion', rf(True, 30, 'auto', 100)),
        ('Gestion - Accueil Telephonique', rf(True, 20, 30, 70)),
        ('Gestion Assurances', rf(False, 20, 30, 20)),
        ('Gestion Clients', rf(True, 10, 90, 50)),
        ('Gestion DZ', rf(True, 5, 30, 30)),
        ('Gestion Relation Clienteles', rf(True, 10, 90, 110)),
        ('<NAME>', rf(True, 30, 50, 30)),
        ('Japon', rf(False, 10, 30, 30)),
        ('Manager', rf(True, 10, 30, 30)),
        ('Mécanicien', rf(True, 20, 'auto', 100)),
        ('Médical', rf(True, 30, 'auto', 100)),
        ('Nuit', rf(True, 20, 'auto', 100)),
        ('Prestataires', rf(True, 20, 'auto', 100)),
        ('RENAULT', rf(True, 80, 'auto', 100)),
        ('RTC', rf(True, 20, 'auto', 100)),
        ('Regulation Medicale', linear_model.LinearRegression()),
        ('SAP', rf(False, 20, 30, 30)),
        ('Services', rf(False, 30, 30, 30)),
        ('Tech. Axa', rf(True, 20, 'auto', 100)),
        ('Tech. Inter', rf(False, 30, 30, 30)),
        ('Tech. Total', rf(True, 70, 'auto', 100)),
        ('Téléphonie', RandomForestRegressor(n_estimators=40, bootstrap=False, max_depth=1, max_features=12)),
    ]
LaRiffle/axa_challenge | fonction_py/robin.py | from fonction_py.tools import *
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn import svm
from sklearn import decomposition
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import uniform as sp_randint
from sklearn import datasets
from sklearn.linear_model import Ridge
def faire(xTrain, yTrain, xTest):
    # NOTE(review): the three parameters are immediately overwritten below,
    # and 'fields' / 'mean' are undefined in this module — this looks like
    # pasted exploration code that cannot run as written.
    le = []  # LinExp score of each run
    sc = []  # training-set R^2 score of each run
    data = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)  # read the training file
    x, y = preprocess(data, "CAT")
    # Repeat train/evaluate 100 times on random 80/20 splits.
    for i in range(100):
        xTrain, xTest, yTrain, yTest = faireSplitting(x, y, 0.8)
        model = RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=5,
                                      max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
                                      min_samples_split=2, min_weight_fraction_leaf=0.0,
                                      n_estimators=10, n_jobs=1, oob_score=False, random_state=None,
                                      verbose=0, warm_start=False)
        model.fit(xTrain, yTrain)
        sc.append(model.score(xTrain, yTrain))
        pred = model.predict(xTest)
        pred[pred>max(yTrain)*1.05] = max(yTrain)*1.05  # clip extreme predictions
        pred[pred<0] = 0  # no negative call counts
        pred = np.round(pred).astype(int)
        le.append(LinExp(pred, yTest))
    mean(sc)
    mean(le)
    plt.hist(le)
    plt.hist(sc)
    return np.round(pred).astype(int)
def opt(x, y):
    """
    Randomised hyper-parameter search for a RandomForestRegressor on an
    80/20 split. Returns [best_estimator, LinExp score of its clipped,
    rounded test predictions].
    """
    xTrain, xTest, yTrain, yTest = faireSplitting(x, y, 0.8)
    clf = RandomForestRegressor(n_estimators=20)
    # Sample 20 candidate max_features values around half the feature count.
    m = np.random.normal(xTrain.shape[1]/2, 5, 20).astype(int)
    m[m<4] = 4  # clamp to a sane lower bound
    m[m>xTrain.shape[1]] = xTrain.shape[1]  # and to the feature count
    param_dist = {"max_depth": [100, 90, 60, 50, 10, None],
                  "max_features": list(m),
                  # "min_samples_split": sp_randint(1, 11),
                  # "min_samples_leaf": sp_randint(1, 11),
                  "bootstrap": [True, False]
                  }
    rsearch = RandomizedSearchCV(estimator=clf, param_distributions=param_dist, n_iter=20)
    rsearch.fit(xTrain, yTrain)
    # summarize the results of the random parameter search
    print(rsearch.best_estimator_)
    model = rsearch.best_estimator_
    model.fit(xTrain, yTrain)  # refit the best configuration
    pred = model.predict(xTest)
    pred[pred>max(yTrain)*1.05] = max(yTrain)*1.05  # clip extreme predictions
    pred[pred<0] = 0
    pred = np.round(pred)
    return [rsearch.best_estimator_, LinExp(pred, yTest)]
def robin(x, y):
    """
    Grid-search 36 RandomForestRegressor configurations (n_estimators x
    max_features x max_depth) on one 80/20 split and return the LinExp
    score of each, in construction order.
    """
    xTrain, xTest, yTrain, yTest = faireSplitting(x, y, 0.8)
    #del x
    #del y
    listModel = []
    nest = [10, 20, 30]  # n_estimators candidates
    mfea = [30, 70, 100]  # max_features candidates
    mdep = [3, 5, 8, 10]  # max_depth candidates
    for i in nest:
        for j in mfea:
            for k in mdep:
                listModel.append(RandomForestRegressor(n_estimators=i, bootstrap=False, max_depth=k, max_features=j))
    # GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=10, random_state=0), svm.SVC()
    res = []
    # NOTE(review): 'time' is used below but never imported in this module.
    start_time = time.time()
    i = 0
    for model in listModel:
        i = i+1
        #start_time = time.time()
        print(i)  # progress counter
        model.fit(xTrain, yTrain)
        #model.score(xTrain, yTrain)
        pred = model.predict(xTest)
        pred[pred>max(yTrain)*1.05] = max(yTrain)*1.05  # clip extreme predictions
        pred[pred<0] = 0
        pred = np.round(pred)
        res.append(LinExp(pred, yTest))
    print("--- %s seconds ---" % str((time.time() - start_time)))
    return res
def robinTel(x,):
    # NOTE(review): 'y', 'start_time', 'mean', and 'time' are undefined
    # here, and the signature's trailing comma still takes one argument —
    # pasted exploration code that cannot run as written.
    res = []
    # Evaluate the fixed 'Téléphonie' forest on 10 random 80/20 splits.
    for i in range(10):
        xTrain, xTest, yTrain, yTest = faireSplitting(x, y, 0.8)
        #del x
        #del y
        model = RandomForestRegressor(n_estimators=40, bootstrap=False, max_depth=1, max_features=12)
        model.fit(xTrain, yTrain)
        #model.score(xTrain, yTrain)
        pred = model.predict(xTest)
        pred[pred>max(yTrain)*1.05] = max(yTrain)*1.05  # clip extreme predictions
        pred[pred<0] = 0
        pred = np.round(pred)
        res.append(LinExp(pred, yTest))
    LinExp(pred, yTest)
    mean(res)
    bins = range(-300, 300, 600)
    plt.hist(pred-yTest)
    plt.hist(res)
    print("--- %s seconds ---" % str((time.time() - start_time)))
    return res
# print("PCA")
# pos = [1,3,5,10,20,30,40,50, 60,62, 65, 70, 75,78, 80, 90]
# resAcc = []
# resLin = []
# for i in pos:
# pca = decomposition.PCA(n_components=i)
# pca.fit(xTrain)
# PCAxTrain = pca.transform(xTrain)
# model = linear_model.LinearRegression()
# model.fit(PCAxTrain, yTrain)
# model.score(PCAxTrain, yTrain)
# pred = model.predict(pca.transform(xTest))
# pred =np.floor(np.round(pred))
# resAcc.append(Accuracy(pred, yTest.values))
# resLin.append(LinExp(pred, yTest.values))
# print(resAcc)
# print(resLin)
# plt.plot(pos, resLin)
# plt.show()
# plt.plot(pos, resAcc)
# plt.show()
#######################################################################
#best accuracy en % :
#52.5071805097
#linEx :
#23987.9825805
## del x['TPER_TEAM']
# x['YEAR'] = x['DATE'].str[0:4]
# x['MONTH'] = x['DATE'].str[5:7]
# # x['DAY'] = x['DATE'].str[8:10]
# #x['HOUR'] = x['DATE'].str[-12:-10].astype(int)
# x['HOUR'] = x['HOUR']+ ':'+((x['DATE'].str[-9:-8].astype(int)==3)*0.5).astype(str)
# x['HOUR'] = x['DATE'].str[-12:-8]
# del x['DATE']
#
# x=pd.get_dummies(x) |
LaRiffle/axa_challenge | fonction_py/train.py | from fonction_py.tools import *
from fonction_py.preprocess import *
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn import svm
from sklearn import decomposition
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import uniform as sp_randint
from sklearn import datasets
from sklearn.linear_model import Ridge
from fonction_py.tim import *
import time
def faireTout():
    """Train one model per ASS_ASSIGNMENT, predict every submission slot and
    write the completed predictions to ``pouranalyse.txt``.

    Resolves an unresolved git merge conflict that made this function a
    SyntaxError: the origin/master flow is kept (per-assignment models from
    ``faireListModel`` plus the dedicated ``telephoniePred`` pipeline for
    'Téléphonie'), as it is the more complete of the two branches.

    :return: the submission DataFrame with its 'prediction' column filled in
    """
    fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS']  # columns to read
    listmodel = faireListModel()  # (assignment name, untrained model) pairs
    data = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)  # training data
    resultat = pd.read_csv("data/submission.txt", sep="\t")  # test slots to fill
    for model in listmodel:
        print(model[0])  # assignment currently being processed
        x, y = preprocess(data.copy(), model[0])  # build features for this assignment
        model[1].fit(x, y)
        (xTest, souvenir) = preprocessFINAL(x, model[0])
        pred = model[1].predict(xTest)
        pred[pred > max(y) * 1.05] = max(y) * 1.05  # cap: never predict far above the observed max
        pred[pred < 0] = 0  # call counts cannot be negative
        pred = np.round(pred)
        # Fix: int(pred) raises TypeError on an array; cast element-wise instead.
        souvenir['prediction'] = pred.astype(int)
        # Merge predictions back on (DATE, ASS_ASSIGNMENT); the merge creates
        # prediction_x (old) and prediction_y (new) which are summed below.
        resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT'])
        resultat = resultat.fillna(0)  # slots not predicted by this model get 0
        resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']
        del resultat['prediction_x']
        del resultat['prediction_y']
    # 'Téléphonie' has its own dedicated prediction pipeline.
    # NOTE(review): 'Téléphonie' is also present in listmodel, so its slots end
    # up with the SUM of both predictions — confirm this accumulation is intended.
    x, y = preprocess(data.copy(), 'Téléphonie')
    (xTest, souvenir) = preprocessFINAL(x, 'Téléphonie')
    pred = telephoniePred(x, y, xTest)
    pred[pred > max(y) * 1.05] = max(y) * 1.05
    pred[pred < 0] = 0
    pred = np.round(pred)
    souvenir['prediction'] = pred.astype(int)
    resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT'])
    resultat = resultat.fillna(0)
    resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']
    del resultat['prediction_x']
    del resultat['prediction_y']
    resultat['prediction'] = resultat['prediction'].astype(int)
    resultat.to_csv("pouranalyse.txt", sep="\t", index=False, encoding='utf-8')
    return resultat
def faireListModel():
    """Return the list of (ASS_ASSIGNMENT name, untrained regressor) pairs.

    Every random forest shares the same fixed hyper-parameters; only
    bootstrap, max_depth, max_features and n_estimators vary per assignment.
    """

    def _forest(bootstrap, max_depth, max_features, n_estimators):
        # Common settings factored out of the 22 per-assignment forests.
        return RandomForestRegressor(
            bootstrap=bootstrap, criterion='mse', max_depth=max_depth,
            max_features=max_features, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=n_estimators, n_jobs=1, oob_score=False,
            random_state=None, verbose=0, warm_start=False)

    return [
        ('CAT', linear_model.LinearRegression()),
        ('CMS', _forest(False, 5, 30, 10)),
        ('Crises', linear_model.LinearRegression()),
        ('Domicile', _forest(True, 30, 30, 90)),
        ('Gestion', _forest(True, 30, 'auto', 100)),
        ('Gestion - Accueil Telephonique', _forest(True, 20, 30, 70)),
        ('Gestion Assurances', _forest(False, 20, 30, 20)),
        ('Gestion Clients', _forest(True, 10, 90, 50)),
        ('Gestion DZ', _forest(True, 5, 30, 30)),
        ('Gestion Relation Clienteles', _forest(True, 10, 90, 110)),
        ('Gestion Renault', _forest(True, 30, 50, 30)),
        ('Japon', _forest(False, 10, 30, 30)),
        ('Manager', _forest(True, 10, 30, 30)),
        ('Mécanicien', _forest(True, 20, 'auto', 100)),
        ('Médical', _forest(True, 30, 'auto', 100)),
        ('Nuit', _forest(True, 20, 'auto', 100)),
        ('Prestataires', _forest(True, 20, 'auto', 100)),
        ('RENAULT', _forest(True, 80, 'auto', 100)),
        ('RTC', _forest(True, 20, 'auto', 100)),
        ('Regulation Medicale', linear_model.LinearRegression()),
        ('SAP', _forest(False, 20, 30, 30)),
        ('Services', _forest(False, 30, 30, 30)),
        ('Tech. Axa', _forest(True, 20, 'auto', 100)),
        ('Tech. Inter', _forest(False, 30, 30, 30)),
        ('Tech. Total', _forest(True, 70, 'auto', 100)),
        ('Téléphonie', GradientBoostingRegressor(
            loss='huber', alpha=0.9, n_estimators=100, max_depth=3,
            learning_rate=.1, min_samples_leaf=9, min_samples_split=9)),
    ]
LaRiffle/axa_challenge | fonction_py/preprocess.py | from numpy import *
import pandas as pd
import datetime
from datetime import timedelta
def sum_duplicated():
    """Collapse the raw training export: sum CSPL_RECEIVED_CALLS over
    duplicated (ASS_ASSIGNMENT, DATE, WEEK_END, DAY_WE_DS) rows and write
    the result to data/trainPure.csv.
    """
    fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ] # columns to read
    x=pd.read_csv("data/train_2011_2012_2013.csv", sep=";", usecols=fields) # read the full raw export
    # groupby+sum merges duplicated slots; the deduplicated frame is persisted for later runs
    pd.DataFrame(x.groupby(('ASS_ASSIGNMENT', 'DATE', 'WEEK_END', 'DAY_WE_DS'), squeeze =False).sum()).to_csv("data/trainPure.csv", sep=';', encoding='utf_8')
def preprocessTOTAL(selectAss):
    """Build aligned train/test feature matrices for one ASS_ASSIGNMENT.

    Reads data/trainPure.csv (train) and data/submission.txt (test), derives
    date features, the number of calls received 7 days earlier ('s7'), and
    holiday/vacation indicator columns, then one-hot encodes both frames and
    keeps only the feature columns they have in common (same order).

    :param selectAss: assignment name to filter on, or False to keep all rows
    :return: (xTest, x, souvenir, y) where souvenir keeps the DATE and
             ASS_ASSIGNMENT of the test rows so predictions can be merged back
    """
    fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS']  # columns to read
    x = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)  # training data
    # ----------------------------- training frame -----------------------------
    if(selectAss != False):  # keep a single assignment
        x = x[x['ASS_ASSIGNMENT'] == selectAss]
        del x['ASS_ASSIGNMENT']
    x['YEAR'] = x['DATE'].str[0:4]
    x['MONTH'] = x['DATE'].str[5:7]
    x['DAY'] = x['DATE'].str[8:10]
    x['HOUR'] = x['DATE'].str[-12:-8]
    x['DATE'] = pd.to_datetime(x['DAY']+'/'+x['MONTH']+'/'+x['YEAR'])
    # Calls received at the same half-hour slot 7 days earlier -> feature 's7'
    tmp = pd.DataFrame()
    tmp['HOUR'] = x['HOUR']
    tmp['DATE'] = x['DATE'] - timedelta(days=7)
    tmp[['DATE', 'HOUR', 's7']] = pd.merge(tmp[['DATE', 'HOUR']], x[['DATE', 'HOUR', 'CSPL_RECEIVED_CALLS']], on=['HOUR', 'DATE'], how='left')
    x = pd.concat([x, tmp['s7']], axis=1)
    # Fix: the original chained indexing (x['s7'][mask] = ...) can assign to a
    # temporary copy and be silently lost; .loc writes back reliably.
    missing = pd.isnull(x['s7'])
    x.loc[missing, 's7'] = x.loc[missing, 'CSPL_RECEIVED_CALLS']
    # Holiday / vacation indicator columns, one per column of each CSV
    file = ['joursFeries', 'vacances']
    for f in file:
        jf = pd.read_csv("data/"+f+".csv", sep=";")
        for n in list(jf):
            x[n] = x['DATE'].apply(lambda d: d.strftime('%d/%m/%Y')).isin(jf[n])
    # ------------------------------- test frame --------------------------------
    xTest = pd.read_csv("data/submission.txt", sep="\t")  # slots to predict
    del xTest['prediction']
    souvenir = xTest.copy()  # keeps DATE/ASS_ASSIGNMENT in the submission format
    if(selectAss != False):
        xTest = xTest[xTest['ASS_ASSIGNMENT'] == selectAss]
        souvenir = souvenir[souvenir['ASS_ASSIGNMENT'] == selectAss]
        del xTest['ASS_ASSIGNMENT']
    xTest['YEAR'] = xTest['DATE'].str[0:4]
    xTest['MONTH'] = xTest['DATE'].str[5:7]
    xTest['DAY'] = xTest['DATE'].str[8:10]
    xTest['HOUR'] = xTest['DATE'].str[-12:-8]
    xTest['DATE'] = pd.to_datetime(xTest['DAY']+'/'+xTest['MONTH']+'/'+xTest['YEAR'])
    # 's7' for the test rows comes from the TRAINING frame, 7 days back
    tmp = pd.DataFrame()
    tmp['HOUR'] = xTest['HOUR']
    tmp['DATE'] = xTest['DATE'] - timedelta(days=7)
    tmp = pd.merge(tmp, x[['DATE', 'HOUR', 'CSPL_RECEIVED_CALLS']], on=['HOUR', 'DATE'], how='left')
    tmp = tmp.rename(columns={'CSPL_RECEIVED_CALLS': 's7'})
    xTest['s7'] = tmp['s7'].values
    xTest['tmp'] = xTest['DATE'].dt.dayofweek  # weekday number, mapped to its French name below
    jour = pd.DataFrame(['Lundi', 'Mardi', 'Mercredi', 'Jeudi', 'Vendredi', 'Samedi', 'Dimanche'])
    jour.columns = ['DAY_WE_DS']
    jour['tmp'] = [0, 1, 2, 3, 4, 5, 6]
    xTest = pd.merge(xTest, jour)  # attach the day name to each row
    xTest['WEEK_END'] = xTest['DAY_WE_DS'].isin(['Samedi', 'Dimanche'])
    for f in file:
        jf = pd.read_csv("data/"+f+".csv", sep=";")
        for n in list(jf):
            xTest[n] = xTest['DATE'].apply(lambda d: d.strftime('%d/%m/%Y')).isin(jf[n])
    # Target, one-hot encoding, and feature alignment between x and xTest
    y = x['CSPL_RECEIVED_CALLS']
    del x['CSPL_RECEIVED_CALLS']
    del x['DATE']
    x = pd.get_dummies(x)
    del xTest['DATE']
    xTest = pd.get_dummies(xTest)  # one column per categorical level
    s = set(list(x))
    ss = set(list(xTest))
    for tmp in s.difference(ss):  # drop features present only in x
        del x[tmp]
    for tmp in ss.difference(s):  # drop features present only in xTest
        del xTest[tmp]
    xTest = xTest[list(x)]  # same column order for both frames
    return (xTest.fillna(0), x, souvenir, y)
LaRiffle/axa_challenge | fonction_py/test arima.py | from fonction_py.tools import *
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn import svm
from sklearn import decomposition
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import uniform as sp_randint
from sklearn import datasets
from sklearn.linear_model import Ridge
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.arima_model import ARIMA
# Exploratory ARIMA study on the 'CAT' assignment time series.
fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS']  # initial column list
c = pd.DataFrame()
fields = ['DATE', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS']  # overrides the list above; only these are read
t = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)
resultat = pd.read_csv("data/submission.txt", sep="\t")  # submission slots (unused below)
t['DATE'] = pd.to_datetime(t['DATE'])
t = t[t['ASS_ASSIGNMENT'] == 'CAT']  # study a single assignment
ts = pd.Series(t['CSPL_RECEIVED_CALLS'].values, index=t['DATE'])
t = pd.DataFrame({'call': t['CSPL_RECEIVED_CALLS'].values}, index=t['DATE'])

# Fix: ts_log and ts_log_diff were referenced below but never defined,
# so the script crashed with a NameError. Standard stationarisation:
# log-transform the series, then take its first difference.
# NOTE(review): slots with zero calls give -inf under log — confirm 'CAT'
# has no zero slots, or shift the series before taking the log.
ts_log = np.log(ts)
ts_log_diff = ts_log - ts_log.shift()
ts_log_diff = ts_log_diff.dropna()

# ACF/PACF plots used to pick the AR and MA orders.
lag_acf = acf(ts, nlags=20)
lag_pacf = pacf(ts, nlags=20, method='ols')
plt.subplot(121)
plt.plot(lag_acf)
plt.axhline(y=0, linestyle='--', color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)), linestyle='--', color='gray')  # 95% confidence band
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)), linestyle='--', color='gray')
plt.title('Autocorrelation Function')
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0, linestyle='--', color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)), linestyle='--', color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)), linestyle='--', color='gray')
plt.title('Partial Autocorrelation Function')
plt.tight_layout()

# AR(18) on the log series; the d=1 differencing is done by ARIMA itself.
model = ARIMA(ts_log, order=(18, 1, 0))
results_AR = model.fit(disp=-1)
plt.plot(ts_log_diff)
plt.plot(results_AR.fittedvalues, color='red')
plt.title('RSS: %.4f'% sum((results_AR.fittedvalues-ts_log_diff)**2))
def test_stationarity(timeseries):
    """Plot rolling mean/std of *timeseries* and print an augmented
    Dickey-Fuller stationarity test report.

    :param timeseries: pandas Series indexed by time
    """
    # Determine rolling statistics.
    # Fix: pd.rolling_mean / pd.rolling_std were deprecated in pandas 0.18
    # and later removed; the Series.rolling accessor is the replacement.
    rolmean = timeseries.rolling(window=100).mean()
    rolstd = timeseries.rolling(window=100).std()

    # Plot rolling statistics:
    orig = plt.plot(timeseries, color='blue',label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label = 'Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)

    # Perform Dickey-Fuller test:
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
    for key,value in dftest[4].items():
        dfoutput['Critical Value (%s)'%key] = value
    print(dfoutput)
|
LaRiffle/axa_challenge | fonction_py/tools.py | <filename>fonction_py/tools.py
from numpy import *
import math
import pandas
def poly_exp(X, degree):
    """Polynomial feature expansion: append element-wise powers 2..degree of
    X's original columns, so the result has D*degree columns for degree >= 2.

    For degree < 2 the input array itself is returned unchanged.
    """
    N, D = X.shape
    blocks = [X] + [X[:, 0:D] ** d for d in range(2, degree + 1)]
    return blocks[0] if len(blocks) == 1 else column_stack(blocks)
def MSE(yt,yp):
    """Deprecated placeholder: only prints a warning telling callers to use
    LinExp instead. Arguments are ignored; returns None."""
    # Keep the original (French) runtime warning verbatim.
    avertissement = "NE PAS UTILISER MSE !! utiliser LinExp !!!"
    print(avertissement)
def normalize(df):
    """Mean-center *df* and scale by its value range: (df - mean) / (max - min)."""
    value_range = df.max() - df.min()
    return (df - df.mean()) / value_range
def faireSplitting(x, y, taille):  # return xTrain, xTest, yTrain, yTest
    """Randomly split (x, y) in two: each row lands in the train part with
    probability *taille*, in the test part otherwise."""
    mask = random.rand(x.shape[0]) < taille
    return x[mask], x[~mask], y[mask], y[~mask]
def check(yEmpirique, yTest): # USE WITH PANDAS OBJECTS (relies on .values)
    """Print the accuracy (%) and the mean LinEx error of a prediction.

    Returns 0 when the two series differ in length, otherwise None.
    """
    alpha = 0.1
    if(yTest.shape[0] != yEmpirique.shape[0]):
        print("Erreur sur la taille de la prédiction")
        return 0
    print("accuracy en % :")
    print(sum(yEmpirique == yTest) * 100 / yEmpirique.shape[0])  # share of exact matches
    diff = (yTest - yEmpirique).values
    # Asymmetric LinEx penalty, accumulated left to right as in the original loop.
    linex = sum(math.exp(alpha * d) - alpha * d - 1 for d in diff)
    print("linEx :")
    print(linex / yTest.shape[0])
def LinExp(yEmpirique, yTest):  # USE WITH VECTORS: convert a DataFrame with DataFrame.values
    """Return the mean position-weighted LinEx error of a prediction.

    Each position i is weighted by a coefficient growing linearly from 1 to 3,
    giving later observations more importance.

    :param yEmpirique: predicted values
    :param yTest: true values (pandas object: .values and .shape are used)
    :return: weighted LinEx error averaged over the series length
    """
    alpha = 0.1
    # Fix: resolved an unresolved merge conflict. The HEAD branch dropped this
    # line, but the loop below requires `coeff`, so the origin/master weighting
    # is the only working version.
    coeff = linspace(1, 3, len(yEmpirique))
    linex = 0
    diff = (yTest - yEmpirique).values
    for i in range(len(diff)):
        linex = linex + coeff[i] * (math.exp(alpha * diff[i]) - alpha * diff[i] - 1)
    return linex / yTest.shape[0]
def MatLinExp(yEmpirique, yTest): # returns the per-element error list # USE WITH VECTORS: convert a DataFrame with DataFrame.values
    """Return the element-wise (unweighted) LinEx errors as a Python list."""
    alpha = 0.1
    deltas = yTest - yEmpirique
    return [math.exp(alpha * d) - alpha * d - 1 for d in deltas]
def Accuracy(yEmpirique, yTest):
    """Percentage of positions where the prediction matches the truth exactly."""
    hits = sum(yEmpirique == yTest)
    return hits * 100 / yEmpirique.shape[0]
pokepocky/smop | smop/version.py | <gh_stars>100-1000
# Package version string; bump on each release.
__version__='0.41-beta'
|
bjacquin/deezer-python | tests/test_tornado_client.py | <reponame>bjacquin/deezer-python<filename>tests/test_tornado_client.py
import pytest
from tornado.httpclient import AsyncHTTPClient
from deezer import Album
from deezer.contrib.tornado import AsyncClient
# Record/replay all HTTP interactions in this module with VCR cassettes.
pytestmark = pytest.mark.vcr


class TestAsyncClient:
    """Tests for the Tornado-based AsyncClient."""

    @pytest.fixture()
    def async_client(self):
        """Fresh AsyncClient instance for each test."""
        return AsyncClient()

    @pytest.mark.gen_test
    def test_access_token_set(self, async_client, mocker):
        """Setting access_token must append it to outgoing request URLs."""
        client_fetch = mocker.spy(AsyncHTTPClient, "fetch")
        async_client.access_token = "dummy"
        yield async_client.request("GET", "user/me")
        client_fetch.assert_called_with(
            mocker.ANY,
            "https://api.deezer.com/user/me?access_token=<PASSWORD>",
            method="GET",
        )

    @pytest.mark.gen_test
    def test_get_object(self, async_client):
        """get_album resolves to a parsed Album resource."""
        album = yield async_client.get_album(302127)
        assert isinstance(album, Album)

    @pytest.mark.gen_test
    def test_get_relation(self, async_client):
        """Tracks fetched through an album keep a reference to that album."""
        album = Album(async_client, {"id": 302127, "type": "album"})
        tracks = yield album.get_tracks()
        assert tracks[0].album is album
|
bjacquin/deezer-python | deezer/contrib/tornado.py | """
Implements an async tornado client class to query the
`Deezer API <http://developers.deezer.com/api>`_
"""
import json
from typing import Optional, Type
from urllib.parse import urlencode
from tornado.gen import Return, coroutine
from tornado.httpclient import AsyncHTTPClient
from deezer import Resource
from deezer.client import Client
class AsyncClient(Client):
    """
    An asynchronous (Tornado) client for Deezer resources.

    Construct it with the same keyword options as the synchronous client:

    >>> from deezer.contrib.tornado import AsyncClient
    >>> client = AsyncClient(app_id='foo', app_secret='bar')

    The usual accessor methods are available; they return futures that
    resolve to resources built from the API's JSON responses.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tornado's HTTP client caps the number of in-flight requests.
        self._async_client = AsyncHTTPClient(max_clients=kwargs.get("max_clients", 2))

    @coroutine
    def request(
        self,
        method: str,
        path: str,
        parent: Optional[Resource] = None,
        resource_type: Optional[Type[Resource]] = None,
        **params,
    ):
        """
        Make a request to the API and parse the response.

        :param method: HTTP verb to use: GET, POST, DELETE, ...
        :param path: path of the API call (e.g. ``'artist/1234'``).
        :param parent: reference to the parent resource, to avoid re-fetching it.
        :param resource_type: resource class to use as the top-level result.
        :param params: query parameters appended to the request URL.
        """
        if self.access_token is not None:
            params["access_token"] = str(self.access_token)
        query = urlencode(params)
        url = f"{self.base_url}/{path}"
        if query:
            url = f"{url}?{query}"
        response = yield self._async_client.fetch(url, method=method)
        payload = json.loads(response.body.decode("utf-8"))
        # Tornado coroutines deliver their result through Return.
        raise Return(
            self._process_json(payload, parent=parent, resource_type=resource_type)
        )
|
bjacquin/deezer-python | conftest.py | <filename>conftest.py<gh_stars>100-1000
import pytest
from environs import Env
import deezer
# Load variables from a local .env file (if present) so API_TOKEN can be
# supplied when recording VCR cassettes.
env = Env()
env.read_env()
@pytest.fixture()
def client():
    """Unauthenticated Deezer client built with dummy app credentials."""
    return deezer.Client( # nosec
        app_id="foo",
        app_secret="bar",
        # This is to get human readable response output in VCR cassettes
        headers={"Accept-Encoding": "identity"},
    )
@pytest.fixture()
def client_token(client):
    """Client authenticated with API_TOKEN from the environment (or a dummy)."""
    client.access_token = env("API_TOKEN", "dummy")
    return client
def clean_response(response):
    """Strip volatile headers from *response* before it is written to a cassette.

    Handles both header representations: a dict (standard client) and a list
    of (name, value) 2-tuples (Tornado client). Returns the same response dict.
    """
    volatile = {"Set-Cookie", "Date", "P3P"}
    headers = response["headers"]
    if isinstance(headers, dict):
        # Dict form: drop the volatile entries in place.
        for name in volatile:
            headers.pop(name, None)
    elif isinstance(headers, list):
        # Tuple-list form: rebuild without the volatile entries.
        response["headers"] = [
            (name, value) for name, value in headers if name not in volatile
        ]
    return response
@pytest.fixture(scope="module", autouse=True)
def vcr_config():
return {
"filter_query_parameters": [("access_token", "dummy")],
"before_record_response": clean_response,
}
|
bjacquin/deezer-python | tests/test_exceptions.py | <reponame>bjacquin/deezer-python
import pytest
import requests
from deezer.exceptions import (
DeezerForbiddenError,
DeezerHTTPError,
DeezerNotFoundError,
DeezerRetryableHTTPError,
)
@pytest.mark.parametrize(
    ("status_code", "expected_exception"),
    [
        (403, DeezerForbiddenError),
        (404, DeezerNotFoundError),
        (418, DeezerHTTPError),
        (502, DeezerRetryableHTTPError),
    ],
)
def test_deezer_http_error(status_code, expected_exception):
    """from_http_error maps each HTTP status code to the matching exception class."""
    # Build a bare response carrying only the status code under test.
    response = requests.Response()
    response.status_code = status_code
    http_error = requests.HTTPError(response=response)
    exc = DeezerHTTPError.from_http_error(http_error)
    assert isinstance(exc, expected_exception)
|
bjacquin/deezer-python | deezer/dates.py | import datetime as dt
from typing import Optional
DATE_FORMAT = "%Y-%m-%d"
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
def parse_date(date_str: str) -> Optional[dt.date]:
datetime = parse_datetime(date_str, DATE_FORMAT)
return datetime.date() if datetime else None
def parse_datetime(
datetime_str: str,
date_format: str = DATETIME_FORMAT,
) -> Optional[dt.datetime]:
if not datetime_str or datetime_str.startswith("0000-00-00"):
return None
return dt.datetime.strptime(datetime_str, date_format)
|
marchcui/pythUDS | udsoncan/services/ReadDTCInformation.py | from . import *
from udsoncan.Response import Response
from udsoncan.exceptions import *
import struct
class ReadDTCInformation(BaseService):
    # UDS service 0x19: read diagnostic trouble code (DTC) information.
    _sid = 0x19

    # Negative response codes the server may legitimately answer this service with.
    supported_negative_response = [ Response.Code.SubFunctionNotSupported,
                            Response.Code.IncorrectMessageLegthOrInvalidFormat,
                            Response.Code.RequestOutOfRange
                            ]
    class Subfunction(BaseSubfunction):
        """ReadDTCInformation subfunction values (ISO 14229 service 0x19)."""

        # Human-readable label; presumably used by BaseSubfunction for
        # pretty-printing subfunction values — TODO confirm in BaseSubfunction.
        __pretty_name__ = 'subfunction'
        reportNumberOfDTCByStatusMask = 1
        reportDTCByStatusMask = 2
        reportDTCSnapshotIdentification = 3
        reportDTCSnapshotRecordByDTCNumber = 4
        reportDTCSnapshotRecordByRecordNumber = 5
        reportDTCExtendedDataRecordByDTCNumber = 6
        reportNumberOfDTCBySeverityMaskRecord = 7
        reportDTCBySeverityMaskRecord = 8
        reportSeverityInformationOfDTC = 9
        reportSupportedDTCs = 0xA
        reportFirstTestFailedDTC = 0xB
        reportFirstConfirmedDTC = 0xC
        reportMostRecentTestFailedDTC = 0xD
        reportMostRecentConfirmedDTC = 0xE
        reportMirrorMemoryDTCByStatusMask = 0xF
        reportMirrorMemoryDTCExtendedDataRecordByDTCNumber = 0x10
        reportNumberOfMirrorMemoryDTCByStatusMask = 0x11
        reportNumberOfEmissionsRelatedOBDDTCByStatusMask = 0x12
        reportEmissionsRelatedOBDDTCByStatusMask = 0x13
        reportDTCFaultDetectionCounter = 0x14
        reportDTCWithPermanentStatus = 0x15
    @classmethod
    def assert_severity_mask(cls, severity_mask, subfunction):
        """Validate that severity_mask is provided and fits in one byte (0-0xFF)."""
        if severity_mask is None:
            raise ValueError('severity_mask must be provided for subfunction 0x%02x' % subfunction)
        ServiceHelper.validate_int(severity_mask, min=0, max=0xFF, name='Severity mask')
    @classmethod
    def assert_status_mask(cls, status_mask, subfunction):
        """Validate that status_mask is provided and fits in one byte (0-0xFF)."""
        if status_mask is None:
            raise ValueError('status_mask must be provided for subfunction 0x%02x' % subfunction)
        ServiceHelper.validate_int(status_mask, min=0, max=0xFF, name='Status mask')
    @classmethod
    def assert_dtc(cls, dtc, subfunction):
        """Validate that a DTC id is provided and fits in 24 bits (0-0xFFFFFF)."""
        if dtc is None:
            raise ValueError('A dtc value must be provided for subfunction 0x%02x' % subfunction)
        ServiceHelper.validate_int(dtc, min=0, max=0xFFFFFF, name='DTC')
    @classmethod
    def assert_snapshot_record_number(cls, snapshot_record_number, subfunction):
        """Validate that the snapshot record number is provided and fits in one byte."""
        if snapshot_record_number is None:
            raise ValueError('snapshot_record_number must be provided for subfunction 0x%02x' % subfunction)
        ServiceHelper.validate_int(snapshot_record_number, min=0, max=0xFF, name='Snapshot record number')
    @classmethod
    def assert_extended_data_record_number(cls, extended_data_record_number, subfunction):
        """Validate that the extended data record number is provided and fits in one byte."""
        if extended_data_record_number is None:
            raise ValueError('extended_data_record_number must be provided for subfunction 0x%02x' % subfunction)
        ServiceHelper.validate_int(extended_data_record_number, min=0, max=0xFF, name='Extended data record number')
    @classmethod
    def assert_extended_data_size(cls, extended_data_size, subfunction):
        """Validate that the extended data size is provided and within 0-0xFFF.

        Required because extended data is implementation specific and the
        server does not transmit its length.
        """
        if extended_data_size is None:
            raise ValueError('extended_data_size must be provided as length of data is not given by the server.')
        ServiceHelper.validate_int(extended_data_size, min=0, max=0xFFF, name='Extended data size')
@classmethod
def pack_dtc(cls, dtcid):
return struct.pack('BBB', (dtcid >> 16) & 0xFF, (dtcid >> 8) & 0xFF, (dtcid >> 0) & 0xFF)
    @classmethod
    def make_request(cls, subfunction, status_mask=None, severity_mask=None, dtc=None, snapshot_record_number=None, extended_data_record_number=None):
        """
        Generates a request for ReadDTCInformation.
        Each subfunction uses a subset of parameters.

        :param subfunction: The service subfunction. Values are defined in :class:`ReadDTCInformation.Subfunction<ReadDTCInformation.Subfunction>`
        :type subfunction: int

        :param status_mask: A DTC status mask used to filter DTC
        :type status_mask: int or :ref:`Dtc.Status <DTC_Status>`

        :param severity_mask: A severity mask used to filter DTC
        :type severity_mask: int or :ref:`Dtc.Severity <DTC_Severity>`

        :param dtc: A DTC mask used to filter DTC
        :type dtc: int or :ref:`Dtc <DTC>`

        :param snapshot_record_number: Snapshot record number
        :type snapshot_record_number: int

        :param extended_data_record_number: Extended data record number
        :type extended_data_record_number: int

        :raises ValueError: If parameters are out of range, missing or wrong type
        """
        from udsoncan import Request, Dtc

        # Request grouping for subfunctions that have the same request format.
        # Subfunctions whose request is Service ID + subfunction byte only:
        request_subfn_no_param = [
            ReadDTCInformation.Subfunction.reportSupportedDTCs,
            ReadDTCInformation.Subfunction.reportFirstTestFailedDTC,
            ReadDTCInformation.Subfunction.reportFirstConfirmedDTC,
            ReadDTCInformation.Subfunction.reportMostRecentTestFailedDTC,
            ReadDTCInformation.Subfunction.reportMostRecentConfirmedDTC,
            ReadDTCInformation.Subfunction.reportDTCFaultDetectionCounter,
            ReadDTCInformation.Subfunction.reportDTCWithPermanentStatus,

            # Documentation is confusing about reportDTCSnapshotIdentification subfunction.
            # It is presented with reportDTCSnapshotRecordByDTCNumber (2 params) but a footnote says that these 2 parameters
            # are not to be provided for reportDTCSnapshotIdentification. Therefore, it is the same as other no-params subfn
            ReadDTCInformation.Subfunction.reportDTCSnapshotIdentification
        ]

        # Subfunctions taking a single status-mask byte:
        request_subfn_status_mask = [
            ReadDTCInformation.Subfunction.reportNumberOfDTCByStatusMask,
            ReadDTCInformation.Subfunction.reportDTCByStatusMask,
            ReadDTCInformation.Subfunction.reportMirrorMemoryDTCByStatusMask,
            ReadDTCInformation.Subfunction.reportNumberOfMirrorMemoryDTCByStatusMask,
            ReadDTCInformation.Subfunction.reportNumberOfEmissionsRelatedOBDDTCByStatusMask,
            ReadDTCInformation.Subfunction.reportEmissionsRelatedOBDDTCByStatusMask
        ]

        # Subfunctions taking a 3-byte DTC plus a snapshot record number:
        request_subfn_mask_record_plus_snapshot_record_number = [
            ReadDTCInformation.Subfunction.reportDTCSnapshotRecordByDTCNumber
        ]

        # Subfunctions taking only a snapshot record number:
        request_subfn_snapshot_record_number = [
            ReadDTCInformation.Subfunction.reportDTCSnapshotRecordByRecordNumber
        ]

        # Subfunctions taking a 3-byte DTC plus an extended data record number:
        request_subfn_mask_record_plus_extdata_record_number = [
            ReadDTCInformation.Subfunction.reportDTCExtendedDataRecordByDTCNumber,
            ReadDTCInformation.Subfunction.reportMirrorMemoryDTCExtendedDataRecordByDTCNumber
        ]

        # Subfunctions taking a severity mask followed by a status mask:
        request_subfn_severity_plus_status_mask = [
            ReadDTCInformation.Subfunction.reportNumberOfDTCBySeverityMaskRecord,
            ReadDTCInformation.Subfunction.reportDTCBySeverityMaskRecord
        ]

        # Subfunctions taking a 3-byte DTC only:
        request_subfn_mask_record = [
            ReadDTCInformation.Subfunction.reportSeverityInformationOfDTC
        ]

        ServiceHelper.validate_int(subfunction, min=1, max=0x15, name='Subfunction')

        # Accept Dtc / Dtc.Status / Dtc.Severity objects as well as raw ints.
        if status_mask is not None and isinstance(status_mask, Dtc.Status):
            status_mask = status_mask.get_byte_as_int()

        if severity_mask is not None and isinstance(severity_mask, Dtc.Severity):
            severity_mask = severity_mask.get_byte_as_int()

        if dtc is not None and isinstance(dtc, Dtc):
            dtc = dtc.id

        # Build the request payload according to which group the subfunction belongs to.
        req = Request(service=cls, subfunction=subfunction)
        if subfunction in request_subfn_no_param:	# Service ID + Subfunction
            pass

        elif subfunction in request_subfn_status_mask:
            cls.assert_status_mask(status_mask, subfunction)
            req.data = struct.pack('B', status_mask)

        elif subfunction in request_subfn_mask_record_plus_snapshot_record_number:
            cls.assert_dtc(dtc, subfunction)
            cls.assert_snapshot_record_number(snapshot_record_number, subfunction)
            req.data = cls.pack_dtc(dtc) + struct.pack('B', snapshot_record_number)

        elif subfunction in request_subfn_snapshot_record_number:
            cls.assert_snapshot_record_number(snapshot_record_number, subfunction)
            req.data = struct.pack('B', snapshot_record_number)

        elif subfunction in request_subfn_mask_record_plus_extdata_record_number:
            cls.assert_dtc(dtc, subfunction)
            cls.assert_extended_data_record_number(extended_data_record_number, subfunction)
            req.data = cls.pack_dtc(dtc) + struct.pack('B', extended_data_record_number)

        elif subfunction in request_subfn_severity_plus_status_mask:
            cls.assert_status_mask(status_mask, subfunction)
            cls.assert_severity_mask(severity_mask, subfunction)
            req.data = struct.pack('BB', severity_mask, status_mask)

        elif subfunction in request_subfn_mask_record:
            cls.assert_dtc(dtc, subfunction)
            req.data = cls.pack_dtc(dtc)

        return req
@classmethod
def interpret_response(cls, response, subfunction, extended_data_size=None, tolerate_zero_padding=True, ignore_all_zero_dtc=True, dtc_snapshot_did_size=2, didconfig=None):
    """
    Populates the response ``service_data`` property with an instance of :class:`ReadDTCInformation.ResponseData<udsoncan.services.ReadDTCInformation.ResponseData>`

    :param response: The received response to interpret
    :type response: :ref:`Response<Response>`

    :param subfunction: The service subfunction. Values are defined in :class:`ReadDTCInformation.Subfunction<udsoncan.services.ReadDTCInformation.Subfunction>`
    :type subfunction: int

    :param extended_data_size: Extended data size to expect. Extended data is implementation specific, therefore, size is not standardized
    :type extended_data_size: int

    :param tolerate_zero_padding: Ignore trailing zeros in the response data avoiding raising false :class:`InvalidResponseException<udsoncan.exceptions.InvalidResponseException>`.
    :type tolerate_zero_padding: bool

    :param ignore_all_zero_dtc: Discard any DTC entries that have an ID of 0. Avoid reading extra DTCs when using a transport protocol using zero padding.
    :type ignore_all_zero_dtc: bool

    :param dtc_snapshot_did_size: Number of bytes to encode the data identifier number. Other services such as :ref:`ReadDataByIdentifier<ReadDataByIdentifier>` encode DID over 2 bytes.
            UDS standard does not define the size of the snapshot DID, therefore, it must be supplied.
    :type dtc_snapshot_did_size: int

    :param didconfig: Definition of DID codecs. Dictionary mapping a DID (int) to a valid :ref:`DidCodec<DidCodec>` class or pack/unpack string
    :type didconfig: dict[int] = :ref:`DidCodec<DidCodec>`

    :raises InvalidResponseException: If response length is wrong or does not match DID configuration
    :raises ValueError: If parameters are out of range, missing or wrong types
    :raises ConfigError: If the server returns a snapshot DID not defined in ``didconfig``
    """
    from udsoncan import Dtc, DidCodec

    ServiceHelper.validate_int(subfunction, min=1, max=0x15, name='Subfunction')

    # Response grouping for responses that are encoded the same way.
    # Each group below shares a single decoding algorithm further down.
    response_subfn_dtc_availability_mask_plus_dtc_record = [
        ReadDTCInformation.Subfunction.reportDTCByStatusMask,
        ReadDTCInformation.Subfunction.reportSupportedDTCs,
        ReadDTCInformation.Subfunction.reportFirstTestFailedDTC,
        ReadDTCInformation.Subfunction.reportFirstConfirmedDTC,
        ReadDTCInformation.Subfunction.reportMostRecentTestFailedDTC,
        ReadDTCInformation.Subfunction.reportMostRecentConfirmedDTC,
        ReadDTCInformation.Subfunction.reportMirrorMemoryDTCByStatusMask,
        ReadDTCInformation.Subfunction.reportEmissionsRelatedOBDDTCByStatusMask,
        ReadDTCInformation.Subfunction.reportDTCWithPermanentStatus
    ]

    response_subfn_number_of_dtc = [
        ReadDTCInformation.Subfunction.reportNumberOfDTCByStatusMask,
        ReadDTCInformation.Subfunction.reportNumberOfDTCBySeverityMaskRecord,
        ReadDTCInformation.Subfunction.reportNumberOfMirrorMemoryDTCByStatusMask,
        ReadDTCInformation.Subfunction.reportNumberOfEmissionsRelatedOBDDTCByStatusMask,
    ]

    response_subfn_dtc_availability_mask_plus_dtc_record_with_severity = [
        ReadDTCInformation.Subfunction.reportDTCBySeverityMaskRecord,
        ReadDTCInformation.Subfunction.reportSeverityInformationOfDTC
    ]

    response_subfn_dtc_plus_fault_counter = [
        ReadDTCInformation.Subfunction.reportDTCFaultDetectionCounter
    ]

    response_subfn_dtc_plus_sapshot_record = [
        ReadDTCInformation.Subfunction.reportDTCSnapshotIdentification
    ]

    response_sbfn_dtc_status_snapshots_records = [
        ReadDTCInformation.Subfunction.reportDTCSnapshotRecordByDTCNumber
    ]

    response_sbfn_dtc_status_snapshots_records_record_first = [
        ReadDTCInformation.Subfunction.reportDTCSnapshotRecordByRecordNumber
    ]

    response_subfn_mask_record_plus_extdata = [
        ReadDTCInformation.Subfunction.reportDTCExtendedDataRecordByDTCNumber,
        ReadDTCInformation.Subfunction.reportMirrorMemoryDTCExtendedDataRecordByDTCNumber
    ]

    response.service_data = cls.ResponseData()  # What will be returned

    if len(response.data) < 1:
        raise InvalidResponseException(response, 'Response must be at least 1 byte long (echo of subfunction)')

    response.service_data.subfunction_echo = response.data[0]  # First byte is subfunction

    # Now for each response group, we have a different decoding algorithm

    # Group 1: <DTCStatusAvailabilityMask> followed by a list of fixed-size DTC records
    # (4 bytes each, or 6 when severity and functional unit are included).
    if subfunction in response_subfn_dtc_availability_mask_plus_dtc_record + response_subfn_dtc_availability_mask_plus_dtc_record_with_severity:
        if subfunction in response_subfn_dtc_availability_mask_plus_dtc_record:
            dtc_size = 4  # DTC ID (3) + Status (1)
        elif subfunction in response_subfn_dtc_availability_mask_plus_dtc_record_with_severity:
            dtc_size = 6  # DTC ID (3) + Status (1) + Severity (1) + FunctionalUnit (1)

        if len(response.data) < 2:
            raise InvalidResponseException(response, 'Response must be at least 2 byte long (echo of subfunction and DTCStatusAvailabilityMask)')

        response.service_data.status_availability = Dtc.Status.from_byte(response.data[1])

        actual_byte = 2  # Increasing index
        while True:  # Loop until we have read all dtcs
            if len(response.data) <= actual_byte:
                break  # done
            elif len(response.data) < actual_byte + dtc_size:  # Partial record at the end
                partial_dtc_length = len(response.data) - actual_byte
                if tolerate_zero_padding and response.data[actual_byte:] == b'\x00' * partial_dtc_length:
                    break
                else:
                    # We purposely ignore extra byte for subfunction reportSeverityInformationOfDTC as it is supposed to return 0 or 1 DTC.
                    if subfunction != ReadDTCInformation.Subfunction.reportSeverityInformationOfDTC or actual_byte == 2:
                        raise InvalidResponseException(response, 'Incomplete DTC record. Missing %d bytes to response to complete the record' % (dtc_size - partial_dtc_length))
                    else:
                        # Bugfix: previously this path fell through without breaking or
                        # advancing actual_byte, making the while-loop spin forever when
                        # reportSeverityInformationOfDTC returned trailing non-zero bytes.
                        break
            else:
                dtc_bytes = response.data[actual_byte:actual_byte + dtc_size]
                if dtc_bytes == b'\x00' * dtc_size and ignore_all_zero_dtc:
                    pass  # ignore all-zero (padding) DTC entry
                else:
                    if subfunction in response_subfn_dtc_availability_mask_plus_dtc_record:
                        # Layout: <ID(3)><Status(1)>
                        dtc = Dtc(struct.unpack('>L', b'\x00' + dtc_bytes[0:3])[0])
                        dtc.status.set_byte(dtc_bytes[3])
                    elif subfunction in response_subfn_dtc_availability_mask_plus_dtc_record_with_severity:
                        # Layout: <Severity(1)><FunctionalUnit(1)><ID(3)><Status(1)>
                        dtc = Dtc(struct.unpack('>L', b'\x00' + dtc_bytes[2:5])[0])
                        dtc.severity.set_byte(dtc_bytes[0])
                        dtc.functional_unit = dtc_bytes[1]
                        dtc.status.set_byte(dtc_bytes[5])

                    response.service_data.dtcs.append(dtc)
                actual_byte += dtc_size

        response.service_data.dtc_count = len(response.service_data.dtcs)

    # Group 2: the 2 following subfunction responses have different purposes but their constructions are very similar.
    # Records of <ID(3)> + one extra byte (fault counter or snapshot record number).
    elif subfunction in response_subfn_dtc_plus_fault_counter + response_subfn_dtc_plus_sapshot_record:
        dtc_size = 4
        if len(response.data) < 1:
            raise InvalidResponseException(response, 'Response must be at least 1 byte long (echo of subfunction)')

        actual_byte = 1  # Increasing index
        dtc_map = dict()  # This map is used to append snapshot to existing DTC.

        while True:  # Loop until we have read all dtcs
            if len(response.data) <= actual_byte:
                break  # done
            elif len(response.data) < actual_byte + dtc_size:
                partial_dtc_length = len(response.data) - actual_byte
                if tolerate_zero_padding and response.data[actual_byte:] == b'\x00' * partial_dtc_length:
                    break
                else:
                    raise InvalidResponseException(response, 'Incomplete DTC record. Missing %d bytes to response to complete the record' % (dtc_size - partial_dtc_length))
            else:
                dtc_bytes = response.data[actual_byte:actual_byte + dtc_size]
                if dtc_bytes == b'\x00' * dtc_size and ignore_all_zero_dtc:
                    pass  # ignore all-zero (padding) DTC entry
                else:
                    dtcid = struct.unpack('>L', b'\x00' + dtc_bytes[0:3])[0]

                    # We create the DTC or get its reference if already created.
                    dtc_created = False
                    if dtcid in dtc_map and subfunction in response_subfn_dtc_plus_sapshot_record:
                        dtc = dtc_map[dtcid]
                    else:
                        dtc = Dtc(dtcid)
                        dtc_map[dtcid] = dtc
                        dtc_created = True

                    # We either read the DTC fault counter or Snapshot record number.
                    if subfunction in response_subfn_dtc_plus_fault_counter:
                        dtc.fault_counter = dtc_bytes[3]
                    elif subfunction in response_subfn_dtc_plus_sapshot_record:
                        record_number = dtc_bytes[3]
                        if dtc.snapshots is None:
                            dtc.snapshots = []
                        dtc.snapshots.append(record_number)

                    # Adds the DTC to the list only once, even if it carries several snapshots.
                    if dtc_created:
                        response.service_data.dtcs.append(dtc)
                actual_byte += dtc_size

        response.service_data.dtc_count = len(response.service_data.dtcs)

    # Group 3: this group of responses returns a number of DTCs available
    elif subfunction in response_subfn_number_of_dtc:
        if len(response.data) < 5:
            raise InvalidResponseException(response, 'Response must be exactly 5 bytes long ')

        response.service_data.status_availability = Dtc.Status.from_byte(response.data[1])
        response.service_data.dtc_format = response.data[2]
        response.service_data.dtc_count = struct.unpack('>H', response.data[3:5])[0]

    # Group 4: this group of responses returns DTC snapshots
    # Responses include a DTC, many snapshot records. For each record, we find many Data Identifiers.
    # We create one Dtc.Snapshot for each DID. That'll be easier to work with.
    # <DTC,RecordNumber1,NumberOfDid_X,DID1,DID2,...DIDX, RecordNumber2,NumberOfDid_Y,DID1,DID2,...DIDY, etc>
    elif subfunction in response_sbfn_dtc_status_snapshots_records:
        if len(response.data) < 5:
            raise InvalidResponseException(response, 'Response must be at least 5 bytes long ')

        dtc = Dtc(struct.unpack('>L', b'\x00' + response.data[1:4])[0])
        dtc.status.set_byte(response.data[4])

        actual_byte = 5  # Increasing index
        ServiceHelper.validate_int(dtc_snapshot_did_size, min=1, max=8, name='dtc_snapshot_did_size')

        while True:  # Loop until we have read all records
            if len(response.data) <= actual_byte:
                break  # done

            remaining_data = response.data[actual_byte:]
            if tolerate_zero_padding and remaining_data == b'\x00' * len(remaining_data):
                break

            if len(remaining_data) < 2:
                raise InvalidResponseException(response, 'Incomplete response from server. Missing "number of identifier" and following data')

            record_number = remaining_data[0]
            number_of_did = remaining_data[1]

            # Validate record number and number of DID before continuing
            if number_of_did == 0:
                raise InvalidResponseException(response, 'Server returned snapshot record #%d with no data identifier included' % (record_number))

            if len(remaining_data) < 2 + dtc_snapshot_did_size:
                raise InvalidResponseException(response, 'Incomplete response from server. Missing DID number and associated data.')

            actual_byte += 2
            for i in range(number_of_did):
                remaining_data = response.data[actual_byte:]
                snapshot = Dtc.Snapshot()  # One snapshot per DID for convenience.
                snapshot.record_number = record_number

                # As standard does not specify the length of the DID, we craft it based on a config
                did = 0
                for j in range(dtc_snapshot_did_size):
                    offset = dtc_snapshot_did_size - 1 - j
                    did |= (remaining_data[offset] << (8 * j))

                # Decode the data based on DID number.
                snapshot.did = did
                didconfig = ServiceHelper.check_did_config(did, didconfig)
                codec = DidCodec.from_config(didconfig[did])
                data_offset = dtc_snapshot_did_size

                if len(remaining_data[data_offset:]) < len(codec):
                    raise InvalidResponseException(response, 'Incomplete response. Data for DID 0x%04x is only %d bytes while %d bytes is expected' % (did, len(remaining_data[data_offset:]), len(codec)))

                snapshot.raw_data = remaining_data[data_offset:data_offset + len(codec)]
                snapshot.data = codec.decode(snapshot.raw_data)

                dtc.snapshots.append(snapshot)
                actual_byte += dtc_snapshot_did_size + len(codec)

        response.service_data.dtcs.append(dtc)
        response.service_data.dtc_count = 1

    # Group 5: this group of responses returns DTC snapshots
    # Responses include a DTC, many snapshots records. For each record, we find many Data Identifiers.
    # We create one Dtc.Snapshot for each DID. That'll be easier to work with.
    # Similar to previous subfunction group, but order of information is changed.
    # <RecordNumber1, DTC1,NumberOfDid_X,DID1,DID2,...DIDX, RecordNumber2,DTC2, NumberOfDid_Y,DID1,DID2,...DIDY, etc>
    elif subfunction in response_sbfn_dtc_status_snapshots_records_record_first:
        ServiceHelper.validate_int(dtc_snapshot_did_size, min=1, max=8, name='dtc_snapshot_did_size')

        if len(response.data) < 2:
            raise InvalidResponseException(response, 'Response must be at least 2 bytes long. Subfunction echo + RecordNumber ')

        actual_byte = 1  # Increasing index
        while True:  # Loop through response data
            if len(response.data) <= actual_byte:
                break  # done

            remaining_data = response.data[actual_byte:]
            record_number = remaining_data[0]

            # If empty response but filled with 0, it is considered ok
            if remaining_data == b'\x00' * len(remaining_data) and tolerate_zero_padding:
                break

            # If record number received but no DTC provided (allowed according to standard), we exit.
            if len(remaining_data) == 1 or tolerate_zero_padding and remaining_data[1:] == b'\x00' * len(remaining_data[1:]):
                break

            if len(remaining_data) < 5:  # Partial DTC (No DTC at all is checked above)
                raise InvalidResponseException(response, 'Incomplete response from server. Missing "DTCAndStatusRecord" and following data')

            if len(remaining_data) < 6:
                raise InvalidResponseException(response, 'Incomplete response from server. Missing number of data identifier')

            # DTC decoding
            dtc = Dtc(struct.unpack('>L', b'\x00' + remaining_data[1:4])[0])
            dtc.status.set_byte(remaining_data[4])
            number_of_did = remaining_data[5]

            actual_byte += 6
            remaining_data = response.data[actual_byte:]

            if number_of_did == 0:
                raise InvalidResponseException(response, 'Server returned snapshot record #%d with no data identifier included' % (record_number))

            if len(remaining_data) < dtc_snapshot_did_size:
                raise InvalidResponseException(response, 'Incomplete response from server. Missing DID and associated data')

            # We have a DTC and 0 DID, next loop
            if tolerate_zero_padding and remaining_data == b'\x00' * len(remaining_data):
                break

            # For each DID
            for i in range(number_of_did):
                remaining_data = response.data[actual_byte:]
                snapshot = Dtc.Snapshot()  # One snapshot per DID for convenience
                snapshot.record_number = record_number

                # As standard does not specify the length of the DID, we craft it based on a config
                did = 0
                for j in range(dtc_snapshot_did_size):
                    offset = dtc_snapshot_did_size - 1 - j
                    did |= (remaining_data[offset] << (8 * j))

                # Decode the data based on DID number.
                snapshot.did = did
                didconfig = ServiceHelper.check_did_config(did, didconfig)
                codec = DidCodec.from_config(didconfig[did])
                data_offset = dtc_snapshot_did_size

                if len(remaining_data[data_offset:]) < len(codec):
                    raise InvalidResponseException(response, 'Incomplete response. Data for DID 0x%04x is only %d bytes while %d bytes is expected' % (did, len(remaining_data[data_offset:]), len(codec)))

                snapshot.raw_data = remaining_data[data_offset:data_offset + len(codec)]
                snapshot.data = codec.decode(snapshot.raw_data)

                dtc.snapshots.append(snapshot)
                actual_byte += dtc_snapshot_did_size + len(codec)

            response.service_data.dtcs.append(dtc)

        response.service_data.dtc_count = len(response.service_data.dtcs)

    # Group 6: these subfunctions include DTC ExtraData. We give it raw to user.
    elif subfunction in response_subfn_mask_record_plus_extdata:
        # Extended data is implementation-specific; caller must tell us its size.
        cls.assert_extended_data_size(extended_data_size, subfunction)

        if len(response.data) < 5:
            raise InvalidResponseException(response, 'Incomplete response from server. Missing DTCAndStatusRecord')

        # DTC decoding
        dtc = Dtc(struct.unpack('>L', b'\x00' + response.data[1:4])[0])
        dtc.status.set_byte(response.data[4])

        actual_byte = 5  # Increasing index
        while actual_byte < len(response.data):  # Loop through data
            remaining_data = response.data[actual_byte:]
            record_number = remaining_data[0]

            if record_number == 0:  # 0 is reserved; only acceptable as zero padding
                if remaining_data == b'\x00' * len(remaining_data) and tolerate_zero_padding:
                    break
                else:
                    raise InvalidResponseException(response, 'Extended data record number given by the server is 0 but this value is a reserved value.')

            actual_byte += 1
            remaining_data = response.data[actual_byte:]
            if len(remaining_data) < extended_data_size:
                raise InvalidResponseException(response, 'Incomplete response from server. Length of extended data for DTC 0x%06x with record number 0x%02x is %d bytes but smaller than given data_size of %d bytes' % (dtc.id, record_number, len(remaining_data), extended_data_size))

            exdata = Dtc.ExtendedData()
            exdata.record_number = record_number
            exdata.raw_data = remaining_data[0:extended_data_size]

            dtc.extended_data.append(exdata)

            actual_byte += extended_data_size

        response.service_data.dtcs.append(dtc)
        response.service_data.dtc_count = len(response.service_data.dtcs)
class ResponseData(BaseResponseData):
    """
    .. data:: subfunction_echo

            Subfunction echoed back by the server

    .. data:: dtcs

            :ref:`DTC<DTC>` instances and their status read from the server.

    .. data:: dtc_count

            Number of DTC read or available

    .. data:: dtc_format

            Integer indicating the format of the DTC. See :ref:`DTC.Format<DTC_Format>`

    .. data:: status_availability

            :ref:`Dtc.Status<DTC_Status>` indicating which status the server supports

    .. data:: extended_data

            List of bytes containing the DTC extended data
    """

    def __init__(self):
        super().__init__(ReadDTCInformation)

        # Scalar fields: echo of the request subfunction, DTC format and
        # supported-status mask are unknown until a response is decoded.
        self.subfunction_echo = None
        self.dtc_format = None
        self.status_availability = None
        self.dtc_count = 0

        # Collections filled by interpret_response().
        self.dtcs = []
        self.extended_data = []
|
marchcui/pythUDS | udsoncan/services/RoutineControl.py | from . import *
from udsoncan.Response import Response
from udsoncan.exceptions import *
import struct
class RoutineControl(BaseService):
    """UDS RoutineControl service (SID 0x31): start/stop a server routine or fetch its results."""
    _sid = 0x31

    class ControlType(BaseSubfunction):
        """
        RoutineControl defined subfunctions
        """
        __pretty_name__ = 'control type'

        startRoutine = 1
        stopRoutine = 2
        requestRoutineResults = 3

    supported_negative_response = [Response.Code.SubFunctionNotSupported,
                                   Response.Code.IncorrectMessageLegthOrInvalidFormat,
                                   Response.Code.ConditionsNotCorrect,
                                   Response.Code.RequestSequenceError,
                                   Response.Code.RequestOutOfRange,
                                   Response.Code.SecurityAccessDenied,
                                   Response.Code.GeneralProgrammingFailure
                                   ]

    @classmethod
    def make_request(cls, routine_id, control_type, data=None):
        """
        Generates a request for RoutineControl

        :param routine_id: The routine ID. Value should be between 0 and 0xFFFF
        :type routine_id: int

        :param control_type: Service subfunction. Allowed values are from 0 to 0x7F
        :type control_type: int

        :param data: Optional additional data to provide to the server
        :type data: bytes

        :raises ValueError: If parameters are out of range, missing or wrong type
        """
        from udsoncan import Request

        ServiceHelper.validate_int(routine_id, min=0, max=0xFFFF, name='Routine ID')
        ServiceHelper.validate_int(control_type, min=0, max=0x7F, name='Routine control type')

        # Validate optional payload up front, before building the request.
        if data is not None and not isinstance(data, bytes):
            raise ValueError('data must be a valid bytes object')

        request = Request(service=cls, subfunction=control_type)
        request.data = struct.pack('>H', routine_id)  # Routine ID is big-endian 16-bit
        if data is not None:
            request.data += data
        return request

    @classmethod
    def interpret_response(cls, response):
        """
        Populates the response ``service_data`` property with an instance of :class:`RoutineControl.ResponseData<udsoncan.services.RoutineControl.ResponseData>`

        :param response: The received response to interpret
        :type response: :ref:`Response<Response>`

        :raises InvalidResponseException: If length of ``response.data`` is too short
        """
        # Minimum layout: control type echo (1) + routine ID echo (2)
        if len(response.data) < 3:
            raise InvalidResponseException(response, "Response data must be at least 3 bytes")

        response.service_data = cls.ResponseData()
        response.service_data.control_type_echo = response.data[0]
        response.service_data.routine_id_echo = struct.unpack(">H", response.data[1:3])[0]
        # Anything after the first 3 bytes is the optional routineStatusRecord.
        response.service_data.routine_status_record = response.data[3:] if len(response.data) > 3 else b''

    class ResponseData(BaseResponseData):
        """
        .. data:: control_type_echo

                Requests subfunction echoed back by the server

        .. data:: routine_id_echo

                Requests routine ID echoed back by the server.

        .. data:: routine_status_record

                Additional data associated with the response.
        """
        def __init__(self):
            super().__init__(RoutineControl)
            self.control_type_echo = None
            self.routine_id_echo = None
            self.routine_status_record = None
|
marchcui/pythUDS | udsoncan/configs.py | <gh_stars>0
# Default configuration applied to a new Client. Each entry may be
# overridden by the user through the client's config dictionary.
default_client_config = {
    # Exception behavior: raise instead of returning the faulty Response object.
    'exception_on_negative_response': True,
    'exception_on_invalid_response': True,
    'exception_on_unexpected_response': True,

    # SecurityAccess: callback computing the key from the seed, plus its parameters.
    'security_algo': None,
    'security_algo_params': None,

    # Decoding tolerance for transport layers that pad frames with zeros.
    'tolerate_zero_padding': True,
    'ignore_all_zero_dtc': True,
    'dtc_snapshot_did_size': 2,        # Not specified in standard. 2 bytes matches other services format.

    # Memory addressing formats, in bits: 8, 16, 24, 32 or 40.
    'server_address_format': None,
    'server_memorysize_format': None,

    # Codec definitions for ReadDataByIdentifier / InputOutputControlByIdentifier.
    'data_identifiers': {},
    'input_output': {},

    # Timeouts, in seconds.
    'request_timeout': 5,
    'p2_timeout': 1,
    'p2_star_timeout': 5,
}
|
marchcui/pythUDS | test/client/test_diagnostic_session_control.py | <gh_stars>0
from udsoncan.client import Client
from udsoncan import services
from udsoncan.exceptions import *
from test.ClientServerTest import ClientServerTest
class TestDiagnosticSessionControl(ClientServerTest):
    """Client-side tests for the DiagnosticSessionControl (0x10) service.

    NOTE(review): methods come in pairs ``test_xxx`` / ``_test_xxx`` — presumably the
    ClientServerTest harness runs the server-side stub (``test_xxx``) and the client-side
    assertions (``_test_xxx``) concurrently, matching them by name; confirm against
    test/ClientServerTest.py. Byte strings below are exact wire payloads and must not change.
    """

    def __init__(self, *args, **kwargs):
        ClientServerTest.__init__(self, *args, **kwargs)

    def test_dsc_success(self):
        # Server side: expect request 0x10 (DSC) with subfunction 0x01 (defaultSession).
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x10\x01")
        self.conn.fromuserqueue.put(b"\x50\x01\x99\x88")	# Positive response

    def _test_dsc_success(self):
        # Client side: session echo and extra parameter record are decoded from the response.
        response = self.udsclient.change_session(services.DiagnosticSessionControl.Session.defaultSession)
        self.assertEqual(response.service_data.session_echo, 1)
        self.assertEqual(response.service_data.session_param_records, b"\x99\x88")

    def test_dsc_success_spr(self):
        # Subfunction 0x81 = 0x01 with the suppressPosRspMsgIndicationBit (0x80) set.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x10\x81")
        self.conn.fromuserqueue.put("wait")	#Synchronize

    def _test_dsc_success_spr(self):
        # With suppressed positive response, the client returns None instead of a Response.
        with self.udsclient.suppress_positive_response:
            response = self.udsclient.change_session(services.DiagnosticSessionControl.Session.defaultSession)
            self.assertEqual(response, None)
        self.conn.fromuserqueue.get(timeout=0.2)	#Avoid closing connection prematurely

    def test_dsc_denied_exception(self):
        # Server replies with NRC 0x12 (subFunctionNotSupported) for session 0x08.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x10\x08")
        self.conn.fromuserqueue.put(b"\x7F\x10\x12")	# Subfunction not supported

    def _test_dsc_denied_exception(self):
        # Default config: a negative response raises NegativeResponseException.
        with self.assertRaises(NegativeResponseException) as handle:
            self.udsclient.change_session(0x08)
        response = handle.exception.response

        self.assertTrue(response.valid)
        self.assertTrue(issubclass(response.service, services.DiagnosticSessionControl))
        self.assertEqual(response.code, 0x12)

    def test_dsc_denied_no_exception(self):
        self.wait_request_and_respond(b"\x7F\x10\x12")	# Subfunction not supported

    def _test_dsc_denied_no_exception(self):
        # With exceptions disabled, the negative response is returned for inspection.
        self.udsclient.config['exception_on_negative_response'] = False
        response = self.udsclient.change_session(0x08)
        self.assertTrue(response.valid)
        self.assertFalse(response.positive)

    def test_dsc_bad_subfunction_exception(self):
        # Positive response but echoing session 0x02 instead of the requested 0x01.
        self.wait_request_and_respond(b"\x50\x02")	# Positive response

    def _test_dsc_bad_subfunction_exception(self):
        with self.assertRaises(UnexpectedResponseException):
            self.udsclient.change_session(services.DiagnosticSessionControl.Session.defaultSession)

    def test_dsc_bad_subfunction_no_exception(self):
        self.wait_request_and_respond(b"\x50\x02")	# Positive response

    def _test_dsc_bad_subfunction_no_exception(self):
        # Mismatched echo is flagged as 'unexpected' rather than raising.
        self.udsclient.config['exception_on_unexpected_response'] = False
        response = self.udsclient.change_session(services.DiagnosticSessionControl.Session.defaultSession)
        self.assertTrue(response.valid)
        self.assertTrue(response.unexpected)

    def test_dsc_invalidservice_exception(self):
        # Service ID 0x00 does not exist -> response cannot be decoded.
        self.wait_request_and_respond(b"\x00\x02")	#Inexistent Service

    def _test_dsc_invalidservice_exception(self):
        with self.assertRaises(InvalidResponseException) as handle:
            self.udsclient.change_session(0x02)

    def test_dsc_invalidservice_no_exception(self):
        self.wait_request_and_respond(b"\x00\x02")	#Inexistent Service

    def _test_dsc_invalidservice_no_exception(self):
        self.udsclient.config['exception_on_invalid_response'] = False
        response = self.udsclient.change_session(0x02)
        self.assertFalse(response.valid)

    def test_ecu_reset_wrongservice_exception(self):
        # 0x7E is a valid positive response, but for TesterPresent, not DSC.
        self.udsclient.config['exception_on_invalid_response'] = False
        self.wait_request_and_respond(b"\x7E\x00")	# Valid but wrong service (Tester Present)

    def _test_ecu_reset_wrongservice_exception(self):
        with self.assertRaises(UnexpectedResponseException) as handle:
            self.udsclient.change_session(0x55)

    def test_ecu_reset_wrongservice_no_exception(self):
        self.wait_request_and_respond(b"\x7E\x00")	# Valid but wrong service (Tester Present)

    def _test_ecu_reset_wrongservice_no_exception(self):
        self.udsclient.config['exception_on_unexpected_response'] = False
        response = self.udsclient.change_session(0x55)
        self.assertTrue(response.valid)
        self.assertTrue(response.unexpected)

    def test_bad_param(self):
        # No server interaction: validation fails client-side before sending.
        pass

    def _test_bad_param(self):
        # Session number must fit in a single byte (0..0xFF minus the suppress bit).
        with self.assertRaises(ValueError):
            success = self.udsclient.change_session(0x100)

        with self.assertRaises(ValueError):
            success = self.udsclient.change_session(-1)
|
marchcui/pythUDS | udsoncan/services/AccessTimingParameter.py | from . import *
from udsoncan.Response import Response
from udsoncan.exceptions import *
class AccessTimingParameter(BaseService):
    """UDS AccessTimingParameter service (SID 0x83): read or modify communication timing parameters."""
    _sid = 0x83

    class AccessType(BaseSubfunction):
        """
        AccessTimingParameter defined subfunctions
        """
        __pretty_name__ = 'access type'

        readExtendedTimingParameterSet = 1
        setTimingParametersToDefaultValues = 2
        readCurrentlyActiveTimingParameters = 3
        setTimingParametersToGivenValues = 4

    supported_negative_response = [Response.Code.SubFunctionNotSupported,
                                   Response.Code.IncorrectMessageLegthOrInvalidFormat,
                                   Response.Code.ConditionsNotCorrect,
                                   Response.Code.RequestOutOfRange
                                   ]

    @classmethod
    def make_request(cls, access_type, timing_param_record=None):
        """
        Generates a request for AccessTimingParameter

        :param access_type: Service subfunction. Allowed values are from 0 to 0x7F
        :type access_type: int

        :param timing_param_record: Data associated with request. Must be present only when access_type=``AccessType.setTimingParametersToGivenValues`` (4)
        :type timing_param_record: bytes

        :raises ValueError: If parameters are out of range, missing or wrong type
        """
        from udsoncan import Request

        ServiceHelper.validate_int(access_type, min=0, max=0x7F, name='Access type')

        # A record is only meaningful (and mandatory) for setTimingParametersToGivenValues.
        if timing_param_record is not None and access_type != cls.AccessType.setTimingParametersToGivenValues:
            raise ValueError('timing_param_record can only be set when access_type is setTimingParametersToGivenValues"')

        if timing_param_record is None and access_type == cls.AccessType.setTimingParametersToGivenValues:
            raise ValueError('A timing_param_record must be provided when access_type is "setTimingParametersToGivenValues"')

        request = Request(service=cls, subfunction=access_type)
        if timing_param_record is not None:
            if not isinstance(timing_param_record, bytes):
                raise ValueError("timing_param_record must be a valid bytes objects")
            # Bugfix: was `request.data += timing_param_record`, which fails when
            # Request.data is not pre-initialized to bytes (compare RequestDownload,
            # which must explicitly set `request.data = b""` before appending).
            request.data = timing_param_record

        return request

    @classmethod
    def interpret_response(cls, response):
        """
        Populates the response ``service_data`` property with an instance of :class:`AccessTimingParameter.ResponseData<udsoncan.services.AccessTimingParameter.ResponseData>`

        :param response: The received response to interpret
        :type response: :ref:`Response<Response>`

        :raises InvalidResponseException: If length of ``response.data`` is too short
        """
        # Minimum layout: echo of the access type (1 byte).
        if len(response.data) < 1:
            raise InvalidResponseException(response, "Response data must be at least 1 byte")

        response.service_data = cls.ResponseData()
        response.service_data.access_type_echo = response.data[0]
        # Remaining bytes, if any, form the implementation-specific timing parameter record.
        response.service_data.timing_param_record = response.data[1:] if len(response.data) > 1 else b''

    class ResponseData(BaseResponseData):
        """
        .. data:: access_type_echo

                Request subfunction echoed back by the server

        .. data:: timing_param_record

                Additional data associated with the response.
        """
        def __init__(self):
            super().__init__(AccessTimingParameter)
            self.access_type_echo = None
            self.timing_param_record = None
|
marchcui/pythUDS | udsoncan/services/RequestDownload.py | from . import *
from udsoncan.Response import Response
from udsoncan.exceptions import *
import struct
class RequestDownload(BaseService):
    """UDS RequestDownload service (SID 0x34): initiate a data transfer from client to server."""
    _sid = 0x34
    _use_subfunction = False

    supported_negative_response = [Response.Code.IncorrectMessageLegthOrInvalidFormat,
                                   Response.Code.ConditionsNotCorrect,
                                   Response.Code.RequestOutOfRange,
                                   Response.Code.SecurityAccessDenied,
                                   Response.Code.UploadDownloadNotAccepted
                                   ]

    @classmethod
    def normalize_data_format_identifier(cls, dfi):
        """Return a valid DataFormatIdentifier: the default (no compression/encryption)
        when ``dfi`` is None, otherwise ``dfi`` itself after type validation."""
        from udsoncan import DataFormatIdentifier
        if dfi is None:
            dfi = DataFormatIdentifier()

        if not isinstance(dfi, DataFormatIdentifier):
            raise ValueError('dfi must be an instance of DataFormatIdentifier')

        return dfi

    @classmethod
    def make_request(cls, memory_location, dfi=None):
        """
        Generates a request for RequestDownload

        :param memory_location: The address and the size of the memory block to be written.
        :type memory_location: :ref:`MemoryLocation <MemoryLocation>`

        :param dfi: Optional :ref:`DataFormatIdentifier <DataFormatIdentifier>` defining the compression and encryption scheme of the data.
                If not specified, the default value of 00 will be used, specifying no encryption and no compression
        :type dfi: :ref:`DataFormatIdentifier <DataFormatIdentifier>`

        :raises ValueError: If parameters are out of range, missing or wrong type
        """
        from udsoncan import Request, MemoryLocation

        dfi = cls.normalize_data_format_identifier(dfi)

        if not isinstance(memory_location, MemoryLocation):
            raise ValueError('memory_location must be an instance of MemoryLocation')

        request = Request(service=cls)
        # Payload: DFI + ALFID + address + memory size, in that order (ISO 14229).
        request.data = b""
        request.data += dfi.get_byte()  # Data Format Identifier
        request.data += memory_location.alfid.get_byte()  # AddressAndLengthFormatIdentifier
        request.data += memory_location.get_address_bytes()
        request.data += memory_location.get_memorysize_bytes()

        return request

    @classmethod
    def interpret_response(cls, response):
        """
        Populates the response ``service_data`` property with an instance of :class:`RequestDownload.ResponseData<udsoncan.services.RequestDownload.ResponseData>`

        :param response: The received response to interpret
        :type response: :ref:`Response<Response>`

        :raises InvalidResponseException: If length of ``response.data`` is too short
        :raises NotImplementedError: If the ``maxNumberOfBlockLength`` value is encoded over more than 8 bytes.
        """
        if len(response.data) < 1:
            raise InvalidResponseException(response, "Response data must be at least 1 bytes")

        # High nibble of the first byte = number of bytes encoding maxNumberOfBlockLength.
        lfid = int(response.data[0]) >> 4

        if lfid > 8:
            raise NotImplementedError('This client does not support number bigger than %d bits' % (8 * 8))

        if len(response.data) < lfid + 1:
            raise InvalidResponseException(response, "Length of data (%d) is too short to contains the number of block of given length (%d)" % (len(response.data), lfid))

        # Right-align the lfid bytes in an 8-byte buffer and decode them big-endian.
        todecode = bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00')
        for i in range(1, lfid + 1):
            todecode[-i] = response.data[lfid + 1 - i]

        response.service_data = cls.ResponseData()
        # Bugfix: decode as unsigned ('>Q'). maxNumberOfBlockLength is a length and can
        # never be negative; '>q' (signed) yielded a negative value for an 8-byte length
        # with the most significant bit set.
        response.service_data.max_length = struct.unpack('>Q', todecode)[0]

    class ResponseData(BaseResponseData):
        """
        .. data:: max_length

                (int) Maximum number of data blocks to write
        """
        def __init__(self):
            super().__init__(RequestDownload)
            self.max_length = None
|
marchcui/pythUDS | udsoncan/services/WriteMemoryByAddress.py | from . import *
from udsoncan.Response import Response
from udsoncan.exceptions import *
class WriteMemoryByAddress(BaseService):
    """UDS service 0x3D (WriteMemoryByAddress): writes a block of data into server memory."""
    _sid = 0x3D
    _use_subfunction = False

    supported_negative_response = [Response.Code.IncorrectMessageLegthOrInvalidFormat,
                                   Response.Code.ConditionsNotCorrect,
                                   Response.Code.RequestOutOfRange,
                                   Response.Code.SecurityAccessDenied,
                                   Response.Code.GeneralProgrammingFailure
                                   ]

    @classmethod
    def make_request(cls, memory_location, data):
        """
        Generates a request for WriteMemoryByAddress

        :param memory_location: The address and the size of the memory block to write.
        :type memory_location: :ref:`MemoryLocation <MemoryLocation>`

        :param data: The data to write into memory.
        :type data: bytes

        :raises ValueError: If parameters are out of range, missing or wrong type
        """
        from udsoncan import Request, MemoryLocation

        if not isinstance(memory_location, MemoryLocation):
            raise ValueError('Given memory location must be an instance of MemoryLocation')

        if not isinstance(data, bytes):
            raise ValueError('data must be a bytes object')

        request = Request(service=cls)

        # Payload layout: ALFID byte, address, memory size, then the raw data.
        request.data = b''
        request.data += memory_location.alfid.get_byte()  # AddressAndLengthFormatIdentifier
        request.data += memory_location.get_address_bytes()
        request.data += memory_location.get_memorysize_bytes()
        request.data += data

        return request

    @classmethod
    def interpret_response(cls, response, memory_location):
        """
        Populates the response ``service_data`` property with an instance of :class:`WriteMemoryByAddress.ResponseData<udsoncan.services.WriteMemoryByAddress.ResponseData>`

        :param response: The received response to interpret
        :type response: :ref:`Response<Response>`

        :param memory_location: The memory location used for the request.
                The bytes position varies depending on the ``memory_location`` format
        :type memory_location: :ref:`MemoryLocation <MemoryLocation>`

        :raises ValueError: If ``memory_location`` is not an instance of MemoryLocation
        :raises InvalidResponseException: If length of ``response.data`` is too short
        """
        from udsoncan import MemoryLocation

        if not isinstance(memory_location, MemoryLocation):
            raise ValueError('Given memory location must be an instance of MemoryLocation')

        address_bytes = memory_location.get_address_bytes()
        memorysize_bytes = memory_location.get_memorysize_bytes()

        # The positive response echoes the ALFID byte, the address and the size.
        expected_response_size = 1 + len(address_bytes) + len(memorysize_bytes)
        if len(response.data) < expected_response_size:
            # Typo fixed in the error message ("Repsonse" -> "Response").
            raise InvalidResponseException(response, 'Response should be at least %d bytes' % (expected_response_size))

        response.service_data = cls.ResponseData()
        response.service_data.alfid_echo = response.data[0]

        offset = 1
        length = len(address_bytes)  # reuse the bytes computed above instead of re-querying
        address_echo = response.data[offset:offset + length]  # was a hard-coded 1; use offset consistently
        offset += length

        length = len(memorysize_bytes)
        memorysize_echo = response.data[offset:offset + length]
        response.service_data.memory_location_echo = MemoryLocation.from_bytes(address_bytes=address_echo, memorysize_bytes=memorysize_echo)

    class ResponseData(BaseResponseData):
        """
        .. data:: alfid_echo

                :ref:`AddressAndLengthFormatIdentifier <AddressAndLengthFormatIdentifier>` used in the :ref:`MemoryLocation <MemoryLocation>` object echoed back by the server.

        .. data:: memory_location_echo

                An instance of :ref:`MemoryLocation <MemoryLocation>` that includes the address, size and alfid that the server echoed back.
        """

        def __init__(self):
            super().__init__(WriteMemoryByAddress)
            self.alfid_echo = None
            self.memory_location_echo = None
|
matthew-bahloul/browser-utils | src/po_utils/common_actions/element_interactions.py | """
by_locator : tuple --> (<selenium By object>, <selector string>)
x_offset : int --> integer value of x offset in pixels
y_offset : int --> integer value of y offset in pixels
x_destination : int --> integer value of x location on page
y_desitination : int --> integer value of y location on page
by_locator_source : tuple --> (<selenium By object>, <selector string>)
by_locator_target : tuple --> (<selenium By object>, <selector string>)
clear_first : bool --> toggle for clearing input field before writing text to it
press_enter : bool --> toggle for sending the ENTER key to an input field after writing to it
"""
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from po_utils.common_actions.waits import wait_for_page_to_load, wait_until_displayed, wait_until_not_displayed
@wait_until_displayed
@wait_for_page_to_load
def click_element(self, by_locator:tuple, x_offset:int=0, y_offset:int=0) -> None:
    """Scroll the element into a comfortable viewport position, then click it.

    by_locator : tuple --> (<selenium By object>, <selector string>)
    x_offset / y_offset : int --> click offset in pixels from the element;
        (0, 0) clicks the element itself.
    """
    wait = WebDriverWait(self._driver, self._driver_wait_time)
    element = wait.until(EC.visibility_of_element_located(by_locator))

    # Scroll so the element sits roughly mid-viewport before clicking.
    scroll_height = self._driver.execute_script('return document.body.scrollHeight')
    window_size = self._driver.get_window_size()['height']
    if element.location['y'] > (scroll_height - .5 * window_size):
        self._driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
    elif element.location['y'] < (.5 * window_size):
        self._driver.execute_script('window.scrollTo(0, 0)')
    else:
        self._driver.execute_script(f"window.scrollTo({element.location['x']}, {element.location['y'] - .5 * window_size});")

    if x_offset == 0 and y_offset == 0:
        try:
            wait.until(EC.element_to_be_clickable(by_locator)).click()
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). Retry once — the first click can
            # fail on a transient overlay or animation.
            wait.until(EC.element_to_be_clickable(by_locator)).click()
    else:
        ActionChains(self._driver).move_to_element_with_offset(
            wait.until(EC.visibility_of_element_located(by_locator)), x_offset, y_offset).click().perform()
@wait_until_displayed
@wait_for_page_to_load
def click_and_drag_element_by_offset(self, by_locator:tuple, x_destination:int, y_desitination:int) -> None:
    """Drag the located element by the given pixel offsets.

    by_locator : tuple --> (<selenium By object>, <selector string>)
    x_destination / y_desitination : int --> pixel offsets to drag by
    """
    visible = EC.visibility_of_element_located(by_locator)
    source = WebDriverWait(self._driver, self._driver_wait_time).until(visible)
    chain = ActionChains(self._driver)
    chain.drag_and_drop_by_offset(source, x_destination, y_desitination).perform()
@wait_until_displayed
@wait_for_page_to_load
def click_and_drag_element(self, by_locator_source:tuple, by_locator_target:tuple) -> None:
    """Drag the source element and drop it onto the target element."""
    wait = WebDriverWait(self._driver, self._driver_wait_time)
    grab = wait.until(EC.visibility_of_element_located(by_locator_source))
    drop = wait.until(EC.visibility_of_element_located(by_locator_target))
    ActionChains(self._driver).drag_and_drop(grab, drop).perform()
@wait_until_displayed
@wait_for_page_to_load
def send_text_to_element(self, by_locator:tuple, text:str, clear_first:bool=True, press_enter:bool=False) -> None:
    """Type *text* into the located input field.

    clear_first : bool --> empty the field before typing
    press_enter : bool --> send the ENTER key after typing
    """
    def field():
        # Re-locate on every interaction, matching the original behavior,
        # so a re-rendered input is never stale.
        return self._driver.find_element(*by_locator)

    if clear_first:
        field().clear()
    field().send_keys(text)
    if press_enter:
        field().send_keys(Keys.ENTER)
@wait_until_displayed
@wait_for_page_to_load
def hover_over_element(self, by_locator:tuple) -> None:
    """Move the mouse pointer over the located element."""
    target = self._driver.find_element(*by_locator)
    ActionChains(self._driver).move_to_element(target).perform()
|
matthew-bahloul/browser-utils | src/po_utils/common_actions/gets.py | """
by_locator : tuple --> (<selenium By object>, <selector string>)
attribute : str --> 'attribute of an html element'
"""
from po_utils.common_actions.waits import wait_for_page_to_load, wait_until_displayed, wait_until_not_displayed
@wait_until_displayed
@wait_for_page_to_load
def get_elements(self, by_locator: tuple) -> object:
    """Return every element matching *by_locator* (empty list when none match)."""
    method, selector = by_locator
    return self._driver.find_elements(method, selector)
@wait_until_displayed
@wait_for_page_to_load
def get_element(self, by_locator: tuple) -> object:
    """Return the first element matching *by_locator*, or [] when none match.

    Bug fix: the original indexed ``[0]`` before the emptiness check, so a
    locator with no matches raised IndexError instead of returning [].
    """
    matches = get_elements(self, by_locator)
    return matches[0] if matches else []
@wait_until_displayed
@wait_for_page_to_load
def get_element_attribute(self, by_locator: tuple, attribute: str) -> object:
    """Return the value of *attribute* on the located element."""
    element = self._driver.find_element(*by_locator)
    return element.get_attribute(attribute)
@wait_until_displayed
@wait_for_page_to_load
def get_element_text(self, by_locator: tuple) -> object:
    """Return the visible text of the located element."""
    element = self._driver.find_element(*by_locator)
    return element.text
|
matthew-bahloul/browser-utils | src/po_utils/common_actions/booleans.py | """
by_locator : tuple --> (<selenium By object>, <selector string>)
attribute : str --> 'attribute of an html element'
text : str --> 'text of the element'
is_case_sensitive : bool --> boolean to toggle case sensitivity
"""
from po_utils.common_actions.waits import wait_for_page_to_load, wait_until_displayed
@wait_until_displayed
@wait_for_page_to_load
def has_attribute(self, by_locator: tuple, attribute: str) -> bool:
    """Return True when the located element carries *attribute*.

    by_locator : tuple --> (<selenium By object>, <selector string>)
    attribute : str --> attribute of an html element
    """
    # `is not None` instead of `!= None`: identity comparison is the
    # idiomatic (PEP 8) way to probe for a missing attribute.
    return self._driver.find_element(*by_locator).get_attribute(attribute) is not None
@wait_until_displayed
@wait_for_page_to_load
def has_text(self, by_locator: tuple, text: str, is_case_sensitive:bool=False) -> bool:
    """Return True when the located element's text equals *text*.

    is_case_sensitive : bool --> compare exactly instead of case-insensitively
    """
    actual = self._driver.find_element(*by_locator).text
    if not is_case_sensitive:
        return actual.lower() == text.lower()
    return actual == text
@wait_for_page_to_load
def is_visible(self, by_locator: tuple) -> bool:
    """Return True when the element exists and is displayed; False otherwise."""
    try:
        return self._driver.find_element(*by_locator).is_displayed()
    except Exception:
        # A missing element simply means "not visible".
        return False
|
matthew-bahloul/browser-utils | src/po_utils/common_actions/waits.py | from functools import wraps
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def wait_for_page_to_load(function):
    """Decorator: best-effort wait for document.readyState == 'complete'
    before and after the wrapped call.

    Bug fix: the wrapper read ``self.driver`` / ``self.__driver_wait_time``,
    but the page objects (Component/Modal/Page) store ``self._driver`` and
    ``self._driver_wait_time``. The resulting AttributeError was silently
    swallowed by the except clauses, so the waits never actually ran. (Note
    also that ``__driver_wait_time`` at module level does not receive the
    class-private name mangling the original author may have expected.)
    """
    @wraps(function)
    def wrapper(self, *args, **kwargs):
        def page_loaded(_):
            return self._driver.execute_script('return document.readyState') == 'complete'
        try:
            WebDriverWait(self._driver, self._driver_wait_time).until(page_loaded)
        except Exception:
            pass  # best-effort: never block the wrapped call
        result = function(self, *args, **kwargs)
        try:
            WebDriverWait(self._driver, self._driver_wait_time).until(page_loaded)
        except Exception:
            pass
        return result
    return wrapper
def wait_until_displayed(function):
    """Decorator: best-effort wait for the locator in args[0] to become visible.

    Bug fixes: use the page objects' real attributes (``_driver`` /
    ``_driver_wait_time``) instead of the non-existent ``driver`` /
    ``__driver_wait_time``, and catch ``Exception`` rather than a bare
    ``except`` (which also swallowed KeyboardInterrupt/SystemExit).
    """
    @wraps(function)
    def wrapper(self, *args, **kwargs):
        try:
            WebDriverWait(self._driver, self._driver_wait_time).until(
                EC.visibility_of_element_located(args[0]))
        except Exception:
            pass  # timeouts are non-fatal; the wrapped call still runs
        return function(self, *args, **kwargs)
    return wrapper
def wait_until_not_displayed(function):
    """Decorator: best-effort wait for the locator in args[0] to disappear.

    Bug fixes: use the page objects' real attributes (``_driver`` /
    ``_driver_wait_time``) instead of the non-existent ``driver`` /
    ``__driver_wait_time``, and catch ``Exception`` rather than a bare
    ``except``.
    """
    @wraps(function)
    def wrapper(self, *args, **kwargs):
        try:
            WebDriverWait(self._driver, self._driver_wait_time).until(
                EC.invisibility_of_element_located(args[0]))
        except Exception:
            pass  # timeouts are non-fatal; the wrapped call still runs
        return function(self, *args, **kwargs)
    return wrapper
|
matthew-bahloul/browser-utils | src/po_utils/base_objects.py | <filename>src/po_utils/base_objects.py
from po_utils.common_actions import element_interactions, gets, booleans
from re import compile, search
# helpers -----------------------------------------------
def __get_locator_by_re(locators, pattern):
    """Return the name of the first attribute of *locators* whose NAME matches
    *pattern*, or None when nothing matches.

    Bug fix: ``re.search(pattern, string)`` takes the pattern first; the
    original call passed the arguments reversed (and handed a compiled
    pattern where the string belongs), so every lookup raised TypeError.
    """
    pattern = compile(pattern) if type(pattern) is str else pattern
    return next((loc for loc in locators.__dict__ if pattern.search(loc)), None)
# -------------------------------------------------------
class Component:
    """Base page-object for a reusable page fragment (widget, panel, ...).

    Shares a selenium WebDriver with the page under test and exposes the
    common interaction helpers as bound methods.
    """
    def __init__(self, driver, wait_time=5):
        # driver: selenium WebDriver instance shared with the page under test
        # wait_time: implicit wait budget, in seconds
        self._driver = driver
        self._driver_wait_time = wait_time
        self._driver.implicitly_wait(self._driver_wait_time)
        self.locators = Locator()

    # common element interactions -----------------------------------------------
    # Module-level helper functions attached as methods (they take `self`).
    click_element = element_interactions.click_element
    click_and_drag_element_by_offset = element_interactions.click_and_drag_element_by_offset
    click_and_drag_element = element_interactions.click_and_drag_element
    hover_over_element = element_interactions.hover_over_element
    send_text_to_element = element_interactions.send_text_to_element

    # common gets -----------------------------------------------
    get_elements = gets.get_elements
    get_element = gets.get_element
    get_element_attribute = gets.get_element_attribute
    get_element_text = gets.get_element_text

    # common booleans -----------------------------------------------
    has_attribute = booleans.has_attribute
    has_text = booleans.has_text
    is_visible = booleans.is_visible
class Locator:
    """A plain attribute bag of element locators.

    Adding two Locator objects produces a NEW bag carrying both sets of
    attributes (the right-hand side wins on name collisions). ``+=`` behaves
    identically: it rebinds the name to a fresh merged instance rather than
    mutating in place.
    """

    def _combined_with(self, other):
        # Merge both attribute dicts into a fresh instance; `other` overrides.
        merged = self.__class__()
        merged.__dict__.update({**self.__dict__, **other.__dict__})
        return merged

    def __add__(self, other):
        return self._combined_with(other)

    # `a += b` has the same create-a-new-bag semantics as `a + b`.
    __iadd__ = __add__
class Modal:
    """Base page-object for a modal dialog with close/confirm/decline buttons."""

    def __init__(self, driver, wait_time=5):
        # driver: selenium WebDriver instance shared with the page under test
        # wait_time: implicit wait budget, in seconds
        self._driver = driver
        self._driver_wait_time = wait_time
        self._driver.implicitly_wait(self._driver_wait_time)
        self.locators = Locator()

    # modal specific -----------------------------------------------
    def _locator_matching(self, pattern):
        """Return the first locator VALUE whose attribute name matches *pattern*, else None.

        Replaces the call to the module-level ``__get_locator_by_re`` helper,
        which was broken three ways: (1) referencing a dunder-prefixed module
        function from inside a class is name-mangled to
        ``_Modal__get_locator_by_re`` and raised NameError; (2) the helper
        passed ``re.search`` arguments reversed; (3) it returned the attribute
        NAME where ``click_element`` needs the (By, selector) tuple.
        """
        for name in self.locators.__dict__:
            if search(pattern, name):
                return getattr(self.locators, name)
        return None

    def close(self):
        """Click the modal's close/exit button."""
        locator = self._locator_matching(r'(close.*button|exit.*button)')
        if locator is None:
            raise AttributeError('No locator for the close/exit button')
        # Bound method: previously called as a module function without `self`.
        self.click_element(locator)
    exit = close

    def confirm(self):
        """Click the modal's confirm/okay button."""
        locator = self._locator_matching(r'(confirm.*button|okay.*button)')
        if locator is None:
            raise AttributeError('No locator for the confirm/okay button')
        self.click_element(locator)
    okay = confirm

    def decline(self):
        """Click the modal's decline/cancel button."""
        locator = self._locator_matching(r'(decline.*button|cancel.*button)')
        if locator is None:
            raise AttributeError('No locator for the cancel/decline button')
        self.click_element(locator)
    cancel = decline

    # common element interactions -----------------------------------------------
    click_element = element_interactions.click_element
    click_and_drag_element_by_offset = element_interactions.click_and_drag_element_by_offset
    click_and_drag_element = element_interactions.click_and_drag_element
    hover_over_element = element_interactions.hover_over_element
    send_text_to_element = element_interactions.send_text_to_element

    # common gets -----------------------------------------------
    get_elements = gets.get_elements
    get_element = gets.get_element
    get_element_attribute = gets.get_element_attribute
    get_element_text = gets.get_element_text

    # common booleans -----------------------------------------------
    has_attribute = booleans.has_attribute
    has_text = booleans.has_text
    is_visible = booleans.is_visible
class Page:
    """Base page-object for a full browser page; wraps navigation controls and
    the common element helpers around a shared selenium WebDriver."""
    def __init__(self, driver, wait_time=5):
        # driver: selenium WebDriver instance
        # wait_time: implicit wait budget, in seconds
        self._driver = driver
        self._driver_wait_time = wait_time
        self._driver.implicitly_wait(self._driver_wait_time)

    # browser-specific controls -----------------------------------------------
    def get_current_url(self):
        """Return the URL currently loaded in the driver."""
        return self._driver.current_url

    def get_current_title(self):
        """Return the title of the current page."""
        return self._driver.title

    def go_to_url(self, url):
        """Navigate the browser to *url*."""
        self._driver.get(url)

    def refresh(self) -> None:
        """Reload the current page."""
        self._driver.refresh()

    def go_back(self) -> None:
        """Go one step back in browser history."""
        self._driver.back()

    def go_forward(self) -> None:
        """Go one step forward in browser history."""
        self._driver.forward()

    def scroll_to_position_on_page(self, x_position, y_position):
        """Scroll the window to absolute pixel coordinates."""
        self._driver.execute_script(f'window.scrollTo({x_position}, {y_position})')

    def switch_to_tab(self, tab_position):
        """Focus the browser tab/window at index *tab_position* (0-based)."""
        self._driver.switch_to.window(self._driver.window_handles[tab_position])

    def switch_to_frame(self, frame_reference=None):
        """Enter the given iframe, or return to the top document when None."""
        self._driver.switch_to.frame(frame_reference) if frame_reference else self._driver.switch_to.default_content()

    def quit(self):
        """Close the browser and end the WebDriver session."""
        self._driver.quit()

    # common element interactions -----------------------------------------------
    # Module-level helper functions attached as methods (they take `self`).
    click_element = element_interactions.click_element
    click_and_drag_element_by_offset = element_interactions.click_and_drag_element_by_offset
    click_and_drag_element = element_interactions.click_and_drag_element
    hover_over_element = element_interactions.hover_over_element
    send_text_to_element = element_interactions.send_text_to_element

    # common gets -----------------------------------------------
    get_elements = gets.get_elements
    get_element = gets.get_element
    get_element_attribute = gets.get_element_attribute
    get_element_text = gets.get_element_text

    # common booleans -----------------------------------------------
    has_attribute = booleans.has_attribute
    has_text = booleans.has_text
    is_visible = booleans.is_visible
|
arcticlimer/flaskquotes | src/blueprints/user_routes.py | from flask import render_template
from flask import Blueprint
from flask import redirect
from flask import flash
from flask import abort
from flask_login import current_user
from flask_login import login_user
from utils.decorators import redirect_auth
from utils.decorators import templated
from utils.session import user_suggestions
from utils.session import validate_signup
from utils.session import validate_login
from utils.session import register_user
from utils.session import find_user
from forms.forms import RegisterForm
from forms.forms import QuoteInput
from forms.forms import LoginForm
user_routes = Blueprint(name="user_routes",
import_name=__name__,
template_folder="templates")
@user_routes.route('/', methods=["GET"])
@templated("social/index.html")
@redirect_auth()
def index():
    """Renders index page.

    Returns None, so @templated renders the template with an empty context.
    @redirect_auth sends an already-authenticated visitor to their own
    profile instead of showing this page.
    """
@user_routes.route("/signup", methods=["GET", "POST"])
@templated("auth/sign_up.html")
@redirect_auth()
def sign_up():
    """Sign-up page: register the user and log them in on a valid submission."""
    form = RegisterForm()

    if not form.validate_on_submit():
        # GET request or failed form validation: just render the form.
        return dict(form=form)

    username: str = form.username.data
    usertag: str = form.usertag.data
    password: str = form.password.data

    # Only register when the usertag is still free.
    if validate_signup(usertag):
        login_user(register_user(username, usertag, password))
        return redirect(f"/user/{usertag}")

    return dict(form=form)
@user_routes.route("/login", methods=["GET", "POST"])
@templated("auth/login.html")
@redirect_auth()
def login():
    """Login page: authenticate credentials and start a flask_login session."""
    form = LoginForm()

    if form.validate_on_submit():
        username: str = form.username.data
        if validate_login(username, form.password.data):
            login_user(find_user(username), remember=form.remember_me.data)
            return redirect(f"/user/{username}")
        flash("Invalid user credentials.")

    # GET request, failed validation, or bad credentials: render the form.
    return dict(form=form)
@user_routes.route("/user/<string:usertag>", methods=["GET", "POST"])
@templated("social/profile.html")
def profile(usertag: str):
    """Main application page: renders a user's profile and quote feed."""
    profile_owner = find_user(usertag)
    if profile_owner is None:
        # Unknown usertag.
        abort(404)

    # NOTE: quote submission itself is handled by the /post endpoint in
    # server_routes; this form only renders the input box.
    quote_input = QuoteInput()

    suggestions = user_suggestions(user=current_user,
                                   prof_owner=profile_owner,
                                   user_num=5)

    # Newest quotes first. A real list (not the reversed() iterator) is
    # required because the template checks whether it is empty.
    quotes = profile_owner.quotes
    quotes.reverse()

    return dict(quote_input=quote_input,
                rec_users=suggestions,
                user=profile_owner,
                quotes=quotes)
|
arcticlimer/flaskquotes | src/utils/session.py | <filename>src/utils/session.py
from typing import Optional
from random import sample, randint
from utils.decorators import commit
from database.tables import Quote
from database.tables import User
def find_user(usersearch: str) -> Optional[User]:
    """Look a user up by usertag first, then by username.

    Parameters
    ----------
    usersearch : `str`
        A username or usertag

    Return
    ------
    user : `Optional[User]`
        The matching user instance, or None when neither lookup succeeds
    """
    by_tag = User.query.filter_by(usertag=usersearch).first()
    if by_tag is not None:
        return by_tag
    return User.query.filter_by(username=usersearch).first()
def validate_signup(usertag: str) -> bool:
    """Check that *usertag* is still free before signup.

    Parameter
    ---------
    usertag : `str`
        Usertag

    Return
    ------
    result : `bool`
        True when no user with this usertag exists yet
    """
    existing = User.query.filter_by(usertag=usertag).first()
    return existing is None
def validate_login(usertag: str, password: str) -> bool:
    """Compare the input password with the user's stored password hash.

    Parameters
    ----------
    usertag : `str`
        User usertag
    password : `str`
        Password inserted in the login form

    Return
    ------
    valid : `bool`
        True when the user exists and the password matches
    """
    user = find_user(usertag)
    return user is not None and user.verify_password(password)
def user_suggestions(user: User, prof_owner: User, user_num: int) -> list:
    """Return up to *user_num* randomly-picked users to suggest.

    Parameters
    ----------
    user : `User`
        The current user using the application
    prof_owner : `User`
        Owner of the profile being viewed
    user_num : `int`
        Maximum number of users to return

    Notes
    -----
    Already-followed users, the current user and the profile owner are
    excluded. When fewer candidates remain than *user_num*, all of them
    are returned (in random order).
    """
    candidates = User.query.all()
    try:
        candidates = [c for c in candidates
                      if not user.is_following(c) and user != c != prof_owner]
    except AttributeError:
        # Anonymous visitor: there is no follow relation to filter on.
        candidates = [c for c in candidates if c != prof_owner]

    # sample() raises when asked for more items than available, hence min().
    return sample(candidates, min(user_num, len(candidates)))
@commit()
def register_user(username: str, usertag: str, password: str) -> User:
    """Register the user on the database and returns it

    Parameters
    ----------
    username : `str`
        Username
    usertag : `str`
        Usertag
    password : `str`
        User password (stored only as a salted hash, never in plain text)

    Returns
    -------
    u : `User`
        Registered user instance (added and committed by @commit)
    """
    # Pick a random avatar from the dicebear avatar service.
    pic_num = randint(0, 1000)
    user_pic = f'https://avatars.dicebear.com/api/avataaars/{pic_num}.svg'
    u = User(username=username, usertag=usertag, profile_pic=user_pic)
    u.create_hashed_password(password)
    return u
@commit()
def create_quote(user: User, content: str) -> Quote:
    """Creates a quote linking it to it's author.

    Parameters
    ----------
    user : `User`
        Quote author
    content : `str`
        String containing quote content

    Returns
    -------
    quote : `Quote`
        The new quote (added and committed by @commit; callers such as
        server_routes.post_quote use the returned instance). The return
        annotation was corrected from ``None`` accordingly.
    """
    return Quote(content=content, user_id=user.id)
|
arcticlimer/flaskquotes | src/blueprints/server_routes.py | from flask import Blueprint
from flask import redirect
from flask import jsonify
from flask import request
from flask import abort
from flask_login import current_user
from flask_login import logout_user
from utils.session import create_quote
from utils.session import find_user
server_routes = Blueprint(name="server_routes",
import_name=__name__,
template_folder="templates")
@server_routes.route("/delete/<int:quote_id>", methods=["DELETE"])
def remove_quote(quote_id: int):
    """Removes the given ID quote from the database.

    Notes
    -----
    Any failure — anonymous user, user not the owner of the quote, or
    quote not found — yields a 401 status. (The docstring previously
    promised a redirect/404 split the code never implemented.)
    """
    try:
        current_user.remove_quote(quote_id)
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        abort(401)
    return jsonify(
        status="success"
    )
@server_routes.route("/post", methods=["POST"])
def post_quote():
    """Validate a posted quote and store it when acceptable."""
    if not current_user.is_authenticated:
        abort(401)

    content = request.form.get("content", '')

    if not 0 < len(content) < 150:
        # Rejected: empty or too long.
        return jsonify(
            success=False,
            reason="Quote length must be lesser than 150 characters."
        )

    # Successful post: persist and echo the stored quote back to the client.
    quote = create_quote(current_user, content)
    return jsonify(
        content=content,
        timestamp=quote.fmt_time,
        success=True,
        id=quote.id
    )
@server_routes.route("/follow/<string:usertag>")
def follow(usertag: str):
    """Make the current user follow the user named in the endpoint.

    Notes
    -----
    - A 404 status is raised when the target user does not exist.
    - The `authenticated` flag lets clientside JS redirect anonymous users
      to the login page; tampering with it cannot change server state.
    """
    target = find_user(usertag)
    if target is None:
        abort(404)

    authenticated = current_user.is_authenticated
    if authenticated:
        current_user.follow(target)

    return jsonify(
        authenticated=authenticated
    )
@server_routes.route("/unfollow/<string:usertag>")
def unfollow(usertag: str):
    """Make the current user unfollow the user named in the endpoint.

    Notes
    -----
    - A 404 status is raised when the target user does not exist.
    """
    target = find_user(usertag)
    if target is None:
        abort(404)

    authenticated = current_user.is_authenticated
    if authenticated:
        current_user.unfollow(target)

    return jsonify(
        authenticated=authenticated
    )
@server_routes.route("/logout")
def logout():
    """Ends a flask_login user session and redirects to the index page."""
    # No-op for anonymous visitors; they are still sent to the index.
    if current_user.is_authenticated:
        logout_user()
    return redirect('/')
|
arcticlimer/flaskquotes | src/database/tables.py | from __future__ import annotations
from datetime import datetime
from werkzeug.security import generate_password_hash
from werkzeug.security import check_password_hash
from flask_login import UserMixin
from utils.decorators import commit
from exts import login
from exts import db
@login.user_loader
def load_user(user_id):
    """flask_login user-loader: resolve the id stored in the session cookie
    to a User row.

    The parameter was renamed from ``id`` — it shadowed the builtin.
    flask_login passes the value positionally, so callers are unaffected.
    """
    return User.query.get(str(user_id))
# Association table
# Self-referential many-to-many: one row per (follower, followed) pair of users.
followers_table = db.Table("followers",
    db.Column("follower_id", db.Integer, db.ForeignKey("user.id")),
    db.Column("followed_id", db.Integer, db.ForeignKey("user.id"))
)
class User(db.Model, UserMixin):
    """User table for database

    Notes
    -----
    One-to-many relationship with `Quote` via ``_quotes`` and a
    self-referential many-to-many follower graph via ``_following``.
    """
    # Table fields
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), index=True)
    usertag = db.Column(db.String(20), index=True)
    profile_pic = db.Column(db.String(50))

    # For security reasons, only the hashed passwords are stored.
    password_hash = db.Column(db.String(128))

    # Using the Quote class as a column and referencing
    # the instance as "author" on the Quote object.
    _quotes = db.relationship("Quote", backref="author", lazy="dynamic")

    # Many-to-many relationship between users
    _following = db.relationship(
        "User",
        secondary=followers_table,
        primaryjoin=(followers_table.c.follower_id == id),
        secondaryjoin=(followers_table.c.followed_id == id),
        backref=db.backref("_followers", lazy="dynamic"),
        lazy="dynamic"
    )

    def create_hashed_password(self, password: str) -> None:
        """Generate and store a salted hash of *password* on this user.

        Annotation corrected from ``-> str``: nothing is returned, the hash
        is written to ``self.password_hash``.
        """
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password: str) -> bool:
        """
        Hashes the user input, compares it with the user hashed password
        and returns a boolean, whether the passwords match or not.
        """
        return check_password_hash(self.password_hash, password)

    def is_following(self, user: User) -> bool:
        """Return True when this user already follows *user*."""
        return user in self._following

    @commit(add=False)
    def follow(self, user: User) -> None:
        """Add *user* to the followed set (no-op when already followed);
        @commit persists the change."""
        if not self.is_following(user):
            self._following.append(user)

    @commit(add=False)
    def unfollow(self, user: User) -> None:
        """Remove *user* from the followed set (no-op when not followed);
        @commit persists the change."""
        if self.is_following(user):
            self._following.remove(user)

    @commit(add=False)
    def remove_quote(self, quote_id: int):
        """Detach the quote with *quote_id* from this user.

        NOTE(review): removing from the relationship detaches rather than
        deletes the Quote row — confirm this is the intended behavior for
        the /delete endpoint. Also raises if no quote matches (first()
        returns None) — the caller treats that as a 401.
        """
        quote = self._quotes.filter_by(id=quote_id).first()
        self._quotes.remove(quote)

    @property
    def quotes(self) -> list:
        """All quotes by this user, materialized as a fresh list."""
        return self._quotes.all()

    @property
    def following(self) -> list:
        """All users this user follows, as a list."""
        return self._following.all()

    @property
    def followers(self) -> list:
        """All users following this user, as a list (backref of _following)."""
        return self._followers.all()
class Quote(db.Model):
    """Quote table for database"""
    id = db.Column(db.Integer, primary_key=True)
    # Quote body; the /post endpoint enforces len(content) < 150.
    content = db.Column(db.String(150))
    # Creation time; the default callable is evaluated per-row at insert.
    timestamp = db.Column(db.DateTime,
                          index=True,
                          default=datetime.now)
    # Author; also exposed as `author` through the User._quotes backref.
    user_id = db.Column(db.Integer,
                        db.ForeignKey("user.id"))

    @property
    def fmt_time(self):
        """Human-readable timestamp, e.g. 'January 05, Tuesday 14:30'."""
        return self.timestamp.strftime("%B %d, %A %H:%M")
|
arcticlimer/flaskquotes | src/utils/decorators.py | <reponame>arcticlimer/flaskquotes
import functools
from flask_login import current_user
from flask import render_template
from flask import redirect
from exts import db
def redirect_auth(endpoint=None):
    """Redirect authenticated users away from the decorated route.

    Parameters
    ----------
    endpoint : `str`, optional
        Where to send an authenticated user. Defaults to their own profile.
        (Previously the argument was accepted but silently ignored, which
        contradicted this decorator's stated purpose; all existing callers
        pass no argument, so honoring it is backward-compatible.)
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            if current_user.is_authenticated:
                return redirect(endpoint or f"/user/{current_user.usertag}")
            return f(*args, **kwargs)
        return wrapper
    return decorator
def templated(template: str):
    """Render *template* with the context dict returned by the wrapped view.

    The view may return:
    - a dict  -> used as the Jinja2 template context,
    - None    -> rendered with an empty context,
    - anything else (e.g. a redirect response) -> passed through untouched.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            context = f(*args, **kwargs)
            if context is None:
                context = {}
            elif not isinstance(context, dict):
                # Non-dict results (redirects, responses) bypass rendering.
                return context
            return render_template(template, **context)
        return wrapper
    return decorator
def commit(add=True):
    """Commit the database session after the wrapped function runs.

    Parameter
    ---------
    add : `bool`
        When True, the wrapped function's return value is added to the
        session before committing.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            outcome = f(*args, **kwargs)
            if add:
                db.session.add(outcome)
            db.session.commit()
            return outcome
        return wrapper
    return decorator
|
SpadeLiu/Lac-GwcNet | test_sceneflow.py | import argparse
import torch
import torch.nn as nn
from torchvision import transforms
from tqdm import tqdm, trange
import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import skimage.io
import cv2
from dataloader import sceneflow_loader as sf
from dataloader import readpfm as rp
from networks.stackhourglass import PSMNet
import loss_functions as lf
# Command-line configuration -------------------------------------------------
parser = argparse.ArgumentParser(description='LaC')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='2')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--data_path', type=str, default='/media/data/dataset/SceneFlow/')
parser.add_argument('--load_path', type=str, default='state_dicts/SceneFlow.pth')
parser.add_argument('--max_disp', type=int, default=192)
parser.add_argument('--lsp_width', type=int, default=3)
parser.add_argument('--lsp_height', type=int, default=3)
# Bug fix: `type=list` makes argparse split a CLI value into single
# characters (e.g. "1,2" -> ['1', ',', '2']); accept space-separated ints
# instead. The default is unchanged, so existing invocations behave the same.
parser.add_argument('--lsp_dilation', type=int, nargs='+', default=[1, 2, 4, 8])
parser.add_argument('--lsp_mode', type=str, default='separate')
parser.add_argument('--lsp_channel', type=int, default=4)
parser.add_argument('--no_udc', action='store_true', default=False)
parser.add_argument('--refine', type=str, default='csr')
args = parser.parse_args()
# Device / reproducibility setup ---------------------------------------------
if not args.no_cuda:
    os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()

torch.manual_seed(args.seed)
if cuda:
    torch.cuda.manual_seed(args.seed)

# SceneFlow split: lists of left/right image paths and left-disparity paths.
all_limg, all_rimg, all_ldisp, test_limg, test_rimg, test_ldisp = sf.sf_loader(args.data_path)

# Local-similarity-pattern (LSP) window settings consumed by the network.
affinity_settings = {}
affinity_settings['win_w'] = args.lsp_width
# NOTE(review): win_h is also set from lsp_width; --lsp_height is parsed but
# never used — confirm whether that is intended.
affinity_settings['win_h'] = args.lsp_width
affinity_settings['dilation'] = args.lsp_dilation
udc = not args.no_udc

model = PSMNet(maxdisp=args.max_disp, struct_fea_c=args.lsp_channel, fuse_mode=args.lsp_mode,
               affinity_settings=affinity_settings, udc=udc, refine=args.refine)
model = nn.DataParallel(model)
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
if cuda:
    model.cuda()
model.eval()

# Load pretrained weights.
ckpt = torch.load(args.load_path)
model.load_state_dict(ckpt)

# Accumulators: mean absolute error and outlier rate (error > 1 px).
mae = 0
op = 0
for i in trange(len(test_limg)):
    limg_path = test_limg[i]
    rimg_path = test_rimg[i]

    limg = Image.open(limg_path).convert('RGB')
    rimg = Image.open(rimg_path).convert('RGB')

    # Optional robustness perturbations (left disabled):
    # rimg = lf.random_noise(rimg, type='illumination')
    # rimg = lf.random_noise(rimg, type='color')
    # rimg = lf.random_noise(rimg, type='haze')
    #
    # Crop the bottom-right 960x544 region — presumably so the dimensions fit
    # the network's downsampling stride; confirm against the model definition.
    w, h = limg.size
    limg = limg.crop((w - 960, h - 544, w, h))
    rimg = rimg.crop((w - 960, h - 544, w, h))

    # ImageNet mean/std normalization.
    limg_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])(limg)
    rimg_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])(rimg)
    limg_tensor = limg_tensor.unsqueeze(0).cuda()
    rimg_tensor = rimg_tensor.unsqueeze(0).cuda()

    # Ground-truth disparity read from the PFM file.
    disp_gt, _ = rp.readPFM(test_ldisp[i])
    disp_gt = np.ascontiguousarray(disp_gt, dtype=np.float32)
    gt_tensor = torch.FloatTensor(disp_gt).unsqueeze(0).unsqueeze(0).cuda()

    with torch.no_grad():
        pred_disps = model(limg_tensor, rimg_tensor, gt_tensor)

    # Drops the first 4 rows of the prediction — presumably padding added
    # upstream; TODO confirm against the model/loader.
    pred_disps = pred_disps[:, 4:, :]
    predict_np = pred_disps.squeeze().cpu().numpy()

    # Evaluate only valid pixels inside the disparity range.
    mask = (disp_gt < args.max_disp) & (disp_gt > 0)
    if len(disp_gt[mask]) == 0:
        continue

    op_thresh = 1
    error = np.abs(predict_np * mask.astype(np.float32) - disp_gt * mask.astype(np.float32))
    op += np.sum(error > op_thresh) / np.sum(mask)
    mae += np.mean(error[mask])

# Averages over the test set: MAE, then the >1px outlier rate.
print(mae / len(test_limg))
print(op / len(test_limg))
|
SpadeLiu/Lac-GwcNet | networks/stackhourglass.py | <reponame>SpadeLiu/Lac-GwcNet
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from networks.submodule import convbn_3d, feature_extraction, DisparityRegression
from networks.deformable_refine import DeformableRefine, DeformableRefineF
import loss_functions as lf
import matplotlib.pyplot as plt
from networks.refinement import StereoDRNetRefinement
class hourglass(nn.Module):
    """PSMNet-style 3D hourglass: two stride-2 encoder stages followed by two
    transposed-conv decoder stages with additive skip connections. The
    ``presqu``/``postsqu`` inputs let skips come from a previous hourglass in
    the stack. Submodule attribute names (conv1..conv6) are kept so existing
    checkpoints still load."""

    def __init__(self, inplanes):
        super(hourglass, self).__init__()

        self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1),
                                   nn.ReLU(inplace=True))

        self.conv2 = convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1)

        self.conv3 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1),
                                   nn.ReLU(inplace=True))

        self.conv4 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1),
                                   nn.ReLU(inplace=True))

        self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
                                   nn.BatchNorm3d(inplanes*2))  # + conv2 skip

        self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
                                   nn.BatchNorm3d(inplanes))  # + x skip

    def forward(self, x, presqu, postsqu):
        down = self.conv1(x)       # in: 1/4, out: 1/8
        skip = self.conv2(down)    # in: 1/8, out: 1/8

        # Optionally merge the corresponding feature map from a previous
        # hourglass before the activation.
        if postsqu is None:
            skip = F.relu(skip, inplace=True)
        else:
            skip = F.relu(skip + postsqu, inplace=True)

        bottom = self.conv3(skip)   # in: 1/8, out: 1/16
        bottom = self.conv4(bottom) # in: 1/16, out: 1/16

        if presqu is None:
            post = F.relu(self.conv5(bottom) + skip, inplace=True)    # in: 1/16, out: 1/8
        else:
            post = F.relu(self.conv5(bottom) + presqu, inplace=True)

        out = self.conv6(post)      # in: 1/8, out: 1/4
        return out, skip, post
class hourglass_gwcnet(nn.Module):
    """GwcNet-style hourglass: strided 3-D conv encoder, transposed-conv
    decoder, and 1x1x1 'redir' projections used as residual skip paths."""

    def __init__(self, inplanes):
        super(hourglass_gwcnet, self).__init__()
        # encoder (each stride-2 stage halves D, H and W of the volume)
        self.conv1 = nn.Sequential(
            convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
            nn.ReLU(inplace=True))
        self.conv2 = nn.Sequential(
            convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
            nn.ReLU(inplace=True))
        self.conv3 = nn.Sequential(
            convbn_3d(inplanes * 2, inplanes * 4, kernel_size=3, stride=2, pad=1),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            convbn_3d(inplanes * 4, inplanes * 4, 3, 1, 1),
            nn.ReLU(inplace=True))
        # decoder
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(inplanes * 4, inplanes * 2, kernel_size=3, padding=1,
                               output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(inplanes * 2))
        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(inplanes * 2, inplanes, kernel_size=3, padding=1,
                               output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(inplanes))
        # 1x1x1 projections for the residual additions
        self.redir1 = convbn_3d(inplanes, inplanes, kernel_size=1, stride=1, pad=0)
        self.redir2 = convbn_3d(inplanes * 2, inplanes * 2, kernel_size=1, stride=1, pad=0)

    def forward(self, x):
        enc1 = self.conv2(self.conv1(x))      # half-resolution features
        enc2 = self.conv4(self.conv3(enc1))   # quarter-resolution features
        dec1 = F.relu(self.conv5(enc2) + self.redir2(enc1), inplace=True)
        return F.relu(self.conv6(dec1) + self.redir1(x), inplace=True)
class PSMNet(nn.Module):
    """Lac-GwcNet stereo disparity network.

    Builds a concatenation-based 4-D cost volume from left/right features,
    aggregates it with three stacked GwcNet hourglasses, regresses disparity
    via (optionally windowed) soft-argmax, and refines the result with a
    deformable refinement module.

    forward() returns (loss1, loss2) in training mode and the refined
    disparity map (B, H, W) in eval mode.
    """
    def __init__(self, maxdisp, struct_fea_c, fuse_mode, affinity_settings, udc, refine):
        super(PSMNet, self).__init__()
        self.maxdisp = maxdisp                      # number of full-resolution disparity hypotheses
        self.sfc = struct_fea_c                     # structure-feature channels of the LSP branch
        self.affinity_settings = affinity_settings
        self.udc = udc                              # uni-modal distribution constraint (adds CE loss)
        self.refine = refine                        # 'csr' refines the cost volume, else the disparity map
        self.feature_extraction = feature_extraction(self.sfc, fuse_mode, affinity_settings)
        self.dres0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True))
        self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        # three stacked hourglasses for deep-supervised cost aggregation
        self.dres2 = hourglass_gwcnet(32)
        self.dres3 = hourglass_gwcnet(32)
        self.dres4 = hourglass_gwcnet(32)
        # one classification head per hourglass output
        self.classif1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        if refine == 'csr':
            # cost-volume ('cost spatial refinement') variant
            self.refine_module = DeformableRefineF(feature_c=64, node_n=2, modulation=True, cost=True)
        else:
            # disparity-map refinement variant
            self.refine_module = DeformableRefineF(feature_c=64, node_n=2, modulation=True, cost=False)
        # He-style manual initialisation
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, left, right, gt_left):
        """left/right: (B, 3, H, W) images; gt_left: (B, H, W) ground-truth
        disparity -- only consumed on the training path."""
        refimg_fea = self.feature_extraction(left)
        targetimg_fea = self.feature_extraction(right)
        # matching: concatenation cost volume at 1/4 resolution
        cost = torch.zeros(refimg_fea.size()[0], refimg_fea.size()[1]*2, self.maxdisp//4,
                           refimg_fea.size()[2], refimg_fea.size()[3]).cuda()
        for i in range(self.maxdisp//4):
            if i > 0:
                cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:, :, :, i:]
                cost[:, refimg_fea.size()[1]:, i, :, i:] = targetimg_fea[:, :, :, :-i]
            else:
                cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea
                cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea
        cost = cost.contiguous()
        cost0 = self.dres0(cost)
        cost0 = self.dres1(cost0) + cost0
        out1 = self.dres2(cost0)
        out2 = self.dres3(out1)
        out3 = self.dres4(out2)
        # windowed soft-argmax only makes sense together with the UDC loss
        if self.udc:
            win_s = 5
        else:
            win_s = 0
        if self.training:
            cost1 = self.classif1(out1)
            cost2 = self.classif2(out2)
            cost1 = F.interpolate(cost1, [self.maxdisp, left.size()[2], left.size()[3]], mode='trilinear',
                                  align_corners=True)
            cost2 = F.interpolate(cost2, [self.maxdisp, left.size()[2], left.size()[3]], mode='trilinear',
                                  align_corners=True)
            cost1 = torch.squeeze(cost1, 1)
            distribute1 = F.softmax(cost1, dim=1)
            pred1 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute1)
            cost2 = torch.squeeze(cost2, 1)
            distribute2 = F.softmax(cost2, dim=1)
            pred2 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute2)
        cost3 = self.classif3(out3)
        cost3 = F.interpolate(cost3, [self.maxdisp, left.size()[2], left.size()[3]], mode='trilinear', align_corners=True)
        cost3 = torch.squeeze(cost3, 1)
        distribute3 = F.softmax(cost3, dim=1)
        pred3 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute3)
        if self.refine == 'csr':
            # refine the squeezed cost volume, then regress again
            costr, offset, m = self.refine_module(left, cost3.squeeze(1))
            distributer = F.softmax(costr, dim=1)
            predr = DisparityRegression(self.maxdisp, win_size=win_s)(distributer)
        else:
            # BUG FIX: DeformableRefineF.forward returns a
            # (refined, offset, modulation) tuple; the original bound the whole
            # tuple to predr and then crashed on predr.squeeze(1).
            predr, _, _ = self.refine_module(left, pred3.unsqueeze(1))
            predr = predr.squeeze(1)
        if self.training:
            mask = (gt_left < self.maxdisp) & (gt_left > 0)
            # deep supervision: smooth-L1 on all three hourglass predictions
            loss1 = 0.5 * F.smooth_l1_loss(pred1[mask], gt_left[mask]) + \
                    0.7 * F.smooth_l1_loss(pred2[mask], gt_left[mask]) + \
                    F.smooth_l1_loss(pred3[mask], gt_left[mask])
            if self.udc:
                gt_distribute = lf.disp2distribute(gt_left, self.maxdisp, b=2)
                loss2 = 0.5 * lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute1) + \
                        0.7 * lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute2) + \
                        lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute3)
                if self.refine:
                    loss1 += F.smooth_l1_loss(predr[mask], gt_left[mask])
                    # NOTE(review): 'distributer' only exists on the 'csr'
                    # path; training with udc and refine != 'csr' would raise
                    # NameError here -- confirm intended configurations.
                    loss2 += lf.CEloss(gt_left, self.maxdisp, gt_distribute, distributer)
            else:
                loss2 = loss1
            return loss1, loss2
        else:
            if self.refine:
                return predr
            else:
                return pred3
|
SpadeLiu/Lac-GwcNet | loss_functions.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
def disp2distribute(disp_gt, max_disp, b=2):
    """Convert a ground-truth disparity map into a per-pixel unimodal
    (Laplacian-shaped) distribution over disparity bins.

    Args:
        disp_gt: (B, H, W) float tensor of ground-truth disparities.
        max_disp: number of disparity bins D.
        b: Laplacian diversity; larger values give a smoother distribution.

    Returns:
        (B, D, H, W) tensor whose per-pixel vectors sum to 1 (up to the
        1e-8 normalisation guard).
    """
    disp_gt = disp_gt.unsqueeze(1)
    # Generalisation: build the bin-index ramp on the input's device instead
    # of hard-coding .cuda(), so the function also works on CPU tensors.
    # CUDA callers see identical behaviour.
    disp_range = torch.arange(0, max_disp, device=disp_gt.device).float().view(1, -1, 1, 1)
    gt_distribute = torch.exp(-torch.abs(disp_range - disp_gt) / b)
    gt_distribute = gt_distribute / (torch.sum(gt_distribute, dim=1, keepdim=True) + 1e-8)
    return gt_distribute
def CEloss(disp_gt, max_disp, gt_distribute, pred_distribute):
    """Cross-entropy between the ground-truth disparity distribution and the
    predicted one, averaged over pixels with a valid ground truth
    (0 < disp < max_disp).  Both distributions are (B, D, H, W)."""
    valid = (disp_gt > 0) & (disp_gt < max_disp)
    log_pred = torch.log(pred_distribute + 1e-8)  # guard against log(0)
    per_pixel = -torch.sum(gt_distribute * log_pred, dim=1)
    return torch.mean(per_pixel[valid])
class DispAffinity(nn.Module):
    """Computes disparity 'affinity' maps: for every non-centre position of a
    win_w x win_h window (at each dilation), the absolute disparity difference
    between a pixel and that shifted neighbour, masked so only pairs where
    both disparities are valid (0 < d < max_disp) contribute.

    Returns:
        affinity:   (B, K, 1, H, W) stacked |disp - shifted_disp| maps.
        valid_mask: (B, K, 1, H, W) boolean validity of each pair.
        K is the number of distinct (dy, dx) shifts over all dilations;
        duplicate shifts produced by different dilations are emitted once.
    """
    def __init__(self, win_w, win_h, dilation, max_disp):
        super(DispAffinity, self).__init__()
        self.win_w = win_w
        self.win_h = win_h
        self.dilation = dilation  # iterable of dilation factors
        self.max_disp = max_disp
    def forward(self, disp):
        # disp: (B, 1, H, W) disparity map
        B, _, H, W = disp.size()
        disp_mask = (disp > 0) & (disp < self.max_disp)
        affinity = []
        valid_mask = []
        shift = []  # (dy, dx) shifts already emitted, to skip duplicates
        for d in self.dilation:
            # zero-pad so every window position has a neighbour to sample
            pad_t = (self.win_w // 2 * d, self.win_w // 2 * d, self.win_h // 2 * d, self.win_h // 2 * d)
            pad_disp = F.pad(disp, pad_t, mode='constant')
            for i in range(self.win_w):
                for j in range(self.win_h):
                    if (i == self.win_w // 2) & (j == self.win_h // 2):
                        continue  # centre position: zero shift, skip
                    if ((j-self.win_h//2)*d, (i-self.win_w//2)*d) in shift:
                        continue  # same shift already covered by a smaller dilation
                    else:
                        rel_dif = torch.abs(pad_disp[:, :, d * j: d * j + H, d * i: d * i + W] - disp)
                        # whether the neighbor is valid
                        pad_mask = (pad_disp[:, :, d*j: d*j+H, d*i: d*i+W] > 0) & \
                                   (pad_disp[:, :, d*j: d*j+H, d*i: d*i+W] < self.max_disp)
                        # both are valid, the disparity distance is valid
                        mask = disp_mask & pad_mask
                        rel_dif = rel_dif * mask.float()
                        affinity.append(rel_dif)
                        valid_mask.append(mask)
                        shift.append(((j-self.win_h//2)*d, (i-self.win_w//2)*d))
        affinity = torch.stack(affinity, dim=1)
        valid_mask = torch.stack(valid_mask, dim=1)
        return affinity, valid_mask
def random_noise(img, type):
    """Apply a random photometric perturbation to a PIL RGB image.

    Args:
        img: PIL.Image in RGB mode.
        type: one of 'illumination', 'color', 'noise', 'haze'.
              (Parameter name shadows the builtin but is kept for
              backward compatibility with existing callers.)

    Returns:
        A new PIL.Image; unknown types return *img* unchanged.
    """
    if type == 'illumination':
        # darken the luma channel of sufficiently bright pixels
        yuv_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2YUV)
        # yuv_rimg[:, :, 0] = yuv_rimg[:, :, 0] - 20
        illu_mask = yuv_img[:, :, 0] > 50
        yuv_img[:, :, 0][illu_mask] = yuv_img[:, :, 0][illu_mask] - 50
        img = Image.fromarray(cv2.cvtColor(yuv_img, cv2.COLOR_YUV2RGB))
    elif type == 'color':
        # shift the colour balance: less blue, more red (masks avoid wraparound)
        rgb_img = np.array(img)
        color_mask = rgb_img[:, :, 2] > 50
        rgb_img[:, :, 2][color_mask] = rgb_img[:, :, 2][color_mask] - 50
        color_mask = rgb_img[:, :, 0] < 195
        rgb_img[:, :, 0][color_mask] = rgb_img[:, :, 0][color_mask] + 50
        img = Image.fromarray(rgb_img)
    elif type == 'noise':
        # BUG FIX: the original added signed noise directly to a uint8 array,
        # which wraps around (e.g. 3 - 20 -> 239), and the subsequent
        # >255 / <0 clamps were no-ops on uint8. Compute in a signed dtype,
        # clip, then convert back.
        rgb_img = np.array(img).astype(np.int16)
        shape = rgb_img.shape
        noise = np.random.randint(-20, 20, size=shape)
        rgb_img = np.clip(rgb_img + noise, 0, 255).astype('uint8')
        img = Image.fromarray(rgb_img)
    elif type == 'haze':
        # Atmospheric scattering model: I(x) = J(x) * t + A * (1 - t)
        rgb_img = np.array(img)
        A = np.random.uniform(0.6, 0.95) * 255
        # BUG FIX: transmission t must lie in (0, 1); the original multiplied
        # it by 255, which made A * (1 - t) strongly negative and overflowed
        # the uint8 conversion into wrap-around garbage instead of haze.
        t = np.random.uniform(0.3, 0.95)
        img = rgb_img * t + A * (1 - t)
        img = Image.fromarray(img.astype('uint8'))
    return img
def gradient_x(img):
    """Horizontal forward-difference gradient of a (B, C, H, W) tensor,
    replicate-padded on the right so the output keeps the input's shape."""
    padded = F.pad(img, [0, 1, 0, 0], mode='replicate')
    return padded[:, :, :, :-1] - padded[:, :, :, 1:]
def gradient_y(img):
    """Vertical forward-difference gradient of a (B, C, H, W) tensor,
    replicate-padded at the bottom so the output keeps the input's shape."""
    padded = F.pad(img, [0, 0, 0, 1], mode='replicate')
    return padded[:, :, :-1, :] - padded[:, :, 1:, :]
def smooth_loss(img, disp):
    """Edge-aware second-order smoothness penalty on a disparity map.

    Second derivatives of *disp* are exponentially down-weighted where the
    image has strong gradients, so smoothness is enforced mainly in
    texture-less regions.
    """
    dx_img = gradient_x(img)
    dy_img = gradient_y(img)
    ddx_disp = gradient_x(gradient_x(disp))
    ddy_disp = gradient_y(gradient_y(disp))
    wx = torch.exp(-torch.mean(torch.abs(dx_img), dim=1, keepdim=True))
    wy = torch.exp(-torch.mean(torch.abs(dy_img), dim=1, keepdim=True))
    penalty = torch.abs(ddx_disp * wx) + torch.abs(ddy_disp * wy)
    return torch.mean(penalty)
|
SpadeLiu/Lac-GwcNet | KITTI_ft.py | <gh_stars>10-100
import argparse
import torch
import torch.utils.data as data
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import os
from tqdm import tqdm
from collections import OrderedDict
from dataloader import KITTIloader as kt
from dataloader import KITTI2012loader as kt2012
from networks.stackhourglass import PSMNet
import loss_functions as lf
# Command-line configuration for fine-tuning Lac-GwcNet on KITTI.
parser = argparse.ArgumentParser(description='LaC')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='0, 1')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--epoch', type=int, default=300)
parser.add_argument('--data_path', type=str, default='/media/data/dataset/KITTI/data_scene_flow/training/')
parser.add_argument('--KITTI', type=str, default='2015')
parser.add_argument('--load_path', type=str, default='state_dicts/SceneFlow.pth')
parser.add_argument('--save_path', type=str, default='finetuned_KITTI/')
parser.add_argument('--max_disp', type=int, default=192)
parser.add_argument('--lsp_width', type=int, default=3)
parser.add_argument('--lsp_height', type=int, default=3)
# BUG FIX: 'type=list' would split any command-line value into single
# characters (e.g. "1248" -> ['1', '2', '4', '8'] as strings); accept a
# space-separated list of ints instead.  The default is unchanged, so
# behaviour without the flag is identical.
parser.add_argument('--lsp_dilation', type=int, nargs='+', default=[1, 2, 4, 8])
parser.add_argument('--lsp_mode', type=str, default='separate')
parser.add_argument('--lsp_channel', type=int, default=4)
parser.add_argument('--no_udc', action='store_true', default=False)
parser.add_argument('--refine', type=str, default='csr')
args = parser.parse_args()
# ---- environment, data, model and optimiser setup (runs at import) ----
if not args.no_cuda:
    os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if cuda:
    torch.cuda.manual_seed(args.seed)
# NOTE(review): the 2012 branch calls kt.kt2012_loader even though
# KITTI2012loader is imported as kt2012 (and otherwise unused) -- confirm
# whether kt2012.kt2012_loader was intended.
if args.KITTI == '2015':
    all_limg, all_rimg, all_ldisp, test_limg, test_rimg, test_ldisp = kt.kt_loader(args.data_path)
else:
    all_limg, all_rimg, all_ldisp, test_limg, test_rimg, test_ldisp = kt.kt2012_loader(args.data_path)
trainLoader = torch.utils.data.DataLoader(
    kt.myDataset(all_limg, all_rimg, all_ldisp, training=True),
    batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=False)
testLoader = torch.utils.data.DataLoader(
    kt.myDataset(test_limg, test_rimg, test_ldisp, training=False),
    batch_size=1, shuffle=False, num_workers=2, drop_last=False)
# local-similarity-pattern (affinity) configuration
affinity_settings = {}
affinity_settings['win_w'] = args.lsp_width
# NOTE(review): win_h is set from lsp_width as well; --lsp_height is parsed
# but never used -- presumably a typo, confirm before relying on it.
affinity_settings['win_h'] = args.lsp_width
affinity_settings['dilation'] = args.lsp_dilation
udc = not args.no_udc
model = PSMNet(maxdisp=args.max_disp, struct_fea_c=args.lsp_channel, fuse_mode=args.lsp_mode,
               affinity_settings=affinity_settings, udc=udc, refine=args.refine)
model = nn.DataParallel(model)
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
if cuda:
    model.cuda()
# load pretrained SceneFlow weights (expects a raw state_dict file)
checkpoint = torch.load(args.load_path)
model.load_state_dict(checkpoint)
# lr=0.1 is a placeholder: adjust_learning_rate() overwrites it every epoch
optimizer = optim.Adam(model.parameters(), lr=0.1, betas=(0.9, 0.999))
def train(imgL, imgR, disp_true):
    """Run one optimisation step on a batch; returns the scalar loss value.

    Uses the module-level model / optimizer / cuda / udc globals.
    """
    model.train()
    batch = [torch.FloatTensor(t) for t in (imgL, imgR, disp_true)]
    if cuda:
        batch = [t.cuda() for t in batch]
    left, right, gt = batch
    optimizer.zero_grad()
    # the model computes its own losses: smooth-L1 (l1) and cross-entropy (ce)
    l1_loss, ce_loss = model(left, right, gt)
    l1_loss = torch.mean(l1_loss)
    ce_loss = torch.mean(ce_loss)
    loss = 0.1 * l1_loss + ce_loss if udc else l1_loss
    loss.backward()
    optimizer.step()
    return loss.item()
def test(imgL, imgR, disp_true):
    """Evaluate one batch; returns the disparity error rate (1 - accuracy,
    where a pixel counts as correct if its error is < 3 px or < 5% of GT).

    BUG FIXES vs. the original:
      * 'args.cuda' does not exist (the flag is --no_cuda); use the
        module-level 'cuda' flag, as train() does.
      * the model's forward requires a gt_left argument (it is only consumed
        on the training path); pass the ground truth.
      * disp_true must stay on the CPU: it is later combined with the
        .cpu() prediction, and the original's .cuda() copy made that
        arithmetic fail with a device mismatch.
    """
    model.eval()
    imgL = torch.FloatTensor(imgL)
    imgR = torch.FloatTensor(imgR)
    if cuda:
        imgL, imgR = imgL.cuda(), imgR.cuda()
    with torch.no_grad():
        pred_disp = model(imgL, imgR, disp_true.cuda() if cuda else disp_true)
    final_disp = pred_disp.cpu()
    true_disp = disp_true
    index = np.argwhere(true_disp > 0)
    # NOTE(review): np.argwhere yields an (N, 3) coordinate array, so
    # index[0]/index[1]/index[2] are the first three *points*, not
    # per-dimension index vectors.  This mirrors the widely copied PSMNet
    # evaluation snippet; confirm whether index[:, 0] etc. was intended.
    disp_true[index[0], index[1], index[2]] = np.abs(
        true_disp[index[0], index[1], index[2]] - final_disp[index[0], index[1], index[2]])
    correct = (disp_true[index[0], index[1], index[2]] < 3) | \
              (disp_true[index[0], index[1], index[2]] < true_disp[index[0], index[1], index[2]]*0.05)
    torch.cuda.empty_cache()
    return 1-(float(torch.sum(correct)) / float(len(index[0])))
def adjust_learning_rate(optimizer, epoch):
    """Step-decay schedule: lr = 1e-3 for the first 200 epochs, 1e-4 after."""
    new_lr = 0.001 if epoch <= 200 else 0.0001
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def main():
    """Fine-tuning loop: per epoch, train over trainLoader, evaluate over
    testLoader, and checkpoint every 50 epochs."""
    start_epoch = 1
    for epoch in range(start_epoch, args.epoch + start_epoch):
        print('This is %d-th epoch' % epoch)
        total_train_loss = 0
        total_test_loss = 0
        adjust_learning_rate(optimizer, epoch)
        for batch_id, (imgL, imgR, disp_L) in enumerate(tqdm(trainLoader)):
            train_loss = train(imgL, imgR, disp_L)
            total_train_loss += train_loss
        avg_train_loss = total_train_loss / len(trainLoader)
        print('Epoch %d average training loss = %.3f' % (epoch, avg_train_loss))
        for batch_id, (imgL, imgR, disp_L) in enumerate(tqdm(testLoader)):
            test_loss = test(imgL, imgR, disp_L)
            total_test_loss += test_loss
        avg_test_loss = total_test_loss / len(testLoader)
        print('Epoch %d total test loss = %.3f' % (epoch, avg_test_loss))
        if epoch % 50 == 0:
            # NOTE(review): checkpoints wrap the weights under 'net', but the
            # startup code calls model.load_state_dict(checkpoint) directly --
            # resuming from these files needs checkpoint['net'].  Confirm.
            state = {'net': model.state_dict(),
                     'optimizer': optimizer.state_dict(),
                     'epoch': epoch}
            if not os.path.exists(args.save_path):
                os.mkdir(args.save_path)
            save_model_path = args.save_path + 'checkpoint_{}.tar'.format(epoch)
            torch.save(state, save_model_path)
        torch.cuda.empty_cache()
    print('Training Finished!')
if __name__ == '__main__':
    main()  # script entry point
|
SpadeLiu/Lac-GwcNet | networks/deformable_refine.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.U_net import U_Net, U_Net_F, U_Net_F_v2
class OffsetConv(nn.Module):
    """Predicts deformable sampling offsets (and optional modulation weights)
    from a guidance feature map via 1x1 convolutions.

    Outputs of forward():
        offset: (B, 2*node_num, H, W) per-pixel displacements.
        m:      (B, node_num, H, W) sigmoid modulation weights, or all-ones
                when modulation is disabled.
    """
    def __init__(self, inc, node_num, modulation):
        super(OffsetConv, self).__init__()
        self.modulation = modulation
        # zero-initialised so training starts from the identity sampling grid
        self.p_conv = nn.Conv2d(inc, 2*node_num, kernel_size=1, padding=0, stride=1)
        nn.init.constant_(self.p_conv.weight, 0)
        # NOTE(review): register_backward_hook is deprecated in recent PyTorch
        # (register_full_backward_hook is the replacement); kept as-is here.
        self.p_conv.register_backward_hook(self._set_lr)
        if modulation:
            self.m_conv = nn.Conv2d(inc, node_num, kernel_size=1, padding=0, stride=1)
            nn.init.constant_(self.m_conv.weight, 0)
            self.m_conv.register_backward_hook(self._set_lr)
        self.lr_ratio = 1e-2  # gradient scale: effectively a 100x smaller lr for these convs
    def _set_lr(self, module, grad_input, grad_output):
        # Backward hook: scales incoming gradients by lr_ratio so the offset /
        # modulation branches learn more slowly than the rest of the network.
        # print('grad input:', grad_input)
        new_grad_input = []
        for i in range(len(grad_input)):
            if grad_input[i] is not None:
                new_grad_input.append(grad_input[i] * self.lr_ratio)
            else:
                new_grad_input.append(grad_input[i])
        new_grad_input = tuple(new_grad_input)
        # print('new grad input:', new_grad_input)
        return new_grad_input
    def forward(self, x):
        offset = self.p_conv(x)
        B, N, H, W = offset.size()  # N == 2 * node_num
        if self.modulation:
            m = torch.sigmoid(self.m_conv(x))
        else:
            # uniform weights when modulation is off
            # NOTE(review): hard-coded .cuda() breaks CPU-only execution.
            m = torch.ones(B, N//2, H, W).cuda()
        return offset, m
class GetValueV2(nn.Module):
    """Samples values from a feature map at deformably-offset positions.

    Given N per-pixel (y, x) offsets, the reference grid plus each offset is
    clamped, normalised to [-1, 1] and bilinearly sampled with F.grid_sample;
    the N results are stacked along a new last dimension -> (B, C, H, W, N).
    """
    def __init__(self, stride):
        """stride spaces the reference grid built in _get_p_0
        (1 = one sample centre per output pixel)."""
        super(GetValueV2, self).__init__()
        self.stride = stride
    def forward(self, x, offset):
        b, _, h, w = x.size()
        dtype = offset.data.type()
        N = offset.size(1) // 2
        # (b, 2N, h, w): absolute sampling positions = reference grid + offset
        p = self._get_p(offset, dtype)
        # (b, h, w, 2N)
        p = p.contiguous().permute(0, 2, 3, 1)
        # clip p to the image and normalise to [-1, 1] for grid_sample
        p_y = torch.clamp(p[..., :N], 0, h-1) / (h-1) * 2 - 1
        p_x = torch.clamp(p[..., N:], 0, w-1) / (w-1) * 2 - 1
        x_offset = []
        for i in range(N):
            # NOTE(review): align_corners is left at the grid_sample default;
            # the [-1, 1] normalisation above matches align_corners=True --
            # confirm against the PyTorch version in use.
            get_x = F.grid_sample(x, torch.stack((p_x[:, :, :, i], p_y[:, :, :, i]), dim=3), mode='bilinear')
            x_offset.append(get_x)
        x_offset = torch.stack(x_offset, dim=4)
        return x_offset
    def _get_p_n(self, N, dtype):
        # NOTE(review): dead code on the forward path; it also references
        # self.kernel_size, which is never set, so calling it would raise
        # AttributeError.
        p_n_x, p_n_y = torch.meshgrid(
            torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1),
            torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1))
        # (2N, 1)
        p_n = torch.cat([torch.flatten(p_n_x), torch.flatten(p_n_y)], 0)
        p_n = p_n.view(1, 2*N, 1, 1).type(dtype)
        return p_n
    def _get_p_0(self, h, w, N, dtype):
        # reference (identity) grid of sampling centres, one per output pixel
        p_0_x, p_0_y = torch.meshgrid(
            torch.arange(1, h*self.stride+1, self.stride),
            torch.arange(1, w*self.stride+1, self.stride))
        p_0_x = torch.flatten(p_0_x).view(1, 1, h, w).repeat(1, N, 1, 1)
        p_0_y = torch.flatten(p_0_y).view(1, 1, h, w).repeat(1, N, 1, 1)
        p_0 = torch.cat([p_0_x, p_0_y], 1).type(dtype)
        return p_0
    def _get_p(self, offset, dtype):
        N, h, w = offset.size(1)//2, offset.size(2), offset.size(3)
        # (1, 2N, h, w)
        p_0 = self._get_p_0(h, w, N, dtype)
        p = p_0 + offset
        return p
    def _get_x_q(self, x, q, N):
        # NOTE(review): unused on the forward path (superseded by grid_sample).
        b, h, w, _ = q.size()
        padded_w = x.size(3)
        c = x.size(1)
        # (b, c, h*w)
        x = x.contiguous().view(b, c, -1)
        # (b, h, w, N)
        index = q[..., :N]*padded_w + q[..., N:]  # offset_x*w + offset_y
        # (b, c, h*w*N)
        index = index.contiguous().unsqueeze(dim=1).expand(-1, c, -1, -1, -1).contiguous().view(b, c, -1)
        x_offset = x.gather(dim=-1, index=index).contiguous().view(b, c, h, w, N)
        return x_offset
    @staticmethod
    def _reshape_x_offset(x_offset, ks):
        # NOTE(review): unused on the forward path.
        b, c, h, w, N = x_offset.size()
        x_offset = torch.cat([x_offset[..., s:s+ks].contiguous().view(b, c, h, w*ks) for s in range(0, N, ks)], dim=-1)
        x_offset = x_offset.contiguous().view(b, c, h*ks, w*ks)
        return x_offset
class DeformableRefine(nn.Module):
    """Disparity refinement via deformable sampling: a U-Net predicts
    per-pixel offsets (and modulation weights) that are used to
    re-interpolate the input disparity map (or cost volume when cost=True).

    Returns (refined, offset).
    """

    def __init__(self, feature_c, node_n, modulation, cost=False):
        super(DeformableRefine, self).__init__()
        self.refine_cost = cost
        self.feature_net = U_Net(img_ch=3, output_ch=feature_c)
        self.offset_conv = OffsetConv(inc=feature_c, node_num=node_n, modulation=modulation)
        self.get_value = GetValueV2(stride=1)

    def forward(self, img, depth):
        if not self.refine_cost:
            depth = depth.unsqueeze(1)  # (B, H, W) -> (B, 1, H, W)
        guidance = self.feature_net(img)
        offset, weight = self.offset_conv(guidance)
        # (B, C, H, W, N): values sampled at the N offset positions
        samples = self.get_value(depth, offset)
        weight = weight.unsqueeze(4).transpose(1, 4)  # broadcast over channels
        refined = torch.sum(weight * samples, dim=4) / (torch.sum(weight, dim=4) + 1e-8)
        return refined, offset
class DeformableRefineF(nn.Module):
    """Deformable refinement guided by the feature-pyramid U-Net (v2).

    When cost=True the second forward argument is a cost volume and is used
    as-is; otherwise it is a (B, H, W) disparity map that gets a channel axis.
    Returns (refined, offset, modulation).
    """

    def __init__(self, feature_c, node_n, modulation, cost=False):
        super(DeformableRefineF, self).__init__()
        self.refine_cost = cost
        self.feature_net = U_Net_F_v2(img_ch=3, output_ch=feature_c)
        self.offset_conv = OffsetConv(inc=feature_c, node_num=node_n, modulation=modulation)
        self.get_value = GetValueV2(stride=1)

    def forward(self, img, depth):
        if not self.refine_cost:
            depth = depth.unsqueeze(1)  # (B, H, W) -> (B, 1, H, W)
        guidance = self.feature_net(img)
        offset, weight = self.offset_conv(guidance)
        # (B, C, H, W, N): values sampled at the N offset positions
        samples = self.get_value(depth, offset)
        weight = weight.unsqueeze(4).transpose(1, 4)  # broadcast over channels
        refined = torch.sum(weight * samples, dim=4) / (torch.sum(weight, dim=4) + 1e-8)
        return refined, offset, weight
|
SpadeLiu/Lac-GwcNet | dataloader/sceneflow_loader.py | import os
from PIL import Image
from dataloader import readpfm as rp
import torch.utils.data as data
import torchvision.transforms as transforms
import numpy as np
import random
# Recognised image-file extensions (both letter cases).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP'
]


def is_image_file(filename):
    """Return True when *filename* ends with a known image extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def sf_loader(filepath):
    """Collect SceneFlow (Monkaa + FlyingThings3D + Driving) sample paths.

    Scans *filepath* for 'frames_cleanpass' image folders and matching
    'disparity' folders, returning six parallel path lists:
    (train_left, train_right, train_left_disp,
     test_left, test_right, test_left_disp).
    Only FlyingThings3D/TEST contributes test samples.
    """
    classes = [d for d in os.listdir(filepath) if os.path.isdir(os.path.join(filepath, d))]
    image = [img for img in classes if img.find('frames_cleanpass') > -1]
    disparity = [disp for disp in classes if disp.find('disparity') > -1]
    all_left_img = []
    all_right_img = []
    all_left_disp = []
    test_left_img = []
    test_right_img = []
    test_left_disp = []
    # --- Monkaa: every sequence goes to the training split ---
    monkaa_img = filepath + [x for x in image if 'monkaa' in x][0]
    monkaa_disp = filepath + [x for x in disparity if 'monkaa' in x][0]
    monkaa_dir = os.listdir(monkaa_img)
    for dd in monkaa_dir:
        left_path = monkaa_img + '/' + dd + '/left/'
        right_path = monkaa_img + '/' + dd + '/right/'
        disp_path = monkaa_disp + '/' + dd + '/left/'
        left_imgs = os.listdir(left_path)
        for img in left_imgs:
            img_path = os.path.join(left_path, img)
            if is_image_file(img_path):
                all_left_img.append(img_path)
                all_right_img.append(os.path.join(right_path, img))
                # disparity shares the image's basename with a .pfm suffix
                all_left_disp.append(disp_path + img.split(".")[0] + '.pfm')
    # --- FlyingThings3D: TRAIN/A-C -> train split, TEST/A-C -> test split ---
    flying_img = filepath + [x for x in image if 'flying' in x][0]
    flying_disp = filepath + [x for x in disparity if 'flying' in x][0]
    fimg_train = flying_img + '/TRAIN/'
    fimg_test = flying_img + '/TEST/'
    fdisp_train = flying_disp + '/TRAIN/'
    fdisp_test = flying_disp + '/TEST/'
    fsubdir = ['A', 'B', 'C']
    for dd in fsubdir:
        imgs_path = fimg_train + dd + '/'
        disps_path = fdisp_train + dd + '/'
        imgs = os.listdir(imgs_path)
        for cc in imgs:
            left_path = imgs_path + cc + '/left/'
            right_path = imgs_path + cc + '/right/'
            disp_path = disps_path + cc + '/left/'
            left_imgs = os.listdir(left_path)
            for img in left_imgs:
                img_path = os.path.join(left_path, img)
                if is_image_file(img_path):
                    all_left_img.append(img_path)
                    all_right_img.append(os.path.join(right_path, img))
                    all_left_disp.append(disp_path + img.split(".")[0] + '.pfm')
    for dd in fsubdir:
        imgs_path = fimg_test + dd + '/'
        disps_path = fdisp_test + dd + '/'
        imgs = os.listdir(imgs_path)
        for cc in imgs:
            left_path = imgs_path + cc + '/left/'
            right_path = imgs_path + cc + '/right/'
            disp_path = disps_path + cc + '/left/'
            left_imgs = os.listdir(left_path)
            for img in left_imgs:
                img_path = os.path.join(left_path, img)
                if is_image_file(img_path):
                    test_left_img.append(img_path)
                    test_right_img.append(os.path.join(right_path, img))
                    test_left_disp.append(disp_path + img.split(".")[0] + '.pfm')
    # --- Driving: all focal lengths / directions / speeds -> train split ---
    driving_img = filepath + [x for x in image if 'driving' in x][0]
    driving_disp = filepath + [x for x in disparity if 'driving' in x][0]
    dsubdir1 = ['15mm_focallength', '35mm_focallength']
    dsubdir2 = ['scene_backwards', 'scene_forwards']
    dsubdir3 = ['fast', 'slow']
    for d in dsubdir1:
        img_path1 = driving_img + '/' + d + '/'
        disp_path1 = driving_disp + '/' + d + '/'
        for dd in dsubdir2:
            img_path2 = img_path1 + dd + '/'
            disp_path2 = disp_path1 + dd + '/'
            for ddd in dsubdir3:
                img_path3 = img_path2 + ddd + '/'
                disp_path3 = disp_path2 + ddd + '/'
                left_path = img_path3 + 'left/'
                right_path = img_path3 + 'right/'
                disp_path = disp_path3 + 'left/'
                left_imgs = os.listdir(left_path)
                for img in left_imgs:
                    img_path = os.path.join(left_path, img)
                    if is_image_file(img_path):
                        all_left_img.append(img_path)
                        all_right_img.append(os.path.join(right_path, img))
                        all_left_disp.append(disp_path + img.split(".")[0] + '.pfm')
    return all_left_img, all_right_img, all_left_disp, test_left_img, test_right_img, test_left_disp
def img_loader(path):
    """Read the image at *path* and force 3-channel RGB mode."""
    image = Image.open(path)
    return image.convert('RGB')
def disparity_loader(path):
    # PFM disparity reader; returns (data, scale) as produced by readpfm.readPFM.
    return rp.readPFM(path)
class myDataset(data.Dataset):
    """SceneFlow stereo dataset: yields (left_img, right_img, left_disparity).

    Training samples get a random 512x256 crop; evaluation samples are
    cropped to the bottom-right 960x544 region.  Images are normalised with
    ImageNet statistics; disparity is returned as a float32 numpy array.
    """
    def __init__(self, left, right, left_disp, training, imgloader=img_loader, dploader = disparity_loader):
        # parallel lists of file paths for left/right images and left disparity
        self.left = left
        self.right = right
        self.disp_L = left_disp
        self.imgloader = imgloader
        self.dploader = dploader
        self.training = training
        # ('transorm' spelling kept -- it is the attribute name in use)
        self.img_transorm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    def __getitem__(self, index):
        left = self.left[index]
        right = self.right[index]
        disp_L = self.disp_L[index]
        left_img = self.imgloader(left)
        right_img = self.imgloader(right)
        dataL, scaleL = self.dploader(disp_L)
        dataL = np.ascontiguousarray(dataL, dtype=np.float32)
        if self.training:
            # random crop, identical window for both views and the disparity
            w, h = left_img.size
            tw, th = 512, 256
            x1 = random.randint(0, w - tw)
            y1 = random.randint(0, h - th)
            left_img = left_img.crop((x1, y1, x1+tw, y1+th))
            right_img = right_img.crop((x1, y1, x1+tw, y1+th))
            dataL = dataL[y1:y1+th, x1:x1+tw]
            left_img = self.img_transorm(left_img)
            right_img = self.img_transorm(right_img)
            return left_img, right_img, dataL
        else:
            # NOTE(review): the images are cropped to the bottom-right
            # 960x544 window but dataL is returned at full size -- confirm
            # the evaluation code compensates for this offset.
            w, h = left_img.size
            left_img = left_img.crop((w-960, h-544, w, h))
            right_img = right_img.crop((w-960, h-544, w, h))
            left_img = self.img_transorm(left_img)
            right_img = self.img_transorm(right_img)
            return left_img, right_img, dataL
    def __len__(self):
        return len(self.left)
|
SpadeLiu/Lac-GwcNet | networks/submodule.py | <gh_stars>10-100
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import models
import math
import numpy as np
import torchvision.transforms as transforms
import PIL
import os
import matplotlib.pyplot as plt
from networks.affinity_feature import AffinityFeature
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
    """2-D convolution (bias-free) followed by batch normalisation.

    When dilation > 1 the padding is set equal to the dilation so that a
    3x3 kernel preserves the spatial size at stride 1.
    """
    padding = dilation if dilation > 1 else pad
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=padding, dilation=dilation, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(out_planes))
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
    """3-D convolution (bias-free) followed by 3-D batch normalisation."""
    conv = nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                     padding=pad, stride=stride, bias=False)
    return nn.Sequential(conv, nn.BatchNorm3d(out_planes))
class BasicBlock(nn.Module):
    """ResNet basic block (two 3x3 conv-bn stages, no final ReLU), as used by
    the PSMNet-style feature extractor."""
    expansion = 1

    def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Sequential(convbn(inplanes, planes, 3, stride, pad, dilation),
                                   nn.ReLU(inplace=True))
        self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
        self.downsample = downsample  # optional projection for the residual path
        self.stride = stride

    def forward(self, x):
        residual = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        out += residual
        return out
# class matchshifted(nn.Module):
# def __init__(self):
# super(matchshifted, self).__init__()
#
# def forward(self, left, right, shift):
# batch, filters, height, width = left.size()
# shifted_left = F.pad(torch.index_select(left, 3, Variable(torch.LongTensor([i for i in range(shift,width)])).cuda()),(shift,0,0,0))
# shifted_right = F.pad(torch.index_select(right, 3, Variable(torch.LongTensor([i for i in range(width-shift)])).cuda()),(shift,0,0,0))
# out = torch.cat((shifted_left,shifted_right),1).view(batch,filters*2,1,height,width)
# return out
class DisparityRegression(nn.Module):
    """Soft-argmax disparity regression over a (B, D, H, W) probability volume.

    win_size == 0: full expectation over all D disparity bins.
    win_size  > 0: expectation restricted to a +-win_size window around the
                   per-pixel mode, with the window probabilities renormalised.
    """
    def __init__(self, maxdisp, win_size):
        super(DisparityRegression, self).__init__()
        self.max_disp = maxdisp
        self.win_size = win_size

    def forward(self, x):
        if self.win_size <= 0:
            # plain expectation: sum_d d * p(d)
            bins = torch.arange(0, self.max_disp).view(1, -1, 1, 1).float().to(x.device)
            return torch.sum(x * bins, 1)
        # windowed variant: gather probabilities around the argmax bin
        mode = torch.argmax(x, dim=1, keepdim=True)
        idx_list, prob_list = [], []
        for delta in range(-self.win_size, self.win_size + 1):
            idx = (mode + delta).clamp(0, x.shape[1] - 1)
            idx_list.append(idx)
            prob_list.append(torch.gather(x, dim=1, index=idx))
        probs = torch.cat(prob_list, dim=1)
        probs = probs / (torch.sum(probs, dim=1, keepdim=True) + 1e-8)
        window_bins = torch.cat(idx_list, dim=1).float()
        return torch.sum(probs * window_bins, dim=1)
class feature_extraction(nn.Module):
def __init__(self, structure_fc, fuse_mode, affinity_settings):
super(feature_extraction, self).__init__()
self.inplanes = 32
self.sfc = structure_fc
self.fuse_mode = fuse_mode
self.win_w = affinity_settings['win_w']
self.win_h = affinity_settings['win_h']
self.dilation = affinity_settings['dilation']
self.firstconv = nn.Sequential(convbn(3, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
self.lastconv = nn.Sequential(convbn(320, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
if self.sfc > 0:
if fuse_mode == 'aggregate':
self.embedding = nn.Sequential(convbn(320, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=1, padding=0, stride=1, bias=False))
in_c = self.win_w * self.win_h - 1
self.sfc_conv1 = nn.Sequential(convbn(in_c, self.sfc, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.sfc_conv2 = nn.Sequential(convbn(in_c, self.sfc, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.sfc_conv3 = nn.Sequential(convbn(in_c, self.sfc, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.sfc_conv4 = nn.Sequential(convbn(in_c, self.sfc, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.lastconv = nn.Sequential(convbn(4*self.sfc, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False))
# self.lastconv = nn.Sequential(convbn(320 + 4*self.sfc, 128, 3, 1, 1, 1),
# nn.ReLU(inplace=True),
# nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False))
self.to_sf = StructureFeature(affinity_settings, self.sfc)
elif fuse_mode == 'separate':
# self.embedding_l1 = nn.Sequential(convbn(32, 64, kernel_size=3, stride=1, pad=1, dilation=1),
# nn.ReLU(inplace=True),
# nn.Conv2d(64, 64, kernel_size=1, padding=0, stride=1, bias=False))
# self.to_sf_l1 = StructureFeature(affinity_settings, self.sfc)
self.embedding_l2 = nn.Sequential(convbn(64, 64, kernel_size=3, stride=1, pad=1, dilation=1),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=1, padding=0, stride=1, bias=False))
self.to_sf_l2 = StructureFeature(affinity_settings, self.sfc)
self.embedding_l3 = nn.Sequential(convbn(128, 64, kernel_size=3, stride=1, pad=1, dilation=1),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=1, padding=0, stride=1, bias=False))
self.to_sf_l3 = StructureFeature(affinity_settings, self.sfc)
self.embedding_l4 = nn.Sequential(convbn(128, 64, kernel_size=3, stride=1, pad=1, dilation=1),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=1, padding=0, stride=1, bias=False))
self.to_sf_l4 = StructureFeature(affinity_settings, self.sfc)
# self.lastconv = nn.Sequential(convbn(3 * 4 * self.sfc, 32, 3, 1, 1, 1),
# nn.ReLU(inplace=True),
# nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False))
self.lastconv = nn.Sequential(convbn(320 + 3 * 4 * self.sfc, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
    def _make_layer(self, block, planes, blocks, stride, pad, dilation):
        """Stack `blocks` residual units of type `block` into a Sequential.

        The first unit may change stride and/or channel width, so it receives
        a 1x1 conv + BN `downsample` branch to make the shortcut match; the
        remaining units run at stride 1 with no downsampling.  Updates
        ``self.inplanes`` to the stack's output width as a side effect.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Shortcut projection: match the residual path's stride and width.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),)
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, pad, dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
        return nn.Sequential(*layers)
def forward(self, x):
output = self.firstconv(x)
output_l1 = self.layer1(output)
output_l2 = self.layer2(output_l1)
output_l3 = self.layer3(output_l2)
output_l4 = self.layer4(output_l3)
# output_l1 = F.interpolate(output_l1, (output_l4.size()[2], output_l4.size()[3]),
# mode='bilinear', align_corners=True)
cat_feature = torch.cat((output_l2, output_l3, output_l4), 1)
if self.sfc > 0:
if self.fuse_mode == 'aggregate':
embedding = self.embedding(cat_feature)
cat_sf, affinity = self.to_sf(embedding)
# output_feature = self.lastconv(torch.cat((cat_feature, cat_sf), dim=1))
output_feature = self.lastconv(cat_sf)
elif self.fuse_mode == 'separate':
# embedding_l1 = self.embedding_l1(output_l1)
# l1_sf, l1_affi = self.to_sf_l1(embedding_l1)
embedding_l2 = self.embedding_l2(output_l2.detach())
l2_sf, l2_affi = self.to_sf_l2(embedding_l2)
embedding_l3 = self.embedding_l3(output_l3.detach())
l3_sf, l3_affi = self.to_sf_l3(embedding_l3)
embedding_l4 = self.embedding_l4(output_l4.detach())
l4_sf, l4_affi = self.to_sf_l4(embedding_l4)
# output_feature = self.lastconv(torch.cat((l2_sf, l3_sf, l4_sf), dim=1))
output_feature = self.lastconv(torch.cat((cat_feature, l2_sf, l3_sf, l4_sf), dim=1))
affinity = torch.cat((l2_affi, l3_affi, l4_affi), dim=1)
return output_feature
else:
output_feature = self.lastconv(cat_feature)
return output_feature
class StructureFeature(nn.Module):
    """Multi-dilation local-structure (affinity) features.

    For each of four dilation rates, an AffinityFeature layer computes the
    cosine similarity between every pixel and its win_h*win_w - 1 window
    neighbours; a 1x1 conv-bn-relu then compresses those similarities down
    to `sfc` channels.  Returns both the compressed features and the raw
    affinities, each concatenated over the four dilations.
    """

    def __init__(self, affinity_settings, sfc):
        super(StructureFeature, self).__init__()
        self.win_w = affinity_settings['win_w']
        self.win_h = affinity_settings['win_h']
        self.dilation = affinity_settings['dilation']  # list of 4 dilation rates
        self.sfc = sfc  # structure-feature channels per dilation
        # One affinity value per window neighbour (centre pixel excluded).
        in_c = self.win_w * self.win_h - 1
        self.sfc_conv1 = nn.Sequential(convbn(in_c, self.sfc, 1, 1, 0, 1),
                                       nn.ReLU(inplace=True))
        self.sfc_conv2 = nn.Sequential(convbn(in_c, self.sfc, 1, 1, 0, 1),
                                       nn.ReLU(inplace=True))
        self.sfc_conv3 = nn.Sequential(convbn(in_c, self.sfc, 1, 1, 0, 1),
                                       nn.ReLU(inplace=True))
        self.sfc_conv4 = nn.Sequential(convbn(in_c, self.sfc, 1, 1, 0, 1),
                                       nn.ReLU(inplace=True))

    def forward(self, x):
        # AffinityFeature registers no learnable parameters, so constructing
        # the modules on every forward pass is functionally harmless.
        affinity1 = AffinityFeature(self.win_h, self.win_w, self.dilation[0], 0)(x)
        affinity2 = AffinityFeature(self.win_h, self.win_w, self.dilation[1], 0)(x)
        affinity3 = AffinityFeature(self.win_h, self.win_w, self.dilation[2], 0)(x)
        affinity4 = AffinityFeature(self.win_h, self.win_w, self.dilation[3], 0)(x)
        affi_feature1 = self.sfc_conv1(affinity1)
        affi_feature2 = self.sfc_conv2(affinity2)
        affi_feature3 = self.sfc_conv3(affinity3)
        affi_feature4 = self.sfc_conv4(affinity4)
        out_feature = torch.cat((affi_feature1, affi_feature2, affi_feature3, affi_feature4), dim=1)
        affinity = torch.cat((affinity1, affinity2, affinity3, affinity4), dim=1)
        return out_feature, affinity
# return affinity1, affinity1 |
SpadeLiu/Lac-GwcNet | networks/refinement.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def reconstruction(right, disp):
    """Warp the right image towards the left view using a disparity map.

    Args:
        right: right image, shape (B, C, H, W).
        disp:  left-view disparity in pixels, shape (B, H, W).

    Returns:
        The right image bilinearly resampled at (x - disp, y) — i.e. a
        reconstruction of the left image, zero-filled outside the frame.
    """
    batch, _, height, width = right.size()
    # Normalised base grids in [0, 1]: x varies along W, y along H.
    xs = torch.linspace(0, 1, width).repeat(batch, height, 1).type_as(right)
    ys = torch.linspace(0, 1, height).repeat(batch, width, 1).transpose(1, 2).type_as(right)
    # Shift x by the normalised disparity, then map to the [-1, 1] range
    # expected by grid_sample.
    sample_grid = torch.stack((xs - disp / width, ys), dim=3)
    return F.grid_sample(right, 2 * sample_grid - 1,
                         mode='bilinear', padding_mode='zeros')
def conv2d(in_channels, out_channels, kernel_size=3, stride=1, dilation=1, groups=1):
    """Conv -> BatchNorm -> LeakyReLU(0.2) block.

    Padding equals the dilation rate, so spatial size is preserved for the
    default kernel_size=3 at stride 1.
    """
    stages = [
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                  stride=stride, padding=dilation, dilation=dilation,
                  bias=False, groups=groups),
        nn.BatchNorm2d(out_channels),
        nn.LeakyReLU(0.2, inplace=True),
    ]
    return nn.Sequential(*stages)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, with_bn_relu=False, leaky_relu=False):
    """3x3 convolution with padding, optionally followed by BN + (Leaky)ReLU."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)
    if not with_bn_relu:
        return conv
    activation = nn.LeakyReLU(0.2, inplace=True) if leaky_relu else nn.ReLU(inplace=True)
    return nn.Sequential(conv, nn.BatchNorm2d(out_planes), activation)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet BasicBlock variant).

    StereoNet uses leaky relu (alpha = 0.2), hence the `leaky_relu` switch
    (enabled by default here).
    """
    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, leaky_relu=True):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1.
        self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(0.2, inplace=True) if leaky_relu else nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, dilation=dilation)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample  # optional projection for the shortcut
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the shortcut so its shape matches the residual path.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class StereoDRNetRefinement(nn.Module):
    """Disparity refinement head in the style of StereoDRNet.

    Warps the right image to the left view with the current disparity,
    forms the photometric error, and predicts a residual disparity
    correction through a stack of dilated residual blocks.
    """

    def __init__(self):
        super(StereoDRNetRefinement, self).__init__()

        # Left image (3 channels) + photometric warp error (3 channels).
        in_channels = 6
        self.conv1 = conv2d(in_channels, 16)
        self.conv2 = conv2d(1, 16)  # on low disparity

        # Growing-then-shrinking dilations enlarge the receptive field
        # without losing resolution.
        self.dilation_list = [1, 2, 4, 8, 1, 1]
        self.dilated_blocks = nn.ModuleList()
        for dilation in self.dilation_list:
            self.dilated_blocks.append(BasicBlock(32, 32, stride=1, dilation=dilation))
        self.dilated_blocks = nn.Sequential(*self.dilated_blocks)

        self.final_conv = nn.Conv2d(32, 1, 3, 1, 1)

    def forward(self, left_img, right_img, left_disp):
        # Warp right image to left view with current disparity.
        # BUG FIX: the result was previously indexed with [0], which dropped
        # the batch dimension and silently broadcast the first sample's
        # reconstruction against the whole batch for batch sizes > 1.
        recon_left_img = reconstruction(right_img, left_disp.squeeze(1))  # [B, C, H, W]
        error = recon_left_img - left_img  # [B, C, H, W]
        concat1 = torch.cat((error, left_img), dim=1)  # [B, 6, H, W]
        conv1 = self.conv1(concat1)  # [B, 16, H, W]
        conv2 = self.conv2(left_disp)  # [B, 16, H, W]
        concat2 = torch.cat((conv1, conv2), dim=1)  # [B, 32, H, W]
        out = self.dilated_blocks(concat2)  # [B, 32, H, W]
        residual_disp = self.final_conv(out)  # [B, 1, H, W]
        # ReLU keeps the refined disparity non-negative.
        disp = F.relu(left_disp + residual_disp, inplace=True)  # [B, 1, H, W]
        return disp
if __name__ == '__main__':
    # Smoke test on random inputs: the refined disparity must keep the
    # [B, 1, H, W] shape of the input disparity.
    left_img = torch.rand(2, 3, 64, 64)
    right_img = torch.rand(2, 3, 64, 64)
    coarse_disp = torch.rand(2, 1, 64, 64)
    refine_module = StereoDRNetRefinement()
    refined = refine_module(left_img, right_img, coarse_disp)
    print(refined.shape)
SpadeLiu/Lac-GwcNet | networks/affinity_feature.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class AffinityFeature(nn.Module):
    """Cosine-similarity affinity between each pixel and its window neighbours.

    For every spatial location, computes the cosine similarity between the
    (L2-normalised) feature vector at that location and each of the
    win_h * win_w - 1 neighbours in a dilated window around it.  Similarities
    below `cut` are clamped up to `cut`.

    Output shape: (B, win_h*win_w - 1, H, W).
    """

    def __init__(self, win_h, win_w, dilation, cut):
        super(AffinityFeature, self).__init__()
        self.win_w = win_w
        self.win_h = win_h
        self.dilation = dilation
        # BUG FIX: the threshold was hard-coded to 0, silently ignoring the
        # `cut` constructor argument.  All in-repo callers pass cut=0, so
        # honouring the parameter is backward compatible.
        self.cut = cut

    def padding(self, x, win_h, win_w, dilation):
        """Zero-pad so a (win_h, win_w) dilated window fits at every pixel."""
        pad_t = (win_w // 2 * dilation, win_w // 2 * dilation,
                 win_h // 2 * dilation, win_h // 2 * dilation)
        return F.pad(x, pad_t, mode='constant')

    def forward(self, feature):
        B, C, H, W = feature.size()
        # Unit-normalise channel vectors so the dot product below is a
        # cosine similarity.
        feature = F.normalize(feature, dim=1, p=2)
        # NOTE(review): padding=self.dilation keeps the spatial size only
        # for 3x3 windows (pad = (k // 2) * dilation); verify before using
        # other window sizes.
        unfold_feature = nn.Unfold(
            kernel_size=(self.win_h, self.win_w), dilation=self.dilation,
            padding=self.dilation)(feature)
        # (B, win_h*win_w, C, H, W): one feature vector per window position.
        all_neighbor = unfold_feature.reshape(B, C, -1, H, W).transpose(1, 2)
        # Drop the window centre (the pixel itself).
        num = (self.win_h * self.win_w) // 2
        neighbor = torch.cat((all_neighbor[:, :num], all_neighbor[:, num + 1:]), dim=1)
        feature = feature.unsqueeze(1)
        affinity = torch.sum(neighbor * feature, dim=2)
        affinity[affinity < self.cut] = self.cut
        return affinity
|
SpadeLiu/Lac-GwcNet | test_kitti.py | <reponame>SpadeLiu/Lac-GwcNet
import argparse
import torch
import torch.nn as nn
from torchvision import transforms
import os
from tqdm import tqdm, trange
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from dataloader import KITTIloader as kt
from networks.stackhourglass import PSMNet
import loss_functions as lf
# ---- configuration ---------------------------------------------------------
parser = argparse.ArgumentParser(description='LaC')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='2')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--data_path', type=str, default='/media/data/dataset/KITTI/data_scene_flow/training/')
parser.add_argument('--load_path', type=str, default='state_dicts/kitti2015.pth')
parser.add_argument('--max_disp', type=int, default=192)
parser.add_argument('--lsp_width', type=int, default=3)
parser.add_argument('--lsp_height', type=int, default=3)
parser.add_argument('--lsp_dilation', type=list, default=[1, 2, 4, 8])
parser.add_argument('--lsp_mode', type=str, default='separate')
parser.add_argument('--lsp_channel', type=int, default=4)
parser.add_argument('--no_udc', action='store_true', default=False)
parser.add_argument('--refine', type=str, default='csr')
args = parser.parse_args()

# Pin the process to the requested GPU before CUDA is initialised.
if not args.no_cuda:
    os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if cuda:
    torch.cuda.manual_seed(args.seed)

# KITTI 2015 training split: left/right image and disparity path lists.
all_limg, all_rimg, all_ldisp, test_limg, test_rimg, test_ldisp = kt.kt_loader(args.data_path)

# Local-structure window settings for the model.
# NOTE(review): win_h is filled from --lsp_width, not --lsp_height — confirm
# whether non-square windows were ever intended.
affinity_settings = {}
affinity_settings['win_w'] = args.lsp_width
affinity_settings['win_h'] = args.lsp_width
affinity_settings['dilation'] = args.lsp_dilation
udc = not args.no_udc

model = PSMNet(maxdisp=args.max_disp, struct_fea_c=args.lsp_channel, fuse_mode=args.lsp_mode,
               affinity_settings=affinity_settings, udc=udc, refine=args.refine)
model = nn.DataParallel(model)
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
if cuda:
    model.cuda()
model.eval()

ckpt = torch.load(args.load_path)
model.load_state_dict(ckpt)

# ---- evaluation loop --------------------------------------------------------
mae = 0  # running sum of per-image mean absolute disparity error
op = 0   # running sum of per-image outlier rates (KITTI D1 criterion)

for i in trange(len(test_limg)):
    limg = Image.open(test_limg[i]).convert('RGB')
    rimg = Image.open(test_rimg[i]).convert('RGB')

    # Crop to 1232x368 anchored at the bottom-right so every input shares a
    # shape compatible with the network's downsampling.
    w, h = limg.size
    limg = limg.crop((w - 1232, h - 368, w, h))
    rimg = rimg.crop((w - 1232, h - 368, w, h))

    # ImageNet normalisation, matching the backbone's training statistics.
    limg_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])(limg)
    rimg_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])(rimg)
    limg_tensor = limg_tensor.unsqueeze(0).cuda()
    rimg_tensor = rimg_tensor.unsqueeze(0).cuda()

    # KITTI stores disparity as uint16 scaled by 256; 0 marks invalid pixels.
    disp_gt = Image.open(test_ldisp[i])
    disp_gt = disp_gt.crop((w - 1232, h - 368, w, h))
    disp_gt = np.ascontiguousarray(disp_gt, dtype=np.float32) / 256
    gt_tensor = torch.FloatTensor(disp_gt).unsqueeze(0).unsqueeze(0).cuda()

    with torch.no_grad():
        pred_disp = model(limg_tensor, rimg_tensor, gt_tensor)

    predict_np = pred_disp.squeeze().cpu().numpy()

    op_thresh = 3
    mask = (disp_gt > 0)  # evaluate only where ground truth is valid
    error = np.abs(predict_np * mask.astype(np.float32) - disp_gt * mask.astype(np.float32))

    # D1 outlier: error exceeds both 3 px and 5% of the true disparity.
    op += np.sum((error > op_thresh) & (error > disp_gt * 0.05)) / np.sum(mask)
    mae += np.mean(error[mask])

print('OP: %.2f%%' % (op / len(test_limg) * 100))
print('MAE: %.3f' % (mae / len(test_limg)))
SpadeLiu/Lac-GwcNet | networks/U_net.py | <filename>networks/U_net.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class conv_block(nn.Module):
    """Two stacked 3x3 Conv-BN-ReLU layers (spatial size preserved)."""

    def __init__(self, ch_in, ch_out):
        super(conv_block, self).__init__()
        stages = []
        # First conv maps ch_in -> ch_out; second refines at ch_out.
        for cin, cout in ((ch_in, ch_out), (ch_out, ch_out)):
            stages += [
                nn.Conv2d(cin, cout, kernel_size=3, stride=1, padding=1, bias=False),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
            ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class up_conv(nn.Module):
    """2x upsample (nearest-neighbour, the nn.Upsample default) followed by
    a 3x3 Conv-BN-ReLU."""

    def __init__(self, ch_in, ch_out):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.up(x)
class U_Net(nn.Module):
    """Shallow U-Net whose output stays at 1/4 of the input resolution.

    Encoder: five conv_block stages (32/64/128/128/128 channels) separated
    by 2x max-pooling.  Decoder: only two up_conv stages with skip
    connections (1/16 -> 1/8 -> 1/4), so the final 1x1 conv emits a
    quarter-resolution, non-negative map.
    """

    def __init__(self, img_ch=3, output_ch=1):
        super(U_Net, self).__init__()

        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
        self.Conv2 = conv_block(ch_in=32, ch_out=64)
        self.Conv3 = conv_block(ch_in=64, ch_out=128)
        self.Conv4 = conv_block(ch_in=128, ch_out=128)
        self.Conv5 = conv_block(ch_in=128, ch_out=128)

        self.Up5 = up_conv(ch_in=128, ch_out=128)
        self.Up_conv5 = conv_block(ch_in=256, ch_out=128)
        self.Up4 = up_conv(ch_in=128, ch_out=128)
        self.Up_conv4 = conv_block(ch_in=256, ch_out=64)

        self.Conv_1x1 = nn.Conv2d(64, output_ch, kernel_size=1, stride=1, padding=0)

        # He-style initialisation for convs; unit BN scale, zero biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        # Encoder.
        x1 = self.Conv1(x)
        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)  # 64, 1/2
        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)  # 128, 1/4
        x4 = self.Maxpool(x3)
        x4 = self.Conv4(x4)  # 128, 1/8
        x5 = self.Maxpool(x4)
        x5 = self.Conv5(x5)  # 128, 1/16

        # Decoder with skip connections (stops at 1/4 resolution).
        d5 = self.Up5(x5)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_conv5(d5)  # 128, 1/8
        d4 = self.Up4(d5)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)  # 64, 1/4

        d1 = self.Conv_1x1(d4)
        # ReLU keeps the output map non-negative.
        d1 = F.relu(d1)
        return d1
class U_Net_F(nn.Module):
    """Full-resolution U-Net (4 down / 4 up stages) with a ReLU'd 1x1 head.

    Channel widths 32/64/128/256/256; each decoder stage concatenates the
    matching encoder feature map (classic U-Net skip connections).
    """

    def __init__(self, img_ch=3, output_ch=1):
        super(U_Net_F, self).__init__()

        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
        self.Conv2 = conv_block(ch_in=32, ch_out=64)
        self.Conv3 = conv_block(ch_in=64, ch_out=128)
        self.Conv4 = conv_block(ch_in=128, ch_out=256)
        self.Conv5 = conv_block(ch_in=256, ch_out=256)

        self.Up5 = up_conv(ch_in=256, ch_out=256)
        self.Up_conv5 = conv_block(ch_in=512, ch_out=256)
        self.Up4 = up_conv(ch_in=256, ch_out=128)
        self.Up_conv4 = conv_block(ch_in=256, ch_out=128)
        self.Up3 = up_conv(ch_in=128, ch_out=64)
        self.Up_conv3 = conv_block(ch_in=128, ch_out=64)
        self.Up2 = up_conv(ch_in=64, ch_out=32)
        self.Up_conv2 = conv_block(ch_in=64, ch_out=32)

        self.Conv_1x1 = nn.Conv2d(32, output_ch, kernel_size=1, stride=1, padding=0)

        # He-style initialisation for convs; unit BN scale, zero biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        # Encoder: full -> 1/2 -> 1/4 -> 1/8 -> 1/16 resolution.
        x1 = self.Conv1(x)
        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)
        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)
        x4 = self.Maxpool(x3)
        x4 = self.Conv4(x4)
        x5 = self.Maxpool(x4)
        x5 = self.Conv5(x5)

        # Decoder: upsample, concatenate encoder skip, fuse.
        d5 = self.Up5(x5)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_conv5(d5)
        d4 = self.Up4(d5)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)
        d3 = self.Up3(d4)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)
        d2 = self.Up2(d3)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        d1 = self.Conv_1x1(d2)
        d1 = F.relu(d1)  # non-negative output map
        return d1
class U_Net_F_v2(nn.Module):
    """Full-resolution U-Net variant with narrower deep stages.

    Same 4 down / 4 up topology as U_Net_F but with channel widths
    32/64/128/128/128.  (A SelfAttention bottleneck was experimented with
    in place of Conv5 at some point.)
    """

    def __init__(self, img_ch=3, output_ch=1):
        super(U_Net_F_v2, self).__init__()

        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
        self.Conv2 = conv_block(ch_in=32, ch_out=64)
        self.Conv3 = conv_block(ch_in=64, ch_out=128)
        self.Conv4 = conv_block(ch_in=128, ch_out=128)
        self.Conv5 = conv_block(ch_in=128, ch_out=128)
        # self.Conv5 = SelfAttention(in_c=128)

        self.Up5 = up_conv(ch_in=128, ch_out=128)
        self.Up_conv5 = conv_block(ch_in=256, ch_out=128)
        self.Up4 = up_conv(ch_in=128, ch_out=128)
        self.Up_conv4 = conv_block(ch_in=256, ch_out=128)
        self.Up3 = up_conv(ch_in=128, ch_out=64)
        self.Up_conv3 = conv_block(ch_in=128, ch_out=64)
        self.Up2 = up_conv(ch_in=64, ch_out=32)
        self.Up_conv2 = conv_block(ch_in=64, ch_out=32)

        self.Conv_1x1 = nn.Conv2d(32, output_ch, kernel_size=1, stride=1, padding=0)

        # He-style initialisation for convs; unit BN scale, zero biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        # Encoder: full -> 1/2 -> 1/4 -> 1/8 -> 1/16 resolution.
        x1 = self.Conv1(x)
        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)
        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)
        x4 = self.Maxpool(x3)
        x4 = self.Conv4(x4)
        x5 = self.Maxpool(x4)
        x5 = self.Conv5(x5)

        # Decoder: upsample, concatenate encoder skip, fuse.
        d5 = self.Up5(x5)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_conv5(d5)
        d4 = self.Up4(d5)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)
        d3 = self.Up3(d4)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)
        d2 = self.Up2(d3)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        d1 = self.Conv_1x1(d2)
        d1 = F.relu(d1)  # non-negative output map
        return d1
class SelfAttention(nn.Module):
    """Non-local self-attention block with a residual connection.

    Queries/keys/values are 1x1 projections to a 64-channel bottleneck;
    attention is taken over all H*W positions, and the aggregated values
    are projected back to `in_c` channels and added to the input.
    """

    def __init__(self, in_c=128):
        super(SelfAttention, self).__init__()
        self.media_c = 64  # bottleneck channel count
        self.query_conv = nn.Conv2d(in_c, self.media_c, 1, 1, 0, bias=False)
        self.key_conv = nn.Conv2d(in_c, self.media_c, 1, 1, 0, bias=False)
        self.value_conv = nn.Conv2d(in_c, self.media_c, 1, 1, 0, bias=False)
        self.last_conv = nn.Conv2d(self.media_c, in_c, 1, 1, 0, bias=False)

    def forward(self, x):
        b, c, h, w = x.size()
        n = h * w
        q = self.query_conv(x).view(b, self.media_c, n)                                # B, C', N
        k = self.key_conv(x).view(b, self.media_c, n).permute(0, 2, 1).contiguous()    # B, N, C'
        attn = torch.softmax(torch.matmul(k, q), dim=2)                                # B, N, N
        v = self.value_conv(x).view(b, self.media_c, n).permute(0, 2, 1).contiguous()  # B, N, C'
        agg = torch.matmul(attn, v).permute(0, 2, 1).view(b, self.media_c, h, w).contiguous()
        return x + self.last_conv(agg)
if __name__ == '__main__':
    # GPU smoke test: one forward + backward pass through U_Net_F_v2
    # on random data (requires a CUDA device).
    a = torch.rand(2, 3, 128, 256).cuda()
    gt = torch.rand(2, 4, 128, 256).cuda()
    net = U_Net_F_v2(img_ch=3, output_ch=4).cuda()
    print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in net.parameters()])))
    b = net(a)
    loss = F.mse_loss(b, gt)
    loss.backward()
saramsv/decaying_human_body_part_classifier | predict_labels.py | <reponame>saramsv/decaying_human_body_part_classifier
#!/usr/bin/env python
## run : python3 predict_labels.py file_name
# (each line in file_name is a path_to_img)
"""Predict body-part labels for a list of images with a trained CNN.

High-confidence predictions (> 0.99) are printed as `path: label:conf`;
for low-confidence images, the activations of a softmax-stripped copy of
the model are printed instead.
"""
import cv2
import numpy as np
from keras.models import load_model
import keras
import csv
import sys
import os
from keras.preprocessing import image
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score

inception_img_size = 299
vgg_resnet_img_size = 224
batch_size = 2

test_csv = sys.argv[1]
body_parts = ['arm', 'hand', 'foot', 'legs', 'fullbody', 'head', 'backside', 'torso', 'stake', 'plastic']

model_name = 'inception_10000_epoch_-118-_acc_0.999569-_val_acc_0.98315.h5'
model_type = 'inception'
model = load_model("models/" + model_type + '/' + model_name)

# Clone the model and strip its last layer so low-confidence images can be
# reported with raw embeddings instead of a label.
# NOTE(review): popping `layers` and reassigning `outputs` does not rebuild
# the graph in recent Keras versions; verify model_copy.predict really
# returns the penultimate activations on the Keras version in use.
model_copy = keras.models.clone_model(model)
model_copy.set_weights(model.get_weights())
model_copy.layers.pop()
model_copy.outputs = [model_copy.layers[-1].output]
model_copy.layers[-1].outbound_nodes = []

not_found = 0
df = pd.read_csv(test_csv, names=['path'])
for path in df['path']:
    test_data = []
    try:
        # Resize to the input resolution expected by the architecture.
        if model_type == 'resnet' or model_type == 'vgg':
            img = image.load_img(path.strip(),
                                 target_size=(vgg_resnet_img_size, vgg_resnet_img_size, 3),
                                 grayscale=False)
        elif model_type == 'inception':
            img = image.load_img(path.strip(),
                                 target_size=(inception_img_size, inception_img_size, 3),
                                 grayscale=False)
        img = image.img_to_array(img)
        img = img / 255  # scale pixels to [0, 1], matching training
        test_data.append(img)
        test = np.array(test_data)
        prediction = model.predict(test)
        pred_classes = prediction.argmax(axis=-1)
        conf = prediction.max(axis=-1)
        print(conf)
        if conf > 0.99:
            for i, label in enumerate(list(pred_classes)):
                print(path + ":", body_parts[label] + ":" + str(conf[i]))
        else:
            # Low confidence: emit the path plus embedding activations.
            row = [path]
            prediction = model_copy.predict(test)
            row.extend(list(prediction))
            print(row)
    # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit,
    # making the script impossible to stop cleanly.  Catch Exception and keep
    # the original best-effort "count and continue" behaviour.
    except Exception:
        not_found += 1
saramsv/decaying_human_body_part_classifier | path2clustering.py | #RUN: python path2clustering file_with_paths
import tensorflow.keras
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D
from tensorflow.keras.callbacks import TensorBoard
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import cv2
import sys
import csv
import argparse
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', type=str)
args = parser.parse_args()
imgs_path = args.img_path  # file of "path: label: conf" lines

img_size = 224

# ImageNet ResNet50 backbone with a pooled 1024-d embedding head.
# NOTE(review): the Dense head is randomly initialised and never trained in
# this script, so features are a fixed random projection — confirm intended.
base_model = ResNet50
base_model = base_model(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
d = Dense(1024, activation='relu')(x)
clustering_model = Model(inputs=base_model.input, outputs=d)
clustering_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

data = pd.read_csv(imgs_path, sep=":", names=['path', 'label', 'conf'])
labels = data['label'].unique()

# Cluster the images of each predicted label separately (KMeans, k=20).
for label in labels:
    rows = []
    img_names = []
    df = data[data['label'] == label]
    df = df.reset_index()
    for path in df['path']:
        img_names.append(path)
        '''
        #######gray########
        img_object = cv2.imread(correct_path, cv2.IMREAD_GRAYSCALE)
        img_object = np.stack((img_object,)*3, axis=-1)
        '''
        # NOTE(review): cv2.imread returns None for unreadable paths, which
        # would make cv2.resize below raise — no error handling here.
        img_object = cv2.imread(path)
        img_object = cv2.resize(img_object, (img_size, img_size))
        img_object = np.array(img_object, dtype=np.float64)
        img_object = preprocess_input(np.expand_dims(img_object.copy(), axis=0))
        resnet_feature = clustering_model.predict(img_object)
        resnet_feature = np.array(resnet_feature)
        rows.append(list(resnet_feature.flatten()))

    features = np.array(rows)
    # NOTE(review): PCA(n_components=256) requires >= 256 samples for this
    # label; smaller groups will raise — TODO confirm inputs.
    pca_model = PCA(n_components=256)
    PCAed = pca_model.fit_transform(features)

    kmeans = KMeans(n_clusters=20)
    kmeans.fit(PCAed)
    kmeans_labels = kmeans.predict(PCAed)

    # Print icon path and "<label>_<cluster>" assignment for each image.
    for i, cluster_label in enumerate(kmeans_labels):
        print("{}: {}_{}".format(img_names[i].replace("JPG", "icon.JPG"), label, cluster_label))
saramsv/decaying_human_body_part_classifier | sequence.py | <filename>sequence.py
import sys
import numpy as np
import csv
import ast
import datetime
import math
def key_func(x):
    """Sort key: parse the capture date embedded in an image file name.

    File names carry the date after the last 'D_' as m_d_y (or m_d_Y);
    duplicate shots have a ' (n)' suffix instead of a plain extension.
    Some collections (e.g. 2011) use two-digit years while others (e.g.
    2015) use four digits, so the strptime format is chosen by the length
    of the assembled date string.

    Raises:
        ValueError: if no date can be parsed out of `x`.
    """
    try:
        if '(' in x:
            raw = x.split('D_')[-1].split('(')[0].strip()
        else:
            raw = x.split('D_')[-1].split('.')[0].strip()
        month, day, year = raw.split('_')[:3]
        # Zero-pad single-digit month/day so strptime accepts them.
        digits = month.zfill(2) + day.zfill(2) + year
        fmt = '%m%d%y' if len(digits) == 6 else '%m%d%Y'
        return datetime.datetime.strptime(digits, fmt)
    except (IndexError, ValueError) as err:
        # BUG FIX: the previous handler printed x, imported bpython (a
        # debugging leftover that is usually not installed) and exit()ed
        # the whole process.  Raise instead so callers see which file
        # name failed to parse.
        raise ValueError('cannot parse date from file name: %r' % (x,)) from err
def convert_to_time(img_name):
    """Parse the m_d_y (or m_d_Y) capture date embedded in an image name."""
    tail = img_name.split('D_')[-1]
    # Duplicate shots carry a ' (n)' suffix instead of a plain extension.
    raw = tail.split('(')[0].strip() if '(' in img_name else tail.split('.')[0].strip()
    parts = raw.split('_')
    # Zero-pad single-digit month/day so strptime accepts them.
    digits = parts[0].zfill(2) + parts[1].zfill(2) + parts[2]
    # Two-digit years (e.g. 2011 data) need %y; four-digit years need %Y.
    fmt = '%m%d%y' if len(digits) == 6 else '%m%d%Y'
    return datetime.datetime.strptime(digits, fmt)
def cosine_similarity(v1, v2):
    """Compute the cosine similarity of v1 to v2: (v1.v2) / (||v1|| * ||v2||)."""
    dot = sum(a * b for a, b in zip(v1, v2))
    norm1_sq = sum(a * a for a in v1)
    norm2_sq = sum(b * b for b in v2)
    return dot / math.sqrt(norm1_sq * norm2_sq)
def overlap_merge(all_sims):
    """Merge groups that share at least one image, iterating to a fixed point.

    Each pass scans all pairs of groups; whenever two groups intersect, the
    second is folded into the first (its members date-sorted via key_func)
    and marked `seen` so it is dropped from the result.  Passes repeat
    until one completes without any merge.
    """
    no_more_merge = False
    while no_more_merge == False:
        merged_dict = {}
        seen = []  # keys already folded into an earlier group this pass
        all_sims_keys = list(all_sims.keys())
        no_more_merge = True
        for key1 in all_sims_keys:
            if key1 not in seen:
                if key1 not in merged_dict:
                    merged_dict[key1] = list(set(all_sims[key1]))  # to remove the duplicates
                for key2 in all_sims_keys:
                    if key1 != key2:
                        intersect = len(set(all_sims[key1]).intersection(set(all_sims[key2])))
                        if intersect != 0:
                            no_more_merge = False
                            # NOTE: extend can reintroduce duplicates; they
                            # are removed by set() on the next pass.
                            merged_dict[key1].extend(list(set(all_sims[key2])))
                            merged_dict[key1] = sorted(merged_dict[key1], key=key_func)
                            seen.append(key2)
        all_sims = merged_dict
    return all_sims
#########################################################################
def similarity_merge(all_sims, donor2img2embeding, donor2day2img, donor):
    """Merge temporally overlapping groups whose overlap regions look alike.

    For each pair of groups, compares embeddings of the overlapping tail of
    the earlier sequence against the head of the later one (via
    find_tail_head); the single best-matching candidate is merged when its
    mean cosine similarity reaches the 0.83 floor.  Repeats to a fixed
    point, then prints the final groups via print_.
    """
    no_more_merge = False
    while no_more_merge == False:
        merged_dict = {}
        seen = []  # keys already folded into an earlier group this pass
        all_sims_keys = list(all_sims.keys())
        no_more_merge = True
        for key1 in all_sims_keys:
            if key1 in seen:
                continue
            if key1 not in merged_dict:
                # to remove the duplicates
                merged_dict[key1] = list(set(all_sims[key1]))
            one2nsimi = []  # [candidate key, mean head/tail similarity]
            for key2 in all_sims_keys:
                # Only consider keys after key1 to avoid comparing a pair twice.
                if all_sims_keys.index(key2) <= all_sims_keys.index(key1):
                    continue
                head, tail, tail_size = find_tail_head(all_sims, key1, key2)
                if tail_size >= 1:
                    similarity = []
                    for img_index in range(tail_size):
                        emb1 = donor2img2embeding[donor][tail[img_index]]
                        emb2 = donor2img2embeding[donor][head[img_index]]
                        simi = cosine_similarity(emb1, emb2)
                        similarity.append(simi)
                    sub_seq_simi = sum(similarity) / tail_size
                    one2nsimi.append([key2, sub_seq_simi])
            if len(one2nsimi) > 0:
                one2nsimi = sorted(one2nsimi, key=lambda x: x[1], reverse=True)
                # max(sim, 0.83) followed by `sim >= val` means: merge the
                # best candidate only if its similarity is at least 0.83.
                val = max(one2nsimi[0][1], 0.83)
                if one2nsimi[0][1] >= val:
                    no_more_merge = False
                    merged_dict[key1].extend(list(set(all_sims[one2nsimi[0][0]])))
                    merged_dict[key1] = sorted(merged_dict[key1], key=key_func)
                    seen.append(one2nsimi[0][0])
        all_sims = merged_dict
    print_(merged_dict, donor)
####################################################################
def find_tail_head(all_sims, key1, key2):
    """Find the temporally overlapping tail/head slices of two image groups.

    Sorts both groups chronologically, picks the one that starts earlier
    and reaches into the other as `sequence1`, restricts both to the day
    stamps they share, and returns equally long (head, tail) slices plus
    the slice length, for element-wise similarity comparison.
    """
    list1 = sorted(all_sims[key1], key=key_func)
    list2 = sorted(all_sims[key2], key=key_func)
    sequence1 = []
    sequence2 = []
    if len(list1) > 0 and len(list2) > 0:
        # sequence1 = the earlier-starting list whose span overlaps the other.
        if convert_to_time(list1[0]) < convert_to_time(list2[0]) and \
                convert_to_time(list1[-1]) > convert_to_time(list2[0]):
            sequence1 = list1
            sequence2 = list2
        else:
            sequence1 = list2
            sequence2 = list1
    head = tail = []
    # Day stamp = the token right after 'os//' in the image path.
    sequence1_times = [x.split("os//")[1].split()[0] for x in sequence1]
    sequence2_times = [x.split("os//")[1].split()[0] for x in sequence2]
    # BUG FIX: the overlap was previously computed as sequence1_times
    # intersected with itself, so sequence2's days were never taken into
    # account and "overlap" was just all of sequence1's days.
    time_overlap = list(set(sequence1_times).intersection(set(sequence2_times)))
    tail = [x for x in sequence1 if x.split("os//")[1].split()[0] in time_overlap]
    head = [x for x in sequence2 if x.split("os//")[1].split()[0] in time_overlap]
    sequence1 = tail
    sequence2 = head
    tail_size = min(len(sequence1), len(sequence2))
    if tail_size == 1:
        tail = [sequence1[-1]]
        head = [sequence2[0]]
    else:
        tail = sequence1[-tail_size:]
        head = sequence2[:tail_size]
    return head, tail, tail_size
##########################################################################
def add_to_similarity_dict(all_sims, similarities, key):
    """Attach to all_sims[key] every candidate whose similarity clears the bar.

    The bar is 99% of the best candidate's score, but never below 0.89.
    `key` is always a member of its own group.  Returns the mutated dict.
    """
    ranked = sorted(similarities, key=lambda pair: pair[1], reverse=True)
    best = ranked[0][1]
    threshold = max(0.99 * best, 0.89)
    all_sims.setdefault(key, [key])
    for candidate, score in ranked:
        if score >= threshold:
            all_sims[key].append(candidate)
    return all_sims
##################################################################
def print_(all_sims, donor):
    """Print `icon_path: donor_clusterLabel` for every image of every group."""
    # Group labels are 1-based and follow the dict's insertion order.
    for label, key in enumerate(all_sims, start=1):
        for img in all_sims[key]:
            icon = img.replace('JPG', 'icon.JPG: ')
            print(icon + donor + "_" + str(label))
def rolling_window(a, window):
    """Return all length-`window` sliding windows over the last axis of `a`.

    Output shape is a.shape[:-1] + (a.shape[-1] - window + 1, window),
    identical to the previous as_strided implementation.  Uses numpy's
    sliding_window_view, the supported, bounds-checked replacement for the
    raw as_strided trick (the returned view is read-only; callers here
    only iterate it).
    """
    return np.lib.stride_tricks.sliding_window_view(a, window, axis=-1)
#################################################################
def sequence_finder(donor2img2embeding, donor2day2img):
    """Group each donor's images into temporal sequences of the same subject.

    Slides a 5-day window over the donor's sorted capture days, compares
    every image of an earlier day against all images of a later day by
    cosine similarity of their embeddings, and greedily grows groups via
    add_to_similarity_dict.  Groups are then merged by membership overlap
    (overlap_merge) and by inter-sequence similarity (similarity_merge,
    which also prints the result).
    """
    for donor in donor2img2embeding:
        days = list(donor2day2img[donor].keys())
        days.sort()
        all_embs = donor2img2embeding[donor]
        all_sims = {}  # group anchor image -> list of matched images
        window_size = 5
        compared = []  # (day_index, day_index) pairs already processed
        windows = rolling_window(np.array(range(len(days))), window_size)
        for window in windows:
            for ind1 in range(len(window)):
                for ind2 in range(ind1 + 1, len(window)):
                    pair = (window[ind1], window[ind2])
                    if pair not in compared:
                        compared.append(pair)
                        day1_ind = pair[0]
                        day2_ind = pair[1]
                        day1_imgs = donor2day2img[donor][days[day1_ind]]
                        for day1_img in day1_imgs:
                            emb = all_embs[day1_img]
                            # If this image already belongs to a group, grow
                            # that group instead of anchoring a new one.
                            key = day1_img
                            for seen in all_sims:
                                for x in all_sims[seen]:
                                    if day1_img == x:
                                        key = seen
                            day2_imgs = donor2day2img[donor][days[day2_ind]]
                            similarities = []
                            for day2_img in day2_imgs:
                                emb2 = all_embs[day2_img]
                                sim = cosine_similarity(emb, emb2)
                                similarities.append([day2_img, sim])
                            all_sims = add_to_similarity_dict(all_sims, similarities, key)
        all_sims = overlap_merge(all_sims)
        # similarity_merge prints the final groups itself.
        similarity_merge(all_sims, donor2img2embeding, donor2day2img, donor)
saramsv/decaying_human_body_part_classifier | kmeans_embeddings_clustering.py | #python3 clustering.py --embeding_file data/pcaUT29-15 --cluster_number 7 > daily_merge_7ClusAll
# the file for this script should be image_name va1l val2.... valn.
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import sys
import numpy as np
import csv
import ast
import argparse
from operator import itemgetter
from functools import reduce
from scipy.spatial import distance
import math
csv.field_size_limit(sys.maxsize)
def cluster_all(img_names, vectors):
    """KMeans-cluster embedding vectors and print each image's cluster id.

    NOTE(review): relies on the module-level global `num_clusters`, which is
    assigned only inside the `__main__` guard — this function cannot be
    imported and called standalone; consider passing it as a parameter.
    """
    vectors = np.array(vectors)
    #vectors = vectors / vectors.max(axis=0)
    ## kmeans:
    #pca = PCA(n_components=16)
    #vectors = pca.fit_transform(vectors)
    kmeans = KMeans(n_clusters = num_clusters)
    kmeans.fit(vectors)
    labels = kmeans.predict(vectors)
    #clustering = DBSCAN(eps=0.5, min_samples=5).fit(vectors)
    #labels = clustering.labels_
    '''
    ######### Agglomerative ######
    agglomerative = AgglomerativeClustering(n_clusters = num_clusters, linkage='single')
    agglomerative.fit(list(vectors))
    labels = agglomerative.labels_#predict(vectors)
    '''
    # Print "icon_path: cluster" for downstream consumption.
    for index, label in enumerate(labels):
        print(img_names[index].replace('JPG', 'icon.JPG:') , label)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--embeding_file', type = str)
    parser.add_argument('--cluster_number')  # A number
    args = parser.parse_args()
    embedings_file = args.embeding_file  # This should be a pca version of the embedings
    num_clusters = int(args.cluster_number)

    img_names = []
    vectors = []
    # Each line looks like "<path ending in JPG>[v1 v2 ... vn]"; split on
    # the 'JPG' suffix to separate the name from the embedding vector.
    with open(embedings_file, 'r') as csv_file:
        data = csv.reader(csv_file, delimiter = '\n')
        for row in data:
            row = row[0]
            row = row.split('JPG')
            img_name = row[0] + 'JPG'
            embeding = row[1].strip()
            # Turn the space-separated "[v1 v2 ...]" text into a Python list.
            embeding = embeding.replace(' ', ',')
            embeding = ast.literal_eval("[" + embeding[1:-1] + "]")  # this embeding is a list now
            vectors.append(embeding)
            img_names.append(img_name)
    cluster_all(img_names, vectors)
|
saramsv/decaying_human_body_part_classifier | pred_one_million.py | #!/usr/bin/env python
## run : python3 predict_bodypart.py file_name
#file name is each line a path and then : the grount truth label
import cv2
import numpy as np
from keras.models import load_model
import csv
import sys
import os
from keras.preprocessing import image
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# Input resolutions expected by each backbone family.
inception_img_size = 299
vgg_resnet_img_size = 224
batch_size = 2
# CSV of image paths (one per line), supplied on the command line.
test_csv = sys.argv[1]
# Fixed label vocabulary: the model's output index maps into this list.
body_parts = ['arm', 'hand', 'foot', 'legs','fullbody', 'head','backside', 'torso', 'stake', 'plastic']
dir_names = ['resnet', 'vgg', 'inception']
model_dir_name = dir_names[2]
dir_info = os.walk(model_dir_name)
model_names = ['inception_10000_epoch_-118-_acc_0.999569-_val_acc_0.98315.h5']
# model_name equals 'inception' here, so the resnet/vgg branch below is
# effectively dead in this configuration.
model_name = model_dir_name
not_found = 0  # tally of paths that failed to load/predict
df = pd.read_csv(test_csv, names = ['path'])
model = load_model(model_dir_name + '/' + model_names[0])
# Predict one image at a time, printing "path: label confidence" per image.
for path in df['path']:
    test_data = []
    img_names = []
    try:
        if model_name == 'resnet' or model_name == 'vgg':
            img = image.load_img(path.strip(),
                    target_size = (vgg_resnet_img_size, vgg_resnet_img_size, 3),
                    grayscale = False)
        elif model_name == 'inception':
            img = image.load_img(path.strip(),
                    target_size = (inception_img_size, inception_img_size, 3),
                    grayscale = False)
        # Scale pixels into [0, 1] to match training preprocessing.
        img = image.img_to_array(img)
        img = img/255
        test_data.append(img)
        img_names.append(path)
        test = np.array(test_data)
        prediction = model.predict(test)
        pred_classes = prediction.argmax(axis=-1)
        conf = prediction.max(axis=-1)
        for i, label in enumerate(list(pred_classes)):
            print(img_names[i]+":", body_parts[label], conf[i])
    # NOTE(review): the bare except counts any failure (missing file,
    # decode error, prediction error) as "not found" and silently skips it.
    except:
        not_found += 1
        #print(path)
|
saramsv/decaying_human_body_part_classifier | resnet2pca.py | #python3 resnet2pca.py --embeding_file 50000resnet.csv > 50000PCAed64
# the input file is image_name var1 var2.....varn.
#NOTE: No '' round the image_name
import csv
from sklearn.decomposition import PCA
import argparse
import ast
import numpy as np
import sys
# Reduce pre-computed ResNet feature vectors to 128 dimensions with PCA and
# print "path , [values]" per image.
csv.field_size_limit(sys.maxsize)  # embedding rows can be very long
parser = argparse.ArgumentParser()
parser.add_argument('--embeding_file', type = str)
args = parser.parse_args()
embedings_file = args.embeding_file
with open(embedings_file, 'r') as csv_file:
    data = csv.reader(csv_file, delimiter = '\n')
    vectors = []
    img_names = []
    for row in data:
        row = row[0]
        row = row.split('JPG')
        img_name = row[0] + 'JPG'
        img_names.append(img_name)
        # BUG FIX: was .strip('') which strips nothing (empty char set) and
        # left trailing whitespace that corrupted the [1:-1] slice below;
        # .strip() matches the identical parser in the clustering script.
        embeding = row[1].strip()
        embeding = embeding.replace(' ', ',')
        embeding = ast.literal_eval("[" + embeding[1:-1] + "]")  # now a Python list
        vectors.append(embeding)
vectors = np.array(vectors)
model = PCA(n_components = 128)
results = model.fit_transform(vectors)
for index, img in enumerate(img_names):
    print(img, ",", list(results[index,:]))
|
saramsv/decaying_human_body_part_classifier | label_voting.py | <gh_stars>0
#!usr/bin/env python3
import pandas as pd
import operator
import sys
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', -1)
pd.set_option('display.max_rows', 1000)
file_name = sys.argv[1]
df = pd.read_csv(file_name, delimiter = ':')
df.columns = ('path', 'seq', 'label', 'conf')
num_seq_uniq = df['seq'].unique().shape[0]
for i in range(num_seq_uniq):
labels = df.loc[df['seq']==df['seq'].unique()[i]]
labels = labels.reset_index(drop = True)
predicted_labels = list(labels['label'].values)
counts = dict((x, predicted_labels.count(x)) for x in set(predicted_labels))
frequent_label = max(counts.items(), key = operator.itemgetter(1))[0].strip()
#confs = list(labels['conf'].values)
#counts_conf = dict((x, confs.count(x)) for x in set(confs))
#frequent_conf = max(counts_conf.items(), key = operator.itemgetter(1))[0]
#print(labels['seq'].unique()[0], ":", frequent_label, ":", frequent_conf)
labels['label'] = frequent_label
print(labels)
|
saramsv/decaying_human_body_part_classifier | classifier.py | <filename>classifier.py<gh_stars>0
#!/usr/bin/env python
# run python3 classifier.py very_clean_label_data
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dropout, Dense, Input
from keras.models import Sequential
from keras.models import Model
import keras.backend as K
from keras.utils import to_categorical
from keras.applications import VGG16
from keras.applications import ResNet50
from keras import optimizers
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
import os
import csv
import sys
train_data = sys.argv[1]  # CSV of "image_path : label" lines
# Fixed label vocabulary; network output indices map into this list.
body_parts = ['arm', 'hand', 'foot', 'legs','fullbody',
        'head','backside', 'torso', 'stake', 'plastic']
num_classes = len(body_parts)
data = []
labels = []
vgg_resnet_img_size = 224   # input resolution for VGG16 / ResNet50
inception_img_size = 299    # input resolution for InceptionV3
batch_size = 32
model_names = ['inception']#['resnet', 'vgg', 'inception']
for model_name in model_names:
    not_found = 0  # listed images that could not be loaded
    data = []
    labels = []
    # Load every labelled image, resized for the chosen backbone, scaled to [0, 1].
    with open(train_data, 'r') as file_:
        csv_reader = csv.reader(file_, delimiter = ":")
        for row in csv_reader:
            tag = row[1].strip()
            if tag in body_parts:
                try:
                    if model_name == 'inception':
                        img = image.load_img(row[0].strip(),
                                target_size = (inception_img_size,
                                    inception_img_size, 3), grayscale = False)
                    else:
                        img = image.load_img(row[0].strip(),
                                target_size = (vgg_resnet_img_size,
                                    vgg_resnet_img_size, 3), grayscale = False)
                    img = image.img_to_array(img)
                    img = img/255
                    data.append(img)
                    labels.append(body_parts.index(tag))
                except:
                    # Unreadable/missing file: skip it but keep a tally.
                    not_found += 1
    sample_sizes = [len(data)]  # train on the full data set
    for sample_size in sample_sizes:
        # Build the chosen ImageNet-pretrained backbone with average pooling.
        if model_name == 'vgg':
            inp = Input((vgg_resnet_img_size, vgg_resnet_img_size, 3))
            model = VGG16(include_top = False, weights='imagenet',
                    input_tensor = inp,
                    input_shape = (vgg_resnet_img_size,
                        vgg_resnet_img_size, 3),
                    pooling = 'avg')
        if model_name == 'resnet':
            inp = Input((vgg_resnet_img_size, vgg_resnet_img_size, 3))
            model = ResNet50(include_top = False, weights='imagenet',
                    input_tensor = inp,
                    input_shape = (vgg_resnet_img_size,
                        vgg_resnet_img_size, 3),
                    pooling = 'avg')
        if model_name == 'inception':
            inp = Input((inception_img_size, inception_img_size, 3))
            model = InceptionV3(include_top = False, weights='imagenet',
                    input_tensor = inp,
                    input_shape = (inception_img_size,
                        inception_img_size, 3),
                    pooling = 'avg')
        # Small classification head on top of the pooled backbone features.
        x = model.output
        x = Dense(256, activation='relu')(x)
        x = Dropout(0.1)(x)
        out = Dense(num_classes, activation='softmax')(x)
        model = Model(inp, out)
        sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipvalue = 0.5)
        es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)
        # Warm-start from a previous checkpoint before fine-tuning further.
        model.load_weights('models/inception_epoch_-058-_acc_0.999475-_val_acc_0.94976.h5')
        # BUG FIX: compile with the configured SGD instance; the string 'sgd'
        # silently discarded the lr/decay/momentum/clipvalue settings above.
        model.compile(optimizer = sgd, loss = 'categorical_crossentropy', metrics=['acc'])
        d = data[0:sample_size]
        l = labels[0:sample_size]
        data1 = np.array(d)
        labels1 = to_categorical(np.array(l), num_classes = num_classes)
        X_train, X_test, y_train, y_test = train_test_split(data1, labels1,
                test_size=0.3)
        # Keep only the best checkpoint (highest validation accuracy).
        checkpoint = ModelCheckpoint(model_name + '_epoch_-{epoch:03d}-_acc_{acc:03f}-_val_acc_{val_acc:.5f}.h5',
                verbose=1, monitor='val_acc', save_best_only=True, mode='auto')
        train_datagen = ImageDataGenerator()  # images already scaled to [0, 1]
        val_datagen = ImageDataGenerator()
        history = model.fit_generator(train_datagen.flow(X_train, y_train, batch_size=batch_size)
                ,steps_per_epoch = len(X_train) // batch_size
                ,validation_data=val_datagen.flow(X_test,y_test, batch_size=batch_size),
                validation_steps=(len(X_test))//batch_size
                ,callbacks=[checkpoint, es]
                ,epochs = 200, verbose = 1)
    print(history.history.keys())
    # Training-curve plots. BUG FIXES: (1) read 'val_acc' -- the metric is
    # registered as 'acc', so there is no 'val_accuracy' key and the old code
    # raised KeyError; (2) start a fresh figure per plot so the loss figure
    # does not inherit the accuracy curves; (3) removed the interactive
    # bpython debug shells left over from development.
    plt.figure()
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('acc.png')
    plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('loss')
|
saramsv/decaying_human_body_part_classifier | decom_sequence_generator.py | <reponame>saramsv/decaying_human_body_part_classifier<gh_stars>0
#run python3 decom_sequence_generator.py file_with_the_following_format
# the file for this script should be image_name va1l val2.... valn.
import sys
import numpy as np
import csv
import ast
import datetime
import argparse
from operator import itemgetter
from functools import reduce
from scipy.spatial import distance
import math
import sequence
def key_func(x):
    """Sort key: extract the capture date embedded in an image filename.

    Filenames carry the date after the last 'D_' token as month_day_year,
    where the year may be 2 or 4 digits and duplicate shots append " (n)"
    before the extension, e.g. ".../D_3_5_15.JPG" or ".../D_3_5_2015 (2).JPG".

    Returns:
        datetime.datetime parsed from the filename.

    Raises:
        Re-raises the underlying parsing error after printing the offending
        name. (BUG FIX: previously the except branch opened an interactive
        bpython shell -- a third-party debug leftover -- and called exit(),
        killing the whole process and hiding the real error.)
    """
    try:
        # The date text sits between the final 'D_' and either '(' (for
        # duplicate shots) or the '.' of the file extension.
        if '(' in x:
            date_ = x.split('D_')[-1].split('(')[0].strip()
        else:
            date_ = x.split('D_')[-1].split('.')[0].strip()
        mdy = date_.split('_')
        m = mdy[0]
        d = mdy[1]
        y = mdy[2]
        # Zero-pad single-digit month/day so strptime sees fixed-width fields.
        if len(m) == 1:
            m = '0' + m
        if len(d) == 1:
            d = '0' + d
        date_ = m + d + y
        if len(date_) == 6:  # 2-digit year variant (e.g. 2011 captures)
            return datetime.datetime.strptime(date_, '%m%d%y')
        else:
            return datetime.datetime.strptime(date_, '%m%d%Y')
    except Exception:
        print(x)
        raise
def sort_dates(donors2imgs):
    """Chronologically order each donor's image-name list.

    The ordering key is the capture date embedded in the filename (see
    key_func). Mutates *donors2imgs* in place and returns it.
    """
    for donor, names in donors2imgs.items():
        donors2imgs[donor] = sorted(names, key=key_func)
    return donors2imgs
def convert_to_time(img_name):
    """Parse the capture date embedded in an image filename.

    The date follows the last 'D_' marker as month_day_year; the year may
    be two or four digits, and duplicate shots carry a trailing " (n)"
    before the extension.
    """
    tail = img_name.split('D_')[-1]
    # Duplicate shots end in " (n).JPG"; cut at '(' there, else at the '.'.
    if '(' in img_name:
        raw = tail.split('(')[0].strip()
    else:
        raw = tail.split('.')[0].strip()
    parts = raw.split('_')
    month, day, year = parts[0], parts[1], parts[2]
    # Zero-pad so strptime sees fixed-width month/day fields.
    stamp = month.zfill(2) + day.zfill(2) + year
    # Six characters means a 2-digit year (%y); otherwise 4 digits (%Y).
    fmt = '%m%d%y' if len(stamp) == 6 else '%m%d%Y'
    return datetime.datetime.strptime(stamp, fmt)
def cal_day_from_deth(donors2imgs_sorted):
    """Re-index each donor's chronologically sorted image list by the number
    of days elapsed since that donor's first photo.

    Replaces every donor's list with a dict {days_since_first: [names]} and
    returns the mutated mapping.
    """
    for donor, ordered_names in donors2imgs_sorted.items():
        by_day = {}
        start = None
        for name in ordered_names:
            shot = convert_to_time(name)
            if start is None:
                start = shot  # day zero: the donor's earliest image
            elapsed = (shot - start).days
            if elapsed not in by_day:
                by_day[elapsed] = []
            by_day[elapsed].append(name)
        donors2imgs_sorted[donor] = by_day
    return donors2imgs_sorted
# this a dictionary with each donor_id as keys and values are another
#dictionary with keys being xth days since day one and the values are a
#list of images that belong to day xth for that donor.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--embeding_file', type = str)
    args = parser.parse_args()
    embedings_file = args.embeding_file #sys.argv[1] # This should be a pca version the embedings
    donors2imgs = {}        # donor_id -> list of that donor's image names
    donors2img2embed = {}   # donor_id -> {image name -> embedding vector}
    # NOTE(review): imgname2add (and `vectors` below) are never used -- confirm
    # they are leftovers before removing.
    imgname2add = {}
    with open(embedings_file, 'r') as csv_file:
        data = csv.reader(csv_file,delimiter = '\n')
        vectors = []
        for row in data:
            row = row[0]
            # Each line is "<path>JPG <val1> <val2> ..."; split on the
            # extension to separate the name from the feature values.
            row= row.split('JPG')
            img_name = row[0] + 'JPG'
            embeding = row[1].strip()
            embeding = embeding.replace(' ', ',')
            embeding = ast.literal_eval("[" + embeding[1:-1] + "]") # this embeding is a list now
            # The donor id is the path component just before '/Daily'.
            donor_id = img_name.split('/Daily')[0].split('/')[-1]
            if donor_id not in donors2img2embed and donor_id not in donors2imgs:
                donors2img2embed[donor_id] = {}
                donors2imgs[donor_id] = [] # a list for all of the images belonging to the same donor
            donors2img2embed[donor_id][img_name] = embeding
            # this a dictionary with each donor_id as keys and values are another dictionary
            # with keys being an image and the values being the feature vector for that imag
            donors2imgs[donor_id].append(img_name)
    donors2imgs_sorted = sort_dates(donors2imgs) # this sorts the images for a donor based on their dates
    # donor_id -> {days since donor's first image -> [image names]}
    donor2day2imgs = cal_day_from_deth(donors2imgs_sorted)
    day2clus2emb = sequence.sequence_finder(donors2img2embed, donor2day2imgs)
|
saramsv/decaying_human_body_part_classifier | classify_all_1M_imgs.py | #!/usr/bin/env python
## run : python3 predict_bodypart.py file_name
#each line in file_name is a path
import cv2
import numpy as np
from tensorflow.keras.models import load_model
import csv
import sys
import os
from tensorflow.keras.preprocessing import image
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# Input resolutions expected by each backbone family.
inception_img_size = 299
vgg_resnet_img_size = 224
batch_size = 2
# CSV of image paths (one per line), supplied on the command line.
test_csv = sys.argv[1]
# Fixed label vocabulary: the model's output index maps into this list.
body_parts = ['arm', 'hand', 'foot', 'legs','fullbody', 'head','backside', 'torso', 'stake', 'plastic']
model_name = 'inception_epoch_-044-_acc_0.995226-_val_acc_0.96135.h5'#'models/inception_epoch_-058-_acc_0.999475-_val_acc_0.94976.h5'#'inception_10000_epoch_-118-_acc_0.999569-_val_acc_0.98315.h5'
model_type = 'inception'
#model = load_model("models/" + model_type + '/' + model_name)
model = load_model(model_name)
not_found = 0  # tally of paths that failed to load/predict
df = pd.read_csv(test_csv, names = ['path'])
print("The results will be saved in", test_csv + '_preds')
# Output file: one "path:label:confidence%" line per successfully
# classified image.
f = open(test_csv + '_preds', 'w')
for path in df['path']:
    test_data = []
    img_names = []
    try:
        # model_type is fixed to 'inception' above, so the resnet/vgg
        # branch is effectively dead in this configuration.
        if model_type == 'resnet' or model_type == 'vgg':
            img = image.load_img(path.strip(),
                    target_size = (vgg_resnet_img_size, vgg_resnet_img_size, 3),
                    grayscale = False)
        elif model_type == 'inception':
            img = image.load_img(path.strip(),
                    target_size = (inception_img_size, inception_img_size, 3),
                    grayscale = False)
        # Scale pixels into [0, 1] to match training preprocessing.
        img = image.img_to_array(img)
        img = img/255
        test_data.append(img)
        img_names.append(path)
        test = np.array(test_data)
        prediction = model.predict(test)
        pred_classes = prediction.argmax(axis=-1)
        conf = prediction.max(axis=-1)
        for i, label in enumerate(list(pred_classes)):
            f.write("{}:{}:{:.2f}\n".format(img_names[i], body_parts[label],conf[i]*100))
        #print(img_names[i]+ ":", body_parts[label],":", conf[i])
    # NOTE(review): the bare except treats any failure as "not found" and
    # silently skips the image.
    except:
        not_found += 1
f.close()
|
saramsv/decaying_human_body_part_classifier | model_evaluation.py | <reponame>saramsv/decaying_human_body_part_classifier
#!/usr/bin/env python
## run : python3 predict_bodypart.py gt_file_name
#gt_file_name has a path to an image and the grount truth label seperated by ":" in each line
import os
import cv2
import csv
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from keras.models import load_model
from keras.preprocessing import image
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# Helper function
def conf_matrix(f, gt, pred, body_parts):
    """Report mAP, mean recall and accuracy for a set of predictions.

    Writes one "mAP: .., mrecall: .., aac: .." line both to stdout and to
    the open results file.

    Args:
        f: writable text handle for the evaluation report.
        gt: list of ground-truth label strings.
        pred: list of predicted label strings, parallel to *gt*.
        body_parts: full label vocabulary; fixes the confusion-matrix order.

    NOTE(review): for any label absent from both gt and pred, TP+FP (or
    TP+FN) is 0, so AP/recall contain NaN and np.mean propagates it --
    confirm whether masked means were intended.
    """
    CM = confusion_matrix(gt, pred, labels=body_parts)
    TP = CM.diagonal()
    FN = np.sum(CM, axis = 1) - TP  # row sums minus diagonal
    FP = np.sum(CM, axis = 0) - TP  # column sums minus diagonal
    AP = TP/(TP + FP)        # per-class precision
    mAP = np.mean(AP)
    recall = TP/(TP + FN)    # per-class recall
    mrecall = np.mean(recall)
    print("mAP: {}, mrecall: {}, aac: {}\n".format(mAP, mrecall, accuracy_score(gt, pred)))
    f.write("mAP: {}, mrecall: {}, aac: {}\n\n".format(mAP, mrecall, accuracy_score(gt, pred)))
# MAIN FUNC
if __name__ == "__main__":
    # Evaluate every saved checkpoint under models/{resnet,vgg,inception}
    # against a "path:gt_label" test CSV, reporting top-1 and top-3 metrics.
    inception_img_size = 299
    vgg_resnet_img_size = 224
    test_csv = sys.argv[1]
    body_parts = ['arm', 'hand', 'foot', 'legs','fullbody',
            'head','backside', 'torso', 'stake', 'plastic']
    dir_names = ['resnet', 'vgg', 'inception']
    model_dir_name = 'models'
    f = open("eval_top3.res", "w")
    for name in dir_names:
        model_type = name
        dir_info = os.walk(model_dir_name + "/" + model_type)
        #print("Dir: {}".format(model_dir_name + "/" + model_type))
        for root, dirs, models in dir_info:
            # Load the whole test set once per backbone type (the input
            # resolution depends on the architecture).
            test_data = []
            img_names = []
            not_found = 0  # images that failed to load
            df = pd.read_csv(test_csv, names = ['path', 'gt'], sep = ':')
            print("loading data")
            for path in tqdm(df['path'], total=len(df)):
                try:
                    if model_type == 'resnet' or model_type == 'vgg':
                        img = image.load_img(path.strip(),
                                target_size = (vgg_resnet_img_size, vgg_resnet_img_size, 3),
                                grayscale = False)
                    elif model_type == 'inception':
                        img = image.load_img(path.strip(),
                                target_size = (inception_img_size, inception_img_size, 3),
                                grayscale = False)
                    # Scale pixels into [0, 1] to match training preprocessing.
                    img = image.img_to_array(img)
                    img = img/255
                    test_data.append(img)
                    img_names.append(path)
                   # break
                except:
                    not_found += 1
            test = np.array(test_data)
            # Evaluate each checkpoint file found for this backbone.
            for m in models:
                print("\n{}".format(model_dir_name + "/" + model_type + '/' + m))
                f.write("{}\n".format(model_dir_name + "/" + model_type + '/' + m))
                model = load_model(model_dir_name + "/" + model_type + '/' + m, compile=False)
                ####### top 1 ######
                f.write("top 1:\n")
                #print("top 1:")
                #print(m)
                gt = list(df['gt'].values)
                prediction = model.predict(test)
                pred_classes = prediction.argmax(axis=-1)
                pred = list(pred_classes)
                pred = [body_parts[x] for x in pred]
                conf_matrix(f, gt, pred, body_parts)
                ###### top 3 ######
                # Count a sample as correct if any of its 3 most confident
                # classes matches the ground truth.
                #print("top k = 3:")
                f.write("top 3:\n")
                pred = []
                k = 3
                for index, p in enumerate(prediction): #p is for each image
                    preds = p.argsort()[0-k:] # the top k confident predictions
                    added = False
                    # NOTE(review): the loop variable below shadows the outer
                    # `p`; harmless here since `p` is not reused afterwards.
                    for p in preds:
                        predicted = body_parts[p]
                        if predicted == gt[index]: # if one of the top k is the right prediction
                            pred.append(predicted)
                            added = True
                            break
                    if added == False:
                        pred.append(body_parts[preds[-1]]) # the most confident
                conf_matrix(f, gt, pred, body_parts)
f.close() |
saramsv/decaying_human_body_part_classifier | path2embeding.py | <reponame>saramsv/decaying_human_body_part_classifier<filename>path2embeding.py<gh_stars>0
# only use it to generate the resnet features then use the other script to convert to pca
#python3 path2embeding.py --img_path data/some_paths --weight_type pt > resnet_feautres_filename
#then clean the [] and , from the all_embedings.csv
import keras
from keras.applications import ResNet50
from keras.applications.vgg16 import VGG16
from keras.applications import VGG19
from keras.applications.resnet50 import preprocess_input
from keras.applications.vgg16 import preprocess_input
from keras.models import Sequential
from keras.layers import Dense, Flatten, GlobalAveragePooling2D
from keras.callbacks import TensorBoard
from keras.layers import Dropout, Dense, Input
from keras import Model
import numpy as np
from keras.models import load_model
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import cv2
import sys
import csv
import argparse
# --- CLI --------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', type = str)      # file with one image path per line
parser.add_argument('--weight_type', type = str)   # 'pt' = pretrained, 'ft' = fine-tuned
args = parser.parse_args()
imgs_path = args.img_path
weight_type = args.weight_type
img_size = 224
resnet_weigth_path = '../ImageSimilarityMultiMethods/data/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
fine_tuned_resnet_weight_path = 'ResNet50/logs/ft-41-0.87.hdf5'
# --- Feature-extraction model (removed large commented-out experiments) -----
clustering_model = Sequential()
if weight_type == 'pt':  # ImageNet-pretrained ResNet50 backbone
    # NOTE(review): pooling='ave' is not a valid Keras option ('avg'/'max');
    # it appears to fall through to no pooling, leaving the raw conv output
    # (flattened below). Left unchanged to keep the output dimensionality
    # identical -- confirm whether 'avg' was intended.
    clustering_model.add(ResNet50(include_top = False, pooling='ave'))
    clustering_model.layers[0].trainable = False
elif weight_type == 'ft':  # fine-tuned 9-class model with the head stripped
    num_classes = 9
    base_model = ResNet50
    base_model = base_model(weights = 'imagenet', include_top = False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation= 'relu')(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    clustering_model = Model(inputs = base_model.input, outputs = predictions)
    clustering_model.load_weights(fine_tuned_resnet_weight_path)
    # Pop the classification layers so predict() yields embeddings instead
    # of class scores.
    clustering_model.layers.pop()
    clustering_model.layers.pop()
    clustering_model.outputs = [clustering_model.layers[-1].output]
    clustering_model.layers[-1].outbound_nodes = []
    clustering_model.layers[0].trainable = False # this would be the base model part
clustering_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
# --- Embed every listed image -----------------------------------------------
missed_imgs = []  # paths that failed to load or predict
with open(imgs_path) as csv_file:
    paths = csv.reader (csv_file, delimiter='\n')
    for path in paths:
        row = []
        correct_path = path[0]
        # BUG FIX: removed three `correct_path.replace(...)` calls whose
        # results were discarded (str.replace returns a new string, it does
        # not mutate) -- they were no-ops, and cv2.imread needs no shell
        # escaping anyway. Also dropped the unused `rows`/`img_names` lists.
        try:
            img_object = cv2.imread(correct_path)
            img_object = cv2.resize(img_object, (img_size, img_size))
            img_object = np.array(img_object, dtype = np.float64)
            img_object = preprocess_input(np.expand_dims(img_object.copy(), axis = 0))
            resnet_feature = clustering_model.predict(img_object)
            resnet_feature = np.array(resnet_feature)
            # One output line per image: path followed by its flat feature vector.
            row.append(correct_path)
            row.extend(list(resnet_feature.flatten()))
            print(row)
        except:
            missed_imgs.append(path)
|
NuthanReddy/mp3-meta-cleanser | mp3clean.py | from __future__ import print_function
import os, re, argparse
from eyed3.id3 import Tag
class CleanMyMusic:
    """Command-line tool that batch-cleans mp3 metadata and filenames.

    Each CLI option (title-casing, trimming, pattern cleansing, track
    numbering, ...) maps to one private ``_xxx`` method; ``main`` parses the
    arguments and dispatches to every selected operation over the ``.mp3``
    files in the given directory.

    NOTE: several methods build ``self.t._set<Entity>(...)`` calls via
    ``eval`` on strings drawn from ``self.t_map`` and the restricted CLI
    choices, so the evaluated text cannot contain arbitrary input.
    """

    def __init__(self):
        self.regex = r""
        self.t = Tag()  # shared eyed3 tag object, re-parsed for each file
        self.parser = argparse.ArgumentParser(description='A Python Script to clean/format mp3 file metadata',
                                              prog='mp3clean', usage='python %(prog)s.py [options]',
                                              formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                              epilog="And that's how its done")
        self.args = None
        # CLI entity name -> eyed3 Tag attribute path (used in eval strings).
        self.t_map = {'Title': 'title', 'Album': 'album', 'Artist': 'artist', 'AlbumArtist': 'album_artist',
                      'Genre': 'genre.name'}

    def main(self):
        """Parse the CLI and run every operation the user selected."""
        self._parse_args()
        if self.args.case:
            self._title_case()
        if self.args.trim == 'r':
            self._rtrim()
        if self.args.trim == 'l':
            self._ltrim()
        if self.args.nocomments:
            self._no_comments()
        if self.args.set:
            self._set_value()
        if self.args.cleanse:
            self._cleanse()
        if self.args.trackno:
            self._get_track()
        if self.args.invtrackno:
            self._inv_track()
        if self.args.gettrack:
            self._set_track()
        if self.args.newtrack:
            self._new_track()
        if self.args.gettitle:
            self._get_title()

    def _title_case(self):
        """Title-case the selected tag entities and/or the filename."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                if "All" in self.args.entity:
                    for et in self.t_map:
                        eval('self.t._set' + et + '(self.t.' + self.t_map[et] + '.title())')
                else:
                    for et in self.args.entity:
                        if et != "Filename":
                            try:
                                eval('self.t._set' + et + '(self.t.' + self.t_map[et] + '.title())')
                            except NotImplementedError:
                                continue
                self.t.save()
                if "Filename" in self.args.entity or "All" in self.args.entity:
                    os.rename(file_path, file_path[:-4].title() + ".mp3")

    def _rtrim(self):
        """Cut the last --num characters off the selected entities."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                if "All" in self.args.entity:
                    for et in self.t_map:
                        eval('self.t._set' + et + '(self.t.' + self.t_map[et] + '[:-self.args.num])')
                else:
                    for et in self.args.entity:
                        if et != "Filename":
                            eval('self.t._set' + et + '(self.t.' + self.t_map[et] + '[:-self.args.num])')
                self.t.save()
                if "Filename" in self.args.entity or "All" in self.args.entity:
                    # Slice before ".mp3" so the extension survives the trim.
                    os.rename(file_path, os.path.join(self.args.dir, filename[:-4 - self.args.num]) + ".mp3")

    def _ltrim(self):
        """Cut the first --num characters off the selected entities."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                if "All" in self.args.entity:
                    for et in self.t_map:
                        eval('self.t._set' + et + '(self.t.' + self.t_map[et] + '[self.args.num:])')
                else:
                    for et in self.args.entity:
                        if et != "Filename":
                            eval('self.t._set' + et + '(self.t.' + self.t_map[et] + '[self.args.num:])')
                self.t.save()
                if "Filename" in self.args.entity or "All" in self.args.entity:
                    os.rename(file_path, os.path.join(self.args.dir, filename[self.args.num:]))

    def _no_comments(self):
        """Blank out the comment frame on every mp3 in the directory."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                self.t.comments.set(u"")
                self.t.save()

    def _set_value(self):
        """Write the joined --value words into the --set tag entity."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                eval('self.t._set' + self.args.set + '(u"\u0020".join(self.args.value))')
                self.t.save()

    @staticmethod
    def _clean_using_regex(strng, regex):
        """Return *strng* with the pattern selected by *regex* removed.

        BUG FIXES: the bracket/parenthesis patterns previously omitted the
        '.' wildcard, so they only matched runs of the opening character and
        never stripped "[anything]" / "(anything)"; and the '_' CLI choice
        was unreachable because its branch tested '-' a second time.
        """
        if regex == "[*]":
            return re.sub(r"\[.*?\]", '', strng)   # strip "[...]" segments
        elif regex == "(*)":
            return re.sub(r"\(.*?\)", '', strng)   # strip "(...)" segments
        elif regex == "* -":
            return strng.split(" -")[-1].strip()   # keep text after last " -"
        elif regex == "- *":
            return strng.split("- ")[0].strip()    # keep text before first "- "
        elif regex == ":: *":
            return strng.split(":: ")[0].strip()
        elif regex == '-':
            return re.sub("-", '', strng)          # drop hyphens
        elif regex == '_':
            return re.sub("_", ' ', strng)         # underscores -> spaces
        else:
            return strng

    def _cleanse(self):
        """Apply the --cleanse pattern to the selected entities/filenames."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path.strip())
                for et in self.args.entity:
                    if et == "Filename":
                        old = filename
                        new = self._clean_using_regex(old, self.args.cleanse)
                        os.rename(file_path, os.path.join(self.args.dir, new) + ".mp3")
                    else:
                        old = eval('self.t.' + self.t_map[et])
                        new = re.sub(' ', u"\u0020", self._clean_using_regex(old, self.args.cleanse))
                        eval('self.t._set' + et + '(u\"' + new + '\")')
                        self.t.save()

    def _get_track(self):
        """Take the leading two filename characters as the track number."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                # (track, total) tuple; total hard-coded to 5 as elsewhere.
                self.t.track_num = (filename[:2], 5)
                self.t.save()

    def _set_track(self):
        """Take the leading two title characters as the track number."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                self.t.track_num = (self.t.title[:2], 5)
                self.t.save()

    def _inv_track(self):
        """Prefix each filename with its zero-padded tag track number."""
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                if len(str(self.t.track_num[0])) == 1:
                    new_name = "0" + str(self.t.track_num[0]) + " " + filename
                else:
                    new_name = str(self.t.track_num[0]) + " " + filename
                new_path = os.path.join(self.args.dir, new_name)
                os.rename(file_path, new_path)

    def _new_track(self):
        """Assign sequential track numbers (directory-listing order) and
        prefix each filename with its new, zero-padded number."""
        i = 1
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                self.t.parse(file_path)
                self.t.track_num = (i, 5)
                self.t.save()
                if len(str(self.t.track_num[0])) == 1:
                    new_name = "0" + str(i) + " " + filename
                else:
                    new_name = str(i) + " " + filename
                new_path = os.path.join(self.args.dir, new_name)
                os.rename(file_path, new_path)
                i += 1

    def _get_title(self):
        """Copy the filename (sans extension) into the Title tag."""
        # fails for single word titles
        for filename in os.listdir(self.args.dir):
            if filename.endswith(".mp3"):
                file_path = os.path.join(self.args.dir, filename)
                try:
                    self.t.parse(file_path)
                    new_title = re.sub(' ', u"\u0020", filename[:-4])
                    self.t._setTitle(new_title)
                    self.t.save()
                except TypeError:
                    continue

    def _parse_args(self):
        """Declare and parse the full CLI surface into self.args."""
        group = self.parser.add_argument_group('options')
        self.parser.add_argument('--version', action='version', version='%(prog)s 0.5.0')
        self.parser.add_argument('dir', help='Directory of the files to be formatted', type=str)
        group.add_argument('-c', '--case', help='Change the case to Title Case', action='store_true')
        group.add_argument('-nc', '--nocomments', help='Remove comments if any', action='store_true')
        group.add_argument('-tn', '--trackno', help='Get Track No from Filename', action='store_true')
        group.add_argument('-itn', '--invtrackno', help='Add Track No to Filename', action='store_true')
        group.add_argument('-cl', '--cleanse', help='Remove unwanted characters that match a pattern',
                           choices=['[*]', '(*)', '- *', '* -', ':: *', '_', '-'], type=str)
        group.add_argument('-gt', '--gettrack', help='Gets track number from Title', action='store_true')
        group.add_argument('-gtn', '--gettitle', help='Gets Title from Filename', action='store_true')
        group.add_argument('-nt', '--newtrack', help='Adds new track numbers', action='store_true')
        group.add_argument('-e', '--entity', help='What to format', required=False, action='append',
                           choices=['Title', 'Filename', 'Album', 'Artist', 'AlbumArtist', 'Genre', 'All'], type=str)
        group.add_argument('-t', '--trim', help='Trim characters to left or right', choices=['l', 'r'],
                           type=str)
        group.add_argument('-n', '--num', help='Number of character to be trimmed', type=int)
        group.add_argument('-s', '--set', help='Set any option',
                           choices=['Album', 'Artist', 'AlbumArtist', 'Genre'], type=str)
        group.add_argument('-v', '--value', nargs="*", help='Value of choice given in set', type=str)
        self.args = self.parser.parse_args()
        # NOTE(review): self.args.entity is a list (append action), so this
        # equality with the string "All" can never be true; the per-method
        # '"All" in self.args.entity' checks do the real work. Left as-is to
        # avoid changing the Filename-handling behaviour.
        if self.args.entity == "All":
            self.args.entity = self.t_map.keys()
if __name__ == '__main__':
    # Script entry point: parse the CLI and apply the selected mp3 clean-ups.
    CleanMyMusic().main()
|
echel0nn/project-management-system | register/admin.py | from django.contrib import admin
from .models import Team
from .models import UserProfile
from .models import Invite
class TeamAdmin(admin.ModelAdmin):
    # Columns shown on the Team change-list page.
    list_display = ['name','email','city','found_date']
    # Fields queried by the admin search box.
    search_fields = ['name', 'social_name','city']
class UserProfileAdmin(admin.ModelAdmin):
    # Columns shown on the UserProfile change-list page.
    list_display = ['user', 'team',]
class InviteAdmin(admin.ModelAdmin):
    # Columns shown and fields searched for Invite records.
    list_display = ['inviter', 'invited',]
    search_fields = ['inviter', 'invited',]
#    list_filter = ['inviter', 'invited,']
# Register your models here.
admin.site.register(Team, TeamAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Invite, InviteAdmin)
|
echel0nn/project-management-system | projects/views.py | from django.shortcuts import render
from django.db.models import Avg
from register.models import Project
from projects.models import Task
from projects.forms import TaskRegistrationForm
from projects.forms import ProjectRegistrationForm
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def projects(request):
    """Dashboard view: all projects, the average completion percentage,
    every task, and the subset of tasks flagged overdue (due == '2')."""
    all_projects = Project.objects.all()
    completion_avg = Project.objects.all().aggregate(
        Avg('complete_per'))['complete_per__avg']
    all_tasks = Task.objects.all()
    return render(request, 'projects/projects.html', {
        'avg_projects': completion_avg,
        'projects': all_projects,
        'tasks': all_tasks,
        'overdue_tasks': all_tasks.filter(due='2'),
    })
@login_required
def newTask(request):
    """Render the task-creation form; on POST, validate and save a Task.

    CONSISTENCY FIX: after a successful save the form is re-instantiated
    empty (matching newProject below) instead of redisplaying the bound,
    already-submitted form data.
    """
    if request.method == 'POST':
        form = TaskRegistrationForm(request.POST)
        context = {'form': form}
        if form.is_valid():
            form.save()
            created = True
            # Present a fresh, unbound form alongside the success flag.
            form = TaskRegistrationForm()
            context = {
                'created': created,
                'form': form,
            }
            return render(request, 'projects/new_task.html', context)
        else:
            # Invalid submission: re-render with the bound form's errors.
            return render(request, 'projects/new_task.html', context)
    else:
        form = TaskRegistrationForm()
        context = {
            'form': form,
        }
        return render(request, 'projects/new_task.html', context)
@login_required
def newProject(request):
    """Show the project-creation form; on a valid POST, persist the new
    Project and redisplay an empty form together with a success flag."""
    if request.method != 'POST':
        # Plain GET: show an unbound form.
        blank = ProjectRegistrationForm()
        return render(request, 'projects/new_project.html', {'form': blank})
    form = ProjectRegistrationForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so validation errors are shown.
        return render(request, 'projects/new_project.html', {'form': form})
    form.save()
    return render(request, 'projects/new_project.html', {
        'created': True,
        'form': ProjectRegistrationForm(),
    })
|
echel0nn/project-management-system | register/urls.py | from django.urls import path
from . import views
# URL namespace used by {% url %} / reverse() lookups, e.g. 'register:new-user'.
app_name = 'register'
urlpatterns = [
    # Account and team creation.
    path('new-user/', views.register, name='new-user'),
    path('new-team/', views.newTeam, name='new-team'),
    # User listing and profiles.
    path('users/', views.usersView, name='users'),
    path('users/profile', views.profile, name='profile'),
    path('users/<int:profile_id>/', views.user_view, name='user'),
    # Invitation workflow: send, list, accept, delete.
    path('users/invite/<int:profile_id>/', views.invite, name='invite'),
    path('users/invites/', views.invites, name='invites'),
    path('users/invites/accept/<int:invite_id>/',
         views.acceptInvite, name='accept-invite'),
    path('users/invites/delete/<int:invite_id>/',
         views.deleteInvite, name='delete-invite'),
    # Friends list management.
    path('users/friends/', views.friends, name='friends'),
    path('users/friends/remove/<int:profile_id>/',
         views.remove_friend, name='remove-friend'),
]
|
dokla/moncash_python | moncash/configuration.py | <reponame>dokla/moncash_python<gh_stars>10-100
from moncash.http import Http
from moncash.exceptions import ConfigurationError
from moncash.environment import Environment
class Configuration(object):
    """Holds the MonCash API credentials and the target environment.

    The API version is fixed at "v1".
    """

    def __init__(self, client_id=None, client_secret=None, environment=None):
        self.client_id = client_id
        self.client_secret = client_secret
        self.environment = environment
        self.api_version = "v1"

    def http(self):
        """Return an Http client bound to this configuration."""
        return Http(self)

    def base_url(self):
        """Concatenate the environment's protocol and host into the API base URL."""
        env = self.environment
        return env.protocol + env.host
|
dokla/moncash_python | moncash/gateway.py | from moncash.configuration import Configuration
from moncash.payement import Payment
class Moncash(object):
    """Facade over the MonCash gateway: owns the configuration and payment service.

    Accepts either a ready-made Configuration instance or the keyword
    arguments (client_id, client_secret, environment) needed to build one.
    """

    def __init__(self, config=None, **kwargs):
        if not isinstance(config, Configuration):
            # No Configuration supplied: assemble one from keyword arguments.
            config = Configuration(
                client_id=kwargs.get("client_id"),
                client_secret=kwargs.get("client_secret"),
                environment=kwargs.get("environment"),
            )
        self.config = config
        self.payment = Payment(self)
|
dokla/moncash_python | moncash/version.py | <gh_stars>10-100
version="1.0.2" |
dokla/moncash_python | moncash/__init__.py | <filename>moncash/__init__.py
"""
_
/\/\ ___ _ __ ___ __ _ ___| |__
/ \ / _ \| '_ \ / __/ _` / __| '_ \
/ /\/\ \ (_) | | | | (_| (_| \__ \ | | |
\/ \/\___/|_| |_|\___\__,_|___/_| |_|
This is an unofficial wrapper providing convenient access to
the MonCash API for applications written in Python
:copyright: (c) 2020 by <NAME>.
:license: Apache 2.0, see LICENSE for more details.
"""
from moncash.gateway import Moncash
from moncash.configuration import Configuration
from moncash.environment import Environment
from moncash.version import version |
ITTV-tools/oilfoxpy | setup.py | <filename>setup.py<gh_stars>1-10
import setuptools
setuptools.setup(
    name="oilfoxpy",
    version="0.4",
    author="ittv-tools",
    description="API call to oilfox",
    # NOTE(review): "olifoxpy" looks like a typo for "oilfoxpy" — confirm the
    # actual repository URL before changing it.
    url="https://github.com/ITTV-tools/olifoxpy",
    py_modules=["oilfox"],
    package_dir={'': 'src'},
    # Bug fix: 'json' is part of the standard library and must not be listed
    # as a dependency; the 'jwt' import is provided by the PyPI distribution
    # 'PyJWT'.
    install_requires=['PyJWT', 'requests'],
    # Bug fix: the keyword was misspelled 'classifieres', so setuptools
    # silently ignored it and the classifiers were never applied.
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9"
    ]
)
|
bincrafters/conan-http-parser | conanfile.py | from conans.errors import ConanInvalidConfiguration
import os
import shutil
from conans import ConanFile, CMake, tools
class HttpParserConan(ConanFile):
    """Conan package recipe for nodejs/http-parser (HTTP request/response parser in C)."""
    name = "http-parser"
    version = "2.9.0"
    description = "http request/response parser for c"
    url = "https://github.com/bincrafters/conan-http-parser"
    homepage = "https://github.com/nodejs/http-parser"
    license = "MIT"
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {'shared': False, 'fPIC': True}
    exports = "LICENSE.md"
    exports_sources = "CMakeLists.txt"
    generators = "cmake"
    _source_subfolder = "source_subfolder"

    def config_options(self):
        # fPIC has no meaning on Windows, so drop the option there.
        if self.settings.os == "Windows":
            self.options.remove("fPIC")
        if self.settings.compiler == "Visual Studio" and self.options.shared:
            raise ConanInvalidConfiguration("Shared builds on Windows are not supported")

    def configure(self):
        # Pure C library: the C++ standard-library settings do not apply.
        del self.settings.compiler.libcxx
        del self.settings.compiler.cppstd

    def source(self):
        # Strip anything after a "p" to derive the upstream tag — presumably
        # to support recipe versions with a patch suffix like "2.9.0p1";
        # TODO confirm intent.
        upstream_ver = self.version.split("p")[0]
        tools.get("https://github.com/nodejs/http-parser/archive/v%s.tar.gz" % upstream_ver)
        os.rename('http-parser-%s' % upstream_ver, self._source_subfolder)
        # Drop our exported CMake script into the upstream tree (upstream has
        # no CMake build of its own in this layout).
        shutil.copy("CMakeLists.txt",
                    os.path.join(self._source_subfolder, "CMakeLists.txt"))

    def build(self):
        cmake = CMake(self)
        cmake.definitions['WITH_CONAN'] = True
        cmake.definitions['WITH_TESTS'] = False
        # BUILD_SHARED_LIBS is set automatically
        cmake.configure(source_folder=self._source_subfolder)
        cmake.build()
        # Installation happens here; package() below only adds the license.
        cmake.install()

    def package(self):
        self.copy("LICENSE-MIT", dst="licenses", src=self._source_subfolder, ignore_case=True, keep_path=False)

    def package_info(self):
        self.cpp_info.libs = tools.collect_libs(self)
        # Fail loudly if the build produced no libraries at all.
        if not self.cpp_info.libs:
            raise ConanInvalidConfiguration("No libs collected")
|
compositive/node-sketch-bridge | binding.gyp | <reponame>compositive/node-sketch-bridge
{
"targets": [
{
"target_name": "node_sketch_bridge",
"sources": [
"main.m"
],
"conditions": [
[
"OS=='mac'",
{
"defines": [
"__MACOSX_CORE__"
],
"link_settings": {
"libraries": [
"-framework CoreFoundation",
"-framework AppKit",
"-framework CoreGraphics"
]
},
"ccflags": [],
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES"
}
}
]
]
}
]
}
|
pinklite34/passes-rest-samples | python/main.py | <reponame>pinklite34/passes-rest-samples
"""
Copyright 2019 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import config # contains constants
import services # methods that returns JWTs to be used to rend "save to phone" button
import uuid # std library for unique identifier generation
SAVE_LINK = "https://pay.google.com/gp/v/save/" # Save link that uses JWT. See https://developers.google.com/pay/passes/guides/get-started/implementing-the-api/save-to-google-pay#add-link-to-email
def demoObjectJwt(verticalType, classId, objectId):
    """Generate and print a signed "object" JWT for the given pass class/object.

    Makes one REST call (pre-inserting the class) via services.makeObjectJwt,
    then prints the JWT, a decoding hint, and a browser-testable save link.
    """
    print('''
#############################
#
# Generates a signed "object" JWT.
# 1 REST call is made to pre-insert class.
#
# This JWT can be used in JS web button.
# If this JWT only contains 1 object, usually isn't too long; can be used in Android intents/redirects.
#
#############################
''')
    objectJwt = services.makeObjectJwt(verticalType, classId, objectId)
    if objectJwt is not None:
        print('This is an "object" jwt:\n%s\n' % (objectJwt.decode('UTF-8')) )
        print('you can decode it with a tool to see the unsigned JWT representation:\n%s\n' % ('https://developers.google.com/pay/passes/support/testing#test-and-debug-a-jwt') )
        print('Try this save link in your browser:\n%s%s' % (SAVE_LINK, objectJwt.decode('UTF-8')))
    return
def demoFatJwt(verticalType, classId, objectId):
    """Generate and print a signed "fat" JWT (class + object embedded, no REST calls).

    Fat JWTs are suitable only for the JS web button: they are too long for
    Android intents and may break in redirects.
    """
    print('''
#############################
#
# Generates a signed "fat" JWT.
# No REST calls made.
#
# Use fat JWT in JS web button.
# Fat JWT is too long to be used in Android intents.
# Possibly might break in redirects.
#
#############################
''')
    fatJwt = services.makeFatJwt(verticalType, classId, objectId)
    if fatJwt is not None:
        print('This is a "fat" jwt:\n%s\n' % (fatJwt.decode('UTF-8')) )
        print('you can decode it with a tool to see the unsigned JWT representation:\n%s\n' % ('https://developers.google.com/pay/passes/support/testing#test-and-debug-a-jwt') )
        print('Try this save link in your browser:\n%s%s\n' % (SAVE_LINK, fatJwt.decode('UTF-8')))
        print('However, because a "fat" jwt is long, they are not suited for hyperlinks (get truncated). Recommend only using "fat" JWt with web-JS button only. Check:\n%s' % ('https://developers.google.com/pay/passes/reference/s2w-reference'))
    return
def demoSkinnyJwt(verticalType, classId, objectId):
    """Generate and print a signed "skinny" JWT (class and object pre-inserted).

    Makes two REST calls via services.makeSkinnyJwt; the resulting JWT only
    references the object, so it is the shortest form — recommended for
    Android intents/redirects.
    """
    print('''
#############################
#
# Generates a signed "skinny" JWT.
# 2 REST calls are made:
#   x1 pre-insert one classes
#   x1 pre-insert one object which uses previously inserted class
#
# This JWT can be used in JS web button.
# This is the shortest type of JWT; recommended for Android intents/redirects.
#
#############################
''')
    skinnyJwt = services.makeSkinnyJwt(verticalType, classId, objectId)
    if skinnyJwt is not None:
        print('This is an "skinny" jwt:\n%s\n' % (skinnyJwt.decode('UTF-8')) )
        print('you can decode it with a tool to see the unsigned JWT representation:\n%s\n' % ('https://developers.google.com/pay/passes/support/testing#test-and-debug-a-jwt') )
        print('Try this save link in your browser:\n%s%s\n' % (SAVE_LINK, skinnyJwt.decode('UTF-8')))
        print('this is the shortest type of JWT; recommended for Android intents/redirects\n')
    return
#############################
#
# RUNNER
#
# This script demonstrates using your services which make JWTs
#
# The JWTs are used to generate save links or JS Web buttons to save Pass(es)
#
# 1) Get credentials and check prerequisistes in: https://developers.google.com/pay/passes/samples/quickstart-python.
# 2) Modify config.py so the credentials are correct.
# 3) Try running it: python main.py . Check terminal output for server response, JWT, and save links.
#
#############################
# Prompt loop: repeat until the user picks a valid pass vertical or quits.
choice = ''
while choice not in ['b', 'e', 'g', 'l', 'o', 't', 'q']:
    choice = input(('\n\n*****************************\n'
                    'Which pass type would you like to demo?\n'
                    'b - Boarding Pass\n'
                    'e - Event Ticket\n'
                    'g - Gift Card\n'
                    'l - Loyalty\n'
                    'o - Offer\n'
                    't - Transit\n'
                    'q - Quit\n'
                    '\n\nEnter your choice:'))
    if choice == 'b':
        verticalType = services.VerticalType.FLIGHT
    elif choice == 'e':
        verticalType = services.VerticalType.EVENTTICKET
    elif choice == 'g':
        verticalType = services.VerticalType.GIFTCARD
    elif choice == 'l':
        verticalType = services.VerticalType.LOYALTY
    elif choice == 'o':
        verticalType = services.VerticalType.OFFER
    elif choice == 't':
        verticalType = services.VerticalType.TRANSIT
    elif choice == 'q':
        quit()
    else:
        # NOTE(review): "it''s" is two adjacent string literals and prints
        # "its"; likely a typo — confirm intended wording.
        print('\n* Invalid choice. Please select one of the pass types by entering it''s related letter.\n')

# your classUid should be a hash based off of pass metadata, for the demo we will use pass-type_class_uniqueid
classUid = str(verticalType).split('.')[1] + '_CLASS_'+ str(uuid.uuid4()) # CHANGEME
# check Reference API for format of "id" (https://developers.google.com/pay/passes/reference/v1/o).
# must be alphanumeric characters, '.', '_', or '-'.
classId = '%s.%s' % (config.ISSUER_ID,classUid)
# your objectUid should be a hash based off of pass metadata, for the demo we will use pass-type_object_uniqueid
objectUid = str(verticalType).split('.')[1] + '_OBJECT_'+ str(uuid.uuid4()) # CHANGEME
# check Reference API for format of "id" (https://developers.google.com/pay/passes/reference/v1/).
# Must be alphanumeric characters, '.', '_', or '-'.
objectId = '%s.%s' % (config.ISSUER_ID,objectUid)
# demonstrate the different "services" that make links/values for frontend to render a functional "save to phone" button
demoFatJwt(verticalType, classId, objectId)
demoObjectJwt(verticalType, classId, objectId)
demoSkinnyJwt(verticalType, classId, objectId)
|
Atry/hhvm | hphp/hack/test/fanout/tools/fanout_test_driver.py | <filename>hphp/hack/test/fanout/tools/fanout_test_driver.py
# pyre-strict
from __future__ import annotations
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from typing import Dict, List, Tuple, Optional, Any
import attr
from .fanout_information import FanoutInformation
from .fanout_test_parser import FanoutTest
# Splits "<hash> <symbol-name>" lines emitted by hh_single_type_check.
WHITESPACE_SPLITTER: re.Pattern = re.compile(r"\s+")

# Baseline hh_server --config flags shared by every test scenario: disable
# external integrations (hg, remote type check, experiments, symbol index),
# force the shallow/direct decl code paths, and pin worker counts to 1 so
# runs are deterministic.
DEFAULT_HH_SERVER_FLAGS: List[str] = [
    "--config",
    "hg_aware=false",
    "--config",
    "remote_type_check_enabled=false",
    "--config",
    "use_dummy_informant=true",
    "--config",
    "experiments_config_enabled=false",
    "--config",
    "symbolindex_search_provider=NoIndex",
    "--config",
    "use_direct_decl_parser=true",
    "--config",
    "force_shallow_decl_fanout=true",
    "--config",
    "num_local_workers=1",
    "--config",
    "max_workers=1",
    "--config",
    "allow_unstable_features=true",
    "--config",
    "allow_all_files_for_module_declarations=true",
]
@attr.s(auto_attribs=True)
class Binaries(object):
    """Paths to the Hack binaries the test driver invokes."""

    hh_client: str
    hh_server: str
    hh_single_type_check: str
    legacy_hh_fanout: str

    def validate(self) -> None:
        """Check that hh_server sits next to hh_client, as PATH-based lookup requires."""
        if os.path.join(os.path.dirname(self.hh_client), "hh_server") != self.hh_server:
            # We don't have a way to specify the executable hh_client should use, and OCaml
            # resolves symlinks, so hh_server is not in the same directory as hh_client.
            # We therefore have to rely on PATH.
            # Bug fix: the message template was never formatted with the path.
            raise ValueError(
                "{} must be an executable called hh_server".format(self.hh_server)
            )

    def exec_hh(
        self,
        args: List[str],
        allow_type_errors: bool = True,
        env: Optional[Dict[str, str]] = None,
        check: bool = True,
        **kwargs: Any,
    ) -> subprocess.CompletedProcess[str]:
        """Run hh_client with hh_server's directory prepended to PATH.

        Exit code 2 (type errors) is tolerated when `allow_type_errors` is set.
        """
        if env is None:
            env = {}
        # Bug fix: a PATH supplied by the caller previously lacked the ':'
        # separator and was fused directly onto the directory name.
        prev_path = env.get("PATH", os.getenv("PATH") or "")
        env["PATH"] = os.path.dirname(self.hh_server) + ":" + prev_path
        if "HH_TEST_MODE" not in env:
            env["HH_TEST_MODE"] = "true"
        r = _exec([self.hh_client] + args, env=env, check=False, **kwargs)
        if allow_type_errors and r.returncode == 2:
            return r
        if check:
            r.check_returncode()
        return r

    def exec_hh_stop(self, repo_root: str) -> subprocess.CompletedProcess[str]:
        """Stop the hh_server instance serving *repo_root*."""
        return self.exec_hh(["stop", repo_root])

    def exec_hh_single_type_check(
        self, args: List[str], **kwargs: Any
    ) -> subprocess.CompletedProcess[str]:
        """Run the standalone single-file type checker."""
        return _exec([self.hh_single_type_check] + args, **kwargs)

    def exec_legacy_hh_fanout(
        self, args: List[str], **kwargs: Any
    ) -> subprocess.CompletedProcess[str]:
        """Run the legacy hh_fanout tool (e.g. to build dependency graphs)."""
        return _exec([self.legacy_hh_fanout] + args, **kwargs)
@attr.s(auto_attribs=True)
class RepoRoot(object):
    """A scratch checkout directory used as the Hack repo under test."""

    path: str

    def cleanup(self) -> None:
        # Remove the whole scratch tree; nothing inside is worth keeping.
        shutil.rmtree(self.path)

    def hhconfig_file(self) -> str:
        """Path of the (empty) .hhconfig marker file inside the repo."""
        return os.path.join(self.path, ".hhconfig")
@attr.s(auto_attribs=True)
class SavedStateDir(object):
    """Directory layout for saved-state artifacts produced for a test repo."""

    path: str

    def cleanup(self) -> None:
        shutil.rmtree(self.path)

    def edges_dir(self) -> str:
        """Directory of raw dependency edges dumped by hh (--save-64bit)."""
        return os.path.join(self.path, "edges")

    def naming_table_blob_file(self) -> str:
        """OCaml-marshalled naming-table/state blob (--save-state)."""
        return os.path.join(self.path, "hh_mini")

    def depgraph_file(self) -> str:
        """64-bit dependency graph built from the edge files."""
        return os.path.join(self.path, "hh_mini.hhdg")

    def naming_table_sqlite_file(self) -> str:
        """SQLite naming table (--save-naming)."""
        return os.path.join(self.path, "hh_naming.sql")

    def saved_state_spec(self, changed_files: List[str]) -> str:
        """JSON spec consumed by hh's --with-mini-state flag.

        `changed_files` lists files modified relative to the saved-state base.
        """
        return json.dumps(
            {
                "data_dump": {
                    "deptable": self.depgraph_file(),
                    "state": self.naming_table_blob_file(),
                    "changes": changed_files,
                    "prechecked_changes": [],
                    # No real VCS here, so use a dummy base revision.
                    "corresponding_base_revision": "-1",
                }
            }
        )
@attr.s(auto_attribs=True)
class ExecResult(object):
    """Simple holder for a subprocess outcome.

    NOTE(review): this module's helpers all return
    subprocess.CompletedProcess; this class appears unused here — confirm
    before removing.
    """

    exit_code: int
    stdout: str
    stderr: str
def _exec(
    args: List[str],
    capture_output: bool = True,
    timeout: int = 300,
    text: bool = True,
    check: bool = True,
    **kwargs: Any,
) -> subprocess.CompletedProcess[str]:
    """Run *args* as a subprocess, logging the command, its result, and failures.

    Defaults: capture stdout/stderr as text, 5-minute timeout, and raise
    CalledProcessError on a non-zero exit (when `check` is true).
    """
    logging.debug("_exec: run: %s (%s)", args, kwargs)
    try:
        v = subprocess.run(
            args,
            capture_output=capture_output,
            timeout=timeout,
            text=text,
            check=check,
            **kwargs,
        )
    except subprocess.CalledProcessError as e:
        # Log the failing command's full output before propagating, since the
        # exception message alone does not include stdout/stderr.
        logging.debug(
            "_exec: error (%d):\n=== STDOUT ===\n%s\n=== STDERR ===\n%s\n",
            e.returncode,
            e.stdout,
            e.stderr,
        )
        raise
    logging.debug("_exec: result: %s", repr(v))
    return v
def _create_temporary_directory(category: str, filename: str) -> str:
# use TEMP on Sandcastle, this is autocleaned
return tempfile.mkdtemp(
prefix="{}-{}-".format(os.path.basename(filename), category),
dir=os.getenv("TEMP"),
)
def _prepare_repo_root(test: FanoutTest) -> RepoRoot:
    """Create a scratch repo holding the test's base .php files plus an empty .hhconfig."""
    root = RepoRoot(_create_temporary_directory("repo", test.filename))
    logging.debug("Preparing repo root in %s", root.path)
    # The empty .hhconfig marks the directory as a Hack repo root.
    os.mknod(root.hhconfig_file())
    test.prepare_base_php_contents(root.path)
    return root
def _make_repo_change(repo_root: RepoRoot, test: FanoutTest) -> List[str]:
    """Apply the test's "changed" file contents to the repo; return the changed paths."""
    logging.debug("Updating repo root at %s", repo_root.path)
    return test.prepare_changed_php_contents(repo_root.path)
def _create_saved_state(
    bins: Binaries, repo_root: RepoRoot, test: FanoutTest
) -> Tuple[subprocess.CompletedProcess[str], SavedStateDir]:
    """Build a saved-state (edges, naming table, depgraph) for the base revision.

    Returns the `hh` type-check result for the base revision together with
    the directory holding the saved-state artifacts.
    """
    saved_state_dir = SavedStateDir(_create_temporary_directory("ss", test.filename))
    logging.debug(
        "Generating saved-state for %s in %s", repo_root.path, saved_state_dir.path
    )
    logging.debug("Step 1/3: Generating edges to %s", saved_state_dir.edges_dir())
    _exec(["mkdir", "-p", saved_state_dir.edges_dir()])
    bins.exec_hh(
        [
            "--no-load",
            "--save-64bit",
            saved_state_dir.edges_dir(),
            "--save-state",
            saved_state_dir.naming_table_blob_file(),
            # Saved-states may legitimately be built from repos with errors.
            "--gen-saved-ignore-type-errors",
            "--error-format",
            "raw",
            "--config",
            "store_decls_in_saved_state=true",
        ]
        + DEFAULT_HH_SERVER_FLAGS
        + [
            repo_root.path,
        ]
    )
    # Capture the base revision's full error list for later comparison.
    hh_result = bins.exec_hh(["--error-format", "raw", repo_root.path])
    logging.debug(
        "Step 2/3: Writing naming table to %s",
        saved_state_dir.naming_table_sqlite_file(),
    )
    bins.exec_hh(
        [
            "--save-naming",
            saved_state_dir.naming_table_sqlite_file(),
            repo_root.path,
        ]
    )
    bins.exec_hh_stop(repo_root.path)
    logging.debug(
        "Step 3/3: Building dependency graph to %s", saved_state_dir.depgraph_file()
    )
    bins.exec_legacy_hh_fanout(
        [
            "build",
            "--edges-dir",
            saved_state_dir.edges_dir(),
            "--output",
            saved_state_dir.depgraph_file(),
        ]
    )
    # The raw edge files are no longer needed once the depgraph is built.
    _exec(["rm", "-rf", saved_state_dir.edges_dir()])
    return (hh_result, saved_state_dir)
def _build_fanout_hash_map(bins: Binaries, test: FanoutTest) -> Dict[str, str]:
    """Map dependency hash -> symbol name, as dumped by hh_single_type_check."""
    result = bins.exec_hh_single_type_check(
        ["--dump-dep-hashes", "--no-builtins", test.filename]
    )
    mapping: Dict[str, str] = {}
    for raw_line in result.stdout.splitlines():
        # Each line is "<hash> <symbol>"; split only on the first whitespace
        # run since the symbol name itself may contain spaces.
        symbol_hash, symbol_name = WHITESPACE_SPLITTER.split(raw_line.strip(), 1)
        mapping[symbol_hash] = symbol_name
    return mapping
def _launch_hh_from_saved_state(
    bins: Binaries,
    repo_root: RepoRoot,
    saved_state_dir: SavedStateDir,
    load_decls_from_saved_state: bool,
    changed_files: List[str],
) -> subprocess.CompletedProcess[str]:
    """Start hh for *repo_root*, initializing from the given saved-state.

    `changed_files` are reported to hh as modified relative to the
    saved-state base; `load_decls_from_saved_state` toggles cached decl
    loading. Returns the resulting type-check output.
    """
    logging.debug("Launching hh from saved-state for %s", repo_root.path)
    hh_result = bins.exec_hh(
        [
            "--config",
            "use_mini_state=true",
            "--config",
            "lazy_decl=true",
            "--config",
            "lazy_init2=true",
            "--config",
            "lazy_parse=true",
            "--config",
            "load_decls_from_saved_state={}".format(
                "true" if load_decls_from_saved_state else "false"
            ),
            "--with-mini-state",
            saved_state_dir.saved_state_spec(changed_files),
            # Bug fix: this --config naming_sqlite_path pair was accidentally
            # duplicated; passing it once is sufficient.
            "--config",
            "naming_sqlite_path={}".format(saved_state_dir.naming_table_sqlite_file()),
            "--config",
            "enable_naming_table_fallback=true",
            "--config",
            "log_categories=fanout_information",
            "--error-format",
            "raw",
        ]
        + DEFAULT_HH_SERVER_FLAGS
        + [
            repo_root.path,
        ]
    )
    return hh_result
def _extract_fanout_information(
    bins: Binaries, repo_root: RepoRoot, tags: List[str]
) -> List[FanoutInformation]:
    """Parse the hh_server log and keep only fanout records whose tag is in *tags*."""
    log_path = bins.exec_hh(["--logname", repo_root.path]).stdout.strip()
    logging.debug("Extracting fanout information from %s", log_path)
    return [
        info
        for info in FanoutInformation.extract_from_log_file(log_path)
        if info.tag in tags
    ]
def _strip_repo_root_from_output(repo_root: str, output: str) -> str:
if repo_root[-1:] != os.sep:
repo_root += os.sep
return output.replace(repo_root, "")
def _format_result(
    repo_root: RepoRoot,
    hh_result_base: subprocess.CompletedProcess[str],
    hh_result_changed: subprocess.CompletedProcess[str],
    fanout_information: List[FanoutInformation],
    fanout_hash_map: Dict[str, str],
) -> None:
    """Print the test outcome: base errors, post-change errors, and sorted fanout symbols."""
    print("=== base errors ===")
    sys.stdout.write(
        _strip_repo_root_from_output(repo_root.path, hh_result_base.stdout)
    )
    print("=== changed errors ===")
    sys.stdout.write(
        _strip_repo_root_from_output(repo_root.path, hh_result_changed.stdout)
    )
    print("=== fanout ===")
    # Translate each hash to its symbol name where known; unknown hashes are
    # printed as-is.
    names = sorted(
        fanout_hash_map.get(h, h) for fi in fanout_information for h in fi.hashes
    )
    for name in names:
        print(name)
def run_scenario_saved_state_init(bins: Binaries, test: FanoutTest) -> None:
    """Run the saved-state init fanout scenario.

    This scenario involves a saved-state init with some local changes. It
    includes the following steps:

    1. Build a saved-state for the base version
    2. Kill hack
    3. Make the repo changes
    4. Initialize from the saved-state
    5. Extract saved-state fanout
    """
    repo_root = _prepare_repo_root(test)
    fanout_hash_map = _build_fanout_hash_map(bins, test)
    # Steps 1-2: saved-state for the base revision (hh is stopped inside).
    (hh_result_base, saved_state_dir) = _create_saved_state(bins, repo_root, test)
    # Step 3: apply the test's changes on disk.
    changed_files = _make_repo_change(repo_root, test)
    # Step 4: init from the saved-state, reporting the changed files.
    hh_result_changed = _launch_hh_from_saved_state(
        bins,
        repo_root,
        saved_state_dir,
        load_decls_from_saved_state=True,
        changed_files=changed_files,
    )
    bins.exec_hh_stop(repo_root.path)
    # Step 5: pull the init-time fanout records from the server log.
    fanout_information = _extract_fanout_information(
        bins, repo_root, tags=["saved_state_init_fanout"]
    )
    _format_result(
        repo_root=repo_root,
        hh_result_base=hh_result_base,
        hh_result_changed=hh_result_changed,
        fanout_information=fanout_information,
        fanout_hash_map=fanout_hash_map,
    )
    repo_root.cleanup()
    saved_state_dir.cleanup()
def run_scenario_incremental_no_old_decls(bins: Binaries, test: FanoutTest) -> None:
    """Run the incremental fanout scenario with old decls unavailable.

    This scenario involves calculating the fanout in an incremental change
    scenario (i.e. after saved-state initialization was successful), but where
    the old versions of the declarations are unavailable and thus fine-grained
    decl diffing is impossible.

    1. Build a saved-state for the base version
    2. Kill hack
    3. Initialize from the saved-state, but disabled cached decl loading
    4. Make the change
    5. Type check and extract fanout
    """
    repo_root = _prepare_repo_root(test)
    fanout_hash_map = _build_fanout_hash_map(bins, test)
    (hh_result_base, saved_state_dir) = _create_saved_state(bins, repo_root, test)
    # Init with cached decls disabled so old decls are not in shared memory.
    _launch_hh_from_saved_state(
        bins,
        repo_root,
        saved_state_dir,
        load_decls_from_saved_state=False,
        changed_files=[],
    )
    _make_repo_change(repo_root, test)
    # Incremental re-check after the change.
    hh_result_changed = bins.exec_hh(["--error-format", "raw", repo_root.path])
    bins.exec_hh_stop(repo_root.path)
    fanout_information = _extract_fanout_information(
        bins, repo_root, tags=["incremental_fanout"]
    )
    _format_result(
        repo_root=repo_root,
        hh_result_base=hh_result_base,
        hh_result_changed=hh_result_changed,
        fanout_information=fanout_information,
        fanout_hash_map=fanout_hash_map,
    )
    repo_root.cleanup()
    saved_state_dir.cleanup()
def run_scenario_incremental_with_old_decls(bins: Binaries, test: FanoutTest) -> None:
    """Run the incremental fanout scenario with old decls available.

    This scenario involves calculating the fanout in an incremental change
    scenario (i.e. after saved-state initialization was successful), and where
    the old versions of the declarations are available and thus fine-grained
    decl diffing is possible.

    1. Build a saved-state for the base version
    2. Kill hack
    3. Initialize from the saved-state, forcing a re-typecheck of all files to
       make sure all decls are present in shared memory.
    4. Make the change
    5. Type check and extract fanout
    """
    repo_root = _prepare_repo_root(test)
    fanout_hash_map = _build_fanout_hash_map(bins, test)
    (hh_result_base, saved_state_dir) = _create_saved_state(bins, repo_root, test)
    # Report every base file as "changed" to force decls into shared memory.
    _launch_hh_from_saved_state(
        bins,
        repo_root,
        saved_state_dir,
        load_decls_from_saved_state=True,
        changed_files=test.all_base_php_files(),
    )
    _make_repo_change(repo_root, test)
    hh_result_changed = bins.exec_hh(["--error-format", "raw", repo_root.path])
    bins.exec_hh_stop(repo_root.path)
    fanout_information = _extract_fanout_information(
        bins, repo_root, tags=["incremental_fanout"]
    )
    _format_result(
        repo_root=repo_root,
        hh_result_base=hh_result_base,
        hh_result_changed=hh_result_changed,
        fanout_information=fanout_information,
        fanout_hash_map=fanout_hash_map,
    )
    repo_root.cleanup()
    saved_state_dir.cleanup()
|
rgreinho/yelper | tests/step_defs/test_features.py | <filename>tests/step_defs/test_features.py
"""
Define the scenarios for the feauture tests.
This file should contain our scenarios, but if it does, it cannot find the steps.
"""
# from pytest_bdd import scenario
# @scenario('../features/collect.feature', 'Collect information')
# def test_collect_information():
# """Ensure a user retrieves correct information."""
|
rgreinho/yelper | tests/step_defs/test_steps.py | """Define the feature test steps."""
import asyncio
import json
import os
from faker import Faker
import pytest
from pytest_bdd import given
from pytest_bdd import scenario
from pytest_bdd import then
from yelpapi import YelpAPI
from yelper.core.yelper import async_deep_query
from tests import mock_data
# The scenario MUST be defined here, otherwise it does not find the steps.
@scenario('../features/collect.feature', 'Collect information')
def test_collect_information():
    """Ensure a user retrieves correct information."""
    # Body intentionally empty: pytest-bdd drives the given/then steps below.
def async_mock(result):
    """Wrap *result* in an already-resolved Future, usable wherever an awaitable is expected."""
    future = asyncio.Future()
    future.set_result(result)
    return future
@given('the user wants to store the results in a CSV file')
def create_tmp_csv_file(tmp_path, scope='session'):
    """Return a fresh CSV output path under pytest's tmp_path."""
    # NOTE(review): `scope` here is a plain default parameter, not a fixture
    # scope — pytest-bdd does not read it from the signature; confirm whether
    # it was meant to be passed to the decorator instead.
    d = tmp_path / 'sub'
    d.mkdir()
    output = d / "test-output.csv"
    return output
# Note: If this step is made as a `when`, the `create_tmp_csv_file` does not behave properly or is not found and the
# test will break.
@given('the user research for "bike shops" in "Austin, TX"')
@pytest.mark.asyncio
async def research(mocker, create_tmp_csv_file):
    """Run a fully mocked deep query and write its results to the temp CSV file."""
    fake = Faker()
    # Provide a dummy API key so YelpAPI construction succeeds without credentials.
    mocker.patch.dict(os.environ, {"YELP_API_KEY": fake.pystr()})
    # First call returns canned search results; second returns {} to stop paging.
    mocker.patch.object(YelpAPI, '_query', side_effect=[json.loads(mock_data.YELP_SEARCH_RESULTS), {}])
    # Skip real HTTP scraping of business detail pages.
    mocker.patch('yelper.core.yelper.deep_link', return_value=async_mock(None), autospec=True)
    await async_deep_query('bike shops', 'Austin, TX', output=create_tmp_csv_file)
@then('the generated file contains the collected data')
def ensure_results(create_tmp_csv_file):
    """Compare the generated CSV byte-for-byte with the expected fixture."""
    output = create_tmp_csv_file
    actual = output.read_text()
    expected = mock_data.MOCKED_CSV_FILE_CONTENT
    assert actual == expected
|
rgreinho/yelper | tests/mock_data.py | """Define the mock data for the tests."""
from textwrap import dedent
# Search results for "Bike shops in Austin, TX".
YELP_SEARCH_RESULTS = """
{
"businesses": [
{
"id": "WT_d47o-V5xlMNx8trI0-A",
"alias": "monkey-wrench-bicycles-austin",
"name": "<NAME>",
"image_url": "https://s3-media3.fl.yelpcdn.com/bphoto/a4Tl6tvBcmXsdAM5Paq7FA/o.jpg",
"is_closed": false,
"url": "https://www.yelp.com/biz/monkey-wrench-bicycles-austin?adjust_creative=TbA-w_CgKdY8RAZLNl6BZA&utm_campaign=yelp_api_v3&utm_medium=api_v3_business_search&utm_source=TbA-w_CgKdY8RAZLNl6BZA",
"review_count": 85,
"categories": [
{
"alias": "bikes",
"title": "Bikes"
},
{
"alias": "bike_repair_maintenance",
"title": "Bike Repair/Maintenance"
}
],
"rating": 5,
"coordinates": {
"latitude": 30.3224761537188,
"longitude": -97.7254818941176
},
"transactions": [],
"price": "$$",
"location": {
"address1": "5555 N Lamar",
"address2": "Ste L131",
"address3": "",
"city": "Austin",
"zip_code": "78751",
"country": "US",
"state": "TX",
"display_address": [
"5555 N Lamar",
"Ste L131",
"Austin, TX 78751"
]
},
"phone": "+15124672453",
"display_phone": "(512) 467-2453",
"distance": 3645.0000325093783
},
{
"id": "wfKxBxJ8RFZj8jOB6Lpn-Q",
"alias": "bicycle-sport-shop-austin-2",
"name": "<NAME>",
"image_url": "https://s3-media3.fl.yelpcdn.com/bphoto/d3F2l_l2O-idm3TMUMgWNQ/o.jpg",
"is_closed": false,
"url": "https://www.yelp.com/biz/bicycle-sport-shop-austin-2?adjust_creative=TbA-w_CgKdY8RAZLNl6BZA&utm_campaign=yelp_api_v3&utm_medium=api_v3_business_search&utm_source=TbA-w_CgKdY8RAZLNl6BZA",
"review_count": 230,
"categories": [
{
"alias": "bikes",
"title": "Bikes"
},
{
"alias": "bikerentals",
"title": "Bike Rentals"
},
{
"alias": "bike_repair_maintenance",
"title": "Bike Repair/Maintenance"
}
],
"rating": 4.5,
"coordinates": {
"latitude": 30.25964,
"longitude": -97.758156
},
"transactions": [],
"price": "$$",
"location": {
"address1": "517 S Lamar Blvd",
"address2": "",
"address3": "",
"city": "Austin",
"zip_code": "78704",
"country": "US",
"state": "TX",
"display_address": [
"517 S Lamar Blvd",
"Austin, TX 78704"
]
},
"phone": "+15124773472",
"display_phone": "(512) 477-3472",
"distance": 5061.350203710105
}
],
"total": 87,
"region": {
"center": {
"longitude": -97.75772094726562,
"latitude": 30.305156315977833
}
}
}
"""
MOCKED_CSV_FILE_CONTENT = dedent("""\
name,phone,address,zipcode,link,emails
Monkey Wrench Bicycles,+15124672453,"5555 N Lamar Ste L131 Austin, TX 78751",78751,,❌
Bicycle Sport Shop,+15124773472,"517 S Lamar Blvd Austin, TX 78704",78704,,❌
""")
|
rgreinho/yelper | yelper/core/yelper.py | <reponame>rgreinho/yelper
"""Define the core functions."""
import asyncio
import csv
import dataclasses
import os
import re
import urllib
import urllib3
import aiohttp
from lxml import html
from yelpapi import YelpAPI
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
HEADERS = {
'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36'
}
@dataclasses.dataclass
class YelpBusiness:
    """A single business record collected from the Yelp API."""
    name: str
    phone: str = ''
    address: str = ''
    zipcode: str = ''
    link: str = ''
    emails: str = ''

    @classmethod
    def from_dict(cls, other_dict):
        """Build a YelpBusiness from a raw Yelp API business dictionary.

        Only 'name' is required; every other field falls back to ''.
        """
        location = other_dict.get('location', {})
        business = cls(other_dict['name'])
        business.phone = other_dict.get('phone', '')
        business.address = ' '.join(location.get('display_address', []))
        business.zipcode = location.get('zip_code', '')
        business.link = other_dict.get('link', '')
        business.emails = other_dict.get('emails', '')
        return business
async def deep_link(url, session):
    """Retrieve the business website URL from a Yelp business detail page.

    Returns the decoded external website URL, '\u274C' (cross mark) when no
    URL/website can be found, or '\U0001F611' (expressionless face) when the
    page fetch/parse fails.
    """
    if not url:
        return '\u274C'
    try:
        async with session.get(url, headers=HEADERS, ssl=False) as request:
            response = await request.text()
            parser = html.fromstring(response)
            raw_website_link = parser.xpath("//span[contains(@class,'biz-website')]/a/@href")
    except Exception:
        return '\U0001F611'
    if not raw_website_link:
        return '\u274C'
    decoded_raw_website_link = urllib.parse.unquote(raw_website_link[0])
    # Bug fix: guard against redirect URLs that do not match the expected
    # biz_redir pattern — indexing [0] previously raised an uncaught
    # IndexError outside the try block.
    matches = re.findall(r"biz_redir\?url=(.*)&website_link", decoded_raw_website_link)
    if not matches:
        return '\u274C'
    return matches[0]
async def deep_emails(url, session):
    """Retrieve the email addresses on the main page.

    Returns a comma-separated, de-duplicated list of addresses found in the
    page body, '\u274C' (cross mark) when no URL is given, or '\U0001F611'
    (expressionless face) when the fetch fails or nothing is found.
    """
    if not url:
        return f'\u274C'
    try:
        async with session.get(url, headers=HEADERS, ssl=False) as request:
            response = await request.text()
            emails = re.findall(r"[\w\.\+\-]+\@[\w]+\.[a-z]{2,4}", response)
    except Exception:
        return f'\U0001F611'
    # Note: set() de-duplicates but makes the output order nondeterministic.
    return ', '.join(set(emails)) if emails else f'\U0001F611'
async def deep_entry_parsing(business, counter):
    """Build a YelpBusiness from a raw API record and enrich it with link/emails.

    *counter* is only used for progress logging. Parsing errors are logged
    and re-raised (previously execution fell through to an unbound `entry`,
    masking the real error behind a NameError).
    """
    try:
        entry = YelpBusiness.from_dict(business)
    except Exception:
        print(f'{counter:04} Skipped due to error.')
        raise
    print(f'{counter:04} {entry.name}')
    # Dig deeper: fetch the business page for its website link and emails.
    async with aiohttp.ClientSession() as session:
        entry.link = await deep_link(business.get('url'), session)
        entry.emails = await deep_emails(entry.link, session)
    return entry
async def async_deep_query(terms, location, offset=0, limit=20, radius=40000, output='yelper.csv', pages=-1):
    """Search Yelp for *terms* near *location* and stream enriched results to a CSV file.

    Pages through the Yelp search API (`limit` results per call, starting at
    `offset`, within `radius` meters), enriches each business with its
    website link and scraped e-mail addresses, and appends every record to
    `output`. Stops after `pages` pages (non-positive means "all pages") or
    when the API returns no more businesses. Requires the YELP_API_KEY
    environment variable.
    """
    # Prepare the Yelp client.
    yelp_api = YelpAPI(os.environ['YELP_API_KEY'])
    params = {
        'term': terms,
        'location': location,
        'offset': offset,
        'limit': limit,
        'radius': radius,
    }

    # Prepare the CSV file.
    # Header columns are taken from the YelpBusiness dataclass fields.
    fieldnames = dataclasses.asdict(YelpBusiness('fake')).keys()
    with open(output, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()

        # Search Yelp.
        while True:
            search_results = yelp_api.search_query(**params)

            # Check whether we need to process further or not.
            if not search_results:
                break
            if not search_results['businesses']:
                break
            # Stop once the requested number of pages has been fetched
            # (only when `pages` is positive).
            if (params['offset'] / params['limit']) >= pages > 0:
                break

            # Process the results concurrently for the whole page.
            tasks = [
                deep_entry_parsing(business, params['offset'] + i)
                for i, business in enumerate(search_results['businesses'])
            ]
            page_results = await asyncio.gather(*tasks)

            # Write the entries to the file and flush.
            for entry in page_results:
                writer.writerow(dataclasses.asdict(entry))
            csvfile.flush()

            # Update the offset before looping again.
            params['offset'] += params['limit']
def deep_query(terms, location, offset, limit, radius, output, pages):
    """Synchronous entry point: run the async deep Yelp query to completion."""
    coroutine = async_deep_query(
        terms,
        location,
        offset=offset,
        limit=limit,
        radius=radius,
        output=output,
        pages=pages,
    )
    asyncio.run(coroutine)
|
rgreinho/yelper | yelper/main.py | """quick ."""
from yelper.cli.cli import cli
# pylint: disable=no-value-for-parameter
if __name__ == '__main__':
cli()
|
rgreinho/yelper | tests/test_yelper.py | """Test the Yelper module."""
class TestYelpBusiness:
"""Test the YelpBusiness class."""
|
jianbo-sudo/detectron2_layout | upload_detectron.py | <gh_stars>0
# coding:utf-8
from flask import Flask, render_template, request, redirect, url_for, make_response,jsonify
from werkzeug.utils import secure_filename
import time
from PIL import Image
from datetime import timedelta
import os, json, cv2, random, io
import numpy as np
import torch
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.data import DatasetCatalog
MetadataCatalog.get("dla_train").thing_classes = ['caption', 'figure', 'page', 'table', 'title', 'text']
# Input is an image path; the output is the annotated image written back to disk
# (the returned path can be used directly, e.g. with imwrite/serving).
def inference(input_path,model,model_weight):
    """Run detectron2 layout detection on an image and save the visualization.

    :param input_path: path of the input image.
    :param model: path of the detectron2 config YAML.
    :param model_weight: path of the trained model weights.
    :return: path of the saved .jpg visualization (original file is replaced).
    """
    im = cv2.imread(input_path)
    #im = input
    # `im` must be an image array: a file path has to go through imread first,
    # while a URL would need a load method instead.
    cfg = get_cfg()
    cfg.merge_from_file(model)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this mode
    cfg.MODEL.WEIGHTS = model_weight
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 6
    if torch.cuda.is_available():
        print('we use cuda!')
        cfg.MODEL.DEVICE='cuda'
    else:
        print('running on cpu')
        cfg.MODEL.DEVICE='cpu'
    predictor = DefaultPredictor(cfg)
    outputs = predictor(im)
    # NOTE(review): the next two bare attribute accesses have no effect —
    # they look like leftover debugging statements.
    outputs["instances"].pred_classes
    outputs["instances"].pred_boxes
    v = Visualizer(im[:,:,::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    outs = out.get_image()[:, :, ::-1]
    # Remove the original upload; the annotated image is written in its place.
    if os.path.exists(input_path):
        os.remove(input_path)
        print('remove image')
    if os.path.splitext(input_path)[-1] == ".jpg":
        cv2.imwrite(input_path,outs)
        print('input is a jpg file:',outs)
        return input_path
    else:
        # Non-jpg inputs are re-encoded to a .jpg with the same base name.
        image_name = os.path.splitext(os.path.split(input_path)[1])[0]
        print('image name:',image_name)
        jpg_name = os.path.join(os.path.split(input_path)[0],image_name+'.jpg')
        cv2.imwrite(jpg_name,outs)
        print('convert input to jpg:',jpg_name)
        return jpg_name
#input = "demo/input1.jpg"
model = "configs/DLA_mask_rcnn_X_101_32x8d_FPN_3x.yaml"
model_weight = 'model_weight/pub_model_final.pth'
#设置允许的文件格式
# File types the upload form accepts.  'JPGE' looks like a typo for 'JPEG';
# 'JPEG' is added and 'JPGE' is kept for backward compatibility.
ALLOWED_EXTENSIONS = set(['PNG', 'JPG', 'JPEG', 'JPGE', 'PBM', 'PDF'])
def allowed_file(filename):
    """Return True when `filename` has an extension in ALLOWED_EXTENSIONS.

    Bug fix: the original indexed ``rsplit('.', 1)[1]`` *before* checking
    that the name contains a dot, so extension-less filenames raised
    IndexError instead of being rejected.
    """
    if '.' not in filename:
        return False
    ext = filename.rsplit('.', 1)[1]
    return ext.upper() in ALLOWED_EXTENSIONS
app = Flask(__name__)
# 设置静态文件缓存过期时间
#app.send_file_max_age_default = timedelta(seconds=1)
def resize_image(files):
    """Compute display dimensions: fixed 500px width, height scaled to keep
    the image's aspect ratio.

    :param files: path or file object accepted by PIL's Image.open.
    :return: (width, height) tuple for display.
    """
    image = Image.open(files)
    width, height = image.size
    target_width = 500
    target_height = int(height / width * target_width)
    return target_width, target_height
@app.route('/upload', methods=['POST', 'GET']) # register the upload route
def upload():
    """Handle the upload form: save the file, run inference, render the result."""
    if request.method == 'POST':
        file = request.files['file']
        # If the extension is illegal, the file is empty, or the name has no
        # dot, return an error page.
        if not (file and allowed_file(file.filename)):
            #return jsonify({"error": "please check the input form, only accept image file and PDF."})
            return render_template('upload_start2.html',warning = "Illegal input, please choose again.")
        # Create a folder for storing images, relative to this file's location.
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'static/result')
        print('file path:',file_path)
        if not os.path.exists(file_path):
            # NOTE(review): 755 here is decimal, not octal 0o755 — confirm the
            # intended directory permissions.
            os.makedirs(file_path, 755)
        # Save the uploaded image under a sanitized filename.
        file_name = secure_filename(file.filename)
        upload_path = os.path.join(file_path, file_name)
        file.save(upload_path)
        print('file path:',file_path,'file name:',file_name,'upload path:',upload_path)
        # Run inference and save the annotated result.
        infer_path = inference(upload_path,model,model_weight)
        infer_name = os.path.split(infer_path)[1]
        # Recompute the display size so the image fits the screen.
        n_w,n_h = resize_image(infer_path)
        print('new size is:',n_w,n_h,'file name is:',file_name)
        return render_template('upload_done2.html', input_name = infer_name, new_weight = n_w, new_height = n_h)
    return render_template('upload_start2.html')
if __name__ == '__main__':
# app.debug = True
app.run(host='0.0.0.0', port=5000) |
alyshakt/mobile-appium-framework-py | setup_helpers/PlatformType.py | <filename>setup_helpers/PlatformType.py
"""To define the platform type"""
import enum
class PlatformType(enum.Enum):
    """To define a standardized platform type"""
    # Members identify the mobile OS under test; the numeric values are arbitrary.
    ios = 1
    android = 2
|
alyshakt/mobile-appium-framework-py | ios_objects/ios_pages.py | """Created December 15th, 2020 by <NAME> """
import datetime
import pytest
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from ios_objects import ios_page_locators
from setup_helpers import driver_setup
from tests import conftest
import logging
created_date = str(datetime.datetime.utcnow().strftime("%m-%d-%H%M"))
file_name = 'test-reports/screenshots/ios/' + created_date
default_wait = 15
class BasePage(object):
    """Base class to initialize the page class that will be called from all pages"""
    def __init__(self, driver):
        # Appium/Selenium webdriver shared by all page objects.
        self.driver = driver
    # Functional/Interaction with Page Elements
    def take_screenshot(self, name=None):
        """Takes a screenshot and saves it to test-reports"""
        if name is None:
            name = 'screenshot'
        screenshot = self.driver.get_screenshot_as_png()
        # NOTE(review): `file_name` ends with the timestamp and `name` is
        # appended with no separator — confirm the filename shape is intended.
        screenshot_file = open(file_name + name + '.png', "wb")
        screenshot_file.write(screenshot)
        screenshot_file.close()
    def enter_text(self, element, text_to_enter):
        """Clear `element`, then type `text_to_enter` into it."""
        element.clear()
        element.send_keys(text_to_enter)
    def get_element_text(self, element):
        """Return the visible text of `element`."""
        return element.text
    def tap_element(self, element):
        """Tap (click) `element`, then pause briefly for the UI to settle."""
        element.click()
        self.wait_for_seconds(2)
    def wait_for_seconds(self, seconds=3):
        """Sleep for `seconds` seconds."""
        conftest.max_sleep(seconds)
    def wait_for_element(self, element):
        """Wait up to `default_wait` seconds for `element` to become visible,
        then assert that it is displayed."""
        try:
            logging.debug(msg='Waiting for element...')
            WebDriverWait(driver=self.driver,
                          timeout=default_wait,
                          poll_frequency=2).until(
                expected_conditions.visibility_of(element))
            logging.debug(msg='The element is found? {}'.format(element.is_displayed()))
        except NoSuchElementException as n:
            # NOTE(review): WebDriverWait raises TimeoutException on expiry,
            # which this handler does not catch — confirm the intended
            # failure mode.
            logging.debug(msg='Element was not found: {}'.format(element))
        exists = self.element_exists(element)
        logging.debug(msg='The element exists? {}'.format(exists))
        assert exists
    def wait_for_invisibility(self, element):
        """Wait for `element` to become invisible, then assert it is not displayed."""
        logging.debug(msg='Waiting for invisibility of element...')
        WebDriverWait(driver=self.driver,
                      timeout=default_wait,
                      poll_frequency=2).until(
            expected_conditions.invisibility_of_element(element))
        exists = self.element_exists(element)
        logging.debug(msg='The element exists? {}'.format(exists))
        assert exists is False
    def element_exists(self, element):
        """Return whether `element` is currently displayed."""
        return element.is_displayed()
    def swipe_up(self):
        """Perform an upward swipe gesture using fixed screen coordinates."""
        logging.debug(msg='Trying to swipe up!')
        actions = TouchAction(self.driver)
        actions.long_press(x=180, y=510).move_to(x=150, y=250).release().perform()
    def get_page_src_info(self):
        """Log the current page source hierarchy (debugging aid)."""
        source_hierarchy = self.driver.page_source
        logging.debug(msg=source_hierarchy)
    def process_failure(self, error):
        """Log the page source, then fail the running pytest test with `error`."""
        self.get_page_src_info()
        pytest.fail('The test failed. {}'.format(error), True)
    def tear_down(self, failure):
        """Take a final Pass/Failed screenshot, quit the driver, stop Appium."""
        if failure is None:
            self.take_screenshot('Pass')
        else:
            self.take_screenshot('Failed')
        self.driver.quit()
        driver_setup.tear_down()
class IosMemberListPage(BasePage):
    """Member List Page Action Methods"""
    def wait_for_load_complete(self):
        """Poll until the member-list title is displayed, then screenshot it."""
        self.wait_for_seconds(2)
        title = self.member_list_title_exists()
        self.take_screenshot('MemberListScreen')
        while title is False:
            logging.debug(msg='List page is initiated? {}'.format(title) + ' waiting...')
            self.wait_for_seconds(1)
            # Bug fix: re-check visibility on every pass.  The original
            # computed `title` once before the loop and never updated it, so
            # a slow page made this loop spin forever.
            title = self.member_list_title_exists()
        logging.debug(msg='is page is initiated? {}'.format(title))
    def member_list_title_exists(self):
        """Return whether the member-list page title is displayed."""
        element = ios_page_locators.IosMemberListPageLocators.member_list_page_title(self)
        return self.element_exists(element)
    def get_member_list_title(self):
        """Return the text of the member-list page title."""
        element = ios_page_locators.IosMemberListPageLocators.member_list_page_title(self)
        return self.get_element_text(element)
    def tap_member_name(self, member_name):
        """Tap the first list entry whose text contains `member_name`
        (case-insensitive)."""
        member_element_list = ios_page_locators.IosMemberListPageLocators.member_list(self)
        for member_element in member_element_list:
            this_member = self.get_element_text(member_element).lower()
            if member_name.lower() in this_member:
                logging.info(msg='Found member: {}'.format(this_member))
                self.tap_element(member_element)
                break
class IosMemberDetailPage(BasePage):
    """Member Detail Page Action Methods"""
    def wait_for_load_complete(self):
        """Log whether the bio is visible and screenshot the detail page."""
        initiated = self.member_bio_exists()
        logging.debug(msg='Member detail page is initiated? {}'.format(initiated))
        self.take_screenshot('MemberDetailScreen')
    def member_picture_exists(self):
        """Return whether the profile image is displayed."""
        element = ios_page_locators.IosMemberDetailPageLocators.profile_image(self)
        return self.element_exists(element)
    def member_bio_exists(self):
        """Return whether the bio element is displayed."""
        element = ios_page_locators.IosMemberDetailPageLocators.bio(self)
        return self.element_exists(element)
    def get_member_bio(self):
        """Return the text of the bio element."""
        element = ios_page_locators.IosMemberDetailPageLocators.bio(self)
        return self.get_element_text(element)
|
alyshakt/mobile-appium-framework-py | tests/gen_rand_num_id.py | <reponame>alyshakt/mobile-appium-framework-py<filename>tests/gen_rand_num_id.py
import random
import string
def random_number(range=10):
    """Return a pseudo-random integer in [1, range-1] (a draw of 0 becomes 1).

    Note: the parameter deliberately keeps its original (builtin-shadowing)
    name so existing keyword callers keep working.
    """
    drawn = random.randrange(range)
    return drawn if drawn >= 1 else drawn + 1
def random_string(stringLength=10):
    """Return a random lowercase ASCII string of length `stringLength`."""
    alphabet = string.ascii_letters
    picked = ''.join(random.choice(alphabet) for _ in range(stringLength))
    return picked.lower()
def random_item_for_note():
    """Return a random line-item description for a test note.

    Bug fix: the original picked an index with ``random_number(len(items))``,
    which never returns 0, so the first item ('T-shirt') could never be
    selected.  ``random.choice`` gives every item an equal chance.
    """
    items = [
        'T-shirt',
        'Cafe Latte',
        'Shipping',
        'Service Cancellation',
        '3rd night free',
        'Manager\'s gift',
    ]
    return random.choice(items)
|
alyshakt/mobile-appium-framework-py | tests/conftest.py | import time
import logging
def pytest_addoption(parser):
    """Defines arguments that can be passed in"""
    parser.addoption("--environment",
                     action="store",
                     default="dev",
                     help="Environment to run tests in")
    # NOTE(review): the default is the boolean True, but values supplied on
    # the command line arrive as strings — downstream code stringifies it.
    parser.addoption("--is_headless",
                     action="store",
                     default=True,
                     help="Whether to run headless")
def pytest_generate_tests(metafunc):
    """Use of the arguments"""
    # Parametrize any test that declares these fixture names with the
    # stringified command-line option values.
    if 'environment' in metafunc.fixturenames:
        logging.info(msg='\n-----The environment: {}'.format(metafunc.config.option.environment))
        metafunc.parametrize("environment",
                             [str(metafunc.config.option.environment)])
    if 'is_headless' in metafunc.fixturenames:
        logging.info(msg='\n-----Running headless? {}'.format(metafunc.config.option.is_headless))
        metafunc.parametrize("is_headless",
                             [str(metafunc.config.option.is_headless)])
def max_sleep(seconds_to_wait=30):
    """Block the current thread for `seconds_to_wait` seconds."""
    time.sleep(seconds_to_wait)
def max_wait_time_seconds(seconds_to_wait=30):
    """Return the cap, in seconds, for polling loops (identity helper)."""
    return seconds_to_wait
def pytest_sessionfinish(session, exitstatus):
    """Reports session duration in seconds of the test pass/failure"""
    reporter = session.config.pluginmanager.get_plugin('terminalreporter')
    # _sessionstarttime is a private attribute of the terminal reporter plugin.
    duration = time.time() - reporter._sessionstarttime
    reporter.write_sep('=',
                       'duration: {} seconds'.format(duration),
                       yellow=True,
                       bold=True)
def pytest_unconfigure(config):
    """Configuration teardown"""
    reporter = config.pluginmanager.get_plugin('terminalreporter')
    # Same duration banner as pytest_sessionfinish, emitted at unconfigure time.
    duration = time.time() - reporter._sessionstarttime
    reporter.write_sep('=',
                       'duration: {} seconds'.format(duration),
                       yellow=True,
                       bold=True)
|
alyshakt/mobile-appium-framework-py | ios_objects/ios_page_locators.py | <reponame>alyshakt/mobile-appium-framework-py
"""Created December 29th, 2020 by <NAME>
This Base Page object locator strategy was gleaned with much gratitude from
http://elementalselenium.com/tips/9-use-a-base-page-object
"""
class BasePageLocators(object):
    """Initializes the driver for use on all other pages and defines objects that are on almost every page"""
    def __init__(self, driver):
        # Appium/Selenium webdriver used for all element lookups.
        self.driver = driver
    def alert_type_box(self):
        """Locate the iOS alert element."""
        return self.driver.find_element_by_class_name("XCUIElementTypeAlert")
    def alert_box(self):
        """Locate the scroll-view container element."""
        return self.driver.find_element_by_class_name("XCUIElementTypeScrollView")
class IosMemberListPageLocators(BasePageLocators):
    """iOS Member List Page Locators"""
    def member_list_page_title(self):
        """Locate the 'D+D Members' page-title element."""
        return self.driver.find_element_by_name('D+D Members')
    def member_list(self):
        """Locate all member-row elements (accessibility name 'Picture')."""
        return self.driver.find_elements_by_name('Picture')
class IosMemberDetailPageLocators(BasePageLocators):
    """iOS Member Detail Page Locators"""
    def phone_number(self):
        """Locate the 'Phone' element."""
        return self.driver.find_element_by_name('Phone')
    def profile_image(self):
        """Locate the 'Profile image' element."""
        return self.driver.find_element_by_name('Profile image')
    def email(self):
        """Locate the 'Email' element."""
        return self.driver.find_element_by_name('Email')
    def bio(self):
        """Locate the 'Bio' element."""
        return self.driver.find_element_by_name('Bio')
|
alyshakt/mobile-appium-framework-py | setup_helpers/driver_setup.py | """To set up the appium driver by Platform Type"""
import logging
"""Created December 15th, 2020 by <NAME> """
import os
from appium import webdriver
from appium.webdriver.appium_service import AppiumService
appium_service = AppiumService()
def get_app_type(PlatformType):
    """Return the display name ('iOS'/'Android') for a PlatformType member.

    Note: the parameter shadows the PlatformType enum class, so the original
    looked members up *through the passed member* (``member.ios``) —
    member-to-member attribute access that Python 3.12 removed.  Keying on
    the member's ``name`` gives the same results on every version, and any
    non-member input still yields the invalid-option message.
    """
    platform_name = getattr(PlatformType, 'name', None)
    switcher = {
        'ios': 'iOS',
        'android': 'Android'
    }
    app_type = switcher.get(platform_name, 'Invalid environment option, or not yet implemented')
    return app_type
def get_desired_caps(PlatformType, is_headless=False, app_path=None, device_name='iPhone 11', platform_version='14.3'):
    """To Define the desired capabilities type for your app
    :param platform_version: OS version string (used for iOS only)
    :param PlatformType: iOS or Android
    :param is_headless: whether to run headless
    :param app_path: explicit path of app file
    :param device_name: iPhone name or Android emulator name
    :return: the Appium desired-capabilities dict (None for unknown platforms)
    """
    platform_type = get_app_type(PlatformType)
    lower_app_type = platform_type.lower()
    if app_path is None:
        # No explicit bundle given: search the temp_app_files directory.
        app_path = get_app_path(PlatformType)
    logging.info(msg='\nThe App Path we found: {}'.format(app_path))
    desired_caps = None
    if lower_app_type == 'ios':
        # These None checks matter only when callers pass None explicitly,
        # since both parameters already default to non-None values.
        if platform_version is None:
            platform_version='14.3'
        if device_name is None:
            device_name='iPhone 11'
        desired_caps = dict(
            platformName='iOS',
            platformVersion=platform_version,
            deviceName=device_name,
            automationName='XCUITest',
            sendKeyStrategy='grouped',
            app=app_path,
            elementResponseAttributes=True,
            isAutomationEnabled=True,
            autoAcceptAlerts=False,
            autoDismissAlerts=False,
            connectHardwareKeyboard=True,
            isHeadless=bool(is_headless),
            showXcodeLog=True
        )
    elif lower_app_type == 'android':
        if device_name is None:
            device_name = 'S7 Edge API 29'
        # The avd capability expects the emulator name with underscores.
        avd_name = device_name.replace(' ', '_')
        desired_caps = dict(
            platformName='Android',
            deviceName=device_name,
            avd=avd_name,
            automationName='UIAutomator2',
            autoGrantPermissions=False,
            skipDeviceInitialization=False,
            audioPlayback=False,
            skipLogcatCapture=False,
            app=app_path,
            isHeadless=bool(is_headless)
        )
    logging.info(msg='DESIRED CAPABILITIES: {}'.format(desired_caps))
    return desired_caps
def __start_service():
    """Start the local Appium server process and log its state."""
    appium_service.start()
    logging.info(msg='Appium is running? {}'.format(appium_service.is_running))
    logging.info(msg='Appium is listening? {}'.format(appium_service.is_listening))
def get_driver(desired_caps):
    """Ensure Appium is up, then return a Remote webdriver session.

    :param desired_caps: the desired-capabilities dict for the session.
    """
    running = appium_service.is_running
    listening = appium_service.is_listening
    logging.info(msg='Appium is running? {}'.format(running))
    logging.info(msg='Appium is listening? {}'.format(listening))
    # Bug fix: the original condition `running or listening is False` parses
    # as `running or (listening is False)`, which started a *second* service
    # whenever one was already running and skipped starting when only
    # `listening` was True.  Start only when the service is not fully up.
    if not (running and listening):
        __start_service()
    driver = webdriver.Remote(command_executor='http://127.0.0.1:4723/wd/hub',
                              desired_capabilities=desired_caps)
    return driver
def get_app_path(PlatformType):
    """Locate the app bundle under temp_app_files for the given platform.

    iOS looks for .zip/.app files, everything else for .apk.  Returns the
    absolute path of the first match.  Bug fixes: the original relied on an
    undeclared module global and crashed with a NameError when nothing
    matched (now raises FileNotFoundError), and its inner `break` only
    stopped one directory's file list rather than the whole walk.
    """
    platform_type = get_app_type(PlatformType)
    logging.info(msg='The App Type is ... {}'.format(platform_type.lower()))
    lower_platform_name = platform_type.lower()
    logging.info(msg='\n Getting a dynamic app path')
    directory_path = os.path.abspath("temp_app_files")
    if 'ios' in lower_platform_name:
        suffixes = ('.zip', '.app')
    else:
        suffixes = ('.apk',)
    for root, dirs, files in os.walk(directory_path):
        logging.info(msg=files)
        for file in files:
            logging.info(msg='This file is: {}'.format(file))
            if file.endswith(suffixes):
                # NOTE(review): like the original, the path is built from the
                # top-level directory, not `root` — confirm nested bundles
                # are not expected.
                file_path = directory_path + '/' + file
                logging.info(msg='Found file: {}'.format(file_path))
                return os.path.abspath(file_path)
    raise FileNotFoundError(
        'No app bundle matching {} found under {}'.format(suffixes, directory_path))
def tear_down():
    """Stop the Appium server process and log its final state."""
    appium_service.stop()
    logging.info(msg='Appium is running? {}'.format(appium_service.is_running))
    logging.info(msg='Appium is listening? {}'.format(appium_service.is_listening))
|
alyshakt/mobile-appium-framework-py | tests/ios_ui/test_filelist.py | """Created December 15th, 2020 by <NAME> """
import logging
from ios_objects import ios_pages
from setup_helpers import driver_setup
from setup_helpers.PlatformType import PlatformType
def test_filelist(is_headless, record_xml_attribute):
    """A basic test to check for a file list"""
    record_xml_attribute(
        "name",
        "The title of the test for XML output")
    platform = PlatformType.ios
    desired_caps = driver_setup.get_desired_caps(PlatformType=platform, is_headless=is_headless)
    # Setup Driver, define Platform Type and the page object
    driver = driver_setup.get_driver(desired_caps)
    # I recommend beginning with a try-catch-finally format
    base_page = ios_pages.BasePage(driver)
    member_list_page = ios_pages.IosMemberListPage(driver)
    member_detail_page = ios_pages.IosMemberDetailPage(driver)
    failed = None
    try:
        # Navigate list -> detail and verify the member's bio is shown.
        member_list_page.wait_for_load_complete()
        assert member_list_page.member_list_title_exists()
        logging.info(msg='The page title is: ' + member_list_page.get_member_list_title())
        member_list_page.tap_member_name('Alysha')
        member_detail_page.wait_for_load_complete()
        assert member_detail_page.member_bio_exists()
        logging.info(msg='The Bio exists and says: {}'.format(member_detail_page.get_member_bio()))
    except (Exception, BaseException) as failure:
        # If any assertions above fail, then mark the test as failed and capture a screenshot
        logging.info(msg='!!!!! The test failed. {}'.format(failure))
        failed = failure
        base_page.process_failure(failed)
    finally:
        # Finally, quit the driver and appium service!
        base_page.tear_down(failed)
|
nishadg246/pybullet-play | examples/pybullet/examples/nishad.py | import pybullet as p
import time
import math
import numpy as np
import random
p.connect(p.GUI)
p.loadURDF("plane.urdf",[0,0,-.2],globalScaling=6.0,useFixedBase=True)
cylId = p.loadURDF("simple_cylinder.urdf",[0,0,0.2],globalScaling=6.0,useFixedBase=False)
cubeId = p.loadURDF("cube.urdf",[2,2,0],globalScaling=0.6,useFixedBase=False)
t = 0
g = False
def eachIter():
    """Advance the physics simulation by one step (gravity on), then sleep briefly."""
    p.setGravity(0,0,-10)
    p.stepSimulation()
    time.sleep(.002)
def increment(tup, ix, val):
    """Return a copy of tuple `tup` with element `ix` increased by `val`."""
    items = list(tup)
    items[ix] = items[ix] + val
    return tuple(items)
def applyAction(angle,offset):
    """Place the cube one unit from the cylinder at `angle` degrees (shifted
    sideways by `offset`), push it toward the cylinder, and print the cube's
    final displacement from its start position.
    """
    cylPos, cylOrn = p.getBasePositionAndOrientation(cylId)
    cubePos, cubeOrn = p.getBasePositionAndOrientation(cubeId)
    # Start position on the unit circle around the cylinder.
    cubeNewPos = [cylPos[0] + math.cos(math.radians(angle)),cylPos[1] + math.sin(math.radians(angle)),0]
    # Unit direction vector pointing from the cube toward the cylinder.
    vec = np.array([cylPos[0]-cubeNewPos[0],cylPos[1]-cubeNewPos[1],0])
    vec = vec / np.linalg.norm(vec)
    look = [0,0,math.atan(vec[0]/(-vec[1]))]
    # NOTE(review): this aliases cubeNewPos (no copy), so the in-place edits
    # below also change cubeNewPos — confirm that is intended.
    cubeNewPosWithOffset = cubeNewPos
    cubeNewPosWithOffset[0] -= offset*math.sin(math.radians(angle))
    cubeNewPosWithOffset[1] += offset*math.cos(math.radians(angle))
    p.resetBasePositionAndOrientation(cubeId,cubeNewPosWithOffset,p.getQuaternionFromEuler(look))
    # Let the scene settle, push for 100 steps, then let the cube coast.
    for i in range(100):
        eachIter()
    for i in range(100):
        p.applyExternalForce(cubeId, -1, 20*np.array(vec), cubeNewPos, flags = p.WORLD_FRAME)
        eachIter()
    for i in range(400):
        eachIter()
    cubePos, cubeOrn = p.getBasePositionAndOrientation(cubeId)
    print np.array(list(cubePos)) - cubeNewPosWithOffset
actions = [(10,100),(190,100)]
while (1):
# a = [int(x) for x in raw_input().split()]
for i in range(1000):
angle = random.randint(0,360)
offset = random.uniform(-0.3, 0.3)
print angle, offset
applyAction(angle, offset)
# applyAction(*a)
eachIter()
t+=1
|
mikiec84/wagtail-filepreviews | wagtaildocs_previews/apps.py | <reponame>mikiec84/wagtail-filepreviews<filename>wagtaildocs_previews/apps.py<gh_stars>10-100
from __future__ import unicode_literals
from django.apps import AppConfig
class WagtailDocsPreviewsConfig(AppConfig):
    """Django application configuration for wagtaildocs_previews."""
    name = 'wagtaildocs_previews'
    label = 'wagtaildocs_previews'
    verbose_name = 'Wagtail documents'
|
mikiec84/wagtail-filepreviews | wagtaildocs_previews/views.py | import json
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .models import PreviewableDocument
@csrf_exempt
def filepreviews_webhook(request):
    """Receive a FilePreviews webhook POST and store the payload on a document.

    The body's `user_data.document_id` identifies the PreviewableDocument;
    the full payload is saved as its `preview_data`.  Responds 200 on
    success, 400 for non-POST requests or unknown documents.
    """
    if request.method == 'POST':
        body = json.loads(request.body.decode('utf8'))
        user_data = body.get('user_data', {})
        document_id = user_data.get('document_id')
        try:
            document = PreviewableDocument.objects.get(pk=document_id)
            document.preview_data = body
            document.save()
            return JsonResponse({
                'success': True
            }, status=200)
        except PreviewableDocument.DoesNotExist:
            # Unknown document: fall through to the generic failure response.
            pass
    return JsonResponse({
        'success': False
    }, status=400)
|
mikiec84/wagtail-filepreviews | wagtaildocs_previews/urls.py | from __future__ import absolute_import, unicode_literals
from django.conf.urls import include, url
from wagtail.documents import urls as wagtaildocs_urls
from .views import filepreviews_webhook
urlpatterns = [
url(r'', include(wagtaildocs_urls)),
url(r'webhooks/filepreviews$',
filepreviews_webhook, name='filepreviews_webhook'),
]
|
mikiec84/wagtail-filepreviews | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import sys
from setuptools import setup
def read(*paths):
    """
    Build a file path from paths and return the contents.
    """
    target = os.path.join(*paths)
    with open(target, 'r') as handle:
        return handle.read()
def get_version(package):
    """
    Return package version as listed in `__version__` in `__init__.py`.
    """
    # Bug fix: the original opened the file without ever closing it; a
    # context manager releases the handle deterministically.
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
    """
    Return root package and all sub-packages.
    """
    found = []
    for dirpath, dirnames, filenames in os.walk(package):
        # A directory is a package only if it carries an __init__.py.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            found.append(dirpath)
    return found
# Runtime dependencies.  Bug fix: several specifiers were missing the comma
# between version clauses ('>=2.0<3.0'), which is not a valid PEP 508
# requirement and is rejected by modern pip/setuptools.
install_requires = [
    'wagtail>=2.0,<3.0',
    'jsonfield>=2.0.1,<3.0',
    'filepreviews>=2.0.2,<3.0',
    'django-model-utils>=3.0.0,<4.0'
]
# Extra dependencies installed via the 'test' extra.
tests_require = [
    'responses>=0.5.1,<1.0',
    'flake8>=3.3.0,<4.0',
    'isort>=4.2.5,<5.0'
]
version = get_version('wagtaildocs_previews')
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist bdist_wheel")
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
shutil.rmtree('dist')
shutil.rmtree('build')
shutil.rmtree('wagtaildocs_previews.egg-info')
sys.exit()
setup(
name='wagtaildocs_previews',
version=version,
description=(
'Extend Wagtail\'s Documents with image previews and '
'metadata from FilePreviews.io'
),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/filepreviews/wagtail-filepreviews',
packages=get_packages('wagtaildocs_previews'),
license='MIT',
long_description=read('README.rst'),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Topic :: Internet :: WWW/HTTP :: Site Management',
],
install_requires=install_requires,
extras_require={
'test': tests_require,
}
)
|
mikiec84/wagtail-filepreviews | wagtaildocs_previews/settings.py | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.module_loading import import_string
def get_callback_function(setting_name, default=None):
    """Resolve the callable configured under `setting_name` in Django settings.

    Returns `default` when the setting is missing or falsy.  A dotted-path
    string is imported; anything that ends up non-callable raises
    ImproperlyConfigured.
    """
    configured = getattr(settings, setting_name, None)
    if not configured:
        return default
    if callable(configured):
        return configured
    if isinstance(configured, six.string_types):
        configured = import_string(configured)
    if not callable(configured):
        raise ImproperlyConfigured(
            '{name} must be callable.'.format(name=setting_name)
        )
    return configured
def _get_previews_options(instance):
return {}
previews_options_callback = get_callback_function(
'WAGTAILDOCS_PREVIEWS_OPTIONS_CALLBACK',
default=_get_previews_options
)
|
mikiec84/wagtail-filepreviews | wagtaildocs_previews/migrations/0001_initial.py | <filename>wagtaildocs_previews/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-04-16 16:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
import taggit.managers
import wagtail.core.models
import wagtail.search.index
class Migration(migrations.Migration):
    """Initial schema: FilePreviewsSettings (per-site API credentials) and
    PreviewableDocument (a document model with a JSON `preview_data` field).
    Auto-generated by Django 1.10.7 — do not edit by hand.
    """
    initial = True
    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wagtailcore', '0032_add_bulk_delete_page_permission'),
    ]
    operations = [
        migrations.CreateModel(
            name='FilePreviewsSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('api_key', models.CharField(max_length=255)),
                ('api_secret', models.CharField(max_length=255)),
                ('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
            ],
            options={
                'verbose_name': 'FilePreviews',
            },
        ),
        migrations.CreateModel(
            name='PreviewableDocument',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('file', models.FileField(upload_to='documents', verbose_name='file')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('preview_data', jsonfield.fields.JSONField(blank=True, null=True)),
                ('collection', models.ForeignKey(default=wagtail.core.models.get_root_collection_id, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Collection', verbose_name='collection')),
                ('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
                ('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'document',
            },
            bases=(wagtail.search.index.Indexed, models.Model),
        ),
    ]
|
mikiec84/wagtail-filepreviews | wagtaildocs_previews/tests/test_models.py | import json
from django.core.files.base import ContentFile
from django.test import TestCase
from django.utils.six import b
import responses
from wagtail.core.models import Site
from wagtaildocs_previews.models import (
FilePreviewsSettings, PreviewableDocument
)
def setup_mock():
    """Register a `responses` mock for the FilePreviews previews endpoint.

    The callback answers POSTs to /v2/previews/ with a 201 whose `user_data`
    echoes the request's `data` field.
    """
    def request_callback(request):
        # Echo the submitted payload back as user_data, as the live API does.
        payload = json.loads(request.body.decode('utf8'))
        body = {
            'id': '1',
            'status': 'pending',
            'thumbnails': None,
            'url': 'https://api.filepreviews.io/v2/previews/1/',
            'preview': None,
            'original_file': None,
            'user_data': payload['data']
        }
        headers = {
            'content-type': 'application/json',
            'location': body['url']
        }
        return (201, headers, json.dumps(body))
    responses.add_callback(
        responses.POST, 'https://api.filepreviews.io/v2/previews/',
        callback=request_callback,
        content_type='application/json',
    )
class TestPreviewableDocument(TestCase):
    """Tests for PreviewableDocument's FilePreviews integration."""
    def setUp(self):
        # Enable FilePreviews with dummy credentials on the default site.
        self.site = Site.objects.get(is_default_site=True)
        self.settings = FilePreviewsSettings.for_site(self.site)
        self.settings.api_key = 'DUMMY_API_KEY'
        self.settings.api_secret = 'DUMMY_API_SECRET'
        self.settings.save()
    @responses.activate
    def test_filepreviews_generate_when_creating_doc(self):
        """Creating a document issues exactly one preview API request."""
        setup_mock()
        PreviewableDocument.objects.create(
            title='Test document',
            file=ContentFile(b('Hello world'), 'test1.txt')
        )
        self.assertEqual(len(responses.calls), 1)
    @responses.activate
    def test_filepreviews_generate_when_updating_file(self):
        """Replacing the document's file issues a second preview API request."""
        setup_mock()
        document = PreviewableDocument.objects.create(
            title='Test document',
            file=ContentFile(b('Hello world'), 'test1.txt')
        )
        document.file = ContentFile(b('Hello world'), 'test2.txt')
        document.save()
        self.assertEqual(len(responses.calls), 2)
    @responses.activate
    def test_filepreviews_doesnt_generate_if_settings_not_enabled(self):
        """No preview API request is made when the credentials are blank."""
        setup_mock()
        self.settings.api_key = ''
        self.settings.api_secret = ''
        self.settings.save()
        PreviewableDocument.objects.create(
            title='Test document',
            file=ContentFile(b('Hello world'), 'test1.txt')
        )
        self.assertEqual(len(responses.calls), 0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.