repo_name | path | text
|---|---|---|
icbi-lab/miopy
|
miopy/correlation.py
|
<filename>miopy/correlation.py
###############
#### AMYR #####
###############
import re
from typing import DefaultDict
from pandas.core.reshape.concat import concat
import scipy.stats
import pandas as pd
from .R_utils import tmm_normalization, deg_edger, deg_limma_array, voom_normalization
import numpy as np
import ranky as rk
from pandarallel import pandarallel
from os import path
import io
from sklearn.model_selection import StratifiedKFold, KFold
from statsmodels.stats.multitest import multipletests
from multiprocessing import Pool
import functools
from .process_data import *
##################
## Miscellaneous ##
##################
def adjust_geneset(table):
from statsmodels.stats.multitest import multipletests
p = table["Pval"]
mask = np.isfinite(p)
#Creating an empty vector
table["FDR"] = 1
table.loc[mask,"FDR"] = multipletests(p[mask], method="fdr_bh")[1]
return table
def adjPval(df):
from statsmodels.stats.multitest import multipletests
lCor = ["Rho","R","Tau","Background"]
method = "fdr_bh"
for cor in lCor:
col = "%s_%s" %(cor,method)
col_raw = "%s_Pval" %(cor)
#Pval
try:
p = df[col_raw]
mask = np.isfinite(p)
#Creating an empty vector
df[col] = 1
df.loc[mask,col] = multipletests(p[mask], method=method)[1]
except:
pass
return df
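# A hedged usage sketch (not part of the original module): the toy values below
# are invented and only illustrate how adjPval adds a BH-adjusted column while
# leaving non-finite p-values at the placeholder value 1.
def _example_adjpval():
    demo = pd.DataFrame({"R_Pval": [0.01, 0.04, np.nan, 0.20]})
    return adjPval(demo)  # adds "R_fdr_bh"; the NaN row keeps the default 1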
##################
## GeneSetScore ##
##################
def calculate_gene_set_score(expr, conf):
#print(row)
#print(conf)
sum_gene_predictor = sum(expr * conf)
sum_predictor = sum(conf)
try:
GScore = sum_gene_predictor/sum_predictor
except:
GScore = 0
#print(GScore)
return GScore
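# Hedged worked example (values invented): the score is simply the
# confidence-weighted mean of expression, sum(expr * conf) / sum(conf).
def _example_gene_set_score():
    expr = pd.Series([2.0, 4.0])  # expression of two genes in one sample
    conf = pd.Series([1.0, 3.0])  # prediction-tool confidence of each gene
    return calculate_gene_set_score(expr, conf)  # (2*1 + 4*3) / (1 + 3) = 3.5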
def gene_set_correlation(exprDf, lGeneSet, GeneSetName = "GeneSet", lMirUser = None, n_core = 2):
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDf)
dfConf = get_confident_df(load_matrix_counts().apply(lambda col: col.str.count("1")))
### Intersect with Gene and Mir from table##
lGene = intersection(lGene, dfConf.index.tolist())
lMir = intersection(lMir, dfConf.columns.tolist())
if lGeneSet is not None:
lGene = intersection(lGene,lGeneSet)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
#print(lGene)
#print(lMir)
#print(dfConf.loc[lGene,lMir])
dfSetScore = dfConf.loc[lGene,lMir].parallel_apply(lambda conf: \
exprDf[lGene].apply(lambda expr: \
calculate_gene_set_score(expr, conf), \
axis = 1), axis = 0)
dfSetScore = dfSetScore.apply(lambda col: col.dropna())
cor = dfSetScore.parallel_apply(lambda col: col.corr(exprDf[col.name],method = \
lambda x, y: scipy.stats.pearsonr(x, y)))
cor = cor.apply(lambda col: col.dropna())
df = pd.DataFrame(cor).transpose()
dfPval = pd.DataFrame(df.loc[:,1])
dfCor = pd.DataFrame(df.loc[:,0])
dfPval.columns = [GeneSetName]
dfCor.columns = [GeneSetName]
return dfCor, dfPval, dfSetScore
##################
## EdgeR ###
##################
def differential_expression_edger(fPath, metaFile, bNormal = False, bFilter = False, paired = False, group = "event"):
"""
Function to obtain the DEG between 2 groups
with edgeR.
Args:
fPath string Path with the raw counts
metaFile string Path, the first Row is sample names, second is Group
outPath string File output
bNormal Bool Bool to normalize the data with TMM
bFilter Bool Bool to filter low-expression genes
Returns:
df dataframe DataFrame with The LogFC, and pvalues for genes
"""
DEG = deg_edger(fPath = fPath, metaFile = metaFile, bNormal = str(bNormal), \
bFilter = str(bFilter), bPaired = str(paired), group = group)
return DEG
def tmm_normal(fPath, bFilter=True):
"""
Function to obtain the TMM-normalized counts
Args:
fPath string Path with the raw counts
outPath string File output
bFilter Bool Bool to filter low-expression genes
Returns:
tmm dataframe DataFrame with the log2(TMM) counts
"""
tmm = tmm_normalization(fPath, str(bFilter))
return tmm
##################
## Limma Array ##
##################
def differential_expression_array(fPath, metaFile, bNormal = True, bFilter = True, paired = False):
"""
Function to obtain the DEG between 2 groups
with Limma.
Args:
fPath string Path with the raw counts
metaFile string Path, the first Row is sample names, second is Group
outPath string File output
bNormal Bool Bool to normalize the data with TMM
bFilter Bool Bool to filter low-expression genes
Returns:
df dataframe DataFrame with The LogFC, and pvalues for genes
"""
DEG = deg_limma_array(fPath, metaFile, str(bNormal), str(bFilter), str(paired))
return DEG
def voom_normal(fPath, bFilter=True):
"""
Function to obtain the voom-normalized counts
Args:
fPath string Path with the raw counts
outPath string File output
bFilter Bool Bool to filter low-expression genes
Returns:
voom dataframe DataFrame with the voom-normalized counts
"""
voom = voom_normalization(fPath, str(bFilter))
return voom
##################
### sklearn ######
##################
def CoefLarsCV(x, y, n_core = 4):
from sklearn.linear_model import LarsCV, Lars
from sklearn.model_selection import train_test_split
X_train, X_test , y_train, y_test = \
train_test_split(x, y, test_size=0.2, random_state=1)
## CrossValidation
larscv = LarsCV(cv = 5, normalize=True)
larscv.fit(X_train, y_train)
coef = pd.Series(larscv.coef_, index = x.columns)
return coef
def CoefLassoCV(X, Y, k = 3, n_core = 4):
from sklearn.linear_model import Lasso, LassoCV
from sklearn.model_selection import train_test_split
skf = KFold(n_splits=k, shuffle=True)
indexes = [ (training, test) for training, test in skf.split(X, Y) ]
# iterate over all folds
dfTopCoefTemp = pd.DataFrame(dtype='float64', index=X.columns).fillna(0)
for train_index, test_index in indexes:
X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
y_train, y_test = Y[train_index], Y[test_index]
## CrossValidation
lassocv = LassoCV(cv = 5, max_iter=1000, normalize=True)
lassocv.fit(X_train, y_train)
lasso = Lasso(max_iter = 1e4, alpha=lassocv.alpha_).fit(X_train, y_train)
dfTopCoefTemp = pd.concat([dfTopCoefTemp, pd.Series(lasso.coef_, index = X.columns).fillna(0)], axis = 1)
#print(dfTopCoefTemp.apply(lambda row: row.mean(), axis=1))
return dfTopCoefTemp.apply(lambda row: row.mean(), axis=1)
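# Hedged sketch of the idea behind CoefLassoCV on synthetic data. The
# `normalize=` argument used above is omitted here because recent scikit-learn
# releases no longer accept it; all names, sizes and values are invented.
def _example_lasso_coefficients():
    from sklearn.linear_model import LassoCV
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.normal(size=(50, 4)), columns=list("abcd"))
    y = 2 * X["a"] - X["c"] + rng.normal(scale=0.1, size=50)
    return pd.Series(LassoCV(cv=5).fit(X, y).coef_, index=X.columns)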
def CoefLasso(x, y):
from sklearn.linear_model import Lasso
alphas = [0.001, 0.02, 0.01, 0.1, 0.5, 1, 5]
lasso = Lasso(alpha = 1, max_iter = 1e4 ).fit(x, y)
coef = pd.Series(lasso.coef_, index=x.columns)
#coef = coef.sort_values(0, ascending=False)
return coef
def CoefRandomForest(x, y, n_core = 4):
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV
from treeinterpreter import treeinterpreter as ti  # needed for the per-feature contributions below
X_train, X_test , y_train, y_test = \
train_test_split(x, y, test_size=0.2, random_state=1)
rf = RandomizedSearchCV(RandomForestRegressor(),\
param_distributions = {
'n_estimators':np.arange(10,500,5)
#'max_features':np.arange(1,10,1)
},
cv=5, n_iter = 20,
iid=False,random_state=0,refit=True,
scoring="neg_mean_absolute_error", n_jobs = n_core)
rf.fit(X_train,y_train)
rf = rf.best_estimator_
prediction, bias, contributions = ti.predict(rf, X_test)
totalc1 = np.mean(contributions, axis=0)
coef = pd.Series(totalc1, index=x.columns)
return coef
def CoefElasticNetCV(X, Y, k=3, n_core = 4):
from sklearn.linear_model import ElasticNetCV,ElasticNet
from sklearn.model_selection import train_test_split
alphas = [0.001, 0.02, 0.01, 0.1, 0.5, 1, 5]
skf = KFold(n_splits=k, shuffle=True)
indexes = [ (training, test) for training, test in skf.split(X, Y) ]
# iterate over all folds
dfTopCoefTemp = pd.DataFrame(dtype='float64', index=X.columns).fillna(0)
for train_index, test_index in indexes:
X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
y_train, y_test = Y[train_index], Y[test_index]
elasticcv = ElasticNetCV(alphas=alphas, cv = 5, max_iter=1000, normalize=True)
elasticcv.fit(X_train, y_train)
elastic = ElasticNet(alpha=elasticcv.alpha_, max_iter=1e4, normalize=True).fit(X_train, y_train)
dfTopCoefTemp = pd.concat([dfTopCoefTemp, pd.Series(elastic.coef_, index = X.columns).fillna(0)], axis = 1)
return dfTopCoefTemp.apply(lambda row: row.mean(), axis=1)
def CoefRidgeCV(X,Y,k=3):
from sklearn.linear_model import Ridge, RidgeCV
alphas = np.logspace(-10, -2, 10)
skf = KFold(n_splits=k, shuffle=True)
indexes = [ (training, test) for training, test in skf.split(X, Y) ]
# iterate over all folds
dfTopCoefTemp = pd.DataFrame(dtype='float64', index=X.columns).fillna(0)
for train_index, test_index in indexes:
X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
y_train, y_test = Y[train_index], Y[test_index]
ridgecv = RidgeCV(alphas=alphas, cv = 5, normalize=True)
ridgecv.fit(X_train, y_train)
ridge = Ridge(alpha=ridgecv.alpha_, max_iter=1e4, normalize=True).fit(X_train, y_train)
dfTopCoefTemp = pd.concat([dfTopCoefTemp, pd.Series(ridge.coef_, index = X.columns).fillna(0)], axis =1)
return dfTopCoefTemp.apply(lambda row: row.mean(), axis=1)
def CoefRidge(x, y):
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import train_test_split
X_train, X_test , y_train, y_test = \
train_test_split(x, y, test_size=0.2, random_state=1)
ridge6 = Ridge(alpha = 0.01, normalize=True)
ridge6.fit(X_train, y_train)
coef = pd.Series(ridge6.coef_, index=x.columns)
return coef
##################
## Correlation ###
##################
def pearson(exprDF, lMirUser = None, lGeneUser = None, n_core = 2, pval = True):
"""
Function to calculate the Pearson correlation coefficient, and pval
of each miRNA-mRNA pair, returning a matrix of correlation coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the Pearson correlation
coefficients. Columns are genes, rows are miRNA.
Pvaldf df A matrix that includes the Pearson correlation
pvalues. Columns are genes, rows are miRNA.
"""
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: exprDF[lMir].corrwith(gene, \
method = lambda x, y: scipy.stats.pearsonr(x,y)[0]))
if pval:
Pvaldf = exprDF[lGene].parallel_apply(lambda gene: exprDF[lMir].corrwith(gene, \
method = lambda x, y: scipy.stats.pearsonr(x,y)[1]))
else:
Pvaldf = pd.DataFrame()
Cordf = Cordf.apply(lambda col: col.dropna())
Pvaldf = Pvaldf.apply(lambda col: col.dropna())
return Cordf, Pvaldf
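# Hedged sketch of the corrwith pattern used above, on invented data: every
# miRNA column is correlated against a single gene vector through a callable.
def _example_pairwise_pearson():
    mirs = pd.DataFrame({"miR-a": [1.0, 2.0, 3.0, 4.0],
                         "miR-b": [4.0, 3.0, 2.0, 1.0]})
    gene = pd.Series([1.5, 2.0, 3.5, 4.0], name="GENE1")
    return mirs.corrwith(gene, method=lambda x, y: scipy.stats.pearsonr(x, y)[0])
    # miR-a comes out strongly positive, miR-b strongly negative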
def spearman(exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the Spearman correlation coefficient, and pval
of each miRNA-mRNA pair, returning a matrix of correlation coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the Spearman correlation
coefficients. Columns are genes, rows are miRNA.
Pvaldf df A matrix that includes the Spearman correlation
pvalues. Columns are genes, rows are miRNA.
"""
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: exprDF[lMir].corrwith(gene, \
method = lambda x, y: scipy.stats.spearmanr(x,y)[0]))
Pvaldf = exprDF[lGene].parallel_apply(lambda gene: exprDF[lMir].corrwith(gene, \
method = lambda x, y: scipy.stats.spearmanr(x,y)[1]))
Cordf = Cordf.apply(lambda col: col.dropna())
Pvaldf = Pvaldf.apply(lambda col: col.dropna())
return Cordf, Pvaldf
def kendall(exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the Kendall correlation coefficient, and pval
of each miRNA-mRNA pair, returning a matrix of correlation coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the Kendall correlation
coefficients. Columns are genes, rows are miRNA.
Pvaldf df A matrix that includes the Kendall correlation
pvalues. Columns are genes, rows are miRNA.
"""
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: exprDF[lMir].corrwith(gene, \
method = lambda x, y: scipy.stats.kendalltau(x,y)[0]))
Pvaldf = exprDF[lGene].parallel_apply(lambda gene: exprDF[lMir].corrwith(gene, \
method = lambda x, y: scipy.stats.kendalltau(x,y)[1]))
Cordf = Cordf.apply(lambda col: col.dropna())
Pvaldf = Pvaldf.apply(lambda col: col.dropna())
return Cordf, Pvaldf
def lasso(exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the Lasso correlation coefficient
of each miRNA-mRNA pair, returning a matrix of coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the lasso correlation
coefficients. Columns are genes, rows are miRNA.
"""
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: \
CoefLassoCV(exprDF[lMir], gene))
Cordf = Cordf.apply(lambda col: col.dropna())
return Cordf
def ridge (exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the Ridge correlation coefficient
of each miRNA-mRNA pair, returning a matrix of coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the Ridge correlation
coefficients. Columns are genes, rows are miRNA.
"""
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: \
CoefRidgeCV(exprDF[lMir],gene))
Cordf = Cordf.apply(lambda col: col.dropna())
return Cordf
def elasticnet(exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the ElasticNet correlation coefficient
of each miRNA-mRNA pair, returning a matrix of coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the ElasticNet
coefficients. Columns are genes, rows are miRNA.
"""
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: \
CoefElasticNetCV(exprDF[lMir],gene, n_core=n_core))
Cordf = Cordf.apply(lambda col: col.dropna())
return Cordf
def randomforest(exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the RandomForest Regression coefficient
of each miRNA-mRNA pair, returning a matrix of coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the RandomForest
coefficients. Columns are genes, rows are miRNA.
"""
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: \
CoefRandomForest(exprDF[lMir],gene))
Cordf = Cordf.apply(lambda col: col.dropna())
return Cordf
def lars(exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the LARS regression coefficient
of each miRNA-mRNA pair, returning a matrix of coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the LARS
coefficients. Columns are genes, rows are miRNA.
"""
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: \
CoefLarsCV(exprDF[lMir],gene))
Cordf = Cordf.apply(lambda col: col.dropna())
return Cordf
def hoeffding (exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the hoeffding correlation coefficient
of each miRNA-mRNA pair, returning a matrix of coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the hoeffding correlation
coefficients. Columns are genes, rows are miRNA.
Ref:
https://github.com/PaulVanDev/HoeffdingD
"""
from .metrics import hoeffding
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: exprDF[lMir].corrwith(gene, \
method = lambda x, y: hoeffding(x,y)))
Cordf = Cordf.apply(lambda col: col.dropna())
return Cordf
def rdc (exprDF, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to calculate the Randomized Dependence Coefficient (RDC)
of each miRNA-mRNA pair, returning a matrix of coefficients
whose columns are genes and rows are miRNAs.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
Cordf df A matrix that includes the rdc correlation
coefficients. Columns are genes, rows are miRNA.
Ref:
https://github.com/garydoranjr/rdc
"""
from .metrics import rdc
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
Cordf = exprDF[lGene].parallel_apply(lambda gene: exprDF[lMir].corrwith(gene, \
method = lambda x, y: rdc(x,y)))
Cordf = Cordf.apply(lambda col: col.dropna())
return Cordf
def hazard_ratio_mirgen(exprDF, table, lMirUser = None, lGeneUser = None, n_core = 2):
"""
Function to add the log(Hazard Ratio) of each gene and each miRNA
to the correlation table.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
table df Long-format table with one row per miRNA-gene pair
Returns:
table df Input table with the added columns HR_GENE and HR_MIR
holding the log(hr) of the gene and of the miRNA.
"""
from .survival import hazard_ratio
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
hr = hazard_ratio(exprDF=exprDF, lMirUser=lMir, lGeneUser=lGene, n_core = n_core)
hr.index = hr.target
table["HR_GENE"] = table.apply(lambda x: hr.loc[x["Gene"],"log(hr)"], axis = 1)
table["HR_MIR"] = table.apply(lambda x: hr.loc[x["Mir"],"log(hr)"], axis = 1)
return table
def all_methods(exprDF, lMirUser = None, lGeneUser = None, n_core = 2, hr = False, k = 10, background = True, test = False):
"""
Function to run every correlation and regression method on each
miRNA-mRNA pair and merge the results into a single long-format table.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
"""
import copy
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
print("Obtain Concat Gene and MIR")
if test:
modelList = [[spearman,"Rho"],
[pearson,"R"],
[kendall,"Tau"],
]
else:
modelList = [[spearman,"Rho"],
[pearson,"R"],
[kendall,"Tau"],
[rdc, "RDC"],
[hoeffding,"Hoeffding"],
[ridge,"Ridge"],
[lasso,"Lasso"],
[elasticnet,"ElasticNet"],
# [hazard_ratio_mirgen, "Log(HR)" ]
]
print("Loading dataset...")
lTuple = []
for model, name in modelList :
print("\nClassifier " + name)
classifier = copy.deepcopy(model)
try:
if name in ["Rho", "R","Tau"]:
dfCor, pval = classifier(exprDF, lGeneUser=lGene, lMirUser=lMir, n_core=n_core)
lTuple.append((pval,"%s_Pval"%name))
else:
dfCor = classifier(exprDF, lGeneUser=lGene, lMirUser=lMir,n_core=n_core)
except Exception as error:
print(error)
pass
else:
#dfCor.to_csv("~/%s.csv"%name)
lTuple.append((dfCor,name))
table = process_matrix_list(lTuple, add_target=True)
table = table.loc[~table.duplicated(), :]
if hr:
print("\nHazard Ratio")
table = hazard_ratio_mirgen(exprDF, table, lGeneUser=lGene, lMirUser=lMir, n_core=n_core)
if background:
print("\nBackground")
table = background_estimation(exprDF, table, n_gene=3000, n_core=n_core, pval=False)
table = adjPval(table)
return table, lTuple[1][0]
#################
### Background ##
#################
def process_zscore(lMir, table_random):
lMean = []
lStd = []
for mir in lMir:
x = table_random.loc[table_random.Mir == mir,:].R.tolist()
try:
mean = np.mean(x)
standard_deviation = np.std(x)
except:
mean = np.nan
standard_deviation = np.nan
finally:
lMean.append(mean)
lStd.append(standard_deviation)
df = pd.DataFrame({"Mir":lMir,"Mean":lMean,"Standard Deviation":lStd})
return df
def get_mean_deviation(table_random, n_core = 4):
import functools
from multiprocessing import Pool
### Intersect with Gene and Mir from table##
lMir = table_random.Mir.unique().tolist()
##Split List
np_list_split = np.array_split(lMir, n_core)
split_list = [i.tolist() for i in np_list_split]
#split_list = same_length(split_list)
#Fix Exprs Variable
partial_func = functools.partial(process_zscore, table_random=table_random)
#Generating Pool
pool = Pool(n_core)
lres = pool.map(partial_func, split_list)
res = pd.concat(lres)
pool.close()
pool.join()
return res
def z_score(value, mean, std):
return (value - mean) / std
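# Hedged worked example (numbers invented): how a pair's correlation is scored
# against the random-gene background mean and standard deviation.
def _example_z_score():
    return z_score(-0.6, mean=-0.1, std=0.2)  # (-0.6 - (-0.1)) / 0.2 = -2.5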
def background_estimation(exprDF, table, n_gene = 3000, method = "R", n_core = 6, pval = False):
"""
Function to estimate a background distribution of Pearson correlations
from a random sample of genes and to score every miRNA-mRNA pair of the
input table against that background.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
table df Long-format table with one row per miRNA-gene pair
n_gene int Number of random genes used to build the background
Returns:
table df Input table with the added columns "Background Z-Score"
and "Background_Pval".
"""
import random
from pandarallel import pandarallel
pandarallel.initialize(verbose=1, nb_workers=n_core)
lMir, lGene = header_list(exprDF=exprDF)
#Select Random Genes for correlation
random.seed(10)
lGene = random.sample(lGene, n_gene)
dfCor, dfPval = pearson(exprDF, lGeneUser = lGene, n_core = n_core, pval = pval)
## Get table with number
table_random = process_matrix_list([(dfCor,"R")], add_target=True)
table_random["Number"] = table_random["Prediction Tools"].str.count("1")
table_filter = table_random.loc[table_random["Number"] <= 1,:]
#Get Mean and Std
res = get_mean_deviation(table_filter)
res.index = res.Mir
#Get Z-score, and P-value
res_z_p = table.parallel_apply(lambda row: \
z_score_value(row.R, res, row.Mir), axis = 1)
#print(res_z_p)
#res_z_p = res_z_p.apply(lambda col: col.dropna())
table["Background Z-Score"] = res_z_p.apply(lambda x: x[0])
table["Background_Pval"] = res_z_p.apply(lambda x: x[1])
return table
def z_score_value(x, res, mir):
from scipy.stats import norm
try:
z = z_score(x, res.loc[mir,"Mean"], res.loc[mir,"Standard Deviation"])
p_value = norm.cdf(x=x,loc=res.loc[mir,"Mean"],scale=res.loc[mir,"Standard Deviation"])
except:
z, p_value = 0, 1
return z, p_value
#################
### Reshaping ###
#################
def matrix2table(Cordf, value_name = "Value"):
"""
Function to reshaping a correlation matrix where Columns are genes,
rows are miRNA to Table.
Args:
Cordf df A matrix that includes the correlation
coefficients. Columns are genes, rows are miRNA.
Returns:
Table df A matrix that includes 3 col: gene, mir and value
Ref:
https://pandas.pydata.org/docs/user_guide/reshaping.html
"""
table = Cordf.melt(ignore_index=False)
#table["Mir"] = table.index.tolist()
#table = table.loc[["Mir", "Gene", "Value"],:]
table = table.reset_index()
table.columns = ["Mir","Gene", value_name]
return table
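# Hedged sketch of the reshape done by matrix2table on a toy 2x2 matrix
# (miRNAs as rows, genes as columns; all names and values invented).
def _example_matrix2table():
    cor = pd.DataFrame([[-0.5, 0.1], [0.3, -0.2]],
                       index=["miR-a", "miR-b"], columns=["GENE1", "GENE2"])
    return matrix2table(cor, value_name="R")
    # -> long table with columns ["Mir", "Gene", "R"], one row per pair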
def merge_tables(lDf, method = "inner"):
"""
Function to concat the correlations Tables.
Args:
lDf list List of DFs with the correlation data
Returns:
df df A concat df
Ref:
https://stackoverflow.com/questions/23668427/pandas-three-way-joining-multiple-dataframes-on-columns
"""
from functools import reduce
df = reduce(lambda left, right: pd.merge(left,right,on=['Gene','Mir'], how = method), lDf)
return df
def process_matrix_list(lTuple, add_target=True, method = "outer"):
"""
Function to reshape a list of correlation matrix
in the correlations Tables.
Args:
lTuple list List of tuples with the correlation matrix, and the
name of the analysis (df,"value_name")
Returns:
table df A table with all the values merged
"""
lDf = []
if add_target:
df = lTuple[0][0]
target = load_matrix_counts()
lGen = intersection(df.columns.tolist(), target.index.tolist() )
lMir = intersection(df.index.tolist(), target.columns.tolist() )
target = target.loc[lGen,lMir].transpose()
lTuple.append((target, "Prediction Tools"))
for tuple in lTuple:
df = matrix2table(tuple[0], tuple[1])
lDf.append(df)
table = merge_tables(lDf, method = method)
return table
def filter_table(table, low_coef = -0.2, high_coef = 0.2, nDB = 3, pval = 0.05):
query_string = f"""
((R <= {low_coef} | R >= {high_coef} ) | \
(Rho <= {low_coef} | Rho >= {high_coef} ) | \
(Tau <= {low_coef} | Tau >= {high_coef} )) \
& (Tool Number >= {nDB}) & \
((Spear_fdr_bh <= {pval}) | \
(Pear_fdr_bh <= {pval}) | \
(Kendall_fdr_bh <= {pval}))
"""
table = table.query(query_string)
return table
def _mir_gene_ratio(mir, gene, df):
name = "%s/%s"%(mir.name,gene.name)
df[name] = pd.Series(mir/gene)
return None
def get_mir_gene_ratio(exprDF, lMirUser = None, lGeneUser = None, filter_pair = False, low_coef = -0.5, min_db = 20):
"""
Function to build miR/gene expression-ratio features, one column per
miRNA-mRNA pair, optionally keeping only negatively correlated pairs
supported by a minimum number of prediction tools.
Args:
exprDF df Concat Dataframe rows are samples and cols are gene/mirs
Returns:
"""
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene,lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir,lMirUser)
df = pd.DataFrame()
if filter_pair:
dfCor, pval = pearson(exprDF, lGeneUser=lGene, lMirUser=lMir, n_core=6)
table = process_matrix_list([(dfCor,"R")], add_target=True)
table = table.loc[table["R"] <= low_coef, :]
table = table.loc[table["Prediction Tools"].str.count("1") > min_db,:]
exprDF[lGene].apply(lambda gene:
exprDF[lMir].apply(lambda mir:
_mir_gene_ratio(mir, gene, df)))
lKeep = table.apply(lambda row: "%s/%s"%(row["Mir"],row["Gene"]), axis = 1)
df = df[lKeep]
return df
#################
### Sorting ###
#################
def obtain_top_matrix(Cordf, topk=100, value_name = "Value"):
"""
Function to obtain the top X miRNAs-mRNA interactions
from corr matrix.
Args:
Returns:
"""
table = matrix2table(Cordf, value_name)
top = obtain_top_table(table, topk, value_name)
return top
def obtain_top_table(table, topk=100, value_name=None):
"""
Function to obtain the top X miRNAs-mRNA interactions
from corr matrix.
Args:
Returns:
"""
table = table.dropna()
if value_name is None:
table = table.reindex(table[table.columns.tolist()[2]].abs().sort_values(ascending=False).index)
else:
table = table.reindex(table[value_name].abs().sort_values(ascending=False).index)
toprank = table.head(topk)
return toprank
def borda_table(table, lMethod = None):
"""
Function to use the Borda count election
to integrate the rankings from different miRNA
coefficients.
Args:
table df A table with all the values merged
lMethod list List of the columns to rank
Returns:
TableRank df A table with all the values merged and ranking
"""
TableRank = table
if lMethod == None:
lMethod = GetMethodList(table.columns.tolist())
TableRank.loc[:,"Ranking"] = rk.borda(TableRank[lMethod].abs(), reverse=False)
TableRank.loc[:,"Ranking"] = TableRank["Ranking"].round(0)
return TableRank.sort_values("Ranking", ignore_index = True).round(3)
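# Hedged sketch of the Borda aggregation above on invented values; the method
# columns are passed explicitly so the example does not depend on GetMethodList.
def _example_borda():
    tbl = pd.DataFrame({"Mir": ["miR-a", "miR-b"], "Gene": ["G1", "G2"],
                        "R": [-0.9, -0.2], "Rho": [-0.8, -0.1]})
    return borda_table(tbl, lMethod=["R", "Rho"])  # adds a consensus "Ranking"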
def borda_matrix(lTuple):
"""
Function to use the Borda count election
to integrate the rankings from different miRNA
coefficients.
Args:
lTuple list List of tuples with the correlation matrix, and the
name of the analysis (df,"value_name")
Returns:
TableRank df A table with all the values merged and ranking
"""
lDf = []
for tuple in lTuple:
df = matrix2table(tuple[0], tuple[1])
lDf.append(df)
TableRank = merge_tables(lDf)
TableRank = borda_table(TableRank)
return TableRank
def borda_top_matrix(lTuple, topk=100):
"""
Function to obtain the consensus ranking from the topk
miRNA-mRNA pairs of each correlation method. Only the pairs
present in the tops are kept.
Args:
lTuple list List of tuples with the correlation matrix, and the
name of the analysis (df,"value_name")
Returns:
TableRank df A table with all the values merged and ranking
"""
lDf = []
for tuple in lTuple:
df = obtain_top_matrix(tuple[0], value_name = tuple[1], topk = topk)
lDf.append(df)
TableRank = merge_tables(lDf, method = "outer")
TableRank = borda_table(TableRank)
return TableRank
def borda_top_table(table, topk=100, only_negative = False, method = "outer"):
"""
Function to obtain the consensus ranking from the topk
miRNA-mRNA pairs of each correlation method. Only the pairs
present in the tops are kept.
Args:
table df A table with all the values merged
topk int Number of top pairs kept per method
Returns:
TableRank df A table with all the values merged and ranking
"""
lMethod = GetMethodList(table.columns.tolist())
lDf = []
if only_negative:
for m in lMethod:
if m != "RDC" and m != "Hoeffding":
table = table[table[m] <= 0]
#Table = Table[Table["DB"]!="00000000000000000000000"]
for m in lMethod:
df = obtain_top_table(table[["Mir","Gene",m]], topk=topk, value_name=m)
lDf.append(df)
TableRank = merge_tables(lDf, method = method)
TableRank = borda_table(TableRank)
TableRank = merge_tables([TableRank, table[["Mir","Gene","Prediction Tools"]]], method="inner")
return TableRank.sort_values("Ranking", ignore_index = True)
def opposite_correlation(Table1, Table2, method="R"):
keepCol = ["Gene","Mir",method]
mergeTable = merge_tables([Table1[keepCol],Table2[keepCol]])
mergeTable["Anti"] = mergeTable["%s_x"%method] * mergeTable["%s_y"%method]
mergeTable = mergeTable[(mergeTable["Anti"] < 0)].sort_values("Anti",ignore_index=True)
return mergeTable
def predict_lethality(gene, table, topk=100, method = "outer"):
##Get Methods##
lMethod = GetMethodList(table.columns.tolist())
slDF = load_synthetic()
gene_list = slDF[(slDF["GeneA"]== gene)]["GeneB"].tolist()
match = table[table["Gene"].str.contains("|".join(gene_list))]
topmatch = borda_top_table(match, topk=topk, method = method )
#topmatch = merge_tables([topmatch, table[["Mir","Gene","DB"]]], method="inner")
return topmatch
def FilterDF(table = None, matrix = None, join = "or", lTool = [], low_coef = -0.5, high_coef = 0.5, pval = 0.05, analysis = "Correlation", min_db = 1, method = "R"):
dbQ = get_target_query(join, lTool)
#print(dbQ)
#Filter DF
if analysis == "Correlation":
if method in ["R","Rho","Tau"]:
query_string = f"""
(({method} <= {low_coef} | {method} >= {high_coef} )) \
& \
(({method}_fdr_bh <= {pval}))
"""
else:
query_string = f"""
(({method} <= {low_coef} | {method} >= {high_coef} ))
"""
table = table[table["Prediction Tools"].str.match(dbQ)==True]
table["Number Prediction Tools"] = table["Prediction Tools"].str.count("1")
table = table[table["Number Prediction Tools"] >= min_db]
elif analysis == "GeneSetScore":
query_string = f"""
((R <= {low_coef} | R >= {high_coef} ) \
& (FDR <= {pval}))
"""
table = table.query(query_string)#Query the Correlation Table
table = borda_table(table)
#print("Filtrado")
#print(table.head())
gene = table["Gene"].unique().tolist()#Obtain Unique Gene after filter the table
mir = table["Mir"].unique().tolist()#Obtain Unique mir after filter the table
if matrix is not None:
try:
matrix = matrix.loc[mir,gene]#Subset the Correlation matrix to the heatmap
except:
matrix = matrix.loc[gene,mir]#Subset the Correlation matrix to the heatmap
return table, matrix
else:
return table
def predict_target(table = None, matrix = None, lTarget = None, lTools = None, method = "or", min_db = 10, low_coef = -0.5, high_coef = 0.5, pval = 0.05):
lTools = lTools if lTools != None else load_matrix_header()
if len(lTarget) > 0:
if table is not None:
#print("Holi")
if matrix is not None:
table, matrix = FilterDF(table = table, matrix = matrix, join = method, lTool = lTools, \
low_coef = low_coef, high_coef = high_coef, pval = pval)
else:
table = FilterDF(table = table, matrix = matrix, join = method, lTool = lTools, \
low_coef = low_coef, high_coef = high_coef, pval = pval)
else:
table = load_table_counts()
dbQ = get_target_query(method, lTools)
table = table[table["Prediction Tools"].str.match(dbQ)==True]
#Read DF
#Obtain Target Table
target = table[table["Gene"].isin(lTarget)|table["Mir"].isin(lTarget)]
del table
#Filter by number
target["Number Prediction Tools"] = target["Prediction Tools"].str.count("1")
target = target[target["Number Prediction Tools"] >= min_db]
if not target.empty and matrix is not None:
gene = target["Gene"].unique().tolist()#Obtain Unique Gene after filter the table
mir = target["Mir"].unique().tolist()#Obtain Unique mir after filter the table
try:
matrix = matrix.loc[mir,gene]#Subset the Correlation matrix to the heatmap
except:
matrix = matrix.loc[gene,mir]#Subset the Correlation matrix to the heatmap
else:
matrix = None
else:
target, matrix = None, None
return target, matrix
def ora_mir(lGene, matrix, mir_name, q):
from scipy.stats import fisher_exact
total_number_gene_universe = len(set(matrix.index.tolist()))
total_number_gene_list = len(set(lGene))
target_gene_list = matrix.loc[matrix[mir_name].str.count("1") >= q, mir_name].index.tolist()
target_number_universe = len(set(target_gene_list))
target_number_list = len(set(lGene).intersection(set(target_gene_list)))
in_list, not_list = target_number_list, total_number_gene_list - target_number_list
in_universe, not_universe = target_number_universe, total_number_gene_universe - target_number_universe
data = {"List":[in_list, not_list], "Universe": [in_universe, not_universe]}
res = pd.DataFrame.from_dict(data)
odd, pval = fisher_exact(res)
expected = (in_universe / total_number_gene_universe) * total_number_gene_list
return pd.Series([mir_name, target_number_list, expected, odd, pval], \
index = ["microRNA","Target Number","Expected Number", "Fold Enrichment", "Raw P-value"], name = mir_name)
def ora_mir_list(lMir, lGene, matrix, minDB):
df = pd.DataFrame()
for mir in lMir:
res = ora_mir(lGene, matrix, mir, minDB)
df = pd.concat([df,res], axis = 1)
return df
def ora_mir_parallel(lGene, matrix, lMir, minDB, n_core = 2):
##Split List
np_list_split = np.array_split(lMir, n_core)
split_list = [i.tolist() for i in np_list_split]
#split_list = same_length(split_list)
#Fix Exprs Variable
partial_func = functools.partial(ora_mir_list, matrix = matrix, lGene = lGene, minDB=minDB)
#Generating Pool
pool = Pool(n_core)
lres = pool.map(partial_func, split_list)
res = pd.concat(lres, axis = 1)
res = res.transpose()
pool.close()
pool.join()
res["FDR"] = multipletests(res["Raw P-value"], method = "fdr_bh")[1]
return res
def predict_lethality2(table = None, matrix = None, lQuery = None, lTools = None, method = "or", min_db = 10, low_coef = -0.5, high_coef = 0.5, pval = 0.05):
##Get Methods##
import pandas as pd
slDF = load_synthetic()
qA = slDF.loc[slDF["GeneA"].isin(lQuery),:]
qA.columns = ["Query", "Synthetic Lethal"]
qB = slDF.loc[slDF["GeneB"].isin(lQuery),:]
qB.columns = ["Synthetic Lethal","Query"]
qSl = pd.concat([qA,qB])
lTarget = qSl["Synthetic Lethal"].tolist()
if len(lTarget) > 0:
target,matrix = predict_target(table = table, matrix = matrix, lTarget = lTarget, lTools = lTools, method = method, min_db = min_db, low_coef = low_coef, high_coef = high_coef, pval = pval)
target = pd.merge(qSl,target, left_on="Synthetic Lethal", right_on="Gene")
res = ora_mir_parallel(lTarget, load_matrix_counts(), target["Mir"].unique().tolist(), min_db)
else:
target, res = None, None
return target, matrix, res
|
icbi-lab/miopy
|
miopy/data.py
|
import pandas as pd
import numpy as np
from os import path
import io
def _get_path_data():
return path.join(path.dirname(__file__), 'data')
def get_target_query(method = "and", lTarget = []):
#Build DB query:
dbQ = ""
lHeader = load_matrix_header()
if method == "and":
for db in lHeader:
if db in lTarget:
dbQ += "1"
else:
dbQ += "."
elif method == "or":
lQ = []
nullString = "."*len(lHeader)
for db in lHeader:
if db in lTarget:
i = lHeader.index(db)
q = list(nullString)
q[i] = "1"
lQ.append("".join(q))
dbQ="("+"|".join(lQ)+")"
return dbQ
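# Hedged illustration of the patterns built above, assuming a hypothetical
# three-tool header ["toolA", "toolB", "toolC"] (the real order comes from
# data/MATRIX_LIST.txt):
#   method = "and", lTarget = ["toolA", "toolC"]  ->  "1.1"
#   method = "or",  lTarget = ["toolA", "toolC"]  ->  "(1..|..1)"
# The resulting regex is matched against the "Prediction Tools" bit strings.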
def load_matrix_header():
import codecs
import pkg_resources
"""Return a dataframe about miRNA/Gene prediction tool.
Contains the following fields:
col gene symbol
index mirbase mature id
"""
stream = pkg_resources.resource_stream(__name__, 'data/MATRIX_LIST.txt')
return stream.read().decode('utf-8').split()
def load_matrix_counts():
import pkg_resources
"""Return a dataframe about miRNA/Gene prediction tool.
Contains the following fields:
index gene symbol
col mirbase mature id
"""
stream = pkg_resources.resource_filename(__name__, 'data/MATRIX.pickle.gz')
return pd.read_pickle(stream)
def load_table_counts():
import pkg_resources
"""Return a dataframe about miRNA/Gene prediction tool.
Contains the following fields:
col gene symbol
index mirbase mature id
"""
stream = pkg_resources.resource_filename(__name__, 'data/MATRIX_TABLE.pickle.gz')
return pd.read_pickle(stream)
def load_synthetic():
import pkg_resources
"""Return a dataframe about Gene/Gene synthetic lehal
Contains the following fields:
col1 gene symbol
col2 gene symbol
"""
stream = pkg_resources.resource_stream(__name__, 'data/SL.tsv')
return pd.read_csv(stream, sep = "\t",header=None, names = ["GeneA","GeneB"])
def load_dataset():
import pkg_resources
"""Return a 3dataframe about Gene/Gene synthetic lehal
Contains the following fields:
col1 gene symbol
col2 gene symbol
"""
stream = pkg_resources.resource_stream(__name__, 'dataset/TCGA-OV_miRNAs.csv')
dfMir = pd.read_csv(stream, index_col=0)
stream = pkg_resources.resource_stream(__name__, 'dataset/TCGA-OV_RNAseq.csv')
dfRna = pd.read_csv(stream, index_col=0)
stream = pkg_resources.resource_stream(__name__, 'dataset/metadata.csv')
metadata = pd.read_csv(stream, index_col=0)
return dfMir, dfRna, metadata
|
icbi-lab/miopy
|
miopy/feature_selection.py
|
<reponame>icbi-lab/miopy
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier,GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.svm import SVC
# used for normalization
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import label_binarize
# used for cross-validation
from sklearn.model_selection import StratifiedKFold
import numpy as np
import pandas as pd
import copy
def sort_abs(df):
df = df.reindex(df.abs().sort_values(ascending=False).index)
return df
def rf(X_train, y_train, X_test, y_test, lFeature = None, seed = 123):
seed = np.random.RandomState(seed)
md = RandomForestClassifier(n_estimators=300, random_state = seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
feature = pd.Series(md.feature_importances_, index = lFeature)
feature = sort_abs(feature)
return scoreTraining, scoreTest, feature
def gbc(X_train, y_train, X_test, y_test, lFeature = None, seed = 123):
seed = np.random.RandomState(seed)
md = GradientBoostingClassifier(n_estimators=300, random_state = seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
feature = pd.Series(md.feature_importances_, index = lFeature)
feature = sort_abs(feature)
return scoreTraining, scoreTest, feature
def ada(X_train, y_train, X_test, y_test, lFeature = None, seed = 123):
seed = np.random.RandomState(seed)
md = AdaBoostClassifier(n_estimators=300, random_state = seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
feature = pd.Series(md.feature_importances_, index = lFeature)
feature = sort_abs(feature)
return scoreTraining, scoreTest, feature
def lr(X_train, y_train, X_test, y_test, lFeature = None, seed = 123):
seed = np.random.RandomState(seed)
md = LogisticRegression(penalty="l2", max_iter=10000, random_state = seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
feature = pd.Series(md.coef_[0], index = lFeature)
feature = sort_abs(feature)
return scoreTraining, scoreTest, feature
def ridge(X_train, y_train, X_test, y_test, lFeature = None, seed = 123):
seed = np.random.RandomState(seed)
md = RidgeClassifier(max_iter=10000, random_state = seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
feature = pd.Series(md.coef_[0], index = lFeature)
feature = sort_abs(feature)
return scoreTraining, scoreTest, feature
def svm(X_train, y_train, X_test, y_test, lFeature = None, seed = 123):
seed = np.random.RandomState(seed)
md = SVC(kernel='linear', random_state = seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
feature = pd.Series(md.coef_[0], index = lFeature)
feature = sort_abs(feature)
return scoreTraining, scoreTest, feature
def bagging(X_train, y_train, X_test, y_test, lFeature = None, seed = 123):
seed = np.random.RandomState(seed)
md = BaggingClassifier(n_estimators=300, random_state = seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
feature_importances = np.mean([
tree.feature_importances_ for tree in md.estimators_
], axis=0)
feature = pd.Series(feature_importances, index = lFeature)
feature = sort_abs(feature)
return scoreTraining, scoreTest, feature
def feature_selection(data, k = 10, topk = 100, group = "Group", seed = 123):
# list of classifiers, selected on the basis of our previous paper
modelList = [[rf,"Random Forest",],
[lr,"Logistic Regresion",],
[ridge,"Ridge Classfier",],
[svm,"Support Vector Machine Classfier",],
[ada, "Ada Classifier"],
[bagging,"Bagging Classifier",],
[gbc,"Gradient Boosting Classifier",]
]
print("Loading dataset...")
X, Y = data.drop(group, axis =1), label_binarize(data[group], classes = data[group].unique().tolist())[:,0]
skf = StratifiedKFold(n_splits=k, shuffle=True, random_state=np.random.RandomState(seed))
indexes = [ (training, test) for training, test in skf.split(X, Y) ]
lFeature = data.drop(group, axis =1).columns.tolist()
topFeatures = pd.Series(dtype='float64', index=lFeature).fillna(0)
lAll = []
DictScore = {}
for model, name in modelList :
print("\nClassifier " + name)
ListScore = []
classifierTopFeatures = pd.Series(dtype='float64', name = name, index=lFeature).fillna(0)
# iterate over all folds
for train_index, test_index in indexes:
X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
y_train, y_test = Y[train_index], Y[test_index]
classifier = copy.deepcopy(model)
scoreTraining, scoreTest, orderedFeatures = classifier(X_train, y_train,\
X_test, y_test, lFeature = lFeature, seed = seed)
print("\ttraining: %.4f, test: %.4f" % (scoreTraining, scoreTest))
ListScore.append( scoreTest )
# now, let's get a list of the most important features, then mark the ones in the top X
lF = orderedFeatures.index[0:topk].tolist()
for f in lF:
feature = f
topFeatures[ feature ] += 1
classifierTopFeatures[ feature ] += 1
print("\ttest mean: %.4f" % (np.mean(ListScore)))
DictScore[name] = np.mean(ListScore)
lAll.append(classifierTopFeatures)
feature_per = topFeatures.div(len(modelList)*k)*100
dAll = pd.DataFrame(lAll).div(k)*100
return feature_per.sort_values(ascending=False)[:topk], dAll, DictScore
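# Hedged usage sketch on synthetic data (column names, group labels and sizes
# are invented); with k=2 folds it runs all seven classifiers in a few seconds.
def _example_feature_selection():
    rng = np.random.RandomState(0)
    demo = pd.DataFrame(rng.normal(size=(40, 6)),
                        columns=["feat%i" % i for i in range(6)])
    demo["Group"] = np.repeat(["A", "B"], 20)
    return feature_selection(demo, k=2, topk=3, group="Group")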
|
icbi-lab/miopy
|
bin/mio_correlation.py
|
<gh_stars>0
import pandas as pd
import argparse
from miopy.correlation import (read_count, concat_matrix, all_methods, tmm_normal, voom_normal,
get_mir_gene_ratio, header_list,
intersection, gene_set_correlation,
process_matrix_list)
from pathlib import Path
def arg():
"""
Parse the parameters passed as command-line arguments when the program
is called from the terminal. This makes automation easier.
Returns:
args (argparse) Object that stores the parsed arguments
"""
parser=argparse.ArgumentParser (
description ='''
'''
)
parser.add_argument("-m","--mir", dest="mirPath", action="store", required=True, \
help = "Path to the miRNA expression file", default = None)
parser.add_argument("-g","--gene", dest="genePath", action="store", required=True, \
help = "Path to the gene expression file", default = None)
parser.add_argument("-gs","--geneset", dest="gsPath", action="append", required=False, \
help = "Path to the geneset file. Call multiple time to use different GS", default = None)
parser.add_argument("-mt","--meta", dest="metadataPath", action="store", required=False, \
help = "Path to the clinical file. Columns have to be: sample, group, time, event", default = None)
parser.add_argument("-o","--out", dest="outPath", action="store", required=False, \
help = "Directory to save the output files", default = ".")
parser.add_argument("-t","--technology", dest="technology", action="store", required=False, \
help = "Sequencing technology used in the dataset: sequencing or microarray", default = "sequencing")
parser.add_argument("-mode","--mode", dest="method", action="store", required=False, \
help = "Mode to run the correlation: Correlation or GeneSetScore", default = "Correlation")
parser.add_argument("-f","--filter", dest="FilterChoice", action="store", required=False, \
help = """Apply to differential expresion analysis to filter the gene and mirna:
NF: No Filter,
CCU: Condition1 vs Condition2 Unpaired
CCP: Condition1 vs Condition2 Paired """, default = "NF")
parser.add_argument("-fc","--logfc", dest="logfc", type = int, action="store", required=False, \
help = "Absolute value of the Log(FC), use to filter the DE genes and mirnas", default = 1.22)
parser.add_argument("-pv","--adjust-pval", dest="pval", type = int, action="store", required=False, \
help = "Absolute value of the pval, use to filter the DE genes and mirnas", default = 0.005)
parser.add_argument("-hr","--hazard", dest="survival", action="store", type=bool, nargs='?',
default=False, help="Obtain log(Hazard Ratio for the gene/miRNA")
parser.add_argument("-n","--normal", dest="normal", action="store", type=bool, nargs='?',
default=False, help="Normalize matrix counts")
parser.add_argument("-p","--processor", dest="n_core", type = int, action="store", required=False, \
help = "NUmber of cores", default = 2)
args = parser.parse_args()
return args
def run_correlation(mirPath, genePath, gsPath=None, metadataPath = None, outPath=".", technology = "sequencing", method = "Correlation", FilterChoice = "NF", \
normal = False, logfc = 1.2, pval = 0.005, survival = False, n_core = 2, ratio = False):
#Get ExprDF
if mirPath.endswith(".csv"):
sep = ","
else:
sep = "\t"
print("Obtenido los ficheros")
#Read DF
if normal and technology == "sequencing":
mirExpr = tmm_normal(mirPath)
geneExpr = tmm_normal(genePath)
print("Normalizados los ficheros")
elif normal and technology == "microarray":
mirExpr = voom_normal(mirPath)
geneExpr = voom_normal(genePath)
print("Normalizados los ficheros")
else:
mirExpr = read_count(mirPath, sep)
geneExpr = read_count(genePath, sep)
print("Ya normalizados")
dfExpr = concat_matrix(mirExpr, geneExpr)
dfMeta = pd.read_csv(metadataPath, index_col=0) if metadataPath is not None else None
lMir, lGene = header_list(exprDF=dfExpr)
if gsPath != None:
lGene = []
for gs in gsPath:
lGene += list(open(gs,"r").read().split())
lGene = list(set(lGene))
#Create Ratio mir/gene
if ratio:
dfExpr = get_mir_gene_ratio(dfExpr, lGeneUser=lGene)
#Add Label Column
if survival:
dfMeta = dfMeta[["event","time"]]
dfExpr = pd.concat([dfMeta,dfExpr], axis = 1).dropna()
if method == "Correlation":
table, dfPearson = all_methods(dfExpr, lMirUser = lMir, lGeneUser = lGene, n_core = n_core, hr = survival)
table = table.round(4)
table.to_csv(outPath+"/CorrelationTable.csv")
dfPearson.to_csv(outPath+"/CorrelationMatrix.csv")
return None
def main():
argu = arg()
run_correlation(argu.mirPath, argu.genePath, gsPath=argu.gsPath, metadataPath=argu.metadataPath, \
technology=argu.technology, method=argu.method, FilterChoice=argu.FilterChoice, survival=argu.survival, \
n_core=argu.n_core, normal=argu.normal, logfc = argu.logfc, pval=argu.pval)
if __name__ == "__main__":
main()
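# Example invocation (hedged; the file names below are placeholders, not files
# shipped with the package):
#   python mio_correlation.py -m mirna_counts.csv -g gene_counts.csv \
#       -mt metadata.csv -o results -p 4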
|
icbi-lab/miopy
|
miopy/multilabel_feature_selection.py
|
<reponame>icbi-lab/miopy
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier,GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from numpy import interp
from sklearn.metrics import roc_auc_score
# used for normalization
from sklearn.preprocessing import StandardScaler
# used for cross-validation
from sklearn.model_selection import StratifiedKFold
import pandas as pd
import copy
def plot_roc_curve(classifier, X_train, X_test, y_train, y_test, name, k, n_classes = 4):
# Binarize the output
try:
y_score = classifier.decision_function(X_test)
except:
y_score = classifier.predict_proba(X_test)
pass
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(0,n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test.iloc[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(np.array(y_test).ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(0,n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
fig = plt.figure(figsize = (15,15))
ax = fig.add_subplot(111)
ax.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=10)
ax.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=15)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'red'])
for i, color in zip(range(n_classes), colors):
ax.plot(fpr[i], tpr[i], color=color,lw=15,
label='(area = {%.2f})' %(roc_auc[i]))
plt.xlim([-0.1, 1.0])
plt.ylim([0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Classifier %s Kfold %i'%(name, k))
plt.legend(loc="lower right")
ax.grid()
plt.savefig("/data/projects/2020/OvarianCancerHH/TCGA_OV_SubGroup_Features/ROC/TCGA_OV_ROC_%s_%i.png"%(name,k))
def sort_abs(df):
df = df.reindex(df.abs().sort_values(ascending=False).index)
return df
def rf(X_train, y_train, X_test, y_test, lFeature = None):
md = OneVsRestClassifier(RandomForestClassifier(n_estimators=1000))
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
GroupFeature = []
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
for estimator in md.estimators_:
feature = pd.Series(estimator.feature_importances_, index = lFeature)
feature = sort_abs(feature)
#print(feature)
GroupFeature.append(feature)
return scoreTraining, scoreTest, GroupFeature, md
def bagging(X_train, y_train, X_test, y_test, lFeature = None):
md = OneVsRestClassifier(BaggingClassifier(n_estimators=1000))
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
GroupFeature = []
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
for estimator in md.estimators_:
feature_importances = np.mean([
tree.feature_importances_ for tree in estimator.estimators_
], axis=0)
feature = pd.Series(feature_importances, index = lFeature)
feature = sort_abs(feature)
GroupFeature.append(feature)
return scoreTraining, scoreTest, GroupFeature, md
def ada(X_train, y_train, X_test, y_test, lFeature = None):
md = OneVsRestClassifier(AdaBoostClassifier(n_estimators=1000))
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
GroupFeature = []
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
for estimator in md.estimators_:
feature = pd.Series(estimator.feature_importances_, index = lFeature)
feature = sort_abs(feature)
#print(feature)
GroupFeature.append(feature)
return scoreTraining, scoreTest, GroupFeature, md
def lr(X_train, y_train, X_test, y_test, lFeature = None):
md = OneVsRestClassifier(LogisticRegression(penalty="l2", max_iter=100000))
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
GroupFeature = []
md.fit(X_train, y_train)
scoreTraining = md.score(X_train, y_train)
scoreTest = md.score(X_test, y_test)
for estimator in md.estimators_:
feature = pd.Series(estimator.coef_[0], index = lFeature)
feature = sort_abs(feature)
#print(feature)
GroupFeature.append(feature)
return scoreTraining, scoreTest, GroupFeature, md
def multilabel_feature_selection(data, k = 10, topk = 100, group = "Group", dGroup = None):
# list of classifiers, selected on the basis of our previous paper
modelList = [
[bagging,"Bagging Classifier",],
[rf,"Random Forest",],
[lr,"Logistic Regresion",],
#[ridge,"Ridge Classfier",],
#[svm,"Support Vector Machine Classfier",],
[ada, "Ada Classifier"],
]
print("Loading dataset...")
X, Y = data.drop(group, axis =1), data[group]
Y = pd.DataFrame(label_binarize(Y, classes = Y.unique().tolist()))
skf = StratifiedKFold(n_splits=k, shuffle=True)
indexes = [ (training, test) for training, test in skf.split(X, data[group]) ]
lFeature = data.drop(group, axis =1).columns.tolist()
topFeatures = {}
for key in dGroup.keys():
topFeatures[key] = pd.Series(dtype='float64', index=lFeature).fillna(0)
lAll = []
DictScore = {}
for model, name in modelList :
print("\nClassifier " + name)
ListScore = []
classifierTopFeatures = pd.Series(dtype='float64', name = name, index=lFeature).fillna(0)
# iterate over all folds
j = 0
for train_index, test_index in indexes:
X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
y_train, y_test = Y.iloc[train_index,:], Y.iloc[test_index,:]
classifier = copy.deepcopy(model)
scoreTraining, scoreTest, GroupFeature, classifier = classifier(X_train, y_train,\
X_test, y_test, lFeature = lFeature)
print("\ttraining: %.4f, test: %.4f" % (scoreTraining, scoreTest))
ListScore.append( scoreTest )
# now, let's get a list of the most important features, then mark the ones in the top X
#print(len(GroupFeature))
for i in range(0,len(GroupFeature)):
orderedFeatures = GroupFeature[i]
lF = orderedFeatures.index[0:topk].tolist()
#print(lF)
for f in lF:
feature = f
#print(i)
#print(f)
topFeatures[list(dGroup.keys())[i]][ feature ] += 1
classifierTopFeatures[ feature ] += 1
try:
plot_roc_curve(classifier, X_train, X_test, y_train, y_test, name, j, n_classes = 4)
except Exception as error:
print("[-]%s"%error)
pass
j += 1
print("\ttest mean: %.4f" % (np.mean(ListScore)))
DictScore[name] = np.mean(ListScore)
lAll.append(classifierTopFeatures)
dPerc = {}
for key in dGroup.keys():
feature_per = topFeatures[key].div(len(modelList)*k)*100
dPerc[key] = feature_per.sort_values(ascending=False)[:topk]
dAll = pd.DataFrame(lAll).div(k*len(dGroup.keys()))*100
return dPerc, dAll, DictScore
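# Minimal usage sketch (illustrative only; not part of the original module).
# It assumes a hypothetical table "expression_with_groups.csv" whose rows are
# samples, whose columns are miRNA/gene features, and which also holds a
# categorical "Group" column with more than two classes; dGroup only needs
# keys matching those class labels, in the order they appear in the data.
if __name__ == "__main__":
    example = pd.read_csv("expression_with_groups.csv", index_col=0)  # hypothetical input file
    dGroup = {label: None for label in example["Group"].unique()}
    dPerc, dAll, DictScore = multilabel_feature_selection(
        example, k=10, topk=100, group="Group", dGroup=dGroup)
    print(DictScore)    # mean test accuracy per classifier
    print(dAll.head())  # % of folds in which each feature reached the per-classifier top-k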
|
alcorzheng/learn_python
|
Learn_pkgs/learn/BeautifulSoup/spider_ssq.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date: 2018-03-14
# desc: Scrape Shuangseqiu (double color ball) lottery draw results
from bs4 import BeautifulSoup
from Spiders.spiders.lottery.lottery_model import LotteryCNSSQ
from Spiders.common import config, database, utils, utils_html
def get_page_num(url, headers):
"""获取url总页数"""
soup = BeautifulSoup(utils_html.getPage(url, headers).content, 'lxml')
pagenums = soup.select('body > table > tr > td > p.pg > strong:nth-of-type(1)')
if len(pagenums) > 0:
return int(pagenums[0].get_text().replace(',', ''))
else:
return 0
def ins_data_ssq():
"""爬取双色球开奖信息并插入数据库"""
# 获取上次爬取的最大ID
conn = database.CommonDBExecutor(config.get_database_url(), LotteryCNSSQ)
results = conn.querybysqlstr(r'''select max(id_) max_id from data_analysis.lottery_cn_ssq''')
end_id = utils.obj2int(results[0]['max_id'])
    for list_num in range(1, get_page_num(utils_html.getSSQURL(1), utils_html.getHeaders()) + 1):  # from page 1 through the last page (inclusive)
url = utils_html.getSSQURL(list_num)
soup = BeautifulSoup(utils_html.getPage(url, utils_html.getHeaders()).content, 'lxml')
list_date_ = soup.select('body > table > tr > td:nth-of-type(1)')
list_id_ = soup.select('body > table > tr > td:nth-of-type(2)')
list_win_nums = soup.select('body > table > tr > td:nth-of-type(3)')
list_amount_ = soup.select('body > table > tr > td:nth-of-type(4) > strong')
list_prize_first = soup.select('body > table > tr > td:nth-of-type(5) > strong')
list_prize_second = soup.select('body > table > tr > td:nth-of-type(6) > strong')
ssqdatas = []
for date_, id_, win_nums, amount_, prize_first, prize_second in zip(list_date_, list_id_, list_win_nums,
list_amount_, list_prize_first,
list_prize_second):
if int(id_.get_text().replace(',', '')) <= int(end_id): break
data = {
'id_': utils.obj2int(id_.get_text().replace(',', '')),
'date_': date_.get_text(),
'win_nums_red': ','.join(list(win_nums.stripped_strings)[:-1]),
'win_nums_blue': list(win_nums.stripped_strings)[-1],
'amount_': utils.obj2int(amount_.get_text().replace(',', '').strip()),
'prize_first': utils.obj2int(prize_first.get_text().replace(',', '').strip()),
'prize_second': utils.obj2int(prize_second.get_text().replace(',', '').strip())
}
ssqdatas.append(data)
if len(ssqdatas) == 0:
print("【双色球】未爬取到符合条件数据!")
break
else:
print("【双色球】本次爬取到%s条符合条件数据!" % (len(ssqdatas)))
# 插入数据库
conn.insert_by_batch(ssqdatas)
|
alcorzheng/learn_python
|
Algorithms/algorithms/algorithms.py
|
<gh_stars>1-10
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date:
# desc:
from Algorithms.algorithms import Sorts
if __name__ == "__main__":
sotr_list = [2, 8, 4, 9, 3, 6]
print(Sorts.insertion_sort(sotr_list))
|
alcorzheng/learn_python
|
Spiders/spiders/blankchain/finance_eth.py
|
<filename>Spiders/spiders/blankchain/finance_eth.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
@auth: alcorzheng<<EMAIL>>
@file: finance_eth.py
@time: 2018/4/3 15:24
@desc: Financial statistics for an ETH wallet address
"""
import requests
from bs4 import BeautifulSoup
def cal_ethorses():
    # Ethorse wallet address
ethorses_address = '0xf1c38359ffec224cb5de98f981dd79ab749f8ed0'
balance_data = _get_balance(ethorses_address)
trans_datas = _get_trans(ethorses_address)
account_info = {
'address': ethorses_address,
'balance': float(balance_data),
'invest': 0,
'trans_in_by_cal': 0,
'trans_out': 0,
'trans_outtx': 0
}
for trans_data in trans_datas:
if trans_data['trans_direction'] == 'OUT':
account_info['trans_out'] += float(trans_data['trans_value'])
account_info['trans_outtx'] += float(trans_data['trans_txfee'])
elif trans_data['trans_direction'] == 'IN':
account_info['invest'] += float(trans_data['trans_value'])
account_info['trans_in_by_cal'] = \
account_info['trans_out'] + account_info['trans_outtx'] - account_info['balance'] - account_info['invest']
for key in account_info:
print(str(key) + ' : ' + str(account_info[key]))
def _getheaders():
headers = {
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
}
return headers
def _get_balance_url(eth_address):
return 'https://etherscan.io/address/' + str(eth_address)
def _get_trans_url(eth_address, page_num):
return 'https://etherscan.io/txs?a=' + str(eth_address) + '&p=' + str(page_num)
def _getpage(url, headers, proxy=None):
try:
req = requests.get(url, headers=headers, proxies=proxy)
req.raise_for_status()
req.encoding = req.apparent_encoding
return req
except requests.HTTPError as e:
print(e)
def _get_balance(eth_address):
url = _get_balance_url(eth_address)
soup = BeautifulSoup(_getpage(url, _getheaders()).content, 'lxml')
balance_data = soup.select(
'#ContentPlaceHolder1_divSummary > div:nth-of-type(1) > table > tr:nth-of-type(1) > td:nth-of-type(2)'
)
if len(balance_data) > 0:
return balance_data[0].get_text().replace(' Ether', '').strip()
else:
return 0
def _get_trans(eth_address):
trans_datas = []
    for pagenum in range(1, _get_trans_pagenum(eth_address, _getheaders()) + 1):  # from page 1 through the last page
url = _get_trans_url(eth_address, pagenum)
soup = BeautifulSoup(_getpage(url, _getheaders()).content, 'lxml')
tr_datas = soup.select('#ContentPlaceHolder1_mainrow > div > div > div > table > tbody > tr')
for tr_data in tr_datas:
data = {
'trans_direction': tr_data.select('td:nth-of-type(5)')[0].get_text().replace('\\\xa0', '').strip(),
'trans_from_address': tr_data.select('td:nth-of-type(4)')[0].get_text().strip(),
'trans_to_address': tr_data.select('td:nth-of-type(6)')[0].get_text().strip(),
'trans_value': tr_data.select('td:nth-of-type(7)')[0].get_text().replace(' Ether', '').strip(),
'trans_txfee': tr_data.select('td:nth-of-type(8)')[0].get_text().strip()
}
trans_datas.append(data)
return trans_datas
def _get_trans_pagenum(eth_address, headers):
url = _get_trans_url(eth_address, 1)
soup = BeautifulSoup(_getpage(url, headers).content, 'lxml')
pagenums = soup.select(
'body > div.wrapper > div.profile.container > div.row > div:nth-of-type(2) > p > span > b:nth-of-type(2)'
)
if len(pagenums) > 0:
return int(pagenums[0].get_text().strip())
else:
return 0
|
alcorzheng/learn_python
|
Spiders/common/config.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
@auth: alcorzheng<<EMAIL>>
@file: config.py
@time: 2018/4/10 9:47
@desc: Configuration file
"""
# -------------------------------
# Database connection settings
DB_TYPE = 'postgresql'
DB_NAME = 'zhengx_study'
DB_USER = 'data_analysis'
DB_PASS = '<PASSWORD>'
DB_HOST = 'db.zhengx.xyz'
DB_PORT = '5432'
# -------------------------------
def get_database_url():
"""获取数据库链接"""
if DB_TYPE == 'postgresql':
return DB_TYPE+'://'+DB_USER+':'+DB_PASS+'@'+DB_HOST+':'+DB_PORT+'/'+DB_PORT
return None
|
alcorzheng/learn_python
|
Spiders/common/utils_html.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date: 2018-03-14
# desc: Common HTML utilities
import requests
# Pretend to be a browser (via User-Agent) and fetch the page source
def getPage(url,headers):
try:
req = requests.get(url, headers=headers)
req.raise_for_status()
req.encoding = req.apparent_encoding
return req
except requests.HTTPError as e:
print(e)
# Build the SSQ result-list URL for a given page
def getSSQURL(page_num):
return 'http://kaijiang.zhcw.com/zhcw/html/ssq/list_' + str(page_num) + '.html'
# Build the DLT result-list URL for a given page
def getDLTURL(page_num):
if int(page_num)!=1:
return 'http://www.lottery.gov.cn/historykj/history_' + str(page_num) + '.jspx?_ltype=dlt'
else:
return 'http://www.lottery.gov.cn/historykj/history.jspx?_ltype=dlt'
# Default request headers
def getHeaders():
headers = {
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
}
return headers
|
alcorzheng/learn_python
|
Spiders/common/database.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date: 2018-03-22
# desc: Database utilities
from sqlalchemy import create_engine, and_
from sqlalchemy.orm import sessionmaker
from Spiders.common import config
class CommonDBExecutor(object):
def __init__(self, db_url, table=None):
"""初始化"""
if db_url is None:
db_url = config.get_database_url()
self.db = create_engine(db_url, pool_size=100, max_overflow=200, pool_recycle=3600, encoding='utf8')
dbsession = sessionmaker(bind=self.db)
self.session = dbsession()
if table:
self.tb = table
else:
raise Exception("Failed to get table for executor.")
def __del__(self):
"""自动关闭"""
self.session.close_all()
def insert(self, **kwargs):
"""插入"""
service = self.tb(**kwargs)
self.session.add(service)
self.session.commit()
def insert_by_batch(self, values):
"""批量插入"""
self.session.execute(self.tb.__table__.insert(), values)
self.session.commit()
def _filter_kwargs_map(self, filter_table):
"""这个列名对应的值,设置过滤条件,这里支持在表列名前面加 "not_" 将过滤条件设置为不等于"""
new_filter_list = []
for key, val in filter_table.items():
filter_expression = None
if len(key) > 4 and "not_" == key[0:4]:
class_key = getattr(self.tb, key[4:])
filter_expression = class_key != val
else:
class_key = getattr(self.tb, key)
filter_expression = class_key == val
new_filter_list.append(filter_expression)
return new_filter_list
def query(self, **kwargs):
"""查询"""
ret = None
data = None
if kwargs:
new_filter_list = self._filter_kwargs_map(kwargs)
if len(new_filter_list) > 1:
data = self.session.query(self.tb).filter(and_(*new_filter_list)).all()
else:
data = self.session.query(self.tb).filter(*new_filter_list).all()
else:
data = self.session.query(self.tb).all()
if isinstance(data, list):
ret = [d.to_dict() for d in data]
else:
ret = data.to_dict()
return ret
def querybysqlstr(self, sqlstrs, params=None):
"""自定义SQL查询"""
ret = []
data = None
if params:
data = self.db.execute(sqlstrs, params)
else:
data = self.db.execute(sqlstrs, {})
for row in data:
ret.append(row)
return ret
def update(self, update_dict={}, **kwargs):
"""更新"""
ret = None
if kwargs:
new_filter_list = self._filter_kwargs_map(kwargs)
if len(new_filter_list) > 1:
ret = self.session.query(self.tb).filter(and_(*new_filter_list)).update(update_dict)
else:
ret = self.session.query(self.tb).filter(*new_filter_list).update(update_dict)
else:
ret = self.session.query(self.tb).update(update_dict)
self.session.commit()
return ret
def delete(self, **kwargs):
"""删除"""
ret = None
if kwargs:
new_filter_list = self._filter_kwargs_map(kwargs)
if len(new_filter_list) > 1:
ret = self.session.query(self.tb).filter(and_(*new_filter_list)).delete()
else:
ret = self.session.query(self.tb).filter(*new_filter_list).delete()
else:
ret = self.session.query(self.tb).delete()
self.session.commit()
return ret
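# Minimal usage sketch (illustrative only; not part of the original module).
# It assumes the LotteryCNSSQ model and a reachable database configured in
# Spiders.common.config; the filter values below are made up.
if __name__ == "__main__":
    from Spiders.spiders.lottery.lottery_model import LotteryCNSSQ
    conn = CommonDBExecutor(config.get_database_url(), LotteryCNSSQ)
    # plain keyword arguments become equality filters: id_ == 2018001
    print(conn.query(id_=2018001))
    # a "not_" prefix turns the condition into "not equal": prize_first != 0
    print(conn.query(not_prize_first=0))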
|
alcorzheng/learn_python
|
Test/pgsqlByPsycopg2.py
|
# /usr/bin/env python
# -*- coding:utf-8 -*-
import psycopg2
# Connect to the database
conn = psycopg2.connect("dbname=study_python user=study password=<PASSWORD> host=127.0.0.1 port=5432")
# Create a cursor to access the database
cur = conn.cursor()
# Create a table
cur.execute(
'CREATE TABLE Employee ('
'name varchar(80),'
'address varchar(80),'
'age int,'
'date date'
')'
)
# Insert data
cur.execute("INSERT INTO Employee VALUES('Gopher', 'China Beijing', 100, '2017-05-27')")
# Query data
cur.execute("SELECT * FROM Employee")
rows = cur.fetchall()
for row in rows:
print('name=' + str(row[0]) + ' address=' + str(row[1]) + ' age=' + str(row[2]) + ' date=' + str(row[3]))
# Update data
cur.execute("UPDATE Employee SET age=12 WHERE name='Gopher'")
# Delete data
cur.execute("DELETE FROM Employee WHERE name='Gopher'")
# Commit the transaction
conn.commit()
# Close the connection
conn.close()
|
alcorzheng/learn_python
|
CrazyPythonFor163/crazyPy/class03/Class03_HomeWork.py
|
<filename>CrazyPythonFor163/crazyPy/class03/Class03_HomeWork.py
# /usr/bin/env python
# -*- coding:utf-8 -*-
# Draw the flag of the United States
import turtle
import math
# User-defined parameters
baselen = 100
af_Color_White="#FFFFFF"
af_Color_Red="#B22234"
af_Color_Blue="#3C3B6E"
# Fixed flag proportions
af_Stripe_Length=1.9*baselen
af_Stripe_Width=0.0769*baselen
af_StarArea_Width=af_Stripe_Width*7
ad_Star_High=af_StarArea_Width/10
af_Star_DrawLen=ad_Star_High/math.cos(math.radians(18))
af_Star_Margin_Left=0.0633*baselen/2
af_Star_Margin_Top=ad_Star_High-af_Star_DrawLen/2/math.cos(math.radians(18))*math.sin(math.radians(18))
af_StarArea_Length=af_Stripe_Length*2/5
def drawStripe(lineColor, fillColor):
turtle.color(lineColor, fillColor)
turtle.begin_fill()
turtle.forward(af_Stripe_Length)
turtle.right(90)
turtle.forward(af_Stripe_Width )
turtle.right(90)
turtle.forward(af_Stripe_Length)
turtle.right(90)
turtle.forward(af_Stripe_Width)
turtle.right(90)
turtle.end_fill()
return
def drawStar(lineColor, fillColor):
turtle.color(lineColor, fillColor)
turtle.begin_fill()
for i in range(5):
turtle.forward(af_Star_DrawLen)
turtle.right(144)
turtle.end_fill()
return
def drawAllStripes(redColor,whiteColor):
turtle.up()
turtle.goto(0, 0)
turtle.down()
for i in range(13):
turtle.up()
turtle.goto(0, -af_Stripe_Width * i)
turtle.down()
drawColor = redColor if i%2==0 else whiteColor
drawStripe(drawColor, drawColor)
return
def drawStarArea(lineColor, fillColor):
turtle.up()
turtle.goto(0, 0)
turtle.down()
turtle.color(lineColor, fillColor)
turtle.begin_fill()
turtle.forward(af_StarArea_Length)
turtle.right(90)
turtle.forward(af_StarArea_Width)
turtle.right(90)
turtle.forward(af_StarArea_Length)
turtle.right(90)
turtle.forward(af_StarArea_Width)
turtle.right(90)
turtle.end_fill()
return
def drawAllStars(starColor):
turtle.up()
turtle.goto(0, 0)
turtle.down()
for i in range(9):
lineStarNum = 6 if i%2==0 else 5
for j in range(lineStarNum):
turtle.up()
turtle.goto(af_Star_Margin_Left*(1+2*(i%2+2*j)),-af_Star_Margin_Top-ad_Star_High*i)
turtle.down()
drawStar(starColor, starColor)
return
drawAllStripes(af_Color_Red,af_Color_White)
drawStarArea(af_Color_Blue,af_Color_Blue)
drawAllStars(af_Color_White)
|
alcorzheng/learn_python
|
Learn_pkgs/learn_pkgs.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date:
# desc:
from Learn_pkgs.learn.pyecharts import test
if __name__ == "__main__":
test
|
alcorzheng/learn_python
|
Spiders/spiders/lottery/lottery_cn_dlt.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
@auth: alcorzheng<<EMAIL>>
@file: lottery_cn_dlt.py
@time: 2018/4/10 13:59
@desc:
"""
|
alcorzheng/learn_python
|
CrazyPythonFor163/test/test.py
|
# /usr/bin/env python
# -*- coding:utf-8 -*-
import math
# User-defined parameters
baselen = 500
af_color_white="#FFFFFF"
af_color_red="#B22234"
af_color_blue="#3C3B6E"
# Fixed flag proportions
af_stripe_length=1.9*baselen
af_stripe_width=0.0769*baselen
af_stararea_length=af_stripe_length*2/5
af_stararea_width=af_stripe_width*7
ad_star_high=af_stararea_width/10
af_star_drawlen=ad_star_high/math.cos(math.radians(18))
af_star_margin_left=(af_stararea_length-af_star_drawlen*11)/2
af_star_margin_top=ad_star_high-af_star_drawlen/2/math.cos(math.radians(18))*math.sin(math.radians(18))
print(ad_star_high)
print(af_star_drawlen)
print(af_star_margin_left)
print(af_star_margin_top)
print(af_star_drawlen/2/math.cos(math.radians(18))*math.sin(math.radians(18)))
|
alcorzheng/learn_python
|
Spiders/spiders/lottery/lottery_model.py
|
<filename>Spiders/spiders/lottery/lottery_model.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date:
# desc:
from sqlalchemy import Column, Integer, String, Date, Numeric
from sqlalchemy.ext.declarative import declarative_base
from Spiders.common import utils
# Base class for ORM model objects:
Base = declarative_base()
Base.to_dict = utils.to_dict
class LotteryCNSSQ(Base):
    # Table name:
__tablename__ = 'lottery_cn_ssq'
    # Table structure:
id_ = Column(Integer, primary_key=True)
date_ = Column(Date)
win_nums_red = Column(String(500))
win_nums_blue = Column(String(500))
amount_ = Column(Numeric(32, 6))
prize_first = Column(Integer)
prize_second = Column(Integer)
class LotteryCNDLT(Base):
    # Table name:
__tablename__ = 'lottery_cn_dlt'
    # Table structure:
id_ = Column(Integer, primary_key=True)
date_ = Column(Date)
win_nums_red = Column(String(500))
win_nums_blue = Column(String(500))
amount_ = Column(Numeric(32, 6))
prize_first = Column(Integer)
prize_second = Column(Integer)
|
alcorzheng/learn_python
|
Algorithms/algorithms/Sorts.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date: 2018-03-19
# desc: Sorting algorithms
"""
Insertion sort
"""
def insertion_sort(lists):
    """Sort the list in place with insertion sort and return it."""
    lists_len = len(lists)
    for i in range(1, lists_len):
        key = lists[i]
        j = i - 1
        while j >= 0:
            if lists[j] > key:
                # shift the larger element right and move key one slot left
                lists[j + 1] = lists[j]
                lists[j] = key
            else:
                # the prefix left of j is already sorted, so nothing further left can be greater
                break
            j -= 1
    return lists
|
alcorzheng/learn_python
|
Test/envparam.py
|
<gh_stars>1-10
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date:
# desc: Global configuration module
import re
import os
import tempfile
global _env
CONF_FILE_URL = "config/lottery.properties"
def _init_():
    """Initialize the global environment from the properties file"""
    global _env, _props
    _props = Properties(CONF_FILE_URL)
    _env = _props.properties
def setVal(key, value, bwrite=False):
    """Set a global variable, optionally writing it back to the properties file"""
    _env[key] = value
    if bwrite:
        _props.replace_property(key + '=.*', key + '=' + value, True)
def getVal(key, default_value=None):
"""获取一个全局变量,不存在则返回默认值"""
try:
return _env[key]
except KeyError:
return default_value
class Properties:
def __init__(self, file_name):
self.file_name = file_name
self.properties = {}
try:
fopen = open(self.file_name, 'r')
for line in fopen:
line = line.strip()
if line.find('=') > 0 and not line.startswith('#'):
strs = line.split('=')
self.properties[strs[0].strip()] = strs[1].strip()
except Exception as e:
raise e
else:
fopen.close()
def replace_property(self, from_regex, to_str, append_on_not_exists=True):
        file = tempfile.TemporaryFile(mode='w+')  # create a text-mode temporary file
if os.path.exists(self.file_name):
r_open = open(self.file_name, 'r')
pattern = re.compile(r'' + from_regex)
found = None
            for line in r_open:  # read the original file
if pattern.search(line) and not line.strip().startswith('#'):
found = True
line = re.sub(from_regex, to_str, line)
                file.write(line)  # write to the temporary file
if not found and append_on_not_exists:
file.write('\n' + to_str)
r_open.close()
file.seek(0)
            content = file.read()  # read back everything from the temporary file
if os.path.exists(self.file_name):
os.remove(self.file_name)
w_open = open(self.file_name, 'w')
            w_open.write(content)  # write the temporary file's content back to the original file
w_open.close()
            file.close()  # close the temporary file, which also deletes it automatically
else:
print("file %s not found" % self.file_name)
|
alcorzheng/learn_python
|
Learn_pkgs/learn/BeautifulSoup/spider_dlt.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date: 2018-03-14
# desc: Scrape Daletou (super lotto) lottery draw results
from bs4 import BeautifulSoup
from Spiders.spiders.lottery import lottery_model
from Spiders.common import config, database, utils, utils_html
def get_page_num(url, headers):
"""获取url总页数"""
soup = BeautifulSoup(utils_html.getPage(url, headers).content, 'lxml')
pagenums = soup.select('body > div.yyl > div.yylMain > div.result > div > div > select > option')
if len(pagenums) > 0:
return int(pagenums[-1].get_text().replace(',', ''))
else:
return 0
def ins_data_dlt():
"""爬取双色球开奖信息并插入数据库"""
# 获取上次爬取的最大ID
conn = database.CommonDBExecutor(config.get_database_url(), lottery_model.LotteryCNDLT)
results = conn.querybysqlstr('select max(id_) max_id from data_analysis.lottery_cn_dlt')
end_id = utils.obj2int(results[0]['max_id'])
    for list_num in range(1, get_page_num(utils_html.getDLTURL(1), utils_html.getHeaders()) + 1):  # from page 1 through the last page (inclusive)
url = utils_html.getDLTURL(list_num)
soup = BeautifulSoup(utils_html.getPage(url, utils_html.getHeaders()).content, 'lxml')
list_dlt = soup.select('body > div.yyl > div.yylMain > div.result > table > tbody > tr')
dltdatas = []
for dlt in list_dlt:
if int(dlt.select('td:nth-of-type(1)')[0].get_text().replace(',', '')) <= int(end_id):
break
data = {
'id_': utils.obj2int(dlt.select('td:nth-of-type(1)')[0].get_text().replace(',', '')),
'date_': dlt.select('td:nth-of-type(20)')[0].get_text(),
'win_nums_red': ','.join([win_num.get_text() for win_num in dlt.select('td.red')]),
'win_nums_blue': ','.join([win_num.get_text() for win_num in dlt.select('td.blue')]),
'amount_': utils.obj2int(dlt.select('td:nth-of-type(18)')[0].get_text().replace(',', '').replace('-', '').strip()),
'prize_first': utils.obj2int(dlt.select('td:nth-of-type(9)')[0].get_text().replace(',', '').strip()),
'prize_second': utils.obj2int(dlt.select('td:nth-of-type(13)')[0].get_text().replace(',', '').strip())
}
dltdatas.append(data)
if len(dltdatas) == 0:
print("【大乐透】未爬取到符合条件数据!")
break
else:
print("【大乐透】本次爬取到%s条符合条件数据!" % (len(dltdatas)))
# 插入数据库
conn.insert_by_batch(dltdatas)
|
alcorzheng/learn_python
|
Spiders/spiders/lottery/lottery_cn_ssq.py
|
<gh_stars>1-10
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
@auth: alcorzheng<<EMAIL>>
@file: lottery_cn_ssq.py
@time: 2018/4/1013:59
@desc:
"""
from requests_html import HTMLSession
session = HTMLSession()
def spiders_data():
""""按照规则爬取数据"""
response = session.get('http://kaijiang.zhcw.com/zhcw/html/ssq/list_1.html')
content = response.html.find('.wqhgt', first=True)
tr_list = content.lxml.find('tr')
for tr in tr_list:
print(tr.attrs)
if __name__ == '__main__':
spiders_data()
|
alcorzheng/learn_python
|
Learn_pkgs/learn/tushare/GetDataByID.py
|
# /usr/bin/env python
# -*- coding:utf-8 -*-
import tushare as ts
data=ts.get_hist_data('300032')
print(data)
|
alcorzheng/learn_python
|
Spiders/common/utils.py
|
<gh_stars>1-10
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date: 2018-03-15
# desc: Basic utility functions
def obj2int(val):
"""整数转换处理,空值转为0"""
if val:
return int(val)
else:
return 0
def to_dict(self):
"""将sqlAlchemy中的对象转换为dict"""
return {c.name: getattr(self, c.name, None) for c in self.__table__.columns if c.name!="Status"}
|
alcorzheng/learn_python
|
CrazyPythonFor163/crazyPy/class03/Class03_DrawStar.py
|
<reponame>alcorzheng/learn_python
# /usr/bin/env python
# -*- coding:utf-8 -*-
import turtle
turtle.forward(100)
turtle.right(144)
turtle.forward(100)
turtle.right(144)
turtle.forward(100)
turtle.right(144)
turtle.forward(100)
turtle.right(144)
turtle.forward(100)
|
alcorzheng/learn_python
|
Spiders/spiders.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# auth: alcorzheng<<EMAIL>>
# date:
# desc:
from Learn_pkgs.learn.BeautifulSoup import spider_ssq
if __name__ == "__main__":
print("-----开始-----")
spider_ssq.ins_data_ssq()
# spider_dlt.ins_data_dlt()
print("-----结束-----")
|
diadochos/quora-scraper
|
setup.py
|
<filename>setup.py
from setuptools import setup
def readme():
with open('README.md') as f:
README = f.read()
return README
setup(
name = 'quora-scraper',
packages = ['quora_scraper'],
version = '1.1.3',
license='MIT',
description = "Python based code to scrap and download data from quora website: questions related to certain topics, answers given on certain questions and users profile data",
long_description=readme(),
long_description_content_type="text/markdown",
author = '<NAME>',
author_email = '<EMAIL>',
url="https://github.com/banyous/quora-scraper",
download_url = 'https://github.com/user/reponame/archive/v_01.tar.gz',
keywords = ['quora', 'topics', 'Q&A','user','scraper', 'download','answers','questions'],
include_package_data=True,
install_requires=[
'selenium',
'bs4',
'webdriver-manager',
'dateparser',
'userpaths'
],
entry_points={
"console_scripts": [
"quora-scraper=quora_scraper.scraper:main",
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
diadochos/quora-scraper
|
quora_scraper/scraper.py
|
<gh_stars>0
# __main__.py
DEBUG = 1
import os
import re
import subprocess
import sys
import time
import ast
import csv
import json
import pathlib
from pathlib import Path
import random
import userpaths
import dateparser
import argparse
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# -------------------------------------------------------------
# -------------------------------------------------------------
def connectchrome():
options = Options()
options.add_argument('--headless')
options.add_argument('log-level=3')
options.add_argument("--incognito")
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
driver = webdriver.Chrome(executable_path="chromedriver", options=options)
driver.maximize_window()
time.sleep(2)
return driver
# -------------------------------------------------------------
# -------------------------------------------------------------
# remove 'k'(kilo) and 'm'(million) from Quora numbers
def convertnumber(number):
if 'k' in number:
n=float(number.lower().replace('k', '').replace(' ', ''))*1000
elif 'm' in number:
n=float(number.lower().replace('m', '').replace(' ', ''))*1000000
else:
n=number
return int(n)
# -------------------------------------------------------------
# -------------------------------------------------------------
# convert Quora dates (such as "2 months ago") to YYYY-MM-DD format
def convertDateFormat(dateText):
try:
if ("Updated" in dateText):
date = dateText[8:]
else:
date = dateText[9:]
        date = dateparser.parse(date).strftime("%Y-%m-%d")  # parse the date with its "Answered"/"Updated" prefix stripped
except: # when updated or answered in the same week (ex: Updated Sat)
date = dateparser.parse("7 days ago").strftime("%Y-%m-%d")
return date
# -------------------------------------------------------------
# -------------------------------------------------------------
def scrollup_alittle(self,nbtimes):
for iii in range(0,nbtimes):
self.execute_script("window.scrollBy(0,-200)")
time.sleep(1)
# -------------------------------------------------------------
# -------------------------------------------------------------
# method for loading quora dynamic content
def scrolldown(self,type_of_page='users'):
last_height = self.page_source
loop_scroll=True
attempt = 0
# we generate a random waiting time between 2 and 4
waiting_scroll_time=round(random.uniform(2, 4),1)
print('scrolling down to get all answers...')
max_waiting_time=round(random.uniform(5, 7),1)
# we increase waiting time when we look for questions urls
if type_of_page=='questions' : max_waiting_time= round(random.uniform(20, 30),1)
# scroll down loop until page not changing
while loop_scroll:
self.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2)
if type_of_page=='answers':
scrollup_alittle(self,2)
new_height=self.page_source
if new_height == last_height:
# in case of not change, we increase the waiting time
waiting_scroll_time= max_waiting_time
attempt += 1
if attempt==3:# in the third attempt we end the scrolling
loop_scroll=False
#print('attempt',attempt)
else:
attempt=0
waiting_scroll_time=round(random.uniform(2, 4),1)
last_height=new_height
# -------------------------------------------------------------
# -------------------------------------------------------------
# questions urls crawler
def questions(topics_list,save_path):
browser=connectchrome()
topic_index=-1
loop_limit=len(topics_list)
print('Starting the questions crawling')
while True:
print('--------------------------------------------------')
topic_index += 1
if topic_index>=loop_limit:
print('Crawling completed, questions have been saved to : ', save_path)
browser.quit()
break
topic_term = topics_list[topic_index].strip()
# we remove hashtags (optional)
topic_term.replace("#",'')
# Looking if the topic has an existing Quora url
print('#########################################################')
print('Looking for topic number : ',topic_index,' | ', topic_term)
try:
url = "https://www.quora.com/topic/" + topic_term.strip() + "/all_questions"
browser.get(url)
time.sleep(2)
except Exception as e0:
print('topic does not exist in Quora')
# print('exception e0')
# print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e0).__name__, e0)
continue
# get browser source
html_source = browser.page_source
question_count_soup = BeautifulSoup(html_source, 'html.parser')
# get total number of questions
question_count_str = question_count_soup.find('a', attrs={'class': 'TopicQuestionsStatsRow'})
if str(question_count_str) =='None':
print('topic does not have questions...')
continue
question_count = convertnumber(question_count_str.contents[0].text)
question_count_str = question_count_soup.find('a', attrs={'class': 'TopicQuestionsStatsRow'})
if question_count ==0:
print('topic does not have questions...')
continue
print('number of questions for this topic : '+ str(question_count))
# Get scroll height
last_height = browser.execute_script("return document.body.scrollHeight")
        # infinite while loop, break it when we reach the end of the page or cannot scroll further.
        # Note that Quora loads questions dynamically:
        # if there are more than 10 questions, we need to scroll down the page to load the remaining questions
if int(question_count)>10:
scrolldown(browser,'questions')
# next we harvest all questions URLs that exists in the Quora topic's page
# get html page source
html_source = browser.page_source
soup = BeautifulSoup(html_source, 'html.parser')
# question_link is the class for questions
question_link = soup.find_all('a', attrs={'class': 'question_link'}, href=True)
# add questions to a set for uniqueness
question_set = set()
for ques in question_link:
question_set.add(ques)
        # write the content of the set to the Questions_URLs output folder
save_file= Path(save_path) / str(topic_term.strip('\n') + '_question_urls.txt')
file_question_urls = open(save_file, mode='w', encoding='utf-8')
for ques in question_set:
link_url = "http://www.quora.com" + ques.attrs['href']
file_question_urls.write(link_url+'\n')
file_question_urls.close()
        # sleep every few topics in order to not get banned
if topic_index % 5 == 4:
sleep_time = (round(random.uniform(5, 10), 1))
time.sleep(sleep_time)
browser.quit()
# -------------------------------------------------------------
# -------------------------------------------------------------
# answers crawler
def answers(urls_list,save_path):
browser= connectchrome()
url_index = -1
loop_limit= len(urls_list)
# output file containing all answers
file_answers = open(Path(save_path) / "answers.txt", mode='a')
print('Starting the answers crawling...')
while True:
url_index += 1
print('--------------------------------------------------')
if url_index >= loop_limit:
print('Crawling completed, answers have been saved to : ', save_path)
browser.quit()
file_answers.close()
break
current_line = urls_list[url_index]
print('processing question number : '+ str(url_index+1))
print(current_line)
if '/unanswered/' in str(current_line):
            print('question is unanswered')
continue
question_id = current_line
# opening Question page
try:
browser.get(current_line)
time.sleep(2)
except Exception as OpenEx:
print('cant open the following question link : ',current_line)
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(OpenEx).__name__, OpenEx)
print(str(OpenEx))
continue
try:
nb_answers_text = WebDriverWait(browser, 10).until(
EC.visibility_of_element_located((By.XPATH, "//div[text()[contains(.,'Answer')]]"))).text
nb_answers=[int(s.strip('+')) for s in nb_answers_text.split() if s.strip('+').isdigit()][0]
print('Question have :', nb_answers_text)
except Exception as Openans:
print('cant get answers')
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(Openans).__name__, Openans)
print(str(Openans))
continue
#nb_answers_text = browser.find_element_by_xpath("//div[@class='QuestionPageAnswerHeader']//div[@class='answer_count']").text
if nb_answers>7:
scrolldown(browser,'answers')
continue_reading_buttons = browser.find_elements_by_xpath("//a[@role='button']")
time.sleep(2)
for button in continue_reading_buttons:
try:
ActionChains(browser).click(button).perform()
time.sleep(1)
except:
print('cant click more')
continue
time.sleep(2)
html_source = browser.page_source
soup = BeautifulSoup(html_source,"html.parser")
# get the question-id
question_id = current_line.rsplit('/', 1)[-1]
# find title
title= current_line.replace("https://www.quora.com/","")
# find question's topics
questions_topics= soup.findAll("div", {"class": "q-box qu-mr--tiny qu-mb--tiny"})
questions_topics_text=[]
for topic in questions_topics : questions_topics_text.append(topic.text.rstrip())
# number of answers
# not all answers are saved!
        # collapsed answers and those written by anonymous users are not saved
try:
split_html = html_source.split('class="q-box qu-pt--medium qu-pb--medium"')
except Exception as notexist :#mostly because question is deleted by quora
            print('question no longer exists')
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(notexist).__name__, notexist)
print(str(notexist))
continue
        # The loop below will generate about len(split_html)/2 exceptions, because answers in split_html
        # occupy either the odd or the even positions, so ignore the printed exceptions.
#print('len split : ',len(split_html))
for i in range(1, len(split_html)):
try:
part = split_html[i]
part_soup = BeautifulSoup(part,"html.parser" )
#print('===============================================================')
#find users names of answers authors
try:
authors=part_soup.find("a", href=lambda href: href and "/profile/" in href)
user_id = authors['href'].rsplit('/', 1)[-1]
#print(user_id)
except Exception as notexist2 :#mostly because question is deleted by quora
                    print('problem extracting the answer author')
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(notexist2).__name__, notexist2)
print(str(notexist2))
continue
# find answer dates
answer_date= part_soup.find("a", string=lambda string: string and ("Answered" in string or "Updated" in string))#("a", {"class": "answer_permalink"})
try:
date=answer_date.text
if ("Updated" in date):
date= date[8:]
else:
date= date[9:]
date=dateparser.parse(date).strftime("%Y-%m-%d")
except: # when updated or answered in the same week (ex: Updated Sat)
date=dateparser.parse("7 days ago").strftime("%Y-%m-%d")
#print(date)
# find answers text
answer_text = part_soup.find("div", {"class": "q-relative spacing_log_answer_content"})
#print(" answer_text", answer_text.text)
answer_text = answer_text.text
#write answer elements to file
s= str(question_id.rstrip()) +'\t' + str(date) + "\t"+ user_id + "\t"+ str(questions_topics_text) + "\t" + str(answer_text.rstrip()) + "\n"
#print("wrting down the answer...")
file_answers.write(s)
print('writing down answers...')
except Exception as e1: # Most times because user is anonymous , continue without saving anything
print('---------------There is an Exception-----------')
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e1).__name__, e1)
print(str(e1))
o=1
        # we sleep every few questions in order to avoid an IP ban
if url_index%3==2:
sleep_time=(round(random.uniform(5, 10),1))
time.sleep(sleep_time)
browser.quit()
# -------------------------------------------------------------
# -------------------------------------------------------------
# Users profile crawler
def users(users_list,save_path):
browser= connectchrome()
user_index=-1
loop_limit=len(users_list)
print('Starting the users crawling...')
while True:
print('_______________________________________________________________')
user_index+=1
if user_index >=loop_limit:
            print('Crawling completed, user profiles have been saved to : ', save_path)
browser.quit()
break
# a dict to contain information about profile
quora_profile_information=dict()
current_line= users_list[user_index].strip()
current_line=current_line.replace('http', 'https')
        # we sleep every few requests (the number can be changed) in order to not get banned
if user_index % 5 == 4:
sleep_time = (round(random.uniform(5, 10), 1))
# print('*********')
# print('Seleeping the browser for ', sleep_time)
# print('*********')
time.sleep(sleep_time)
user_id=current_line.strip().replace('\r', '').replace('\n', '')
url= "https://www.quora.com/profile/"+user_id
print('processing quora user number : ', user_index +1, ' ', url)
browser.get(url)
time.sleep(2)
# get profile description
try:
description= browser.find_element_by_class_name('IdentityCredential')
description= description.text.replace('\n', ' ')
#print(description)
except:
description=''
#print('no description')
quora_profile_information['description']=description
# get profile bio
try:
more_button = browser.find_elements_by_link_text('(more)')
ActionChains(browser).move_to_element(more_button[0]).click(more_button[0]).perform()
time.sleep(0.5)
profile_bio = browser.find_element_by_class_name('ProfileDescriptionPreviewSection')
profile_bio_text=profile_bio.text.replace('\n', ' ')
#print(profile_bio_text)
except Exception as e:
#print('no profile bio')
#print(e)
profile_bio_text=''
quora_profile_information['profile_bio']=profile_bio_text
html_source = browser.page_source
source_soup = BeautifulSoup(html_source,"html.parser")
#get location
#print('trying to get location')
location='None'
try:
location1= (source_soup.find(attrs={"class":"LocationCredentialListItem"}))
location2= (location1.find(attrs={"class":"main_text"})).text
location= location2.replace('Lives in ','')
except Exception as e3:
#print('exception regarding finding location')
#print(e3)
pass
quora_profile_information['location']=location
#get total number of views
total_views='0'
try:
#views=wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "AnswerViewsAboutListItem.AboutListItem")))
views= (source_soup.find(attrs={"class":"ContentViewsAboutListItem"}))
total_views=views.text.split("content")[0]
except Exception as e4:
###print('exception regarding finding number of views')
###print(e4)
pass
#print(total_views)
#print('@@@@@@@@@')
total_views=convertnumber(total_views)
#print(' location : ',location)
#print("total_views",total_views)
#print(total_views)
quora_profile_information['total_views']=total_views
nbanswers=0
nbquestions=0
nbfollowers=0
nbfollowing=0
#print('trying to get answers stats')
try:
html_source = browser.page_source
source_soup = BeautifulSoup(html_source,"html.parser")
# Find user social attributes : #answers, #questions, #shares, #posts, #blogs, #followers, #following, #topics, #edits
nbanswers=browser.find_element_by_xpath("//span[text()[contains(.,'Answers')]]/parent::*")
nbanswers=nbanswers.text.strip('Answers').strip().replace(',','')
nbquestions =browser.find_element_by_xpath("//span[text()[contains(.,'Questions')]]/parent::*")
nbquestions=nbquestions.text.strip('Questions').strip().replace(',','')
#print("questions ",nbquestions)
nbfollowers= browser.find_element_by_xpath("//span[text()[contains(.,'Followers')]]/parent::*")
nbfollowers=nbfollowers.text.strip('Followers').strip().replace(',','')
#print("followers ",nbfollowers)
nbfollowing= browser.find_element_by_xpath("//span[text()[contains(.,'Following')]]/parent::*")
nbfollowing = nbfollowing.text.strip('Following').strip().replace(',','')
#print("following ",nbfollowing)
except Exception as ea:
# print('cant get profile attributes answers quesitons followers following')
# print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(ea).__name__, ea)
time.sleep(1)
if nbanswers==0:
            print(' User does not exist or does not have answers...')
continue
# Open User profile file (save file)
save_file= save_path / str( user_id + '.txt')
file_user_profile = open(save_file, "w", encoding="utf8")
quora_profile_information['user_id'] = user_id
# writing answers stats to file
quora_profile_information['nb_answers']=nbanswers
quora_profile_information['nb_questions']=nbquestions
quora_profile_information['nb_followers']=nbfollowers
quora_profile_information['nb_following']=nbfollowing
json.dump(quora_profile_information,file_user_profile)
file_user_profile.write('\n')
# scroll down profile for loading all answers
print('user has ', nbanswers,' answers')
if int(nbanswers)>9:
scrolldown(browser)
# get answers text (we click on (more) button of each answer)
if int(nbanswers)>0:
#print('scrolling down for answers collect')
i=0
# Find and click on all (more) to load full text of answers
more_button = browser.find_elements_by_xpath("//div[contains(text(), '(more)')]")
#print('nb more buttons',len(more_button))
for jk in range(0,len(more_button)):
ActionChains(browser).move_to_element(more_button[jk]).click(more_button[jk]).perform()
time.sleep(1)
try:
questions_and_dates_tags= browser.find_elements_by_xpath("//a[@class='q-box qu-cursor--pointer qu-hover--textDecoration--underline' and contains(@href,'/answer/') and not(contains(@href,'/comment/')) and not(contains(@style,'font-style: normal')) ]")
questions_link=[]
questions_date=[]
#filtering only unique questions and dates
for QD in questions_and_dates_tags:
Qlink= QD.get_attribute("href").split('/')[3]
if Qlink not in questions_link:
questions_link.append(Qlink)
questions_date.append(QD.get_attribute("text"))
questions_date=[convertDateFormat(d) for d in questions_date]
answersText = browser.find_elements_by_xpath("//div[@class='q-relative spacing_log_answer_content']")
answersText=[' '.join(answer.text.split('\n')[:]).replace('\r', '').replace('\t', '').strip() for answer in answersText]
except Exception as eans:
print('cant get answers')
print (eans)
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(eans).__name__, eans)
continue
# writing down answers ( date+ Question-ID + Answer text)
for ind in range(0,int(nbanswers)):
try:
#print(ind)
file_user_profile.write(questions_date[ind] +'\t' + questions_link[ind].rstrip() + '\t' + answersText[ind].rstrip() + '\n')
except Exception as ew:
# print(ew)
# print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(ew).__name__, ew)
print('could not write to file...')
continue
file_user_profile.close()
browser.quit()
# -------------------------------------------------------------
# -------------------------------------------------------------
def main():
start_time = datetime.now()
# Input Folder
input_path = Path(userpaths.get_my_documents()) / "QuoraScraperData" / "input"
pathlib.Path(input_path).mkdir(parents=True, exist_ok=True)
# Read arguments
parser=argparse.ArgumentParser()
parser.add_argument("module", choices=['questions', 'answers', 'users'],help="type of crawler")
group = parser.add_mutually_exclusive_group()
group.add_argument("-f","--verbose",action="store_true",help="input keywords file path ")
group.add_argument("-l","--quiet",action="store_true",help="input keywords list")
parser.add_argument("input", help=" Input filepath or input list")
parser.add_argument("-i","--index", type=int, default=0,help="index from which to start scraping ")
args=parser.parse_args()
# set starting crawl index
list_index = args.index
# set input list for crawling
# if input is filepath
keywords_list=[]
if args.verbose:
filename=args.input
print("Input file is : ", filename)
if os.path.isfile(filename):
with open(filename, mode='r', encoding='utf-8') as keywords_file:
keywords_list = keywords_file.readlines()
elif os.path.isfile(Path(input_path) / filename):
with open(Path(input_path) / filename, mode='r', encoding='utf-8') as keywords_file:
keywords_list = keywords_file.readlines()
else:
print()
print("Reading file error: Please put the file in the program directory: ",Path.cwd() ," or in the QuoraScraperData folder :",input_path ," and try again")
print()
# if input is list
elif args.quiet:
keywords_list = [item.strip() for item in args.input.strip('[]').split(',')]
keywords_list=keywords_list[list_index:]
    # create output folder
module_name=args.module
save_path = Path(userpaths.get_my_documents()) / "QuoraScraperData" / module_name
pathlib.Path(save_path).mkdir(parents=True, exist_ok=True)
# launch scraper
if module_name.strip()=='questions':
questions(keywords_list,save_path)
elif module_name.strip() == 'answers':
answers(keywords_list,save_path)
elif module_name.strip() == 'users':
users(keywords_list,save_path)
end_time = datetime.now()
    print(' Crawling took a total time of : ', end_time - start_time)
if __name__ == '__main__': main()
|
l0nax/gitlint
|
qa/samples/user_rules/incorrect_linerule/my_line_rule.py
|
from gitlint.rules import LineRule
class MyUserLineRule(LineRule):
id = "UC2"
name = "my-line-rule"
# missing validate method, missing target attribute
|
l0nax/gitlint
|
gitlint/tests/test_cache.py
|
<gh_stars>0
# -*- coding: utf-8 -*-
from gitlint.tests.base import BaseTestCase
from gitlint.cache import PropertyCache, cache
class CacheTests(BaseTestCase):
class MyClass(PropertyCache):
""" Simple class that has cached properties, used for testing. """
def __init__(self):
PropertyCache.__init__(self)
self.counter = 0
@property
@cache
def foo(self):
self.counter += 1
return u"bår"
@property
@cache(cachekey=u"hür")
def bar(self):
self.counter += 1
return u"fōo"
def test_cache(self):
# Init new class with cached properties
myclass = self.MyClass()
self.assertEqual(myclass.counter, 0)
self.assertDictEqual(myclass._cache, {})
# Assert that function is called on first access, cache is set
self.assertEqual(myclass.foo, u"bår")
self.assertEqual(myclass.counter, 1)
self.assertDictEqual(myclass._cache, {"foo": u"bår"})
        # Assert that the function is not called on subsequent access, cache is still set
self.assertEqual(myclass.foo, u"bår")
self.assertEqual(myclass.counter, 1)
self.assertDictEqual(myclass._cache, {"foo": u"bår"})
def test_cache_custom_key(self):
# Init new class with cached properties
myclass = self.MyClass()
self.assertEqual(myclass.counter, 0)
self.assertDictEqual(myclass._cache, {})
# Assert that function is called on first access, cache is set with custom key
self.assertEqual(myclass.bar, u"fōo")
self.assertEqual(myclass.counter, 1)
self.assertDictEqual(myclass._cache, {u"hür": u"fōo"})
        # Assert that the function is not called on subsequent access, cache is still set
self.assertEqual(myclass.bar, u"fōo")
self.assertEqual(myclass.counter, 1)
self.assertDictEqual(myclass._cache, {u"hür": u"fōo"})
|
l0nax/gitlint
|
examples/my_line_rules.py
|
# -*- coding: utf-8 -*-
from gitlint.rules import LineRule, RuleViolation, CommitMessageTitle
from gitlint.options import ListOption
"""
Full details on user-defined rules: https://jorisroovers.com/gitlint/user_defined_rules
The SpecialChars class below is an example of a user-defined LineRule. Line rules are gitlint rules that only act on a
single line at once. Once the rule is discovered, gitlint will automatically take care of applying this rule
against each line of the commit message title or body (whether it is applied to the title or body is determined by the
`target` attribute of the class).
A LineRule contrasts with a CommitRule (see examples/my_commit_rules.py) in that a commit rule is only applied once on
an entire commit. This allows commit rules to implement more complex checks that span multiple lines and/or checks
that should only be done once per gitlint run.
While every LineRule can be implemented as a CommitRule, it's usually easier and more concise to go with a LineRule if
that fits your needs.
"""
class SpecialChars(LineRule):
""" This rule will enforce that the commit message title does not contain any of the following characters:
$^%@!*() """
# A rule MUST have a human friendly name
name = "title-no-special-chars"
# A rule MUST have a *unique* id, we recommend starting with UL (for User-defined Line-rule), but this can
# really be anything.
id = "UL1"
# A line-rule MUST have a target (not required for CommitRules).
target = CommitMessageTitle
    # A rule MAY have an options_spec if its behavior should be configurable.
options_spec = [ListOption('special-chars', ['$', '^', '%', '@', '!', '*', '(', ')'],
"Comma separated list of characters that should not occur in the title")]
def validate(self, line, _commit):
self.log.debug("SpecialChars: This line will be visible when running `gitlint --debug`")
violations = []
# options can be accessed by looking them up by their name in self.options
for char in self.options['special-chars'].value:
if char in line:
violation = RuleViolation(self.id, "Title contains the special character '{0}'".format(char), line)
violations.append(violation)
return violations
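# Illustrative sketch (not part of the original examples file): a minimal user-defined
# CommitRule, shown only to contrast with the LineRule above. A CommitRule is applied
# once per commit and can inspect the whole message; the real examples/my_commit_rules.py
# referenced in the module docstring may look different.
from gitlint.rules import CommitRule  # imported here only for this sketch
class BodyRequiresSignedOffBy(CommitRule):
    """ Example CommitRule: the commit message body must contain a Signed-off-by line. """
    name = "body-requires-signed-off-by"
    # User-defined CommitRule ids conventionally start with UC
    id = "UC9"
    def validate(self, commit):
        for line in commit.message.body:
            if line.startswith("Signed-off-by"):
                return
        return [RuleViolation(self.id, "Body does not contain a 'Signed-off-by' line")]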
|
l0nax/gitlint
|
gitlint/rules.py
|
# pylint: disable=inconsistent-return-statements
import copy
import logging
import re
from gitlint.options import IntOption, BoolOption, StrOption, ListOption, RegexOption
from gitlint.utils import sstr
class Rule(object):
""" Class representing gitlint rules. """
options_spec = []
id = None
name = None
target = None
_log = None
def __init__(self, opts=None):
if not opts:
opts = {}
self.options = {}
for op_spec in self.options_spec:
self.options[op_spec.name] = copy.deepcopy(op_spec)
actual_option = opts.get(op_spec.name)
if actual_option is not None:
self.options[op_spec.name].set(actual_option)
@property
def log(self):
if not self._log:
self._log = logging.getLogger(__name__)
logging.basicConfig()
return self._log
def __eq__(self, other):
return self.id == other.id and self.name == other.name and \
self.options == other.options and self.target == other.target # noqa
def __ne__(self, other):
return not self.__eq__(other) # required for py2
def __str__(self):
return sstr(self) # pragma: no cover
def __unicode__(self):
return u"{0} {1}".format(self.id, self.name) # pragma: no cover
def __repr__(self):
return self.__str__() # pragma: no cover
class ConfigurationRule(Rule):
""" Class representing rules that can dynamically change the configuration of gitlint during runtime. """
pass
class CommitRule(Rule):
""" Class representing rules that act on an entire commit at once """
pass
class LineRule(Rule):
""" Class representing rules that act on a line by line basis """
pass
class LineRuleTarget(object):
""" Base class for LineRule targets. A LineRuleTarget specifies where a given rule will be applied
(e.g. commit message title, commit message body).
Each LineRule MUST have a target specified. """
pass
class CommitMessageTitle(LineRuleTarget):
""" Target class used for rules that apply to a commit message title """
pass
class CommitMessageBody(LineRuleTarget):
""" Target class used for rules that apply to a commit message body """
pass
class RuleViolation(object):
""" Class representing a violation of a rule. I.e.: When a rule is broken, the rule will instantiate this class
to indicate how and where the rule was broken. """
def __init__(self, rule_id, message, content=None, line_nr=None):
self.rule_id = rule_id
self.line_nr = line_nr
self.message = message
self.content = content
def __eq__(self, other):
equal = self.rule_id == other.rule_id and self.message == other.message
equal = equal and self.content == other.content and self.line_nr == other.line_nr
return equal
def __ne__(self, other):
return not self.__eq__(other) # required for py2
def __str__(self):
return sstr(self) # pragma: no cover
def __unicode__(self):
return u"{0}: {1} {2}: \"{3}\"".format(self.line_nr, self.rule_id, self.message,
self.content) # pragma: no cover
def __repr__(self):
return self.__unicode__() # pragma: no cover
class UserRuleError(Exception):
""" Error used to indicate that an error occurred while trying to load a user rule """
pass
class MaxLineLength(LineRule):
name = "max-line-length"
id = "R1"
options_spec = [IntOption('line-length', 80, "Max line length")]
violation_message = "Line exceeds max length ({0}>{1})"
def validate(self, line, _commit):
max_length = self.options['line-length'].value
if len(line) > max_length:
return [RuleViolation(self.id, self.violation_message.format(len(line), max_length), line)]
class TrailingWhiteSpace(LineRule):
name = "trailing-whitespace"
id = "R2"
violation_message = "Line has trailing whitespace"
pattern = re.compile(r"\s$", re.UNICODE)
def validate(self, line, _commit):
if self.pattern.search(line):
return [RuleViolation(self.id, self.violation_message, line)]
class HardTab(LineRule):
name = "hard-tab"
id = "R3"
violation_message = "Line contains hard tab characters (\\t)"
def validate(self, line, _commit):
if "\t" in line:
return [RuleViolation(self.id, self.violation_message, line)]
class LineMustNotContainWord(LineRule):
""" Violation if a line contains one of a list of words (NOTE: using a word in the list inside another word is not
a violation, e.g: WIPING is not a violation if 'WIP' is a word that is not allowed.) """
name = "line-must-not-contain"
id = "R5"
options_spec = [ListOption('words', [], "Comma separated list of words that should not be found")]
violation_message = u"Line contains {0}"
def validate(self, line, _commit):
strings = self.options['words'].value
violations = []
for string in strings:
regex = re.compile(r"\b%s\b" % string.lower(), re.IGNORECASE | re.UNICODE)
match = regex.search(line.lower())
if match:
violations.append(RuleViolation(self.id, self.violation_message.format(string), line))
return violations if violations else None
class LeadingWhiteSpace(LineRule):
name = "leading-whitespace"
id = "R6"
violation_message = "Line has leading whitespace"
def validate(self, line, _commit):
pattern = re.compile(r"^\s", re.UNICODE)
if pattern.search(line):
return [RuleViolation(self.id, self.violation_message, line)]
class TitleMaxLength(MaxLineLength):
name = "title-max-length"
id = "T1"
target = CommitMessageTitle
options_spec = [IntOption('line-length', 72, "Max line length")]
violation_message = "Title exceeds max length ({0}>{1})"
class TitleTrailingWhitespace(TrailingWhiteSpace):
name = "title-trailing-whitespace"
id = "T2"
target = CommitMessageTitle
violation_message = "Title has trailing whitespace"
class TitleTrailingPunctuation(LineRule):
name = "title-trailing-punctuation"
id = "T3"
target = CommitMessageTitle
def validate(self, title, _commit):
punctuation_marks = '?:!.,;'
for punctuation_mark in punctuation_marks:
if title.endswith(punctuation_mark):
return [RuleViolation(self.id, u"Title has trailing punctuation ({0})".format(punctuation_mark), title)]
class TitleHardTab(HardTab):
name = "title-hard-tab"
id = "T4"
target = CommitMessageTitle
violation_message = "Title contains hard tab characters (\\t)"
class TitleMustNotContainWord(LineMustNotContainWord):
name = "title-must-not-contain-word"
id = "T5"
target = CommitMessageTitle
options_spec = [ListOption('words', ["WIP"], "Must not contain word")]
violation_message = u"Title contains the word '{0}' (case-insensitive)"
class TitleLeadingWhitespace(LeadingWhiteSpace):
name = "title-leading-whitespace"
id = "T6"
target = CommitMessageTitle
violation_message = "Title has leading whitespace"
class TitleRegexMatches(LineRule):
name = "title-match-regex"
id = "T7"
target = CommitMessageTitle
options_spec = [RegexOption('regex', None, "Regex the title should match")]
def validate(self, title, _commit):
# If no regex is specified, immediately return
if not self.options['regex'].value:
return
if not self.options['regex'].value.search(title):
violation_msg = u"Title does not match regex ({0})".format(self.options['regex'].value.pattern)
return [RuleViolation(self.id, violation_msg, title)]
class BodyMaxLineLength(MaxLineLength):
name = "body-max-line-length"
id = "B1"
target = CommitMessageBody
class BodyTrailingWhitespace(TrailingWhiteSpace):
name = "body-trailing-whitespace"
id = "B2"
target = CommitMessageBody
class BodyHardTab(HardTab):
name = "body-hard-tab"
id = "B3"
target = CommitMessageBody
class BodyFirstLineEmpty(CommitRule):
name = "body-first-line-empty"
id = "B4"
def validate(self, commit):
if len(commit.message.body) >= 1:
first_line = commit.message.body[0]
if first_line != "":
return [RuleViolation(self.id, "Second line is not empty", first_line, 2)]
class BodyMinLength(CommitRule):
name = "body-min-length"
id = "B5"
options_spec = [IntOption('min-length', 20, "Minimum body length")]
def validate(self, commit):
min_length = self.options['min-length'].value
body_message_no_newline = "".join([line for line in commit.message.body if line is not None])
actual_length = len(body_message_no_newline)
if 0 < actual_length < min_length:
violation_message = "Body message is too short ({0}<{1})".format(actual_length, min_length)
return [RuleViolation(self.id, violation_message, body_message_no_newline, 3)]
class BodyMissing(CommitRule):
name = "body-is-missing"
id = "B6"
options_spec = [BoolOption('ignore-merge-commits', True, "Ignore merge commits")]
def validate(self, commit):
# ignore merge commits (which may have no body) when the option tells us to
if self.options['ignore-merge-commits'].value and commit.is_merge_commit:
return
if len(commit.message.body) < 2:
return [RuleViolation(self.id, "Body message is missing", None, 3)]
class BodyChangedFileMention(CommitRule):
name = "body-changed-file-mention"
id = "B7"
options_spec = [ListOption('files', [], "Files that need to be mentioned")]
def validate(self, commit):
violations = []
for needs_mentioned_file in self.options['files'].value:
# if a file that we need to look out for is actually changed, then check whether it occurs
# in the commit msg body
if needs_mentioned_file in commit.changed_files:
if needs_mentioned_file not in " ".join(commit.message.body):
violation_message = u"Body does not mention changed file '{0}'".format(needs_mentioned_file)
violations.append(RuleViolation(self.id, violation_message, None, len(commit.message.body) + 1))
return violations if violations else None
class BodyRegexMatches(CommitRule):
name = "body-match-regex"
id = "B8"
options_spec = [RegexOption('regex', None, "Regex the body should match")]
def validate(self, commit):
# If no regex is specified, immediately return
if not self.options['regex'].value:
return
# We intentionally ignore the first line in the body as that's the empty line after the title,
# which most users are not going to expect to be part of the body when matching a regex.
# If this causes contention, we can always introduce an option to change the behavior in a backward-
# compatible way.
body_lines = commit.message.body[1:] if len(commit.message.body) > 1 else []
# Similarly, the last line is often empty; this has to do with how git returns commit messages.
# Users won't expect this, so prune it off by default
if body_lines and body_lines[-1] == "":
body_lines.pop()
full_body = "\n".join(body_lines)
if not self.options['regex'].value.search(full_body):
violation_msg = u"Body does not match regex ({0})".format(self.options['regex'].value.pattern)
return [RuleViolation(self.id, violation_msg, None, len(commit.message.body) + 1)]
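# Descriptive note (added for clarity, not part of the original rule set): commit.message.body as
# used above is a list of lines starting right after the title, e.g. for the message
# "Title\n\nFirst line\nSecond line\n" it looks roughly like ["", "First line", "Second line", ""];
# dropping the leading empty separator line and a trailing empty line (as validate() does) leaves
# ["First line", "Second line"] for the regex to match against.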
class AuthorValidEmail(CommitRule):
name = "author-valid-email"
id = "M1"
options_spec = [RegexOption('regex', r"[^@ ]+@[^@ ]+\.[^@ ]+", "Regex that author email address should match")]
def validate(self, commit):
# If no regex is specified, immediately return
if not self.options['regex'].value:
return
if commit.author_email and not self.options['regex'].value.match(commit.author_email):
return [RuleViolation(self.id, "Author email for commit is invalid", commit.author_email)]
class IgnoreByTitle(ConfigurationRule):
name = "ignore-by-title"
id = "I1"
options_spec = [RegexOption('regex', None, "Regex matching the titles of commits this rule should apply to"),
StrOption('ignore', "all", "Comma-separated list of rules to ignore")]
def apply(self, config, commit):
# If no regex is specified, immediately return
if not self.options['regex'].value:
return
if self.options['regex'].value.match(commit.message.title):
config.ignore = self.options['ignore'].value
message = u"Commit title '{0}' matches the regex '{1}', ignoring rules: {2}"
message = message.format(commit.message.title, self.options['regex'].value.pattern,
self.options['ignore'].value)
self.log.debug("Ignoring commit because of rule '%s': %s", self.id, message)
class IgnoreByBody(ConfigurationRule):
name = "ignore-by-body"
id = "I2"
options_spec = [RegexOption('regex', None, "Regex matching lines of the body of commits this rule should apply to"),
StrOption('ignore', "all", "Comma-separated list of rules to ignore")]
def apply(self, config, commit):
# If no regex is specified, immediately return
if not self.options['regex'].value:
return
for line in commit.message.body:
if self.options['regex'].value.match(line):
config.ignore = self.options['ignore'].value
message = u"Commit message line '{0}' matches the regex '{1}', ignoring rules: {2}"
message = message.format(line, self.options['regex'].value.pattern, self.options['ignore'].value)
self.log.debug("Ignoring commit because of rule '%s': %s", self.id, message)
# No need to check other lines if we found a match
return
class IgnoreBodyLines(ConfigurationRule):
name = "ignore-body-lines"
id = "I3"
options_spec = [RegexOption('regex', None, "Regex matching lines of the body that should be ignored")]
def apply(self, _, commit):
# If no regex is specified, immediately return
if not self.options['regex'].value:
return
new_body = []
for line in commit.message.body:
if self.options['regex'].value.match(line):
debug_msg = u"Ignoring line '%s' because it matches '%s'"
self.log.debug(debug_msg, line, self.options['regex'].value.pattern)
else:
new_body.append(line)
commit.message.body = new_body
commit.message.full = u"\n".join([commit.message.title] + new_body)
|
l0nax/gitlint
|
gitlint/tests/test_display.py
|
# -*- coding: utf-8 -*-
try:
# python 2.x
from StringIO import StringIO
except ImportError:
# python 3.x
from io import StringIO
try:
# python 2.x
from mock import patch
except ImportError:
# python 3.x
from unittest.mock import patch # pylint: disable=no-name-in-module, import-error
from gitlint.display import Display
from gitlint.config import LintConfig
from gitlint.tests.base import BaseTestCase
class DisplayTests(BaseTestCase):
def test_v(self):
display = Display(LintConfig())
display.config.verbosity = 2
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
# Non exact outputting, should output both v and vv output
with patch('gitlint.display.stdout', new=StringIO()) as stdout:
display.v(u"tëst")
display.vv(u"tëst2")
# vvv should be ignored regardless
display.vvv(u"tëst3.1")
display.vvv(u"tëst3.2", exact=True)
self.assertEqual(u"tëst\ntëst2\n", stdout.getvalue())
# exact outputting, should only output vv
with patch('gitlint.display.stdout', new=StringIO()) as stdout:
display.v(u"tëst", exact=True)
display.vv(u"tëst2", exact=True)
# vvv should be ignored regardless
display.vvv(u"tëst3.1")
display.vvv(u"tëst3.2", exact=True)
self.assertEqual(u"tëst2\n", stdout.getvalue())
# standard error should be empty throughout all of this
self.assertEqual('', stderr.getvalue())
def test_e(self):
display = Display(LintConfig())
display.config.verbosity = 2
with patch('gitlint.display.stdout', new=StringIO()) as stdout:
# Non exact outputting, should output both e and ee output
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
display.e(u"tëst")
display.ee(u"tëst2")
# eee should be ignored regardless
display.eee(u"tëst3.1")
display.eee(u"tëst3.2", exact=True)
self.assertEqual(u"tëst\ntëst2\n", stderr.getvalue())
# exact outputting, should only output ee
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
display.e(u"tëst", exact=True)
display.ee(u"tëst2", exact=True)
# eee should be ignored regardless
display.eee(u"tëst3.1")
display.eee(u"tëst3.2", exact=True)
self.assertEqual(u"tëst2\n", stderr.getvalue())
# standard output should be empty throughout all of this
self.assertEqual('', stdout.getvalue())
|
l0nax/gitlint
|
gitlint/tests/config/test_config_precedence.py
|
# -*- coding: utf-8 -*-
try:
# python 2.x
from StringIO import StringIO
except ImportError:
# python 3.x
from io import StringIO
from click.testing import CliRunner
try:
# python 2.x
from mock import patch
except ImportError:
# python 3.x
from unittest.mock import patch # pylint: disable=no-name-in-module, import-error
from gitlint.tests.base import BaseTestCase
from gitlint import cli
from gitlint.config import LintConfigBuilder
class LintConfigPrecedenceTests(BaseTestCase):
def setUp(self):
self.cli = CliRunner()
@patch('gitlint.cli.get_stdin_data', return_value=u"WIP\n\nThis is å test message\n")
def test_config_precedence(self, _):
# TODO(jroovers): this test really only tests verbosity, we need to do some refactoring of gitlint.cli
# to more easily test everything
# Test that the config precedence is followed:
# 1. commandline convenience flags
# 2. commandline -c flags
# 3. config file
# 4. default config
config_path = self.get_sample_path("config/gitlintconfig")
# 1. commandline convenience flags
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
result = self.cli.invoke(cli.cli, ["-vvv", "-c", "general.verbosity=2", "--config", config_path])
self.assertEqual(result.output, "")
self.assertEqual(stderr.getvalue(), "1: T5 Title contains the word 'WIP' (case-insensitive): \"WIP\"\n")
# 2. commandline -c flags
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
result = self.cli.invoke(cli.cli, ["-c", "general.verbosity=2", "--config", config_path])
self.assertEqual(result.output, "")
self.assertEqual(stderr.getvalue(), "1: T5 Title contains the word 'WIP' (case-insensitive)\n")
# 3. config file
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
result = self.cli.invoke(cli.cli, ["--config", config_path])
self.assertEqual(result.output, "")
self.assertEqual(stderr.getvalue(), "1: T5\n")
# 4. default config
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
result = self.cli.invoke(cli.cli)
self.assertEqual(result.output, "")
self.assertEqual(stderr.getvalue(), "1: T5 Title contains the word 'WIP' (case-insensitive): \"WIP\"\n")
@patch('gitlint.cli.get_stdin_data', return_value=u"WIP: This is å test")
def test_ignore_precedence(self, get_stdin_data):
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
# --ignore takes precedence over -c general.ignore
result = self.cli.invoke(cli.cli, ["-c", "general.ignore=T5", "--ignore", "B6"])
self.assertEqual(result.output, "")
self.assertEqual(result.exit_code, 1)
# We still expect the T5 violation, but no B6 violation as --ignore overwrites -c general.ignore
self.assertEqual(stderr.getvalue(),
u"1: T5 Title contains the word 'WIP' (case-insensitive): \"WIP: This is å test\"\n")
# test that we can also still configure a rule that is first ignored but then not
with patch('gitlint.display.stderr', new=StringIO()) as stderr:
get_stdin_data.return_value = u"This is å test"
# --ignore takes precedence over -c general.ignore
result = self.cli.invoke(cli.cli, ["-c", "general.ignore=title-max-length",
"-c", "title-max-length.line-length=5",
"--ignore", "B6"])
self.assertEqual(result.output, "")
self.assertEqual(result.exit_code, 1)
# We still expect the T1 violation with custom config,
# but no B6 violation as --ignore overwrites -c general.ignore
self.assertEqual(stderr.getvalue(), u"1: T1 Title exceeds max length (14>5): \"This is å test\"\n")
def test_general_option_after_rule_option(self):
# We used to have a bug where we didn't process general options before setting specific options, this would
# lead to errors when e.g.: trying to configure a user rule before the rule class was loaded by extra-path
# This test is here to test for regressions against this.
config_builder = LintConfigBuilder()
config_builder.set_option(u'my-üser-commit-rule', 'violation-count', 3)
user_rules_path = self.get_sample_path("user_rules")
config_builder.set_option('general', 'extra-path', user_rules_path)
config = config_builder.build()
self.assertEqual(config.extra_path, user_rules_path)
self.assertEqual(config.get_rule_option(u'my-üser-commit-rule', 'violation-count'), 3)
|
l0nax/gitlint
|
gitlint/tests/contrib/rules/test_signedoff_by.py
|
# -*- coding: utf-8 -*-
from gitlint.tests.base import BaseTestCase
from gitlint.rules import RuleViolation
from gitlint.contrib.rules.signedoff_by import SignedOffBy
from gitlint.config import LintConfig
class ContribSignedOffByTests(BaseTestCase):
def test_enable(self):
# Test that rule can be enabled in config
for rule_ref in ['CC1', 'contrib-body-requires-signed-off-by']:
config = LintConfig()
config.contrib = [rule_ref]
self.assertIn(SignedOffBy(), config.rules)
def test_signedoff_by(self):
# No violations when 'Signed-Off-By' line is present
rule = SignedOffBy()
violations = rule.validate(self.gitcommit(u"Föobar\n\nMy Body\nSigned-Off-By: <NAME>"))
self.assertListEqual([], violations)
# Assert violation when no 'Signed-Off-By' line is present
violations = rule.validate(self.gitcommit(u"Föobar\n\nMy Body"))
expected_violation = RuleViolation("CC1", "Body does not contain a 'Signed-Off-By' line", line_nr=1)
self.assertListEqual(violations, [expected_violation])
# Assert violation when 'Signed-Off-By' is in the title but not in the body
violations = rule.validate(self.gitcommit(u"Signed-Off-By\n\nFöobar"))
self.assertListEqual(violations, [expected_violation])
|
l0nax/gitlint
|
gitlint/tests/git/test_git_context.py
|
# -*- coding: utf-8 -*-
try:
# python 2.x
from mock import patch, call
except ImportError:
# python 3.x
from unittest.mock import patch, call # pylint: disable=no-name-in-module, import-error
from gitlint.tests.base import BaseTestCase
from gitlint.git import GitContext
class GitContextTests(BaseTestCase):
# Expected special_args passed to 'sh'
expected_sh_special_args = {
'_tty_out': False,
'_cwd': u"fåke/path"
}
@patch('gitlint.git.sh')
def test_gitcontext(self, sh):
sh.git.side_effect = [
u"#", # git config --get core.commentchar
u"\nfoöbar\n"
]
expected_calls = [
call("config", "--get", "core.commentchar", _ok_code=[0, 1], **self.expected_sh_special_args),
call("rev-parse", "--abbrev-ref", "HEAD", **self.expected_sh_special_args)
]
context = GitContext(u"fåke/path")
self.assertEqual(sh.git.mock_calls, [])
# gitcontext.commentchar
self.assertEqual(context.commentchar, u"#")
self.assertEqual(sh.git.mock_calls, expected_calls[0:1])
# gitcontext.current_branch
self.assertEqual(context.current_branch, u"foöbar")
self.assertEqual(sh.git.mock_calls, expected_calls)
@patch('gitlint.git.sh')
def test_gitcontext_equality(self, sh):
sh.git.side_effect = [
u"û\n", # context1: git config --get core.commentchar
u"û\n", # context2: git config --get core.commentchar
u"my-brånch\n", # context1: git rev-parse --abbrev-ref HEAD
u"my-brånch\n", # context2: git rev-parse --abbrev-ref HEAD
]
context1 = GitContext(u"fåke/path")
context1.commits = [u"fōo", u"bår"] # we don't need real commits to check for equality
context2 = GitContext(u"fåke/path")
context2.commits = [u"fōo", u"bår"]
self.assertEqual(context1, context2)
# INEQUALITY
# Different commits
context2.commits = [u"hür", u"dür"]
self.assertNotEqual(context1, context2)
# Different repository_path
context2.commits = context1.commits
context2.repository_path = u"ōther/path"
self.assertNotEqual(context1, context2)
# Different comment_char
context3 = GitContext(u"fåke/path")
context3.commits = [u"fōo", u"bår"]
sh.git.side_effect = ([
u"ç\n", # context3: git config --get core.commentchar
u"my-brånch\n" # context3: git rev-parse --abbrev-ref HEAD
])
self.assertNotEqual(context1, context3)
# Different current_branch
context4 = GitContext(u"fåke/path")
context4.commits = [u"fōo", u"bår"]
sh.git.side_effect = ([
u"û\n", # context4: git config --get core.commentchar
u"different-brånch\n" # context4: git rev-parse --abbrev-ref HEAD
])
self.assertNotEqual(context1, context4)
|
l0nax/gitlint
|
gitlint/tests/rules/test_meta_rules.py
|
# -*- coding: utf-8 -*-
from gitlint.tests.base import BaseTestCase
from gitlint.rules import AuthorValidEmail, RuleViolation
class MetaRuleTests(BaseTestCase):
def test_author_valid_email_rule(self):
rule = AuthorValidEmail()
# valid email addresses
valid_email_addresses = [u"<EMAIL>", u"<EMAIL>", u"<EMAIL>", u"<EMAIL>",
u"<EMAIL>"]
for email in valid_email_addresses:
commit = self.gitcommit(u"", author_email=email)
violations = rule.validate(commit)
self.assertIsNone(violations)
# No email address (=allowed for now, as gitlint also lints messages passed via stdin that don't have an
# email address)
commit = self.gitcommit(u"")
violations = rule.validate(commit)
self.assertIsNone(violations)
# Invalid email addresses: no TLD, no domain, no @, space anywhere (=valid but not allowed by gitlint)
invalid_email_addresses = [u"föo@bar", u"JöhnDoe", u"<NAME>", u"Jöhn <EMAIL>", u" <EMAIL>",
u"<EMAIL>", u"<EMAIL>", u"<EMAIL>", u"<EMAIL>",
u"föo@.com"]
for email in invalid_email_addresses:
commit = self.gitcommit(u"", author_email=email)
violations = rule.validate(commit)
self.assertListEqual(violations,
[RuleViolation("M1", "Author email for commit is invalid", email)])
def test_author_valid_email_rule_custom_regex(self):
# regex=None -> the rule isn't applied
rule = AuthorValidEmail()
rule.options['regex'].set(None)
emailadresses = [u"föo", None, u"hür dür"]
for email in emailadresses:
commit = self.gitcommit(u"", author_email=email)
violations = rule.validate(commit)
self.assertIsNone(violations)
# Custom domain
rule = AuthorValidEmail({'regex': u"[^<EMAIL>"})
valid_email_addresses = [
u"<EMAIL>", u"<EMAIL>", u"<EMAIL>", u"jöhn/<EMAIL>"]
for email in valid_email_addresses:
commit = self.gitcommit(u"", author_email=email)
violations = rule.validate(commit)
self.assertIsNone(violations)
# Invalid email addresses
invalid_email_addresses = [u"<EMAIL>"]
for email in invalid_email_addresses:
commit = self.gitcommit(u"", author_email=email)
violations = rule.validate(commit)
self.assertListEqual(violations,
[RuleViolation("M1", "Author email for commit is invalid", email)])
|
l0nax/gitlint
|
gitlint/__init__.py
|
__version__ = "0.14.0dev"
|
l0nax/gitlint
|
gitlint/tests/samples/user_rules/parent_package/my_commit_rules.py
|
# -*- coding: utf-8 -*-
from gitlint.rules import CommitRule
class MyUserCommitRule(CommitRule):
name = u"my-user-cömmit-rule"
id = "UC2"
options_spec = []
def validate(self, _commit):
return []
|
l0nax/gitlint
|
qa/samples/user_rules/extra/extra_rules.py
|
# -*- coding: utf-8 -*-
from gitlint.rules import CommitRule, RuleViolation, ConfigurationRule
from gitlint.options import IntOption, StrOption, ListOption
from gitlint.utils import sstr
class GitContextRule(CommitRule):
""" Rule that tests whether we can correctly access certain gitcontext properties """
name = u"gïtcontext"
id = "UC1"
def validate(self, commit):
violations = [
RuleViolation(self.id, u"GitContext.current_branch: {0}".format(commit.context.current_branch), line_nr=1),
RuleViolation(self.id, u"GitContext.commentchar: {0}".format(commit.context.commentchar), line_nr=1)
]
return violations
class GitCommitRule(CommitRule):
""" Rule that tests whether we can correctly access certain commit properties """
name = u"gïtcommit"
id = "UC2"
def validate(self, commit):
violations = [
RuleViolation(self.id, u"GitCommit.branches: {0}".format(sstr(commit.branches)), line_nr=1),
RuleViolation(self.id, u"GitCommit.custom_prop: {0}".format(commit.custom_prop), line_nr=1),
]
return violations
class GitlintConfigurationRule(ConfigurationRule):
""" Rule that tests whether we can correctly access the config as well as modify the commit message """
name = u"cönfigrule"
id = "UC3"
def apply(self, config, commit):
# We add a line to the commit message body that pulls a value from config, this proves we can modify the body
# and read the config contents
commit.message.body.append("{0} ".format(config.target)) # trailing whitespace deliberate to trigger violation
# We set a custom property that we access in CommitRule, to prove we can add extra properties to the commit
commit.custom_prop = u"foöbar"
# We also ignore some extra rules, proving that we can modify the config
config.ignore.append("B4")
class ConfigurableCommitRule(CommitRule):
""" Rule that tests that we can add configuration to user-defined rules """
name = u"configürable"
id = "UC4"
options_spec = [IntOption(u"int-öption", 2, u"int-öption description"),
StrOption(u"str-öption", u"föo", u"int-öption description"),
ListOption(u"list-öption", [u"foo", u"bar"], u"list-öption description")]
def validate(self, _):
violations = [
RuleViolation(self.id, u"int-öption: {0}".format(self.options[u'int-öption'].value), line_nr=1),
RuleViolation(self.id, u"str-öption: {0}".format(self.options[u'str-öption'].value), line_nr=1),
RuleViolation(self.id, u"list-öption: {0}".format(sstr(self.options[u'list-öption'].value)), line_nr=1),
]
return violations
|
l0nax/gitlint
|
gitlint/tests/samples/user_rules/my_commit_rules.py
|
# -*- coding: utf-8 -*-
from gitlint.rules import CommitRule, RuleViolation
from gitlint.options import IntOption
class MyUserCommitRule(CommitRule):
name = u"my-üser-commit-rule"
id = "UC1"
options_spec = [IntOption('violation-count', 1, u"Number of violåtions to return")]
def validate(self, _commit):
violations = []
for i in range(1, self.options['violation-count'].value + 1):
violations.append(RuleViolation(self.id, u"Commit violåtion %d" % i, u"Contënt %d" % i, i))
return violations
# The below code is present so that we can test that we actually ignore it
def func_should_be_ignored():
pass
global_variable_should_be_ignored = True
|
l0nax/gitlint
|
gitlint/hooks.py
|
import io
import shutil
import os
import stat
from gitlint.utils import DEFAULT_ENCODING
from gitlint.git import git_hooks_dir
COMMIT_MSG_HOOK_SRC_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "files", "commit-msg")
COMMIT_MSG_HOOK_DST_PATH = "commit-msg"
GITLINT_HOOK_IDENTIFIER = "### gitlint commit-msg hook start ###\n"
class GitHookInstallerError(Exception):
pass
class GitHookInstaller(object):
""" Utility class that provides methods for installing and uninstalling the gitlint commitmsg hook. """
@staticmethod
def commit_msg_hook_path(lint_config):
return os.path.join(git_hooks_dir(lint_config.target), COMMIT_MSG_HOOK_DST_PATH)
@staticmethod
def _assert_git_repo(target):
""" Asserts that a given target directory is a git repository """
hooks_dir = git_hooks_dir(target)
if not os.path.isdir(hooks_dir):
raise GitHookInstallerError(u"{0} is not a git repository.".format(target))
@staticmethod
def install_commit_msg_hook(lint_config):
GitHookInstaller._assert_git_repo(lint_config.target)
dest_path = GitHookInstaller.commit_msg_hook_path(lint_config)
if os.path.exists(dest_path):
raise GitHookInstallerError(
u"There is already a commit-msg hook file present in {0}.\n".format(dest_path) +
u"gitlint currently does not support appending to an existing commit-msg file.")
# copy hook file
shutil.copy(COMMIT_MSG_HOOK_SRC_PATH, dest_path)
# make hook executable
st = os.stat(dest_path)
os.chmod(dest_path, st.st_mode | stat.S_IEXEC)
@staticmethod
def uninstall_commit_msg_hook(lint_config):
GitHookInstaller._assert_git_repo(lint_config.target)
dest_path = GitHookInstaller.commit_msg_hook_path(lint_config)
if not os.path.exists(dest_path):
raise GitHookInstallerError(u"There is no commit-msg hook present in {0}.".format(dest_path))
with io.open(dest_path, encoding=DEFAULT_ENCODING) as fp:
lines = fp.readlines()
if len(lines) < 2 or lines[1] != GITLINT_HOOK_IDENTIFIER:
msg = u"The commit-msg hook in {0} was not installed by gitlint (or it was modified).\n" + \
u"Uninstallation of 3th party or modified gitlint hooks is not supported."
raise GitHookInstallerError(msg.format(dest_path))
# If we are sure it's a gitlint hook, go ahead and remove it
os.remove(dest_path)
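# Example usage (a sketch added for illustration, not part of the original module): both hook
# operations take a LintConfig whose `target` points at the repository to act on. The path below
# is hypothetical:
#
#   from gitlint.config import LintConfig
#   config = LintConfig()
#   config.target = "/path/to/some/repo"
#   GitHookInstaller.install_commit_msg_hook(config)
#   GitHookInstaller.uninstall_commit_msg_hook(config)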
|
l0nax/gitlint
|
gitlint/tests/rules/test_title_rules.py
|
# -*- coding: utf-8 -*-
from gitlint.tests.base import BaseTestCase
from gitlint.rules import TitleMaxLength, TitleTrailingWhitespace, TitleHardTab, TitleMustNotContainWord, \
TitleTrailingPunctuation, TitleLeadingWhitespace, TitleRegexMatches, RuleViolation
class TitleRuleTests(BaseTestCase):
def test_max_line_length(self):
rule = TitleMaxLength()
# assert no error
violation = rule.validate(u"å" * 72, None)
self.assertIsNone(violation)
# assert error on line length > 72
expected_violation = RuleViolation("T1", "Title exceeds max length (73>72)", u"å" * 73)
violations = rule.validate(u"å" * 73, None)
self.assertListEqual(violations, [expected_violation])
# set line length to 120, and check no violation on length 73
rule = TitleMaxLength({'line-length': 120})
violations = rule.validate(u"å" * 73, None)
self.assertIsNone(violations)
# assert raise on 121
expected_violation = RuleViolation("T1", "Title exceeds max length (121>120)", u"å" * 121)
violations = rule.validate(u"å" * 121, None)
self.assertListEqual(violations, [expected_violation])
def test_trailing_whitespace(self):
rule = TitleTrailingWhitespace()
# assert no error
violations = rule.validate(u"å", None)
self.assertIsNone(violations)
# trailing space
expected_violation = RuleViolation("T2", "Title has trailing whitespace", u"å ")
violations = rule.validate(u"å ", None)
self.assertListEqual(violations, [expected_violation])
# trailing tab
expected_violation = RuleViolation("T2", "Title has trailing whitespace", u"å\t")
violations = rule.validate(u"å\t", None)
self.assertListEqual(violations, [expected_violation])
def test_hard_tabs(self):
rule = TitleHardTab()
# assert no error
violations = rule.validate(u"This is å test", None)
self.assertIsNone(violations)
# contains hard tab
expected_violation = RuleViolation("T4", "Title contains hard tab characters (\\t)", u"This is å\ttest")
violations = rule.validate(u"This is å\ttest", None)
self.assertListEqual(violations, [expected_violation])
def test_trailing_punctuation(self):
rule = TitleTrailingPunctuation()
# assert no error
violations = rule.validate(u"This is å test", None)
self.assertIsNone(violations)
# assert errors for different punctuations
punctuation = u"?:!.,;"
for char in punctuation:
line = u"This is å test" + char # note that make sure to include some unicode!
gitcontext = self.gitcontext(line)
expected_violation = RuleViolation("T3", u"Title has trailing punctuation ({0})".format(char), line)
violations = rule.validate(line, gitcontext)
self.assertListEqual(violations, [expected_violation])
def test_title_must_not_contain_word(self):
rule = TitleMustNotContainWord()
# no violations
violations = rule.validate(u"This is å test", None)
self.assertIsNone(violations)
# no violation if WIP occurs inside a word
violations = rule.validate(u"This is å wiping test", None)
self.assertIsNone(violations)
# match literally
violations = rule.validate(u"WIP This is å test", None)
expected_violation = RuleViolation("T5", "Title contains the word 'WIP' (case-insensitive)",
u"WIP This is å test")
self.assertListEqual(violations, [expected_violation])
# match case insensitive
violations = rule.validate(u"wip This is å test", None)
expected_violation = RuleViolation("T5", "Title contains the word 'WIP' (case-insensitive)",
u"wip This is å test")
self.assertListEqual(violations, [expected_violation])
# match if there is a colon after the word
violations = rule.validate(u"WIP:This is å test", None)
expected_violation = RuleViolation("T5", "Title contains the word 'WIP' (case-insensitive)",
u"WIP:This is å test")
self.assertListEqual(violations, [expected_violation])
# match multiple words
rule = TitleMustNotContainWord({'words': u"wip,test,å"})
violations = rule.validate(u"WIP:This is å test", None)
expected_violation = RuleViolation("T5", "Title contains the word 'wip' (case-insensitive)",
u"WIP:This is å test")
expected_violation2 = RuleViolation("T5", "Title contains the word 'test' (case-insensitive)",
u"WIP:This is å test")
expected_violation3 = RuleViolation("T5", u"Title contains the word 'å' (case-insensitive)",
u"WIP:This is å test")
self.assertListEqual(violations, [expected_violation, expected_violation2, expected_violation3])
def test_leading_whitespace(self):
rule = TitleLeadingWhitespace()
# assert no error
violations = rule.validate("a", None)
self.assertIsNone(violations)
# leading space
expected_violation = RuleViolation("T6", "Title has leading whitespace", " a")
violations = rule.validate(" a", None)
self.assertListEqual(violations, [expected_violation])
# leading tab
expected_violation = RuleViolation("T6", "Title has leading whitespace", "\ta")
violations = rule.validate("\ta", None)
self.assertListEqual(violations, [expected_violation])
# unicode test
expected_violation = RuleViolation("T6", "Title has leading whitespace", u" ☺")
violations = rule.validate(u" ☺", None)
self.assertListEqual(violations, [expected_violation])
def test_regex_matches(self):
commit = self.gitcommit(u"US1234: åbc\n")
# assert no violation on default regex (=everything allowed)
rule = TitleRegexMatches()
violations = rule.validate(commit.message.title, commit)
self.assertIsNone(violations)
# assert no violation on matching regex
rule = TitleRegexMatches({'regex': u"^US[0-9]*: å"})
violations = rule.validate(commit.message.title, commit)
self.assertIsNone(violations)
# assert violation when no matching regex
rule = TitleRegexMatches({'regex': u"^UÅ[0-9]*"})
violations = rule.validate(commit.message.title, commit)
expected_violation = RuleViolation("T7", u"Title does not match regex (^UÅ[0-9]*)", u"US1234: åbc")
self.assertListEqual(violations, [expected_violation])
|
l0nax/gitlint
|
gitlint/tests/config/test_config.py
|
# -*- coding: utf-8 -*-
try:
# python 2.x
from mock import patch
except ImportError:
# python 3.x
from unittest.mock import patch # pylint: disable=no-name-in-module, import-error
from gitlint import rules
from gitlint.config import LintConfig, LintConfigError, LintConfigGenerator, GITLINT_CONFIG_TEMPLATE_SRC_PATH
from gitlint import options
from gitlint.tests.base import BaseTestCase, ustr
class LintConfigTests(BaseTestCase):
def test_set_rule_option(self):
config = LintConfig()
# assert default title line-length
self.assertEqual(config.get_rule_option('title-max-length', 'line-length'), 72)
# change line length and assert it is set
config.set_rule_option('title-max-length', 'line-length', 60)
self.assertEqual(config.get_rule_option('title-max-length', 'line-length'), 60)
def test_set_rule_option_negative(self):
config = LintConfig()
# non-existing rule
expected_error_msg = u"No such rule 'föobar'"
with self.assertRaisesMessage(LintConfigError, expected_error_msg):
config.set_rule_option(u'föobar', u'lïne-length', 60)
# non-existing option
expected_error_msg = u"Rule 'title-max-length' has no option 'föobar'"
with self.assertRaisesMessage(LintConfigError, expected_error_msg):
config.set_rule_option('title-max-length', u'föobar', 60)
# invalid option value
expected_error_msg = u"'föo' is not a valid value for option 'title-max-length.line-length'. " + \
u"Option 'line-length' must be a positive integer (current value: 'föo')."
with self.assertRaisesMessage(LintConfigError, expected_error_msg):
config.set_rule_option('title-max-length', 'line-length', u"föo")
def test_set_general_option(self):
config = LintConfig()
# Check that default general options are correct
self.assertTrue(config.ignore_merge_commits)
self.assertTrue(config.ignore_fixup_commits)
self.assertTrue(config.ignore_squash_commits)
self.assertTrue(config.ignore_revert_commits)
self.assertFalse(config.ignore_stdin)
self.assertFalse(config.staged)
self.assertFalse(config.debug)
self.assertEqual(config.verbosity, 3)
active_rule_classes = tuple(type(rule) for rule in config.rules)
self.assertTupleEqual(active_rule_classes, config.default_rule_classes)
# ignore - set by string
config.set_general_option("ignore", "title-trailing-whitespace, B2")
self.assertEqual(config.ignore, ["title-trailing-whitespace", "B2"])
# ignore - set by list
config.set_general_option("ignore", ["T1", "B3"])
self.assertEqual(config.ignore, ["T1", "B3"])
# verbosity
config.set_general_option("verbosity", 1)
self.assertEqual(config.verbosity, 1)
# ignore_merge_commit
config.set_general_option("ignore-merge-commits", "false")
self.assertFalse(config.ignore_merge_commits)
# ignore_fixup_commit
config.set_general_option("ignore-fixup-commits", "false")
self.assertFalse(config.ignore_fixup_commits)
# ignore_squash_commit
config.set_general_option("ignore-squash-commits", "false")
self.assertFalse(config.ignore_squash_commits)
# ignore_revert_commit
config.set_general_option("ignore-revert-commits", "false")
self.assertFalse(config.ignore_revert_commits)
# debug
config.set_general_option("debug", "true")
self.assertTrue(config.debug)
# ignore-stdin
config.set_general_option("ignore-stdin", "true")
self.assertTrue(config.ignore_stdin)
# staged
config.set_general_option("staged", "true")
self.assertTrue(config.staged)
# target
config.set_general_option("target", self.SAMPLES_DIR)
self.assertEqual(config.target, self.SAMPLES_DIR)
# extra_path has its own test: test_extra_path and test_extra_path_negative
# contrib has its own tests: test_contrib and test_contrib_negative
def test_contrib(self):
config = LintConfig()
contrib_rules = ["contrib-title-conventional-commits", "CC1"]
config.set_general_option("contrib", ",".join(contrib_rules))
self.assertEqual(config.contrib, contrib_rules)
# Check contrib-title-conventional-commits contrib rule
actual_rule = config.rules.find_rule("contrib-title-conventional-commits")
self.assertTrue(actual_rule.is_contrib)
self.assertEqual(ustr(type(actual_rule)), "<class 'conventional_commit.ConventionalCommit'>")
self.assertEqual(actual_rule.id, 'CT1')
self.assertEqual(actual_rule.name, u'contrib-title-conventional-commits')
self.assertEqual(actual_rule.target, rules.CommitMessageTitle)
expected_rule_option = options.ListOption(
"types",
["fix", "feat", "chore", "docs", "style", "refactor", "perf", "test", "revert", "ci", "build"],
"Comma separated list of allowed commit types.",
)
self.assertListEqual(actual_rule.options_spec, [expected_rule_option])
self.assertDictEqual(actual_rule.options, {'types': expected_rule_option})
# Check contrib-body-requires-signed-off-by contrib rule
actual_rule = config.rules.find_rule("contrib-body-requires-signed-off-by")
self.assertTrue(actual_rule.is_contrib)
self.assertEqual(ustr(type(actual_rule)), "<class 'signedoff_by.SignedOffBy'>")
self.assertEqual(actual_rule.id, 'CC1')
self.assertEqual(actual_rule.name, u'contrib-body-requires-signed-off-by')
# reset value (this is a different code path)
config.set_general_option("contrib", "contrib-body-requires-signed-off-by")
self.assertEqual(actual_rule, config.rules.find_rule("contrib-body-requires-signed-off-by"))
self.assertIsNone(config.rules.find_rule("contrib-title-conventional-commits"))
# empty value
config.set_general_option("contrib", "")
self.assertListEqual(config.contrib, [])
def test_contrib_negative(self):
config = LintConfig()
# non-existent contrib rule
with self.assertRaisesMessage(LintConfigError, u"No contrib rule with id or name 'föo' found."):
config.contrib = u"contrib-title-conventional-commits,föo"
# UserRuleError, RuleOptionError should be re-raised as LintConfigErrors
side_effects = [rules.UserRuleError(u"üser-rule"), options.RuleOptionError(u"rüle-option")]
for side_effect in side_effects:
with patch('gitlint.config.rule_finder.find_rule_classes', side_effect=side_effect):
with self.assertRaisesMessage(LintConfigError, ustr(side_effect)):
config.contrib = u"contrib-title-conventional-commits"
def test_extra_path(self):
config = LintConfig()
config.set_general_option("extra-path", self.get_user_rules_path())
self.assertEqual(config.extra_path, self.get_user_rules_path())
actual_rule = config.rules.find_rule('UC1')
self.assertTrue(actual_rule.is_user_defined)
self.assertEqual(ustr(type(actual_rule)), "<class 'my_commit_rules.MyUserCommitRule'>")
self.assertEqual(actual_rule.id, 'UC1')
self.assertEqual(actual_rule.name, u'my-üser-commit-rule')
self.assertEqual(actual_rule.target, None)
expected_rule_option = options.IntOption('violation-count', 1, u"Number of violåtions to return")
self.assertListEqual(actual_rule.options_spec, [expected_rule_option])
self.assertDictEqual(actual_rule.options, {'violation-count': expected_rule_option})
# reset value (this is a different code path)
config.set_general_option("extra-path", self.SAMPLES_DIR)
self.assertEqual(config.extra_path, self.SAMPLES_DIR)
self.assertIsNone(config.rules.find_rule("UC1"))
def test_extra_path_negative(self):
config = LintConfig()
regex = u"Option extra-path must be either an existing directory or file (current value: 'föo/bar')"
# incorrect extra_path
with self.assertRaisesMessage(LintConfigError, regex):
config.extra_path = u"föo/bar"
# extra path contains classes with errors
with self.assertRaisesMessage(LintConfigError,
"User-defined rule class 'MyUserLineRule' must have a 'validate' method"):
config.extra_path = self.get_sample_path("user_rules/incorrect_linerule")
def test_set_general_option_negative(self):
config = LintConfig()
# Note that we shouldn't test whether we can set unicode because python just doesn't allow unicode attributes
with self.assertRaisesMessage(LintConfigError, "'foo' is not a valid gitlint option"):
config.set_general_option("foo", u"bår")
# try setting _config_path, this is a real attribute of LintConfig, but the code should prevent it from
# being set
with self.assertRaisesMessage(LintConfigError, "'_config_path' is not a valid gitlint option"):
config.set_general_option("_config_path", u"bår")
# invalid verbosity
incorrect_values = [-1, u"föo"]
for value in incorrect_values:
expected_msg = u"Option 'verbosity' must be a positive integer (current value: '{0}')".format(value)
with self.assertRaisesMessage(LintConfigError, expected_msg):
config.verbosity = value
incorrect_values = [4]
for value in incorrect_values:
with self.assertRaisesMessage(LintConfigError, "Option 'verbosity' must be set between 0 and 3"):
config.verbosity = value
# invalid ignore_xxx_commits
ignore_attributes = ["ignore_merge_commits", "ignore_fixup_commits", "ignore_squash_commits",
"ignore_revert_commits"]
incorrect_values = [-1, 4, u"föo"]
for attribute in ignore_attributes:
for value in incorrect_values:
option_name = attribute.replace("_", "-")
with self.assertRaisesMessage(LintConfigError,
"Option '{0}' must be either 'true' or 'false'".format(option_name)):
setattr(config, attribute, value)
# invalid ignore -> not here because ignore is a ListOption which converts everything to a string before
# splitting, which means it will accept just about everything
# invalid boolean options
for attribute in ['debug', 'staged', 'ignore_stdin']:
option_name = attribute.replace("_", "-")
with self.assertRaisesMessage(LintConfigError,
"Option '{0}' must be either 'true' or 'false'".format(option_name)):
setattr(config, attribute, u"föobar")
# extra-path has its own negative test
# invalid target
with self.assertRaisesMessage(LintConfigError,
u"Option target must be an existing directory (current value: 'föo/bar')"):
config.target = u"föo/bar"
def test_ignore_independent_from_rules(self):
# Test that the lintconfig rules are not modified when setting config.ignore
# This was different in the past, this test is mostly here to catch regressions
config = LintConfig()
original_rules = config.rules
config.ignore = ["T1", "T2"]
self.assertEqual(config.ignore, ["T1", "T2"])
self.assertSequenceEqual(config.rules, original_rules)
def test_config_equality(self):
self.assertEqual(LintConfig(), LintConfig())
self.assertNotEqual(LintConfig(), LintConfigGenerator())
# Ensure LintConfig are not equal if they differ on their attributes
attrs = [("verbosity", 1), ("rules", []), ("ignore_stdin", True), ("debug", True),
("ignore", ["T1"]), ("staged", True), ("_config_path", self.get_sample_path()),
("ignore_merge_commits", False), ("ignore_fixup_commits", False),
("ignore_squash_commits", False), ("ignore_revert_commits", False),
("extra_path", self.get_sample_path("user_rules")), ("target", self.get_sample_path()),
("contrib", ["CC1"])]
for attr, val in attrs:
config = LintConfig()
setattr(config, attr, val)
self.assertNotEqual(LintConfig(), config)
# Other attributes don't matter
config1 = LintConfig()
config2 = LintConfig()
config1.foo = u"bår"
self.assertEqual(config1, config2)
config2.foo = u"dūr"
self.assertEqual(config1, config2)
class LintConfigGeneratorTests(BaseTestCase):
@staticmethod
@patch('gitlint.config.shutil.copyfile')
def test_install_commit_msg_hook_negative(copy):
LintConfigGenerator.generate_config(u"föo/bar/test")
copy.assert_called_with(GITLINT_CONFIG_TEMPLATE_SRC_PATH, u"föo/bar/test")
|
l0nax/gitlint
|
gitlint/tests/samples/user_rules/incorrect_linerule/my_line_rule.py
|
# -*- coding: utf-8 -*-
from gitlint.rules import LineRule
class MyUserLineRule(LineRule):
id = "UC2"
name = "my-lïne-rule"
# missing validate method, missing target attribute
|
l0nax/gitlint
|
gitlint/tests/test_utils.py
|
# -*- coding: utf-8 -*-
from gitlint import utils
from gitlint.tests.base import BaseTestCase
try:
# python 2.x
from mock import patch
except ImportError:
# python 3.x
from unittest.mock import patch # pylint: disable=no-name-in-module, import-error
class UtilsTests(BaseTestCase):
def tearDown(self):
# Since we're messing around with `utils.PLATFORM_IS_WINDOWS` during these tests, we need to reset
# its value after we're done so it doesn't influence other tests
utils.PLATFORM_IS_WINDOWS = utils.platform_is_windows()
@patch('os.environ')
def test_use_sh_library(self, patched_env):
patched_env.get.return_value = "1"
self.assertEqual(utils.use_sh_library(), True)
patched_env.get.assert_called_once_with("GITLINT_USE_SH_LIB", None)
for invalid_val in ["0", u"foöbar"]:
patched_env.get.reset_mock() # reset mock call count
patched_env.get.return_value = invalid_val
self.assertEqual(utils.use_sh_library(), False, invalid_val)
patched_env.get.assert_called_once_with("GITLINT_USE_SH_LIB", None)
# Assert that when GITLINT_USE_SH_LIB is not set, we fall back to checking whether we're on Windows
utils.PLATFORM_IS_WINDOWS = True
patched_env.get.return_value = None
self.assertEqual(utils.use_sh_library(), False)
utils.PLATFORM_IS_WINDOWS = False
self.assertEqual(utils.use_sh_library(), True)
@patch('gitlint.utils.locale')
def test_default_encoding_non_windows(self, mocked_locale):
utils.PLATFORM_IS_WINDOWS = False
mocked_locale.getpreferredencoding.return_value = u"foöbar"
self.assertEqual(utils.getpreferredencoding(), u"foöbar")
mocked_locale.getpreferredencoding.assert_called_once()
mocked_locale.getpreferredencoding.return_value = False
self.assertEqual(utils.getpreferredencoding(), u"UTF-8")
@patch('os.environ')
def test_default_encoding_windows(self, patched_env):
utils.PLATFORM_IS_WINDOWS = True
# Mock out os.environ
mock_env = {}
def mocked_get(key, default):
return mock_env.get(key, default)
patched_env.get.side_effect = mocked_get
# Assert getpreferredencoding reads env vars in order: LC_ALL, LC_CTYPE, LANG
mock_env = {"LC_ALL": u"lc_all_välue", "LC_CTYPE": u"foo", "LANG": u"bar"}
self.assertEqual(utils.getpreferredencoding(), u"lc_all_välue")
mock_env = {"LC_CTYPE": u"lc_ctype_välue", "LANG": u"hur"}
self.assertEqual(utils.getpreferredencoding(), u"lc_ctype_välue")
mock_env = {"LANG": u"lang_välue"}
self.assertEqual(utils.getpreferredencoding(), u"lang_välue")
# Assert split on dot
mock_env = {"LANG": u"foo.bär"}
self.assertEqual(utils.getpreferredencoding(), u"bär")
# assert default encoding is UTF-8
mock_env = {}
self.assertEqual(utils.getpreferredencoding(), "UTF-8")
mock_env = {"FOO": u"föo"}
self.assertEqual(utils.getpreferredencoding(), "UTF-8")
|
l0nax/gitlint
|
gitlint/tests/samples/user_rules/import_exception/invalid_python.py
|
# flake8: noqa
# This is invalid python code which will cause an import exception
class MyObject:
|
l0nax/gitlint
|
examples/my_configuration_rules.py
|
# -*- coding: utf-8 -*-
from gitlint.rules import ConfigurationRule
from gitlint.options import IntOption
"""
Full details on user-defined rules: https://jorisroovers.com/gitlint/user_defined_rules
The ReleaseConfigurationRule class below is an example of a user-defined ConfigurationRule. Configuration rules are
gitlint rules that are applied once per commit and BEFORE any other rules are run. Configuration Rules are meant to
dynamically change gitlint's configuration and/or the commit that is about to be linted. A typical use-case for this
is modifying the behavior of gitlint's rules based on a commit's contents.
Notes:
- Modifying the commit object DOES NOT modify the actual git commit message in the target repo, only gitlint's copy of
it.
- Modifying the config object only has effect on the commit that is being linted, subsequent commits will not
automatically inherit this configuration.
"""
class ReleaseConfigurationRule(ConfigurationRule):
"""
This rule will modify gitlint's behavior for Release Commits.
This example might not be the most realistic for a real-world scenario,
but is meant to give an overview of what's possible.
"""
# A rule MUST have a human friendly name
name = "release-configuration-rule"
# A rule MUST have a *unique* id, we recommend starting with UCR
# (for User-defined Configuration-Rule), but this can really be anything.
id = "UCR1"
# A rule MAY have an option_spec if its behavior should be configurable.
options_spec = [IntOption('custom-verbosity', 2, "Gitlint verbosity for release commits")]
def apply(self, config, commit):
self.log.debug("ReleaseConfigurationRule: This line will be visible when running `gitlint --debug`")
# If the commit title starts with 'Release', we want to modify
# how all subsequent rules interpret that commit
if commit.message.title.startswith("Release"):
# If your Release commit messages are auto-generated, the
# body might contain trailing whitespace. Let's ignore that
config.ignore.append("body-trailing-whitespace")
# Similarly, the body lines might exceed 80 chars,
# let's set gitlint's limit to 200
# To set rule options use:
# config.set_rule_option(<rule-name>, <rule-option>, <value>)
config.set_rule_option("body-max-line-length", "line-length", 200)
# For kicks, let's set gitlint's verbosity to 2
# To set general options use
# config.set_general_option(<general-option>, <value>)
config.set_general_option("verbosity", 2)
# We can also use custom options to make this configurable
config.set_general_option("verbosity", self.options['custom-verbosity'].value)
# Strip any lines starting with $ from the commit message
# (this only affects how gitlint sees your commit message, it does
# NOT modify your actual commit in git)
commit.message.body = [line for line in commit.message.body if not line.startswith("$")]
# You can add any extra properties you want to the commit object, these will be available later on
# in all rules.
commit.my_property = u"This is my property"
|
l0nax/gitlint
|
gitlint/cache.py
|
class PropertyCache(object):
""" Mixin class providing a simple cache. """
def __init__(self):
self._cache = {}
def _try_cache(self, cache_key, cache_populate_func):
""" Tries to get a value from the cache identified by `cache_key`.
If no value is found in the cache, do a function call to `cache_populate_func` to populate the cache
and then return the value from the cache. """
if cache_key not in self._cache:
cache_populate_func()
return self._cache[cache_key]
def cache(original_func=None, cachekey=None):
""" Cache decorator. Caches function return values.
Requires the parent class to extend and initialize PropertyCache.
Usage:
# Use function name as cache key
@cache
def myfunc(args):
...
# Specify cache key
@cache(cachekey="foobar")
def myfunc(args):
...
"""
# Decorators with optional arguments are a bit convoluted in python, especially if you want to support both
# Python 2 and 3. See some of the links below for details.
def cache_decorator(func):
# If no specific cache key is given, use the function name as cache key
if not cache_decorator.cachekey:
cache_decorator.cachekey = func.__name__
def wrapped(*args):
def cache_func_result():
# Call decorated function and store its result in the cache
args[0]._cache[cache_decorator.cachekey] = func(*args)
return args[0]._try_cache(cache_decorator.cachekey, cache_func_result)
return wrapped
# Passing parent function variables to child functions requires special voodoo in python2:
# https://stackoverflow.com/a/14678445/381010
cache_decorator.cachekey = cachekey # attribute on the function
# To support optional kwargs for decorators, we need to check if a function is passed as first argument or not.
# https://stackoverflow.com/a/24617244/381010
if original_func:
return cache_decorator(original_func)
return cache_decorator
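# Illustrative sketch (added for clarity, not part of the original module) of combining
# PropertyCache with @cache; the class and function names below are made up:
#
#   class ExpensiveThing(PropertyCache):
#       @property
#       @cache
#       def value(self):
#           return compute_value()              # computed once, then served from self._cache
#
#   class OtherThing(PropertyCache):
#       @property
#       @cache(cachekey="shared-key")
#       def value(self):
#           return 42                           # cached under the explicit key "shared-key"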
|
vinayak19th/Brevis-2.0
|
brevis/predict/apps.py
|
from django.apps import AppConfig
from .paper import get_articles
class PredictConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'predict'
|
vinayak19th/Brevis-2.0
|
brevis/predict/admin.py
|
from django.contrib import admin
from .models import Article
class ArticleAdmin(admin.ModelAdmin):
list_display = ('title', 'body',)
prepopulated_fields = {'slug': ('title',)}
admin.site.register(Article, ArticleAdmin)
|
vinayak19th/Brevis-2.0
|
brevis/predict/startup.py
|
from .models import Article
import os
import signal
import sys
from .paper import get_articles
def startup_hook():
print("Clearing DB old")
Article.objects.all().delete()
print("Gathering articles")
articles = get_articles()
for title in articles:
a = Article(title = title, body=articles[title]["content"],link=articles[title]["url"])
a.save()
print("Startup Hook")
def run_startup():
## Server Startup Script
startup_hook()
|
vinayak19th/Brevis-2.0
|
brevis/predict/forms.py
|
from django import forms
""" Class based form model to extend usage to add ability to save summaries into database"""
class WebsiteForm(forms.Form):
weblink = forms.URLField(label = 'Paste url here (with https)')
|
vinayak19th/Brevis-2.0
|
brevis/predict/bart.py
|
import requests
import json
from transformers import BartTokenizer
class SummaryModel:
model = "facebook/bart-large-xsum"
tokenizer = BartTokenizer.from_pretrained(model)
print("Tokenizer loaded")
def __init__(self):
print("model loaded")
def preprocess(self,text,maxlen):
"""Function to pre-process text inputs
Args:
text [str]: Text to be summarized
maxlen [int]: Maximum summary length, forwarded to the serving model
Returns:
[list]: tokens to pass to the predictor
"""
#Pre-Process Data
assert(type(text) == str),"Text being passed is not a string"
assert(type(maxlen) == int),"Maxlen must be an integer"
print("datatype Text confirmed")
tokens = SummaryModel.tokenizer(text)
print("Tokens generated")
batch = dict(tokens)
batch["maxlen"] = maxlen
batch = [batch]
#Send Server Request
input_data = {"signature_name": "serving_default","instances": batch}
input_data = json.dumps(input_data)
return input_data
def pred(self,text,maxlen=300,tokens_only=False):
"""Returns summaries from model
Args:
text ([str]): text to be summarized
maxlen (int, optional): maximum summary length, forwarded to the serving model. Defaults to 300.
tokens_only (bool, optional): Flag to set decoding returns. Defaults to False.
Returns:
[list]: returns summary text if 'tokens_only' set to false,
else returns summary tokens
"""
input_data = self.preprocess(text,maxlen)
print("Data pre-processed")
r = requests.post("http://localhost:8501/v1/models/bart:predict", data=input_data)
print("Model inference complete")
flag = False
try:
output_tokens = json.loads(r.text)['predictions']
except KeyError:
output_tokens = "ERROR:" + json.loads(r.text)['error']
flag = True
if(tokens_only or flag):
return output_tokens
else:
return self.decoder(output_tokens)
def decoder(self,tokens):
"""Returns summary text from summary
Args:
tokens ([list]): summary tokens
Returns:
[list]: list containing the string summary
"""
result = [SummaryModel.tokenizer.decode(g, skip_special_tokens=False, clean_up_tokenization_spaces=True) for g in tokens]
return result
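# Example usage (a sketch added for illustration, not part of the original module): pred() posts
# the tokenized payload to a TensorFlow Serving endpoint at
# http://localhost:8501/v1/models/bart:predict, so a serving instance exposing a model named
# "bart" must be running for this to work:
#
#   model = SummaryModel()
#   summaries = model.pred("Some long article text ...", maxlen=150)
#   print(summaries[0])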
|
vinayak19th/Brevis-2.0
|
brevis/tools/trie.py
|
import os
import string
import pickle
import json
from pathlib import Path
import argparse
from tqdm import tqdm
punctuations = string.punctuation + "'' “”'-"
class TrieNode:
def __init__(self):
self.is_end = False
self.children = {}
class Trie:
def __init__(self):
self.root = TrieNode()
self.ctx = self.root
def reset_ctx(self):
self.ctx = self.root
def step(self, ch):
if ch in self.ctx.children:
self.ctx = self.ctx.children[ch]
return True
return False
def insert(self, word):
ptr = self.root
for ch in word:
if ch not in ptr.children:
ptr.children[ch] = TrieNode()
ptr = ptr.children[ch]
ptr.is_end = True
def insert_vocab_list(self, vocab_list: list):
for word in tqdm(vocab_list):
self.insert(word.lower())
def insert_vocab_json(self, jsonfilepath):
with open(jsonfilepath) as f:
jsonfile = json.load(f)
self.insert_vocab_list(jsonfile)
def test_corpus__(self, corpus) -> bool:
self.reset_ctx()
c = 0
not_in_vocab = []
for word in corpus.lower().split():
self.reset_ctx()
stale = False
for ch in word:
if stale or not ch.isalpha():
continue
if not self.step(ch):
c += 1
not_in_vocab.append(word)
stale = True
print(f"{c} words not in vocab {not_in_vocab}")
return True
def test_corpus_(self, corpus) -> bool:
# tests to see if there is any word in the corpus that is not in the dictionary
ptr = self.root
c = 0
not_in_vocab = []
stale = False
for word in corpus.lower().split():
stale = False
ptr = self.root
for ch in word:
if stale:
continue
if ch in punctuations:
print(word, ch)
continue
# test for punctuations
# if not ptr.is_end:
# # return False
# c += 1
# not_in_vocab.append(word)
# stale = True
ptr = self.root
else:
if ch not in ptr.children:
# return False
c += 1
not_in_vocab.append(word)
stale = True
if not stale:
ptr = ptr.children[ch]
print(f"{c} words not in vocab {not_in_vocab}")
return True
def test_corpus(self, corpus, dispbad=False) -> bool:
# tests to see if there is any word in the corpus that is not in the dictionary
ptr = self.root
for ch in corpus.lower():
if ch in punctuations:
# test for punctuations
if not ptr.is_end:
return False
ptr = self.root
else:
if ch not in ptr.children:
return False
ptr = ptr.children[ch]
return ptr.is_end or ptr == self.root
def save(self, filepath):
with open(filepath, 'wb') as f:
pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
def load(self, filepath):
with open(filepath, 'rb') as f:
trie_saved = pickle.load(f)
self.root = trie_saved.root
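# Illustrative usage sketch (added for clarity, not part of the original module):
#
#   trie = Trie()
#   trie.insert_vocab_list(["hello", "world"])
#   trie.test_corpus("hello world")     # True: every word is in the vocabulary
#   trie.test_corpus("hello there")     # False: "there" is not in the vocabulary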
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-json", "--jsonfilename", help="json vocab file name")
parser.add_argument("-save", "--savefilename", help="trie save file name")
args = parser.parse_args()
BASE_DIR = Path(__file__).resolve().parent
VOCAB_DIR = os.path.join(BASE_DIR, 'savedvocab')
SAVED_TRIE_DIR = os.path.join(BASE_DIR, 'savedtries')
trie = Trie()
if args.jsonfilename:
vocabjsonpath = os.path.join(VOCAB_DIR, args.jsonfilename)
print(f"opening {vocabjsonpath}.")
trie.insert_vocab_json(vocabjsonpath)
if args.savefilename:
print(f"Saving to {args.savefilename}.")
trie.save(os.path.join(SAVED_TRIE_DIR, args.savefilename))
print(VOCAB_DIR)
|
vinayak19th/Brevis-2.0
|
brevis/predict/models.py
|
from django.db import models
from django.template.defaultfilters import slugify
from django.urls import reverse
from .summarizer import summarizer
class Article(models.Model):
title = models.CharField(max_length=255)
body = models.TextField()
body_summarized = models.TextField(null=True, blank=True)
slug = models.SlugField(null=False, unique=True)
text_len = models.PositiveIntegerField(null=True, blank=True)
summary_len = models.PositiveIntegerField(null=True, blank=True)
link = models.URLField(null=True, blank=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('article_detail', kwargs={ 'slug': self.slug })
def summarize(self):
if not self.body_summarized:
print(f"Summarizing {self.title}")
self.body_summarized = summarizer(self.body)
if len(self.body_summarized) > 1:
self.summary_len = len(self.body_summarized.split(" "))
else:
self.summary_len = len(self.body_summarized[0].split(" "))
self.save()
def save(self, *args, **kwargs): # new
self.text_len = len(self.body.split(" "))
if not self.slug:
self.slug = slugify(self.title)
print(self.slug)
return super().save(*args, **kwargs)
|
vinayak19th/Brevis-2.0
|
brevis/predict/urls.py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.ArticleListView.as_view(), name='article_list'),
path('<slug:slug>', views.ArticleDetailView.as_view(), name='article_detail')
#path('', views.index, name='index')
]
from .startup import run_startup
# run_startup()
|
vinayak19th/Brevis-2.0
|
brevis/predict/migrations/0001_initial.py
|
# Generated by Django 3.2 on 2021-04-14 05:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('body', models.TextField()),
('body_summarized', models.TextField(blank=True, null=True)),
('slug', models.SlugField(unique=True)),
('text_len', models.PositiveIntegerField(blank=True, null=True)),
('summary_len', models.PositiveIntegerField(blank=True, null=True)),
('link', models.URLField(blank=True, null=True)),
],
),
]
|
vinayak19th/Brevis-2.0
|
brevis/predict/views.py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.generic import DetailView, ListView
from .models import Article
from .paper import get_articles
def index(request):
hot_articles = get_articles()
return render(request, 'predict/index.html', { "hot_articles" : hot_articles })
class ArticleListView(ListView):
model = Article
template_name = 'article_list.html'
class ArticleDetailView(DetailView):
model = Article
template_name = 'article_detail.html'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['object_list'] = Article.objects.all()
return context
def get_object(self):
obj = super().get_object()
obj.summarize()
return obj
|
vinayak19th/Brevis-2.0
|
brevis/predict/paper.py
|
import random
import os
import sys
from pathlib import Path
import newspaper
from tools.trie import *
import requests
from bs4 import BeautifulSoup
from newsapi import NewsApiClient
# NUM_POPULAR_URLS = 3
# NUM_EACH_POPULAR = 2
# FORGET_ARTICLE = False
NUM_ARTICLES = 20
BASE_DIR = Path(__file__).resolve().parent.parent
SAVED_TRIE_DIR = BASE_DIR / 'tools' / 'savedtries'
def get_articles():
newsapi = NewsApiClient(api_key='d84cf1257d084ed3b9eec34250c389ca')
all_articles_response = newsapi.get_everything(sources='bbc-news,the-verge',
domains='bbc.co.uk,techcrunch.com',
language='en',
sort_by='relevancy')
title_content_dict = {}
articles = all_articles_response['articles']
    # iterate over at most NUM_ARTICLES articles (the API may return fewer)
    for article in articles[:NUM_ARTICLES]:
title = article['title']
if title in title_content_dict:
continue
html = requests.get(article['url'])
soup = BeautifulSoup(html.text, 'html.parser')
content = [p_tag.get_text() for p_tag in soup.find_all('p')]
content = '\n'.join(content)
title_content_dict[title] = {'content':content,'url':article['url']}
return title_content_dict
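# Sketch of how the returned mapping can be consumed (keys follow the dict built above):
#   for title, info in get_articles().items():
#       print(title, info['url'], len(info['content']))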
# def forget_articles(url):
# print(f"Forgettig {url} articles")
# domain = \
# url.replace("https://", "http://").replace("http://", "").split("/")[0]
# d_pth = os.path.join(newspaper.settings.MEMO_DIR, domain + ".txt")
# if os.path.exists(d_pth):
# os.remove(d_pth)
# def get_articles():
# word_checker = Trie()
# print(SAVED_TRIE_DIR / 'trie.pkl')
# print("Loading word checker...")
# word_checker.load(SAVED_TRIE_DIR / 'trie.pkl')
# print(f"Obtaining {NUM_POPULAR_URLS} popular URLs")
# populars = newspaper.popular_urls()[:NUM_POPULAR_URLS]
# for p in populars:
# if FORGET_ARTICLE:
# forget_articles(p)
# print(p)
# print("Building popular newspapers...")
# popular_newspaper_build = []
# for idx, p in enumerate(populars):
# print(f"Building {idx + 1} \t {p}")
# popular_newspaper_build.append(newspaper.build(p, memoize_articles = False, language='en'))
# print("Getting articles text list...")
# articles_text_list = []
# title_article_dict = {}
# for pb in popular_newspaper_build:
# size = len(pb.articles)
# print(f"{pb.brand} has {size} articles.")
# for _ in range(NUM_EACH_POPULAR):
# while True:
# index = random.randint(0, size-1)
# print(index, end = ' ')
# article = pb.articles[index]
# try:
# article.download()
# article.parse()
# title = article.title
# text = article.text
# if not text:
# raise Exception('')
# except:
# continue
# print(text, word_checker.test_corpus__(text))
# title_article_dict[title] = text
# # articles_text_list.append(text)
# break
# print()
# return title_article_dict
|
vinayak19th/Brevis-2.0
|
brevis/predict/summarizer.py
|
<filename>brevis/predict/summarizer.py
from .bart import SummaryModel
def summarizer(text,maxlen=200):
model = SummaryModel()
summary = model.pred(text,maxlen=maxlen)
if (len(summary[0])>2):
summary = summary[0]
print("Summary:",len(summary))
print("Summary[0]:",len(summary[0]))
return summary
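# e.g. (sketch): shorten an arbitrary piece of text, capping the summary length at ~150
#   short = summarizer("A long article body ...", maxlen=150)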
|
milebag/pythons
|
app.py
|
#-*- coding: UTF-8 -*-
import socket,select
import sys
import threading
from multiprocessing import Process
class Proxy:
def __init__(self,soc):
print("init")
self.client,_=soc.accept()
print("accept")
self.target=None
self.request_url=None
self.BUFSIZE=4096
self.method=None
self.targetHost=None
def getClientRequest(self):
request=self.client.recv(self.BUFSIZE)
print(request)
if not request:
return None
cn=request.find('\n')
firstLine=request[:cn]
print (firstLine[:len(firstLine)-9])
line=firstLine.split()
self.method=line[0]
self.targetHost=line[1]
return request
def commonMethod(self,request):
tmp=self.targetHost.split('/')
net=tmp[0]+'//'+tmp[2]
request=request.replace(net,'')
targetAddr=self.getTargetInfo(tmp[2])
try:
(fam,_,_,_,addr)=socket.getaddrinfo(targetAddr[0],targetAddr[1])[0]
except:
return
self.target=socket.socket(fam)
self.target.connect(addr)
self.target.send(request)
self.nonblocking()
def connectMethod(self,request): #对于CONNECT处理可以添加在这里
pass
def run(self):
request=self.getClientRequest()
if request:
            if self.method in ['GET', 'POST', 'PUT', 'DELETE', 'HEAD']:
self.commonMethod(request)
elif self.method=='CONNECT':
self.connectMethod(request)
def nonblocking(self):
inputs=[self.client,self.target]
while True:
readable,writeable,errs=select.select(inputs,[],inputs,3)
if errs:
break
for soc in readable:
data=soc.recv(self.BUFSIZE)
if data:
if soc is self.client:
self.target.send(data)
elif soc is self.target:
self.client.send(data)
else:
break
self.client.close()
self.target.close()
def getTargetInfo(self,host):
port=0
site=None
if ':' in host:
tmp=host.split(':')
site=tmp[0]
port=int(tmp[1])
else:
site=host
port=80
print("host",site,port)
return "www.google.com",port
if __name__=='__main__':
host = ''
port = 8080
backlog = 5
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
server.bind((host,port))
server.listen(5)
while True:
proxy =Proxy(server)
t=threading.Thread(target = proxy.run,args=())
t.start()
# p=Process(target=Proxy(server).run, args=()) #多进程
# p.start()
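# Quick manual test (sketch): with the proxy listening on port 8080, point an HTTP client at it, e.g.
#   curl -x http://127.0.0.1:8080 http://example.com/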
|
skdys/thermalspin
|
old_sph_version/set_simulation.py
|
#!/usr/bin/env python3
from thermalspin.set_simulation import set_simulation
set_simulation()
|
skdys/thermalspin
|
old_sph_version/thermalspin/dynamic_simulation.py
|
<filename>old_sph_version/thermalspin/dynamic_simulation.py
#!/usr/bin/env python3
"""
User interface for initializing and run simulations
"""
import getopt
import sys
from thermalspin.heisenberg_simulation import init_simulation_tilted, init_simulation_random, init_simulation_aligned, \
run_simulation
from thermalspin.read_config import read_config_file
def usage():
print("""
Single ensemble simulation.
Usage: dynamic_simulation.py [OPTIONS] [PARAMETERS]\n
-i, --init=SIMNAME Initialize a simulation, need to specify next a dimension and a magnetization
-r, --run=SIMNAME Run a simulation named SIMNAME
-d, --dimensions=SIZE Generate a default simulation with SIZE specified e.g. 10x10x10
-m, --magnetization=DIRECTION Initial magnetization along DIRECTION specified like 0,0
--tilted Tilted initial position
-h, --help Shows this message
""")
def dynamic_simulation():
DEFAULT_PARAMS, SIMULATIONS_DIRECTORY, PROCESSES_NUMBER = read_config_file()
mode = None
simname = None
nx, ny, nz = (None, None, None)
theta_0, phi_0 = (None, None)
tilted = False
try:
        opts, args = getopt.getopt(sys.argv[1:], "hr:i:d:m:", ["help", "initialize=", "run=", "dimensions=", "magnetization=", "tilted"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
        if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-r", "--run"):
mode = "run"
simname = arg
elif opt in ("-i", "--initialize"):
mode = "init"
simname = arg
elif opt in ("-d", "--dimensions"):
nx, ny, nz = arg.split("x")
elif opt in ("-m", "--magnetization"):
theta_0, phi_0 = arg.split(",")
elif opt in ("--tilted"):
tilted = True
if mode == "run":
print(f"Running simulation {simname}\n")
run_simulation(SIMULATIONS_DIRECTORY + simname + "/", verbose=True)
elif mode == "init":
if tilted:
init_simulation_tilted(SIMULATIONS_DIRECTORY + simname + "/", int(nx), int(ny), int(nz),
params=DEFAULT_PARAMS)
print(f"Simulation {simname} generated with default params. \n"
f"Lattice has dimensions {nx}x{ny}x{nz} \n"
f"Tilted initial magnetization\n")
elif theta_0 is None:
init_simulation_random(SIMULATIONS_DIRECTORY + simname + "/", int(nx), int(ny), int(nz),
params=DEFAULT_PARAMS)
print(f"Simulation {simname} generated with default params. \n"
f"Lattice has dimensions {nx}x{ny}x{nz} \n"
f"Random initial magnetization\n")
else:
init_simulation_aligned(SIMULATIONS_DIRECTORY + simname + "/", int(nx), int(ny), int(nz),
params=DEFAULT_PARAMS, theta_0=int(theta_0), phi_0=int(phi_0))
print(f"Default simulation {simname} generated with default params. \n"
f"Lattice has dimensions {nx}x{ny}x{nz} \n"
f"Initial magnetization ({theta_0},{phi_0})\n")
else:
usage()
sys.exit(2)
print("Finished\n")
|
skdys/thermalspin
|
old_sph_version/thermalspin/heisenberg_system.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Classical Heisenberg model Monte Carlo simulator
"""
import numpy as np
from numba import jit
from skdylib.spherical_coordinates import sph_dot, sph2xyz, sph_urand
class HeisenbergSystem:
"""
    This class represents a system described by a Heisenberg Hamiltonian, also known as the O(3) model
"""
def __init__(self, state, J, Hz, T):
self.J = J
self.Hz = Hz
self.T = T
self.beta = 1 / T
self.state = state
self.nx = self.state.shape[0]
self.ny = self.state.shape[1]
self.nz = self.state.shape[2]
self.nspin = self.nx * self.ny * self.nz
# Compute energy and magnetization of the initial state
self.energy = compute_energy(self.state, self.nx, self.ny, self.nz, J, Hz)
self.total_magnetization = compute_magnetization(self.state, self.nx, self.ny, self.nz)
@property
def magnetization(self):
"""
The magnetization of the system
:return: The value of the magnetization
"""
return self.total_magnetization / self.nspin
def step(self):
"""
Evolve the system computing a step of Metropolis-Hastings Monte Carlo.
It actually calls the non-object oriented function.
"""
s, e, m = numba_step(self.state, self.nx, self.ny, self.nz, self.J, self.Hz, self.beta, self.energy,
self.total_magnetization)
self.state = s
self.energy = e
self.total_magnetization = m
# Compiled functions
@jit(nopython=True, cache=True)
def compute_magnetization(state, nx, ny, nz):
"""
Compute the total magnetization
    :return: [Mx, My, Mz] vector of the total (summed, not averaged) magnetization
"""
counter_r = np.zeros(3)
for i, j, k in np.ndindex(nx, ny, nz):
r = sph2xyz(state[i, j, k, 0], state[i, j, k, 1])
counter_r += r
return counter_r
@jit(nopython=True, cache=True)
def site_energy(i, j, k, state, nx, ny, nz, J, h):
energy = 0.0
ii = (i + 1) % nx
energy += sph_dot(state[i, j, k, 0], state[ii, j, k, 0],
state[i, j, k, 1] - state[ii, j, k, 1])
ii = (i - 1) % nx
energy += sph_dot(state[i, j, k, 0], state[ii, j, k, 0],
state[i, j, k, 1] - state[ii, j, k, 1])
jj = (j + 1) % ny
energy += sph_dot(state[i, j, k, 0], state[i, jj, k, 0],
state[i, j, k, 1] - state[i, jj, k, 1])
jj = (j - 1) % ny
energy += sph_dot(state[i, j, k, 0], state[i, jj, k, 0],
state[i, j, k, 1] - state[i, jj, k, 1])
if nz > 1:
kk = (k + 1) % nz
energy += sph_dot(state[i, j, k, 0], state[i, j, kk, 0],
state[i, j, k, 1] - state[i, j, kk, 1])
kk = (k - 1) % nz
energy += sph_dot(state[i, j, k, 0], state[i, j, kk, 0],
state[i, j, k, 1] - state[i, j, kk, 1])
energy *= - J / 2
energy += -h * np.cos(state[i, j, k, 0])
return energy
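# Note: site_energy uses -(J/2) per neighbour pair plus the field term -h*cos(theta), so summing it
# over all sites (as compute_energy does below) counts each exchange bond exactly once.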
@jit(nopython=True, cache=True)
def compute_energy(state, nx, ny, nz, J, h):
"""
Compute the energy of the system
:return: The value of the energy
"""
energy_counter = 0.0
for i, j, k in np.ndindex(nx, ny, nz):
energy_counter += site_energy(i, j, k, state, nx, ny, nz, J, h)
return energy_counter
@jit(nopython=True, cache=True)
def numba_step(state, nx, ny, nz, J, h, beta, energy, total_magnetization):
"""
Evolve the system computing a step of Metropolis-Hastings Monte Carlo.
This non OOP function is accelerated trough jit compilation.
"""
# Select a random spin in the system
i = np.random.randint(0, nx)
j = np.random.randint(0, ny)
k = np.random.randint(0, nz)
# Compute the energy due to that spin
e0 = 0
ii = (i + 1) % nx
e0 += sph_dot(state[i, j, k, 0], state[ii, j, k, 0],
state[i, j, k, 1] - state[ii, j, k, 1])
ii = (i - 1) % nx
e0 += sph_dot(state[i, j, k, 0], state[ii, j, k, 0],
state[i, j, k, 1] - state[ii, j, k, 1])
jj = (j + 1) % ny
e0 += sph_dot(state[i, j, k, 0], state[i, jj, k, 0],
state[i, j, k, 1] - state[i, jj, k, 1])
jj = (j - 1) % ny
e0 += sph_dot(state[i, j, k, 0], state[i, jj, k, 0],
state[i, j, k, 1] - state[i, jj, k, 1])
if nz > 1:
kk = (k + 1) % nz
e0 += sph_dot(state[i, j, k, 0], state[i, j, kk, 0],
state[i, j, k, 1] - state[i, j, kk, 1])
kk = (k - 1) % nz
e0 += sph_dot(state[i, j, k, 0], state[i, j, kk, 0],
state[i, j, k, 1] - state[i, j, kk, 1])
e0 *= -J
e0 += -h * np.cos(state[i, j, k, 0])
# Generate a new random direction and compute energy due to the spin in the new direction
r_theta, r_phi = sph_urand()
e1 = 0
ii = (i + 1) % nx
e1 += sph_dot(r_theta, state[ii, j, k, 0],
r_phi - state[ii, j, k, 1])
ii = (i - 1) % nx
e1 += sph_dot(r_theta, state[ii, j, k, 0],
r_phi - state[ii, j, k, 1])
jj = (j + 1) % ny
e1 += sph_dot(r_theta, state[i, jj, k, 0],
r_phi - state[i, jj, k, 1])
jj = (j - 1) % ny
e1 += sph_dot(r_theta, state[i, jj, k, 0],
r_phi - state[i, jj, k, 1])
if nz > 1:
kk = (k + 1) % nz
e1 += sph_dot(r_theta, state[i, j, kk, 0],
r_phi - state[i, j, kk, 1])
kk = (k - 1) % nz
e1 += sph_dot(r_theta, state[i, j, kk, 0],
r_phi - state[i, j, kk, 1])
e1 *= -J
e1 += -h * np.cos(r_theta)
# Apply Metropolis algorithm
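    # Acceptance probability w = exp(-beta * (e1 - e0)): moves that lower the local energy give w >= 1
    # and are always accepted; uphill moves are accepted with probability w.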
w = np.exp(beta * (e0 - e1))
dice = np.random.uniform(0, 1)
if dice < w:
energy += (e1 - e0)
total_magnetization += (sph2xyz(r_theta, r_phi) - sph2xyz(state[i, j, k, 0], state[i, j, k, 1]))
state[i, j, k, :] = np.array([r_theta, r_phi])
return state, energy, total_magnetization
|
skdys/thermalspin
|
old_sph_version/thermalspin/heisenberg_simulation.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Classical Heisenberg model Monte Carlo simulator
"""
import json
import os
import shutil
import time
import numpy as np
from kent_distribution.kent_distribution import kent2
from skdylib.spherical_coordinates import sph_urand, xyz2sph
from thermalspin.heisenberg_system import HeisenbergSystem
SNAPSHOTS_ARRAY_INITIAL_DIMENSION = int(3e4)
class HeisenbergSimulation:
"""
Handler of the HeisenbergSystem simulation.
    It runs the simulation and collects the results.
"""
def __init__(self, hsys: HeisenbergSystem, take_states_snapshots=False):
"""
:param hsys: system to be evolved
"""
self.system = hsys
self.steps_counter = 0
self.snapshots_counter = 0
self.snapshots_array_dimension = SNAPSHOTS_ARRAY_INITIAL_DIMENSION
if take_states_snapshots:
self.snapshots = np.zeros(
shape=(SNAPSHOTS_ARRAY_INITIAL_DIMENSION, self.system.nx, self.system.ny, self.system.nz, 2))
else:
self.snapshots = None
self.snapshots_t = np.zeros(shape=SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.snapshots_e = np.zeros(shape=SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.snapshots_m = np.zeros(shape=(SNAPSHOTS_ARRAY_INITIAL_DIMENSION, 3))
self.snapshots_J = np.zeros(shape=SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.snapshots_T = np.zeros(shape=SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.snapshots_Hz = np.zeros(shape=SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.take_snapshot()
def run(self, nsteps):
"""
Evolve the system for a given number of steps
:param nsteps: The number of steps
"""
self.steps_counter += nsteps
for t in range(1, nsteps + 1):
self.system.step()
def take_snapshot(self):
""""
Take a snapshot of the system, parameters and results
"""
# First check if the snapshots array needs reshape
if self.snapshots_counter == self.snapshots_array_dimension:
if self.snapshots is not None:
self.snapshots.resize((self.snapshots_counter + SNAPSHOTS_ARRAY_INITIAL_DIMENSION, self.system.nx,
self.system.ny, self.system.nz, 2))
else:
self.snapshots = None
self.snapshots_t.resize(self.snapshots_counter + SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.snapshots_e.resize(self.snapshots_counter + SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.snapshots_m.resize((self.snapshots_counter + SNAPSHOTS_ARRAY_INITIAL_DIMENSION, 3))
self.snapshots_J.resize(self.snapshots_counter + SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.snapshots_T.resize(self.snapshots_counter + SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
self.snapshots_Hz.resize(self.snapshots_counter + SNAPSHOTS_ARRAY_INITIAL_DIMENSION)
# Then takes a snapshot
if self.snapshots is not None:
self.snapshots[self.snapshots_counter, :, :, :, :] = self.system.state.copy()
self.snapshots_t[self.snapshots_counter] = self.steps_counter
self.snapshots_e[self.snapshots_counter] = self.system.energy
self.snapshots_m[self.snapshots_counter, :] = self.system.magnetization
self.snapshots_J[self.snapshots_counter] = self.system.J
self.snapshots_T[self.snapshots_counter] = self.system.T
self.snapshots_Hz[self.snapshots_counter] = self.system.Hz
self.snapshots_counter += 1
def run_with_snapshots(self, steps_number, delta_snapshots, verbose=False):
"""
Evolve the system while taking snapshots
:param steps_number: Number of steps to be computed
:param delta_snapshots: Distance between snapshots
"""
if steps_number % delta_snapshots != 0:
raise Exception("steps_number must be multiple of delta_snapshots")
nsnapshots = int(steps_number / delta_snapshots)
for t in range(0, nsnapshots):
self.run(delta_snapshots)
self.take_snapshot()
if verbose:
print(f"Step number {self.steps_counter}", end="\r")
# Functions for initialization and saving to disk the results of a simulation
def init_simulation_aligned(simdir, nx, ny, nz, params, theta_0=None, phi_0=None):
"""
Generate a lattice of spins aligned toward an axis
:param simdir: Directory of the simulation
:param nx: Number of x cells
:param ny: Number of y cells
:param nz: Number of z cells
:param params: parameters of the simulation
:param phi_0:
:param theta_0:
"""
shutil.rmtree(simdir, ignore_errors=True)
state = np.ones(shape=(nx, ny, nz, 2))
state[:, :, :, 0] = state[:, :, :, 0] * theta_0
state[:, :, :, 1] = state[:, :, :, 1] * phi_0
os.makedirs(simdir)
params_file = open(simdir + "params.json", "w")
json.dump(params, params_file, indent=2)
np.save(simdir + "state.npy", state)
def init_simulation_tilted(simdir, nx, ny, nz, params):
"""
    Generate a lattice of spins with a tilted initial configuration (directions drawn from a Kent distribution)
:param simdir: Directory of the simulation
:param nx: Number of x cells
:param ny: Number of y cells
:param nz: Number of z cells
:param params: parameters of the simulation
"""
shutil.rmtree(simdir, ignore_errors=True)
state = np.ones(shape=(nx, ny, nz, 2))
    gamma1 = np.array([0, 0, 1], dtype=float)
    gamma2 = np.array([0, 1, 0], dtype=float)
    gamma3 = np.array([1, 0, 0], dtype=float)
kent = kent2(gamma1, gamma2, gamma3, kappa=20, beta=0)
for i, j, k in np.ndindex((nx, ny, nz)):
state[i, j, k, :] = xyz2sph(kent.rvs())
os.makedirs(simdir)
params_file = open(simdir + "params.json", "w")
json.dump(params, params_file, indent=2)
np.save(simdir + "state.npy", state)
def init_simulation_random(simdir, nx, ny, nz, params):
"""
    Generate a lattice of spins with random (uniformly distributed) initial directions
:param simdir: Directory of the simulation
:param nx: Number of x cells
:param ny: Number of y cells
:param nz: Number of z cells
:param params: parameters of the simulation
"""
shutil.rmtree(simdir, ignore_errors=True)
state = np.zeros(shape=(nx, ny, nz, 2))
for i, j, k in np.ndindex(nx, ny, nz):
theta_r, phi_r = sph_urand()
state[i, j, k, 0] = theta_r
state[i, j, k, 1] = phi_r
os.makedirs(simdir)
params_file = open(simdir + "params.json", "w")
json.dump(params, params_file, indent=2)
np.save(simdir + "state.npy", state)
def run_simulation(simulation_directory, verbose=True):
"""
Run a simulation and save to disk the results
:param simulation_directory: the directory of the simulation
:param verbose: print step numbers in real time
"""
if os.path.isfile(simulation_directory + "params.json"):
params_file = open(simulation_directory + "params.json", "r")
params = json.load(params_file)
else:
raise Exception("Missing params.json file")
if os.path.isfile(simulation_directory + "state.npy"):
state = np.load(simulation_directory + "state.npy")
else:
raise Exception("Missing state.npy file")
param_J = np.array(params["param_J"])
param_Hz = np.array(params["param_Hz"])
param_T = np.array(params["param_T"])
steps_number = params["steps_number"]
delta_snapshots = params["delta_snapshots"]
save_snapshots = params["save_snapshots"]
sys = HeisenbergSystem(state, param_J[0], param_Hz[0], param_T[0])
hsim = HeisenbergSimulation(sys, take_states_snapshots=save_snapshots)
for i in range(param_T.shape[0]):
T_str = "{0:.3f}".format(param_T[i])
Hz_str = "{0:.3f}".format(param_Hz[i])
print(f"Simulation stage: {i}\n"
f"Temperature: {T_str}\n"
f"Hz: {Hz_str}\n"
f"Steps number: {steps_number}\n"
f"Delta snapshots: {delta_snapshots}\n")
hsim.system.J = param_J[i]
hsim.system.T = param_T[i]
hsim.system.Hz = param_Hz[i]
start_time = time.time()
hsim.run_with_snapshots(steps_number, delta_snapshots, verbose=verbose)
end_time = time.time()
run_time = end_time - start_time
run_time_str = "{0:.2f}".format(run_time)
print(f"Stage completed in {run_time_str} seconds\n")
print("Saving results ...", end="")
start = time.time()
# Save the last state
np.save(simulation_directory + "state.npy", hsim.system.state)
# Collect the results of the simulation
new_results = np.zeros(shape=(hsim.snapshots_counter, 4))
new_results[:, 0] = hsim.snapshots_e[:hsim.snapshots_counter]
new_results[:, 1:4] = hsim.snapshots_m[:hsim.snapshots_counter]
# Collect the snapshots and params
new_snapshots_params = np.zeros(shape=(hsim.snapshots_counter, 4))
new_snapshots_params[:, 0] = hsim.snapshots_t[:hsim.snapshots_counter]
new_snapshots_params[:, 1] = hsim.snapshots_J[:hsim.snapshots_counter]
new_snapshots_params[:, 2] = hsim.snapshots_Hz[:hsim.snapshots_counter]
new_snapshots_params[:, 3] = hsim.snapshots_T[:hsim.snapshots_counter]
# If old data is found, append the new one
if os.path.isfile(simulation_directory + "snapshots_params.npy") and os.path.isfile(
simulation_directory + "results.npy"):
old_results = np.load(simulation_directory + "results.npy")
results = np.concatenate((old_results, new_results[1:]))
old_snapshots_params = np.load(simulation_directory + "snapshots_params.npy")
last_t = old_snapshots_params[-1, 0]
new_snapshots_params[:, 0] += last_t
snapshots_params = np.concatenate((old_snapshots_params, new_snapshots_params[1:]))
else:
snapshots_params = new_snapshots_params
results = new_results
# Save all
np.save(simulation_directory + "snapshots_params.npy", snapshots_params)
np.save(simulation_directory + "results.npy", results)
if save_snapshots:
new_snapshots = hsim.snapshots[:hsim.snapshots_counter]
if os.path.isfile(simulation_directory + "snapshots.npy"):
old_snapshots = np.load(simulation_directory + "snapshots.npy")
snapshots = np.concatenate((old_snapshots, new_snapshots[1:]))
else:
snapshots = new_snapshots
np.save(simulation_directory + "snapshots.npy", snapshots)
end = time.time()
saving_time = end - start
saving_time_str = "{0:.6f}".format(saving_time)
print(f"done in {saving_time_str} seconds.")
|
skdys/thermalspin
|
src/thermalspin/data_analysis.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Show simple data of the simulation
"""
import colorsys
import os
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from numba import jit
from skdylib.spherical_coordinates import xyz2sph
SIMULATIONS_DIRECTORY = "./simulations/"
# ----------------------------------------- LOADING --------------------------------------------------------------------
def load_results(simulation_name):
"""
Load the results of a simulation
:param simulation_name: name of the simulation
    :return: final_state, t, J, D, Hz, T, E, m
"""
simdir = SIMULATIONS_DIRECTORY + f"{simulation_name}/"
final_state = np.load(simdir + "state.npy")
results = np.load(simdir + "results.npy")
E = results[:, 0]
m = results[:, 1:4]
# Build steps axis
snapshots_params = np.load(simdir + "snapshots_params.npy")
t = snapshots_params[:, 0]
J = snapshots_params[:, 1]
D = snapshots_params[:, 2]
Hz = snapshots_params[:, 3]
T = snapshots_params[:, 4]
return final_state, t, J, D, Hz, T, E, m
def load_snapshots(simulation_name):
"""
:param simulation_name: The name of the simulation
:return: An array of the snapshots
"""
simulation_directory = SIMULATIONS_DIRECTORY + f"{simulation_name}/"
snapshots = np.load(simulation_directory + "snapshots.npy")
return snapshots
def load_set_results(set_name, load_set_snapshots=False):
path = SIMULATIONS_DIRECTORY + set_name + "/"
simulations_list = [f for f in os.listdir(path) if not f.startswith('.')]
simulations_list.sort()
simulation_number = len(simulations_list)
for i in range(simulation_number):
try:
final_state_loaded, t_loaded, J_loaded, D_loaded, Hz_loaded, T_loaded, E_loaded, m_loaded = load_results(
set_name + "/" + simulations_list[i])
if i == 0:
final_state = []
snapshots = []
L = np.zeros(shape=(simulation_number, 3))
t = np.zeros(shape=((simulation_number,) + t_loaded.shape))
J = np.zeros(shape=((simulation_number,) + J_loaded.shape))
D = np.zeros(shape=((simulation_number,) + D_loaded.shape))
Hz = np.zeros(shape=((simulation_number,) + Hz_loaded.shape))
T = np.zeros(shape=((simulation_number,) + T_loaded.shape))
E = np.zeros(shape=((simulation_number,) + E_loaded.shape))
m = np.zeros(shape=((simulation_number,) + m_loaded.shape))
final_state.append(final_state_loaded)
L[i] = np.array(final_state_loaded.shape[0])
t[i] = t_loaded
J[i] = J_loaded
D[i] = D_loaded
Hz[i] = Hz_loaded
T[i] = T_loaded
E[i] = E_loaded
m[i] = m_loaded
print(simulations_list[i], "loaded")
except Exception as e:
print(f"Error in {simulations_list[i]}")
print(e)
if load_set_snapshots:
snapshots.append(load_snapshots(set_name + "/" + simulations_list[i]))
return final_state, L, t, J, D, Hz, T, E, m, snapshots
def arrange_set_results_LT(L_lst, t_lst, J_lst, D_lst, Hz_lst, T_lst, E_lst, m_lst, final_state_lst,
snapshots_lst=None):
L_new = np.unique(L_lst)
for i in range(len(T_lst)):
T_lst[i] = np.unique(np.around(T_lst[i], decimals=5))
T_new = np.unique(T_lst)
L_num = L_new.shape[0]
T_num = T_new.shape[0]
t_num = t_lst.shape[1]
sim_num = t_lst.shape[0]
final_state_new = [[None] * T_num for _ in range(L_num)]
snapshots_new = [[None] * T_num for _ in range(L_num)]
E_new = np.zeros(shape=(L_num, T_num, t_num))
t_new = np.zeros(shape=(L_num, T_num, t_num))
J_new = np.zeros(shape=(L_num, T_num, t_num))
D_new = np.zeros(shape=(L_num, T_num, t_num))
Hz_new = np.zeros(shape=(L_num, T_num, t_num))
m_new = np.zeros(shape=(L_num, T_num, t_num, 3))
for i in range(sim_num):
T_idx = int(np.argmax(np.equal(T_new, T_lst[i, 0])))
L_idx = int(np.argmax(np.equal(L_new, L_lst[i, 0])))
final_state_new[L_idx][T_idx] = final_state_lst[i]
if snapshots_lst is not None:
snapshots_new[L_idx][T_idx] = snapshots_lst[i]
E_new[L_idx, T_idx] = E_lst[i]
t_new[L_idx, T_idx] = t_lst[i]
J_new[L_idx, T_idx] = J_lst[i]
D_new[L_idx, T_idx] = D_lst[i]
Hz_new[L_idx, T_idx] = Hz_lst[i]
m_new[L_idx, T_idx] = m_lst[i]
return L_new, T_new, t_new, J_new, D_new, Hz_new, E_new, m_new, final_state_new, snapshots_new
def arrange_set_results_LH(L_lst, t_lst, J_lst, H_lst, T_lst, e_lst, m_lst, final_state_lst, snapshots=None):
L_new = np.unique(L_lst)
H_new = np.unique(H_lst)
L_num = L_new.shape[0]
H_num = H_new.shape[0]
t_num = t_lst.shape[1]
sim_num = t_lst.shape[0]
final_state_new = [[None] * H_num for _ in range(L_num)]
snapshots_new = [[None] * H_num for _ in range(L_num)]
e_new = np.zeros(shape=(L_num, H_num, t_num))
t_new = np.zeros(shape=(L_num, H_num, t_num))
J_new = np.zeros(shape=(L_num, H_num, t_num))
T_new = np.zeros(shape=(L_num, H_num, t_num))
m_new = np.zeros(shape=(L_num, H_num, t_num, 3))
for i in range(sim_num):
Hz_idx = int(np.argmax(np.equal(H_new, H_lst[i, 0])))
L_idx = int(np.argmax(np.equal(L_new, L_lst[i, 0])))
final_state_new[L_idx][Hz_idx] = final_state_lst[i]
if snapshots is not None:
snapshots_new[L_idx][Hz_idx] = snapshots[i]
e_new[L_idx, Hz_idx] = e_lst[i]
t_new[L_idx, Hz_idx] = t_lst[i]
J_new[L_idx, Hz_idx] = J_lst[i]
T_new[L_idx, Hz_idx] = T_lst[i]
m_new[L_idx, Hz_idx] = m_lst[i]
return L_new, H_new, t_new, J_new, T_new, e_new, m_new, final_state_new, snapshots_new
# --------------------------------------------- COMPUTING --------------------------------------------------------------
def bootstrap(initial_samples, n, new_samples_number):
old_samples_number = initial_samples.shape[0]
new_shape = (new_samples_number, n)
new_samples = np.zeros(shape=new_shape)
for i in range(new_samples_number):
indices = np.random.choice(old_samples_number, n)
new_samples[i] = initial_samples.take(indices)
return new_samples
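# e.g. (sketch): draw 500 bootstrap resamples of size 100 from a 1D observable
#   resamples = bootstrap(np.random.rand(100), n=100, new_samples_number=500)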
# @jit(nopython=True, cache=True)
def time_correlation(snapshot1, snapshot2):
"""
Compute the time correlation between two snapshots (averaging on each site)
"""
s1 = np.mean(snapshot1, axis=(0, 1, 2))
s2 = np.mean(snapshot2, axis=(0, 1, 2))
s1s2 = s1.dot(s2)
return np.mean(np.inner(snapshot1, snapshot2), axis=(0, 1, 2)) - s1s2
@jit(nopython=True, cache=True)
def translate_snapshot(snapshot, x, y, z):
nx, ny, nz, u = snapshot.shape
ret = np.zeros(shape=snapshot.shape)
for i, j, k in np.ndindex(nx, ny, nz):
ret[i, j, k] = snapshot[(i + x) % nx, (j + y) % ny, (k + z) % nz]
return ret
@jit(nopython=True, cache=True)
def product_over_last_axis(a, b):
nx, ny, nz, u = a.shape
ret = np.zeros(shape=(nx, ny, nz))
for i, j, k in np.ndindex(nx, ny, nz):
ret[i, j, k] = np.dot(a[i, j, k], b[i, j, k])
return ret
def spatial_correlation_matrix(snapshot):
nx, ny, nz, u = snapshot.shape
ret = np.zeros(shape=(nx, ny, nz))
s = np.mean(snapshot, axis=(0, 1, 2))
s1s2 = s.dot(s)
for i, j, k in np.ndindex(nx, ny, nz):
ts = translate_snapshot(snapshot, i, j, k)
ret[i, j, k] = np.mean(product_over_last_axis(snapshot, ts), axis=(0, 1, 2)) - s1s2
return ret
# @jit(nopython=True, cache=True)
def radial_distribution(correlation_matrix):
nx, ny, nz = correlation_matrix.shape
corr = np.zeros(shape=(nx * ny * nz, 2)) * np.NaN
l = 0
for i, j, k in np.ndindex(nx, ny, nz):
corr[l, 0] = np.sqrt(i ** 2 + j ** 2 + k ** 2)
corr[l, 1] = correlation_matrix[i, j, k]
l += 1
r = np.unique(corr[:, 0])
c = np.zeros(shape=r.shape)
for i in range(r.shape[0]):
c[i] = np.mean(corr[(corr[:] == r[i])[:, 0], 1])
return r, c
# ---------------------------------------------- PLOTTING --------------------------------------------------------------
def plot_state(snapshot, hls=False):
"""
Plot system state
"""
nx = snapshot.shape[0]
ny = snapshot.shape[1]
nz = snapshot.shape[2]
x, y, z = np.meshgrid(np.arange(0, nx),
np.arange(0, ny),
np.arange(0, nz))
u = np.zeros(shape=(nx, ny, nz))
v = np.zeros(shape=(nx, ny, nz))
w = np.zeros(shape=(nx, ny, nz))
h = np.zeros(shape=(nx, ny, nz))
l = np.zeros(shape=(nx, ny, nz))
for i, j, k in np.ndindex(nx, ny, nz):
u[i, j, k], v[i, j, k], w[i, j, k] = snapshot[i, j, k, 0], snapshot[i, j, k, 1], snapshot[i, j, k, 2]
theta, phi = xyz2sph(snapshot[i, j, k])
h[i, j, k] = phi / 2 / np.pi
l[i, j, k] = theta / np.pi
c = np.zeros(shape=(nx, ny, nz, 4))
if not hls:
c[:, :, :, 0] = u
c[:, :, :, 1] = v
c[:, :, :, 2] = w
else:
for i, j, k in np.ndindex(nx, ny, nz):
col = colorsys.hls_to_rgb(h[i, j, k], l[i, j, k], 1)
c[i, j, k, 0] = col[0]
c[i, j, k, 1] = col[1]
c[i, j, k, 2] = col[2]
c[:, :, :, 3] = np.ones(shape=(nx, ny, nz))
c = np.abs(c)
c2 = np.zeros(shape=(nx * ny * nz, 4))
p = 0
for i, j, k in np.ndindex((nx, ny, nz)):
c2[p] = c[i, j, k]
p += 1
c3 = np.concatenate((c2, np.repeat(c2, 2, axis=0)), axis=0)
fig = plt.figure()
ax: Axes3D = fig.gca(projection='3d')
ax.quiver(x, y, z, u, v, w, pivot='middle', color=c3)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
return fig, ax
def plot_state_2D(snapshot, hls=False):
"""
Plot system state
"""
nx = snapshot.shape[0]
ny = snapshot.shape[1]
nz = snapshot.shape[2]
x, y, z = np.meshgrid(np.arange(0, nx),
np.arange(0, ny),
np.arange(0, nz))
u = np.zeros(shape=(nx, ny, nz))
v = np.zeros(shape=(nx, ny, nz))
w = np.zeros(shape=(nx, ny, nz))
h = np.zeros(shape=(nx, ny, nz))
l = np.zeros(shape=(nx, ny, nz))
for i, j, k in np.ndindex(nx, ny, nz):
u[i, j, k], v[i, j, k], w[i, j, k] = snapshot[i, j, k, 0], snapshot[i, j, k, 1], snapshot[i, j, k, 2]
theta, phi = xyz2sph(snapshot[i, j, k])
h[i, j, k] = phi / 2 / np.pi
l[i, j, k] = theta / np.pi
c = np.zeros(shape=(nx, ny, nz, 4))
if not hls:
c[:, :, :, 0] = u
c[:, :, :, 1] = v
c[:, :, :, 2] = w
else:
for i, j, k in np.ndindex(nx, ny, nz):
col = colorsys.hls_to_rgb(h[i, j, k], l[i, j, k], 1)
c[i, j, k, 0] = col[0]
c[i, j, k, 1] = col[1]
c[i, j, k, 2] = col[2]
c[:, :, :, 3] = np.ones(shape=(nx, ny, nz))
c = np.abs(c)
c2 = np.zeros(shape=(nx * ny * nz, 4))
p = 0
for i, j, k in np.ndindex((nx, ny, nz)):
c2[p] = c[i, j, k]
p += 1
c3 = np.concatenate((c2, np.repeat(c2, 2, axis=0)), axis=0)
fig = plt.figure()
ax: Axes3D = fig.gca(projection='3d')
ax.quiver(x, y, z, u, v, w, pivot='middle', color=c3)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.tick_params(
axis='z', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax.set_zlim([-1.1, 1.1])
return fig, ax
def plot_spin_directions(snapshot, hls=False):
"""
Plot spherical representation of spins
"""
nx = snapshot.shape[0]
ny = snapshot.shape[1]
nz = snapshot.shape[2]
points = np.zeros(shape=(nx * ny * nz, 3))
n = 0
for i, j, k in np.ndindex(nx, ny, nz):
points[n] = snapshot[i, j, k]
n += 1
if not hls:
c = np.abs(points)
else:
c_arr = np.zeros((nx, ny, nz, 4))
for i, j, k in np.ndindex(nx, ny, nz):
theta, phi = xyz2sph(snapshot[i, j, k])
h = phi / 2 / np.pi
l = theta / np.pi
col = colorsys.hls_to_rgb(h, l, 1)
c_arr[i, j, k, 0] = col[0]
c_arr[i, j, k, 1] = col[1]
c_arr[i, j, k, 2] = col[2]
c_arr[:, :, :, 3] = np.ones(shape=(nx, ny, nz))
c_arr = np.abs(c_arr)
c = np.zeros(shape=(nx * ny * nz, 4))
p = 0
for i, j, k in np.ndindex((nx, ny, nz)):
c[p] = c_arr[i, j, k]
p += 1
fig = plt.figure()
ax: Axes3D = fig.gca(projection='3d')
ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=c, s=2)
ax.set_xlim([-1.1, 1.1])
ax.set_ylim([-1.1, 1.1])
ax.set_zlim([-1.1, 1.1])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
return fig, ax
|
skdys/thermalspin
|
src/set_simulation.py
|
#!/usr/bin/env python3
"""
Wrapper for the real program
"""
from thermalspin.set_simulation import set_simulation
set_simulation()
|
skdys/thermalspin
|
old_sph_version/dynamic_simulation.py
|
<gh_stars>1-10
#!/usr/bin/env python3
from thermalspin.dynamic_simulation import dynamic_simulation
dynamic_simulation()
|
skdys/thermalspin
|
src/thermalspin/set_simulation.py
|
#!/usr/bin/env python3
"""
Run multiple instance of Heisenberg
"""
import getopt
import os
import sys
import time
from multiprocessing import pool, cpu_count
import numpy as np
from skdylib.counter import Counter
from thermalspin.read_config import read_config_file
from thermalspin.simulation import init_simulation_tilted, init_simulation_aligned, init_simulation_random, \
run_simulation
DEFAULT_PARAMS, SIMULATIONS_DIRECTORY, PROCESSES_NUMBER = None, None, None
def init_set(setname, J, Hz, T, L, S_0=None, tilted=False):
simdir_list = []
params_list = []
L_list = []
set_directory = SIMULATIONS_DIRECTORY + setname + "/"
for i, j, k in np.ndindex((T.shape[0], L.shape[0], Hz.shape[0])):
T_str = "{0:.3f}".format(T[i])
Hz_str = "{0:.3f}".format(Hz[k])
simdir_list.append(set_directory + setname + f"_T{T_str}_L{L[j]}_H{Hz_str}/")
params = DEFAULT_PARAMS
params["param_T"] = [float(T[i])]
params["param_Hz"] = [float(Hz[k])]
params["param_J"] = [float(J[0])]
params_list.append(params.copy())
L_list.append(L[j].copy())
if tilted:
for i in range(len(simdir_list)):
init_simulation_tilted(simdir_list[i], nx=L_list[i], ny=L_list[i], nz=L_list[i], params=params_list[i])
elif S_0 is None:
for i in range(len(simdir_list)):
init_simulation_random(simdir_list[i], nx=L_list[i], ny=L_list[i], nz=L_list[i], params=params_list[i])
else:
for i in range(len(simdir_list)):
init_simulation_aligned(simdir_list[i], nx=L_list[i], ny=L_list[i], nz=L_list[i], params=params_list[i],
S_0=S_0)
def init_2D_set(setname, J, Hz, T, L, S_0=None):
simdir_list = []
params_list = []
L_list = []
set_directory = SIMULATIONS_DIRECTORY + setname + "/"
for i, j, k in np.ndindex((T.shape[0], L.shape[0], Hz.shape[0])):
T_str = "{0:.3f}".format(T[i])
Hz_str = "{0:.3f}".format(Hz[k])
simdir_list.append(set_directory + setname + f"_T{T_str}_L{L[j]}_H{Hz_str}/")
params = DEFAULT_PARAMS
params["param_T"] = [float(T[i])]
params["param_Hz"] = [float(Hz[k])]
params["param_J"] = [float(J[0])]
params_list.append(params.copy())
L_list.append(L[j].copy())
if S_0 is None:
for i in range(len(simdir_list)):
init_simulation_random(simdir_list[i], nx=L_list[i], ny=L_list[i], nz=1, params=params_list[i])
else:
for i in range(len(simdir_list)):
init_simulation_aligned(simdir_list[i], nx=L_list[i], ny=L_list[i], nz=1, params=params_list[i], S_0=S_0)
# Run
simulations_number = 0
completed_simulations_counter = Counter()
def run_simulation_wrapper(simdir):
run_simulation(simdir, verbose=False)
completed_simulations_counter.increment()
completed_simulations_number = completed_simulations_counter.value()
print(f"Completed simulations {completed_simulations_number}/{simulations_number}")
def run_set(set_name):
path = SIMULATIONS_DIRECTORY + set_name + "/"
file_list = [f for f in os.listdir(path) if not f.startswith('.')]
file_list.sort()
simdir_list = []
for filename in file_list:
simdir_list.append(SIMULATIONS_DIRECTORY + set_name + "/" + filename + "/")
global simulations_number
simulations_number = len(simdir_list)
if PROCESSES_NUMBER <= 0:
processes_number = cpu_count() + PROCESSES_NUMBER
else:
processes_number = PROCESSES_NUMBER
processes_pool = pool.Pool(processes=processes_number)
processes_pool.map(run_simulation_wrapper, simdir_list)
def usage():
print("""
Usage: set_simulation.py [OPTIONS] [PARAMETERS]\n
-i, --init=SETNAME Initialize a set of simulations, need to specify next a dimension and a magnetization
-r, --run=SETNAME Run a set of simulations named SETNAME
-L Specify the side dimension of the lattices size like 6,8,10
-m, --magnetization=DIRECTION Initial magnetization along DIRECTION specified like 0,0
-T, --temperature=TEMP Specify the range of temperature with TEMP like T_initial,T_final,dT e.g 0.5,3.5,1
-H Specify the range of external field like H_initial,H_final, dH
--tilted Tilted initial condition
-h, --help Shows this message
""")
def set_simulation():
global DEFAULT_PARAMS, SIMULATIONS_DIRECTORY, PROCESSES_NUMBER
DEFAULT_PARAMS, SIMULATIONS_DIRECTORY, PROCESSES_NUMBER = read_config_file()
mode = None
setname = None
L = []
J = DEFAULT_PARAMS["param_J"]
T = np.array(DEFAULT_PARAMS["param_T"])
Hz = np.array(DEFAULT_PARAMS["param_Hz"])
sim_2D = False
theta_0, phi_0 = (None, None)
Ti, Tf, dt = (None, None, None)
tilted = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hr:i:L:m:T:J:H:",
["help", "initialize=", "run=", "2D", "temperatures=", 'tilted'])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-r", "--run"):
mode = "run"
setname = arg
elif opt in ("-i", "--initialize"):
mode = "init"
setname = arg
elif opt in ("-L"):
for dim in arg.split(","):
L.append(int(dim))
L = np.array(L)
elif opt in ("-m", "--magnetization"):
theta_0, phi_0 = arg.split(",")
elif opt in ("-T", "--temperature"):
if len(arg.split(",")) == 3:
Ti, Tf, dT = arg.split(",")
Ti = float(Ti)
Tf = float(Tf)
dT = float(dT)
T = np.arange(Ti, Tf, dT)
elif len(arg.split(",")) == 1:
T = np.array([float(arg)])
elif opt in ("-H"):
if len(arg.split(",")) == 3:
Hi, Hf, dH = arg.split(",")
Hi = float(Hi)
Hf = float(Hf)
dH = float(dH)
Hz = np.arange(Hi, Hf, dH)
elif len(arg.split(",")) == 1:
Hz = np.array([float(arg)])
elif opt in "-J":
J = np.array([float(arg)])
elif opt in "--2D":
sim_2D = True
elif opt in "--tilted":
tilted = True
if mode == "run":
print(f"Running simulations in set {setname}")
starting_time = time.time()
run_set(setname)
total_time = time.time() - starting_time
total_time_str = "{0:.2f}".format(total_time)
print(f"Total running time: {total_time_str} s")
elif mode == "init":
if sim_2D:
init_2D_set(setname, J, Hz, T, L)
elif theta_0 is None:
init_set(setname, J, Hz, T, L, tilted=tilted)
        else:
            # S_0 is assumed to be the (theta_0, phi_0) pair forwarded to init_simulation_aligned;
            # it is passed by keyword so it does not collide with the tilted flag
            init_set(setname, J, Hz, T, L, S_0=(theta_0, phi_0), tilted=tilted)
else:
usage()
sys.exit(2)
print("Finished")
|
skdys/thermalspin
|
src/kent_distribution/kent_distribution.py
|
<filename>src/kent_distribution/kent_distribution.py
#!/usr/bin/env python
"""
The algorithms here are partially based on methods described in:
[The Fisher-Bingham Distribution on the Sphere, <NAME>
Journal of the Royal Statistical Society. Series B (Methodological)
Vol. 44, No. 1 (1982), pp. 71-80 Published by: Wiley
Article Stable URL: http://www.jstor.org/stable/2984712]
The example code in kent_example.py serves not only as an example
but also as a test. It performs some higher level tests but it also
generates example plots if called directly from the shell.
"""
import warnings
from numpy import *
from scipy.linalg import eig
from scipy.optimize import fmin_bfgs
from scipy.special import gamma as gamma_fun
from scipy.special import iv as modified_bessel_2ndkind
from scipy.special import ivp as modified_bessel_2ndkind_derivative
# to avoid confusion with the norm of a vector we give the normal distribution a less confusing name here
from scipy.stats import norm as gauss
from scipy.stats import uniform
# helper function
def MMul(A, B):
return inner(A, transpose(B))
# helper function to compute the L2 norm. scipy.linalg.norm is not used because that function does not allow choosing an axis
def norm(x, axis=None):
if isinstance(x, list) or isinstance(x, tuple):
x = array(x)
return sqrt(sum(x * x, axis=axis))
def kent(theta, phi, psi, kappa, beta):
"""
Generates the Kent distribution based on the spherical coordinates theta, phi, psi
with the concentration parameter kappa and the ovalness beta
"""
gamma1, gamma2, gamma3 = KentDistribution.spherical_coordinates_to_gammas(theta, phi, psi)
k = KentDistribution(gamma1, gamma2, gamma3, kappa, beta)
return k
def kent2(gamma1, gamma2, gamma3, kappa, beta):
"""
Generates the Kent distribution using the orthonormal vectors gamma1,
gamma2 and gamma3, with the concentration parameter kappa and the ovalness beta
"""
assert abs(inner(gamma1, gamma2)) < 1E-10
assert abs(inner(gamma2, gamma3)) < 1E-10
assert abs(inner(gamma3, gamma1)) < 1E-10
return KentDistribution(gamma1, gamma2, gamma3, kappa, beta)
def kent3(A, B):
"""
Generates the Kent distribution using the orthogonal vectors A and B
where A = gamma1*kappa and B = gamma2*beta (gamma3 is inferred)
    A may not have length zero but may be arbitrarily close to zero
B may have length zero however. If so, then an arbitrary value for gamma2
(orthogonal to gamma1) is chosen
"""
kappa = norm(A)
beta = norm(B)
gamma1 = A / kappa
if beta == 0.0:
gamma2 = __generate_arbitrary_orthogonal_unit_vector(gamma1)
else:
gamma2 = B / beta
theta, phi, psi = KentDistribution.gammas_to_spherical_coordinates(gamma1, gamma2)
gamma1, gamma2, gamma3 = KentDistribution.spherical_coordinates_to_gammas(theta, phi, psi)
return KentDistribution(gamma1, gamma2, gamma3, kappa, beta)
def kent4(Gamma, kappa, beta):
"""
Generates the kent distribution
"""
gamma1 = Gamma[:, 0]
gamma2 = Gamma[:, 1]
gamma3 = Gamma[:, 2]
return kent2(gamma1, gamma2, gamma3, kappa, beta)
def __generate_arbitrary_orthogonal_unit_vector(x):
v1 = cross(x, array([1.0, 0.0, 0.0]))
v2 = cross(x, array([0.0, 1.0, 0.0]))
v3 = cross(x, array([0.0, 0.0, 1.0]))
v1n = norm(v1)
v2n = norm(v2)
v3n = norm(v3)
v = [v1, v2, v3][argmax([v1n, v2n, v3n])]
return v / norm(v)
class KentDistribution(object):
minimum_value_for_kappa = 1E-6
@staticmethod
def create_matrix_H(theta, phi):
return array([
[cos(theta), -sin(theta), 0.0],
[sin(theta) * cos(phi), cos(theta) * cos(phi), -sin(phi)],
[sin(theta) * sin(phi), cos(theta) * sin(phi), cos(phi)]
])
@staticmethod
def create_matrix_Ht(theta, phi):
return transpose(KentDistribution.create_matrix_H(theta, phi))
@staticmethod
def create_matrix_K(psi):
return array([
[1.0, 0.0, 0.0],
[0.0, cos(psi), -sin(psi)],
[0.0, sin(psi), cos(psi)]
])
@staticmethod
def create_matrix_Kt(psi):
return transpose(KentDistribution.create_matrix_K(psi))
@staticmethod
def create_matrix_Gamma(theta, phi, psi):
H = KentDistribution.create_matrix_H(theta, phi)
K = KentDistribution.create_matrix_K(psi)
return MMul(H, K)
@staticmethod
def create_matrix_Gammat(theta, phi, psi):
return transpose(KentDistribution.create_matrix_Gamma(theta, phi, psi))
@staticmethod
def spherical_coordinates_to_gammas(theta, phi, psi):
Gamma = KentDistribution.create_matrix_Gamma(theta, phi, psi)
gamma1 = Gamma[:, 0]
gamma2 = Gamma[:, 1]
gamma3 = Gamma[:, 2]
return gamma1, gamma2, gamma3
@staticmethod
def gamma1_to_spherical_coordinates(gamma1):
theta = arccos(gamma1[0])
phi = arctan2(gamma1[2], gamma1[1])
return theta, phi
@staticmethod
def gammas_to_spherical_coordinates(gamma1, gamma2):
theta, phi = KentDistribution.gamma1_to_spherical_coordinates(gamma1)
Ht = KentDistribution.create_matrix_Ht(theta, phi)
u = MMul(Ht, reshape(gamma2, (3, 1)))
psi = arctan2(u[2][0], u[1][0])
return theta, phi, psi
def __init__(self, gamma1, gamma2, gamma3, kappa, beta):
self.gamma1 = array(gamma1, dtype=float64)
self.gamma2 = array(gamma2, dtype=float64)
self.gamma3 = array(gamma3, dtype=float64)
self.kappa = float(kappa)
self.beta = float(beta)
self.theta, self.phi, self.psi = KentDistribution.gammas_to_spherical_coordinates(self.gamma1, self.gamma2)
for gamma in gamma1, gamma2, gamma3:
assert len(gamma) == 3
self._cached_rvs = array([], dtype=float64)
self._cached_rvs.shape = (0, 3)
@property
def Gamma(self):
return self.create_matrix_Gamma(self.theta, self.phi, self.psi)
def normalize(self, cache=dict(), return_num_iterations=False):
"""
Returns the normalization constant of the Kent distribution.
The proportional error may be expected not to be greater than
1E-11.
>>> gamma1 = array([1.0, 0.0, 0.0])
>>> gamma2 = array([0.0, 1.0, 0.0])
>>> gamma3 = array([0.0, 0.0, 1.0])
>>> tiny = KentDistribution.minimum_value_for_kappa
>>> abs(kent2(gamma1, gamma2, gamma3, tiny, 0.0).normalize() - 4*pi) < 4*pi*1E-12
True
>>> for kappa in [0.01, 0.1, 0.2, 0.5, 2, 4, 8, 16]:
... print abs(kent2(gamma1, gamma2, gamma3, kappa, 0.0).normalize() - 4*pi*sinh(kappa)/kappa) < 1E-15*4*pi*sinh(kappa)/kappa,
...
True True True True True True True True
"""
k, b = self.kappa, self.beta
if not (k, b) in cache:
G = gamma_fun
I = modified_bessel_2ndkind
result = 0.0
j = 0
if b == 0.0:
result = (
((0.5 * k) ** (-2 * j - 0.5)) *
(I(2 * j + 0.5, k))
)
result /= G(j + 1)
result *= G(j + 0.5)
else:
while True:
a = (
exp(
log(b) * 2 * j +
log(0.5 * k) * (-2 * j - 0.5)
) * I(2 * j + 0.5, k)
)
a /= G(j + 1)
a *= G(j + 0.5)
result += a
j += 1
if abs(a) < abs(result) * 1E-12 and j > 5:
break
cache[k, b] = 2 * pi * result
if return_num_iterations:
return cache[k, b], j
else:
return cache[k, b]
def log_normalize(self, return_num_iterations=False):
"""
Returns the logarithm of the normalization constant.
"""
if return_num_iterations:
normalize, num_iter = self.normalize(return_num_iterations=True)
return log(normalize), num_iter
else:
return log(self.normalize())
def pdf_max(self, normalize=True):
return exp(self.log_pdf_max(normalize))
def log_pdf_max(self, normalize=True):
"""
Returns the maximum value of the log(pdf)
"""
if self.beta == 0.0:
x = 1
else:
x = self.kappa * 1.0 / (2 * self.beta)
if x > 1.0:
x = 1
fmax = self.kappa * x + self.beta * (1 - x ** 2)
if normalize:
return fmax - self.log_normalize()
else:
return fmax
def pdf(self, xs, normalize=True):
"""
Returns the pdf of the kent distribution for 3D vectors that
are stored in xs which must be an array of N x 3 or N x M x 3
N x M x P x 3 etc.
The code below shows how points in the pdf can be evaluated. An integral is
    calculated using random points on the sphere to determine whether the pdf is
properly normalized.
>>> from numpy.random import seed
>>> from scipy.stats import norm as gauss
>>> seed(666)
>>> num_samples = 400000
>>> xs = gauss(0, 1).rvs((num_samples, 3))
>>> xs = divide(xs, reshape(norm(xs, 1), (num_samples, 1)))
>>> assert abs(4*pi*average(kent(1.0, 1.0, 1.0, 4.0, 2.0).pdf(xs)) - 1.0) < 0.01
>>> assert abs(4*pi*average(kent(1.0, 2.0, 3.0, 4.0, 2.0).pdf(xs)) - 1.0) < 0.01
>>> assert abs(4*pi*average(kent(1.0, 2.0, 3.0, 4.0, 8.0).pdf(xs)) - 1.0) < 0.01
>>> assert abs(4*pi*average(kent(1.0, 2.0, 3.0, 16.0, 8.0).pdf(xs)) - 1.0) < 0.01
"""
return exp(self.log_pdf(xs, normalize))
def log_pdf(self, xs, normalize=True):
"""
Returns the log(pdf) of the kent distribution.
"""
axis = len(shape(xs)) - 1
g1x = sum(self.gamma1 * xs, axis)
g2x = sum(self.gamma2 * xs, axis)
g3x = sum(self.gamma3 * xs, axis)
k, b = self.kappa, self.beta
f = k * g1x + b * (g2x ** 2 - g3x ** 2)
if normalize:
return f - self.log_normalize()
else:
return f
def pdf_prime(self, xs, normalize=True):
"""
Returns the derivative of the pdf with respect to kappa and beta.
"""
return self.pdf(xs, normalize) * self.log_pdf_prime(xs, normalize)
def log_pdf_prime(self, xs, normalize=True):
"""
Returns the derivative of the log(pdf) with respect to kappa and beta.
"""
axis = len(shape(xs)) - 1
g1x = sum(self.gamma1 * xs, axis)
g2x = sum(self.gamma2 * xs, axis)
g3x = sum(self.gamma3 * xs, axis)
k, b = self.kappa, self.beta
dfdk = g1x
dfdb = g2x ** 2 - g3x ** 2
df = array([dfdk, dfdb])
if normalize:
return transpose(transpose(df) - self.log_normalize_prime())
else:
return df
def normalize_prime(self, cache=dict(), return_num_iterations=False):
"""
Returns the derivative of the normalization factor with respect to kappa and beta.
"""
k, b = self.kappa, self.beta
if not (k, b) in cache:
G = gamma_fun
I = modified_bessel_2ndkind
dIdk = lambda v, z: modified_bessel_2ndkind_derivative(v, z, 1)
dcdk, dcdb = 0.0, 0.0
j = 0
if b == 0:
dcdk = (
(G(j + 0.5) / G(j + 1)) *
((-0.5 * j - 0.125) * (k) ** (-2 * j - 1.5)) *
(I(2 * j + 0.5, k))
)
dcdk += (
(G(j + 0.5) / G(j + 1)) *
((0.5 * k) ** (-2 * j - 0.5)) *
(dIdk(2 * j + 0.5, k))
)
dcdb = 0.0
else:
while True:
dk = (
(-1 * j - 0.25) * exp(
log(b) * 2 * j +
log(0.5 * k) * (-2 * j - 1.5)
) * I(2 * j + 0.5, k)
)
dk += (
exp(
log(b) * 2 * j +
log(0.5 * k) * (-2 * j - 0.5)
) * dIdk(2 * j + 0.5, k)
)
dk /= G(j + 1)
dk *= G(j + 0.5)
db = (
2 * j * exp(
log(b) * (2 * j - 1) +
log(0.5 * k) * (-2 * j - 0.5)
) * I(2 * j + 0.5, k)
)
db /= G(j + 1)
db *= G(j + 0.5)
dcdk += dk
dcdb += db
j += 1
if abs(dk) < abs(dcdk) * 1E-12 and abs(db) < abs(dcdb) * 1E-12 and j > 5:
break
# print "dc", dcdk, dcdb, "(", k, b
cache[k, b] = 2 * pi * array([dcdk, dcdb])
if return_num_iterations:
return cache[k, b], j
else:
return cache[k, b]
def log_normalize_prime(self, return_num_iterations=False):
"""
Returns the derivative of the logarithm of the normalization factor.
"""
if return_num_iterations:
normalize_prime, num_iter = self.normalize_prime(return_num_iterations=True)
return normalize_prime / self.normalize(), num_iter
else:
return self.normalize_prime() / self.normalize()
def log_likelihood(self, xs):
"""
Returns the log likelihood for xs.
"""
retval = self.log_pdf(xs)
return sum(retval, len(shape(retval)) - 1)
def log_likelihood_prime(self, xs):
"""
Returns the derivative with respect to kappa and beta of the log likelihood for xs.
"""
retval = self.log_pdf_prime(xs)
if len(shape(retval)) == 1:
return retval
else:
return sum(retval, len(shape(retval)) - 1)
def _rvs_helper(self):
num_samples = 10000
xs = gauss(0, 1).rvs((num_samples, 3))
xs = divide(xs, reshape(norm(xs, 1), (num_samples, 1)))
pvalues = self.pdf(xs, normalize=False)
fmax = self.pdf_max(normalize=False)
return xs[uniform(0, fmax).rvs(num_samples) < pvalues]
def rvs(self, n_samples=None):
"""
Returns random samples from the Kent distribution by rejection sampling.
May become inefficient for large kappas.
The returned random samples are 3D unit vectors.
If n_samples == None then a single sample x is returned with shape (3,)
If n_samples is an integer value N then N samples are returned in an array with shape (N, 3)
"""
num_samples = 1 if n_samples == None else n_samples
rvs = self._cached_rvs
while len(rvs) < num_samples:
new_rvs = self._rvs_helper()
rvs = concatenate([rvs, new_rvs])
if n_samples == None:
self._cached_rvs = rvs[1:]
return rvs[0]
else:
self._cached_rvs = rvs[num_samples:]
retval = rvs[:num_samples]
return retval
def __repr__(self):
return "kent(%s, %s, %s, %s, %s)" % (self.theta, self.phi, self.psi, self.kappa, self.beta)
def kent_me(xs):
"""Generates and returns a KentDistribution based on a moment estimation."""
lenxs = len(xs)
xbar = average(xs, 0) # average direction of samples from origin
S = average(xs.reshape((lenxs, 3, 1)) * xs.reshape((lenxs, 1, 3)),
0) # dispersion (or covariance) matrix around origin
gamma1 = xbar / norm(xbar) # has unit length and is in the same direction and parallel to xbar
theta, phi = KentDistribution.gamma1_to_spherical_coordinates(gamma1)
H = KentDistribution.create_matrix_H(theta, phi)
Ht = KentDistribution.create_matrix_Ht(theta, phi)
B = MMul(Ht, MMul(S, H))
eigvals, eigvects = eig(B[1:, 1:])
eigvals = real(eigvals)
if eigvals[0] < eigvals[1]:
eigvals[0], eigvals[1] = eigvals[1], eigvals[0]
eigvects = eigvects[:, ::-1]
K = diag([1.0, 1.0, 1.0])
K[1:, 1:] = eigvects
G = MMul(H, K)
Gt = transpose(G)
T = MMul(Gt, MMul(S, G))
r1 = norm(xbar)
t22, t33 = T[1, 1], T[2, 2]
r2 = t22 - t33
    # the moment estimates of kappa and beta may lie outside their permitted ranges, so kappa is clamped from below
min_kappa = KentDistribution.minimum_value_for_kappa
kappa = max(min_kappa, 1.0 / (2.0 - 2.0 * r1 - r2) + 1.0 / (2.0 - 2.0 * r1 + r2))
beta = 0.5 * (1.0 / (2.0 - 2.0 * r1 - r2) - 1.0 / (2.0 - 2.0 * r1 + r2))
return kent4(G, kappa, beta)
def __kent_mle_output1(k_me, callback):
print("******** Maximum Likelihood Estimation ********"
"Initial moment estimates are:"
f"theta = {k_me.theta} "
f"phi = {k_me.phi}"
f"psi = {k_me.psi}"
f"kappa = {k_me.kappa}"
f"beta = {k_me.beta}"
"******** Starting the Gradient Descent ********"
"[iteration] kappa beta -L"
)
def __kent_mle_output2(x, minusL, output_count, verbose):
interval = verbose if isinstance(verbose, int) else 1
str_values = list()
for value in (tuple(x) + (minusL,)):
str_value = "%- 8g" % value
while len(str_value) < 12:
str_value += " "
str_values.append(str_value)
if output_count[0] % interval == 0:
print("[%3i] " + " %s" * 3) % tuple(output_count + str_values)
output_count[0] = output_count[0] + 1
def kent_mle(xs, verbose=False, return_intermediate_values=False, return_bfgs_values=False, bfgs_kwargs=dict(),
warning='warn'):
"""
Generates a KentDistribution fitted to xs using maximum likelihood estimation
For a first approximation kent_me() is used. The function
-k.log_likelihood(xs)/len(xs) (where k is an instance of KentDistribution) is
minimized.
Input:
- xs: values on the sphere to be fitted by MLE
- verbose: if True, output is given for every step
- return_intermediate_values: if true the values of all intermediate steps
are returned as well
- return_bfgs_values: if true the values from the bfgs_min algorithm are
returned as well
    - bfgs_kwargs: extra arguments that can be passed to fmin_bfgs; not all arguments may
      be overwritten. Default value of 'disp' is 0 but may be set to 1. 'gtol' is chosen to
      be 1E-7 but may be set to other values. 'full_output' is 1 (can't be overwritten),
      'callback' can't be overwritten and the first three arguments of fmin_bfgs can't be
      overwritten.
- warning: choices are
- "warn": issues any warning via warning.warn
- a file object: which results in any warning message being written to a file
(e.g. stdout)
- "none": or any other value for this argument results in no warnings to be issued
Output:
- an instance of the fitted KentDistribution
Extra output:
- if return_intermediate_values and/or return_bfgs_values is specified then
a tuple is returned with the KentDistribution argument as the first element
and containing the extra requested values in the rest of the elements.
"""
# first get estimated moments
if 'disp' not in bfgs_kwargs:
bfgs_kwargs['disp'] = 0
if 'gtol' not in bfgs_kwargs:
bfgs_kwargs['gtol'] = 1E-7
k_me = kent_me(xs)
gamma1, gamma2, gamma3, kappa, beta = k_me.gamma1, k_me.gamma2, k_me.gamma3, k_me.kappa, k_me.beta
min_kappa = KentDistribution.minimum_value_for_kappa
# method that generates an instance of KentDistribution
def generate_k(fudge_kappa, fudge_beta):
# small value is added to kappa = min_kappa + abs(fudge_kappa) > min_kappa
return kent2(gamma1, gamma2, gamma3, min_kappa + abs(fudge_kappa), abs(fudge_beta))
# method that generates the minus L to be minimized
def minus_log_likelihood(x):
return -generate_k(*x).log_likelihood(xs) / len(xs)
def minus_log_likelihood_prime(x):
return -generate_k(*x).log_likelihood_prime(xs) / len(xs)
# callback for keeping track of the values
intermediate_values = list()
    def callback(x, output_count=[0]):
        minusL = -generate_k(*x).log_likelihood(xs)
        fudge_kappa, fudge_beta = x
        kappa, beta = min_kappa + abs(fudge_kappa), abs(fudge_beta)
        intermediate_values.append((kappa, beta, minusL))
        if verbose:
            # report progress for every optimizer step, as promised in the docstring
            __kent_mle_output2((kappa, beta), minusL, output_count, verbose)
    # starting parameters (min_kappa is subtracted from kappa here and added back in generate_k)
x_start = array([kappa - min_kappa, beta])
if verbose:
__kent_mle_output1(k_me, callback)
# here the mle is done
all_values = fmin_bfgs(minus_log_likelihood, x_start, minus_log_likelihood_prime,
callback=callback, full_output=1, **bfgs_kwargs)
x_opt = all_values[0]
warnflag = all_values[6]
if warnflag:
warning_message = "Unknownw warning %s" % warnflag
if warnflag == 2:
warning_message = "Desired error not necessarily achieved due to precision loss."
if warnflag == 1:
warning_message = "Maximum number of iterations has been exceeded."
if warning == "warn":
warnings.warn(warning_message, RuntimeWarning)
if hasattr(warning, "write"):
warning.write("Warning: " + warning_message + "\n")
k = (generate_k(*x_opt),)
if return_intermediate_values:
k += (intermediate_values,)
if return_bfgs_values:
k += (all_values,)
if len(k) == 1:
k = k[0]
return k
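# A minimal usage sketch (illustrative only, not part of the original module);
# `xs` is assumed to be an (N, 3) array of unit vectors, e.g. samples drawn
# from some KentDistribution:
#
#     k_fit = kent_mle(xs)
#     print(k_fit)  # kent(theta, phi, psi, kappa, beta)
#     k_fit, steps = kent_mle(xs, return_intermediate_values=True)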
if __name__ == "__main__":
__doc__ += """
>>> from kent_example import test_example_normalization, test_example_mle, test_example_mle2
>>> from numpy.random import seed
>>> test_example_normalization(gridsize=10)
Calculating the matrix M_ij of values that can be calculated: kappa=100.0*i+1, beta=100.0+j*1
Calculating normalization factor for combinations of kappa and beta:
Iterations necessary to calculate normalize(kappa, beta):
8 x x x x x x x x x
6 81 150 172 172 x x x x x
6 51 134 172 172 172 172 172 172 x
6 27 108 172 172 172 172 172 172 x
6 19 71 159 172 172 172 172 x x
6 15 42 126 172 172 172 x x x
6 13 29 86 172 172 x x x x
6 12 23 55 141 x x x x x
x x x x x x x x x x
x x x x x x x x x x
Iterations necessary to calculate the gradient of normalize(kappa, beta):
9 x x x x x x x x x
6 82 150 172 172 x x x x x
6 54 135 172 172 172 172 172 172 x
6 31 109 172 172 172 172 172 172 x
6 22 75 160 172 172 172 172 x x
6 18 47 128 172 172 172 x x x
6 15 34 91 172 172 x x x x
6 14 27 61 143 x x x x x
x x x x x x x x x x
x x x x x x x x x x
>>> test_example_mle()
Original Distribution: k = kent(0.0, 0.0, 0.0, 1.0, 0.0)
Drawing 10000 samples from k
Moment estimation: k_me = kent(0.023855003559, 0.214069389101, -1.3132066017, 1.46267336487, 0.00126428116096)
Fitted with MLE: k_mle = kent(0.023855003559, 0.214069389101, -1.3132066017, 1.01193032035, 0.0097247745197)
Original Distribution: k = kent(0.75, 2.39159265359, 2.39159265359, 20.0, 0.0)
Drawing 10000 samples from k
Moment estimation: k_me = kent(0.749919453671, 2.38852872459, -1.88413666083, 19.9741138811, 0.084525921375)
Fitted with MLE: k_mle = kent(0.749919453671, 2.38852872459, -1.88413666083, 19.974220064, 0.0985914019577)
Original Distribution: k = kent(0.785398163397, 2.35619449019, -2.82743338823, 20.0, 2.0)
Drawing 10000 samples from k
Moment estimation: k_me = kent(0.786399024085, 2.3579915613, 0.279826232127, 19.9901236404, 1.74323732912)
Fitted with MLE: k_mle = kent(0.786399024085, 2.3579915613, 0.279826232127, 20.0399896422, 2.05207823993)
Original Distribution: k = kent(0.785398163397, 2.35619449019, -2.94524311274, 20.0, 5.0)
Drawing 10000 samples from k
Moment estimation: k_me = kent(0.781987502626, 2.35443398261, 0.198219177812, 19.3583928323, 3.99437892323)
Fitted with MLE: k_mle = kent(0.781987502626, 2.35443398261, 0.198219177812, 19.8021238599, 5.0054799622)
Original Distribution: k = kent(1.09955742876, 2.35619449019, -3.04341788317, 50.0, 25.0)
Drawing 10000 samples from k
Moment estimation: k_me = kent(1.09977161299, 2.35530110258, 0.100204406833, 37.2827008959, 14.8149016955)
Fitted with MLE: k_mle = kent(1.09977161299, 2.35530110258, 0.100204406833, 50.5020652363, 25.3106110745)
Original Distribution: k = kent(0.0, 0.0, 0.0981747704247, 50.0, 25.0)
Drawing 10000 samples from k
Moment estimation: k_me = kent(0.00316353001387, -2.71047863482, -0.332583051616, 37.0898399888, 14.7385957253)
Fitted with MLE: k_mle = kent(0.00316353001387, -2.71047863482, -0.332583051616, 50.3265157358, 25.2489782311)
>>> seed(2323)
>>> assert test_example_mle2(300)
Testing various combinations of kappa and beta for 300 samples.
MSE of MLE is higher than 0.7 times the moment estimate for beta/kappa <= 0.2
MSE of MLE is higher than moment estimate for beta/kappa >= 0.3
MSE of MLE is five times higher than moment estimates for beta/kappa >= 0.5
A test to ensure that the vectors gamma1 ... gamma3 are orthonormal
>>> for k in [
... kent(0.0, 0.0, 0.0, 20.0, 0.0),
... kent(-0.25*pi, -0.25*pi, 0.0, 20.0, 0.0),
... kent(-0.25*pi, -0.25*pi, 0.0, 20.0, 5.0),
... kent(0.0, 0.0, 0.5*pi, 10.0, 7.0),
... kent(0.0, 0.0, 0.5*pi, 0.1, 0.0),
... kent(0.0, 0.0, 0.5*pi, 0.1, 0.1),
... kent(0.0, 0.0, 0.5*pi, 0.1, 8.0),
... ]:
... assert(abs(sum(k.gamma1 * k.gamma2)) < 1E-14)
... assert(abs(sum(k.gamma1 * k.gamma3)) < 1E-14)
... assert(abs(sum(k.gamma3 * k.gamma2)) < 1E-14)
... assert(abs(sum(k.gamma1 * k.gamma1) - 1.0) < 1E-14)
... assert(abs(sum(k.gamma2 * k.gamma2) - 1.0) < 1E-14)
... assert(abs(sum(k.gamma3 * k.gamma3) - 1.0) < 1E-14)
A test to ensure that the pdf() and the pdf_max() are calculated
correctly.
>>> from numpy.random import seed
>>> from scipy.stats import norm as gauss
>>> seed(666)
>>> for k, pdf_value in [
... (kent(0.0, 0.0, 0.0, 20.0, 0.0), 3.18309886184),
... (kent(-0.25*pi, -0.25*pi, 0.0, 20.0, 0.0), 0.00909519370),
... (kent(-0.25*pi, -0.25*pi, 0.0, 20.0, 5.0), 0.09865564569),
... (kent(0.0, 0.0, 0.5*pi, 10.0, 7.0), 0.59668931662),
... (kent(0.0, 0.0, 0.5*pi, 0.1, 0.0), 0.08780030026),
... (kent(0.0, 0.0, 0.5*pi, 0.1, 0.1), 0.08768344462),
... (kent(0.0, 0.0, 0.5*pi, 0.1, 8.0), 0.00063128997),
... ]:
... assert abs(k.pdf(array([1.0, 0.0, 0.0])) - pdf_value) < 1E-8
... assert abs(k.log_pdf(array([1.0, 0.0, 0.0])) - log(pdf_value)) < 1E-8
... num_samples = 100000
... xs = gauss(0, 1).rvs((num_samples, 3))
... xs = divide(xs, reshape(norm(xs, 1), (num_samples, 1)))
... values = k.pdf(xs, normalize=False)
... fmax = k.pdf_max(normalize=False)
... assert all(values <= fmax)
... assert any(values > fmax*0.999)
... values = k.pdf(xs)
... fmax = k.pdf_max()
... assert all(values <= fmax)
... assert any(values > fmax*0.999)
These are tests to ensure that the coordinate transformations are done correctly
that the functions that generate instances of KentDistribution are consistent and
that the derivatives are calculated correctly. In addition some more orthogonality
testing is done.
>>> from kent_distribution import *
>>> from numpy.random import seed
>>> from scipy.stats import uniform
>>> def test_orth(k):
... # a bit more orthonormality testing for good measure
... assert(abs(sum(k.gamma1 * k.gamma2)) < 1E-14)
... assert(abs(sum(k.gamma1 * k.gamma3)) < 1E-14)
... assert(abs(sum(k.gamma3 * k.gamma2)) < 1E-14)
... assert(abs(sum(k.gamma1 * k.gamma1) - 1.0) < 1E-14)
... assert(abs(sum(k.gamma2 * k.gamma2) - 1.0) < 1E-14)
... assert(abs(sum(k.gamma3 * k.gamma3) - 1.0) < 1E-14)
...
>>> # generating some specific boundary values and some random values
>>> seed(666)
>>> upi, u2pi = uniform(0, pi), uniform(-pi, 2*pi)
>>> thetas, phis, psis = list(upi.rvs(925)), list(u2pi.rvs(925)), list(u2pi.rvs(925))
>>> for a in (0.0, 0.5*pi, pi):
... for b in (-pi, -0.5*pi, 0, 0.5*pi, pi):
... for c in (-pi, -0.5*pi, 0, 0.5*pi, pi):
... thetas.append(a)
... phis.append(b)
... psis.append(c)
...
>>> # testing consistency of angles (specifically kent())
>>> for theta, phi, psi in zip(thetas, phis, psis):
... k = kent(theta, phi, psi, 1.0, 1.0)
... assert abs(theta - k.theta) < 1E-12
... a = abs(phi - k.phi)
... b = abs(psi - k.psi)
... if theta != 0 and theta != pi:
... assert a < 1E-12 or abs(a-2*pi) < 1E-12
... assert b < 1E-12 or abs(b-2*pi) < 1E-12
... test_orth(k)
...
>>> # testing consistency of gammas and consistency of back and forth
>>> # calculations between gammas and angles (specifically kent2(), kent3() and kent4())
>>> kappas = gauss(0, 2).rvs(1000)**2
>>> betas = gauss(0, 2).rvs(1000)**2
>>> for theta, phi, psi, kappa, beta in zip(thetas, phis, psis, kappas, betas):
... gamma1, gamma2, gamma3 = KentDistribution.spherical_coordinates_to_gammas(theta, phi, psi)
... theta, phi, psi = KentDistribution.gammas_to_spherical_coordinates(gamma1, gamma2)
... gamma1a, gamma2a, gamma3a = KentDistribution.spherical_coordinates_to_gammas(theta, phi, psi)
... assert all(abs(gamma1a-gamma1) < 1E-12)
... assert all(abs(gamma2a-gamma2) < 1E-12)
... assert all(abs(gamma3a-gamma3) < 1E-12)
... k2 = kent2(gamma1, gamma2, gamma3, kappa, beta)
... assert all(abs(gamma1 - k2.gamma1) < 1E-12)
... assert all(abs(gamma2 - k2.gamma2) < 1E-12)
... assert all(abs(gamma3 - k2.gamma3) < 1E-12)
... A = gamma1*kappa
... B = gamma2*beta
... k3 = kent3(A, B)
... assert all(abs(gamma1 - k3.gamma1) < 1E-12)
... assert all(abs(gamma2 - k3.gamma2) < 1E-12)
... assert all(abs(gamma3 - k3.gamma3) < 1E-12)
... test_orth(k)
... gamma = array([
... [gamma1[0], gamma2[0], gamma3[0]],
... [gamma1[1], gamma2[1], gamma3[1]],
... [gamma1[2], gamma2[2], gamma3[2]],
... ])
... k4 = kent4(gamma, kappa, beta)
... assert all(k2.gamma1 == k4.gamma1)
... assert all(k2.gamma2 == k4.gamma2)
... assert all(k2.gamma3 == k4.gamma3)
...
>>> # testing special case for B with zero length (kent3())
>>> for theta, phi, psi, kappa, beta in zip(thetas, phis, psis, kappas, betas):
... gamma1, gamma2, gamma3 = KentDistribution.spherical_coordinates_to_gammas(theta, phi, psi)
... A = gamma1*kappa
... B = gamma2*0.0
... k = kent3(A, B)
... assert all(abs(gamma1 - k.gamma1) < 1E-12)
... test_orth(k)
...
>>> # testing the derivatives
>>> for theta, phi, psi, kappa, beta in zip(thetas, phis, psis, kappas, betas):
... k0 = kent(theta, phi, psi, kappa, beta)
... eps = 1E-7
... kk = kent(theta, phi, psi, kappa+eps, beta)
... kb = kent(theta, phi, psi, kappa, beta+eps)
... num_samples = 101
... xs = gauss(0, 1).rvs((num_samples, 3))
... xs = divide(xs, reshape(norm(xs, 1), (num_samples, 1)))
... for ys in [xs[0], xs[1:], k0.rvs(), k0.rvs(100)]:
... f = False
... for name, f0, fk, fb, fprime in (
... ("k0.pdf(ys), ", k0.pdf(ys), kk.pdf(ys), kb.pdf(ys), k0.pdf_prime(ys) ),
... ("k0.pdf(ys, f), ", k0.pdf(ys, f), kk.pdf(ys, f), kb.pdf(ys, f), k0.pdf_prime(ys, f) ),
... ("k0.log_pdf(ys), ", k0.log_pdf(ys), kk.log_pdf(ys), kb.log_pdf(ys), k0.log_pdf_prime(ys) ),
... ("k0.log_pdf(ys, f), ", k0.log_pdf(ys, f), kk.log_pdf(ys, f), kb.log_pdf(ys, f), k0.log_pdf_prime(ys, f) ),
... ("k0.normalize(), ", k0.normalize(), kk.normalize(), kb.normalize(), k0.normalize_prime() ),
... ("k0.log_normalize(), ", k0.log_normalize(), kk.log_normalize(), kb.log_normalize(), k0.log_normalize_prime() ),
... ("k0.log_likelihood(ys), ", k0.log_likelihood(ys), kk.log_likelihood(ys), kb.log_likelihood(ys), k0.log_likelihood_prime(ys)),
... ):
... fprime_approx = array([(fk-f0)/eps, (fb-f0)/eps])
... assert all(abs(fprime_approx - fprime) < 1E-2*(abs(fprime_approx)+abs(fprime)))
... assert sum(abs(fprime_approx - fprime) > 1E-4*(abs(fprime_approx)+abs(fprime))) < 5
...
"""
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
skdys/thermalspin
|
src/dynamic_simulation.py
|
<reponame>skdys/thermalspin
#!/usr/bin/env python3
"""
Wrapper for the real program
"""
from thermalspin.dynamic_simulation import dynamic_simulation
dynamic_simulation()
|
skdys/thermalspin
|
old_sph_version/skdylib/spherical_coordinates.py
|
<reponame>skdys/thermalspin
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Mathematical utility functions
"""
import numpy as np
from numba import jit
@jit(nopython=True, cache=True)
def xyz2sph(v):
"""
Convert a unit vector from cartesian coordinates to spherical coordinates
:param v: unit vector
:return: [theta, phi] polar coordinates
"""
return np.array([np.arccos(v[2]), np.arctan2(v[1], v[0])])
@jit(nopython=True, cache=True)
def sph2xyz(theta, phi):
"""
Convert spherical coordinates to unit vector
:param theta: theta angle
:param phi: phi angle
:return: (x, y, z) coordinates
"""
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
return np.array([x, y, z])
@jit(nopython=True, cache=True)
def sph_dot(theta1, theta2, delta_phi):
"""
Compute the dot product of two unit vectors in spherical coordinates
"""
return np.sin(theta1) * np.cos(delta_phi) * np.sin(theta2) + np.cos(theta1) * np.cos(theta2)
@jit(nopython=True, cache=True)
def sph_urand():
"""
Generate random unit vector in spherical coordinates
:return: (theta, phi) the two angles
"""
phi = np.random.uniform(0, 2 * np.pi)
u = np.random.uniform(0, 1)
theta = np.arccos(2 * u - 1)
return theta, phi
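# Quick sanity check (illustrative only, not part of the original module):
# converting back and forth recovers the angles for theta in (0, pi) and
# phi in (-pi, pi], e.g.
#
#     theta, phi = 0.3, 1.2
#     assert np.allclose(xyz2sph(sph2xyz(theta, phi)), [theta, phi])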
|
skdys/thermalspin
|
src/thermalspin/read_config.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Read the configuration file
"""
import json
import os
CONFIG_FILE_NAME = "config.json"
def read_config_file():
if os.path.isfile(CONFIG_FILE_NAME):
config_file = open(CONFIG_FILE_NAME, "r")
config = json.load(config_file)
else:
raise Exception("Missing config.json file")
processes_number = int(config["process_number"])
simulations_directory = config["simulations_directory"]
default_param_J = config["default_param_J"]
default_param_D = config["default_param_D"]
default_param_Hz = config["default_param_Hz"]
default_param_T = config["default_param_T"]
default_steps_number = int(config["default_steps_number"])
default_delta_snapshots = int(config["default_delta_snapshots"])
default_save_snapshots = bool(config["default_save_snapshots"])
default_params = dict(param_J=default_param_J, param_D=default_param_D, param_Hz=default_param_Hz,
param_T=default_param_T, steps_number=default_steps_number,
delta_snapshots=default_delta_snapshots, save_snapshots=default_save_snapshots)
return default_params, simulations_directory, processes_number
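# An example config.json matching the keys read above (the values shown are
# illustrative defaults, not part of the original repository):
#
#     {
#         "process_number": 4,
#         "simulations_directory": "simulations/",
#         "default_param_J": 1.0,
#         "default_param_D": 0.0,
#         "default_param_Hz": 0.0,
#         "default_param_T": 1.0,
#         "default_steps_number": 100000,
#         "default_delta_snapshots": 1000,
#         "default_save_snapshots": true
#     }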
|
skdys/thermalspin
|
src/thermalspin/spin_system.py
|
<gh_stars>1-10
#!/usr/bin/env python3
"""
Classical Spin System Monte Carlo simulator
"""
import numpy as np
from numba import jit
from skdylib.spherical_coordinates import xyz_sph_urand
USE_NUMBA = True
class SpinSystem:
"""
This class represent a spin system with Heisenberg interaction, DMI and Zeeman terms
"""
def __init__(self, initial_state, J, D, Hz, T):
self.J = J
self.D = D
self.Hz = Hz
self.T = T
self.beta = 1 / T
self.state = initial_state
self.nx = self.state.shape[0]
self.ny = self.state.shape[1]
self.nz = self.state.shape[2]
self.sites_number = self.nx * self.ny * self.nz
        # Set the best energy functions depending on the presence of an antisymmetric term
self.step_function = compute_step_Heisenberg if D == 0 else compute_step_DMI
self.compute_energy_function = compute_energy_Heisenberg if D == 0 else compute_energy_DMI
        # Compute energy and magnetization of the initial state
self.energy = self.compute_energy_function(self.state, self.nx, self.ny, self.nz, J, D, Hz)
self.total_magnetization = compute_magnetization(self.state)
@property
def magnetization(self):
"""
The magnetization of the system
:return: The value of the magnetization
"""
return self.total_magnetization / self.sites_number
def step(self):
"""
Evolve the system computing a step of Metropolis-Hastings Monte Carlo.
It actually calls the non-object oriented procedure.
"""
s, e, m = self.step_function(self.state, self.nx, self.ny, self.nz, self.J, self.D, self.Hz, self.beta,
self.energy,
self.total_magnetization)
self.state = s
self.energy = e
self.total_magnetization = m
# Compiled functions
@jit(nopython=USE_NUMBA, cache=USE_NUMBA)
def compute_magnetization(state):
"""
Compute the total magnetization
:return: [Mx, My, Mz] vector of mean magnetization
"""
mx = np.sum(state[:,:,:,0])
my = np.sum(state[:,:,:,1])
mz = np.sum(state[:,:,:,2])
return np.array([mx,my,mz])
# Pure Heisenberg model
@jit(nopython=USE_NUMBA, cache=USE_NUMBA)
def neighbhour_energy_Heisenberg(i, j, k, ii, jj, kk, state, J):
"""
Compute the energy of two adjacent spins due to the Heisenberg Hamiltonian
:return: the energy computed
"""
heisenberg_term = -J*np.dot(state[i, j, k], state[ii, jj, kk])
return heisenberg_term
@jit(nopython=USE_NUMBA, cache=USE_NUMBA)
def compute_energy_Heisenberg(state, nx, ny, nz, J, D, Hz):
"""
Compute the energy of the system with Heisenberg Hamiltonian
:return: The value of the energy
"""
energy_counter = 0.0
for i, j, k in np.ndindex(nx, ny, nz):
ii = (i + 1) % nx
energy_counter += neighbhour_energy_Heisenberg(i, j, k, ii, j, k, state, J)
jj = (j + 1) % ny
energy_counter += neighbhour_energy_Heisenberg(i, j, k, i, jj, k, state, J)
if nz > 1:
kk = (k + 1) % nz
energy_counter += neighbhour_energy_Heisenberg(i, j, k, i, j, kk, state, J)
energy_counter += - Hz * state[i, j, k, 2]
return energy_counter
@jit(nopython=USE_NUMBA, cache=USE_NUMBA)
def compute_step_Heisenberg(state, nx, ny, nz, J, D, Hz, beta, energy, total_magnetization):
"""
Evolve the system computing a step of Metropolis-Hastings Monte Carlo.
This non OOP function is accelerated trough jit compilation.
"""
# Select a random spin in the system
i = np.random.randint(0, nx)
j = np.random.randint(0, ny)
k = np.random.randint(0, nz)
# Compute the energy due to that spin
e0 = 0
ii = (i + 1) % nx
e0 += neighbhour_energy_Heisenberg(i, j, k, ii, j, k, state, J)
ii = (i - 1) % nx
e0 += neighbhour_energy_Heisenberg(i, j, k, ii, j, k, state, J)
jj = (j + 1) % ny
e0 += neighbhour_energy_Heisenberg(i, j, k, i, jj, k, state, J)
jj = (j - 1) % ny
e0 += neighbhour_energy_Heisenberg(i, j, k, i, jj, k, state, J)
if nz > 1:
kk = (k + 1) % nz
e0 += neighbhour_energy_Heisenberg(i, j, k, i, j, kk, state, J)
kk = (k - 1) % nz
e0 += neighbhour_energy_Heisenberg(i, j, k, i, j, kk, state, J)
e0 += -Hz * state[i, j, k, 2]
# Generate a new random direction and compute energy due to the spin in the new direction
old_spin = state[i, j, k].copy()
state[i, j, k] = xyz_sph_urand()
e1 = 0
ii = (i + 1) % nx
e1 += neighbhour_energy_Heisenberg(i, j, k, ii, j, k, state, J)
ii = (i - 1) % nx
e1 += neighbhour_energy_Heisenberg(i, j, k, ii, j, k, state, J)
jj = (j + 1) % ny
e1 += neighbhour_energy_Heisenberg(i, j, k, i, jj, k, state, J)
jj = (j - 1) % ny
e1 += neighbhour_energy_Heisenberg(i, j, k, i, jj, k, state, J)
if nz > 1:
kk = (k + 1) % nz
e1 += neighbhour_energy_Heisenberg(i, j, k, i, j, kk, state, J)
kk = (k - 1) % nz
e1 += neighbhour_energy_Heisenberg(i, j, k, i, j, kk, state, J)
e1 += -Hz * state[i, j, k, 2]
# Apply Metropolis algorithm
w = np.exp(beta * (e0 - e1))
dice = np.random.uniform(0, 1)
if dice < w:
energy += (e1 - e0)
total_magnetization += state[i, j, k] - old_spin
else:
state[i,j,k] = old_spin
return state, energy, total_magnetization
# Heisenberg interaction + DMI
def interaction_matrices(J, D):
Axp = np.array(([-J, 0, 0],
[0, -J, -D],
[0, +D, -J]))
Axn = np.array(([-J, 0, 0],
[0, -J, +D],
[0, -D, -J]))
Ayp = np.array(([-J, 0, +D],
[0, -J, 0],
[-D, 0, -J]))
Ayn = np.array(([-J, 0, -D],
[0, -J, 0],
[+D, 0, -J]))
Azp = np.array(([-J, -D, 0],
[+D, -J, 0],
[0, 0, -J]))
Azn = np.array(([-J, +D, 0],
[-D, -J, 0],
[0, 0, -J]))
return Axp, Axn, Ayp, Ayn, Azp, Azn
@jit(nopython=USE_NUMBA, cache=USE_NUMBA)
def compute_energy_DMI(state, nx, ny, nz, J, D, Hz):
"""
Compute the energy of the system with Heisenberg, DMI and Zeeman term
:return: The value of the energy
"""
energy_counter = 0.0
Axp, Axn, Ayp, Ayn, Azp, Azn = interaction_matrices(J, D)
for i, j, k in np.ndindex(nx, ny, nz):
ii = (i + 1) % nx
energy_counter += state[i, j, k].T.dot(Axp).dot(state[ii, j, k])
jj = (j + 1) % ny
energy_counter += state[i, j, k].T.dot(Ayp).dot(state[i, jj, k])
if nz > 1:
kk = (k + 1) % nz
energy_counter += state[i, j, k].T.dot(Azp).dot(state[i, j, kk])
energy_counter += - Hz * state[i, j, k, 2]
return energy_counter
@jit(nopython=USE_NUMBA, cache=USE_NUMBA)
def compute_step_DMI(state, nx, ny, nz, J, D, Hz, beta, energy, total_magnetization):
"""
Evolve the system computing a step of Metropolis-Hastings Monte Carlo.
"""
Axp, Axn, Ayp, Ayn, Azp, Azn = interaction_matrices(J, D)
# Select a random spin in the system
i = np.random.randint(0, nx)
j = np.random.randint(0, ny)
k = np.random.randint(0, nz)
# Compute the energy due to that spin
e0 = 0
ii = (i + 1) % nx
e0 += state[i, j, k].T.dot(Axp).dot(state[ii, j, k])
ii = (i - 1) % nx
e0 += state[i, j, k].T.dot(Axn).dot(state[ii, j, k])
jj = (j + 1) % ny
e0 += state[i, j, k].T.dot(Ayp).dot(state[i, jj, k])
jj = (j - 1) % ny
e0 += state[i, j, k].T.dot(Ayn).dot(state[i, jj, k])
if nz > 1:
kk = (k + 1) % nz
e0 += state[i, j, k].T.dot(Azp).dot(state[i, j, kk])
kk = (k - 1) % nz
e0 += state[i, j, k].T.dot(Azn).dot(state[i, j, kk])
e0 += -Hz * state[i, j, k, 2]
# Generate a new random direction and compute energy due to the spin in the new direction
old_spin = state[i, j, k].copy()
state[i, j, k] = xyz_sph_urand()
e1 = 0
ii = (i + 1) % nx
e1 += state[i, j, k].T.dot(Axp).dot(state[ii, j, k])
ii = (i - 1) % nx
e1 += state[i, j, k].T.dot(Axn).dot(state[ii, j, k])
jj = (j + 1) % ny
e1 += state[i, j, k].T.dot(Ayp).dot(state[i, jj, k])
jj = (j - 1) % ny
e1 += state[i, j, k].T.dot(Ayn).dot(state[i, jj, k])
if nz > 1:
kk = (k + 1) % nz
e1 += state[i, j, k].T.dot(Azp).dot(state[i, j, kk])
kk = (k - 1) % nz
e1 += state[i, j, k].T.dot(Azn).dot(state[i, j, kk])
e1 += -Hz * state[i, j, k, 2]
# Apply Metropolis algorithm
w = np.exp(beta * (e0 - e1))
dice = np.random.uniform(0, 1)
if dice < w:
energy += (e1 - e0)
total_magnetization += state[i, j, k] - old_spin
else:
state[i, j, k] = old_spin
return state, energy, total_magnetization
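# A minimal usage sketch (illustrative only, not part of the original module):
# build a random state of unit spins, run a few Metropolis steps and inspect the
# observables.  The (nx, ny, nz, 3) state layout is assumed from the code above.
if __name__ == "__main__":
    nx = ny = nz = 4
    state = np.random.normal(size=(nx, ny, nz, 3))
    state /= np.linalg.norm(state, axis=-1, keepdims=True)  # normalize to unit spins
    system = SpinSystem(state, J=1.0, D=0.0, Hz=0.0, T=1.5)
    for _ in range(1000):
        system.step()
    print("Energy:", system.energy)
    print("Magnetization:", system.magnetization)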
|
lucasjurado/Alura-IO
|
contatos_uteis.py
|
<gh_stars>0
import csv, pickle, json
from contato import Contato
def csv_para_contatos(caminho, encoding='latin_1'):
contatos = []
with open(caminho, encoding=encoding) as arquivo:
leitor = csv.reader(arquivo)
for linha in leitor:
id = linha[0]
nome = linha[1]
email = linha[2]
contato = Contato(id, nome, email)
contatos.append(contato)
return contatos
# Pickle in Python is primarily used in serializing and deserializing a Python object structure.
# In other words, it’s the process of converting a Python object into a byte stream to store it
# in a file/database, maintain program state across sessions, or transport data over the network.
# The pickled byte stream can be used to re-create the original object hierarchy by unpickling the stream.
# This whole process is similar to object serialization in Java or .Net.
def contatos_para_pickle(contatos, caminho):
with open(caminho, mode='wb') as arquivo:
pickle.dump(contatos, arquivo)
def pickle_para_contatos(caminho):
with open(caminho, mode='rb') as arquivo:
contatos = pickle.load(arquivo)
return contatos
# Turn the object into JSON and send it over the web. That way, systems written
# in other languages will be able to interpret the content.
# JSON is nowadays one of the most widely used formats for communication between systems,
# although other formats such as XML or YAML can also be used.
def contatos_para_json(contatos, caminho):
with open(caminho, mode='w') as arquivo:
json.dump(contatos, arquivo, default=_contato_para_json)
def _contato_para_json(contato):
return contato.__dict__
def json_para_contatos(caminho):
contatos = []
with open(caminho, mode='r') as arquivo:
contatos_json = json.load(arquivo)
for contato in contatos_json:
c = Contato(contato['id'], contato['nome'], contato['email'])
contatos.append(c)
return contatos
|
lucasjurado/Alura-IO
|
contato.py
|
class Contato:
def __init__(self, id, nome, email):
self.id = id
self.nome = nome
self.email = email
# Taking a more object-oriented approach, we can use a data access object that becomes
# responsible for the communication between the Python world and the world of files.
# This object is known as a DAO, or Data Access Object (a minimal sketch follows below).
#
# Anyone who has worked with databases probably knows this persistence pattern.
# The DAO is a design pattern widely used by those looking for a way to access their data.
# It is commonly used to access a database and perform the create, read, update and
# delete operations. Besides that, it can also be used to save and
# retrieve data from files, for example.
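# A minimal sketch of the DAO idea described above (illustrative only, not part
# of the original course code): it hides the persistence details behind simple
# methods.  The JSON format and the default file path are assumptions.
import json
class ContatoDAO:
    def __init__(self, caminho='contatos.json'):
        self.caminho = caminho
    def salvar(self, contatos):
        # Persist a list of Contato objects as JSON.
        with open(self.caminho, mode='w') as arquivo:
            json.dump([contato.__dict__ for contato in contatos], arquivo)
    def buscar_todos(self):
        # Recreate Contato instances from the stored JSON.
        with open(self.caminho, mode='r') as arquivo:
            return [Contato(**dados) for dados in json.load(arquivo)]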
|
lucasjurado/Alura-IO
|
main.py
|
<gh_stars>0
# Opening a file with an encoding different from the one it was written with can garble some characters.
# The try/except block below handles errors in the indented block, such as a missing file or lack of permission.
import contatos_uteis
try:
contatos = contatos_uteis.csv_para_contatos('contatos.csv')
# contatos_uteis.contatos_para_pickle(contatos, 'contatos.pickle')
#
# contatos = contatos_uteis.pickle_para_contatos('contatos.pickle')
#
# contatos_uteis.contatos_para_json(contatos, 'contatos.json')
# contatos = contatos_uteis.json_para_contatos('contatos.json')
for contato in contatos:
print(f'{contato.id} - {contato.nome} - {contato.email}')
print(type(contatos)) # <class 'list'>
except FileNotFoundError:
print('Arquivo não encontrado!')
except PermissionError:
print('Sem permissão para escrita!')
|
lucasjurado/Alura-IO
|
testa_escrita.py
|
# Whenever we open a file in 'w' mode, Python truncates it, that is, it wipes its contents.
# To add content to a file without erasing what is already written, we have to use 'a' mode
# (see the short append-mode sketch below).
# (+) --> update mode for the file
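# A short append-mode sketch (illustrative only, not executed here); mode='a'
# appends after the existing content instead of truncating the file:
#
#     with open('contatos-lista.csv', encoding='latin_1', mode='a') as extra:
#         extra.write('15,Bruno,<EMAIL>\n')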
arquivo_contatos = open('contatos-lista.csv', encoding='latin_1', mode='w+')
contatos = ['11,Carol,<EMAIL>\n',
'12,Ana,<EMAIL>\n',
'13,Thais,<EMAIL>\n',
'14,Felipe,<EMAIL>\n']
# To insert a list of contacts, we must iterate over it
for contato in contatos:
arquivo_contatos.write(contato)
arquivo_contatos.flush()  # Forces the data to be written to the .csv file
arquivo_contatos.seek(26)  # Moves the pointer back to the start of the line to be overwritten
arquivo_contatos.write('12,Ana,<EMAIL>\n'.upper())  # Overwriting the Ana line
arquivo_contatos.flush()
arquivo_contatos.seek(0)
for linha in arquivo_contatos:
print(linha, end='')
|
dpoulopoulos/forma
|
forma/detector.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/detector.ipynb (unless otherwise specified).
__all__ = ['FormatDetector']
# Cell
import numpy as np
import pandas as pd
from typing import Any, Dict, Callable, Union
from tqdm import tqdm
from .judge import FormatJudge
from .utils import PatternGenerator
# Cell
class FormatDetector:
    def __init__(self, skip: list = None):
        # Default to an empty list so the membership checks below do not fail when skip is None.
        self.skip = skip if skip is not None else []
    def fit(self, df: pd.DataFrame, generator: Union[PatternGenerator, Dict[str, PatternGenerator]],
            n: int = 3, dim: int = 1):
self.judges = {}
self.df = df
with tqdm(total=len(self.df.columns)) as pbar:
if isinstance(generator, PatternGenerator):
for col in self.df.columns:
if col in self.skip:
continue
col_values = self.df[col].tolist()
format_judge = FormatJudge(generator, n, dim)
format_judge.fit(col_values)
self.judges[col] = format_judge
pbar.update(1)
else:
for col in self.df.columns:
if col in self.skip:
continue
col_values = self.df[col].tolist()
gen = generator.get(col, PatternGenerator())
format_judge = FormatJudge(gen, n, dim)
format_judge.fit(col_values)
self.judges[col] = format_judge
pbar.update(1)
    def detect(self, reduction: Callable = np.min, softmax: bool = True) -> pd.DataFrame:
scores = []
with tqdm(total=len(self.df)) as pbar:
for index, row in self.df.iterrows():
tuple_score = []
for col in self.df.columns:
if col in self.skip:
continue
judge = self.judges[col]
score = np.mean(judge(row[col]))
tuple_score.append(score)
if softmax:
tuple_score = np.exp(tuple_score)
softmax_tuple_score = [score / sum(tuple_score) for score in tuple_score]
if reduction == np.ptp:
scores.append(reduction(softmax_tuple_score))
else:
scores.append(1 - reduction(softmax_tuple_score))
else:
if reduction == np.ptp:
scores.append(reduction(tuple_score))
else:
scores.append(1 - reduction(tuple_score))
pbar.update(1)
assessed_df = self.df.copy()
assessed_df['p'] = scores
return assessed_df
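# A minimal usage sketch (illustrative only, not part of the original module);
# `df` is assumed to be a pandas DataFrame with string-like columns:
#
#     detector = FormatDetector(skip=[])
#     detector.fit(df, PatternGenerator(), n=3, dim=1)
#     assessed = detector.detect(reduction=np.min, softmax=True)
#     # rows with the highest 'p' are the most suspicious format-wise
#     suspicious = assessed.sort_values('p', ascending=False).head(10)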
|
dpoulopoulos/forma
|
forma/utils.py
|
<gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils.ipynb (unless otherwise specified).
__all__ = ['PatternGenerator']
# Cell
import re
from typing import Tuple, Any
from itertools import groupby
from collections import defaultdict
# Cell
class PatternGenerator:
"""Generates a generic pattern given a value and a generalization language."""
def __init__(self, alpha: str = 'type', digit: str = 'type', other: str = 'type',
preserve_space: bool = False, add_length: bool = True):
self.alpha = alpha
self.digit = digit
self.other = other
        self.preserve_space = preserve_space
self.add_length = add_length
def __call__(self, value: Any) -> str:
pattern = ''
value = list(str(value))
for c in value:
pattern += self._get_representation(c)
grouped_pattern = [''.join(g) for _, g in groupby(pattern)]
if self.add_length:
return ''.join([f'{v[0]}({len(v)})' for v in grouped_pattern])
return ''.join([s[0] for s in grouped_pattern])
def _get_representation(self, c: str):
if c.isalpha():
return self._get_alpha_representation(c)
if c.isnumeric():
return self._get_digit_representation(c)
return self._get_other_representation(c)
def _get_alpha_representation(self, c: str):
if self.alpha == 'root':
return 'A'
if self.alpha == 'type':
return 'L'
if self.alpha == 'case':
return 'u' if c.isupper() else 'l'
return c
def _get_digit_representation(self, c: str):
if self.digit == 'root':
return 'A'
if self.digit == 'type':
return 'D'
return c
def _get_other_representation(self, c: str):
if c == ' ':
return ' ' if self.preserve_space else ''
if self.other == 'root':
return 'A'
if self.other == 'type':
return 'S'
return c
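# Quick illustration (not part of the original module): with the default settings
# every letter maps to 'L', every digit to 'D', and run lengths are appended, so
# PatternGenerator()('Abc123') yields 'L(3)D(3)'.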
|
dpoulopoulos/forma
|
forma/_nbdev.py
|
<gh_stars>0
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"FormatDetector": "detector.ipynb",
"FormatJudge": "judge.ipynb",
"PatternGenerator": "utils.ipynb"}
modules = ["detector.py",
"judge.py",
"utils.py"]
doc_url = "https://dpoulopoulos.github.io/forma/"
git_url = "https://github.com/dpoulopoulos/forma/tree/master/"
def custom_doc_links(name): return None
|
dpoulopoulos/forma
|
forma/judge.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/judge.ipynb (unless otherwise specified).
__all__ = ['FormatJudge']
# Cell
import heapq
import numpy as np
from typing import Any, List
from nltk.lm import MLE
from nltk.util import ngrams
from nltk.lm.preprocessing import flatten
from nltk.lm.preprocessing import pad_both_ends
from .utils import PatternGenerator
# Cell
class FormatJudge:
"""Detects format errors on a tabular data set."""
def __init__(self, generator: PatternGenerator, n: int = 3, dim: int = 1):
self.generator = generator
self.n = n
self.dim = dim
def __call__(self, o: Any) -> list:
return self.judge(o)
def fit(self, values: List[Any]):
patterns = [self.generator(v) for v in values]
padded_patterns = [pad_both_ends(p, n=self.n) for p in patterns]
ngrams_ = [ngrams(pp, n=self.n) for pp in padded_patterns]
self.vocab = list(flatten(pad_both_ends(p, n=self.n) for p in patterns))
self.model = MLE(self.n)
self.model.fit(ngrams_, self.vocab)
def judge(self, o: Any) -> list:
scores = []
p = self.generator(o)
p = list(pad_both_ends(p, n=self.n))
for i,v in enumerate(p):
if i < self.n-1:
continue
letters = []
for j in range(i - (self.n-1), i):
letters.append(p[j])
scores.append(self.model.score(v, letters))
return heapq.nsmallest(self.dim, scores)
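# A minimal usage sketch (illustrative only, not part of the original module);
# the column values below are assumptions:
#
#     judge = FormatJudge(PatternGenerator(), n=3, dim=1)
#     judge.fit(["2021-01-01", "2021-02-15", "2021-03-31"])
#     judge("2021-04-30")   # high n-gram scores -> format looks consistent
#     judge("30/04/2021")   # low scores -> likely a format error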
|
lig/picket2014
|
picket/documents.py
|
<gh_stars>1-10
from datetime import datetime
from django.db.models import permalink
from mongoengine.document import Document
from mongoengine.fields import (SequenceField, StringField, ListField,
ReferenceField, DateTimeField)
from users.documents import User
class Project(Document):
meta = {
'ordering': ['name']
}
name = StringField(required=True, unique=True)
def __str__(self):
return self.name
class Issue(Document):
meta = {
'ordering': ['-id']
}
id = SequenceField(primary_key=True)
subject = StringField(required=True)
project = ReferenceField(Project, required=True)
creator = ReferenceField(User)
comments = ListField(StringField())
created = DateTimeField(default=lambda: datetime.utcnow())
modified = DateTimeField(default=lambda: datetime.utcnow())
def __str__(self):
return '#{}: {}'.format(self.id, self.subject)
@permalink
def get_absolute_url(self):
return ('issue', (), {'project': self.project.id, 'n': self.id})
def save(self, *args, **kwargs):
self.modified = datetime.utcnow()
return Document.save(self, *args, **kwargs)
|
lig/picket2014
|
users/management/commands/picketuseradd.py
|
<reponame>lig/picket2014<gh_stars>1-10
from django.core.management.base import BaseCommand
from ...documents import User
class Command(BaseCommand):
args = '<email> <display_name> <password>'
help = 'Creates picket admin user'
def handle(self, *args, **options):
email, display_name, password = args
user = User.create_user(email, display_name, password)
print('User with id {} created.'.format(user.id))
|
lig/picket2014
|
users/auth.py
|
<reponame>lig/picket2014
from .documents import User
class Backend(object):
def authenticate(self, username=None, email=None, password=None):
email = User.normalize_email(username or email)
user = User.objects(email=email.lower()).first()
if user and password and user.check_password(password):
return user
def get_user(self, user_id):
return User.objects.with_id(user_id)
|
lig/picket2014
|
picket/settings/local_sample.py
|
from mongoengine import connect
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = []
connect('picket')
|
lig/picket2014
|
picket/settings/dist.py
|
<reponame>lig/picket2014
"""
Django settings for picket project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.abspath(
os.path.join(os.path.pardir, os.path.pardir, os.path.dirname(__file__)))
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'mongoengine.django.mongo_auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'picket',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'picket.urls'
WSGI_APPLICATION = 'picket.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS += (
'picket.context_processors.projects',
)
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# MongoEngine
AUTHENTICATION_BACKENDS = (
'users.auth.Backend',
)
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
MONGOENGINE_USER_DOCUMENT = 'users.documents.User'
SESSION_ENGINE = 'mongoengine.django.sessions'
SESSION_SERIALIZER = 'mongoengine.django.sessions.BSONSerializer'
|
lig/picket2014
|
picket/forms.py
|
from django import forms
from .documents import Project
class ProjectForm(forms.Form):
name = forms.CharField()
class IssueForm(forms.Form):
project = forms.ModelChoiceField(queryset=Project.objects.all())
subject = forms.CharField()
text = forms.CharField(required=False, widget=forms.Textarea)
class CommentForm(forms.Form):
text = forms.CharField(required=False, widget=forms.Textarea)
|
lig/picket2014
|
users/documents.py
|
from django.utils.translation import ugettext_lazy as _
from mongoengine.django.auth import make_password, check_password
from mongoengine.django.utils import datetime_now
from mongoengine.document import Document
from mongoengine.fields import (StringField, EmailField, DateTimeField,
BooleanField)
class User(Document):
"""A User document that aims to mirror most of the API specified by Django
at http://docs.djangoproject.com/en/dev/topics/auth/#users
"""
email = EmailField(verbose_name=_('e-mail address'))
display_name = StringField(max_length=255, verbose_name=_('first name'))
password = StringField(
max_length=128,
verbose_name=_('password'),
help_text=_(
"Use '[algo]$[iterations]$[salt]$[hexdigest]' or use the"
" <a href=\"password/\">change password form</a>."))
last_login = DateTimeField(
default=datetime_now,
verbose_name=_('last login'))
date_joined = DateTimeField(
default=datetime_now,
verbose_name=_('date joined'))
is_active = BooleanField(default=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['display_name']
meta = {
'allow_inheritance': True,
'indexes': [
{'fields': ['email'], 'unique': True, 'sparse': True}
]
}
def __unicode__(self):
return self.display_name
def get_full_name(self):
return self.display_name
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def set_password(self, raw_password):
"""Sets the user's password - always use this rather than directly
assigning to :attr:`~mongoengine.django.auth.User.password` as the
password is hashed before storage.
"""
self.password = make_password(raw_password)
self.save()
return self
def check_password(self, raw_password):
"""Checks the user's password against a provided password - always use
this rather than directly comparing to
:attr:`~mongoengine.django.auth.User.password` as the password is
hashed before storage.
"""
return check_password(raw_password, self.password)
@classmethod
def normalize_email(cls, email):
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
@classmethod
def create_user(cls, email, display_name, password):
"""Create (and save) a new user with the given email address, display
name and password.
"""
now = datetime_now()
email = cls.normalize_email(email)
user = cls(email=email, display_name=display_name, date_joined=now)
user.set_password(password)
user.save()
return user
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
|
lig/picket2014
|
users/views.py
|
from actionviews.base import TemplateView
from actionviews.decorators import action_decorator
from django.contrib.auth.decorators import login_required
from mongoengine.django.shortcuts import get_document_or_404
from .documents import User
class UserView(TemplateView):
@action_decorator(login_required)
def do_profile(self:''):
return {'profile': self.request.user}
def do_user(self:'', user):
return {'profile': get_document_or_404(User, pk=user)}
|
lig/picket2014
|
picket/context_processors.py
|
<gh_stars>1-10
from .documents import Project
def projects(request):
return {'projects': Project.objects.all()}
|
lig/picket2014
|
users/urls.py
|
<reponame>lig/picket2014
from django.conf.urls import patterns, include, url
from users.views import UserView
urlpatterns = patterns('',
url(r'^', include('django.contrib.auth.urls')),
url(r'^profile/', include(UserView.urls)),
)
|
lig/picket2014
|
picket/settings/__init__.py
|
from .dist import *
try:
from .local import *
except ImportError:
from .local_sample import *
from warnings import warn
warn('Cannot import local settings. Using local_sample instead.')
|
lig/picket2014
|
picket/urls.py
|
<reponame>lig/picket2014
from django.conf.urls import patterns, include, url
from .views import ProjectView, IssueView
urlpatterns = patterns('',
url(r'^', include(ProjectView.urls)),
url(r'^', include(IssueView.urls)),
url(r'^accounts/', include('users.urls')),
)
|
lig/picket2014
|
picket/views.py
|
from actionviews import TemplateView
from actionviews.decorators import action_decorator, child_view
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from mongoengine.django.shortcuts import get_document_or_404
from .documents import Issue, Project
from .forms import IssueForm, ProjectForm, CommentForm
class IssueView(TemplateView):
def get_queryset(self):
qs = Issue.objects.all()
        project = self.context.get('project')
if project:
qs = qs.filter(project=project)
return qs
def do_list(self:'', page:r'\d+'=1):
issues = self.get_queryset()
return {'issues': issues}
def do_issue(self, n:r'\d+'):
return {
'issue': get_document_or_404(Issue, id=int(n)),
'comment_form': CommentForm(),
}
@action_decorator(login_required)
def do_create(self):
request = self.request
if request.method == 'POST':
form = IssueForm(request.POST)
if form.is_valid():
issue = Issue(
project=form.cleaned_data['project'],
subject=form.cleaned_data['subject'],
creator=request.user,
comments=[form.cleaned_data['text']])
issue.save()
context_project = self.context.get('project')
return (context_project and
redirect('list', context_project.id) or
redirect('list'))
else:
form = IssueForm(initial={'project': self.context.get('project')})
return {'form': form}
@action_decorator(login_required)
def do_comment(self, issue:r'\d+'):
request = self.request
issue = self.do_issue(issue)['issue']
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
issue.comments.append(form.cleaned_data['text'])
issue.save()
return redirect(issue.get_absolute_url())
class ProjectView(TemplateView):
@child_view(IssueView)
def do_project(self:'', project):
return {'project': get_document_or_404(Project, pk=project)}
@action_decorator(login_required)
def do_project_create(self):
# @todo: allow this method to be called `do_create`
request = self.request
if request.method == 'POST':
form = ProjectForm(request.POST)
if form.is_valid():
project = Project(**form.cleaned_data)
project.save()
return redirect('list', project=project.id)
else:
form = ProjectForm()
return {'form': form}
|
dreamorosi/quip-spreadsheet
|
quip_spreadsheet/quip.py
|
<gh_stars>0
from __future__ import annotations
import logging
from typing import Any, List, Dict, Optional, Union
from json.decoder import JSONDecodeError
from dataclasses import dataclass, field
from enum import Enum
import xml.etree.ElementTree
import requests
from requests.exceptions import Timeout, ConnectionError, HTTPError
class QuipSpreadsheetClientException(Exception):
"""Generic Exception raised when an error occurs in the client."""
class QuipApiTimeoutException(QuipSpreadsheetClientException):
"""Exception raised when a Timeout occurs while making a request to the Quip API."""
class QuipApiConnectionException(QuipSpreadsheetClientException):
"""Exception raised when a ConnectionError occurs while making a request to the Quip API."""
class QuipApiHTTPErrorException(QuipSpreadsheetClientException):
"""Exception raised when an HTTPError occurs while making a request to the Quip API."""
class QuipApiNotAJSON(QuipSpreadsheetClientException):
"""Exception raised when the response from the Quip API is not a JSON."""
class QuipUserException(QuipSpreadsheetClientException):
"""Exception raised when it's not possible to parse the current authenticated user."""
class QuipFolderException(QuipSpreadsheetClientException):
"""Exception raised when it's not possible to parse a given folder."""
class QuipThreadException(QuipSpreadsheetClientException):
"""Exception raised when it's not possible to parse a given thread."""
class QuipSpreadsheetDomParsingException(QuipSpreadsheetClientException):
"""Exception raised when it's not possible to parse the DOM of a given spreadsheet."""
class QuipSpreadsheetPageNotFoundException(QuipSpreadsheetClientException):
"""Exception raised when the the page requested does not exist in a given spreadsheet."""
@dataclass
class QuipUser:
name: str
id: str
is_robot: bool
affinity: float
desktop_folder_id: str
archive_folder_id: str
starred_folder_id: str
private_folder_id: str
trash_folder_id: str
shared_folder_ids: List[str]
group_folder_ids: List[str]
profile_picture_url: str
subdomain: str
url: str
@dataclass
class QuipSearchResults:
threads: List[QuipThread] = field(default_factory=list)
spreadsheets: List[QuipSpreadsheet] = field(default_factory=list)
class API_ROUTES(Enum):
CURRENT_USER = "users/current"
FOLDERS = "folders/%s"
THREADS = "threads/%s"
SEARCH_THREADS = "threads/search"
class QuipClient:
def __init__(
self,
access_token: str,
base_url: str,
request_timeout: int = 10,
logger: Any = logging.getLogger(__name__),
):
self.__access_token = access_token
self.__auth = f"Bearer {access_token}"
self.base_url = base_url
self.url = f"{base_url}1" if base_url.endswith("/") else f"{base_url}/1"
self.request_timeout = request_timeout
self.logger = logger
def _make_request(self, path: str, kwargs: dict = {}) -> dict:
kwargs = {
"headers": {"Authorization": self.__auth},
"timeout": self.request_timeout,
**kwargs,
}
http_code = 0
try:
self.logger.debug("Making request to %s/%s.", self.base_url, path)
res = requests.get(f"{self.url}/{path}", **kwargs)
http_code = res.status_code
res.raise_for_status()
data = res.json()
except Timeout as exc:
self.logger.error("Timeout of %ss has been exceeded.", self.request_timeout)
self.logger.debug(exc)
raise QuipApiTimeoutException from exc
except ConnectionError as exc:
self.logger.error("A connection error occurred.")
self.logger.debug(exc)
raise QuipApiConnectionException from exc
except HTTPError as exc:
self.logger.error(
"An HTTP Error occurred while making the request: %s.", http_code
)
self.logger.debug(exc)
raise QuipApiHTTPErrorException from exc
except JSONDecodeError as exc:
self.logger.error("The response returned by the API is not a valid JSON.")
self.logger.debug(exc)
raise QuipApiNotAJSON from exc
return data
def get_authenticated_user(self) -> QuipUser:
"""Returns the user corresponding to our access token."""
data = self._make_request(API_ROUTES.CURRENT_USER.value)
try:
user = QuipUser(**data)
except TypeError as exc:
self.logger.error("Unable to parse current user.")
self.logger.debug(exc)
raise QuipUserException from exc
return user
def get_folder(self, folder_id: str) -> QuipFolder:
"""Returns the folder with the given ID."""
data = self._make_request(API_ROUTES.FOLDERS.value % folder_id)
try:
folder = QuipFolder(
access_token=self.__access_token,
base_url=self.base_url,
request_timeout=self.request_timeout,
logger=self.logger,
**data.get("folder"),
member_ids=data["member_ids"],
children=data["children"],
)
except (TypeError, KeyError) as exc:
self.logger.error("Unable to parse retrieved folder.")
self.logger.debug(exc)
raise QuipFolderException from exc
return folder
def __make_thread(self, data: Dict[str, Any]) -> Union[QuipThread, QuipSpreadsheet]:
try:
if "document_id" not in data.get("thread"):
data["thread"]["document_id"] = None
if data.get("thread").get("type") == "spreadsheet":
return QuipSpreadsheet(
access_token=self.__access_token,
base_url=self.base_url,
request_timeout=self.request_timeout,
logger=self.logger,
**data.get("thread"),
access_levels=data.get("access_levels"),
expanded_user_ids=data.get("expanded_user_ids"),
user_ids=data.get("user_ids"),
shared_folder_ids=data.get("shared_folder_ids"),
invited_user_emails=data.get("invited_user_emails"),
html=data.get("html"),
)
else:
return QuipThread(
access_token=self.__access_token,
base_url=self.base_url,
request_timeout=self.request_timeout,
logger=self.logger,
**data.get("thread"),
access_levels=data.get("access_levels"),
expanded_user_ids=data.get("expanded_user_ids"),
user_ids=data.get("user_ids"),
shared_folder_ids=data.get("shared_folder_ids"),
invited_user_emails=data.get("invited_user_emails"),
html=data.get("html"),
)
except (TypeError, KeyError) as exc:
self.logger.error("Unable to parse retrieved thread.")
self.logger.debug(exc)
raise QuipThreadException from exc
def get_thread(self, thread_id: str) -> Union[QuipThread, QuipSpreadsheet]:
"""Returns the thread with the given ID."""
data = self._make_request(API_ROUTES.THREADS.value % thread_id)
thread = self.__make_thread(data)
return thread
def search_threads(
self, query: str, count: int = None, only_match_titles: bool = True
) -> QuipSearchResults:
"""Search threads by query."""
params = {"query": query, "only_match_titles": only_match_titles}
if count is not None:
params.update({"count": count})
data = self._make_request(API_ROUTES.SEARCH_THREADS.value, {"params": params})
results = QuipSearchResults()
for thread_data in data:
search_result = self.__make_thread(thread_data)
if isinstance(search_result, QuipSpreadsheet):
results.spreadsheets.append(search_result)
elif isinstance(search_result, QuipThread):
results.threads.append(search_result)
return results
@dataclass
class QuipFolder(QuipClient):
access_token: str
base_url: str
request_timeout: int
id: str
title: str
creator_id: str
created_usec: int
updated_usec: int
member_ids: List[str]
children: List[Dict[str, str]]
logger: Any = logging.getLogger(__name__)
def __post_init__(self):
super().__init__(
self.access_token, self.base_url, self.request_timeout, self.logger
)
def resolve_children(self):
resolved_children = (
list()
) # type: List[Union[QuipThread, QuipFolder, QuipSpreadsheet]]
for child in self.children:
if "folder_id" in child:
resolved_children.append(self.get_folder(child["folder_id"]))
elif "thread_id" in child:
resolved_children.append(self.get_thread(child["thread_id"]))
else:
continue
return resolved_children
@dataclass
class QuipThread(QuipClient):
access_token: str
base_url: str
request_timeout: int
author_id: str
created_usec: int
id: str
link: str
thread_class: str
title: str
type: str
updated_usec: str
access_levels: Optional[Dict[str, Dict[str, str]]]
expanded_user_ids: Optional[List[str]]
user_ids: Optional[List[str]]
shared_folder_ids: Optional[List[str]]
invited_user_emails: Optional[List[str]]
document_id: Optional[str]
html: Optional[str]
sharing: Optional[Dict[str, Dict[str, str]]]
owning_company_id: Optional[str]
is_deleted: bool = False
logger: Any = logging.getLogger(__name__)
def __post_init__(self):
super().__init__(
self.access_token, self.base_url, self.request_timeout, self.logger
)
def load_content(self):
data = self._make_request(API_ROUTES.THREADS.value % self.id)
self.access_levels = data["access_levels"]
self.expanded_user_ids = data["expanded_user_ids"]
self.user_ids = data["user_ids"]
self.shared_folder_ids = data["shared_folder_ids"]
self.invited_user_emails = data["invited_user_emails"]
self.document_id = data.get("thread")["document_id"]
self.html = data["html"]
def get_content(self) -> str:
if self.html is None or len(self.html.strip()) == 0:
self.load_content()
if self.html is not None:
return self.html
else:
raise QuipSpreadsheetClientException
class QuipPage:
def __init__(self, node: xml.etree.ElementTree.Element, logger: Any):
self.node = node
self.logger = logger
def __eq__(self, other: QuipPage) -> bool:
return self.node == other.node
def get_rows(self) -> List[QuipRow]:
items = self.node.iterfind(".//tbody/tr")
rows = list()
for item in items:
rows.append(QuipRow(item, self.logger))
return rows
def get_nth_row(self, index: int) -> QuipRow:
"""Returns the `Element` corresponding to the nth row in a given page."""
items = self.get_rows()
index = -1 if index == -1 else index - 1
try:
row = items[index]
except (IndexError) as exc:
self.logger.error("Row requested not found.")
raise QuipSpreadsheetDomParsingException from exc
return row
def get_first_row(self) -> QuipRow:
"""Returns the `Element` corresponding to the first row in a given page."""
return self.get_nth_row(1)
def get_last_row(self) -> QuipRow:
"""Returns the `Element` corresponding to the last row in a given page."""
return self.get_nth_row(-1)
def get_row_by_content(self, content: str) -> QuipRow:
try:
[row] = list(self.node.iterfind("tbody/tr/td/*[.='%s']...." % content))
except ValueError as exc:
self.logger.error("Unable to find row with content (%s).", content)
raise QuipSpreadsheetDomParsingException from exc
return QuipRow(row, self.logger)
class QuipRow:
def __init__(self, node: xml.etree.ElementTree.Element, logger: Any):
self.node = node
self.logger = logger
self.index = self.__get_row_index()
def __get_row_index(self):
try:
[column] = self.node.iterfind("./td[1]")
index = int(column.text.strip())
except ValueError as exc:
self.logger.error("Unable to find row index.")
raise QuipSpreadsheetDomParsingException from exc
return index
@staticmethod
def _recursive_get_text(
node: xml.etree.ElementTree.Element,
strip: bool = True,
separator: str = "",
):
"""Finds the text in a node and recursively traverses
its children to find more text."""
text_list = list()
if node.text is not None and len(node.text.replace("\u200b", "").strip()) > 0:
node_text = node.text.strip() if strip == True else node.text
text_list.append(node_text)
        for child in node:  # Element.getchildren() was removed in Python 3.9; iterate the element directly
child_text_list = QuipRow._recursive_get_text(child, strip, separator)
if len(child_text_list) > 0:
text_list.append(child_text_list)
return separator.join(text_list)
def get_row_cells_content(
self,
include_index: bool = True,
strip: bool = True,
separator: str = "",
) -> List[str]:
"""Returns the text of items in each cell of the given row."""
cells_content = list()
columns = self.node.iterfind("./td")
for column in columns:
cells_content.append(self._recursive_get_text(column, strip, separator))
return cells_content if include_index == True else cells_content[1:]
class QuipSpreadsheet(QuipThread):
dom = None
def parse_document_html(self) -> xml.etree.ElementTree.Element:
"""Returns an `Element` corresponding the Quip document HTML"""
document_xml = f"<html>{self.get_content()}</html>"
try:
self.dom = xml.etree.ElementTree.fromstring(document_xml.encode("utf-8"))
except xml.etree.ElementTree.ParseError as exc:
self.logger.error("Unable to parse spreadsheet DOM.")
self.logger.debug(exc)
raise QuipSpreadsheetDomParsingException from exc
return self.dom
def get_named_page(self, name: str) -> QuipPage:
"""Returns an `Element` corresponding the given page,
optionally sets is as current page."""
if self.dom is None:
self.dom = self.parse_document_html()
try:
[page] = list(self.dom.iterfind(".//table[@title='%s']" % name))
except ValueError as exc:
self.logger.error(
"Unable to find page requested in spreadsheet (%s).", name
)
raise QuipSpreadsheetPageNotFoundException from exc
return QuipPage(page, self.logger)
def get_nth_page(self, index: int) -> QuipPage:
"""Returns an `Element` corresponding the nth page,
optionally sets is as current page."""
if self.dom is None:
self.dom = self.parse_document_html()
try:
pages = list(self.dom.iterfind(".//table"))
page = pages[index]
except (ValueError, IndexError) as exc:
self.logger.error(
"Unable to find page requested in spreadsheet (%s).", index
)
raise QuipSpreadsheetPageNotFoundException from exc
return QuipPage(page, self.logger)
def get_first_page(self) -> QuipPage:
"""Returns the `Element` of the first page in the document,
optionally sets is as current page."""
return self.get_nth_page(0)
def get_last_page(self) -> QuipPage:
"""Returns the `Element` of the last page in the document,
optionally sets is as current page."""
return self.get_nth_page(-1)
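# A minimal end-to-end sketch (illustrative only; the token and thread id below
# are placeholders, not real values):
#
#     client = QuipClient(access_token="<token>", base_url="https://platform.quip.com")
#     me = client.get_authenticated_user()
#     thread = client.get_thread("<thread_id>")
#     if isinstance(thread, QuipSpreadsheet):
#         page = thread.get_first_page()
#         header = page.get_first_row().get_row_cells_content(include_index=False)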
|
dreamorosi/quip-spreadsheet
|
tests/test_quip_client.py
|
import os
import unittest
from copy import deepcopy
from xml.etree.ElementTree import Element
import requests_mock
from requests.exceptions import ConnectTimeout, ConnectionError
from quip.quip import (
QuipClient,
QuipPage,
QuipRow,
QuipSearchResults,
QuipSpreadsheetClientException,
API_ROUTES,
QuipUser,
QuipFolder,
QuipThread,
QuipSpreadsheet,
QuipApiHTTPErrorException,
QuipApiTimeoutException,
QuipApiConnectionException,
QuipApiNotAJSON,
QuipUserException,
QuipFolderException,
QuipThreadException,
QuipSpreadsheetDomParsingException,
QuipSpreadsheetPageNotFoundException,
)
from tests.constants import (
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
LOGS_PREFIX,
USER_JSON,
FOLDER_JSON,
THREAD_JSON,
SPREADSHEET_CONTENT,
)
class TestQuipClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if os.getenv("ENV") is not None:
del os.environ["ENV"]
def setUp(self) -> None:
self.quip = QuipClient(QUIP_ACCESS_TOKEN, QUIP_BASE_URL, 5)
def tearDown(self) -> None:
del self.quip
def test_init(self):
self.assertEqual(
f"Bearer {QUIP_ACCESS_TOKEN}", self.quip._QuipClient__auth # type: ignore
)
self.assertEqual(f"{QUIP_BASE_URL}/1", self.quip.url)
self.assertEqual(5, self.quip.request_timeout)
@requests_mock.Mocker(kw="mock")
def test__make_request(self, **kwargs):
kwargs["mock"].get(f"{QUIP_BASE_URL}/1/", status_code=500)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipApiHTTPErrorException):
self.quip._make_request("")
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}An HTTP Error occurred while making the request: 500.",
)
kwargs["mock"].get(f"{QUIP_BASE_URL}/1/", exc=ConnectTimeout)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipApiTimeoutException):
self.quip._make_request("")
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}Timeout of 5s has been exceeded.",
)
kwargs["mock"].get(f"{QUIP_BASE_URL}/1/", exc=ConnectionError)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipApiConnectionException):
self.quip._make_request("")
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}A connection error occurred.",
)
kwargs["mock"].get(f"{QUIP_BASE_URL}/1/", content=b"abc")
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipApiNotAJSON):
self.quip._make_request("")
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}The response returned by the API is not a valid JSON.",
)
kwargs["mock"].get(f"{QUIP_BASE_URL}/1/", json={"Hello": "World"})
data = self.quip._make_request("")
self.assertDictEqual(data, {"Hello": "World"})
@requests_mock.Mocker(kw="mock")
def test_get_authenticated_user(self, **kwargs):
wrong_user = deepcopy(USER_JSON)
del wrong_user["id"]
wrong_user["user_id"] = "1234"
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.CURRENT_USER.value}", json=wrong_user
)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipUserException):
self.quip.get_authenticated_user()
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}Unable to parse current user.",
)
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.CURRENT_USER.value}", json=USER_JSON
)
expected_user = QuipUser(**USER_JSON)
user = self.quip.get_authenticated_user()
self.assertEqual(expected_user.name, user.name)
self.assertEqual(expected_user.id, user.id)
self.assertEqual(expected_user.is_robot, user.is_robot)
self.assertEqual(expected_user.affinity, user.affinity)
self.assertEqual(expected_user.desktop_folder_id, user.desktop_folder_id)
self.assertEqual(expected_user.archive_folder_id, user.archive_folder_id)
self.assertEqual(expected_user.starred_folder_id, user.starred_folder_id)
self.assertEqual(expected_user.private_folder_id, user.private_folder_id)
self.assertEqual(expected_user.trash_folder_id, user.trash_folder_id)
self.assertEqual(expected_user.shared_folder_ids, user.shared_folder_ids)
self.assertEqual(expected_user.group_folder_ids, user.group_folder_ids)
self.assertEqual(expected_user.profile_picture_url, user.profile_picture_url)
self.assertEqual(expected_user.subdomain, user.subdomain)
self.assertEqual(expected_user.url, user.url)
@requests_mock.Mocker(kw="mock")
def test_get_folder(self, **kwargs):
wrong_folder = deepcopy(FOLDER_JSON)
del wrong_folder["member_ids"]
wrong_folder["members"] = "1234"
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.FOLDERS.value}" % "abc1234",
json=wrong_folder,
)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipFolderException):
self.quip.get_folder("abc1234")
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}Unable to parse retrieved folder.",
)
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.FOLDERS.value}" % "abc1234",
json=FOLDER_JSON,
)
expected_folder = QuipFolder(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
logger=object,
**FOLDER_JSON.get("folder"),
member_ids=FOLDER_JSON["member_ids"],
children=FOLDER_JSON["children"],
)
folder = self.quip.get_folder("abc1234")
self.assertEqual(expected_folder.id, folder.id)
self.assertEqual(expected_folder.title, folder.title)
self.assertEqual(expected_folder.creator_id, folder.creator_id)
self.assertEqual(expected_folder.created_usec, folder.created_usec)
self.assertEqual(expected_folder.updated_usec, folder.updated_usec)
self.assertListEqual(expected_folder.member_ids, folder.member_ids)
self.assertListEqual(expected_folder.children, folder.children)
@requests_mock.Mocker(kw="mock")
def test_get_thread(self, **kwargs):
wrong_thread = deepcopy(THREAD_JSON)
del wrong_thread["thread"]["id"]
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.THREADS.value}" % "abc1234",
json=wrong_thread,
)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipThreadException):
self.quip.get_thread("abc1234")
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}Unable to parse retrieved thread.",
)
not_a_spreadsheet_thread = deepcopy(THREAD_JSON)
not_a_spreadsheet_thread["thread"]["type"] = "something_else"
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.THREADS.value}" % "abc1234",
json=not_a_spreadsheet_thread,
)
thread = self.quip.get_thread("abc1234")
self.assertIsInstance(thread, QuipThread)
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.THREADS.value}" % "abc1234",
json=THREAD_JSON,
)
expected_thread = QuipSpreadsheet(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**THREAD_JSON.get("thread"),
access_levels=THREAD_JSON["access_levels"],
expanded_user_ids=THREAD_JSON["expanded_user_ids"],
user_ids=THREAD_JSON["user_ids"],
shared_folder_ids=THREAD_JSON["shared_folder_ids"],
invited_user_emails=THREAD_JSON["invited_user_emails"],
html=THREAD_JSON["html"],
)
thread = self.quip.get_thread("abc1234")
self.assertIsInstance(thread, QuipSpreadsheet)
self.assertEqual(expected_thread.author_id, thread.author_id)
self.assertEqual(expected_thread.created_usec, thread.created_usec)
self.assertEqual(expected_thread.document_id, thread.document_id)
self.assertEqual(expected_thread.id, thread.id)
self.assertEqual(expected_thread.is_deleted, thread.is_deleted)
self.assertEqual(expected_thread.link, thread.link)
self.assertEqual(expected_thread.thread_class, thread.thread_class)
self.assertEqual(expected_thread.title, thread.title)
self.assertEqual(expected_thread.type, thread.type)
self.assertEqual(expected_thread.updated_usec, thread.updated_usec)
self.assertDictEqual(expected_thread.access_levels, thread.access_levels) # type: ignore
self.assertListEqual(
expected_thread.expanded_user_ids, thread.expanded_user_ids # type: ignore
)
self.assertListEqual(expected_thread.user_ids, thread.user_ids) # type: ignore
self.assertListEqual(
expected_thread.shared_folder_ids, thread.shared_folder_ids # type: ignore
)
self.assertListEqual(
expected_thread.invited_user_emails, thread.invited_user_emails # type: ignore
)
self.assertEqual(expected_thread.html, thread.html)
@requests_mock.Mocker(kw="mock")
def test_search_thread(self, **kwargs):
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.SEARCH_THREADS.value}",
json=[THREAD_JSON],
)
results = self.quip.search_threads("My", 1)
self.assertIsInstance(results, QuipSearchResults)
self.assertIsInstance(results.spreadsheets[0], QuipSpreadsheet)
class TestQuipFolder(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if os.getenv("ENV") is not None:
del os.environ["ENV"]
@requests_mock.Mocker(kw="mock")
def test_resolve_children(self, **kwargs):
one_thread = deepcopy(THREAD_JSON)
one_thread["thread"]["id"] = "abc1234"
kwargs["mock"].register_uri(
"GET",
f"{QUIP_BASE_URL}/1/{API_ROUTES.THREADS.value}"
% one_thread.get("thread").get("id"),
json=one_thread,
)
other_thread = deepcopy(THREAD_JSON)
other_thread["thread"]["id"] = "abc5678"
kwargs["mock"].register_uri(
"GET",
f"{QUIP_BASE_URL}/1/{API_ROUTES.THREADS.value}"
% other_thread.get("thread").get("id"),
json=other_thread,
)
folder_w_threads = QuipFolder(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**FOLDER_JSON.get("folder"),
member_ids=FOLDER_JSON["member_ids"],
children=[
{"thread_id": one_thread.get("thread").get("id")},
{"thread_id": other_thread.get("thread").get("id")},
],
)
threads = folder_w_threads.resolve_children()
self.assertIsInstance(threads[0], QuipThread)
self.assertEqual(threads[0].id, one_thread.get("thread").get("id"))
self.assertIsInstance(threads[1], QuipThread)
self.assertEqual(threads[1].id, other_thread.get("thread").get("id"))
one_folder = deepcopy(FOLDER_JSON)
one_folder["folder"]["id"] = "abc1234"
kwargs["mock"].register_uri(
"GET",
f"{QUIP_BASE_URL}/1/{API_ROUTES.FOLDERS.value}"
% one_folder.get("folder").get("id"),
json=one_folder,
)
other_folder = deepcopy(FOLDER_JSON)
other_folder["folder"]["id"] = "abc5678"
kwargs["mock"].register_uri(
"GET",
f"{QUIP_BASE_URL}/1/{API_ROUTES.FOLDERS.value}"
% other_folder.get("folder").get("id"),
json=other_folder,
)
folder_w_folders = QuipFolder(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**FOLDER_JSON.get("folder"),
member_ids=FOLDER_JSON["member_ids"],
children=[
{"folder_id": one_folder.get("folder").get("id")},
{"folder_id": other_folder.get("folder").get("id")},
],
)
folders = folder_w_folders.resolve_children()
self.assertIsInstance(folders[0], QuipFolder)
self.assertEqual(folders[0].id, one_folder.get("folder").get("id"))
self.assertIsInstance(folders[1], QuipFolder)
self.assertEqual(folders[1].id, other_folder.get("folder").get("id"))
folder_w_both = QuipFolder(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**FOLDER_JSON.get("folder"),
member_ids=FOLDER_JSON["member_ids"],
children=[
{"folder_id": one_folder.get("folder").get("id")},
{"thread_id": one_thread.get("thread").get("id")},
],
)
folders_n_threads = folder_w_both.resolve_children()
self.assertIsInstance(folders_n_threads[0], QuipFolder)
self.assertEqual(folders_n_threads[0].id, one_folder.get("folder").get("id"))
self.assertIsInstance(folders_n_threads[1], QuipThread)
self.assertEqual(folders_n_threads[1].id, one_thread.get("thread").get("id"))
class TestQuipThread(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if os.getenv("ENV") is not None:
del os.environ["ENV"]
@requests_mock.Mocker(kw="mock")
def test_get_content(self, **kwargs):
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.THREADS.value}"
% THREAD_JSON.get("thread").get("id"),
exc=ConnectionError,
)
thread = QuipThread(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**THREAD_JSON.get("thread"),
access_levels=THREAD_JSON["access_levels"],
expanded_user_ids=THREAD_JSON["expanded_user_ids"],
user_ids=THREAD_JSON["user_ids"],
shared_folder_ids=THREAD_JSON["shared_folder_ids"],
invited_user_emails=THREAD_JSON["invited_user_emails"],
html=THREAD_JSON["html"],
)
self.assertEqual(thread.get_content(), THREAD_JSON["html"])
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.THREADS.value}"
% THREAD_JSON.get("thread").get("id"),
json=THREAD_JSON,
)
other_thread = QuipThread(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**THREAD_JSON.get("thread"),
access_levels=None,
expanded_user_ids=None,
user_ids=None,
shared_folder_ids=None,
invited_user_emails=None,
html=None,
)
self.assertEqual(other_thread.get_content(), THREAD_JSON["html"])
@requests_mock.Mocker(kw="mock")
def test_load_content(self, **kwargs):
kwargs["mock"].get(
f"{QUIP_BASE_URL}/1/{API_ROUTES.THREADS.value}"
% THREAD_JSON.get("thread").get("id"),
json=THREAD_JSON,
)
original_thread = deepcopy(THREAD_JSON)
original_thread["thread"]["document_id"] = None
thread = QuipThread(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**original_thread.get("thread"),
access_levels=None,
expanded_user_ids=None,
user_ids=None,
shared_folder_ids=None,
invited_user_emails=None,
html=None,
)
thread.load_content()
self.assertEqual(thread.access_levels, THREAD_JSON["access_levels"])
self.assertListEqual(
thread.expanded_user_ids, THREAD_JSON["expanded_user_ids"] # type: ignore
)
self.assertListEqual(thread.user_ids, THREAD_JSON["user_ids"]) # type: ignore
self.assertListEqual(
thread.shared_folder_ids, THREAD_JSON["shared_folder_ids"] # type: ignore
)
self.assertListEqual(
thread.invited_user_emails, THREAD_JSON["invited_user_emails"] # type: ignore
)
self.assertEqual(thread.document_id, THREAD_JSON["thread"]["document_id"])
class TestQuipSpreadsheet(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if os.getenv("ENV") is not None:
del os.environ["ENV"]
def setUp(self) -> None:
self.spreadsheet = QuipSpreadsheet(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**THREAD_JSON.get("thread"),
access_levels=THREAD_JSON["access_levels"],
expanded_user_ids=THREAD_JSON["expanded_user_ids"],
user_ids=THREAD_JSON["user_ids"],
shared_folder_ids=THREAD_JSON["shared_folder_ids"],
invited_user_emails=THREAD_JSON["invited_user_emails"],
html=SPREADSHEET_CONTENT,
)
def tearDown(self) -> None:
self.spreadsheet = None
def test_parse_document_html(self):
dom = self.spreadsheet.parse_document_html()
self.assertIsInstance(dom, Element)
self.assertEqual(dom[0].tag, "h1")
self.assertEqual(dom[1][0].tag, "table")
spreadsheet = QuipSpreadsheet(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**THREAD_JSON.get("thread"),
access_levels=THREAD_JSON["access_levels"],
expanded_user_ids=THREAD_JSON["expanded_user_ids"],
user_ids=THREAD_JSON["user_ids"],
shared_folder_ids=THREAD_JSON["shared_folder_ids"],
invited_user_emails=THREAD_JSON["invited_user_emails"],
html="<<</",
)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipSpreadsheetDomParsingException):
dom = spreadsheet.parse_document_html()
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}Unable to parse spreadsheet DOM.",
)
def test_get_named_page(self):
page = self.spreadsheet.get_named_page("Sheet1")
self.assertIsInstance(page, QuipPage)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipSpreadsheetPageNotFoundException):
self.spreadsheet.get_named_page("Sheet2")
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}Unable to find page requested in spreadsheet (Sheet2).",
)
def test_get_page_by_index(self):
first_page = self.spreadsheet.get_first_page()
self.assertIsInstance(first_page, QuipPage)
last_page = self.spreadsheet.get_last_page()
self.assertEqual(first_page, last_page)
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipSpreadsheetPageNotFoundException):
self.spreadsheet.get_nth_page(2)
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}Unable to find page requested in spreadsheet (2).",
)
class TestQuipPage(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if os.getenv("ENV") is not None:
del os.environ["ENV"]
def setUp(self) -> None:
self.spreadsheet = QuipSpreadsheet(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**THREAD_JSON.get("thread"),
access_levels=THREAD_JSON["access_levels"],
expanded_user_ids=THREAD_JSON["expanded_user_ids"],
user_ids=THREAD_JSON["user_ids"],
shared_folder_ids=THREAD_JSON["shared_folder_ids"],
invited_user_emails=THREAD_JSON["invited_user_emails"],
html=SPREADSHEET_CONTENT,
)
self.page = self.spreadsheet.get_named_page("Sheet1")
def tearDown(self) -> None:
self.spreadsheet = None
self.page = None
def test_get_row_by_index(self):
first_row = self.page.get_first_row()
self.assertIsInstance(first_row, QuipRow)
self.assertEqual(first_row.node[0].text, "1")
last_row = self.page.get_last_row()
self.assertIsInstance(last_row, QuipRow)
self.assertEqual(last_row.node[0].text, "3")
nth_row = self.page.get_nth_row(1)
self.assertIsInstance(nth_row, QuipRow)
self.assertEqual(nth_row.node[0].text, "1")
with self.assertLogs(level="ERROR") as context_mngr:
with self.assertRaises(QuipSpreadsheetDomParsingException):
self.page.get_nth_row(5)
self.assertEqual(
context_mngr.output[0],
f"ERROR{LOGS_PREFIX}Row requested not found.",
)
def test_get_row_by_content(self):
row = self.page.get_row_by_content("TECH TRACK")
self.assertIsInstance(row, QuipRow)
self.assertEqual(row.index, 1)
class TestQuipRow(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if os.getenv("ENV") is not None:
del os.environ["ENV"]
def setUp(self) -> None:
self.spreadsheet = QuipSpreadsheet(
QUIP_ACCESS_TOKEN,
QUIP_BASE_URL,
request_timeout=10,
**THREAD_JSON.get("thread"),
access_levels=THREAD_JSON["access_levels"],
expanded_user_ids=THREAD_JSON["expanded_user_ids"],
user_ids=THREAD_JSON["user_ids"],
shared_folder_ids=THREAD_JSON["shared_folder_ids"],
invited_user_emails=THREAD_JSON["invited_user_emails"],
html=SPREADSHEET_CONTENT,
)
self.page = self.spreadsheet.get_named_page("Sheet1")
def tearDown(self) -> None:
self.spreadsheet = None
self.page = None
def test_get_row_cells_content(self):
row = self.page.get_nth_row(2)
cells = row.get_row_cells_content()
self.assertEqual(len(cells), 8)
self.assertListEqual(
cells,
["2", "", "Date", "Title", "Location", "Language", "Capacity", "Owner"],
)
cells = row.get_row_cells_content(include_index=False)
self.assertEqual(len(cells), 7)
self.assertListEqual(
cells,
["", "Date", "Title", "Location", "Language", "Capacity", "Owner"],
)
if __name__ == "__main__":
unittest.main()
|
dreamorosi/quip-spreadsheet
|
tests/constants.py
|
<gh_stars>0
QUIP_ACCESS_TOKEN = "<KEY> # This is a fake token :D
QUIP_BASE_URL = "https://platform.quip.com"
ENV = "DEBUG"
LOGS_PREFIX = ":quip.quip:"
USER_JSON = {
"name": "<NAME>",
"id": "1234",
"is_robot": False,
"affinity": 0.0,
"desktop_folder_id": "abc1234",
"archive_folder_id": "abc5678",
"starred_folder_id": "abc9101",
"private_folder_id": "abc1121",
"trash_folder_id": "abc3141",
"shared_folder_ids": ["abc5161", "abc7181"],
"group_folder_ids": ["abc9202", "abc1222"],
"profile_picture_url": f"{QUIP_BASE_URL}/pic.jpg",
"subdomain": None,
"url": QUIP_BASE_URL,
}
FOLDER_JSON = {
"children": [{"thread_id": "abc7181"}, {"folder_id": "abc5161"}],
"folder": {
"created_usec": 1606998498297926,
"creator_id": "1234",
"id": "abc9101",
"title": "Starred",
"updated_usec": 1609328575547507,
},
"member_ids": ["1234"],
}
THREAD_JSON = {
"access_levels": {
"1234": {"access_level": "OWN"},
},
"expanded_user_ids": ["1234"],
"thread": {
"author_id": "1234",
"thread_class": "document",
"id": "567mnb",
"created_usec": 1608114559763651,
"updated_usec": 1609261609357637,
"link": f"{QUIP_BASE_URL}/abcdefg12345",
"type": "spreadsheet",
"title": "My Spreadsheet",
"document_id": "9876poiu",
"is_deleted": False,
},
"user_ids": [],
"shared_folder_ids": ["abc1234"],
"invited_user_emails": [],
"html": "<h1 id='9876poiu'>My Spreadsheet</h1>",
}
SPREADSHEET_CONTENT = """<h1 id='9876poiu'>My Spreadsheet</h1>
<div data-section-style='13'>
<table id='Aec9CAvyP44' title='Sheet1' style='width: 237.721em'>
<thead>
<tr>
<th class='empty' style='width: 2em'/>
<th id='Aec9CAACdyH' class='empty' style='width: 1.8em'>A<br/></th>
<th id='Aec9CAH7YBR' class='empty' style='width: 7.46667em'>B<br/></th>
<th id='Aec9CAwvN9F' class='empty' style='width: 19.9333em'>C<br/></th>
<th id='Aec9CAe1yQ0' class='empty' style='width: 6.71634em'>D<br/></th>
<th id='Aec9CAAIaj1' class='empty' style='width: 6em'>T<br/></th>
<th id='Aec9CAWcoFU' class='empty' style='width: 6em'>U<br/></th>
<th id='Aec9CAkrCad' class='empty' style='width: 6em'>V<br/></th>
</tr>
</thead>
<tbody>
<tr id='Aec9CAmLjDE'>
<td style='background-color:#f0f0f0'>1</td>
<td id='s:Aec9CAmLjDE_Aec9CA0oPBj' style='background-color:#FFDF99;' class='bold'>
<span id='s:Aec9CAmLjDE_Aec9CA0oPBj'>TECH TRACK</span>
<br/>
</td>
<td id='s:Aec9CAmLjDE_Aec9CAWFNOe' style=''>
<span id='s:Aec9CAmLjDE_Aec9CAWFNOe'>\u200b</span>
<br/>
</td>
<td id='s:Aec9CAmLjDE_Aec9CAHOMxq' style=''>
<span id='s:Aec9CAmLjDE_Aec9CAHOMxq'>\u200b</span>
<br/>
</td>
<td id='s:Aec9CAmLjDE_Aec9CAf7d0s' style=''>
<span id='s:Aec9CAmLjDE_Aec9CAf7d0s'>\u200b</span>
<br/>
</td>
<td id='s:Aec9CAmLjDE_Aec9CAEwmwC' style=''>
<span id='s:Aec9CAmLjDE_Aec9CAEwmwC'>\u200b</span>
<br/>
</td>
<td id='s:Aec9CAmLjDE_Aec9CA0lijP' style=''>
<span id='s:Aec9CAmLjDE_Aec9CA0lijP'>\u200b</span>
<br/>
</td>
<td id='s:Aec9CAmLjDE_Aec9CA003da' style=''>
<span id='s:Aec9CAmLjDE_Aec9CA0lijP'>\u200b</span>
<br/>
</td>
</tr>
<tr id='Aec9CAITZFz'>
<td style='background-color:#f0f0f0'>2</td>
<td id='s:Aec9CAITZFz_Aec9CA0oPBj' style='background-color:#FFDF99;' class='bold'>
<span id='s:Aec9CAITZFz_Aec9CA0oPBj'>\u200b</span>
<br/>
</td>
<td id='s:Aec9CAITZFz_Aec9CAWFNOe' style='background-color:#FFDF99;' class='bold'>
<span id='s:Aec9CAITZFz_Aec9CAWFNOe'>Date</span>
<br/>
</td>
<td id='s:Aec9CAITZFz_Aec9CAHOMxq' style='background-color:#FFDF99;' class='bold'>
<span id='s:Aec9CAITZFz_Aec9CAHOMxq'>Title</span>
<br/>
</td>
<td id='s:Aec9CAITZFz_Aec9CAf7d0s' style='background-color:#FFDF99;' class='bold'>
<span id='s:Aec9CAITZFz_Aec9CAf7d0s'>Location</span>
<br/>
</td>
<td id='s:Aec9CAITZFz_Aec9CAwdFrL' style='background-color:#FFDF99;' class='bold'>
<span id='s:Aec9CAITZFz_Aec9CAwdFrL'>Language</span>
<br/>
</td>
<td id='s:Aec9CAITZFz_Aec9CAEwmwC' style='background-color:#FFDF99;' class='bold'>
<span id='s:Aec9CAITZFz_Aec9CAEwmwC'>Capacity</span>
<br/>
</td>
<td id='s:Aec9CAITZFz_Aec9CA0lijP' style='background-color:#FFDF99;' class='bold'>
<span id='s:Aec9CAITZFz_Aec9CA0lijP'>Owner</span>
<br/>
</td>
</tr>
<tr id='Aec9CADeBjI'>
<td style='background-color:#f0f0f0'>3</td>
<td id='s:Aec9CADeBjI_Aec9CA0oPBj' style='background-color:#AFEFA9;'>
<span id='s:Aec9CADeBjI_Aec9CA0oPBj'>\u200b</span>
<br/>
</td>
<td id='s:Aec9CADeBjI_Aec9CAWFNOe' style=''>
<span id='s:Aec9CADeBjI_Aec9CAWFNOe'>Date</span>
<br/>
</td>
<td id='s:Aec9CADeBjI_Aec9CAHOMxq' style=''>
<span id='s:Aec9CADeBjI_Aec9CAHOMxq'>Intro to ML on AWS</span>
<br/>
</td>
<td id='s:Aec9CADeBjI_Aec9CAf7d0s' style=''>
<span id='s:Aec9CADeBjI_Aec9CAf7d0s'>Virtual (Chime)</span>
<br/>
</td>
<td id='s:Aec9CADeBjI_Aec9CAwdFrL' style=''>
<span id='s:Aec9CADeBjI_Aec9CAwdFrL'>ES</span>
<br/>
</td>
<td id='s:Aec9CADeBjI_Aec9CAEwmwC' style=''>
<span id='s:Aec9CADeBjI_Aec9CAEwmwC'>50</span>
<br/>
</td>
<td id='s:Aec9CADeBjI_Aec9CA0lijP' style=''>
<span id='s:Aec9CADeBjI_Aec9CA0lijP'><NAME></span>
<br/>
</td>
</tr>
</tbody>
</table>
</div>"""
|
MainTime/MotionDetection
|
main.py
|
import cv2
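# Note: `difference` below is a plain signed subtraction; with uint8 frames the
# result wraps around for negative differences. cv2.absdiff is the usual choice
# when a symmetric difference is wanted.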
def difference(value0, value1):
return value1 - value0
def find_movement(bild1, bild2):
bild1 = convert_to_grayscale(bild1)
bild2 = convert_to_grayscale(bild2)
bild1 = cv2.GaussianBlur(bild1, (21, 21), 0)
bild2 = cv2.GaussianBlur(bild2, (21, 21), 0)
# compute the difference between the two frames
diff = difference(bild1, bild2)
thresh = cv2.threshold(diff, 5, 255, cv2.THRESH_BINARY)[1]
return thresh
def has_color(bild1):
return bild1.shape[2] == 3
def convert_to_grayscale(bild1):
if has_color(bild1):
bild1 = cv2.cvtColor(bild1, cv2.COLOR_BGR2GRAY)
return bild1
if __name__ == '__main__':
camera = cv2.VideoCapture(0) # create camera object
_, old_frame = camera.read()
while True:
_, current_frame = camera.read()
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cv2.imshow("frame", find_movement(old_frame, current_frame))
old_frame = current_frame
camera.release()
cv2.destroyAllWindows()
|
daoluan/Leaderboard-python
|
Leaderboard.py
|
<filename>Leaderboard.py
# -*- coding: utf-8 -*-
#!/usr/bin/python
import redis
class Leaderboard:
def __init__(self,host,port,key,db):
self.host = host
self.port = port
self.key = key
self.db = db
self.r = redis.StrictRedis(host=self.host,port=self.port,db=self.db)
def isRedisValid(self):
"""Return True when the redis connection is missing, so callers can bail out early."""
return self.r is None
def addMember(self,member,score):
if self.isRedisValid():
return None
return self.r.zadd(self.key,score,member)
def delMember(self,member):
if self.isRedisValid():
return None
return self.r.zrem(self.key,member)
def incrScore(self,member,increment):
"""increase score on specified member"""
if self.isRedisValid():
return None
return self.r.zincrby(self.key,member,increment)
def getRankByMember(self,member):
"""Get ranking by specified member."""
if self.isRedisValid():
return None
return self.r.zrank(self.key,member)
def getLeaderboard(self,start,stop,reverse,with_score):
"""Return the whole leaderboard."""
if self.isRedisValid():
return None
return self.r.zrange(self.key,start,stop,reverse,with_score)
def getLeaderboardByPage(self,item_per_page,page_num,reverse=False,with_score=False):
"""Return part of leaderboard configurably."""
# fix parameters
if item_per_page <= 0:
item_per_page = 5
if page_num <= 0:
page_num = 1
# note: it is possible that return value is empty list.
return self.getLeaderboard((page_num-1)*item_per_page,
page_num*item_per_page-1,
reverse,with_score)
def getWholeLeaderboard(self,reverse=False,with_score=False):
"""Return the whole leaderboard."""
return self.getLeaderboard(0,-1,reverse,with_score)
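# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes a redis server on localhost:6379 and the redis-py 2.x positional
# signatures used above (e.g. zadd(key, score, member)); the key and member
# names are placeholders.
if __name__ == "__main__":
    board = Leaderboard("localhost", 6379, "demo:leaderboard", 0)
    board.addMember("alice", 100)
    board.addMember("bob", 250)
    board.incrScore("alice", 75)
    print(board.getRankByMember("bob"))
    print(board.getWholeLeaderboard(reverse=True, with_score=True))
    print(board.getLeaderboardByPage(item_per_page=10, page_num=1, reverse=True))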