repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
philskillz-coder/discord-py-paginator | examples/with_config.py | from discord import app_commands, Embed, Color, Interaction
from discord.ext.paginator import paginator
from typing import Dict, Any
class GuildPaginator(paginator.Paginator):
    """Paginator that shows one page per guild the bot is a member of."""

    # Class-level default configuration: quick navigation stays disabled
    # unless an instance overrides it via the ``config`` argument.
    CONFIG = {
        "quick_navigation_button_enabled": False  # this paginator never uses quick nav by default
    }

    async def get_page_count(self, interaction: Interaction) -> int:
        """Return the total number of pages (one per guild)."""
        return len(self.client.guilds)

    async def get_page_content(self, interaction: Interaction, page: int) -> Dict[str, Any]:
        """Build the kwargs passed to ``interaction.response.edit_message``.

        e.g. ``{'content': 'hello'}`` means the message content becomes "hello".
        """
        # page is guaranteed to lie in [0, page_count), so indexing cannot fail
        guild = self.client.guilds[page]
        total = await self.get_page_count(interaction)

        embed = Embed(title="Guild", colour=Color.green())
        embed.add_field(name="Name", value=guild.name)
        embed.add_field(name="ID", value=str(guild.id))
        embed.add_field(name="Member count", value=str(guild.member_count), inline=False)

        return {
            "content": f"Guild {page+1}/{total}",
            "embed": embed,
            "ephemeral": True
        }
@app_commands.command(
    name="guilds_0",
    description="Show all the guilds"
)
async def guild_default_config(interaction: Interaction):
    """Reply with a paginator that keeps GuildPaginator's defaults.

    The config is not overridden here, so quick navigation stays disabled.
    """
    pager = GuildPaginator(interaction.client, interaction.user)
    await interaction.response.send_message(
        content="The bot guilds",
        view=await pager.run()
    )
@app_commands.command(
    name="guilds_1",
    description="Show all the guilds"
)
async def guild_overwrite_config(interaction: Interaction):
    """Reply with a paginator whose per-instance config enables quick nav."""
    # The instance-level config overrides GuildPaginator.CONFIG for this
    # paginator only.
    pager = GuildPaginator(
        interaction.client,
        interaction.user,
        config={"quick_navigation_button_enabled": True}
    )
    await interaction.response.send_message(
        content="The bot guilds",
        view=await pager.run()
    )
import discord
from discord.ext.paginator import paginator

# Standalone example: build a paginator from a precomputed list of pages
# instead of subclassing Paginator.
# NOTE(review): `client` and `user` are assumed to be defined by the
# surrounding example context — this snippet is not runnable on its own.
my_paginator = paginator.Paginator.from_list(
    client,
    user,
    config={
        "paginator_ephemeral": True,
        "quick_navigation_button_enabled": False
    },
    # one page (an edit_message kwargs dict) per guild the bot is in
    data=[
        {
            "embed": discord.Embed(title=guild.name, description=f"This guild has {guild.member_count} members"),
        } for guild in client.guilds
    ]
)
class ViewException(Exception):
    """Base exception for all paginator view errors."""

    def __init__(self, message: str) -> None:
        # Forward the message to Exception so str(exc) and tracebacks show it
        # (previously only self.message was set, leaving str(exc) empty).
        super().__init__(message)
        self.message = message


class BetterException(ViewException):
    """Intermediate base class for component-level exceptions."""
    pass


class ButtonException(BetterException):
    """Base class for errors raised by paginator buttons."""
    pass


class ButtonFailed(ButtonException):
    """A button callback failed to execute."""
    pass


class NotAuthor(ButtonException):
    """A user other than the paginator's author pressed a button."""
    pass


class SelectException(ViewException):
    """Base class for errors raised by select menus."""
    pass


class SelectFailed(SelectException):
    """A select-menu callback failed to execute."""
    pass
|
wwwennie/spectra_fit | main_raman.py | <reponame>wwwennie/spectra_fit
#!/usr/bin/python
# This module is for analyzing Raman spectra
# It has the following capabilities
# - Gaussian peak fitting
# - First derivatives via finite difference
# - Differential Raman spectroscopy
# This file is the main driver for running things
# Dependencies:
# numpy, lmfit, matplotlib
# created by: <NAME> (<EMAIL>)
if __name__ == "__main__":
    # import user input: defines file paths (filefit, file1, file2, filediff,
    # filesmooth), the initial-guess dict initpar, and fit settings
    # (typefit, shift)
    from dat_wo3 import *
    #from dat_sto import *

    # import the driver routines
    from fitpeaks import *
    from interpdiff import *
    from smooth import *
    from finite_diff import *

    #------ Main functionalities -------#
    ## fit spectra with decomposed peaks and plot
    # Pass the typefit/shift configured in the dat_* module instead of
    # re-hardcoding 2 and 25 here — otherwise switching config modules
    # (e.g. dat_sto sets typefit=1, shift=20) silently has no effect.
    fitpeaks(filefit, initpar, typefit=typefit, shift=shift)

    # take difference of two spectra
    interpdiff(file1, file2)

    # smooth peaks for easier peak finding and first-order derivatives
    smooth(filesmooth)

    # take first-derivative using finite difference
    finite_diff(filediff)
|
wwwennie/spectra_fit | dat_wo3.py | <reponame>wwwennie/spectra_fit<filename>dat_wo3.py
#!/usr/bin/python
#==============================#
#========= USER INPUTS ========#
#==============================#
# take difference of two spectra, and interpolate
# take difference of two spectra, and interpolate (interpdiff)
file1 = "./example-data/wo3-side1.txt"
file2 = "./example-data/wo3-side2.txt"

# take first derivative of spectra (finite_diff)
filediff=file1

# smooth spectra, with convolution (smooth)
filesmooth="./example-data/smooth_me.txt"

# peak fitting (fitpeaks)
filefit = file1
# NOTE(review): fitpeaks() derives the peak count from initpar itself;
# nfits here is informational only.
nfits = 12 # number of peaks to fit
typefit = 2 # 1 = Gaussian, 2 = Lorentzian, 3 = Voigt type fits
shift = 25 # background shift

#================ Initial guesses =================#
# as nested dictionary: initpar[parameter][peak_number] -> {'val','min','max'}
# adapt to different models by changing parameters
# use float("inf") for infinity or import math; math.inf
# default bounds:
#   center: [-inf:inf]
#   sigma: [0:inf]
#   amplitude: [-inf:inf]
# the initial guesses for center are based on phonon modes calculated from DFPT
initpar = {'center': {1:{'val':90,'min':0,'max':120},\
                      2:{'val':93,'min':0,'max':120},\
                      3:{'val':133,'min':100,'max':150},\
                      4:{'val':180,'min':150,'max':225},\
                      5:{'val':225,'min':200,'max':275},\
                      6:{'val':275,'min':250,'max':300},\
                      7:{'val':350,'min':300,'max':400},\
                      8:{'val':610,'min':570,'max':650},\
                      9:{'val':650,'min':600,'max':700},\
                      10:{'val':720,'min':675,'max':800},\
                      11:{'val':810,'min':775,'max':850},\
                      12:{'val':1100,'min':1000,'max':1500}},\
           'sigma':{1:{'val':15,'min':1,'max':35},\
                    2:{'val':15,'min':1,'max':35},\
                    3:{'val':15,'min':1,'max':55},\
                    4:{'val':15,'min':1,'max':35},\
                    5:{'val':15,'min':1,'max':35},\
                    6:{'val':15,'min':1,'max':45},\
                    7:{'val':15,'min':1,'max':45},\
                    8:{'val':15,'min':1,'max':35},\
                    9:{'val':15,'min':1,'max':35},\
                    10:{'val':15,'min':1,'max':35},\
                    11:{'val':15,'min':1,'max':25},\
                    12:{'val':15,'min':1,'max':100}},\
           'amplitude':{1:{'val':30000,'min':10,'max':float("inf")},\
                        2:{'val':30000,'min':100,'max':float("inf")},\
                        3:{'val':10000,'min':2000,'max':float("inf")},\
                        4:{'val':10000,'min':1000,'max':float("inf")},\
                        5:{'val':10000,'min':1000,'max':float("inf")},\
                        6:{'val':5000,'min':0,'max':float("inf")},\
                        7:{'val':5000,'min':0,'max':float("inf")},\
                        8:{'val':5000,'min':0,'max':float("inf")},\
                        9:{'val':5000,'min':0,'max':float("inf")},\
                        10:{'val':5000,'min':500,'max':float("inf")},\
                        11:{'val':2000,'min':500,'max':float("inf")},\
                        12:{'val':2000,'min':500,'max':float("inf")}}\
          }
|
wwwennie/spectra_fit | truncdata.py | #!/usr/bin/python
# created by: <NAME> (<EMAIL>)
# created on: 20 July 2016
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
#==============================#
#========= USER INPUTS ========#
#==============================#
def trunc_data(filename, min_index, max_index, outfile="out-truncdata.dat"):
    """Truncate two-column spectrum data to the rows [min_index:max_index).

    filename: (string) input text file, two-column (x, y) format
    min_index, max_index: (int) row indices bounding the kept slice
        (assumed to be array indices, not x-values — TODO confirm with callers)
    outfile: (string) destination file for the truncated data

    The original body read an undefined ``infile``, wrote undefined
    interpolation variables, and never applied min_index/max_index; this
    version actually performs the truncation the docstring promises.
    """
    # Import data and keep only the requested row range
    data = np.loadtxt(filename)
    x = data[min_index:max_index, 0]
    y = data[min_index:max_index, 1]

    #== Save truncated data ==#
    # context manager guarantees the file is flushed and closed
    with open(outfile, 'w') as c:
        for line in zip(x, y):
            c.write(" ".join(str(v) for v in line) + "\n")

    #========== Plotting truncated data =============#
    plt.figure(1)
    plt.plot(x, y, label=filename, linewidth=2.0)
    plt.legend(loc='best')
    plt.xlabel("Wavenumber (cm^{-1})")
    plt.ylabel("Intensity (counts/s)")
    plt.draw()
    plt.pause(0.001)
    input("Please press [enter] to continue")
|
wwwennie/spectra_fit | finite_diff.py | <filename>finite_diff.py<gh_stars>1-10
#!/usr/bin/python
# created by: <NAME> (<EMAIL>)
# created on: 31 July 2016
import numpy as np
import matplotlib.pyplot as plt
def finite_diff(filename, outfile="out-finitediff.dat"):
    """
    Find the first- and second-order derivatives of a set of data
    (i.e., Raman peaks) using central differences.

    filename, outfile: (string) input and output text files
        (two-column x, y format; output is x vs. first derivative)
    """
    # Import data from filename
    data = np.loadtxt(filename)
    wavenum = data[:, 0]  # x
    counts = data[:, 1]   # y

    # TO DO
    # interpolate data for evenly spaced data points
    # to minimize error of central difference

    fdcts = np.zeros(len(wavenum))   # first-order derivative
    fd2cts = np.zeros(len(wavenum))  # second-order derivative
    # skip first and final points: they have no central neighbours
    for i in range(1, len(wavenum) - 1):
        span = wavenum[i+1] - wavenum[i-1]  # = 2h for evenly spaced points
        # central difference: dy/dx = (y[i+1] - y[i-1]) / (x[i+1] - x[i-1])
        # (the old code divided by 2*span = 4h — off by a factor of 2)
        fdcts[i] = (counts[i+1] - counts[i-1]) / span
        # second derivative: (y[i+1] - 2*y[i] + y[i-1]) / h**2
        # (the old code had a sign error, -counts[i-1], and used (2h)**2)
        h = span / 2.0
        fd2cts[i] = (counts[i+1] - 2*counts[i] + counts[i-1]) / (h**2)

    #========== Save results =============#
    # context manager guarantees the file is flushed and closed (was leaked)
    with open(outfile, 'w') as f:
        for line in zip(wavenum, fdcts):
            f.write(" ".join(str(x) for x in line) + "\n")

    #========== Plotting results =============#
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()  # derivatives on a secondary y-axis
    ax1.plot(wavenum, counts, label='Measured', linewidth=2.0)
    ax2.plot(wavenum, fdcts, 'k--', label='First-order derivative', linewidth=2.0)
    ax2.plot(wavenum, fd2cts, 'g:', label='Second-order derivative', linewidth=3.0)
    ax1.set_xlabel('Wave number (cm^-1)')
    ax1.set_ylabel('Intensity counts')
    ax2.set_ylabel('Slope Intensity')
    plt.legend(loc='upper right')
    plt.draw()
    plt.pause(0.001)
    input("Please press [enter] to continue")
|
wwwennie/spectra_fit | dat_sto.py | #!/usr/bin/python
#========= USER INPUTS ========#
#==============================#
# peak fitting (fitpeaks)
# NOTE(review): unlike dat_wo3, this module defines no file1/file2/filediff/
# filesmooth — main_raman's interpdiff/smooth/finite_diff steps would fail
# with this config selected; confirm whether that is intended.
filefit = "./example-data/sto-raman-spectra.txt"
typefit = 1 # 1 = Gaussian, 2 = Lorentzian, 3 = Voigt type fits
shift = 20 # background shift

#================ Initial guesses =================#
# as nested dictionary: initpar[parameter][peak_number] -> {'val','min','max'}
# adapt to different models by changing parameters
# use float("inf") for infinity or import math; math.inf
# default bounds:
#   center: [-inf:inf]
#   sigma: [0:inf]
#   amplitude: [-inf:inf]
initpar = {'center': {1:{'val':130,'min':0,'max':150},\
                      2:{'val':175,'min':150,'max':200},\
                      3:{'val':260,'min':250,'max':275},\
                      4:{'val':300,'min':275,'max':310},\
                      5:{'val':375,'min':325,'max':400},\
                      6:{'val':410,'min':400,'max':435},\
                      7:{'val':630,'min':600,'max':650},\
                      8:{'val':670,'min':650,'max':700},\
                      9:{'val':730,'min':725,'max':780},\
                      10:{'val':800,'min':775,'max':825},\
                      11:{'val':1050,'min':1025,'max':1075},\
                      12:{'val':1250,'min':1200,'max':1350},\
                      13:{'val':1390,'min':1300,'max':1400}},\
           'sigma':{1:{'val':15,'min':1,'max':35},\
                    2:{'val':15,'min':1,'max':35},\
                    3:{'val':15,'min':1,'max':35},\
                    4:{'val':15,'min':1,'max':35},\
                    5:{'val':15,'min':1,'max':35},\
                    6:{'val':15,'min':1,'max':45},\
                    7:{'val':15,'min':1,'max':25},\
                    8:{'val':15,'min':1,'max':25},\
                    9:{'val':15,'min':1,'max':25},\
                    10:{'val':35,'min':1,'max':75},\
                    11:{'val':35,'min':1,'max':100},\
                    12:{'val':15,'min':1,'max':35},\
                    13:{'val':15,'min':1,'max':35}},\
           'amplitude':{1:{'val':3000,'min':10,'max':float("inf")},\
                        2:{'val':3000,'min':100,'max':float("inf")},\
                        3:{'val':5000,'min':2000,'max':float("inf")},\
                        4:{'val':6000,'min':2000,'max':float("inf")},\
                        5:{'val':5000,'min':2000,'max':float("inf")},\
                        6:{'val':5000,'min':500,'max':float("inf")},\
                        7:{'val':3000,'min':1000,'max':float("inf")},\
                        8:{'val':3000,'min':1000,'max':float("inf")},\
                        9:{'val':3000,'min':1000,'max':float("inf")},\
                        10:{'val':2000,'min':10,'max':float("inf")},\
                        11:{'val':2000,'min':10,'max':float("inf")},\
                        12:{'val':1500,'min':10,'max':float("inf")},\
                        13:{'val':1500,'min':10,'max':float("inf")}}\
          }
|
wwwennie/spectra_fit | fitpeaks.py | <gh_stars>1-10
#!/usr/bin/python
# created by: <NAME> (<EMAIL>)
# created on: 4 July 2016
import os
import time

import numpy as np
import matplotlib.pyplot as plt
from lmfit.models import GaussianModel, LorentzianModel, VoigtModel, ExponentialModel
def fitpeaks(filename, initpar, typefit=1, shift=25):
    """
    Fit peaks in Raman spectra using lmfit, with timing printouts.

    filename: (string) file pointing to data, e.g., Raman spectra
              two-column format (x, y), e.g., (wavenum, intensity)
    initpar:  (dictionary) initial guesses; keys "center", "sigma",
              "amplitude", each mapping 1-based peak number -> dict with
              'val', 'min', 'max'
    typefit:  (integer) type of fit
              1 = Gaussian (default), 2 = Lorentzian, 3 = Voigt
    shift:    (integer) constant background subtracted from the counts;
              default 25

    Raises ValueError for an unknown typefit (previously an unknown value
    produced an empty model list and crashed later with IndexError).
    Writes the fit report next to the input file with a ".out" suffix.
    """
    nfits = len(initpar['center'])  # number of peaks = entries in 'center'
    start_time = time.time()

    # Import data from filename
    data = np.loadtxt(filename)
    wavenum = data[:, 0]            # x
    counts = data[:, 1] - shift     # y, background-corrected
    print("Imported data: %.2f s" % (time.time()-start_time))

    # One peak model per fit, each with a unique prefix so the parameter
    # names (e.g. "g1_center") do not clash.
    if typefit == 1:
        peak_model, tag = GaussianModel, 'g'
    elif typefit == 2:
        peak_model, tag = LorentzianModel, 'L'
    elif typefit == 3:
        peak_model, tag = VoigtModel, 'v'
    else:
        raise ValueError("typefit must be 1 (Gaussian), 2 (Lorentzian) or 3 (Voigt)")
    prefixes = [tag + str(i+1) + '_' for i in range(nfits)]
    mods = [peak_model(prefix=p) for p in prefixes]

    # Setting initial guesses in fits
    modpars = ['center', 'sigma', 'amplitude']  # adjustable parameters
    pars = mods[0].make_params()                # initiate starting parameters
    keys = []
    for peak in range(len(mods)):
        pars.update(mods[peak].make_params())
        for param in modpars:
            key = prefixes[peak] + param
            keys.append(key)
            guess = initpar[param][peak+1]      # initpar is 1-indexed by peak
            pars[key].set(guess['val'], min=guess['min'], max=guess['max'])
    print("Set initial parameters: %.2f s" % (time.time()-start_time))

    #============= Evaluating model ===========#
    model = np.sum(mods)  # composite model: sum of all single-peak models
    # initial model
    initguess = model.eval(pars, x=wavenum)
    # initial model, decomposed into constituents; dict indexed by prefixes
    initcomp = model.eval_components(params=pars, x=wavenum)
    # fit data
    out = model.fit(counts, pars, x=wavenum)
    fitpars = out.best_values  # dictionary, indexed with keys
    print("Fit model: %.2f s" % (time.time()-start_time))

    # update parameters with best fit so components can be re-evaluated
    bestpars = pars
    for key in keys:
        bestpars[key].set(fitpars[key])
    bestcomp = model.eval_components(params=bestpars, x=wavenum)
    print("Updated parameters: %.2f s" % (time.time()-start_time))

    #========= Save and print report ==========#
    repout = out.fit_report(min_correl=0.5)
    print(repout)
    # os.path.splitext handles paths like "./data/x.txt" correctly;
    # filename.split(".")[0] returned "" for such paths, writing to ".out"
    outfile = os.path.splitext(filename)[0] + ".out"
    with open(outfile, 'w') as f:  # was left open before
        f.write(repout)

    #========== Plotting results =============#
    # total fit vs. measured data
    plt.figure()
    plt.plot(wavenum, counts, label='Measured', linewidth=2.0)
    plt.plot(wavenum, initguess, 'k--', label='Initial')
    plt.plot(wavenum, out.best_fit, 'r--', label='Fit', linewidth=2.0)
    plt.legend(loc='upper right')
    plt.title("Total fits")

    # decomposed components: initial guesses (top) vs. best fits (bottom)
    fig, (axinit, axbest) = plt.subplots(2, 1, sharex=True)
    axbest.plot(wavenum, counts, label='Measured', linewidth=2.0)
    for prefix in prefixes:
        axbest.plot(wavenum, bestcomp[prefix], '--', linewidth=1.5)
    axbest.set_title("Decomposed Best Fits")
    axinit.plot(wavenum, counts, label='Measured')
    for prefix in prefixes:
        axinit.plot(wavenum, initcomp[prefix], '--', linewidth=1.5)
    axinit.set_title("Decomposed Initial Fits")

    plt.draw()
    plt.pause(0.001)
    input("Please press [enter] to continue")
|
wwwennie/spectra_fit | interpdiff.py | <gh_stars>1-10
#!/usr/bin/python
# created by: <NAME> (<EMAIL>)
# created on: 7 July 2016
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
def interpdiff(file1, file2, outfile="out-interpdiff.dat"):
    """
    Takes difference of two Raman spectra.

    Requires interpolation since wavenumbers do not exactly match
    between measured spectra; interpolated values are based on the
    x-values of file1.

    file1, file2: (string) name of files with data, two-column (x, y) format
    outfile: (string) suffix for output files; results are written to
             'cub'+outfile (cubic) and 'lin'+outfile (linear)
    """
    # Import data
    data1 = np.loadtxt(file1)
    data2 = np.loadtxt(file2)
    x1 = data1[:, 0]
    y1 = data1[:, 1]
    x2 = data2[:, 0]
    y2 = data2[:, 1]

    #== Interpolate ==#
    # NOTE(review): interp1d raises if x1 extends beyond x2's range — assumes
    # the two spectra cover the same wavenumber window; confirm with the data.
    f2lin = interp1d(x2, y2)
    f2cub = interp1d(x2, y2, kind='cubic')
    cubdiff = abs(f2cub(x1) - y1)
    lindiff = abs(f2lin(x1) - y1)

    #== Save interpolation ==#
    # context managers guarantee the files are flushed and closed
    # (both handles were previously leaked)
    with open('cub'+outfile, 'w') as c:
        for line in zip(x1, cubdiff):
            c.write(" ".join(str(x) for x in line) + "\n")
    with open('lin'+outfile, 'w') as l:
        for line in zip(x1, lindiff):
            l.write(" ".join(str(x) for x in line) + "\n")

    #========== Plotting results =============#
    # interpolation results
    plt.figure()
    plt.plot(x2, y2, label=file2, linewidth=2.0)
    plt.plot(x1, f2lin(x1), 'k--', label='Linear Interp.')
    plt.plot(x1, f2cub(x1), 'g-', label='Cubic Interp.')
    plt.legend(loc='best')  # duplicated call removed
    plt.xlabel("Wavenumber (cm^{-1})")
    plt.ylabel("Intensity (counts/s)")
    plt.title("Interpolation")

    # difference-of-spectra results
    plt.figure(2)
    plt.plot(x1, y1, 'k-.', label=file1, linewidth=0.8)
    plt.plot(x2, y2, 'k--', label=file2, linewidth=0.8)
    plt.plot(x1, cubdiff, 'g-', label='Difference, Cubic Interp.', linewidth=2.0)
    plt.legend(loc='best')
    plt.xlabel("Wavenumber (cm^{-1})")
    plt.ylabel("Intensity (counts/s)")
    plt.title("Difference")

    plt.draw()
    plt.pause(0.001)
    input("Please press [enter] to continue")
|
wwwennie/spectra_fit | smooth.py | #!/usr/bin/python
# created by: <NAME> (<EMAIL>)
# created on: 31 July 2016
import numpy as np
import matplotlib.pyplot as plt
def smooth_spectra(y, box_pts):
    """Moving-average smoothing: convolve *y* with a length-*box_pts* boxcar.

    Returns an array the same length as *y* (mode='same'); edge values are
    damped because the kernel overhangs the data there.
    """
    kernel = np.full(box_pts, 1.0 / box_pts)
    return np.convolve(y, kernel, mode='same')
def smooth(filename, outfile="out-smooth.dat"):
    """
    Smooth a spectrum so first-order derivatives and peaks are easier
    to find.

    filename, outfile: (string) name of input and output files
        (two-column x, y format; output is x vs. smoothed y)
    """
    window = 5  # window (points) for the moving-average convolution

    # Import data from filename
    data = np.loadtxt(filename)
    wavenum = data[:, 0]  # x
    counts = data[:, 1]   # y

    smoothcts = smooth_spectra(counts, window)

    #========== Save results =============#
    # context manager guarantees the file is flushed and closed (was leaked)
    with open(outfile, 'w') as f:
        for line in zip(wavenum, smoothcts):
            f.write(" ".join(str(x) for x in line) + "\n")

    #========== Plotting results =============#
    plt.figure()
    plt.plot(wavenum, counts, label='Measured', linewidth=2.0)
    plt.plot(wavenum, smoothcts, 'k--', label='Smoothed', linewidth=2.0)
    plt.legend(loc='upper right')
    plt.draw()
    plt.pause(0.001)
    input("Please press [enter] to continue")
|
tetienne/synologydsm-api | tests/test_synology_dsm.py | """Synology DSM tests."""
from unittest import TestCase
import pytest
from . import SynologyDSMMock
from . import USER_MAX_TRY
from . import VALID_HOST
from . import VALID_HTTPS
from . import VALID_OTP
from . import VALID_PASSWORD
from . import VALID_PORT
from . import VALID_USER
from . import VALID_USER_2SA
from . import VALID_VERIFY_SSL
from .const import DEVICE_TOKEN
from .const import SESSION_ID
from .const import SYNO_TOKEN
from synology_dsm.api.core.security import SynoCoreSecurity
from synology_dsm.api.dsm.information import SynoDSMInformation
from synology_dsm.const import API_AUTH
from synology_dsm.const import API_INFO
from synology_dsm.exceptions import SynologyDSMAPIErrorException
from synology_dsm.exceptions import SynologyDSMAPINotExistsException
from synology_dsm.exceptions import SynologyDSMLogin2SAFailedException
from synology_dsm.exceptions import SynologyDSMLogin2SARequiredException
from synology_dsm.exceptions import SynologyDSMLoginFailedException
from synology_dsm.exceptions import SynologyDSMLoginInvalidException
from synology_dsm.exceptions import SynologyDSMRequestException
class TestSynologyDSM(TestCase):
"""SynologyDSM test cases."""
api = None
def setUp(self):
    """Context initialisation called for all tests."""
    # A fresh mock client per test so state (session ids, cached API maps)
    # never leaks between test cases.
    self.api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        VALID_USER,
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
    )
def test_init(self):
    """Test init."""
    assert self.api.username
    assert self.api._base_url
    # default request timeout is 10 seconds
    assert self.api._timeout == 10
    # before login() the auth API map and session id are unset
    assert not self.api.apis.get(API_AUTH)
    assert not self.api._session_id
def test_connection_failed(self):
    """Test failed connection.

    Each scenario builds a fresh mock with one bad connection parameter and
    checks that login() raises SynologyDSMRequestException with the generic
    code -1 / reason "Unknown" and a scenario-specific details string, and
    that no auth API map or session id is stored.
    """
    # No internet
    api = SynologyDSMMock(
        "no_internet",
        VALID_PORT,
        VALID_USER,
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
    )
    with pytest.raises(SynologyDSMRequestException) as error:
        api.login()
    error_value = error.value.args[0]
    assert not error_value["api"]
    assert error_value["code"] == -1
    assert error_value["reason"] == "Unknown"
    assert (
        "ConnectionError = <urllib3.connection.VerifiedHTTPSConnection "
        in error_value["details"]
    )
    assert not api.apis.get(API_AUTH)
    assert not api._session_id

    # Wrong host
    api = SynologyDSMMock(
        "host",
        VALID_PORT,
        VALID_USER,
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
    )
    with pytest.raises(SynologyDSMRequestException) as error:
        api.login()
    error_value = error.value.args[0]
    assert not error_value["api"]
    assert error_value["code"] == -1
    assert error_value["reason"] == "Unknown"
    assert (
        "ConnectionError = <urllib3.connection.HTTPConnection "
        in error_value["details"]
    )
    assert not api.apis.get(API_AUTH)
    assert not api._session_id

    # Wrong port
    api = SynologyDSMMock(
        VALID_HOST, 0, VALID_USER, VALID_PASSWORD, VALID_HTTPS, VALID_VERIFY_SSL
    )
    with pytest.raises(SynologyDSMRequestException) as error:
        api.login()
    error_value = error.value.args[0]
    assert not error_value["api"]
    assert error_value["code"] == -1
    assert error_value["reason"] == "Unknown"
    assert error_value["details"] == (
        "SSLError = [SSL: WRONG_VERSION_NUMBER] "
        "wrong version number (_ssl.c:1076)"
    )
    assert not api.apis.get(API_AUTH)
    assert not api._session_id

    # Wrong HTTPS
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        VALID_USER,
        VALID_PASSWORD,
        False,
        VALID_VERIFY_SSL,
    )
    with pytest.raises(SynologyDSMRequestException) as error:
        api.login()
    error_value = error.value.args[0]
    assert not error_value["api"]
    assert error_value["code"] == -1
    assert error_value["reason"] == "Unknown"
    assert error_value["details"] == "RequestException = Bad request"
    assert not api.apis.get(API_AUTH)
    assert not api._session_id

    # Wrong SSL
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        VALID_USER,
        VALID_PASSWORD,
        VALID_HTTPS,
        False,
    )
    with pytest.raises(SynologyDSMRequestException) as error:
        api.login()
    error_value = error.value.args[0]
    assert not error_value["api"]
    assert error_value["code"] == -1
    assert error_value["reason"] == "Unknown"
    assert (
        error_value["details"]
        == f"SSLError = hostname '192.168.0.35' doesn't match '{VALID_HOST}'"
    )
    assert not api.apis.get(API_AUTH)
    assert not api._session_id
def test_login(self):
    """Test login."""
    assert self.api.login()
    assert self.api.apis.get(API_AUTH)
    # a successful login stores both the session id and the SynoToken
    assert self.api._session_id == SESSION_ID
    assert self.api._syno_token == SYNO_TOKEN
def test_login_failed(self):
    """Test failed login."""
    # Wrong username: DSM reports code 400 "Invalid credentials"
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        "user",
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
    )
    with pytest.raises(SynologyDSMLoginInvalidException) as error:
        api.login()
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.API.Auth"
    assert error_value["code"] == 400
    assert error_value["reason"] == "Invalid credentials"
    assert error_value["details"] == "Invalid password or not admin account: user"
    assert api.apis.get(API_AUTH)
    assert not api._session_id

    # Wrong password: same error, details name the attempted account
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        VALID_USER,
        "pass",
        VALID_HTTPS,
        VALID_VERIFY_SSL,
    )
    with pytest.raises(SynologyDSMLoginInvalidException) as error:
        api.login()
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.API.Auth"
    assert error_value["code"] == 400
    assert error_value["reason"] == "Invalid credentials"
    assert (
        error_value["details"]
        == "Invalid password or not admin account: valid_user"
    )
    assert api.apis.get(API_AUTH)
    assert not api._session_id
def test_login_2sa(self):
    """Test login with 2SA."""
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        VALID_USER_2SA,
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
    )
    # First attempt without an OTP must raise code 403
    with pytest.raises(SynologyDSMLogin2SARequiredException) as error:
        api.login()
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.API.Auth"
    assert error_value["code"] == 403
    assert error_value["reason"] == "One time password not specified"
    assert (
        error_value["details"]
        == "Two-step authentication required for account: valid_user_2sa"
    )

    # Retrying with the one-time password succeeds and stores the tokens
    assert api.login(VALID_OTP)

    assert api._session_id == SESSION_ID
    assert api._syno_token == SYNO_TOKEN
    assert api._device_token == DEVICE_TOKEN
    assert api.device_token == DEVICE_TOKEN
def test_login_2sa_new_session(self):
    """Test login with 2SA and a new session with granted device."""
    # Supplying a stored device_token lets login() skip the OTP prompt
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        VALID_USER_2SA,
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
        device_token=DEVICE_TOKEN,
    )
    assert api.login()

    assert api._session_id == SESSION_ID
    assert api._syno_token == SYNO_TOKEN
    assert api._device_token == DEVICE_TOKEN
    assert api.device_token == DEVICE_TOKEN
def test_login_2sa_failed(self):
    """Test failed login with 2SA."""
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        VALID_USER_2SA,
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
    )
    # No OTP given: code 403, 2SA required
    with pytest.raises(SynologyDSMLogin2SARequiredException) as error:
        api.login()
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.API.Auth"
    assert error_value["code"] == 403
    assert error_value["reason"] == "One time password not specified"
    assert (
        error_value["details"]
        == "Two-step authentication required for account: valid_user_2sa"
    )

    # Wrong OTP given: code 404, authentication failed
    with pytest.raises(SynologyDSMLogin2SAFailedException) as error:
        api.login(888888)
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.API.Auth"
    assert error_value["code"] == 404
    assert error_value["reason"] == "One time password authenticate failed"
    assert (
        error_value["details"]
        == "Two-step authentication failed, retry with a new pass code"
    )

    # No partial state must survive the failed attempts
    assert api._session_id is None
    assert api._syno_token is None
    assert api._device_token is None
def test_login_basic_failed(self):
    """Test basic failed login."""
    # USER_MAX_TRY triggers DSM's auto-block response (code 407)
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        USER_MAX_TRY,
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
    )
    with pytest.raises(SynologyDSMLoginFailedException) as error:
        api.login()
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.API.Auth"
    assert error_value["code"] == 407
    assert error_value["reason"] == "Max Tries (if auto blocking is set to true)"
    assert error_value["details"] == USER_MAX_TRY
def test_request_timeout(self):
    """Test request timeout."""
    # The timeout keyword must override the default of 10 seconds
    api = SynologyDSMMock(
        VALID_HOST,
        VALID_PORT,
        VALID_USER,
        VALID_PASSWORD,
        VALID_HTTPS,
        VALID_VERIFY_SSL,
        timeout=2,
    )
    assert api._timeout == 2
def test_request_get(self):
    """Test get request."""
    # Exercise a full session: info query, login, an API call, logout
    assert self.api.get(API_INFO, "query")
    assert self.api.get(API_AUTH, "login")
    assert self.api.get("SYNO.DownloadStation2.Task", "list")
    assert self.api.get(API_AUTH, "logout")
def test_request_get_failed(self):
    """Test failed get request."""
    # Requesting an API the DSM does not expose raises with code -2
    with pytest.raises(SynologyDSMAPINotExistsException) as error:
        self.api.get("SYNO.Virtualization.API.Task.Info", "list")
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.Virtualization.API.Task.Info"
    assert error_value["code"] == -2
    assert error_value["reason"] == "Unknown"
    assert (
        error_value["details"]
        == "API SYNO.Virtualization.API.Task.Info does not exists"
    )
def test_request_post(self):
    """Test post request."""
    # File upload with form params and a file payload
    assert self.api.post(
        "SYNO.FileStation.Upload",
        "upload",
        params={"dest_folder_path": "/upload/test", "create_parents": True},
        files={"file": "open('file.txt','rb')"},
    )
    # Download-task creation with credentials in the params
    assert self.api.post(
        "SYNO.DownloadStation2.Task",
        "create",
        params={
            "uri": "ftps://192.0.0.1:21/test/test.zip",
            "username": "admin",
            "password": "<PASSWORD>",
        },
    )
def test_request_post_failed(self):
    """Test failed post request."""
    # Uploading a file that already exists: FileStation error 1805
    with pytest.raises(SynologyDSMAPIErrorException) as error:
        self.api.post(
            "SYNO.FileStation.Upload",
            "upload",
            params={"dest_folder_path": "/upload/test", "create_parents": True},
            files={"file": "open('file_already_exists.txt','rb')"},
        )
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.FileStation.Upload"
    assert error_value["code"] == 1805
    assert error_value["reason"] == (
        "Can’t overwrite or skip the existed file, if no overwrite"
        " parameter is given"
    )
    assert not error_value["details"]

    # Creating a download task for a missing remote file: error 408
    with pytest.raises(SynologyDSMAPIErrorException) as error:
        self.api.post(
            "SYNO.DownloadStation2.Task",
            "create",
            params={
                "uri": "ftps://192.0.0.1:21/test/test_not_exists.zip",
                "username": "admin",
                "password": "<PASSWORD>",
            },
        )
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.DownloadStation2.Task"
    assert error_value["code"] == 408
    assert error_value["reason"] == "File does not exist"
    assert not error_value["details"]
def test_reset_str_attr(self):
    """Test reset with string attr."""
    # Accessing the property lazily populates the private cache,
    # and reset("security") clears it again.
    assert not self.api._security
    assert self.api.security
    assert self.api._security
    assert self.api.reset("security")
    assert not self.api._security
def test_reset_str_key(self):
    """Test reset with string API key."""
    # Same as test_reset_str_attr, but using the API key constant
    assert not self.api._security
    assert self.api.security
    assert self.api._security
    assert self.api.reset(SynoCoreSecurity.API_KEY)
    assert not self.api._security
def test_reset_object(self):
    """Test reset with object."""
    # reset() also accepts the cached API object itself
    assert not self.api._security
    assert self.api.security
    assert self.api._security
    assert self.api.reset(self.api.security)
    assert not self.api._security
def test_reset_str_attr_information(self):
    """Test reset with string information attr (should not be reset)."""
    # "information" is exempt from reset(): reset returns falsy and the
    # cached object survives.
    assert not self.api._information
    assert self.api.information
    assert self.api._information
    assert not self.api.reset("information")
    assert self.api._information
def test_reset_str_key_information(self):
    """Test reset with string information API key (should not be reset)."""
    # Same exemption as above, addressed by API key constant
    assert not self.api._information
    assert self.api.information
    assert self.api._information
    assert not self.api.reset(SynoDSMInformation.API_KEY)
    assert self.api._information
def test_reset_object_information(self):
    """Test reset with information object (should not be reset)."""
    # Same exemption as above, addressed by the cached object itself
    assert not self.api._information
    assert self.api.information
    assert self.api._information
    assert not self.api.reset(self.api.information)
    assert self.api._information
def test_information(self):
    """Test information."""
    # Values below match the fixture data served by SynologyDSMMock
    assert self.api.information
    self.api.information.update()
    assert self.api.information.model == "DS918+"
    assert self.api.information.ram == 4096
    assert self.api.information.serial == "1920PDN001501"
    assert self.api.information.temperature == 40
    assert not self.api.information.temperature_warn
    assert self.api.information.uptime == 155084
    assert self.api.information.version == "24922"
    assert self.api.information.version_string == "DSM 6.2.2-24922 Update 4"
def test_network(self):
    """Test network."""
    assert self.api.network
    self.api.network.update()
    assert self.api.network.dns
    assert self.api.network.gateway
    assert self.api.network.hostname
    assert self.api.network.interfaces
    # the mock exposes two interfaces
    assert self.api.network.interface("eth0")
    assert self.api.network.interface("eth1")
    assert self.api.network.macs
    assert self.api.network.workgroup
def test_security(self):
    """Test security, safe status."""
    assert self.api.security
    self.api.security.update()
    assert self.api.security.checks
    assert self.api.security.last_scan_time
    assert not self.api.security.start_time  # Finished scan
    assert self.api.security.success
    assert self.api.security.progress
    # every security-advisor category reports "safe" in this fixture
    assert self.api.security.status == "safe"
    assert self.api.security.status_by_check
    assert self.api.security.status_by_check["malware"] == "safe"
    assert self.api.security.status_by_check["network"] == "safe"
    assert self.api.security.status_by_check["securitySetting"] == "safe"
    assert self.api.security.status_by_check["systemCheck"] == "safe"
    assert self.api.security.status_by_check["update"] == "safe"
    assert self.api.security.status_by_check["userInfo"] == "safe"
def test_security_error(self):
    """Test security, outOfDate status."""
    # Switch the mock to its error fixture: the "update" check degrades
    # to "outOfDate" and drags the overall status down with it.
    self.api.error = True
    assert self.api.security
    self.api.security.update()
    assert self.api.security.checks
    assert self.api.security.last_scan_time
    assert not self.api.security.start_time  # Finished scan
    assert self.api.security.success
    assert self.api.security.progress
    assert self.api.security.status == "outOfDate"
    assert self.api.security.status_by_check
    assert self.api.security.status_by_check["malware"] == "safe"
    assert self.api.security.status_by_check["network"] == "safe"
    assert self.api.security.status_by_check["securitySetting"] == "safe"
    assert self.api.security.status_by_check["systemCheck"] == "safe"
    assert self.api.security.status_by_check["update"] == "outOfDate"
    assert self.api.security.status_by_check["userInfo"] == "safe"
def test_shares(self):
    """Test shares."""
    assert self.api.share
    self.api.share.update()
    assert self.api.share.shares
    # Generic getters work for every share UUID exposed by the mock.
    for share_uuid in self.api.share.shares_uuids:
        assert self.api.share.share_name(share_uuid)
        assert self.api.share.share_path(share_uuid)
        assert self.api.share.share_recycle_bin(share_uuid) is not None
        assert self.api.share.share_size(share_uuid) is not None
        assert self.api.share.share_size(share_uuid, human_readable=True)
    # Spot-check one known share from the fixture data.
    assert (
        self.api.share.share_name("2ee6c06a-8766-48b5-013d-63b18652a393")
        == "test_share"
    )
    assert (
        self.api.share.share_path("2ee6c06a-8766-48b5-013d-63b18652a393")
        == "/volume1"
    )
    assert (
        self.api.share.share_recycle_bin("2ee6c06a-8766-48b5-013d-63b18652a393")
        is True
    )
    assert (
        self.api.share.share_size("2ee6c06a-8766-48b5-013d-63b18652a393")
        == 3.790251876432216e19
    )
    assert (
        self.api.share.share_size("2ee6c06a-8766-48b5-013d-63b18652a393", True)
        == "32.9Eb"
    )
def test_system(self):
    """Test system."""
    assert self.api.system
    self.api.system.update()
    assert self.api.system.cpu_clock_speed
    assert self.api.system.cpu_cores
    assert self.api.system.cpu_family
    assert self.api.system.cpu_series
    assert self.api.system.firmware_ver
    assert self.api.system.model
    assert self.api.system.ram_size
    assert self.api.system.serial
    assert self.api.system.sys_temp
    assert self.api.system.time
    assert self.api.system.time_zone
    assert self.api.system.time_zone_desc
    assert self.api.system.up_time
    # Every USB device record in the fixture exposes all expected fields.
    for usb_dev in self.api.system.usb_dev:
        assert usb_dev.get("cls")
        assert usb_dev.get("pid")
        assert usb_dev.get("producer")
        assert usb_dev.get("product")
        assert usb_dev.get("rev")
        assert usb_dev.get("vid")
def test_upgrade(self):
    """Test upgrade."""
    assert self.api.upgrade
    self.api.upgrade.update()
    # The mock fixture advertises a pending DSM upgrade.
    assert self.api.upgrade.update_available
    assert self.api.upgrade.available_version == "DSM 6.2.3-25426 Update 2"
    assert self.api.upgrade.reboot_needed == "now"
    assert self.api.upgrade.service_restarts == "some"
def test_utilisation(self):
    """Test utilisation."""
    assert self.api.utilisation
    # update() must succeed without raising.
    self.api.utilisation.update()
def test_utilisation_error(self):
    """Test utilisation error."""
    # Switch the mock into its error fixture; update() must then raise.
    self.api.error = True
    with pytest.raises(SynologyDSMAPIErrorException) as error:
        self.api.utilisation.update()
    # The raised exception carries the raw API error payload as first arg.
    error_value = error.value.args[0]
    assert error_value["api"] == "SYNO.Core.System.Utilization"
    assert error_value["code"] == 1055
    assert error_value["reason"] == "Unknown"
    assert error_value["details"] == {
        "err_key": "",
        "err_line": 883,
        "err_msg": "Transmition failed.",
        "err_session": "",
    }
def test_utilisation_cpu(self):
    """Test utilisation CPU."""
    self.api.utilisation.update()
    assert self.api.utilisation.cpu
    # Every exposed CPU load metric should be truthy in the mock data.
    load_metrics = (
        "cpu_other_load",
        "cpu_user_load",
        "cpu_system_load",
        "cpu_total_load",
        "cpu_1min_load",
        "cpu_5min_load",
        "cpu_15min_load",
    )
    for metric in load_metrics:
        assert getattr(self.api.utilisation, metric)
def test_utilisation_memory(self):
    """Test utilisation memory."""
    self.api.utilisation.update()
    assert self.api.utilisation.memory
    assert self.api.utilisation.memory_real_usage
    # Each memory getter must be truthy both as raw value and in
    # human-readable form.
    getters = (
        "memory_size",
        "memory_available_swap",
        "memory_cached",
        "memory_available_real",
        "memory_total_real",
        "memory_total_swap",
    )
    for getter in getters:
        assert getattr(self.api.utilisation, getter)()
        assert getattr(self.api.utilisation, getter)(True)
def test_utilisation_network(self):
    """Test utilisation network."""
    self.api.utilisation.update()
    assert self.api.utilisation.network
    # Throughput getters must be truthy in raw and human-readable form.
    for getter in ("network_up", "network_down"):
        assert getattr(self.api.utilisation, getter)()
        assert getattr(self.api.utilisation, getter)(True)
def test_storage(self):
    """Test storage roots."""
    assert self.api.storage
    self.api.storage.update()
    # All four top-level storage collections are populated in the fixture.
    assert self.api.storage.disks
    assert self.api.storage.env
    assert self.api.storage.storage_pools
    assert self.api.storage.volumes
def test_storage_raid_volumes(self):
    """Test RAID storage volumes."""
    self.api.storage.update()
    # Basics
    assert self.api.storage.volumes_ids
    for volume_id in self.api.storage.volumes_ids:
        # "test_volume" is a deliberately incomplete fixture entry whose
        # getters return None; it is checked explicitly below.
        if volume_id == "test_volume":
            continue
        assert self.api.storage.volume_status(volume_id)
        assert self.api.storage.volume_device_type(volume_id)
        assert self.api.storage.volume_size_total(volume_id)
        assert self.api.storage.volume_size_total(volume_id, True)
        assert self.api.storage.volume_size_used(volume_id)
        assert self.api.storage.volume_size_used(volume_id, True)
        assert self.api.storage.volume_percentage_used(volume_id)
        assert self.api.storage.volume_disk_temp_avg(volume_id)
        assert self.api.storage.volume_disk_temp_max(volume_id)
    # Existing volume
    assert self.api.storage.volume_status("volume_1") == "normal"
    assert self.api.storage.volume_device_type("volume_1") == "raid_5"
    assert self.api.storage.volume_size_total("volume_1") == 7672030584832
    assert self.api.storage.volume_size_total("volume_1", True) == "7.0Tb"
    assert self.api.storage.volume_size_used("volume_1") == 4377452806144
    assert self.api.storage.volume_size_used("volume_1", True) == "4.0Tb"
    assert self.api.storage.volume_percentage_used("volume_1") == 57.1
    assert self.api.storage.volume_disk_temp_avg("volume_1") == 24.0
    assert self.api.storage.volume_disk_temp_max("volume_1") == 24
    # Non existing volume
    assert not self.api.storage.volume_status("not_a_volume")
    assert not self.api.storage.volume_device_type("not_a_volume")
    assert not self.api.storage.volume_size_total("not_a_volume")
    assert not self.api.storage.volume_size_total("not_a_volume", True)
    assert not self.api.storage.volume_size_used("not_a_volume")
    assert not self.api.storage.volume_size_used("not_a_volume", True)
    assert not self.api.storage.volume_percentage_used("not_a_volume")
    assert not self.api.storage.volume_disk_temp_avg("not_a_volume")
    assert not self.api.storage.volume_disk_temp_max("not_a_volume")
    # Test volume
    assert self.api.storage.volume_status("test_volume") is None
    assert self.api.storage.volume_device_type("test_volume") is None
    assert self.api.storage.volume_size_total("test_volume") is None
    assert self.api.storage.volume_size_total("test_volume", True) is None
    assert self.api.storage.volume_size_used("test_volume") is None
    assert self.api.storage.volume_size_used("test_volume", True) is None
    assert self.api.storage.volume_percentage_used("test_volume") is None
    assert self.api.storage.volume_disk_temp_avg("test_volume") is None
    assert self.api.storage.volume_disk_temp_max("test_volume") is None
def test_storage_shr_volumes(self):
    """Test SHR storage volumes."""
    # Switch the mock to its single-redundancy SHR fixture.
    self.api.disks_redundancy = "SHR1"
    self.api.storage.update()
    # Basics
    assert self.api.storage.volumes_ids
    for volume_id in self.api.storage.volumes_ids:
        # "test_volume" is an incomplete fixture entry, verified below.
        if volume_id == "test_volume":
            continue
        assert self.api.storage.volume_status(volume_id)
        assert self.api.storage.volume_device_type(volume_id)
        assert self.api.storage.volume_size_total(volume_id)
        assert self.api.storage.volume_size_total(volume_id, True)
        assert self.api.storage.volume_size_used(volume_id)
        assert self.api.storage.volume_size_used(volume_id, True)
        assert self.api.storage.volume_percentage_used(volume_id)
        assert self.api.storage.volume_disk_temp_avg(volume_id)
        assert self.api.storage.volume_disk_temp_max(volume_id)
    # Existing volume
    assert self.api.storage.volume_status("volume_1") == "normal"
    assert (
        self.api.storage.volume_device_type("volume_1")
        == "shr_without_disk_protect"
    )
    assert self.api.storage.volume_size_total("volume_1") == 2948623499264
    assert self.api.storage.volume_size_total("volume_1", True) == "2.7Tb"
    assert self.api.storage.volume_size_used("volume_1") == 2710796488704
    assert self.api.storage.volume_size_used("volume_1", True) == "2.5Tb"
    assert self.api.storage.volume_percentage_used("volume_1") == 91.9
    assert self.api.storage.volume_disk_temp_avg("volume_1") == 29.0
    assert self.api.storage.volume_disk_temp_max("volume_1") == 29
    assert self.api.storage.volume_status("volume_2") == "normal"
    assert (
        self.api.storage.volume_device_type("volume_2")
        == "shr_without_disk_protect"
    )
    assert self.api.storage.volume_size_total("volume_2") == 1964124495872
    assert self.api.storage.volume_size_total("volume_2", True) == "1.8Tb"
    assert self.api.storage.volume_size_used("volume_2") == 1684179374080
    assert self.api.storage.volume_size_used("volume_2", True) == "1.5Tb"
    assert self.api.storage.volume_percentage_used("volume_2") == 85.7
    assert self.api.storage.volume_disk_temp_avg("volume_2") == 30.0
    assert self.api.storage.volume_disk_temp_max("volume_2") == 30
    # Non existing volume
    assert not self.api.storage.volume_status("not_a_volume")
    assert not self.api.storage.volume_device_type("not_a_volume")
    assert not self.api.storage.volume_size_total("not_a_volume")
    assert not self.api.storage.volume_size_total("not_a_volume", True)
    assert not self.api.storage.volume_size_used("not_a_volume")
    assert not self.api.storage.volume_size_used("not_a_volume", True)
    assert not self.api.storage.volume_percentage_used("not_a_volume")
    assert not self.api.storage.volume_disk_temp_avg("not_a_volume")
    assert not self.api.storage.volume_disk_temp_max("not_a_volume")
    # Test volume
    assert self.api.storage.volume_status("test_volume") is None
    assert self.api.storage.volume_device_type("test_volume") is None
    assert self.api.storage.volume_size_total("test_volume") is None
    assert self.api.storage.volume_size_total("test_volume", True) is None
    assert self.api.storage.volume_size_used("test_volume") is None
    assert self.api.storage.volume_size_used("test_volume", True) is None
    assert self.api.storage.volume_percentage_used("test_volume") is None
    assert self.api.storage.volume_disk_temp_avg("test_volume") is None
    assert self.api.storage.volume_disk_temp_max("test_volume") is None
def test_storage_shr2_volumes(self):
    """Test SHR2 storage volumes."""
    # Switch the mock to its dual-redundancy SHR2 fixture.
    self.api.disks_redundancy = "SHR2"
    self.api.storage.update()
    # Basics
    assert self.api.storage.volumes_ids
    for volume_id in self.api.storage.volumes_ids:
        assert self.api.storage.volume_status(volume_id)
        assert self.api.storage.volume_device_type(volume_id)
        assert self.api.storage.volume_size_total(volume_id)
        assert self.api.storage.volume_size_total(volume_id, True)
        assert self.api.storage.volume_size_used(volume_id)
        assert self.api.storage.volume_size_used(volume_id, True)
        assert self.api.storage.volume_percentage_used(volume_id)
        assert self.api.storage.volume_disk_temp_avg(volume_id)
        assert self.api.storage.volume_disk_temp_max(volume_id)
    # Existing volume
    assert self.api.storage.volume_status("volume_1") == "normal"
    assert (
        self.api.storage.volume_device_type("volume_1") == "shr_with_2_disk_protect"
    )
    assert self.api.storage.volume_size_total("volume_1") == 38378964738048
    assert self.api.storage.volume_size_total("volume_1", True) == "34.9Tb"
    assert self.api.storage.volume_size_used("volume_1") == 26724878606336
    assert self.api.storage.volume_size_used("volume_1", True) == "24.3Tb"
    assert self.api.storage.volume_percentage_used("volume_1") == 69.6
    assert self.api.storage.volume_disk_temp_avg("volume_1") == 37.0
    assert self.api.storage.volume_disk_temp_max("volume_1") == 41
def test_storage_shr2_expansion_volumes(self):
    """Test SHR2 storage with expansion unit volumes."""
    # Switch the mock to its SHR2-with-expansion-unit fixture.
    self.api.disks_redundancy = "SHR2_EXPANSION"
    self.api.storage.update()
    # Basics
    assert self.api.storage.volumes_ids
    for volume_id in self.api.storage.volumes_ids:
        assert self.api.storage.volume_status(volume_id)
        assert self.api.storage.volume_device_type(volume_id)
        assert self.api.storage.volume_size_total(volume_id)
        assert self.api.storage.volume_size_total(volume_id, True)
        assert self.api.storage.volume_size_used(volume_id)
        assert self.api.storage.volume_size_used(volume_id, True)
        assert self.api.storage.volume_percentage_used(volume_id)
        assert self.api.storage.volume_disk_temp_avg(volume_id)
        assert self.api.storage.volume_disk_temp_max(volume_id)
    # Existing volume
    assert self.api.storage.volume_status("volume_1") == "normal"
    assert (
        self.api.storage.volume_device_type("volume_1") == "shr_with_2_disk_protect"
    )
    assert self.api.storage.volume_size_total("volume_1") == 31714659872768
    assert self.api.storage.volume_size_total("volume_1", True) == "28.8Tb"
    assert self.api.storage.volume_size_used("volume_1") == 25419707531264
    assert self.api.storage.volume_size_used("volume_1", True) == "23.1Tb"
    assert self.api.storage.volume_percentage_used("volume_1") == 80.2
    assert self.api.storage.volume_disk_temp_avg("volume_1") == 33.0
    assert self.api.storage.volume_disk_temp_max("volume_1") == 35
def test_storage_disks(self):
    """Test storage disks."""
    self.api.storage.update()
    # Basics
    assert self.api.storage.disks_ids
    for disk_id in self.api.storage.disks_ids:
        # "test_disk" is an incomplete fixture entry, verified below.
        if disk_id == "test_disk":
            continue
        assert "Drive" in self.api.storage.disk_name(disk_id)
        assert "/dev/" in self.api.storage.disk_device(disk_id)
        assert self.api.storage.disk_smart_status(disk_id) == "normal"
        assert self.api.storage.disk_status(disk_id) == "normal"
        assert not self.api.storage.disk_exceed_bad_sector_thr(disk_id)
        assert not self.api.storage.disk_below_remain_life_thr(disk_id)
        assert self.api.storage.disk_temp(disk_id)
    # Non existing disk
    assert not self.api.storage.disk_name("not_a_disk")
    assert not self.api.storage.disk_device("not_a_disk")
    assert not self.api.storage.disk_smart_status("not_a_disk")
    assert not self.api.storage.disk_status("not_a_disk")
    assert not self.api.storage.disk_exceed_bad_sector_thr("not_a_disk")
    assert not self.api.storage.disk_below_remain_life_thr("not_a_disk")
    assert not self.api.storage.disk_temp("not_a_disk")
    # Test disk
    assert self.api.storage.disk_name("test_disk") is None
    assert self.api.storage.disk_device("test_disk") is None
    assert self.api.storage.disk_smart_status("test_disk") is None
    assert self.api.storage.disk_status("test_disk") is None
    assert self.api.storage.disk_exceed_bad_sector_thr("test_disk") is None
    assert self.api.storage.disk_below_remain_life_thr("test_disk") is None
    assert self.api.storage.disk_temp("test_disk") is None
def test_download_station(self):
    """Test DownloadStation."""
    assert self.api.download_station
    # No tasks cached before the first update().
    assert not self.api.download_station.get_all_tasks()
    assert self.api.download_station.get_info()["data"]["version"]
    assert self.api.download_station.get_config()["data"]["default_destination"]
    assert self.api.download_station.get_stat()["data"]["speed_download"]
    self.api.download_station.update()
    assert self.api.download_station.get_all_tasks()
    assert len(self.api.download_station.get_all_tasks()) == 8
    # BT DL
    assert self.api.download_station.get_task("dbid_86").status == "downloading"
    assert not self.api.download_station.get_task("dbid_86").status_extra
    assert self.api.download_station.get_task("dbid_86").type == "bt"
    assert self.api.download_station.get_task("dbid_86").additional.get("file")
    assert (
        len(self.api.download_station.get_task("dbid_86").additional.get("file"))
        == 9
    )
    # HTTPS error
    assert self.api.download_station.get_task("dbid_549").status == "error"
    assert (
        self.api.download_station.get_task("dbid_549").status_extra["error_detail"]
        == "broken_link"
    )
    assert self.api.download_station.get_task("dbid_549").type == "https"
def test_surveillance_station(self):
    """Test SurveillanceStation."""
    # Enable the SurveillanceStation part of the mock.
    self.api.with_surveillance = True
    assert self.api.surveillance_station
    # No cameras cached before the first update().
    assert not self.api.surveillance_station.get_all_cameras()
    self.api.surveillance_station.update()
    assert self.api.surveillance_station.get_all_cameras()
    assert self.api.surveillance_station.get_camera(1)
    assert self.api.surveillance_station.get_camera_live_view_path(1)
    assert self.api.surveillance_station.get_camera_live_view_path(1, "rtsp")
    # Motion detection
    assert self.api.surveillance_station.enable_motion_detection(1).get("success")
    assert self.api.surveillance_station.disable_motion_detection(1).get("success")
    # Home mode
    assert self.api.surveillance_station.get_home_mode_status()
    assert self.api.surveillance_station.set_home_mode(False)
    assert self.api.surveillance_station.set_home_mode(True)
|
tetienne/synologydsm-api | tests/test_synology_dsm_5.py | """Synology DSM tests."""
from unittest import TestCase
from . import SynologyDSMMock
from . import VALID_HOST
from . import VALID_HTTPS
from . import VALID_OTP
from . import VALID_PASSWORD
from . import VALID_PORT
from . import VALID_USER
from . import VALID_USER_2SA
from . import VALID_VERIFY_SSL
from .const import DEVICE_TOKEN
from .const import SESSION_ID
from synology_dsm.const import API_AUTH
from synology_dsm.const import API_INFO
from synology_dsm.exceptions import SynologyDSMAPIErrorException
from synology_dsm.exceptions import SynologyDSMAPINotExistsException
from synology_dsm.exceptions import SynologyDSMLogin2SAFailedException
from synology_dsm.exceptions import SynologyDSMLogin2SARequiredException
from synology_dsm.exceptions import SynologyDSMLoginInvalidException
from synology_dsm.exceptions import SynologyDSMRequestException
class TestSynologyDSM(TestCase):
    """SynologyDSM test cases against a mocked DSM 5 device."""

    # Shared mock API handle, re-created for every test in setUp().
    api = None

    def setUp(self):
        """Context initialisation called for all tests."""
        self.api = SynologyDSMMock(
            VALID_HOST,
            VALID_PORT,
            VALID_USER,
            VALID_PASSWORD,
            VALID_HTTPS,
            VALID_VERIFY_SSL,
        )
        # All tests in this module exercise DSM version 5 behaviour.
        self.api.dsm_version = 5

    def test_init(self):
        """Test init."""
        assert self.api.username
        assert self.api._base_url
        # No authentication has happened yet.
        assert not self.api.apis.get(API_AUTH)
        assert not self.api._session_id

    def test_connection_failed(self):
        """Test failed connection."""
        # Unreachable host.
        api = SynologyDSMMock(
            "no_internet",
            VALID_PORT,
            VALID_USER,
            VALID_PASSWORD,
            VALID_HTTPS,
            VALID_VERIFY_SSL,
        )
        api.dsm_version = 5
        with self.assertRaises(SynologyDSMRequestException):
            assert not api.login()
        assert not api.apis.get(API_AUTH)
        assert not api._session_id
        # Invalid host name.
        api = SynologyDSMMock(
            "host",
            VALID_PORT,
            VALID_USER,
            VALID_PASSWORD,
            VALID_HTTPS,
            VALID_VERIFY_SSL,
        )
        api.dsm_version = 5
        with self.assertRaises(SynologyDSMRequestException):
            assert not api.login()
        assert not api.apis.get(API_AUTH)
        assert not api._session_id
        # Invalid port (0).
        api = SynologyDSMMock(
            VALID_HOST, 0, VALID_USER, VALID_PASSWORD, VALID_HTTPS, VALID_VERIFY_SSL
        )
        api.dsm_version = 5
        with self.assertRaises(SynologyDSMRequestException):
            assert not api.login()
        assert not api.apis.get(API_AUTH)
        assert not api._session_id
        # HTTPS flag disabled.
        api = SynologyDSMMock(
            VALID_HOST,
            VALID_PORT,
            VALID_USER,
            VALID_PASSWORD,
            False,
            VALID_VERIFY_SSL,
        )
        api.dsm_version = 5
        with self.assertRaises(SynologyDSMRequestException):
            assert not api.login()
        assert not api.apis.get(API_AUTH)
        assert not api._session_id

    def test_login(self):
        """Test login."""
        assert self.api.login()
        assert self.api.apis.get(API_AUTH)
        assert self.api._session_id == SESSION_ID
        assert self.api._syno_token is None

    def test_login_failed(self):
        """Test failed login."""
        # Wrong username.
        api = SynologyDSMMock(
            VALID_HOST,
            VALID_PORT,
            "user",
            VALID_PASSWORD,
            VALID_HTTPS,
            VALID_VERIFY_SSL,
        )
        api.dsm_version = 5
        with self.assertRaises(SynologyDSMLoginInvalidException):
            assert not api.login()
        assert api.apis.get(API_AUTH)
        assert not api._session_id
        # Wrong password.
        api = SynologyDSMMock(
            VALID_HOST,
            VALID_PORT,
            VALID_USER,
            "pass",
            VALID_HTTPS,
            VALID_VERIFY_SSL,
        )
        api.dsm_version = 5
        with self.assertRaises(SynologyDSMLoginInvalidException):
            assert not api.login()
        assert api.apis.get(API_AUTH)
        assert not api._session_id

    def test_login_2sa(self):
        """Test login with 2SA."""
        api = SynologyDSMMock(
            VALID_HOST,
            VALID_PORT,
            VALID_USER_2SA,
            VALID_PASSWORD,
            VALID_HTTPS,
            VALID_VERIFY_SSL,
        )
        api.dsm_version = 5
        # First login without OTP must request the second factor.
        with self.assertRaises(SynologyDSMLogin2SARequiredException):
            api.login()
        api.login(VALID_OTP)
        assert api._session_id == SESSION_ID
        assert api._syno_token is None
        assert api._device_token == DEVICE_TOKEN
        assert api.device_token == DEVICE_TOKEN

    def test_login_2sa_new_session(self):
        """Test login with 2SA and a new session with granted device."""
        api = SynologyDSMMock(
            VALID_HOST,
            VALID_PORT,
            VALID_USER_2SA,
            VALID_PASSWORD,
            VALID_HTTPS,
            VALID_VERIFY_SSL,
            device_token=DEVICE_TOKEN,
        )
        api.dsm_version = 5
        # With a granted device token, no OTP is needed.
        assert api.login()
        assert api._session_id == SESSION_ID
        assert api._syno_token is None
        assert api._device_token == DEVICE_TOKEN
        assert api.device_token == DEVICE_TOKEN

    def test_login_2sa_failed(self):
        """Test failed login with 2SA."""
        api = SynologyDSMMock(
            VALID_HOST,
            VALID_PORT,
            VALID_USER_2SA,
            VALID_PASSWORD,
            VALID_HTTPS,
            VALID_VERIFY_SSL,
        )
        api.dsm_version = 5
        with self.assertRaises(SynologyDSMLogin2SARequiredException):
            api.login()
        # A wrong OTP code must fail and leave no session state behind.
        with self.assertRaises(SynologyDSMLogin2SAFailedException):
            api.login(888888)
        assert api._session_id is None
        assert api._syno_token is None
        assert api._device_token is None

    def test_request_get(self):
        """Test get request."""
        assert self.api.get(API_INFO, "query")
        assert self.api.get(API_AUTH, "login")
        assert self.api.get("SYNO.DownloadStation2.Task", "list")
        assert self.api.get(API_AUTH, "logout")

    def test_request_get_failed(self):
        """Test failed get request."""
        # This API does not exist on DSM 5.
        with self.assertRaises(SynologyDSMAPINotExistsException):
            assert self.api.get("SYNO.Virtualization.API.Task.Info", "list")

    def test_request_post(self):
        """Test post request."""
        assert self.api.post(
            "SYNO.FileStation.Upload",
            "upload",
            params={"dest_folder_path": "/upload/test", "create_parents": True},
            files={"file": "open('file.txt','rb')"},
        )
        assert self.api.post(
            "SYNO.DownloadStation2.Task",
            "create",
            params={
                "uri": "ftps://192.0.0.1:21/test/test.zip",
                "username": "admin",
                "password": "<PASSWORD>",
            },
        )

    def test_request_post_failed(self):
        """Test failed post request."""
        # Uploading an already-existing file must raise.
        with self.assertRaises(SynologyDSMAPIErrorException):
            assert self.api.post(
                "SYNO.FileStation.Upload",
                "upload",
                params={"dest_folder_path": "/upload/test", "create_parents": True},
                files={"file": "open('file_already_exists.txt','rb')"},
            )
        # Creating a download task for a missing source must raise.
        with self.assertRaises(SynologyDSMAPIErrorException):
            assert self.api.post(
                "SYNO.DownloadStation2.Task",
                "create",
                params={
                    "uri": "ftps://192.0.0.1:21/test/test_not_exists.zip",
                    "username": "admin",
                    "password": "<PASSWORD>",
                },
            )

    def test_information(self):
        """Test information."""
        assert self.api.information
        self.api.information.update()
        assert self.api.information.model == "DS3615xs"
        assert self.api.information.ram == 6144
        assert self.api.information.serial == "B3J4N01003"
        assert self.api.information.temperature == 40
        assert not self.api.information.temperature_warn
        assert self.api.information.uptime == 3897
        assert self.api.information.version == "5967"
        assert self.api.information.version_string == "DSM 5.2-5967 Update 9"

    def test_network(self):
        """Test network."""
        assert self.api.network
        self.api.network.update()
        assert self.api.network.dns
        assert self.api.network.gateway
        assert self.api.network.hostname
        assert self.api.network.interfaces
        assert self.api.network.interface("eth0")
        # The DSM 5 fixture only exposes a single interface.
        assert self.api.network.interface("eth1") is None
        assert self.api.network.macs
        assert self.api.network.workgroup

    def test_utilisation(self):
        """Test utilisation."""
        assert self.api.utilisation
        self.api.utilisation.update()

    def test_utilisation_cpu(self):
        """Test utilisation CPU."""
        self.api.utilisation.update()
        assert self.api.utilisation.cpu
        assert self.api.utilisation.cpu_other_load
        assert self.api.utilisation.cpu_user_load
        assert self.api.utilisation.cpu_system_load
        assert self.api.utilisation.cpu_total_load
        assert self.api.utilisation.cpu_1min_load
        assert self.api.utilisation.cpu_5min_load
        assert self.api.utilisation.cpu_15min_load

    def test_utilisation_memory(self):
        """Test utilisation memory."""
        self.api.utilisation.update()
        assert self.api.utilisation.memory
        assert self.api.utilisation.memory_real_usage
        assert self.api.utilisation.memory_size()
        assert self.api.utilisation.memory_size(True)
        assert self.api.utilisation.memory_available_swap()
        assert self.api.utilisation.memory_available_swap(True)
        assert self.api.utilisation.memory_cached()
        assert self.api.utilisation.memory_cached(True)
        assert self.api.utilisation.memory_available_real()
        assert self.api.utilisation.memory_available_real(True)
        assert self.api.utilisation.memory_total_real()
        assert self.api.utilisation.memory_total_real(True)
        assert self.api.utilisation.memory_total_swap()
        assert self.api.utilisation.memory_total_swap(True)

    def test_utilisation_network(self):
        """Test utilisation network."""
        self.api.utilisation.update()
        assert self.api.utilisation.network
        assert self.api.utilisation.network_up()
        assert self.api.utilisation.network_up(True)
        assert self.api.utilisation.network_down()
        assert self.api.utilisation.network_down(True)

    def test_storage(self):
        """Test storage roots."""
        assert self.api.storage
        self.api.storage.update()
        assert self.api.storage.disks
        assert self.api.storage.env
        # DSM 5 reports no storage pools.
        assert self.api.storage.storage_pools == []
        assert self.api.storage.volumes

    def test_storage_volumes(self):
        """Test storage volumes."""
        self.api.storage.update()
        # Basics
        assert self.api.storage.volumes_ids
        for volume_id in self.api.storage.volumes_ids:
            # "test_volume" is an incomplete fixture entry, verified below.
            if volume_id == "test_volume":
                continue
            assert self.api.storage.volume_status(volume_id)
            assert self.api.storage.volume_device_type(volume_id)
            assert self.api.storage.volume_size_total(volume_id)
            assert self.api.storage.volume_size_total(volume_id, True)
            assert self.api.storage.volume_size_used(volume_id)
            assert self.api.storage.volume_size_used(volume_id, True)
            assert self.api.storage.volume_percentage_used(volume_id)
            assert (
                self.api.storage.volume_disk_temp_avg(volume_id) is None
            )  # because of empty storagePools
            assert (
                self.api.storage.volume_disk_temp_max(volume_id) is None
            )  # because of empty storagePools
        # Existing volume
        assert self.api.storage.volume_status("volume_1") == "normal"
        assert self.api.storage.volume_device_type("volume_1") == "raid_5"
        assert self.api.storage.volume_size_total("volume_1") == 8846249701376
        assert self.api.storage.volume_size_total("volume_1", True) == "8.0Tb"
        assert self.api.storage.volume_size_used("volume_1") == 5719795761152
        assert self.api.storage.volume_size_used("volume_1", True) == "5.2Tb"
        assert self.api.storage.volume_percentage_used("volume_1") == 64.7
        assert (
            self.api.storage.volume_disk_temp_avg("volume_1") is None
        )  # because of empty storagePools
        assert (
            self.api.storage.volume_disk_temp_max("volume_1") is None
        )  # because of empty storagePools
        # Non existing volume
        assert not self.api.storage.volume_status("not_a_volume")
        assert not self.api.storage.volume_device_type("not_a_volume")
        assert not self.api.storage.volume_size_total("not_a_volume")
        assert not self.api.storage.volume_size_total("not_a_volume", True)
        assert not self.api.storage.volume_size_used("not_a_volume")
        assert not self.api.storage.volume_size_used("not_a_volume", True)
        assert not self.api.storage.volume_percentage_used("not_a_volume")
        assert not self.api.storage.volume_disk_temp_avg("not_a_volume")
        assert not self.api.storage.volume_disk_temp_max("not_a_volume")
        # Test volume
        assert self.api.storage.volume_status("test_volume") is None
        assert self.api.storage.volume_device_type("test_volume") is None
        assert self.api.storage.volume_size_total("test_volume") is None
        assert self.api.storage.volume_size_total("test_volume", True) is None
        assert self.api.storage.volume_size_used("test_volume") is None
        assert self.api.storage.volume_size_used("test_volume", True) is None
        assert self.api.storage.volume_percentage_used("test_volume") is None
        assert self.api.storage.volume_disk_temp_avg("test_volume") is None
        assert self.api.storage.volume_disk_temp_max("test_volume") is None

    def test_storage_disks(self):
        """Test storage disks."""
        self.api.storage.update()
        # Basics
        assert self.api.storage.disks_ids
        for disk_id in self.api.storage.disks_ids:
            # "test_disk" is an incomplete fixture entry, verified below.
            if disk_id == "test_disk":
                continue
            assert "Disk" in self.api.storage.disk_name(disk_id)
            assert "/dev/" in self.api.storage.disk_device(disk_id)
            # "sda" reports its SMART health as a percentage in this fixture.
            if disk_id == "sda":
                assert self.api.storage.disk_smart_status(disk_id) == "90%"
            else:
                assert self.api.storage.disk_smart_status(disk_id) == "safe"
            assert self.api.storage.disk_status(disk_id) == "normal"
            assert not self.api.storage.disk_exceed_bad_sector_thr(disk_id)
            assert not self.api.storage.disk_below_remain_life_thr(disk_id)
            assert self.api.storage.disk_temp(disk_id)
        # Non existing disk
        assert not self.api.storage.disk_name("not_a_disk")
        assert not self.api.storage.disk_device("not_a_disk")
        assert not self.api.storage.disk_smart_status("not_a_disk")
        assert not self.api.storage.disk_status("not_a_disk")
        assert not self.api.storage.disk_exceed_bad_sector_thr("not_a_disk")
        assert not self.api.storage.disk_below_remain_life_thr("not_a_disk")
        assert not self.api.storage.disk_temp("not_a_disk")
        # Test disk
        assert self.api.storage.disk_name("test_disk") is None
        assert self.api.storage.disk_device("test_disk") is None
        assert self.api.storage.disk_smart_status("test_disk") is None
        assert self.api.storage.disk_status("test_disk") is None
        assert self.api.storage.disk_exceed_bad_sector_thr("test_disk") is None
        assert self.api.storage.disk_below_remain_life_thr("test_disk") is None
        assert self.api.storage.disk_temp("test_disk") is None
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/semantic_scholar.py | <gh_stars>0
import asyncio
import json
from abc import ABC, abstractmethod
from aiohttp import ClientSession, ClientResponseError
from motor.motor_asyncio import AsyncIOMotorClient
from aioredis import Redis
from datetime import timedelta, date
from collections import defaultdict
from typing import Optional, List
from arxivdigest_recommenders.util import gather, AsyncRateLimiter
from arxivdigest_recommenders.log import get_logger
from arxivdigest_recommenders import config
# Module-level logger tagged with the "SemanticScholar" component name.
logger = get_logger(__name__, "SemanticScholar")
class CacheBackend(ABC):
    """Abstract interface for the Semantic Scholar response cache.

    Implementations persist JSON-serializable response documents keyed by
    API endpoint path.
    """

    @abstractmethod
    async def exists(self, key: str) -> bool:
        """Return True if a cached document exists for *key*."""
        pass

    @abstractmethod
    async def get(self, key: str) -> dict:
        """Return the cached document stored under *key*."""
        pass

    @abstractmethod
    async def set(self, key: str, value: dict):
        """Store *value* under *key*, overwriting any existing document."""
        pass
class MongoDbBackend(CacheBackend):
    """Cache backend persisting responses in a MongoDB collection."""

    def __init__(self):
        # Database handle is created lazily on first use — presumably so
        # instantiating the backend at import time stays side-effect free.
        # TODO(review): confirm the motivation against the motor docs.
        self._db = None

    def _ensure_db(self):
        """Create the database handle on first use."""
        if self._db is not None:
            return
        client = AsyncIOMotorClient(config.MONGODB_HOST, config.MONGODB_PORT)
        self._db = client[config.S2_MONGODB_DB]

    async def exists(self, key: str) -> bool:
        """Return True if a document with ``_id == key`` exists."""
        self._ensure_db()
        collection = self._db[config.S2_MONGODB_COLLECTION]
        # limit=1 lets MongoDB stop counting after the first match.
        matches = await collection.count_documents({"_id": key}, limit=1)
        return matches == 1

    async def get(self, key: str) -> dict:
        """Return the document stored under ``_id == key`` (None if absent)."""
        self._ensure_db()
        return await self._db[config.S2_MONGODB_COLLECTION].find_one({"_id": key})

    async def set(self, key: str, value: dict):
        """Upsert *value* under ``_id == key``."""
        self._ensure_db()
        collection = self._db[config.S2_MONGODB_COLLECTION]
        await collection.replace_one({"_id": key}, value, upsert=True)
class RedisBackend(CacheBackend):
    """Cache backend persisting responses in Redis as JSON strings."""

    def __init__(self):
        self._redis = Redis(
            host=config.REDIS_HOST, port=config.REDIS_PORT, decode_responses=True
        )

    async def exists(self, key: str) -> bool:
        """Return True if *key* is present in Redis."""
        # Redis EXISTS returns the number of matching keys (an int); coerce
        # to bool to honor the CacheBackend ``-> bool`` contract.
        return bool(await self._redis.exists(key))

    async def get(self, key: str) -> dict:
        """Return the JSON document stored under *key*.

        Callers are expected to check :meth:`exists` first: for a missing
        key ``get`` returns None and ``json.loads`` raises TypeError.
        """
        return json.loads(await self._redis.get(key))

    async def set(self, key: str, value: dict):
        """Serialize *value* to JSON and store it under *key*."""
        await self._redis.set(key, json.dumps(value))
class SemanticScholar:
    """Wrapper for the Semantic Scholar RESTful API."""

    # Class-wide rate limiter: at most S2_MAX_REQUESTS per S2_WINDOW_SIZE.
    _limiter = AsyncRateLimiter(
        config.S2_MAX_REQUESTS,
        config.S2_WINDOW_SIZE,
    )
    # Partner endpoint when an API key is configured, public endpoint otherwise.
    _base_url = (
        "https://partner.semanticscholar.org/v1"
        if config.S2_API_KEY is not None
        else "https://api.semanticscholar.org/v1"
    )
    # Response cache backend, selected via configuration.
    _cache = RedisBackend() if config.S2_CACHE_BACKEND == "redis" else MongoDbBackend()
    # Per-endpoint locks serializing concurrent fetches of the same endpoint.
    _locks = defaultdict(asyncio.Lock)
    # Caps the number of in-flight HTTP requests.
    _sem = asyncio.BoundedSemaphore(config.S2_MAX_CONCURRENT_REQUESTS)
    # Endpoints that previously errored, mapped to the raised exception.
    # NOTE(review): never pruned — grows for the lifetime of the process.
    _errors = {}
    # Class-wide counters shared by all instances, used for periodic logging.
    requests = 0
    cache_hits = 0
    cache_misses = 0
    errors = 0
def __init__(self):
    # Session is created in __aenter__; the instance must be used as an
    # async context manager before issuing requests.
    self._session: Optional[ClientSession] = None
async def __aenter__(self):
    """Open the HTTP session, attaching the API key header if configured."""
    # raise_for_status=True turns HTTP error statuses into ClientResponseError.
    self._session = ClientSession(raise_for_status=True)
    if config.S2_API_KEY is not None:
        self._session.headers.update({"x-api-key": config.S2_API_KEY})
    return self
async def __aexit__(self, *err):
    """Close the HTTP session on context exit."""
    await self._session.close()
    self._session = None
async def _get(self, endpoint: str, **kwargs) -> dict:
    """Perform a rate-limited GET on *endpoint* and return the JSON body."""
    # Honor the shared rate limit and the in-flight request cap together.
    async with SemanticScholar._limiter, SemanticScholar._sem:
        response = await self._session.get(
            f"{SemanticScholar._base_url}{endpoint}", **kwargs
        )
        SemanticScholar.requests += 1
        # Periodically log request/error totals for visibility.
        if SemanticScholar.requests % 100 == 0:
            logger.debug(
                "Requests/errors: %d/%d",
                SemanticScholar.requests,
                SemanticScholar.errors,
            )
        return await response.json()
async def _cached_get(self, endpoint: str, max_age: int) -> dict:
if endpoint in SemanticScholar._errors:
# There's no point in refetching and relogging exceptions for endpoints that have already responded with
# error codes, so we just reraise any previous exception.
raise SemanticScholar._errors[endpoint]
async with SemanticScholar._locks[endpoint]:
try:
if config.S2_CACHE_RESPONSES:
if await SemanticScholar._cache.exists(endpoint):
cached = await SemanticScholar._cache.get(endpoint)
if date.fromisoformat(cached["expiration"]) >= date.today():
SemanticScholar.cache_hits += 1
return cached["data"]
SemanticScholar.cache_misses += 1
doc = {
"expiration": (
date.today() + timedelta(days=max_age)
).isoformat(),
"data": await self._get(endpoint),
}
await SemanticScholar._cache.set(endpoint, doc)
return doc["data"]
else:
return await self._get(endpoint)
except ClientResponseError as e:
logger.warn("%s: %s %s.", endpoint, e.status, e.message)
SemanticScholar.errors += 1
SemanticScholar._errors[endpoint] = e
raise
async def paper(self, s2_id: str = None, arxiv_id: str = None):
"""Get paper metadata.
Exactly one type of paper ID must be provided.
:param s2_id: S2 paper ID.
:param arxiv_id: arXiv paper ID.
:return: Paper metadata.
"""
if sum(i is None for i in (s2_id, arxiv_id)) != 1:
raise ValueError("Exactly one type of paper ID must be provided.")
paper_id = s2_id if s2_id is not None else f"arXiv:{arxiv_id}"
return await self._cached_get(
f"/paper/{paper_id}",
config.S2_PAPER_EXPIRATION,
)
async def author(self, s2_id: str):
"""Get author metadata.
:param s2_id: S2 author ID.
:return: Author metadata.
"""
return await self._cached_get(
f"/author/{s2_id}",
config.S2_AUTHOR_EXPIRATION,
)
async def author_papers(
self, s2_id: str, max_age=config.MAX_PAPER_AGE
) -> List[dict]:
"""Get metadata of an author's published papers.
:param s2_id: S2 author ID.
:param max_age: Max paper age.
:return: Metadata of published papers.
"""
author = await self.author(s2_id)
min_year = -1 if max_age is None else date.today().year - max_age
return await gather(
*[
self.paper(s2_id=paper["paperId"])
for paper in author["papers"]
if paper["year"] is not None and paper["year"] >= min_year
]
)
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/prev_cited_topic.py | <filename>arxivdigest_recommenders/prev_cited_topic.py
import asyncio
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from arxivdigest.connector import ArxivdigestConnector
from collections import defaultdict
from typing import DefaultDict, Dict, Sequence
from arxivdigest_recommenders.recommender import ArxivdigestRecommender
from arxivdigest_recommenders.semantic_scholar import SemanticScholar
from arxivdigest_recommenders import config
def explanation(author: dict, num_cites: int, topics: Sequence[str]) -> str:
    """Build the recommendation explanation shown to the user.

    :param author: The paper author the user has cited (must have a "name").
    :param num_cites: Number of times the user has cited the author.
    :param topics: Topics the paper matched, in order of relevance.
    :return: Markdown explanation string.
    """
    emphasized = [f"**{topic}**" for topic in topics]
    if len(emphasized) > 1:
        emphasized[-1] = "and " + emphasized[-1]
    # Oxford-comma style join only once there are three or more topics.
    joiner = " " if len(emphasized) < 3 else ", "
    topic_list = joiner.join(emphasized)
    cites_noun = "time" if num_cites == 1 else "times"
    return (
        f"This article seems to be about {topic_list}, "
        f"and is authored by {author['name']}, who you have cited {num_cites} "
        f"{cites_noun} in the last {config.MAX_PAPER_AGE} years."
    )
class PrevCitedTopicSearchRecommender(ArxivdigestRecommender):
    """Recommender system that recommends papers published by authors that the user has previously cited and that are
    relevant to the user's topics of interest."""

    def __init__(self):
        super().__init__(
            config.PREV_CITED_TOPIC_API_KEY, "PrevCitedTopicSearchRecommender"
        )
        # Citation counts per user: {user_s2_id: {cited_author_s2_id: count}}.
        self._citation_counts: DefaultDict[str, DefaultDict[str, int]] = defaultdict(
            lambda: defaultdict(int)
        )
        # Memoized topic search scores: {user_s2_id: {topic: {paper_id: score}}}.
        self._topic_scores: Dict[str, Dict[str, DefaultDict[str, int]]] = {}
        # Candidate papers are indexed in Elasticsearch once per run.
        self._indexing_run = False
        self._es = Elasticsearch(hosts=[config.ELASTICSEARCH_HOST])
        if not self._es.indices.exists(config.PREV_CITED_TOPIC_INDEX):
            self._es.indices.create(config.PREV_CITED_TOPIC_INDEX)

    async def index_papers(self, paper_ids: Sequence[str]):
        """Index candidate papers (title, abstract, topics, date) in Elasticsearch.

        Papers whose Semantic Scholar metadata could not be fetched are skipped.

        :param paper_ids: arXiv IDs of candidate papers.
        """
        self._logger.info("Indexing candidate papers in Elasticsearch.")
        async with SemanticScholar() as s2:
            papers = await asyncio.gather(
                *[s2.paper(arxiv_id=paper_id) for paper_id in paper_ids],
                return_exceptions=True,
            )
        paper_data = ArxivdigestConnector(
            config.PREV_CITED_TOPIC_API_KEY
        ).get_article_data(paper_ids)
        bulk(
            self._es,
            (
                {
                    "_index": config.PREV_CITED_TOPIC_INDEX,
                    "_id": paper_id,
                    "_source": {
                        "title": paper["title"],
                        "abstract": paper["abstract"],
                        "fieldsOfStudy": paper["fieldsOfStudy"],
                        "topics": [t["topic"] for t in paper["topics"]],
                        "date": paper_data[paper_id]["date"],
                    },
                }
                # Failed fetches come back as exception instances; skip them.
                for paper_id, paper in zip(paper_ids, papers)
                if not isinstance(paper, BaseException)
            ),
            request_timeout=10,
        )
        self._indexing_run = True

    def topic_search(self, topic: str):
        """Search the paper index for papers matching a topic.

        Results are restricted to papers dated within the last seven days.

        :param topic: Topic to search for.
        :return: Elasticsearch hits (document IDs and scores, no source).
        """
        query = {
            "query": {
                "bool": {
                    "must": [
                        {"simple_query_string": {"query": topic}},
                    ],
                    "filter": {"range": {"date": {"gte": "now-7d"}}},
                }
            }
        }
        return self._es.search(
            index=config.PREV_CITED_TOPIC_INDEX, body=query, size=10000, _source=False
        )["hits"]["hits"]

    async def citation_counts(self, s2_id: str) -> DefaultDict[str, int]:
        """Count (and memoize) how many times an author has cited each other author.

        :param s2_id: S2 author ID.
        :return: Mapping from cited author S2 ID to citation count.
        """
        if s2_id not in self._citation_counts:
            async with SemanticScholar() as s2:
                papers = await s2.author_papers(s2_id)
            for paper in papers:
                for reference in paper["references"]:
                    for author in reference["authors"]:
                        # Some referenced authors have no S2 ID; skip them.
                        if author["authorId"]:
                            self._citation_counts[s2_id][author["authorId"]] += 1
        return self._citation_counts[s2_id]

    def topic_scores(
        self, user: dict, user_s2_id: str
    ) -> Dict[str, DefaultDict[str, int]]:
        """Get (and memoize) per-paper topic relevance scores for a user's topics.

        :param user: User data (read for its "topics" key).
        :param user_s2_id: S2 author ID of the user, used as memoization key.
        :return: {topic: {paper_id: relevance score}}.
        """
        if user_s2_id not in self._topic_scores:
            self._topic_scores[user_s2_id] = {
                topic: defaultdict(
                    int,
                    {
                        paper["_id"]: paper["_score"]
                        for paper in self.topic_search(topic)
                    },
                )
                for topic in user["topics"]
            }
        return self._topic_scores[user_s2_id]

    async def score_paper(self, user, user_s2_id, paper_id):
        """Score a paper as (sum of top topic scores) * (user's citations of the
        paper's most-cited author)."""
        async with SemanticScholar() as s2:
            paper = await s2.paper(arxiv_id=paper_id)
        # Skip papers with no author data or papers (co-)authored by the user.
        if len(paper["authors"]) == 0 or user_s2_id in [
            a["authorId"] for a in paper["authors"]
        ]:
            return
        topic_scores = self.topic_scores(user, user_s2_id)
        citation_counts = await self.citation_counts(user_s2_id)
        # Highest-scoring topics for this paper, reused in the explanation.
        top_topics = sorted(
            [
                (topic, paper_scores[paper_id])
                for topic, paper_scores in topic_scores.items()
            ],
            key=lambda t: t[1],
            reverse=True,
        )[: config.MAX_EXPLANATION_TOPICS]
        most_cited_author = max(
            paper["authors"], key=lambda a: citation_counts[a["authorId"]]
        )
        num_cites = citation_counts[most_cited_author["authorId"]]
        score = sum(topic_score for _, topic_score in top_topics) * num_cites
        return {
            "article_id": paper_id,
            "score": score,
            "explanation": explanation(
                most_cited_author, num_cites, [topic for topic, _ in top_topics]
            )
            if score > 0
            else "",
        }

    async def user_ranking(self, user, user_s2_id, paper_ids, batch_size=10):
        """Generate a ranking, indexing the candidate papers first if needed."""
        if not self._indexing_run:
            await self.index_papers(paper_ids)
        return await ArxivdigestRecommender.user_ranking(
            self, user, user_s2_id, paper_ids, batch_size
        )
if __name__ == "__main__":
    # Script entry point: run one full recommendation pass.
    asyncio.run(PrevCitedTopicSearchRecommender().recommend())
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/config.py | <gh_stars>0
import os
import json
from typing import List
# Candidate config file locations, checked in order (first existing file wins):
# working directory, user home, then system-wide.
file_locations = [
    os.curdir + "/config.json",
    os.path.expanduser("~") + "/arxivdigest-recommenders/config.json",
    "/etc/arxivdigest-recommenders/config.json",
]
def get_config_from_file(file_paths: List[str]) -> dict:
    """Return the parsed contents of the first config file that exists.

    :param file_paths: Candidate config file locations, checked in order.
    :return: Parsed JSON config of the first existing file, or an empty dict
        if none of the paths exist. (The previous docstring incorrectly
        claimed None was returned when no file was found.)
    """
    for file_location in file_paths:
        if os.path.isfile(file_location):
            print("Found config file at: {}".format(os.path.abspath(file_location)))
            with open(file_location) as file:
                return json.load(file)
    return {}
config_file = get_config_from_file(file_locations)

# Logging and arXivDigest API settings.
LOG_LEVEL = config_file.get("log_level", "INFO").upper()
ARXIVDIGEST_BASE_URL = config_file.get(
    "arxivdigest_base_url", "https://api.arxivdigest.org/"
)

# MongoDB connection settings.
MONGODB_CONFIG = config_file.get("mongodb", {})
MONGODB_HOST = MONGODB_CONFIG.get("host", "127.0.0.1")
MONGODB_PORT = MONGODB_CONFIG.get("port", 27017)

# Redis connection settings.
REDIS_CONFIG = config_file.get("redis", {})
REDIS_HOST = REDIS_CONFIG.get("host", "127.0.0.1")
REDIS_PORT = REDIS_CONFIG.get("port", 6379)

# Elasticsearch host descriptor (a dict, passed as-is to Elasticsearch()).
ELASTICSEARCH_HOST = config_file.get(
    "elasticsearch", {"host": "127.0.0.1", "port": 9200}
)

# Semantic Scholar API settings: auth, concurrency, rate limiting, caching.
S2_CONFIG = config_file.get("semantic_scholar", {})
S2_API_KEY = S2_CONFIG.get("api_key")
S2_MAX_CONCURRENT_REQUESTS = S2_CONFIG.get("max_concurrent_requests", 100)
# Sliding-window rate limit: at most max_requests per window_size seconds.
S2_MAX_REQUESTS = S2_CONFIG.get("max_requests", 100)
S2_WINDOW_SIZE = S2_CONFIG.get("window_size", 300)
S2_CACHE_RESPONSES = S2_CONFIG.get("cache_responses", True)
S2_CACHE_BACKEND = S2_CONFIG.get("cache_backend", "redis").lower()
S2_MONGODB_DB = S2_CONFIG.get("mongodb_db", "s2cache")
S2_MONGODB_COLLECTION = S2_CONFIG.get("mongodb_collection", "s2cache")
# Cache expirations, in days.
S2_PAPER_EXPIRATION = S2_CONFIG.get("paper_cache_expiration", 30)
S2_AUTHOR_EXPIRATION = S2_CONFIG.get("author_cache_expiration", 7)

# Settings shared by all recommenders. MAX_PAPER_AGE is in years.
MAX_PAPER_AGE = config_file.get("max_paper_age", 5)
MAX_EXPLANATION_VENUES = config_file.get("max_explanation_venues", 3)
# Venues ignored when building author representations (compared lowercase).
VENUE_BLACKLIST = [
    venue.lower() for venue in config_file.get("venue_blacklist", ["arxiv"])
]

# Per-recommender arXivDigest API keys and settings.
FREQUENT_VENUES_API_KEY = config_file.get("frequent_venues_recommender", {}).get(
    "arxivdigest_api_key", ""
)
VENUE_COPUB_CONFIG = config_file.get("venue_copub_recommender", {})
VENUE_COPUB_API_KEY = VENUE_COPUB_CONFIG.get("arxivdigest_api_key", "")
WEIGHTED_INF_CONFIG = config_file.get("weighted_inf_recommender", {})
WEIGHTED_INF_API_KEY = WEIGHTED_INF_CONFIG.get("arxivdigest_api_key", "")
WEIGHTED_INF_MIN_INFLUENCE = WEIGHTED_INF_CONFIG.get("min_influence", 20)
PREV_CITED_API_KEY = config_file.get("prev_cited_recommender", {}).get(
    "arxivdigest_api_key", ""
)
PREV_CITED_COLLAB_API_KEY = config_file.get("prev_cited_collab_recommender", {}).get(
    "arxivdigest_api_key", ""
)
PREV_CITED_TOPIC_CONFIG = config_file.get("prev_cited_topic_recommender", {})
PREV_CITED_TOPIC_API_KEY = PREV_CITED_TOPIC_CONFIG.get("arxivdigest_api_key", "")
PREV_CITED_TOPIC_INDEX = PREV_CITED_TOPIC_CONFIG.get("index", "arxivdigest_papers")
MAX_EXPLANATION_TOPICS = PREV_CITED_TOPIC_CONFIG.get("max_explanation_topics", 3)
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/prev_cited_collab.py | <gh_stars>0
import asyncio
from collections import defaultdict
from typing import DefaultDict, Dict, Any
from arxivdigest_recommenders.recommender import ArxivdigestRecommender
from arxivdigest_recommenders.semantic_scholar import SemanticScholar
from arxivdigest_recommenders import config
def explanation(author: dict, collaborator: dict, num_cites: int) -> str:
    """Build the recommendation explanation shown to the user.

    :param author: Recommended paper's author (must have a "name").
    :param collaborator: The user's previous collaborator who cited the author.
    :param num_cites: Number of times the collaborator cited the author.
    :return: Explanation string.
    """
    times_word = "time" if num_cites == 1 else "times"
    return (
        f"This article is authored by {author['name']}, who has been cited by your previous collaborator "
        f"{collaborator['name']} {num_cites} {times_word} in the last "
        f"{config.MAX_PAPER_AGE} years."
    )
class PrevCitedCollabRecommender(ArxivdigestRecommender):
    """Recommender system that recommends papers published by authors that have been cited by the user's previous
    collaborators."""

    def __init__(self):
        super().__init__(config.PREV_CITED_COLLAB_API_KEY, "PrevCitedCollabRecommender")
        # Citation counts per author: {s2_id: {cited_author_s2_id: count}}.
        self._citation_counts: DefaultDict[str, DefaultDict[str, int]] = defaultdict(
            lambda: defaultdict(int)
        )
        # Co-authors per author: {s2_id: {collaborator_s2_id: author data}}.
        self._collaborators: DefaultDict[str, Dict[str, Any]] = defaultdict(dict)

    async def citation_counts(self, s2_id: str) -> DefaultDict[str, int]:
        """Count (and memoize) how many times an author has cited each other author.

        :param s2_id: S2 author ID.
        :return: Mapping from cited author S2 ID to citation count.
        """
        if s2_id not in self._citation_counts:
            async with SemanticScholar() as s2:
                papers = await s2.author_papers(s2_id)
            for paper in papers:
                for reference in paper["references"]:
                    for author in reference["authors"]:
                        # Some referenced authors have no S2 ID; skip them.
                        if author["authorId"]:
                            self._citation_counts[s2_id][author["authorId"]] += 1
        return self._citation_counts[s2_id]

    async def collaborators(self, s2_id: str) -> Dict[str, Any]:
        """Get (and memoize) the author's co-authors from their recent papers.

        :param s2_id: S2 author ID.
        :return: Mapping from collaborator S2 ID to collaborator author data.
        """
        if s2_id not in self._collaborators:
            async with SemanticScholar() as s2:
                papers = await s2.author_papers(s2_id)
            for paper in papers:
                for author in paper["authors"]:
                    # Exclude the author themselves and authors without an ID.
                    if author["authorId"] and author["authorId"] != s2_id:
                        self._collaborators[s2_id][author["authorId"]] = author
        return self._collaborators[s2_id]

    async def score_paper(self, user, user_s2_id, paper_id):
        """Score a paper by the highest number of citations any collaborator of
        the user has given to any of the paper's authors."""
        async with SemanticScholar() as s2:
            paper = await s2.paper(arxiv_id=paper_id)
        # Skip papers with no author data or papers (co-)authored by the user.
        if len(paper["authors"]) == 0 or user_s2_id in [
            a["authorId"] for a in paper["authors"]
        ]:
            return
        collaborators = await self.collaborators(user_s2_id)
        score = 0
        most_cited_author = None
        citer = None
        for collaborator_id, collaborator in collaborators.items():
            # NOTE(review): counts are fetched before the authorship check
            # below, so data is fetched even for collaborators that end up
            # skipped — presumably harmless due to memoization; confirm.
            citation_counts = await self.citation_counts(collaborator_id)
            # Ignore collaborators who co-authored the candidate paper.
            if collaborator["authorId"] in [a["authorId"] for a in paper["authors"]]:
                continue
            collaborator_most_cited_author = max(
                paper["authors"],
                key=lambda a: citation_counts[a["authorId"]],
            )
            collaborator_score = citation_counts[
                collaborator_most_cited_author["authorId"]
            ]
            # Keep the best (collaborator, cited author) pair seen so far.
            if collaborator_score > score:
                score = collaborator_score
                most_cited_author = collaborator_most_cited_author
                citer = collaborator
        return {
            "article_id": paper_id,
            "score": score,
            "explanation": explanation(most_cited_author, citer, score)
            if score > 0
            else "",
        }
if __name__ == "__main__":
    # Script entry point: run one full recommendation pass.
    asyncio.run(PrevCitedCollabRecommender().recommend())
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/weighted_inf.py | <gh_stars>0
import asyncio
from collections import defaultdict
from typing import List, Dict, DefaultDict
import numpy as np
from arxivdigest_recommenders.recommender import ArxivdigestRecommender
from arxivdigest_recommenders.semantic_scholar import SemanticScholar
from arxivdigest_recommenders.author_representation import venue_author_representation
from arxivdigest_recommenders.util import pad_shortest, padded_cosine_sim
from arxivdigest_recommenders import config
def explanation(
    venues: List[str],
    user: np.ndarray,
    author: np.ndarray,
    author_name: str,
    author_influence: DefaultDict[int, int],
) -> str:
    """Build the recommendation explanation shown to the user.

    :param venues: List of venues (index = venue ID).
    :param user: User venue-count vector.
    :param author: Author venue-count vector.
    :param author_name: Author name.
    :param author_influence: Author's influence per venue index.
    :return: Markdown explanation string.
    """
    user, author = pad_shortest(user, author)
    # Venues where both the user and the author have published, most
    # influential venues first.
    shared = (i for i, count in enumerate(user) if count > 0 and author[i] > 0)
    top_indexes = sorted(shared, key=author_influence.__getitem__, reverse=True)[
        : config.MAX_EXPLANATION_VENUES
    ]
    highlighted = [f"**{venues[i]}**" for i in top_indexes]
    if len(highlighted) > 1:
        highlighted[-1] = "and " + highlighted[-1]
    joiner = " " if len(highlighted) < 3 else ", "
    return (
        f"{author_name} has had influential publications at "
        f"{joiner.join(highlighted)} in the last {config.MAX_PAPER_AGE} years. "
        f"You have also published at {'this venue' if len(highlighted) == 1 else 'these venues'} in the same time "
        f"period."
    )
class WeightedInfRecommender(ArxivdigestRecommender):
    """Recommender system based on venue co-publishing and author influence."""

    def __init__(self):
        super().__init__(config.WEIGHTED_INF_API_KEY, "WeightedInfRecommender")
        # Shared, growing list of discovered venues; index = venue ID.
        self._venues: List[str] = []
        # Memoized author venue vectors keyed by S2 author ID.
        self._authors: Dict[str, np.ndarray] = {}
        # Memoized influence: {s2_id: {venue_index: influential citations}}.
        self._influence: Dict[str, DefaultDict[int, int]] = {}

    async def author_representation(self, s2_id: str) -> np.ndarray:
        """Get (and memoize) an author's venue vector and influence map.

        Only venues where the author's influential citation count reaches
        config.WEIGHTED_INF_MIN_INFLUENCE are kept in the influence map.

        :param s2_id: S2 author ID.
        :return: Venue-count vector for the author.
        """
        if s2_id not in self._authors:
            async with SemanticScholar() as s2:
                papers = await s2.author_papers(s2_id)
            self._authors[s2_id] = venue_author_representation(self._venues, papers)
            author_influence = defaultdict(int)
            for paper in papers:
                # Only venues known to the shared venue list are counted
                # (blacklisted venues never enter it).
                if paper["venue"] and paper["venue"] in self._venues:
                    author_influence[self._venues.index(paper["venue"])] += paper[
                        "influentialCitationCount"
                    ]
            self._influence[s2_id] = defaultdict(
                int,
                {
                    venue_index: venue_influence
                    for venue_index, venue_influence in author_influence.items()
                    if venue_influence >= config.WEIGHTED_INF_MIN_INFLUENCE
                },
            )
        return self._authors[s2_id]

    async def score_paper(self, user, user_s2_id, paper_id):
        """Score a paper by each author's influence at the user's venues,
        weighted by venue-profile cosine similarity; keep the best author."""
        async with SemanticScholar() as s2:
            paper = await s2.paper(arxiv_id=paper_id)
        # Skip papers (co-)authored by the user.
        if user_s2_id in [a["authorId"] for a in paper["authors"]]:
            return
        user_representation = await self.author_representation(user_s2_id)
        # Indexes of venues where the user has published.
        user_venue_indexes = np.nonzero(user_representation)[0]
        similar_author = None
        similar_author_name = None
        similar_author_influence = None
        score = 0
        for author in paper["authors"]:
            # Authors without an S2 ID cannot be compared.
            if not author["authorId"]:
                continue
            try:
                author_representation = await self.author_representation(
                    author["authorId"]
                )
                author_influence = self._influence[author["authorId"]]
            # Best effort: skip authors whose data cannot be fetched.
            except Exception:
                continue
            # NOTE(review): np.vectorize raises on size-0 input, so this may
            # fail when the user has no recorded venues; the failure would be
            # swallowed upstream by gather(return_exceptions=True) — confirm
            # that this is intended.
            author_score = np.sum(
                np.vectorize(author_influence.__getitem__)(user_venue_indexes)
            ) * padded_cosine_sim(user_representation, author_representation)
            if author_score > score:
                similar_author = author_representation
                similar_author_name = author["name"]
                similar_author_influence = author_influence
                score = author_score
        return {
            "article_id": paper_id,
            "score": score,
            "explanation": explanation(
                self._venues,
                user_representation,
                similar_author,
                similar_author_name,
                similar_author_influence,
            )
            if score > 0
            else "",
        }
if __name__ == "__main__":
    # Script entry point: run one full recommendation pass.
    asyncio.run(WeightedInfRecommender().recommend())
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/frequent_venues.py | import asyncio
import numpy as np
from typing import List, Dict
from arxivdigest_recommenders.recommender import ArxivdigestRecommender
from arxivdigest_recommenders.semantic_scholar import SemanticScholar
from arxivdigest_recommenders.author_representation import venue_author_representation
from arxivdigest_recommenders.util import pad_shortest
from arxivdigest_recommenders import config
def explanation(
    venues: List[str],
    user: List[int],
    paper: List[int],
) -> str:
    """Build the recommendation explanation shown to the user.

    :param venues: List of venues (index = venue ID).
    :param user: User venue-count vector.
    :param paper: One-hot vector marking the paper's venue.
    :return: Markdown explanation string.
    """
    # The paper vector is one-hot, so index(1) locates its venue.
    venue_index = paper.index(1)
    published = user[venue_index]
    paper_word = "paper" if published == 1 else "papers"
    return (
        f"This article is published at **{venues[venue_index]}**, where you have published {published} "
        f"{paper_word} in the last {config.MAX_PAPER_AGE} years."
    )
class FrequentVenuesRecommender(ArxivdigestRecommender):
    """Recommender system that recommends papers published at venues that the user has published papers at."""

    def __init__(self):
        super().__init__(config.FREQUENT_VENUES_API_KEY, "FrequentVenuesRecommender")
        # Shared, growing list of discovered venues; index = venue ID.
        self._venues: List[str] = []
        # Memoized author venue vectors keyed by S2 author ID.
        self._authors: Dict[str, np.ndarray] = {}

    async def author_representation(self, s2_id: str) -> np.ndarray:
        """Get (and memoize) an author's venue-count vector.

        :param s2_id: S2 author ID.
        :return: Venue-count vector for the author.
        """
        if s2_id not in self._authors:
            async with SemanticScholar() as s2:
                papers = await s2.author_papers(s2_id)
            self._authors[s2_id] = venue_author_representation(self._venues, papers)
        return self._authors[s2_id]

    async def score_paper(self, user, user_s2_id, paper_id):
        """Score a paper by how many times the user has published at its venue."""
        async with SemanticScholar() as s2:
            paper = await s2.paper(arxiv_id=paper_id)
        user_representation = await self.author_representation(user_s2_id)
        # Papers without a venue, or at a venue not seen in any author
        # profile, cannot be scored.
        if not paper["venue"] or paper["venue"] not in self._venues:
            return
        # One-hot vector for the paper's venue, padded to a common length.
        paper_representation, user_representation = pad_shortest(
            [int(v == paper["venue"]) for v in self._venues], user_representation
        )
        # Dot product = the user's publication count at the paper's venue.
        score = int(np.dot(paper_representation, user_representation))
        return {
            "article_id": paper_id,
            "score": score,
            "explanation": explanation(
                self._venues, user_representation, paper_representation
            )
            if score > 0
            else "",
        }
if __name__ == "__main__":
    # Script entry point: run one full recommendation pass.
    asyncio.run(FrequentVenuesRecommender().recommend())
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/recommender.py | import asyncio
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Sequence, Optional
from arxivdigest.connector import ArxivdigestConnector
from arxivdigest_recommenders import config
from arxivdigest_recommenders.semantic_scholar import SemanticScholar
from arxivdigest_recommenders.util import extract_s2_id, chunks
from arxivdigest_recommenders.log import get_logger
class ArxivdigestRecommender(ABC):
    """Base class for arXivDigest recommender systems."""

    def __init__(self, arxivdigest_api_key: str, name: str):
        """
        :param arxivdigest_api_key: arXivDigest API key of the recommender.
        :param name: Recommender name, used as logger name and log prefix.
        """
        self._arxivdigest_api_key = arxivdigest_api_key
        self._logger = get_logger(name, name)

    @abstractmethod
    async def score_paper(
        self, user: dict, user_s2_id: str, paper_id: str
    ) -> Optional[Dict[str, Any]]:
        """Score a paper for a user.

        If the paper for some reason cannot be scored (e.g., if there's not enough data available or because the paper
        is authored by the user), nothing (or None) should be returned.

        :param user: User data.
        :param user_s2_id: S2 author ID of the user.
        :param paper_id: arXiv ID of paper.
        :return: Dictionary containing article_id, explanation, and score keys.
        """
        pass

    async def user_ranking(
        self, user: dict, user_s2_id: str, paper_ids: Sequence[str], batch_size=10
    ) -> List[Dict[str, Any]]:
        """Generate ranking of papers for a user.

        :param user: User data.
        :param user_s2_id: S2 author ID of the user.
        :param paper_ids: arXiv IDs of papers.
        :param batch_size: Number of papers scored concurrently.
        :return: Ranking of candidate papers (zero-score papers excluded).
        """
        results = []
        # Fix: the chunk size was hard-coded to 5, silently ignoring the
        # batch_size parameter.
        for paper_id_chunk in chunks(paper_ids, batch_size):
            chunk_results = await asyncio.gather(
                *[self.score_paper(user, user_s2_id, p) for p in paper_id_chunk],
                return_exceptions=True
            )
            # Drop failures (exception instances) and unscored/zero-score papers.
            results.extend(
                r for r in chunk_results if isinstance(r, dict) and r["score"] > 0
            )
        return results

    async def recommendations(
        self,
        users: dict,
        interleaved_papers: dict,
        paper_ids: Sequence[str],
        max_recommendations=10,
    ) -> Dict[str, List[Dict[str, Any]]]:
        """Generate recommendations for a user batch.

        :param users: Users.
        :param interleaved_papers: Interleaved papers that will be excluded from the generated recommendations
        before submission.
        :param paper_ids: arXiv IDs of candidate papers.
        :param max_recommendations: Max number of recommendations per user.
        :return: Recommendations, omitting users with no recommendations.
        """
        recommendations = {}
        for user_id, user_data in users.items():
            s2_id = extract_s2_id(user_data)
            if s2_id is None:
                self._logger.info("User %s: skipped (no S2 ID provided).", user_id)
                continue
            try:
                # Validate the user's S2 ID.
                async with SemanticScholar() as s2:
                    await s2.author(s2_id)
            except Exception:
                # Fix: the format string used a bare "%" instead of "%s" for
                # the user ID placeholder.
                self._logger.error(
                    "User %s: unable to get author details for S2 ID %s.",
                    user_id,
                    s2_id,
                )
                continue
            # Exclude papers already interleaved for this user.
            user_ranking = [
                r
                for r in await self.user_ranking(user_data, s2_id, paper_ids)
                if r["article_id"] not in interleaved_papers[user_id]
            ]
            user_recommendations = sorted(
                user_ranking, key=lambda r: r["score"], reverse=True
            )[:max_recommendations]
            self._logger.info(
                "User %s: recommended %d papers.", user_id, len(user_recommendations)
            )
            recommendations[user_id] = user_recommendations
        return {
            user_id: user_recommendations
            for user_id, user_recommendations in recommendations.items()
            if len(user_recommendations) > 0
        }

    async def recommend(
        self, submit_recommendations=True
    ) -> Dict[str, List[Dict[str, Any]]]:
        """Generate and submit recommendations for all users.

        :param submit_recommendations: Submit recommendations to arXivDigest.
        :return: Recommendations.
        """
        connector = ArxivdigestConnector(
            self._arxivdigest_api_key, config.ARXIVDIGEST_BASE_URL
        )
        paper_ids = connector.get_article_ids()
        total_users = connector.get_number_of_users()
        self._logger.info(
            "%d candidate papers and %d users.", len(paper_ids), total_users
        )
        recommendation_count = 0
        recommendations = {}
        # Process users in the batches served by the arXivDigest API.
        while recommendation_count < total_users:
            user_ids = connector.get_user_ids(recommendation_count)
            users = connector.get_user_info(user_ids)
            interleaved = connector.get_interleaved_articles(user_ids)
            batch_recommendations = await self.recommendations(
                users, interleaved, paper_ids
            )
            recommendations.update(batch_recommendations)
            if batch_recommendations and submit_recommendations:
                connector.send_article_recommendations(batch_recommendations)
            recommendation_count += len(user_ids)
            self._logger.info("Processed %d users.", recommendation_count)
        self._logger.info("Finished recommending.")
        self._logger.info(
            "Semantic Scholar API: %d cache hits, %d cache misses, %d requests, and %d errors.",
            SemanticScholar.cache_hits,
            SemanticScholar.cache_misses,
            SemanticScholar.requests,
            SemanticScholar.errors,
        )
        return recommendations
|
olafapl/arxivdigest_recommenders | tests/test_rate_limiter.py | <filename>tests/test_rate_limiter.py
import unittest
import time
from arxivdigest_recommenders.util import AsyncRateLimiter
class TestRateLimiter(unittest.IsolatedAsyncioTestCase):
    """Behavioral test for AsyncRateLimiter's windowed throttling."""

    async def test_rate_limit(self):
        """Acquire a 1000-per-2-seconds limiter 10000 times and check that
        acquisitions happen in bursts of 1000 separated by the window."""
        stamps = []
        limiter = AsyncRateLimiter(1000, 2)
        while len(stamps) < 10000:
            async with limiter:
                stamps.append(time.monotonic())
        # The first 1000 acquisitions should be nearly instantaneous.
        for stamp in stamps[1:1000]:
            self.assertAlmostEqual(stamps[0], stamp, delta=0.1)
        # The 1001st acquisition must wait out the 2-second window.
        self.assertLess(stamps[0] + 2, stamps[1000])
        # The final burst should again be tightly clustered.
        for stamp in stamps[9001:]:
            self.assertAlmostEqual(stamps[9000], stamp, delta=0.1)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
olafapl/arxivdigest_recommenders | setup.py | <filename>setup.py<gh_stars>0
from setuptools import setup, find_packages
# Pinned dependencies are read from requirements.txt, one per line.
with open("requirements.txt") as requirements_file:
    install_requires = requirements_file.read().splitlines()

setup(
    name="arxivdigest-recommenders",
    author="<NAME>",
    packages=find_packages(),
    install_requires=install_requires,
)
|
olafapl/arxivdigest_recommenders | tests/test_author_representation.py | import unittest
from arxivdigest_recommenders.author_representation import venue_author_representation
# Two synthetic publication histories: one author spread over venues a-f with
# "a" and "b" appearing twice, and one author with a single venue "g".
author_papers = [
    [{"venue": venue} for venue in "aabcdefb"],
    [{"venue": "g"}],
]
class TestAuthorRepresentation(unittest.TestCase):
    """Tests for venue_author_representation."""

    def test_venue_author_representation(self):
        # The shared venues list is mutated in place as new venues are seen.
        venues = []
        author_representations = [
            venue_author_representation(venues, papers) for papers in author_papers
        ]
        # First author: six venues discovered, "a" and "b" counted twice.
        self.assertEqual(list(author_representations[0]), [2, 2, 1, 1, 1, 1])
        # Second author: only venue "g", discovered as the seventh venue.
        self.assertEqual(list(author_representations[1]), [0, 0, 0, 0, 0, 0, 1])
        self.assertEqual(venues, ["a", "b", "c", "d", "e", "f", "g"])
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/venue_copub.py | import asyncio
import random
import numpy as np
from typing import List, Dict
from arxivdigest_recommenders.recommender import ArxivdigestRecommender
from arxivdigest_recommenders.semantic_scholar import SemanticScholar
from arxivdigest_recommenders.author_representation import venue_author_representation
from arxivdigest_recommenders.util import pad_shortest, padded_cosine_sim
from arxivdigest_recommenders import config
def explanation(
    venues: List[str], user: np.ndarray, author: np.ndarray, author_name: str
) -> str:
    """Generate a recommendation explanation.

    :param venues: List of venues.
    :param user: User.
    :param author: Author.
    :param author_name: Author name.
    :return: Explanation.
    """
    user, author = pad_shortest(user, author)
    # Venues where both the user and the author have published.
    # NOTE(review): max() below raises on an empty list, so this assumes at
    # least one common venue; callers appear to invoke this only when the
    # similarity score is positive — confirm.
    common_venue_indexes = [
        i for i, user_count in enumerate(user) if user_count > 0 and author[i] > 0
    ]
    if max([user[i] for i in common_venue_indexes]) == 1:
        # The user published at most once per common venue: mention a random
        # sample of the common venues.
        common_venues = [
            f"**{venues[i]}**"
            for i in random.sample(common_venue_indexes, len(common_venue_indexes))
        ][: config.MAX_EXPLANATION_VENUES]
        if len(common_venues) > 1:
            common_venues[-1] = "and " + common_venues[-1]
        return (
            f"You and {author_name} have both published at "
            f"{(' ' if len(common_venues) < 3 else ', ').join(common_venues)} in the last "
            f"{config.MAX_PAPER_AGE} years."
        )
    else:
        # Mention the common venues the user has published at most often.
        # NOTE(review): tests/test_venue_copub.py expects the phrase
        # "during the last" while this emits "in the last" — confirm which
        # wording is intended.
        frequent_venue_indexes = sorted(
            [i for i in common_venue_indexes if user[i] > 1],
            key=lambda i: user[i],
            reverse=True,
        )[: config.MAX_EXPLANATION_VENUES]
        frequent_venues = [
            f"{user[i]} times at **{venues[i]}**" for i in frequent_venue_indexes
        ]
        if len(frequent_venues) > 1:
            frequent_venues[-1] = "and " + frequent_venues[-1]
        return (
            f"You have published {(' ' if len(frequent_venues) < 3 else ', ').join(frequent_venues)} in "
            f"the last {config.MAX_PAPER_AGE} years. {author_name} has also published at "
            f"{'this venue' if len(frequent_venues) == 1 else 'these venues'} in the same time period."
        )
class VenueCoPubRecommender(ArxivdigestRecommender):
    """Recommender system based on venue co-publishing."""

    def __init__(self):
        super().__init__(config.VENUE_COPUB_API_KEY, "VenueCoPubRecommender")
        # Shared, growing list of discovered venues; index = venue ID.
        self._venues: List[str] = []
        # Memoized author venue vectors keyed by S2 author ID.
        self._authors: Dict[str, np.ndarray] = {}

    async def author_representation(self, s2_id: str) -> np.ndarray:
        """Get (and memoize) an author's venue-count vector.

        :param s2_id: S2 author ID.
        :return: Venue-count vector for the author.
        """
        if s2_id not in self._authors:
            async with SemanticScholar() as s2:
                papers = await s2.author_papers(s2_id)
            self._authors[s2_id] = venue_author_representation(self._venues, papers)
        return self._authors[s2_id]

    async def score_paper(self, user, user_s2_id, paper_id):
        """Score a paper by the highest venue-profile cosine similarity
        between the user and any of the paper's authors."""
        async with SemanticScholar() as s2:
            paper = await s2.paper(arxiv_id=paper_id)
        # Skip papers (co-)authored by the user.
        if user_s2_id in [a["authorId"] for a in paper["authors"]]:
            return
        user_representation = await self.author_representation(user_s2_id)
        similar_author = None
        similar_author_name = None
        score = 0
        for author in paper["authors"]:
            # Authors without an S2 ID cannot be compared.
            if not author["authorId"]:
                continue
            try:
                author_representation = await self.author_representation(
                    author["authorId"]
                )
            # Best effort: skip authors whose data cannot be fetched.
            except Exception:
                continue
            author_score = padded_cosine_sim(user_representation, author_representation)
            # Keep the most similar author for the explanation.
            if author_score > score:
                similar_author = author_representation
                similar_author_name = author["name"]
                score = author_score
        return {
            "article_id": paper_id,
            "score": score,
            "explanation": explanation(
                self._venues,
                user_representation,
                similar_author,
                similar_author_name,
            )
            if score > 0
            else "",
        }
if __name__ == "__main__":
    # Script entry point: run one full recommendation pass.
    asyncio.run(VenueCoPubRecommender().recommend())
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/author_representation.py | import numpy as np
from typing import List, Dict, Any
from arxivdigest_recommenders import config
def venue_author_representation(
    venues: List[str], published_papers: List[Dict[str, Any]]
) -> np.ndarray:
    """Create an author vector representation based on the venues an author has published at.

    The returned vector is N-dimensional, where N is the number of venues that have been discovered thus far. Each value
    in the vector corresponds to a certain venue and represents the number of times the author has published there.

    :param venues: List of venues. Venues the author has published at that are not already in this list are appended.
    :param published_papers: Papers published by the author.
    :return: Author vector representation (trailing zeros trimmed).
    """
    # Fix: the venue generator filtered on truthiness while the size count
    # filtered on `is not None`; use a single materialized, consistently
    # filtered list for both.
    author_venues = [paper["venue"] for paper in published_papers if paper["venue"]]
    # Upper bound on the final vector length: every author venue could be new.
    representation = np.zeros(len(venues) + len(author_venues), dtype=int)
    for author_venue in author_venues:
        # Blacklisted venues (e.g. "arxiv") are ignored entirely.
        if author_venue.lower() in config.VENUE_BLACKLIST:
            continue
        if author_venue not in venues:
            venues.append(author_venue)
        representation[venues.index(author_venue)] += 1
    # Drop unused trailing slots so the vector length matches discovery order.
    return np.trim_zeros(representation, "b")
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/log.py | <filename>arxivdigest_recommenders/log.py
import sys
import logging
from arxivdigest_recommenders import config
# Map config-file log level names to the stdlib logging level constants
# (FATAL is the stdlib alias for CRITICAL, i.e. 50).
LOG_LEVELS = {
    "FATAL": logging.FATAL,
    "ERROR": logging.ERROR,
    "WARNING": logging.WARNING,
    "INFO": logging.INFO,
    "DEBUG": logging.DEBUG,
}
def get_logger(name: str, prefix: str):
    """Return a stdout logger wrapped in an adapter that injects *prefix*.

    :param name: Logger name (loggers are cached per name by the logging module).
    :param prefix: Value substituted for %(prefix)s in the log format.
    :return: A logging.LoggerAdapter around the named logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(LOG_LEVELS.get(config.LOG_LEVEL, 20))
    # Fix: logging.getLogger caches loggers by name, so unconditionally adding
    # a handler made repeated get_logger calls emit duplicate log lines.
    if not logger.handlers:
        formatter = logging.Formatter(
            fmt="%(asctime)s [%(levelname)s] %(prefix)s - %(message)s"
        )
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger = logging.LoggerAdapter(logger, {"prefix": prefix})
    return logger
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/prev_cited.py | <filename>arxivdigest_recommenders/prev_cited.py
import asyncio
from collections import defaultdict
from typing import DefaultDict
from arxivdigest_recommenders.recommender import ArxivdigestRecommender
from arxivdigest_recommenders.semantic_scholar import SemanticScholar
from arxivdigest_recommenders import config
def explanation(author: dict, num_cites: int) -> str:
    """Build the recommendation explanation shown to the user.

    :param author: The paper author the user has cited (must have a "name").
    :param num_cites: Number of times the user has cited the author.
    :return: Explanation string.
    """
    times_word = "time" if num_cites == 1 else "times"
    return (
        f"This article is authored by {author['name']}, who you have cited {num_cites} "
        f"{times_word} in the last {config.MAX_PAPER_AGE} years."
    )
class PrevCitedRecommender(ArxivdigestRecommender):
    """Recommender system that recommends papers published by authors that the user has previously cited."""

    def __init__(self):
        super().__init__(config.PREV_CITED_API_KEY, "PrevCitedRecommender")
        # Citation counts per user: {user_s2_id: {cited_author_s2_id: count}}.
        self._citation_counts: DefaultDict[str, DefaultDict[str, int]] = defaultdict(
            lambda: defaultdict(int)
        )

    async def citation_counts(self, s2_id: str) -> DefaultDict[str, int]:
        """Count (and memoize) how many times an author has cited each other author.

        :param s2_id: S2 author ID.
        :return: Mapping from cited author S2 ID to citation count.
        """
        if s2_id not in self._citation_counts:
            async with SemanticScholar() as s2:
                papers = await s2.author_papers(s2_id)
            for paper in papers:
                for reference in paper["references"]:
                    for author in reference["authors"]:
                        # Some referenced authors have no S2 ID; skip them.
                        if author["authorId"]:
                            self._citation_counts[s2_id][author["authorId"]] += 1
        return self._citation_counts[s2_id]

    async def score_paper(self, user, user_s2_id, paper_id):
        """Score a paper by the user's citation count of its most-cited author."""
        async with SemanticScholar() as s2:
            paper = await s2.paper(arxiv_id=paper_id)
        # Skip papers with no author data or papers (co-)authored by the user.
        if len(paper["authors"]) == 0 or user_s2_id in [
            a["authorId"] for a in paper["authors"]
        ]:
            return
        citation_counts = await self.citation_counts(user_s2_id)
        most_cited_author = max(
            paper["authors"], key=lambda a: citation_counts[a["authorId"]]
        )
        score = citation_counts[most_cited_author["authorId"]]
        return {
            "article_id": paper_id,
            "score": score,
            "explanation": explanation(most_cited_author, score) if score > 0 else "",
        }
if __name__ == "__main__":
    # Run the recommender as a standalone script.
    asyncio.run(PrevCitedRecommender().recommend())
|
olafapl/arxivdigest_recommenders | tests/test_venue_copub.py | import unittest
from arxivdigest_recommenders.venue_copub import explanation
from arxivdigest_recommenders import config
# Synthetic venue-frequency vectors: authors[i][j] / users[i][j] is how many
# times that author/user has published at venues[j] (vectors may be shorter
# than the venue list; missing entries count as zero).
authors = [[2, 2, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1]]
users = [[4, 2, 0, 0, 3, 1], [1, 1, 0, 0, 0, 0]]
venues = ["a", "b", "c", "d", "e", "f", "g"]
class TestVenueCoPubRecommender(unittest.TestCase):
    """Unit tests for the venue co-publication explanation text."""

    def test_explanation(self):
        # Build explanations for both synthetic users against the first author.
        explanations = [
            explanation(venues, u, authors[0], "Author McAuthor") for u in users
        ]
        # User 0 has three shared venues with distinct counts, so the wording
        # (including the venue ordering) is fully deterministic.
        self.assertEqual(
            explanations[0],
            f"You have published 4 times at **a**, 3 times at **e**, and 2 times at **b** during the last "
            f"{config.MAX_PAPER_AGE} years. Author McAuthor has also published at these venues in the same time "
            f"period.",
        )
        # User 1's two shared venues have equal counts, so either venue
        # ordering is acceptable.
        self.assertIn(
            explanations[1],
            (
                f"You and Author McAuthor have both published at **a** and **b** during the last "
                f"{config.MAX_PAPER_AGE} years.",
                "You and Author McAuthor have both published at **b** and **a** during the last "
                f"{config.MAX_PAPER_AGE} years.",
            ),
        )
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
olafapl/arxivdigest_recommenders | arxivdigest_recommenders/util.py | import asyncio
import time
import numpy as np
import numpy.typing as npt
from urllib.parse import urlparse
from typing import Optional, List, Tuple, Any, Sequence, TypeVar, Iterator
T = TypeVar("T")
def extract_s2_id(user: dict) -> Optional[str]:
    """Extract the S2 author ID from a user's Semantic Scholar profile link.

    :param user: User dict with a "semantic_scholar_profile" URL.
    :return: The last path segment of the profile URL, or None if empty.
    """
    profile_path = str(urlparse(user["semantic_scholar_profile"]).path)
    last_segment = profile_path.split("/")[-1]
    return last_segment or None
def pad_shortest(a: npt.ArrayLike, b: npt.ArrayLike, pad: Any = 0):
    """Pad the shorter of two arrays so both have the same length.

    :param a: Vector a.
    :param b: Vector b.
    :param pad: Value used for the padding entries.
    :return: The two vectors, the shorter one right-padded to match.
    """
    diff = len(b) - len(a)
    if diff > 0:
        a = np.pad(a, (0, diff), constant_values=pad)
    elif diff < 0:
        b = np.pad(b, (0, -diff), constant_values=pad)
    return a, b
def padded_cosine_sim(a: npt.ArrayLike, b: npt.ArrayLike) -> float:
    """Cosine similarity of two vectors; the shorter one is zero-padded.

    :param a: Vector a.
    :param b: Vector b.
    :return: Cosine similarity (0.0 if either vector is all zeros).
    """
    # A zero vector has no direction: define similarity as 0 rather than
    # dividing by a zero norm.
    if not (np.count_nonzero(a) and np.count_nonzero(b)):
        return 0.0
    a, b = pad_shortest(a, b)
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / norm_product
async def gather(*args):
    """Wrapper around asyncio.gather that ignores and excludes exceptions."""
    outcomes = await asyncio.gather(*args, return_exceptions=True)
    successes = []
    for outcome in outcomes:
        if not isinstance(outcome, Exception):
            successes.append(outcome)
    return successes
class AsyncRateLimiter:
    """Limits the amount of times a section of code is entered within a window of time."""

    def __init__(self, max_enters: int, window_size: int):
        """
        :param max_enters: Max number of enters inside a window.
        :param window_size: Window size in seconds.
        """
        self.max_enters = max_enters
        self.window_size = window_size
        # Monotonic timestamps of the enters in the current window.
        self.enters = []
        # Created lazily in __aenter__ — presumably so the Lock is created
        # while an event loop is running; confirm before changing.
        self.lock = None

    async def __aenter__(self):
        if self.lock is None:
            self.lock = asyncio.Lock()
        async with self.lock:
            if len(self.enters) == self.max_enters:
                # Window is full: sleep until the oldest enter has aged out,
                # then start a fresh (empty) window.
                time_to_new_window = max(
                    self.enters[0] + self.window_size - time.monotonic(), 0
                )
                await asyncio.sleep(time_to_new_window)
                self.enters.clear()
            self.enters.append(time.monotonic())

    async def __aexit__(self, *err):
        # Nothing to release; limiting happens entirely on enter.
        pass
def chunks(seq: Sequence[T], chunk_size: int) -> Iterator[Sequence[T]]:
    """Yield successive slices of *seq*, each at most *chunk_size* long.

    :param seq: Sequence to split.
    :param chunk_size: Maximum length of each chunk.
    """
    start = 0
    total = len(seq)
    while start < total:
        yield seq[start : start + chunk_size]
        start += chunk_size
|
chrisguest75/py_coverage_example | binary_search_tree.py |
class binary_search_tree():
    """A simple unbalanced binary search tree.

    An empty tree is a node whose ``value`` is None; duplicates are stored
    in the right subtree.  ``add`` returns the tree so inserts can chain.
    """

    def __init__(self, initial_value=None):
        self.left = None   # subtree with values < self.value
        self.right = None  # subtree with values >= self.value
        self.value = initial_value

    def __len__(self):
        return self.num_nodes()

    def num_nodes(self):
        """Return the number of values stored in this subtree."""
        if self.value is None:
            return 0
        count = 1
        if self.left is not None:
            count += self.left.num_nodes()
        if self.right is not None:
            count += self.right.num_nodes()
        return count

    def contains(self, value):
        """Return whether *value* is stored in the tree.

        Bug fix: an empty tree previously fell through to ``value <
        self.value`` with ``self.value is None``, raising TypeError on
        Python 3; it now reports False.
        """
        if self.value is None:
            return False
        if self.value == value:
            return True
        if value < self.value:
            return self.left.contains(value) if self.left is not None else False
        return self.right.contains(value) if self.right is not None else False

    def add(self, value):
        """Insert *value* and return self to allow chained calls."""
        if self.value is None:
            # First insert fills the (empty) root in place.
            self.value = value
            return self
        node = binary_search_tree(value)
        if value < self.value:
            if self.left is None:
                self.left = node
            else:
                self.left.add(value)
        else:
            # Duplicates and larger values both go right.
            if self.right is None:
                self.right = node
            else:
                self.right.add(value)
        return self
|
chrisguest75/py_coverage_example | test_binary_search_tree.py | <gh_stars>0
import pytest
from binary_search_tree import binary_search_tree
@pytest.fixture
def empty_bst():
    # Fresh empty tree for every test.
    return binary_search_tree()

def test_create_empty(empty_bst):
    # An empty tree reports zero nodes via both len() and num_nodes().
    bst = empty_bst
    assert(len(bst) == 0)
    assert(bst.num_nodes() == 0)

def test_add_first_value(empty_bst):
    # The first add fills the root node.
    bst = empty_bst
    bst.add(10)
    assert(bst.num_nodes() == 1)

def test_add_multiple_values(empty_bst):
    # add() returns the tree, so inserts can be chained.
    bst = empty_bst
    bst.add(10).add(20).add(5)
    assert(bst.num_nodes() == 3)

def test_add_multiple_values_deep(empty_bst):
    # Values on both sides of the root build a deeper tree.
    bst = empty_bst
    bst.add(10).add(20).add(5).add(80).add(2)
    assert(bst.num_nodes() == 5)

def test_contains(empty_bst):
    # Membership checks for present and absent values.
    bst = empty_bst
    bst.add(10).add(20).add(5).add(80).add(2)
    assert(bst.contains(5))
    assert(bst.contains(50) is False)
    assert(bst.contains(7) is False)
|
ttan/Mute-o-Matic-V2 | code.py | <reponame>ttan/Mute-o-Matic-V2
import board
import time
import usb_hid
from digitalio import DigitalInOut, Direction, Pull
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
# Mode switch on GP4 (active-low with pull-up): selects which key chord set
# is sent — switch.value True branch vs False branch in the main loop.
switch = DigitalInOut(board.GP4)
switch.direction = Direction.INPUT
switch.pull = Pull.UP
# Button 1 on GP18, active-low.
bt1 = DigitalInOut(board.GP18)
bt1.direction = Direction.INPUT
bt1.pull = Pull.UP
# Button 2 on GP20, active-low.
bt2 = DigitalInOut(board.GP20)
bt2.direction = Direction.INPUT
bt2.pull = Pull.UP
# Status LED on GP10, toggled on button-1 presses.
led = DigitalInOut(board.GP10)
led.direction = Direction.OUTPUT
# USB HID keyboard used to send the shortcut chords.
keyboard=Keyboard(usb_hid.devices)
# Edge-detection flags: True while the corresponding button is released,
# so each press fires exactly once.
bt1Released = True
bt2Released = True
while True:
    # Button 1 pressed (active-low) while previously released: fire once.
    if not bt1.value and bt1Released:
        bt1Released = False
        if switch.value:
            #MacOs
            print("Button 1, Switch position 1")
            led.value = not led.value
            keyboard.send(Keycode.GUI, Keycode.SHIFT, Keycode.M)
        else:
            #Windows
            print("Button 1, Switch position 2")
            led.value = not led.value
            keyboard.send(Keycode.CONTROL, Keycode.SHIFT, Keycode.M)
    # Button 2 pressed while previously released: fire once.
    if not bt2.value and bt2Released:
        bt2Released = False
        if switch.value:
            #MacOs
            print("Button 2, Switch position 1")
            keyboard.send(Keycode.GUI, Keycode.SHIFT, Keycode.B)
        else:
            #Windows
            print("Button 2, Switch position 2")
            keyboard.send(Keycode.CONTROL, Keycode.SHIFT, Keycode.B)
    # Bug fix: these flags were assigned lowercase `true`, which is a
    # NameError in Python and crashed the firmware on the first button
    # release.  Python's boolean literal is `True`.
    if bt1.value and not bt1Released:
        bt1Released = True
    if bt2.value and not bt2Released:
        bt2Released = True
|
Drayang/ERPNext-Production-Scheduing | production_scheduling_shrdc/hooks.py | <reponame>Drayang/ERPNext-Production-Scheduing
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
# App metadata read by Frappe's installer and desk UI.
app_name = "production_scheduling_shrdc"
app_title = "Production Scheduling Shrdc"
app_publisher = "DCKY"
app_description = "This is a production scheduling app."
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "<EMAIL>"
app_license = "MIT"
# test_string = 'value'
# test_list = ['value']
# test_dict = {
# 'key': 'value'
# }
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/production_scheduling_shrdc/css/production_scheduling_shrdc.css"
# app_include_js = "/assets/production_scheduling_shrdc/js/production_scheduling_shrdc.js"
# include js, css files in header of web template
# web_include_css = "/assets/production_scheduling_shrdc/css/production_scheduling_shrdc.css"
# web_include_js = "/assets/production_scheduling_shrdc/js/production_scheduling_shrdc.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "orders"
# website user home page (by Role)
# role_home_page = {
# "Customer": "orders",
# "Supplier": "bills"
# }
# Website user home page (by function)
# get_website_user_home_page = "production_scheduling_shrdc.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "production_scheduling_shrdc.install.before_install"
# after_install = "production_scheduling_shrdc.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "production_scheduling_shrdc.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
'''
Can use it to overwrite a doctype.js event [a workaround/trick to overwrite js that we cant do using
doctype_js hook] [havent try out no sure can work or not]
Official doc: https://frappeframework.com/docs/v13/user/en/python-api/hooks#crud-events
Forum related: https://discuss.erpnext.com/t/override-all-save-function-in-all-doctype/49800/4
'''
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
'''
Extend the js file event, can write like how we write the custom script, it will not overwrite the
original doctype.js code
'''
# Client scripts that EXTEND (not overwrite) core DocTypes' JS; all entries
# are currently disabled.
doctype_js = {
    # 'Item':'public/js/item.js',
    # 'Customer':'public/js/customer.js',
    # 'Supplier':'public/js/supplier.js',
    # 'Employee':'public/js/employee.js',
    # 'BOM':'public/js/bom.js',
    # 'Workstation':'public/js/workstation.js',
    # 'Sales Order':'public/js/sales_order.js',
    # 'Skill':'public/js/skill.js',
    # 'Employee Skill Map':'public/js/employee_skill_map.js',
}
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "production_scheduling_shrdc.tasks.all"
# ],
# "daily": [
# "production_scheduling_shrdc.tasks.daily"
# ],
# "hourly": [
# "production_scheduling_shrdc.tasks.hourly"
# ],
# "weekly": [
# "production_scheduling_shrdc.tasks.weekly"
# ]
# "monthly": [
# "production_scheduling_shrdc.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "production_scheduling_shrdc.install.before_tests"
# Overriding Methods
# ------------------------------
'''
Able to overwrite the whitelisted method, it is OVERWRITE
'''
# Replace ERPNext's holiday-list calendar event source with this app's
# implementation (an OVERWRITE, not an extension).
override_whitelisted_methods = {
    "erpnext.hr.doctype.holiday_list.holiday_list.get_events": "production_scheduling_shrdc.event.get_events"
}
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "production_scheduling_shrdc.task.get_dashboard_data"
# }
# Map: overridden DocType -> dotted path of the replacement controller class
# ({app}.overrides.{doctype module}.Custom{DocType}).
override_doctype_class = {
    'Sales Order': 'production_scheduling_shrdc.overrides.sales_order.CustomSalesOrder'
}
|
Drayang/ERPNext-Production-Scheduing | production_scheduling_shrdc/production_scheduling_shrdc/doctype/frepple_integration/frepple_integration.py | <filename>production_scheduling_shrdc/production_scheduling_shrdc/doctype/frepple_integration/frepple_integration.py
# -*- coding: utf-8 -*-
# Copyright (c) 2022, DCKY and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
import frappe
import json
from frappe.integrations.utils import make_get_request, make_post_request, create_request_log
from frappe.utils import get_request_session
import requests
from requests.structures import CaseInsensitiveDict
from datetime import datetime
'''
Naming convention:
Get request: get_{frepple document}
Post request: post_{frepple document}
'''
class FreppleIntegration(Document):
    """DocType controller that talks to a frePPLe server's REST API.

    Module-wide naming convention: get_<frepple table> for GET requests,
    post_<frepple table> for POST requests.
    """

    # @frappe.whitelist()
    def get_demand(self):
        # Fetch the frePPLe "demand" list (maps to ERPNext Sales Orders) and
        # copy the first record's name and due date onto this document.
        api = 'demand'
        # url,headers = get_frepple_params(api=api,filter=None)
        ''' With filtering'''
        filter=None
        # filter="?status=open"
        url,headers = get_frepple_params(api=api,filter=filter)
        # filter = None
        ro = make_get_request(url,headers=headers) #ro is a list type, so need [] to access
        # dt = datetime.fromisoformat(startdate)
        name = ro[0]['name']
        print(type(ro))
        dd = ro[0]['due']
        #convert iso8601 time type to datetime.datetime type
        dt = datetime.fromisoformat(dd)
        self.sales_order = name
        self.delivery_date = dt
        return ro

    @frappe.whitelist()
    def test(self):
        # Debugging helper: print the stored password field and its type.
        # doc = frappe.get_doc('Frepple Integration', doc['name']) #To get the current item
        print(self.password)
        print(type(self.password))
        # print(type(doc.password))
        # return doc.password
@frappe.whitelist()
def testing(doc):
    # Debugging helper: *doc* is a JSON string; load the named Frepple
    # Integration document and return its password field.
    doc = json.loads(doc)
    print(doc)
    doc = frappe.get_doc('Frepple Integration', doc['name']) #To get the current item
    print(type(doc.password))
    return doc.password
def make_put_request(url, auth=None, headers=None, data=None):
    """Issue a PUT request through Frappe's shared requests session.

    Mirrors frappe.integrations.utils.make_get_request/make_post_request.

    :param url: Target URL.
    :param auth: Optional auth passed through to requests.
    :param headers: Optional request headers.
    :param data: Optional request body.
    :return: Parsed JSON response (or a parsed query string for text/plain).
    :raises Exception: Re-raises any request/HTTP error after logging it.
    """
    # Bug fix: parse_qs was used below but never imported anywhere in this
    # module, so the text/plain branch raised NameError.
    from urllib.parse import parse_qs

    if not auth:
        auth = ''
    if not data:
        data = {}
    if not headers:
        headers = {}
    try:
        s = get_request_session()
        frappe.flags.integration_request = s.put(url, data=data, auth=auth, headers=headers)
        frappe.flags.integration_request.raise_for_status()
        if frappe.flags.integration_request.headers.get("content-type") == "text/plain; charset=utf-8":
            return parse_qs(frappe.flags.integration_request.text)
        return frappe.flags.integration_request.json()
    except Exception as exc:
        frappe.log_error()
        raise exc
# @frappe.whitelist()
# def put_demand():
# if(frappe.get_doc("Frepple Setting").frepple_integration):
# # doc = frappe.get_doc('Sales Order', doc['name']) #To get the current doc
# data = json.dumps({
# "status": "closed", #default
# })
# api = "demand/SAL-ORD-2022-00033" #equivalent sales order
# url,headers = get_frepple_params(api=api,filter=None)
# output = make_put_request(url,headers=headers, data=data)
# frappe.msgprint(
# msg='Data have been updated.',
# title='Note',
# )
# return output
@frappe.whitelist()
def get_frepple_params(api=None,filter = None):
    """Build the frePPLe REST API URL and headers from the Frepple Setting doc.

    :param api: frePPLe input table name (e.g. 'demand'); default root list.
    :param filter: Query string starting with '?'; default none.
    :return: Tuple (url, headers).
    """
    if not api:
        api = "" #default get the demand(=sales order in ERPNext) list from frepple
    if not filter:
        filter = ""
    frepple_settings = frappe.get_doc("Frepple Setting")
    temp_url = frepple_settings.url.split("//")
    # NOTE(review): credentials are embedded in the URL *and* an Authorization
    # header is sent, and the scheme is forced to http regardless of the
    # configured URL — confirm whether both are intentional.
    url1 = "http://"
    url2 = frepple_settings.username + ":" + frepple_settings.password + "@"
    url3 = temp_url[1] + "/api/input/"
    url4 = "/"
    # "/?format=json"
    # "/?format=api"
    #Concatenate the URL
    url = url1 + url2 + url3 + api + url4 + filter
    # example outcome : http://admin:admin@192.168.112.1:5000/api/input/manufacturingorder/
    headers= {
        'Content-type': 'application/json; charset=UTF-8',
        'Authorization': frepple_settings.authorization_header,
    }
    print(url+ "-------------------------------------------------------------------------")
    return url,headers
#Testing purpose GET request
@frappe.whitelist()
def make_request():
    # Scratch/debug helper exercising a plain GET.
    # NOTE(review): `url` is reassigned several times; only the last
    # assignment (the 192.168.0.145 address) is actually used.
    # url = 'https://httpbin.org/post'
    url = 'https://jsonplaceholder.typicode.com/todos/1'
    # headers= {
    #     'Content-type': 'application/json; charset=UTF-8',
    #     # 'Accept': 'text/html; q=1.0, */*',
    #     'Authorization': 'Bearer <KEY>',
    #     # 'user': 'admin',
    #     # 'password': '<PASSWORD>'
    # }
    url = 'http://admin:admin@192.168.112.1:5000/api/input/demand/?format=json'
    # url = 'http://192.168.112.1:5000/api/input/demand/?format=json'
    url = 'http://192.168.0.145:5000/api/input/demand/?format=json'
    # url = 'http://admin:admin@172.17.0.1:5000/api/input/demand/?format=json'
    # url = 'http://172.17.0.1:5000/api/input/demand/?format=json'
    headers= {
        'Content-type': 'application/json',
        'Authorization': "<KEY>",
    }
    abc = make_get_request(url,headers=headers)
    return abc
'''
# api = 'string' e.g 'manufacturingorder'
# filter = '?filter' e.g 'name=SALE-ORDER-005' ,'location=SHRDC&customer=Drayang'
'''
@frappe.whitelist()
def get_manufacturingorder():
    # Fetch and return the frePPLe manufacturing-order list.
    api = 'manufacturingorder'
    url,headers = get_frepple_params(api=api,filter=None)
    ''' With filtering'''
    # filter = "?name=SAL-ORDER-0002"
    # filter = None
    # filter = "?status__contain=open"
    # url,headers = get_frepple_params(api=None,filter=filter)
    ro = make_get_request(url,headers=headers)
    startdate = ro[0]['startdate']
    #convert iso8601 time type to datetime.datetime type
    # NOTE(review): `dt` is computed but never used or returned — confirm
    # whether the conversion is still needed.
    dt = datetime.fromisoformat(startdate)
    return ro
@frappe.whitelist()
def post_item(doc):
    # Export an ERPNext Item to frePPLe's "item" table, creating its Item
    # Group first so the "owner" reference resolves.  Runs only when the
    # integration toggle in Frepple Setting is enabled.
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        doc = json.loads(doc) #dict form
        doc = frappe.get_doc('Item', doc['name']) #To get the current item
        ''' Define the Frepple table you want to match'''
        api = "item" #frePPLe "item" table
        url,headers = get_frepple_params(api=api,filter=None)
        '''Add the item_group to frepple to use it as the owner to ensure no request error happen'''
        data = json.dumps({
            "name": doc.item_group,
        })
        output = make_post_request(url,headers=headers, data=data)
        '''Add the actual item to frepple'''
        data = json.dumps({
            "name": doc.name,
            "owner":doc.item_group,
            "description":doc.item_name,
            "uom":doc.stock_uom,
            "cost":doc.valuation_rate,
        })
        output = make_post_request(url,headers=headers, data=data)
        frappe.msgprint(
            msg='Data have been exported to Frepple.',
            title='Note',
        )
        return output
@frappe.whitelist()
def post_location():
    # Export every Company and Warehouse to frePPLe's "location" table,
    # preserving the warehouse hierarchy via the "owner" field.
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        ''' Define the Frepple table you want to match'''
        api = "location"
        url,headers = get_frepple_params(api=api,filter=None)
        company_list = frappe.db.get_list('Company', # every Company record
            fields =['name'],
        )
        for company in company_list:
            '''Add the company to the location table'''
            data = json.dumps({
                "name": company.name,
            })
            output = make_post_request(url,headers=headers, data=data)
        warehouse_list = frappe.db.get_list('Warehouse', # every Warehouse record
            fields =['name','company','parent_warehouse'],
        )
        for warehouse in warehouse_list:
            '''Add the warehouse location to the location table'''
            data = json.dumps({
                "name": warehouse.name,
                "owner":warehouse.parent_warehouse if (warehouse.parent_warehouse) else warehouse.company,
                # If a warehouse is the parent warehouse, let its owner to be the company name
            })
            output = make_post_request(url,headers=headers, data=data)
        frappe.msgprint(
            msg='Data have been exported to Frepple.',
            title='Note',
        )
        return output
@frappe.whitelist()
def post_employee(doc):
    # Export an *active* Employee to frePPLe's "resource" table, under a
    # generic "Operator" parent resource (created first so the owner
    # reference resolves).
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        doc = json.loads(doc) #dict form
        doc = frappe.get_doc('Employee', doc['name']) #To get the current item
        if(doc.status == "Active"):
            ''' Define the Frepple table you want to match'''
            api = "resource" #Employee maps to a frePPLe resource
            url,headers = get_frepple_params(api=api,filter=None)
            '''Add a null operator to frepple to use it as the owner to ensure no request error happen'''
            data = json.dumps({
                "name": "Operator",#default
            })
            output = make_post_request(url,headers=headers, data=data)
            '''Add the actual employee to frepple'''
            data = json.dumps({
                "name": doc.name,
                "description":doc.employee_name,
                "owner":"Operator" #default
            })
            output = make_post_request(url,headers=headers, data=data)
            frappe.msgprint(
                msg='Data have been exported to Frepple.',
                title='Note',
            )
            return output
@frappe.whitelist()
def post_workstation(doc):
    # Export workstation(s) to frePPLe's "resource" table.  *doc* is a JSON
    # string of either a BOM (export every workstation used in its
    # operations) or a single Workstation document.
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        ''' Define the Frepple table you want to match'''
        api = "resource" #workstations map to frePPLe resources
        url,headers = get_frepple_params(api=api,filter=None)
        temp_doc = json.loads(doc) #dict form
        '''If doctype = BOM'''
        if (temp_doc['doctype'] == "BOM" and temp_doc['is_active'] ): # Only add an active BOM
            doc = frappe.get_doc('BOM', temp_doc['name']) #To get the current item
            operations = doc.operations
            # Get the workstation
            for row in operations:
                d = frappe.get_doc('BOM Operation', row.name) #To access child doctype
                workstation = d.workstation
                '''Add the workstation to frepple'''
                data = json.dumps({
                    "name": workstation,
                })
                output = make_post_request(url,headers=headers, data=data)
        '''If doctype = Workstation'''
        if (temp_doc['doctype'] == "Workstation"):
            doc = frappe.get_doc('Workstation', temp_doc['name']) #To get the current item
            data = json.dumps({
                "name": doc.name,
            })
            output = make_post_request(url,headers=headers, data=data)
        frappe.msgprint(
            msg='Data have been exported to Frepple.',
            title='Note',
        )
        return output
@frappe.whitelist()
def post_customer(doc):
    # Export a Customer to frePPLe's "customer" table, creating its Customer
    # Group first so the "owner" reference resolves.
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        doc = json.loads(doc) #dict form
        doc = frappe.get_doc('Customer', doc['name']) #To get the current customer
        ''' Define the Frepple table you want to match'''
        api = "customer" #equivalent to customer doctype
        url,headers = get_frepple_params(api=api,filter=None)
        '''To create a cutomer with Group type first to ensure no request error happen'''
        data = json.dumps({
            "name": doc.customer_group,
        })
        output = make_post_request(url,headers=headers, data=data)
        '''Create the actual customer we would like to add to'''
        data = json.dumps({
            "name": doc.name,
            "category":doc.customer_type,
            "owner":doc.customer_group
        })
        output = make_post_request(url,headers=headers, data=data)
        frappe.msgprint(
            msg='Data have been exported to Frepple.',
            title='Note',
        )
        return output
@frappe.whitelist()
def post_supplier(doc):
    # Export a Supplier to frePPLe's "supplier" table.
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        doc = json.loads(doc) #dict form
        doc = frappe.get_doc('Supplier', doc['name']) #To get the current supplier
        ''' Define the Frepple table you want to match'''
        api = "supplier" #equivalent to supplier doctype
        url,headers = get_frepple_params(api=api,filter=None)
        '''Create the actual supplier we would like to add to'''
        data = json.dumps({
            "name": doc.name,
            "category":doc.supplier_group
        })
        output = make_post_request(url,headers=headers, data=data)
        frappe.msgprint(
            msg='Data have been exported to Frepple.',
            title='Note',
        )
        return output
@frappe.whitelist()
def post_demand(doc):
    # Export a Sales Order to frePPLe's "demand" table.  Assumes exactly one
    # item row per Sales Order (the loop keeps only the last row's values).
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        # NOTE (from original experimentation): only strings/numbers survive
        # the whitelisted-call boundary — datetimes cannot be passed through
        # eval(), so the due date is serialized via isoformat() below.
        # See https://discuss.erpnext.com/t/how-to-call-external-web-service/40448/14
        doc = json.loads(doc) #dict form
        doc = frappe.get_doc('Sales Order', doc['name']) #To get the current sales order
        so_items = doc.items
        # Get the item and its quantity (assume only one item per sales order now)
        for row in so_items:
            d = frappe.get_doc('Sales Order Item', row.name) #To access child doctype
            quantity = d.qty
            item = d.item_name
        data = json.dumps({
            "name": doc.name,
            "description": "Item ordered by " + doc.customer_name, #default
            "category": "", #default
            "subcategory": "", #default
            "item": item,
            "customer": doc.customer_name,
            "location": "SHRDC", #default
            "due": (doc.delivery_date.isoformat()+"T00:00:00"),
            "status": "open", #default
            "quantity": quantity,
            "priority": "10" #default
        })
        api = "demand" #equivalent sales order
        url,headers = get_frepple_params(api=api,filter=None)
        output = make_post_request(url,headers=headers, data=data)
        frappe.msgprint(
            msg='Data have been exported to Frepple.',
            title='Note',
        )
        return output
@frappe.whitelist()
def post_skill(doc):
    # Export a Skill to frePPLe's "skill" table (name only).
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        doc = json.loads(doc) #dict form
        doc = frappe.get_doc('Skill', doc['name']) #To get the current skill
        ''' Define the Frepple table you want to match'''
        api = "skill" #equivalent to skill doctype
        url,headers = get_frepple_params(api=api,filter=None)
        '''Create the actual supplier we would like to add to'''
        data = json.dumps({
            "name": doc.name,
        })
        output = make_post_request(url,headers=headers, data=data)
        frappe.msgprint(
            msg='Data have been exported to Frepple.',
            title='Note',
        )
        return output
@frappe.whitelist()
def post_resourceskill(doc):
    # Export each row of an Employee Skill Map to frePPLe's "resourceskill"
    # table.  frePPLe's priority field encodes proficiency (lower = better),
    # hence the 5 - proficiency mapping.
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        ''' Define the Frepple table you want to match'''
        api = "resourceskill" #links a resource (employee) to a skill
        url,headers = get_frepple_params(api=api,filter=None)
        doc = json.loads(doc) #dict form
        doc = frappe.get_doc('Employee Skill Map', doc['name']) #To get the current item
        skill_list = doc.employee_skills
        for row in skill_list:
            d = frappe.get_doc('Employee Skill', row.name) #To access child doctype
            '''Add the skill to frepple'''
            data = json.dumps({
                "resource":doc.employee,
                "location":"work in progress",
                "skill" : d.skill,
                "priority":5-d.proficiency, #use priority in frepple to define how proficiency the employee is
            })
            output = make_post_request(url,headers=headers, data=data)
        frappe.msgprint(
            msg='Data have been exported to Frepple.',
            title='Note',
        )
        return output
@frappe.whitelist()
def run_plan():
    # Trigger a frePPLe planning run via /execute/api/runplan (constrained
    # plan, plantype=1).
    if(frappe.get_doc("Frepple Setting").frepple_integration):
        filter = "/execute/api/runplan/?constraint=15&plantype=1&env=fcst,invplan,balancing,supply"
        frepple_settings = frappe.get_doc("Frepple Setting")
        temp_url = frepple_settings.url.split("//")
        url = "http://"+ frepple_settings.username + ":" + frepple_settings.password + "@" + temp_url[1] + filter
        print(url + "-----------------------------------------------------------------------")
        headers= {
            'Content-type': 'application/json; charset=UTF-8',
            'Authorization': frepple_settings.authorization_header,
        }
        output = make_post_request(url,headers=headers, data=None)
        # NOTE(review): user-facing message contains typos
        # ("runned succesffully") — fix deliberately, not silently.
        frappe.msgprint(
            msg='Plan have been runned succesffully',
            title='Success',
        )
        return output
# @frappe.whitelist()
# def get_demand(self):
# api = 'demand'
# # url,headers = get_frepple_params(api=api,filter=None)
# ''' With filtering'''
# filter=None
# filter="?status=open"
# url,headers = get_frepple_params(api=None,filter=filter)
# # filter = None
# ro = make_get_request(url,headers=headers)
# # dt = datetime.fromisoformat(startdate)
# name = ro[0]['name']
# print(type(ro))
# dd = ro[0]['due']
# #convert iso8601 time type to datetime.datetime type
# dt = datetime.fromisoformat(dd)
# self.delivery_date = dt
# return ro |
Drayang/ERPNext-Production-Scheduing | production_scheduling_shrdc/production_scheduling_shrdc/doctype/production_scheduling_backend/production_scheduling_backend.py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, DCKY and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import get_all, msgprint, _
from frappe.utils import add_to_date ,getdate,today,now #utils now and today is string form
from frappe.utils import flt, cint, formatdate, comma_and, time_diff_in_seconds, to_timedelta
import datetime
from frappe.model.document import Document
# from erpnext.erpnext.manufacturing.doctype.workstation.workstation import
from erpnext.manufacturing.doctype.production_plan.production_plan import ProductionPlan
from erpnext.manufacturing.doctype.workstation.workstation import get_default_holiday_list
#from frappe.utils.background_jobs import start_worker
from frappe.utils.data import time_diff_in_hours
# Raised for workstation/holiday scheduling conflicts.
class WorkstationHolidayError(frappe.ValidationError): pass

# DocType controller; the scheduling behaviour lives in the module-level
# whitelisted functions below.
class ProductionSchedulingBackend(Document):
    pass
def get_all_info(doc,current_time):
    # Collect everything needed to schedule a Production Plan: the plan's
    # item rows, the workstation's operating window, the total operation
    # time, and the latest assignable datetime.  Returned as a dict.
    # current_time = current datetime , pass in as arg because now() and datetime.datetime.now() cant get the correct now
    current_time = datetime.datetime.strptime(current_time,'%Y-%m-%d %H:%M:%S') #current_time pass in as datetime type
    #pp_date = today()
    pp_date = datetime.datetime.strptime(today(),'%Y-%m-%d') #convert to datetime type
    # To access the current from field type. doc is in dict type, access using doc['fieldname']
    doc = json.loads(doc) #dict form
    doc = frappe.get_doc('Production Plan', doc['name']) #To get the current doc
    po_items = doc.po_items #access the production plan item child doctype
    # Obtain the total operation time and working hours of workstation
    total_op_time,start_time,end_time = read_bom(po_items,pp_date)
    #last_datetime=datetime.datetime(1900,1,1,0,0,0) #initialise datetime.datetime
    # Find the latest datetime we can assigned to the work order,meanwhile,update the start_time and end_time
    last_datetime,start_time,end_time = find_datetime(pp_date,current_time,start_time,end_time)
    info = dict()
    info['last_datetime']=last_datetime
    info['start_time']=start_time
    info['end_time']=end_time
    info['total_op_time']=total_op_time
    info['po_items']=po_items
    info['pp_date']=pp_date
    info['current_time']=current_time
    return info
@frappe.whitelist()
def production_scheduling(doc,current_time):
    """Schedule the rows of a Production Plan onto the workstation timeline.

    :param doc: JSON-serialized Production Plan document.
    :param current_time: Current datetime as a '%Y-%m-%d %H:%M:%S' string
        (passed from the client; see get_all_info).

    Responds via frappe.response['message'] with the availability flags so
    client-side callers can read them as r.message.<key>.
    """
    # Machine availability: valid_ms True = machine up, False = down.
    machine_status,valid_ms,machine = check_workstation_status(doc)
    # Employee availability.
    valid_emp = check_employee(doc)
    info = {}
    if (valid_ms and valid_emp): # Only schedule if machine and employee are available
        info = get_all_info(doc,current_time)
        po_items = info['po_items']
        last_datetime = info['last_datetime']
        start_time = info['start_time']
        end_time = info['end_time']
        total_op_time = info['total_op_time']
        pp_date = info['pp_date']
        current_time = info['current_time']
        for row in po_items:
            d = frappe.get_doc('Production Plan Item', row.name) # child row
            last_datetime,temp_datetime,exceed,start_time,end_time = check_within_operating_hours(d,current_time,last_datetime,pp_date,start_time,end_time,total_op_time,machine)
            # Only schedule the row if it still fits in the working day.
            # Bug fix: the guard was `if (~exceed)`; `~` is bitwise NOT, so
            # for a bool it yields -1/-2 (always truthy) and the "day
            # exceeded" case was never skipped.
            if not exceed:
                d.planned_start_date = last_datetime # schedule this row
                last_datetime = temp_datetime # advance the timeline
            d.save(ignore_permissions=True, ignore_version=True)
            d.reload()
    frappe.response['message'] = {
        'valid_ms':valid_ms,
        'valid_emp':valid_emp,
        'machine':machine,
        'doc':doc,
    }
@frappe.whitelist()
def update_production_plan(doc, current_time):
    """Reschedule work orders affected by a machine breakdown.

    When the workstation is Down (any status except "Down: Scheduling
    Maintenance", which has no planned end date to schedule around), every
    Production Plan Item from the first affected row onwards is pushed past
    the Maintenance Work Order's planned completion (+5 minute buffer).
    Results are reported via frappe.response['message'].
    """
    # valid_ms: True = machine available (Up), False = unavailable (Down)
    machine_status, valid_ms, machine = check_workstation_status(doc)
    valid_emp = check_employee(doc)  # also persists the employee checkbox
    affected_idx = 0
    # Placeholder downtime; replaced by the real Maintenance Work Order value
    # below when one exists.
    downtime = datetime.datetime(2022, 1, 4, 12, 25, 00)
    info = {}
    # change: True = production plan can be rescheduled, False = cannot
    change = False
    if valid_ms == False:  # only run when the workstation is Down
        if machine_status == "Down: Scheduling Maintenance":
            # This Down status has no planned end date yet, so there is
            # nothing to reschedule around.
            change = False
        else:
            info = get_all_info(doc, current_time)
            po_items = info['po_items']
            last_datetime = info['last_datetime']
            start_time = info['start_time']
            end_time = info['end_time']
            total_op_time = info['total_op_time']
            pp_date = info['pp_date']
            current_time = info['current_time']
            # Find the maintenance work order for the broken-down machine.
            mwo = frappe.db.get_list('Maintenance Work Order',
                filters={'workstation': machine},
                fields=['name'],
            )
            if len(mwo) != 0:  # the maintenance work order exists
                change = True
                mwo_doc = frappe.get_doc('Maintenance Work Order', mwo[0])
                # Resume 5 minutes after the planned maintenance completion.
                last_datetime = add_to_date(mwo_doc.planned_complete_date, minutes=5, as_datetime=True)
                downtime = mwo_doc.starting_downtime
            # Locate the first work order hit by the downtime (0 = none).
            affected_idx = find_affected_wo(po_items, downtime)
            # Advance pp_date/start/end until they match last_datetime's date.
            pp_date, start_time, end_time = update_time(pp_date, start_time, end_time, last_datetime)
            # Re-plan every row from the affected one onwards.
            for row in po_items:
                d = frappe.get_doc('Production Plan Item', row.name)
                if (d.idx >= affected_idx) and (affected_idx != 0):  # 0 = no affected WO
                    last_datetime, temp_datetime, exceed, start_time, end_time = check_within_operating_hours(
                        d, current_time, last_datetime, pp_date, start_time, end_time, total_op_time, machine)
                    # BUG FIX: the original used `if (~exceed)`; ~0 and ~1 are
                    # both truthy so the guard never worked (the file's own
                    # comment at the top of this function warns about ~False).
                    if not exceed:
                        d.db_set('planned_start_date', last_datetime)
                        last_datetime = temp_datetime  # advance the cursor
                    d.save(ignore_permissions=True)
                    d.reload()
    frappe.response['message'] = {
        'valid_ms': valid_ms,
        'change': change,
        'affected_idx': affected_idx,
        'machine': machine,
        'info': info,
        'doc': doc,
    }
@frappe.whitelist()
def update_work_order(doc):
    """Copy each Production Plan Item's planned_start_date onto the Work
    Order created for the same Sales Order, then persist and reload it."""
    payload = json.loads(doc)  # the client sends the form as a JSON string
    plan = frappe.get_doc('Production Plan', payload['name'])
    for row in plan.po_items:  # production plan item child table
        item = frappe.get_doc('Production Plan Item', row.name)
        print(item.sales_order)
        work_order = frappe.db.get_list(
            'Work Order',
            filters={'sales_order': item.sales_order},
            fields=['name'],
        )
        # Push the planned date onto the matching work order ...
        frappe.db.set_value('Work Order', work_order[0].name, 'planned_start_date', item.planned_start_date)
        # ... then save/reload the doc so the value is stored and refreshed.
        document = frappe.get_doc('Work Order', work_order[0].name)
        document.save(ignore_permissions=True, ignore_version=True)
        document.reload()
    frappe.response['message'] = {
        'doc': plan,
    }
@frappe.whitelist()
def check_workstation_status_js(doc):
    """Whitelisted machine-availability check (does NOT persist the checkbox).

    Returns (machine_status, valid_ms, machine): valid_ms stays True while
    every checked workstation is "Up" and flips to False (returning early)
    as soon as one "Down" status is found.
    """
    doc = json.loads(doc)  # the client sends the form as a JSON string
    doc_dict = doc
    doc = frappe.get_doc('Production Plan', doc['name'])
    po_items = doc.po_items  # production plan item child table
    bom_no = po_items[0].bom_no  # assume all rows link to the same BOM
    bom = frappe.get_doc('BOM', bom_no)
    # Any of these Machine Status values means the machine is unavailable.
    # (The two "Up" statuses need no listing: anything not Down is valid.)
    DOWN_STATUSES = (
        "Down: Scheduling Maintenance",
        "Down: Waiting For Maintenance",
        "Down: Under Maintenance",
        "Down: Post Inspection",
    )
    machine_status = ''
    machine = ''
    valid_ms = True
    if doc_dict['check_machine_status']:  # only check when the checkbox is ticked
        for row in bom.operations:  # one workstation per BOM operation
            workstation = frappe.get_doc('Workstation', row.workstation)
            machine_status_name = frappe.db.get_list(
                'Machine Status',
                filters={'workstation': workstation.workstation_name},
                fields=['name'],
            )
            document = frappe.get_doc('Machine Status', machine_status_name[0].name)
            machine = workstation.workstation_name
            machine_status = document.machine_status
            # Stop at the first machine that is down.
            if machine_status in DOWN_STATUSES:
                valid_ms = False
                return machine_status, valid_ms, machine
    # valid_ms: True = every machine Up (or check disabled)
    return machine_status, valid_ms, machine
#to be called internally
def check_workstation_status(doc):
    """Internal machine-availability check; also persists the checkbox state.

    Same logic as check_workstation_status_js, but additionally writes the
    check_machine_status checkbox back to the database so a client-side
    frm.reload_doc() keeps its state. Returns (machine_status, valid_ms,
    machine) with valid_ms False as soon as one "Down" machine is found.
    """
    doc = json.loads(doc)  # callers pass the serialised form
    doc_dict = doc
    doc = frappe.get_doc('Production Plan', doc['name'])
    po_items = doc.po_items  # production plan item child table
    bom_no = po_items[0].bom_no  # assume all rows link to the same BOM
    bom = frappe.get_doc('BOM', bom_no)
    # Persist the checkbox so frm.reload_doc() does not reset it.
    doc.db_set('check_machine_status', doc_dict['check_machine_status'], commit=True)
    doc.save(ignore_permissions=True, ignore_version=True)
    doc.reload()
    # Any of these Machine Status values means the machine is unavailable.
    DOWN_STATUSES = (
        "Down: Scheduling Maintenance",
        "Down: Waiting For Maintenance",
        "Down: Under Maintenance",
        "Down: Post Inspection",
    )
    machine_status = ''
    machine = ''
    valid_ms = True
    if doc_dict['check_machine_status']:  # only check when the checkbox is ticked
        for row in bom.operations:  # one workstation per BOM operation
            workstation = frappe.get_doc('Workstation', row.workstation)
            machine_status_name = frappe.db.get_list(
                'Machine Status',
                filters={'workstation': workstation.workstation_name},
                fields=['name'],
            )
            document = frappe.get_doc('Machine Status', machine_status_name[0].name)
            machine = workstation.workstation_name
            machine_status = document.machine_status
            # Stop at the first machine that is down.
            if machine_status in DOWN_STATUSES:
                valid_ms = False
                return machine_status, valid_ms, machine
    # valid_ms: True = every machine Up (or check disabled)
    return machine_status, valid_ms, machine
@frappe.whitelist()
def check_duplicate_wo(doc):
    """Report Production Plan rows whose Work Order already exists with the
    full planned quantity, so the client can remove them and avoid creating
    duplicate work orders."""
    payload = json.loads(doc)  # the client sends the form as a JSON string
    plan = frappe.get_doc('Production Plan', payload['name'])
    row_to_remove = []  # sales orders whose qty is already fully covered
    for row in plan.po_items:
        cd = frappe.get_doc('Production Plan Item', row.name)
        # A potential duplicate exists when a Work Order already references
        # this row's sales order.
        if frappe.db.exists({'doctype': 'Work Order', 'sales_order': cd.sales_order}):
            existing_qty = frappe.db.get_value('Work Order', {'sales_order': cd.sales_order}, ['qty'])
            if cd.planned_qty - existing_qty == 0:
                # The same quantity has already been created, so this row
                # would produce a duplicate work order.
                row_to_remove.append(cd.sales_order)
    frappe.response['message'] = {
        'row_to_remove': row_to_remove,
        'doc': plan,
    }
@frappe.whitelist()
def check_employee_js(doc):
    """Whitelisted employee-availability check (does NOT persist the checkbox).

    Returns 1 when either (a) the check_employee box is unticked, or
    (b) there are enough active employees AND every required skill is
    covered; otherwise 0.
    """
    payload = json.loads(doc)  # the client sends the form as a JSON string
    doc_dict = payload
    plan = frappe.get_doc('Production Plan', payload['name'])
    bom, workstation = get_bom_detail(plan.po_items)
    # Both checks default to "pass" so an unticked checkbox yields 1.
    amount_ok = 1
    skill_ok = 1
    if doc_dict['check_employee']:  # only run the checks when ticked
        amount_ok = check_emp_amout(workstation)
        skill_ok = check_emp_skill(workstation)
    return 1 if (amount_ok and skill_ok) else 0
#to be called internally
def check_employee(doc):
    """Internal employee-availability check; also persists the checkbox state.

    Returns 1 when either (a) the check_employee box is unticked, or
    (b) there are enough active employees AND every required skill is
    covered; otherwise 0.
    """
    payload = json.loads(doc)  # callers pass the serialised form
    doc_dict = payload
    plan = frappe.get_doc('Production Plan', payload['name'])
    # Persist the checkbox so a client-side frm.reload_doc() keeps its state.
    plan.db_set('check_employee', doc_dict['check_employee'])
    plan.save(ignore_permissions=True)
    plan.reload()
    bom, workstation = get_bom_detail(plan.po_items)
    # Both checks default to "pass" so an unticked checkbox yields 1.
    amount_ok = 1
    skill_ok = 1
    if doc_dict['check_employee']:  # only run the checks when ticked
        amount_ok = check_emp_amout(workstation)
        skill_ok = check_emp_skill(workstation)
    return 1 if (amount_ok and skill_ok) else 0
def check_emp_skill(workstation):
    """Return 1 if, for every workstation, at least one employee holds the
    required skill; otherwise 0.

    The required skill is derived from the workstation name, expected to look
    like "Workstation 1: Loading" -> skill "Loading". For each skill we count
    employees (from Employee Skill Map) who list it; every skill needs at
    least one match.

    Limitation (as in the original): an employee holding several required
    skills is counted for each of them, so double-booking is not detected.
    """
    valid_emp_skill = 1
    emp_list = frappe.db.get_list('Employee Skill Map', fields=['name'])
    if len(emp_list) == 0:
        # No skill maps exist at all -> nothing to check, treat as valid
        # (matches the original control flow).
        return valid_emp_skill
    skill_rq = []        # one required skill name per workstation
    emp_with_skill = {}  # skill name -> [[1, 1, ...]] one 1 per matching employee
    # FIXES vs original: the loop variable shadowed the `workstation`
    # parameter, a dead `skill = doc.employee_skills` assignment preceded the
    # loop that rebound `skill`, and the index was tracked by hand; debug
    # prints removed. Behaviour is otherwise unchanged.
    for idx, ws_name in enumerate(workstation):
        matches = []
        skill_rq.append(ws_name.split(": ")[1])  # "Workstation 1: Loading" -> "Loading"
        emp_with_skill[skill_rq[idx]] = []
        for row in emp_list:
            emp_doc = frappe.get_doc('Employee Skill Map', row.name)
            for skill in emp_doc.employee_skills:
                if skill_rq[idx] == skill.skill:  # employee has the skill we want
                    matches.append(1)
        emp_with_skill[skill_rq[idx]].append(matches)
    for key in skill_rq:
        if sum(emp_with_skill[key][0]) < 1:  # no employee covers this skill
            valid_emp_skill = 0
            return valid_emp_skill
    return valid_emp_skill
def check_emp_amout(workstation):
    """Return 1 when there are at least as many active employees as
    workstations, else 0."""
    # NOTE(review): ERPNext stores Employee.status capitalised ("Active");
    # this lowercase filter presumably relies on a case-insensitive DB
    # collation — confirm.
    active_employees = frappe.db.get_list(
        'Employee',
        filters={'status': 'active'},
        fields=['name'],
    )
    enough = 1 if len(active_employees) >= len(workstation) else 0
    print(len(active_employees))
    print(len(workstation))
    print(enough)
    return enough
################## update_production_plan ###################
def find_affected_wo(po_items,downtime):
    """Return the 1-based idx of the first Production Plan Item whose slot is
    hit by `downtime`; 0 means no work order is affected.

    Uses a one-row look-behind: on each iteration (from the 2nd row on) it
    tests whether `downtime` falls between the PREVIOUS row's start and this
    row's start, i.e. inside the previous work order's window.
    NOTE(review): assumes rows iterate in idx order starting at idx 1 —
    otherwise `temp_planned_start_date` would be used before assignment.
    """
    affected_idx = 0 # e.g = 1 mean first row WO get affected
    inRange = 0
    end_idx = len(po_items) #size of the po_items list = the last idx
    for row in po_items:
        d = frappe.get_doc('Production Plan Item', row.name) #To access child doctype
        if (d.idx != 1): #First row does not run this check (no previous row yet)
            #Check whether the PREVIOUS row's window [prev_start, this_start] contains the downtime
            inRange = time_in_range(temp_planned_start_date,d.planned_start_date,downtime)
            print('inRange')
            print(inRange)
            print(downtime)
            print(temp_planned_start_date)
            print(d.planned_start_date)
            if (inRange): #this WO and every subsequent work order are affected
                affected_idx = d.idx-1 #minus 1: the look-behind refers to the previous row
                return affected_idx
            if (d.idx == end_idx): # reached the last row without a hit
                #assume loop till last row; if still not within then no work order is affected
                return affected_idx #should be 0
            # inRange = time_in_range(temp_planned_start_date,d.planned_start_date,downtime)
        temp_planned_start_date = d.planned_start_date  # remember for the look-behind
def time_in_range(start, end, downtime):
    """Return 1 when `downtime` lies inside [start, end] (inclusive), else 0."""
    return 1 if start <= downtime <= end else 0
################## update_production_plan end ###################
################## production_scheduling ##################
def check_within_operating_hours(cd,current_time,last_datetime,pp_date,start_time,end_time,total_op_time,machine):
    """Fit one Production Plan Item into the workstation's operating hours.

    Predicts the finish time of `cd` (planned_qty * total_op_time + 5 min
    buffer) starting at `last_datetime`. Holidays of `machine` are skipped
    first; when the predicted finish would run past `end_time`, every date
    parameter is rolled over to the next working day and the function
    recurses to place the order there.

    Returns (last_datetime, temp_datetime, exceed, start_time, end_time):
    temp_datetime is the predicted end; exceed == 1 flags that the original
    day was overrun (the caller then skips assigning this slot).
    """
    delay = 5  # fixed buffer in minutes between consecutive orders
    exceed = 0
    isHoliday = 0 #Workstation is holiday or not
    remainder = 0
    split_qty = 0 #planned qty that has been split (currently unused, see commented-out split logic below)
    ######### Condition Checking to avoid duplicate work order###########
    # manufacture_qty = 0
    # exist = frappe.db.exists({ #check whether the document exist or not
    # 'doctype': 'Work Order',
    # 'sales_order': cd.sales_order,
    # })
    # if (exist):
    # manufacture_qty = frappe.db.get_value('Work Order', {'sales_order': cd.sales_order}, ['qty'])
    #planned_qty = cd.planned_qty-manufacture_qty
    #cd.planned_qty = planned_qty
    # if (planned_qty == 3):
    # frappe.delete_doc('Production Plan Item', row['name'])
    # delay = 0
    planned_qty = cd.planned_qty
    # Total operating minutes needed for this order, plus the fixed buffer.
    predict_op_time = planned_qty*total_op_time + delay
    temp_datetime = add_to_date(last_datetime,minutes=(predict_op_time),as_datetime=True) #Predict the end datetime
    isHoliday = check_workstation_for_holiday(machine, pp_date.date(),pp_date.date())
    print('isHoliday')
    print(isHoliday)
    while(isHoliday):#advance day by day until a non-holiday for this machine
        pp_date, start_time, end_time = update_time(pp_date,start_time,end_time,None) #update all the time by one day
        isHoliday = check_workstation_for_holiday(machine, pp_date.date(),pp_date.date())
    print('Inside the check within')
    print(last_datetime)
    print(temp_datetime)
    print(end_time)
    if start_time and end_time:
        # datetime.datetime(2021, 12, 28, 12, 00, 00)
        if (temp_datetime< end_time): # the operation fits within working hours
            #planned_datetime.append(last_datetime)
            last_datetime = last_datetime  # keep the slot as-is (no-op, kept from original)
            #doc.db_set('planned_start_date', last_datetime, commit=True) #cannot use because my doc is not a doctype
        else:
            exceed = 1 #the order overruns today's working hours
            # time_left = time_diff_in_hours(end_time,last_datetime)*60
            # split_qty = round(round(time_left)/total_op_time)
            # remainder = planned_qty - split_qty
            # cd.planned_qty = split_qty #
            cd.planned_start_date = last_datetime
            pp_date, start_time, end_time = update_time(pp_date,start_time,end_time,None) #roll everything over one day
            #### try to add a new child row after this then we can remove all the part below include exceed check,
            #### the new child doctype should have the value of the remainder
            #### [i think is impossible because we are in a loop of po_items, add new child doctype cannot have real time
            #### changes]
            # Re-anchor on the next day's work order list ...
            last_datetime,start_time,end_time = find_datetime(pp_date,current_time,start_time,end_time)
            print('end_time')
            print(end_time)
            # ... and recurse to place this order on the new day. NOTE: the
            # recursive call's exceed flag is deliberately discarded — the
            # caller only sees exceed for the ORIGINAL day.
            last_datetime,temp_datetime,exceed_temp,start_time,end_time = check_within_operating_hours(cd,current_time,last_datetime,pp_date,start_time,end_time,total_op_time,machine)
            # last_datetime = temp_datetime
    # Reassign back the last_datetime so it matches what we want
    return last_datetime,temp_datetime,exceed,start_time,end_time
def find_datetime(pp_date,current_time,start_time,end_time):
    """Find the next free slot (`last_datetime`) on `pp_date`.

    Looks up existing Work Orders planned on that day: the slot is 5 minutes
    after the latest one, or the workstation's start_time when the day is
    empty. The slot is never placed in the past (pushed 10 min after
    `current_time`) and rolls to the next day's start when it falls after
    `end_time`. Returns (last_datetime, start_time, end_time).
    NOTE(review): when pp_date == '' it is replaced by today(), which returns
    a STRING while pp_date is otherwise a datetime — confirm this branch is
    ever taken with the filter below.
    """
    if (pp_date == ''):
        pp_date = today()
    #if last_datetime == initialise, mean this is the first time we finding last_datetime, run normal check
    #if (last_datetime == datetime.datetime(1900,1,1,0,0,0)):
    wo_listt = frappe.db.get_list('Work Order',
        filters =
        [[
            'planned_start_date', 'between', [pp_date, pp_date] #Limit the filter to work orders on this particular day
        ]],
        fields =['qty','planned_start_date'],
        order_by = 'planned_start_date desc'
    )
    # # Find the total wo_qty on that particular day
    # wo_qty = 0
    # for i in range(0, len(wo_listt)):
    # wo_qty = wo_qty + wo_listt[i].qty
    # Find the very last manufacture time on that date
    #last_datetime = "" #The latest date
    if (len(wo_listt)!= 0): #work orders already exist on that day
        last_datetime = wo_listt[0].planned_start_date #latest planned start (list is sorted desc)
        last_datetime = add_to_date(last_datetime,minutes=5,as_datetime=True)  # 5 min gap after it
    else:
        last_datetime= start_time #first operation starts at the workstation's opening time
    #condition check whether the last_datetime is before current datetime
    #current_time = datetime.datetime.strptime(now(),'%Y-%m-%d %H:%M:%S.%f') #now() is string type
    # current_time = datetime.datetime.now()
    if (last_datetime < current_time):
        last_datetime = add_to_date(current_time,minutes=10,as_datetime=True) #Plan the start 10 minutes from now
        if (last_datetime > end_time):  # past closing time -> start tomorrow
            last_datetime = add_to_date(start_time,days=1,as_datetime=True)
            ignore,start_time,end_time = update_time(pp_date, start_time,end_time,None) #shift the working window one day
    # else:
    # if (last_datetime > end_time):
    # last_datetime = add_to_date(start_time,days=1,as_datetime=True)
    # ignore,start_time,end_time = update_time(pp_date, start_time,end_time) #update the start and end time of workstation
    return last_datetime,start_time,end_time
def read_bom(po_items,pp_date):
    """Read the plan's BOM: total operation minutes and the workstation's
    working window placed on `pp_date`.

    Returns (total_op_time, start_time, end_time) where total_op_time is the
    sum of all operation times (minutes) and start/end_time are datetimes on
    pp_date built from the FIRST workstation's working hours (all
    workstations are assumed to share the same hours).
    NOTE(review): if the BOM has no operations, or no working-hours row has
    both times set, start_time/end_time are never bound and the final return
    raises UnboundLocalError — confirm callers guarantee these exist.
    """
    bom_no = po_items[0].bom_no # access list then only access the attribute
    bom = frappe.get_doc('BOM', bom_no)
    total_op_time = 0
    if bom.operations:
        for row in bom.operations:
            total_op_time = total_op_time + row.time_in_mins
        #Access only the first operation's workstation [assume all workstations have the same working hours]
        workstation = frappe.get_doc('Workstation',bom.operations[0].workstation)
        for row in workstation.working_hours:
            if row.start_time and row.end_time:
                start_temp = row.start_time
                end_temp = row.end_time
                # Combine pp_date with the clock times (start/end_temp are
                # datetime.timedelta) into full datetime.datetime values.
                # start_time = str(pp_date)+ ' '+str(start_temp)
                # start_time = datetime.datetime.strptime(start_time,'%Y-%m-%d %H:%M:%S')
                # end_time = str(pp_date)+ ' '+str(end_temp)
                # end_time = datetime.datetime.strptime(end_time,'%Y-%m-%d %H:%M:%S')
                print('read_bom')
                start_time = datetime.datetime.strftime(pp_date,'%Y-%m-%d')+ ' '+str(start_temp)
                start_time = datetime.datetime.strptime(start_time,'%Y-%m-%d %H:%M:%S')
                end_time = datetime.datetime.strftime(pp_date,'%Y-%m-%d')+ ' '+str(end_temp)
                end_time = datetime.datetime.strptime(end_time,'%Y-%m-%d %H:%M:%S')
                #end_time = datetime.datetime(2021, 12, 28, 12, 20, 00)
                print(end_time)
    return total_op_time,start_time,end_time
#Update all datetime by 1 day while running the else loop of checkwithin opearting time
def update_time(pp_date, start_time, end_time, last_datetime):
    """Advance pp_date/start_time/end_time together, keeping them in sync.

    If last_datetime is None: push all three forward exactly one day (used
    when rolling an order over to the next working day).
    Otherwise: keep adding whole days until pp_date reaches last_datetime's
    calendar date. This branch exists for update_production_plan, where the
    maintenance completion date may lie several days ahead and the working
    window must follow it; without it the window would re-anchor on the
    wrong day's first work order.

    Returns the updated (pp_date, start_time, end_time) tuple.
    """
    if last_datetime is None:  # idiom fix: was `== None`
        pp_date = add_to_date(pp_date, days=1, as_datetime=True)
        start_time = add_to_date(start_time, days=1, as_datetime=True)
        end_time = add_to_date(end_time, days=1, as_datetime=True)
    else:
        while last_datetime.date() > pp_date.date():  # sync all dates with last_datetime
            pp_date = add_to_date(pp_date, days=1, as_datetime=True)
            start_time = add_to_date(start_time, days=1, as_datetime=True)
            end_time = add_to_date(end_time, days=1, as_datetime=True)
    return pp_date, start_time, end_time
################## production_scheduling end ##################
def get_bom_detail(po_items):
    """Return (bom, workstations): the plan's shared BOM and the name of the
    workstation used by each of its operations, in operation order."""
    bom = frappe.get_doc('BOM', po_items[0].bom_no)  # all rows share one BOM
    stations = [frappe.get_value('Workstation', op.workstation, 'name')
                for op in bom.operations]
    return bom, stations
#from_datetime,to_datetime = datetime.date type
def check_workstation_for_holiday(workstation, from_datetime, to_datetime):
    """Return 1 when the workstation's holiday list has any holiday inside
    [from_datetime, to_datetime] (both datetime.date), else 0."""
    holiday_list = frappe.db.get_value("Workstation", workstation, "holiday_list")
    if not (holiday_list and from_datetime and to_datetime):
        return 0
    rows = frappe.db.sql(
        """select holiday_date from `tabHoliday` where parent = %s
        and holiday_date between %s and %s """,
        (holiday_list, from_datetime, to_datetime))
    applicable_holidays = [formatdate(r[0]) for r in rows]
    print(applicable_holidays)
    # frappe.throw(_("Workstation is closed on the following dates as per Holiday List: {0}")
    # .format(holiday_list) + "\n" + "\n".join(applicable_holidays), WorkstationHolidayError)
    return 1 if applicable_holidays else 0
|
Drayang/ERPNext-Production-Scheduing | production_scheduling_shrdc/overrides/sales_order.py | from erpnext.selling.doctype.sales_order.sales_order import SalesOrder
from frappe import msgprint
import frappe
from frappe import _
class CustomSalesOrder(SalesOrder):
    """Sales Order override that adds a greeting on validate and on submit.

    BUG FIX: the original overrode validate()/on_submit() WITHOUT calling
    super(), which silently dropped all of ERPNext's standard Sales Order
    validation and submission logic.
    """

    def on_submit(self):
        super().on_submit()  # keep the core Sales Order submission behaviour
        self.my_custom_code()

    def validate(self):
        super().validate()  # keep the core Sales Order validation
        self.my_custom_code()

    def my_custom_code(self):
        """Show/print a greeting (placeholder for real custom logic)."""
        frappe.msgprint(_("Hi how are you "))
        print('Hi how are you ')  # fixed typo: was 'Hi how are yo '
|
Drayang/ERPNext-Production-Scheduing | setup.py | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Runtime dependencies are pinned in requirements.txt (one per line).
with open("requirements.txt") as f:
    install_requires = f.read().strip().split("\n")

# get version from __version__ variable in production_scheduling_shrdc/__init__.py
from production_scheduling_shrdc import __version__ as version

setup(
    name="production_scheduling_shrdc",
    version=version,
    description="This is a production scheduling app.",
    author="DCKY",
    author_email="<EMAIL>",
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires
)
|
Drayang/ERPNext-Production-Scheduing | production_scheduling_shrdc/production_scheduling_shrdc/doctype/frepple_setting/frepple_setting.py | <reponame>Drayang/ERPNext-Production-Scheduing<filename>production_scheduling_shrdc/production_scheduling_shrdc/doctype/frepple_setting/frepple_setting.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2022, DCKY and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class FreppleSetting(Document):
    """Doctype controller holding the credentials used to talk to frePPLe."""

    def get_params_and_url(self):
        """Return the request parameters built from the stored settings."""
        # The password is stored encrypted; get_password decrypts it and,
        # with raise_exception=False, returns None instead of raising when
        # no password has been set.
        return {
            "USER": self.username,
            "PWD": self.get_password(fieldname="password", raise_exception=False),
            "SIGNATURE": self.authorization_header,
        }
|
Drayang/ERPNext-Production-Scheduing | production_scheduling_shrdc/event.py | from __future__ import unicode_literals
from six import print_
import frappe
import json
from frappe.utils import cint, getdate, formatdate, today
from frappe import throw, _
from frappe.model.document import Document
@frappe.whitelist()
def get_events(start, end, filters=None):
    """Returns events for Gantt / Calendar view rendering.

    :param start: Start date-time.
    :param end: End date-time.
    :param filters: Filters (JSON).
    """
    filters = json.loads(filters) if filters else []
    # NOTE(review): strict < / > comparisons exclude holidays that fall
    # exactly on the start or end date — confirm this is intended.
    if start:
        filters.append(['Holiday', 'holiday_date', '>', getdate(start)])
    if end:
        filters.append(['Holiday', 'holiday_date', '<', getdate(end)])
    return frappe.get_list(
        'Holiday List',
        fields=['name', '`tabHoliday`.holiday_date', '`tabHoliday`.description',
                '`tabHoliday List`.color'],
        filters=filters,
        update={"allDay": 1})
Drayang/ERPNext-Production-Scheduing | production_scheduling_shrdc/config/production_scheduling_shrdc.py | <reponame>Drayang/ERPNext-Production-Scheduing<gh_stars>0
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Return the desk module card layout for the Production Scheduling app."""
    scheduling_card = {
        "label": _("Production Scheduling"),
        "items": [
            {
                "type": "doctype",
                "name": "Production Scheduling Backend",
                "onboard": 1,
                # "dependencies": ["Item"],
            },
        ],
    }
    frepple_card = {
        "label": _("Frepple Integration"),
        "items": [
            {
                "type": "doctype",
                "name": "Frepple Integration",
                "label": "Frepple Integration",
                "onboard": 1,
            },
            {
                "type": "doctype",
                "name": "Frepple Settings",
                "label": "Frepple Settings",
                "onboard": 2,
            },
        ],
    }
    return [scheduling_card, frepple_card]
Drayang/ERPNext-Production-Scheduing | production_scheduling_shrdc/production_scheduling_shrdc/doctype/work_order_backend/work_order_backend.py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, DCKY and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import msgprint, _
from frappe.utils import add_to_date ,getdate,today,now
from frappe.utils import flt, cint, formatdate, comma_and, time_diff_in_seconds, to_timedelta
import datetime
from frappe.model.document import Document
# from erpnext.erpnext.manufacturing.doctype.workstation.workstation import
from erpnext.manufacturing.doctype.production_plan.production_plan import ProductionPlan
from erpnext.manufacturing.doctype.workstation.workstation import get_default_holiday_list,check_if_within_operating_hours
#from frappe.utils.background_jobs import start_worker
from frappe.utils.data import time_diff_in_hours
class WorkOrderBackend(Document):
    """Controller for the Work Order Backend doctype (no custom behaviour;
    the logic lives in the module-level whitelisted functions)."""
    pass
@frappe.whitelist()
def work_order_scheduling(doc, pp_isLinked):
    """Stub endpoint: reports (via frappe.response['message']) whether the
    submitted Work Order form is linked to a Production Plan.

    `pp_isLinked` is accepted to match the client-side call signature but is
    not used yet. (Removed an unused `pp_date = today()` local from the
    original.)
    """
    abc = ''
    doc = json.loads(doc)  # the client sends the form as a JSON string
    if not doc['production_plan']:
        abc = 'hi'  # marker value consumed by the client-side handler
    frappe.response['message'] = {
        'abc': abc,
        'doc': doc,
    }
|
casassg/sonome | main.py | <reponame>casassg/sonome<gh_stars>0
#!./env/bin/python
import cmd
import json
import os
import random
import mingus.core.scales as scales
import requests
import tweepy
from mingus.containers import Track, Composition
from mingus.midi import fluidsynth
from tweepy import OAuthHandler, API
# Directory of this script; tweet caches are stored next to it.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Twitter API credentials: read from the environment, with placeholder
# fallbacks that will fail authentication if left unset.
ACCESS_TOKEN = os.environ.get("ACCESS_TOKEN", "ENTER YOUR ACCESS TOKEN")
ACCESS_TOKEN_SECRET = os.environ.get("ACCESS_TOKEN_SECRET", "ENTER YOUR ACCESS TOKEN SECRET")
CONSUMER_KEY = os.environ.get("CONSUMER_KEY", "ENTER YOUR API KEY")
CONSUMER_SECRET = os.environ.get("CONSUMER_SECRET", "ENTER YOUR API SECRET")

# Module-level authenticated Tweepy client, built at import time.
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = API(auth)
def generate_composition(happy=True, energetic=False, text='Hola em dic Gerard'):
    """Build a mingus Composition whose notes are derived from `text`.

    `happy` selects a major (C diatonic) vs. minor (A natural minor) scale;
    `energetic` maps whole words to chords instead of characters to notes.
    """
    t = Track()
    t.augment()
    scale = (scales.Diatonic('C', (3, 7)).ascending() if happy
             else scales.NaturalMinor("A").descending())
    if energetic:
        # One chord per word: each character picks a scale degree.
        chords = [[scale[ord(ch) % len(scale)] for ch in word]
                  for word in text.split(' ')]
        # NOTE(review): only the first chord is used — presumably intentional;
        # confirm.
        t = Track().from_chords(chords[:1], 1)
    else:
        # One note per character; spaces become rests (None).
        notes = [scale[ord(ch) % len(scale)] if ch != ' ' else None for ch in text]
        # Melody length scales with text length (280 = max tweet size).
        notes = notes[:int(20 * (len(text) / 280.0))]
        for note in notes:
            t.add_notes(note)
    c = Composition()
    c.add_track(t)
    return c
def is_text_happy(text):
    """Classify `text` with the text-processing.com sentiment API; True when
    the positive probability outweighs the negative one."""
    response = requests.post('http://text-processing.com/api/sentiment/', {'text': text})
    probability = response.json()['probability']
    return probability['pos'] - probability['neg'] > 0
def random_line(afile):
    """Reservoir-sample one line, uniformly, from the line iterable `afile`."""
    chosen = next(afile)
    for seen, candidate in enumerate(afile):
        # Replace the current pick with probability 1/(seen + 2); this keeps
        # the selection uniform over every line read so far.
        if not random.randrange(seen + 2):
            chosen = candidate
    return chosen
def get_random_tweet(line):
    """Pick one random cached tweet of user `line`, lower-cased and with any
    word containing 'http' (links) removed."""
    cache_path = os.path.join(BASE_DIR, line + '.json')
    with open(cache_path, 'r') as f:
        raw_text = json.loads(random_line(f))['text']
    words = raw_text.lower().split()
    return ' '.join(w for w in words if 'http' not in w)
def get_tweets_user(line):
    """Ensure a local JSON-lines cache of user `line`'s timeline exists.

    Downloads via the Twitter API only when the cache file is missing.
    Returns the cache file path.
    """
    cache_path = os.path.join(BASE_DIR, line + '.json')
    if os.path.isfile(cache_path):
        return cache_path
    with open(cache_path, 'w') as out:
        cursor = tweepy.Cursor(api.user_timeline, id=line, count=200)
        for tweet in cursor.items():
            json.dump(tweet._json, out)  # one raw tweet JSON per line
            out.write("\n")
    return cache_path
class SonomeCmd(cmd.Cmd):
    """Simple command processor example."""
    # NOTE: the do_* docstrings double as the shell's `help` text and are
    # therefore kept verbatim.
    intro = 'Welcome to the Sonome interactive shell. Type help or ? to list commands.\nAuthor: @casassaez 2017'
    prompt = 'sonome> '

    def do_about(self, line):
        """about\nPrints about information"""
        print("Sonome is a music generator that extracts sentiments from text and plays a song accordingly")

    def do_quit(self, line):
        # A truthy return value tells cmd.Cmd to leave the command loop.
        return True

    def do_text(self, line):
        """text <text_to_analyze>\nCreate song for input text"""
        mood_is_happy = is_text_happy(line)
        song = generate_composition(happy=mood_is_happy, text=line)
        fluidsynth.play_Composition(song, bpm=280 if mood_is_happy else 150)

    def do_user(self, line):
        """user <twitter_user_to_analyze>\nGets a random tweet from specified user and creates song"""
        get_tweets_user(line)
        tweet = get_random_tweet(line)
        mood_is_happy = is_text_happy(tweet)
        print(tweet)
        song = generate_composition(happy=mood_is_happy, text=tweet)
        fluidsynth.play_Composition(song, bpm=280 if mood_is_happy else 150)

    def preloop(self):
        # Load the soundfont before the first prompt is shown.
        fluidsynth.init('choriumreva.sf2', "coreaudio")

    def postloop(self):
        print('Bye! see you soon?')
if __name__ == '__main__':
    # Run the interactive shell when executed as a script.
    SonomeCmd().cmdloop()
|
AnnieLan/Web | proyectodjango/Registro/admin.py | <gh_stars>0
from django.contrib import admin
# Register your models here.
from .models import ModelRegistro
# Expose ModelRegistro in the Django admin with the default ModelAdmin.
admin.site.register(ModelRegistro) |
AnnieLan/Web | proyectodjango/Login/urls.py | <gh_stars>0
from django.contrib.auth import views as auth_view
from Login.views import LandingClass
from Login.views import LoginClass
from Login.views import DashboardClass
from django.urls import include, path, re_path
# Namespace used when reversing, e.g. redirect('Login:dashboard') in views.py.
app_name = 'Login'
urlpatterns = [
    path('', LandingClass.as_view(), name='Landing'),
    path('Login/', LoginClass.as_view(), name='Login'),
    # NOTE(review): no trailing slash here, unlike 'Login/' above -- confirm
    # the intended URL form (APPEND_SLASH behavior differs between the two).
    path('Dashboard', DashboardClass.as_view(), name='dashboard')
] |
AnnieLan/Web | proyectodjango/Login/models.py | from django.db import models
# Create your models here.
class Login (models.Model):
    # NOTE(review): authentication in Login/views.py uses django.contrib.auth,
    # not this model -- confirm this model is still needed.
    nombre = models.CharField(max_length = 254,null=False)
    ap_pat = models.CharField(max_length = 254,null=False)
    ap_mat = models.CharField(max_length = 254,null=False)
    edad = models.IntegerField(null=False) |
AnnieLan/Web | proyectodjango/Alumnos/migrations/0002_auto_20200226_2209.py | <gh_stars>0
# Generated by Django 3.0.3 on 2020-02-27 04:09
from django.db import migrations, models
# Auto-generated migration: alters Alumno.matricula from CharField (0001)
# to IntegerField. Do not hand-edit once applied.
class Migration(migrations.Migration):
    dependencies = [
        ('Alumnos', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='alumno',
            name='matricula',
            field=models.IntegerField(),
        ),
    ]
|
AnnieLan/Web | proyectodjango/Registro/models.py | <reponame>AnnieLan/Web
from django.db import models
# Create your models here.
class ModelRegistro(models.Model):
    """Registration form data: personal details plus location."""
    nombre = models.CharField(max_length = 254,null = False)
    edad = models.IntegerField(null=False)
    direccion = models.CharField(max_length = 254,null = False)
    genero = models.CharField(max_length = 254,null = False)
    pais = models.CharField(max_length = 254,null = False)
    estado = models.CharField(max_length = 254,null = False) |
AnnieLan/Web | proyectodjango/Almacen/admin.py | <gh_stars>0
from django.contrib import admin
# Register your models here.
from .models import Almacen
# Expose Almacen in the Django admin with the default ModelAdmin.
admin.site.register(Almacen) |
AnnieLan/Web | proyectodjango/Almacen/models.py | <filename>proyectodjango/Almacen/models.py
from django.db import models
# Create your models here.
class Almacen(models.Model):
    """Inventory record for one product."""
    nombre = models.CharField(max_length = 254,null = False)
    # NOTE(review): dates stored as CharField -- DateField would be more
    # appropriate, but changing it requires a schema migration.
    fechaProducto = models.CharField(max_length = 254,null = False)
    FechaCaducidad = models.CharField(max_length = 254,null = False)
    cantidadProducto = models.CharField(max_length = 254,null = False)
    # NOTE(review): "VentasProcuto" looks like a typo for "VentasProducto";
    # renaming is a schema/API change, so it is only flagged here.
    VentasProcuto = models.CharField(max_length = 254,null = False) |
AnnieLan/Web | proyectodjango/Alumnos/migrations/0001_initial.py | <filename>proyectodjango/Alumnos/migrations/0001_initial.py
# Generated by Django 3.0.3 on 2020-02-13 20:45
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated initial schema for the Alumnos app (Materia + Alumno).
# Do not hand-edit once this migration has been applied.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Materia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('materia', models.CharField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Alumno',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=254)),
                ('carrera', models.CharField(max_length=254)),
                ('edad', models.IntegerField()),
                ('direccion', models.CharField(max_length=254)),
                ('genero', models.CharField(max_length=254)),
                # NOTE: created as CharField here; altered to IntegerField in 0002.
                ('matricula', models.CharField(max_length=254)),
                ('materia', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Alumnos.Materia')),
            ],
        ),
    ]
|
AnnieLan/Web | proyectodjango/Almacen/migrations/0001_initial.py | # Generated by Django 3.0.3 on 2020-02-13 20:26
from django.db import migrations, models
# Auto-generated initial schema for the Almacen app.
# Do not hand-edit once this migration has been applied.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Almacen',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=254)),
                ('fechaProducto', models.CharField(max_length=254)),
                ('FechaCaducidad', models.CharField(max_length=254)),
                ('cantidadProducto', models.CharField(max_length=254)),
                ('VentasProcuto', models.CharField(max_length=254)),
            ],
        ),
    ]
|
AnnieLan/Web | proyectodjango/Alumnos/admin.py | <reponame>AnnieLan/Web
from django.contrib import admin
# Register your models here.
from .models import Materia
# Expose both Alumnos models in the Django admin with default ModelAdmins.
admin.site.register(Materia)
from .models import Alumno
admin.site.register(Alumno) |
AnnieLan/Web | proyectodjango/Login/views.py | <filename>proyectodjango/Login/views.py
from django.shortcuts import render, redirect
from django.views.generic import View
# Importar metodo de auntenticacion
from django.contrib.auth import authenticate
# Create your views here.
def index (request):
    # Render the login page template (function-based counterpart of the CBVs below).
    return render(request,'index.html')
def landing (request):
    # Render the landing page template.
    return render(request,'landing.html')
class LoginClass(View):
    """Render the login form (GET) and authenticate the submitted credentials (POST)."""
    templates = 'Login/index.html'
    templates_ok = 'Landing/landing.html'
    templates_oke = 'Dashboard/dashboard.html'
    # Class-level default so get_context() never hits AttributeError if it is
    # ever called before a failed login sets the message.
    message = ''
    def get(self, request, *args, **kwargs):
        # Show the empty login form.
        return render(request, self.templates, {})
    def post(self, request, *args, **kwargs):
        user_post = request.POST['user']
        password_post = request.POST['password']
        # BUG FIX: the password kwarg previously referenced an undefined name
        # (<PASSWORD> placeholder); it must be the submitted password.
        user_session = authenticate(username=user_post, password=password_post)
        if user_session is not None:
            return redirect('Login:dashboard')
        self.message = 'Usuario o contraseña incorrecta'
        return render(request, self.templates, self.get_context())
    def get_context(self):
        # Extra template context carrying the login error, if any.
        return {
            'error': self.message,
        }
class LandingClass(View):
    """Class-based view rendering the public landing page."""
    templates_ok = 'Landing/landing.html'
    def get(self, request, *args, **kwargs):
        print("GET")  # debug trace; consider logging instead
        return render(request, self.templates_ok,{})
class DashboardClass(View):
    """Class-based view rendering the dashboard shown after a successful login."""
    templates_oke = 'Dashboard/dashboard.html'
    def get(self, request, *args, **kwargs):
        print("GET")  # debug trace; consider logging instead
        return render(request, self.templates_oke,{}) |
AnnieLan/Web | proyectodjango/Alumnos/models.py | <gh_stars>0
from django.db import models
# Create your models here.
class Materia(models.Model):
    """A course/subject; referenced by Alumno via FK."""
    materia = models.CharField(max_length = 254,null = False)
class Alumno (models.Model):
    """A student enrolled in one Materia."""
    # Deleting a Materia cascades to its Alumno rows.
    materia = models.ForeignKey(Materia, on_delete=models.CASCADE)
    nombre = models.CharField(max_length = 254,null = False)
    carrera = models.CharField(max_length = 254,null = False)
    edad = models.IntegerField(null = False)
    direccion = models.CharField(max_length = 254,null = False)
    genero = models.CharField(max_length = 254,null = False)
    # IntegerField matches migration 0002 (0001 created it as CharField).
    matricula = models.IntegerField(null = False)
|
AnnieLan/Web | proyectodjango/Registro/apps.py | <filename>proyectodjango/Registro/apps.py
from django.apps import AppConfig
class RegistroConfig(AppConfig):
    """App configuration for the Registro application."""
    name = 'Registro'
|
AnnieLan/Web | proyectodjango/Almacen/apps.py | <filename>proyectodjango/Almacen/apps.py
from django.apps import AppConfig
class AlmacenConfig(AppConfig):
    """App configuration for the Almacen application."""
    name = 'Almacen'
|
AnnieLan/Web | Scripts/django-admin.py | #!c:\users\danie\onedrive\escritorio\web\.proyecto\scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Delegate to Django's CLI dispatcher (pip-generated wrapper script).
    management.execute_from_command_line()
|
citrix-openstack-build/python-ceilometerclient | ceilometerclient/v1/shell.py | <gh_stars>0
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.common import utils
import ceilometerclient.exc as exc
@utils.arg('-m', '--metaquery', metavar='<METAQUERY>',
           help='Query into the metadata metadata.key=value:..')
@utils.arg('-s', '--source', metavar='<SOURCE>',
           # BUG FIX: help text was copy-pasted from --resource_id
           help='ID of the source to show samples for.')
@utils.arg('-r', '--resource_id', metavar='<RESOURCE_ID>',
           help='ID of the resource to show samples for.')
@utils.arg('-u', '--user_id', metavar='<USER_ID>',
           help='ID of the user to show samples for.')
@utils.arg('-p', '--project_id', metavar='<PROJECT_ID>',
           help='ID of the project to show samples for.')
@utils.arg('-c', '--counter_name', metavar='<NAME>',
           help='Name of meter to show samples for.')
@utils.arg('--start', metavar='<START_TIMESTAMP>',
           help='ISO date in UTC which limits events by '
                'timestamp >= this value')
@utils.arg('--end', metavar='<END_TIMESTAMP>',
           help='ISO date in UTC which limits events by '
                'timestamp <= this value')
def do_sample_list(cc, args):
    '''List the samples for this meter.'''
    # Forward every CLI filter straight to the samples API.
    fields = {'counter_name': args.counter_name,
              'resource_id': args.resource_id,
              'user_id': args.user_id,
              'project_id': args.project_id,
              'source': args.source,
              'start_timestamp': args.start,
              'end_timestamp': args.end,
              'metaquery': args.metaquery}
    try:
        samples = cc.samples.list(**fields)
    except exc.HTTPNotFound:
        raise exc.CommandError('Samples not found: %s' % args.counter_name)
    else:
        field_labels = ['Resource ID', 'Name', 'Type', 'Volume', 'Timestamp']
        fields = ['resource_id', 'counter_name', 'counter_type',
                  'counter_volume', 'timestamp']
        utils.print_list(samples, fields, field_labels,
                         sortby=0)
@utils.arg('-m', '--metaquery', metavar='<METAQUERY>',
           help='Query into the metadata metadata.key=value:..')
@utils.arg('-s', '--source', metavar='<SOURCE>',
           # BUG FIX: help texts below were copy-pasted from do_sample_list
           help='ID of the source to show meters for.')
@utils.arg('-r', '--resource_id', metavar='<RESOURCE_ID>',
           help='ID of the resource to show meters for.')
@utils.arg('-u', '--user_id', metavar='<USER_ID>',
           help='ID of the user to show meters for.')
@utils.arg('-p', '--project_id', metavar='<PROJECT_ID>',
           help='ID of the project to show meters for.')
def do_meter_list(cc, args=None):
    '''List the user's meters.'''
    # BUG FIX: the default was a mutable dict ({}); the shell always passes a
    # parsed-args namespace, so None is a safe, non-shared default.
    # NOTE(review): the -m/--metaquery argument is accepted but never
    # forwarded to the API here -- confirm whether that is intentional.
    fields = {'resource_id': args.resource_id,
              'user_id': args.user_id,
              'project_id': args.project_id,
              'source': args.source}
    meters = cc.meters.list(**fields)
    field_labels = ['Name', 'Type', 'Resource ID', 'User ID', 'Project ID']
    fields = ['name', 'type', 'resource_id',
              'user_id', 'project_id']
    utils.print_list(meters, fields, field_labels,
                     sortby=0)
@utils.arg('-s', '--source', metavar='<SOURCE>',
           # BUG FIX: help text said "resource ... projects"; this filters users by source
           help='ID of the source to show users for.')
def do_user_list(cc, args=None):
    '''List the users.'''
    # BUG FIX: mutable default ({}) replaced with None; the CLI always
    # supplies a parsed-args namespace.
    kwargs = {'source': args.source}
    users = cc.users.list(**kwargs)
    field_labels = ['User ID']
    fields = ['user_id']
    utils.print_list(users, fields, field_labels,
                     sortby=0)
@utils.arg('-s', '--source', metavar='<SOURCE>',
           # BUG FIX: several help texts below were wrong or misspelled
           help='ID of the source to show resources for.')
@utils.arg('-u', '--user_id', metavar='<USER_ID>',
           help='ID of the user to show resources for.')
@utils.arg('-p', '--project_id', metavar='<PROJECT_ID>',
           help='ID of the project to show resources for.')
@utils.arg('-m', '--metaquery', metavar='<METAQUERY>',
           help='Query into the metadata metadata.key=value:..')
@utils.arg('--start', metavar='<START_TIMESTAMP>',
           help='ISO date in UTC which limits resources by '
                'last update time >= this value')
@utils.arg('--end', metavar='<END_TIMESTAMP>',
           help='ISO date in UTC which limits resources by '
                'last update time <= this value')
def do_resource_list(cc, args=None):
    '''List the resources.'''
    # BUG FIX: mutable default ({}) replaced with None; the CLI always
    # supplies a parsed-args namespace.
    kwargs = {'source': args.source,
              'user_id': args.user_id,
              'project_id': args.project_id,
              'start_timestamp': args.start,
              'end_timestamp': args.end,
              'metaquery': args.metaquery}
    resources = cc.resources.list(**kwargs)
    field_labels = ['Resource ID', 'Source', 'User ID', 'Project ID']
    fields = ['resource_id', 'source', 'user_id', 'project_id']
    utils.print_list(resources, fields, field_labels,
                     sortby=1)
@utils.arg('-s', '--source', metavar='<SOURCE>',
           # BUG FIX: help text said "resource"; this filters projects by source
           help='ID of the source to show projects for.')
def do_project_list(cc, args=None):
    '''List the projects.'''
    # BUG FIX: mutable default ({}) replaced with None; the CLI always
    # supplies a parsed-args namespace.
    kwargs = {'source': args.source}
    projects = cc.projects.list(**kwargs)
    field_labels = ['Project ID']
    fields = ['project_id']
    utils.print_list(projects, fields, field_labels,
                     sortby=0)
|
citrix-openstack-build/python-ceilometerclient | ceilometerclient/v2/samples.py | <gh_stars>0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.common import base
from ceilometerclient.v2 import options
# Whitelist of keyword arguments SampleManager.create() forwards to the POST
# body; any other kwargs passed to create() are silently dropped.
CREATION_ATTRIBUTES = ('source',
                       'counter_name',
                       'counter_type',
                       'counter_unit',
                       'counter_volume',
                       'user_id',
                       'project_id',
                       'resource_id',
                       'timestamp',
                       'resource_metadata')
class Sample(base.Resource):
    """Thin resource wrapper over the raw sample dict returned by the API."""
    def __repr__(self):
        return "<Sample %s>" % self._info
class SampleManager(base.Manager):
    """CRUD operations for v2 meter samples."""
    resource_class = Sample
    @staticmethod
    def _path(counter_name=None):
        # Per-meter endpoint when a name is given, otherwise the collection.
        return '/v2/meters/%s' % counter_name if counter_name else '/v2/meters'
    def list(self, meter_name=None, q=None):
        """List samples, optionally restricted to *meter_name* and filter *q*."""
        path = self._path(counter_name=meter_name)
        return self._list(options.build_url(path, q))
    def create(self, **kwargs):
        """POST one sample; only keys in CREATION_ATTRIBUTES are sent.

        Requires 'counter_name' in kwargs (KeyError otherwise). Returns the
        created Sample objects, or implicitly None when the server returns
        an empty body.
        """
        new = dict((key, value) for (key, value) in kwargs.items()
                   if key in CREATION_ATTRIBUTES)
        url = self._path(counter_name=kwargs['counter_name'])
        resp, body = self.api.json_request('POST',
                                           url,
                                           body=[new])
        if body:
            return [Sample(self, b) for b in body]
|
citrix-openstack-build/python-ceilometerclient | ceilometerclient/tests/v2/test_alarms.py | # -*- encoding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import testtools
from ceilometerclient.tests import utils
from ceilometerclient.v2 import alarms
# Canonical threshold-style alarm fixture used by AlarmManagerTest below.
AN_ALARM = {u'alarm_actions': [u'http://site:8000/alarm'],
            u'ok_actions': [u'http://site:8000/ok'],
            u'description': u'An alarm',
            u'type': u'threshold',
            u'threshold_rule': {
                u'meter_name': u'storage.objects',
                u'query': [{u'field': u'key_name',
                            u'op': u'eq',
                            u'value': u'key_value'}],
                u'evaluation_periods': 2,
                u'period': 240.0,
                u'statistic': u'avg',
                u'threshold': 200.0,
                u'comparison_operator': 'gt',
            },
            u'timestamp': u'2013-05-09T13:41:23.085000',
            u'enabled': True,
            u'alarm_id': u'alarm-id',
            u'state': u'ok',
            u'insufficient_data_actions': [u'http://site:8000/nodata'],
            u'user_id': u'user-id',
            u'project_id': u'project-id',
            u'state_timestamp': u'2013-05-09T13:41:23.085000',
            u'repeat_actions': False,
            u'name': 'SwiftObjectAlarm'}
# CREATE_* drops the server-assigned fields; DELTA_* / UPDATE_* model the
# payloads exchanged during a partial vs. full update.
CREATE_ALARM = copy.deepcopy(AN_ALARM)
del CREATE_ALARM['timestamp']
del CREATE_ALARM['state_timestamp']
del CREATE_ALARM['alarm_id']
DELTA_ALARM = {u'alarm_actions': ['url1', 'url2']}
DELTA_ALARM_RULE = {u'comparison_operator': u'lt',
                    u'threshold': 42.1,
                    u'meter_name': u'foobar',
                    u'query': [{u'field': u'key_name',
                                u'op': u'eq',
                                u'value': u'key_value'}]}
UPDATED_ALARM = copy.deepcopy(AN_ALARM)
UPDATED_ALARM.update(DELTA_ALARM)
UPDATED_ALARM['threshold_rule'].update(DELTA_ALARM_RULE)
DELTA_ALARM['threshold_rule'] = DELTA_ALARM_RULE
UPDATE_ALARM = copy.deepcopy(UPDATED_ALARM)
del UPDATE_ALARM['user_id']
del UPDATE_ALARM['project_id']
del UPDATE_ALARM['name']
del UPDATE_ALARM['alarm_id']
del UPDATE_ALARM['timestamp']
del UPDATE_ALARM['state_timestamp']
# Legacy flat alarm format (no nested *_rule dict) used by
# AlarmLegacyManagerTest below.
AN_LEGACY_ALARM = {u'alarm_actions': [u'http://site:8000/alarm'],
                   u'ok_actions': [u'http://site:8000/ok'],
                   u'description': u'An alarm',
                   u'matching_metadata': {u'key_name': u'key_value'},
                   u'evaluation_periods': 2,
                   u'timestamp': u'2013-05-09T13:41:23.085000',
                   u'enabled': True,
                   u'meter_name': u'storage.objects',
                   u'period': 240.0,
                   u'alarm_id': u'alarm-id',
                   u'state': u'ok',
                   u'insufficient_data_actions': [u'http://site:8000/nodata'],
                   u'statistic': u'avg',
                   u'threshold': 200.0,
                   u'user_id': u'user-id',
                   u'project_id': u'project-id',
                   u'state_timestamp': u'2013-05-09T13:41:23.085000',
                   u'comparison_operator': 'gt',
                   u'repeat_actions': False,
                   u'name': 'SwiftObjectAlarm'}
CREATE_LEGACY_ALARM = copy.deepcopy(AN_LEGACY_ALARM)
del CREATE_LEGACY_ALARM['timestamp']
del CREATE_LEGACY_ALARM['state_timestamp']
del CREATE_LEGACY_ALARM['alarm_id']
DELTA_LEGACY_ALARM = {u'alarm_actions': ['url1', 'url2'],
                      u'comparison_operator': u'lt',
                      u'meter_name': u'foobar',
                      u'threshold': 42.1}
UPDATED_LEGACY_ALARM = copy.deepcopy(AN_LEGACY_ALARM)
UPDATED_LEGACY_ALARM.update(DELTA_LEGACY_ALARM)
UPDATE_LEGACY_ALARM = copy.deepcopy(UPDATED_LEGACY_ALARM)
del UPDATE_LEGACY_ALARM['user_id']
del UPDATE_LEGACY_ALARM['project_id']
del UPDATE_LEGACY_ALARM['name']
del UPDATE_LEGACY_ALARM['alarm_id']
del UPDATE_LEGACY_ALARM['timestamp']
del UPDATE_LEGACY_ALARM['state_timestamp']
# JSON payload recorded for the 'creation' entry of ALARM_HISTORY below.
FULL_DETAIL = ('{"alarm_actions": [], '
               '"user_id": "8185aa72421a4fd396d4122cba50e1b5", '
               '"name": "scombo", '
               '"timestamp": "2013-10-03T08:58:33.647912", '
               '"enabled": true, '
               '"state_timestamp": "2013-10-03T08:58:33.647912", '
               '"rule": {"operator": "or", "alarm_ids": '
               '["062cc907-3a9f-4867-ab3b-fa83212b39f7"]}, '
               # BUG FIX: the alarm-id value was missing its closing quote,
               # making FULL_DETAIL invalid JSON (the equivalent fixture in
               # test_shell.py quotes it correctly).
               '"alarm_id": "alarm-id", '
               '"state": "insufficient data", '
               '"insufficient_data_actions": [], '
               '"repeat_actions": false, '
               '"ok_actions": [], '
               '"project_id": "57d04f24d0824b78b1ea9bcecedbda8f", '
               '"type": "combination", '
               '"description": "Combined state of alarms '
               '062cc907-3a9f-4867-ab3b-fa83212b39f7"}')
# Three history events (newest first): a state transition, a rule change,
# and the original creation carrying FULL_DETAIL as its payload.
ALARM_HISTORY = [{'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                  'event_id': 'c74a8611-6553-4764-a860-c15a6aabb5d0',
                  'timestamp': '2013-10-03T08:59:28.326000',
                  'detail': '{"state": "alarm"}',
                  'alarm_id': 'alarm-id',
                  'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'type': 'state transition'},
                 {'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                  'event_id': 'c74a8611-6553-4764-a860-c15a6aabb5d0',
                  'timestamp': '2013-10-03T08:59:28.326000',
                  'detail': '{"description": "combination of one"}',
                  'alarm_id': 'alarm-id',
                  'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'type': 'rule change'},
                 {'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                  'event_id': '4fd7df9e-190d-4471-8884-dc5a33d5d4bb',
                  'timestamp': '2013-10-03T08:58:33.647000',
                  'detail': FULL_DETAIL,
                  'alarm_id': 'alarm-id',
                  'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'type': 'creation'}]
# FakeAPI routing table: URL -> {HTTP method: (headers, canned body)}.
fixtures = {
    '/v2/alarms':
    {
        'GET': (
            {},
            [AN_ALARM],
        ),
        'POST': (
            {},
            CREATE_ALARM,
        ),
    },
    '/v2/alarms/alarm-id':
    {
        'GET': (
            {},
            AN_ALARM,
        ),
        'PUT': (
            {},
            UPDATED_ALARM,
        ),
    },
    '/v2/alarms/alarm-id/state':
    {
        'PUT': (
            {},
            'alarm'
        ),
        'GET': (
            {},
            'alarm'
        ),
    },
    '/v2/alarms?q.op=&q.op=&q.value=project-id&q.value=SwiftObjectAlarm'
    '&q.field=project_id&q.field=name':
    {
        'GET': (
            {},
            [AN_ALARM],
        ),
    },
    '/v2/alarms/victim-id':
    {
        'DELETE': (
            {},
            None,
        ),
    },
    '/v2/alarms/alarm-id/history':
    {
        'GET': (
            {},
            ALARM_HISTORY,
        ),
    },
    '/v2/alarms/alarm-id/history?q.op=&q.value=NOW&q.field=timestamp':
    {
        'GET': (
            {},
            ALARM_HISTORY,
        ),
    },
}
class AlarmManagerTest(testtools.TestCase):
    """Exercise AlarmManager CRUD calls against the canned FakeAPI fixtures."""
    # NOTE: this module targets Python 2 (iteritems/xrange used below).
    def setUp(self):
        super(AlarmManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = alarms.AlarmManager(self.api)
    def test_list_all(self):
        alarms = list(self.mgr.list())
        expect = [
            ('GET', '/v2/alarms', {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(len(alarms), 1)
        self.assertEqual(alarms[0].alarm_id, 'alarm-id')
    def test_list_with_query(self):
        alarms = list(self.mgr.list(q=[
            {"field": "project_id",
             "value": "project-id"},
            {"field": "name",
             "value": "SwiftObjectAlarm"},
        ]))
        expect = [
            ('GET',
             '/v2/alarms?q.op=&q.op=&q.value=project-id&q.value='
             'SwiftObjectAlarm&q.field=project_id&q.field=name',
             {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(len(alarms), 1)
        self.assertEqual(alarms[0].alarm_id, 'alarm-id')
    def test_get(self):
        alarm = self.mgr.get(alarm_id='alarm-id')
        expect = [
            ('GET', '/v2/alarms/alarm-id', {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(alarm)
        self.assertEqual(alarm.alarm_id, 'alarm-id')
        # `rule` mirrors the type-specific *_rule attribute on the client side.
        self.assertEqual(alarm.rule, alarm.threshold_rule)
    def test_create(self):
        alarm = self.mgr.create(**CREATE_ALARM)
        expect = [
            ('POST', '/v2/alarms', {}, CREATE_ALARM),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(alarm)
    def test_update(self):
        alarm = self.mgr.update(alarm_id='alarm-id', **UPDATE_ALARM)
        expect = [
            ('GET', '/v2/alarms/alarm-id', {}, None),
            ('PUT', '/v2/alarms/alarm-id', {}, UPDATED_ALARM),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(alarm)
        self.assertEqual(alarm.alarm_id, 'alarm-id')
        for (key, value) in UPDATED_ALARM.iteritems():
            self.assertEqual(getattr(alarm, key), value)
    def test_update_delta(self):
        # A partial (delta) update must merge with the alarm fetched via GET.
        alarm = self.mgr.update(alarm_id='alarm-id', **DELTA_ALARM)
        expect = [
            ('GET', '/v2/alarms/alarm-id', {}, None),
            ('PUT', '/v2/alarms/alarm-id', {}, UPDATED_ALARM),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(alarm)
        self.assertEqual(alarm.alarm_id, 'alarm-id')
        for (key, value) in UPDATED_ALARM.iteritems():
            self.assertEqual(getattr(alarm, key), value)
    def test_set_state(self):
        state = self.mgr.set_state(alarm_id='alarm-id', state='alarm')
        expect = [
            ('PUT', '/v2/alarms/alarm-id/state', {}, 'alarm'),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(state, 'alarm')
    def test_get_state(self):
        state = self.mgr.get_state(alarm_id='alarm-id')
        expect = [
            ('GET', '/v2/alarms/alarm-id/state', {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(state, 'alarm')
    def test_delete(self):
        deleted = self.mgr.delete(alarm_id='victim-id')
        expect = [
            ('DELETE', '/v2/alarms/victim-id', {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(deleted is None)
    def _do_test_get_history(self, q, url):
        # Shared driver: fetch history with query *q*, expect one GET on *url*,
        # and check each change mirrors the corresponding ALARM_HISTORY dict.
        history = self.mgr.get_history(q=q, alarm_id='alarm-id')
        expect = [('GET', url, {}, None)]
        self.assertEqual(self.api.calls, expect)
        for i in xrange(len(history)):
            change = history[i]
            self.assertTrue(isinstance(change, alarms.AlarmChange))
            for k, v in ALARM_HISTORY[i].iteritems():
                self.assertEqual(getattr(change, k), v)
    def test_get_all_history(self):
        url = '/v2/alarms/alarm-id/history'
        self._do_test_get_history(None, url)
    def test_get_constrained_history(self):
        q = [dict(field='timestamp', value='NOW')]
        url = ('/v2/alarms/alarm-id/history'
               '?q.op=&q.value=NOW&q.field=timestamp')
        self._do_test_get_history(q, url)
class AlarmLegacyManagerTest(testtools.TestCase):
    """Check that the legacy flat alarm format maps onto the typed format."""
    def setUp(self):
        super(AlarmLegacyManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = alarms.AlarmManager(self.api)
    def test_create(self):
        alarm = self.mgr.create(**CREATE_LEGACY_ALARM)
        expect = [
            ('POST', '/v2/alarms', {}, CREATE_ALARM),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(alarm)
    def test_create_counter_name(self):
        # 'counter_name' is an even older spelling of 'meter_name'.
        create = {}
        create.update(CREATE_LEGACY_ALARM)
        create['counter_name'] = CREATE_LEGACY_ALARM['meter_name']
        del create['meter_name']
        alarm = self.mgr.create(**create)
        expect = [
            ('POST', '/v2/alarms', {}, CREATE_ALARM),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(alarm)
    def test_update(self):
        alarm = self.mgr.update(alarm_id='alarm-id', **DELTA_LEGACY_ALARM)
        expect = [
            ('GET', '/v2/alarms/alarm-id', {}, None),
            ('PUT', '/v2/alarms/alarm-id', {}, UPDATED_ALARM),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(alarm)
        self.assertEqual(alarm.alarm_id, 'alarm-id')
        for (key, value) in UPDATED_ALARM.iteritems():
            self.assertEqual(getattr(alarm, key), value)
    def test_update_counter_name(self):
        updated = {}
        updated.update(UPDATE_LEGACY_ALARM)
        updated['counter_name'] = UPDATED_LEGACY_ALARM['meter_name']
        del updated['meter_name']
        alarm = self.mgr.update(alarm_id='alarm-id', **updated)
        expect = [
            ('GET', '/v2/alarms/alarm-id', {}, None),
            ('PUT', '/v2/alarms/alarm-id', {}, UPDATED_ALARM),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertTrue(alarm)
        self.assertEqual(alarm.alarm_id, 'alarm-id')
        for (key, value) in UPDATED_ALARM.iteritems():
            self.assertEqual(getattr(alarm, key), value)
|
citrix-openstack-build/python-ceilometerclient | ceilometerclient/tests/v2/test_shell.py | <reponame>citrix-openstack-build/python-ceilometerclient
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO
import mock
import re
import sys
from testtools import matchers
from ceilometerclient.tests import utils
from ceilometerclient.v2 import alarms
from ceilometerclient.v2 import shell as ceilometer_shell
class ShellAlarmStateCommandsTest(utils.BaseTestCase):
    """Verify the alarm-state shell commands call the matching client methods."""
    ALARM_ID = 'foobar'
    def setUp(self):
        super(ShellAlarmStateCommandsTest, self).setUp()
        self.cc = mock.Mock()
        self.cc.alarms = mock.Mock()
        self.args = mock.Mock()
        self.args.alarm_id = self.ALARM_ID
    def test_alarm_state_get(self):
        ceilometer_shell.do_alarm_state_get(self.cc, self.args)
        self.cc.alarms.get_state.assert_called_once_with(self.ALARM_ID)
        # getting state must never trigger a state change
        self.assertFalse(self.cc.alarms.set_state.called)
    def test_alarm_state_set(self):
        self.args.state = 'ok'
        ceilometer_shell.do_alarm_state_set(self.cc, self.args)
        self.cc.alarms.set_state.assert_called_once_with(self.ALARM_ID, 'ok')
        self.assertFalse(self.cc.alarms.get_state.called)
class ShellAlarmHistoryCommandTest(utils.BaseTestCase):
    """Check do_alarm_history table output against canned history data."""
    ALARM_ID = '768ff714-8cfb-4db9-9753-d484cb33a1cc'
    FULL_DETAIL = ('{"alarm_actions": [], '
                   '"user_id": "8185aa72421a4fd396d4122cba50e1b5", '
                   '"name": "scombo", '
                   '"timestamp": "2013-10-03T08:58:33.647912", '
                   '"enabled": true, '
                   '"state_timestamp": "2013-10-03T08:58:33.647912", '
                   '"rule": {"operator": "or", "alarm_ids": '
                   '["062cc907-3a9f-4867-ab3b-fa83212b39f7"]}, '
                   '"alarm_id": "768ff714-8cfb-4db9-9753-d484cb33a1cc", '
                   '"state": "insufficient data", '
                   '"insufficient_data_actions": [], '
                   '"repeat_actions": false, '
                   '"ok_actions": [], '
                   '"project_id": "57d04f24d0824b78b1ea9bcecedbda8f", '
                   '"type": "combination", '
                   '"description": "Combined state of alarms '
                   '062cc907-3a9f-4867-ab3b-fa83212b39f7"}')
    ALARM_HISTORY = [{'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                      'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                      'event_id': 'c74a8611-6553-4764-a860-c15a6aabb5d0',
                      'timestamp': '2013-10-03T08:59:28.326000',
                      'detail': '{"state": "alarm"}',
                      'alarm_id': '768ff714-8cfb-4db9-9753-d484cb33a1cc',
                      'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                      'type': 'state transition'},
                     {'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                      'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                      'event_id': 'c74a8611-6553-4764-a860-c15a6aabb5d0',
                      'timestamp': '2013-10-03T08:59:28.326000',
                      'detail': '{"description": "combination of one"}',
                      'alarm_id': '768ff714-8cfb-4db9-9753-d484cb33a1cc',
                      'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                      'type': 'rule change'},
                     {'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                      'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                      'event_id': '4fd7df9e-190d-4471-8884-dc5a33d5d4bb',
                      'timestamp': '2013-10-03T08:58:33.647000',
                      'detail': FULL_DETAIL,
                      'alarm_id': '768ff714-8cfb-4db9-9753-d484cb33a1cc',
                      'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                      'type': 'creation'}]
    # NOTE(review): non-raw string carrying regex escapes (\|, \d, \:);
    # fine on Python 2, but a raw string literal would be safer.
    TIMESTAMP_RE = (' +\| (\d{4})-(\d{2})-(\d{2})T'
                    '(\d{2})\:(\d{2})\:(\d{2})\.(\d{6}) \| +')
    def setUp(self):
        super(ShellAlarmHistoryCommandTest, self).setUp()
        self.cc = mock.Mock()
        self.cc.alarms = mock.Mock()
        self.args = mock.Mock()
        self.args.alarm_id = self.ALARM_ID
    def _do_test_alarm_history(self, raw_query=None, parsed_query=None):
        # Capture stdout so the table printed by the command can be matched.
        self.args.query = raw_query
        orig = sys.stdout
        sys.stdout = cStringIO.StringIO()
        history = [alarms.AlarmChange(mock.Mock(), change)
                   for change in self.ALARM_HISTORY]
        self.cc.alarms.get_history.return_value = history
        try:
            ceilometer_shell.do_alarm_history(self.cc, self.args)
            self.cc.alarms.get_history.assert_called_once_with(
                q=parsed_query,
                alarm_id=self.ALARM_ID
            )
            out = sys.stdout.getvalue()
            required = [
                '.*creation%sname: scombo.*' % self.TIMESTAMP_RE,
                '.*rule change%sdescription: combination of one.*' %
                self.TIMESTAMP_RE,
                '.*state transition%sstate: alarm.*' % self.TIMESTAMP_RE,
            ]
            for r in required:
                self.assertThat(out, matchers.MatchesRegex(r, re.DOTALL))
        finally:
            # always restore the real stdout, even on assertion failure
            sys.stdout.close()
            sys.stdout = orig
    def test_alarm_all_history(self):
        self._do_test_alarm_history()
    def test_alarm_constrained_history(self):
        parsed_query = [dict(field='timestamp',
                             value='2013-10-03T08:59:28',
                             op='gt')]
        self._do_test_alarm_history(raw_query='timestamp>2013-10-03T08:59:28',
                                    parsed_query=parsed_query)
|
citrix-openstack-build/python-ceilometerclient | ceilometerclient/tests/v2/test_statistics.py | <reponame>citrix-openstack-build/python-ceilometerclient
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.tests import utils
import ceilometerclient.v2.statistics
# URL pieces shared by the fixtures and the assertions below.
base_url = '/v2/meters/instance/statistics'
qry = 'q.op=&q.op=&q.value=foo&q.value=bar&q.field=resource_id&q.field=source'
period = '&period=60'
# One canned statistics bucket returned by every fixture URL.
samples = [{
    u'count': 135,
    u'duration_start': u'2013-02-04T10:51:42',
    u'min': 1.0,
    u'max': 1.0,
    u'duration_end':
    u'2013-02-05T15:46:09',
    u'duration': 1734.0,
    u'avg': 1.0,
    u'sum': 135.0,
}]
# FakeAPI routing table: URL -> {HTTP method: (headers, canned body)}.
fixtures = {
    base_url:
    {
        'GET': (
            {},
            samples
        ),
    },
    '%s?%s' % (base_url, qry):
    {
        'GET': (
            {},
            samples
        ),
    },
    '%s?%s%s' % (base_url, qry, period):
    {
        'GET': (
            {},
            samples
        ),
    },
}
class StatisticsManagerTest(utils.BaseTestCase):
    """Verify StatisticsManager builds the expected statistics URLs."""
    def setUp(self):
        super(StatisticsManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = ceilometerclient.v2.statistics.StatisticsManager(self.api)
    def test_list_by_meter_name(self):
        stats = list(self.mgr.list(meter_name='instance'))
        expect = [
            ('GET', '/v2/meters/instance/statistics', {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(len(stats), 1)
        self.assertEqual(stats[0].count, 135)
    def test_list_by_meter_name_extended(self):
        # Query filters must be encoded into the statistics URL.
        stats = list(self.mgr.list(meter_name='instance',
                                   q=[
                                       {"field": "resource_id",
                                        "value": "foo"},
                                       {"field": "source",
                                        "value": "bar"},
                                   ]))
        expect = [
            ('GET',
             '%s?%s' % (base_url, qry), {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(len(stats), 1)
        self.assertEqual(stats[0].count, 135)
    def test_list_by_meter_name_with_period(self):
        # The period parameter is appended after the query filters.
        stats = list(self.mgr.list(meter_name='instance',
                                   q=[
                                       {"field": "resource_id",
                                        "value": "foo"},
                                       {"field": "source",
                                        "value": "bar"},
                                   ],
                                   period=60))
        expect = [
            ('GET',
             '%s?%s%s' % (base_url, qry, period), {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(len(stats), 1)
        self.assertEqual(stats[0].count, 135)
|
citrix-openstack-build/python-ceilometerclient | ceilometerclient/v2/shell.py | <filename>ceilometerclient/v2/shell.py
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
from ceilometerclient.common import utils
from ceilometerclient import exc
from ceilometerclient.v2 import options
# Valid alarm states accepted/reported by the Ceilometer alarm API.
ALARM_STATES = ['ok', 'alarm', 'insufficient_data']
# Comparison operators accepted by threshold alarms.
ALARM_OPERATORS = ['lt', 'le', 'eq', 'ne', 'ge', 'gt']
# Operators accepted by combination alarms.
ALARM_COMBINATION_OPERATORS = ['and', 'or']
# Statistics a threshold alarm can evaluate.
STATISTICS = ['max', 'min', 'avg', 'sum', 'count']
# Maps API operator names to their symbolic form for display purposes.
OPERATORS_STRING = dict(gt='>', ge='>=',
                        lt='<', le="<=",
                        eq='==', ne='!=')
@utils.arg('-q', '--query', metavar='<QUERY>',
           help='key[op]value; list.')
@utils.arg('-m', '--meter', metavar='<NAME>', required=True,
           help='Name of meter to show samples for.')
@utils.arg('-p', '--period', metavar='<PERIOD>',
           help='Period in seconds over which to group samples.')
def do_statistics(cc, args):
    '''List the statistics for this meter.'''
    params = {'meter_name': args.meter,
              'q': options.cli_to_array(args.query),
              'period': args.period}
    try:
        statistics = cc.statistics.list(**params)
    except exc.HTTPNotFound:
        raise exc.CommandError('Samples not found: %s' % args.meter)
    else:
        # Attribute names and their human-readable column headers,
        # kept in the same order.
        columns = ['period', 'period_start', 'period_end',
                   'count', 'min', 'max', 'sum', 'avg',
                   'duration', 'duration_start', 'duration_end']
        labels = ['Period', 'Period Start', 'Period End',
                  'Count', 'Min', 'Max', 'Sum', 'Avg',
                  'Duration', 'Duration Start', 'Duration End']
        utils.print_list(statistics, columns, labels)
@utils.arg('-q', '--query', metavar='<QUERY>',
           help='key[op]value; list.')
@utils.arg('-m', '--meter', metavar='<NAME>', required=True,
           help='Name of meter to show samples for.')
def do_sample_list(cc, args):
    '''List the samples for this meters.'''
    try:
        samples = cc.samples.list(meter_name=args.meter,
                                  q=options.cli_to_array(args.query))
    except exc.HTTPNotFound:
        raise exc.CommandError('Samples not found: %s' % args.meter)
    else:
        # Attribute names and matching column headers for the table.
        columns = ['resource_id', 'counter_name', 'counter_type',
                   'counter_volume', 'counter_unit', 'timestamp']
        labels = ['Resource ID', 'Name', 'Type', 'Volume', 'Unit',
                  'Timestamp']
        utils.print_list(samples, columns, labels, sortby=0)
@utils.arg('--project-id', metavar='<PROJECT_ID>',
           help='Tenant to associate with sample '
                '(only settable by admin users)')
@utils.arg('--user-id', metavar='<USER_ID>',
           help='User to associate with sample '
                '(only settable by admin users)')
@utils.arg('-r', '--resource-id', metavar='<RESOURCE_ID>', required=True,
           help='ID of the resource.')
@utils.arg('-m', '--meter-name', metavar='<METER_NAME>',
           help='the meter name')
@utils.arg('--meter-type', metavar='<METER_TYPE>', required=True,
           help='the meter type')
@utils.arg('--meter-unit', metavar='<METER_UNIT>', required=True,
           help='the meter unit')
@utils.arg('--sample-volume', metavar='<SAMPLE_VOLUME>', required=True,
           help='The sample volume')
@utils.arg('--resource-metadata', metavar='<RESOURCE_METADATA>',
           help='resource metadata')
def do_sample_create(cc, args={}):
    '''Create a sample.'''
    # CLI argument names whose API field names differ.
    arg_to_field_mapping = {'meter_name': 'counter_name',
                            'meter_unit': 'counter_unit',
                            'meter_type': 'counter_type',
                            'sample_volume': 'counter_volume'}
    fields = {}
    for k, v in vars(args).items():
        if v is None:
            continue
        if k == 'resource_metadata':
            # Metadata arrives as a JSON string on the command line.
            fields[k] = json.loads(v)
        else:
            fields[arg_to_field_mapping.get(k, k)] = v
    cc.samples.create(**fields)
@utils.arg('-q', '--query', metavar='<QUERY>',
           help='key[op]value; list.')
def do_meter_list(cc, args={}):
    '''List the user's meters.'''
    meters = cc.meters.list(q=options.cli_to_array(args.query))
    # Attribute names and matching column headers for the table.
    columns = ['name', 'type', 'unit', 'resource_id', 'user_id',
               'project_id']
    labels = ['Name', 'Type', 'Unit', 'Resource ID', 'User ID',
              'Project ID']
    utils.print_list(meters, columns, labels, sortby=0)
def _display_rule(type, rule):
if type == 'threshold':
return ('%(meter_name)s %(comparison_operator)s '
'%(threshold)s during %(evaluation_periods)s x %(period)ss' %
{
'meter_name': rule['meter_name'],
'threshold': rule['threshold'],
'evaluation_periods': rule['evaluation_periods'],
'period': rule['period'],
'comparison_operator': OPERATORS_STRING.get(
rule['comparison_operator'])
})
elif type == 'combination':
return ('combinated states (%(operator)s) of %(alarms)s' % {
'operator': rule['operator'].upper(),
'alarms': ", ".join(rule['alarm_ids'])})
else:
# just dump all
return "\n".join(["%s: %s" % (f, v)
for f, v in rule.iteritems()])
def alarm_rule_formatter(alarm):
    """print_list formatter: render an alarm's rule for the table."""
    alarm_type = alarm.type
    rule = alarm.rule
    return _display_rule(alarm_type, rule)
def _infer_type(detail):
if 'type' in detail:
return detail['type']
elif 'meter_name' in detail['rule']:
return 'threshold'
elif 'alarms' in detail['rule']:
return 'combination'
else:
return 'unknown'
def alarm_change_detail_formatter(change):
    """Format an alarm-history entry's JSON detail for display.

    :param change: history entry with 'type' and JSON-encoded 'detail'
    """
    detail = json.loads(change.detail)
    fields = []
    if change.type == 'state transition':
        fields.append('state: %s' % detail['state'])
    elif change.type == 'creation' or change.type == 'deletion':
        for k in ['name', 'description', 'type', 'rule']:
            if k == 'rule':
                fields.append('rule: %s' % _display_rule(detail['type'],
                                                         detail[k]))
            else:
                fields.append('%s: %s' % (k, detail[k]))
    elif change.type == 'rule change':
        # items() instead of the Python 2-only iteritems() so this also
        # runs on Python 3.
        for k, v in detail.items():
            if k == 'rule':
                fields.append('rule: %s' % _display_rule(_infer_type(detail),
                                                         v))
            else:
                fields.append('%s: %s' % (k, v))
    return '\n'.join(fields)
@utils.arg('-q', '--query', metavar='<QUERY>',
           help='key[op]value; list.')
def do_alarm_list(cc, args={}):
    '''List the user's alarms.'''
    alarms = cc.alarms.list(q=options.cli_to_array(args.query))
    # omit action initially to keep output width sane
    # (can switch over to vertical formatting when available from CLIFF)
    columns = ['alarm_id', 'name', 'state', 'enabled', 'repeat_actions',
               'rule']
    labels = ['Alarm ID', 'Name', 'State', 'Enabled', 'Continuous',
              'Alarm condition']
    utils.print_list(alarms, columns, labels,
                     formatters={'rule': alarm_rule_formatter}, sortby=0)
def alarm_query_formater(alarm):
    """Render a threshold alarm's query as '<field> <op> <value>' terms."""
    qs = []
    for q in alarm.rule['query']:
        qs.append('%s %s %s' % (
            q['field'], OPERATORS_STRING.get(q['op']), q['value']))
    # NOTE(review): the raw-string prefix makes '\n' a literal
    # backslash-n in the rendered output rather than a newline --
    # confirm this is the intended display before changing it.
    return r' AND\n'.join(qs)
def _display_alarm(alarm):
    """Print one alarm's attributes, merged with its rule fields, as a table."""
    attributes = ['name', 'description', 'type',
                  'state', 'enabled', 'alarm_id', 'user_id', 'project_id',
                  'alarm_actions', 'ok_actions', 'insufficient_data_actions',
                  'repeat_actions']
    # Missing attributes are shown as empty strings rather than omitted.
    data = dict([(attr, getattr(alarm, attr, '')) for attr in attributes])
    data.update(alarm.rule)
    if alarm.type == 'threshold':
        data['query'] = alarm_query_formater(alarm)
    utils.print_dict(data, wrap=72)
@utils.arg('-a', '--alarm_id', metavar='<ALARM_ID>', required=True,
           help='ID of the alarm to show.')
def do_alarm_show(cc, args={}):
    '''Show an alarm.'''
    try:
        alarm = cc.alarms.get(args.alarm_id)
    except exc.HTTPNotFound:
        raise exc.CommandError('Alarm not found: %s' % args.alarm_id)
    _display_alarm(alarm)
def common_alarm_arguments(create=False):
    """Decorator factory adding the CLI arguments shared by alarm commands.

    :param create: when True, --name is mandatory (creation commands);
                   update commands leave it optional.
    """
    def _wrapper(func):
        @utils.arg('--name', metavar='<NAME>', required=create,
                   help='Name of the alarm (must be unique per tenant)')
        @utils.arg('--project-id', metavar='<PROJECT_ID>',
                   help='Tenant to associate with alarm '
                        '(only settable by admin users)')
        @utils.arg('--user-id', metavar='<USER_ID>',
                   help='User to associate with alarm '
                        '(only settable by admin users)')
        @utils.arg('--description', metavar='<DESCRIPTION>',
                   help='Free text description of the alarm')
        @utils.arg('--state', metavar='<STATE>',
                   help='State of the alarm, one of: ' + str(ALARM_STATES))
        @utils.arg('--enabled', type=utils.string_to_bool,
                   metavar='{True|False}',
                   help='True if alarm evaluation/actioning is enabled')
        @utils.arg('--alarm-action', dest='alarm_actions',
                   metavar='<Webhook URL>', action='append', default=None,
                   help=('URL to invoke when state transitions to alarm. '
                         'May be used multiple times.'))
        @utils.arg('--ok-action', dest='ok_actions',
                   metavar='<Webhook URL>', action='append', default=None,
                   help=('URL to invoke when state transitions to OK. '
                         'May be used multiple times.'))
        @utils.arg('--insufficient-data-action',
                   dest='insufficient_data_actions',
                   metavar='<Webhook URL>', action='append', default=None,
                   help=('URL to invoke when state transitions to unkown. '
                         'May be used multiple times.'))
        @utils.arg('--repeat-actions', dest='repeat_actions',
                   metavar='{True|False}', type=utils.string_to_bool,
                   default=False,
                   help=('True if actions should be repeatedly notified '
                         'while alarm remains in target state'))
        # wraps keeps the command's name/docstring for the CLI help output.
        @functools.wraps(func)
        def _wrapped(*args, **kwargs):
            return func(*args, **kwargs)
        return _wrapped
    return _wrapper
@common_alarm_arguments(create=True)
@utils.arg('--period', type=int, metavar='<PERIOD>',
           help='Length of each period (seconds) to evaluate over')
@utils.arg('--evaluation-periods', type=int, metavar='<COUNT>',
           help='Number of periods to evaluate over')
@utils.arg('--meter-name', metavar='<METRIC>', required=True,
           help='Metric to evaluate against')
@utils.arg('--statistic', metavar='<STATISTIC>',
           help='Statistic to evaluate, one of: ' + str(STATISTICS))
@utils.arg('--comparison-operator', metavar='<OPERATOR>',
           help='Operator to compare with, one of: ' + str(ALARM_OPERATORS))
@utils.arg('--threshold', type=float, metavar='<THRESHOLD>', required=True,
           help='Threshold to evaluate against')
@utils.arg('--matching-metadata', dest='matching_metadata',
           metavar='<Matching Metadata>', action='append', default=None,
           help=('A meter should match this resource metadata (key=value) '
                 'additionally to the meter_name'))
def do_alarm_create(cc, args={}):
    '''Create a new alarm (Deprecated).'''
    # Keep only the CLI arguments that were actually supplied.
    fields = dict((k, v) for k, v in vars(args).items() if v is not None)
    fields = utils.args_array_to_dict(fields, "matching_metadata")
    alarm = cc.alarms.create(**fields)
    _display_alarm(alarm)
@common_alarm_arguments(create=True)
@utils.arg('--meter-name', metavar='<METRIC>', required=True,
           dest='threshold_rule/meter_name',
           help='Metric to evaluate against')
@utils.arg('--period', type=int, metavar='<PERIOD>',
           dest='threshold_rule/period',
           help='Length of each period (seconds) to evaluate over')
@utils.arg('--evaluation-periods', type=int, metavar='<COUNT>',
           dest='threshold_rule/evaluation_periods',
           help='Number of periods to evaluate over')
@utils.arg('--statistic', metavar='<STATISTIC>',
           dest='threshold_rule/statistic',
           help='Statistic to evaluate, one of: ' + str(STATISTICS))
@utils.arg('--comparison-operator', metavar='<OPERATOR>',
           dest='threshold_rule/comparison_operator',
           help='Operator to compare with, one of: ' + str(ALARM_OPERATORS))
@utils.arg('--threshold', type=float, metavar='<THRESHOLD>', required=True,
           dest='threshold_rule/threshold',
           help='Threshold to evaluate against')
@utils.arg('-q', '--query', metavar='<QUERY>',
           dest='threshold_rule/query',
           help='The query to find the data for computing statistics '
                '(key[op]value; list.)')
def do_alarm_threshold_create(cc, args={}):
    '''Create a new alarm based on computed statistics.'''
    # Keep only the supplied arguments, then fold the 'a/b' dests into
    # the nested rule dict the API expects.
    fields = dict((k, v) for k, v in vars(args).items() if v is not None)
    fields = utils.key_with_slash_to_nested_dict(fields)
    fields['type'] = 'threshold'
    rule = fields['threshold_rule']
    if 'query' in rule:
        rule['query'] = options.cli_to_array(rule['query'])
    alarm = cc.alarms.create(**fields)
    _display_alarm(alarm)
@common_alarm_arguments(create=True)
@utils.arg('--alarm_ids', action='append', metavar='<ALARM IDS>',
           required=True, dest='combination_rule/alarm_ids',
           help='List of alarm id')
@utils.arg('--operator', metavar='<OPERATOR>',
           dest='combination_rule/operator',
           help='Operator to compare with, one of: ' + str(
               ALARM_COMBINATION_OPERATORS))
def do_alarm_combination_create(cc, args={}):
    '''Create a new alarm based on state of other alarms.'''
    # Keep only the supplied arguments and nest the combination rule.
    fields = dict((k, v) for k, v in vars(args).items() if v is not None)
    fields = utils.key_with_slash_to_nested_dict(fields)
    fields['type'] = 'combination'
    alarm = cc.alarms.create(**fields)
    _display_alarm(alarm)
@utils.arg('-a', '--alarm_id', metavar='<ALARM_ID>', required=True,
           help='ID of the alarm to update.')
@common_alarm_arguments()
@utils.arg('--period', type=int, metavar='<PERIOD>',
           help='Length of each period (seconds) to evaluate over')
@utils.arg('--evaluation-periods', type=int, metavar='<COUNT>',
           help='Number of periods to evaluate over')
@utils.arg('--meter-name', metavar='<METRIC>',
           help='Metric to evaluate against')
@utils.arg('--statistic', metavar='<STATISTIC>',
           help='Statistic to evaluate, one of: ' + str(STATISTICS))
@utils.arg('--comparison-operator', metavar='<OPERATOR>',
           help='Operator to compare with, one of: ' + str(ALARM_OPERATORS))
@utils.arg('--threshold', type=float, metavar='<THRESHOLD>',
           help='Threshold to evaluate against')
@utils.arg('--matching-metadata', dest='matching_metadata',
           metavar='<Matching Metadata>', action='append', default=None,
           help=('A meter should match this resource metadata (key=value) '
                 'additionally to the meter_name'))
def do_alarm_update(cc, args={}):
    '''Update an existing alarm.'''
    # Keep only supplied arguments; alarm_id goes in the URL, not the body.
    fields = dict((k, v) for k, v in vars(args).items() if v is not None)
    fields = utils.args_array_to_dict(fields, "matching_metadata")
    fields.pop('alarm_id')
    try:
        alarm = cc.alarms.update(args.alarm_id, **fields)
    except exc.HTTPNotFound:
        raise exc.CommandError('Alarm not found: %s' % args.alarm_id)
    _display_alarm(alarm)
@utils.arg('-a', '--alarm_id', metavar='<ALARM_ID>', required=True,
           help='ID of the alarm to update.')
@common_alarm_arguments()
@utils.arg('--meter-name', metavar='<METRIC>',
           dest='threshold_rule/meter_name',
           help='Metric to evaluate against')
@utils.arg('--period', type=int, metavar='<PERIOD>',
           dest='threshold_rule/period',
           help='Length of each period (seconds) to evaluate over')
@utils.arg('--evaluation-periods', type=int, metavar='<COUNT>',
           dest='threshold_rule/evaluation_periods',
           help='Number of periods to evaluate over')
@utils.arg('--statistic', metavar='<STATISTIC>',
           dest='threshold_rule/statistic',
           help='Statistic to evaluate, one of: ' + str(STATISTICS))
@utils.arg('--comparison-operator', metavar='<OPERATOR>',
           dest='threshold_rule/comparison_operator',
           help='Operator to compare with, one of: ' + str(ALARM_OPERATORS))
@utils.arg('--threshold', type=float, metavar='<THRESHOLD>',
           dest='threshold_rule/threshold',
           help='Threshold to evaluate against')
@utils.arg('-q', '--query', metavar='<QUERY>',
           dest='threshold_rule/query',
           help='The query to find the data for computing statistics '
                '(key[op]value; list.)')
def do_alarm_threshold_update(cc, args={}):
    '''Update an existing alarm based on computed statistics.'''
    # Keep only supplied arguments, nest the 'a/b' dests, and move the
    # alarm_id out of the request body.
    fields = dict((k, v) for k, v in vars(args).items() if v is not None)
    fields = utils.key_with_slash_to_nested_dict(fields)
    fields.pop('alarm_id')
    fields['type'] = 'threshold'
    rule = fields['threshold_rule']
    if 'query' in rule:
        rule['query'] = options.cli_to_array(rule['query'])
    try:
        alarm = cc.alarms.update(args.alarm_id, **fields)
    except exc.HTTPNotFound:
        raise exc.CommandError('Alarm not found: %s' % args.alarm_id)
    _display_alarm(alarm)
@utils.arg('-a', '--alarm_id', metavar='<ALARM_ID>', required=True,
           help='ID of the alarm to update.')
@common_alarm_arguments()
@utils.arg('--alarm_ids', action='append', metavar='<ALARM IDS>',
           dest='combination_rule/alarm_ids',
           help='List of alarm id')
# '--operator' was mistyped as '---operator' (triple dash), which made the
# option unusable and inconsistent with do_alarm_combination_create.
@utils.arg('--operator', metavar='<OPERATOR>',
           dest='combination_rule/operator',
           help='Operator to compare with, one of: ' + str(
               ALARM_COMBINATION_OPERATORS))
def do_alarm_combination_update(cc, args={}):
    '''Update an existing alarm based on state of other alarms.'''
    # Keep only supplied arguments, nest the 'a/b' dests, and move the
    # alarm_id out of the request body.
    fields = dict((k, v) for k, v in vars(args).items() if v is not None)
    fields = utils.key_with_slash_to_nested_dict(fields)
    fields.pop('alarm_id')
    fields['type'] = 'combination'
    try:
        alarm = cc.alarms.update(args.alarm_id, **fields)
    except exc.HTTPNotFound:
        raise exc.CommandError('Alarm not found: %s' % args.alarm_id)
    _display_alarm(alarm)
@utils.arg('-a', '--alarm_id', metavar='<ALARM_ID>', required=True,
           help='ID of the alarm to delete.')
def do_alarm_delete(cc, args={}):
    '''Delete an alarm.'''
    alarm_id = args.alarm_id
    try:
        cc.alarms.delete(alarm_id)
    except exc.HTTPNotFound:
        raise exc.CommandError('Alarm not found: %s' % alarm_id)
@utils.arg('-a', '--alarm_id', metavar='<ALARM_ID>', required=True,
           help='ID of the alarm state to set.')
@utils.arg('--state', metavar='<STATE>', required=True,
           help='State of the alarm, one of: ' + str(ALARM_STATES))
def do_alarm_state_set(cc, args={}):
    '''Set the state of an alarm.'''
    try:
        # The API echoes back the state it actually stored.
        new_state = cc.alarms.set_state(args.alarm_id, args.state)
    except exc.HTTPNotFound:
        raise exc.CommandError('Alarm not found: %s' % args.alarm_id)
    utils.print_dict({'state': new_state}, wrap=72)
@utils.arg('-a', '--alarm_id', metavar='<ALARM_ID>', required=True,
           help='ID of the alarm state to show.')
def do_alarm_state_get(cc, args={}):
    '''Get the state of an alarm.'''
    try:
        current_state = cc.alarms.get_state(args.alarm_id)
    except exc.HTTPNotFound:
        raise exc.CommandError('Alarm not found: %s' % args.alarm_id)
    utils.print_dict({'state': current_state}, wrap=72)
@utils.arg('-a', '--alarm_id', metavar='<ALARM_ID>', required=True,
           help='ID of the alarm for which history is shown.')
@utils.arg('-q', '--query', metavar='<QUERY>',
           help='key[op]value; list.')
def do_alarm_history(cc, args={}):
    '''Display the change history of an alarm.'''
    try:
        history = cc.alarms.get_history(alarm_id=args.alarm_id,
                                        q=options.cli_to_array(args.query))
    except exc.HTTPNotFound:
        raise exc.CommandError('Alarm not found: %s' % args.alarm_id)
    columns = ['type', 'timestamp', 'detail']
    labels = ['Type', 'Timestamp', 'Detail']
    utils.print_list(history, columns, labels,
                     formatters={'detail': alarm_change_detail_formatter},
                     sortby=1)
@utils.arg('-q', '--query', metavar='<QUERY>',
           help='key[op]value; list.')
def do_resource_list(cc, args={}):
    '''List the resources.'''
    resources = cc.resources.list(q=options.cli_to_array(args.query))
    columns = ['resource_id', 'source', 'user_id', 'project_id']
    labels = ['Resource ID', 'Source', 'User ID', 'Project ID']
    utils.print_list(resources, columns, labels, sortby=1)
@utils.arg('-r', '--resource_id', metavar='<RESOURCE_ID>', required=True,
           help='ID of the resource to show.')
def do_resource_show(cc, args={}):
    '''Show the resource.'''
    try:
        resource = cc.resources.get(args.resource_id)
    except exc.HTTPNotFound:
        raise exc.CommandError('Resource not found: %s' % args.resource_id)
    else:
        attributes = ['resource_id', 'source', 'user_id',
                      'project_id', 'metadata']
        # Missing attributes are shown as empty strings rather than omitted.
        data = dict([(attr, getattr(resource, attr, '')) for attr in attributes])
        utils.print_dict(data, wrap=72)
|
citrix-openstack-build/python-ceilometerclient | ceilometerclient/tests/test_utils.py | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO
import sys
from ceilometerclient.common import utils
from ceilometerclient.tests import utils as test_utils
class UtilsTest(test_utils.BaseTestCase):
    """Unit tests for ceilometerclient.common.utils helpers."""

    def test_prettytable(self):
        class Struct:
            def __init__(self, **entries):
                self.__dict__.update(entries)
        # test that the prettytable output is wellformatted (left-aligned)
        saved_stdout = sys.stdout
        try:
            # Capture print_dict's output by swapping stdout.
            sys.stdout = output_dict = cStringIO.StringIO()
            utils.print_dict({'K': 'k', 'Key': 'Value'})
        finally:
            # Always restore stdout, even when print_dict raises.
            sys.stdout = saved_stdout
        self.assertEqual(output_dict.getvalue(), '''\
+----------+-------+
| Property | Value |
+----------+-------+
| K | k |
| Key | Value |
+----------+-------+
''')

    def test_args_array_to_dict(self):
        # 'key=value' entries of the named field are parsed into a dict;
        # other fields pass through untouched.
        my_args = {
            'matching_metadata': ['metadata.key=metadata_value'],
            'other': 'value'
        }
        cleaned_dict = utils.args_array_to_dict(my_args,
                                                "matching_metadata")
        self.assertEqual(cleaned_dict, {
            'matching_metadata': {'metadata.key': 'metadata_value'},
            'other': 'value'
        })

    def test_key_with_slash_to_nested_dict(self):
        # 'outer/inner' keys are folded into nested dicts grouped by prefix.
        my_args = {
            'combination_rule/alarm_ids': ['id1', 'id2'],
            'combination_rule/operator': 'and',
            'threshold_rule/threshold': 400,
            'threshold_rule/statictic': 'avg',
            'threshold_rule/comparison_operator': 'or',
        }
        nested_dict = utils.key_with_slash_to_nested_dict(my_args)
        self.assertEqual(nested_dict, {
            'combination_rule': {'alarm_ids': ['id1', 'id2'],
                                 'operator': 'and'},
            'threshold_rule': {'threshold': 400,
                               'statictic': 'avg',
                               'comparison_operator': 'or'},
        })

    def test_arg(self):
        # utils.arg stores its arguments on the function and augments the
        # help string for required/defaulted options.
        @utils.arg(help="not_required_no_default.")
        def not_required_no_default():
            pass
        _, args = not_required_no_default.__dict__['arguments'][0]
        self.assertEqual(args['help'], "not_required_no_default.")
        @utils.arg(required=True, help="required_no_default.")
        def required_no_default():
            pass
        _, args = required_no_default.__dict__['arguments'][0]
        self.assertEqual(args['help'], "required_no_default. Required.")
        @utils.arg(default=42, help="not_required_default.")
        def not_required_default():
            pass
        _, args = not_required_default.__dict__['arguments'][0]
        self.assertEqual(args['help'], "not_required_default. Defaults to 42.")

    def test_merge_nested_dict(self):
        # With depth=1, nested dicts one level down are merged key-by-key;
        # anything deeper is replaced wholesale.
        dest = {'key': 'value',
                'nested': {'key2': 'value2',
                           'key3': 'value3',
                           'nested2': {'key': 'value',
                                       'some': 'thing'}}}
        source = {'key': 'modified',
                  'nested': {'key3': 'modified3',
                             'nested2': {'key5': 'value5'}}}
        utils.merge_nested_dict(dest, source, depth=1)
        self.assertEqual(dest, {'key': 'modified',
                                'nested': {'key2': 'value2',
                                           'key3': 'modified3',
                                           'nested2': {'key5': 'value5'}}})

    def test_merge_nested_dict_no_depth(self):
        # Without depth, nested dicts are replaced, not merged.
        dest = {'key': 'value',
                'nested': {'key2': 'value2',
                           'key3': 'value3',
                           'nested2': {'key': 'value',
                                       'some': 'thing'}}}
        source = {'key': 'modified',
                  'nested': {'key3': 'modified3',
                             'nested2': {'key5': 'value5'}}}
        utils.merge_nested_dict(dest, source)
        self.assertEqual(dest, {'key': 'modified',
                                'nested': {'key3': 'modified3',
                                           'nested2': {'key5': 'value5'}}})
|
akentner/home-assistant | homeassistant/components/nest/binary_sensor.py | """Support for Nest Thermostat binary sensors."""
from itertools import chain
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_SOUND,
BinarySensorEntity,
)
from homeassistant.const import CONF_MONITORED_CONDITIONS
from . import CONF_BINARY_SENSORS, DATA_NEST, DATA_NEST_CONFIG, NestSensorDevice
_LOGGER = logging.getLogger(__name__)
# Sensor types available on every Nest device, mapped to a device class.
BINARY_TYPES = {"online": "connectivity"}
# Thermostat-only sensor types (None = no specific device class).
CLIMATE_BINARY_TYPES = {
    "fan": None,
    "is_using_emergency_heat": "heat",
    "is_locked": None,
    "has_leaf": None,
}
# Camera-only sensor types.
CAMERA_BINARY_TYPES = {
    "motion_detected": "motion",
    "sound_detected": DEVICE_CLASS_SOUND,
    "person_detected": "occupancy",
}
# Structure-level sensor types, plus the mapping from the structure's
# string state to the boolean the binary sensor reports.
STRUCTURE_BINARY_TYPES = {"away": None}
STRUCTURE_BINARY_STATE_MAP = {"away": {"away": True, "home": False}}
# Previously supported monitored_conditions that are now rejected.
_BINARY_TYPES_DEPRECATED = [
    "hvac_ac_state",
    "hvac_aux_heater_state",
    "hvac_heater_state",
    "hvac_heat_x2_state",
    "hvac_heat_x3_state",
    "hvac_alt_heat_state",
    "hvac_alt_heat_x2_state",
    "hvac_emer_heat_state",
]
# Union of every supported type; used for validation and device_class lookup.
_VALID_BINARY_SENSOR_TYPES = {
    **BINARY_TYPES,
    **CLIMATE_BINARY_TYPES,
    **CAMERA_BINARY_TYPES,
    **STRUCTURE_BINARY_TYPES,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Nest binary sensors.
    No longer used.
    """
    # Intentionally empty: setup now happens through async_setup_entry.
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up a Nest binary sensor based on a config entry.

    Creates sensors for structures, thermostats, smoke/CO alarms and
    cameras based on the configured monitored conditions (or all valid
    conditions when none are configured).
    """
    nest = hass.data[DATA_NEST]
    discovery_info = hass.data.get(DATA_NEST_CONFIG, {}).get(CONF_BINARY_SENSORS, {})
    # Add all available binary sensors if no Nest binary sensor config is set
    if discovery_info == {}:
        conditions = _VALID_BINARY_SENSOR_TYPES
    else:
        conditions = discovery_info.get(CONF_MONITORED_CONDITIONS, {})
    for variable in conditions:
        if variable in _BINARY_TYPES_DEPRECATED:
            # Grammar fix: the message previously read "is no a longer
            # supported monitored_conditions".
            wstr = (
                f"{variable} is no longer a supported "
                "monitored_conditions. See "
                "https://www.home-assistant.io/integrations/binary_sensor.nest/ "
                "for valid options."
            )
            _LOGGER.error(wstr)

    def get_binary_sensors():
        """Get the Nest binary sensors."""
        sensors = []
        # Structure-level sensors (e.g. away state).
        for structure in nest.structures():
            sensors += [
                NestBinarySensor(structure, None, variable)
                for variable in conditions
                if variable in STRUCTURE_BINARY_TYPES
            ]
        # Device-level sensors for thermostats, smoke/CO alarms and cameras.
        device_chain = chain(nest.thermostats(), nest.smoke_co_alarms(), nest.cameras())
        for structure, device in device_chain:
            sensors += [
                NestBinarySensor(structure, device, variable)
                for variable in conditions
                if variable in BINARY_TYPES
            ]
            sensors += [
                NestBinarySensor(structure, device, variable)
                for variable in conditions
                if variable in CLIMATE_BINARY_TYPES and device.is_thermostat
            ]
            if device.is_camera:
                sensors += [
                    NestBinarySensor(structure, device, variable)
                    for variable in conditions
                    if variable in CAMERA_BINARY_TYPES
                ]
                # One extra motion sensor per configured camera zone.
                for activity_zone in device.activity_zones:
                    sensors += [
                        NestActivityZoneSensor(structure, device, activity_zone)
                    ]
        return sensors

    # Sensor discovery touches the (blocking) Nest API, so run it in an
    # executor job.
    async_add_entities(await hass.async_add_job(get_binary_sensors), True)
class NestBinarySensor(NestSensorDevice, BinarySensorEntity):
    """Represents a Nest binary sensor."""

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return self._state

    @property
    def device_class(self):
        """Return the device class of the binary sensor."""
        return _VALID_BINARY_SENSOR_TYPES.get(self.variable)

    def update(self):
        """Retrieve latest state."""
        raw = getattr(self.device, self.variable)
        if self.variable in STRUCTURE_BINARY_TYPES:
            # Structure attributes report strings ('away'/'home'); map
            # them onto booleans before storing.
            raw = STRUCTURE_BINARY_STATE_MAP[self.variable].get(raw)
        self._state = bool(raw)
class NestActivityZoneSensor(NestBinarySensor):
    """Represents a Nest binary sensor for activity in a zone."""

    def __init__(self, structure, device, zone):
        """Initialize the sensor for one camera activity zone."""
        # No variable: state comes from the zone query, not an attribute.
        super().__init__(structure, device, "")
        self.zone = zone
        self._name = "{} {} activity".format(self._name, self.zone.name)

    @property
    def unique_id(self):
        """Return unique id based on camera serial and zone id."""
        return "{}-{}".format(self.device.serial, self.zone.zone_id)

    @property
    def device_class(self):
        """Return the device class of the binary sensor."""
        return "motion"

    def update(self):
        """Retrieve latest state."""
        zone_id = self.zone.zone_id
        self._state = self.device.has_ongoing_motion_in_zone(zone_id)
|
ColOfAbRiX/cf-utils | cfutils/debug.py | <reponame>ColOfAbRiX/cf-utils
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
# Shared decorator state: current call-nesting depth (drives indentation of
# the log lines) and a flag noting whether logging has been configured.
log_indent = 0
log_status = None


def logfunction(func):
    """Decorator that logs a function's name, arguments and result.

    Nested decorated calls are indented according to their call depth.
    The first decorated call lazily configures file logging to
    /tmp/python-logging.log at DEBUG level.
    """
    from functools import wraps

    # wraps() preserves the wrapped function's __name__/__doc__, which the
    # original version lost.
    @wraps(func)
    def _wrapper(*args, **kwargs):
        global log_indent, log_status
        indent = " " * log_indent
        if log_status is None:
            # One-time logging setup on the first decorated call.
            logging.basicConfig(filename='/tmp/python-logging.log', level=logging.DEBUG)
            log_status = 1
        logging.info(indent + "Name   : %s" % func.__name__)
        logging.debug(indent + "Args #1: %s" % str(args))
        logging.debug(indent + "Args #2: %s" % str(kwargs))
        log_indent += 1
        result = func(*args, **kwargs)
        log_indent -= 1
        logging.debug(indent + "Result : %s" % str(result))
        return result
    return _wrapper
# vim: ft=python:ts=4:sw=4 |
ColOfAbRiX/cf-utils | cfutils/gitutils.py | <filename>cfutils/gitutils.py
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import git
from execute import exec_cmd
def is_git_repo(path=None):
    """
    Checks if the given directory (default: CWD) is part of a GIT repository

    :param path: directory to test; defaults to the current working directory
    :return: True when inside a repository, False otherwise
    """
    if path is None:
        path = os.getcwd()
    try:
        # Only the repository lookup matters; the repo object itself is
        # unused, so don't keep a dangling binding to it.
        git.Repo(path, search_parent_directories=True)
    except git.exc.InvalidGitRepositoryError:
        return False
    # NOTE(review): git.exc.NoSuchPathError still propagates when the path
    # does not exist -- confirm callers expect that rather than False.
    return True
def get_git_root(path=None):
    """
    Returns the root path of the GIT repository on the Current Working Directory
    """
    search_path = path if path is not None else os.getcwd()
    repo = git.Repo(search_path, search_parent_directories=True)
    # Ask git itself for the top-level directory of the work tree.
    return repo.git.rev_parse("--show-toplevel")
def exec_git(git_cmd):
    """
    Executes GIT with specific options and manages errors.

    :param git_cmd: arguments appended after the 'git' executable
    :return: the command's stdout
    :raises ScriptError: when git exits with a non-zero status
    """
    # Bug fix: ScriptError was referenced without being imported in this
    # module, so every git failure raised NameError instead.
    from common import ScriptError
    stdout, stderr, rc = exec_cmd("git %s" % git_cmd)
    if rc > 0:
        raise ScriptError("Error running command: \"git %s\"\nOutput: %s" % (git_cmd, stderr))
    return stdout
# vim: ft=python:ts=4:sw=4 |
ColOfAbRiX/cf-utils | cfutils/execute.py | #!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import print_function
import os
import shlex
import getpass
import subprocess as sp
from common import ScriptError
def paths_full(*chunks):
    """
    Joins together chunks of a path and return the absolute, clean path

    An absolute chunk restarts the path from that chunk (mirroring
    os.path.join semantics); '~' is expanded and relative results are
    anchored to the current working directory.
    """
    res = ""
    for path in chunks:
        # Bug fix: the absolute-path test used os.path.pathsep (the ':'
        # separator of $PATH-style lists) instead of os.sep, so it never
        # matched a real absolute path.
        if path.startswith(os.sep):
            res = path
        else:
            res = os.path.join(res, path)
    if '~' in res:
        res = os.path.expanduser(res)
    elif not res.startswith(os.sep):
        # Relative result: make it absolute (this also normalizes it).
        res = os.path.abspath(res)
    return res.replace('/./', '/')
def get_bin_path(arg, opt_dirs=None):
    """
    Find system executable in PATH.

    :param arg: name of the executable to look for
    :param opt_dirs: optional extra directories searched before $PATH
    :return: absolute path of the executable, or None when not found
    """
    # Use a fresh list per call: the previous mutable default ([]) would be
    # shared across invocations.
    if opt_dirs is None:
        opt_dirs = []
    sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
    paths = [d for d in opt_dirs if d is not None and os.path.exists(d)]
    paths += os.environ.get('PATH', '').split(os.pathsep)
    # mangle PATH to include /sbin dirs
    for p in sbin_paths:
        if p not in paths and os.path.exists(p):
            paths.append(p)
    for d in paths:
        if not d:
            continue
        path = os.path.join(d, arg)
        # Must exist, not be a directory, and be executable by us.
        if os.path.exists(path) and not os.path.isdir(path) and os.access(path, os.X_OK):
            return path
    return None
def switch_cmd(command, run_as=None, cwd=None, env=None):
    """
    Execute *command*, replacing the current process.

    :param command: command line to run
    :param run_as: user to run the command as (via sudo) when different from
        the current user
    :param cwd: working directory (defaults to the current one)
    :param env: extra environment variables merged into os.environ
    """
    # Bug fix: the original used a mutable default argument (env=[]).
    if env is None:
        env = {}
    # Change working directory
    if cwd is None:
        cwd = os.getcwd()
    os.chdir(cwd)
    # Set environment
    os.environ.update(env)
    env = os.environ.copy()
    # Check if sudo
    current_user = getpass.getuser()
    if run_as is not None and run_as not in [current_user, '']:
        command = "%s -Esu %s -- %s" % (get_bin_path("sudo"), run_as, command)
    command = shlex.split(command)
    # Bug fix: execv does not search PATH, so bare command names (when no sudo
    # prefix was added) failed; execvp searches PATH like a shell would.
    os.execvp(command[0], command)
def exec_cmd(command, run_as="", cwd=None, env=None, run_async=False):
    """
    Execute a system command, optionally as a specific user.

    :param command: shell command line to run
    :param run_as: user to run as (via sudo) when different from the current one
    :param cwd: working directory (defaults to the current one)
    :param env: environment mapping (defaults to a copy of os.environ)
    :param run_async: when True, return the Popen object without waiting.
        NOTE: this parameter was named 'async' originally, which is a reserved
        keyword since Python 3.7 and made the module unimportable.
    :return: (stdout, stderr, returncode) tuple, or the Popen object when
        run_async is True
    """
    # Change environment and working directory
    if env is None:
        env = os.environ.copy()
    if cwd is None:
        cwd = os.getcwd()
    env['PWD'] = cwd
    # Check if sudo
    current_user = getpass.getuser()
    if run_as not in [current_user, '']:
        command = "%s -su %s -- %s" % (get_bin_path("sudo"), run_as, command)
    p = sp.Popen(command, shell=True, cwd=cwd, env=env, stdout=sp.PIPE, stderr=sp.PIPE)
    # Managing output
    if run_async:
        return p
    p.wait()
    return (p.stdout.read().strip(), p.stderr.read().strip(), p.returncode)
# vim: ft=python:ts=4:sw=4 |
ColOfAbRiX/cf-utils | cfutils/sysinfo.py | <filename>cfutils/sysinfo.py
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import print_function
import crypt
import fcntl
import socket
import struct
def network_info():
    """
    Retrieve the network configuration of the host: default interface and
    gateway, plus the primary IP address and its netmask.

    :return: dict with 'ip', 'netmask', 'iface' and 'gateway' keys
    """
    output = {'ip': '127.0.0.1', 'netmask': '255.0.0.0', 'iface': '', 'gateway': ''}
    # Default interface and gateway, from the kernel routing table.
    with open("/proc/net/route") as f:
        for line in f.readlines():
            try:
                iface, dest, gateway, flags, _, _, _, _, _, _, _, = line.strip().split()
                # Only the default route (dest 0.0.0.0) with the RTF_GATEWAY
                # flag (0x2) set is of interest.
                if dest != '00000000' or not int(flags, 16) & 2:
                    continue
                output['iface'] = iface
                output['gateway'] = socket.inet_ntoa(struct.pack("<L", int(gateway, 16)))
            except (ValueError, IndexError):
                # Header line or malformed row: skip it. (The original used a
                # bare 'except', which also hid unrelated programming errors.)
                continue
    # IP Address and netmask
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Doesn't even have to be reachable
        s.connect(('10.255.255.255', 0))
        output['ip'] = s.getsockname()[0]
        # SIOCGIFNETMASK ioctl (35099 == 0x891b); the interface name must be
        # bytes on Python 3 (the original passed a str, raising TypeError).
        output['netmask'] = socket.inet_ntoa(fcntl.ioctl(
            socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
            35099,
            struct.pack('256s', output['iface'].encode('utf-8'))
        )[20:24])
    finally:
        s.close()
    return output
# vim: ft=python:ts=4:sw=4 |
ColOfAbRiX/cf-utils | cfutils/common.py | #!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import print_function
import crypt
import random
import string
import urllib2
from operator import *
from itertools import *
try:
import json
except ImportError:
import simplejson as json
class ScriptError(Exception):
    """
    Exception raised by the utility scripts.

    Keeps the message accessible as the 'message' attribute in addition to the
    standard Exception args.
    """
    def __init__(self, message, *args):
        super(ScriptError, self).__init__(message, *args)
        self.message = message
def funcall_info(frame):
    """
    Print the name and the arguments of the function call described by *frame*
    and return the list of (argument_name, value) pairs.
    """
    # Bug fix: 'inspect' was used but never imported at module level, raising
    # NameError on every call; a local import keeps this edit self-contained.
    import inspect
    args, _, _, values = inspect.getargvalues(frame)
    print(
        'Called "%s" with arguments (%s)' % (
            inspect.getframeinfo(frame)[2],
            ', '.join("%s=%s" % (n, str(values[n])) for n in args)
        )
    )
    return [(n, values[n]) for n in args]
def flatmap(f, items):
    """Apply *f* to every item and flatten the resulting iterables into one list."""
    # Bug fix: 'imap' only exists in Python 2's itertools; the built-in map is
    # already lazy on Python 3.
    return list(chain.from_iterable(map(f, items)))
def group_by(data, group_by=(), **aggregators):
    """
    SQL-like group by function.

    :param data: iterable of dicts to group
    :param group_by: sequence of key names to group on; must be non-empty
        (itemgetter with no keys raises TypeError)
    :param aggregators: name -> function(list_of_rows) computed per group
    :return: list of dicts, one per group, with the group keys and aggregates
    """
    # Bug fixes: the default was a mutable list, and .iteritems() is Python 2
    # only — both replaced with their safe Python 3 equivalents.
    result = []
    grouper = itemgetter(*group_by)
    for key, grp in groupby(sorted(data, key=grouper), grouper):
        if not isinstance(key, tuple):
            key = (key, )
        # Groupby key
        partial = dict(zip(group_by, key))
        # Aggregators
        grp = list(grp)
        for field, function in aggregators.items():
            partial[field] = function(grp)
        result.append(partial)
    return result
def build_regex(pattern, pattern_name=None, **kwargs):
    """
    Return the regex string, optionally wrapped in a named capture group.

    The pattern is first formatted with **kwargs.
    See: https://tonysyu.github.io/readable-regular-expressions-in-python.html
    """
    formatted = pattern.format(**kwargs)
    if pattern_name is None:
        return formatted
    return r'(?P<{name}>{pattern})'.format(name=pattern_name, pattern=formatted)
def union(a, b):
    """
    Return the union of two lists, preserving first-seen order.

    Non-list inputs fall back to the other argument (or [] when both are not
    lists). Neither input list is modified.
    """
    if not isinstance(a, list) and isinstance(b, list):
        return b
    if isinstance(a, list) and not isinstance(b, list):
        return a
    if not isinstance(a, list) and not isinstance(b, list):
        return []
    result = []
    # Bug fix: the original called a.extend(b), mutating the caller's list as
    # a side effect; concatenation builds a temporary instead.
    for x in a + b:
        if x not in result:
            result.append(x)
    return result
def merge(a, b):
    """
    Recursively merges hash b into a so that keys from b take precedence over keys from a
    See: https://github.com/ansible/ansible/blob/6787fc70a643fb6e2bdd2c6a6202072d21db72ef/lib/ansible/utils/vars.py
    """
    # if a is empty or equal to b, return b
    if a == {} or a == b:
        return b.copy()
    # if b is empty the below unfolds quickly
    result = a.copy()
    # next, iterate over b keys and values
    # (bug fix: .iteritems() is Python 2 only; .items() works on both)
    for k, v in b.items():
        # if there's already such key in a
        # and that key contains a dict on both sides
        if k in result and isinstance(result[k], dict) and isinstance(v, dict):
            # merge those dicts recursively
            result[k] = merge(result[k], v)
        else:
            # otherwise, just copy the value from b to a
            result[k] = v
    return result
def random_string(N):
    """Return a cryptographically random string of *N* ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(N))
def sha512_crypt(password, rounds):
    """
    Encrypt a password for the Linux shadow file using SHA-512 crypt.

    The rounds count is clamped to the valid [1000, 999999999] range and
    defaults to 5000 when falsy.
    See: https://stackoverflow.com/questions/34463134/sha-512-crypt-output-written-with-python-code-is-different-from-mkpasswd
    """
    effective_rounds = max(1000, min(999999999, rounds or 5000))
    salt = '$6$rounds={0}$'.format(effective_rounds) + random_string(8)
    return crypt.crypt(password, salt)
def rest_request(url, data=None):
    """
    Make a REST call to the server, optionally sending JSON data.

    :param url: endpoint to call
    :param data: mapping serialized as the JSON request body
    :return: the decoded JSON response
    :raises ScriptError: on HTTP, network or JSON decoding errors
    """
    # Bug fixes: urllib2 is Python 2 only, urlopen's data must be bytes on
    # Python 3, exceptions have no .message attribute there, and the default
    # argument was a mutable dict.
    from urllib.request import urlopen
    from urllib.error import HTTPError, URLError
    try:
        response = urlopen(url, data=json.dumps(data or {}).encode('utf-8'), timeout=5)
        return json.load(response)
    except (HTTPError, URLError, ValueError) as e:
        raise ScriptError(str(e))
# vim: ft=python:ts=4:sw=4 |
ColOfAbRiX/cf-utils | setup.py | <gh_stars>0
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
# Bug fix: find_packages was referenced below as setuptools.find_packages but
# only 'setup' was imported, raising NameError at build time.
from setuptools import setup, find_packages
setup(
    name='cfutils',
    version='1.0',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/ColOfAbRiX',
    description='Generic Python Utilities',
    long_description=open('README.md').read(),
    license='MIT',
    packages=find_packages(),
    install_requires=[
        'colored',
        'pycrypto',
        'GitPython'
    ],
)
# vim: ft=python:ts=4:sw=4 |
ColOfAbRiX/cf-utils | cfutils/formatting.py | #!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import print_function
import sys
import math
try:
import json
except ImportError:
import simplejson as json
try:
import colored
__colored__ = True
except ImportError:
__colored__ = False
def print_c(text, color=None, **kwargs):
    """
    Print *text*, colouring it when a colour is requested, the 'colored'
    package is available and stdout is a terminal.
    """
    use_colour = __colored__ and color is not None and sys.stdout.isatty()
    if use_colour:
        print(colored.fg(color), end='')
    print(text, **kwargs)
    if use_colour:
        print(colored.attr("reset"), end='')
    sys.stdout.flush()
def p_json(data, color=None):
    """
    Print JSON to the output: human formatted on a TTY, compact otherwise.
    """
    if not sys.stdout.isatty():
        print(json.dumps(data, separators=(',', ':')))
    else:
        print_c(format_json(data), color)
def format_json(data):
    """Return *data* serialized as human-readable, key-sorted, indented JSON."""
    return json.dumps(
        data,
        sort_keys=True,
        indent=2,
        separators=(',', ': '),
    )
def format_timedelta(seconds, lookup=None, sep=', '):
    """
    Format a duration into a human readable expanded format, with a precision
    of up to one microsecond.

    :param seconds: duration in seconds (may be fractional)
    :param lookup: optional custom conversion table; each entry needs the keys
        'divider', 'format', 'unit', 'units' and 'value'
    :param sep: separator between the formatted components
    :return: formatted string, "0s" for a zero duration
    """
    if lookup is None:
        # Bug fix: the original assigned the default table to a misspelled
        # 'loopkup' name, so passing a custom 'lookup' raised NameError (and
        # the parameter was effectively unusable).
        lookup = [
            {'divider': 1, 'format': '{0:.0f} {1}', 'unit': 'us', 'units': 'us', 'value': None},
            {'divider': 1000, 'format': '{0:.0f} {1}', 'unit': 'ms', 'units': 'ms', 'value': 0},
            {'divider': 1000, 'format': '{0:.0f} {1}', 'unit': 'sec', 'units': 'secs', 'value': 0},
            {'divider': 60, 'format': '{0:.0f} {1}', 'unit': 'min', 'units': 'mins', 'value': 0},
            {'divider': 60, 'format': '{0:.0f} {1}', 'unit': 'hour', 'units': 'hours', 'value': 0},
            {'divider': 24, 'format': '{0:.0f} {1}', 'unit': 'day', 'units': 'days', 'value': 0},
            {'divider': 7, 'format': '{0:.0f} {1}', 'unit': 'week', 'units': 'weeks', 'value': 0},
            {'divider': 4.348214, 'format': '{0:.0f} {1}', 'unit': 'month', 'units': 'months', 'value': 0},
            {'divider': 12, 'format': '{0:.0f} {1}', 'unit': 'year', 'units': 'years', 'value': 0},
        ]
    # Fill in 'value' for every unit, carrying the remainder into the smaller
    # unit as each bigger one is computed.
    for i, current in enumerate(lookup):
        if i == 0:
            current.update({'value': round(seconds * 1E+6)})
        else:
            previous = lookup[i - 1]
            current.update({'value': math.floor(previous['value'] / current['divider'])})
            previous.update({'value': previous['value'] - current['value'] * current['divider']})
    output = ""
    # Build the string from the smallest unit outwards, skipping zero values.
    for entry in lookup:
        if entry['value'] != 0:
            unit = entry['unit'] if entry['value'] == 1 else entry['units']
            formatted = entry['format'].format(entry['value'], unit)
            output = formatted if output == "" else formatted + sep + output
    if output == "":
        return "0s"
    return output
def format_filesize(num, suffix='B'):
    """
    Return *num* bytes as a human readable string with binary prefixes.
    See: https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    value = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    return "%.1f%s%s" % (value, 'Yi', suffix)
# vim: ft=python:ts=4:sw=4 |
openedx/edx-repo-health | repo_health/check_django_dependencies_compatibility.py | <gh_stars>1-10
"""
contains check that reads/parses dependencies of a repo
"""
import csv
import json
import logging
import os
import re
import tempfile
from pathlib import Path
from packaging.version import parse
import pytest
import requests
from pytest_repo_health import health_metadata
from repo_health import get_file_lines, DJANGO_DEPS_SHEET_URL, GITHUB_URL_PATTERN, PYPI_PACKAGE_PATTERN
logger = logging.getLogger(__name__)
MODULE_DICT_KEY = "django_packages"
@pytest.fixture(scope='session')  # pragma: no cover
def csv_filepath():
    """Session-scoped path of the temp CSV file caching the dependency sheet."""
    temp_dir = tempfile.mkdtemp()
    return os.path.join(temp_dir, "django_dependencies_sheet.csv")
@pytest.fixture(name='django_deps_sheet', scope="session")  # pragma: no cover
def django_dependency_sheet_fixture(csv_filepath):  # pylint: disable=redefined-outer-name
    """
    Return the path of the csv file which contains the django dependencies status.

    When the latest sheet can be fetched, its content is first written to the
    csv file; otherwise whatever is already on disk is used.
    """
    response = requests.get(DJANGO_DEPS_SHEET_URL)
    if response.status_code == 200:
        with open(csv_filepath, 'w', encoding="utf8") as csv_file:
            csv_file.write(response.text)
    return csv_filepath
class DjangoDependencyReader:
    """
    Reads the pinned Python requirements of a repository checkout into a
    {package_name: version} mapping.
    """

    def __init__(self, repo_path):
        self.repo_path = repo_path
        self.dependencies = {}

    def _is_python_repo(self) -> bool:
        # A repo counts as pythonic when it carries a requirements directory.
        return os.path.exists(os.path.join(self.repo_path, "requirements"))

    def _read_dependencies(self):
        """
        Parse every non-constraints requirements/*.txt file into self.dependencies.
        """
        requirements_dir = Path(os.path.join(self.repo_path, "requirements"))
        for candidate in requirements_dir.rglob('*.txt'):
            file_path = str(candidate)
            if 'constraints' in file_path:
                continue
            for raw_line in get_file_lines(file_path):
                requirement = self.strip_requirement(raw_line)
                if not requirement:
                    continue
                if 'git+http' in requirement:
                    name, version = self.extract_from_github_link(requirement)
                else:
                    name, version = self.extract_from_pypi_package(requirement)
                self.dependencies[name] = version

    @staticmethod
    def strip_requirement(line):
        """
        Return the cleaned requirement, or None when the line is empty, a
        comment, or a reference to another requirements file.
        """
        if not line or re.search('^[#-]', line):
            return None
        return re.sub(r' +[;#].*', "", line).replace('-e ', "")

    @staticmethod
    def extract_from_github_link(github_dep) -> tuple:
        """
        Extract the package name from a GitHub URL; the version is unknown.
        """
        match = re.search(GITHUB_URL_PATTERN, github_dep)
        if not match:
            return '', ''
        return match.group("package"), ''

    @staticmethod
    def extract_from_pypi_package(pypi_dependency) -> tuple:
        """
        Return (name, version) from a PyPI requirement, stripping whitespace
        and any version constraint from the name.
        """
        compact = "".join(pypi_dependency.split())
        match = re.match(PYPI_PACKAGE_PATTERN, compact)
        if not match:
            return '', ''
        return match.group('package_name'), match.group('version')

    def read(self) -> dict:
        """
        Entry method: read and return the dependencies mapping.
        """
        if not self._is_python_repo():
            return {}
        self._read_dependencies()
        return self.dependencies
def get_upgraded_dependencies_count(repo_path, django_dependency_sheet) -> tuple:
    """
    Entry point to read, parse and calculate django dependencies.

    @param repo_path: path for repo which we are calculating django deps
    @param django_dependency_sheet: csv which contains latest status of django deps
    @return: (all django deps, deps supporting Django 3.2, deps upgraded in repo)
    """
    repo_deps = DjangoDependencyReader(repo_path).read()
    django_deps = set()
    deps_support_django32 = set()
    upgraded_in_repo = set()
    with open(django_dependency_sheet, encoding="utf8") as csv_file:
        for row in csv.DictReader(csv_file, delimiter=',', quotechar='"'):
            package_name = row["Django Package Name"]
            if package_name not in repo_deps:
                continue
            django_deps.add(package_name)
            django32_version = row["Django 3.2"]
            # '-' marks packages with no Django 3.2 support recorded.
            if django32_version and django32_version != '-':
                deps_support_django32.add(package_name)
                if parse(repo_deps[package_name]) >= parse(django32_version):
                    upgraded_in_repo.add(package_name)
    return list(django_deps), list(deps_support_django32), list(upgraded_in_repo)
@health_metadata(
    [MODULE_DICT_KEY],
    {
        "total": "Dependencies that depend on Django",
        "django_32": "Dependencies that support Django 3.2",
        "upgraded": "Dependencies that are upgraded to support Django 3.2"
    },
)
def check_django_dependencies_status(repo_path, all_results, django_deps_sheet):
    """
    Record the Django dependencies compatibility status in all_results.
    """
    django_deps, support_django32_deps, upgraded_in_repo = get_upgraded_dependencies_count(
        repo_path, django_deps_sheet)
    results = {}
    for key, packages in (
        ('total', django_deps),
        ('django_32', support_django32_deps),
        ('upgraded', upgraded_in_repo),
    ):
        results[key] = {
            'count': len(packages),
            'list': json.dumps(packages),
        }
    all_results[MODULE_DICT_KEY] = results
|
openedx/edx-repo-health | repo_health/check_github_integration.py | <gh_stars>1-10
"""
Checks repository is on github actions workflow and tests are enabled.
"""
import json
import logging
import os
import re
import requests
from pytest_repo_health import add_key_to_metadata
logger = logging.getLogger(__name__)
module_dict_key = "github_actions"
URL_PATTERN = r"github.com[/:](?P<org_name>[^/]+)/(?P<repo_name>[^/]+).git"
def get_githubworkflow_api_response(repo_name, org_name='edx'):
    """
    Fetch the GitHub Actions workflows of a repository via the GitHub API.

    :param repo_name: name of the repository
    :param org_name: GitHub organization (defaults to 'edx' for backward
        compatibility; the caller extracts org_name but it was hard-coded here)
    :return: the requests.Response of the workflows API call
    """
    # For unauthenticated requests, the rate limit allows for up to 60 requests per hour.
    # https://developer.github.com/v3/#rate-limiting
    return requests.get(
        url=f'https://api.github.com/repos/{org_name}/{repo_name}/actions/workflows',
        headers={'Authorization': f'Bearer {os.environ["GITHUB_TOKEN"]}'}
    )
class GitHubIntegrationHandler:
    """
    Collects the GitHub Actions workflow CI integration information for a
    repository: the API call happens at construction, interpretation in handle().
    """

    def __init__(self, repo_name):
        self.repo_name = repo_name
        self.api_data = None
        self.github_actions = False
        self._set_github_actions_integration_data()

    def _set_github_actions_integration_data(self):
        # Fetch the workflows up-front; handle() interprets the response.
        self.api_response = get_githubworkflow_api_response(self.repo_name)

    def handle(self):
        """
        Parse the fetched workflows and flag active CI workflow integration.
        """
        if self.api_response.status_code != 200:
            logger.error(
                "An error occurred while fetching %s. status code %s content info %s.",
                self.repo_name,
                self.api_response.status_code,
                self.api_response.content
            )
            return
        self.api_data = json.loads(self.api_response.content)
        if not self.api_data or 'workflows' not in self.api_data:
            return
        ci_workflow_paths = (
            '.github/workflows/ci.yml',
            '.github/workflows/playbook-test.yml',
            '.github/workflows/syntax-test.yml',
        )
        # Non-empty list => at least one active CI workflow (truthiness is
        # what check_github_actions_integration relies on).
        self.github_actions = [
            True
            for workflow in self.api_data['workflows']
            if workflow['path'] in ci_workflow_paths and workflow['state'] == 'active'
        ]
@add_key_to_metadata((module_dict_key,))
def check_github_actions_integration(all_results, git_origin_url):
    """
    Record whether the repository has an active GitHub Actions CI workflow,
    along with the repository's organization name.
    """
    match = re.search(URL_PATTERN, git_origin_url)
    handler = GitHubIntegrationHandler(match.group("repo_name"))
    handler.handle()
    all_results[module_dict_key] = bool(handler.github_actions)
    all_results['org_name'] = match.group("org_name")
|
openedx/edx-repo-health | tests/test_check_renovate.py | <reponame>openedx/edx-repo-health
import os
from repo_health.check_renovate import (
check_renovate,
MODULE_DICT_KEY,
)
from unittest import mock, TestCase
def get_repo_path(repo_name):
    """Return the path of a fake repo stored next to the tests."""
    base_dir = os.path.dirname(__file__)
    return "/".join([base_dir, "fake_repos", repo_name])
async def mocked_responses(*args, **kwargs):
    """Async stub used in place of get_last_pull_date; always the same date."""
    fixed_date = "24-4-2016"
    return fixed_date
@mock.patch('repo_health.check_renovate.get_last_pull_date')
async def test_check_renovate_true(mock_get):
    """A repo carrying a renovate config reports 'configured' as True."""
    mock_get.return_value = await mocked_responses()
    results = {MODULE_DICT_KEY: {}}
    await check_renovate(results, repo_path=get_repo_path('renovate_repo1'), github_repo=None)
    assert results[MODULE_DICT_KEY]['configured'] == True
@mock.patch('repo_health.check_renovate.get_last_pull_date')
async def test_check_renovate_false(mock_get):
    """A repo without a renovate config reports 'configured' as False."""
    mock_get.return_value = await mocked_responses()
    results = {MODULE_DICT_KEY: {}}
    await check_renovate(results, repo_path=get_repo_path('js_repo'), github_repo=None)
    assert results[MODULE_DICT_KEY]['configured'] == False
|
openedx/edx-repo-health | repo_health/check_makefile.py | <filename>repo_health/check_makefile.py
"""
Checks to see if Makefile follows standards
"""
import re
import os
import pytest
from pytest_repo_health import health_metadata
from repo_health import get_file_content
# Key under which this module's results are stored in all_results.
module_dict_key = "makefile"
# Maps each expected Makefile target name to its human-readable description.
# Doubles as the health-metadata output keys and as the list of targets that
# check_has_make_target probes for.
output_keys = {
    "upgrade": "target that upgrades our dependencies to newer released versions",
    "test": "target that runs tests",
    "quality": "target that runs code quality checks",
    "test-js": "target that runs javascript unit tests",
    "quality-js": "target that runs javascript code quality checks",
    "test-python": "target that runs python unit tests",
    "quality-python": "target that runs python code quality checks",
}
@pytest.fixture(name='makefile')
def fixture_makefile(repo_path):
    """Fixture containing the text content of Makefile"""
    return get_file_content(os.path.join(repo_path, "Makefile"))
@health_metadata(
    [module_dict_key, "has_target"],
    output_keys
)
def check_has_make_target(makefile, all_results):
    """
    Check that the Makefile defines each of the expected targets, recording a
    boolean per target name.
    """
    # Idiom fix: the original iterated output_keys.items() and discarded the
    # values; only the target names are needed.
    for target in output_keys:
        # A target is present when a line starts with "<target>:".
        pattern = "^" + target + ":"
        all_results[module_dict_key][target] = bool(re.search(pattern, makefile, re.MULTILINE))
|
openedx/edx-repo-health | repo_health/check_docs.py | <reponame>openedx/edx-repo-health
"""
Check some details of Read The Docs integration.
"""
import json
import logging
import os.path
import re
import pytest
import requests
import yaml
from pytest_repo_health import health_metadata
from repo_health import fixture_readme, get_file_content # pylint: disable=unused-import
logger = logging.getLogger(__name__)
module_dict_key = "docs"
@health_metadata(
    [module_dict_key],
    {
        "build_badge": "Check that the README file has docs build badge"
    }
)
def check_build_bagde(readme, all_results):
    """
    Check that the README file has a docs build badge.

    NOTE(review): the function name misspells 'badge', but renaming it would
    change the public interface, so it is kept as-is.
    """
    if readme is None:
        return
    has_badge = re.search(r'image:: *https?://readthedocs\.org/projects', readme) is not None
    all_results[module_dict_key]["build_badge"] = has_badge
class ReadTheDocsChecker:
    """
    Handles all the operations related to Read the Docs checks: parsing the
    repo's .readthedocs.yml and querying the Read the Docs v3 API.
    """
    PROJECTS_URL = 'https://readthedocs.org/api/v3/projects/?limit=100'
    # Class-level cache of the projects list: one API call shared by all instances.
    _projects = None

    def __init__(self, repo_path=None, git_origin_url=None, token=None):
        self._yml_file_name = ".readthedocs.yml"
        self.repo_path = repo_path
        self.git_origin_url = git_origin_url
        self._token = token
        self._headers = {'Authorization': f'token {self._token}'}
        self.build_details = []

    def _read_readthedocs_yml_file(self):
        # Bug fix: the file name was hard-coded here instead of using
        # self._yml_file_name, silently ignoring the configured attribute.
        full_path = os.path.join(self.repo_path, self._yml_file_name)
        return get_file_content(full_path)

    def _parse_readthedocs_yml_file(self):
        """
        Parses the .readthedocs.yml file and returns the parsed data; an empty
        dict on a missing, empty or malformed file.
        """
        readthedocs_yml = self._read_readthedocs_yml_file()
        try:
            data = yaml.safe_load(readthedocs_yml)
            return {} if data is None else data
        except yaml.YAMLError:
            return {}

    @classmethod
    def _get_projects(cls, headers):
        """
        Lists all the projects related to the provided token (cached).
        """
        if cls._projects is not None:
            return cls._projects
        response = requests.get(cls.PROJECTS_URL, headers=headers)
        response.raise_for_status()
        cls._projects = response.json()['results']
        return cls._projects

    def _get_all_builds(self, slug):
        """
        Returns all build details for the project whose slug is provided.
        """
        build_url = f"https://readthedocs.org/api/v3/projects/{slug}/builds/"
        response = requests.get(build_url, headers=self._headers)
        response.raise_for_status()
        return response.json()['results']

    def get_python_version(self):
        """
        Returns the Python version mentioned in .readthedocs.yml, or None.
        """
        parsed_data = self._parse_readthedocs_yml_file()
        if "python" in parsed_data and "version" in parsed_data['python']:
            return parsed_data['python']['version']
        return None

    def update_build_details(self):
        """
        Updates the status of the latest Read the Docs build and when the last
        (and last successful) builds ran, for every project matching the repo.
        """
        self.build_details = []
        for item in self._get_projects(self._headers):  # pylint: disable=not-an-iterable
            if item['repository']['url'] != self.git_origin_url:
                continue
            all_builds = self._get_all_builds(item['slug'])
            # Builds are returned newest-first; assumes at least one build exists
            # for a connected project — TODO confirm against the API.
            last_build = all_builds[0]
            last_successful_build = next((build for build in all_builds if build['success']), None)
            self.build_details.append({
                'project': item['name'],
                'last_build_status': 'success' if last_build['success'] else 'failure',
                'last_build_time': last_build['created'],
                'last_good_build_time': last_successful_build['created'] if last_successful_build else None
            })
@health_metadata(
    [module_dict_key],
    {
        "python_version": "The version of Python mentioned in .readthedocs.yml file"
    }
)
def check_python_version(repo_path, all_results):
    """
    Check the Python version mentioned in .readthedocs.yml file.
    """
    checker = ReadTheDocsChecker(repo_path=repo_path)
    all_results[module_dict_key]["python_version"] = checker.get_python_version()
@health_metadata(
    [module_dict_key],
    {
        "build_details": "This contains the build details of all Read the Docs projects connected with the repo",
    }
)
def check_readthedocs_build(all_results, git_origin_url):
    """
    Checks the Read the Docs build status and when last build ran.
    """
    token = os.environ.get("READTHEDOCS_API_KEY")
    if token is None:
        logger.error("READTHEDOCS_API_KEY is missing in environment variables")
        pytest.skip("READTHEDOCS_API_KEY is missing in environment variables")
    checker = ReadTheDocsChecker(git_origin_url=git_origin_url, token=token)
    checker.update_build_details()
    all_results[module_dict_key]["build_details"] = json.dumps(checker.build_details)
|
openedx/edx-repo-health | repo_health/check_ownership.py | """
Checks to fetch repository ownership information from the Google Sheets speadsheet.
"""
import logging
import re
import os
import gspread
import pytest
from pytest_repo_health import health_metadata
from pytest_repo_health.fixtures.github import URL_PATTERN
logger = logging.getLogger(__name__)
MODULE_DICT_KEY = "ownership"
GOOGLE_CREDENTIALS = "REPO_HEALTH_GOOGLE_CREDS_FILE"
REPO_HEALTH_SHEET_URL = "REPO_HEALTH_OWNERSHIP_SPREADSHEET_URL"
REPO_HEALTH_WORKSHEET = "REPO_HEALTH_REPOS_WORKSHEET_ID"
class KnownError(Exception):
    """
    Known exception cases where we won't need a stack trace.
    """

    def __init__(self, message):
        self.message = message
        super().__init__(message)
def find_worksheet(google_creds_file, spreadsheet_url, worksheet_id):
    """
    Authenticate to Google and return the records of the matching worksheet.

    Raises KnownError when no worksheet with the given ID exists.
    """
    client = gspread.service_account(filename=google_creds_file)
    worksheets = client.open_by_url(spreadsheet_url).worksheets()
    matching = [sheet for sheet in worksheets if sheet.id == worksheet_id]
    if not matching:
        raise KnownError(f"Cannot find a worksheet with ID {worksheet_id}")
    expected_headers = ["repo url", "owner.theme", "owner.squad", "owner.priority"]
    return matching[0].get_all_records(expected_headers=expected_headers)
@health_metadata(
    [MODULE_DICT_KEY],
    {
        "theme": "Theme that owns the component",
        "squad": "Squad that owns the component",
        "priority": "How critical is the component to edX?",
        "description": "Description of the what the component is",
        "notes": "Notes maintained by the owner",
    },
)
def check_ownership(all_results, git_origin_url):
    """
    Get all the fields of interest from the tech ownership spreadsheet entry
    for the repository.
    """
    try:
        google_creds_file = os.environ[GOOGLE_CREDENTIALS]
        spreadsheet_url = os.environ[REPO_HEALTH_SHEET_URL]
        worksheet_id = int(os.environ[REPO_HEALTH_WORKSHEET])
    except KeyError:
        logger.error(
            "At least one of the following REPO_HEALTH_* environment variables is missing:\n %s \n %s \n %s",
            GOOGLE_CREDENTIALS, REPO_HEALTH_SHEET_URL, REPO_HEALTH_WORKSHEET
        )
        pytest.skip("At least one of the REPO_HEALTH_* environment variables is missing")
    match = re.search(URL_PATTERN, git_origin_url)
    assert match is not None
    repo_url = "https://github.com/{}/{}".format(match.group("org_name"), match.group("repo_name"))
    results = all_results[MODULE_DICT_KEY]
    for row in find_worksheet(google_creds_file, spreadsheet_url, worksheet_id):
        if row["repo url"] == repo_url:
            results["theme"] = row["owner.theme"]
            results["squad"] = row["owner.squad"]
            results["priority"] = row["owner.priority"]
|
openedx/edx-repo-health | repo_health/check_readme.py | """
Check some details in the readme file.
"""
import re
import urllib.parse
import requests
from pytest_repo_health import health_metadata
from repo_health import fixture_readme # pylint:disable=unused-import
# Key under which this module's results are stored in all_results.
module_dict_key = "readme"
# Good things should be there, and are True if they are present.
# Each entry maps a result key to its description and the regex alternatives
# that count as a match anywhere in the README.
GOOD_THINGS = {
    "security": {
        "description": "Has a security contact",
        "re": [
            # NOTE(review): '<EMAIL>' looks like a redacted address from the
            # dataset scrape — confirm the real security-contact pattern.
            r"<EMAIL>",
        ],
    },
    "getting-help": {
        "description": "Has a link to get help",
        "re": [
            r"https://open\.?edx\.org/getting-help",
        ],
    },
}
# Bad things should not be there, and are True if they are absent, so that all
# the values should be True.
BAD_THINGS = {
    "irc-missing": {
        "description": "Avoids obsolete IRC info",
        "re": [
            r"(?i)`#?edx-code`? IRC channel",
        ],
    },
    "mailing-list-missing": {
        "description": "Avoids obsolete mailing list info",
        "re": [
            r"https?://groups.google.com/forum/#!forum/edx-code",
        ],
    },
}
@health_metadata(
    [module_dict_key],
    {
        key: val["description"]
        for key, val in {**GOOD_THINGS, **BAD_THINGS}.items()
    }
)
def check_readme_contents(readme, all_results):
    """
    Check that the README file has or does not have desired or undesirable contents.
    """
    if readme is None:
        return

    def _matches(spec):
        # True when any of the spec's regexes appears in the README.
        return any(re.search(pattern, readme) for pattern in spec["re"])

    for key, spec in GOOD_THINGS.items():
        all_results[module_dict_key][key] = _matches(spec)
    for key, spec in BAD_THINGS.items():
        all_results[module_dict_key][key] = not _matches(spec)
# URLs have to start with a scheme, but can have lots of stuff in them. They
# have to end with word or slash, so that trailing punctuation won't be
# included.
URL_REGEX = r"https?://[\w._/?&%=@+\-\[\]]+[\w/]"
# Some links in READMEs are just examples, don't bother checking these domains.
# Entries starting with '.' match any subdomain (see is_example_url below).
EXAMPLE_DOMAINS = {
    "localhost",
    "127.0.0.1",
    "example.com",
    ".ngrok.io",
}
# If a URL has any weird meta-characters, it's not a real URL.
METACHARACTERS = r"[\[\]]"
def is_example_url(url):
    """
    Is this URL just an example, no need to check it?
    """
    if re.search(METACHARACTERS, url):
        return True
    parts = urllib.parse.urlparse(url)
    # URL_REGEX can match URLs with an empty host (e.g. "http:///path"), for
    # which urlparse returns hostname=None; the original then crashed on
    # None.endswith below.  A host-less URL can't be fetched, so skip it.
    if parts.hostname is None:
        return True
    for domain in EXAMPLE_DOMAINS:
        if domain == parts.hostname:
            return True
        # Leading-dot entries match any subdomain of that suffix.
        if domain.startswith(".") and parts.hostname.endswith(domain):
            return True
    return False
@health_metadata(
    [module_dict_key],
    {
        "bad_links": "Links in the README that can't be fetched.",
        "good_links": "Links in the README that are good.",
    }
)
def check_readme_links(readme, all_results):
    """
    Check that the links in the README actually work.

    Each unique, non-example URL is fetched with a HEAD request (following
    redirects); 2xx responses are recorded under "good_links", everything
    else (including request failures) under "bad_links" with the reason.
    """
    if readme is None:
        return
    seen = set()
    bad = all_results[module_dict_key]["bad_links"] = []
    good = all_results[module_dict_key]["good_links"] = []
    for url in re.findall(URL_REGEX, readme):
        if url in seen:
            continue
        seen.add(url)
        if is_example_url(url):
            continue
        try:
            # A timeout keeps an unresponsive server from hanging the whole
            # check; timeouts raise RequestException and are reported as bad.
            resp = requests.head(url, allow_redirects=True, timeout=30)
        except requests.RequestException as e:
            bad.append(f"{url}: {e}")
            continue
        # Only 2xx statuses are successes; the original "<= 300" wrongly
        # accepted 300 Multiple Choices as a good link.
        if 200 <= resp.status_code < 300:
            good.append(url)
        else:
            bad.append(f"{url}: {resp.status_code}")
|
openedx/edx-repo-health | tests/test_check_github_integration.py | <gh_stars>1-10
import os
from repo_health.check_github_integration import check_github_actions_integration, module_dict_key
from unittest import mock
class MockResponse:
    """Minimal stand-in for ``requests.Response`` used by the mocked API calls."""

    def __init__(self, content, status_code):
        # ``content`` doubles as the payload returned by ``json()``.
        self.content = content
        self.status_code = status_code

    def json(self):
        """Return the stored payload, mirroring ``requests.Response.json()``."""
        return self.content
def mocked_responses(*args, **kwargs):
    """
    Fake the GitHub workflows API: return canned fixture JSON for the two
    known repos, and a 404 MockResponse for any other URL.
    """
    current_dir = os.path.dirname(__file__)
    fixtures = {
        'https://api.github.com/repos/edx/integrated/actions/workflows': 'data/github_integrated.json',
        'https://api.github.com/repos/edx/not_integrated/actions/workflows': 'data/github_not_integrated.json',
    }
    fixture = fixtures.get(kwargs['url'])
    if fixture is not None:
        # "with" ensures the fixture file handle is closed (the original
        # leaked the open handles).
        with open(os.path.join(current_dir, fixture), 'r') as workflows_data:
            return MockResponse(workflows_data.read(), 200)
    return MockResponse(None, 404)
@mock.patch('repo_health.check_github_integration.get_githubworkflow_api_response')
def test_check_github_integration_true(mock_get):
    """An integrated repo's workflow list should mark the check result True."""
    mock_get.return_value = mocked_responses(url='https://api.github.com/repos/edx/integrated/actions/workflows')
    all_results = {module_dict_key: {}}
    # Plain string literal: the original used an f-string with no placeholders.
    check_github_actions_integration(all_results, git_origin_url="github.com/edx/integrated.git")
    assert all_results[module_dict_key] == True
    assert all_results["org_name"] == 'edx'
@mock.patch('repo_health.check_github_integration.get_githubworkflow_api_response')
def test_check_github_integration_false(mock_get):
    """A repo without the expected workflows should mark the check result False."""
    mock_get.return_value = mocked_responses(url='https://api.github.com/repos/edx/not_integrated/actions/workflows')
    all_results = {module_dict_key: {}}
    # Plain string literal: the original used an f-string with no placeholders.
    check_github_actions_integration(all_results, git_origin_url="github.com/edx/not_integrated.git")
    assert all_results[module_dict_key] == False
    assert all_results["org_name"] == 'edx'
|
openedx/edx-repo-health | tests/test_check_makefile.py | import os
import pytest
from repo_health.check_makefile import (
module_dict_key,
check_has_make_target,
output_keys,
)
def get_repo_path(repo_name):
    """Return the path of ``repo_name`` inside this tests directory."""
    return "/".join((os.path.dirname(__file__), repo_name))
@pytest.mark.parametrize("fake_repo, flag_list", [
    ("makefile_repo1",
     {"upgrade": True,
      "quality": True,
      "test": True,
      "test-js": False,
      "test-python": False,
      "quality-js": False,
      "quality-python": False
      }),
    ("makefile_repo2",
     {"upgrade": True,
      "quality": True,
      "test": False,
      "test-js": False,
      "test-python": False,
      "quality-js": True,
      "quality-python": False
      })])
def test_check_file_existence(fake_repo, flag_list):
    """Each fixture repo's Makefile should yield exactly the expected target flags."""
    repo_path = get_repo_path('fake_repos/'+ fake_repo)
    all_results = {module_dict_key:{}}
    # "with" closes the Makefile handle (the original leaked it).
    with open(repo_path+'/Makefile', 'r') as file:
        check_has_make_target(file.read(), all_results)
    # Only the keys are needed; the descriptions in output_keys are unused here.
    for key in output_keys:
        assert all_results[module_dict_key][key] == flag_list[key]
|
openedx/edx-repo-health | repo_health/check_existence.py | <filename>repo_health/check_existence.py
"""
Functions to check the existence of files.
"""
from pytest_repo_health import health_metadata
from .utils import dir_exists, file_exists
# Key under which all existence check results are stored in the results dict.
module_dict_key = "exists"
# Root-level files that should exist; key -> human-readable description
# (descriptions feed the check metadata).
req_files = {
    "openedx.yaml": "openedx.yaml contains repository metadata as outlined in OEP-2",
    "Makefile": "Make targets",
    "tox.ini": "Tox configuration",
    ".travis.yml": "Travis configuration",
    "CHANGELOG.rst": "Change history",
    "pylintrc": "Pylint configuration",
    "setup.cfg": "Application setup configuration",
    "setup.py": "Application setup",
    ".coveragerc": "Test coverage configuration",
    ".editorconfig": "IDE configuration",
    ".pii_annotations.yml": "PII annotations as outline in OEP-0030",
    ".gitignore": "git ignore configuration",
    "package.json": "packages managed by npm",
    "transifex_config": "transifex config file"
}
# Root-level directories that should exist.
req_dirs = {
    "requirements": "separate folder for requirement files",
}
# Nested paths that should exist (checked by check_path_existence).
req_paths = [
    # Tuple is path-to-file, key-name, description.
    (".github/workflows/commitlint.yml", "commitlint.yml", "GitHub Action to check conventional commits"),
]
@health_metadata(
    [module_dict_key],
    req_files
)
def check_file_existence(repo_path, all_results):
    """
    Checks repository contains file which is not empty at root level

    Records one boolean per entry in ``req_files``.
    """
    # Only the names are needed; iterating the dict directly avoids
    # unpacking an unused description (the original used ``.items()``).
    for file_name in req_files:
        all_results[module_dict_key][file_name] = file_exists(
            repo_path, file_name
        )
@health_metadata(
    [module_dict_key],
    req_dirs
)
def check_dir_existence(repo_path, all_results):
    """
    Checks whether repository contains required folders at root level

    Records one boolean per entry in ``req_dirs``.
    """
    # Only the names are needed; iterating the dict directly avoids
    # unpacking an unused description (the original used ``.items()``).
    for dir_name in req_dirs:
        all_results[module_dict_key][dir_name] = dir_exists(
            repo_path, dir_name
        )
@health_metadata(
    [module_dict_key],
    {key: desc for _, key, desc in req_paths},
)
def check_path_existence(repo_path, all_results):
    """
    Checks whether the repo contains required files at deep levels.

    Records one boolean per (path, key, description) tuple in ``req_paths``.
    """
    for nested_path, result_key, _ in req_paths:
        all_results[module_dict_key][result_key] = file_exists(repo_path, nested_path)
@health_metadata(
    [module_dict_key],
    {"README": "Basic level of documentation in the form of README.rst or README.md"}
)
def check_readme_existence(repo_path, all_results):
    """
    Check if README exists in repository.
    """
    candidates = ('README.rst', 'README.md')
    found = any(file_exists(repo_path, candidate) for candidate in candidates)
    all_results[module_dict_key]['README'] = found
@health_metadata(
    [module_dict_key],
    {"transifex_config": "transifex config file"}
)
def check_transifex_config_existence(repo_path, all_results):
    """
    Check if transifex config exists in repository.
    """
    all_results[module_dict_key]['transifex_config'] = file_exists(repo_path, '.tx/config')
|
openedx/edx-repo-health | tests/test_check_docs.py | <gh_stars>1-10
import os
import pytest
import responses
import json
from repo_health.check_docs import check_build_bagde, check_python_version, module_dict_key, ReadTheDocsChecker
def get_repo_path(repo_name):
    """Return the path of ``repo_name`` inside this tests directory."""
    return "/".join((os.path.dirname(__file__), repo_name))
@pytest.mark.parametrize("fake_repo, expected_result", [
    ("read_the_docs", True),
    ("no_read_the_docs", False)])
def test_check_build_badge(fake_repo, expected_result):
    """README build-badge detection should match the fixture repo."""
    repo_path = get_repo_path('fake_repos/'+ fake_repo)
    all_results = {module_dict_key:{}}
    # "with" closes the README handle (the original leaked it).
    with open(repo_path+'/README.rst','r') as file:
        check_build_bagde(file.read(), all_results)
    assert all_results[module_dict_key]["build_badge"] == expected_result
@pytest.mark.parametrize("fake_repo, expected_result", [
    ("read_the_docs",3.8),
    ("no_read_the_docs",None)])
def test_check_python_version(fake_repo, expected_result):
    """Detected python version should match the fixture repo (None when absent)."""
    results = {module_dict_key: {}}
    check_python_version(get_repo_path('fake_repos/' + fake_repo), results)
    assert results[module_dict_key]["python_version"] == expected_result
@responses.activate
def test_check_readthedocs_build_success():
    """A successful latest build should be reflected in the build details."""
    current_dir = os.path.dirname(__file__)
    # "with" blocks close the fixture files (the original leaked both handles).
    with open(os.path.join(current_dir, 'data/readthedocs_projects.json'), 'r') as projects_data:
        responses.add(responses.GET, 'https://readthedocs.org/api/v3/projects/?limit=100',
                      json=json.load(projects_data))
    with open(os.path.join(current_dir, 'data/readthedocs_builds_success.json'), 'r') as builds_data:
        responses.add(responses.GET, "https://readthedocs.org/api/v3/projects/testing-demo/builds/",
                      json=json.load(builds_data))
    checker = ReadTheDocsChecker(git_origin_url="https://github.com/readthedocs/readthedocs.git", token='token')
    checker.update_build_details()
    all_results = {module_dict_key: {}}
    all_results[module_dict_key]["build_details"] = json.dumps(checker.build_details)
    assert all_results[module_dict_key]["build_details"] == '[{"project": "testing-demo", "last_build_status": "success", "last_build_time": "2021-06-11T12:31:31.357860Z", "last_good_build_time": "2021-06-11T12:31:31.357860Z"}]'
@responses.activate
def test_check_readthedocs_build_failure():
    """A failed latest build should keep the older good build time."""
    current_dir = os.path.dirname(__file__)
    # "with" blocks close the fixture files (the original leaked both handles).
    with open(os.path.join(current_dir, 'data/readthedocs_projects.json'), 'r') as projects_data:
        responses.add(responses.GET, 'https://readthedocs.org/api/v3/projects/?limit=100',
                      json=json.load(projects_data))
    with open(os.path.join(current_dir, 'data/readthedocs_builds_failure.json'), 'r') as builds_data:
        responses.add(responses.GET, "https://readthedocs.org/api/v3/projects/testing-demo/builds/",
                      json=json.load(builds_data))
    checker = ReadTheDocsChecker(git_origin_url="https://github.com/readthedocs/readthedocs.git", token='token')
    checker.update_build_details()
    all_results = {module_dict_key: {}}
    all_results[module_dict_key]["build_details"] = json.dumps(checker.build_details)
    assert all_results[module_dict_key]["build_details"] == '[{"project": "testing-demo", "last_build_status": "failure", "last_build_time": "2021-06-11T12:31:31.357860Z", "last_good_build_time": "2021-03-12T20:36:20.239344Z"}]'
|
openedx/edx-repo-health | repo_health/__init__.py | <gh_stars>0
"""
This package contains checks for edx repo standards
"""
import codecs
import os
from configparser import ConfigParser
import glob
import pytest
import dockerfile
# Package version; bumped on release.
__version__ = "0.2.3"
# Matches GitHub requirement URLs of the form "github.com/org/repo...#egg=package".
GITHUB_URL_PATTERN = r"github.com[/:](?P<org_name>[^/]+)/(?P<repo_name>[^/]+).*#egg=(?P<package>[^\/]+).*"
# Matches pinned PyPI requirements of the form "package==version".
PYPI_PACKAGE_PATTERN = r"(?P<package_name>[^\/]+)==(?P<version>[^\/]+)"
# CSV export of the spreadsheet tracking Django dependency status.
DJANGO_DEPS_SHEET_URL = "https://docs.google.com/spreadsheets/d/" \
"19-BzpcX3XvqlazHcLhn1ZifBMVNund15EwY3QQM390M/export?format=csv"
def parse_config_file(path):
    """
    Parse an INI-style config file at ``path`` into a ConfigParser.

    A missing file yields an empty parser rather than an error.
    Used for pytest fixtures.
    """
    parser = ConfigParser()
    if not os.path.exists(path):
        return parser
    parser.read(path)
    return parser
def get_file_content(path):
    """
    Return the text of the UTF-8 file at ``path``, or "" when it doesn't exist.
    Used for pytest fixtures.
    """
    if os.path.exists(path):
        with codecs.open(path, "r", "utf-8") as file_handle:
            return file_handle.read()
    return ""
def get_file_lines(path):
    """
    Return the lines of the UTF-8 file at ``path`` with surrounding
    whitespace stripped, or [] when the file doesn't exist.
    Used for pytest fixtures.
    """
    if not os.path.exists(path):
        return []
    with codecs.open(path, "r", "utf-8") as file_handle:
        return [line.strip() for line in file_handle]
def get_file_names(path, file_type):
    """
    Get a list of files with given file_type in path's directory and its subdirectories

    If the directory is large, this might take forever, so use with care.
    """
    # os.path.join inserts the separator the original concatenation dropped
    # when ``path`` lacked a trailing slash: "path**/*.ext" does not match
    # anything under ``path`` itself, so files were silently missed.
    path_pattern = os.path.join(path, "**", "*." + file_type)
    return glob.glob(path_pattern, recursive=True)
@pytest.fixture(name='readme')
def fixture_readme(repo_path):
    """Fixture producing the text of the readme file."""
    # The checks only care about the README's contents, not its name, so
    # probe the common spellings in order.
    for candidate in ("README.rst", "README.md", "README.txt", "README"):
        try:
            with open(os.path.join(repo_path, candidate), encoding="utf-8") as readme_file:
                return readme_file.read()
        except FileNotFoundError:
            continue
    # No README at all, so nothing to check.
    return None
def read_docker_file(path):
    """
    Parse the Dockerfile at ``path`` with the ``dockerfile`` package.

    Returns None when no file exists at ``path``.
    """
    if os.path.exists(path):
        return dockerfile.parse_file(path)
    return None
|
openedx/edx-repo-health | tests/test_check_ownership.py | import csv
import os
from repo_health.check_ownership import check_ownership, MODULE_DICT_KEY
from unittest import mock, TestCase
def mocked_responses(*args, **kwargs):
    """Return the ownership CSV fixture as a list of per-row dicts."""
    data_path = os.path.join(os.path.dirname(__file__), 'data/ownership_data.csv')
    with open(data_path, 'r') as csv_file:
        rows = csv.reader(csv_file)
        headers = next(rows)
        return [dict(zip(headers, row)) for row in rows]
@mock.patch('repo_health.check_ownership.find_worksheet')
@mock.patch.dict(os.environ, {"REPO_HEALTH_GOOGLE_CREDS_FILE": "test", "REPO_HEALTH_OWNERSHIP_SPREADSHEET_URL": "test", "REPO_HEALTH_REPOS_WORKSHEET_ID": "23"})
def test_check_ownership(mock_get):
    """Ownership fields should be pulled from the spreadsheet fixture row."""
    mock_get.return_value = mocked_responses()
    all_results = {MODULE_DICT_KEY: {}}
    # Plain string literal: the original used an f-string with no placeholders.
    check_ownership(all_results, git_origin_url="github.com/edx/ownership_repo1.git")
    assert all_results[MODULE_DICT_KEY]['theme'] == 'openedx'
    assert all_results[MODULE_DICT_KEY]['squad'] == 'arch'
    assert all_results[MODULE_DICT_KEY]['priority'] == 'High'
|
openedx/edx-repo-health | tests/test_check_travis_yml.py | import os
import pytest
from repo_health.check_travis_yml import (
check_yaml_parsable,
check_has_tests_with_py38,
check_travis_python_versions,
fixture_parsed_data,
fixture_python_version,
fixture_travis_yaml,
module_dict_key
)
def get_repo_path(repo_name):
    """Return the path of ``repo_name`` under this test directory's fake_repos."""
    return "/".join((os.path.dirname(__file__), "fake_repos", repo_name))
@pytest.mark.parametrize("repo_path, result", [
    (get_repo_path('travis_repo1'), True),
    (get_repo_path('travis_repo2'), True),
    (get_repo_path('docs_repo'), False)
])
def test_check_yaml_parsable(travis_yml, result):
    """.travis.yml parsability should match the fixture repo."""
    results = {module_dict_key: {}}
    check_yaml_parsable(travis_yml, results)
    assert results[module_dict_key]['parsable'] == result
@pytest.mark.parametrize("repo_path, result", [
    (get_repo_path('travis_repo1'), True),
    (get_repo_path('travis_repo2'), False),
    (get_repo_path('docs_repo'), False),
])
def test_check_has_tests_with_py38(python_versions_in_travis, result):
    """Python 3.8 test detection should match the fixture repo."""
    results = {module_dict_key: {}}
    check_has_tests_with_py38(python_versions_in_travis, results)
    assert results[module_dict_key]['py38_tests'] == result
@pytest.mark.parametrize("repo_path, result_list", [
    (get_repo_path('travis_repo1'), [3.6, 3.8]),
    (get_repo_path('travis_repo2'), [3.5]),
    (get_repo_path('docs_repo'), [])
])
def test_check_travis_python_versions(python_versions_in_travis, result_list):
    """The detected Travis python versions should match the expected list exactly."""
    all_results = {module_dict_key: {}}
    check_travis_python_versions(python_versions_in_travis, all_results)
    # Compare the full collections: the original membership loop passed
    # vacuously when nothing was detected, and never caught missing versions.
    assert sorted(all_results[module_dict_key]['python_versions']) == sorted(result_list)
|
openedx/edx-repo-health | repo_health_dashboard/utils/utils.py | """
utils used to create dashboard
"""
import csv
import html
import os
def squash_dict(input_dict, delimiter="."):
    """
    Takes very nested dict(metadata_by_repo inside of metadata_by_repo) and squashes it to only one level

    Nested keys are joined with ``delimiter``.  For example:
    for input: {'a':{'b':'1', 'c':{'d':'2'}, 'f':[1,2,3]}, 'e':2}
    the output: {'a.f': [1, 2, 3], 'e': 2, 'a.b': '1', 'a.c.d': '2'}
    """
    output = {}
    for key, value in input_dict.items():
        if isinstance(value, dict):
            # Pass the delimiter down: the original recursion dropped it, so
            # levels deeper than the first were always joined with "." even
            # when a custom delimiter was requested.
            for sub_key, sub_value in squash_dict(value, delimiter).items():
                output[key + delimiter + sub_key] = sub_value
        else:
            output[key] = value
    return output
def get_superset_of_keys(dicts):
    """
    Return the union of all keys found across the dicts in ``dicts``' values.
    """
    keys = set()
    for sub_dict in dicts.values():
        keys |= set(sub_dict.keys())
    return keys
def standardize_metadata_by_repo(metadata_by_repo):
    """
    Give every repo's (already squashed, one-level) metadata dict the same keys.

    The superset of keys across all repos is computed; any key a repo is
    missing is filled in with None.

    TODO(jinder): standardize is not the right name,
    there is a better word for: making all metadata_by_repo have the same keys
    """
    all_keys = set()
    for repo_metadata in metadata_by_repo.values():
        all_keys.update(repo_metadata.keys())
    base = dict.fromkeys(all_keys, None)
    standardized = {}
    for repo_name, repo_metadata in metadata_by_repo.items():
        merged = dict(base)
        merged.update(repo_metadata)
        standardized[repo_name] = merged
    return standardized
def squash_and_standardize_metadata_by_repo(metadata_by_repo):
    """
    Squash every repo's metadata to one level, then give all repos the same keys.

    Note: the squashing step replaces each entry of ``metadata_by_repo`` in place.
    """
    for repo_name in metadata_by_repo:
        metadata_by_repo[repo_name] = squash_dict(metadata_by_repo[repo_name])
    return standardize_metadata_by_repo(metadata_by_repo)
def get_sheets(parsed_yaml_file, sheet_name):
    """
    Build the configuration dict for one output sheet.

    The rest of the system expects the keys "check_order", "repo_name_order"
    and "key_aliases" to exist, so they are filled with empty defaults when
    the YAML omits them.
    """
    raw = parsed_yaml_file[sheet_name]
    sheet_configuration = dict(raw)
    sheet_configuration["check_order"] = raw.get("check_order", [])
    sheet_configuration["repo_name_order"] = raw.get("repo_name_order", [])
    sheet_configuration["key_aliases"] = raw.get("key_aliases", {})
    return sheet_configuration
def write_squashed_metadata_to_csv(metadata_by_repo, filename, configuration, append):
    """
    Write one CSV row per repo (plus a header row when starting a new file).

    Assume all the metadata_by_repo have the same keys.  Columns follow the
    configured "check_order" first, then the remaining keys sorted — unless
    "subset" is set, which restricts output to check_order only.
    """
    superset_keys = get_superset_of_keys(metadata_by_repo)
    for key in configuration["check_order"]:
        superset_keys.discard(key)
    if configuration.get("subset", False):
        sorted_keys = configuration["check_order"]
    else:
        sorted_keys = configuration["check_order"] + list(sorted(superset_keys))
    # change key names to its alias for display(csv header row)
    sorted_aliased_keys = [
        configuration["key_aliases"].get(key, key) for key in sorted_keys
    ]
    mode = 'a' if append else 'w'
    # newline="" is required by the csv module: without it the writer emits
    # an extra blank line between rows on Windows.
    with open(filename + ".csv", mode, encoding="utf8", newline="") as csvfile:
        writer = csv.writer(csvfile)
        # In case of appending an empty file, we still need headers
        if not append or os.path.getsize(filename + ".csv") == 0:
            csv_header = ["repo_name"] + sorted_aliased_keys
            writer.writerow(csv_header)
        # TODO(jinder): order repos based on configuration["repo_name_order"]
        for repo_name, item in metadata_by_repo.items():
            writer.writerow(
                [repo_name] + [item.get(k) for k in sorted_keys]
            )
def write_squashed_metadata_to_html(metadata_by_repo=None, filename="dashboard.html"):
    """
    Write HTML report of repo metadata (takes output of squash-and-standardize).

    Produces one table row per repo with one column per (standardized) key.
    """
    if not metadata_by_repo:
        metadata_by_repo = {}
    # sorted() already returns a list; the original wrapped the set in an
    # extra, redundant list() call.
    sorted_key_tuples = sorted(get_superset_of_keys(metadata_by_repo))
    with open(filename + ".html", "w", encoding="utf8") as f:
        f.write(
            """<!DOCTYPE html>
<html lang="en">
<head>
<title>Repo health dashboard</title>
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(100, 100, 100);
font-family: sans-serif;
}
td, th {
border: 1px solid rgb(100, 100, 100);
padding: .5rem .5rem;
}
td {
text-align: center;
}
caption {
padding: .5rem;
caption-side: top;
text-align: start;
}
</style>
</head>
<body>\n"""
        )
        f.write("""<table>\n""")
        f.write(
            "<caption>Results of health checks for various repositories</caption>\n"
        )
        f.write("<thead>\n")
        f.write("""  <tr>\n""")
        f.write("""    <th scope="col">Repository</th>\n""")
        for k in sorted_key_tuples:
            f.write(f"""    <th scope="col">{html.escape(k)}</th>\n""")
        f.write("  </tr>\n")
        f.write("</thead>\n")
        f.write("<tbody>\n")
        # TODO(timmc): Sort rows by repo name
        for dict_name, item in metadata_by_repo.items():
            f.write("  <tr>\n")
            f.write(f"""    <th scope="row">{html.escape(dict_name)}</th>\n""")
            for k in sorted_key_tuples:
                f.write(f"""    <td><pre>{html.escape(str(item[k]))}</pre></td>\n""")
            f.write("  </tr>\n")
        f.write("</tbody>\n")
        f.write("</table>\n")
        f.write("</body></html>\n")
|
openedx/edx-repo-health | tests/test_check_github.py | #!/usr/bin/env python3
"""Testing functions in repo_health/check_github.py"""
from unittest.mock import Mock
from repo_health.check_github import (
check_settings,
MODULE_DICT_KEY,
repo_license_exemptions,
)
async def test_check_settings_license_exemption_present():
    """
    Test to make sure having an exemption in repo_license_exemptions results in change of license in all_results dict
    """
    all_results = {MODULE_DICT_KEY: {}}
    github_repo_mock = Mock()
    test_repo = "test_repo"
    test_org = "test_org"
    test_license = "test_license"
    repo_license_exemptions[test_repo] = {
        "license": test_license,
        "owner": test_org,
        "more_info": "Bah",
    }
    github_repo_mock.object.name = test_repo
    github_repo_mock.object.owner.login = test_org
    github_repo_mock.object.license = None
    try:
        await check_settings(all_results, github_repo_mock)
    finally:
        # Always undo the module-global mutation, even if the check raises;
        # the original skipped cleanup on failure and leaked state into
        # other tests.
        del repo_license_exemptions[test_repo]
    assert "license" in all_results["github"]
    assert all_results["github"]["license"] == test_license
async def test_check_settings_no_license_exemption_present():
    """
    Test to make sure exemptions code does not make any changes when no exemption is present.
    """
    all_results = {MODULE_DICT_KEY: {}}
    github_repo_mock = Mock()
    # make sure test_repo is not in repo_license_exemption,
    # if it is, you might want to change name of test_repo
    test_repo = "test_repo"
    assert test_repo not in repo_license_exemptions
    github_repo_mock.object.license = None
    await check_settings(all_results, github_repo_mock)
    assert "license" in all_results["github"]
    assert all_results["github"]["license"] is None
|
openedx/edx-repo-health | tests/test_check_readme.py | <gh_stars>1-10
import re
from unittest.mock import patch
import requests
import responses
from repo_health.check_readme import check_readme_links, module_dict_key
@responses.activate
def test_one_good_one_bad():
    """Good URLs should land in good_links and unreachable ones in bad_links."""
    # If you attempt to fetch a url which isn't registered, responses will raise a ConnectionError:
    bad_url = "http://badurl.com/"
    good_url = "http://goodurl.org"
    # Any url at good_url will be OK.
    good_match = re.compile(f"{good_url}(/.*)?")
    responses.add(responses.HEAD, good_match, status=200)
    test_readme = f"""
        {bad_url} is the place to go for more info.
        {good_url} is better.
        End of a sentence is ok: {good_url}.
        Commas are fine: {good_url}, {bad_url}.
        In parens is ok: ({good_url}).
        {good_url}/page?x=1&y=2 is also a URL,
        and {good_url}/page/another/ will be fine.
        """
    all_results = {module_dict_key: {}}
    check_readme_links(test_readme, all_results)
    assert good_url in all_results[module_dict_key]["good_links"]
    bad_links = all_results[module_dict_key]["bad_links"]
    # (Removed leftover debug print of bad_links.)
    assert len(bad_links) == 1
    assert bad_links[0].startswith(f"{bad_url}: Connection refused by Responses")
|
openedx/edx-repo-health | tests/test_check_openedx_yaml.py | import os
import pytest
from repo_health.check_openedx_yaml import (
check_oeps,
check_obsolete_fields,
check_release_maybe,
check_release_ref,
check_yaml_parsable,
module_dict_key,
fixture_openedx_yaml,
fixture_oeps,
fixture_parsed_data,
output_keys,
obsolete_fields,
)
def get_repo_path(repo_name):
    """Return the path of ``repo_name`` under this test directory's fake_repos."""
    return "/".join((os.path.dirname(__file__), "fake_repos", repo_name))
@pytest.mark.parametrize("repo_path, result", [
    (get_repo_path('openedx_repo1'),'master'),
    (get_repo_path('openedx_repo2'),''),
])
def test_check_release_ref(parsed_data, result):
    """The release ref pulled from openedx.yaml should match the fixture."""
    results = {module_dict_key: {}}
    check_release_ref(parsed_data, results)
    assert results[module_dict_key]['release'] == result
@pytest.mark.parametrize("repo_path, result", [
    (get_repo_path('openedx_repo1'),True),
    (get_repo_path('openedx_repo2'),False)
])
def test_check_release_maybe(parsed_data, result):
    """The release-maybe flag from openedx.yaml should match the fixture."""
    results = {module_dict_key: {}}
    check_release_maybe(parsed_data, results)
    assert results[module_dict_key]['release-maybe'] == result
@pytest.mark.parametrize("repo_path, result", [
    (get_repo_path('openedx_repo1'),True),
    (get_repo_path('openedx_repo2'),True),
    (get_repo_path('docs_repo'), False),
])
def test_check_yaml_parsable(openedx_yaml, result):
    """openedx.yaml parsability should match the fixture repo."""
    results = {module_dict_key: {}}
    check_yaml_parsable(openedx_yaml, results)
    assert results[module_dict_key]['parsable'] == result
@pytest.mark.parametrize("repo_path, result_list", [
    (get_repo_path('openedx_repo1'),{
        "oep-2":True,
        "oep-7":False,
        "oep-18":True,
        "oep-30":False
    }),
    (get_repo_path('openedx_repo2'),{
        "oep-2":True,
        "oep-7":True,
        "oep-18":False,
        "oep-30":False
    })
])
def test_check_oeps(oeps, result_list):
    """Each OEP compliance flag should match the expected value."""
    all_results = {module_dict_key:{}}
    check_oeps(oeps, all_results)
    # Only the keys are needed; the descriptions in output_keys are unused here.
    for key in output_keys:
        assert all_results[module_dict_key][key] == result_list[key]
@pytest.mark.parametrize("repo_path, result_list", [
    (get_repo_path('openedx_repo1'),['nick']),
    (get_repo_path('openedx_repo2'),['nick', 'supporting-teams'])
])
def test_check_obsolete_fields(parsed_data, result_list):
    """Every obsolete field reported for the fixture should be an expected one."""
    results = {module_dict_key: {}}
    check_obsolete_fields(parsed_data, results)
    reported = results[module_dict_key]['obsolete_fields'].split()
    for field in reported:
        assert field in result_list
|
openedx/edx-repo-health | tests/test_ubuntu_dependencies.py | <filename>tests/test_ubuntu_dependencies.py
import os
from repo_health.check_ubuntufiles import get_apt_get_txt, get_docker_file_content
def get_repo_path(repo_name):
    """Return the path of ``repo_name`` inside this tests directory."""
    return "/".join((os.path.dirname(__file__), repo_name))
def test_loading_docker_file_check():
    """All expected apt packages should be parsed out of the fixture Dockerfile."""
    repo_path = get_repo_path('fake_repos/python_repo')
    dependencies = get_docker_file_content(repo_path)
    # The original also asserted 'libssl-dev' separately before the loop,
    # which the loop below already covers.
    for data in ['git-core', 'language-pack-en', 'python3.8', 'python3-pip', 'libssl-dev']:
        assert data in dependencies
def test_no_docker_file_check():
    """A repo without a Dockerfile should yield None."""
    assert get_docker_file_content(get_repo_path('fake_repos/docs_repo')) is None
def test_empty_docker_file_check():
    """A repo with an empty Dockerfile should yield None."""
    assert get_docker_file_content(get_repo_path('fake_repos/kodejail')) is None
def test_docker_different_format():
    """A Dockerfile written in an alternate format should still be parsed."""
    parsed = get_docker_file_content(get_repo_path('fake_repos/python_js_repo'))
    assert 'curl' in parsed
def test_loading_apt_packages_txt():
    """Packages listed in apt-packages.txt should be found."""
    packages = get_apt_get_txt(get_repo_path('fake_repos/python_repo'))
    assert 'curl' in packages
def test_loading_apt_packages_txt_root_path():
    """An apt packages file at the repo root should also be found."""
    packages = get_apt_get_txt(get_repo_path('fake_repos/python_js_repo'))
    assert 'python-numpy' in packages
def test_not_available_apt_packages_txt():
    """A repo with no apt packages file should yield an empty list."""
    assert get_apt_get_txt(get_repo_path('fake_repos/js_repo')) == []
|
openedx/edx-repo-health | tests/test_check_dependencies.py | import os
from repo_health.check_dependencies import get_dependencies
def get_repo_path(repo_name):
    """Return the path of ``repo_name`` inside this tests directory."""
    return "/".join((os.path.dirname(__file__), repo_name))
def test_python_js_repo_dependency_check():
    """A mixed python/js repo should report both ecosystems' dependencies."""
    dependencies = get_dependencies(get_repo_path('fake_repos/python_js_repo'))
    assert 'fs==2.0.18' in dependencies["pypi_all"]["list"]
    assert "react-redux" in dependencies["js"]["list"]
    assert dependencies["count"] == 350
    expected_counts = {
        "pypi_all": 299,
        "github": 14,
        "js.all": 10,
        "js": 26,
        "pypi": 225,
    }
    for section, count in expected_counts.items():
        assert dependencies[section]["count"] == count
def test_js_repo_dependency_check():
    """A js-only repo should report js dependencies and zero python ones."""
    dependencies = get_dependencies(get_repo_path('fake_repos/js_repo'))
    assert 'core-js' in dependencies["js"]["list"]
    assert 'jest' in dependencies["js.dev"]["list"]
    assert 'babel' in dependencies["js.all"]["list"]
    assert dependencies["count"] == 37
    expected_counts = {
        "js": 26,
        "js.dev": 11,
        "pypi_all": 0,
        "js.all": 12,
        "pypi": 0,
    }
    for section, count in expected_counts.items():
        assert dependencies[section]["count"] == count
def test_python_repo_dependency_check():
    """A python-only repo should report pypi/github dependencies and zero js ones."""
    dependencies = get_dependencies(get_repo_path('fake_repos/python_repo'))
    assert 'django==2.2.24' in dependencies["pypi_all"]["list"]
    assert 'git+https://github.com/edx/credentials-themes.git@0.1.62#egg=edx_credentials_themes==0.1.62' \
           in dependencies["github"]["list"]
    expected_counts = {
        "pypi_all": 65,
        "github": 1,
        "js": 0,
        "pypi": 8,
    }
    for section, count in expected_counts.items():
        assert dependencies[section]["count"] == count
|
openedx/edx-repo-health | repo_health/check_npm_package.py | """
Checks package published name on npm.
"""
import json
import os
import pytest
from pytest_repo_health import health_metadata
from repo_health import get_file_content
module_dict_key = "npm_package"
def get_dependencies(repo_path):
    """
    entry point to read parse and read dependencies
    @param repo_path: root of the repository checkout
    @return: parsed package.json as a dict ({} when absent or empty).
    """
    full_path = os.path.join(repo_path, "package.json")
    # Read once and reuse: the original fetched the file contents a second
    # time just to parse them.
    content = get_file_content(full_path)
    if content:
        return json.loads(content)
    return {}
@pytest.fixture(name='content')
def fixture_npm_package(repo_path):
    """Fixture yielding the parsed package.json of the repo under test."""
    return get_dependencies(repo_path)
@health_metadata(
    [module_dict_key],
    {
        "npm_package": "package name published on npm."
    })
def check_npm_package(content, all_results):
    """
    Record the npm-published package name when it carries the @edx/ scope,
    or '' otherwise (including when package.json has no name at all).
    """
    package_name = content.get('name', '')
    all_results[module_dict_key] = package_name if '@edx/' in package_name else ''
|
openedx/edx-repo-health | tests/test_npm_package_name.py | import os
from repo_health.check_npm_package import get_dependencies, check_npm_package, module_dict_key
def get_repo_path(repo_name):
    """Return the path of ``repo_name`` inside this tests directory."""
    return "/".join((os.path.dirname(__file__), repo_name))
def test_valid_npm_package_name():
    """A package.json with an @edx/-scoped name should be recorded as-is."""
    content = get_dependencies(get_repo_path('fake_repos/js_repo'))
    all_results = {module_dict_key: {}}
    check_npm_package(content, all_results)
    assert all_results["npm_package"] == '@edx/fakename@1.1.0'
def test_invalid_npm_package_name():
    """A package name outside the @edx/ scope should record an empty string."""
    content = get_dependencies(get_repo_path('fake_repos/python_js_repo'))
    all_results = {module_dict_key: {}}
    check_npm_package(content, all_results)
    assert all_results["npm_package"] == ''
def test_no_package_name():
    """A repo without a package.json name should record an empty string."""
    content = get_dependencies(get_repo_path('fake_repos/just_setup_py'))
    all_results = {module_dict_key: {}}
    check_npm_package(content, all_results)
    assert all_results["npm_package"] == ''
|
openedx/edx-repo-health | repo_health/check_dependencies.py | """
contains check that reads/parses dependencies of a repo
"""
import copy
import json
import logging
import os
import re
from abc import ABC, abstractmethod
from pathlib import Path
from pytest_repo_health import health_metadata
from repo_health import get_file_lines, get_file_content
logger = logging.getLogger(__name__)
# Key under which all dependency results are stored in the results dict.
module_dict_key = "dependencies"
# Result skeleton: a total count plus per-ecosystem {count, list} sections.
# Readers fill in the sections they know about; the rest keep these defaults.
default_output = {
    "count": 0,
    "github": {
        "count": 0,
        "list": ""
    },
    "pypi_all": {
        "count": 0,
        "list": ""
    },
    "pypi": {
        "count": 0,
        "list": ""
    },
    "js": {
        "count": 0,
        "list": "",
    },
    "js.dev": {
        "count": 0,
        "list": ""
    },
    "js.all": {
        "count": 0,
        "list": ""
    }
}
class DependencyReader(ABC):
    """
    Abstract base for per-ecosystem dependency readers.

    Subclasses implement ``read`` to parse their ecosystem's files under
    the repository checkout.
    """
    def __init__(self, repo_path):
        # Root of the repository checkout being inspected.
        self._repo_path = repo_path
    @abstractmethod
    def read(self) -> dict:
        """
        entry point method of the class
        """
        raise NotImplementedError
class JavascriptDependencyReader(DependencyReader):
    """
    Javascript dependency reader class

    Parses package.json (and package-lock.json when present) into the
    per-ecosystem sections of the results dict.
    """
    def __init__(self, repo_path):
        super().__init__(repo_path)
        self.js_dependencies = None
        self.js_dev_dependencies = None
        self.js_dependencies_all = {}
        self.js_dependencies_count = 0
        self.js_dev_dependencies_count = 0
    def _is_js_repo(self) -> bool:
        # A repo counts as JS when it has a top-level package.json.
        return os.path.exists(os.path.join(self._repo_path, "package.json"))
    def _read_dependencies(self) -> dict:
        """
        method processing javascript dependencies file
        """
        # "with" closes the handle; the original leaked it behind a
        # pylint consider-using-with disable.
        with open(os.path.join(self._repo_path, "package.json"), 'r', encoding="utf8") as package_json_file:
            package_json_content = package_json_file.read()
        package_json_data = json.loads(package_json_content)
        self.js_dependencies = package_json_data.get('dependencies', {})
        self.js_dev_dependencies = package_json_data.get('devDependencies', {})
        self.js_dependencies_count = len(self.js_dependencies)
        self.js_dev_dependencies_count = len(self.js_dev_dependencies)
        # package-lock.json pins the full resolved tree, when it exists.
        package_lock_content = get_file_content(os.path.join(self._repo_path, "package-lock.json"))
        if package_lock_content:
            package_lock_data = json.loads(package_lock_content)
            for dependency, details in package_lock_data.get('dependencies', {}).items():
                self.js_dependencies_all[dependency] = details["version"]
        return {
            "count": self.js_dependencies_count + self.js_dev_dependencies_count,
            "js": {
                "count": self.js_dependencies_count,
                "list": json.dumps(self.js_dependencies),
            },
            "js.dev": {
                "count": self.js_dev_dependencies_count,
                "list": json.dumps(self.js_dev_dependencies)
            },
            "js.all": {
                "count": len(self.js_dependencies_all),
                "list": json.dumps(self.js_dependencies_all)
            }
        }
    def read(self) -> dict:
        # Non-JS repos contribute nothing (empty dict keeps defaults intact).
        if not self._is_js_repo():
            return {}
        return self._read_dependencies()
class PythonDependencyReader(DependencyReader):
    """
    Reads Python dependencies from the repo's requirements/*.txt files.
    """

    def __init__(self, repo_path):
        super().__init__(repo_path)
        self.github_dependencies = None   # deduplicated git+ requirements
        self.pypi_dependencies = None     # deduplicated pinned PyPI requirements (all files)
        self.production_packages = None   # deduplicated pinned requirements, production files only

    def _is_python_repo(self) -> bool:
        """Return True when the repo has a requirements/ directory."""
        return os.path.exists(os.path.join(self._repo_path, "requirements"))

    def _read_dependencies(self) -> dict:
        """
        Parse the requirements .txt files (excluding constraints/pins) and
        return counts plus JSON-encoded lists of GitHub and PyPI packages.
        """
        pypi_packages = []
        github_packages = []
        production_packages = []

        files = [str(file) for file in Path(os.path.join(self._repo_path, "requirements")).rglob('*.txt')]
        constraints_files = ("constraints.txt", "pins.txt",)
        requirement_files = [file for file in files if not file.endswith(constraints_files)]
        for file_path in requirement_files:
            # Reuse cleanup_lines instead of duplicating its stripping logic inline.
            cleaned = self.cleanup_lines(get_file_lines(file_path))
            github_packages.extend(cleaned["github"])
            pypi_packages.extend(cleaned["pypi"])
        self.github_dependencies = list(set(github_packages))
        self.pypi_dependencies = list(set(pypi_packages))

        # services have production.txt and base.txt but packages have only base.txt
        # so if both appeared only pick production.
        # few packages have development.txt or dev.txt also.
        priority_list = ["production.txt", "base.txt", "development.txt", "dev.txt"]
        for file_name in priority_list:
            requirement_files = [file for file in files if file.endswith(file_name)]
            if requirement_files:
                break

        if not requirement_files:
            logger.error("No production.txt or base.txt files found for this repo %s", self._repo_path)

        for file_path in requirement_files:
            lines = get_file_lines(file_path)
            stripped_lines = self.cleanup_lines(lines)
            production_packages.extend(stripped_lines["pypi"])
        self.production_packages = list(set(production_packages))

        return {
            "github": {
                "count": len(self.github_dependencies),
                # Dump the deduplicated list so it matches "count"; previously
                # the raw, possibly-duplicated list was dumped while the count
                # (and the pypi lists) were deduplicated.
                "list": json.dumps(self.github_dependencies),
            },
            "pypi_all": {
                "count": len(self.pypi_dependencies),
                "list": json.dumps(self.pypi_dependencies),
            },
            "pypi": {
                "count": len(self.production_packages),
                "list": json.dumps(self.production_packages),
            },
            "count": len(self.pypi_dependencies) + len(self.github_dependencies)
        }

    def cleanup_lines(self, lines):
        """
        remove un-necessary strings from lines.
        @return: dependencies_output

        Strips trailing " # ..." comments and "-e " prefixes, drops blank
        and comment-only lines, then splits the remainder into git+ URLs
        and pinned ("==") PyPI requirements.
        """
        stripped_lines = [
            re.sub(r' +#.*', "", line).replace('-e ', "")
            for line in lines if line and not line.startswith("#")
        ]
        github_packages = [line for line in stripped_lines if re.match(r'^git\+.*', line)]
        return {
            'github': github_packages,
            'pypi': [line for line in stripped_lines if line not in github_packages and "==" in line]
        }

    def read(self) -> dict:
        """Return the Python dependency report, or {} for non-Python repos."""
        if not self._is_python_repo():
            return {}
        return self._read_dependencies()
def get_dependencies(repo_path) -> dict:
    """
    Run every registered DependencyReader against repo_path and merge
    their reports into a single dict.

    @param repo_path: path of the repository checkout to inspect
    @return: dependencies_output dict (see default_output for the shape)
    """
    output = copy.deepcopy(default_output)
    total = 0
    # Readers register themselves simply by subclassing DependencyReader.
    for reader_cls in DependencyReader.__subclasses__():
        result = reader_cls(repo_path).read()
        if result:
            total += result.get('count', 0)
            output.update(result)
    # The merged "count" is the sum across readers, not any single reader's.
    output.update({"count": total})
    return output
# health_metadata documents each key of the result dict for the dashboard.
@health_metadata(
    [module_dict_key],
    {
        "count": "count of total dependencies",
        "pypi_all.count": "count of PyPI packages",
        "pypi_all.list": "list of PyPI packages with required versions of all files",
        "pypi.count": "count of PyPI packages only production files.",
        "pypi.list": "list of PyPI packages with required versions only production files.",
        "github.count": "count of GitHub packages",
        "github.list": "list of GitHub packages",
        "js.count": "count of javascript dependencies",
        "js.list": "list of javascript dependencies",
        "js.dev": "list of javascript development dependencies"
    },
)
def check_dependencies(repo_path, all_results):
    """
    Test to find the dependencies of the repo

    Stores the full report produced by get_dependencies() under the
    module's key ("dependencies") in all_results.
    """
    all_results[module_dict_key] = get_dependencies(repo_path)
|
openedx/edx-repo-health | tests/test_check_ubuntufiles.py | import os
import re
import pytest
from repo_health.check_ubuntufiles import PlaybookAPTPackagesReader, VARIABLE_PATTERN
def get_repo_path(repo_name):
    """Return the path of *repo_name* relative to this test directory."""
    return os.path.dirname(__file__) + "/" + repo_name
def test_playbooks_apt_reader():
    """The playbook reader collects the expected apt packages per play."""
    reader = PlaybookAPTPackagesReader(get_repo_path('fake_repos/configuration'))
    reader.update_packages_from_playbooks()
    packages = reader.packages_from_playbooks

    for pkg in ('apparmor-utils', 'python3.8-distutils', 'curl'):
        assert pkg in packages["edxapp"]
    assert len(packages["edxapp"]) == 13

    for pkg in ('mongodb-org-shell=4.0.22', 'mongodb-org=4.0.22', 'jq'):
        assert pkg in packages["mongo_4_0"]
    assert len(packages["mongo_4_0"]) == 6
@pytest.mark.parametrize("variable, expected", [
    ("{{ debian_pkgs }}", True),
    ("{{ debian_pkgs + focal_only_pkgs }}", True),
    ("{{','.join(openstack_debian_pkgs)}}", True),
    ("debian_pkgs", False),
    ("{{ debian_pkgs", False),
    ("debian_pkgs }}", False),
])
def test_variable_pattern_regex(variable, expected):
    """VARIABLE_PATTERN matches only fully-braced variable expressions."""
    assert bool(re.match(VARIABLE_PATTERN, variable)) is expected
@pytest.mark.parametrize("variable, expected", [
    ("{{ debian_pkgs }}", ["debian_pkgs", None]),
    ("{{ debian_pkgs + focal_only_pkgs }}", ["debian_pkgs", "focal_only_pkgs"]),
    ("{{','.join(openstack_debian_pkgs)}}", ["openstack_debian_pkgs", None]),
    ("debian_pkgs", [None, None]),
    ("{{ debian_pkgs", [None, None]),
])
def test_variable_pattern_regex_groups(variable, expected):
    """The named groups capture the first and optional second variable name."""
    match = re.match(VARIABLE_PATTERN, variable)
    if match is None:
        # No match at all: both captured names are reported as None.
        assert expected == [None, None]
    else:
        assert [match['var_name'], match['var2_name']] == expected
|
openedx/edx-repo-health | tests/test_check_tox_ini.py | import os
import pytest
from repo_health.check_tox_ini import (
module_dict_key,
check_has_sections,
check_whitelist_externals,
fixture_tox_ini,
)
def get_repo_path(repo_name):
    """Return the path of a fake-repo fixture used by these tests."""
    return os.path.dirname(__file__) + f"/fake_repos/{repo_name}"
@pytest.mark.parametrize("repo_path, result", [
    (get_repo_path('tox_repo1'), True),
    (get_repo_path('tox_repo2'), False),
])
def test_check_whitelist_externals(tox_ini, result):
    """whitelist_externals detection matches each fixture repo's tox.ini."""
    # `tox_ini` is the fixture_tox_ini fixture, built from `repo_path`.
    all_results = {module_dict_key: {}}
    check_whitelist_externals(tox_ini, all_results)
    assert all_results[module_dict_key]['uses_whitelist_externals'] == result
@pytest.mark.parametrize("repo_path, result_list", [
    (get_repo_path('tox_repo1'), {
        "tox": True,
        "testenv": True,
        "testenv:quality": False,
    }),
    (get_repo_path('tox_repo2'), {
        "tox": True,
        "testenv": True,
        "testenv:quality": True,
    }),
])
def test_check_has_sections(tox_ini, result_list):
    """Per-section presence flags match each fixture repo's tox.ini."""
    all_results = {module_dict_key: {}}
    check_has_sections(tox_ini, all_results)
    section_flags = all_results[module_dict_key]['has_section']
    for section, expected in result_list.items():
        assert section_flags[section] == expected
|
openedx/edx-repo-health | setup.py | <reponame>openedx/edx-repo-health
#!/usr/bin/env python
"""
Package metadata for repo_health.
"""
import os
import re
import sys
from setuptools import setup
def get_version(file_path):
    """
    Extract the version string from the file at the given relative path fragments.

    @param file_path: path relative to this setup.py's directory
    @return: the version string captured from a ``__version__ = "..."`` line
    @raises RuntimeError: when no version assignment is found
    """
    filename = os.path.join(os.path.dirname(__file__), file_path)
    # Context manager closes the file handle; the old code leaked it and
    # silenced pylint instead of fixing the leak.
    with open(filename, encoding="utf8") as version_file:
        version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
def load_requirements(*requirements_paths):
    """
    Load all requirements from the specified requirements files.
    Requirements will include any constraints from files specified
    with -c in the requirements files.
    Returns a list of requirement strings.
    """
    # UPDATED VIA SEMGREP - if you need to remove/modify this method remove this line and add a comment specifying why.
    # Minor change to add encodings in calls to open() to appease pylint
    # package name -> version-constraint string (or None when unconstrained)
    requirements = {}
    # paths of constraint files referenced via "-c" lines, processed second
    constraint_files = set()

    # groups "my-package-name<=x.y.z,..." into ("my-package-name", "<=x.y.z,...")
    requirement_line_regex = re.compile(r"([a-zA-Z0-9-_.]+)([<>=][^#\s]+)?")

    def add_version_constraint_or_raise(current_line, current_requirements, add_if_not_present):
        # Parse one requirement line and record its constraint. When
        # add_if_not_present is False (constraint-file pass) only packages
        # already collected from the .in files are updated.
        regex_match = requirement_line_regex.match(current_line)
        if regex_match:
            package = regex_match.group(1)
            version_constraints = regex_match.group(2)
            existing_version_constraints = current_requirements.get(package, None)
            # it's fine to add constraints to an unconstrained package, but raise an error if there are already
            # constraints in place
            if existing_version_constraints and existing_version_constraints != version_constraints:
                raise BaseException(f'Multiple constraint definitions found for {package}:'
                                    f' "{existing_version_constraints}" and "{version_constraints}".'
                                    f'Combine constraints into one location with {package}'
                                    f'{existing_version_constraints},{version_constraints}.')
            if add_if_not_present or package in current_requirements:
                current_requirements[package] = version_constraints

    # process .in files and store the path to any constraint files that are pulled in
    for path in requirements_paths:
        with open(path, encoding="utf8") as reqs:
            for line in reqs:
                if is_requirement(line):
                    add_version_constraint_or_raise(line, requirements, True)
                # "-c <file>" pulls in a local constraints file ("-c http..." is remote and skipped)
                if line and line.startswith('-c') and not line.startswith('-c http'):
                    constraint_files.add(os.path.dirname(path) + '/' + line.split('#')[0].replace('-c', '').strip())

    # process constraint files and add any new constraints found to existing requirements
    for constraint_file in constraint_files:
        with open(constraint_file, encoding="utf8") as reader:
            for line in reader:
                if is_requirement(line):
                    add_version_constraint_or_raise(line, requirements, False)

    # process back into list of pkg><=constraints strings
    constrained_requirements = [f'{pkg}{version or ""}' for (pkg, version) in sorted(requirements.items())]
    return constrained_requirements
def is_requirement(line):
    """
    Return True if the requirement line is a package requirement.

    Returns:
        bool: True if the line is not blank, a comment,
        a URL, or an included file
    """
    # UPDATED VIA SEMGREP - if you need to remove/modify this method remove this line and add a comment specifying why
    # Excluded prefixes: "-r"/"-c" file includes, "#" comments, "-e"/"git+" editable/VCS installs.
    return line and line.strip() and not line.startswith(('-r', '#', '-e', 'git+', '-c'))
# Package version, read from the package's __init__.py.
VERSION = get_version("repo_health/__init__.py")

if sys.argv[-1] == "tag":
    # `python setup.py tag` creates an annotated git tag for the current
    # version and pushes it. f-string replaces the old %-formatting that
    # needed a pylint disable.
    print("Tagging the version on github:")
    os.system(f"git tag -a {VERSION} -m 'version {VERSION}'")
    os.system("git push --tags")
    sys.exit()
# Long description for PyPI, taken from the README. A context manager closes
# the file handle; the old one-liner leaked it and silenced pylint instead.
with open(os.path.join(os.path.dirname(__file__), "README.rst"), encoding="utf8") as readme_file:
    README = readme_file.read()

setup(
    name="edx-repo-health",
    version=VERSION,
    description="""blah blah blah""",
    long_description=README,
    author="edX",
    author_email="<EMAIL>",
    url="https://github.com/edx/edx-repo-health",
    include_package_data=True,
    install_requires=load_requirements("requirements/base.in"),
    packages=["repo_health_dashboard", "repo_health_dashboard.utils"],
    python_requires=">=3.8",
    license="Apache Software License 2.0",
    zip_safe=False,
    keywords="Django edx",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
    ],
    entry_points={
        "console_scripts": [
            "repo_health_dashboard = repo_health_dashboard.repo_health_dashboard:main",
            "run_checks = scripts.run_checks:main",
        ]
    },
)
|
openedx/edx-repo-health | tests/test_check_existence.py | import os
import pytest
from repo_health.check_existence import (
check_readme_existence,
check_dir_existence,
check_file_existence,
check_path_existence,
req_dirs,
req_files,
req_paths,
module_dict_key,
check_transifex_config_existence
)
def get_repo_path(repo_name):
    """Return the path of *repo_name* relative to this test directory."""
    return os.path.dirname(__file__) + "/" + repo_name
@pytest.mark.parametrize("fake_repo, flag_list", [
    ("kodegail", {
        "openedx.yaml": False,
        "Makefile": False,
        "tox.ini": False,
        ".travis.yml": False,
        "README.rst": False,
        "CHANGELOG.rst": False,
        "pylintrc": False,
        "setup.cfg": True,
        "setup.py": True,
        ".coveragerc": False,
        ".editorconfig": False,
        ".pii_annotations.yml": False,
        ".gitignore": False,
        "package.json": False,
        "transifex_config": False,
    }),
    ("python_js_repo", {
        "openedx.yaml": False,
        "Makefile": False,
        "tox.ini": False,
        ".travis.yml": False,
        "README.rst": False,
        "CHANGELOG.rst": False,
        "pylintrc": False,
        "setup.cfg": False,
        "setup.py": False,
        ".coveragerc": False,
        ".editorconfig": False,
        ".pii_annotations.yml": False,
        ".gitignore": False,
        "package.json": True,
        "transifex_config": False,
    }),
    ("just_setup_py", {
        "openedx.yaml": False,
        "Makefile": False,
        "tox.ini": False,
        ".travis.yml": False,
        "README.rst": False,
        "CHANGELOG.rst": False,
        "pylintrc": False,
        "setup.cfg": False,
        "setup.py": True,
        ".coveragerc": False,
        ".editorconfig": False,
        ".pii_annotations.yml": False,
        ".gitignore": False,
        "package.json": False,
        "transifex_config": False,
    })
])
def test_check_file_existence(fake_repo, flag_list):
    """Each required file's existence flag matches the fixture repo."""
    repo_path = get_repo_path(f'fake_repos/{fake_repo}')
    all_results = {module_dict_key: {}}
    check_file_existence(repo_path, all_results)
    # Iterate the dict directly; the old `.keys()` call was redundant.
    for file_name in req_files:
        assert all_results[module_dict_key][file_name] == flag_list[file_name]
@pytest.mark.parametrize("fake_repo, flag_list", [
    ("kodegail", {"requirements": False}),
    ("python_js_repo", {"requirements": True}),
])
def test_check_dir_existence(fake_repo, flag_list):
    """Each required directory's existence flag matches the fixture repo."""
    repo_path = get_repo_path(f'fake_repos/{fake_repo}')
    all_results = {module_dict_key: {}}
    check_dir_existence(repo_path, all_results)
    # `dir_name` avoids shadowing the builtin `dir`; `.keys()` was redundant.
    for dir_name in req_dirs:
        assert all_results[module_dict_key][dir_name] == flag_list[dir_name]
@pytest.mark.parametrize("fake_repo, flag_list", [
    ("just_setup_cfg", {
        "commitlint.yml": False,
    }),
    ("python_repo", {
        "commitlint.yml": True,
    }),
])
def test_check_path_existence(fake_repo, flag_list):
    """Each required path's existence flag matches the fixture repo."""
    all_results = {module_dict_key: {}}
    check_path_existence(get_repo_path(f'fake_repos/{fake_repo}'), all_results)
    # req_paths entries are (path, key, ...) triples; only the key matters here.
    for _, key, _ in req_paths:
        assert all_results[module_dict_key][key] == flag_list[key]
@pytest.mark.parametrize("fake_repo, flag", [
    ("docs_repo", True),
    ("js_repo", True),
    ("just_setup_cfg", False),
])
def test_readme_existence(fake_repo, flag):
    """README detection matches each fixture repo."""
    results = {module_dict_key: {}}
    check_readme_existence(get_repo_path(f'fake_repos/{fake_repo}'), results)
    assert results[module_dict_key]['README'] == flag
@pytest.mark.parametrize("fake_repo, flag", [
    ("python_repo", True),
])
def test_transifex_config_existence(fake_repo, flag):
    """Transifex configuration detection matches the fixture repo."""
    results = {module_dict_key: {}}
    check_transifex_config_existence(get_repo_path(f'fake_repos/{fake_repo}'), results)
    assert results[module_dict_key]['transifex_config'] == flag
|
openedx/edx-repo-health | tests/test_check_requirements.py | import os
import pytest
import pdb
from repo_health.check_requirements import (
fixture_req_lines,
module_dict_key,
check_requires,
)
def get_repo_path(repo_name):
    """Return the path of a fake-repo fixture used by these tests."""
    return os.path.dirname(__file__) + f"/fake_repos/{repo_name}"
@pytest.mark.parametrize("repo_path, flag_list", [
    (get_repo_path("django_pytest_requirement"),
     {"django": True, "pytest": True,
      "boto": False, "nose": False, }),
    (get_repo_path("django_boto_nose_requirement"),
     {"django": True, "pytest": False,
      "boto": True, "nose": True, }),
])
def test_check_requires(req_lines, flag_list):
    """Requirement detection flags match each fixture repo."""
    # `req_lines` is the fixture_req_lines fixture, built from `repo_path`.
    all_results = {module_dict_key: {}}
    check_requires(req_lines, all_results)
    for req in ('django', 'pytest', 'nose', 'boto'):
        assert all_results[module_dict_key][req] == flag_list[req]
|
openedx/edx-repo-health | tests/test_check_django_deps.py | <gh_stars>1-10
import os
import pytest
from repo_health.check_django_dependencies_compatibility import (
DjangoDependencyReader,
MODULE_DICT_KEY,
check_django_dependencies_status,
)
TEST_CSV_PATH = os.path.join(os.path.dirname(__file__), 'data/mock_django_dependencies_sheet.csv')
def get_repo_path(repo_name):
    """Return the path of a fake-repo fixture used by these tests."""
    return os.path.dirname(__file__) + f"/fake_repos/{repo_name}"
@pytest.mark.parametrize("repo_path", [get_repo_path("python_repo")])
def test_django_deps_upgrade(repo_path):
    """Django dependency counts and lists match the python_repo fixture."""
    all_results = {MODULE_DICT_KEY: {}}
    check_django_dependencies_status(repo_path, all_results, TEST_CSV_PATH)
    results = all_results[MODULE_DICT_KEY]
    assert results
    assert results['total']['count'] == 3
    assert results['django_32']['count'] == 2
    assert results['upgraded']['count'] == 1
    assert 'django-waffle' in results['total']['list']
    assert 'django-waffle' not in results['django_32']['list']
    assert 'edx-django-utils' in results['django_32']['list']
    assert 'edx-django-utils' not in results['upgraded']['list']
    assert 'djangorestframework' in results['upgraded']['list']
@pytest.mark.parametrize("repo_path", [get_repo_path("js_repo")])
def test_django_deps_upgrade_non_django_repo(repo_path):
    """A non-Django (JS) repo produces all-zero Django dependency counts."""
    all_results = {MODULE_DICT_KEY: {}}
    check_django_dependencies_status(repo_path, all_results, TEST_CSV_PATH)
    results = all_results[MODULE_DICT_KEY]
    assert results
    for bucket in ('total', 'django_32', 'upgraded'):
        assert results[bucket]['count'] == 0
def test_strip_requirement_line():
    """Constraint/include lines are stripped; real packages pass through."""
    reader = DjangoDependencyReader('')
    assert not reader.strip_requirement("-c constraints.txt")
    assert not reader.strip_requirement("-r base.txt")
    assert reader.strip_requirement("package-name==1.0") == "package-name==1.0"
def test_extract_from_github():
    """A git+ requirement yields its egg name and an empty version."""
    reader = DjangoDependencyReader('')
    link = "git+https://github.com/jazkarta/edx-jsme.git@690dbf75441fa91c7c4899df0b83d77f7deb5458#egg=edx-jsme"
    name, version = reader.extract_from_github_link(link)
    assert (name, version) == ("edx-jsme", "")
def test_extract_pypi():
    """A pinned PyPI requirement splits into (name, version)."""
    extracted = DjangoDependencyReader('').extract_from_pypi_package("edx-django-release-util==1.0.0")
    assert extracted == ("edx-django-release-util", "1.0.0")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.