id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30,100 | sineModel.py | MTG_sms-tools/smstools/models/sineModel.py | # functions that implement analysis and synthesis of sounds using the Sinusoidal Model
# (for example usage check the examples in interface)
import math
import numpy as np
from scipy.fft import fftshift, ifft
from scipy.signal.windows import blackmanharris, triang
from smstools.models import dftModel as DFT
from smstools.models import utilFunctions as UF
def sineTracking(pfreq, pmag, pphase, tfreq, freqDevOffset=20, freqDevSlope=0.01):
    """
    Tracking sinusoids from one frame to the next
    pfreq, pmag, pphase: frequencies, magnitudes and phases of the peaks of the current frame
    tfreq: frequencies of incoming tracks from previous frame (a value of 0 marks an inactive track)
    freqDevOffset: maximum frequency deviation at 0Hz
    freqDevSlope: slope increase of maximum frequency deviation with frequency
    returns tfreqn, tmagn, tphasen: frequency, magnitude and phase of tracks
    """
    tfreqn = np.zeros(tfreq.size)  # initialize array for output frequencies
    tmagn = np.zeros(tfreq.size)  # initialize array for output magnitudes
    tphasen = np.zeros(tfreq.size)  # initialize array for output phases
    pindexes = np.array(np.nonzero(pfreq), dtype=int)[0]  # indexes of current peaks
    incomingTracks = np.array(np.nonzero(tfreq), dtype=int)[
        0
    ]  # indexes of incoming tracks
    newTracks = np.zeros(tfreq.size, dtype=int) - 1  # -1 is the "unassigned" sentinel
    magOrder = np.argsort(-pmag[pindexes])  # order current peaks by decreasing magnitude
    pfreqt = np.copy(pfreq)  # copy current peaks to temporary array
    pmagt = np.copy(pmag)  # copy current peaks to temporary array
    pphaset = np.copy(pphase)  # copy current peaks to temporary array
    # continue incoming tracks: greedily assign the loudest peaks first
    if incomingTracks.size > 0:  # if incoming tracks exist
        for i in magOrder:  # iterate over current peaks
            if incomingTracks.size == 0:  # break when no more incoming tracks
                break
            track = np.argmin(
                abs(pfreqt[i] - tfreq[incomingTracks])
            )  # closest incoming track to peak
            freqDistance = abs(
                pfreq[i] - tfreq[incomingTracks[track]]
            )  # measure freq distance
            if freqDistance < (
                freqDevOffset + freqDevSlope * pfreq[i]
            ):  # choose track if distance is small
                newTracks[incomingTracks[track]] = i  # assign peak index to track index
                incomingTracks = np.delete(
                    incomingTracks, track
                )  # delete index of track in incoming tracks (each track used once)
    indext = np.array(np.nonzero(newTracks != -1), dtype=int)[
        0
    ]  # indexes of assigned tracks
    if indext.size > 0:
        indexp = newTracks[indext]  # indexes of assigned peaks
        tfreqn[indext] = pfreqt[indexp]  # output freq tracks
        tmagn[indext] = pmagt[indexp]  # output mag tracks
        tphasen[indext] = pphaset[indexp]  # output phase tracks
        pfreqt = np.delete(pfreqt, indexp)  # delete used peaks
        pmagt = np.delete(pmagt, indexp)  # delete used peaks
        pphaset = np.delete(pphaset, indexp)  # delete used peaks
    # create new tracks from non used peaks
    emptyt = np.array(np.nonzero(tfreq == 0), dtype=int)[
        0
    ]  # indexes of empty incoming tracks
    peaksleft = np.argsort(-pmagt)  # sort left peaks by decreasing magnitude
    if (peaksleft.size > 0) & (emptyt.size >= peaksleft.size):  # fill empty tracks
        tfreqn[emptyt[: peaksleft.size]] = pfreqt[peaksleft]
        tmagn[emptyt[: peaksleft.size]] = pmagt[peaksleft]
        tphasen[emptyt[: peaksleft.size]] = pphaset[peaksleft]
    elif (peaksleft.size > 0) & (
        emptyt.size < peaksleft.size
    ):  # add more tracks if necessary (output grows beyond tfreq.size)
        tfreqn[emptyt] = pfreqt[peaksleft[: emptyt.size]]
        tmagn[emptyt] = pmagt[peaksleft[: emptyt.size]]
        tphasen[emptyt] = pphaset[peaksleft[: emptyt.size]]
        tfreqn = np.append(tfreqn, pfreqt[peaksleft[emptyt.size :]])
        tmagn = np.append(tmagn, pmagt[peaksleft[emptyt.size :]])
        tphasen = np.append(tphasen, pphaset[peaksleft[emptyt.size :]])
    return tfreqn, tmagn, tphasen
def cleaningSineTracks(tfreq, minTrackLength=3):
    """
    Delete short fragments of a collection of sinusoidal tracks
    tfreq: frequency of tracks, one column per track (0 marks inactive frames); modified in place
    minTrackLength: minimum duration of tracks in number of frames
    returns tfreqn: output frequency of tracks (same array as tfreq)
    """
    if tfreq.shape[1] == 0:  # nothing to clean when there are no tracks
        return tfreq
    nFrames, nTracks = tfreq.shape
    for col in range(nTracks):  # process each track column independently
        freqs = tfreq[:, col]  # view into one track; zeroing it edits tfreq
        active = freqs > 0  # boolean activity mask, fixed before any zeroing
        # a contour starts where an inactive frame is followed by an active one
        starts = np.nonzero(~active[: nFrames - 1] & active[1:])[0] + 1
        if active[0]:
            starts = np.insert(starts, 0, 0)
        # a contour stops where an active frame is followed by an inactive one
        stops = np.nonzero(active[: nFrames - 1] & ~active[1:])[0] + 1
        if active[nFrames - 1]:
            stops = np.append(stops, nFrames - 1)
        lengths = 1 + stops - starts  # lengths of track contours
        for beg, length in zip(starts, lengths):
            if length <= minTrackLength:  # zero out contours that are too short
                freqs[beg : beg + length] = 0
    return tfreq
def sineModel(x, fs, w, N, t):
    """
    Analysis/synthesis of a sound using the sinusoidal model, without sine tracking
    x: input array sound, fs: sampling rate, w: analysis window,
    N: size of complex spectrum, t: threshold in negative dB
    returns y: output array sound
    """
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    Ns = 512  # FFT size for synthesis (even)
    H = Ns // 4  # Hop size used for analysis and synthesis
    hNs = Ns // 2  # half of synthesis FFT size
    pin = max(hNs, hM1)  # init sound pointer in middle of anal window
    pend = x.size - max(hNs, hM1)  # last sample to start a frame
    yw = np.zeros(Ns)  # initialize output sound frame
    y = np.zeros(x.size)  # initialize output array
    w = w / sum(w)  # normalize analysis window
    sw = np.zeros(Ns)  # initialize synthesis window
    ow = triang(2 * H)  # triangular window
    sw[hNs - H : hNs + H] = ow  # add triangular window
    bh = blackmanharris(Ns)  # blackmanharris window
    bh = bh / sum(bh)  # normalized blackmanharris window
    sw[hNs - H : hNs + H] = (
        sw[hNs - H : hNs + H] / bh[hNs - H : hNs + H]
    )  # normalized synthesis window (undoes the blackmanharris used by genSpecSines)
    while pin < pend:  # while input sound pointer is within sound
        # -----analysis-----
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # detect locations of peaks
        iploc, ipmag, ipphase = UF.peakInterp(
            mX, pX, ploc
        )  # refine peak values by interpolation
        ipfreq = fs * iploc / float(N)  # convert peak locations to Hertz
        # -----synthesis-----
        Y = UF.genSpecSines(
            ipfreq, ipmag, ipphase, Ns, fs
        )  # generate sines in the spectrum
        fftbuffer = np.real(ifft(Y))  # compute inverse FFT
        yw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        yw[hNs - 1 :] = fftbuffer[: hNs + 1]
        y[pin - hNs : pin + hNs] += sw * yw  # overlap-add and apply a synthesis window
        pin += H  # advance sound pointer
    return y
def sineModelAnal(
    x,
    fs,
    w,
    N,
    H,
    t,
    maxnSines=100,
    minSineDur=0.01,
    freqDevOffset=20,
    freqDevSlope=0.01,
):
    """
    Analysis of a sound using the sinusoidal model with sine tracking
    x: input array sound, w: analysis window, N: size of complex spectrum, H: hop-size, t: threshold in negative dB
    maxnSines: maximum number of sines per frame, minSineDur: minimum duration of sines in seconds
    freqDevOffset: minimum frequency deviation at 0Hz, freqDevSlope: slope increase of minimum frequency deviation
    returns xtfreq, xtmag, xtphase: frequencies, magnitudes and phases of sinusoidal tracks
    raises ValueError: if minSineDur is negative
    """
    if minSineDur < 0:  # raise error if minSineDur is smaller than 0
        raise ValueError("Minimum duration of sine tracks smaller than 0")
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    x = np.append(
        np.zeros(hM2), x
    )  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(hM2))  # add zeros at the end to analyze last sample
    pin = hM1  # initialize sound pointer in middle of analysis window
    pend = x.size - hM1  # last sample to start a frame
    w = w / sum(w)  # normalize analysis window
    tfreq = np.array([])  # no incoming tracks for the first frame
    while pin < pend:  # while input sound pointer is within sound
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # detect locations of peaks
        iploc, ipmag, ipphase = UF.peakInterp(
            mX, pX, ploc
        )  # refine peak values by interpolation
        ipfreq = fs * iploc / float(N)  # convert peak locations to Hertz
        # perform sinusoidal tracking by adding peaks to trajectories
        tfreq, tmag, tphase = sineTracking(
            ipfreq, ipmag, ipphase, tfreq, freqDevOffset, freqDevSlope
        )
        tfreq = np.resize(
            tfreq, min(maxnSines, tfreq.size)
        )  # limit number of tracks to maxnSines
        tmag = np.resize(
            tmag, min(maxnSines, tmag.size)
        )  # limit number of tracks to maxnSines
        tphase = np.resize(
            tphase, min(maxnSines, tphase.size)
        )  # limit number of tracks to maxnSines
        jtfreq = np.zeros(maxnSines)  # temporary output array
        jtmag = np.zeros(maxnSines)  # temporary output array
        jtphase = np.zeros(maxnSines)  # temporary output array
        jtfreq[: tfreq.size] = tfreq  # save track frequencies to temporary array
        jtmag[: tmag.size] = tmag  # save track magnitudes to temporary array
        jtphase[: tphase.size] = tphase  # save track phases to temporary array
        if pin == hM1:  # if first frame initialize output sine tracks
            xtfreq = jtfreq
            xtmag = jtmag
            xtphase = jtphase
        else:  # rest of frames append values to sine tracks
            xtfreq = np.vstack((xtfreq, jtfreq))
            xtmag = np.vstack((xtmag, jtmag))
            xtphase = np.vstack((xtphase, jtphase))
        pin += H
    # delete sine tracks shorter than minSineDur
    xtfreq = cleaningSineTracks(xtfreq, round(fs * minSineDur / H))
    return xtfreq, xtmag, xtphase
def sineModelSynth(tfreq, tmag, tphase, N, H, fs):
    """
    Synthesis of a sound using the sinusoidal model
    tfreq,tmag,tphase: frequencies, magnitudes and phases of sinusoids
    (pass an empty tphase array to synthesize with propagated random initial phases)
    N: synthesis FFT size, H: hop size, fs: sampling rate
    returns y: output array sound
    """
    hN = N // 2  # half of FFT size for synthesis
    L = tfreq.shape[0]  # number of frames
    pout = 0  # initialize output sound pointer
    ysize = H * (L + 3)  # output sound size
    y = np.zeros(ysize)  # initialize output array
    sw = np.zeros(N)  # initialize synthesis window
    ow = triang(2 * H)  # triangular window
    sw[hN - H : hN + H] = ow  # add triangular window
    bh = blackmanharris(N)  # blackmanharris window
    bh = bh / sum(bh)  # normalized blackmanharris window
    sw[hN - H : hN + H] = (
        sw[hN - H : hN + H] / bh[hN - H : hN + H]
    )  # normalized synthesis window
    lastytfreq = tfreq[0, :]  # initialize synthesis frequencies
    ytphase = (
        2 * np.pi * np.random.rand(tfreq[0, :].size)
    )  # random initial phases, used only when no phases are given
    for l in range(L):  # iterate over all frames
        if tphase.size > 0:  # if phases were provided use them
            ytphase = tphase[l, :]
        else:  # otherwise generate them by phase propagation
            ytphase += (np.pi * (lastytfreq + tfreq[l, :]) / fs) * H  # propagate phases
        # Y = UF.genSpecSines_p(tfreq[l,:], tmag[l,:], ytphase, N, fs) # generate sines in the spectrum (python version)
        Y = UF.genSpecSines(
            tfreq[l, :], tmag[l, :], ytphase, N, fs
        )  # generate sines in the spectrum
        lastytfreq = tfreq[l, :]  # save frequency for phase propagation
        ytphase = ytphase % (2 * np.pi)  # make phase inside 2*pi
        yw = np.real(fftshift(ifft(Y)))  # compute inverse FFT
        y[pout : pout + N] += sw * yw  # overlap-add and apply a synthesis window
        pout += H  # advance sound pointer
    y = np.delete(y, range(hN))  # delete half of first window
    y = np.delete(y, range(y.size - hN, y.size))  # delete half of the last window
    return y
| 13,096 | Python | .py | 267 | 41.348315 | 122 | 0.636009 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,101 | sprModel.py | MTG_sms-tools/smstools/models/sprModel.py | # functions that implement analysis and synthesis of sounds using the Sinusoidal plus Residual Model
# (for example usage check the examples interface)
import math
import numpy as np
from scipy.fft import fft, ifft
from scipy.signal.windows import blackmanharris, triang
from smstools.models import dftModel as DFT
from smstools.models import sineModel as SM
from smstools.models import utilFunctions as UF
def sprModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope):
    """
    Analysis of a sound using the sinusoidal plus residual model
    x: input sound, fs: sampling rate, w: analysis window; N: FFT size, t: threshold in negative dB,
    minSineDur: minimum duration of sinusoidal tracks
    maxnSines: maximum number of parallel sinusoids
    freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
    freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
    returns hfreq, hmag, hphase: harmonic frequencies, magnitude and phases; xr: residual signal
    """
    # sinusoidal analysis of the input sound
    tfreq, tmag, tphase = SM.sineModelAnal(
        x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope
    )
    # subtract the detected sinusoids from the original to get the residual
    synthFFTSize = 512
    xr = UF.sineSubtraction(x, synthFFTSize, H, tfreq, tmag, tphase, fs)
    return tfreq, tmag, tphase, xr
def sprModelSynth(tfreq, tmag, tphase, xr, N, H, fs):
    """
    Synthesis of a sound using the sinusoidal plus residual model
    tfreq, tmag, tphase: sinusoidal frequencies, amplitudes and phases; xr: residual signal
    N: synthesis FFT size; H: hop size, fs: sampling rate
    returns y: output sound, ys: sinusoidal component
    """
    ys = SM.sineModelSynth(tfreq, tmag, tphase, N, H, fs)  # synthesize the sinusoids
    overlap = min(ys.size, xr.size)  # the two parts may differ slightly in length
    y = ys[:overlap] + xr[:overlap]  # sum sinusoidal and residual components
    return y, ys
def sprModel(x, fs, w, N, t):
    """
    Analysis/synthesis of a sound using the sinusoidal plus residual model, one frame at a time
    x: input sound, fs: sampling rate, w: analysis window,
    N: FFT size (minimum 512), t: threshold in negative dB,
    returns y: output sound, ys: sinusoidal component, xr: residual component
    """
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    Ns = 512  # FFT size for synthesis (even)
    H = Ns // 4  # Hop size used for analysis and synthesis
    hNs = Ns // 2  # half of synthesis FFT size
    pin = max(hNs, hM1)  # initialize sound pointer in middle of analysis window
    pend = x.size - max(hNs, hM1)  # last sample to start a frame
    ysw = np.zeros(Ns)  # initialize sinusoidal output sound frame
    xrw = np.zeros(Ns)  # initialize residual output sound frame
    ys = np.zeros(x.size)  # initialize sinusoidal output array
    xr = np.zeros(x.size)  # initialize residual output array
    w = w / sum(w)  # normalize analysis window
    sw = np.zeros(Ns)
    ow = triang(2 * H)  # overlapping window
    sw[hNs - H : hNs + H] = ow
    bh = blackmanharris(Ns)  # synthesis window
    bh = bh / sum(bh)  # normalize synthesis window
    wr = bh  # window for residual
    sw[hNs - H : hNs + H] = sw[hNs - H : hNs + H] / bh[hNs - H : hNs + H]
    while pin < pend:
        # -----analysis-----
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # find peaks
        iploc, ipmag, ipphase = UF.peakInterp(
            mX, pX, ploc
        )  # refine peak values by interpolation
        ipfreq = fs * iploc / float(N)  # convert peak locations to Hertz
        ri = pin - hNs - 1  # input sound pointer for residual analysis
        xw2 = x[ri : ri + Ns] * wr  # window the input sound
        fftbuffer = np.zeros(Ns)  # reset buffer
        fftbuffer[:hNs] = xw2[hNs:]  # zero-phase window in fftbuffer
        fftbuffer[hNs:] = xw2[:hNs]
        X2 = fft(fftbuffer)  # compute FFT for residual analysis
        # -----synthesis-----
        Ys = UF.genSpecSines(
            ipfreq, ipmag, ipphase, Ns, fs
        )  # generate spec of sinusoidal component
        Xr = X2 - Ys  # get the residual complex spectrum
        fftbuffer = np.real(ifft(Ys))  # inverse FFT of sinusoidal spectrum
        ysw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        ysw[hNs - 1 :] = fftbuffer[: hNs + 1]
        fftbuffer = np.real(ifft(Xr))  # inverse FFT of residual spectrum
        xrw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        xrw[hNs - 1 :] = fftbuffer[: hNs + 1]
        ys[ri : ri + Ns] += sw * ysw  # overlap-add for sines
        xr[ri : ri + Ns] += sw * xrw  # overlap-add for residual
        pin += H  # advance sound pointer
    y = ys + xr  # sum of sinusoidal and residual components
    return y, ys, xr
| 5,003 | Python | .py | 97 | 45.587629 | 114 | 0.657131 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,102 | hpsModel.py | MTG_sms-tools/smstools/models/hpsModel.py | # functions that implement analysis and synthesis of sounds using the Harmonic plus Stochastic Model
# (for example usage check the examples interface)
import math
import numpy as np
from scipy.fft import fft, ifft
from scipy.signal import resample
from scipy.signal.windows import blackmanharris, hann, triang
from smstools.models import dftModel as DFT
from smstools.models import harmonicModel as HM
from smstools.models import sineModel as SM
from smstools.models import stochasticModel as STM
from smstools.models import utilFunctions as UF
def hpsModelAnal(
    x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf
):
    """
    Analysis of a sound using the harmonic plus stochastic model
    x: input sound, fs: sampling rate, w: analysis window; N: FFT size, t: threshold in negative dB,
    nH: maximum number of harmonics, minf0: minimum f0 frequency in Hz,
    maxf0: maximim f0 frequency in Hz; f0et: error threshold in the f0 detection (ex: 5),
    harmDevSlope: slope of harmonic deviation; minSineDur: minimum length of harmonics
    returns hfreq, hmag, hphase: harmonic frequencies, magnitude and phases; stocEnv: stochastic residual
    """
    # harmonic analysis of the input sound
    hfreq, hmag, hphase = HM.harmonicModelAnal(
        x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur
    )
    # remove the harmonics from the input to obtain the residual
    residual = UF.sineSubtraction(x, Ns, H, hfreq, hmag, hphase, fs)
    # model the residual stochastically
    stocEnv = STM.stochasticModelAnal(residual, H, H * 2, stocf)
    return hfreq, hmag, hphase, stocEnv
def hpsModelSynth(hfreq, hmag, hphase, stocEnv, N, H, fs):
    """
    Synthesis of a sound using the harmonic plus stochastic model
    hfreq, hmag: harmonic frequencies and amplitudes; stocEnv: stochastic envelope
    N: synthesis FFT size; H: hop size, fs: sampling rate
    returns y: output sound, yh: harmonic component, yst: stochastic component
    """
    yh = SM.sineModelSynth(hfreq, hmag, hphase, N, H, fs)  # harmonic part
    yst = STM.stochasticModelSynth(stocEnv, H, H * 2)  # stochastic part
    overlap = min(yh.size, yst.size)  # components may differ slightly in length
    y = yh[:overlap] + yst[:overlap]  # mix harmonic and stochastic components
    return y, yh, yst
def hpsModel(x, fs, w, N, t, nH, minf0, maxf0, f0et, stocf):
    """
    Analysis/synthesis of a sound using the harmonic plus stochastic model, one frame at a time, no harmonic tracking
    x: input sound; fs: sampling rate, w: analysis window; N: FFT size (minimum 512), t: threshold in negative dB,
    nH: maximum number of harmonics, minf0: minimum f0 frequency in Hz; maxf0: maximim f0 frequency in Hz,
    f0et: error threshold in the f0 detection (ex: 5); stocf: decimation factor of mag spectrum for stochastic analysis
    returns y: output sound, yh: harmonic component, yst: stochastic component
    """
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    Ns = 512  # FFT size for synthesis (even)
    H = Ns // 4  # Hop size used for analysis and synthesis
    hNs = Ns // 2  # half of synthesis FFT size
    pin = max(hNs, hM1)  # initialize sound pointer in middle of analysis window
    pend = x.size - max(hNs, hM1)  # last sample to start a frame
    yhw = np.zeros(Ns)  # initialize harmonic output sound frame
    ystw = np.zeros(Ns)  # initialize stochastic output sound frame
    yh = np.zeros(x.size)  # initialize harmonic output array
    yst = np.zeros(x.size)  # initialize stochastic output array
    w = w / sum(w)  # normalize analysis window
    sw = np.zeros(Ns)
    ow = triang(2 * H)  # overlapping window
    sw[hNs - H : hNs + H] = ow
    bh = blackmanharris(Ns)  # synthesis window
    bh = bh / sum(bh)  # normalize synthesis window
    wr = bh  # window for residual
    sw[hNs - H : hNs + H] = (
        sw[hNs - H : hNs + H] / bh[hNs - H : hNs + H]
    )  # synthesis window for harmonic component
    sws = H * hann(Ns) / 2  # synthesis window for stochastic
    hfreqp = []  # harmonic frequencies of previous frame
    f0t = 0  # current f0 candidate
    f0stable = 0  # last stable f0 value (0 means none)
    while pin < pend:
        # -----analysis-----
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # find peaks
        iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)  # refine peak values
        ipfreq = fs * iploc / N  # convert peak locations to Hz
        f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable)  # find f0
        if ((f0stable == 0) & (f0t > 0)) or (
            (f0stable > 0) & (np.abs(f0stable - f0t) < f0stable / 5.0)
        ):
            f0stable = f0t  # consider a stable f0 if it is close to the previous one
        else:
            f0stable = 0
        hfreq, hmag, hphase = HM.harmonicDetection(
            ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs
        )  # find harmonics
        hfreqp = hfreq
        ri = pin - hNs - 1  # input sound pointer for residual analysis
        xw2 = x[ri : ri + Ns] * wr  # window the input sound
        fftbuffer = np.zeros(Ns)  # reset buffer
        fftbuffer[:hNs] = xw2[hNs:]  # zero-phase window in fftbuffer
        fftbuffer[hNs:] = xw2[:hNs]
        X2 = fft(fftbuffer)  # compute FFT for residual analysis
        # -----synthesis-----
        Yh = UF.genSpecSines(
            hfreq, hmag, hphase, Ns, fs
        )  # generate spec sines of harmonic component
        Xr = X2 - Yh  # get the residual complex spectrum
        mXr = 20 * np.log10(abs(Xr[:hNs]))  # magnitude spectrum of residual
        mXrenv = resample(
            np.maximum(-200, mXr), mXr.size * stocf
        )  # decimate the magnitude spectrum and avoid -Inf
        stocEnv = resample(mXrenv, hNs)  # interpolate to original size
        pYst = 2 * np.pi * np.random.rand(hNs)  # generate phase random values
        Yst = np.zeros(Ns, dtype=complex)
        Yst[:hNs] = 10 ** (stocEnv / 20) * np.exp(1j * pYst)  # generate positive freq.
        Yst[hNs + 1 :] = 10 ** (stocEnv[:0:-1] / 20) * np.exp(
            -1j * pYst[:0:-1]
        )  # generate negative freq. (conjugate symmetric for a real signal)
        fftbuffer = np.real(ifft(Yh))  # inverse FFT of harmonic spectrum
        yhw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        yhw[hNs - 1 :] = fftbuffer[: hNs + 1]
        fftbuffer = np.real(ifft(Yst))  # inverse FFT of stochastic spectrum
        ystw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        ystw[hNs - 1 :] = fftbuffer[: hNs + 1]
        yh[ri : ri + Ns] += sw * yhw  # overlap-add for sines
        yst[ri : ri + Ns] += sws * ystw  # overlap-add for stochastic
        pin += H  # advance sound pointer
    y = yh + yst  # sum of harmonic and stochastic components
    return y, yh, yst
| 6,777 | Python | .py | 129 | 46.108527 | 119 | 0.649729 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,103 | stochasticModel.py | MTG_sms-tools/smstools/models/stochasticModel.py | # functions that implement analysis and synthesis of sounds using the Stochastic Model
# (for example usage check stochasticModel_function.py in the interface directory)
import numpy as np
from scipy.fft import fft, ifft
from scipy.interpolate import splev, splrep
from scipy.signal import resample
from scipy.signal.windows import hann
from smstools.models import utilFunctions as UF
def hertz_to_mel(f):
    """
    Conversion from hertz scale to mel scale
    """
    ratio = f / 700
    return 2595 * np.log10(1 + ratio)
def mel_to_hetz(m):
    """
    Conversion from mel scale to hertz scale
    """
    # NOTE(review): "hetz" is a typo for "hertz", but the name is public API so it is kept
    exponent = m / 2595
    return 700 * (10**exponent - 1)
def stochasticModelAnal(x, H, N, stocf, fs=44100, melScale=1):
    """
    Stochastic analysis of a sound
    x: input array sound, H: hop size, N: fftsize
    stocf: decimation factor of mag spectrum for stochastic analysis, bigger than 0, maximum of 1
    fs: sampling rate
    melScale: choose between linear scale, 0, or mel scale, 1
    returns stocEnv: stochastic envelope
    raises ValueError: if stocf is out of range, H is not positive, or N is not a power of 2
    """
    hN = N // 2 + 1  # positive size of fft
    No2 = N // 2  # half of N
    if hN * stocf < 3:  # raise exception if decimation factor too small
        raise ValueError("Stochastic decimation factor too small")
    if stocf > 1:  # raise exception if decimation factor too big
        raise ValueError("Stochastic decimation factor above 1")
    if H <= 0:  # raise error if hop size 0 or negative
        raise ValueError("Hop size (H) smaller or equal to 0")
    if not (UF.isPower2(N)):  # raise error if N not a power of two
        raise ValueError("FFT size (N) is not a power of 2")
    w = hann(N)  # analysis window
    x = np.append(
        np.zeros(No2), x
    )  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(No2))  # add zeros at the end to analyze last sample
    pin = No2  # initialize sound pointer in middle of analysis window
    pend = x.size - No2  # last sample to start a frame
    if melScale == 1:  # precompute the mel-warped frequency axis
        binFreqsMel = hertz_to_mel(np.arange(hN) * fs / float(N))
        uniformMelFreq = np.linspace(binFreqsMel[0], binFreqsMel[-1], hN)
    while pin <= pend:
        xw = x[pin - No2 : pin + No2] * w  # window the input sound
        X = fft(xw)  # compute FFT
        mX = 20 * np.log10(abs(X[:hN]))  # magnitude spectrum of positive frequencies
        if melScale == 1:
            # resample the spectrum onto a uniform mel grid (clipped at -200 dB to avoid -Inf)
            spl = splrep(binFreqsMel, np.maximum(-200, mX))
            mY = resample(
                splev(uniformMelFreq, spl), int(stocf * hN)
            )  # decimate the mag spectrum
        else:
            mY = resample(
                np.maximum(-200, mX), int(stocf * hN)
            )  # decimate the mag spectrum, clipped at -200 dB to avoid -Inf
        if pin == No2:  # first frame
            stocEnv = np.array([mY])
        else:  # rest of frames
            stocEnv = np.vstack((stocEnv, np.array([mY])))
        pin += H  # advance sound pointer
    return stocEnv
def stochasticModelSynth(stocEnv, H, N, fs=44100, melScale=1):
    """
    Stochastic synthesis of a sound
    stocEnv: stochastic envelope; H: hop size; N: fft size
    fs: sampling rate
    melScale: choose between linear scale, 0, or mel scale, 1 (should match the analysis)
    returns y: output sound
    raises ValueError: if N is not a power of 2
    """
    if not (UF.isPower2(N)):  # raise error if N not a power of two
        raise ValueError("N is not a power of two")
    hN = N // 2 + 1  # positive size of fft
    No2 = N // 2  # half of N
    L = stocEnv[:, 0].size  # number of frames
    ysize = H * (L + 3)  # output sound size
    y = np.zeros(ysize)  # initialize output array
    ws = 2 * hann(N)  # synthesis window
    pout = 0  # output sound pointer
    if melScale == 1:  # precompute the mel-warped frequency axis
        binFreqsMel = hertz_to_mel(np.arange(hN) * fs / float(N))
        uniformMelFreq = np.linspace(binFreqsMel[0], binFreqsMel[-1], hN)
    for l in range(L):
        mY = resample(stocEnv[l, :], hN)  # interpolate to original size
        if melScale == 1:  # map the envelope back from mel to linear frequency
            spl = splrep(uniformMelFreq, mY)
            mY = splev(binFreqsMel, spl)
        pY = 2 * np.pi * np.random.rand(hN)  # generate phase random values
        Y = np.zeros(N, dtype=complex)  # initialize synthesis spectrum
        Y[:hN] = 10 ** (mY / 20) * np.exp(1j * pY)  # generate positive freq.
        Y[hN:] = 10 ** (mY[-2:0:-1] / 20) * np.exp(
            -1j * pY[-2:0:-1]
        )  # generate negative freq. (conjugate symmetric for a real signal)
        fftbuffer = np.real(ifft(Y))  # inverse FFT
        y[pout : pout + N] += ws * fftbuffer  # overlap-add
        pout += H
    y = np.delete(y, range(No2))  # delete half of first window
    y = np.delete(y, range(y.size - No2, y.size))  # delete half of the last window
    return y
def stochasticModel(x, H, N, stocf, fs=44100, melScale=1):
    """
    Stochastic analysis/synthesis of a sound, one frame at a time
    x: input array sound, H: hop size, N: fft size
    stocf: decimation factor of mag spectrum for stochastic analysis, bigger than 0, maximum of 1
    fs: sampling rate
    melScale: choose between linear scale, 0, or mel scale, 1 (should match the analysis)
    returns y: output sound
    raises ValueError: if stocf is out of range, H is not positive, or N is not a power of 2
    """
    hN = N // 2 + 1  # positive size of fft
    No2 = N // 2  # half of N
    if hN * stocf < 3:  # raise exception if decimation factor too small
        raise ValueError("Stochastic decimation factor too small")
    if stocf > 1:  # raise exception if decimation factor too big
        raise ValueError("Stochastic decimation factor above 1")
    if H <= 0:  # raise error if hop size 0 or negative
        raise ValueError("Hop size (H) smaller or equal to 0")
    if not (UF.isPower2(N)):  # raise error if N not a power of two
        raise ValueError("FFT size (N) is not a power of 2")
    w = hann(N)  # analysis/synthesis window
    x = np.append(
        np.zeros(No2), x
    )  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(No2))  # add zeros at the end to analyze last sample
    pin = No2  # initialize sound pointer in middle of analysis window
    pend = x.size - No2  # last sample to start a frame
    y = np.zeros(x.size)  # initialize output array
    if melScale == 1:  # precompute the mel-warped frequency axis
        binFreqsMel = hertz_to_mel(np.arange(hN) * fs / float(N))
        uniformMelFreq = np.linspace(binFreqsMel[0], binFreqsMel[-1], hN)
    while pin <= pend:
        # -----analysis-----
        xw = x[pin - No2 : pin + No2] * w  # window the input sound
        X = fft(xw)  # compute FFT
        mX = 20 * np.log10(abs(X[:hN]))  # magnitude spectrum of positive frequencies
        if melScale == 1:
            spl = splrep(binFreqsMel, np.maximum(-200, mX))
            stocEnv = resample(
                splev(uniformMelFreq, spl), int(stocf * hN)
            )  # decimate the mag spectrum
        else:
            stocEnv = resample(
                np.maximum(-200, mX), int(stocf * hN)
            )  # decimate the mag spectrum, clipped at -200 dB to avoid -Inf
        # -----synthesis-----
        mY = resample(stocEnv, hN)  # interpolate to original size
        if melScale == 1:  # map the envelope back from mel to linear frequency
            spl = splrep(uniformMelFreq, mY)
            mY = splev(binFreqsMel, spl)
        pY = 2 * np.pi * np.random.rand(hN)  # generate phase random values
        Y = np.zeros(N, dtype=complex)
        Y[:hN] = 10 ** (mY / 20) * np.exp(1j * pY)  # generate positive freq.
        Y[hN:] = 10 ** (mY[-2:0:-1] / 20) * np.exp(
            -1j * pY[-2:0:-1]
        )  # generate negative freq. (conjugate symmetric for a real signal)
        fftbuffer = np.real(ifft(Y))  # inverse FFT
        y[pin - No2 : pin + No2] += w * fftbuffer  # overlap-add
        pin += H  # advance sound pointer
    y = np.delete(y, range(No2))  # delete half of first window which was added
    y = np.delete(
        y, range(y.size - No2, y.size)
    )  # delete half of last window which was added
    return y
| 7,769 | Python | .py | 166 | 39.542169 | 97 | 0.616623 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,104 | hprModel.py | MTG_sms-tools/smstools/models/hprModel.py | # functions that implement analysis and synthesis of sounds using the Harmonic plus Residual Model
# (for example usage check the interface directory)
import math
import numpy as np
from scipy.fft import fft, ifft
from scipy.signal.windows import blackmanharris, triang
from smstools.models import dftModel as DFT
from smstools.models import harmonicModel as HM
from smstools.models import sineModel as SM
from smstools.models import utilFunctions as UF
def hprModelAnal(x, fs, w, N, H, t, minSineDur, nH, minf0, maxf0, f0et, harmDevSlope):
    """Analysis of a sound using the harmonic plus residual model
    x: input sound, fs: sampling rate, w: analysis window; N: FFT size, t: threshold in negative dB,
    minSineDur: minimum duration of sinusoidal tracks
    nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
    maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm
    harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
    returns hfreq, hmag, hphase: harmonic frequencies, magnitude and phases; xr: residual signal
    """
    # harmonic analysis of the input sound
    hfreq, hmag, hphase = HM.harmonicModelAnal(
        x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur
    )
    # subtract the harmonics from the original sound to obtain the residual
    synthFFTSize = 512
    residual = UF.sineSubtraction(x, synthFFTSize, H, hfreq, hmag, hphase, fs)
    return hfreq, hmag, hphase, residual
def hprModelSynth(hfreq, hmag, hphase, xr, N, H, fs):
    """
    Synthesis of a sound using the harmonic plus residual model
    hfreq, hmag, hphase: harmonic frequencies, amplitudes and phases; xr: residual signal
    N: synthesis FFT size; H: hop size, fs: sampling rate
    returns y: output sound, yh: harmonic component
    """
    yh = SM.sineModelSynth(hfreq, hmag, hphase, N, H, fs)  # synthesize the harmonics
    overlap = min(yh.size, xr.size)  # components may differ slightly in length
    y = yh[:overlap] + xr[:overlap]  # sum harmonic and residual parts
    return y, yh
def hprModel(x, fs, w, N, t, nH, minf0, maxf0, f0et):
    """
    Analysis/synthesis of a sound using the harmonic plus residual model
    (whole-sound, frame-by-frame analysis and overlap-add resynthesis)
    x: input sound, fs: sampling rate, w: analysis window,
    N: FFT size (minimum 512), t: threshold in negative dB,
    nH: maximum number of harmonics, minf0: minimum f0 frequency in Hz,
    maxf0: maximum f0 frequency in Hz,
    f0et: error threshold in the f0 detection (ex: 5)
    returns y: output sound, yh: harmonic component, xr: residual component
    """
    hN = N // 2  # size of positive spectrum
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    Ns = 512  # FFT size for synthesis (even)
    H = Ns // 4  # Hop size used for analysis and synthesis
    hNs = Ns // 2  # half synthesis FFT size
    pin = max(hNs, hM1)  # initialize sound pointer in middle of analysis window
    pend = x.size - max(hNs, hM1)  # last sample to start a frame
    yhw = np.zeros(Ns)  # initialize output sound frame
    xrw = np.zeros(Ns)  # initialize output sound frame
    yh = np.zeros(x.size)  # initialize output array
    xr = np.zeros(x.size)  # initialize output array
    w = w / sum(w)  # normalize analysis window
    sw = np.zeros(Ns)  # overlap-add synthesis window
    ow = triang(2 * H)  # overlapping window
    sw[hNs - H : hNs + H] = ow
    bh = blackmanharris(Ns)  # synthesis window
    bh = bh / sum(bh)  # normalize synthesis window
    wr = bh  # window for residual
    # combined window: triangular overlap divided by blackman-harris
    sw[hNs - H : hNs + H] = sw[hNs - H : hNs + H] / bh[hNs - H : hNs + H]
    hfreqp = []  # harmonic frequencies of the previous frame (for tracking)
    f0t = 0  # f0 of the current frame
    f0stable = 0  # last f0 considered stable (0 means none)
    while pin < pend:
        # -----analysis-----
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # find peaks
        iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)  # refine peak values
        ipfreq = fs * iploc / N  # convert locations to Hz
        f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable)  # find f0
        # keep f0stable only while the new estimate stays within 20% of it
        if ((f0stable == 0) & (f0t > 0)) or (
            (f0stable > 0) & (np.abs(f0stable - f0t) < f0stable / 5.0)
        ):
            f0stable = f0t  # consider a stable f0 if it is close to the previous one
        else:
            f0stable = 0
        hfreq, hmag, hphase = HM.harmonicDetection(
            ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs
        )  # find harmonics
        hfreqp = hfreq
        ri = pin - hNs - 1  # input sound pointer for residual analysis
        xw2 = x[ri : ri + Ns] * wr  # window the input sound
        fftbuffer = np.zeros(Ns)  # reset buffer
        fftbuffer[:hNs] = xw2[hNs:]  # zero-phase window in fftbuffer
        fftbuffer[hNs:] = xw2[:hNs]
        X2 = fft(fftbuffer)  # compute FFT of input signal for residual analysis
        # -----synthesis-----
        Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)  # generate sines
        Xr = X2 - Yh  # get the residual complex spectrum
        fftbuffer = np.real(ifft(Yh))  # inverse FFT of harmonic spectrum
        yhw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        yhw[hNs - 1 :] = fftbuffer[: hNs + 1]
        fftbuffer = np.real(ifft(Xr))  # inverse FFT of residual spectrum
        xrw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        xrw[hNs - 1 :] = fftbuffer[: hNs + 1]
        yh[ri : ri + Ns] += sw * yhw  # overlap-add for sines
        xr[ri : ri + Ns] += sw * xrw  # overlap-add for residual
        pin += H  # advance sound pointer
    y = yh + xr  # sum of harmonic and residual components
    return y, yh, xr
| 5,744 | Python | .py | 112 | 45.142857 | 105 | 0.652313 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,105 | cutilFunctions.pyx | MTG_sms-tools/smstools/models/utilFunctions_C/cutilFunctions.pyx | #this is a cython wrapper on C functions to call them in python
import numpy as np
cimport numpy as np
from cutilFunctions cimport *
from libc.stdlib cimport *
np.import_array()
def twm(pfreq, pmag, f0c):
    """Cython wrapper for the C two-way mismatch (TWM) f0 estimator; it is
    bit exact with the python version of this function.

    pfreq, pmag: peak frequencies and magnitudes; f0c: f0 candidates.
    Returns (f0, f0Error) as floats.  For details on the arguments please
    refer to the original python function.
    """
    cdef np.ndarray[np.float_t, ndim=1] f0_arr
    cdef np.ndarray[np.float_t, ndim=1] f0Error_arr
    cdef np.ndarray[np.float_t, ndim=1] pfreq_arr
    cdef np.ndarray[np.float_t, ndim=1] pmag_arr
    cdef np.ndarray[np.float_t, ndim=1] f0c_arr
    # single-element output buffers; TWM_C writes its results into them
    f0_arr = np.ascontiguousarray(np.array([-1]), dtype=float)
    f0Error_arr = np.ascontiguousarray(np.array([-1]), dtype=float)
    # C code expects contiguous double arrays
    pfreq_arr = np.ascontiguousarray(pfreq, dtype=float)
    pmag_arr = np.ascontiguousarray(pmag, dtype=float)
    f0c_arr = np.ascontiguousarray(f0c, dtype=float)
    TWM_C(<double*>pfreq_arr.data, <double *>pmag_arr.data, pfreq_arr.shape[0], <double *>f0c_arr.data, f0c_arr.shape[0], <double*>f0_arr.data, <double*>f0Error_arr.data)
    return f0_arr[0], f0Error_arr[0]
def genbh92lobe(x):
    """Cython wrapper around genbh92lobe_C: evaluates the C routine at the
    positions given in x and returns an array of the same length.

    NOTE(review): presumably this samples the main lobe of a Blackman-Harris
    92dB window, mirroring the python implementation in utilFunctions —
    confirm against that function.
    """
    cdef np.ndarray[np.float_t, ndim=1] x_arr
    cdef np.ndarray[np.float_t, ndim=1] y_arr
    # contiguous input buffer for the C call; output buffer of equal size
    x_arr = np.ascontiguousarray(x, dtype=float)
    y_arr = np.empty((x_arr.shape[0],), dtype=float)
    genbh92lobe_C(<double *>x_arr.data,<double *>y_arr.data, x_arr.shape[0])
    return y_arr
def genSpecSines(iploc, ipmag, ipphase,N):
    """Cython wrapper around genspecsines_C: builds a size-N complex
    spectrum from sinusoidal peaks.

    iploc: peak locations (in FFT bins); ipmag, ipphase: peak magnitudes
    and phases; N: size of the output spectrum.  The C routine fills the
    real and imaginary parts, which are then combined into one complex
    array.
    """
    cdef np.ndarray[np.float_t, ndim=1] iploc_arr
    cdef np.ndarray[np.float_t, ndim=1] ipmag_arr
    cdef np.ndarray[np.float_t, ndim=1] ipphase_arr
    cdef np.ndarray[np.float_t, ndim=1] real_arr
    cdef np.ndarray[np.float_t, ndim=1] imag_arr
    # contiguous double buffers for the C call
    iploc_arr = np.ascontiguousarray(iploc, dtype=float)
    ipmag_arr = np.ascontiguousarray(ipmag, dtype=float)
    ipphase_arr = np.ascontiguousarray(ipphase, dtype=float)
    # zero-initialized output buffers written by genspecsines_C
    real_arr = np.zeros((N,), dtype=float)
    imag_arr = np.zeros((N,), dtype=float)
    genspecsines_C(<double *>iploc_arr.data, <double *>ipmag_arr.data, <double *>ipphase_arr.data, iploc_arr.shape[0], <double *>real_arr.data, <double *>imag_arr.data, N)
    # merge the two real buffers into a single complex spectrum
    out = real_arr.astype(complex)
    out.imag = imag_arr
    return out
| 2,406 | Python | .py | 46 | 45.869565 | 173 | 0.704113 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,106 | runserver.py | idank_explainshell/runserver.py | from explainshell import config
from explainshell.web import app
import logging.config
logging.config.dictConfig(config.LOGGING_DICT)
if __name__ == '__main__':
    # bind to an explicit interface when HOST_IP is configured (e.g. when
    # running inside a container); otherwise use Flask's default binding
    if config.HOST_IP:
        app.run(debug=config.DEBUG, host=config.HOST_IP)
    else:
        app.run(debug=config.DEBUG)
| 289 | Python | .py | 9 | 28.222222 | 56 | 0.730216 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,107 | util.py | idank_explainshell/explainshell/util.py | import itertools
from operator import itemgetter
def consecutive(l, fn):
    '''yield consecutive items from l that fn returns True for them

    runs of items satisfying fn are yielded together as one list; every
    item not satisfying fn is yielded on its own as a 1-item list

    >>> even = lambda x: x % 2 == 0
    >>> list(consecutive([], even))
    []
    >>> list(consecutive([1], even))
    [[1]]
    >>> list(consecutive([1, 2], even))
    [[1], [2]]
    >>> list(consecutive([2, 4], even))
    [[2, 4]]
    >>> list(consecutive([1, 2, 4], even))
    [[1], [2, 4]]
    >>> list(consecutive([1, 2, 4, 5, 7, 8, 10], even))
    [[1], [2, 4], [5], [7], [8, 10]]
    '''
    it = iter(l)
    ll = []  # accumulates the current run of items satisfying fn
    try:
        while True:
            # use the next() builtin rather than Python 2's it.next() so
            # this works on both Python 2.6+ and Python 3
            x = next(it)
            if fn(x):
                ll.append(x)
            else:
                # flush the accumulated run before the non-matching item
                if ll:
                    yield ll
                    ll = []
                yield [x]
    except StopIteration:
        # flush a run that reached the end of the input
        if ll:
            yield ll
def groupcontinuous(l, key=None):
    '''group continuous runs of l (where each item is the previous one
    plus 1, after applying key) into lists

    >>> list(groupcontinuous([1, 2, 4, 5, 7, 8, 10]))
    [[1, 2], [4, 5], [7, 8], [10]]
    >>> list(groupcontinuous(range(5)))
    [[0, 1, 2, 3, 4]]
    '''
    if key is None:
        key = lambda x: x
    # items belong to the same run when index - key(value) is constant;
    # avoid Python 2-only tuple parameter unpacking in the lambda so this
    # also runs on Python 3, and build a real list (py3's map is lazy)
    for _, g in itertools.groupby(enumerate(l), lambda ix: ix[0] - key(ix[1])):
        yield [x for _, x in g]
def toposorted(graph, parents):
    """Return the vertices of a DAG in topological order.

    Arguments:
    graph -- iterable of vertices to be toposorted
    parents -- function (vertex) -> vertices that must precede the given
               vertex in the output

    Vertices are tracked by identity (id()), so they need not be
    hashable.  Raises ValueError when a cycle is detected.
    """
    ordered = []
    emitted = set()  # ids of vertices already appended to ordered

    def visit(vertex, requester):
        # already placed in the output; nothing to do
        if id(vertex) in emitted:
            return
        for dep in parents(vertex):
            # a parent that is the vertex we came from means a cycle
            if dep is requester:
                raise ValueError('graph is cyclical', graph)
            visit(dep, vertex)
        emitted.add(id(vertex))
        ordered.append(vertex)

    for vertex in graph:
        visit(vertex, vertex)
    return ordered
def pairwise(iterable):
    '''yield overlapping consecutive pairs: s -> (s0,s1), (s1,s2), ...

    uses the builtin zip instead of Python 2's itertools.izip (which no
    longer exists on Python 3); on Python 2 this returns a list instead
    of an iterator, which is still iterable in the same way
    '''
    a, b = itertools.tee(iterable)
    next(b, None)  # advance the second copy by one element
    return zip(a, b)
class peekable(object):
    '''iterator wrapper that allows peeking at the next item without
    consuming it, and tracks the index of the next item to be returned

    >>> it = peekable(iter('abc'))
    >>> it.index, it.peek(), it.index, it.peek(), it.next(), it.index, it.peek(), it.next(), it.next(), it.index
    (0, 'a', 0, 'a', 'a', 1, 'b', 'b', 'c', 3)
    >>> it.peek()
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    StopIteration
    >>> it.next()
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    StopIteration
    '''
    def __init__(self, it):
        self.it = it
        self._peeked = False     # True when _peekvalue holds the next item
        self._peekvalue = None   # item fetched by peek() but not yet consumed
        self._idx = 0            # index of the next item returned by next()

    def __iter__(self):
        return self

    def next(self):
        '''return the next item, consuming a previously peeked value first'''
        if self._peeked:
            self._peeked = False
            self._idx += 1
            return self._peekvalue
        # use the next() builtin instead of Python 2's it.next() so the
        # wrapper also works on Python 3
        n = next(self.it)
        self._idx += 1
        return n

    # expose the Python 3 iterator protocol as well; on Python 2 the
    # next() method above is used directly
    __next__ = next

    def hasnext(self):
        '''return True if another item is available, without consuming it'''
        try:
            self.peek()
            return True
        except StopIteration:
            return False

    def peek(self):
        '''return the next item without consuming it; raises StopIteration
        when the underlying iterator is exhausted'''
        if self._peeked:
            return self._peekvalue
        else:
            self._peekvalue = next(self.it)
            self._peeked = True
            return self._peekvalue

    @property
    def index(self):
        '''return the index of the next item returned by next()'''
        return self._idx
def namesection(path):
    '''split a man page path of the form "name.section" into its two
    parts; the .gz suffix must already have been stripped'''
    assert '.gz' not in path
    base, sect = path.rsplit('.', 1)
    return base, sect
class propertycache(object):
    '''non-data descriptor that computes an attribute's value on first
    access and then stores it on the instance under the same name, so the
    descriptor is shadowed and later accesses are plain lookups'''

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # the instance attribute takes precedence over this (non-data)
        # descriptor on subsequent accesses
        setattr(obj, self.name, value)
| 3,810 | Python | .py | 134 | 20.985075 | 112 | 0.523602 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,108 | config.py | idank_explainshell/explainshell/config.py | import os
# repository root (two levels up from this module)
_currdir = os.path.dirname(os.path.dirname(__file__))

# directory containing the man page sources
MANPAGEDIR = os.path.join(_currdir, 'manpages')
# probability cutoff used by the paragraph classifier
CLASSIFIER_CUTOFF = 0.7
# external helper tools
TOOLSDIR = os.path.join(_currdir, 'tools')
MAN2HTML = os.path.join(TOOLSDIR, 'w3mman2html.cgi')
# host to pass into Flask's app.run.
HOST_IP = os.getenv('HOST_IP', False)
# mongodb connection string (overridable via the environment)
MONGO_URI = os.getenv('MONGO_URI', 'mongodb://localhost')
DEBUG = True
# dictConfig-style logging configuration used by the web app and tools
LOGGING_DICT = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level' : 'INFO',
            'class' : 'logging.StreamHandler',
            'formatter': 'standard',
        },
        'file': {
            'class': 'logging.FileHandler',
            'level': 'INFO',
            'formatter': 'standard',
            'filename': 'application.log',
            'mode': 'a',
        },
    },
    'loggers': {
        'explainshell': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False
        }
    }
}
| 1,127 | Python | .py | 40 | 21.05 | 73 | 0.528651 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,109 | options.py | idank_explainshell/explainshell/options.py | import re, collections, logging
from explainshell import store
tokenstate = collections.namedtuple('tokenstate', 'startpos endpos token')
logger = logging.getLogger(__name__)
def extract(manpage):
    '''extract options from all paragraphs that have been classified as containing
    options, replacing each such paragraph with a store.option in place'''
    for idx, paragraph in enumerate(manpage.paragraphs):
        if not paragraph.is_option:
            continue
        shortopts, longopts = extract_option(paragraph.cleantext())
        if shortopts or longopts:
            # the paragraph expects an argument if any of its flags does
            expectsarg = any(o.expectsarg for o in shortopts + longopts)
            shortflags = [o.flag for o in shortopts]
            longflags = [o.flag for o in longopts]
            manpage.paragraphs[idx] = store.option(
                paragraph, shortflags, longflags, expectsarg)
        else:
            logger.error("no options could be extracted from paragraph %r", paragraph)
opt_regex = re.compile(r'''
(?P<opt>--?(?:\?|\#|(?:\w+-)*\w+)) # option starts with - or -- and can have - in the middle but not at the end, also allow '-?'
(?:
(?:\s?(=)?\s?) # -a=
(?P<argoptional>[<\[])? # -a=< or -a=[
(?:\s?(=)?\s?) # or maybe -a<=
(?P<arg>
(?(argoptional) # if we think we have an arg (we saw [ or <)
[^\]>]+ # either read everything until the closing ] or >
|
(?(2)
[-a-zA-Z]+ # or if we didn't see [ or < but just saw =, read all letters, e.g. -a=abc
|
[A-Z]+ # but if we didn't have =, only allow uppercase letters, e.g. -a FOO
)
)
)
(?(argoptional)(?P<argoptionalc>[\]>])) # read closing ] or > if we have an arg
)? # the whole arg thing is optional
(?P<ending>,\s*|\s+|\Z|/|\|)''', re.X) # read any trailing whitespace or the end of the string
opt2_regex = re.compile(r'''
(?P<opt>\w+) # an option that doesn't start with any of the usual characters, e.g. options from 'dd' like bs=BYTES
(?:
(?:\s*=\s*) # an optional arg, e.g. bs=BYTES
(?P<arg>\w+)
)
(?:,\s*|\s+|\Z)''', re.X) # end with , or whitespace or the end of the string
def _flag(s, pos=0):
'''
>>> _flag('a=b').groupdict()
{'opt': 'a', 'arg': 'b'}
>>> bool(_flag('---c-d'))
False
>>> bool(_flag('foobar'))
False
'''
m = opt2_regex.match(s, pos)
return m
def _option(s, pos=0):
'''
>>> bool(_option('-'))
False
>>> bool(_option('--'))
False
>>> bool(_option('---'))
False
>>> bool(_option('-a-'))
False
>>> bool(_option('--a-'))
False
>>> bool(_option('--a-b-'))
False
>>> sorted(_option('-a').groupdict().iteritems())
[('arg', None), ('argoptional', None), ('argoptionalc', None), ('ending', ''), ('opt', '-a')]
>>> sorted(_option('--a').groupdict().iteritems())
[('arg', None), ('argoptional', None), ('argoptionalc', None), ('ending', ''), ('opt', '--a')]
>>> sorted(_option('-a<b>').groupdict().iteritems())
[('arg', 'b'), ('argoptional', '<'), ('argoptionalc', '>'), ('ending', ''), ('opt', '-a')]
>>> sorted(_option('-a=[foo]').groupdict().iteritems())
[('arg', 'foo'), ('argoptional', '['), ('argoptionalc', ']'), ('ending', ''), ('opt', '-a')]
>>> sorted(_option('-a=<foo>').groupdict().iteritems())
[('arg', 'foo'), ('argoptional', '<'), ('argoptionalc', '>'), ('ending', ''), ('opt', '-a')]
>>> sorted(_option('-a=<foo bar>').groupdict().iteritems())
[('arg', 'foo bar'), ('argoptional', '<'), ('argoptionalc', '>'), ('ending', ''), ('opt', '-a')]
>>> sorted(_option('-a=foo').groupdict().iteritems())
[('arg', 'foo'), ('argoptional', None), ('argoptionalc', None), ('ending', ''), ('opt', '-a')]
>>> bool(_option('-a=[foo>'))
False
>>> bool(_option('-a=[foo bar'))
False
>>> _option('-a foo').end(0)
3
'''
m = opt_regex.match(s, pos)
if m:
if m.group('argoptional'):
c = m.group('argoptional')
cc = m.group('argoptionalc')
if (c == '[' and cc == ']') or (c == '<' and cc == '>'):
return m
else:
return
return m
_eatbetweenregex = re.compile(r'\s*(?:or|,|\|)\s*')
def _eatbetween(s, pos):
'''
>>> _eatbetween('foo', 0)
0
>>> _eatbetween('a, b', 1)
3
>>> _eatbetween('a|b', 1)
2
>>> _eatbetween('a or b', 1)
5
'''
m = _eatbetweenregex.match(s, pos)
if m:
return m.end(0)
return pos
class extractedoption(collections.namedtuple('extractedoption', 'flag expectsarg')):
    '''an option flag extracted from a man page paragraph, paired with
    the argument text (if any) that showed it expects an argument; it
    compares equal to a plain string matching its flag'''

    def __eq__(self, other):
        # allow comparing directly against a bare flag string
        if isinstance(other, str):
            return self.flag == other
        return super(extractedoption, self).__eq__(other)

    def __str__(self):
        return self.flag
def extract_option(txt):
    '''this is where the magic is supposed to happen: try to find options
    in txt using a regex

    returns (short, long): lists of extractedoption, split on whether the
    flag starts with -- or not
    '''
    # skip leading whitespace; startpos also remembers where parsing began
    # so we can later tell whether any -/-- option was consumed at all
    startpos = currpos = len(txt) - len(txt.lstrip())
    short, long = [], []
    m = _option(txt, currpos)
    # keep going as long as options are found
    while m:
        s = m.group('opt')
        po = extractedoption(s, m.group('arg'))
        if s.startswith('--'):
            long.append(po)
        else:
            short.append(po)
        currpos = m.end(0)
        # skip a separator ("or", "," or "|") between alternative flags
        currpos = _eatbetween(txt, currpos)
        if m.group('ending') == '|':
            m = _option(txt, currpos)
            if not m:
                # the text after | isn't a regular option; treat the
                # |-separated chunks up to the next whitespace as flags
                startpos = currpos
                while currpos < len(txt) and not txt[currpos].isspace():
                    if txt[currpos] == '|':
                        short.append(extractedoption(txt[startpos:currpos], None))
                        startpos = currpos
                    currpos += 1
                leftover = txt[startpos:currpos]
                if leftover:
                    short.append(extractedoption(leftover, None))
        else:
            m = _option(txt, currpos)
    # nothing consumed: fall back to dd(1)-style options such as bs=BYTES
    if currpos == startpos:
        m = _flag(txt, currpos)
        while m:
            s = m.group('opt')
            po = extractedoption(s, m.group('arg'))
            long.append(po)
            currpos = m.end(0)
            currpos = _eatbetween(txt, currpos)
            m = _flag(txt, currpos)
    return short, long
| 6,226 | Python | .py | 164 | 30.103659 | 133 | 0.501902 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,110 | manager.py | idank_explainshell/explainshell/manager.py | import sys, os, argparse, logging, glob
from explainshell import options, store, fixer, manpage, errors, util, config
from explainshell.algo import classifier
logger = logging.getLogger('explainshell.manager')
class managerctx(object):
    '''state shared between the processing stages (read, classify,
    extract, write) of a single man page'''
    def __init__(self, classifier, store, manpage):
        self.classifier = classifier
        self.store = store
        self.manpage = manpage
        self.name = manpage.name

        # filled in by later processing stages
        self.classifiermanpage = None
        self.optionsraw = None
        self.optionsextracted = None
        self.aliases = None
class manager(object):
    '''the manager uses all parts of the system to read, classify, parse, extract
    and write a man page to the database'''
    def __init__(self, dbhost, dbname, paths, overwrite=False, drop=False):
        self.paths = paths
        self.overwrite = overwrite
        self.store = store.store(dbname, dbhost)
        # the classifier is trained up-front from data already in the store
        self.classifier = classifier.classifier(self.store, 'bayes')
        self.classifier.train()
        if drop:
            self.store.drop(True)
    def ctx(self, m):
        '''create a processing context for manpage m'''
        return managerctx(self.classifier, self.store, m)
    def _read(self, ctx, frunner):
        '''read and parse the raw man page, converting it into a store.manpage'''
        frunner.pre_get_raw_manpage()
        ctx.manpage.read()
        ctx.manpage.parse()
        assert len(ctx.manpage.paragraphs) > 1
        ctx.manpage = store.manpage(ctx.manpage.shortpath, ctx.manpage.name,
                                    ctx.manpage.synopsis, ctx.manpage.paragraphs, list(ctx.manpage.aliases))
        frunner.post_parse_manpage()
    def _classify(self, ctx, frunner):
        '''mark which paragraphs look like option descriptions'''
        ctx.classifiermanpage = store.classifiermanpage(ctx.name, ctx.manpage.paragraphs)
        frunner.pre_classify()
        # classify() is a generator; drain it for its side effects
        _ = list(ctx.classifier.classify(ctx.classifiermanpage))
        frunner.post_classify()
    def _extract(self, ctx, frunner):
        '''extract options from the classified paragraphs'''
        options.extract(ctx.manpage)
        frunner.post_option_extraction()
        if not ctx.manpage.options:
            logger.warn("couldn't find any options for manpage %s", ctx.manpage.name)
    def _write(self, ctx, frunner):
        '''insert the processed man page into the store'''
        frunner.pre_add_manpage()
        return ctx.store.addmanpage(ctx.manpage)
    def _update(self, ctx, frunner):
        '''update an existing man page in the store'''
        frunner.pre_add_manpage()
        return ctx.store.updatemanpage(ctx.manpage)
    def process(self, ctx):
        '''run the full pipeline (read, classify, extract, write) for ctx'''
        frunner = fixer.runner(ctx)

        self._read(ctx, frunner)
        self._classify(ctx, frunner)
        self._extract(ctx, frunner)

        m = self._write(ctx, frunner)
        return m
    def edit(self, m, paragraphs=None):
        '''re-extract options for an existing man page, optionally with
        manually edited paragraphs'''
        ctx = self.ctx(m)
        frunner = fixer.runner(ctx)
        if paragraphs:
            m.paragraphs = paragraphs
            # manually edited paragraphs shouldn't be re-joined
            frunner.disable('paragraphjoiner')
            frunner.post_option_extraction()
        else:
            self._extract(ctx, frunner)

        m = self._update(ctx, frunner)
        return m
    def run(self):
        '''process all configured paths; returns (added, exists) lists'''
        added = []
        exists = []
        for path in self.paths:
            try:
                m = manpage.manpage(path)
                logger.info('handling manpage %s (from %s)', m.name, path)
                try:
                    # strip the .gz suffix when looking the man page up
                    mps = self.store.findmanpage(m.shortpath[:-3])
                    mps = [mp for mp in mps if m.shortpath == mp.source]
                    if mps:
                        assert len(mps) == 1
                        mp = mps[0]
                        # don't overwrite pages that were manually updated
                        if not self.overwrite or mp.updated:
                            logger.info('manpage %r already in the data store, not overwriting it', m.name)
                            exists.append(m)
                            continue
                except errors.ProgramDoesNotExist:
                    pass

                # the manpage is not in the data store; process and add it
                ctx = self.ctx(m)
                m = self.process(ctx)
                if m:
                    added.append(m)
            except errors.EmptyManpage, e:
                logger.error('manpage %r is empty!', e.args[0])
            except ValueError:
                logger.fatal('uncaught exception when handling manpage %s', path)
            except KeyboardInterrupt:
                raise
            except:
                logger.fatal('uncaught exception when handling manpage %s', path)
                raise
        if not added:
            logger.warn('no manpages added')
        else:
            self.findmulticommands()

        return added, exists
    def findmulticommands(self):
        '''scan stored names like "git-log" and, when the prefix ("git")
        is itself a stored man page, register a "git log" mapping and
        mark the prefix as a multicommand'''
        manpages = {}
        potential = []

        for _id, m in self.store.names():
            if '-' in m:
                potential.append((m.split('-'), _id))
            else:
                manpages[m] = _id

        mappings = set([x[0] for x in self.store.mappings()])
        mappingstoadd = []
        multicommands = {}

        for p, _id in potential:
            if ' '.join(p) in mappings:
                continue
            if p[0] in manpages:
                mappingstoadd.append((' '.join(p), _id))
                multicommands[p[0]] = manpages[p[0]]

        for src, dst in mappingstoadd:
            self.store.addmapping(src, dst, 1)
            logger.info('inserting mapping (multicommand) %s -> %s', src, dst)
        for multicommand, _id in multicommands.iteritems():
            self.store.setmulticommand(_id)
            logger.info('making %r a multicommand', multicommand)

        return mappingstoadd, multicommands
def main(files, dbname, dbhost, overwrite, drop, verify):
    '''entry point for the command line tool; returns a process exit code
    when verifying, otherwise processes the given files/directories'''
    if verify:
        s = store.store(dbname, dbhost)
        ok = s.verify()
        return 0 if ok else 1

    if drop:
        # destructive operation: ask for confirmation interactively
        if raw_input('really drop db (y/n)? ').strip().lower() != 'y':
            drop = False
        else:
            overwrite = True # if we drop, no need to take overwrite into account

    # expand directories into the .gz man pages they contain
    gzs = set()
    for path in files:
        if os.path.isdir(path):
            gzs.update([os.path.abspath(f) for f in glob.glob(os.path.join(path, '*.gz'))])
        else:
            gzs.add(os.path.abspath(path))

    m = manager(dbhost, dbname, gzs, overwrite, drop)
    added, exists = m.run()
    for mp in added:
        print 'successfully added %s' % mp.source
    if exists:
        print 'these manpages already existed and werent overwritten: \n\n%s' % '\n'.join([m.path for m in exists])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='process man pages and save them in the store')
parser.add_argument('--log', type=str, default='ERROR', help='use log as the logger log level')
parser.add_argument('--overwrite', action='store_true', default=False, help='overwrite man pages that already exist in the store')
parser.add_argument('--drop', action='store_true', default=False, help='delete all existing man pages')
parser.add_argument('--db', default='explainshell', help='mongo db name')
parser.add_argument('--host', default=config.MONGO_URI, help='mongo host')
parser.add_argument('--verify', action='store_true', default=False, help='verify db integrity')
parser.add_argument('files', nargs='*')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log.upper()))
sys.exit(main(args.files, args.db, args.host, args.overwrite, args.drop, args.verify))
| 7,255 | Python | .py | 165 | 33.393939 | 134 | 0.595265 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,111 | helpconstants.py | idank_explainshell/explainshell/helpconstants.py | # -*- coding: utf-8 -*-
import textwrap
NOSYNOPSIS = 'no synopsis found'
PIPELINES = textwrap.dedent(''' <b>Pipelines</b>
A <u>pipeline</u> is a sequence of one or more commands separated by one of the control operators <b>|</b> or <b>|&</b>. The
format for a pipeline is:
[<b>time</b> [<b>-p</b>]] [ ! ] <u>command</u> [ [<b>|</b>⎪<b>|&</b>] <u>command2</u> ... ]
The standard output of <u>command</u> is connected via a pipe to the standard input of <u>command2</u>. This
connection is performed before any redirections specified by the command (see <b>REDIRECTION</b> below). If <b>|&</b>
is used, the standard error of <u>command</u> is connected to <u>command2</u>'s standard input through the pipe; it is
shorthand for <b>2>&1</b> <b>|</b>. This implicit redirection of the standard error is performed after any
redirections specified by the command.
The return status of a pipeline is the exit status of the last command, unless the <b>pipefail</b> option is
enabled. If <b>pipefail</b> is enabled, the pipeline's return status is the value of the last (rightmost)
command to exit with a non-zero status, or zero if all commands exit successfully. If the reserved word
<b>!</b> precedes a pipeline, the exit status of that pipeline is the logical negation of the exit status as
described above. The shell waits for all commands in the pipeline to terminate before returning a value.
If the <b>time</b> reserved word precedes a pipeline, the elapsed as well as user and system time consumed by
its execution are reported when the pipeline terminates. The <b>-p</b> option changes the output format to that
specified by POSIX. When the shell is in <u>posix</u> <u>mode</u>, it does not recognize <b>time</b> as a reserved word if
the next token begins with a `-'. The <b>TIMEFORMAT</b> variable may be set to a format string that specifies
how the timing information should be displayed; see the description of <b>TIMEFORMAT</b> under <b>Shell</b> <b>Variables</b>
below.
When the shell is in <u>posix</u> <u>mode</u>, <b>time</b> may be followed by a newline. In this case, the shell displays the
total user and system time consumed by the shell and its children. The <b>TIMEFORMAT</b> variable may be used
to specify the format of the time information.
Each command in a pipeline is executed as a separate process (i.e., in a subshell).''')
OPSEMICOLON = textwrap.dedent(''' Commands separated by a <b>;</b> are executed sequentially; the shell waits for each command to terminate in turn. The
return status is the exit status of the last command executed.''')
OPBACKGROUND = textwrap.dedent(''' If a command is terminated by the control operator <b>&</b>, the shell executes the command in the <u>background</u> in
a subshell. The shell does not wait for the command to finish, and the return status is 0.''')
OPANDOR = textwrap.dedent(''' AND and OR lists are sequences of one of more pipelines separated by the <b>&&</b> and <b>||</b> control operators,
respectively. AND and OR lists are executed with left associativity. An AND list has the form
<u>command1</u> <b>&&</b> <u>command2</u>
<u>command2</u> is executed if, and only if, <u>command1</u> returns an exit status of zero.
An OR list has the form
<u>command1</u> <b>||</b> <u>command2</u>
<u>command2</u> is executed if and only if <u>command1</u> returns a non-zero exit status. The return status of AND
and OR lists is the exit status of the last command executed in the list.''')
OPERATORS = {';' : OPSEMICOLON, '&' : OPBACKGROUND, '&&' : OPANDOR, '||' : OPANDOR}
REDIRECTION = textwrap.dedent(''' Before a command is executed, its input and output may be <u>redirected</u> using a special notation interpreted
by the shell. Redirection may also be used to open and close files for the current shell execution
environment. The following redirection operators may precede or appear anywhere within a <u>simple</u> <u>command</u>
or may follow a <u>command</u>. Redirections are processed in the order they appear, from left to right.''')
REDIRECTING_INPUT = textwrap.dedent(''' <b>Redirecting</b> <b>Input</b>
Redirection of input causes the file whose name results from the expansion of <u>word</u> to be opened for
reading on file descriptor <u>n</u>, or the standard input (file descriptor 0) if <u>n</u> is not specified.
The general format for redirecting input is:
[<u>n</u>]<b><</b><u>word</u>''')
REDIRECTING_OUTPUT = textwrap.dedent(''' <b>Redirecting</b> <b>Output</b>
Redirection of output causes the file whose name results from the expansion of <u>word</u> to be opened for
writing on file descriptor <u>n</u>, or the standard output (file descriptor 1) if <u>n</u> is not specified. If the
file does not exist it is created; if it does exist it is truncated to zero size.
The general format for redirecting output is:
[<u>n</u>]<b>></b><u>word</u>
If the redirection operator is <b>></b>, and the <b>noclobber</b> option to the <b>set</b> builtin has been enabled, the
redirection will fail if the file whose name results from the expansion of <u>word</u> exists and is a regular
file. If the redirection operator is <b>>|</b>, or the redirection operator is <b>></b> and the <b>noclobber</b> option to
the <b>set</b> builtin command is not enabled, the redirection is attempted even if the file named by <u>word</u>
exists.''')
APPENDING_REDIRECTED_OUTPUT = textwrap.dedent(''' <b>Appending</b> <b>Redirected</b> <b>Output</b>
Redirection of output in this fashion causes the file whose name results from the expansion of <u>word</u> to be
opened for appending on file descriptor <u>n</u>, or the standard output (file descriptor 1) if <u>n</u> is not
specified. If the file does not exist it is created.
The general format for appending output is:
[<u>n</u>]<b>>></b><u>word</u>''')
REDIRECTING_OUTPUT_ERROR = textwrap.dedent(''' <b>Redirecting</b> <b>Standard</b> <b>Output</b> <b>and</b> <b>Standard</b> <b>Error</b>
This construct allows both the standard output (file descriptor 1) and the standard error output (file
descriptor 2) to be redirected to the file whose name is the expansion of <u>word</u>.
There are two formats for redirecting standard output and standard error:
<b>&></b><u>word</u>
and
<b>>&</b><u>word</u>
Of the two forms, the first is preferred. This is semantically equivalent to
<b>></b><u>word</u> 2<b>>&</b>1''')
APPENDING_OUTPUT_ERROR = textwrap.dedent(''' <b>Appending</b> <b>Standard</b> <b>Output</b> <b>and</b> <b>Standard</b> <b>Error</b>
This construct allows both the standard output (file descriptor 1) and the standard error output (file
descriptor 2) to be appended to the file whose name is the expansion of <u>word</u>.
The format for appending standard output and standard error is:
<b>&>></b><u>word</u>
This is semantically equivalent to
<b>>></b><u>word</u> 2<b>>&</b>1''')
HERE_DOCUMENTS = textwrap.dedent(''' <b>Here</b> <b>Documents</b>
This type of redirection instructs the shell to read input from the current source until a line
containing only <u>delimiter</u> (with no trailing blanks) is seen. All of the lines read up to that point are
then used as the standard input for a command.
The format of here-documents is:
<b><<</b>[<b>-</b>]<u>word</u>
<u>here-document</u>
<u>delimiter</u>
No parameter expansion, command substitution, arithmetic expansion, or pathname expansion is performed on
<u>word</u>. If any characters in <u>word</u> are quoted, the <u>delimiter</u> is the result of quote removal on <u>word</u>, and
the lines in the here-document are not expanded. If <u>word</u> is unquoted, all lines of the here-document are
subjected to parameter expansion, command substitution, and arithmetic expansion. In the latter case,
the character sequence <b>\<newline></b> is ignored, and <b>\</b> must be used to quote the characters <b>\</b>, <b>$</b>, and <b>`</b>.
If the redirection operator is <b><<-</b>, then all leading tab characters are stripped from input lines and the
line containing <u>delimiter</u>. This allows here-documents within shell scripts to be indented in a natural
fashion.
<b>Here</b> <b>Strings</b>
A variant of here documents, the format is:
<b><<<</b><u>word</u>
The <u>word</u> is expanded and supplied to the command on its standard input.''')
REDIRECTION_KIND = {'<' : REDIRECTING_INPUT,
'>' : REDIRECTING_OUTPUT,
'>>' : APPENDING_REDIRECTED_OUTPUT,
'&>' : REDIRECTING_OUTPUT_ERROR,
'>&' : REDIRECTING_OUTPUT_ERROR,
'&>>' : APPENDING_OUTPUT_ERROR,
'<<' : HERE_DOCUMENTS,
'<<<' : HERE_DOCUMENTS}
ASSIGNMENT = textwrap.dedent(''' A <u>variable</u> may be assigned to by a statement of the form
<u>name</u>=[<u>value</u>]
If <u>value</u> is not given, the variable is assigned the null string. All <u>values</u> undergo tilde expansion,
parameter and variable expansion, command substitution, arithmetic expansion, and quote removal (see
<b>EXPANSION</b> below). If the variable has its <b>integer</b> attribute set, then <u>value</u> is evaluated as an
arithmetic expression even if the $((...)) expansion is not used (see <b>Arithmetic</b> <b>Expansion</b> below). Word
splitting is not performed, with the exception of <b>"$@"</b> as explained below under <b>Special</b> <b>Parameters</b>.
Pathname expansion is not performed. Assignment statements may also appear as arguments to the <b>alias</b>,
<b>declare</b>, <b>typeset</b>, <b>export</b>, <b>readonly</b>, and <b>local</b> builtin commands.
In the context where an assignment statement is assigning a value to a shell variable or array index, the
+= operator can be used to append to or add to the variable's previous value. When += is applied to a
variable for which the <u>integer</u> attribute has been set, <u>value</u> is evaluated as an arithmetic expression and
added to the variable's current value, which is also evaluated. When += is applied to an array variable
using compound assignment (see <b>Arrays</b> below), the variable's value is not unset (as it is when using =),
and new values are appended to the array beginning at one greater than the array's maximum index (for
indexed arrays) or added as additional key-value pairs in an associative array. When applied to a
string-valued variable, <u>value</u> is expanded and appended to the variable's value.''')
_group = textwrap.dedent(''' { <u>list</u>; }
<u>list</u> is simply executed in the current shell environment. <u>list</u> must be terminated with a newline
or semicolon. This is known as a <u>group</u> <u>command</u>. The return status is the exit status of <u>list</u>.
Note that unlike the metacharacters <b>(</b> and <b>)</b>, <b>{</b> and <b>}</b> are <u>reserved</u> <u>words</u> and must occur where a
reserved word is permitted to be recognized. Since they do not cause a word break, they must be
separated from <u>list</u> by whitespace or another shell metacharacter.''')
_subshell = textwrap.dedent(''' (<u>list</u>) <u>list</u> is executed in a subshell environment (see <b>COMMAND</b> <b>EXECUTION</b> <b>ENVIRONMENT</b> below). Variable
assignments and builtin commands that affect the shell's environment do not remain in effect after
the command completes. The return status is the exit status of <u>list</u>.''')
_negate = '''If the reserved word <b>!</b> precedes a pipeline, the exit status of that pipeline is the logical negation of the
exit status as described above.'''
_if = textwrap.dedent(''' <b>if</b> <u>list</u>; <b>then</b> <u>list;</u> [ <b>elif</b> <u>list</u>; <b>then</b> <u>list</u>; ] ... [ <b>else</b> <u>list</u>; ] <b>fi</b>
The <b>if</b> <u>list</u> is executed. If its exit status is zero, the <b>then</b> <u>list</u> is executed. Otherwise, each
<b>elif</b> <u>list</u> is executed in turn, and if its exit status is zero, the corresponding <b>then</b> <u>list</u> is
executed and the command completes. Otherwise, the <b>else</b> <u>list</u> is executed, if present. The exit
status is the exit status of the last command executed, or zero if no condition tested true.''')
_for = textwrap.dedent(''' <b>for</b> <u>name</u> [ [ <b>in</b> [ <u>word</u> <u>...</u> ] ] ; ] <b>do</b> <u>list</u> ; <b>done</b>
The list of words following <b>in</b> is expanded, generating a list of items. The variable <u>name</u> is set
to each element of this list in turn, and <u>list</u> is executed each time. If the <b>in</b> <u>word</u> is omitted,
the <b>for</b> command executes <u>list</u> once for each positional parameter that is set (see <b>PARAMETERS</b>
below). The return status is the exit status of the last command that executes. If the expansion
of the items following <b>in</b> results in an empty list, no commands are executed, and the return
status is 0.''')
_whileuntil = textwrap.dedent(''' <b>while</b> <u>list-1</u>; <b>do</b> <u>list-2</u>; <b>done</b>
<b>until</b> <u>list-1</u>; <b>do</b> <u>list-2</u>; <b>done</b>
The <b>while</b> command continuously executes the list <u>list-2</u> as long as the last command in the list
<u>list-1</u> returns an exit status of zero. The <b>until</b> command is identical to the <b>while</b> command,
except that the test is negated; <u>list-2</u> is executed as long as the last command in <u>list-1</u> returns
a non-zero exit status. The exit status of the <b>while</b> and <b>until</b> commands is the exit status of the
last command executed in <u>list-2</u>, or zero if none was executed.''')
_select = textwrap.dedent(''' <b>select</b> <u>name</u> [ <b>in</b> <u>word</u> ] ; <b>do</b> <u>list</u> ; <b>done</b>
The list of words following <b>in</b> is expanded, generating a list of items. The set of expanded words
is printed on the standard error, each preceded by a number. If the <b>in</b> <u>word</u> is omitted, the
positional parameters are printed (see <b>PARAMETERS</b> below). The <b>PS3</b> prompt is then displayed and a
line read from the standard input. If the line consists of a number corresponding to one of the
displayed words, then the value of <u>name</u> is set to that word. If the line is empty, the words and
prompt are displayed again. If EOF is read, the command completes. Any other value read causes
<u>name</u> to be set to null. The line read is saved in the variable <b>REPLY</b>. The <u>list</u> is executed after
each selection until a <b>break</b> command is executed. The exit status of <b>select</b> is the exit status of
the last command executed in <u>list</u>, or zero if no commands were executed.''')
# fallback help for reserved words/metacharacters that appear outside a
# compound command context (see COMPOUNDRESERVEDWORDS for the specific ones)
RESERVEDWORDS = {
    '!' : _negate,
    '{' : _group,
    '}' : _group,
    '(' : _subshell,
    ')' : _subshell,
    ';' : OPSEMICOLON,
}
def _addwords(key, text, *words):
for word in words:
COMPOUNDRESERVEDWORDS.setdefault(key, {})[word] = text
COMPOUNDRESERVEDWORDS = {}
# populate the per-compound help mapping. ';' is registered for every
# construct so a semicolon inside it gets the construct's help text;
# previously 'until' and 'select' inconsistently omitted it (falling back
# to the generic operator help) while 'if'/'for'/'while' included it.
_addwords('if', _if, 'if', 'then', 'elif', 'else', 'fi', ';')
_addwords('for', _for, 'for', 'in', 'do', 'done', ';')
_addwords('while', _whileuntil, 'while', 'do', 'done', ';')
_addwords('until', _whileuntil, 'until', 'do', 'done', ';')
_addwords('select', _select, 'select', 'in', 'do', 'done', ';')
_function = textwrap.dedent(''' A shell function is an object that is called like a simple command and executes a compound command with a
new set of positional parameters. Shell functions are declared as follows:
<u>name</u> () <u>compound-command</u> [<u>redirection</u>]
<b>function</b> <u>name</u> [()] <u>compound-command</u> [<u>redirection</u>]
This defines a function named <u>name</u>. The reserved word <b>function</b> is optional. If the <b>function</b>
reserved word is supplied, the parentheses are optional. The <u>body</u> of the function is the compound
command <u>compound-command</u> (see <b>Compound</b> <b>Commands</b> above). That command is usually a <u>list</u> of
commands between { and }, but may be any command listed under <b>Compound</b> <b>Commands</b> above.
<u>compound-command</u> is executed whenever <u>name</u> is specified as the name of a simple command. Any
redirections (see <b>REDIRECTION</b> below) specified when a function is defined are performed when the
function is executed. The exit status of a function definition is zero unless a syntax error
occurs or a readonly function with the same name already exists. When executed, the exit status
of a function is the exit status of the last command executed in the body. (See <b>FUNCTIONS</b> below.)''')
_functioncall = 'call shell function %r'
_functionarg = 'argument for shell function %r'
COMMENT = textwrap.dedent('''<b>COMMENTS</b>
In a non-interactive shell, or an interactive shell in which the <b>interactive_comments</b> option to the <b>shopt</b>
builtin is enabled (see <b>SHELL</b> <b>BUILTIN</b> <b>COMMANDS</b> below), a word beginning with <b>#</b> causes that word and all
remaining characters on that line to be ignored. An interactive shell without the <b>interactive_comments</b>
option enabled does not allow comments. The <b>interactive_comments</b> option is on by default in interactive
shells.''')
# special shell parameter character -> symbolic name
# NOTE(review): presumably used to build the name of the matching help
# constant for each special parameter -- verify against the caller
parameters = {
    '*' : 'star',
    '@' : 'at',
    '#' : 'pound',
    '?' : 'question',
    '-' : 'hyphen',
    '$' : 'dollar',
    '!' : 'exclamation',
    '0' : 'zero',
    '_' : 'underscore',
}
| 19,316 | Python | .py | 216 | 80.263889 | 178 | 0.639521 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,112 | store.py | idank_explainshell/explainshell/store.py | '''data objects to save processed man pages to mongodb'''
import pymongo, collections, re, logging
from explainshell import errors, util, helpconstants, config
logger = logging.getLogger(__name__)
class classifiermanpage(collections.namedtuple('classifiermanpage', 'name paragraphs')):
    '''a man page whose paragraphs were manually tagged as option /
    non-option, used as training data for the classifier'''
    @staticmethod
    def from_store(d):
        '''deserialize from a mongodb document'''
        tagged = [paragraph.from_store(raw) for raw in d['paragraphs']]
        return classifiermanpage(d['name'], tagged)
    def to_store(self):
        '''serialize into a dict suitable for storing in mongodb'''
        serialized = [p.to_store() for p in self.paragraphs]
        return {'name' : self.name, 'paragraphs' : serialized}
class paragraph(object):
    '''a paragraph inside a man page is text that ends with two new lines'''
    def __init__(self, idx, text, section, is_option):
        self.idx = idx              # position of this paragraph in the page
        self.text = text            # raw paragraph text (may contain markup)
        self.section = section      # man page section this paragraph belongs to
        self.is_option = is_option  # classified as describing an option?
    def cleantext(self):
        '''return the text with markup tags stripped and the HTML entities
        &lt;/&gt; decoded back to </>'''
        t = re.sub(r'<[^>]+>', '', self.text)
        # bugfix: these were written as no-op substitutions
        # (re.sub('<', '<', t)); the intent is to decode the escaped
        # angle brackets left over after stripping the markup tags
        t = t.replace('&lt;', '<')
        t = t.replace('&gt;', '>')
        return t
    @staticmethod
    def from_store(d):
        # NOTE(review): .encode('utf8') is a py2 idiom turning the stored
        # unicode into a utf-8 byte string; under py3 this would yield bytes
        p = paragraph(d.get('idx', 0), d['text'].encode('utf8'), d['section'], d['is_option'])
        return p
    def to_store(self):
        return {'idx' : self.idx, 'text' : self.text, 'section' : self.section,
                'is_option' : self.is_option}
    def __repr__(self):
        t = self.cleantext()
        # bugfix: the old slice t[:min(20, t.find('\n'))] dropped the last
        # character whenever there was no newline (str.find returns -1);
        # take the first line, truncated to 20 characters
        t = t.split('\n', 1)[0][:20].lstrip()
        return '<paragraph %d, %s: %r>' % (self.idx, self.section, t)
    def __eq__(self, other):
        if not other:
            return False
        return self.__dict__ == other.__dict__
class option(paragraph):
    '''a paragraph from which command line options were extracted

    short - a list of short options (-a, -b, ..)
    long - a list of long options (--a, --b)
    expectsarg - specifies if one of the short/long options expects an additional argument
    argument - specifies if to consider this as positional arguments
    nestedcommand - specifies if the arguments to this option can start a nested command
    '''
    def __init__(self, p, short, long, expectsarg, argument=None, nestedcommand=False):
        paragraph.__init__(self, p.idx, p.text, p.section, p.is_option)
        if nestedcommand:
            assert expectsarg, 'an option that can nest commands must expect an argument'
        self.short = short
        self.long = long
        self._opts = short + long
        self.expectsarg = expectsarg
        self.argument = argument
        self.nestedcommand = nestedcommand
    @property
    def opts(self):
        '''every extracted option string, short ones first'''
        return self._opts
    @classmethod
    def from_store(cls, d):
        base = paragraph.from_store(d)
        return cls(base, d['short'], d['long'], d['expectsarg'],
                   d['argument'], d.get('nestedcommand'))
    def to_store(self):
        d = paragraph.to_store(self)
        assert d['is_option']
        d.update({'short' : self.short,
                  'long' : self.long,
                  'expectsarg' : self.expectsarg,
                  'argument' : self.argument,
                  'nestedcommand' : self.nestedcommand})
        return d
    def __str__(self):
        return '(%s)' % ', '.join(str(o) for o in self.opts)
    def __repr__(self):
        return '<options for paragraph %d: %s>' % (self.idx, str(self))
class manpage(object):
    '''processed man page

    source - the path to the original source man page
    name - the name of this man page as extracted by manpage.manpage
    synopsis - the synopsis of this man page as extracted by manpage.manpage
    paragraphs - a list of paragraphs (and options) that contain all of the text and options
        extracted from this man page
    aliases - a list of (alias, score) tuples found for this man page
    partialmatch - allow interperting options without a leading '-'
    multicommand - consider sub commands when explaining a command with this man page,
        e.g. git -> git commit
    updated - whether this man page was manually updated
    nestedcommand - specifies if positional arguments to this program can start a nested command,
        e.g. sudo, xargs
    '''
    def __init__(self, source, name, synopsis, paragraphs, aliases,
                 partialmatch=False, multicommand=False, updated=False,
                 nestedcommand=False):
        self.source = source
        self.name = name
        self.synopsis = synopsis
        self.paragraphs = paragraphs
        self.aliases = aliases
        self.partialmatch = partialmatch
        self.multicommand = multicommand
        self.updated = updated
        self.nestedcommand = nestedcommand
    def removeoption(self, idx):
        '''demote the option paragraph whose index is idx back to a plain
        (non-option) paragraph; raises ValueError if idx is unknown or
        does not refer to an option'''
        # bugfix: this read 'for i, p in self.paragraphs:', which tries to
        # unpack each paragraph object and raises TypeError on any
        # non-empty page; enumerate() is what was intended
        for i, p in enumerate(self.paragraphs):
            if p.idx == idx:
                if not isinstance(p, option):
                    raise ValueError("paragraph %d isn't an option" % idx)
                self.paragraphs[i] = paragraph(p.idx, p.text, p.section, False)
                return
        raise ValueError('idx %d not found' % idx)
    @property
    def namesection(self):
        '''display string "name(section)", derived from the source filename
        with its .gz suffix stripped'''
        name, section = util.namesection(self.source[:-3])
        return '%s(%s)' % (name, section)
    @property
    def section(self):
        name, section = util.namesection(self.source[:-3])
        return section
    @property
    def options(self):
        '''only the paragraphs that are extracted options'''
        return [p for p in self.paragraphs if isinstance(p, option)]
    @property
    def arguments(self):
        # go over all options and group those sharing the same 'argument'
        # field, preserving first-seen order
        groups = collections.OrderedDict()
        for opt in self.options:
            if opt.argument:
                groups.setdefault(opt.argument, []).append(opt)
        # merge all the paragraphs under the same argument to a single string
        # (.items() instead of the py2-only .iteritems(); works on both)
        for k, l in groups.items():
            groups[k] = '\n\n'.join([p.text for p in l])
        return groups
    @property
    def synopsisnoname(self):
        '''the synopsis with the leading "name - " prefix stripped'''
        return re.match(r'[\w|-]+ - (.*)$', self.synopsis).group(1)
    def find_option(self, flag):
        '''return the option object matching flag exactly, or None'''
        # renamed the loop variable: the original shadowed the option class
        for opt in self.options:
            for o in opt.opts:
                if o == flag:
                    return opt
    def to_store(self):
        return {'source' : self.source, 'name' : self.name, 'synopsis' : self.synopsis,
                'paragraphs' : [p.to_store() for p in self.paragraphs],
                'aliases' : self.aliases, 'partialmatch' : self.partialmatch,
                'multicommand' : self.multicommand, 'updated' : self.updated,
                'nestedcommand' : self.nestedcommand}
    @staticmethod
    def from_store(d):
        paragraphs = []
        for pd in d.get('paragraphs', []):
            pp = paragraph.from_store(pd)
            if pp.is_option == True and 'short' in pd:
                pp = option.from_store(pd)
            paragraphs.append(pp)
        synopsis = d['synopsis']
        if synopsis:
            # NOTE(review): py2 idiom, see paragraph.from_store
            synopsis = synopsis.encode('utf8')
        else:
            synopsis = helpconstants.NOSYNOPSIS
        return manpage(d['source'], d['name'], synopsis, paragraphs,
                       [tuple(x) for x in d['aliases']], d['partialmatch'],
                       d['multicommand'], d['updated'], d.get('nestedcommand'))
    @staticmethod
    def from_store_name_only(name, source):
        '''build a stub manpage carrying just the name/source (no paragraphs)'''
        return manpage(source, name, None, [], [], None, None, None)
    def __repr__(self):
        return '<manpage %r(%s), %d options>' % (self.name, self.section, len(self.options))
class store(object):
    '''read/write processed man pages from mongodb

    we use three collections:
    1) classifier - contains manually tagged paragraphs from man pages
    2) manpage - contains a processed man page
    3) mapping - contains (name, manpageid, score) tuples
    '''
    def __init__(self, db='explainshell', host=config.MONGO_URI):
        logger.info('creating store, db = %r, host = %r', db, host)
        self.connection = pymongo.MongoClient(host)
        self.db = self.connection[db]
        self.classifier = self.db['classifier']
        self.manpage = self.db['manpage']
        self.mapping = self.db['mapping']
    def close(self):
        '''drop the mongodb connection and the collection references'''
        # NOTE(review): disconnect() is deprecated in modern pymongo (use
        # close()); kept for the driver version this project pins
        self.connection.disconnect()
        self.classifier = self.manpage = self.mapping = self.db = None
    def drop(self, confirm=False):
        '''drop the mapping and manpage collections; no-op unless confirm'''
        if not confirm:
            return
        logger.info('dropping mapping, manpage, collections')
        self.mapping.drop()
        self.manpage.drop()
    def trainingset(self):
        '''yield every manually tagged man page (classifier training data)'''
        for d in self.classifier.find():
            yield classifiermanpage.from_store(d)
    def __contains__(self, name):
        '''True if some man page is mapped from name'''
        c = self.mapping.find({'src' : name}).count()
        return c > 0
    def __iter__(self):
        '''iterate over every stored man page, fully deserialized'''
        for d in self.manpage.find():
            yield manpage.from_store(d)
    def findmanpage(self, name):
        '''find a man page by its name, everything following the last dot (.) in name,
        is taken as the section of the man page

        we return the man page found with the highest score, and a list of
        suggestions that also matched the given name (only the first item
        is prepopulated with the option data)'''
        if name.endswith('.gz'):
            # an explicit source filename bypasses the mapping entirely
            logger.info('name ends with .gz, looking up an exact match by source')
            d = self.manpage.find_one({'source':name})
            if not d:
                raise errors.ProgramDoesNotExist(name)
            m = manpage.from_store(d)
            logger.info('returning %s', m)
            return [m]
        section = None
        origname = name
        # don't try to look for a section if it's . (source)
        if name != '.':
            splitted = name.rsplit('.', 1)
            name = splitted[0]
            if len(splitted) > 1:
                section = splitted[1]
        logger.info('looking up manpage in mapping with src %r', name)
        cursor = self.mapping.find({'src' : name})
        count = cursor.count()
        if not count:
            raise errors.ProgramDoesNotExist(name)
        # dst manpage oid -> mapping score
        dsts = dict(((d['dst'], d['score']) for d in cursor))
        cursor = self.manpage.find({'_id' : {'$in' : list(dsts.keys())}}, {'name' : 1, 'source' : 1})
        if cursor.count() != len(dsts):
            logger.error('one of %r mappings is missing in manpage collection '
                         '(%d mappings, %d found)', dsts, len(dsts), cursor.count())
        # highest scoring mapping first
        results = [(d.pop('_id'), manpage.from_store_name_only(**d)) for d in cursor]
        results.sort(key=lambda x: dsts.get(x[0], 0), reverse=True)
        logger.info('got %s', results)
        if section is not None:
            if len(results) > 1:
                # bugfix for py3 compatibility: tuple parameter unpacking in
                # lambdas (lambda (oid, m): ...) was removed by PEP 3113, so
                # unpack via subscription instead
                results.sort(key=lambda r: r[1].section == section, reverse=True)
                logger.info(r'sorting %r so %s is first', results, section)
            if not results[0][1].section == section:
                raise errors.ProgramDoesNotExist(origname)
        results.extend(self._discovermanpagesuggestions(results[0][0], results))
        oid = results[0][0]
        results = [x[1] for x in results]
        # only the best match carries the full option data
        results[0] = manpage.from_store(self.manpage.find_one({'_id' : oid}))
        return results
    def _discovermanpagesuggestions(self, oid, existing):
        '''find suggestions for a given man page

        oid is the objectid of the man page in question,
        existing is a list of (oid, man page) of suggestions that were
        already discovered'''
        skip = set([oid for oid, m in existing])
        cursor = self.mapping.find({'dst' : oid})
        # find all srcs that point to oid
        srcs = [d['src'] for d in cursor]
        # find all dsts of srcs
        suggestionoids = self.mapping.find({'src' : {'$in' : srcs}}, {'dst' : 1})
        # remove already discovered
        suggestionoids = [d['dst'] for d in suggestionoids if d['dst'] not in skip]
        if not suggestionoids:
            return []
        # get just the name and source of found suggestions
        suggestionoids = self.manpage.find({'_id' : {'$in' : suggestionoids}},
                                           {'name' : 1, 'source' : 1})
        return [(d.pop('_id'), manpage.from_store_name_only(**d)) for d in suggestionoids]
    def addmapping(self, src, dst, score):
        '''map the word src to the man page with objectid dst at the given score'''
        self.mapping.insert({'src' : src, 'dst' : dst, 'score' : score})
    def addmanpage(self, m):
        '''add m into the store, if it exists first remove it and its mappings

        each man page may have aliases besides the name determined by its
        basename'''
        d = self.manpage.find_one({'source' : m.source})
        if d:
            logger.info('removing old manpage %s (%s)', m.source, d['_id'])
            self.manpage.remove(d['_id'])
            # remove old mappings if there are any
            c = self.mapping.count()
            self.mapping.remove({'dst' : d['_id']})
            c -= self.mapping.count()
            logger.info('removed %d mappings for manpage %s', c, m.source)
        o = self.manpage.insert(m.to_store())
        for alias, score in m.aliases:
            self.addmapping(alias, o, score)
            logger.info('inserting mapping (alias) %s -> %s (%s) with score %d', alias, m.name, o, score)
        return m
    def updatemanpage(self, m):
        '''update m and add new aliases if necessary

        change updated attribute so we don't overwrite this in the future'''
        logger.info('updating manpage %s', m.source)
        m.updated = True
        self.manpage.update({'source' : m.source}, m.to_store())
        _id = self.manpage.find_one({'source' : m.source}, fields={'_id':1})['_id']
        for alias, score in m.aliases:
            if alias not in self:
                self.addmapping(alias, _id, score)
                logger.info('inserting mapping (alias) %s -> %s (%s) with score %d', alias, m.name, _id, score)
            else:
                logger.debug('mapping (alias) %s -> %s (%s) already exists', alias, m.name, _id)
        return m
    def verify(self):
        '''cross check the mapping and manpage collections;
        returns (ok, names of unreachable man pages, dangling mapping oids)'''
        # check that everything in manpage is reachable
        mappings = list(self.mapping.find())
        reachable = set([m['dst'] for m in mappings])
        manpages = set([m['_id'] for m in self.manpage.find(fields={'_id':1})])
        ok = True
        unreachable = manpages - reachable
        if unreachable:
            logger.error('manpages %r are unreachable (nothing maps to them)', unreachable)
            unreachable = [self.manpage.find_one({'_id' : u})['name'] for u in unreachable]
            ok = False
        notfound = reachable - manpages
        if notfound:
            logger.error('mappings to inexisting manpages: %r', notfound)
            ok = False
        return ok, unreachable, notfound
    def names(self):
        '''yield (objectid, name) for every stored man page'''
        cursor = self.manpage.find(fields={'name':1})
        for d in cursor:
            yield d['_id'], d['name']
    def mappings(self):
        '''yield (src, objectid) for every mapping'''
        cursor = self.mapping.find(fields={'src':1})
        for d in cursor:
            yield d['src'], d['_id']
    def setmulticommand(self, manpageid):
        '''flag the given man page as a multicommand (e.g. git -> git commit)'''
        self.manpage.update({'_id' : manpageid}, {'$set' : {'multicommand' : True}})
| 15,180 | Python | .py | 331 | 36.36858 | 111 | 0.596171 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,113 | fixer.py | idank_explainshell/explainshell/fixer.py | import textwrap, logging
from explainshell import util
class basefixer(object):
    '''Base class for all fixers.

    A fixer hooks into well defined points of the man page
    parsing/classifying/saving pipeline; subclasses override any of the
    hook methods below to adjust the page at that stage.'''
    runbefore = []   # fixer classes that must run after this one
    runlast = False  # if True, this fixer is scheduled after all others
    def __init__(self, mctx):
        self.mctx = mctx
        self.run = True  # a fixer may flip this off to skip itself
        self.logger = logging.getLogger(self.__class__.__name__)
    # pipeline hooks, listed in call order; base implementations do nothing
    def pre_get_raw_manpage(self):
        pass
    def pre_parse_manpage(self):
        pass
    def post_parse_manpage(self):
        pass
    def pre_classify(self):
        pass
    def post_classify(self):
        pass
    def post_option_extraction(self):
        pass
    def pre_add_manpage(self):
        pass
# global registry of fixer classes, filled by @register and topologically
# sorted at module load (see the bottom of this module)
fixerscls = []
# NOTE(review): fixerspriority is never referenced in this module --
# confirm it is used elsewhere before removing
fixerspriority = {}
class runner(object):
    '''The runner coordinates the fixers: it instantiates every registered
    fixer and fans each pipeline hook out to the enabled ones.'''
    def __init__(self, mctx):
        self.mctx = mctx
        self.fixers = [cls(mctx) for cls in fixerscls]
    def disable(self, name):
        '''drop the fixer whose class name is *name* from this run'''
        remaining = [f for f in self.fixers if f.__class__.__name__ != name]
        if len(remaining) == len(self.fixers):
            raise ValueError('fixer %r not found' % name)
        self.fixers = remaining
    def _fixers(self):
        '''the fixers that are still enabled'''
        return (f for f in self.fixers if f.run)
    def _dispatch(self, hookname):
        '''invoke hook *hookname* on every enabled fixer, in order'''
        for f in self._fixers():
            getattr(f, hookname)()
    def pre_get_raw_manpage(self):
        self._dispatch('pre_get_raw_manpage')
    def pre_parse_manpage(self):
        self._dispatch('pre_parse_manpage')
    def post_parse_manpage(self):
        self._dispatch('post_parse_manpage')
    def pre_classify(self):
        self._dispatch('pre_classify')
    def post_classify(self):
        self._dispatch('post_classify')
    def post_option_extraction(self):
        self._dispatch('post_option_extraction')
    def pre_add_manpage(self):
        self._dispatch('pre_add_manpage')
def register(fixercls):
    '''class decorator: add *fixercls* to the global registry and record it
    as a graph parent of every class listed in its runbefore attribute'''
    fixerscls.append(fixercls)
    for dependent in fixercls.runbefore:
        parents = getattr(dependent, '_parents', None)
        if parents is None:
            parents = dependent._parents = []
        parents.append(fixercls)
    return fixercls
@register
class bulletremover(basefixer):
    '''remove list bullets from paragraph start, see mysqlslap.1'''
    def post_parse_manpage(self):
        toremove = []
        for i, p in enumerate(self.mctx.manpage.paragraphs):
            try:
                # '\xc2\xb7' is the UTF-8 byte sequence of U+00B7 (middle
                # dot); the text is a byte string at this stage (py2)
                idx = p.text.index('\xc2\xb7')
                # splice the two-byte bullet out of the text
                p.text = p.text[:idx] + p.text[idx+2:]
                if not p.text.strip():
                    # nothing left but the bullet -- drop the paragraph
                    toremove.append(i)
            except ValueError:
                # no bullet in this paragraph
                pass
        # delete from the end so the collected indices remain valid
        for i in reversed(toremove):
            del self.mctx.manpage.paragraphs[i]
@register
class leadingspaceremover(basefixer):
    '''normalize option paragraphs by removing their common leading
    whitespace and any trailing whitespace'''
    def post_option_extraction(self):
        for i, para in enumerate(self.mctx.manpage.options):
            para.text = self._removewhitespace(para.text)
    def _removewhitespace(self, text):
        '''
        >>> f = leadingspaceremover(None)
        >>> f._removewhitespace(' a\\n b ')
        'a\\n b'
        >>> f._removewhitespace('\\t a\\n\\t \\tb')
        'a\\n\\tb'
        '''
        return textwrap.dedent(text).rstrip()
@register
class tarfixer(basefixer):
    '''mark the tar man page as allowing partial matches, since tar accepts
    options without a leading dash'''
    def __init__(self, *args):
        super(tarfixer, self).__init__(*args)
        # only relevant when processing tar itself
        self.run = (self.mctx.name == 'tar')
    def pre_add_manpage(self):
        self.mctx.manpage.partialmatch = True
class paragraphjoiner(basefixer):
runbefore = [leadingspaceremover]
maxdistance = 5
def post_option_extraction(self):
options = [p for p in self.mctx.manpage.paragraphs if p.is_option]
self._join(self.mctx.manpage.paragraphs, options)
def _join(self, paragraphs, options):
def _paragraphsbetween(op1, op2):
assert op1.idx < op2.idx
r = []
start = None
for i, p in enumerate(paragraphs):
if op1.idx < p.idx < op2.idx:
if not r:
start = i
r.append(p)
return r, start
totalmerged = 0
for curr, next in util.pairwise(options):
between, start = _paragraphsbetween(curr, next)
if curr.section == next.section and 1 <= len(between) < self.maxdistance:
self.logger.info('merging paragraphs %d through %d (inclusive)', curr.idx, next.idx-1)
newdesc = [curr.text.rstrip()]
newdesc.extend([p.text.rstrip() for p in between])
curr.text = '\n\n'.join(newdesc)
del paragraphs[start:start+len(between)]
totalmerged += len(between)
return totalmerged
@register
class optiontrimmer(basefixer):
    '''restrict the classified options of known-problematic man pages to a
    hard-coded range of paragraph indices'''
    # name -> (start, end) paragraph index range to keep; end == -1 means
    # "up to the last classified option"
    d = {'git-rebase' : (50, -1)}
    def __init__(self, mctx):
        super(optiontrimmer, self).__init__(mctx)
        self.run = self.mctx.name in self.d
    def post_classify(self):
        start, end = self.d[self.mctx.name]
        classifiedoptions = [p for p in self.mctx.manpage.paragraphs if p.is_option]
        assert classifiedoptions
        if end == -1:
            end = classifiedoptions[-1].idx
        else:
            # bugfix: this sanity check was inverted (asserted start > end);
            # an explicit range passing that check could never keep any
            # option since the filter below requires start <= idx <= end
            assert start <= end, (start, end)
        for p in classifiedoptions:
            if not (start <= p.idx <= end):
                p.is_option = False
                self.logger.info('removing option %r', p)
def _parents(fixercls):
p = getattr(fixercls, '_parents', [])
last = fixercls.runlast
if last and p:
raise ValueError("%s can't be last and also run before someone else" % fixercls.__name__)
if last:
return [f for f in fixerscls if f is not fixercls]
return p
# order the registry so the dependency constraints expressed through
# runbefore/runlast (exposed as graph parents by _parents) are honored
fixerscls = util.toposorted(fixerscls, _parents)
| 6,002 | Python | .py | 164 | 27.902439 | 102 | 0.59403 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,114 | matcher.py | idank_explainshell/explainshell/matcher.py | import collections, logging, itertools
import bashlex.parser
import bashlex.ast
from explainshell import errors, util, helpconstants
class matchgroup(object):
    '''a named container of matchresults; all shell-syntax results live in
    one group and every command gets a group of its own'''
    def __init__(self, name):
        self.results = []
        self.name = name
    def __repr__(self):
        details = (self.name, len(self.results))
        return '<matchgroup %r with %d results>' % details
class matchresult(collections.namedtuple('matchresult', 'start end text match')):
    '''a single span [start, end) of the command line, with the help text
    attached to it (text is None when nothing matched)'''
    @property
    def unknown(self):
        '''True when no help text could be found for this span'''
        return self.text is None

# records that a word expansion of the given kind occurred at [start, end)
matchwordexpansion = collections.namedtuple('matchwordexpansion',
                                            'start end kind')

logger = logging.getLogger(__name__)
class matcher(bashlex.ast.nodevisitor):
'''parse a command line and return a list of matchresults describing
each token.
'''
    def __init__(self, s, store):
        '''s - the command line to explain
        store - a store.store used to look up man pages'''
        # work on a byte string; unmappable characters are replaced
        # (py2 idiom -- under py3 this would produce bytes, not str)
        self.s = s.encode('latin1', 'replace')
        self.store = store
        self._prevoption = self._currentoption = None
        # all results start out in a single group for shell syntax
        self.groups = [matchgroup('shell')]
        # a list of matchwordexpansions where expansions happened during word
        # expansion
        self.expansions = []
        # a stack to manage nested command groups: whenever a new simple
        # command is started, we push a tuple with:
        # - the node that started this group. this is used to find it when
        #   a command ends (see visitnodeend)
        # - its matchgroup. new matchresults will be added to it.
        # - a word used to end the top-most command. this is used when a flag
        #   starts a new command, e.g. find -exec.
        self.groupstack = [(None, self.groups[-1], None)]
        # keep a stack of the currently visited compound command (if/for..)
        # to provide context when matching reserved words, since for example
        # the keyword 'done' can appear in a for, while..
        self.compoundstack = []
        # a set of functions defined in the current input, we will try to match
        # commands against them so if one refers to defined function, it won't
        # show up as unknown or be taken from the db
        self.functions = set()
    def _generatecommandgroupname(self):
        '''generate a unique group name (command0, command1, ..)'''
        existing = len([g for g in self.groups if g.name.startswith('command')])
        return 'command%d' % existing
    @property
    def matches(self):
        '''return the list of results from the most recently created group'''
        return self.groupstack[-1][1].results
    @property
    def allmatches(self):
        '''every result from every group, in group creation order'''
        return list(itertools.chain.from_iterable(g.results for g in self.groups))
    @property
    def manpage(self):
        '''the man page of the group at the top of the stack, if any'''
        group = self.groupstack[-1][1]
        # we do not have a manpage if the top of the stack is the shell group.
        # this can happen if the first argument is a command substitution
        # and we're not treating it as a "man page not found"
        if group.name != 'shell':
            return group.manpage
    def find_option(self, opt):
        '''look up opt in the current man page, remembering it as the
        currently processed option'''
        self._currentoption = self.manpage.find_option(opt)
        logger.debug('looking up option %r, got %r', opt, self._currentoption)
        return self._currentoption
    def findmanpages(self, prog):
        '''query the store for prog; raises errors.ProgramDoesNotExist when
        nothing matches'''
        prog = prog.decode('latin1')
        logger.info('looking up %r in store', prog)
        manpages = self.store.findmanpage(prog)
        logger.info('found %r in store, got: %r, using %r', prog, manpages, manpages[0])
        return manpages
    def unknown(self, token, start, end):
        '''produce a matchresult with no help text for an unexplained token'''
        logger.debug('nothing to do with token %r', token)
        return matchresult(start, end, None, None)
    def visitreservedword(self, node, word):
        '''attach help to a reserved word (if/then/done/..); the result goes
        into the shell group since this is shell syntax, not an argument'''
        # first try the compound reserved words
        helptext = None
        if self.compoundstack:
            currentcompound = self.compoundstack[-1]
            helptext = helpconstants.COMPOUNDRESERVEDWORDS.get(currentcompound, {}).get(word)
        # try these if we don't have anything specific
        if not helptext:
            helptext = helpconstants.RESERVEDWORDS[word]
        self.groups[0].results.append(matchresult(node.pos[0], node.pos[1], helptext, None))
    def visitoperator(self, node, op):
        '''attach help to an operator, preferring help specific to the
        enclosing compound command when inside one'''
        helptext = None
        if self.compoundstack:
            currentcompound = self.compoundstack[-1]
            helptext = helpconstants.COMPOUNDRESERVEDWORDS.get(currentcompound, {}).get(op)
        if not helptext:
            helptext = helpconstants.OPERATORS[op]
        self.groups[0].results.append(matchresult(node.pos[0], node.pos[1], helptext, None))
    def visitpipe(self, node, pipe):
        '''attach the generic pipeline help text to a pipe token'''
        self.groups[0].results.append(
            matchresult(node.pos[0], node.pos[1], helpconstants.PIPELINES, None))
    def visitredirect(self, node, input, type, output, heredoc):
        '''explain a redirection: the generic redirection text is always
        shown, followed by operator-specific help when available'''
        helptext = [helpconstants.REDIRECTION]
        # '>&' followed by a number is fd duplication; show the help of a
        # plain '>' rather than the redirect-output-and-error variant
        if type == '>&' and isinstance(output, int):
            type = type[:-1]
        if type in helpconstants.REDIRECTION_KIND:
            helptext.append(helpconstants.REDIRECTION_KIND[type])
        self.groups[0].results.append(
            matchresult(node.pos[0], node.pos[1], '\n\n'.join(helptext), None))
        # the output might contain a wordnode, visiting it will confuse the
        # matcher who'll think it's an argument, instead visit the expansions
        # directly, if we have any
        if isinstance(output, bashlex.ast.node):
            for part in output.parts:
                self.visit(part)
        # don't visit our children; we handled the interesting parts above
        return False
    def visitcommand(self, node, parts):
        '''handle a simple command: if its first word names a function
        defined earlier in the input, mark the call and its arguments;
        otherwise start a new command group and match against a man page'''
        assert parts
        # look for the first WordNode, which might not be at parts[0]
        idxwordnode = bashlex.ast.findfirstkind(parts, 'word')
        if idxwordnode == -1:
            logger.info('no words found in command (probably contains only redirects)')
            return
        wordnode = parts[idxwordnode]
        # check if this refers to a previously defined function
        if wordnode.word in self.functions:
            logger.info('word %r is a function, not trying to match it or its '
                        'arguments', wordnode)
            # first, add a matchresult for the function call
            mr = matchresult(wordnode.pos[0], wordnode.pos[1],
                             helpconstants._functioncall % wordnode.word, None)
            self.matches.append(mr)
            # this is a bit nasty: if we were to visit the command like we
            # normally do it would try to match it against a manpage. but
            # we don't have one here, we just want to take all the words and
            # consider them part of the function call
            for part in parts:
                # maybe it's a redirect...
                if part.kind != 'word':
                    self.visit(part)
                else:
                    # this is an argument to the function
                    if part is not wordnode:
                        mr = matchresult(part.pos[0], part.pos[1],
                                         helpconstants._functionarg % wordnode.word,
                                         None)
                        self.matches.append(mr)
                    # visit any expansions in there
                    for ppart in part.parts:
                        self.visit(ppart)
            # we're done with this commandnode, don't visit its children
            return False
        self.startcommand(node, parts, None)
    def visitif(self, *args):
        # push context so reserved words (then/elif/fi..) get if-specific help
        self.compoundstack.append('if')
    def visitfor(self, node, parts):
        '''handle a for loop: its words are explained as part of the for
        construct itself rather than as command arguments'''
        self.compoundstack.append('for')
        for part in parts:
            # don't visit words since they're not part of the current command,
            # instead consider them part of the for construct
            if part.kind == 'word':
                mr = matchresult(part.pos[0], part.pos[1], helpconstants._for, None)
                self.groups[0].results.append(mr)
                # but we do want to visit expansions
                for ppart in part.parts:
                    self.visit(ppart)
            else:
                self.visit(part)
        # children were handled above
        return False
    def visitwhile(self, *args):
        self.compoundstack.append('while')
    def visituntil(self, *args):
        self.compoundstack.append('until')
    def visitnodeend(self, node):
        '''called when a node's subtree has been fully visited; pops the
        command group / compound stacks pushed by the visit* methods'''
        if node.kind == 'command':
            # it's possible for visitcommand/end to be called without a command
            # group being pushed if it contains only redirect nodes
            if len(self.groupstack) > 1:
                logger.info('visitnodeend %r, groups %d', node, len(self.groupstack))
                while self.groupstack[-1][0] is not node:
                    logger.info('popping groups that are a result of nested commands')
                    self.endcommand()
                self.endcommand()
        elif node.kind in ('if', 'for', 'while', 'until'):
            # must match the kind pushed by the corresponding visit* method
            kind = self.compoundstack.pop()
            assert kind == node.kind
def startcommand(self, commandnode, parts, endword, addgroup=True):
logger.info('startcommand commandnode=%r parts=%r, endword=%r, addgroup=%s',
commandnode, parts, endword, addgroup)
idxwordnode = bashlex.ast.findfirstkind(parts, 'word')
assert idxwordnode != -1
wordnode = parts[idxwordnode]
if wordnode.parts:
logger.info('node %r has parts (it was expanded), no point in looking'
' up a manpage for it', wordnode)
if addgroup:
mg = matchgroup(self._generatecommandgroupname())
mg.manpage = None
mg.suggestions = None
self.groups.append(mg)
self.groupstack.append((commandnode, mg, endword))
return False
startpos, endpos = wordnode.pos
try:
mps = self.findmanpages(wordnode.word)
# we consume this node here, pop it from parts so we
# don't visit it again as an argument
parts.pop(idxwordnode)
except errors.ProgramDoesNotExist, e:
if addgroup:
# add a group for this command, we'll mark it as unknown
# when visitword is called
logger.info('no manpage found for %r, adding a group for it',
wordnode.word)
mg = matchgroup(self._generatecommandgroupname())
mg.error = e
mg.manpage = None
mg.suggestions = None
self.groups.append(mg)
self.groupstack.append((commandnode, mg, endword))
return False
manpage = mps[0]
idxnextwordnode = bashlex.ast.findfirstkind(parts, 'word')
# check the next word for a possible multicommand if:
# - the matched manpage says so
# - we have another word node
# - the word node has no expansions in it
if manpage.multicommand and idxnextwordnode != -1 and not parts[idxnextwordnode].parts:
nextwordnode = parts[idxnextwordnode]
try:
multi = '%s %s' % (wordnode.word, nextwordnode.word)
logger.info('%r is a multicommand, trying to get another token and look up %r', manpage, multi)
mps = self.findmanpages(multi)
manpage = mps[0]
# we consume this node here, pop it from parts so we
# don't visit it again as an argument
parts.pop(idxnextwordnode)
endpos = nextwordnode.pos[1]
except errors.ProgramDoesNotExist:
logger.info('no manpage %r for multicommand %r', multi, manpage)
# create a new matchgroup for the current command
mg = matchgroup(self._generatecommandgroupname())
mg.manpage = manpage
mg.suggestions = mps[1:]
self.groups.append(mg)
self.groupstack.append((commandnode, mg, endword))
self.matches.append(matchresult(startpos, endpos,
manpage.synopsis or helpconstants.NOSYNOPSIS, None))
return True
def endcommand(self):
'''end the most recently created command group by popping it from the
group stack. groups are created by visitcommand or a nested command'''
assert len(self.groupstack) >= 2, 'groupstack must contain shell and command groups'
g = self.groupstack.pop()
logger.info('ending group %s', g)
def visitcommandsubstitution(self, node, command):
kind = self.s[node.pos[0]]
substart = 2 if kind == '$' else 1
# start the expansion after the $( or `
self.expansions.append(matchwordexpansion(node.pos[0] + substart,
node.pos[1] - 1,
'substitution'))
# do not try to match the child nodes
return False
    def visitprocesssubstitution(self, node, command):
        '''record a process substitution expansion'''
        # don't include opening <( and closing )
        self.expansions.append(matchwordexpansion(node.pos[0] + 2,
                                                  node.pos[1] - 1,
                                                  'substitution'))
        # do not try to match the child nodes
        return False
def visitassignment(self, node, word):
helptext = helpconstants.ASSIGNMENT
self.groups[0].results.append(matchresult(node.pos[0], node.pos[1], helptext, None))
    def visitword(self, node, word):
        '''match a single word token against the current manpage: exact
        option match, fuzzy-split short options, argument of a previous
        option, end-of-nested-command marker, or unknown'''
        def attemptfuzzy(chars):
            # try to explain a run of short options (e.g. '-lsh' or 'xvf')
            # by matching each character (or '-X' pair) as its own option
            m = []
            if chars[0] == '-':
                tokens = [chars[0:2]] + list(chars[2:])
                considerarg = True
            else:
                tokens = list(chars)
                considerarg = False
            pos = node.pos[0]
            prevoption = None
            for i, t in enumerate(tokens):
                op = t if t[0] == '-' else '-' + t
                option = self.find_option(op)
                if option:
                    if considerarg and not m and option.expectsarg:
                        logger.info('option %r expected an arg, taking the rest too', option)
                        # reset the current option if we already took an argument,
                        # this prevents the next word node to also consider itself
                        # as an argument
                        self._currentoption = None
                        return [matchresult(pos, pos+len(chars), option.text, None)]
                    mr = matchresult(pos, pos+len(t), option.text, None)
                    m.append(mr)
                # if the previous option expected an argument and we couldn't
                # match the current token, take the rest as its argument, this
                # covers a series of short options where the last one has an argument
                # with no space between it, such as 'xargs -r0n1'
                elif considerarg and prevoption and prevoption.expectsarg:
                    pmr = m[-1]
                    mr = matchresult(pmr.start, pmr.end+(len(tokens)-i), pmr.text, None)
                    m[-1] = mr
                    # reset the current option if we already took an argument,
                    # this prevents the next word node to also consider itself
                    # as an argument
                    self._currentoption = None
                    break
                else:
                    m.append(self.unknown(t, pos, pos+len(t)))
                pos += len(t)
                prevoption = option
            return m
        def _visitword(node, word):
            if not self.manpage:
                # no manpage for the current command group, nothing to match
                logger.info('inside an unknown command, giving up on %r', word)
                self.matches.append(self.unknown(word, node.pos[0], node.pos[1]))
                return
            logger.info('trying to match token: %r', word)
            self._prevoption = self._currentoption
            if word.startswith('--'):
                # long options may carry their argument after '='; match only
                # the option part
                word = word.split('=', 1)[0]
            option = self.find_option(word)
            if option:
                logger.info('found an exact match for %r: %r', word, option)
                mr = matchresult(node.pos[0], node.pos[1], option.text, None)
                self.matches.append(mr)
                # check if we splitted the word just above, if we did then reset
                # the current option so the next word doesn't consider itself
                # an argument
                if word != node.word:
                    self._currentoption = None
            else:
                word = node.word
                # check if we're inside a nested command and this word marks the end
                if isinstance(self.groupstack[-1][-1], list) and word in self.groupstack[-1][-1]:
                    logger.info('token %r ends current nested command', word)
                    self.endcommand()
                    mr = matchresult(node.pos[0], node.pos[1], self.matches[-1].text, None)
                    self.matches.append(mr)
                elif word != '-' and word.startswith('-') and not word.startswith('--'):
                    logger.debug('looks like a short option')
                    if len(word) > 2:
                        logger.info("trying to split it up")
                        self.matches.extend(attemptfuzzy(word))
                    else:
                        self.matches.append(self.unknown(word, node.pos[0], node.pos[1]))
                elif self._prevoption and self._prevoption.expectsarg:
                    logger.info("previous option possibly expected an arg, and we can't"
                                " find an option to match the current token, assuming it's an arg")
                    ea = self._prevoption.expectsarg
                    possibleargs = ea if isinstance(ea, list) else []
                    take = True
                    if possibleargs and word not in possibleargs:
                        take = False
                        logger.info('token %r not in list of possible args %r for %r',
                                    word, possibleargs, self._prevoption)
                    if take:
                        if self._prevoption.nestedcommand:
                            # e.g. 'sudo -u user ls': the arg starts a new command
                            logger.info('option %r can nest commands', self._prevoption)
                            if self.startcommand(None, [node], self._prevoption.nestedcommand, addgroup=False):
                                self._currentoption = None
                                return
                        # extend the previous option's matchresult to cover
                        # this argument token
                        pmr = self.matches[-1]
                        mr = matchresult(pmr.start, node.pos[1], pmr.text, None)
                        self.matches[-1] = mr
                    else:
                        self.matches.append(self.unknown(word, node.pos[0], node.pos[1]))
                else:
                    if self.manpage.partialmatch:
                        logger.info('attemping to do a partial match')
                        m = attemptfuzzy(word)
                        if not any(mm.unknown for mm in m):
                            logger.info('found a match for everything, taking it')
                            self.matches.extend(m)
                            return
                    if self.manpage.arguments:
                        if self.manpage.nestedcommand:
                            # e.g. 'sudo ls': the positional arg is a command
                            logger.info('manpage %r can nest commands', self.manpage)
                            if self.startcommand(None, [node], self.manpage.nestedcommand, addgroup=False):
                                self._currentoption = None
                                return
                        d = self.manpage.arguments
                        k = list(d.keys())[0]
                        logger.info('got arguments, using %r', k)
                        text = d[k]
                        mr = matchresult(node.pos[0], node.pos[1], text, None)
                        self.matches.append(mr)
                        return
                    # if all of that failed, we can't explain it so mark it unknown
                    self.matches.append(self.unknown(word, node.pos[0], node.pos[1]))
        _visitword(node, word)
    def visitfunction(self, node, name, body, parts):
        '''explain a function definition; the name is remembered so later
        calls to it can be recognized (self.functions), and the body is
        visited normally'''
        self.functions.add(name.word)
        def _iscompoundopenclosecurly(compound):
            # True when the compound body is delimited by literal { }
            first, last = compound.list[0], compound.list[-1]
            if (first.kind == 'reservedword' and last.kind == 'reservedword' and
                first.word == '{' and last.word == '}'):
                return True
        # if the compound command we have there is { }, let's include the
        # {} as part of the function declaration. normally it would be
        # treated as a group command, but that seems less informative in this
        # context
        if _iscompoundopenclosecurly(body):
            # create a matchresult until after the first {
            mr = matchresult(node.pos[0], body.list[0].pos[1],
                             helpconstants._function, None)
            self.groups[0].results.append(mr)
            # create a matchresult for the closing }
            mr = matchresult(body.list[-1].pos[0], body.list[-1].pos[1],
                             helpconstants._function, None)
            self.groups[0].results.append(mr)
            # visit anything in between the { }
            for part in body.list[1:-1]:
                self.visit(part)
        else:
            beforebody = bashlex.ast.findfirstkind(parts, 'compound') - 1
            assert beforebody > 0
            beforebody = parts[beforebody]
            # create a matchresult ending at the node before body
            mr = matchresult(node.pos[0], beforebody.pos[1],
                             helpconstants._function, None)
            self.groups[0].results.append(mr)
            self.visit(body)
        return False
    def visittilde(self, node, value):
        # record a tilde expansion (~ or ~user) at its exact span
        self.expansions.append(matchwordexpansion(node.pos[0], node.pos[1],
                                                  'tilde'))
def visitparameter(self, node, value):
try:
int(value)
kind = 'digits'
except ValueError:
kind = helpconstants.parameters.get(value, 'param')
self.expansions.append(matchwordexpansion(node.pos[0], node.pos[1],
'parameter-%s' % kind))
    def match(self):
        '''parse self.s, visit the resulting AST to collect matches, then
        post-process: mark unparsed leftovers, merge adjacent results and
        attach the matched text portion to every matchresult; returns the
        list of matchgroups'''
        logger.info('matching string %r', self.s)
        # limit recursive parsing to a depth of 1
        self.ast = bashlex.parser.parsesingle(self.s, expansionlimit=1,
                                              strictmode=False)
        if self.ast:
            self.visit(self.ast)
            assert len(self.groupstack) == 1, 'groupstack should contain only shell group after matching'
            # if we only have one command in there and no shell results/expansions,
            # reraise the original exception
            if (len(self.groups) == 2 and not self.groups[0].results and
                self.groups[1].manpage is None and not self.expansions):
                raise self.groups[1].error
        else:
            logger.warn('no AST generated for %r', self.s)
        def debugmatch():
            # printable summary of all matches, used only for debug logging
            s = '\n'.join(['%d) %r = %r' % (i, self.s[m.start:m.end], m.text) for i, m in enumerate(self.allmatches)])
            return s
        self._markunparsedunknown()
        # fix each matchgroup separately
        for group in self.groups:
            if group.results:
                if getattr(group, 'manpage', None):
                    # ensure that the program part isn't unknown (i.e. it has
                    # something as its synopsis)
                    assert not group.results[0].unknown
                group.results = self._mergeadjacent(group.results)
                # add matchresult.match to existing matches
                for i, m in enumerate(group.results):
                    assert m.end <= len(self.s), '%d %d' % (m.end, len(self.s))
                    portion = self.s[m.start:m.end].decode('latin1')
                    group.results[i] = matchresult(m.start, m.end, m.text, portion)
        logger.debug('%r matches:\n%s', self.s, debugmatch())
        # not strictly needed, but doesn't hurt
        self.expansions.sort()
        return self.groups
    def _markunparsedunknown(self):
        '''the parser may leave a remainder at the end of the string if it doesn't
        match any of the rules, mark them as unknowns'''
        # one flag per character of the input: True means covered by a match
        parsed = [False]*len(self.s)
        # go over all existing matches to see if we've covered the
        # current position
        for start, end, _, _ in self.allmatches:
            for i in range(start, end):
                parsed[i] = True
        for i in range(len(parsed)):
            c = self.s[i]
            # whitespace is always 'unparsed'
            if c.isspace():
                parsed[i] = True
            # the parser ignores comments but we can use a trick to see if this
            # starts a comment and is beyond the ending index of the parsed
            # portion of the input
            if (not self.ast or i > self.ast.pos[1]) and c == '#':
                # everything from here to the end is one comment match
                comment = matchresult(i, len(parsed), helpconstants.COMMENT, None)
                self.groups[0].results.append(comment)
                break
            if not parsed[i]:
                # add unparsed results to the 'shell' group
                self.groups[0].results.append(self.unknown(c, i, i+1))
        # there are no overlaps, so sorting by the start is enough
        self.groups[0].results.sort(key=lambda mr: mr.start)
def _resultindex(self):
'''return a mapping of matchresults to their index among all
matches, sorted by the start position of the matchresult'''
d = {}
i = 0
for result in sorted(self.allmatches, key=lambda mr: mr.start):
d[result] = i
i += 1
return d
    def _mergeadjacent(self, matches):
        '''merge consecutive matchresults that share the same help text into
        a single matchresult spanning all of them'''
        merged = []
        resultindex = self._resultindex()
        # group runs of matches that have identical help text
        sametext = itertools.groupby(matches, lambda m: m.text)
        for text, ll in sametext:
            # within a same-text run, only merge matches whose global indices
            # are continuous (i.e. truly adjacent in the overall ordering)
            for l in util.groupcontinuous(ll, key=lambda m: resultindex[m]):
                if len(l) == 1:
                    merged.append(l[0])
                else:
                    start = l[0].start
                    end = l[-1].end
                    endindex = resultindex[l[-1]]
                    # replace the individual entries with the merged one,
                    # keeping the index mapping consistent
                    for mr in l:
                        del resultindex[mr]
                    merged.append(matchresult(start, end, text, None))
                    resultindex[merged[-1]] = endindex
        return merged
| 26,896 | Python | .py | 526 | 36.555133 | 118 | 0.560862 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,115 | manpage.py | idank_explainshell/explainshell/manpage.py | import os, subprocess, re, logging, collections, urllib
from explainshell import config, store, errors
# sink for subprocess stderr; kept open for the lifetime of the process
devnull = open(os.devnull, 'w')
# splits a lexgrog synopsis line of the form '<name> - <description>'
SPLITSYNOP = re.compile(r'([^ ]+) - (.*)$')
# environment for the man/w3m subprocesses: disable hyphenation, keep bold
# and underline formatting, fix the output width and locale
ENV = dict(os.environ)
ENV["W3MMAN_MAN"] = "man --no-hyphenation"
ENV["MAN_KEEP_FORMATTING"] = "1"
ENV["MANWIDTH"] = "115"
ENV["LC_ALL"] = "en_US.UTF-8"
logger = logging.getLogger(__name__)
def extractname(gzname):
    '''return the name of the program from a manpage file name, stripping
    the section and compression suffixes

    >>> extractname('ab.1.gz')
    'ab'
    >>> extractname('ab.1.1.gz')
    'ab.1'
    >>> extractname('ab.1xyz.gz')
    'ab'
    >>> extractname('ab.1.1xyz.gz')
    'ab.1'
    >>> extractname('a/b/c/ab.1.1xyz.gz')
    'ab.1'
    '''
    # basename is a no-op for plain file names, so paths need no special case
    base = os.path.basename(gzname)
    return base.rsplit('.', 2)[0]
def bold(l):
    '''split l into the text wrapped in <b></b> tags and the text outside
    them, dropping whitespace-only pieces from the latter

    >>> bold('a')
    ([], ['a'])
    >>> bold('<b>a</b>')
    (['a'], [])
    >>> bold('a<b>b</b>c')
    (['b'], ['a', 'c'])
    >>> bold('<b>first</b> <b>second:</b>')
    (['first', 'second:'], [])
    '''
    tagged = re.compile(r'<b>([^<]+)</b>')
    spans = [m.span(0) for m in tagged.finditer(l)]
    # everything between (and around) the tagged spans is 'outside'
    outside = []
    cursor = 0
    for start, end in spans:
        outside.append(l[cursor:start])
        cursor = end
    outside.append(l[cursor:])
    inside = [l[s:e].replace('<b>', '').replace('</b>', '') for s, e in spans]
    outside = [piece for piece in outside if piece and not piece.isspace()]
    return inside, outside
# w3mman2html.cgi (the tool we're using to output html from a man page) does
# some strange escaping which causes it to output invalid utf8. we look these
# up and fix them manually
# each entry is (broken byte sequence, replacement or None meaning 'drop the
# first two bytes', whether a variant with an embedded </u> tag also occurs)
_replacementsprefix = [
    ('\xe2\x80\xe2\x80\x98', None, True), # left single quote
    ('\xe2\x80\xe2\x80\x99', None, True), # right single quote
    ('\xe2\x80\xe2\x80\x9c', None, True), # left double quote
    ('\xe2\x80\xe2\x80\x9d', None, True), # right double quote
    ('\xe2\x94\xe2\x94\x82', '|', False), # pipe
    ('\xe2\x8e\xe2\x8e\xaa', None, False), # pipe 2
    ('\xe2\x80\xe2\x80\x90', None, True), # hyphen
    ('\xe2\x80\xe2\x80\x94', None, True), # hyphen 2
    ('\xe2\x80\xc2\xbd', None, True), # half
    ('\xe2\x88\xe2\x88\x97', None, True), # asterisk
    ('\xe2\x86\xe2\x86\x92', None, True), # right arrow
    ('\xe2\x88\xe2\x88\x92', None, True), # minus sign
    ('\xe2\x80\xe2\x80\x93', None, True), # en dash
    ('\xe2\x80\xe2\x80\xb2', None, False), # prime
    ('\xe2\x88\xe2\x88\xbc', None, False), # tilde operator
    ('\xe2\x86\xe2\x86\xb5', None, False), # downwards arrow with corner leftwards
    ('\xef\xbf\xef\xbf\xbd', None, False) # replacement char
]
# expand the table above into plain (searchfor, replacewith) pairs, adding a
# variant with a '</u>' tag inserted after the first byte where requested
_replacements = []
for searchfor, replacewith, underline in _replacementsprefix:
    if replacewith is None:
        replacewith = searchfor[2:]
    _replacements.append((searchfor, replacewith))
    if underline:
        x = list(replacewith)
        x.insert(1, '</u>')
        x = ''.join(x)
        _replacements.append((x, '%s</u>' % replacewith))
# sequences that only occur with an embedded '</u>' after their first byte
_replacementsnoprefix = ['\xc2\xb7', # bullet
                         '\xc2\xb4', # apostrophe
                         '\xc2\xa0', # no break space
                         '\xc3\xb8', '\xe4\xbd\xa0', '\xe5\xa5\xbd', # gibberish
                         '\xc2\xa7', # section sign
                         '\xef\xbf\xbd', # replacement char
                         '\xc2\xa4', # latin small letter a with diaeresis
                         '\xc3\xa4', # latin small letter a with diaeresis
                         '\xc4\xa4', # latin small letter a with diaeresis
                         '\xc3\xaa', # latin small letter e with circumflex
                        ]
for s in _replacementsnoprefix:
    x = list(s)
    x.insert(1, '</u>')
    x = ''.join(x)
    _replacements.append((x, '%s</u>' % s))
# links to other manpages in the w3m output, rewritten in _parsetext
_href = re.compile(r'<a href="file:///[^\?]*\?([^\(]*)\(([^\)]*)\)">')
# a line that is entirely bold marks a section heading
_section = re.compile(r'<b>([^<]+)</b>')
def _parsetext(lines):
    '''parse the html lines of a manpage, yielding store.paragraph objects
    and tracking the current section heading as it goes'''
    paragraphlines = []
    section = None
    i = 0  # running paragraph index
    for l in lines:
        # rewrite local manpage links to point at manpages.ubuntu.com
        l = re.sub(_href, r'<a href="http://manpages.ubuntu.com/manpages/precise/en/man\2/\1.\2.html">', l)
        # fix the known invalid utf8 sequences w3mman2html.cgi produces
        for lookfor, replacewith in _replacements:
            l = re.sub(lookfor, replacewith, l)
        # confirm the line is valid utf8
        lreplaced = l.decode('utf8', 'ignore').encode('utf8')
        if lreplaced != l:
            # NOTE(review): this logs and then aborts the whole parse by
            # raising; the assignment to l just before is never used
            logger.error('line %r contains invalid utf8', l)
            l = lreplaced
            raise ValueError
        if l.startswith('<b>'): # section
            section = re.sub(_section, r'\1', l)
        else:
            # an indented line made only of bold words ending with ':' is
            # also treated as a section heading
            foundsection = False
            if l.strip().startswith('<b>'):
                inside, outside = bold(l.strip())
                if not outside and inside[-1][-1] == ':':
                    foundsection = True
                    section = ' '.join(inside)[:-1]
            if not foundsection:
                # a blank line terminates the current paragraph
                if not l.strip() and paragraphlines:
                    yield store.paragraph(i, '\n'.join(paragraphlines), section, False)
                    i += 1
                    paragraphlines = []
                elif l.strip():
                    paragraphlines.append(l)
    # flush the trailing paragraph, if any
    if paragraphlines:
        yield store.paragraph(i, '\n'.join(paragraphlines), section, False)
def _parsesynopsis(base, synopsis):
'''
>>> _parsesynopsis('/a/b/c', '/a/b/c: "p-r+o++g - foo bar."')
('p-r+o++g', 'foo bar')
'''
synopsis = synopsis[len(base)+3:-1]
if synopsis[-1] == '.':
synopsis = synopsis[:-1]
return SPLITSYNOP.match(synopsis).groups()
class manpage(object):
    '''read the man page at path by executing w3mman2html.cgi and find its
    synopsis with lexgrog

    since some man pages share the same name (different versions), each
    alias of a man page has a score that's determined in this simple fashion:
    - name of man page source file is given a score of 10
    - all other names found for a particular man page are given a score of 1
      (other names are found by scanning the output of lexgrog)
    '''
    def __init__(self, path):
        self.path = path                              # path to the .gz manpage file
        self.shortpath = os.path.basename(self.path)  # file name only
        self.name = extractname(self.path)            # program name without section/.gz
        self.aliases = set([self.name])               # names this page is known by
        self.synopsis = None                          # filled by read()/parse()
        self.paragraphs = None                        # filled by parse()
        self._text = None                             # raw html output of w3mman2html
    def read(self):
        '''Read the content from a local manpage file and store it in usable formats
        on the class instance.'''
        cmd = [config.MAN2HTML, urllib.urlencode({'local' : os.path.abspath(self.path)})]
        logger.info('executing %r', ' '.join(cmd))
        self._text = subprocess.check_output(cmd, stderr=devnull, env=ENV)
        try:
            self.synopsis = subprocess.check_output(['lexgrog', self.path], stderr=devnull).rstrip()
        except subprocess.CalledProcessError:
            # missing synopsis is not fatal; parse() checks for None
            logger.error('failed to extract synopsis for %s', self.name)
    def parse(self):
        '''parse the html read by read() into paragraphs, extract aliases
        from the synopsis and assign each alias its score'''
        # the first 7 and last 3 lines are w3m boilerplate, skip them
        self.paragraphs = list(_parsetext(self._text.splitlines()[7:-3]))
        if not self.paragraphs:
            raise errors.EmptyManpage(self.shortpath)
        if self.synopsis:
            self.synopsis = [_parsesynopsis(self.path, l) for l in self.synopsis.splitlines()]
            # figure out aliases from the synopsis
            d = collections.OrderedDict()
            for prog, text in self.synopsis:
                d.setdefault(text, []).append(prog)
            # NOTE: relies on Python 2 dict.items() returning a list; takes
            # the programs of the first (insertion-ordered) synopsis text
            text, progs = d.items()[0]
            self.synopsis = text
            self.aliases.update(progs)
        self.aliases.remove(self.name)
        # give the name of the man page the highest score
        self.aliases = [(self.name, 10)] + [(x, 1) for x in self.aliases]
| 7,602 | Python | .py | 185 | 33.448649 | 107 | 0.57993 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,116 | classifier.py | idank_explainshell/explainshell/algo/classifier.py | import itertools, collections, logging
import nltk
import nltk.metrics
import nltk.classify
import nltk.classify.maxent
from explainshell import algo, config
# module-level logger shared by the classifier
logger = logging.getLogger(__name__)
def get_features(paragraph):
    '''extract the feature dictionary used to classify a manpage paragraph
    as option-describing or not'''
    ptext = paragraph.cleantext()
    assert ptext
    features = {
        'starts_with_hyphen' : algo.features.starts_with_hyphen(ptext),
        'is_indented' : algo.features.is_indented(ptext),
        'par_length' : algo.features.par_length(ptext),
        'first_line_length' : algo.features.first_line_length(ptext),
        'first_line_word_count' : algo.features.first_line_word_count(ptext),
        'is_good_section' : algo.features.is_good_section(paragraph),
        'word_count' : algo.features.word_count(ptext),
    }
    # one boolean feature per interesting token in the first line
    for token in ('=', '--', '[', '|', ','):
        features['first_line_contains_%s' % token] = algo.features.first_line_contains(ptext, token)
    return features
class classifier(object):
    '''classify the paragraphs of a man page as having command line options
    or not'''
    def __init__(self, store, algo, **classifier_args):
        self.store = store                      # storage backend providing the training set
        self.algo = algo                        # 'maxent' or 'bayes'
        self.classifier_args = classifier_args  # passed through to the nltk trainer
        self.classifier = None                  # lazily trained in train()
    def train(self):
        '''train the underlying nltk classifier on the stored training set;
        a no-op if it was already trained'''
        if self.classifier:
            return
        manpages = self.store.trainingset()
        # flatten the manpages so we get a list of (manpage-name, paragraph)
        def flatten_manpages(manpage):
            l = []
            for para in manpage.paragraphs:
                l.append(para)
            return l
        paragraphs = itertools.chain(*[flatten_manpages(m) for m in manpages])
        training = list(paragraphs)
        negids = [p for p in training if not p.is_option]
        posids = [p for p in training if p.is_option]
        negfeats = [(get_features(p), False) for p in negids]
        posfeats = [(get_features(p), True) for p in posids]
        # NOTE: relies on Python 2 integer division for the 3/4 train/test split
        negcutoff = len(negfeats)*3/4
        poscutoff = len(posfeats)*3/4
        trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff]
        # keep the remaining quarter for evaluate()
        self.testfeats = negfeats[negcutoff:] + posfeats[poscutoff:]
        logger.info('train on %d instances', len(trainfeats))
        if self.algo == 'maxent':
            c = nltk.classify.maxent.MaxentClassifier
        elif self.algo == 'bayes':
            c = nltk.classify.NaiveBayesClassifier
        else:
            raise ValueError('unknown classifier')
        self.classifier = c.train(trainfeats, **self.classifier_args)
    def evaluate(self):
        '''print precision/recall over the held-out test set and the most
        informative features (Python 2 print statements)'''
        self.train()
        refsets = collections.defaultdict(set)
        testsets = collections.defaultdict(set)
        for i, (feats, label) in enumerate(self.testfeats):
            refsets[label].add(i)
            guess = self.classifier.prob_classify(feats)
            observed = guess.max()
            testsets[observed].add(i)
            #if label != observed:
            #    print 'label:', label, 'observed:', observed, feats
        print 'pos precision:', nltk.metrics.precision(refsets[True], testsets[True])
        print 'pos recall:', nltk.metrics.recall(refsets[True], testsets[True])
        print 'neg precision:', nltk.metrics.precision(refsets[False], testsets[False])
        print 'neg recall:', nltk.metrics.recall(refsets[False], testsets[False])
        print self.classifier.show_most_informative_features(10)
    def classify(self, manpage):
        '''yield (certainty, paragraph) for every paragraph classified as an
        option paragraph with certainty above the configured cutoff, marking
        it is_option in place'''
        self.train()
        for item in manpage.paragraphs:
            features = get_features(item)
            guess = self.classifier.prob_classify(features)
            option = guess.max()
            certainty = guess.prob(option)
            if option:
                # ignore positives the classifier isn't confident about
                if certainty < config.CLASSIFIER_CUTOFF:
                    pass
                else:
                    logger.info('classified %s (%f) as an option paragraph', item, certainty)
                    item.is_option = True
                    yield certainty, item
| 3,988 | Python | .py | 87 | 36.563218 | 93 | 0.634253 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,117 | features.py | idank_explainshell/explainshell/algo/features.py | import re
def extract_first_line(paragraph):
    '''return the first line of paragraph, stripped; if it contains a wide
    run of whitespace (an option column followed by its description), only
    the part before that run is returned

    >>> extract_first_line('a b  cd')
    'a b'
    >>> extract_first_line('a b cd')
    'a b cd'
    >>> extract_first_line('   a b cd')
    'a b cd'
    '''
    first = paragraph.splitlines()[0].strip()
    runs = list(re.finditer(r'(\s+)', first))
    # handle options that have their description in the first line by trying
    # to treat it as two lines (looking at spaces between option and the rest
    # of the text)
    if runs:
        widest = max(runs, key=lambda m: m.span()[1] - m.span()[0])
        if widest.start() > 1 and widest.end() - widest.start() > 1:
            first = first[:widest.start()]
    return first
def starts_with_hyphen(paragraph):
    '''True when the first non-whitespace character of paragraph is a
    hyphen; empty or all-whitespace input returns False instead of raising
    IndexError as the previous indexing-based check did'''
    return paragraph.lstrip().startswith('-')
def is_indented(paragraph):
    '''True when paragraph begins with whitespace'''
    # slicing yields '' for empty input, and ''.isspace() is False, which
    # matches comparing against lstrip() on the empty string
    return paragraph[:1].isspace()
def par_length(paragraph):
    '''length of the stripped paragraph, rounded to the nearest ten and
    halved (a coarse bucketing feature)'''
    stripped = paragraph.strip()
    return round(len(stripped), -1) / 2
def first_line_contains(paragraph, what):
    '''True when the literal first line of paragraph contains what'''
    return what in paragraph.splitlines()[0]
def first_line_length(paragraph):
    '''length of the heuristically extracted first line, rounded to the
    nearest ten and halved'''
    return round(len(extract_first_line(paragraph)), -1) / 2
def first_line_word_count(paragraph):
    '''number of words longer than one character in the heuristically
    extracted first line, rounded to the nearest ten'''
    words = [w for w in extract_first_line(paragraph).split() if len(w) > 1]
    return round(len(words), -1)
def is_good_section(paragraph):
    '''True when the paragraph's section heading typically contains option
    descriptions'''
    section = paragraph.section
    if not section:
        return False
    lowered = section.lower()
    return 'options' in lowered or lowered in ('description', 'function letters')
def word_count(text):
    '''number of word tokens in text, rounded to the nearest ten'''
    words = re.findall(r'\w+', text)
    return round(len(words), -1)
def has_bold(html):
    '''True when the html contains an opening bold tag'''
    return html.find('<b>') != -1
| 1,759 | Python | .py | 52 | 28.923077 | 83 | 0.642099 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,118 | debugviews.py | idank_explainshell/explainshell/web/debugviews.py | import logging
from flask import render_template, request, abort, redirect, url_for, json
from explainshell import manager, config, store
from explainshell.web import app, helpers
# module-level logger for the debug views
logger = logging.getLogger(__name__)
@app.route('/debug')
def debug():
    '''render an overview of all manpages in the store with a truncated
    synopsis and a summary of their options'''
    s = store.store('explainshell', config.MONGO_URI)
    d = {'manpages' : []}
    for mp in s:
        synopsis = mp.synopsis[:20] if mp.synopsis else ''
        dd = {'name' : mp.name,
              'synopsis' : synopsis,
              'options' : ', '.join(str(o) for o in mp.options)}
        d['manpages'].append(dd)
    # case-insensitive ordering by manpage name
    d['manpages'].sort(key=lambda item: item['name'].lower())
    return render_template('debug.html', d=d)
def _convertvalue(value):
if isinstance(value, list):
return [s.strip() for s in value]
elif value.lower() == 'true':
return True
elif value:
return value.strip()
return False
@app.route('/debug/tag/<source>', methods=['GET', 'POST'])
def tag(source):
    '''manual tagging UI for a manpage: GET renders the tagger form, POST
    (with a 'paragraphs' field) saves the edited paragraphs/options back
    through the manager'''
    mngr = manager.manager(config.MONGO_URI, 'explainshell', [], False, False)
    s = mngr.store
    m = s.findmanpage(source)[0]
    assert m
    if 'paragraphs' in request.form:
        # POST: rebuild paragraph/option objects from the submitted json
        paragraphs = json.loads(request.form['paragraphs'])
        mparagraphs = []
        for d in paragraphs:
            idx = d['idx']
            text = d['text']
            section = d['section']
            short = [s.strip() for s in d['short']]
            long = [s.strip() for s in d['long']]
            expectsarg = _convertvalue(d['expectsarg'])
            nestedcommand = _convertvalue(d['nestedcommand'])
            # nestedcommand must end up as a list of end words or False
            # NOTE(review): under python 2 flask form values are unicode, so
            # this isinstance(str) check may never fire -- verify
            if isinstance(nestedcommand, str):
                nestedcommand = [nestedcommand]
            elif nestedcommand is True:
                logger.error('nestedcommand %r must be a string or list', nestedcommand)
                abort(503)
            argument = d['argument']
            if not argument:
                argument = None
            p = store.paragraph(idx, text, section, d['is_option'])
            # wrap the paragraph as an option only when it carries option data
            if d['is_option'] and (short or long or argument):
                p = store.option(p, short, long, expectsarg, argument, nestedcommand)
            mparagraphs.append(p)
        if request.form.get('nestedcommand', '').lower() == 'true':
            m.nestedcommand = True
        else:
            m.nestedcommand = False
        m = mngr.edit(m, mparagraphs)
        if m:
            return redirect(url_for('explain', cmd=m.name))
        else:
            abort(503)
    else:
        # GET: prepare the manpage for display in the tagger form
        helpers.convertparagraphs(m)
        for p in m.paragraphs:
            if isinstance(p, store.option):
                # the form shows list values as comma separated strings
                if isinstance(p.expectsarg, list):
                    p.expectsarg = ', '.join(p.expectsarg)
                if isinstance(p.nestedcommand, list):
                    p.nestedcommand = ', '.join(p.nestedcommand)
        return render_template('tagger.html', m=m)
| 2,945 | Python | .py | 76 | 29.315789 | 88 | 0.573077 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,119 | __init__.py | idank_explainshell/explainshell/web/__init__.py | from flask import Flask
app = Flask(__name__)
# importing views registers the URL routes on app as a side effect
from explainshell.web import views
from explainshell import store, config
if config.DEBUG:
    # debug-only routes (e.g. the manpage tagger) are registered only when enabled
    from explainshell.web import debugviews
app.config.from_object(config)
| 215 | Python | .py | 7 | 28.714286 | 43 | 0.819512 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,120 | helpers.py | idank_explainshell/explainshell/web/helpers.py | from explainshell import util
def convertparagraphs(manpage):
    '''decode the text of every paragraph of manpage to unicode, in place,
    and return the manpage for convenience'''
    for paragraph in manpage.paragraphs:
        paragraph.text = paragraph.text.decode('utf-8')
    return manpage
def suggestions(matches, command):
    '''enrich command matches with links to other man pages with the
    same name'''
    for m in matches:
        if 'name' not in m or 'suggestions' not in m:
            continue
        prefix = command[:m['start']]
        suffix = command[m['end']:]
        enriched = []
        # order alternatives by their manpage section
        for othermp in sorted(m['suggestions'], key=lambda mp: mp.section):
            replacement = '%s.%s' % (othermp.name, othermp.section)
            enriched.append({'cmd' : prefix + replacement + suffix,
                             'text' : othermp.namesection})
        m['suggestions'] = enriched
| 792 | Python | .py | 18 | 33.888889 | 79 | 0.586788 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,121 | views.py | idank_explainshell/explainshell/web/views.py | import logging, itertools, urllib
import markupsafe
from flask import render_template, request, redirect
import bashlex.errors
from explainshell import matcher, errors, util, store, config
from explainshell.web import app, helpers
# module-level logger for the web views
logger = logging.getLogger(__name__)
@app.route('/')
def index():
    # landing page with the command input form
    return render_template('index.html')
@app.route('/about')
def about():
    # static about page
    return render_template('about.html')
@app.route('/explain')
def explain():
    '''main endpoint: read the command from the cmd query argument, match it
    against the manpage store and render the explanation, or one of the
    specific error pages on failure'''
    if 'cmd' not in request.args or not request.args['cmd'].strip():
        return redirect('/')
    command = request.args['cmd'].strip()
    command = command[:1000] # trim commands longer than 1000 characters
    if '\n' in command:
        return render_template('errors/error.html', title='parsing error!',
                               message='no newlines please')
    s = store.store('explainshell', config.MONGO_URI)
    try:
        matches, helptext = explaincommand(command, s)
        return render_template('explain.html',
                               matches=matches,
                               helptext=helptext,
                               getargs=command)
    # 'as' form instead of the deprecated 'except X, e' syntax (python 2.6+,
    # forward compatible with python 3)
    except errors.ProgramDoesNotExist as e:
        return render_template('errors/missingmanpage.html', title='missing man page', e=e)
    except bashlex.errors.ParsingError as e:
        logger.warn('%r parsing error: %s', command, e.message)
        return render_template('errors/parsingerror.html', title='parsing error!', e=e)
    except NotImplementedError as e:
        logger.warn('not implemented error trying to explain %r', command)
        msg = ("the parser doesn't support %r constructs in the command you tried. you may "
               "<a href='https://github.com/idank/explainshell/issues'>report a "
               "bug</a> to have this added, if one doesn't already exist.") % e.args[0]
        return render_template('errors/error.html', title='error!', message=msg)
    except Exception:
        # was a bare 'except:', which would also swallow SystemExit and
        # KeyboardInterrupt; Exception is the widest net a request handler
        # should cast
        logger.error('uncaught exception trying to explain %r', command, exc_info=True)
        msg = 'something went wrong... this was logged and will be checked'
        return render_template('errors/error.html', title='error!', message=msg)
@app.route('/explain/<program>', defaults={'section' : None})
@app.route('/explain/<section>/<program>')
def explainold(section, program):
    '''legacy explain endpoint: either permanently redirect old-style
    program+args links to /explain?cmd=..., or render the options page for
    a single program'''
    logger.info('/explain section=%r program=%r', section, program)
    s = store.store('explainshell', config.MONGO_URI)
    if section is not None:
        program = '%s.%s' % (program, section)
    # keep links to old urls alive
    if 'args' in request.args:
        args = request.args['args']
        command = '%s %s' % (program, args)
        return redirect('/explain?cmd=%s' % urllib.quote_plus(command), 301)
    else:
        try:
            mp, suggestions = explainprogram(program, s)
            return render_template('options.html', mp=mp, suggestions=suggestions)
        # 'as' form instead of the deprecated 'except X, e' syntax
        except errors.ProgramDoesNotExist as e:
            return render_template('errors/missingmanpage.html', title='missing man page', e=e)
def explainprogram(program, store):
    '''look up program in store; return a dict describing its best-matching
    manpage plus a list of link suggestions for the other matches'''
    mps = store.findmanpage(program)
    mp = mps.pop(0)
    program = mp.namesection
    synopsis = mp.synopsis
    if synopsis:
        synopsis = synopsis.decode('utf-8')
    mpdict = {'source' : mp.source[:-3],
              'section' : mp.section,
              'program' : program,
              'synopsis' : synopsis,
              'options' : [o.text.decode('utf-8') for o in mp.options]}
    suggestions = [{'text' : other.namesection,
                    'link' : '%s/%s' % (other.section, other.name)}
                   for other in mps]
    logger.info('suggestions: %s', suggestions)
    return mpdict, suggestions
def _makematch(start, end, match, commandclass, helpclass):
return {'match' : match, 'start' : start, 'end' : end, 'spaces' : '',
'commandclass' : commandclass, 'helpclass' : helpclass}
def explaincommand(command, store):
    '''parse command with the matcher and massage its groups into the shape
    the explain template expects

    returns (matches, helptext): matches is a flat, start-sorted list of
    dicts built by _makematch, and helptext is a list of (text, help id)
    pairs ordered by the position where each help text first appears'''
    matcher_ = matcher.matcher(command, store)
    groups = matcher_.match()
    expansions = matcher_.expansions
    # group 0 holds shell-syntax matches, the remaining groups are the
    # individual simple commands of the input
    shellgroup = groups[0]
    commandgroups = groups[1:]
    matches = []
    # save a mapping between the help text to its assigned id,
    # we're going to reuse ids that have the same text
    texttoid = {}
    # remember where each assigned id has started in the source,
    # we're going to use it later on to sort the help text by start
    # position
    idstartpos = {}
    # each help text gets a stable 'help-<n>' id; identical texts share one
    l = []
    for m in shellgroup.results:
        commandclass = shellgroup.name
        helpclass = 'help-%d' % len(texttoid)
        text = m.text
        if text:
            text = text.decode('utf-8')
            helpclass = texttoid.setdefault(text, helpclass)
        else:
            # unknowns in the shell group are possible when our parser left
            # an unparsed remainder, see matcher._markunparsedunknown
            commandclass += ' unknown'
            helpclass = ''
        if helpclass:
            idstartpos.setdefault(helpclass, m.start)
        d = _makematch(m.start, m.end, m.match, commandclass, helpclass)
        formatmatch(d, m, expansions)
        l.append(d)
    matches.append(l)
    # same treatment for every simple command group
    for commandgroup in commandgroups:
        l = []
        for m in commandgroup.results:
            commandclass = commandgroup.name
            helpclass = 'help-%d' % len(texttoid)
            text = m.text
            if text:
                text = text.decode('utf-8')
                helpclass = texttoid.setdefault(text, helpclass)
            else:
                commandclass += ' unknown'
                helpclass = ''
            if helpclass:
                idstartpos.setdefault(helpclass, m.start)
            d = _makematch(m.start, m.end, m.match, commandclass, helpclass)
            formatmatch(d, m, expansions)
            l.append(d)
        # the first match of a command group is the program itself;
        # decorate it with man page metadata and alternative suggestions
        d = l[0]
        d['commandclass'] += ' simplecommandstart'
        if commandgroup.manpage:
            d['name'] = commandgroup.manpage.name
            d['section'] = commandgroup.manpage.section
            if '.' not in d['match']:
                d['match'] = '%s(%s)' % (d['match'], d['section'])
            d['suggestions'] = commandgroup.suggestions
            # [:-5] drops the '.1.gz'-style suffix of the source filename
            d['source'] = commandgroup.manpage.source[:-5]
        matches.append(l)
    matches = list(itertools.chain.from_iterable(matches))
    helpers.suggestions(matches, command)
    # _checkoverlaps(matcher_.s, matches)
    matches.sort(key=lambda d: d['start'])
    # pad each match with the whitespace that separates it from the next
    # one so the rendered spans line up with the original command text
    it = util.peekable(iter(matches))
    while it.hasnext():
        m = it.next()
        spaces = 0
        if it.hasnext():
            spaces = it.peek()['start'] - m['end']
        m['spaces'] = ' ' * spaces
    # order the help texts by where their id was first used in the command
    helptext = sorted(texttoid.iteritems(), key=lambda (k, v): idstartpos[v])
    return matches, helptext
def formatmatch(d, m, expansions):
    '''populate the match field in d by escaping m.match and generating
    links to any command/process substitutions

    expansions is a list of (start, end, kind) tuples in absolute command
    offsets; only expansions fully contained in [m.start, m.end) are
    wrapped, everything else is escaped verbatim'''
    # save us some work later: do any expansions overlap
    # the current match?
    hassubsinmatch = False
    for start, end, kind in expansions:
        if m.start <= start and end <= m.end:
            hassubsinmatch = True
            break
    # if not, just escape the current match
    if not hassubsinmatch:
        d['match'] = markupsafe.escape(m.match)
        return
    # used in es.js
    d['commandclass'] += ' hasexpansion'
    # go over the expansions, wrapping them with a link; leave everything else
    # untouched
    expandedmatch = ''
    i = 0
    for start, end, kind in expansions:
        if start >= m.end:
            break
        relativestart = start - m.start
        relativeend = end - m.start
        # copy the text between the previous expansion and this one,
        # escaping char by char
        if i < relativestart:
            for j in range(i, relativestart):
                if m.match[j].isspace():
                    expandedmatch += markupsafe.Markup(' ')
                else:
                    expandedmatch += markupsafe.escape(m.match[j])
            # NOTE(review): this looks like it should be i = relativestart;
            # the +1 skips one character whenever the following containment
            # check fails (expansion only partially overlapping the match) --
            # confirm against matcher output
            i = relativestart + 1
        if m.start <= start and end <= m.end:
            s = m.match[relativestart:relativeend]
            if kind == 'substitution':
                content = markupsafe.Markup(_substitutionmarkup(s))
            else:
                content = s
            # Markup.format escapes content unless it is itself Markup
            expandedmatch += markupsafe.Markup(
                '<span class="expansion-{0}">{1}</span>').format(kind, content)
            i = relativeend
    # trailing text after the last expansion
    if i < len(m.match):
        expandedmatch += markupsafe.escape(m.match[i:])
    assert expandedmatch
    d['match'] = expandedmatch
def _substitutionmarkup(cmd):
    '''wrap cmd in a link that zooms into it as a standalone command

    >>> _substitutionmarkup('foo')
    '<a href="/explain?cmd=foo" title="Zoom in to nested command">foo</a>'
    >>> _substitutionmarkup('cat <&3')
    '<a href="/explain?cmd=cat+%3C%263" title="Zoom in to nested command">cat <&3</a>'
    '''
    # the href gets the url-encoded form, the link body the raw command
    query = urllib.urlencode({'cmd': cmd})
    template = ('<a href="/explain?{query}" title="Zoom in to nested command">{cmd}'
                '</a>')
    return template.format(cmd=cmd, query=query)
def _checkoverlaps(s, matches):
explained = [None]*len(s)
for d in matches:
for i in range(d['start'], d['end']):
if explained[i]:
raise RuntimeError("explained overlap for group %s at %d with %s" % (d, i, explained[i]))
explained[i] = d
| 9,329 | Python | .py | 219 | 33.863014 | 105 | 0.611828 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,122 | shellbuiltins.py | idank_explainshell/tools/shellbuiltins.py | # -*- coding: utf-8 -*-
'''manually define and add shell builtins into the store
unfortunately the bash section for builtins isn't written in a way
explainshell can understannd, so we have to resort to manually
writing these down and adding them.'''
import textwrap
from explainshell import store, config
# short aliases for the store types used heavily below
sp = store.paragraph
so = store.option
sm = store.manpage
# collects the hand-written store.manpage objects, filled in by _add below
BUILTINS = {}
def _add(names, synopsis, options):
    '''register a fake man page for the builtin whose primary name is
    names[0]; every entry of names (plus a generated bash-<name> alias)
    becomes an alias of that page

    fix: the alias list comprehension used 'name' as its variable; in
    Python 2 a list comprehension leaks its variable into the enclosing
    scope, and the subscript target is evaluated after the right-hand
    side, so the page ended up keyed 'bash-<name>' instead of '<name>'.
    use a distinct variable so the key is the primary name as intended.'''
    name = names[0]
    # hack: fake a source man page (this breaks the outgoing links from
    # explainshell, oh well)
    names.append('bash-%s' % name)
    aliases = [(alias, 20) for alias in names]
    BUILTINS[name] = sm('bash-%s.1.gz' % name, name, synopsis, options, aliases)
_add([':'], 'the command does nothing', [so(sp(0, '''No effect; the command does nothing beyond expanding arguments and performing any specified redirections. A zero
exit code is returned.''', '', True), [], [], False, True, False)])
source = textwrap.dedent(''' <b>.</b> <u>filename</u> [<u>arguments</u>]
<b>source</b> <u>filename</u> [<u>arguments</u>]
Read and execute commands from <u>filename</u> in the current shell environment and return the exit status of
the last command executed from <u>filename</u>. If <u>filename</u> does not contain a slash, filenames in <b>PATH</b> are used
to find the directory containing <u>filename</u>. The file searched for in <b>PATH</b> need not be executable. When
<b>bash</b> is not in <u>posix</u> <u>mode</u>, the current directory is searched if no file is found in <b>PATH</b>. If the
<b>sourcepath</b> option to the <b>shopt</b> builtin command is turned off, the <b>PATH</b> is not searched. If any
<u>arguments</u> are supplied, they become the positional parameters when <u>filename</u> is executed. Otherwise
the positional parameters are unchanged. The return status is the status of the last command exited within
the script (0 if no commands are executed), and false if <u>filename</u> is not found or cannot be read.''')
_add(['source', '.'], 'read and execute commands in the current shell', [so(sp(0, source, '', True), [], [], False, True, False)])
_add(['break'], 'exit from within a for, while, until, or select loop',
[so(sp(0, '''If <u>n</u> is specified, break <u>n</u> levels. <u>n</u> must be ≥ 1. If <u>n</u> is greater than the number of enclosing loops, all enclosing loops are exited. The return value is 0 unless <u>n</u> is not greater than or equal to 1.''', '', True), [], [], False, True, False)])
_add(['history'], 'display the command history list with line numbers',
[so(sp(0, '''<b>history</b> <b>[</b><u>n</u><b>]</b>
<b>history</b> <b>-c</b>
<b>history</b> <b>-d</b> <u>offset</u>
<b>history</b> <b>-anrw</b> [<u>filename</u>]
<b>history</b> <b>-p</b> <u>arg</u> [<u>arg</u> <u>...</u>]
<b>history</b> <b>-s</b> <u>arg</u> [<u>arg</u> <u>...</u>]
With no options, display the command history list with line numbers. Lines listed with a <b>*</b> have been modified.
An argument of <u>n</u> lists only the last <u>n</u> lines. If the shell variable <b>HISTTIMEFORMAT</b> is set
and not null, it is used as a format string for <u>strftime</u>(3) to display the time stamp associated with each
displayed history entry. No intervening blank is printed between the formatted time stamp and the history
line. If <u>filename</u> is supplied, it is used as the name of the history file; if not, the value of
<b>HISTFILE</b> is used.''', '', True), [], [], False, True, False),
so(sp(1, '<b>-c</b> Clear the history list by deleting all the entries.', '', True), ['-c'], [], False, False, False),
so(sp(2, textwrap.dedent(''' <b>-d</b> <u>offset</u>
Delete the history entry at position <u>offset</u>.'''), '', True), ['-d'], [], 'offset', False, False),
so(sp(3, textwrap.dedent(''' <b>-a</b> Append the ``new'' history lines (history lines entered since the beginning of the current <b>bash</b> session)
to the history file.'''), '', True), ['-a'], [], False, False, False),
so(sp(4, textwrap.dedent(''' <b>-n</b> Read the history lines not already read from the history file into the current history list. These are
lines appended to the history file since the beginning of the current <b>bash</b> session.'''), '', True), ['-n'], [], False, False, False),
so(sp(5, textwrap.dedent(''' <b>-r</b> Read the contents of the history file and append them to the current history list.'''), '', True), ['-r'], [], False, False, False),
so(sp(6, textwrap.dedent(''' <b>-w</b> Write the current history list to the history file, overwriting the history file's contents.'''), '', True), ['-w'], [], 'filename', False, False),
so(sp(7, textwrap.dedent(''' <b>-p</b> Perform history substitution on the following <u>args</u> and display the result on the standard output.
Does not store the results in the history list. Each <u>arg</u> must be quoted to disable normal history expansion.'''), '', True), ['-p'], [], 'arg', True, False),
so(sp(8, textwrap.dedent(''' <b>-s</b> Store the <u>args</u> in the history list as a single entry. The last command in the history list is
removed before the <u>args</u> are added.'''), '', True), ['-s'], [], 'arg', False, False)])
if __name__ == '__main__':
    # standalone use: configure logging, then push every hand-written
    # builtin page into the manpage store
    import logging.config
    logging.config.dictConfig(config.LOGGING_DICT)
    target = store.store('explainshell', config.MONGO_URI)
    for builtinpage in BUILTINS.itervalues():
        target.addmanpage(builtinpage)
| 5,876 | Python | .py | 64 | 85.65625 | 306 | 0.615451 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,123 | test-options.py | idank_explainshell/tests/test-options.py | import unittest
from explainshell import options, store, errors
class test_options(unittest.TestCase):
    '''unit tests for options.extract_option/options.extract: parsing
    option specifications (short/long flags and their arguments) out of
    man page paragraph text'''
    def test_simple(self):
        # extract_option returns ([short options], [long options])
        s = '\t-a description'
        self.assertEquals(options.extract_option(s), (['-a'], []))
        s = '\t-a, description'
        self.assertEquals(options.extract_option(s), (['-a'], []))
        r = (['-a', '-b'], [])
        # comma, slash and whitespace all separate option names
        s = '\t-a, -b description'
        self.assertEquals(options.extract_option(s), r)
        s = '\t-a/-b description'
        self.assertEquals(options.extract_option(s), r)
        s = '\t-a -b description'
        self.assertEquals(options.extract_option(s), r)
        s = '\t-a -b,-c, -d description'
        self.assertEquals(options.extract_option(s), (['-a', '-b', '-c', '-d'], []))
        s = '\t--a, -b, --c-d description'
        self.assertEquals(options.extract_option(s), (['-b'], ['--a', '--c-d']))
        # malformed specifications yield nothing
        s = '---c-d '
        self.assertEquals(options.extract_option(s), ([], []))
        s = '-c- '
        self.assertEquals(options.extract_option(s), ([], []))
    def test_option_arg(self):
        # options with arguments come back as (name, argument) tuples
        s = '\t-a FOO, -b=BAR, description'
        self.assertEquals(options.extract_option(s),
                          ([('-a', 'FOO'), ('-b', 'BAR')], []))
        s = '\t-a [FOO], -b[=BAR], description'
        self.assertEquals(options.extract_option(s),
                          ([('-a', 'FOO'), ('-b', 'BAR')], []))
        s = '\t-a<n>, -b=<BAR>, -C <ah>'
        self.assertEquals(options.extract_option(s),
                          ([('-a', 'n'), ('-b', 'BAR'), ('-C', 'ah')], []))
        s = '\t--aa FOO, --bb=BAR, description'
        self.assertEquals(options.extract_option(s),
                          ([], [('--aa', 'FOO'), ('--bb', 'BAR')]))
        s = '-a or -b'
        self.assertEquals(options.extract_option(s),
                          (['-a', '-b'], []))
    def test_pipe_separator(self):
        s = '-a|b'
        self.assertEquals(options.extract_option(s),
                          (['-a', 'b'], []))
        s = '-a|-b|--c|d'
        self.assertEquals(options.extract_option(s),
                          (['-a', '-b', 'd'], ['--c']))
    def test_multiline_options(self):
        s = '\t-a, -b, \n-c, --ddd description'
        self.assertEquals(options.extract_option(s),
                          (['-a', '-b', '-c'], ['--ddd']))
    def test_multiline_desc(self):
        s = '\t-a, -b description\n\tmultiline\n another line'
        self.assertEquals(options.extract_option(s), (['-a', '-b'], []))
    def test_not_an_option(self):
        self.assertEquals(options.extract_option('foobar'), ([], []))
    def test_no_hyphen(self):
        s = '\ta=b description'
        self.assertEquals(options.extract_option(s), ([], [('a', 'b')]))
    def test_hyphen_in_arg(self):
        s = '-a=FOO-BAR, --aa=FOO-BAR'
        self.assertEquals(options.extract_option(s),
                          ([('-a', 'FOO-BAR')], [('--aa', 'FOO-BAR')]))
        #s = '-a FOO-BAR, --aa FOO-BAR'
        #self.assertEquals(options.extract_option(s),
        #                  ([('-a', 'FOO-BAR')], [('--aa', 'FOO-BAR')]))
    def test_extract(self):
        # options.extract scans a whole manpage and records the paragraphs
        # that define options on the manpage object
        p1 = store.paragraph(0, '<b>--test</b>=<u>arg</u>\ndesc', '', True)
        p2 = store.paragraph(1, 'no options here', '', True)
        p3 = store.paragraph(2, '--foo-bar=<arg>\ndesc', '', True)
        m = store.manpage('', '', '', [p1, p2, p3], [])
        options.extract(m)
        r = m.options
        self.assertEquals(len(r), 2)
        self.assertEquals(r[0].text, p1.text)
        self.assertEquals(r[0].short, [])
        self.assertEquals(r[0].long, ['--test'])
        self.assertEquals(r[0].expectsarg, True)
        self.assertEquals(r[1].text, p3.text)
        self.assertEquals(r[1].short, [])
        self.assertEquals(r[1].long, ['--foo-bar'])
        self.assertEquals(r[1].expectsarg, True)
    def test_help(self):
        s = '\t-?, --help description'
        self.assertEquals(options.extract_option(s), (['-?'], ['--help']))
| 4,063 | Python | .py | 84 | 37.880952 | 84 | 0.498482 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,124 | test-fixer.py | idank_explainshell/tests/test-fixer.py | import unittest
import copy
from explainshell import fixer, options, store
class test_fixer(unittest.TestCase):
    '''unit tests for the fixer framework: registration/dispatch of fixer
    hooks and the paragraph-joining fixer'''
    def setUp(self):
        # snapshot the global fixer registry so tests can replace it
        self._oldfixerscls = fixer.fixerscls[:]
    def tearDown(self):
        fixer.fixerscls = self._oldfixerscls
    def test_changes(self):
        # a runner dispatches hook calls to every registered fixer class
        class myfixer(fixer.basefixer):
            def pre_get_raw_manpage(self):
                self.mctx['foo'] = 'bar'
        d = {}
        fixer.fixerscls = [myfixer]
        r = fixer.runner(d)
        self.assertTrue('foo' not in d)
        r.pre_get_raw_manpage()
        self.assertEquals(d['foo'], 'bar')
    def test_paragraphjoiner(self):
        # paragraphs a..z, with options on a few of them; _join should merge
        # option-less paragraphs into the preceding option when they are
        # within maxdistance of it
        maxdistance = fixer.paragraphjoiner.maxdistance
        paragraphs = [store.paragraph(i, chr(ord('a') + i), None, False) for i in range(26)]
        options = [
            store.option(paragraphs[0], [], [], False),
            store.option(paragraphs[1], [], [], False),
            store.option(paragraphs[5], [], [], False),
            store.option(paragraphs[5+maxdistance-1], [], [], False),
            store.option(paragraphs[15], [], [], False),
            store.option(paragraphs[17], [], [], False),
            store.option(paragraphs[-1], [], [], False)]
        f = fixer.paragraphjoiner(None)
        merged = f._join(paragraphs, options)
        #self.assertEquals(merged, 7)
        #self.assertEquals(len(paragraphs), 19)
        self.assertEquals(options[0].text, 'a')
        self.assertEquals(options[1].text.replace('\n', ''), 'bcde')
        self.assertEquals(options[2].text.replace('\n', ''), 'fghi')
        self.assertEquals(options[3].text, 'j')
        self.assertEquals(options[4].text.replace('\n', ''), 'pq')
        self.assertEquals(options[5].text, 'r')
        self.assertEquals(options[6].text, 'z')
        # join again to make sure nothing is changed
        oldparagraphs = copy.deepcopy(paragraphs)
        oldoptions = copy.deepcopy(options)
        f._join(paragraphs, options)
        self.assertEquals(oldparagraphs, paragraphs)
        self.assertEquals(oldoptions, options)
| 2,101 | Python | .py | 46 | 36.021739 | 92 | 0.599511 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,125 | test-manager.py | idank_explainshell/tests/test-manager.py | import unittest, os
from explainshell import manager, config, store, errors
@unittest.skip("nltk usage is broken due to new version")
class test_manager(unittest.TestCase):
    '''end-to-end tests for manager.manager: importing man pages from
    fixture .gz files into a scratch mongo store ('explainshell_tests')
    and verifying the resulting mappings'''
    def setUp(self):
        # start every test from an empty scratch database
        store.store('explainshell_tests').drop(True)
    def _getmanager(self, names, **kwargs):
        # build a manager over fixture pages from section 1 of MANPAGEDIR
        l = []
        for n in names:
            l.append(os.path.join(config.MANPAGEDIR, '1', n))
        m = manager.manager(config.MONGO_URI, 'explainshell_tests', l, **kwargs)
        return m
    def test(self):
        m = self._getmanager(['tar.1.gz'])
        m.run()
        self.assertRaises(errors.ProgramDoesNotExist, m.store.findmanpage, 'tar.2')
        mp = m.store.findmanpage('tar')[0]
        self.assertEquals(mp.source, 'tar.1.gz')
        self.assertEquals(mp.name, 'tar')
        self.assertEquals(mp.aliases, [('tar', 10)])
        self.assertEquals(len(mp.paragraphs), 154)
        self.assertEquals(len(mp.options), 134)
        self.assertTrue(mp.find_option('-v'))
        self.assertEquals(mp.synopsis, 'The GNU version of the tar archiving utility')
        self.assertTrue(mp.partialmatch) # fixer is working
        self.assertEquals(m.run()[0], [])
    def test_verify(self):
        m = self._getmanager(['tar.1.gz'])
        s = m.store
        # invalid mapping
        s.addmapping('foo', 'bar', 1)
        ok, unreachable, notfound = s.verify()
        self.assertFalse(ok)
        self.assertEquals(list(notfound), ['bar'])
        s.mapping.drop()
        m.run()
        ok, unreachable, notfound = s.verify()
        self.assertTrue(ok)
        # a page with no mapping pointing at it is unreachable
        s.mapping.drop()
        ok, unreachable, notfound = s.verify()
        self.assertEquals(list(unreachable), ['tar'])
        s.addmapping('foo', 'bar', 1)
        ok, unreachable, notfound = s.verify()
        self.assertEquals(list(notfound), ['bar'])
        self.assertEquals(list(unreachable), ['tar'])
    @unittest.skip("https://github.com/idank/explainshell/pull/303#issuecomment-1272387073")
    def test_aliases(self):
        m = self._getmanager(['lsbcpp.1.gz', 'tar.1.gz', 'bsdtar.1.gz', 'basket.1.gz'])
        m.run()
        mp = m.store.findmanpage('lsbcpp')
        self.assertTrue('lsbcc' in m.store)
        self.assertTrue('lsbc++' in m.store)
        self.assertTrue('lsbcpp' in m.store)
        self.assertEquals(len(mp), 1)
        mp = m.store.findmanpage('tar')
        self.assertEquals(len(mp), 2)
        self.assertEquals(mp[0].source, 'tar.1.gz')
        self.assertEquals(mp[1].source, 'bsdtar.1.gz')
    def test_overwrite(self):
        # m.run() returns (added, errors); a second run without overwrite
        # must not re-import the same page
        m = self._getmanager(['tar.1.gz'], overwrite=False)
        self.assertEquals(len(list(m.store)), 0)
        a, e = m.run()
        self.assertTrue(a)
        self.assertFalse(e)
        self.assertEquals(m.store.mapping.count(), 1)
        self.assertEquals(len(list(m.store)), 1)
        a, e = m.run()
        self.assertFalse(a)
        self.assertTrue(e)
        self.assertEquals(m.store.mapping.count(), 1)
        self.assertEquals(len(list(m.store)), 1)
        m = manager.manager(config.MONGO_URI, 'explainshell_tests', [os.path.join(config.MANPAGEDIR, '1', 'tar.1.gz')], overwrite=True)
        a, e = m.run()
        self.assertTrue(a)
        self.assertFalse(e)
        self.assertEquals(m.store.mapping.count(), 1)
        self.assertEquals(len(list(m.store)), 1)
        m.store.verify()
    def test_multicommand(self):
        m = self._getmanager(['git.1.gz', 'git-rebase.1.gz'])
        m.run()
        self.assertTrue(m.store.findmanpage('git')[0].multicommand)
        self.assertTrue('git rebase' in m.store)
    def test_edit(self):
        m = self._getmanager(['tar.1.gz'], overwrite=False)
        self.assertEquals(len(list(m.store)), 0)
        a, e = m.run()
        mp = a[0]
        mp.synopsis = 'foo'
        m.edit(mp)
        mp = m.store.findmanpage('tar')[0]
        self.assertEquals(mp.synopsis, 'foo')
        self.assertTrue(m.store.verify())
        # adding an alias through edit makes the page reachable by it
        mp.aliases.append(('foo', 1))
        m.edit(mp)
        self.assertTrue('foo' in m.store)
        self.assertEquals(m.store.findmanpage('tar')[0].paragraphs,
                          m.store.findmanpage('foo')[0].paragraphs)
        self.assertTrue(m.store.verify()[0])
    def test_samename(self):
        # same program name in two different sections
        pages = [os.path.join(config.MANPAGEDIR, '1', 'node.1.gz'), os.path.join(config.MANPAGEDIR, '8', 'node.8.gz')]
        m = manager.manager(config.MONGO_URI, 'explainshell_tests', pages)
        a, e = m.run()
        self.assertEquals(len(a), 2)
        self.assertEquals(len(m.store.findmanpage('node')), 2)
        mps = m.store.findmanpage('node.8')
        self.assertEquals(len(mps), 2)
        self.assertEquals(mps[0].section, '8')
    def test_samename_samesection(self):
        m = self._getmanager(['xargs.1.gz', 'xargs.1posix.gz'])
        a, e = m.run()
        self.assertEquals(len(a), 2)
        self.assertEquals(len(m.store.findmanpage('xargs')), 2)
        mps = m.store.findmanpage('xargs.1posix')
        self.assertEquals(len(mps), 2)
        self.assertEquals(mps[0].section, '1posix')
| 5,091 | Python | .py | 116 | 35.103448 | 135 | 0.607966 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,126 | test-integration.py | idank_explainshell/tests/test-integration.py | import unittest, subprocess, pymongo, os
from explainshell import manager, config, matcher
@unittest.skip("nltk usage is broken due to new version")
class test_integration(unittest.TestCase):
    '''full pipeline test: import the echo.1.gz fixture into a scratch
    store, then match a real command line against it'''
    def test(self):
        mngr = manager.manager(config.MONGO_URI, 'explainshell_tests', [os.path.join(os.path.dirname(__file__), 'echo.1.gz')], drop=True)
        mngr.run()
        cmd = 'echo -en foobar --version'
        m = matcher.matcher(cmd, mngr.store)
        # group 0 is shell syntax; group 1 is the echo command itself
        group = m.match()[1]
        matchprog, matches = group.manpage.name, group.results
        self.assertEquals(matchprog, 'echo')
        #self.assertEquals(matches[0].text, 'display a line of text')
        self.assertEquals(matches[0].match, 'echo')
        self.assertEquals(matches[1].text, '<b>-e</b> enable interpretation of backslash escapes')
        self.assertEquals(matches[1].match, '-e')
        self.assertEquals(matches[2].text, '<b>-n</b> do not output the trailing newline')
        self.assertEquals(matches[2].match, 'n')
        # free argument with no matching option text
        self.assertEquals(matches[3].text, None)
        self.assertEquals(matches[3].match, 'foobar')
        self.assertEquals(matches[4].text, '<b>--version</b>\n output version information and exit')
        self.assertEquals(matches[4].match, '--version')
| 1,289 | Python | .py | 22 | 50.772727 | 137 | 0.666667 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,127 | helpers.py | idank_explainshell/tests/helpers.py | from explainshell import matcher, store, errors, options, helpconstants
class mockstore(object):
    '''in-memory stand-in for store.store used by the matcher tests

    exposes a fixed set of fake man pages ('bar', 'baz', 'bar foo',
    'nosynopsis', 'withargs' and the duplicate 'dup' pair) that exercise
    multicommand, partialmatch, positional arguments and nested commands'''
    def __init__(self):
        sp = store.paragraph
        so = store.option
        sm = store.manpage
        p0 = sp(0, '-a desc', '', True)
        p1 = sp(1, '-b <arg> desc', '', True)
        p2 = sp(2, '-? help text', '', True)
        p3 = sp(3, '-c=one,two\ndesc', '', True)
        p4 = sp(4, 'FILE argument', '', True)
        p5 = sp(5, '-exec nest', '', True)
        # common option set shared by all pages: -a/--a (no arg),
        # -b/--b (expects <arg>), -?, -c (value from a fixed list)
        opts = [so(p0, ['-a'], ['--a'], False),
                so(p1, ['-b'], ['--b'], '<arg>'),
                so(p2, ['-?'], [], False),
                so(p3, ['-c'], [], ['one', 'two'])]
        self.manpages = {
            'bar' : sm('bar.1.gz', 'bar', 'bar synopsis', opts, [], multicommand=True),
            'baz' : sm('baz.1.gz', 'baz', 'baz synopsis', opts, [], partialmatch=True),
            'bar foo' : sm('bar-foo.1.gz', 'bar-foo', 'bar foo synopsis', opts, [], partialmatch=True),
            'nosynopsis' : sm('bar.1.gz', 'bar', None, opts, [])}
        # two pages with the same name, to test suggestion of alternatives
        self.dup = [sm('dup.1.gz', 'dup', 'dup1 synopsis', opts, []),
                    sm('dup.2.gz', 'dup', 'dup2 synopsis', opts, [])]
        # 'withargs' additionally takes a positional FILE and a nested
        # -exec ... EOF/; command
        opts = list(opts)
        opts.append(so(p4, [], [], False, 'FILE'))
        opts.append(so(p5, ['-exec'], [], True, nestedcommand=['EOF', ';']))
        self.manpages['withargs'] = sm('withargs.1.gz', 'withargs', 'withargs synopsis',
                                       opts, [], partialmatch=True, nestedcommand=True)
    def findmanpage(self, x, section=None):
        # section is accepted for interface compatibility with store.store
        # but ignored here
        try:
            if x == 'dup':
                return self.dup
            return [self.manpages[x]]
        except KeyError:
            raise errors.ProgramDoesNotExist(x)
# module-level fixture shared by the test modules that import helpers
s = mockstore()
| 1,774 | Python | .py | 36 | 37.666667 | 107 | 0.477759 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,128 | test-manpage.py | idank_explainshell/tests/test-manpage.py | import unittest, os, subprocess
from explainshell import manpage, store
class test_manpage(unittest.TestCase):
    '''unit tests for manpage._parsetext (splitting rendered man page text
    into store.paragraph objects per section) and manpage.manpage parsing'''
    def test_first_paragraph_no_section(self):
        # text before any section header yields a paragraph with section None
        m = 'foo\nbar'
        l = list(manpage._parsetext(m.splitlines()))
        self.assertEquals(l, [store.paragraph(0, 'foo\nbar', None, False)])
    def test_sections(self):
        # bold lines are section headers; empty sections are dropped
        m = '''<b>SECTION</b>
a
b
c
<b>SECTION2</b>
a
<b>WITH SPACES</b>
a
<b>EMPTY SECTION SHOULD BE IGNORED</b>
<b>SECTION3</b>
tNOTASECTION'''
        parsed = list(manpage._parsetext(m.splitlines()))
        self.assertTrue(len(parsed) == 5)
        self.assertEquals(parsed, [store.paragraph(0, 'a\nb', 'SECTION', False),
                                   store.paragraph(1, 'c', 'SECTION', False),
                                   store.paragraph(2, 'a', 'SECTION2', False),
                                   store.paragraph(3, 'a', 'WITH SPACES', False),
                                   store.paragraph(4, 'tNOTASECTION', 'SECTION3', False)])
    def test_no_synopsis(self):
        # with no SYNOPSIS section the page still parses and keeps the
        # default alias (its own name)
        m = manpage.manpage('foo')
        m._text = 'a b c d e f g h i j k l'.replace(' ', '\n')
        m.parse()
        self.assertEquals(m.aliases, [('foo', 10)])
| 1,206 | Python | .py | 31 | 29.83871 | 90 | 0.567382 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,129 | test-matcher.py | idank_explainshell/tests/test-matcher.py | import unittest
import bashlex.errors, bashlex.ast
from explainshell import matcher, errors, helpconstants
from tests import helpers
# module-level fixture: a fake man page store shared by the tests below
s = helpers.mockstore()
class test_matcher(unittest.TestCase):
def assertMatchSingle(self, what, expectedmanpage, expectedresults):
m = matcher.matcher(what, s)
groups = m.match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[1].manpage, expectedmanpage)
self.assertEquals(groups[1].results, expectedresults)
def test_unknown_prog(self):
self.assertRaises(errors.ProgramDoesNotExist, matcher.matcher('foo', s).match)
def test_unicode(self):
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 13, '-b <arg> desc', '-b uni???')]
self.assertMatchSingle(u'bar -b uni\u05e7\u05d5\u05d3', s.findmanpage('bar')[0], matchedresult)
def test_no_options(self):
matchedresult = [(0, 3, 'bar synopsis', 'bar')]
self.assertMatchSingle('bar', s.findmanpage('bar')[0], matchedresult)
def test_known_arg(self):
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 10, '-a desc', '-a --a'),
(11, 13, '-? help text', '-?')]
self.assertMatchSingle('bar -a --a -?', s.findmanpage('bar')[0], matchedresult)
def test_arg_in_fuzzy_with_expected_value(self):
cmd = 'baz -ab arg'
matchedresult = [
(0, 3, 'baz synopsis', 'baz'),
(4, 6, '-a desc', '-a'),
(6, 11, '-b <arg> desc', 'b arg')]
self.assertMatchSingle(cmd, s.findmanpage('baz')[0], matchedresult)
cmd = 'baz -ab12'
matchedresult = [
(0, 3, 'baz synopsis', 'baz'),
(4, 6, '-a desc', '-a'),
(6, 9, '-b <arg> desc', 'b12')]
self.assertMatchSingle(cmd, s.findmanpage('baz')[0], matchedresult)
def test_partialmatch_with_arguments(self):
cmd = 'withargs arg'
matchedresult = [
(0, 8, 'withargs synopsis', 'withargs'),
(9, 12, 'FILE argument', 'arg')]
self.assertMatchSingle(cmd, s.findmanpage('withargs')[0], matchedresult)
def test_reset_current_option_if_argument_taken(self):
cmd = 'withargs -ab12 arg'
matchedresult = [
(0, 8, 'withargs synopsis', 'withargs'),
(9, 11, '-a desc', '-a'),
(11, 14, '-b <arg> desc', 'b12'),
(15, 18, 'FILE argument', 'arg')]
self.assertMatchSingle(cmd, s.findmanpage('withargs')[0], matchedresult)
cmd = 'withargs -b12 arg'
matchedresult = [
(0, 8, 'withargs synopsis', 'withargs'),
(9, 13, '-b <arg> desc', '-b12'),
(14, 17, 'FILE argument', 'arg')]
self.assertMatchSingle(cmd, s.findmanpage('withargs')[0], matchedresult)
# here we reset it implicitly by looking up '12'
cmd = 'withargs -b 12 arg'
matchedresult = [
(0, 8, 'withargs synopsis', 'withargs'),
(9, 14, '-b <arg> desc', '-b 12'),
(15, 18, 'FILE argument', 'arg')]
self.assertMatchSingle(cmd, s.findmanpage('withargs')[0], matchedresult)
def test_arg_with_expected_value(self):
cmd = 'bar -b arg --b arg'
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 18, '-b <arg> desc', '-b arg --b arg')]
self.assertMatchSingle(cmd, s.findmanpage('bar')[0], matchedresult)
def test_arg_with_expected_value_from_list(self):
cmd = 'bar -c one'
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 10, '-c=one,two\ndesc', '-c one')]
self.assertMatchSingle(cmd, s.findmanpage('bar')[0], matchedresult)
cmd = 'bar -c notinlist'
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 6, '-c=one,two\ndesc', '-c'),
(7, 16, None, 'notinlist')]
self.assertMatchSingle(cmd, s.findmanpage('bar')[0], matchedresult)
def test_arg_with_expected_value_clash(self):
'''the first option expects an arg but the arg is actually an option'''
cmd = 'bar -b -a'
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 6, '-b <arg> desc', '-b'),
(7, 9, '-a desc', '-a')]
self.assertMatchSingle(cmd, s.findmanpage('bar')[0], matchedresult)
def test_arg_with_expected_value_no_clash(self):
'''the first option expects an arg but the arg is not an option even though
it looks like one'''
cmd = 'bar -b -xa'
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 6, '-b <arg> desc', '-b'),
(7, 9, None, '-x'),
(9, 10, '-a desc', 'a')]
self.assertMatchSingle(cmd, s.findmanpage('bar')[0], matchedresult)
def test_unknown_arg(self):
matchedresult = [(0, 3, 'bar synopsis', 'bar'), (4, 6, None, '-x')]
self.assertMatchSingle('bar -x', s.findmanpage('bar')[0], matchedresult)
# merges
matchedresult = [(0, 3, 'bar synopsis', 'bar'), (4, 10, None, '-x --x')]
self.assertMatchSingle('bar -x --x', s.findmanpage('bar')[0], matchedresult)
matchedresult = [(0, 3, 'bar synopsis', 'bar'), (4, 8, None, '-xyz')]
self.assertMatchSingle('bar -xyz', s.findmanpage('bar')[0], matchedresult)
matchedresult = [(0, 3, 'bar synopsis', 'bar'),
(4, 6, None, '-x'),
(6, 7, '-a desc', 'a'), (7, 8, None, 'z')]
self.assertMatchSingle('bar -xaz', s.findmanpage('bar')[0], matchedresult)
def test_merge_same_match(self):
matchedresult = [(0, 3, 'bar synopsis', 'bar'), (4, 8, '-a desc', '-aaa')]
self.assertMatchSingle('bar -aaa', s.findmanpage('bar')[0], matchedresult)
def test_known_and_unknown_arg(self):
matchedresult = [(0, 3, 'bar synopsis', 'bar'), (4, 6, '-a desc', '-a'), (7, 9, None, '-x')]
self.assertMatchSingle('bar -a -x', s.findmanpage('bar')[0], matchedresult)
matchedresult = [(0, 3, 'bar synopsis', 'bar'), (4, 6, '-a desc', '-a'), (6, 7, None, 'x')]
self.assertMatchSingle('bar -ax', s.findmanpage('bar')[0], matchedresult)
def test_long(self):
cmd = 'bar --b=b foo'
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 9, '-b <arg> desc', '--b=b'),
(10, 13, None, 'foo')]
self.assertMatchSingle(cmd, s.findmanpage('bar')[0], matchedresult)
def test_arg_no_dash(self):
cmd = 'baz ab -x'
matchedresult = [
(0, 3, 'baz synopsis', 'baz'),
(4, 5, '-a desc', 'a'),
(5, 6, '-b <arg> desc', 'b'),
(7, 9, None, '-x')]
self.assertMatchSingle(cmd, s.findmanpage('baz')[0], matchedresult)
def test_multicommand(self):
cmd = 'bar baz --b foo'
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 7, None, 'baz'),
(8, 15, '-b <arg> desc', '--b foo')]
self.assertMatchSingle(cmd, s.findmanpage('bar')[0], matchedresult)
cmd = 'bar foo --b foo'
matchedresult = [
(0, 7, 'bar foo synopsis', 'bar foo'),
(8, 15, '-b <arg> desc', '--b foo')]
self.assertMatchSingle(cmd, s.findmanpage('bar foo')[0], matchedresult)
def test_multiple_matches(self):
cmd = 'dup -ab'
matchedresult = [
(0, 3, 'dup1 synopsis', 'dup'),
(4, 6, '-a desc', '-a'),
(6, 7, '-b <arg> desc', 'b')]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(groups[1].results, matchedresult)
self.assertEquals(groups[1].suggestions[0].source, 'dup.2.gz')
def test_arguments(self):
cmd = 'withargs -x -b freearg freearg'
matchedresult = [
(0, 8, 'withargs synopsis', 'withargs'),
# tokens that look like options are still unknown
(9, 11, None, '-x'),
(12, 22, '-b <arg> desc', '-b freearg'),
(23, 30, 'FILE argument', 'freearg')]
self.assertMatchSingle(cmd, s.findmanpage('withargs')[0], matchedresult)
def test_arg_is_dash(self):
cmd = 'bar -b - -a -'
matchedresult = [
(0, 3, 'bar synopsis', 'bar'),
(4, 8, '-b <arg> desc', '-b -'),
(9, 11, '-a desc', '-a'),
(12, 13, None, '-')]
self.assertMatchSingle(cmd, s.findmanpage('bar')[0], matchedresult)
def test_nested_command(self):
cmd = 'withargs -b arg bar -a unknown'
matchedresult = [[(0, 8, 'withargs synopsis', 'withargs'),
(9, 15, '-b <arg> desc', '-b arg')],
[(16, 19, 'bar synopsis', 'bar'),
(20, 22, '-a desc', '-a'),
(23, 30, None, 'unknown')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 3)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult[0])
self.assertEquals(groups[2].results, matchedresult[1])
def test_nested_option(self):
cmd = 'withargs -b arg -exec bar -a EOF -b arg'
matchedresult = [[(0, 8, 'withargs synopsis', 'withargs'),
(9, 15, '-b <arg> desc', '-b arg'),
(16, 21, '-exec nest', '-exec'),
(29, 32, '-exec nest', 'EOF'),
(33, 39, '-b <arg> desc', '-b arg')],
[(22, 25, 'bar synopsis', 'bar'),
(26, 28, '-a desc', '-a')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 3)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult[0])
self.assertEquals(groups[2].results, matchedresult[1])
cmd = "withargs -b arg -exec bar -a ';' -a"
matchedresult = [[(0, 8, 'withargs synopsis', 'withargs'),
(9, 15, '-b <arg> desc', '-b arg'),
(16, 21, '-exec nest', '-exec'),
(29, 32, '-exec nest', "';'"),
(33, 35, '-a desc', '-a')],
[(22, 25, 'bar synopsis', 'bar'),
(26, 28, '-a desc', '-a')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 3)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult[0])
self.assertEquals(groups[2].results, matchedresult[1])
cmd = "withargs -b arg -exec bar -a \\; -a"
matchedresult = [[(0, 8, 'withargs synopsis', 'withargs'),
(9, 15, '-b <arg> desc', '-b arg'),
(16, 21, '-exec nest', '-exec'),
(29, 31, '-exec nest', "\\;"),
(32, 34, '-a desc', '-a')],
[(22, 25, 'bar synopsis', 'bar'),
(26, 28, '-a desc', '-a')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 3)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult[0])
self.assertEquals(groups[2].results, matchedresult[1])
cmd = 'withargs -exec bar -a -u'
matchedresult = [[(0, 8, 'withargs synopsis', 'withargs'),
(9, 14, '-exec nest', '-exec')],
[(15, 18, 'bar synopsis', 'bar'),
(19, 21, '-a desc', '-a'),
(22, 24, None, '-u')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 3)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult[0])
self.assertEquals(groups[2].results, matchedresult[1])
def test_multiple_nests(self):
cmd = 'withargs withargs -b arg bar'
matchedresult = [[(0, 8, 'withargs synopsis', 'withargs')],
[(9, 17, 'withargs synopsis', 'withargs'),
(18, 24, '-b <arg> desc', '-b arg')],
[(25, 28, 'bar synopsis', 'bar')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 4)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult[0])
self.assertEquals(groups[2].results, matchedresult[1])
self.assertEquals(groups[3].results, matchedresult[2])
def test_nested_command_is_unknown(self):
cmd = 'withargs -b arg unknown'
matchedresult = [(0, 8, 'withargs synopsis', 'withargs'),
(9, 15, '-b <arg> desc', '-b arg'),
(16, 23, 'FILE argument', 'unknown')]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult)
def test_unparsed(self):
cmd = '(bar; bar) c'
self.assertRaises(bashlex.errors.ParsingError,
matcher.matcher(cmd, s).match)
def test_known_and_unknown_program(self):
cmd = 'bar; foo arg >f; baz'
matchedresult = [[(3, 4, helpconstants.OPERATORS[';'], ';'),
(13, 15, helpconstants.REDIRECTION + '\n\n' +
helpconstants.REDIRECTION_KIND['>'], '>f'),
(15, 16, helpconstants.OPERATORS[';'], ';')],
[(0, 3, 'bar synopsis', 'bar')],
[(5, 12, None, 'foo arg')],
[(17, 20, 'baz synopsis', 'baz')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(groups[0].results, matchedresult[0])
self.assertEquals(groups[1].results, matchedresult[1])
self.assertEquals(groups[2].results, matchedresult[2])
def test_pipe(self):
cmd = 'bar | baz'
matchedresult = [[(4, 5, helpconstants.PIPELINES, '|')],
[(0, 3, 'bar synopsis', 'bar')],
[(6, 9, 'baz synopsis', 'baz')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(groups[0].results, matchedresult[0])
self.assertEquals(groups[1].results, matchedresult[1])
def test_subshells(self):
cmd = '((bar); bar)'
matchedresult = [[(0, 2, helpconstants._subshell, '(('),
(5, 6, helpconstants._subshell, ')'),
(6, 7, helpconstants.OPERATORS[';'], ';'),
(11, 12, helpconstants._subshell, ')')],
[(2, 5, 'bar synopsis', 'bar')],
[(8, 11, 'bar synopsis', 'bar')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(groups[0].results, matchedresult[0])
self.assertEquals(groups[1].results, matchedresult[1])
self.assertEquals(groups[2].results, matchedresult[2])
def test_redirect_first_word_of_command(self):
cmd = '2>&1'
matchedresult = [(0, 4, helpconstants.REDIRECTION + '\n\n' +
helpconstants.REDIRECTION_KIND['>'], '2>&1')]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 1)
self.assertEquals(groups[0].results, matchedresult)
cmd = '2>&1 bar'
matchedresult = [[(0, 4, helpconstants.REDIRECTION + '\n\n' +
helpconstants.REDIRECTION_KIND['>'], '2>&1')],
[(5, 8, 'bar synopsis', 'bar')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, matchedresult[0])
self.assertEquals(groups[1].results, matchedresult[1])
def test_comsub(self):
cmd = 'bar $(a) -b "b $(c) `c`" \'$(d)\' >$(e) `f`'
matchedresult = [(0, 3, 'bar synopsis', 'bar'),
(4, 8, None, '$(a)'),
(9, 24, '-b <arg> desc', '-b "b $(c) `c`"'),
(25, 31, None, "'$(d)'"),
(38, 41, None, '`f`')]
shellresult = [(32, 37, helpconstants.REDIRECTION + '\n\n' +
helpconstants.REDIRECTION_KIND['>'], '>$(e)')]
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(groups[0].results, shellresult)
self.assertEquals(groups[1].results, matchedresult)
# check expansions
self.assertEquals(m.expansions, [(6, 7, 'substitution'),
(17, 18, 'substitution'),
(21, 22, 'substitution'),
(35, 36, 'substitution'),
(39, 40, 'substitution')])
def test_comsub_as_arg(self):
cmd = 'withargs $(a) $0'
matchedresult = [(0, 8, 'withargs synopsis', 'withargs'),
(9, 16, 'FILE argument', '$(a) $0')]
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult)
# check expansions
self.assertEquals(m.expansions, [(11, 12, 'substitution'),
(14, 16, 'parameter-digits')])
def test_comsub_as_first_word(self):
cmd = '$(a) b'
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, [(0, 6, None, '$(a) b')])
# check expansions
self.assertEquals(m.expansions, [(2, 3, 'substitution')])
def test_procsub(self):
cmd = 'withargs -b <(a) >(b)'
matchedresult = [(0, 8, 'withargs synopsis', 'withargs'),
(9, 16, '-b <arg> desc', '-b <(a)'),
(17, 21, 'FILE argument', '>(b)')]
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchedresult)
# check expansions
self.assertEquals(m.expansions, [(14, 15, 'substitution'),
(19, 20, 'substitution')])
def test_if(self):
cmd = 'if bar -a; then b; fi'
shellresults = [(0, 2, helpconstants._if, 'if'),
(9, 15, helpconstants._if, '; then'),
(17, 21, helpconstants._if, '; fi')]
matchresults = [[(3, 6, 'bar synopsis', 'bar'), (7, 9, '-a desc', '-a')],
[(16, 17, None, 'b')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 3)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults[0])
self.assertEquals(groups[2].results, matchresults[1])
def test_nested_controlflows(self):
cmd = 'for a; do while bar; do baz; done; done'
shellresults = [(0, 9, helpconstants._for, 'for a; do'),
(10, 15, helpconstants._whileuntil, 'while'),
(19, 23, helpconstants._whileuntil, '; do'),
(27, 33, helpconstants._whileuntil, '; done'),
(33, 39, helpconstants._for, '; done')]
matchresults = [[(16, 19, 'bar synopsis', 'bar')],
[(24, 27, 'baz synopsis', 'baz')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 3)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults[0])
self.assertEquals(groups[2].results, matchresults[1])
def test_for_expansion(self):
cmd = 'for a in $(bar); do baz; done'
shellresults = [(0, 19, helpconstants._for, 'for a in $(bar); do'),
(23, 29, helpconstants._for, '; done')]
matchresults = [(20, 23, 'baz synopsis', 'baz')]
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults)
self.assertEquals(m.expansions, [(11, 14, 'substitution')])
def test_assignment_with_expansion(self):
cmd = 'a="$1" bar'
shellresults = [(0, 6, helpconstants.ASSIGNMENT, 'a="$1"')]
matchresults = [[(7, 10, 'bar synopsis', 'bar')]]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults[0])
def test_assignment_as_first_word(self):
cmd = 'a=b bar'
shellresults = [(0, 3, helpconstants.ASSIGNMENT, 'a=b')]
matchresults = [(4, 7, 'bar synopsis', 'bar')]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults)
def test_expansion_limit(self):
cmd = 'a $(b $(c))'
m = matcher.matcher(cmd, s)
m.match()
class depthchecker(bashlex.ast.nodevisitor):
def __init__(self):
self.depth = 0
self.maxdepth = 0
def visitnode(self, node):
if 'substitution' in node.kind:
self.depth += 1
self.maxdepth = max(self.maxdepth, self.depth)
def visitendnode(self, node):
if 'substitution' in node.kind:
self.depth -= 1
v = depthchecker()
v.visit(m.ast)
self.assertEquals(v.maxdepth, 1)
def test_functions(self):
cmd = 'function a() { bar; }'
shellresults = [(0, 14, helpconstants._function, 'function a() {'),
(18, 19, helpconstants.OPSEMICOLON, ';'),
(20, 21, helpconstants._function, '}'),]
matchresults = [(15, 18, 'bar synopsis', 'bar')]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults)
cmd = 'function a() { bar "$(a)"; }'
shellresults = [(0, 14, helpconstants._function, 'function a() {'),
(25, 26, helpconstants.OPSEMICOLON, ';'),
(27, 28, helpconstants._function, '}'),]
matchresults = [(15, 18, 'bar synopsis', 'bar'),
(19, 25, None, '"$(a)"')]
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults)
self.assertEquals(m.expansions, [(22, 23, 'substitution')])
def test_function_reference(self):
cmd = 'function a() { bar; a b; }; a'
shellresults = [(0, 14, helpconstants._function, 'function a() {'),
(18, 19, helpconstants.OPSEMICOLON, ';'),
(20, 21, helpconstants._functioncall % 'a', 'a'),
(22, 23, helpconstants._functionarg % 'a', 'b'),
(23, 24, helpconstants.OPSEMICOLON, ';'),
(25, 26, helpconstants._function, '}'),
(26, 27, helpconstants.OPSEMICOLON, ';'),
(28, 29, helpconstants._functioncall % 'a', 'a'),]
matchresults = [(15, 18, 'bar synopsis', 'bar')]
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults)
self.assertEquals(m.functions, set(['a']))
def test_comment(self):
cmd = 'bar # a comment'
shellresults = [(4, 15, helpconstants.COMMENT, '# a comment')]
matchresults = [(0, 3, 'bar synopsis', 'bar')]
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults)
cmd = '# just a comment'
shellresults = [(0, 16, helpconstants.COMMENT, '# just a comment')]
m = matcher.matcher(cmd, s)
groups = m.match()
self.assertEquals(len(groups), 1)
self.assertEquals(groups[0].results, shellresults)
def test_heredoc_at_eof(self):
cmd = 'bar <<EOF'
shellresults = [(4, 9, helpconstants.REDIRECTION + '\n\n' +
helpconstants.REDIRECTION_KIND['<<'], '<<EOF')]
matchresults = [(0, 3, 'bar synopsis', 'bar')]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, shellresults)
self.assertEquals(groups[1].results, matchresults)
def test_no_synopsis(self):
cmd = 'nosynopsis a'
matchresults = [(0, 10, helpconstants.NOSYNOPSIS, 'nosynopsis'),
(11, 12, None, 'a')]
groups = matcher.matcher(cmd, s).match()
self.assertEquals(len(groups), 2)
self.assertEquals(groups[0].results, [])
self.assertEquals(groups[1].results, matchresults)
| 25,908 | Python | .py | 506 | 38.488142 | 103 | 0.526132 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,130 | w3mman2html.cgi | idank_explainshell/tools/w3mman2html.cgi | #!/usr/bin/env perl
$MAN = $ENV{'W3MMAN_MAN'} || '@MAN@';
$QUERY = $ENV{'QUERY_STRING'} || $ARGV[0];
$SCRIPT_NAME = $ENV{'SCRIPT_NAME'} || $0;
$CGI = "file://$SCRIPT_NAME";
$CGI2 = "file:";
# $CGI2 = "file:///\$LIB/hlink.cgi?";
$SQUEEZE = 1;
$ENV{'PAGER'} = 'cat';
if ($QUERY =~ /\=/) {
for (split('&', $QUERY)) {
($v, $q) = split('=', $_, 2);
$query{$v} = &form_decode($q);
}
} else {
$QUERY =~ s/^man=//;
$query{"man"} = &form_decode($QUERY);
}
if ((! $query{"man"}) && (! $query{"local"})) {
if ($query{"keyword"}) {
$keyword = $query{"keyword"};
$k = &html_quote($keyword);
print <<EOF;
Content-Type: text/html
<html>
<head><title>man -k $k</title></head>
<body>
<h2>man -k <b>$k</b></h2>
<ul>
EOF
$keyword =~ s:([^-\w\200-\377.,])::g;
open(F, "$MAN -k $keyword 2> /dev/null |");
@line = ();
while(<F>) {
chop;
$_ = &html_quote($_);
s/(\s+-.*)$//;
$title = $1;
s@(\w[\w.\-]*(\s*\,\s*\w[\w.\-]*)*)\s*(\([\dn]\w*\))@&keyword_ref($1, $3)@ge;
print "<li>$_$title\n";
}
close(F);
print <<EOF;
</ul>
</body>
</html>
EOF
exit;
}
print <<EOF;
Content-Type: text/html
<html>
<head><title>man</title></head>
<body>
<form action="$CGI">
<table>
<tr><td>Manual:<td><input name=man>
<tr><td>Section:<td><input name=section>
<tr><td>Keyword:<td><input name=keyword>
<tr><td><td><input type=submit> <input type=reset>
</table>
</form>
</body>
</html>
EOF
exit;
}
if ($query{"local"}) {
$file = $query{"local"};
if (! ($file =~ /^\//)) {
$file = $query{"pwd"} . '/' . $file;
}
open(F, "MAN_KEEP_FORMATTING=1 $MAN -l $file 2> /dev/null |");
} else {
$man = $query{"man"};
if ($man =~ s/\((\w+)\)$//) {
$section = $1;
$man_section = "$man($1)";
} elsif ($query{"section"}) {
$section = $query{"section"};
$man_section = "$man($section)";
} else {
$section = "";
$man_section = "$man";
}
$section =~ s:([^-\w\200-\377.,])::g;
$man =~ s:([^-\w\200-\377.,])::g;
open(F, "MAN_KEEP_FORMATTING=1 $MAN $section $man 2> /dev/null |");
}
$ok = 0;
undef $header;
$blank = -1;
$cmd = "";
$prev = "";
while(<F>) {
if (! defined($header)) {
/^\s*$/ && next;
$header = $_;
$space = $header;
chop $space;
$space =~ s/\S.*//;
} elsif ($_ eq $header) { # delete header
$blank = -1;
next;
} elsif (!/\010/ && /^$space[\w\200-\377].*\s\S/o) { # delete footer
$blank = -1;
next;
}
if ($SQUEEZE) {
if (/^\s*$/) {
$blank || $blank++;
next;
} elsif ($blank) {
$blank > 0 && print "\n";
$blank = 0;
}
}
s/\&/\&/g;
s/\</\</g;
s/\>/\>/g;
s@([\200-\377].)(\010{1,2}\1)+@<b>$1</b>@g;
s@(\&\w+;|.)(\010\1)+@<b>$1</b>@g;
s@__\010{1,2}((\<b\>)?[\200-\377].(\</b\>)?)@<u>$1</u>@g;
s@_\010((\<b\>)?(\&\w+\;|.)(\</b\>)?)@<u>$1</u>@g;
s@((\<b\>)?[\200-\377].(\</b\>)?)\010{1,2}__@<u>$1</u>@g;
s@((\<b\>)?(\&\w+\;|.)(\</b\>)?)\010_@<u>$1</u>@g;
s@.\010(.)@$1@g;
s@\</b\>\</u\>\<b\>_\</b\>\<u\>\<b\>@_@g;
s@\</u\>\<b\>_\</b\>\<u\>@_@g;
s@\</u\>\<u\>@@g;
s@\</b\>\<b\>@@g;
if (! $ok) {
/^No/ && last;
print <<EOF;
Content-Type: text/html
<html>
<head><title>man $man_section</title></head>
<body>
<pre>
EOF
print;
$ok = 1;
next;
}
s@(http|ftp)://[\w.\-/~]+[\w/]@<a href="$&">$&</a>@g;
s@(\W)(mailto:)?(\w[\w.\-]*\@\w[\w.\-]*\.[\w.\-]*\w)@$1<a href="mailto:$3">$2$3</a>@g;
s@(\W)(\~?/[\w.][\w.\-/~]*)@$1 . &file_ref($2)@ge;
s@(include(<\/?[bu]\>|\s)*\<)([\w.\-/]+)@$1 . &include_ref($3)@ge;
if ($prev && m@^\s*(\<[bu]\>)*(\w[\w.\-]*)(\</[bu]\>)*(\([\dm]\w*\))@) {
$cmd .= "$2$4";
$prev =~ s@(\w[\w.\-]*-)((\</[bu]\>)*\s*)$@<a href="$CGI?$cmd">$1</a>$2@;
print $prev;
$prev = '';
s@^(\s*(\<[bu]\>)*)(\w[\w.\-]*)@@;
print "$1<a href=\"$CGI?$cmd\">$3</a>";
} elsif ($prev) {
print $prev;
$prev = '';
}
s@(\w[\w.\-]*)((\</[bu]\>)*)(\([\dm]\w*\))@<a href="$CGI?$1$4">$1</a>$2$4@g;
if (m@(\w[\w.\-]*)-(\</[bu]\>)*\s*$@) {
$cmd = $1;
$prev = $_;
next;
}
print;
}
if ($prev) {
print $prev;
}
close(F);
if (! $ok) {
if ($query{'quit'}) {
if ($query{'local'}) {
print STDERR "File $file not found.\n";
} else {
print STDERR "No manual entry for $man_section.\n";
}
print STDERR "No manual entry for $man_section.\n";
print <<EOF;
w3m-control: EXIT
EOF
exit 1;
}
print <<EOF;
Content-Type: text/html
<html>
<head><title>man $man_section</title></head>
<body>
<pre>
EOF
if ($query{'local'}) {
print "File <B>$file</B> not found.\n";
} else {
print "No manual entry for <B>$man_section</B>.\n";
}
}
print <<EOF;
</pre>
</body>
</html>
EOF
sub is_command {
local($_) = @_;
local($p);
(! -d && -x) || return 0;
if (! (%PATH)) {
for $p (split(":", $ENV{'PATH'})) {
$p =~ s@/+$@@;
$PATH{$p} = 1;
}
}
s@/[^/]*$@@;
return defined($PATH{$_});
}
sub file_ref {
local($_) = @_;
if (&is_command($_)) {
($man = $_) =~ s@.*/@@;
return "<a href=\"$CGI?$man\">$_</a>";
}
if (/^\~/ || -f || -d) {
return "<a href=\"$CGI2$_\">$_</a>";
}
return $_;
}
sub include_ref {
local($_) = @_;
local($d);
for $d (
"/usr/include",
"/usr/local/include",
"/usr/X11R6/include",
"/usr/X11/include",
"/usr/X/include",
"/usr/include/X11"
) {
-f "$d/$_" && return "<a href=\"$CGI2$d/$_\">$_</a>";
}
return $_;
}
sub keyword_ref {
local($_, $s) = @_;
local(@a) = ();
for (split(/\s*,\s*/)) {
push(@a, "<a href=\"$CGI?$_$s\">$_</a>");
}
return join(", ", @a) . $s;
}
sub html_quote {
local($_) = @_;
local(%QUOTE) = (
'<', '<',
'>', '>',
'&', '&',
'"', '"',
);
s/[<>&"]/$QUOTE{$&}/g;
return $_;
}
sub form_decode {
local($_) = @_;
s/\+/ /g;
s/%([\da-f][\da-f])/pack('c', hex($1))/egi;
return $_;
}
| 5,895 | Python | .cgi | 266 | 19.018797 | 88 | 0.443612 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,131 | uwsgi.conf | idank_explainshell/misc/supervisord/uwsgi.conf | [program:explainshell]
command=/usr/local/bin/uwsgi
--socket /tmp/explainshell.sock
--logto /home/idan/logs/uwsgi.log
--home /home/idan/venv
--pythonpath /home/idan/code
--wsgi-file /home/idan/code/runserver.py
--callable app
--max-requests 1000
--master
--processes 1
--chmod
directory=/home/idan/code
autostart=true
autorestart=true
user=idan
| 365 | Python | .wsgi | 16 | 20.5625 | 42 | 0.759312 | idank/explainshell | 13,114 | 782 | 152 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,132 | conftest.py | ansible_ansible-lint/conftest.py | """PyTest Fixtures."""
import importlib
import os
import platform
import subprocess
import sys
import warnings
from pathlib import Path
import pytest
# Ensure we always run from the root of the repository
if Path.cwd() != Path(__file__).parent:
os.chdir(Path(__file__).parent)
# checking if user is running pytest without installing test dependencies:
missing = [
module
for module in ["ansible", "black", "mypy", "pylint"]
if not importlib.util.find_spec(module)
]
if missing:
pytest.exit(
reason=f"FATAL: Missing modules: {', '.join(missing)} -- probably you missed installing test requirements with: pip install -e '.[test]'",
returncode=1,
)
# See: https://github.com/pytest-dev/pytest/issues/1402#issuecomment-186299177
def pytest_configure(config: pytest.Config) -> None:
"""Ensure we run preparation only on master thread when running in parallel."""
if is_help_option_present(config):
return
if is_master(config):
# we need to be sure that we have the requirements installed as some tests
# might depend on these. This approach is compatible with GHA caching.
try:
subprocess.check_output( # noqa: S603
["./tools/install-reqs.sh"],
stderr=subprocess.PIPE,
text=True,
)
except subprocess.CalledProcessError as exc:
print(f"{exc}\n{exc.stderr}\n{exc.stdout}", file=sys.stderr) # noqa: T201
sys.exit(1)
def is_help_option_present(config: pytest.Config) -> bool:
"""Return true if pytest invocation was not about running tests."""
return any(config.getoption(x) for x in ["--fixtures", "--help", "--collect-only"])
def is_master(config: pytest.Config) -> bool:
"""Return true if is run on master thread."""
return not hasattr(config, "workerinput")
# ruff: noqa: E402
from ansible.module_utils.common.yaml import ( # pylint: disable=wrong-import-position
HAS_LIBYAML,
)
if not HAS_LIBYAML:
# While presence of libyaml is not required for runtime, we keep this error
# fatal here in order to be sure that we spot libyaml errors during testing.
arch = platform.machine()
if arch not in ("arm64", "x86_64"):
warnings.warn(
f"This architecture ({arch}) is not supported by libyaml, performance will be degraded.",
category=pytest.PytestWarning,
stacklevel=1,
)
else:
pytest.fail(
"FATAL: For testing, we require pyyaml to be installed with its native extension, missing it would make testing 3x slower and risk missing essential bugs.",
)
@pytest.fixture(name="project_path")
def fixture_project_path() -> Path:
"""Fixture to linter root folder."""
return Path(__file__).resolve().parent
| 2,822 | Python | .py | 68 | 35.632353 | 168 | 0.67835 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,133 | conftest.py | ansible_ansible-lint/test/conftest.py | """PyTest fixtures for testing the project."""
from __future__ import annotations
import shutil
import subprocess
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
# pylint: disable=wildcard-import,unused-wildcard-import
from ansiblelint.testing.fixtures import * # noqa: F403
from ansiblelint.yaml_utils import FormattedYAML
if TYPE_CHECKING:
from _pytest import nodes
from _pytest.config import Config
from _pytest.config.argparsing import Parser
def pytest_addoption(parser: Parser) -> None:
"""Add --regenerate-formatting-fixtures option to pytest."""
parser.addoption(
"--regenerate-formatting-fixtures",
action="store_true",
default=False,
help="Regenerate formatting fixtures with prettier and internal formatter",
)
def pytest_collection_modifyitems(items: list[nodes.Item], config: Config) -> None:
"""Skip tests based on --regenerate-formatting-fixtures option."""
do_regenerate = config.getoption("--regenerate-formatting-fixtures")
skip_other = pytest.mark.skip(
reason="not a formatting_fixture test and "
"--regenerate-formatting-fixtures was specified",
)
skip_formatting_fixture = pytest.mark.skip(
reason="specify --regenerate-formatting-fixtures to "
"only run formatting_fixtures test",
)
for item in items:
if do_regenerate and "formatting_fixtures" not in item.keywords:
item.add_marker(skip_other)
elif not do_regenerate and "formatting_fixtures" in item.keywords:
item.add_marker(skip_formatting_fixture)
def pytest_configure(config: Config) -> None:
"""Register custom markers."""
if config.getoption("--regenerate-formatting-fixtures"):
regenerate_formatting_fixtures()
def regenerate_formatting_fixtures() -> None:
"""Re-generate formatting fixtures with prettier and internal formatter.
Pass ``--regenerate-formatting-fixtures`` to run this and skip all other tests.
This is a "test" because once fixtures are regenerated,
we run prettier again to make sure it does not change files formatted
with our internal formatting code.
"""
subprocess.check_call(["which", "prettier"])
yaml = FormattedYAML()
fixtures_dir = Path("test/fixtures/")
fixtures_dir_before = fixtures_dir / "formatting-before"
fixtures_dir_prettier = fixtures_dir / "formatting-prettier"
fixtures_dir_after = fixtures_dir / "formatting-after"
fixtures_dir_prettier.mkdir(exist_ok=True)
fixtures_dir_after.mkdir(exist_ok=True)
# Copying before fixtures...
for fixture in fixtures_dir_before.glob("fmt-[0-9].yml"):
shutil.copy(str(fixture), str(fixtures_dir_prettier / fixture.name))
shutil.copy(str(fixture), str(fixtures_dir_after / fixture.name))
# Writing fixtures with prettier...
subprocess.check_call(["prettier", "-w", str(fixtures_dir_prettier)])
# NB: pre-commit end-of-file-fixer can also modify files.
# Writing fixtures with ansiblelint.yaml_utils.FormattedYAML()
for fixture in fixtures_dir_after.glob("fmt-[0-9].yml"):
data = yaml.load(fixture.read_text())
output = yaml.dumps(data)
fixture.write_text(output)
# Make sure prettier won't make changes in {fixtures_dir_after}
subprocess.check_call(["prettier", "-c", str(fixtures_dir_after)])
| 3,397 | Python | .py | 71 | 42.267606 | 83 | 0.719818 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,134 | test_app.py | ansible_ansible-lint/test/test_app.py | """Test for app module."""
from pathlib import Path
from ansiblelint.constants import RC
from ansiblelint.file_utils import Lintable
from ansiblelint.testing import run_ansible_lint
def test_generate_ignore(tmp_path: Path) -> None:
"""Validate that --generate-ignore dumps expected ignore to the file."""
lintable = Lintable(tmp_path / "vars.yaml")
lintable.content = "foo: bar\nfoo: baz\n"
lintable.write(force=True)
ignore_file = tmp_path / ".ansible-lint-ignore"
assert not ignore_file.exists()
result = run_ansible_lint(lintable.filename, "--generate-ignore", cwd=tmp_path)
assert result.returncode == 2
assert ignore_file.exists()
with ignore_file.open(encoding="utf-8") as f:
assert "vars.yaml yaml[key-duplicates]\n" in f.readlines()
# Run again and now we expect to succeed as we have an ignore file.
result = run_ansible_lint(lintable.filename, cwd=tmp_path)
assert result.returncode == 0
def test_app_no_matches(tmp_path: Path) -> None:
"""Validate that linter returns special exit code if no files are analyzed."""
result = run_ansible_lint(cwd=tmp_path)
assert result.returncode == RC.NO_FILES_MATCHED
| 1,193 | Python | .py | 24 | 45.416667 | 83 | 0.726334 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,135 | test_constants.py | ansible_ansible-lint/test/test_constants.py | """Tests for constants module."""
from ansiblelint.constants import States
def test_states() -> None:
"""Test that states are evaluated as boolean false."""
assert bool(States.NOT_LOADED) is False
assert bool(States.LOAD_FAILED) is False
assert bool(States.UNKNOWN_DATA) is False
| 299 | Python | .py | 7 | 39 | 58 | 0.743945 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,136 | test_verbosity.py | ansible_ansible-lint/test/test_verbosity.py | """Tests related to our logging/verbosity setup."""
from __future__ import annotations
from pathlib import Path
import pytest
from ansiblelint.testing import run_ansible_lint
from ansiblelint.text import strip_ansi_escape
# substrs is a list of tuples, where:
# component 1 is the substring in question
# component 2 is whether or not to invert ("NOT") the match
@pytest.mark.parametrize(
("verbosity", "substrs"),
(
pytest.param(
"",
[
("WARNING Listing 1 violation(s) that are fatal", False),
("DEBUG ", True),
("INFO ", True),
],
id="default",
),
pytest.param(
"-q",
[
("WARNING ", True),
("DEBUG ", True),
("INFO ", True),
],
id="q",
),
pytest.param(
"-qq",
[
("WARNING ", True),
("DEBUG ", True),
("INFO ", True),
],
id="qq",
),
pytest.param(
"-v",
[
("WARNING Listing 1 violation(s) that are fatal", False),
("INFO Set ANSIBLE_LIBRARY=", False),
("DEBUG ", True),
],
id="v",
),
pytest.param(
"-vv",
[
("WARNING Listing 1 violation(s) that are fatal", False),
("INFO Set ANSIBLE_LIBRARY=", False),
],
id="really-loquacious",
),
pytest.param(
"-vv",
[
("WARNING Listing 1 violation(s) that are fatal", False),
("INFO Set ANSIBLE_LIBRARY=", False),
],
id="vv",
),
),
)
def test_verbosity(
verbosity: str,
substrs: list[tuple[str, bool]],
project_path: Path,
) -> None:
"""Checks that our default verbosity displays (only) warnings."""
# Piggyback off the .yamllint in the root of the repo, just for testing.
# We'll "override" it with the one in the fixture, to produce a warning.
fakerole = Path() / "test" / "fixtures" / "verbosity-tests"
if verbosity:
result = run_ansible_lint(verbosity, str(fakerole), cwd=project_path)
else:
result = run_ansible_lint(str(fakerole), cwd=project_path)
result.stderr = strip_ansi_escape(result.stderr)
result.stdout = strip_ansi_escape(result.stdout)
assert result.returncode == 2, result
for substr, invert in substrs:
if invert:
assert substr not in result.stderr, result.stderr
else:
assert substr in result.stderr, result.stderr
| 2,763 | Python | .py | 87 | 21.931034 | 77 | 0.511619 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,137 | test_skiputils.py | ansible_ansible-lint/test/test_skiputils.py | """Validate ansiblelint.skip_utils."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any
import pytest
from ansiblelint.constants import SKIPPED_RULES_KEY
from ansiblelint.file_utils import Lintable
from ansiblelint.runner import Runner
from ansiblelint.skip_utils import (
append_skipped_rules,
get_rule_skips_from_line,
is_nested_task,
)
if TYPE_CHECKING:
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansiblelint.rules import RulesCollection
from ansiblelint.testing import RunFromText
PLAYBOOK_WITH_NOQA = """\
---
- name: Fixture
hosts: all
vars:
SOME_VAR_NOQA: "Foo" # noqa: var-naming
SOME_VAR: "Bar"
tasks:
- name: "Set the SOME_OTHER_VAR"
ansible.builtin.set_fact:
SOME_OTHER_VAR_NOQA: "Baz" # noqa: var-naming
SOME_OTHER_VAR: "Bat"
"""
@pytest.mark.parametrize(
("line", "expected"),
(
pytest.param("foo # noqa: bar", "bar", id="0"),
pytest.param("foo # noqa bar", "bar", id="1"),
),
)
def test_get_rule_skips_from_line(line: str, expected: str) -> None:
"""Validate get_rule_skips_from_line."""
v = get_rule_skips_from_line(line, lintable=Lintable(""))
assert v == [expected]
def test_playbook_noqa(default_text_runner: RunFromText) -> None:
"""Check that noqa is properly taken into account on vars and tasks."""
results = default_text_runner.run_playbook(PLAYBOOK_WITH_NOQA)
# Should raise error at "SOME_VAR".
assert len(results) == 1
def test_playbook_noqa2(default_text_runner: RunFromText) -> None:
"""Check that noqa is properly taken into account on vars and tasks."""
results = default_text_runner.run_playbook(PLAYBOOK_WITH_NOQA, "test")
# Should raise error at "SOME_VAR".
assert len(results) == 1
@pytest.mark.parametrize(
("lintable", "yaml", "expected_form"),
(
pytest.param(
Lintable("examples/playbooks/noqa.yml", kind="playbook"),
[
{
"hosts": "localhost",
"tasks": [
{
"name": "This would typically fire latest[git] and partial-become",
"become_user": "alice",
"git": "src=/path/to/git/repo dest=checkout",
"__line__": 4,
"__file__": Path("examples/playbooks/noqa.yml"),
},
],
"__line__": 2,
"__file__": Path("examples/playbooks/noqa.yml"),
},
],
[
{
"hosts": "localhost",
"tasks": [
{
"name": "This would typically fire latest[git] and partial-become",
"become_user": "alice",
"git": "src=/path/to/git/repo dest=checkout",
"__line__": 4,
"__file__": Path("examples/playbooks/noqa.yml"),
SKIPPED_RULES_KEY: ["latest[git]", "partial-become"],
},
],
"__line__": 2,
"__file__": Path("examples/playbooks/noqa.yml"),
},
],
),
pytest.param(
Lintable("examples/playbooks/noqa-nested.yml", kind="playbook"),
[
{
"hosts": "localhost",
"tasks": [
{
"name": "Example of multi-level block",
"block": [
{
"name": "2nd level",
"block": [
{
"ansible.builtin.debug": {
"msg": "Test unnamed task in block",
"__line__": 9,
"__file__": Path(
"examples/playbooks/noqa-nested.yml",
),
},
"__line__": 8,
"__file__": Path(
"examples/playbooks/noqa-nested.yml",
),
},
],
"__line__": 6,
"__file__": Path(
"examples/playbooks/noqa-nested.yml",
),
},
],
"__line__": 4,
"__file__": Path("examples/playbooks/noqa-nested.yml"),
},
],
"__line__": 2,
"__file__": Path("examples/playbooks/noqa-nested.yml"),
},
],
[
{
"hosts": "localhost",
"tasks": [
{
"name": "Example of multi-level block",
"block": [
{
"name": "2nd level",
"block": [
{
"ansible.builtin.debug": {
"msg": "Test unnamed task in block",
"__line__": 9,
"__file__": Path(
"examples/playbooks/noqa-nested.yml",
),
},
"__line__": 8,
"__file__": Path(
"examples/playbooks/noqa-nested.yml",
),
SKIPPED_RULES_KEY: ["name[missing]"],
},
],
"__line__": 6,
"__file__": Path(
"examples/playbooks/noqa-nested.yml",
),
SKIPPED_RULES_KEY: ["name[missing]"],
},
],
"__line__": 4,
"__file__": Path("examples/playbooks/noqa-nested.yml"),
SKIPPED_RULES_KEY: ["name[missing]"],
},
],
"__line__": 2,
"__file__": Path("examples/playbooks/noqa-nested.yml"),
},
],
),
),
)
def test_append_skipped_rules(
    lintable: Lintable,
    yaml: AnsibleBaseYAMLObject,
    expected_form: AnsibleBaseYAMLObject,
) -> None:
    """Verify that append_skipped_rules() annotates the YAML tree as expected."""
    annotated = append_skipped_rules(yaml, lintable)
    assert annotated == expected_form
@pytest.mark.parametrize(
("task", "expected"),
(
pytest.param(
{
"name": "ensure apache is at the latest version",
"yum": {"name": "httpd", "state": "latest"},
},
False,
),
pytest.param(
{
"name": "Attempt and graceful roll back",
"block": [
{
"name": "Force a failure",
"ansible.builtin.command": "/bin/false",
},
],
"rescue": [
{
"name": "Force a failure in middle of recovery!",
"ansible.builtin.command": "/bin/false",
},
],
"always": [
{
"name": "Always do this",
"ansible.builtin.debug": {"msg": "This always executes"},
},
],
},
True,
),
),
)
def test_is_nested_task(task: dict[str, Any], expected: bool) -> None:
    """Check that is_nested_task() matches the expected boolean per fixture."""
    outcome = is_nested_task(task)
    assert outcome == expected
def test_capture_warning_outdated_tag(
    default_rules_collection: RulesCollection,
) -> None:
    """Check that the fixture raises exactly one warning[outdated-tag] match.

    The previous docstring ("Test that exclude paths do work.") was a
    copy-paste leftover and did not describe this test.
    """
    runner = Runner(
        "examples/playbooks/capture-warning.yml",
        rules=default_rules_collection,
    )
    matches = runner.run()
    assert len(matches) == 1
    assert matches[0].rule.id == "warning"
    assert matches[0].tag == "warning[outdated-tag]"
    assert matches[0].lineno == 8
| 9,395 | Python | .py | 233 | 21.450644 | 95 | 0.382192 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,138 | test_formatter.py | ansible_ansible-lint/test/test_formatter.py | """Test for output formatter."""
# Copyright (c) 2016 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pathlib
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.formatters import Formatter
from ansiblelint.rules import AnsibleLintRule, RulesCollection
# Module-level fixtures shared by all formatter tests below: one registered
# dummy rule and a Formatter rooted at the current working directory.
collection = RulesCollection()
rule = AnsibleLintRule()
rule.id = "TCF0001"
collection.register(rule)
formatter = Formatter(pathlib.Path.cwd(), display_relative_path=True)
# These details would generate a rich rendering error if not escaped:
DETAILS = "Some [/tmp/foo] details."
def test_format_coloured_string() -> None:
    """Ensure the formatter renders a colored match without raising."""
    error = MatchError(
        message="message",
        lineno=1,
        details=DETAILS,
        lintable=Lintable("filename.yml", content=""),
        rule=rule,
    )
    formatter.apply(error)
def test_unicode_format_string() -> None:
    """Ensure the formatter renders a non-ASCII (emoji) message without raising."""
    error = MatchError(
        message="\U0001f427",
        lineno=1,
        details=DETAILS,
        lintable=Lintable("filename.yml", content=""),
        rule=rule,
    )
    formatter.apply(error)
def test_dict_format_line() -> None:
    """Ensure the formatter copes with dict-typed details without raising."""
    error = MatchError(
        message="xyz",
        lineno=1,
        details={"hello": "world"},  # type: ignore[arg-type]
        lintable=Lintable("filename.yml", content=""),
        rule=rule,
    )
    formatter.apply(error)
| 2,517 | Python | .py | 62 | 36.741935 | 79 | 0.733442 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,139 | test_file_utils.py | ansible_ansible-lint/test/test_file_utils.py | """Tests for file utility functions."""
from __future__ import annotations
import copy
import logging
import os
import time
from pathlib import Path
from typing import TYPE_CHECKING, Any
import pytest
from ansiblelint import cli, file_utils
from ansiblelint.file_utils import (
Lintable,
cwd,
expand_path_vars,
expand_paths_vars,
find_project_root,
normpath,
normpath_path,
)
from ansiblelint.runner import Runner
if TYPE_CHECKING:
from _pytest.capture import CaptureFixture
from _pytest.logging import LogCaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from ansiblelint.constants import FileType
from ansiblelint.rules import RulesCollection
@pytest.mark.parametrize(
("path", "expected"),
(
pytest.param(Path("a/b/../"), "a", id="pathlib.Path"),
pytest.param("a/b/../", "a", id="str"),
pytest.param("", ".", id="empty"),
pytest.param(".", ".", id="empty"),
),
)
def test_normpath(path: str, expected: str) -> None:
    """Check that ``..`` components are collapsed for both str and Path inputs."""
    normalized = normpath(path)
    assert normalized == expected
def test_expand_path_vars(monkeypatch: MonkeyPatch) -> None:
    """Verify that tilde and environment variables are expanded in paths."""
    expected = "/test/path"
    monkeypatch.setenv("TEST_PATH", expected)
    home = os.path.expanduser("~")  # noqa: PTH111
    assert expand_path_vars("~") == home
    assert expand_path_vars("$TEST_PATH") == expected
@pytest.mark.parametrize(
("test_path", "expected"),
(
pytest.param(Path("$TEST_PATH"), "/test/path", id="pathlib.Path"),
pytest.param("$TEST_PATH", "/test/path", id="str"),
pytest.param(" $TEST_PATH ", "/test/path", id="stripped-str"),
pytest.param("~", os.path.expanduser("~"), id="home"), # noqa: PTH111
),
)
def test_expand_paths_vars(
    test_path: str | Path,
    expected: str,
    monkeypatch: MonkeyPatch,
) -> None:
    """Verify that tilde and env vars are expanded inside path lists."""
    monkeypatch.setenv("TEST_PATH", "/test/path")
    expanded = expand_paths_vars([test_path])  # type: ignore[list-item]
    assert expanded == [expected]
def test_discover_lintables_silent(
    monkeypatch: MonkeyPatch,
    capsys: CaptureFixture[str],
    caplog: LogCaptureFixture,
) -> None:
    """Verify that no stderr output is displayed while discovering yaml files.

    (when the verbosity is off, regardless of the Git or Git-repo presence)

    Also checks expected number of files are detected.
    """
    caplog.set_level(logging.FATAL)
    options = cli.get_config([])
    test_dir = Path(__file__).resolve().parent
    lint_path = (test_dir / ".." / "examples" / "roles" / "test-role").resolve()
    # Count both .yml and .yaml files so the expectation tracks the fixture tree.
    yaml_count = len(list(lint_path.glob("**/*.yml"))) + len(
        list(lint_path.glob("**/*.yaml")),
    )
    monkeypatch.chdir(str(lint_path))
    my_options = copy.deepcopy(options)
    my_options.lintables = [str(lint_path)]
    files = file_utils.discover_lintables(my_options)
    stderr = capsys.readouterr().err
    assert (
        not stderr
    ), f"No stderr output is expected when the verbosity is off, got: {stderr}"
    # f-string replaces the former str.format_map(locals()) construct: same
    # message, idiomatic and readable.
    assert (
        len(files) == yaml_count
    ), f"Expected to find {yaml_count} yaml files in {lint_path}"
def test_discover_lintables_umlaut(monkeypatch: MonkeyPatch) -> None:
    """Verify that filenames containing German umlauts are not garbled by the discover_lintables."""
    options = cli.get_config([])
    playbooks_dir = (
        Path(__file__).resolve().parent / ".." / "examples" / "playbooks"
    ).resolve()
    monkeypatch.chdir(str(playbooks_dir))
    discovered = file_utils.discover_lintables(options)
    # The octal-escaped (quoted) form must never appear; the real name must.
    assert '"with-umlaut-\\303\\244.yml"' not in discovered
    assert "with-umlaut-ä.yml" in discovered
@pytest.mark.parametrize(
("path", "kind"),
(
pytest.param("tasks/run_test_playbook.yml", "tasks", id="0"),
pytest.param("foo/playbook.yml", "playbook", id="1"),
pytest.param("playbooks/foo.yml", "playbook", id="2"),
pytest.param("examples/roles/foo.yml", "yaml", id="3"),
# the only yml file that is not a playbook inside molecule/ folders
pytest.param(
"examples/.config/molecule/config.yml",
"yaml",
id="4",
), # molecule shared config
pytest.param(
"test/schemas/test/molecule/cluster/base.yml",
"yaml",
id="5",
), # molecule scenario base config
pytest.param(
"test/schemas/test/molecule/cluster/molecule.yml",
"yaml",
id="6",
), # molecule scenario config
pytest.param(
"test/schemas/test/molecule/cluster/foobar.yml",
"playbook",
id="7",
), # custom playbook name
pytest.param(
"test/schemas/test/molecule/cluster/converge.yml",
"playbook",
id="8",
), # common playbook name
pytest.param(
"roles/foo/molecule/scenario3/requirements.yml",
"requirements",
id="9",
), # requirements
pytest.param(
"roles/foo/molecule/scenario3/collections.yml",
"requirements",
id="10",
), # requirements
pytest.param(
"roles/foo/meta/argument_specs.yml",
"role-arg-spec",
id="11",
), # role argument specs
# tasks files:
pytest.param("tasks/directory with spaces/main.yml", "tasks", id="12"), # tasks
pytest.param("tasks/requirements.yml", "tasks", id="13"), # tasks
# requirements (we do not support includes yet)
pytest.param(
"requirements.yml",
"requirements",
id="14",
), # collection requirements
pytest.param(
"roles/foo/meta/requirements.yml",
"requirements",
id="15",
), # inside role requirements
# Undeterminable files:
pytest.param("test/fixtures/unknown-type.yml", "yaml", id="16"),
pytest.param(
"releasenotes/notes/run-playbooks-refactor.yaml",
"reno",
id="17",
), # reno
pytest.param("examples/host_vars/localhost.yml", "vars", id="18"),
pytest.param("examples/group_vars/all.yml", "vars", id="19"),
pytest.param("examples/playbooks/vars/other.yml", "vars", id="20"),
pytest.param(
"examples/playbooks/vars/subfolder/settings.yml",
"vars",
id="21",
), # deep vars
pytest.param(
"molecule/scenario/collections.yml",
"requirements",
id="22",
), # deprecated 2.8 format
pytest.param(
"../roles/geerlingguy.mysql/tasks/configure.yml",
"tasks",
id="23",
), # relative path involved
pytest.param("galaxy.yml", "galaxy", id="24"),
pytest.param("foo.j2.yml", "jinja2", id="25"),
pytest.param("foo.yml.j2", "jinja2", id="26"),
pytest.param("foo.j2.yaml", "jinja2", id="27"),
pytest.param("foo.yaml.j2", "jinja2", id="28"),
pytest.param(
"examples/playbooks/rulebook.yml",
"playbook",
id="29",
), # playbooks folder should determine kind
pytest.param(
"examples/rulebooks/rulebook-pass.yml",
"rulebook",
id="30",
), # content should determine it as a rulebook
pytest.param(
"examples/yamllint/valid.yml",
"yaml",
id="31",
), # empty yaml is valid yaml, not assuming anything else
pytest.param(
"examples/other/guess-1.yml",
"playbook",
id="32",
), # content should determine is as a play
pytest.param(
"examples/playbooks/tasks/passing_task.yml",
"tasks",
id="33",
), # content should determine is tasks
pytest.param("examples/.collection/galaxy.yml", "galaxy", id="34"),
pytest.param("examples/meta/runtime.yml", "meta-runtime", id="35"),
pytest.param("examples/meta/changelogs/changelog.yaml", "changelog", id="36"),
pytest.param("examples/inventory/inventory.yml", "inventory", id="37"),
pytest.param("examples/inventory/production.yml", "inventory", id="38"),
pytest.param("examples/playbooks/vars/empty_vars.yml", "vars", id="39"),
pytest.param(
"examples/playbooks/vars/subfolder/settings.yaml",
"vars",
id="40",
),
pytest.param(
"examples/sanity_ignores/tests/sanity/ignore-2.14.txt",
"sanity-ignore-file",
id="41",
),
pytest.param("examples/playbooks/tasks/vars/bug-3289.yml", "vars", id="42"),
pytest.param(
"examples/site.yml",
"playbook",
id="43",
), # content should determine it as a play
pytest.param(
"plugins/modules/fake_module.py",
"plugin",
id="44",
),
pytest.param("examples/meta/changelogs/changelog.yml", "changelog", id="45"),
),
)
def test_kinds(path: str, kind: FileType) -> None:
    """Verify auto-detection logic based on DEFAULT_KINDS."""
    # A Lintable built without an explicit kind must compare equal to one
    # constructed with the expected kind, proving detection worked.
    assert Lintable(path) == Lintable(path, kind=kind)
def test_find_project_root_1(tmp_path: Path) -> None:
    """Verify find_project_root() falls back to the filesystem root."""
    # Mirrors black's behavior in absence of config files or .git/.hg folders.
    with cwd(tmp_path):
        root, reason = find_project_root([])
        assert str(root) == "/"
        assert reason == "file system root"
def test_find_project_root_dotconfig() -> None:
    """Verify find_project_root() honors a .config/ansible-lint.yml marker."""
    # The examples folder ships a .config/ansible-lint.yml file, which alone
    # is enough to mark it as the project root.
    with cwd(Path("examples")):
        marker = Path(".config/ansible-lint.yml")
        assert marker.exists(), "Test requires config file inside .config folder."
        root, reason = find_project_root([])
        assert str(root) == str(Path.cwd())
        assert ".config/ansible-lint.yml" in reason
BASIC_PLAYBOOK = """
- name: "playbook"
tasks:
- name: Hello
debug:
msg: 'world'
"""
@pytest.fixture(name="tmp_updated_lintable")
def fixture_tmp_updated_lintable(
    tmp_path: Path,
    path: str,
    content: str,
    updated_content: str,
) -> Lintable:
    """Create a temp file Lintable with a content update that is not on disk."""
    lintable = Lintable(tmp_path / path, content)
    with lintable.path.open("w", encoding="utf-8") as handle:
        handle.write(content)
    # Backdate mtime by one hour so a later write is detectable without races.
    stamp = time.time() - 60 * 60
    os.utime(str(lintable.path), (stamp, stamp))
    lintable.content = updated_content
    return lintable
@pytest.mark.parametrize(
("path", "content", "updated_content", "updated"),
(
pytest.param(
"no_change.yaml",
BASIC_PLAYBOOK,
BASIC_PLAYBOOK,
False,
id="no_change",
),
pytest.param(
"quotes.yaml",
BASIC_PLAYBOOK,
BASIC_PLAYBOOK.replace('"', "'"),
True,
id="updated_quotes",
),
pytest.param(
"shorten.yaml",
BASIC_PLAYBOOK,
"# short file\n",
True,
id="shorten_file",
),
),
)
def test_lintable_updated(
    path: str,
    content: str,
    updated_content: str,
    updated: bool,
) -> None:
    """Validate ``Lintable.updated`` when setting ``Lintable.content``."""
    subject = Lintable(path, content)
    assert subject.content == content
    subject.content = updated_content
    assert subject.content == updated_content
    # The flag must reflect whether the new content actually differs.
    assert subject.updated is updated
@pytest.mark.parametrize(
    "updated_content",
    ((None,), (b"bytes",)),
    ids=("none", "bytes"),
)
def test_lintable_content_setter_with_bad_types(updated_content: Any) -> None:
    """Validate ``Lintable.updated`` when setting ``Lintable.content``."""
    # NOTE(review): each parametrized value is a 1-tuple (e.g. ``(None,)``),
    # itself a non-str type, so the setter must raise TypeError either way.
    lintable = Lintable("bad_type.yaml", BASIC_PLAYBOOK)
    assert lintable.content == BASIC_PLAYBOOK
    with pytest.raises(TypeError):
        lintable.content = updated_content
    # A rejected assignment must not flip the updated flag.
    assert not lintable.updated
def test_lintable_with_new_file(tmp_path: Path) -> None:
    """Validate ``Lintable.updated`` for a new file."""
    new_file = Lintable(tmp_path / "new.yaml")
    # Assign the same content twice on purpose: the repeated identical
    # assignment must leave the updated flag set (asserted below).
    new_file.content = BASIC_PLAYBOOK
    new_file.content = BASIC_PLAYBOOK
    assert new_file.content == BASIC_PLAYBOOK
    assert new_file.updated
    # Nothing reaches the disk until write() is called.
    assert not new_file.path.exists()
    new_file.write()
    assert new_file.path.exists()
    assert new_file.path.read_text(encoding="utf-8") == BASIC_PLAYBOOK
@pytest.mark.parametrize(
("path", "force", "content", "updated_content", "updated"),
(
pytest.param(
"no_change.yaml",
False,
BASIC_PLAYBOOK,
BASIC_PLAYBOOK,
False,
id="no_change",
),
pytest.param(
"forced.yaml",
True,
BASIC_PLAYBOOK,
BASIC_PLAYBOOK,
False,
id="forced_rewrite",
),
pytest.param(
"quotes.yaml",
False,
BASIC_PLAYBOOK,
BASIC_PLAYBOOK.replace('"', "'"),
True,
id="updated_quotes",
),
pytest.param(
"shorten.yaml",
False,
BASIC_PLAYBOOK,
"# short file\n",
True,
id="shorten_file",
),
pytest.param(
"forced.yaml",
True,
BASIC_PLAYBOOK,
BASIC_PLAYBOOK.replace('"', "'"),
True,
id="forced_and_updated",
),
),
)
def test_lintable_write(
    tmp_updated_lintable: Lintable,
    force: bool,
    content: str,
    updated_content: str,
    updated: bool,
) -> None:
    """Validate ``Lintable.write`` writes when it should."""
    before_updated = tmp_updated_lintable.updated
    before_stat = tmp_updated_lintable.path.stat()
    tmp_updated_lintable.write(force=force)
    after_stat = tmp_updated_lintable.path.stat()
    after_updated = tmp_updated_lintable.updated
    # write() should not hide that an update happened
    assert before_updated == after_updated == updated
    # mtime only moves forward when something was (or was forced to be) written.
    if force or updated:
        assert before_stat.st_mtime < after_stat.st_mtime
    else:
        assert before_stat.st_mtime == after_stat.st_mtime
    on_disk = tmp_updated_lintable.path.read_text(encoding="utf-8")
    if updated:
        assert content != on_disk
    else:
        assert content == on_disk
    assert on_disk == updated_content
@pytest.mark.parametrize(
("path", "content", "updated_content"),
(
pytest.param(
"quotes.yaml",
BASIC_PLAYBOOK,
BASIC_PLAYBOOK.replace('"', "'"),
id="updated_quotes",
),
),
)
def test_lintable_content_deleter(
    tmp_updated_lintable: Lintable,
    content: str,
    updated_content: str,
) -> None:
    """Ensure that resetting content cache triggers re-reading file."""
    # Sanity: the fixture guarantees in-memory content differs from disk.
    assert content != updated_content
    assert tmp_updated_lintable.content == updated_content
    # Deleting the cached attribute must fall back to the on-disk text.
    del tmp_updated_lintable.content
    assert tmp_updated_lintable.content == content
@pytest.mark.parametrize(
("path", "result"),
(
pytest.param("foo", "foo", id="rel"),
pytest.param(
os.path.expanduser("~/xxx"), # noqa: PTH111
"~/xxx",
id="rel-to-home",
),
pytest.param("/a/b/c", "/a/b/c", id="absolute"),
pytest.param(
"examples/playbooks/roles",
"examples/roles",
id="resolve-symlink",
),
),
)
def test_normpath_path(path: str, result: str) -> None:
    """Check normpath_path() against the expected ``Path`` form."""
    expected = Path(result)
    assert normpath_path(path) == expected
def test_bug_2513(
    tmp_path: Path,
    default_rules_collection: RulesCollection,
) -> None:
    """Regression test for bug 2513.

    Test that when CWD is outside ~, and argument is like ~/playbook.yml
    we will still be able to process the files.

    See: https://github.com/ansible/ansible-lint/issues/2513
    """
    playbook_path = Path("~/.cache/ansible-lint/playbook.yml").expanduser()
    playbook_path.parent.mkdir(parents=True, exist_ok=True)
    playbook = Lintable(playbook_path, content="---\n- hosts: all\n")
    playbook.write(force=True)
    with cwd(tmp_path):
        matches = Runner(playbook_path, rules=default_rules_collection).run()
        assert len(matches) == 1
        assert matches[0].rule.id == "name"
| 17,164 | Python | .py | 484 | 27.683884 | 100 | 0.599314 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,140 | test_schemas.py | ansible_ansible-lint/test/test_schemas.py | """Test schemas modules."""
import json
import logging
import os
import subprocess
import sys
import urllib
import warnings
from pathlib import Path
from typing import Any
from unittest.mock import DEFAULT, MagicMock, patch
import license_expression
import pytest
from ansiblelint.file_utils import Lintable
from ansiblelint.schemas import __file__ as schema_module
from ansiblelint.schemas.__main__ import refresh_schemas
from ansiblelint.schemas.main import validate_file_schema
# Directory holding the bundled JSON schemas shipped with ansible-lint.
schema_path = Path(schema_module).parent
# SPDX license index shipped inside the license-expression package.
spdx_config_path = (
    Path(license_expression.__file__).parent / "data" / "scancode-licensedb-index.json"
)
def urlopen_side_effect(*_args: Any, **kwargs: Any) -> DEFAULT:
    """Side effect for a mocked urlopen: assert a positive timeout was passed."""
    assert "timeout" in kwargs
    assert kwargs["timeout"] > 0
    # mock.DEFAULT tells the MagicMock to keep its configured return value.
    return DEFAULT
@patch("urllib.request")
def test_requests_uses_timeout(mock_request: MagicMock) -> None:
    """Test that schema refresh uses timeout."""
    # urlopen_side_effect asserts a positive ``timeout`` kwarg on each call.
    mock_request.urlopen.side_effect = urlopen_side_effect
    refresh_schemas(min_age_seconds=0)
    mock_request.urlopen.assert_called()
@patch("urllib.request")
def test_request_timeouterror_handling(
    mock_request: MagicMock,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test that schema refresh can handle time out errors."""
    error_msg = "Simulating handshake operation time out."
    mock_request.urlopen.side_effect = urllib.error.URLError(TimeoutError(error_msg))
    with caplog.at_level(logging.DEBUG):
        # A network timeout must be swallowed: zero schemas refreshed, no raise.
        assert refresh_schemas(min_age_seconds=0) == 0
    mock_request.urlopen.assert_called()
    # The skip reason must be logged, including the original error message.
    assert "Skipped schema refresh due to unexpected exception: " in caplog.text
    assert error_msg in caplog.text
def test_schema_refresh_cli() -> None:
    """Ensure the ``python -m ansiblelint.schemas`` entry point succeeds."""
    cmd = [sys.executable, "-m", "ansiblelint.schemas"]
    result = subprocess.run(cmd, check=False, capture_output=True, text=True)
    assert result.returncode == 0, result
def test_validate_file_schema() -> None:
    """Validation of a file with an unknown kind must report exactly one error."""
    errors = validate_file_schema(Lintable("foo.bar", kind=""))
    assert len(errors) == 1, errors
    assert "Unable to find JSON Schema" in errors[0]
def test_spdx() -> None:
    """Test that SPDX license identifiers are in sync."""
    license_ids = set()
    with spdx_config_path.open(encoding="utf-8") as license_fh:
        licenses = json.load(license_fh)
    for lic in licenses:
        # Skip deprecated ids and local LicenseRef-* placeholders.
        if lic.get("is_deprecated"):
            continue
        lic_id = lic["spdx_license_key"]
        if lic_id.startswith("LicenseRef"):
            continue
        license_ids.add(lic_id)

    galaxy_json = schema_path / "galaxy.json"
    with galaxy_json.open(encoding="utf-8") as f:
        schema = json.load(f)
    spx_enum = schema["$defs"]["SPDXLicenseEnum"]["enum"]
    if set(spx_enum) != license_ids:
        # In absence of a pinned constraints file (PIP_CONSTRAINT unset or
        # pointing at /dev/null) regenerate the schema enum in place and fail
        # so the update gets committed; otherwise only emit a warning.
        if os.environ.get("PIP_CONSTRAINT", "/dev/null") == "/dev/null":
            with galaxy_json.open("w", encoding="utf-8") as f:
                schema["$defs"]["SPDXLicenseEnum"]["enum"] = sorted(license_ids)
                json.dump(schema, f, indent=2)
            pytest.fail(
                "SPDX license list inside galaxy.json JSON Schema file was updated.",
            )
        else:
            warnings.warn(
                "test_spdx failure was ignored because constraints were not pinned (PIP_CONSTRAINTS). This is expected for py310 and py-devel jobs.",
                category=pytest.PytestWarning,
                stacklevel=1,
            )
| 3,674 | Python | .py | 91 | 34.395604 | 149 | 0.680595 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,141 | test_dependencies_in_meta.py | ansible_ansible-lint/test/test_dependencies_in_meta.py | """Tests about dependencies in meta."""
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_external_dependency_is_ok(default_rules_collection: RulesCollection) -> None:
    """Check that external dep in role meta is not a violation."""
    # The lintable here is the role's meta file, not a playbook.
    meta_path = "examples/roles/dependency_in_meta/meta/main.yml"
    runner = Runner(meta_path, rules=default_rules_collection)
    assert runner.run() == []
| 458 | Python | .py | 8 | 53.875 | 86 | 0.767338 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,142 | test_with_skip_tagid.py | ansible_ansible-lint/test/test_with_skip_tagid.py | """Tests related to skip tag id."""
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.yaml_rule import YamllintRule
from ansiblelint.runner import Runner
from ansiblelint.testing import run_ansible_lint
# Fixture playbook exercised by every test in this module.
FILE = "examples/playbooks/with-skip-tag-id.yml"
# Shared collection containing only the yamllint-backed rule.
collection = RulesCollection()
collection.register(YamllintRule())
def test_negative_no_param() -> None:
    """Without any filters the fixture must produce at least one violation."""
    runner = Runner(FILE, rules=collection)
    assert len(runner.run()) > 0
def test_negative_with_id() -> None:
    """Selecting the ``yaml`` rule id must still report one violation."""
    runner = Runner(FILE, rules=collection, tags=frozenset(["yaml"]))
    assert len(runner.run()) == 1
def test_negative_with_tag() -> None:
    """Selecting a specific yaml sub-tag must still report one violation."""
    runner = Runner(
        FILE,
        rules=collection,
        tags=frozenset(["yaml[trailing-spaces]"]),
    )
    assert len(runner.run()) == 1
def test_positive_skip_id() -> None:
    """Skipping the ``yaml`` rule id must silence all violations."""
    runner = Runner(FILE, rules=collection, skip_list=["yaml"])
    assert runner.run() == []
def test_positive_skip_id_2() -> None:
    """Selecting only an unrelated tag yields no matches for this fixture.

    Despite the test name, this case filters via ``tags=`` (select) rather
    than ``skip_list=``; the former local name ``skip_id`` was misleading.
    """
    selected_tag = "key-order"
    runner = Runner(FILE, rules=collection, tags=frozenset([selected_tag]))
    assert runner.run() == []
def test_positive_skip_tag() -> None:
    """Skipping the specific yaml sub-tag must silence all violations."""
    runner = Runner(FILE, rules=collection, skip_list=["yaml[trailing-spaces]"])
    assert runner.run() == []
def test_run_skip_rule() -> None:
    """Test that we can skip a rule with -x."""
    proc = run_ansible_lint(
        "-x",
        "name[casing]",
        "examples/playbooks/rule-name-casing.yml",
        executable="ansible-lint",
    )
    # With the only offending rule excluded, lint exits clean and silent.
    assert proc.returncode == 0
    assert not proc.stdout
| 1,916 | Python | .py | 50 | 33.88 | 75 | 0.673514 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,143 | test_skip_inside_yaml.py | ansible_ansible-lint/test/test_skip_inside_yaml.py | """Tests related to use of inline noqa."""
import pytest
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
from ansiblelint.testing import run_ansible_lint
def test_role_tasks_with_block(default_rules_collection: RulesCollection) -> None:
    """Check that blocks in role tasks can contain skips."""
    matches = Runner(
        "examples/playbooks/roles/fixture_1",
        rules=default_rules_collection,
    ).run()
    assert len(matches) == 4
    # Every surviving match must be the non-skipped latest[git] finding.
    assert all(match.tag == "latest[git]" for match in matches)
@pytest.mark.parametrize(
    ("lintable", "expected"),
    (pytest.param("examples/playbooks/test_skip_inside_yaml.yml", 4, id="yaml"),),
)
def test_inline_skips(
    default_rules_collection: RulesCollection,
    lintable: str,
    expected: int,
) -> None:
    """Check that playbooks can contain skips."""
    # ``expected`` is the number of matches that still remain for the fixture.
    results = Runner(lintable, rules=default_rules_collection).run()
    assert len(results) == expected
def test_role_meta() -> None:
    """Test running from inside meta folder."""
    result = run_ansible_lint("-v", "examples/roles/meta_noqa")
    assert result.returncode == 0
    assert len(result.stdout) == 0
| 1,215 | Python | .py | 32 | 33.65625 | 82 | 0.70844 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,144 | test_cli.py | ansible_ansible-lint/test/test_cli.py | """Test cli arguments and config."""
from __future__ import annotations
import os
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from ansiblelint import cli
if TYPE_CHECKING:
from _pytest.monkeypatch import MonkeyPatch
@pytest.fixture(name="base_arguments")
def fixture_base_arguments() -> list[str]:
    """Define reusable base arguments for tests in current module."""
    # Relative lintable path handed to cli.get_config()/parse_args() by tests.
    return ["../test/skiptasks.yml"]
@pytest.mark.parametrize(
("args", "config_path"),
(
pytest.param(["-p"], "test/fixtures/parseable.yml", id="1"),
pytest.param(["-q"], "test/fixtures/quiet.yml", id="2"),
pytest.param(
["-r", "test/fixtures/rules/"],
"test/fixtures/rulesdir.yml",
id="3",
),
pytest.param(
["-R", "-r", "test/fixtures/rules/"],
"test/fixtures/rulesdir-defaults.yml",
id="4",
),
pytest.param(["-s"], "test/fixtures/strict.yml", id="5"),
pytest.param(["-t", "skip_ansible_lint"], "test/fixtures/tags.yml", id="6"),
pytest.param(["-v"], "test/fixtures/verbosity.yml", id="7"),
pytest.param(["-x", "bad_tag"], "test/fixtures/skip-tags.yml", id="8"),
pytest.param(["--exclude", "../"], "test/fixtures/exclude-paths.yml", id="9"),
pytest.param(["--show-relpath"], "test/fixtures/show-abspath.yml", id="10"),
pytest.param([], "test/fixtures/show-relpath.yml", id="11"),
),
)
def test_ensure_config_are_equal(
    base_arguments: list[str],
    args: list[str],
    config_path: str,
) -> None:
    """Check equality of the CLI options to config files."""
    parser = cli.get_cli_parser()
    options = parser.parse_args(base_arguments + args)
    file_config = cli.load_config(config_path)[0]
    for key, val in file_config.items():
        # config_file does not make sense in file_config
        if key == "config_file":
            continue
        # rulesdir entries are a list of Paths on the options object.
        expected = [Path(p) for p in val] if key == "rulesdir" else val
        assert expected == getattr(options, key), f"Mismatch for {key}"
@pytest.mark.parametrize(
("with_base", "args", "config", "expected"),
(
pytest.param(
True,
["--fix"],
"test/fixtures/config-with-write-all.yml",
["all"],
id="1",
),
pytest.param(
True,
["--fix=all"],
"test/fixtures/config-with-write-all.yml",
["all"],
id="2",
),
pytest.param(
True,
["--fix", "all"],
"test/fixtures/config-with-write-all.yml",
["all"],
id="3",
),
pytest.param(
True,
["--fix=none"],
"test/fixtures/config-with-write-none.yml",
[],
id="4",
),
pytest.param(
True,
["--fix", "none"],
"test/fixtures/config-with-write-none.yml",
[],
id="5",
),
pytest.param(
True,
["--fix=rule-tag,rule-id"],
"test/fixtures/config-with-write-subset.yml",
["rule-tag", "rule-id"],
id="6",
),
pytest.param(
True,
["--fix", "rule-tag,rule-id"],
"test/fixtures/config-with-write-subset.yml",
["rule-tag", "rule-id"],
id="7",
),
pytest.param(
True,
["--fix", "rule-tag", "--fix", "rule-id"],
"test/fixtures/config-with-write-subset.yml",
["rule-tag", "rule-id"],
id="8",
),
pytest.param(
False,
["--fix", "examples/playbooks/example.yml"],
"test/fixtures/config-with-write-all.yml",
["all"],
id="9",
),
pytest.param(
False,
["--fix", "examples/playbooks/example.yml", "non-existent.yml"],
"test/fixtures/config-with-write-all.yml",
["all"],
id="10",
),
),
)
def test_ensure_write_cli_does_not_consume_lintables(
    base_arguments: list[str],
    with_base: bool,
    args: list[str],
    config: str,
    expected: list[str],
) -> None:
    """Check equality of the CLI --fix options to config files."""
    cli_parser = cli.get_cli_parser()
    command = base_arguments + args if with_base else args
    options = cli_parser.parse_args(command)
    # Loading the fixture config proves it is still parseable; its write_list
    # is intentionally not merged (from_file=[]) because only the CLI side is
    # under test here. The former ``file_config.get("write_list")`` call
    # discarded its result and has been removed.
    cli.load_config(config)
    cli_value = cli.WriteArgAction.merge_fix_list_config(
        from_file=[],
        from_cli=options.write_list,
    )
    assert cli_value == expected
def test_config_can_be_overridden(base_arguments: list[str]) -> None:
    """Check that config can be overridden from CLI."""
    cli_only = cli.get_config([*base_arguments, "-t", "bad_tag"])
    with_file = cli.get_config(
        [*base_arguments, "-t", "bad_tag", "-c", "test/fixtures/tags.yml"],
    )
    # The config file contributes one extra tag on top of the CLI-given ones.
    assert [*cli_only.tags, "skip_ansible_lint"] == with_file.tags
def test_different_config_file(base_arguments: list[str]) -> None:
    """Ensure an alternate config_file can be used."""
    from_file = cli.get_config(
        [*base_arguments, "-c", "test/fixtures/ansible-config.yml"],
    )
    from_cli = cli.get_config([*base_arguments, "-v"])
    assert from_file.verbosity == from_cli.verbosity
def test_expand_path_user_and_vars_config_file(base_arguments: list[str]) -> None:
    """Ensure user and vars are expanded when specified as exclude_paths."""
    # config1: exclude paths supplied via a config file.
    config1 = cli.get_config(
        [*base_arguments, "-c", "test/fixtures/exclude-paths-with-expands.yml"],
    )
    # config2: the same paths supplied directly on the command line.
    config2 = cli.get_config(
        [
            *base_arguments,
            "--exclude",
            "~/.ansible/roles",
            "--exclude",
            "$HOME/.ansible/roles",
        ],
    )
    # File-provided paths are expanded (user/vars) but not resolved.
    assert str(config1.exclude_paths[0]) == os.path.expanduser(  # noqa: PTH111
        "~/.ansible/roles",
    )
    assert str(config1.exclude_paths[1]) == os.path.expandvars("$HOME/.ansible/roles")
    # exclude-paths coming in via cli are PosixPath objects; which hold the (canonical) real path (without symlinks)
    assert str(config2.exclude_paths[0]) == os.path.realpath(
        os.path.expanduser("~/.ansible/roles"),  # noqa: PTH111
    )
    assert str(config2.exclude_paths[1]) == os.path.realpath(
        os.path.expandvars("$HOME/.ansible/roles"),
    )
def test_path_from_config_do_not_depend_on_cwd(
    monkeypatch: MonkeyPatch,
) -> None:  # Issue 572
    """Check that config-provided paths are decoupled from CWD."""
    config1 = cli.load_config("test/fixtures/config-with-relative-path.yml")[0]
    monkeypatch.chdir("test")
    config2 = cli.load_config("fixtures/config-with-relative-path.yml")[0]
    # BUG FIX: ``list.sort()`` returns None, so the previous comparison of
    # ``.sort()`` results was always ``None == None`` and could never fail.
    # Compare the actual sorted lists instead.
    assert sorted(config1["exclude_paths"]) == sorted(config2["exclude_paths"])
@pytest.mark.parametrize(
    "config_file",
    (
        pytest.param("test/fixtures/ansible-config-invalid.yml", id="invalid"),
        pytest.param("/dev/null/ansible-config-missing.yml", id="missing"),
    ),
)
def test_config_failure(base_arguments: list[str], config_file: str) -> None:
    """Ensures specific config files produce error code 3."""
    # Both an unparsable config and a missing one must abort with exit code 3.
    with pytest.raises(SystemExit, match="^3$"):
        cli.get_config([*base_arguments, "-c", config_file])
def test_extra_vars_loaded(base_arguments: list[str]) -> None:
    """Ensure ``extra_vars`` option is loaded from file config."""
    options = cli.get_config(
        [*base_arguments, "-c", "test/fixtures/config-with-extra-vars.yml"],
    )
    expected = {"foo": "bar", "knights_favorite_word": "NI"}
    assert options.extra_vars == expected
@pytest.mark.parametrize(
    "config_file",
    (pytest.param("/dev/null", id="dev-null"),),
)
def test_config_dev_null(base_arguments: list[str], config_file: str) -> None:
    """Ensure a /dev/null config file is accepted and recorded as-is."""
    # FIX: the previous docstring ("produce error code 3") was copy-pasted
    # from test_config_failure and was wrong: this test asserts success.
    cfg = cli.get_config([*base_arguments, "-c", config_file])
    assert cfg.config_file == "/dev/null"
"""Tests related to rule properties."""
from ansiblelint.rules import RulesCollection
def test_severity_valid(default_rules_collection: RulesCollection) -> None:
    """Test that rules collection only has allow-listed severities."""
    allowed_severities = {
        "VERY_HIGH",
        "HIGH",
        "MEDIUM",
        "LOW",
        "VERY_LOW",
        "INFO",
    }
    # Every registered rule must declare one of the known severity levels.
    for rule in default_rules_collection:
        assert rule.severity in allowed_severities
"""Tests for MatchError."""
import operator
from collections.abc import Callable
from typing import Any
import pytest
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.rules.no_changed_when import CommandHasChangesCheckRule
from ansiblelint.rules.partial_become import BecomeUserWithoutBecomeRule
class DummyTestObject:
    """A dummy object for equality tests."""

    def __repr__(self) -> str:
        """Return a dummy object representation for parametrize."""
        return type(self).__name__ + "()"

    def __eq__(self, other: object) -> bool:
        """Report the equality check failure with any object."""
        # Deliberately never equal, regardless of the other operand.
        return False

    def __ne__(self, other: object) -> bool:
        """Report the confirmation of inequality with any object."""
        # Deliberately always unequal, mirroring __eq__.
        return True
class DummySentinelTestObject:
    """A dummy object for equality protocol tests with sentinel."""

    # Each comparison dunder leaks a distinct sentinel string so tests can
    # detect exactly which protocol method Python invoked.

    def __eq__(self, other: object) -> bool:
        """Return sentinel as result of equality check w/ anything."""
        return "EQ_SENTINEL"  # type: ignore[return-value]

    def __ne__(self, other: object) -> bool:
        """Return sentinel as result of inequality check w/ anything."""
        return "NE_SENTINEL"  # type: ignore[return-value]

    def __lt__(self, other: object) -> bool:
        """Return sentinel as result of less than check w/ anything."""
        return "LT_SENTINEL"  # type: ignore[return-value]

    def __gt__(self, other: object) -> bool:
        """Return sentinel as result of greater than chk w/ anything."""
        return "GT_SENTINEL"  # type: ignore[return-value]
# Pairs of independently constructed MatchError instances built from the
# same data; each pair must compare equal.
@pytest.mark.parametrize(
    ("left_match_error", "right_match_error"),
    (
        (MatchError("foo"), MatchError("foo")),
        (MatchError("a", details="foo"), MatchError("a", details="foo")),
    ),
)
def test_matcherror_compare(
    left_match_error: MatchError,
    right_match_error: MatchError,
) -> None:
    """Check that MatchError instances with similar attrs are equivalent."""
    assert left_match_error == right_match_error
def test_matcherror_invalid() -> None:
    """Ensure that MatchError requires message or rule."""
    # A bare constructor call (no message, no rule) must be rejected.
    pytest.raises(TypeError, MatchError)
# Every pair below is ordered so that the left error sorts strictly AFTER
# the right one; the three tests then verify <, > and != consistently.
@pytest.mark.parametrize(
    ("left_match_error", "right_match_error"),
    (
        # sorting by message
        (MatchError("z"), MatchError("a")),
        # filenames takes priority in sorting
        (
            MatchError("a", lintable=Lintable("b", content="")),
            MatchError("a", lintable=Lintable("a", content="")),
        ),
        # rule id partial-become > rule id no-changed-when
        (
            MatchError(rule=BecomeUserWithoutBecomeRule()),
            MatchError(rule=CommandHasChangesCheckRule()),
        ),
        # details are taken into account
        (MatchError("a", details="foo"), MatchError("a", details="bar")),
        # columns are taken into account
        (MatchError("a", column=3), MatchError("a", column=1)),
        (MatchError("a", column=3), MatchError("a")),
    ),
)
class TestMatchErrorCompare:
    """Test the comparison of MatchError instances."""

    @staticmethod
    def test_match_error_less_than(
        left_match_error: MatchError,
        right_match_error: MatchError,
    ) -> None:
        """Check 'less than' protocol implementation in MatchError."""
        assert right_match_error < left_match_error

    @staticmethod
    def test_match_error_greater_than(
        left_match_error: MatchError,
        right_match_error: MatchError,
    ) -> None:
        """Check 'greater than' protocol implementation in MatchError."""
        assert left_match_error > right_match_error

    @staticmethod
    def test_match_error_not_equal(
        left_match_error: MatchError,
        right_match_error: MatchError,
    ) -> None:
        """Check 'not equals' protocol implementation in MatchError."""
        assert left_match_error != right_match_error
# Ordering comparisons (<=, >) against unrelated types have no fallback in
# MatchError and must surface Python's standard TypeError.
# NOTE(review): `ids=repr` appears redundant here since every pytest.param
# already carries an explicit id -- confirm which one pytest honors.
@pytest.mark.parametrize(
    "other",
    (
        pytest.param(None, id="none"),
        pytest.param("foo", id="str"),
        pytest.param(42, id="int"),
        pytest.param(Exception("foo"), id="exc"),
    ),
    ids=repr,
)
@pytest.mark.parametrize(
    ("operation", "operator_char"),
    (
        pytest.param(operator.le, "<=", id="le"),
        pytest.param(operator.gt, ">", id="gt"),
    ),
)
def test_matcherror_compare_no_other_fallback(
    other: Any,
    operation: Callable[..., bool],
    operator_char: str,
) -> None:
    """Check that MatchError comparison with other types causes TypeError."""
    # CPython's TypeError message wording differs between binary-operator and
    # rich-comparison failures; accept either alternative via the regex.
    expected_error = (
        r"^("
        rf"unsupported operand type\(s\) for {operator_char!s}:|"
        rf"'{operator_char!s}' not supported between instances of"
        rf") 'MatchError' and '{type(other).__name__!s}'$"
    )
    with pytest.raises(TypeError, match=expected_error):
        operation(MatchError("foo"), other)


# Equality comparisons, by contrast, fall back gracefully: == is False and
# != is True for any foreign type instead of raising.
@pytest.mark.parametrize(
    "other",
    (
        pytest.param(None, id="none"),
        pytest.param("foo", id="str"),
        pytest.param(42, id="int"),
        pytest.param(Exception("foo"), id="exception"),
        pytest.param(DummyTestObject(), id="obj"),
    ),
    ids=repr,
)
@pytest.mark.parametrize(
    ("operation", "expected_value"),
    (
        pytest.param(operator.eq, False, id="eq"),
        pytest.param(operator.ne, True, id="ne"),
    ),
)
def test_matcherror_compare_with_other_fallback(
    other: object,
    operation: Callable[..., bool],
    expected_value: bool,
) -> None:
    """Check that MatchError comparison runs other types fallbacks."""
    assert operation(MatchError(message="foo"), other) is expected_value


# When the other operand implements the comparison dunders itself, MatchError
# must return NotImplemented so Python delegates to the other object.
@pytest.mark.parametrize(
    ("operation", "expected_value"),
    (
        pytest.param(operator.eq, "EQ_SENTINEL", id="eq"),
        pytest.param(operator.ne, "NE_SENTINEL", id="ne"),
        # NOTE: these are swapped because when we do `x < y`, and `x.__lt__(y)`
        # NOTE: returns `NotImplemented`, Python will reverse the check into
        # NOTE: `y > x`, and so `y.__gt__(x) is called.
        # Ref: https://docs.python.org/3/reference/datamodel.html#object.__lt__
        pytest.param(operator.lt, "GT_SENTINEL", id="gt"),
        pytest.param(operator.gt, "LT_SENTINEL", id="lt"),
    ),
)
def test_matcherror_compare_with_dummy_sentinel(
    operation: Callable[..., bool],
    expected_value: str,
) -> None:
    """Check that MatchError comparison runs other types fallbacks."""
    dummy_obj = DummySentinelTestObject()
    # NOTE: This assertion abuses the CPython property to cache short string
    # NOTE: objects because the identity check is more precise and we don't
    # NOTE: want extra operator protocol methods to influence the test.
    assert operation(MatchError("foo"), dummy_obj) is expected_value  # type: ignore[comparison-overlap]
"""Test mockings module."""
from pathlib import Path
import pytest
from ansiblelint._mockings import _make_module_stub
from ansiblelint.config import Options
from ansiblelint.constants import RC
def test_make_module_stub(config_options: Options) -> None:
    """Test make module stub."""
    config_options.cache_dir = Path()  # current directory
    # An empty module name is invalid and must abort with INVALID_CONFIG.
    with pytest.raises(SystemExit) as excinfo:
        _make_module_stub(module_name="", options=config_options)
    assert excinfo.type is SystemExit
    assert excinfo.value.code == RC.INVALID_CONFIG
"""Tests for LoadFailureRule."""
import pytest
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
# Both fixture files contain bytes that are not valid UTF-8.
@pytest.mark.parametrize(
    "path",
    (
        pytest.param("examples/broken/encoding.j2", id="jinja2"),
        pytest.param("examples/broken/encoding.yml", id="yaml"),
    ),
)
def test_load_failure_encoding(
    path: str,
    default_rules_collection: RulesCollection,
) -> None:
    """Check that we fail when file encoding is wrong."""
    runner = Runner(path, rules=default_rules_collection)
    matches = runner.run()
    # Exactly one load failure per undecodable file.
    assert len(matches) == 1, matches
    assert matches[0].rule.id == "load-failure"
    assert "'utf-8' codec can't decode byte" in matches[0].message
    # The specific decode error is exposed as a tag sub-id.
    assert matches[0].tag == "load-failure[unicodedecodeerror]"
30,149 | test_formatter_base.py | ansible_ansible-lint/test/test_formatter_base.py | """Tests related to base formatter."""
from __future__ import annotations
from pathlib import Path
from typing import Any
import pytest
from ansiblelint.formatters import BaseFormatter
@pytest.mark.parametrize(
    ("base_dir", "relative_path"),
    (
        pytest.param(None, True, id="0"),
        pytest.param("/whatever", False, id="1"),
        pytest.param(Path("/whatever"), False, id="2"),
    ),
)
@pytest.mark.parametrize(
    "path",
    (
        pytest.param("/whatever/string", id="a"),
        pytest.param(Path("/whatever/string"), id="b"),
    ),
)
def test_base_formatter_when_base_dir(
    base_dir: Any,
    relative_path: bool,
    path: str,
) -> None:
    """Check that base formatter accepts relative pathlib and str."""
    formatter = BaseFormatter(base_dir, relative_path)  # type: ignore[var-annotated]
    formatted = formatter._format_path(path)  # noqa: SLF001
    # The formatter must keep the str/Path type family intact and leave the
    # path untouched when relativization is off or no base dir is set.
    assert isinstance(formatted, str | Path)
    assert formatter.base_dir is None or isinstance(formatter.base_dir, str | Path)
    assert formatted == path
@pytest.mark.parametrize(
    "base_dir",
    (
        pytest.param(Path("/whatever"), id="0"),
        pytest.param("/whatever", id="1"),
    ),
)
@pytest.mark.parametrize(
    "path",
    (
        pytest.param("/whatever/string", id="a"),
        pytest.param(Path("/whatever/string"), id="b"),
    ),
)
def test_base_formatter_when_base_dir_is_given_and_relative_is_true(
    path: str | Path,
    base_dir: str | Path,
) -> None:
    """Check that the base formatter equally accepts pathlib and str."""
    formatter = BaseFormatter(base_dir, True)  # type: ignore[var-annotated]
    formatted = formatter._format_path(path)  # noqa: SLF001
    # With a base dir configured and relative output requested, only the
    # final path component survives formatting.
    assert isinstance(formatted, str | Path)
    assert isinstance(formatter.base_dir, str | Path)
    assert formatted == Path(path).name
# vim: et:sw=4:syntax=python:ts=4:
"""Assure samples produced desire outcomes."""
import pytest
from ansiblelint.app import get_app
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
from ansiblelint.testing import run_ansible_lint
def test_example(default_rules_collection: RulesCollection) -> None:
    """example.yml is expected to have exact number of errors inside."""
    runner = Runner("examples/playbooks/example.yml", rules=default_rules_collection)
    # The fixture playbook deliberately triggers a fixed number of violations.
    assert len(runner.run()) == 22
@pytest.mark.parametrize(
    ("filename", "expected_results"),
    (
        pytest.param(
            "examples/playbooks/syntax-error-string.yml",
            [("syntax-check[unknown-module]", 6, 7)],
            id="0",
        ),
        pytest.param(
            "examples/playbooks/syntax-error.yml",
            [("syntax-check[specific]", 2, 3)],
            id="1",
        ),
    ),
)
def test_example_syntax_error(
    default_rules_collection: RulesCollection,
    filename: str,
    expected_results: list[tuple[str, int | None, int | None]],
) -> None:
    """Validates that loading valid YAML string produce error."""
    result = Runner(filename, rules=default_rules_collection).run()
    assert len(result) == len(expected_results)
    # Line and column numbers are 1-based so they match what editors show
    # (or output from other linters).
    for match, (tag, lineno, column) in zip(result, expected_results):
        if tag is not None:
            assert match.tag == tag
        if lineno is not None:
            assert match.lineno == lineno
        if column is not None:
            assert match.column == column
def test_example_custom_module(default_rules_collection: RulesCollection) -> None:
    """custom_module.yml is expected to pass."""
    app = get_app(offline=True)
    matches = Runner(
        "examples/playbooks/custom_module.yml",
        rules=default_rules_collection,
    ).run()
    # On failure, surface the cache dir to aid debugging collection setup.
    assert not matches, f"{app.runtime.cache_dir}"


def test_vault_full(default_rules_collection: RulesCollection) -> None:
    """Check ability to process fully vaulted files."""
    matches = Runner(
        "examples/playbooks/vars/vault_full.yml",
        rules=default_rules_collection,
    ).run()
    assert not matches


def test_vault_partial(
    default_rules_collection: RulesCollection,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Check ability to process files that container !vault inside."""
    matches = Runner(
        "examples/playbooks/vars/vault_partial.yml",
        rules=default_rules_collection,
    ).run()
    assert not matches
    # Ensure that we do not have side-effect extra logging even if the vault
    # content cannot be decrypted.
    assert caplog.record_tuples == []
def test_custom_kinds() -> None:
    """Check if user defined kinds are used."""
    result = run_ansible_lint("-vv", "--offline", "examples/other/")
    assert result.returncode == 0
    # .yaml-too is not a recognized extension and unless is manually defined
    # in our ansible-lint config, the test would not identify it as yaml file.
    for expected_line in (
        "Examining examples/other/some.yaml-too of type yaml",
        "Examining examples/other/some.j2.yaml of type jinja2",
    ):
        assert expected_line in result.stderr


def test_bug_3216(capsys: pytest.CaptureFixture[str]) -> None:
    """Check that we hide ansible-core originating warning about fallback on unique filter."""
    result = run_ansible_lint(
        "-vv",
        "--offline",
        "examples/playbooks/bug-core-warning-unique-filter-fallback.yml",
    )
    captured = capsys.readouterr()
    assert result.returncode == 0
    warn_msg = "Falling back to Ansible unique filter"
    # The warning must be absent from both captured streams.
    for stream in (captured.err, captured.out):
        assert warn_msg not in stream
"""Tests related to role paths."""
from __future__ import annotations
import os
from pathlib import Path
import pytest
from ansiblelint.constants import RC
from ansiblelint.testing import run_ansible_lint
from ansiblelint.text import strip_ansi_escape
@pytest.fixture(name="local_test_dir")
def fixture_local_test_dir() -> Path:
    """Fixture to return local test directory."""
    # <repo>/test/<this file> -> <repo>/examples
    return Path(__file__).resolve().parents[1] / "examples"
def test_run_single_role_path_no_trailing_slash_module(local_test_dir: Path) -> None:
    """Test that a role path without a trailing slash is accepted."""
    # The fixture role contains a known shell-vs-command violation.
    outcome = run_ansible_lint("roles/test-role", cwd=local_test_dir)
    assert "Use shell only when shell functionality is required" in outcome.stdout


def test_run_single_role_path_no_trailing_slash_script(local_test_dir: Path) -> None:
    """Test that a role path without a trailing slash is accepted."""
    outcome = run_ansible_lint(
        "roles/test-role",
        cwd=local_test_dir,
        executable="ansible-lint",
    )
    assert "Use shell only when shell functionality is required" in outcome.stdout


def test_run_single_role_path_with_trailing_slash(local_test_dir: Path) -> None:
    """Test that a role path with a trailing slash is accepted."""
    outcome = run_ansible_lint("roles/test-role/", cwd=local_test_dir)
    assert "Use shell only when shell functionality is required" in outcome.stdout


def test_run_multiple_role_path_no_trailing_slash(local_test_dir: Path) -> None:
    """Test that multiple roles paths without a trailing slash are accepted."""
    # NOTE(review): only a single role path is linted here despite the name
    # and docstring mentioning multiple paths -- confirm original intent.
    outcome = run_ansible_lint("roles/test-role", cwd=local_test_dir)
    assert "Use shell only when shell functionality is required" in outcome.stdout


def test_run_multiple_role_path_with_trailing_slash(local_test_dir: Path) -> None:
    """Test that multiple roles paths without a trailing slash are accepted."""
    # NOTE(review): as above, only one (trailing-slash) path is linted.
    outcome = run_ansible_lint("roles/test-role/", cwd=local_test_dir)
    assert "Use shell only when shell functionality is required" in outcome.stdout
def test_run_inside_role_dir(local_test_dir: Path) -> None:
    """Tests execution from inside a role."""
    # Lint "." while positioned inside the role directory itself.
    outcome = run_ansible_lint(".", cwd=local_test_dir / "roles" / "test-role")
    assert "Use shell only when shell functionality is required" in outcome.stdout


def test_run_role_three_dir_deep(local_test_dir: Path) -> None:
    """Tests execution from deep inside a role."""
    outcome = run_ansible_lint("testproject/roles/test-role", cwd=local_test_dir)
    assert "Use shell only when shell functionality is required" in outcome.stdout
def test_run_playbook(local_test_dir: Path) -> None:
    """Call ansible-lint the way molecule does."""
    cwd = local_test_dir / "roles" / "test-role"
    env = os.environ.copy()
    env["ANSIBLE_ROLES_PATH"] = str(Path(cwd).parent.resolve())
    env["NO_COLOR"] = "1"
    outcome = run_ansible_lint(
        "-f",
        "pep8",
        "molecule/default/include-import-role.yml",
        cwd=cwd,
        env=env,
    )
    # All 4 failures are expected to be found inside the included role and not
    # from the playbook given as argument.
    assert outcome.returncode == RC.VIOLATIONS_FOUND
    assert "tasks/main.yml:2: command-instead-of-shell" in outcome.stdout
    assert "tasks/world.yml:2: name[missing]" in outcome.stdout
@pytest.mark.parametrize(
    ("args", "expected_msg"),
    (
        pytest.param(
            [],
            "role-name: Role name invalid-name does not match",
            id="normal",
        ),
        pytest.param(["--skip-list", "role-name"], "", id="skipped"),
    ),
)
def test_run_role_name_invalid(
    local_test_dir: Path,
    args: list[str],
    expected_msg: str,
) -> None:
    """Test run with a role with invalid name."""
    outcome = run_ansible_lint(*args, "roles/invalid-name", cwd=local_test_dir)
    # A message means we expect the violation exit code; otherwise success.
    expected_rc = 2 if expected_msg else 0
    assert outcome.returncode == expected_rc, outcome
    if expected_msg:
        assert expected_msg in strip_ansi_escape(outcome.stdout)
def test_run_role_name_with_prefix(local_test_dir: Path) -> None:
    """Test run where role path has a prefix."""
    outcome = run_ansible_lint("-v", "roles/ansible-role-foo", cwd=local_test_dir)
    # A clean role: no findings and a zero exit code.
    assert not outcome.stdout
    assert outcome.returncode == 0


def test_run_role_name_from_meta(local_test_dir: Path) -> None:
    """Test running from inside meta folder."""
    outcome = run_ansible_lint("-v", "roles/valid-due-to-meta", cwd=local_test_dir)
    assert not outcome.stdout
    assert outcome.returncode == 0


def test_run_invalid_role_name_from_meta(local_test_dir: Path) -> None:
    """Test invalid role from inside meta folder."""
    outcome = run_ansible_lint("roles/invalid_due_to_meta", cwd=local_test_dir)
    expected = "role-name: Role name invalid-due-to-meta does not match"
    assert expected in strip_ansi_escape(outcome.stdout)
def test_run_single_role_path_with_roles_path_env(local_test_dir: Path) -> None:
    """Test for role name collision with ANSIBLE_ROLES_PATH.

    Test if ansible-lint chooses the role in the current directory when the role
    specified as parameter exists in the current directory and the ANSIBLE_ROLES_PATH.
    """
    env = os.environ.copy()
    env["ANSIBLE_ROLES_PATH"] = os.path.realpath(
        (local_test_dir / "../examples/roles").resolve(),
    )
    outcome = run_ansible_lint("roles/test-role", cwd=local_test_dir, env=env)
    assert "Use shell only when shell functionality is required" in outcome.stdout
@pytest.mark.parametrize(
    ("result", "env"),
    (
        (True, {"GITHUB_ACTIONS": "true", "GITHUB_WORKFLOW": "foo", "NO_COLOR": "1"}),
        (False, None),
    ),
    ids=("on", "off"),
)
def test_run_playbook_github(result: bool, env: dict[str, str]) -> None:
    """Call ansible-lint simulating GitHub Actions environment."""
    cwd = Path(__file__).parent.parent.resolve()
    if env is None:
        env = {}
    env["PATH"] = os.environ["PATH"]
    outcome = run_ansible_lint("examples/playbooks/example.yml", cwd=cwd, env=env)
    annotation = (
        "::error file=examples/playbooks/example.yml,line=44,severity=VERY_LOW,title=package-latest::"
        "Package installs should not use latest"
    )
    # The GitHub annotation must appear only when the Actions env is simulated.
    assert (annotation in outcome.stderr) is result
def test_run_role_identified(local_test_dir: Path) -> None:
    """Test that role name is identified correctly."""
    env = os.environ.copy()
    env["ANSIBLE_ROLES_PATH"] = os.path.realpath(
        (local_test_dir / "../examples/roles/role_detection").resolve(),
    )
    outcome = run_ansible_lint(
        Path("roles/role_detection/foo/defaults/main.yml"),
        cwd=local_test_dir,
        env=env,
    )
    assert outcome.returncode == RC.SUCCESS
def test_run_role_identified_prefix_missing(local_test_dir: Path) -> None:
    """Test that role name is identified correctly, with prefix violations."""
    cwd = local_test_dir
    env = os.environ.copy()
    env["ANSIBLE_ROLES_PATH"] = os.path.realpath(
        (cwd / "../examples/roles/role_detection/base").resolve(),
    )
    result = run_ansible_lint(
        Path("roles/role_detection/base/bar/defaults/main.yml"),
        cwd=cwd,
        env=env,
    )
    assert result.returncode == RC.VIOLATIONS_FOUND
    # FIX: this assertion previously appeared twice verbatim; once is enough.
    assert (
        "Variables names from within roles should use bar_ as a prefix" in result.stdout
    )
"""Test strict mode."""
import os
import pytest
from ansiblelint.testing import run_ansible_lint
@pytest.mark.parametrize(
    ("strict", "returncode", "message"),
    (
        pytest.param(True, 2, "Failed", id="on"),
        pytest.param(False, 0, "Passed", id="off"),
    ),
)
def test_strict(strict: bool, returncode: int, message: str) -> None:
    """Test running from inside meta folder."""
    env = os.environ.copy()
    env["NO_COLOR"] = "1"
    args = ["examples/playbooks/strict-mode.yml"]
    if strict:
        args.insert(0, "--strict")
    result = run_ansible_lint(*args, env=env)
    assert result.returncode == returncode
    assert "args[module]" in result.stdout
    # The run summary on stderr must begin with the expected verdict word.
    if not any(
        summary_line.startswith(message) for summary_line in result.stderr.splitlines()
    ):
        pytest.fail(f"Failed to find {message} inside stderr output")
"""Tests for rule collection class."""
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
import collections
import re
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import RulesCollection
from ansiblelint.testing import run_ansible_lint
if TYPE_CHECKING:
from ansiblelint.config import Options
# Collection built from the test fixtures directory (custom rules plus the
# always-on internal rules).
@pytest.fixture(name="test_rules_collection")
def fixture_test_rules_collection() -> RulesCollection:
    """Create a shared rules collection test instance."""
    return RulesCollection([Path("./test/rules/fixtures").resolve()])


@pytest.fixture(name="ematchtestfile")
def fixture_ematchtestfile() -> Lintable:
    """Produce a test lintable with an id violation."""
    return Lintable("examples/playbooks/ematcher-rule.yml", kind="playbook")


@pytest.fixture(name="bracketsmatchtestfile")
def fixture_bracketsmatchtestfile() -> Lintable:
    """Produce a test lintable with matching brackets."""
    return Lintable("examples/playbooks/bracketsmatchtest.yml", kind="playbook")
def test_load_collection_from_directory(test_rules_collection: RulesCollection) -> None:
    """Test that custom rules extend the default ones."""
    # two detected rules plus the internal ones
    expected_rule_count = 7
    assert len(test_rules_collection) == expected_rule_count


def test_run_collection(
    test_rules_collection: RulesCollection,
    ematchtestfile: Lintable,
) -> None:
    """Test that default rules match pre-meditated violations."""
    matches = test_rules_collection.run(ematchtestfile)
    # 3 occurrences of BANNED using TEST0001 + 1 for raw-task
    assert len(matches) == 4
    assert matches[0].lineno == 3
def test_tags(
    test_rules_collection: RulesCollection,
    ematchtestfile: Lintable,
    bracketsmatchtestfile: Lintable,
) -> None:
    """Test that tags are treated as skip markers."""
    # (lintable, tag to run, expected number of matches)
    expectations = (
        (ematchtestfile, "test1", 3),
        (ematchtestfile, "test2", 0),
        (bracketsmatchtestfile, "test1", 0),
        (bracketsmatchtestfile, "test2", 2),
    )
    for lintable, tag, expected_count in expectations:
        matches = test_rules_collection.run(lintable, tags={tag})
        assert len(matches) == expected_count


def test_skip_tags(
    test_rules_collection: RulesCollection,
    ematchtestfile: Lintable,
    bracketsmatchtestfile: Lintable,
) -> None:
    """Test that tags can be skipped."""
    # (lintable, tags to skip, expected number of matches)
    expectations = (
        (ematchtestfile, ["test1", "test3"], 0),
        (ematchtestfile, ["test2", "test3"], 3),
        (bracketsmatchtestfile, ["test1"], 2),
        (bracketsmatchtestfile, ["test2"], 0),
    )
    for lintable, skips, expected_count in expectations:
        matches = test_rules_collection.run(lintable, skip_list=skips)
        assert len(matches) == expected_count
def test_skip_id(
    test_rules_collection: RulesCollection,
    ematchtestfile: Lintable,
    bracketsmatchtestfile: Lintable,
) -> None:
    """Check that skipping valid IDs excludes their violations."""
    # (lintable, rule ids to skip, expected number of matches)
    expectations = (
        (ematchtestfile, ["TEST0001", "raw-task"], 0),
        (ematchtestfile, ["TEST0002", "raw-task"], 3),
        (bracketsmatchtestfile, ["TEST0001"], 2),
        (bracketsmatchtestfile, ["TEST0002"], 0),
    )
    for lintable, skips, expected_count in expectations:
        matches = test_rules_collection.run(lintable, skip_list=skips)
        assert len(matches) == expected_count


def test_skip_non_existent_id(
    test_rules_collection: RulesCollection,
    ematchtestfile: Lintable,
) -> None:
    """Check that skipping invalid IDs changes nothing."""
    matches = test_rules_collection.run(ematchtestfile, skip_list=["DOESNOTEXIST"])
    assert len(matches) == 4
def test_no_duplicate_rule_ids() -> None:
    """Check that rules of the collection don't have duplicate IDs."""
    real_rules = RulesCollection([Path("./src/ansiblelint/rules").resolve()])
    # Every rule id must occur exactly once across the shipped rules.
    id_counts = collections.Counter(rule.id for rule in real_rules)
    assert all(count == 1 for count in id_counts.values())
def test_rich_rule_listing() -> None:
    """Test that rich list format output is rendered as a table.

    This check also offers the contract of having rule id, short and long
    descriptions in the console output.
    """
    rules_path = Path("./test/rules/fixtures").resolve()
    result = run_ansible_lint("-r", str(rules_path), "-f", "full", "-L")
    assert result.returncode == 0
    for rule in RulesCollection([rules_path]):
        # description could wrap inside table, so we do not check full length
        for fragment in (rule.id, rule.shortdesc, rule.description[:30]):
            assert fragment in result.stdout
def test_rules_id_format(config_options: Options) -> None:
    """Assure all our rules have consistent format."""
    # Rule ids are short, lowercase, dash-separated identifiers.
    rule_id_re = re.compile("^[a-z-]{4,30}$")
    rules = RulesCollection(
        [Path("./src/ansiblelint/rules").resolve()],
        options=config_options,
        conditional=False,
    )
    seen_ids: set[str] = set()
    for rule in rules:
        assert rule_id_re.match(
            rule.id,
        ), f"Rule id {rule.id} did not match our required format."
        seen_ids.add(rule.id)
        has_docs = rule.help or rule.description or rule.__doc__
        assert (
            has_docs
        ), f"Rule {rule.id} must have at least one of: .help, .description, .__doc__"
    assert "yaml" in seen_ids, "yaml rule is missing"
    assert len(rules) == 51  # update this number when adding new rules!
    assert len(seen_ids) == len(rules), "Duplicate rule ids?"
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Tests for generic utility functions."""
from __future__ import annotations
import logging
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Any
import pytest
from ansible.utils.sentinel import Sentinel
from ansible_compat.runtime import Runtime
from ansiblelint import cli, constants, utils
from ansiblelint.__main__ import initialize_logger
from ansiblelint.cli import get_rules_dirs
from ansiblelint.constants import RC
from ansiblelint.file_utils import Lintable, cwd
from ansiblelint.runner import Runner
from ansiblelint.testing import run_ansible_lint
if TYPE_CHECKING:
from collections.abc import Sequence
from _pytest.capture import CaptureFixture
from _pytest.logging import LogCaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from ansiblelint.rules import RulesCollection
runtime = Runtime(require_module=True)
@pytest.mark.parametrize(
    ("string", "expected_args", "expected_kwargs"),
    (
        pytest.param("", [], {}, id="a"),
        pytest.param("a=1", [], {"a": "1"}, id="b"),
        pytest.param("hello a=1", ["hello"], {"a": "1"}, id="c"),
        pytest.param(
            "whatever bobbins x=y z=x c=3",
            ["whatever", "bobbins"],
            {"x": "y", "z": "x", "c": "3"},
            id="more_than_one_arg",
        ),
        pytest.param(
            "command chdir=wxy creates=zyx tar xzf zyx.tgz",
            ["command", "tar", "xzf", "zyx.tgz"],
            {"chdir": "wxy", "creates": "zyx"},
            id="command_with_args",
        ),
        pytest.param(
            "{{ varset }}.yml",
            ["{{ varset }}.yml"],
            {},
            id="x",
        ),
        pytest.param(
            "foo bar.yml",
            ["foo bar.yml"],
            {},
            id="path-with-spaces",
        ),
    ),
)
def test_tokenize(
    string: str,
    expected_args: Sequence[str],
    expected_kwargs: dict[str, Any],
) -> None:
    """Test that tokenize splits a task line into positional args and kwargs."""
    # Positional words land in args, key=value pairs in kwargs.
    (args, kwargs) = utils.tokenize(string)
    assert args == expected_args
    assert kwargs == expected_kwargs
@pytest.mark.parametrize(
    ("reference_form", "alternate_forms"),
    (
        pytest.param(
            {"name": "hello", "action": "command chdir=abc echo hello world"},
            ({"name": "hello", "command": "chdir=abc echo hello world"},),
            id="simple_command",
        ),
        pytest.param(
            {"git": {"version": "abc"}, "args": {"repo": "blah", "dest": "xyz"}},
            (
                {"git": {"version": "abc", "repo": "blah", "dest": "xyz"}},
                {"git": "version=abc repo=blah dest=xyz"},
                {
                    "git": None,
                    "args": {"repo": "blah", "dest": "xyz", "version": "abc"},
                },
            ),
            id="args",
        ),
    ),
)
def test_normalize(
    reference_form: dict[str, Any],
    alternate_forms: tuple[dict[str, Any]],
) -> None:
    """Test that tasks specified differently are normalized same way."""
    task = utils.Task(reference_form, filename="tasks.yml")
    normal_form = task._normalize_task()  # noqa: SLF001
    # Every alternate spelling must normalize to the reference normal form.
    for form in alternate_forms:
        task2 = utils.Task(form, filename="tasks.yml")
        assert normal_form == task2._normalize_task()  # noqa: SLF001
def test_normalize_complex_command() -> None:
    """Test that tasks specified differently are normalized same way."""
    # Four equivalent spellings of the same pip task.
    equivalent_forms = [
        {
            "name": "hello",
            "action": {"module": "pip", "name": "df", "editable": "false"},
        },
        {"name": "hello", "pip": {"name": "df", "editable": "false"}},
        {"name": "hello", "pip": "name=df editable=false"},
        {"name": "hello", "action": "pip name=df editable=false"},
    ]
    normalized = [
        utils.Task(form, filename="tasks.yml")._normalize_task()  # noqa: SLF001
        for form in equivalent_forms
    ]
    # All spellings must collapse into one canonical normal form.
    assert all(item == normalized[0] for item in normalized[1:])
@pytest.mark.parametrize(
    ("task_raw", "expected_form"),
    (
        pytest.param(
            {
                "name": "ensure apache is at the latest version",
                "yum": {"name": "httpd", "state": "latest"},
            },
            {
                "delegate_to": Sentinel,
                "name": "ensure apache is at the latest version",
                "action": {
                    "__ansible_module__": "yum",
                    "__ansible_module_original__": "yum",
                    "name": "httpd",
                    "state": "latest",
                },
            },
            id="0",
        ),
        pytest.param(
            {
                "name": "Attempt and graceful roll back",
                "block": [
                    {
                        "name": "Install httpd and memcached",
                        "ansible.builtin.yum": ["httpd", "memcached"],
                        "state": "present",
                    },
                ],
            },
            {
                "name": "Attempt and graceful roll back",
                "block": [
                    {
                        "name": "Install httpd and memcached",
                        "ansible.builtin.yum": ["httpd", "memcached"],
                        "state": "present",
                    },
                ],
                "action": {
                    "__ansible_module__": "block/always/rescue",
                    "__ansible_module_original__": "block/always/rescue",
                },
            },
            id="1",
        ),
    ),
)
def test_normalize_task_v2(
    task_raw: dict[str, Any],
    expected_form: dict[str, Any],
) -> None:
    """Check that it normalizes task and returns the expected form."""
    task = utils.Task(task_raw)
    # The normalized form injects the synthetic __ansible_module__ keys.
    assert utils.normalize_task_v2(task) == expected_form
def test_extract_from_list() -> None:
    """Check that tasks get extracted from blocks if present."""
    block = {
        "block": [{"tasks": {"name": "hello", "command": "whoami"}}],
        "test_none": None,
        "test_string": "foo",
    }
    blocks = [block]
    test_list = utils.extract_from_list(blocks, ["block"])
    test_none = utils.extract_from_list(blocks, ["test_none"])
    assert list(block["block"]) == test_list  # type: ignore[arg-type]
    # A None candidate value yields nothing instead of raising.
    assert not test_none
    # A non-list candidate (plain string) is rejected with RuntimeError.
    with pytest.raises(RuntimeError):
        utils.extract_from_list(blocks, ["test_string"])
def test_extract_from_list_recursive() -> None:
    """Check that nested blocks are only flattened when recursive=True."""
    outer = {
        "block": [{"block": [{"name": "hello", "command": "whoami"}]}],
    }
    data = [outer]
    # Non-recursive extraction returns only the first level of tasks.
    flat = utils.extract_from_list(data, ["block"])
    assert list(outer["block"]) == flat
    # Recursive extraction appends the inner block's tasks as well.
    deep = utils.extract_from_list(data, ["block"], recursive=True)
    expected = outer["block"] + outer["block"][0]["block"]
    assert expected == deep
@pytest.mark.parametrize(
    ("template", "output"),
    (
        pytest.param("{{ playbook_dir }}", "/a/b/c", id="simple"),
        pytest.param(
            "{{ 'hello' | doesnotexist }}",
            "hello",  # newer implementation ignores unknown filters
            id="unknown_filter",
        ),
        pytest.param(
            "{{ hello | to_json }}",
            "{{ hello | to_json }}",
            id="to_json_filter_on_undefined_variable",
        ),
        pytest.param(
            "{{ hello | to_nice_yaml }}",
            "{{ hello | to_nice_yaml }}",
            id="to_nice_yaml_filter_on_undefined_variable",
        ),
    ),
)
def test_template(template: str, output: str) -> None:
    """Verify that resolvable template vars and filters get rendered."""
    # Unresolvable expressions are returned verbatim (fail_on_error=False).
    result = utils.template(
        basedir=Path("/base/dir"),
        value=template,
        variables={"playbook_dir": "/a/b/c"},
        fail_on_error=False,
    )
    assert result == output
def test_task_to_str_unicode() -> None:
    """Ensure that extracting messages from tasks preserves Unicode."""
    raw = {"fail": {"msg": "unicode é ô à"}}
    # Normalize first, exactly as the linter does before rendering.
    normalized = utils.Task(raw, filename="filename.yml")._normalize_task()  # noqa: SLF001
    assert utils.task_to_str(normalized) == "fail msg=unicode é ô à"
def test_logger_debug(caplog: LogCaptureFixture) -> None:
    """Test that the double verbosity arg causes logger to be DEBUG."""
    parsed = cli.get_config(["-vv"])
    initialize_logger(parsed.verbosity)
    # The logger announces its own level once initialization completes.
    assert (
        "ansiblelint.__main__",
        logging.DEBUG,
        "Logging initialized to level 10",
    ) in caplog.record_tuples
def test_cli_auto_detect(capfd: CaptureFixture[str]) -> None:
    """Test that run without arguments it will detect and lint the entire repository."""
    cmd = [
        sys.executable,
        "-m",
        "ansiblelint",
        "-x",
        "schema",  # exclude schema as our test file would fail it
        "-v",
        "-p",
        "--nocolor",
    ]
    result = subprocess.run(cmd, check=False).returncode
    # We do expect to fail on our own repo due to test examples we have
    assert result == RC.VIOLATIONS_FOUND
    out, err = capfd.readouterr()
    # An expected rule match from our examples
    assert (
        "examples/playbooks/empty_playbook.yml:1:1: "
        "syntax-check[empty-playbook]: Empty playbook, nothing to do" in out
    )
    # assures that our ansible-lint config exclude was effective in excluding github files
    assert "Identified: .github/" not in out
    # assures that we can parse playbooks as playbooks
    assert "Identified: test/test/always-run-success.yml" not in err
    assert (
        "Executing syntax check on playbook examples/playbooks/mocked_dependency.yml"
        in err
    )
def test_is_playbook() -> None:
    """Verify that we can detect a playbook as a playbook."""
    playbook_path = "examples/playbooks/always-run-success.yml"
    assert utils.is_playbook(playbook_path)
@pytest.mark.parametrize(
    "exclude",
    (pytest.param("foo", id="1"), pytest.param("foo/", id="2")),
)
def test_auto_detect_exclude(tmp_path: Path, exclude: str) -> None:
    """Verify that exclude option can be used to narrow down detection."""
    with cwd(tmp_path):
        # Auto-detection only works inside a git repository.
        subprocess.check_output(
            "git init",
            stderr=subprocess.STDOUT,
            text=True,
            shell=True,
            cwd=tmp_path,
        )
        (tmp_path / "foo").mkdir()
        (tmp_path / "bar").mkdir()
        (tmp_path / "foo" / "playbook.yml").touch()
        (tmp_path / "bar" / "playbook.yml").touch()
        options = cli.get_config(["--exclude", exclude])
        options.cwd = tmp_path
        result = utils.get_lintables(options)
        assert result == [Lintable("bar/playbook.yml", kind="playbook")]
        # now we also test with .gitignore exclude approach
        (tmp_path / ".gitignore").write_text(f".gitignore\n{exclude}\n")
        options = cli.get_config([])
        options.cwd = tmp_path
        result = utils.get_lintables(options)
        assert result == [Lintable("bar/playbook.yml", kind="playbook")]
# Rule directory fixtures shared by the get_rules_dirs tests below.
_DEFAULT_RULEDIRS = [constants.DEFAULT_RULESDIR]
_CUSTOM_RULESDIR = Path(__file__).parent / "custom_rules"
_CUSTOM_RULEDIRS = [
    _CUSTOM_RULESDIR / "example_inc",
    _CUSTOM_RULESDIR / "example_com",
]
@pytest.mark.parametrize(
    ("user_ruledirs", "use_default", "expected"),
    (
        ([], True, _DEFAULT_RULEDIRS),
        ([], False, _DEFAULT_RULEDIRS),
        (_CUSTOM_RULEDIRS, True, _CUSTOM_RULEDIRS + _DEFAULT_RULEDIRS),
        (_CUSTOM_RULEDIRS, False, _CUSTOM_RULEDIRS),
    ),
)
def test_get_rules_dirs(
    user_ruledirs: list[Path],
    use_default: bool,
    expected: list[Path],
) -> None:
    """Test it returns expected dir lists."""
    assert get_rules_dirs(user_ruledirs, use_default=use_default) == expected
@pytest.mark.parametrize(
    ("user_ruledirs", "use_default", "expected"),
    (
        ([], True, sorted(_CUSTOM_RULEDIRS) + _DEFAULT_RULEDIRS),
        ([], False, sorted(_CUSTOM_RULEDIRS) + _DEFAULT_RULEDIRS),
        (
            _CUSTOM_RULEDIRS,
            True,
            _CUSTOM_RULEDIRS + sorted(_CUSTOM_RULEDIRS) + _DEFAULT_RULEDIRS,
        ),
        (_CUSTOM_RULEDIRS, False, _CUSTOM_RULEDIRS),
    ),
)
def test_get_rules_dirs_with_custom_rules(
    user_ruledirs: list[Path],
    use_default: bool,
    expected: list[Path],
    monkeypatch: MonkeyPatch,
) -> None:
    """Test it returns expected dir lists when custom rules exist."""
    # Pointing the env var at our fixture dir makes its rules dirs get added.
    monkeypatch.setenv(constants.CUSTOM_RULESDIR_ENVVAR, str(_CUSTOM_RULESDIR))
    assert get_rules_dirs(user_ruledirs, use_default=use_default) == expected
def test_find_children(default_rules_collection: RulesCollection) -> None:
    """Verify correct function of find_children()."""
    # Should not raise while discovering the children of the playbook.
    Runner(
        rules=default_rules_collection,
    ).find_children(Lintable("examples/playbooks/find_children.yml"))
def test_find_children_in_task(default_rules_collection: RulesCollection) -> None:
    """Verify correct function of find_children() in tasks."""
    # Regression check: running over this tasks file must not raise.
    Runner(
        Lintable("examples/playbooks/tasks/bug-2875.yml"),
        rules=default_rules_collection,
    ).run()
@pytest.mark.parametrize(
    ("file", "names", "positions"),
    (
        pytest.param(
            "examples/playbooks/task_in_list-0.yml",
            ["A", "B", "C", "D", "E", "F", "G"],
            [
                ".[0].tasks[0]",
                ".[0].tasks[1]",
                ".[0].pre_tasks[0]",
                ".[0].post_tasks[0]",
                ".[0].post_tasks[0].block[0]",
                ".[0].post_tasks[0].rescue[0]",
                ".[0].post_tasks[0].always[0]",
            ],
            id="0",
        ),
    ),
)
def test_task_in_list(file: str, names: list[str], positions: list[str]) -> None:
    """Check that tasks get extracted from blocks if present."""
    lintable = Lintable(file)
    assert lintable.kind
    tasks = list(
        utils.task_in_list(data=lintable.data, file=lintable, kind=lintable.kind),
    )
    assert len(tasks) == len(names)
    # Every discovered task must report both its name and its YAML path.
    for index, task in enumerate(tasks):
        assert task.name == names[index]
        assert task.position == positions[index]
def test_find_children_in_module(default_rules_collection: RulesCollection) -> None:
    """Verify correct function of find_children() in tasks."""
    module_file = Lintable("plugins/modules/fake_module.py")
    runner = Runner(rules=default_rules_collection)
    children = runner.find_children(module_file)
    # Exactly one embedded document should be discovered.
    assert len(children) == 1
    (child,) = children
    # The parent is python source; the extracted fragment is YAML content.
    assert module_file.base_kind == "text/python"
    assert child.base_kind == "text/yaml"
    assert child.content.startswith("---")
def test_find_children_in_playbook(default_rules_collection: RulesCollection) -> None:
    """Verify correct function of find_children() in playbooks."""
    lintable = Lintable("examples/playbooks/bug-4095.yml")
    children = Runner(
        rules=default_rules_collection,
    ).find_children(lintable)
    assert len(children) == 1
    # The single child must be attributed to the role used by the play.
    assert children[0].role == "bug4095"
def test_include_children_load_playbook_failed_syntax_check() -> None:
    """Verify include_children() logs playbook failed to load due to syntax-check."""
    result = run_ansible_lint(
        Path("playbooks/import-failed-syntax-check.yml"),
        cwd=Path(__file__).resolve().parent.parent / "examples",
    )
    # The warning must mention which playbook failed and why.
    assert (
        "Failed to load syntax-error.yml playbook due to failing syntax check."
        in result.stderr
    )
def test_import_playbook_children() -> None:
    """Verify import_playbook_children()."""
    result = run_ansible_lint(
        Path("playbooks/import_playbook_fqcn.yml"),
        cwd=Path(__file__).resolve().parent.parent / "examples",
        env={
            "ANSIBLE_COLLECTIONS_PATH": "../collections",
        },
    )
    # Neither resolution nor syntax-check failures should be reported for
    # the FQCN-imported playbook.
    assert "Failed to find local.testcollection.foo playbook." not in result.stderr
    assert (
        "Failed to load local.testcollection.foo playbook due to failing syntax check."
        not in result.stderr
    )
| 17,559 | Python | .py | 455 | 30.874725 | 90 | 0.599577 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,155 | test_adjacent_plugins.py | ansible_ansible-lint/test/test_adjacent_plugins.py | """Test ability to recognize adjacent modules/plugins."""
import logging
import pytest
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_adj_action(
    default_rules_collection: RulesCollection,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Assures local collections are found."""
    playbook = "examples/playbooks/adj_action.yml"
    with caplog.at_level(logging.DEBUG):
        lint_runner = Runner(playbook, rules=default_rules_collection, verbosity=1)
        matches = lint_runner.run()
    # Neither module-resolution failure message may appear in the debug log.
    for fragment in ("Unable to load module", "Unable to resolve FQCN"):
        assert fragment not in caplog.text
    assert len(lint_runner.lintables) == 1
    assert len(matches) == 0
| 730 | Python | .py | 18 | 36.277778 | 83 | 0.748936 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,156 | test_formatter_sarif.py | ansible_ansible-lint/test/test_formatter_sarif.py | """Test the codeclimate JSON formatter."""
from __future__ import annotations
import json
import os
import pathlib
import subprocess
import sys
from tempfile import NamedTemporaryFile
import pytest
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.formatters import SarifFormatter
from ansiblelint.rules import AnsibleLintRule, RulesCollection
class TestSarifFormatter:
    """Unit test for SarifFormatter."""
    # Class-level fixtures shared by all tests; setup_class() fills in the
    # rule metadata and the three sample MatchError objects.
    rule1 = AnsibleLintRule()
    rule2 = AnsibleLintRule()
    matches: list[MatchError] = []
    formatter: SarifFormatter | None = None
    collection = RulesCollection()
    collection.register(rule1)
    collection.register(rule2)
    def setup_class(self) -> None:
        """Set up few MatchError objects."""
        self.rule1.id = "TCF0001"
        self.rule1.severity = "VERY_HIGH"
        self.rule1.description = "This is the rule description."
        self.rule1.link = "https://rules/help#TCF0001"
        self.rule1.tags = ["tag1", "tag2"]
        self.rule2.id = "TCF0002"
        self.rule2.severity = "MEDIUM"
        self.rule2.link = "https://rules/help#TCF0002"
        self.rule2.tags = ["tag3", "tag4"]
        # Second match deliberately has no column and is ignored; third uses
        # large line/column values to exercise location serialization.
        self.matches.extend(
            [
                MatchError(
                    message="message1",
                    lineno=1,
                    column=10,
                    details="details1",
                    lintable=Lintable("filename1.yml", content=""),
                    rule=self.rule1,
                    tag="yaml[test1]",
                    ignored=False,
                ),
                MatchError(
                    message="message2",
                    lineno=2,
                    details="",
                    lintable=Lintable("filename2.yml", content=""),
                    rule=self.rule1,
                    tag="yaml[test2]",
                    ignored=True,
                ),
                MatchError(
                    message="message3",
                    lineno=666,
                    column=667,
                    details="details3",
                    lintable=Lintable("filename3.yml", content=""),
                    rule=self.rule2,
                    tag="yaml[test3]",
                    ignored=False,
                ),
            ],
        )
        self.formatter = SarifFormatter(pathlib.Path.cwd(), display_relative_path=True)
    def test_sarif_format_list(self) -> None:
        """Test if the return value is a string."""
        assert isinstance(self.formatter, SarifFormatter)
        assert isinstance(self.formatter.format_result(self.matches), str)
    def test_sarif_result_is_json(self) -> None:
        """Test if returned string value is a JSON."""
        assert isinstance(self.formatter, SarifFormatter)
        output = self.formatter.format_result(self.matches)
        json.loads(output)
        # https://github.com/ansible/ansible-navigator/issues/1490
        assert "\n" not in output
    def test_sarif_single_match(self) -> None:
        """Test negative case. Only lists are allowed. Otherwise, a TypeError will be raised."""
        assert isinstance(self.formatter, SarifFormatter)
        with pytest.raises(TypeError):
            self.formatter.format_result(self.matches[0])  # type: ignore[arg-type]
    def test_sarif_format(self) -> None:
        """Test if the return SARIF object contains the expected results."""
        assert isinstance(self.formatter, SarifFormatter)
        sarif = json.loads(self.formatter.format_result(self.matches))
        assert len(sarif["runs"][0]["results"]) == 3
        for result in sarif["runs"][0]["results"]:
            # Ensure all reported entries have a level
            assert "level" in result
            # Ensure reported levels are either error or warning
            assert result["level"] in ("error", "warning")
    def test_validate_sarif_schema(self) -> None:
        """Test if the returned JSON is a valid SARIF report."""
        assert isinstance(self.formatter, SarifFormatter)
        sarif = json.loads(self.formatter.format_result(self.matches))
        assert sarif["$schema"] == SarifFormatter.SARIF_SCHEMA
        assert sarif["version"] == SarifFormatter.SARIF_SCHEMA_VERSION
        driver = sarif["runs"][0]["tool"]["driver"]
        assert driver["name"] == SarifFormatter.TOOL_NAME
        assert driver["informationUri"] == SarifFormatter.TOOL_URL
        rules = driver["rules"]
        assert len(rules) == 3
        assert rules[0]["id"] == self.matches[0].tag
        assert rules[0]["name"] == self.matches[0].tag
        assert rules[0]["shortDescription"]["text"] == self.matches[0].message
        assert rules[0]["defaultConfiguration"][
            "level"
        ] == SarifFormatter.get_sarif_rule_severity_level(self.matches[0].rule)
        assert rules[0]["help"]["text"] == self.matches[0].rule.description
        assert rules[0]["properties"]["tags"] == self.matches[0].rule.tags
        assert rules[0]["helpUri"] == self.matches[0].rule.url
        results = sarif["runs"][0]["results"]
        assert len(results) == 3
        for i, result in enumerate(results):
            assert result["ruleId"] == self.matches[i].tag
            assert (
                result["locations"][0]["physicalLocation"]["artifactLocation"]["uri"]
                == self.matches[i].filename
            )
            assert (
                result["locations"][0]["physicalLocation"]["artifactLocation"][
                    "uriBaseId"
                ]
                == SarifFormatter.BASE_URI_ID
            )
            assert (
                result["locations"][0]["physicalLocation"]["region"]["startLine"]
                == self.matches[i].lineno
            )
            # startColumn is only emitted when the match carries a column.
            if self.matches[i].column:
                assert (
                    result["locations"][0]["physicalLocation"]["region"]["startColumn"]
                    == self.matches[i].column
                )
            else:
                assert (
                    "startColumn"
                    not in result["locations"][0]["physicalLocation"]["region"]
                )
            assert result["level"] == SarifFormatter.get_sarif_result_severity_level(
                self.matches[i],
            )
        assert sarif["runs"][0]["originalUriBaseIds"][SarifFormatter.BASE_URI_ID]["uri"]
        assert results[0]["message"]["text"] == self.matches[0].details
        assert results[1]["message"]["text"] == self.matches[1].message
def test_sarif_parsable_ignored() -> None:
    """Test that -p option does not alter SARIF format.

    Runs the linter twice on the same file (the second time with an extra
    ``-p`` flag) and verifies both the exit code and the emitted stdout match.
    """
    cmd = [
        sys.executable,
        "-m",
        "ansiblelint",
        "-v",
        "-p",
    ]
    file = "examples/playbooks/empty_playbook.yml"
    # capture_output is required: without it both ``stdout`` attributes are
    # None and the stdout comparison below would always (vacuously) pass.
    result = subprocess.run([*cmd, file], check=False, capture_output=True)
    result2 = subprocess.run([*cmd, "-p", file], check=False, capture_output=True)
    assert result.returncode == result2.returncode
    assert result.stdout == result2.stdout
@pytest.mark.parametrize(
    ("file", "return_code"),
    (
        pytest.param("examples/playbooks/valid.yml", 0, id="0"),
        pytest.param("playbook.yml", 2, id="1"),
    ),
)
def test_sarif_file(file: str, return_code: int) -> None:
    """Test ability to dump sarif file (--sarif-file)."""
    with NamedTemporaryFile(mode="w", suffix=".sarif", prefix="output") as output_file:
        cmd = [
            sys.executable,
            "-m",
            "ansiblelint",
            "--sarif-file",
            str(output_file.name),
        ]
        result = subprocess.run([*cmd, file], check=False, capture_output=True)
        assert result.returncode == return_code
        # The report file must exist and contain a non-empty SARIF document.
        assert os.path.exists(output_file.name)  # noqa: PTH110
        assert pathlib.Path(output_file.name).stat().st_size > 0
@pytest.mark.parametrize(
    ("file", "return_code"),
    (pytest.param("examples/playbooks/valid.yml", 0, id="0"),),
)
def test_sarif_file_creates_it_if_none_exists(file: str, return_code: int) -> None:
    """Test ability to create sarif file if none exists and dump output to it (--sarif-file)."""
    sarif_file = pathlib.Path("test_output.sarif")
    cmd = [
        sys.executable,
        "-m",
        "ansiblelint",
        "--sarif-file",
        str(sarif_file),
    ]
    result = subprocess.run([*cmd, file], check=False, capture_output=True)
    try:
        assert result.returncode == return_code
        # The linter must create the file and write a non-empty report.
        assert sarif_file.exists()
        assert sarif_file.stat().st_size > 0
    finally:
        # Clean up even when an assertion fails, so repeated runs never see
        # a stale report left behind by a previous invocation.
        sarif_file.unlink(missing_ok=True)
| 8,595 | Python | .py | 201 | 32.373134 | 99 | 0.587385 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,157 | test_lint_rule.py | ansible_ansible-lint/test/test_lint_rule.py | """Tests for lintable."""
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pytest
from ansiblelint.file_utils import Lintable
from .rules.fixtures import ematcher, raw_task
@pytest.fixture(name="lintable")
def fixture_lintable() -> Lintable:
    """Return the ematcher-rule playbook Lintable used by this file's tests."""
    return Lintable("examples/playbooks/ematcher-rule.yml", kind="playbook")
def test_rule_matching(lintable: Lintable) -> None:
    """Test rule.matchlines() on a playbook."""
    rule = ematcher.EMatcherRule()
    matches = rule.matchlines(lintable)
    # The fixture playbook contains exactly three matching lines.
    assert len(matches) == 3
def test_raw_rule_matching(lintable: Lintable) -> None:
    """Test rule.matchtasks() on a playbook."""
    rule = raw_task.RawTaskRule()
    matches = rule.matchtasks(lintable)
    assert len(matches) == 1
| 1,879 | Python | .py | 37 | 48.459459 | 79 | 0.765412 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,158 | test_ansiblesyntax.py | ansible_ansible-lint/test/test_ansiblesyntax.py | """Test Ansible Syntax.
This module contains tests that validate that linter does not produce errors
when encountering what counts as valid Ansible syntax.
"""
from ansiblelint.testing import RunFromText
# Playbook whose ``tasks:`` key is present but has a null value.
PB_WITH_NULL_TASKS = """\
---
- name: Fixture for test_null_tasks
  hosts: all
  tasks:
"""
def test_null_tasks(default_text_runner: RunFromText) -> None:
    """Assure we do not fail when encountering null tasks."""
    results = default_text_runner.run_playbook(PB_WITH_NULL_TASKS)
    assert not results
| 516 | Python | .py | 15 | 32 | 76 | 0.762097 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,159 | test_yaml_utils.py | ansible_ansible-lint/test/test_yaml_utils.py | """Tests for yaml-related utility functions."""
# pylint: disable=too-many-lines
from __future__ import annotations
from io import StringIO
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
import pytest
from ruamel.yaml.main import YAML
from yamllint.linter import run as run_yamllint
import ansiblelint.yaml_utils
from ansiblelint.file_utils import Lintable, cwd
from ansiblelint.utils import task_in_list
if TYPE_CHECKING:
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.emitter import Emitter
fixtures_dir = Path(__file__).parent / "fixtures"
formatting_before_fixtures_dir = fixtures_dir / "formatting-before"
formatting_prettier_fixtures_dir = fixtures_dir / "formatting-prettier"
formatting_after_fixtures_dir = fixtures_dir / "formatting-after"
@pytest.fixture(name="empty_lintable")
def fixture_empty_lintable() -> Lintable:
    """Return a Lintable with no contents."""
    # content="" prevents any attempt to read the (nonexistent) file.
    return Lintable("__empty_file__.yaml", content="")
def test_tasks_in_list_empty_file(empty_lintable: Lintable) -> None:
    """Make sure that task_in_list returns early when files are empty."""
    assert empty_lintable.kind
    assert empty_lintable.path
    res = list(
        task_in_list(
            data=empty_lintable,
            file=empty_lintable,
            kind=empty_lintable.kind,
        ),
    )
    # An empty file must yield no tasks at all.
    assert not res
def test_nested_items_path() -> None:
    """Verify correct function of nested_items_path()."""
    data = {
        "foo": "text",
        "bar": {"some": "text2"},
        "fruits": ["apple", "orange"],
        "answer": [{"forty-two": ["life", "universe", "everything"]}],
    }
    # Expected (key, value, parent_path) triples in depth-first order.
    items = [
        ("foo", "text", []),
        ("bar", {"some": "text2"}, []),
        ("some", "text2", ["bar"]),
        ("fruits", ["apple", "orange"], []),
        (0, "apple", ["fruits"]),
        (1, "orange", ["fruits"]),
        ("answer", [{"forty-two": ["life", "universe", "everything"]}], []),
        (0, {"forty-two": ["life", "universe", "everything"]}, ["answer"]),
        ("forty-two", ["life", "universe", "everything"], ["answer", 0]),
        (0, "life", ["answer", 0, "forty-two"]),
        (1, "universe", ["answer", 0, "forty-two"]),
        (2, "everything", ["answer", 0, "forty-two"]),
    ]
    assert list(ansiblelint.yaml_utils.nested_items_path(data)) == items
@pytest.mark.parametrize(
    "invalid_data_input",
    (
        "string",
        42,
        1.234,
        ("tuple",),
        {"set"},
        # NoneType is no longer included, as we assume we have to ignore it
    ),
)
def test_nested_items_path_raises_typeerror(invalid_data_input: Any) -> None:
    """Verify non-dict/non-list types make nested_items_path() raises TypeError."""
    with pytest.raises(TypeError, match=r"Expected a dict or a list.*"):
        list(ansiblelint.yaml_utils.nested_items_path(invalid_data_input))
_input_playbook = [
{
"name": "It's a playbook", # unambiguous; no quotes needed
"tasks": [
{
"name": '"fun" task', # should be a single-quoted string
"debug": {
# ruamel.yaml default to single-quotes
# our Emitter defaults to double-quotes
"msg": "{{ msg }}",
},
},
],
},
]
_SINGLE_QUOTE_WITHOUT_INDENTS = """\
---
- name: It's a playbook
tasks:
- name: '"fun" task'
debug:
msg: '{{ msg }}'
"""
_SINGLE_QUOTE_WITH_INDENTS = """\
---
- name: It's a playbook
tasks:
- name: '"fun" task'
debug:
msg: '{{ msg }}'
"""
_DOUBLE_QUOTE_WITHOUT_INDENTS = """\
---
- name: It's a playbook
tasks:
- name: '"fun" task'
debug:
msg: "{{ msg }}"
"""
_DOUBLE_QUOTE_WITH_INDENTS_EXCEPT_ROOT_LEVEL = """\
---
- name: It's a playbook
tasks:
- name: '"fun" task'
debug:
msg: "{{ msg }}"
"""
@pytest.mark.parametrize(
    (
        "map_indent",
        "sequence_indent",
        "sequence_dash_offset",
        "alternate_emitter",
        "expected_output",
    ),
    (
        pytest.param(
            2,
            2,
            0,
            None,
            _SINGLE_QUOTE_WITHOUT_INDENTS,
            id="single_quote_without_indents",
        ),
        pytest.param(
            2,
            4,
            2,
            None,
            _SINGLE_QUOTE_WITH_INDENTS,
            id="single_quote_with_indents",
        ),
        pytest.param(
            2,
            2,
            0,
            ansiblelint.yaml_utils.FormattedEmitter,
            _DOUBLE_QUOTE_WITHOUT_INDENTS,
            id="double_quote_without_indents",
        ),
        pytest.param(
            2,
            4,
            2,
            ansiblelint.yaml_utils.FormattedEmitter,
            _DOUBLE_QUOTE_WITH_INDENTS_EXCEPT_ROOT_LEVEL,
            id="double_quote_with_indents_except_root_level",
        ),
    ),
)
def test_custom_ruamel_yaml_emitter(
    map_indent: int,
    sequence_indent: int,
    sequence_dash_offset: int,
    alternate_emitter: Emitter | None,
    expected_output: str,
) -> None:
    """Test ``ruamel.yaml.YAML.dump()`` sequence formatting and quotes."""
    yaml = YAML(typ="rt")
    # NB: ruamel.yaml does not have typehints, so mypy complains about everything here.
    yaml.explicit_start = True
    yaml.map_indent = map_indent
    yaml.sequence_indent = sequence_indent
    yaml.sequence_dash_offset = sequence_dash_offset
    if alternate_emitter is not None:
        yaml.Emitter = alternate_emitter
    # ruamel.yaml only writes to a stream (there is no `dumps` function)
    with StringIO() as output_stream:
        yaml.dump(_input_playbook, output_stream)
        output = output_stream.getvalue()
    assert output == expected_output
def load_yaml_formatting_fixtures(fixture_filename: str) -> tuple[str, str, str]:
    """Get the contents for the formatting fixture files.

    To regenerate these fixtures, please run ``pytest --regenerate-formatting-fixtures``.

    Ideally, prettier should not have to change any ``formatting-after`` fixtures.
    """
    # Read the same fixture filename from each of the three stage directories.
    stage_dirs = (
        formatting_before_fixtures_dir,
        formatting_prettier_fixtures_dir,
        formatting_after_fixtures_dir,
    )
    contents = tuple(
        (directory / fixture_filename).read_text() for directory in stage_dirs
    )
    return cast("tuple[str, str, str]", contents)
@pytest.mark.parametrize(
    ("before", "after", "version"),
    (
        pytest.param("---\nfoo: bar\n", "---\nfoo: bar\n", None, id="1"),
        # verify that 'on' is not translated to bool (1.2 behavior)
        pytest.param("---\nfoo: on\n", "---\nfoo: on\n", None, id="2"),
        # When version is manually mentioned by us, we expect to output without version directive
        pytest.param("---\nfoo: on\n", "---\nfoo: on\n", (1, 2), id="3"),
        pytest.param("---\nfoo: on\n", "---\nfoo: true\n", (1, 1), id="4"),
        pytest.param("%YAML 1.1\n---\nfoo: on\n", "---\nfoo: true\n", (1, 1), id="5"),
        # verify that in-line directive takes precedence but dumping strips if we mention a specific version
        pytest.param("%YAML 1.1\n---\nfoo: on\n", "---\nfoo: true\n", (1, 2), id="6"),
        # verify that version directive are kept if present
        pytest.param("%YAML 1.1\n---\nfoo: on\n", "---\nfoo: true\n", (1, 1), id="7"),
        pytest.param(
            "%YAML 1.2\n---\nfoo: on\n",
            "%YAML 1.2\n---\nfoo: on\n",
            None,
            id="8",
        ),
        pytest.param("---\nfoo: YES\n", "---\nfoo: true\n", (1, 1), id="9"),
        pytest.param("---\nfoo: YES\n", "---\nfoo: YES\n", (1, 2), id="10"),
        pytest.param("---\nfoo: YES\n", "---\nfoo: YES\n", None, id="11"),
        pytest.param(
            "---\n # quoted-strings:\n # quote-type: double\n # required: only-when-needed\n\nignore:\n - secrets.yml\n",
            "---\n # quoted-strings:\n # quote-type: double\n # required: only-when-needed\n\nignore:\n - secrets.yml\n",
            None,
            id="12",
        ),
        pytest.param(
            "---\nWSLENV: HOSTNAME:CI:FORCE_COLOR:GITHUB_ACTION:GITHUB_ACTION_PATH/p:GITHUB_ACTION_REPOSITORY:GITHUB_WORKFLOW:GITHUB_WORKSPACE/p:GITHUB_PATH/p:GITHUB_ENV/p:VIRTUAL_ENV/p:SKIP_PODMAN:SKIP_DOCKER\n",
            "---\nWSLENV:\n HOSTNAME:CI:FORCE_COLOR:GITHUB_ACTION:GITHUB_ACTION_PATH/p:GITHUB_ACTION_REPOSITORY:GITHUB_WORKFLOW:GITHUB_WORKSPACE/p:GITHUB_PATH/p:GITHUB_ENV/p:VIRTUAL_ENV/p:SKIP_PODMAN:SKIP_DOCKER\n",
            None,
            id="13",
        ),
    ),
)
def test_fmt(before: str, after: str, version: tuple[int, int] | None) -> None:
    """Tests behavior of formatter in regards to different YAML versions, specified or not."""
    # A round trip through load/dumps must produce the expected canonical form.
    yaml = ansiblelint.yaml_utils.FormattedYAML(version=version)
    data = yaml.load(before)
    result = yaml.dumps(data)
    assert result == after
@pytest.mark.parametrize(
    ("fixture_filename", "version"),
    (
        pytest.param("fmt-1.yml", (1, 1), id="1"),
        pytest.param("fmt-2.yml", (1, 1), id="2"),
        pytest.param("fmt-3.yml", (1, 1), id="3"),
        pytest.param("fmt-4.yml", (1, 1), id="4"),
        pytest.param("fmt-5.yml", (1, 1), id="5"),
        pytest.param("fmt-hex.yml", (1, 1), id="hex"),
    ),
)
def test_formatted_yaml_loader_dumper(
    fixture_filename: str,
    version: tuple[int, int],
) -> None:
    """Ensure that FormattedYAML loads/dumps formatting fixtures consistently."""
    before_content, prettier_content, after_content = load_yaml_formatting_fixtures(
        fixture_filename,
    )
    assert before_content != prettier_content
    assert before_content != after_content
    yaml = ansiblelint.yaml_utils.FormattedYAML(version=version)
    # All three fixture stages must dump to the same "after" canonical form.
    data_before = yaml.load(before_content)
    dump_from_before = yaml.dumps(data_before)
    data_prettier = yaml.load(prettier_content)
    dump_from_prettier = yaml.dumps(data_prettier)
    data_after = yaml.load(after_content)
    dump_from_after = yaml.dumps(data_after)
    # comparing data does not work because the Comment objects
    # have different IDs even if contents do not match.
    assert dump_from_before == after_content
    assert dump_from_prettier == after_content
    assert dump_from_after == after_content
    # We can't do this because FormattedYAML is stricter in some cases:
    #
    # Instead, `pytest --regenerate-formatting-fixtures` will fail if prettier would
    # change any files in test/fixtures/formatting-after
    # Running our files through yamllint, after we reformatted them,
    # should not yield any problems.
    config = ansiblelint.yaml_utils.load_yamllint_config()
    assert not list(run_yamllint(after_content, config))
@pytest.fixture(name="lintable")
def fixture_lintable(file_path: str) -> Lintable:
    """Return a playbook Lintable for use in ``get_path_to_*`` tests."""
    # ``file_path`` is injected by each test's parametrize marker.
    return Lintable(file_path)
@pytest.fixture(name="ruamel_data")
def fixture_ruamel_data(lintable: Lintable) -> CommentedMap | CommentedSeq:
    """Return the loaded YAML data for the Lintable."""
    # Parse via FormattedYAML so the ruamel round-trip comments are preserved.
    yaml = ansiblelint.yaml_utils.FormattedYAML()
    data: CommentedMap | CommentedSeq = yaml.load(lintable.content)
    return data
@pytest.mark.parametrize(
("file_path", "lineno", "expected_path"),
(
# ignored lintables
pytest.param(
"examples/playbooks/tasks/passing_task.yml",
2,
[],
id="ignore_tasks_file",
),
pytest.param(
"examples/roles/more_complex/handlers/main.yml",
2,
[],
id="ignore_handlers_file",
),
pytest.param("examples/playbooks/vars/other.yml", 2, [], id="ignore_vars_file"),
pytest.param(
"examples/host_vars/localhost.yml",
2,
[],
id="ignore_host_vars_file",
),
pytest.param("examples/group_vars/all.yml", 2, [], id="ignore_group_vars_file"),
pytest.param(
"examples/inventory/inventory.yml",
2,
[],
id="ignore_inventory_file",
),
pytest.param(
"examples/roles/dependency_in_meta/meta/main.yml",
2,
[],
id="ignore_meta_file",
),
pytest.param(
"examples/reqs_v1/requirements.yml",
2,
[],
id="ignore_requirements_v1_file",
),
pytest.param(
"examples/reqs_v2/requirements.yml",
2,
[],
id="ignore_requirements_v2_file",
),
# we don't have any release notes examples. Oh well.
pytest.param(
".pre-commit-config.yaml",
2,
[],
id="ignore_unrecognized_yaml_file",
),
# playbook lintables
pytest.param(
"examples/playbooks/become.yml",
1,
[],
id="1_play_playbook-line_before_play",
),
pytest.param(
"examples/playbooks/become.yml",
2,
[0],
id="1_play_playbook-first_line_in_play",
),
pytest.param(
"examples/playbooks/become.yml",
10,
[0],
id="1_play_playbook-middle_line_in_play",
),
pytest.param(
"examples/playbooks/become.yml",
100,
[0],
id="1_play_playbook-line_after_eof",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
1,
[],
id="4_play_playbook-line_before_play_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
2,
[0],
id="4_play_playbook-first_line_in_play_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
5,
[0],
id="4_play_playbook-middle_line_in_play_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
9,
[0],
id="4_play_playbook-last_line_in_play_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
12,
[1],
id="4_play_playbook-first_line_in_play_2",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
14,
[1],
id="4_play_playbook-middle_line_in_play_2",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
18,
[1],
id="4_play_playbook-last_line_in_play_2",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
21,
[2],
id="4_play_playbook-first_line_in_play_3",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
23,
[2],
id="4_play_playbook-middle_line_in_play_3",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
27,
[2],
id="4_play_playbook-last_line_in_play_3",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
31,
[3],
id="4_play_playbook-first_line_in_play_4",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
31,
[3],
id="4_play_playbook-middle_line_in_play_4",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
35,
[3],
id="4_play_playbook-last_line_in_play_4",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
100,
[3],
id="4_play_playbook-line_after_eof",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
1,
[],
id="import_playbook-line_before_play_1",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
2,
[0],
id="import_playbook-first_line_in_play_1",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
3,
[0],
id="import_playbook-middle_line_in_play_1",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
4,
[0],
id="import_playbook-last_line_in_play_1",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
5,
[1],
id="import_playbook-first_line_in_play_2",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
6,
[1],
id="import_playbook-middle_line_in_play_2",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
7,
[1],
id="import_playbook-last_line_in_play_2",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
8,
[2],
id="import_playbook-first_line_in_play_3",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
9,
[2],
id="import_playbook-last_line_in_play_3",
),
pytest.param(
"examples/playbooks/playbook-parent.yml",
15,
[2],
id="import_playbook-line_after_eof",
),
),
)
def test_get_path_to_play(
    lintable: Lintable,
    lineno: int,
    ruamel_data: CommentedMap | CommentedSeq,
    expected_path: list[int | str],
) -> None:
    """Ensure ``get_path_to_play`` returns the expected path given a file + line."""
    # Fixtures ``lintable``/``ruamel_data`` are built from the parametrized
    # ``file_path`` above; ``expected_path`` is the ruamel key path to the play.
    path_to_play = ansiblelint.yaml_utils.get_path_to_play(
        lintable,
        lineno,
        ruamel_data,
    )
    assert path_to_play == expected_path
@pytest.mark.parametrize(
("file_path", "lineno", "expected_path"),
(
# ignored lintables
pytest.param("examples/playbooks/vars/other.yml", 2, [], id="ignore_vars_file"),
pytest.param(
"examples/host_vars/localhost.yml",
2,
[],
id="ignore_host_vars_file",
),
pytest.param("examples/group_vars/all.yml", 2, [], id="ignore_group_vars_file"),
pytest.param(
"examples/inventory/inventory.yml",
2,
[],
id="ignore_inventory_file",
),
pytest.param(
"examples/roles/dependency_in_meta/meta/main.yml",
2,
[],
id="ignore_meta_file",
),
pytest.param(
"examples/reqs_v1/requirements.yml",
2,
[],
id="ignore_requirements_v1_file",
),
pytest.param(
"examples/reqs_v2/requirements.yml",
2,
[],
id="ignore_requirements_v2_file",
),
# we don't have any release notes examples. Oh well.
pytest.param(
".pre-commit-config.yaml",
2,
[],
id="ignore_unrecognized_yaml_file",
),
# tasks-containing lintables
pytest.param(
"examples/playbooks/become.yml",
4,
[],
id="1_task_playbook-line_before_tasks",
),
pytest.param(
"examples/playbooks/become.yml",
5,
[0, "tasks", 0],
id="1_task_playbook-first_line_in_task_1",
),
pytest.param(
"examples/playbooks/become.yml",
10,
[0, "tasks", 0],
id="1_task_playbook-middle_line_in_task_1",
),
pytest.param(
"examples/playbooks/become.yml",
15,
[0, "tasks", 0],
id="1_task_playbook-last_line_in_task_1",
),
pytest.param(
"examples/playbooks/become.yml",
100,
[0, "tasks", 0],
id="1_task_playbook-line_after_eof_without_anything_after_task",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
1,
[],
id="4_play_playbook-play_1_line_before_tasks",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
8,
[0, "tasks", 0],
id="4_play_playbook-play_1_first_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
9,
[0, "tasks", 0],
id="4_play_playbook-play_1_last_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
13,
[],
id="4_play_playbook-play_2_line_before_tasks",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
12,
[],
id="4_play_playbook-play_2_line_before_tasks",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
15,
[1, "tasks", 0],
id="4_play_playbook-play_2_first_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
18,
[1, "tasks", 0],
id="4_play_playbook-play_2_middle_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
18,
[1, "tasks", 0],
id="4_play_playbook-play_2_last_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
23,
[],
id="4_play_playbook-play_3_line_before_tasks",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
22,
[],
id="4_play_playbook-play_3_line_before_tasks",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
25,
[2, "tasks", 0],
id="4_play_playbook-play_3_first_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
25,
[2, "tasks", 0],
id="4_play_playbook-play_3_middle_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
27,
[2, "tasks", 0],
id="4_play_playbook-play_3_last_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
33,
[],
id="4_play_playbook-play_4_line_before_tasks",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
31,
[],
id="4_play_playbook-play_4_line_before_tasks",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
35,
[3, "tasks", 0],
id="4_play_playbook-play_4_first_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
39,
[3, "tasks", 0],
id="4_play_playbook-play_4_middle_line_task_1",
),
pytest.param(
"examples/playbooks/rule-partial-become-without-become-pass.yml",
35,
[3, "tasks", 0],
id="4_play_playbook-play_4_last_line_task_1",
),
# playbook with multiple tasks + tasks blocks in a play
pytest.param(
# must have at least one key after one of the tasks blocks
"examples/playbooks/include.yml",
6,
[0, "pre_tasks", 0],
id="playbook-multi_tasks_blocks-pre_tasks_last_task_before_roles",
),
pytest.param(
"examples/playbooks/include.yml",
7,
[],
id="playbook-multi_tasks_blocks-roles_after_pre_tasks",
),
pytest.param(
"examples/playbooks/include.yml",
10,
[],
id="playbook-multi_tasks_blocks-roles_before_tasks",
),
pytest.param(
"examples/playbooks/include.yml",
12,
[0, "tasks", 0],
id="playbook-multi_tasks_blocks-tasks_first_task",
),
pytest.param(
"examples/playbooks/include.yml",
14,
[0, "tasks", 2],
id="playbook-multi_tasks_blocks-tasks_last_task_before_handlers",
),
pytest.param(
"examples/playbooks/include.yml",
17,
[0, "handlers", 0],
id="playbook-multi_tasks_blocks-handlers_task",
),
# playbook with subtasks blocks
pytest.param(
"examples/playbooks/blockincludes.yml",
14,
[0, "tasks", 0, "block", 1, "block", 0],
id="playbook-deeply_nested_task",
),
pytest.param(
"examples/playbooks/block.yml",
12,
[0, "tasks", 0, "block", 1],
id="playbook-subtasks-block_task_2",
),
pytest.param(
"examples/playbooks/block.yml",
22,
[0, "tasks", 0, "rescue", 2],
id="playbook-subtasks-rescue_task_3",
),
pytest.param(
"examples/playbooks/block.yml",
25,
[0, "tasks", 0, "always", 0],
id="playbook-subtasks-always_task_3",
),
# tasks files
pytest.param("examples/playbooks/tasks/x.yml", 2, [0], id="tasks-null_task"),
pytest.param(
"examples/playbooks/tasks/x.yml",
6,
[1],
id="tasks-null_task_next",
),
pytest.param(
"examples/playbooks/tasks/empty_blocks.yml",
7,
[0], # this IS part of the first task and "rescue" does not have subtasks.
id="tasks-null_rescue",
),
pytest.param(
"examples/playbooks/tasks/empty_blocks.yml",
8,
[0], # this IS part of the first task and "always" does not have subtasks.
id="tasks-empty_always",
),
pytest.param(
"examples/playbooks/tasks/empty_blocks.yml",
16,
[1, "always", 0],
id="tasks-task_beyond_empty_blocks",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
1,
[],
id="tasks-line_before_tasks",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
2,
[0],
id="tasks-first_line_in_task_1",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
3,
[0],
id="tasks-middle_line_in_task_1",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
4,
[0],
id="tasks-last_line_in_task_1",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
5,
[1],
id="tasks-first_line_in_task_2",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
6,
[1],
id="tasks-middle_line_in_task_2",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
7,
[1],
id="tasks-last_line_in_task_2",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
8,
[2],
id="tasks-first_line_in_task_3",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
9,
[2],
id="tasks-last_line_in_task_3",
),
pytest.param(
"examples/roles/more_complex/tasks/main.yml",
100,
[2],
id="tasks-line_after_eof",
),
# handlers
pytest.param(
"examples/roles/more_complex/handlers/main.yml",
1,
[],
id="handlers-line_before_tasks",
),
pytest.param(
"examples/roles/more_complex/handlers/main.yml",
2,
[0],
id="handlers-first_line_in_task_1",
),
pytest.param(
"examples/roles/more_complex/handlers/main.yml",
3,
[0],
id="handlers-last_line_in_task_1",
),
pytest.param(
"examples/roles/more_complex/handlers/main.yml",
100,
[0],
id="handlers-line_after_eof",
),
),
)
def test_get_path_to_task(
    lintable: Lintable,
    lineno: int,
    ruamel_data: CommentedMap | CommentedSeq,
    expected_path: list[int | str],
) -> None:
    """Ensure ``get_path_to_task`` returns the expected path given a file + line."""
    path_to_task = ansiblelint.yaml_utils.get_path_to_task(
        lintable,
        lineno,
        ruamel_data,
    )
    assert path_to_task == expected_path
@pytest.mark.parametrize(
    ("file_path", "lineno"),
    (
        pytest.param("examples/playbooks/become.yml", 0, id="1_play_playbook"),
        pytest.param(
            "examples/playbooks/rule-partial-become-without-become-pass.yml",
            0,
            id="4_play_playbook",
        ),
        pytest.param("examples/playbooks/playbook-parent.yml", 0, id="import_playbook"),
        pytest.param("examples/playbooks/become.yml", 0, id="1_task_playbook"),
    ),
)
def test_get_path_to_play_raises_value_error_for_bad_lineno(
    lintable: Lintable,
    lineno: int,
    ruamel_data: CommentedMap | CommentedSeq,
) -> None:
    """Ensure ``get_path_to_play`` raises ValueError for lineno < 1."""
    # Line numbers are 1-based; 0 must be rejected before any traversal.
    with pytest.raises(
        ValueError,
        match=f"expected lineno >= 1, got {lineno}",
    ):
        ansiblelint.yaml_utils.get_path_to_play(lintable, lineno, ruamel_data)
@pytest.mark.parametrize(
    ("file_path", "lineno"),
    (pytest.param("examples/roles/more_complex/tasks/main.yml", 0, id="tasks"),),
)
def test_get_path_to_task_raises_value_error_for_bad_lineno(
    lintable: Lintable,
    lineno: int,
    ruamel_data: CommentedMap | CommentedSeq,
) -> None:
    """Ensure ``get_path_to_task`` raises ValueError for lineno < 1."""
    # Line numbers are 1-based; 0 must be rejected before any traversal.
    with pytest.raises(
        ValueError,
        match=f"expected lineno >= 1, got {lineno}",
    ):
        ansiblelint.yaml_utils.get_path_to_task(lintable, lineno, ruamel_data)
@pytest.mark.parametrize(
    ("before", "after"),
    (
        pytest.param(None, None, id="1"),
        pytest.param(1, 1, id="2"),
        pytest.param({}, {}, id="3"),
        pytest.param({"__file__": 1}, {}, id="simple"),
        pytest.param({"foo": {"__file__": 1}}, {"foo": {}}, id="nested"),
        # Fixed: both cases below carried the duplicated id "nested-in-lint",
        # which made them impossible to select unambiguously with ``pytest -k``
        # or a ``::id`` node specifier. Give each its distinct, accurate id.
        pytest.param([{"foo": {"__file__": 1}}], [{"foo": {}}], id="nested-in-list"),
        pytest.param({"foo": [{"__file__": 1}]}, {"foo": [{}]}, id="nested-in-dict"),
    ),
)
def test_deannotate(
    before: Any,
    after: Any,
) -> None:
    """Ensure deannotate strips ``__file__`` annotations at any nesting depth."""
    assert ansiblelint.yaml_utils.deannotate(before) == after
def test_yamllint_incompatible_config() -> None:
    """Ensure we can detect incompatible yamllint settings."""
    # Fixed: the original used ``with (cwd(...),):`` — a single-item
    # parenthesized with-items list with a trailing comma, which reads like a
    # one-tuple and only parses on Python 3.10+. The plain form is equivalent.
    with cwd(Path("examples/yamllint/incompatible-config")):
        config = ansiblelint.yaml_utils.load_yamllint_config()
        assert config.incompatible
@pytest.mark.parametrize(
    ("yaml_version", "explicit_start"),
    (
        pytest.param((1, 1), True),
        pytest.param((1, 1), False),
    ),
)
def test_document_start(
    yaml_version: tuple[int, int] | None,
    explicit_start: bool,
) -> None:
    """Ensure the explicit_start config option from .yamllint is applied correctly."""
    config = ansiblelint.yaml_utils.FormattedYAML.default_config
    # NOTE(review): this mutates the object returned by ``default_config``; if
    # that is a shared class-level dict, the change leaks into later tests —
    # confirm whether default_config returns a fresh copy.
    config["explicit_start"] = explicit_start
    yaml = ansiblelint.yaml_utils.FormattedYAML(
        version=yaml_version,
        config=cast(dict[str, bool | int | str], config),
    )
    # Output must begin with the "---" document-start marker iff requested.
    assert (
        yaml.dumps(yaml.load(_SINGLE_QUOTE_WITHOUT_INDENTS)).startswith("---")
        == explicit_start
    )
| 33,466 | Python | .py | 985 | 24.539086 | 216 | 0.548182 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,160 | test_local_content.py | ansible_ansible-lint/test/test_local_content.py | """Test playbooks with local content."""
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_local_collection(default_rules_collection: RulesCollection) -> None:
    """Assures local collections are found."""
    lint_runner = Runner(
        "test/local-content/test-collection.yml",
        rules=default_rules_collection,
    )
    matches = lint_runner.run()
    # Exactly one file examined, and the locally shipped collection resolves
    # cleanly, so no findings are produced.
    assert len(lint_runner.lintables) == 1
    assert len(matches) == 0
| 476 | Python | .py | 10 | 43.8 | 77 | 0.75974 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,161 | test_formatter_json.py | ansible_ansible-lint/test/test_formatter_json.py | """Test the codeclimate JSON formatter."""
from __future__ import annotations
import json
import pathlib
import subprocess
import sys
import pytest
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.formatters import CodeclimateJSONFormatter
from ansiblelint.rules import AnsibleLintRule, RulesCollection
class TestCodeclimateJSONFormatter:
    """Unit test for CodeclimateJSONFormatter."""
    # Class-level placeholders; all of them are rebuilt in setup_class below.
    rule = AnsibleLintRule()
    matches: list[MatchError] = []
    formatter: CodeclimateJSONFormatter | None = None
    collection = RulesCollection()
    def setup_class(self) -> None:
        """Set up a few MatchError fixtures shared by every test in the class."""
        # NOTE(review): pytest invokes setup_class with the class object, so
        # "self" here is actually the class — confirm before converting this
        # to a classmethod.
        self.rule = AnsibleLintRule()
        self.rule.id = "TCF0001"
        self.rule.severity = "VERY_HIGH"
        self.collection.register(self.rule)
        self.matches = []
        # First match: a plain (non-ignored) finding on line 1.
        self.matches.append(
            MatchError(
                message="message",
                lineno=1,
                details="hello",
                lintable=Lintable("filename.yml", content=""),
                rule=self.rule,
            ),
        )
        # Second match: flagged as ignored, expected to be reported as "minor".
        self.matches.append(
            MatchError(
                message="message",
                lineno=2,
                details="hello",
                lintable=Lintable("filename.yml", content=""),
                rule=self.rule,
                ignored=True,
            ),
        )
        self.formatter = CodeclimateJSONFormatter(
            pathlib.Path.cwd(),
            display_relative_path=True,
        )
    def test_json_format_list(self) -> None:
        """Test if the return value is a string."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        assert isinstance(self.formatter.format_result(self.matches), str)
    def test_result_is_json(self) -> None:
        """Test if returned string value is a JSON."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        output = self.formatter.format_result(self.matches)
        json.loads(output)
        # https://github.com/ansible/ansible-navigator/issues/1490
        assert "\n" not in output
    def test_json_single_match(self) -> None:
        """Test negative case. Only lists are allowed. Otherwise a RuntimeError will be raised."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        with pytest.raises(TypeError):
            self.formatter.format_result(self.matches[0]) # type: ignore[arg-type]
    def test_result_is_list(self) -> None:
        """Test if the return JSON contains a list with a length of 2."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        result = json.loads(self.formatter.format_result(self.matches))
        assert len(result) == 2
    def test_validate_codeclimate_schema(self) -> None:
        """Test if the returned JSON is a valid codeclimate report."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        result = json.loads(self.formatter.format_result(self.matches))
        single_match = result[0]
        assert "type" in single_match
        assert single_match["type"] == "issue"
        assert "check_name" in single_match
        assert "categories" in single_match
        assert isinstance(single_match["categories"], list)
        assert "severity" in single_match
        assert single_match["severity"] == "major"
        assert "description" in single_match
        assert "fingerprint" in single_match
        assert "location" in single_match
        assert "path" in single_match["location"]
        assert single_match["location"]["path"] == self.matches[0].filename
        assert "lines" in single_match["location"]
        assert single_match["location"]["lines"]["begin"] == self.matches[0].lineno
        assert "positions" not in single_match["location"]
        # check that the 2nd match is marked as 'minor' because it was created with ignored=True
        assert result[1]["severity"] == "minor"
    def test_validate_codeclimate_schema_with_positions(self) -> None:
        """Test if the returned JSON is a valid codeclimate report (containing 'positions' instead of 'lines')."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        # Supplying a column should switch the location encoding from
        # "lines" to "positions".
        result = json.loads(
            self.formatter.format_result(
                [
                    MatchError(
                        message="message",
                        lineno=1,
                        column=42,
                        details="hello",
                        lintable=Lintable("filename.yml", content=""),
                        rule=self.rule,
                    ),
                ],
            ),
        )
        assert result[0]["location"]["positions"]["begin"]["line"] == 1
        assert result[0]["location"]["positions"]["begin"]["column"] == 42
        assert "lines" not in result[0]["location"]
def test_code_climate_parsable_ignored() -> None:
    """Test that -p option does not alter codeclimate format."""
    # Fixed: the base command must NOT already include ``-p``; the original
    # compared ``-p`` against ``-p -p`` instead of "without" vs "with".
    cmd = [
        sys.executable,
        "-m",
        "ansiblelint",
        "-v",
    ]
    file = "examples/playbooks/empty_playbook.yml"
    # Fixed: capture stdout — without it both ``result.stdout`` values were
    # None and the output comparison below was vacuously true.
    result = subprocess.run(
        [*cmd, file],
        check=False,
        capture_output=True,
        text=True,
    )
    result2 = subprocess.run(
        [*cmd, "-p", file],
        check=False,
        capture_output=True,
        text=True,
    )
    assert result.returncode == result2.returncode
    assert result.stdout == result2.stdout
| 5,403 | Python | .py | 124 | 33.822581 | 114 | 0.622577 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,162 | test_skip_playbook_items.py | ansible_ansible-lint/test/test_skip_playbook_items.py | """Tests related to use of noqa inside playbooks."""
import pytest
from ansiblelint.testing import RunFromText
PLAYBOOK_PRE_TASKS = """\
---
- name: Fixture
hosts: all
tasks:
- name: Bad git 1 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 2
action: ansible.builtin.git a=b c=d
pre_tasks:
- name: Bad git 3 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 4
action: ansible.builtin.git a=b c=d
"""
PLAYBOOK_POST_TASKS = """\
---
- name: Fixture
hosts: all
tasks:
- name: Bad git 1 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 2
action: ansible.builtin.git a=b c=d
post_tasks:
- name: Bad git 3 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 4
action: ansible.builtin.git a=b c=d
"""
PLAYBOOK_HANDLERS = """\
---
- name: Fixture
hosts: all
tasks:
- name: Bad git 1 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 2
action: ansible.builtin.git a=b c=d
handlers:
- name: Bad git 3 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 4
action: ansible.builtin.git a=b c=d
"""
PLAYBOOK_TWO_PLAYS = """\
---
- name: Fixture
hosts: all
tasks:
- name: Bad git 1 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 2
action: ansible.builtin.git a=b c=d
- name: Fixture 2
hosts: all
tasks:
- name: Bad git 3 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 4
action: ansible.builtin.git a=b c=d
"""
PLAYBOOK_WITH_BLOCK = """\
---
- name: Fixture
hosts: all
tasks:
- name: Bad git 1 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 2
action: ansible.builtin.git a=b c=d
- name: Block with rescue and always section
block:
- name: Bad git 3 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 4
action: ansible.builtin.git a=b c=d
rescue:
- name: Bad git 5 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 6
action: ansible.builtin.git a=b c=d
always:
- name: Bad git 7 # noqa: latest[git]
action: ansible.builtin.git a=b c=d
- name: Bad git 8
action: ansible.builtin.git a=b c=d
"""
@pytest.mark.parametrize(
    ("playbook", "length"),
    (
        pytest.param(PLAYBOOK_PRE_TASKS, 6, id="PRE_TASKS"),
        pytest.param(PLAYBOOK_POST_TASKS, 6, id="POST_TASKS"),
        pytest.param(PLAYBOOK_HANDLERS, 6, id="HANDLERS"),
        pytest.param(PLAYBOOK_TWO_PLAYS, 6, id="TWO_PLAYS"),
        pytest.param(PLAYBOOK_WITH_BLOCK, 12, id="WITH_BLOCK"),
    ),
)
def test_pre_tasks(
    default_text_runner: RunFromText,
    playbook: str,
    length: int,
) -> None:
    """Check that skipping is possible in different playbook parts."""
    # When
    results = default_text_runner.run_playbook(playbook)
    # Then
    # ``length`` is the expected number of findings for each fixture playbook;
    # tasks annotated with "# noqa: latest[git]" are presumed excluded — the
    # exact counting depends on which rules fire (verify against rule set).
    assert len(results) == length
| 3,154 | Python | .py | 111 | 23.495495 | 70 | 0.620712 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,163 | test_import_playbook.py | ansible_ansible-lint/test/test_import_playbook.py | """Test ability to import playbooks."""
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_task_hook_import_playbook(default_rules_collection: RulesCollection) -> None:
    """Assures import_playbook includes are recognized."""
    lint_runner = Runner(
        "examples/playbooks/playbook-parent.yml",
        rules=default_rules_collection,
    )
    matches = lint_runner.run()
    matches_text = str(matches)
    # Both the parent and the imported child playbook must be collected.
    assert len(lint_runner.lintables) == 2
    assert len(matches) == 2
    # Issues originating inside the imported playbook must be reported.
    for expected_fragment in (
        "Commands should not change things",
        "[name]",
        "All tasks should be named",
    ):
        assert expected_fragment in matches_text
def test_import_playbook_from_collection(
    default_rules_collection: RulesCollection,
) -> None:
    """Assures import_playbook from collection."""
    playbook_path = "examples/playbooks/test_import_playbook.yml"
    runner = Runner(playbook_path, rules=default_rules_collection)
    results = runner.run()
    # A collection-qualified import should lint cleanly with no findings.
    assert len(runner.lintables) == 1
    assert len(results) == 0
def test_import_playbook_invalid(
    default_rules_collection: RulesCollection,
) -> None:
    """Assure an invalid import_playbook target is reported as a syntax error."""
    playbook_path = "examples/playbooks/test_import_playbook_invalid.yml"
    runner = Runner(playbook_path, rules=default_rules_collection)
    results = runner.run()
    assert len(runner.lintables) == 1
    assert len(results) == 1
    # The failure is pinpointed to the import_playbook statement on line 2.
    assert results[0].tag == "syntax-check[specific]"
    assert results[0].lineno == 2
| 1,592 | Python | .py | 35 | 41.114286 | 86 | 0.739496 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,164 | test_main.py | ansible_ansible-lint/test/test_main.py | """Tests related to ansiblelint.__main__ module."""
import os
import shutil
import subprocess
import sys
import tempfile
import time
from http.client import RemoteDisconnected
from pathlib import Path
import pytest
from pytest_mock import MockerFixture
from ansiblelint.config import get_version_warning, options
from ansiblelint.constants import RC
@pytest.mark.parametrize(
    ("expected_warning"),
    (False, True),
    ids=("normal", "isolated"),
)
def test_call_from_outside_venv(expected_warning: bool) -> None:
    """Asserts ability to be called w/ or w/o venv activation."""
    git_location = shutil.which("git")
    if not git_location:
        pytest.fail("git not found")
    git_path = Path(git_location).parent
    if expected_warning:
        # Minimal env: only HOME and the dir containing git, no venv on PATH.
        env = {"HOME": str(Path.home()), "PATH": str(git_path)}
    else:
        env = os.environ.copy()
    # Preserve coverage configuration so subprocess coverage still works.
    for v in ("COVERAGE_FILE", "COVERAGE_PROCESS_START"):
        if v in os.environ:
            env[v] = os.environ[v]
    py_path = Path(sys.executable).parent
    # Passing custom env prevents the process from inheriting PATH or other
    # environment variables from the current process, so we emulate being
    # called from outside the venv.
    proc = subprocess.run(
        [str(py_path / "ansible-lint"), "--version"],
        check=False,
        capture_output=True,
        text=True,
        env=env,
    )
    assert proc.returncode == 0, proc
    # The PATH-altered warning must appear exactly when running "isolated".
    warning_found = "PATH altered to include" in proc.stderr
    assert warning_found is expected_warning
@pytest.mark.parametrize(
    ("ver_diff", "found", "check", "outlen"),
    (
        pytest.param("v1.2.2", True, "pre-release", 1, id="0"),
        pytest.param("v1.2.3", False, "", 1, id="1"),
        pytest.param("v1.2.4", True, "new release", 2, id="2"),
    ),
)
def test_get_version_warning(
    mocker: MockerFixture,
    ver_diff: str,
    found: bool,
    check: str,
    outlen: int,
) -> None:
    """Assert get_version_warning output for older/equal/newer cached releases."""
    # Fake GitHub release payload as it would appear in the cache file.
    data = f'{{"html_url": "https://127.0.0.1", "tag_name": "{ver_diff}"}}'
    # simulate cache file
    mocker.patch("os.path.exists", return_value=True)
    mocker.patch("os.path.getmtime", return_value=time.time())
    mocker.patch("builtins.open", mocker.mock_open(read_data=data))
    # overwrite ansible-lint version
    mocker.patch("ansiblelint.config.__version__", "1.2.3")
    # overwrite install method to custom one. This one will increase msg line count
    # to easily detect unwanted call to it.
    mocker.patch("ansiblelint.config.guess_install_method", return_value="\n")
    msg = get_version_warning()
    if not found:
        assert msg == check
    else:
        assert check in msg
        assert len(msg.split("\n")) == outlen
def test_get_version_warning_no_pip(mocker: MockerFixture) -> None:
    """Test that we do not display any message if install method is not pip."""
    # An empty install-method string means no upgrade command can be suggested.
    mocker.patch(
        "ansiblelint.config.guess_install_method",
        return_value="",
    )
    message = get_version_warning()
    assert message == ""
def test_get_version_warning_remote_disconnect(mocker: MockerFixture) -> None:
    """Test that we can handle remote disconnect when fetching release url."""
    mocker.patch("urllib.request.urlopen", side_effect=RemoteDisconnected)
    # The call must swallow the network failure rather than propagate it.
    try:
        get_version_warning()
    except RemoteDisconnected:
        pytest.fail("Failed to handle a remote disconnect")
def test_get_version_warning_offline(mocker: MockerFixture) -> None:
    """Test that offline mode does not display any message."""
    with tempfile.TemporaryDirectory() as temporary_directory:
        # ensures a real cache_file is not loaded
        mocker.patch("ansiblelint.config.CACHE_DIR", Path(temporary_directory))
        # Fixed: patch instead of assigning ``options.offline = True`` so the
        # shared global options object is restored when the test ends, instead
        # of leaking offline mode into every subsequent test.
        mocker.patch.object(options, "offline", True)
        assert get_version_warning() == ""
@pytest.mark.parametrize(
    ("lintable"),
    (
        pytest.param("examples/playbooks/nodeps.yml", id="1"),
        pytest.param("examples/playbooks/nodeps2.yml", id="2"),
    ),
)
def test_nodeps(lintable: str) -> None:
    """Assert linting succeeds when ANSIBLE_LINT_NODEPS=1 is set."""
    env = os.environ.copy()
    env["ANSIBLE_LINT_NODEPS"] = "1"
    py_path = Path(sys.executable).parent
    proc = subprocess.run(
        [str(py_path / "ansible-lint"), lintable],
        check=False,
        capture_output=True,
        text=True,
        env=env,
    )
    # With nodeps enabled these playbooks must lint cleanly (exit code 0).
    assert proc.returncode == 0, proc
def test_broken_ansible_cfg() -> None:
    """Asserts behavior when encountering broken ansible.cfg files."""
    py_path = Path(sys.executable).parent
    # Run from a fixture directory whose ansible.cfg has an invalid setting.
    proc = subprocess.run(
        [str(py_path / "ansible-lint"), "--version"],
        check=False,
        capture_output=True,
        text=True,
        cwd="test/fixtures/broken-ansible.cfg",
    )
    # A broken config must exit with INVALID_CONFIG and name the bad option.
    assert proc.returncode == RC.INVALID_CONFIG, proc
    assert (
        "Invalid type for configuration option setting: CACHE_PLUGIN_TIMEOUT"
        in proc.stderr
    )
| 4,922 | Python | .py | 130 | 32.423077 | 83 | 0.669671 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,165 | test_import_tasks.py | ansible_ansible-lint/test/test_import_tasks.py | """Test related to import of invalid files."""
import pytest
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
    ("playbook_path", "lintable_count", "match_count"),
    (
        pytest.param(
            "examples/playbooks/test_import_with_conflicting_action_statements.yml",
            2,
            4,
            id="0",
        ),
        pytest.param("examples/playbooks/test_import_with_malformed.yml", 2, 2, id="1"),
    ),
)
def test_import_tasks(
    default_rules_collection: RulesCollection,
    playbook_path: str,
    lintable_count: int,
    match_count: int,
) -> None:
    """Assure invalid imported task files are detected and reported."""
    runner = Runner(playbook_path, rules=default_rules_collection)
    results = runner.run()
    # The parent playbook plus the imported file are both examined.
    assert len(runner.lintables) == lintable_count
    assert len(results) == match_count
    # Assures we detected the issues from imported file
    assert results[0].rule.id in ("syntax-check", "load-failure")
30,166 | test_list_rules.py | ansible_ansible-lint/test/test_list_rules.py | """Tests related to our logging/verbosity setup."""
from pathlib import Path
import pytest
from ansiblelint.testing import run_ansible_lint
def test_list_rules_includes_opt_in_rules(project_path: Path) -> None:
    """Checks that listing rules also includes the opt-in rules."""
    # Piggyback off the .yamllint in the root of the repo; the fixture
    # directory "overrides" it for this test.
    fixture_dir = Path("test") / "fixtures" / "list-rules-tests"
    completed = run_ansible_lint("-L", fixture_dir, cwd=project_path)
    assert "opt-in" in completed.stdout
def test_list_rules_includes_autofix() -> None:
    """Checks that listing rules also includes the autofix label for applicable rules."""
    listing = run_ansible_lint("--list-rules")
    assert "autofix" in listing.stdout
@pytest.mark.parametrize(
    ("result", "returncode", "format_string"),
    (
        (False, 0, "brief"),
        (False, 0, "full"),
        (False, 0, "md"),
        (True, 2, "json"),
        (True, 2, "codeclimate"),
        (True, 2, "quiet"),
        (True, 2, "pep8"),
        (True, 2, "foo"),
    ),
    ids=(
        "plain",
        "full",
        "md",
        "json",
        "codeclimate",
        "quiet",
        "pep8",
        "foo",
    ),
)
def test_list_rules_with_format_option(
    result: bool,
    returncode: int,
    format_string: str,
    project_path: Path,
) -> None:
    """Checks that listing rules with format options works."""
    # Piggyback off the repo-root .yamllint; the fixture directory overrides it.
    fixture_dir = Path("test") / "fixtures" / "list-rules-tests"
    completed = run_ansible_lint(
        "-f",
        format_string,
        "-L",
        fixture_dir,
        cwd=project_path,
    )
    # Unsupported -f choices surface as an argparse error on stderr...
    assert (f"invalid choice: '{format_string}'" in completed.stderr) is result
    # ...while supported formats actually print the rule listing.
    assert ("syntax-check" in completed.stdout) is not result
    assert completed.returncode is returncode
def test_list_tags_includes_opt_in_rules(project_path: Path) -> None:
    """Checks that listing tags also includes the opt-in rules."""
    # Piggyback off the repo-root .yamllint; the fixture directory overrides it.
    fixture_dir = Path("test") / "fixtures" / "list-rules-tests"
    completed = run_ansible_lint("-L", str(fixture_dir), cwd=project_path)
    assert "opt-in" in completed.stdout
| 2,559 | Python | .py | 65 | 33.476923 | 89 | 0.642569 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,167 | test_import_include_role.py | ansible_ansible-lint/test/test_import_include_role.py | """Tests related to role imports."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from ansiblelint.runner import Runner
if TYPE_CHECKING:
from pathlib import Path
from _pytest.fixtures import SubRequest
from ansiblelint.rules import RulesCollection
ROLE_TASKS_MAIN = """\
---
- name: Shell instead of command
shell: echo hello world # noqa: fqcn no-free-form
changed_when: false
"""
ROLE_TASKS_WORLD = """\
---
- ansible.builtin.debug:
msg: "this is a task without a name"
"""
PLAY_IMPORT_ROLE = """\
---
- name: Test fixture
hosts: all
tasks:
- name: Some import # noqa: fqcn
import_role:
name: test-role
"""
PLAY_IMPORT_ROLE_FQCN = """\
---
- name: Test fixture
hosts: all
tasks:
- name: Some import
ansible.builtin.import_role:
name: test-role
"""
PLAY_IMPORT_ROLE_INLINE = """\
---
- name: Fixture
hosts: all
tasks:
- name: Some import
import_role: name=test-role # noqa: no-free-form fqcn
"""
PLAY_INCLUDE_ROLE = """\
---
- name: Fixture
hosts: all
tasks:
- name: Some import
include_role:
name: test-role
tasks_from: world
"""
PLAY_INCLUDE_ROLE_FQCN = """\
---
- name: Fixture
hosts: all
tasks:
- name: Some import
ansible.builtin.include_role:
name: test-role
tasks_from: world
"""
PLAY_INCLUDE_ROLE_INLINE = """\
---
- name: Fixture
hosts: all
tasks:
- name: Some import
include_role: name=test-role tasks_from=world # noqa: no-free-form
"""
@pytest.fixture(name="playbook_path")
def fixture_playbook_path(request: SubRequest, tmp_path: Path) -> str:
    """Create a reusable per-test role skeleton."""
    # Lay out a minimal role with a main tasks file and a "world" tasks file...
    tasks_dir = tmp_path / "test-role" / "tasks"
    tasks_dir.mkdir(parents=True)
    (tasks_dir / "main.yml").write_text(ROLE_TASKS_MAIN)
    (tasks_dir / "world.yml").write_text(ROLE_TASKS_WORLD)
    # ...plus the playbook text supplied through indirect parametrization.
    playbook_file = tmp_path / "playbook.yml"
    playbook_file.write_text(request.param)
    return str(playbook_file)
# NOTE: some cases assert the substring "require" (truncated) — it matches
# "required" in the produced message via the `in` check below.
@pytest.mark.parametrize(
    ("playbook_path", "messages"),
    (
        pytest.param(
            PLAY_IMPORT_ROLE,
            ["only when shell functionality is required", "All tasks should be named"],
            id="IMPORT_ROLE",
        ),
        pytest.param(
            PLAY_IMPORT_ROLE_FQCN,
            ["only when shell functionality is required", "All tasks should be named"],
            id="IMPORT_ROLE_FQCN",
        ),
        pytest.param(
            PLAY_IMPORT_ROLE_INLINE,
            ["only when shell functionality is require", "All tasks should be named"],
            id="IMPORT_ROLE_INLINE",
        ),
        pytest.param(
            PLAY_INCLUDE_ROLE,
            ["only when shell functionality is require", "All tasks should be named"],
            id="INCLUDE_ROLE",
        ),
        pytest.param(
            PLAY_INCLUDE_ROLE_FQCN,
            ["only when shell functionality is require", "All tasks should be named"],
            id="INCLUDE_ROLE_FQCN",
        ),
        pytest.param(
            PLAY_INCLUDE_ROLE_INLINE,
            ["only when shell functionality is require", "All tasks should be named"],
            id="INCLUDE_ROLE_INLINE",
        ),
    ),
    indirect=("playbook_path",),
)
def test_import_role2(
    default_rules_collection: RulesCollection,
    playbook_path: str,
    messages: list[str],
) -> None:
    """Test that include_role digs deeper than import_role."""
    runner = Runner(
        playbook_path,
        rules=default_rules_collection,
        skip_list=["fqcn[action-core]"],
    )
    results = runner.run()
    for message in messages:
        assert message in str(results)
    # Ensure no other unexpected messages are present
    assert len(messages) == len(results), results
| 3,852 | Python | .py | 137 | 22.671533 | 87 | 0.632648 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,168 | test_text.py | ansible_ansible-lint/test/test_text.py | """Tests for text module."""
from typing import Any
import pytest
from ansiblelint.text import has_glob, has_jinja, strip_ansi_escape, toidentifier
@pytest.mark.parametrize(
    ("value", "expected"),
    (
        pytest.param("\x1b[1;31mHello", "Hello", id="0"),
        pytest.param("\x1b[2;37;41mExample_file.zip", "Example_file.zip", id="1"),
        pytest.param(b"ansible-lint", "ansible-lint", id="2"),
    ),
)
def test_strip_ansi_escape(value: Any, expected: str) -> None:
    """Tests for strip_ansi_escape()."""
    # Escape sequences are removed; bytes input comes back as str (case "2").
    actual = strip_ansi_escape(value)
    assert actual == expected
@pytest.mark.parametrize(
    ("value", "expected"),
    (
        pytest.param("foo-bar", "foo_bar", id="0"),
        pytest.param("foo--bar", "foo_bar", id="1"),
    ),
)
def test_toidentifier(value: Any, expected: str) -> None:
    """Tests for toidentifier()."""
    # Runs of dashes collapse into a single underscore (case "1").
    converted = toidentifier(value)
    assert converted == expected
@pytest.mark.parametrize(
    ("value", "expected"),
    (pytest.param("example_test.zip", "Unable to convert role name", id="0"),),
)
def test_toidentifier_fail(value: Any, expected: str) -> None:
    """Tests for toidentifier() error handling on unconvertible names."""
    with pytest.raises(RuntimeError) as err:
        toidentifier(value)
    # Membership test is the idiomatic form of `str.find(...) > -1`.
    assert expected in str(err.value)
@pytest.mark.parametrize(
    ("value", "expected"),
    (
        pytest.param("", False, id="0"),
        pytest.param("{{ }}", True, id="1"),
        pytest.param("foo {# #} bar", True, id="2"),
        pytest.param("foo \n{% %} bar", True, id="3"),
        pytest.param(None, False, id="4"),
        pytest.param(42, False, id="5"),
        pytest.param(True, False, id="6"),
    ),
)
def test_has_jinja(value: Any, expected: bool) -> None:
    """Tests for has_jinja()."""
    # Expressions, comments and statements all count; non-strings never do.
    actual = has_jinja(value)
    assert actual == expected
@pytest.mark.parametrize(
    ("value", "expected"),
    (
        pytest.param("", False, id="0"),
        pytest.param("*", True, id="1"),
        pytest.param("foo.*", True, id="2"),
        pytest.param(None, False, id="4"),
        pytest.param(42, False, id="5"),
        pytest.param(True, False, id="6"),
    ),
)
def test_has_glob(value: Any, expected: bool) -> None:
    """Tests for has_glob()."""
    # Non-string inputs (None, int, bool) are expected to report no glob.
    assert has_glob(value) == expected
| 2,215 | Python | .py | 63 | 29.952381 | 82 | 0.602618 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,169 | test_internal_rules.py | ansible_ansible-lint/test/test_internal_rules.py | """Tests for internal rules."""
import pytest
from ansiblelint._internal.rules import BaseRule
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_base_rule_url() -> None:
    """Test that rule URL is set to expected value."""
    expected_url = "https://ansible.readthedocs.io/projects/lint/rules/"
    assert BaseRule().url == expected_url
@pytest.mark.parametrize(
    ("path"),
    (
        pytest.param(
            "examples/playbooks/incorrect_module_args.yml",
            id="playbook",
        ),
    ),
)
def test_incorrect_module_args(
    path: str,
    default_rules_collection: RulesCollection,
) -> None:
    """Check that we fail when file encoding is wrong."""
    matches = Runner(path, rules=default_rules_collection).run()
    # Exactly one load failure, tagged as an internal error.
    assert len(matches) == 1, matches
    assert matches[0].rule.id == "load-failure"
    assert "Failed to find required 'name' key in include_role" in matches[0].message
    assert matches[0].tag == "internal-error"
| 1,014 | Python | .py | 29 | 30.310345 | 85 | 0.691522 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,170 | test_transform_mixin.py | ansible_ansible-lint/test/test_transform_mixin.py | """Tests for TransformMixin."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from ansiblelint.rules import TransformMixin
if TYPE_CHECKING:
from collections.abc import MutableMapping, MutableSequence
from typing import Any
# Nested mapping fixture used to exercise TransformMixin.seek() path lookups.
DUMMY_MAP: dict[str, Any] = {
    "foo": "text",
    "bar": {"some": "text2"},
    "fruits": ["apple", "orange"],
    "answer": [{"forty-two": ["life", "universe", "everything"]}],
}
# Nested sequence fixture covering list-rooted lookups with the same shapes.
DUMMY_LIST: list[dict[str, Any]] = [
    {"foo": "text"},
    {"bar": {"some": "text2"}, "fruits": ["apple", "orange"]},
    {"answer": [{"forty-two": ["life", "universe", "everything"]}]},
]
@pytest.mark.parametrize(
    ("yaml_path", "data", "expected_error"),
    (
        ([0], DUMMY_MAP, KeyError),
        (["bar", 0], DUMMY_MAP, KeyError),
        (["fruits", 100], DUMMY_MAP, IndexError),
        (["answer", 1], DUMMY_MAP, IndexError),
        (["answer", 0, 42], DUMMY_MAP, KeyError),
        (["answer", 0, "42"], DUMMY_MAP, KeyError),
        ([100], DUMMY_LIST, IndexError),
        ([0, 0], DUMMY_LIST, KeyError),
        ([0, "wrong key"], DUMMY_LIST, KeyError),
        ([1, "bar", "wrong key"], DUMMY_LIST, KeyError),
        ([1, "fruits", "index should be int"], DUMMY_LIST, TypeError),
        ([1, "fruits", 100], DUMMY_LIST, IndexError),
    ),
)
def test_seek_with_bad_path(
    yaml_path: list[int | str],
    data: MutableMapping[str, Any] | MutableSequence[Any] | str,
    expected_error: type[Exception],
) -> None:
    """Verify that TransformMixin.seek() propagates errors."""
    # Wrong key/index types and out-of-range values must surface unchanged.
    with pytest.raises(expected_error):
        TransformMixin.seek(yaml_path, data)
# Cases 0-12 walk DUMMY_MAP, 13-28 walk DUMMY_LIST, and 29-30 show that plain
# string data is returned untouched regardless of the path.
@pytest.mark.parametrize(
    ("yaml_path", "data", "expected"),
    (
        pytest.param([], DUMMY_MAP, DUMMY_MAP, id="0"),
        pytest.param(["foo"], DUMMY_MAP, DUMMY_MAP["foo"], id="1"),
        pytest.param(["bar"], DUMMY_MAP, DUMMY_MAP["bar"], id="2"),
        pytest.param(["bar", "some"], DUMMY_MAP, DUMMY_MAP["bar"]["some"], id="3"),
        pytest.param(["fruits"], DUMMY_MAP, DUMMY_MAP["fruits"], id="4"),
        pytest.param(["fruits", 0], DUMMY_MAP, DUMMY_MAP["fruits"][0], id="5"),
        pytest.param(["fruits", 1], DUMMY_MAP, DUMMY_MAP["fruits"][1], id="6"),
        pytest.param(["answer"], DUMMY_MAP, DUMMY_MAP["answer"], id="7"),
        pytest.param(["answer", 0], DUMMY_MAP, DUMMY_MAP["answer"][0], id="8"),
        pytest.param(
            ["answer", 0, "forty-two"],
            DUMMY_MAP,
            DUMMY_MAP["answer"][0]["forty-two"],
            id="9",
        ),
        pytest.param(
            ["answer", 0, "forty-two", 0],
            DUMMY_MAP,
            DUMMY_MAP["answer"][0]["forty-two"][0],
            id="10",
        ),
        pytest.param(
            ["answer", 0, "forty-two", 1],
            DUMMY_MAP,
            DUMMY_MAP["answer"][0]["forty-two"][1],
            id="11",
        ),
        pytest.param(
            ["answer", 0, "forty-two", 2],
            DUMMY_MAP,
            DUMMY_MAP["answer"][0]["forty-two"][2],
            id="12",
        ),
        pytest.param([], DUMMY_LIST, DUMMY_LIST, id="13"),
        pytest.param([0], DUMMY_LIST, DUMMY_LIST[0], id="14"),
        pytest.param([0, "foo"], DUMMY_LIST, DUMMY_LIST[0]["foo"], id="15"),
        pytest.param([1], DUMMY_LIST, DUMMY_LIST[1], id="16"),
        pytest.param([1, "bar"], DUMMY_LIST, DUMMY_LIST[1]["bar"], id="17"),
        pytest.param(
            [1, "bar", "some"],
            DUMMY_LIST,
            DUMMY_LIST[1]["bar"]["some"],
            id="18",
        ),
        pytest.param([1, "fruits"], DUMMY_LIST, DUMMY_LIST[1]["fruits"], id="19"),
        pytest.param([1, "fruits", 0], DUMMY_LIST, DUMMY_LIST[1]["fruits"][0], id="20"),
        pytest.param([1, "fruits", 1], DUMMY_LIST, DUMMY_LIST[1]["fruits"][1], id="21"),
        pytest.param([2], DUMMY_LIST, DUMMY_LIST[2], id="22"),
        pytest.param([2, "answer"], DUMMY_LIST, DUMMY_LIST[2]["answer"], id="23"),
        pytest.param([2, "answer", 0], DUMMY_LIST, DUMMY_LIST[2]["answer"][0], id="24"),
        pytest.param(
            [2, "answer", 0, "forty-two"],
            DUMMY_LIST,
            DUMMY_LIST[2]["answer"][0]["forty-two"],
            id="25",
        ),
        pytest.param(
            [2, "answer", 0, "forty-two", 0],
            DUMMY_LIST,
            DUMMY_LIST[2]["answer"][0]["forty-two"][0],
            id="26",
        ),
        pytest.param(
            [2, "answer", 0, "forty-two", 1],
            DUMMY_LIST,
            DUMMY_LIST[2]["answer"][0]["forty-two"][1],
            id="27",
        ),
        pytest.param(
            [2, "answer", 0, "forty-two", 2],
            DUMMY_LIST,
            DUMMY_LIST[2]["answer"][0]["forty-two"][2],
            id="28",
        ),
        pytest.param(
            [],
            "this is a string that should be returned as is, ignoring path.",
            "this is a string that should be returned as is, ignoring path.",
            id="29",
        ),
        pytest.param(
            [2, "answer", 0, "forty-two", 2],
            "this is a string that should be returned as is, ignoring path.",
            "this is a string that should be returned as is, ignoring path.",
            id="30",
        ),
    ),
)
def test_seek(
    yaml_path: list[int | str],
    data: MutableMapping[str, Any] | MutableSequence[Any] | str,
    expected: Any,
) -> None:
    """Ensure TransformMixin.seek() retrieves the correct data."""
    actual = TransformMixin.seek(yaml_path, data)
    assert actual == expected
| 5,551 | Python | .py | 143 | 30.244755 | 88 | 0.524736 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,171 | test_task_includes.py | ansible_ansible-lint/test/test_task_includes.py | """Tests related to task inclusions."""
import pytest
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
    ("filename", "file_count", "match_count"),
    (
        pytest.param(
            "examples/playbooks/blockincludes.yml",
            4,
            3,
            id="blockincludes",
        ),
        pytest.param(
            "examples/playbooks/blockincludes2.yml",
            4,
            3,
            id="blockincludes2",
        ),
        pytest.param("examples/playbooks/taskincludes.yml", 3, 6, id="taskincludes"),
        pytest.param("examples/playbooks/taskimports.yml", 5, 3, id="taskimports"),
        pytest.param(
            "examples/playbooks/include-in-block.yml",
            3,
            1,
            id="include-in-block",
        ),
        pytest.param(
            "examples/playbooks/include-import-tasks-in-role.yml",
            4,
            2,
            id="role_with_task_inclusions",
        ),
    ),
)
def test_included_tasks(
    default_rules_collection: RulesCollection,
    filename: str,
    file_count: int,
    match_count: int,
) -> None:
    """Check if number of loaded files is correct."""
    target = Lintable(filename)
    # name[prefix] is not on by default; enable it for these fixtures.
    default_rules_collection.options.enable_list = ["name[prefix]"]
    runner = Runner(target, rules=default_rules_collection)
    matches = runner.run()
    assert len(runner.lintables) == file_count
    assert len(matches) == match_count
| 1,541 | Python | .py | 49 | 23.673469 | 85 | 0.610887 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,172 | test_requirements.py | ansible_ansible-lint/test/test_requirements.py | """Tests requirements module."""
from ansible_compat.runtime import Runtime
from ansiblelint.requirements import Reqs
def test_reqs() -> None:
    """Performs basic testing of Reqs class."""
    reqs = Reqs()
    runtime = Runtime()
    assert "ansible-core" in reqs
    # An ancient ansible-core version must be rejected...
    assert reqs.matches("ansible-core", "0.0") is False
    # ...and an unknown package name never matches.
    assert reqs.matches("this-package-does-not-exist", "0.0") is False
    # The currently installed ansible-core version must be supported.
    assert reqs.matches("ansible-core", runtime.version)
| 615 | Python | .py | 14 | 39.785714 | 70 | 0.726968 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,173 | test_profiles.py | ansible_ansible-lint/test/test_profiles.py | """Tests for the --profile feature."""
import platform
import subprocess
import sys
from _pytest.capture import CaptureFixture
from ansiblelint.rules import RulesCollection, filter_rules_with_profile
from ansiblelint.rules.risky_shell_pipe import ShellWithoutPipefail
from ansiblelint.text import strip_ansi_escape
def test_profile_min() -> None:
    """Asserts our ability to unload rules based on profile."""
    collection = RulesCollection()
    assert len(collection.rules) == 4, "Unexpected number of implicit rules."
    # register one extra rule that we know not to be part of "min" profile
    collection.register(ShellWithoutPipefail())
    assert len(collection.rules) == 5, "Failed to register new rule."
    # Filtering mutates the collection in place, dropping off-profile rules.
    filter_rules_with_profile(collection.rules, "min")
    assert (
        len(collection.rules) == 4
    ), "Failed to unload rule that is not part of 'min' profile."
def test_profile_listing(capfd: CaptureFixture[str]) -> None:
    """Test that run without arguments it will detect and lint the entire repository."""
    cmd = [
        sys.executable,
        "-m",
        "ansiblelint",
        "-P",
    ]
    assert subprocess.run(cmd, check=False).returncode == 0
    out, err = capfd.readouterr()
    # Confirmation that it runs in auto-detect mode
    assert "command-instead-of-module" in out
    # On WSL we might see this warning on stderr:
    # [WARNING]: Ansible is being run in a world writable directory
    # WSL2 has "WSL2" in platform name but WSL1 has "microsoft":
    platform_name = platform.platform().lower()
    ignorable = (
        "SyntaxWarning:",
        "Skipped installing collection dependencies due to running in offline mode.",
    )
    err_lines = [
        line
        for line in strip_ansi_escape(err).splitlines()
        if not any(fragment in line for fragment in ignorable)
    ]
    # Outside WSL, any remaining stderr output is unexpected.
    if all(word not in platform_name for word in ["wsl", "microsoft"]):
        assert (
            not err_lines
        ), f"Unexpected stderr output found while running on {platform_name} platform:\n{err_lines}"
| 2,129 | Python | .py | 50 | 36.48 | 100 | 0.688104 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,174 | test_skip_import_playbook.py | ansible_ansible-lint/test/test_skip_import_playbook.py | """Test related to skipping import_playbook."""
from pathlib import Path
import pytest
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
IMPORTED_PLAYBOOK = """\
---
- name: Fixture
hosts: all
tasks:
- name: Success # noqa: no-free-form
ansible.builtin.fail: msg="fail"
when: false
"""
MAIN_PLAYBOOK = """\
---
- name: Fixture
hosts: all
tasks:
- name: Should be shell # noqa: command-instead-of-shell no-changed-when no-free-form
ansible.builtin.shell: echo lol
- name: Should not be imported
import_playbook: imported_playbook.yml
"""
@pytest.fixture(name="playbook")
def fixture_playbook(tmp_path: Path) -> str:
    """Create a reusable per-test playbook."""
    # The main playbook references the imported one by its relative file name.
    (tmp_path / "imported_playbook.yml").write_text(IMPORTED_PLAYBOOK)
    main_file = tmp_path / "playbook.yml"
    main_file.write_text(MAIN_PLAYBOOK)
    return str(main_file)
def test_skip_import_playbook(
    default_rules_collection: RulesCollection,
    playbook: str,
) -> None:
    """Verify that a playbook import is skipped after a failure."""
    # No findings are expected from either the playbook or its import.
    matches = Runner(playbook, rules=default_rules_collection).run()
    assert len(matches) == 0
| 1,229 | Python | .py | 39 | 28.179487 | 90 | 0.72095 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,175 | test_file_path_evaluation.py | ansible_ansible-lint/test/test_file_path_evaluation.py | """Testing file path evaluation when using import_tasks / include_tasks."""
from __future__ import annotations
import textwrap
from typing import TYPE_CHECKING
import pytest
from ansiblelint.runner import Runner
if TYPE_CHECKING:
from pathlib import Path
from ansiblelint.rules import RulesCollection
LAYOUT_IMPORTS: dict[str, str] = {
"main.yml": textwrap.dedent(
"""\
---
- name: Fixture
hosts: target
gather_facts: false
tasks:
- name: From main import task 1
ansible.builtin.import_tasks: tasks/task_1.yml
""",
),
"tasks/task_1.yml": textwrap.dedent(
"""\
---
- name: task_1 | From task 1 import task 2
ansible.builtin.import_tasks: tasks/task_2.yml
""",
),
"tasks/task_2.yml": textwrap.dedent(
"""\
---
- name: task_2 | From task 2 import subtask 1
ansible.builtin.import_tasks: tasks/subtasks/subtask_1.yml
""",
),
"tasks/subtasks/subtask_1.yml": textwrap.dedent(
"""\
---
- name: subtasks | subtask_1 | From subtask 1 import subtask 2
ansible.builtin.import_tasks: tasks/subtasks/subtask_2.yml
""",
),
"tasks/subtasks/subtask_2.yml": textwrap.dedent(
"""\
---
- name: subtasks | subtask_2 | From subtask 2 do something
debug: # <-- expected to raise fqcn[action-core]
msg: |
Something...
""",
),
}
LAYOUT_INCLUDES: dict[str, str] = {
"main.yml": textwrap.dedent(
"""\
---
- name: Fixture
hosts: target
gather_facts: false
tasks:
- name: From main import task 1
ansible.builtin.include_tasks: tasks/task_1.yml
""",
),
"tasks/task_1.yml": textwrap.dedent(
"""\
---
- name: task_1 | From task 1 import task 2
ansible.builtin.include_tasks: tasks/task_2.yml
""",
),
"tasks/task_2.yml": textwrap.dedent(
"""\
---
- name: task_2 | From task 2 import subtask 1
ansible.builtin.include_tasks: tasks/subtasks/subtask_1.yml
""",
),
"tasks/subtasks/subtask_1.yml": textwrap.dedent(
"""\
---
- name: subtasks | subtask_1 | From subtask 1 import subtask 2
ansible.builtin.include_tasks: tasks/subtasks/subtask_2.yml
""",
),
"tasks/subtasks/subtask_2.yml": textwrap.dedent(
"""\
---
- name: subtasks | subtask_2 | From subtask 2 do something
debug: # <-- expected to raise fqcn[action-core]
msg: |
Something...
""",
),
}
@pytest.mark.parametrize(
    "ansible_project_layout",
    (
        pytest.param(LAYOUT_IMPORTS, id="using-only-import_tasks"),
        pytest.param(LAYOUT_INCLUDES, id="using-only-include_tasks"),
    ),
)
def test_file_path_evaluation(
    tmp_path: Path,
    default_rules_collection: RulesCollection,
    ansible_project_layout: dict[str, str],
) -> None:
    """Test file path evaluation when using import_tasks / include_tasks in the project.

    The goal of this test is to verify our ability to find errors from within
    nested includes.
    """
    # Materialize the project layout on disk.
    for relative_path, content in ansible_project_layout.items():
        target = tmp_path / relative_path
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content)
    matches = Runner(str(tmp_path), rules=default_rules_collection).run()
    # The bare `debug:` in the deepest subtask is the only expected finding.
    assert len(matches) == 1
    assert matches[0].rule.id == "fqcn"
| 3,690 | Python | .py | 118 | 23.822034 | 88 | 0.585839 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,176 | test_errors.py | ansible_ansible-lint/test/test_errors.py | """Test ansiblelint.errors."""
import pytest
from ansiblelint.errors import MatchError
def test_matcherror() -> None:
    """Exercise basic MatchError behavior: ordering, position and level."""
    match = MatchError("foo", lineno=1, column=2)
    # Ordering against unrelated types must be rejected...
    with pytest.raises(TypeError):
        assert match <= 0
    # ...while inequality against them simply holds.
    assert match != 0
    assert match.position == "1:2"
    # Without a column, position renders the line number alone.
    assert MatchError("foo", lineno=1).position == "1"
    # str and repr are for the moment the same
    assert str(match) == repr(match)
    # tests implicit level
    assert match.level == "warning"
| 529 | Python | .py | 16 | 28.25 | 49 | 0.666667 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,177 | test_runner.py | ansible_ansible-lint/test/test_runner.py | """Tests for runner submodule."""
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any
import pytest
from ansiblelint import formatters
from ansiblelint.file_utils import Lintable
from ansiblelint.runner import Runner
if TYPE_CHECKING:
from ansiblelint.rules import RulesCollection
# Fixture playbook producing hundreds of findings; also reused to verify
# behavior when the same path appears in exclude_paths (see test_runner).
LOTS_OF_WARNINGS_PLAYBOOK = Path("examples/playbooks/lots_of_warnings.yml").resolve()
# NOTE(review): the lots_of_warnings case passes the playbook itself in
# exclude_paths yet still expects 993 matches — exclusion apparently does not
# suppress an explicitly passed target; confirm against Runner semantics.
@pytest.mark.parametrize(
    ("playbook", "exclude", "length"),
    (
        pytest.param(
            Path("examples/playbooks/nomatchestest.yml"),
            [],
            0,
            id="nomatchestest",
        ),
        pytest.param(Path("examples/playbooks/unicode.yml"), [], 1, id="unicode"),
        pytest.param(
            LOTS_OF_WARNINGS_PLAYBOOK,
            [LOTS_OF_WARNINGS_PLAYBOOK],
            993,
            id="lots_of_warnings",
        ),
        pytest.param(Path("examples/playbooks/become.yml"), [], 0, id="become"),
        pytest.param(
            Path("examples/playbooks/contains_secrets.yml"),
            [],
            0,
            id="contains_secrets",
        ),
    ),
)
def test_runner(
    default_rules_collection: RulesCollection,
    playbook: Path,
    exclude: list[str],
    length: int,
) -> None:
    """Test that runner can go through any corner cases."""
    runner = Runner(playbook, rules=default_rules_collection, exclude_paths=exclude)
    matches = runner.run()
    assert len(matches) == length
def test_runner_exclude_paths(default_rules_collection: RulesCollection) -> None:
    """Test that exclude paths do work."""
    matches = Runner(
        "examples/playbooks/deep/",
        rules=default_rules_collection,
        exclude_paths=["examples/playbooks/deep/empty.yml"],
    ).run()
    # With the offending file excluded, nothing should be reported.
    assert len(matches) == 0
@pytest.mark.parametrize(
    ("exclude_path"),
    (pytest.param("**/playbooks_globs/*b.yml", id="1"),),
)
def test_runner_exclude_globs(
    default_rules_collection: RulesCollection,
    exclude_path: str,
) -> None:
    """Test that globs work."""
    matches = Runner(
        "examples/playbooks_globs",
        rules=default_rules_collection,
        exclude_paths=[exclude_path],
    ).run()
    # Files matched by the glob pattern are skipped entirely.
    assert len(matches) == 0
@pytest.mark.parametrize(
    ("formatter_cls"),
    (
        pytest.param(formatters.Formatter, id="Formatter-plain"),
        pytest.param(formatters.ParseableFormatter, id="ParseableFormatter-colored"),
        pytest.param(formatters.QuietFormatter, id="QuietFormatter-colored"),
        pytest.param(formatters.Formatter, id="Formatter-colored"),
    ),
)
def test_runner_unicode_format(
    default_rules_collection: RulesCollection,
    formatter_cls: type[formatters.BaseFormatter[Any]],
) -> None:
    """Check that all formatters are unicode-friendly."""
    # Lint a playbook that contains non-ASCII content...
    unicode_playbook = Lintable("examples/playbooks/unicode.yml", kind="playbook")
    matches = Runner(unicode_playbook, rules=default_rules_collection).run()
    # ...and make sure formatting the resulting match does not raise.
    formatter = formatter_cls(Path.cwd(), display_relative_path=True)
    formatter.apply(matches[0])
@pytest.mark.parametrize(
    "directory_name",
    (
        pytest.param(Path("test/fixtures/verbosity-tests"), id="rel"),
        pytest.param(Path("test/fixtures/verbosity-tests").resolve(), id="abs"),
    ),
)
def test_runner_with_directory(
    default_rules_collection: RulesCollection,
    directory_name: Path,
) -> None:
    """Check that runner detects a directory as role."""
    runner = Runner(directory_name, rules=default_rules_collection)
    # The directory itself must have been collected as a role lintable.
    assert Lintable(name=directory_name, kind="role") in runner.lintables
def test_files_not_scanned_twice(default_rules_collection: RulesCollection) -> None:
    """Ensure that lintables aren't double-checked."""
    seen: set[Lintable] = set()
    first_playbook = Path("examples/playbooks/common-include-1.yml").resolve()
    first_runner = Runner(
        first_playbook,
        rules=default_rules_collection,
        verbosity=0,
        checked_files=seen,
    )
    first_matches = first_runner.run()
    assert len(first_runner.checked_files) == 2
    assert len(first_matches) == 1
    second_playbook = Path("examples/playbooks/common-include-2.yml").resolve()
    second_runner = Runner(
        str(second_playbook),
        rules=default_rules_collection,
        verbosity=0,
        checked_files=seen,
    )
    second_matches = second_runner.run()
    assert len(second_runner.checked_files) == 3
    # The shared include was already processed by the first run, so the
    # checked_files set acts as a bypass list and the second run reports nothing.
    assert len(second_matches) == 0
@pytest.mark.parametrize(
    ("filename", "failures", "checked_files_no"),
    (
        pytest.param(
            "examples/playbooks/common-include-wrong-syntax.yml",
            1,
            1,
            id="1",
        ),
        pytest.param(
            "examples/playbooks/common-include-wrong-syntax2.yml",
            1,
            1,
            id="2",
        ),
        pytest.param(
            "examples/playbooks/common-include-wrong-syntax3.yml",
            0,
            2,
            id="3",
        ),
    ),
)
def test_include_wrong_syntax(
    filename: str,
    failures: int,
    checked_files_no: int,
    default_rules_collection: RulesCollection,
) -> None:
    """Ensure that lintables aren't double-checked."""
    already_checked: set[Lintable] = set()
    target = Path(filename).resolve()
    runner = Runner(
        target,
        rules=default_rules_collection,
        verbosity=0,
        checked_files=already_checked,
    )
    matches = runner.run()
    assert len(runner.checked_files) == checked_files_no
    assert len(matches) == failures, matches
    # Every reported failure must come from the missing-include detection.
    for match in matches:
        assert match.tag == "syntax-check[no-file]"
def test_runner_not_found(default_rules_collection: RulesCollection) -> None:
    """Ensure a nonexistent target path is reported as a load failure."""
    checked: set[Lintable] = set()
    missing = Path("this/folder/does/not/exist").resolve()
    runner = Runner(
        missing,
        rules=default_rules_collection,
        verbosity=0,
        checked_files=checked,
    )
    matches = runner.run()
    assert len(runner.checked_files) == 1
    assert len(matches) == 1
    assert matches[0].tag == "load-failure[not-found]"
def test_runner_tmp_file(
    tmp_path: Path,
    default_rules_collection: RulesCollection,
) -> None:
    """Ensure we do not ignore an explicit temporary file from linting."""
    # https://github.com/ansible/ansible-lint/issues/2628
    empty_playbook = tmp_path / "playbook.yml"
    empty_playbook.write_text("---\n")
    matches = Runner(
        empty_playbook,
        rules=default_rules_collection,
        verbosity=0,
    ).run()
    assert len(matches) == 1
    assert matches[0].tag == "syntax-check[empty-playbook]"
def test_with_full_path(default_rules_collection: RulesCollection) -> None:
    """Ensure that lintables include file path starting from home directory."""
    target_dir = Path("examples/playbooks/deep").absolute()
    matches = Runner(
        target_dir,
        rules=default_rules_collection,
        verbosity=0,
    ).run()
    assert len(matches) == 1
    assert matches[0].tag == "name[casing]"
| 8,407 | Python | .py | 235 | 29.868085 | 85 | 0.670928 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,178 | test_config.py | ansible_ansible-lint/test/test_config.py | """Tests for config module."""
from ansiblelint.config import PROFILES
from ansiblelint.rules import RulesCollection
def test_profiles(default_rules_collection: RulesCollection) -> None:
    """Test the rules included in profiles are valid."""
    # Profiles must never reference rules carrying these tags.
    banned_tags = {"opt-in", "experimental"}
    for profile_name, profile_data in PROFILES.items():
        for profile_rule_id in profile_data["rules"]:
            for rule in default_rules_collection.rules:
                if rule.id != profile_rule_id:
                    continue
                overlap = banned_tags & set(rule.tags)
                assert (
                    not overlap
                ), f"Rule {profile_rule_id} from {profile_name} profile cannot use {overlap} tag."
| 759 | Python | .py | 14 | 43.571429 | 123 | 0.633423 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,179 | test_include_miss_file_with_role.py | ansible_ansible-lint/test/test_include_miss_file_with_role.py | """Tests related to inclusions."""
import pytest
from _pytest.logging import LogCaptureFixture
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_cases_warning_message(default_rules_collection: RulesCollection) -> None:
    """Test that including a non-existing file produces an error."""
    runner = Runner(
        "examples/playbooks/play_miss_include.yml",
        rules=default_rules_collection,
    )
    matches = runner.run()
    # Play plus its two resolvable inclusions are discovered as lintables.
    assert len(runner.lintables) == 3
    assert len(matches) == 1
    assert "No such file or directory" in matches[0].message
@pytest.mark.parametrize(
    "playbook_path",
    (
        pytest.param("examples/playbooks/test_include_inplace.yml", id="inplace"),
        pytest.param("examples/playbooks/test_include_relative.yml", id="relative"),
    ),
)
def test_cases_that_do_not_report(
    playbook_path: str,
    default_rules_collection: RulesCollection,
    caplog: LogCaptureFixture,
) -> None:
    """Test that relative inclusions are properly followed."""
    matches = Runner(playbook_path, rules=default_rules_collection).run()
    # Count log records hinting that an included file could not be located.
    markers = ("No such file or directory", "Couldn't open")
    noexist_message_count = sum(
        1 for record in caplog.records for msg in markers if msg in str(record)
    )
    assert noexist_message_count == 0
    assert len(matches) == 0
| 1,434 | Python | .py | 35 | 35.942857 | 84 | 0.715827 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,180 | test_ansiblelintrule.py | ansible_ansible-lint/test/test_ansiblelintrule.py | """Generic tests for AnsibleLintRule class."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import pytest
from ansiblelint.rules import AnsibleLintRule, RulesCollection
if TYPE_CHECKING:
from ansiblelint.config import Options
def test_unjinja() -> None:
    """Verify that unjinja understands nested mustache."""
    source = "{{ a }} {% b %} {# try to confuse parsing inside a comment { {{}} } #}"
    expected = "JINJA_EXPRESSION JINJA_STATEMENT JINJA_COMMENT"
    # Each Jinja construct collapses to a fixed placeholder token.
    assert AnsibleLintRule.unjinja(source) == expected
@pytest.mark.parametrize("rule_config", ({}, {"foo": True, "bar": 1}))
def test_rule_config(
    rule_config: dict[str, Any],
    config_options: Options,
) -> None:
    """Check that a rule config can be accessed."""
    config_options.rules["load-failure"] = rule_config
    collection = RulesCollection(options=config_options)
    # Only the load-failure rule carries the injected configuration.
    for rule in (r for r in collection if r.id == "load-failure"):
        assert rule._collection  # noqa: SLF001
        assert rule.rule_config == rule_config
| 1,040 | Python | .py | 24 | 38.791667 | 83 | 0.696127 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,181 | test_loaders.py | ansible_ansible-lint/test/test_loaders.py | """Tests for loaders submodule."""
import os
import tempfile
import uuid
from pathlib import Path
from textwrap import dedent
from ansiblelint.loaders import IGNORE_FILE, load_ignore_txt
def test_load_ignore_txt_default_empty() -> None:
    """Test load_ignore_txt when no ignore-file is present."""
    with tempfile.TemporaryDirectory() as temporary_directory:
        original_cwd = Path.cwd()
        os.chdir(temporary_directory)
        try:
            result = load_ignore_txt()
        finally:
            # Always restore the working directory for later tests.
            os.chdir(original_cwd)
    assert not result
def test_load_ignore_txt_default_success() -> None:
    """Test load_ignore_txt with an existing ignore-file in the default location."""
    with tempfile.TemporaryDirectory() as temporary_directory:
        ignore_file = Path(temporary_directory) / IGNORE_FILE.default
        ignore_file.write_text(
            dedent(
                """
                # See https://ansible.readthedocs.io/projects/lint/configuring/#ignoring-rules-for-entire-files
                playbook2.yml package-latest # comment
                playbook2.yml foo-bar
                """,
            ),
            encoding="utf-8",
        )
        cwd = Path.cwd()
        try:
            os.chdir(temporary_directory)
            result = load_ignore_txt()
        finally:
            # Always restore the working directory for later tests.
            os.chdir(cwd)
    assert result == {"playbook2.yml": {"package-latest", "foo-bar"}}
def test_load_ignore_txt_default_success_alternative() -> None:
    """Test load_ignore_txt with an ignore-file in the alternative location ('.config' subdirectory)."""
    with tempfile.TemporaryDirectory() as temporary_directory:
        ignore_file = Path(temporary_directory) / IGNORE_FILE.alternative
        # Alternative location lives in a subdirectory; create it first.
        ignore_file.parent.mkdir(parents=True)
        ignore_file.write_text(
            dedent(
                """
                playbook.yml foo-bar
                playbook.yml more-foo # what-the-foo?
                tasks/main.yml more-bar
                """,
            ),
            encoding="utf-8",
        )
        cwd = Path.cwd()
        try:
            os.chdir(temporary_directory)
            result = load_ignore_txt()
        finally:
            # Always restore the working directory for later tests.
            os.chdir(cwd)
    assert result == {
        "playbook.yml": {"more-foo", "foo-bar"},
        "tasks/main.yml": {"more-bar"},
    }
def test_load_ignore_txt_custom_success() -> None:
    """Test load_ignore_txt with an ignore-file in a user defined location."""
    with tempfile.TemporaryDirectory() as temporary_directory:
        ignore_file = Path(temporary_directory) / "subdir" / "my_ignores.txt"
        ignore_file.parent.mkdir(parents=True, exist_ok=True)
        ignore_file.write_text(
            dedent(
                """
                playbook.yml hector
                vars/main.yml tuco
                roles/guzman/tasks/main.yml lalo
                roles/eduardo/tasks/main.yml lalo
                """,
            ),
            encoding="utf-8",
        )
        cwd = Path.cwd()
        try:
            os.chdir(temporary_directory)
            # Explicit path argument instead of the default lookup.
            result = load_ignore_txt(Path(ignore_file))
        finally:
            # Always restore the working directory for later tests.
            os.chdir(cwd)
    assert result == {
        "playbook.yml": {"hector"},
        "roles/eduardo/tasks/main.yml": {"lalo"},
        "roles/guzman/tasks/main.yml": {"lalo"},
        "vars/main.yml": {"tuco"},
    }
def test_load_ignore_txt_custom_fail() -> None:
    """Test load_ignore_txt with a user defined but invalid ignore-file location."""
    # A random UUID is guaranteed not to exist as a file name.
    missing_path = Path(str(uuid.uuid4()))
    assert not load_ignore_txt(missing_path)
| 3,787 | Python | .py | 95 | 29.147368 | 115 | 0.575716 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,182 | test_transformer.py | ansible_ansible-lint/test/test_transformer.py | # cspell:ignore classinfo
"""Tests for Transformer."""
from __future__ import annotations
import builtins
import os
import shutil
from pathlib import Path
from typing import TYPE_CHECKING, Any
from unittest import mock
import pytest
import ansiblelint.__main__ as main
from ansiblelint.app import App
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import TransformMixin
# noinspection PyProtectedMember
from ansiblelint.runner import LintResult, get_matches
from ansiblelint.transformer import Transformer
if TYPE_CHECKING:
from ansiblelint.config import Options
from ansiblelint.errors import MatchError
from ansiblelint.rules import RulesCollection
@pytest.fixture(name="runner_result")
def fixture_runner_result(
    config_options: Options,
    default_rules_collection: RulesCollection,
    playbook_str: str,
    monkeypatch: pytest.MonkeyPatch,
) -> LintResult:
    """Fixture that runs the Runner to populate a LintResult for a given file.

    :param config_options: Configuration options
    :param default_rules_collection: Default rules collection
    :param playbook_str: Playbook path supplied by the test's parametrization
    :param monkeypatch: Monkeypatch fixture used to set environment variables
    :returns: The LintResult produced by linting ``playbook_str``
    """
    # needed for testing transformer when roles/modules are missing:
    monkeypatch.setenv("ANSIBLE_LINT_NODEPS", "1")
    config_options.lintables = [playbook_str]
    result = get_matches(rules=default_rules_collection, options=config_options)
    return result
@pytest.mark.parametrize(
    ("playbook_str", "matches_count", "transformed", "is_owned_by_ansible"),
    (
        # reuse TestRunner::test_runner test cases to ensure transformer does not mangle matches
        pytest.param(
            "examples/playbooks/nomatchestest.yml",
            0,
            False,
            True,
            id="nomatchestest",
        ),
        pytest.param("examples/playbooks/unicode.yml", 1, False, True, id="unicode"),
        pytest.param(
            "examples/playbooks/lots_of_warnings.yml",
            993,
            False,
            True,
            id="lots_of_warnings",
        ),
        pytest.param("examples/playbooks/become.yml", 0, False, True, id="become"),
        pytest.param(
            "examples/playbooks/contains_secrets.yml",
            0,
            False,
            True,
            id="contains_secrets",
        ),
        pytest.param(
            "examples/playbooks/vars/empty_vars.yml",
            0,
            False,
            True,
            id="empty_vars",
        ),
        pytest.param(
            "examples/playbooks/vars/strings.yml",
            0,
            True,
            True,
            id="strings",
        ),
        pytest.param("examples/playbooks/vars/empty.yml", 1, False, True, id="empty"),
        pytest.param("examples/playbooks/fqcn.yml", 3, True, True, id="fqcn"),
        pytest.param(
            "examples/playbooks/multi_yaml_doc.yml",
            1,
            False,
            True,
            id="multi_yaml_doc",
        ),
        pytest.param(
            "examples/playbooks/transform_command_instead_of_shell.yml",
            3,
            True,
            True,
            id="cmd_instead_of_shell",
        ),
        pytest.param(
            "examples/playbooks/transform-deprecated-local-action.yml",
            1,
            True,
            True,
            id="dep_local_action",
        ),
        pytest.param(
            "examples/playbooks/transform-block-indentation-indicator.yml",
            0,
            True,
            True,
            id="multiline_msg_with_indent_indicator",
        ),
        pytest.param(
            "examples/playbooks/transform-jinja.yml",
            7,
            True,
            True,
            id="jinja_spacing",
        ),
        pytest.param(
            "examples/playbooks/transform-no-jinja-when.yml",
            3,
            True,
            True,
            id="no_jinja_when",
        ),
        pytest.param(
            "examples/playbooks/vars/transform_nested_data.yml",
            3,
            True,
            True,
            id="nested",
        ),
        pytest.param(
            "examples/playbooks/transform-key-order.yml",
            6,
            True,
            True,
            id="key_order_transform",
        ),
        pytest.param(
            "examples/playbooks/transform-no-free-form.yml",
            5,
            True,
            True,
            id="no_free_form_transform",
        ),
        pytest.param(
            "examples/playbooks/transform-partial-become.yml",
            4,
            True,
            True,
            id="partial_become",
        ),
        pytest.param(
            "examples/playbooks/transform-key-order-play.yml",
            1,
            True,
            True,
            id="key_order_play_transform",
        ),
        pytest.param(
            "examples/playbooks/transform-key-order-block.yml",
            1,
            True,
            True,
            id="key_order_block_transform",
        ),
        pytest.param(
            "examples/.github/workflows/sample.yml",
            0,
            False,
            False,
            id="github-workflow",
        ),
        pytest.param(
            "examples/playbooks/invalid-transform.yml",
            1,
            False,
            True,
            id="invalid_transform",
        ),
        pytest.param(
            "examples/roles/name_prefix/tasks/test.yml",
            1,
            True,
            True,
            id="name_casing_prefix",
        ),
        pytest.param(
            "examples/roles/name_casing/tasks/main.yml",
            2,
            True,
            True,
            id="name_case_roles",
        ),
        pytest.param(
            "examples/playbooks/4114/transform-with-missing-role-and-modules.yml",
            1,
            True,
            True,
            id="4114",
        ),
    ),
)
# ANSIBLE_LINT_WRITE_TMP=1 routes the transformer output into a ".tmp<suffix>"
# sibling file (read back and removed below) instead of rewriting the fixture.
@mock.patch.dict(os.environ, {"ANSIBLE_LINT_WRITE_TMP": "1"}, clear=True)
def test_transformer(  # pylint: disable=too-many-arguments,too-many-positional-arguments
    config_options: Options,
    playbook_str: str,
    runner_result: LintResult,
    transformed: bool,
    is_owned_by_ansible: bool,
    matches_count: int,
) -> None:
    """Test that transformer can go through any corner cases.

    Based on TestRunner::test_runner

    :param config_options: Configuration options
    :param playbook_str: Playbook path under test (also consumed by runner_result)
    :param runner_result: LintResult produced for playbook_str by the fixture
    :param transformed: Whether a ".transformed" expectation file is compared
    :param is_owned_by_ansible: Expected value of Lintable.is_owned_by_ansible()
    :param matches_count: Expected number of matches before transforming
    """
    # test ability to detect is_owned_by_ansible
    assert Lintable(playbook_str).is_owned_by_ansible() == is_owned_by_ansible
    playbook = Path(playbook_str)
    config_options.write_list = ["all"]
    matches = runner_result.matches
    assert len(matches) == matches_count
    transformer = Transformer(result=runner_result, options=config_options)
    transformer.run()
    orig_content = playbook.read_text(encoding="utf-8")
    # Only cases with a ".transformed" expectation file compare the output.
    if transformed:
        expected_content = playbook.with_suffix(
            f".transformed{playbook.suffix}",
        ).read_text(encoding="utf-8")
        transformed_content = playbook.with_suffix(f".tmp{playbook.suffix}").read_text(
            encoding="utf-8",
        )
        assert orig_content != transformed_content
        assert expected_content == transformed_content
        # Clean up the temporary output so reruns start fresh.
        playbook.with_suffix(f".tmp{playbook.suffix}").unlink()
# The table below exhaustively enumerates every combination of length 1-3 of
# the three token kinds ("all", "none", a concrete rule id) and the expected
# effective set for each.
@pytest.mark.parametrize(
    ("write_list", "expected"),
    (
        # 1 item
        (["all"], {"all"}),
        (["none"], {"none"}),
        (["rule-id"], {"rule-id"}),
        # 2 items
        (["all", "all"], {"all"}),
        (["all", "none"], {"none"}),
        (["all", "rule-id"], {"all"}),
        (["none", "all"], {"all"}),
        (["none", "none"], {"none"}),
        (["none", "rule-id"], {"rule-id"}),
        (["rule-id", "all"], {"all"}),
        (["rule-id", "none"], {"none"}),
        (["rule-id", "rule-id"], {"rule-id"}),
        # 3 items
        (["all", "all", "all"], {"all"}),
        (["all", "all", "none"], {"none"}),
        (["all", "all", "rule-id"], {"all"}),
        (["all", "none", "all"], {"all"}),
        (["all", "none", "none"], {"none"}),
        (["all", "none", "rule-id"], {"rule-id"}),
        (["all", "rule-id", "all"], {"all"}),
        (["all", "rule-id", "none"], {"none"}),
        (["all", "rule-id", "rule-id"], {"all"}),
        (["none", "all", "all"], {"all"}),
        (["none", "all", "none"], {"none"}),
        (["none", "all", "rule-id"], {"all"}),
        (["none", "none", "all"], {"all"}),
        (["none", "none", "none"], {"none"}),
        (["none", "none", "rule-id"], {"rule-id"}),
        (["none", "rule-id", "all"], {"all"}),
        (["none", "rule-id", "none"], {"none"}),
        (["none", "rule-id", "rule-id"], {"rule-id"}),
        (["rule-id", "all", "all"], {"all"}),
        (["rule-id", "all", "none"], {"none"}),
        (["rule-id", "all", "rule-id"], {"all"}),
        (["rule-id", "none", "all"], {"all"}),
        (["rule-id", "none", "none"], {"none"}),
        (["rule-id", "none", "rule-id"], {"rule-id"}),
        (["rule-id", "rule-id", "all"], {"all"}),
        (["rule-id", "rule-id", "none"], {"none"}),
        (["rule-id", "rule-id", "rule-id"], {"rule-id"}),
    ),
)
def test_effective_write_set(write_list: list[str], expected: set[str]) -> None:
    """Make sure effective_write_set handles all/none keywords correctly."""
    actual = Transformer.effective_write_set(write_list)
    assert actual == expected
def test_pruned_err_after_fix(monkeypatch: pytest.MonkeyPatch, tmpdir: Path) -> None:
    """Test that pruned errors are not reported after fixing.

    The fix phase sees 7 matches; after fixing, only 1 match must remain in
    the reported outcome (the rest were pruned because they got fixed).

    :param monkeypatch: Monkeypatch
    :param tmpdir: Temporary directory
    """
    file = Path("examples/playbooks/transform-jinja.yml")
    source = Path.cwd() / file
    # Work on a copy so --fix does not modify the repository fixture.
    dest = tmpdir / source.name
    shutil.copyfile(source, dest)
    monkeypatch.setattr("sys.argv", ["ansible-lint", str(dest), "--fix=all"])
    fix_called = False
    orig_fix = main.fix
    def test_fix(
        runtime_options: Options,
        result: LintResult,
        rules: RulesCollection,
    ) -> None:
        """Wrap main.fix to check if it was called and match count is correct.

        :param runtime_options: Runtime options
        :param result: Lint result
        :param rules: Rules collection
        """
        nonlocal fix_called
        fix_called = True
        # Before fixing, all 7 matches are present.
        assert len(result.matches) == 7
        orig_fix(runtime_options, result, rules)
    report_called = False
    class TestApp(App):
        """Wrap App to check if it was called and match count is correct."""
        def report_outcome(
            self: TestApp,
            result: LintResult,
            *,
            mark_as_success: bool = False,
        ) -> int:
            """Wrap App.report_outcome to check if it was called and match count is correct.

            :param result: Lint result
            :param mark_as_success: Mark as success
            :returns: Exit code
            """
            nonlocal report_called
            report_called = True
            # After fixing, only the single unfixed match is reported.
            assert len(result.matches) == 1
            return super().report_outcome(result, mark_as_success=mark_as_success)
    monkeypatch.setattr("ansiblelint.__main__.fix", test_fix)
    monkeypatch.setattr("ansiblelint.app.App", TestApp)
    # disable the App() caching because we cannot prevent the initial initialization from happening
    monkeypatch.setattr("ansiblelint.app._CACHED_APP", None)
    main.main()
    assert fix_called
    assert report_called
class TransformTests:
    """A carrier for some common test constants."""

    FILE_NAME = "examples/playbooks/transform-no-free-form.yml"
    FILE_TYPE = "playbook"
    LINENO = 5
    ID = "no-free-form"
    MATCH_TYPE = "task"
    VERSION_PART = "version=(1, 1)"

    @classmethod
    def match_id(cls) -> str:
        """Build the "<rule>/<type> <file>:<line>" identifier for the match.

        :returns: Match id string
        """
        location = f"{cls.FILE_NAME}:{cls.LINENO}"
        return f"{cls.ID}/{cls.MATCH_TYPE} {location}"

    @classmethod
    def rewrite_part(cls) -> str:
        """Build the "<file> (<type>), <version>" rewrite description.

        :returns: Rewrite part string
        """
        prefix = f"{cls.FILE_NAME} ({cls.FILE_TYPE})"
        return ", ".join((prefix, cls.VERSION_PART))
@pytest.fixture(name="test_result")
def fixture_test_result(
    config_options: Options,
    default_rules_collection: RulesCollection,
) -> tuple[LintResult, Options]:
    """Fixture that runs the Runner to populate a LintResult for a given file.

    The results are confirmed and limited to a single match.

    :param config_options: Configuration options
    :param default_rules_collection: Default rules collection
    :returns: Tuple of LintResult and Options
    """
    config_options.write_list = [TransformTests.ID]
    config_options.lintables = [TransformTests.FILE_NAME]
    result = get_matches(rules=default_rules_collection, options=config_options)
    match = result.matches[0]
    def write(*_args: Any, **_kwargs: Any) -> None:
        """Don't rewrite the test fixture.

        :param _args: Arguments
        :param _kwargs: Keyword arguments
        """
    # Replace the lintable's write with the no-op above so running the
    # transformer never touches the fixture file on disk.
    setattr(match.lintable, "write", write)  # noqa: B010
    # Sanity-check the match is the one the TransformTests constants describe.
    assert match.rule.id == TransformTests.ID
    assert match.filename == TransformTests.FILE_NAME
    assert match.lineno == TransformTests.LINENO
    assert match.match_type == TransformTests.MATCH_TYPE
    result.matches = [match]
    return result, config_options
def test_transform_na(
    caplog: pytest.LogCaptureFixture,
    monkeypatch: pytest.MonkeyPatch,
    test_result: tuple[LintResult, Options],
) -> None:
    """Test the transformer is not available.

    isinstance() is patched so the transformer's TransformMixin check fails,
    forcing the "fix not available" code path.

    :param caplog: Log capture fixture
    :param monkeypatch: Monkeypatch
    :param test_result: Test result fixture
    """
    result = test_result[0]
    options = test_result[1]
    _isinstance = builtins.isinstance
    called = False
    def mp_isinstance(t_object: Any, classinfo: type) -> bool:
        # Pretend no object is a TransformMixin; defer everything else to the
        # real isinstance saved above.
        if classinfo is TransformMixin:
            nonlocal called
            called = True
            return False
        return _isinstance(t_object, classinfo)
    monkeypatch.setattr(builtins, "isinstance", mp_isinstance)
    transformer = Transformer(result=result, options=options)
    with caplog.at_level(10):  # 10 == logging.DEBUG
        transformer.run()
    assert called
    logs = [record for record in caplog.records if record.module == "transformer"]
    assert len(logs) == 2
    log_0 = f"{transformer.FIX_NA_MSG} {TransformTests.match_id()}"
    assert logs[0].message == log_0
    assert logs[0].levelname == "DEBUG"
    log_1 = f"{transformer.DUMP_MSG} {TransformTests.rewrite_part()}"
    assert logs[1].message == log_1
    assert logs[1].levelname == "DEBUG"
def test_transform_no_tb(
    caplog: pytest.LogCaptureFixture,
    test_result: tuple[LintResult, Options],
) -> None:
    """Test the transformer does not traceback.

    :param caplog: Log capture fixture
    :param test_result: Test result fixture
    :raises TypeError: If the rule is not a TransformMixin
    """
    result = test_result[0]
    options = test_result[1]
    exception_msg = "FixFailure"
    def transform(*_args: Any, **_kwargs: Any) -> None:
        """Raise an exception for the transform call.

        :raises RuntimeError: Always
        """
        raise RuntimeError(exception_msg)
    # Swap the rule's transform for the always-raising stub above.
    if isinstance(result.matches[0].rule, TransformMixin):
        setattr(result.matches[0].rule, "transform", transform)  # noqa: B010
    else:
        err = "Rule is not a TransformMixin"
        raise TypeError(err)
    transformer = Transformer(result=result, options=options)
    with caplog.at_level(10):  # 10 == logging.DEBUG
        transformer.run()
    logs = [record for record in caplog.records if record.module == "transformer"]
    # Expected sequence: apply attempt, failure notice, the exception message,
    # the "please file an issue" note, and the final dump.
    assert len(logs) == 5
    log_0 = f"{transformer.FIX_APPLY_MSG} {TransformTests.match_id()}"
    assert logs[0].message == log_0
    assert logs[0].levelname == "DEBUG"
    log_1 = f"{transformer.FIX_FAILED_MSG} {TransformTests.match_id()}"
    assert logs[1].message == log_1
    assert logs[1].levelname == "ERROR"
    log_2 = exception_msg
    assert logs[2].message == log_2
    assert logs[2].levelname == "ERROR"
    log_3 = f"{transformer.FIX_ISSUE_MSG}"
    assert logs[3].message == log_3
    assert logs[3].levelname == "ERROR"
    log_4 = f"{transformer.DUMP_MSG} {TransformTests.rewrite_part()}"
    assert logs[4].message == log_4
    assert logs[4].levelname == "DEBUG"
def test_transform_applied(
    caplog: pytest.LogCaptureFixture,
    test_result: tuple[LintResult, Options],
) -> None:
    """Test the transformer is applied.

    :param caplog: Log capture fixture
    :param test_result: Test result fixture
    """
    result, options = test_result
    transformer = Transformer(result=result, options=options)
    with caplog.at_level(10):
        transformer.run()
    logs = [record for record in caplog.records if record.module == "transformer"]
    assert len(logs) == 3
    # Expected happy-path sequence: apply, applied, dump.
    expected = (
        (f"{transformer.FIX_APPLY_MSG} {TransformTests.match_id()}", "DEBUG"),
        (f"{transformer.FIX_APPLIED_MSG} {TransformTests.match_id()}", "DEBUG"),
        (f"{transformer.DUMP_MSG} {TransformTests.rewrite_part()}", "DEBUG"),
    )
    for record, (message, levelname) in zip(logs, expected):
        assert record.message == message
        assert record.levelname == levelname
def test_transform_not_enabled(
    caplog: pytest.LogCaptureFixture,
    test_result: tuple[LintResult, Options],
) -> None:
    """Test the transformer is not enabled.

    :param caplog: Log capture fixture
    :param test_result: Test result fixture
    """
    result, options = test_result
    # An empty write list disables every fix.
    options.write_list = []
    transformer = Transformer(result=result, options=options)
    with caplog.at_level(10):
        transformer.run()
    logs = [record for record in caplog.records if record.module == "transformer"]
    assert len(logs) == 2
    expected = (
        (f"{transformer.FIX_NE_MSG} {TransformTests.match_id()}", "DEBUG"),
        (f"{transformer.DUMP_MSG} {TransformTests.rewrite_part()}", "DEBUG"),
    )
    for record, (message, levelname) in zip(logs, expected):
        assert record.message == message
        assert record.levelname == levelname
def test_transform_not_applied(
    caplog: pytest.LogCaptureFixture,
    test_result: tuple[LintResult, Options],
) -> None:
    """Test the transformer is not applied.

    :param caplog: Log capture fixture
    :param test_result: Test result fixture
    :raises TypeError: If the rule is not a TransformMixin
    """
    result = test_result[0]
    options = test_result[1]
    called = False
    def transform(match: MatchError, *_args: Any, **_kwargs: Any) -> None:
        """Do not apply the transform.

        :param match: Match object
        :param _args: Arguments
        :param _kwargs: Keyword arguments
        """
        nonlocal called
        called = True
        # Leaving fixed=False signals the transform declined to fix the match.
        match.fixed = False
    # Swap the rule's transform for the declining stub above.
    if isinstance(result.matches[0].rule, TransformMixin):
        setattr(result.matches[0].rule, "transform", transform)  # noqa: B010
    else:
        err = "Rule is not a TransformMixin"
        raise TypeError(err)
    transformer = Transformer(result=result, options=options)
    with caplog.at_level(10):  # 10 == logging.DEBUG
        transformer.run()
    assert called
    logs = [record for record in caplog.records if record.module == "transformer"]
    assert len(logs) == 3
    log_0 = f"{transformer.FIX_APPLY_MSG} {TransformTests.match_id()}"
    assert logs[0].message == log_0
    assert logs[0].levelname == "DEBUG"
    log_1 = f"{transformer.FIX_NOT_APPLIED_MSG} {TransformTests.match_id()}"
    assert logs[1].message == log_1
    assert logs[1].levelname == "ERROR"
    log_2 = f"{transformer.DUMP_MSG} {TransformTests.rewrite_part()}"
    assert logs[2].message == log_2
    assert logs[2].levelname == "DEBUG"
| 19,690 | Python | .py | 540 | 28.601852 | 99 | 0.603317 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,183 | b_success.py | ansible_ansible-lint/test/local-content/test-roles-success/roles/role2/test_plugins/b_success.py | """A test plugin."""
def compatibility_in_test(element, container):
    """Check whether *element* occurs in *container*."""
    is_member = element in container
    return is_member
# pylint: disable=too-few-public-methods
class TestModule:
    """Expose the custom Jinja2 tests of this plugin."""

    @staticmethod
    def tests():
        """Map test names to their callables."""
        mapping = {"b_test_success": compatibility_in_test}
        return mapping
| 389 | Python | .py | 13 | 24.230769 | 58 | 0.644205 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,184 | test_module_3_success.py | ansible_ansible-lint/test/local-content/test-roles-success/roles/role3/library/test_module_3_success.py | #!/usr/bin/python
"""A module."""
from ansible.module_utils.basic import AnsibleModule
def main() -> None:
    """Execute module."""
    AnsibleModule({}).exit_json(msg="Hello 3!")


if __name__ == "__main__":
    main()
| 244 | Python | .py | 9 | 23.777778 | 52 | 0.630435 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,185 | test_module_1_success.py | ansible_ansible-lint/test/local-content/test-roles-success/roles/role1/library/test_module_1_success.py | #!/usr/bin/python
"""A module."""
from ansible.module_utils.basic import AnsibleModule
def main() -> None:
    """Execute module."""
    AnsibleModule({}).exit_json(msg="Hello 1!")


if __name__ == "__main__":
    main()
| 244 | Python | .py | 9 | 23.777778 | 52 | 0.630435 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,186 | test_module_2.py | ansible_ansible-lint/test/local-content/collections/ansible_collections/testns/test_collection/plugins/modules/test_module_2.py | #!/usr/bin/python
"""A module."""
from ansible.module_utils.basic import AnsibleModule
def main() -> None:
    """Execute module."""
    AnsibleModule({}).exit_json(msg="Hello 2!")


if __name__ == "__main__":
    main()
| 244 | Python | .py | 9 | 23.777778 | 52 | 0.630435 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,187 | test_filter.py | ansible_ansible-lint/test/local-content/collections/ansible_collections/testns/test_collection/plugins/filter/test_filter.py | """A filter plugin."""
def a_test_filter(a, b):
    """Join *a* and *b* with a colon into one string."""
    return "{}:{}".format(a, b)
# pylint: disable=too-few-public-methods
class FilterModule:
    """Expose the custom Jinja2 filters of this plugin."""

    @staticmethod
    def filters():
        """Map filter names to their callables."""
        return dict(test_filter=a_test_filter)
| 325 | Python | .py | 11 | 24.818182 | 50 | 0.624595 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,188 | b_failed.py | ansible_ansible-lint/test/local-content/test-roles-failed/roles/role2/test_plugins/b_failed.py | """A test plugin."""
def compatibility_in_test(element, container):
    """Check whether *element* occurs in *container*."""
    found = element in container
    return found
# pylint: disable=too-few-public-methods
class TestModule:
    """Expose the custom Jinja2 tests of this plugin."""

    @staticmethod
    def tests():
        """Map test names to their callables."""
        mapping = {"b_test_failed": compatibility_in_test}
        return mapping
| 391 | Python | .py | 13 | 24.384615 | 61 | 0.643432 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,189 | test_module_3_failed.py | ansible_ansible-lint/test/local-content/test-roles-failed/roles/role3/library/test_module_3_failed.py | #!/usr/bin/python
"""A module."""
from ansible.module_utils.basic import AnsibleModule
def main() -> None:
    """Execute module."""
    AnsibleModule({}).exit_json(msg="Hello 3!")


if __name__ == "__main__":
    main()
| 244 | Python | .py | 9 | 23.777778 | 52 | 0.630435 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,190 | test_module_1_failed.py | ansible_ansible-lint/test/local-content/test-roles-failed/roles/role1/library/test_module_1_failed.py | #!/usr/bin/python
"""A module."""
from ansible.module_utils.basic import AnsibleModule
def main() -> None:
    """Execute module."""
    AnsibleModule({}).exit_json(msg="Hello 1!")


if __name__ == "__main__":
    main()
| 244 | Python | .py | 9 | 23.777778 | 52 | 0.630435 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,191 | b_failed_complete.py | ansible_ansible-lint/test/local-content/test-roles-failed-complete/roles/role2/test_plugins/b_failed_complete.py | """A test plugin."""
def compatibility_in_test(a, b):
    """Check whether *a* occurs in *b*."""
    contained = a in b
    return contained
# pylint: disable=too-few-public-methods
class TestModule:
    """Expose the custom Jinja2 tests of this plugin."""

    @staticmethod
    def tests():
        """Map test names to their callables."""
        mapping = {"b_test_failed_complete": compatibility_in_test}
        return mapping
| 358 | Python | .py | 13 | 21.846154 | 60 | 0.605882 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,192 | test_module_3_failed_complete.py | ansible_ansible-lint/test/local-content/test-roles-failed-complete/roles/role3/library/test_module_3_failed_complete.py | #!/usr/bin/python
"""A module."""
from ansible.module_utils.basic import AnsibleModule
def main() -> None:
    """Execute module."""
    AnsibleModule({}).exit_json(msg="Hello 3!")


if __name__ == "__main__":
    main()
| 244 | Python | .py | 9 | 23.777778 | 52 | 0.630435 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,193 | test_module_1_failed_complete.py | ansible_ansible-lint/test/local-content/test-roles-failed-complete/roles/role1/library/test_module_1_failed_complete.py | #!/usr/bin/python
"""A module."""
from ansible.module_utils.basic import AnsibleModule
def main() -> None:
    """Execute module."""
    AnsibleModule({}).exit_json(msg="Hello 1!")


if __name__ == "__main__":
    main()
| 244 | Python | .py | 9 | 23.777778 | 52 | 0.630435 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,194 | test_inline_env_var.py | ansible_ansible-lint/test/rules/test_inline_env_var.py | """Tests for inline-env-var rule."""
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.inline_env_var import EnvVarsInCommandRule
from ansiblelint.testing import RunFromText
SUCCESS_PLAY_TASKS = """
- hosts: localhost
tasks:
- name: Actual use of environment
shell: echo $HELLO
environment:
HELLO: hello
- name: Use some key-value pairs
command: chdir=/tmp creates=/tmp/bobbins touch bobbins
- name: Commands can have flags
command: abc --xyz=def blah
- name: Commands can have equals in them
command: echo "==========="
- name: Commands with cmd
command:
cmd:
echo "-------"
- name: Command with stdin (ansible > 2.4)
command: /bin/cat
args:
stdin: "Hello, world!"
- name: Use argv to send the command as a list
command:
argv:
- /bin/echo
- Hello
- World
- name: Another use of argv
command:
args:
argv:
- echo
- testing
- name: Environment variable with shell
shell: HELLO=hello echo $HELLO
- name: Command with stdin_add_newline (ansible > 2.8)
command: /bin/cat
args:
stdin: "Hello, world!"
stdin_add_newline: false
- name: Command with strip_empty_ends (ansible > 2.8)
command: echo
args:
strip_empty_ends: false
"""
FAIL_PLAY_TASKS = """
- hosts: localhost
tasks:
- name: Environment variable with command
command: HELLO=hello echo $HELLO
- name: Typo some stuff
command: crates=/tmp/blah touch /tmp/blah
"""
def test_success() -> None:
    """Positive test for inline-env-var."""
    rules = RulesCollection()
    rules.register(EnvVarsInCommandRule())
    matches = RunFromText(rules).run_playbook(SUCCESS_PLAY_TASKS)
    assert len(matches) == 0
def test_fail() -> None:
    """Negative test for inline-env-var."""
    rules = RulesCollection()
    rules.register(EnvVarsInCommandRule())
    matches = RunFromText(rules).run_playbook(FAIL_PLAY_TASKS)
    assert len(matches) == 2
| 2,096 | Python | .py | 71 | 24.774648 | 65 | 0.674314 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,195 | test_package_latest.py | ansible_ansible-lint/test/rules/test_package_latest.py | """Tests for package-latest rule."""
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.package_latest import PackageIsNotLatestRule
from ansiblelint.runner import Runner
def test_package_not_latest_positive() -> None:
    """Positive test for package-latest."""
    rules = RulesCollection()
    rules.register(PackageIsNotLatestRule())
    runner = Runner("examples/playbooks/package-check-success.yml", rules=rules)
    assert runner.run() == []
def test_package_not_latest_negative() -> None:
    """Negative test for package-latest."""
    rules = RulesCollection()
    rules.register(PackageIsNotLatestRule())
    runner = Runner("examples/playbooks/package-check-failure.yml", rules=rules)
    matches = runner.run()
    assert len(matches) == 5
| 862 | Python | .py | 19 | 41.368421 | 67 | 0.754177 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,196 | test_role_names.py | ansible_ansible-lint/test/rules/test_role_names.py | """Test the RoleNames rule."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import pytest
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.role_name import RoleNames
from ansiblelint.runner import Runner
if TYPE_CHECKING:
from pathlib import Path
from _pytest.fixtures import SubRequest
ROLE_NAME_VALID = "test_role"
TASK_MINIMAL = """
- name: Some task
ping:
"""
ROLE_MINIMAL = {"tasks": {"main.yml": TASK_MINIMAL}}
ROLE_META_EMPTY = {"meta": {"main.yml": ""}}
ROLE_WITH_EMPTY_META = {**ROLE_MINIMAL, **ROLE_META_EMPTY}
PLAY_INCLUDE_ROLE = f"""
- hosts: all
roles:
- {ROLE_NAME_VALID}
"""
@pytest.fixture(name="test_rules_collection")
def fixture_test_rules_collection() -> RulesCollection:
    """Instantiate a roles collection for tests."""
    rules = RulesCollection()
    rules.register(RoleNames())
    return rules
def dict_to_files(parent_dir: Path, file_dict: dict[str, Any]) -> None:
"""Write a nested dict to a file and directory structure below parent_dir."""
for file, content in file_dict.items():
if isinstance(content, dict):
directory = parent_dir / file
directory.mkdir()
dict_to_files(directory, content)
else:
(parent_dir / file).write_text(content)
@pytest.fixture(name="playbook_path")
def fixture_playbook_path(request: SubRequest, tmp_path: Path) -> str:
"""Create a playbook with a role in a temporary directory."""
playbook_text = request.param[0]
role_name = request.param[1]
role_layout = request.param[2]
role_path = tmp_path / role_name
role_path.mkdir()
dict_to_files(role_path, role_layout)
play_path = tmp_path / "playbook.yml"
play_path.write_text(playbook_text)
return str(play_path)
@pytest.mark.parametrize(
("playbook_path", "messages"),
(
pytest.param(
(PLAY_INCLUDE_ROLE, ROLE_NAME_VALID, ROLE_WITH_EMPTY_META),
[],
id="ROLE_EMPTY_META",
),
),
indirect=("playbook_path",),
)
def test_role_name(
test_rules_collection: RulesCollection,
playbook_path: str,
messages: list[str],
) -> None:
"""Lint a playbook and compare the expected messages with the actual messages."""
runner = Runner(playbook_path, rules=test_rules_collection)
results = runner.run()
assert len(results) == len(messages)
results_text = str(results)
for message in messages:
assert message in results_text
| 2,536 | Python | .py | 73 | 29.972603 | 85 | 0.684124 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,197 | test_syntax_check.py | ansible_ansible-lint/test/rules/test_syntax_check.py | """Tests for syntax-check rule."""
from typing import Any
import pytest
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
@pytest.mark.parametrize(
("filename", "expected_results"),
(
pytest.param(
"examples/playbooks/conflicting_action.yml",
[
(
"syntax-check[specific]",
4,
7,
"conflicting action statements: ansible.builtin.debug, ansible.builtin.command",
),
],
id="0",
),
pytest.param(
"examples/playbooks/conflicting_action2.yml",
[
(
"parser-error",
1,
None,
"conflicting action statements: block, include_role",
),
(
"syntax-check[specific]",
5,
7,
"'include_role' is not a valid attribute for a Block",
),
],
id="1",
),
),
)
def test_get_ansible_syntax_check_matches(
default_rules_collection: RulesCollection,
filename: str,
expected_results: list[tuple[str, int, int, str]],
) -> None:
"""Validate parsing of ansible output."""
lintable = Lintable(
filename,
kind="playbook",
)
result = sorted(Runner(lintable, rules=default_rules_collection).run())
assert len(result) == len(expected_results)
for index, expected in enumerate(expected_results):
assert result[index].tag == expected[0]
assert result[index].lineno == expected[1]
assert result[index].column == expected[2]
assert str(expected[3]) in result[index].message
# We internally convert absolute paths returned by ansible into paths
# relative to current directory.
# assert result[index].filename.endswith("/conflicting_action.yml")
def test_empty_playbook(default_rules_collection: RulesCollection) -> None:
"""Validate detection of empty-playbook."""
lintable = Lintable("examples/playbooks/empty_playbook.yml", kind="playbook")
result = Runner(lintable, rules=default_rules_collection).run()
assert result[0].lineno == 1
# We internally convert absolute paths returned by ansible into paths
# relative to current directory.
assert result[0].filename.endswith("/empty_playbook.yml")
assert result[0].tag == "syntax-check[empty-playbook]"
assert result[0].message == "Empty playbook, nothing to do"
assert len(result) == 1
def test_extra_vars_passed_to_command(
default_rules_collection: RulesCollection,
config_options: Any,
) -> None:
"""Validate `extra-vars` are passed to syntax check command."""
config_options.extra_vars = {
"foo": "bar",
"complex_variable": ":{;\t$()",
}
lintable = Lintable("examples/playbooks/extra_vars.yml", kind="playbook")
result = Runner(lintable, rules=default_rules_collection).run()
assert not result
def test_syntax_check_role(default_rules_collection: RulesCollection) -> None:
"""Validate syntax check of a broken role."""
lintable = Lintable("examples/playbooks/roles/invalid_due_syntax", kind="role")
result = Runner(lintable, rules=default_rules_collection).run()
assert len(result) == 1, result
assert result[0].lineno == 2
assert result[0].filename == "examples/roles/invalid_due_syntax/tasks/main.yml"
assert result[0].tag == "syntax-check[specific]"
assert result[0].message == "no module/action detected in task."
| 3,727 | Python | .py | 93 | 31.344086 | 100 | 0.623377 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,198 | test_args.py | ansible_ansible-lint/test/rules/test_args.py | """Tests for args rule."""
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
def test_args_module_relative_import(default_rules_collection: RulesCollection) -> None:
"""Validate args check of a module with a relative import."""
lintable = Lintable(
"examples/playbooks/module_relative_import.yml",
kind="playbook",
)
result = Runner(lintable, rules=default_rules_collection).run()
assert len(result) == 1, result
assert result[0].lineno == 5
assert result[0].filename == "examples/playbooks/module_relative_import.yml"
assert result[0].tag == "args[module]"
assert result[0].message == "missing required arguments: name"
| 754 | Python | .py | 16 | 42.6875 | 88 | 0.734694 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |
30,199 | test_deprecated_module.py | ansible_ansible-lint/test/rules/test_deprecated_module.py | """Tests for deprecated-module rule."""
from pathlib import Path
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.deprecated_module import DeprecatedModuleRule
from ansiblelint.testing import RunFromText
MODULE_DEPRECATED = """
- name: Task example
docker:
debug: test
"""
def test_module_deprecated(tmp_path: Path) -> None:
"""Test for deprecated-module."""
collection = RulesCollection()
collection.register(DeprecatedModuleRule())
runner = RunFromText(collection)
results = runner.run_role_tasks_main(MODULE_DEPRECATED, tmp_path=tmp_path)
assert len(results) == 1
# based on version and blend of ansible being used, we may
# get a missing module, so we future proof the test
assert (
"couldn't resolve module" not in results[0].message
or "Deprecated module" not in results[0].message
)
| 877 | Python | .py | 23 | 34.217391 | 78 | 0.744405 | ansible/ansible-lint | 3,433 | 653 | 91 | GPL-3.0 | 9/5/2024, 5:14:14 PM (Europe/Amsterdam) |