Search is not available for this dataset
text stringlengths 75 104k |
|---|
def _coeff4(N, a0, a1, a2, a3):
"""a common internal function to some window functions with 4 coeffs
For the blackmna harris for instance, the results are identical to octave if N is odd
but not for even values...if n =0 whatever N is, the w(0) must be equal to a0-a1+a2-a3, which
is the case here, but not in octave..."""
if N == 1:
return ones(1)
n = arange(0, N)
N1 = N - 1.
w = a0 -a1*cos(2.*pi*n / N1) + a2*cos(4.*pi*n / N1) - a3*cos(6.*pi*n / N1)
return w |
def window_nuttall(N):
    r"""Nuttall tapering window
    :param N: window length
    .. math:: w(n) = a_0 - a_1 \cos\left(\frac{2\pi n}{N-1}\right)+ a_2 \cos\left(\frac{4\pi n}{N-1}\right)- a_3 \cos\left(\frac{6\pi n}{N-1}\right)
    with :math:`a_0 = 0.355768`, :math:`a_1 = 0.487396`, :math:`a_2=0.144232`
    and :math:`a_3=0.012604`.
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'nuttall', mindB=-80)
    .. seealso:: :func:`create_window`, :class:`Window`
    """
    coeffs = (0.355768, 0.487396, 0.144232, 0.012604)
    return _coeff4(N, *coeffs)
def window_blackman_nuttall(N):
    r"""Blackman Nuttall window
    Returns a minimum 4-term Blackman-Harris window, minimum in the sense
    that its maximum sidelobes are minimized. The coefficients differ from
    the Blackman-Harris window coefficients and produce slightly lower
    sidelobes.
    :param N: window length
    .. math:: w(n) = a_0 - a_1 \cos\left(\frac{2\pi n}{N-1}\right)+ a_2 \cos\left(\frac{4\pi n}{N-1}\right)- a_3 \cos\left(\frac{6\pi n}{N-1}\right)
    with :math:`a_0 = 0.3635819`, :math:`a_1 = 0.4891775`,
    :math:`a_2 = 0.1365995` and :math:`a_3 = 0.0106411`.
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'blackman_nuttall', mindB=-80)
    .. seealso:: :func:`create_window`, :class:`Window`
    """
    coeffs = (0.3635819, 0.4891775, 0.1365995, 0.0106411)
    return _coeff4(N, *coeffs)
def window_blackman_harris(N):
    r"""Blackman Harris window
    :param N: window length
    .. math:: w(n) = a_0 - a_1 \cos\left(\frac{2\pi n}{N-1}\right)+ a_2 \cos\left(\frac{4\pi n}{N-1}\right)- a_3 \cos\left(\frac{6\pi n}{N-1}\right)
    with :math:`a_0 = 0.35875`, :math:`a_1 = 0.48829`, :math:`a_2 = 0.14128`
    and :math:`a_3 = 0.01168`.
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'blackman_harris', mindB=-80)
    .. seealso:: :func:`create_window`, :class:`Window`
    """
    coeffs = (0.35875, 0.48829, 0.14128, 0.01168)
    return _coeff4(N, *coeffs)
def window_bohman(N):
    r"""Bohman tapering window
    :param N: window length
    .. math:: w(n) = (1-|x|) \cos (\pi |x|) + \frac{1}{\pi} \sin(\pi |x|)
    where x is a length N vector of linearly spaced values between -1 and 1.
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'bohman')
    .. seealso:: :func:`create_window`, :class:`Window`
    """
    ax = abs(linspace(-1, 1, N))
    return (1. - ax) * cos(pi * ax) + 1./pi * sin(pi * ax)
def window_tukey(N, r=0.5):
    """Tukey tapering window (or cosine-tapered window)

    :param N: window length
    :param r: defines the ratio between the constant section and the cosine
        section. It has to be between 0 and 1.

    The function returns a full box (rectangular window) for `r=0` and a
    Hanning window for `r=1`.

    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'tukey')
        window_visu(64, 'tukey', r=1)

    .. math:: 0.5 (1+cos(2pi/r (x-r/2))) for 0<=x<r/2
    .. math:: 0.5 (1+cos(2pi/r (x-1+r/2))) for x>=1-r/2

    .. seealso:: :func:`create_window`, :class:`Window`
    """
    assert 0 <= r <= 1, "r must be in [0,1]"
    if N == 1:
        return ones(1)
    if r == 0:
        # no cosine section at all: rectangular window
        return ones(N)
    elif r == 1:
        # fully tapered: reduces to the Hann window
        return window_hann(N)
    else:
        from numpy import flipud, concatenate, where
        # cosine-tapered window: rising taper, flat middle, falling taper
        x = linspace(0, 1, N)
        x1 = where(x < r/2.)
        w = 0.5*(1 + cos(2*pi/r*(x[x1[0]] - r/2)))
        w = concatenate((w, ones(N - len(w)*2), flipud(w)))
        return w
def window_parzen(N):
    r"""Parzen tapering window (also known as de la Vallee Poussin window)
    :param N: window length
    Parzen windows are piecewise cubic approximations of Gaussian windows.
    Parzen window sidelobes fall off as :math:`1/\omega^4`.
    if :math:`0\leq|x|\leq (N-1)/4`:
    .. math:: w(n) = 1-6 \left( \frac{|n|}{N/2} \right)^2 +6 \left( \frac{|n|}{N/2}\right)^3
    if :math:`(N-1)/4\leq|x|\leq (N-1)/2`
    .. math:: w(n) = 2 \left(1- \frac{|n|}{N/2}\right)^3
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'parzen')
    .. seealso:: :func:`create_window`, :class:`Window`
    """
    from numpy import where, concatenate
    samples = linspace(-(N-1)/2., (N-1)/2., N)
    edge = (N-1)/4.
    half = N/2.
    inner = samples[where(abs(samples) <= edge)[0]]
    right = samples[where(samples > edge)[0]]
    left = samples[where(samples < -edge)[0]]
    # cubic polynomial in the central section, cubic decay on both tails
    w_inner = 1. - 6.*(abs(inner)/half)**2 + 6*(abs(inner)/half)**3
    w_right = 2.*(1 - abs(right)/half)**3
    w_left = 2.*(1 - abs(left)/half)**3
    return concatenate((w_left, w_inner, w_right))
def window_flattop(N, mode='symmetric', precision=None):
    r"""Flat-top tapering window
    Returns a symmetric or periodic flat top window.
    :param N: window length
    :param mode: way the data are normalised. If mode is *symmetric*, then
        divide n by N-1. If mode is *periodic*, divide by N, to be
        consistent with octave code. When using windows for filter design,
        the *symmetric* mode should be used (default). When using windows
        for spectral analysis, the *periodic* mode should be used.
    :param precision: if set to 'octave', use the less precise octave
        coefficients for comparison purposes.
    The mathematical form of the flat-top window in the symmetric case is:
    .. math:: w(n) = a_0
        - a_1 \cos\left(\frac{2\pi n}{N-1}\right)
        + a_2 \cos\left(\frac{4\pi n}{N-1}\right)
        - a_3 \cos\left(\frac{6\pi n}{N-1}\right)
        + a_4 \cos\left(\frac{8\pi n}{N-1}\right)
    with :math:`a_0=0.21557895`, :math:`a_1=0.41663158`,
    :math:`a_2=0.277263158`, :math:`a_3=0.083578947`, :math:`a_4=0.006947368`.
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'flattop')
    .. seealso:: :func:`create_window`, :class:`Window`
    """
    assert mode in ['periodic', 'symmetric']
    t = arange(0, N)
    # FIXME: N=1 for mode = periodic ?
    if mode == 'periodic':
        x = 2*pi*t/float(N)
    else:
        if N == 1:
            return ones(1)
        x = 2*pi*t/float(N-1)
    if precision == 'octave':
        # same as below but less precise, kept to compare with octave
        d = 4.6402
        a0, a1, a2, a3, a4 = 1./d, 1.93/d, 1.29/d, 0.388/d, 0.0322/d
    else:
        a0, a1, a2, a3, a4 = (0.21557895, 0.41663158, 0.277263158,
                              0.083578947, 0.006947368)
    return a0 - a1*cos(x) + a2*cos(2*x) - a3*cos(3*x) + a4*cos(4*x)
def window_taylor(N, nbar=4, sll=-30):
    """Taylor tapering window

    Taylor windows allow you to make tradeoffs between the mainlobe width
    and sidelobe level (sll). Implemented as described by Carrara, Goodman,
    and Majewski in 'Spotlight Synthetic Aperture Radar: Signal Processing
    Algorithms', pages 512-513.

    :param N: window length
    :param float nbar: number of nearly constant-level sidelobes adjacent
        to the mainlobe
    :param float sll: desired peak sidelobe level in dB (negative)

    The default values give equal height sidelobes (nbar) and maximum
    sidelobe level (sll).

    .. seealso:: :func:`create_window`, :class:`Window`
    """
    B = 10**(-sll/20)
    A = log(B + sqrt(B**2 - 1))/pi
    s2 = nbar**2 / (A**2 + (nbar - 0.5)**2)
    ma = arange(1, nbar)
    def calc_Fm(m):
        # coefficient of the m'th cosine term of the Taylor series
        numer = (-1)**(m+1) * prod(1-m**2/s2/(A**2 + (ma - 0.5)**2))
        denom = 2 * prod([1-m**2/j**2 for j in ma if j != m])
        return numer/denom
    Fm = array([calc_Fm(m) for m in ma])
    def W(n):
        return 2 * np.sum(Fm * cos(2*pi*ma*(n-N/2 + 1/2)/N)) + 1
    w = array([W(n) for n in range(N)])
    # normalize to unit peak (note that this is not described in the
    # original text)
    scale = W((N-1)/2)
    w /= scale
    return w
def window_riesz(N):
    r"""Riesz tapering window
    :param N: window length
    .. math:: w(n) = 1 - \left| \frac{n}{N/2} \right|^2
    with :math:`-N/2 \leq n \leq N/2`.
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'riesz')
    .. seealso:: :func:`create_window`, :class:`Window`
    """
    samples = linspace(-N/2., N/2., N)
    return 1 - abs(samples/(N/2.))**2.
def window_riemann(N):
    r"""Riemann tapering window

    :param int N: window length

    .. math:: w(n) = \frac{\sin\left(\frac{2\pi n}{N}\right)}{\frac{2\pi n}{N}} = \mathrm{sinc}\left(\frac{2n}{N}\right)

    with :math:`-N/2 \leq n \leq N/2`.

    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'riemann')

    .. seealso:: :func:`create_window`, :class:`Window`
    """
    n = linspace(-N/2., (N)/2., N)
    # np.sinc(x) = sin(pi x)/(pi x) and handles x=0 (returns 1); the naive
    # ratio sin(y)/y produced NaN at the centre sample when N is odd
    return np.sinc(2. * n / float(N))
def window_poisson(N, alpha=2):
    r"""Poisson tapering window
    :param int N: window length
    :param float alpha: exponential decay parameter
    .. math:: w(n) = \exp^{-\alpha \frac{|n|}{N/2} }
    with :math:`-N/2 \leq n \leq N/2`.
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'poisson')
        window_visu(64, 'poisson', alpha=3)
        window_visu(64, 'poisson', alpha=4)
    .. seealso:: :func:`create_window`, :class:`Window`
    """
    samples = linspace(-N/2., N/2., N)
    return exp(-alpha * abs(samples) / (N/2.))
def window_poisson_hanning(N, alpha=2):
    r"""Hann-Poisson tapering window
    The window is the pointwise product of the Hanning and Poisson windows;
    the parameter **alpha** is the Poisson parameter.
    :param int N: window length
    :param float alpha: parameter of the Poisson window
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'poisson_hanning', alpha=0.5)
        window_visu(64, 'poisson_hanning', alpha=1)
        window_visu(64, 'poisson_hanning')
    .. seealso:: :func:`window_poisson`, :func:`window_hann`
    """
    return window_hann(N) * window_poisson(N, alpha=alpha)
def window_cauchy(N, alpha=3):
    r"""Cauchy tapering window
    :param int N: window length
    :param float alpha: parameter of the Cauchy window
    .. math:: w(n) = \frac{1}{1+\left(\frac{\alpha n}{N/2}\right)^2}
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'cauchy', alpha=3)
        window_visu(64, 'cauchy', alpha=4)
        window_visu(64, 'cauchy', alpha=5)
    .. seealso:: :func:`window_poisson`, :func:`window_hann`
    """
    samples = linspace(-N/2., N/2., N)
    return 1. / (1. + (alpha * samples / (N/2.))**2)
def compute_response(self, **kargs):
    """Compute the frequency response of the window data.

    :param norm: True by default. If True, normalise the magnitude spectrum
        by its maximum value.
    :param int NFFT: total length of the zero-padded FFT (2048 by default;
        if less than the data length, NFFT is set to twice the data length).

    The response is stored in :attr:`response`.

    .. note:: Units are dB (20 log10) since we plot the amplitude frequency
        response, not the power.
    """
    from numpy.fft import fft, fftshift
    norm = kargs.get('norm', self.norm)
    # zero padding: default is max(2048, data length * 2)
    NFFT = kargs.get('NFFT', 2048)
    if NFFT < len(self.data):
        NFFT = self.data.size * 2
    # modulus of the centred (fftshift'ed) spectrum
    A = fft(self.data, NFFT)
    mag = abs(fftshift(A))
    # optionally normalise so the peak sits at 0 dB
    if norm is True:
        mag = mag / max(mag)
    # NOTE(review): `stools` is not defined in this block -- presumably a
    # helper module providing log10; confirm against the file's imports.
    # Factor 20: amplitude response (20 log10), not power (10 log10).
    response = 20. * stools.log10(mag)
    # name-mangled attribute, presumably exposed via a `response` property
    self.__response = response
def plot_frequencies(self, mindB=None, maxdB=None, norm=True):
    """Plot the window in the frequency domain

    :param mindB: change the default lower y bound
    :param maxdB: change the default upper y bound
    :param bool norm: if True, normalise the frequency response.

    .. plot::
        :width: 80%
        :include-source:
        from spectrum.window import Window
        w = Window(64, name='hamming')
        w.plot_frequencies()
    """
    from pylab import plot, title, xlim, grid, ylim, xlabel, ylabel
    # recompute the response with the requested normalisation
    self.compute_response(norm=norm)
    plot(self.frequencies, self.response)
    title("ENBW=%2.1f" % (self.enbw))
    ylabel('Frequency response (dB)')
    xlabel('Fraction of sampling frequency')
    # define the plot limits
    xlim(-0.5, 0.5)
    y0, y1 = ylim()
    # explicit None test so that mindB=0 (a valid bound) is honoured;
    # the previous truthiness test silently ignored a zero value
    if mindB is not None:
        y0 = mindB
    if maxdB is not None:
        y1 = maxdB
    else:
        y1 = max(self.response)
    ylim(y0, y1)
    grid(True)
def plot_window(self):
    """Plot the window in the time domain

    .. plot::
        :width: 80%
        :include-source:
        from spectrum.window import Window
        w = Window(64, name='hamming')
        w.plot_window()
    """
    from pylab import plot, xlim, grid, title, ylabel, axis
    # abscissa normalised to [0, 1] regardless of the window length
    t = linspace(0, 1, self.N)
    xlim(0, 1)
    plot(t, self.data)
    grid(True)
    title('%s Window (%s points)' % (self.name.capitalize(), self.N))
    ylabel('Amplitude')
    # final axis call fixes both x and y ranges
    axis([0, 1, 0, 1.1])
def plot_time_freq(self, mindB=-100, maxdB=None, norm=True,
        yaxis_label_position="right"):
    """Plotting method to plot both time and frequency domain results.

    See :meth:`plot_frequencies` for the optional arguments.

    .. plot::
        :width: 80%
        :include-source:
        from spectrum.window import Window
        w = Window(64, name='hamming')
        w.plot_time_freq()
    """
    # tight_layout must be imported explicitly: previously it was not,
    # so the try block always swallowed a NameError and did nothing
    from pylab import subplot, gca, tight_layout
    subplot(1, 2, 1)
    self.plot_window()
    subplot(1, 2, 2)
    self.plot_frequencies(mindB=mindB, maxdB=maxdB, norm=norm)
    if yaxis_label_position == "left":
        # best effort: tight_layout may fail on some backends
        try:
            tight_layout()
        except Exception:
            pass
    else:
        ax = gca()
        ax.yaxis.set_label_position("right")
def TOEPLITZ(T0, TC, TR, Z):
    """Solve the general Toeplitz linear system TX = Z.

    :param T0: zero-lag value (the diagonal of T)
    :param TC: first column of T off the diagonal (r1 to rN)
    :param TR: first row of T off the diagonal (r1 to rN)
    :param Z: right-hand side vector of length N+1
    :return: X, the solution of TX = Z
    :raises ValueError: if T0 is zero or the matrix is singular

    Requires 3M^2+M operations instead of M^3 with Gaussian elimination.

    .. warning:: not used right now
    """
    assert len(TC) > 0
    assert len(TC) == len(TR)
    M = len(TC)
    X = numpy.zeros(M+1, dtype=complex)
    A = numpy.zeros(M, dtype=complex)
    B = numpy.zeros(M, dtype=complex)
    P = T0
    # the duplicated zero check present in the original has been removed
    if P == 0:
        raise ValueError("P must be different from zero")
    X[0] = Z[0] / T0
    for k in range(0, M):
        save1 = TC[k]
        save2 = TR[k]
        beta = X[0] * TC[k]
        if k == 0:
            temp1 = -save1 / P
            temp2 = -save2 / P
        else:
            for j in range(0, k):
                save1 = save1 + A[j] * TC[k-j-1]
                save2 = save2 + B[j] * TR[k-j-1]
                beta = beta + X[j+1] * TC[k-j-1]
            temp1 = -save1 / P
            temp2 = -save2 / P
        P = P * (1. - (temp1 * temp2))
        if P <= 0:
            raise ValueError("singular matrix")
        A[k] = temp1
        B[k] = temp2
        alpha = (Z[k+1] - beta) / P
        if k == 0:
            X[k+1] = alpha
            for j in range(0, k+1):
                X[j] = X[j] + alpha * B[k-j]
            continue
        # joint in-place update of the forward (A) and backward (B) vectors
        for j in range(0, k):
            kj = k-j-1
            save1 = A[j]
            A[j] = save1 + temp1 * B[kj]
            B[kj] = B[kj] + temp2 * save1
        X[k+1] = alpha
        for j in range(0, k+1):
            X[j] = X[j] + alpha * B[k-j]
    return X
def HERMTOEP(T0, T, Z):
    """Solve Tx = Z by a variation of the Levinson algorithm, where T is a
    complex Hermitian Toeplitz matrix.

    :param T0: zero-lag value (the diagonal of T)
    :param T: lags r1 to rN
    :param Z: right-hand side vector of length N+1
    :return: X, the solution vector

    Used by the eigen PSD method.
    """
    assert len(T) > 0
    M = len(T)
    X = numpy.zeros(M+1, dtype=complex)
    A = numpy.zeros(M, dtype=complex)
    P = T0
    if P == 0:
        raise ValueError("P must be different from zero")
    X[0] = Z[0] / T0
    for k in range(M):
        acc = T[k]
        beta = X[0] * T[k]
        if k > 0:
            for j in range(k):
                acc = acc + A[j] * T[k-j-1]
                beta = beta + X[j+1] * T[k-j-1]
        refl = -acc / P
        P = P * (1. - (refl.real**2 + refl.imag**2))
        if P <= 0:
            raise ValueError("singular matrix")
        A[k] = refl
        alpha = (Z[k+1] - beta) / P
        if k == 0:
            X[1] = alpha
            X[0] = X[0] + alpha * A[0].conjugate()
            continue
        # in-place order update of A over its two symmetric halves
        for j in range((k+1)//2):
            kj = k-j-1
            tmp = A[j]
            A[j] = tmp + refl * A[kj].conjugate()
            if j != kj:
                A[kj] = A[kj] + refl * tmp.conjugate()
        X[k+1] = alpha
        for j in range(k+1):
            X[j] = X[j] + alpha * A[k-j].conjugate()
    return X
def get_short_module_name(module_name, obj_name):
    """Return the shortest module path from which *obj_name* is importable."""
    parts = module_name.split('.')
    short_name = module_name
    for depth in range(len(parts) - 1, 0, -1):
        short_name = '.'.join(parts[:depth])
        try:
            exec('from %s import %s' % (short_name, obj_name))
        except ImportError:
            # too short: fall back to the last working module name
            short_name = '.'.join(parts[:(depth + 1)])
            break
    return short_name
def identify_names(code):
    """Build a codeobj summary by identifying and resolving used names
    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    finder = NameFinder()
    finder.visit(ast.parse(code))
    summary = {}
    for written_name, resolved in finder.get_mapping():
        # written_name is as it appears in the file (e.g. np.asarray);
        # resolved includes the full import path (e.g. numpy.asarray)
        module, attribute = resolved.rsplit('.', 1)
        summary[written_name] = {
            'name': attribute,
            'module': module,
            'module_short': get_short_module_name(module, attribute),
        }
    return summary
def _thumbnail_div(full_dir, fname, snippet, is_backref=False):
    """Generate the RST snippet that places a thumbnail in a gallery."""
    # fname[:-3] strips the '.py' extension
    thumb_path = os.path.join(full_dir, 'images', 'thumb',
                              'sphx_glr_%s_thumb.png' % fname[:-3])
    anchor = os.path.join(full_dir, fname).replace(os.path.sep, '_')
    tmpl = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE
    return tmpl.format(snippet=snippet, thumbnail=thumb_path, ref_name=anchor)
def modcovar_marple (X,IP):
    """Fast algorithm for the solution of the modified covariance least
    squares normal equations.

    This implementation is based on [Marple]_. This code is far more
    complicated and slower than :func:`modcovar`, which is now the official
    version. See :func:`modcovar` for a detailed description of the
    Modified Covariance method.

    :param X: Array of complex data samples X(1) through X(N)
    :param int IP: Order of linear prediction model (integer)
    :return:
        * A - Array of complex linear prediction coefficients
        * P - Real linear prediction variance at order IP
        * Pv - list of prediction variances at the intermediate orders

    :raises ValueError: on numerical ill-conditioning, i.e. when P becomes
        non-positive or when DELTA/GAMMA leave the range 0 to 1.

    :validation: the AR parameters are the same as those returned by
        a completely different function :func:`modcovar`.

    .. note:: validation: results similar to the test example in Marple but
        start to differ for ip~8, with a ratio of 0.975 for ip=15, probably
        due to precision.

    :References: [Marple]_
    """
    Pv = []
    N = len(X)
    A = np.zeros(N, dtype=complex)
    D = np.zeros(N, dtype=complex)
    C = np.zeros(N, dtype=complex)
    R = np.zeros(N, dtype=complex)
    # Initialization: total energy, with the two end samples kept apart
    R1=0.
    for K in range(1, N-1):
        R1=R1 + 2.*(X[K].real**2 + X[K].imag**2)
    R2 = X[0].real**2 + X[0].imag**2
    R3 = X[N-1].real**2 + X[N-1].imag**2
    R4 = 1. / (R1 + 2. * (R2 + R3))
    P = R1 + R2 + R3
    DELTA = 1. - R2 * R4
    GAMMA = 1. - R3 * R4
    LAMBDA = (X[0] * X[N-1]).conjugate()*R4
    C[0] = X[N-1] * R4
    D[0] = X[0].conjugate() * R4
    M = 0
    if (IP ==0):
        # order zero: only the normalised variance is defined
        P = (.5*R1+R2+R3)/float(N)
        return [], P, []
    # Main loop over the model order
    for M in range(0, IP):
        SAVE1 = 0+0j
        for K in range(M+1, N):
            SAVE1 = SAVE1 + X[K]*X[K-M-1].conjugate()
        SAVE1 *= 2.
        R[M] = SAVE1.conjugate()
        THETA = X[N-1]*D[0]
        PSI=X[N-1]*C[0]
        XI = X[0].conjugate() * D[0]
        if M==0:
            pass
        else:
            for K in range(0, M):
                THETA=THETA+X[N-K-2]*D[K+1] # Eq. (8.D.45)
                PSI = PSI + X[N-K-2]*C[K+1] # Eq. (8.D.45)
                XI = XI + X[K+1].conjugate() * D[K+1] # Eq. (8.D.45)
                R[K] = R[K]-X[N-M-1] * X[N+1-M+K-1].conjugate() - X[M].conjugate() * X[M-K-1] # Eq. (8.D.37)
                SAVE1=SAVE1+R[K].conjugate()*A[M-K-1] # Eq. (8.D.24)
        # Order update of A vector
        C1 = -SAVE1/P
        A[M]=C1 # Eq. (8.D.23)
        P=P*(1.-C1.real**2-C1.imag**2) # Eq. (8.D.25)
        if M==0:
            pass
        else:
            # in-place update over the two symmetric halves of A
            for K in range(0, (M+1)//2):
                MK = M-K-1
                SAVE1=A[K]
                A[K]=SAVE1+C1*A[MK].conjugate() # Eq. (8.D.22)
                if (K != MK):
                    A[MK]=A[MK]+C1*(SAVE1.conjugate()) # Eq. (8.D.22)
        if M+1 == IP:
            # requested order reached: normalise the variance and return
            P=.5*P/float(N-M-1)
            Pv.append(P)
            return A, P, Pv
        else:
            Pv.append(.5*P/float(N-M-1))
        # Time update of C,D vectors and GAMMA,DELTA,LAMBDA scalars
        R1=1./(DELTA*GAMMA-(LAMBDA.real)**2-(LAMBDA.imag)**2)
        C1=(THETA*(LAMBDA.conjugate())+PSI*DELTA)*R1
        C2=(PSI*LAMBDA+THETA*GAMMA)*R1
        C3=(XI*(LAMBDA.conjugate())+THETA*DELTA)*R1
        C4=(THETA*LAMBDA+XI*GAMMA)*R1
        for K in range(0, (M)//2+1):
            MK=M-K
            SAVE1=C[K].conjugate()
            SAVE2=D[K].conjugate()
            SAVE3=C[MK].conjugate()
            SAVE4=D[MK].conjugate()
            C[K]=C[K]+C1*SAVE3+C2*SAVE4 # Eq. (8.D.43)
            D[K]=D[K]+C3*SAVE3+C4*SAVE4 # Eq. (8.D.44)
            if K != MK:
                C[MK]=C[MK]+C1*SAVE1+C2*SAVE2 # Eq. (8.D.43)
                D[MK]=D[MK]+C3*SAVE1+C4*SAVE2 # Eq. (8.D.44)
        R2=PSI.real**2+PSI.imag**2
        R3=THETA.real**2+THETA.imag**2
        R4=XI.real**2+XI.imag**2
        R5=GAMMA-(R2*DELTA+R3*GAMMA+2.*np.real(PSI*LAMBDA*THETA.conjugate()))*R1
        R2=DELTA-(R3*DELTA+R4*GAMMA+2.*np.real(THETA*LAMBDA*XI.conjugate()))*R1
        GAMMA=R5 # Eq. (8.D.46)
        DELTA=R2 # Eq. (8.D.47)
        LAMBDA=LAMBDA+C3*PSI.conjugate()+C4*THETA.conjugate() # Eq. (8.D.48)
        # numerical ill-conditioning checks
        if P <= 0.:
            raise ValueError('Found an invalid negative value for P.')
        if (DELTA > 0. and DELTA <= 1. and GAMMA > 0. and GAMMA <=1.):
            pass
        else:
            raise ValueError('Found an invalid DELTA or GAMMA value.')
        # Time update of A vector; order updates of C,D vectors and GAMMA,
        # DELTA,LAMBDA scalars
        R1=1./P
        R2=1./(DELTA*GAMMA-LAMBDA.real**2-LAMBDA.imag**2) # Eq. (8.D.41)
        # forward (EF) and backward (EB) prediction errors
        EF=X[M+1]
        EB=X[N-M-2]
        for K in range(0, M+1):
            EF=EF+A[K]*X[M-K] # Eq. (8.D.1)
            EB=EB+A[K].conjugate()*X[N-M+K-1] # Eq. (8.D.2)
        C1=EB*R1 # Eq. (8.D.28)
        C2=EF.conjugate()*R1 # Eq. (8.D.29)
        C3=(EB.conjugate()*DELTA+EF*LAMBDA)*R2
        C4=(EF*GAMMA+(EB*LAMBDA).conjugate())*R2
        for K in range(M, -1, -1):
            SAVE1=A[K]
            A[K]=SAVE1+C3*C[K]+C4*D[K] # Eq. (8.D.38)
            C[K+1]=C[K]+C1*SAVE1 # Eq. (8.D.26)
            D[K+1]=D[K]+C2*SAVE1 # Eq. (8.D.27)
        C[0]=C1
        D[0]=C2
        R3=EB.real**2+EB.imag**2
        R4=EF.real**2+EF.imag**2
        P=P-(R3*DELTA+R4*GAMMA+2.*np.real(EF*EB*LAMBDA))*R2 # Eq. (8.D.42)
        DELTA=DELTA-R4*R1 # Eq. (8.D.32)
        GAMMA=GAMMA-R3*R1 # Eq. (8.D.33)
        LAMBDA=LAMBDA+(EF*EB).conjugate()*R1 # Eq. (8.D.35)
        # numerical ill-conditioning checks after the time update
        if (P > 0.):
            pass
        else:
            raise ValueError("Found a invalid negative P value ")
        if (DELTA > 0. and DELTA <= 1. and GAMMA > 0. and GAMMA <= 1.):
            pass
        else:
            raise ValueError("Found an invalid negative GAMMA or DELTA value ")
def modcovar(x, order):
    """Simple and fast implementation of the modified covariance AR estimate

    This code is 10 times faster than :func:`modcovar_marple` and, more
    importantly, only 10 lines of code, compared to ~200 for
    :func:`modcovar_marple`.

    :param x: Array of complex data samples
    :param int order: Order of linear prediction model
    :return:
        * a - Array of complex linear prediction coefficients
        * e - Real linear prediction variance at order *order*

    .. plot::
        :include-source:
        :width: 80%

        from spectrum import modcovar, marple_data, arma2psd, cshift
        from pylab import log10, linspace, axis, plot
        a, p = modcovar(marple_data, 15)
        PSD = arma2psd(a)
        PSD = cshift(PSD, len(PSD)/2) # switch positive and negative freq
        plot(linspace(-0.5, 0.5, 4096), 10*log10(PSD/max(PSD)))
        axis([-0.5,0.5,-60,0])

    .. seealso:: :class:`~spectrum.modcovar.pmodcovar`

    :validation: the AR parameters are the same as those returned by
        a completely different function :func:`modcovar_marple`.

    :References: Mathworks
    """
    from spectrum import corrmtx
    import scipy.linalg
    X = corrmtx(x, order, 'modified')
    # NOTE(review): numpy.matrix is deprecated; migrating to ndarray needs a
    # check of the shape returned by corrmtx -- left unchanged here.
    Xc = np.matrix(X[:,1:])
    X1 = np.array(X[:,0])
    # Coefficients estimated via the covariance method.
    # lstsq rather than solve because Xc is not a square matrix.
    a, residues, rank, singular_values = scipy.linalg.lstsq(-Xc, X1)
    # Estimate the input white noise variance
    Cz = np.dot(X1.conj().transpose(), Xc)
    e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a)
    assert e.imag < 1e-4, 'wierd behaviour'
    e = float(e.real)  # the imaginary part should be negligible
    return a, e
def aryule(X, order, norm='biased', allow_singularity=True):
    r"""Compute AR coefficients using the Yule-Walker method

    :param X: Array of complex data values, X(1) to X(N)
    :param int order: Order of autoregressive process to be fitted (integer)
    :param str norm: Use a biased or unbiased correlation.
    :param bool allow_singularity: passed to :func:`LEVINSON`
    :return:
        * AR coefficients (complex)
        * variance of white noise (Real)
        * reflection coefficients for use in lattice filter

    .. rubric:: Description:

    The Yule-Walker method returns the polynomial A corresponding to the
    AR parametric signal model estimate of vector X using the Yule-Walker
    (autocorrelation) method. The autocorrelation may be computed using a
    **biased** or **unbiased** estimation. In practice, the biased estimate
    of the autocorrelation is used for the unknown true autocorrelation:
    an unbiased estimate may result in a nonpositive-definite
    autocorrelation matrix, whereas a biased estimate leads to a stable AR
    filter. The Yule-Walker equations are solved by means of the
    Levinson-Durbin recursion.

    The outputs consist of the AR coefficients, the estimated variance of
    the white noise process, and the reflection coefficients. These outputs
    can be used to estimate the optimal order via :mod:`~spectrum.criteria`.

    .. rubric:: Examples:

    From a known AR process of order 4, we estimate those AR parameters
    using the aryule function.

    .. doctest::

        >>> from scipy.signal import lfilter
        >>> from spectrum import *
        >>> from numpy.random import randn
        >>> A =[1, -2.7607, 3.8106, -2.6535, 0.9238]
        >>> noise = randn(1, 1024)
        >>> y = lfilter([1], A, noise);
        >>> #filter a white noise input to create AR(4) process
        >>> [ar, var, reflec] = aryule(y[0], 4)
        >>> # ar should contains values similar to A

    The PSD estimate of a data sample is computed and plotted as follows:

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import *
        from pylab import *
        ar, P, k = aryule(marple_data, 15, norm='biased')
        psd = arma2psd(ar)
        plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd/max(psd)))
        axis([-0.5, 0.5, -60, 0])

    .. note:: The outputs have been double checked against (1) octave
        outputs (octave has norm='biased' by default) and (2) Marple test
        code.

    .. seealso:: This function uses :func:`~spectrum.levinson.LEVINSON` and
        :func:`~spectrum.correlation.CORRELATION`. See the
        :mod:`~spectrum.criteria` module for criteria to automatically
        select the AR order.

    :References: [Marple]_
    """
    assert norm in ['biased', 'unbiased']
    autocorr = CORRELATION(X, maxlags=order, norm=norm)
    return LEVINSON(autocorr, allow_singularity=allow_singularity)
def LEVINSON(r, order=None, allow_singularity=False):
    r"""Levinson-Durbin recursion.

    Find the coefficients of a length(r)-1 order autoregressive linear
    process.

    :param r: autocorrelation sequence of length N + 1 (first element being
        the zero-lag autocorrelation)
    :param order: requested order of the autoregressive coefficients.
        default is N.
    :param allow_singularity: false by default. Other implementations may
        be True (e.g., octave)
    :return:
        * the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)`
        * the prediction errors
        * the `N` reflections coefficients values
    :raises ValueError: if the matrix is singular and *allow_singularity*
        is False

    This algorithm solves the set of complex linear simultaneous equations
    using the Levinson algorithm.

    .. math::
        \bold{T}_M \left( \begin{array}{c} 1 \\ \bold{a}_M \end{array} \right) =
        \left( \begin{array}{c} \rho_M \\ \bold{0}_M  \end{array} \right)

    where :math:`\bold{T}_M` is a Hermitian Toeplitz matrix with elements
    :math:`T_0, T_1, \dots ,T_M`. The input r is typically a vector of
    autocorrelation coefficients where lag 0 is the first element
    :math:`r_1`.

    .. note:: Solving these equations by Gaussian elimination would
        require :math:`M^3` operations whereas the Levinson algorithm
        requires :math:`M^2+M` additions and :math:`M^2+M` multiplications.

    .. doctest::

        >>> import numpy; from spectrum import LEVINSON
        >>> T = numpy.array([3., -2+0.5j, .7-1j])
        >>> a, e, k = LEVINSON(T)
    """
    T0 = numpy.real(r[0])
    T = r[1:]
    M = len(T)
    if order is not None:
        assert order <= M, 'order must be less than size of the input data'
        M = order
    realdata = numpy.isrealobj(r)
    dtype = float if realdata else complex
    A = numpy.zeros(M, dtype=dtype)
    ref = numpy.zeros(M, dtype=dtype)
    P = T0
    for k in range(M):
        # inner product of the current coefficients with the correlations
        save = T[k]
        for j in range(k):
            save = save + A[j] * T[k-j-1]
        temp = -save / P
        # update the prediction error power
        if realdata:
            P = P * (1. - temp**2)
        else:
            P = P * (1. - (temp.real**2 + temp.imag**2))
        if P <= 0 and not allow_singularity:
            raise ValueError("singular matrix")
        A[k] = temp
        ref[k] = temp  # save the reflection coefficient at each step
        if k == 0:
            continue
        # in-place order update over the two symmetric halves of A
        khalf = (k+1)//2
        for j in range(khalf):
            kj = k-j-1
            save = A[j]
            if realdata:
                A[j] = save + temp * A[kj]
                if j != kj:
                    A[kj] += temp * save
            else:
                A[j] = save + temp * A[kj].conjugate()
                if j != kj:
                    A[kj] = A[kj] + temp * save.conjugate()
    return A, P, ref
def rlevinson(a, efinal):
    """Compute the autocorrelation coefficients R from the prediction
    polynomial A and the final prediction error Efinal, using the stepdown
    (reverse Levinson) algorithm. Works for real or complex data.

    :param a: prediction polynomial; should be minimum phase and A(1) is
        assumed to be unity
    :param efinal: final prediction error
    :return:
        * R, the autocorrelation coefficients
        * U, prediction coefficient matrix
        * kr, reflection coefficients
        * e, prediction errors at orders 1..P
    :raises ValueError: if the polynomial has fewer than two coefficients

    U is a (P+1) by (P+1) upper triangular matrix that holds the i'th order
    prediction polynomials Ai, i=1:P, where P is the order of the input
    polynomial A::

        [ 1  a1(1)*  a2(2)* ..... aP(P)  * ]
        [ 0  1       a2(1)* ..... aP(P-1)* ]
        [ .................................]
        [ 0  0       0      .....  1       ]

    from which the i'th order prediction polynomial can be extracted using
    Ai=U(i+1:-1:1,i+1)'. The first row of U contains the conjugates of the
    reflection coefficients, and the K's may be extracted using
    K=conj(U(1,2:end)).

    .. todo:: remove the conjugate when data is real data, clean up the
        code, test and doc.
    """
    a = numpy.array(a)
    realdata = numpy.isrealobj(a)
    assert a[0] == 1, 'First coefficient of the prediction polynomial must be unity'
    p = len(a)
    if p < 2:
        raise ValueError('Polynomial should have at least two coefficients')
    if realdata == True:
        U = numpy.zeros((p, p)) # This matrix will have the prediction
                                # polynomials of orders 1:p
    else:
        U = numpy.zeros((p, p), dtype=complex)
    U[:, p-1] = numpy.conj(a[-1::-1]) # Prediction coefficients of order p
    p = p -1
    e = numpy.zeros(p)
    # First we find the prediction coefficients of smaller orders and form
    # the matrix U. Initialize the step down with the order-p error:
    e[-1] = efinal # Prediction error of order p
    # Step down: recover the lower-order polynomials one order at a time
    for k in range(p-1, 0, -1):
        [a, e[k-1]] = levdown(a, e[k])
        U[:, k] = numpy.concatenate((numpy.conj(a[-1::-1].transpose()) ,
                                        [0]*(p-k) ))
    e0 = e[0]/(1.-abs(a[1]**2)) # Because a[1]=1 (true polynomial)
    U[0,0] = 1 # Prediction coefficient of zeroth order
    kr = numpy.conj(U[0,1:]) # The reflection coefficients
    kr = kr.transpose() # To make it into a column vector
    # Once we have the matrix U and the prediction errors at the various
    # orders, use them to recover the autocorrelation coefficients.
    R = numpy.zeros(1, dtype=complex)
    # Initialize recursion
    k = 1
    R0 = e0 # To take care of the zero indexing problem
    R[0] = -numpy.conj(U[0,1])*R0 # R[1]=-a1[1]*R[0]
    # Actual recursion
    for k in range(1,p):
        r = -sum(numpy.conj(U[k-1::-1,k])*R[-1::-1]) - kr[k]*e[k-1]
        R = numpy.insert(R, len(R), r)
    # Include R(0) and make it a column vector. Note the dot transpose
    # (matlab: R = [R0 R].';)
    R = numpy.insert(R, 0, e0)
    return R, U, kr, e
def levdown(anxt, enxt=None):
    """One step backward Levinson recursion

    :param anxt: the P+1'th order prediction polynomial (first coefficient
        must be unity). May be a list or array.
    :param enxt: the P+1'th order prediction error (optional).
    :return:
        * acur the P'th order prediction polynomial based on the P+1'th order prediction polynomial, anxt.
        * ecur the P'th order prediction error based on the P+1'th order prediction error, enxt.

    .. * knxt the P+1'th order reflection coefficient.
    """
    anxt = numpy.asarray(anxt)
    # The leading coefficient of a prediction polynomial is 1 by definition.
    # (bug fix: the message previously talked about reflection coefficients)
    if anxt[0] != 1:
        raise ValueError(
            'First coefficient of the prediction polynomial must be unity')
    anxt = anxt[1:]     # Drop the leading 1; it is not needed in the step down

    # Extract the k+1'th reflection coefficient (last polynomial coefficient)
    knxt = anxt[-1]
    if knxt == 1.0:
        raise ValueError(
            'At least one of the reflection coefficients is equal to one.')

    # A matrix formulation from Stoica is used to avoid looping:
    # a_{P}(i) = (a_{P+1}(i) - k * conj(a_{P+1}(P+1-i))) / (1 - |k|^2)
    acur = (anxt[0:-1] - knxt * numpy.conj(anxt[-2::-1])) / (1. - abs(knxt)**2)
    ecur = None
    if enxt is not None:
        # Step-down of the prediction error: e_P = e_{P+1} / (1 - |k|^2)
        ecur = enxt / (1. - numpy.dot(knxt.conj().transpose(), knxt))

    acur = numpy.insert(acur, 0, 1)
    return acur, ecur
def levup(acur, knxt, ecur=None):
    """LEVUP One step forward Levinson recursion

    :param acur: the P'th order prediction polynomial (first coefficient
        must be unity).
    :param knxt: the P+1'th order reflection coefficient.
    :param ecur: the P'th order prediction error (optional).
    :return:
        * anxt the P+1'th order prediction polynomial based on the P'th order prediction polynomial, acur, and the
          P+1'th order reflection coefficient, Knxt.
        * enxt the P+1'th order prediction prediction error, based on the P'th order prediction error, ecur.

    :References: P. Stoica R. Moses, Introduction to Spectral Analysis Prentice Hall, N.J., 1997, Chapter 3.
    """
    # The leading coefficient of a prediction polynomial is 1 by definition.
    # (bug fix: the message previously talked about reflection coefficients)
    if acur[0] != 1:
        raise ValueError(
            'First coefficient of the prediction polynomial must be unity')
    acur = acur[1:]     # Drop the leading 1; it is not needed

    # Matrix formulation from Stoica is used to avoid looping:
    # a_{P+1} = [a_P, 0] + k * [conj(reversed(a_P)), 1]
    anxt = numpy.concatenate((acur, [0])) + knxt * numpy.concatenate(
        (numpy.conj(acur[-1::-1]), [1]))

    enxt = None
    if ecur is not None:
        # Step-up of the prediction error: e_{P+1} = (1 - |k|^2) * e_P
        # (matlab version: enxt = (1-knxt'.*knxt)*ecur)
        enxt = (1. - numpy.dot(numpy.conj(knxt), knxt)) * ecur

    anxt = numpy.insert(anxt, 0, 1)
    return anxt, enxt
def arcovar_marple(x, order):
    r"""Estimate AR model parameters using covariance method

    This implementation is based on [Marple]_. This code is far more
    complicated and slower than :func:`arcovar` function, which is now the official version.
    See :func:`arcovar` for a detailed description of Covariance method.

    This function should be used in place of arcovar only if order<=4, for
    which :func:`arcovar` does not work.

    Fast algorithm for the solution of the covariance least squares normal
    equations from Marple.

    :param array X: Array of complex data samples
    :param int order: Order of linear prediction model
    :return:
        * AF - Array of complex forward linear prediction coefficients
        * PF - Real forward linear prediction variance at order IP
        * AB - Array of complex backward linear prediction coefficients
        * PB - Real backward linear prediction variance at order IP
        * PV - store linear prediction coefficients

    :raises ValueError: if a prediction error becomes non-positive or the
        delta/gamma gain factors leave the (0, 1] interval (numerical
        breakdown of the recursion).

    .. note:: this code and the original code in Marple diverge for ip>10.
        it seems that this is related to single precision used with
        complex type in fortran whereas numpy uses double precision for
        complex type.

    :validation: the AR parameters are the same as those returned by
        a completely different function :func:`arcovar`.

    :References: [Marple]_
    """
    assert len(x) >= order, "X must be dimensioned >=N"

    # ----------------------------------------------------- Initialization
    x = np.array(x)
    N = len(x)

    # Initial estimates, Equations 8.C.42
    r0 = sum(abs(x)**2.)
    r1 = abs(x[0])**2
    rN = abs(x[N-1])**2

    pf = r0 - r1
    pb = r0 - rN
    delta = 1. - r1 / r0
    gamma = 1. - rN / r0

    c = np.zeros(N, dtype=complex)
    d = np.zeros(N, dtype=complex)
    r = np.zeros(N, dtype=complex)
    af = np.zeros(N, dtype=complex)
    ab = np.zeros(N, dtype=complex)

    c[0] = x[N-1].conjugate() / r0
    d[0] = x[0].conjugate() / r0

    # special case: order zero, only the variance is computed
    if order == 0:
        pf = r0 / float(N)
        pb = pf
        return af, pf, ab, pb, 0

    # ---------------------------------------------------------- MAIN LOOP
    # order+1 so that we enter the loop and run the first part of the code;
    # the loop is exited via break once m == order-1.
    pbv = []
    for m in range(0, order+1):
        # bug fix: the original call passed `m` with no %s placeholder,
        # which made logging emit a formatting error.
        logging.debug('----------------------------m=%s', m)
        logging.debug(c[0:2])
        logging.debug(d[0:2])
        r1 = 1./pf
        r2 = 1./pb
        r3 = 1./delta
        r4 = 1./gamma

        # Order update: AF and AB vectors ; time update: C and D vectors
        temp = 0.+0.j
        for k in range(m+1, N):
            temp = temp + x[k]*x[k-m-1].conjugate()
        r[m] = temp.conjugate()
        theta = x[0] * c[m]

        if m == 0:
            pass
        else:
            for k in range(0, m):
                theta = theta + x[m-k] * c[k]                   # Eq. (8.C.39)
                r[k] = r[k] - x[N-m-1] * x[N-m+k].conjugate()   # Eq. (8.C.32)
                temp = temp + af[m-k-1] * r[k].conjugate()

        c1 = -temp * r2
        c2 = -r1 * temp.conjugate()
        c3 = theta * r3
        c4 = r4 * theta.conjugate()

        af[m] = c1                  # Eq. (8.C.19)
        ab[m] = c2                  # Eq. (8.C.22)
        save = c[m]
        c[m] = save + c3*d[m]
        d[m] = d[m] + c4*save

        if m == 0:
            pass
        else:
            for k in range(0, m):
                save = af[k]
                af[k] = save + c1 * ab[m-k-1]       # Eq. (8.C.18)
                ab[m-k-1] = ab[m-k-1] + c2 * save   # Eq. (8.C.21)
                save = c[k]
                c[k] = save + c3*d[k]               # Eq. (8.C.37)
                d[k] = d[k] + c4*save               # Eq. (8.C.38)

        r5 = temp.real**2 + temp.imag**2
        pf = pf - r5*r2             # Eq. (8.C.20)
        pb = pb - r5*r1             # Eq. (8.C.23)
        r5 = theta.real**2 + theta.imag**2
        delta = delta - r5*r4       # Eq. (8.C.39)
        gamma = gamma - r5*r3       # Eq. (8.C.40)

        if m != order-1:
            pass
        else:
            # final order reached: normalise the variances and stop
            pf = pf / float(N-m-1)
            pb = pb / float(N-m-1)
            break

        # bug fix: these ValueError instances were created but never raised
        if pf > 0 and pb > 0:
            pass
        else:
            raise ValueError("Negative PF or PB value")
        if (delta > 0. and delta <= 1 and gamma > 0. and gamma <= 1):
            pass
        else:
            raise ValueError("Invalid delta or gamma value")

        # Time update: AF and AB vectors; order update: C and D vectors
        r1 = 1./pf
        r2 = 1./pb
        r3 = 1./delta
        r4 = 1./gamma

        ef = x[m+1]
        eb = x[(N-1)-m-1]
        for k in range(0, m+1):
            ef = ef + af[k] * x[m-k]        # Eq. (8.C.1)
            eb = eb + ab[k] * x[N-m+k-1]    # Eq. (8.C.2)

        c1 = ef*r3
        c2 = eb*r4
        c3 = eb.conjugate() * r2
        c4 = ef.conjugate() * r1

        for k in range(m, -1, -1):
            save = af[k]
            af[k] = save + c1 * d[k]        # Eq. (8.C.33)
            d[k+1] = d[k] + c4 * save       # Eq. (8.C.25)
            save = ab[k]
            ab[k] = save + c2 * c[m-k]      # Eq. (8.C.35)
            c[m-k] = c[m-k] + c3 * save     # Eq. (8.C.24)

        c[m+1] = c3
        d[0] = c4

        r5 = ef.real**2 + ef.imag**2
        pf = pf - r5 * r3       # Eq. (8.C.34)
        delta = delta - r5 * r1 # Eq. (8.C.30)
        r5 = eb.real**2 + eb.imag**2
        pb = pb - r5 * r4       # Eq. (8.C.36)
        gamma = gamma - r5*r2   # Eq. (8.C.31)
        pbv.append(pb)

        # bug fix: these ValueError instances were created but never raised
        if (pf > 0. and pb > 0.):
            pass
        else:
            raise ValueError("Negative PF or PB value")
        if (delta > 0. and delta <= 1.) and (gamma > 0. and gamma <= 1.):
            pass
        else:
            raise ValueError("Invalid delta or gamma value")

    # af = array of forward coefficients, pf = forward variance
    # ab = array of backward coefficients, pb = backward variance
    return af, pf, ab, pb, pbv
def arcovar(x, order):
    r"""Simple and fast implementation of the covariance AR estimate

    This code is 10 times faster than :func:`arcovar_marple` and more importantly
    only 10 lines of code, compared to a 200 loc for :func:`arcovar_marple`

    :param array X: Array of complex data samples
    :param int order: Order of linear prediction model
    :return:
        * a - Array of complex forward linear prediction coefficients
        * e - error

    The covariance method fits a Pth order autoregressive (AR) model to the
    input signal, which is assumed to be the output of
    an AR system driven by white noise. This method minimizes the forward
    prediction error in the least-squares sense. The output vector
    contains the normalized estimate of the AR system parameters
    The white noise input variance estimate is also returned.

    If is the power spectral density of y(n), then:

    .. math:: \frac{e}{\left| A(e^{jw}) \right|^2} = \frac{e}{\left| 1+\sum_{k-1}^P a(k)e^{-jwk}\right|^2}

    Because the method characterizes the input data using an all-pole model,
    the correct choice of the model order p is important.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import arcovar, marple_data, arma2psd
        from pylab import plot, log10, linspace, axis

        ar_values, error = arcovar(marple_data, 15)
        psd = arma2psd(ar_values, sides='centerdc')
        plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd)))
        axis([-0.5, 0.5, -60, 0])

    .. seealso:: :class:`pcovar`

    :validation: the AR parameters are the same as those returned by
        a completely different function :func:`arcovar_marple`.

    :References: [Mathworks]_
    """
    from spectrum import corrmtx
    import scipy.linalg

    # Build the covariance-method data matrix (no windowing of the data).
    # numpy.matrix is deprecated, so plain arrays are used throughout.
    X = corrmtx(x, order, 'covariance')
    Xc = np.asarray(X[:, 1:])
    X1 = np.asarray(X[:, 0])

    # Coefficients estimated via the covariance method.
    # lstsq is used rather than solve because Xc is not a square matrix.
    a, _residues, _rank, _singular_values = scipy.linalg.lstsq(-Xc, X1)

    # Estimate the input white noise variance
    Cz = np.dot(X1.conj().transpose(), Xc)
    e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a)
    assert e.imag < 1e-4, 'weird behaviour'
    e = float(e.real)   # ignore the imaginary part, which should be small

    return a, e
def lpc(x, N=None):
    r"""Linear Predictor Coefficients.

    :param x: input data (expected to be a numpy array; see note below)
    :param int N: default is length(X) - 1

    :Details:

    Finds the coefficients :math:`A=(1, a(2), \dots a(N+1))`, of an Nth order
    forward linear predictor that predicts the current value value of the
    real-valued time series x based on past samples:

    .. math:: \hat{x}(n) = -a(2)*x(n-1) - a(3)*x(n-2) - ... - a(N+1)*x(n-N)

    such that the sum of the squares of the errors

    .. math:: err(n) = X(n) - Xp(n)

    is minimized. This function  uses the Levinson-Durbin recursion to
    solve the normal equations that arise from the least-squares formulation.

    .. seealso:: :func:`levinson`, :func:`aryule`, :func:`prony`, :func:`stmcb`

    .. todo:: matrix case, references

    :Example:

    ::

        from scipy.signal import lfilter
        noise = randn(50000,1);  % Normalized white Gaussian noise
        x = filter([1], [1 1/2 1/3 1/4], noise)
        x = x[45904:50000]
        x.reshape(4096, 1)
        x = x[0]

    Compute the predictor coefficients, estimated signal, prediction error, and autocorrelation sequence of the prediction error:

    1.00000 + 0.00000i   0.51711 - 0.00000i   0.33908 - 0.00000i   0.24410 - 0.00000i

    ::

        a = lpc(x, 3)
        est_x = lfilter([0 -a(2:end)],1,x);    % Estimated signal
        e = x - est_x;                         % Prediction error
        [acs,lags] = xcorr(e,'coeff');         % ACS of prediction error
    """
    m = len(x)
    if N is None:
        N = m - 1 #default value if N is not provided
    elif N > m-1:
        #disp('Warning: zero-padding short input sequence')
        # NOTE(review): resize is a numpy.ndarray method; a plain list input
        # would raise AttributeError here -- confirm intended input type.
        x.resize(N+1)
        #todo: check this zero-padding.
    # Autocorrelation via the Wiener-Khinchin theorem (IFFT of |FFT(x)|^2);
    # the FFT length is the next power of two >= 2*len(x)-1 to avoid aliasing.
    X = fft(x, 2**nextpow2(2.*len(x)-1))
    R = real(ifft(abs(X)**2))
    R = R/(m-1.) #Biased autocorrelation estimate
    # Solve the Yule-Walker normal equations by Levinson-Durbin recursion.
    a, e, ref = LEVINSON(R, N)
    return a, e
def minvar(X, order, sampling=1., NFFT=default_NFFT):
    r"""Minimum Variance Spectral Estimation (MV)

    Computes the minimum variance spectral estimate using the Musicus
    procedure. The Burg algorithm from :func:`~spectrum.burg.arburg` is used
    for the estimation of the autoregressive parameters.

    The MV spectral estimator is given by:

    .. math:: P_{MV}(f) = \frac{T}{e^H(f) R^{-1}_p e(f)}

    where :math:`R^{-1}_p` is the inverse of the estimated autocorrelation
    matrix  (Toeplitz) and :math:`e(f)` is the complex sinusoid vector.

    :param X: Array of complex or real data samples (length N)
    :param int order: Dimension of correlation matrix (AR order = order - 1 )
    :param float sampling: Sample interval (PSD scaling)
    :param int NFFT: length of the final PSD

    :return:
        * PSD  - Power spectral density values (two-sided)
        * AR   - AR coefficients (Burg algorithm)
        * k    - Reflection coefficients (Burg algorithm)

    .. note:: The MV spectral estimator is not a true PSD function because the
        area under the MV estimate does not represent the total power in the
        measured process. MV minimises the variance of the output of a narrowband
        filter and adpats itself to the spectral content of the input data
        at each frequency.

    :Example: The following example computes a PSD estimate using :func:`minvar`
        The output PSD is transformed to a ``centerdc`` PSD and plotted.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import *
        from pylab import plot, log10, linspace, xlim
        psd, A, k = minvar(marple_data, 15)
        psd = twosided_2_centerdc(psd) # switch positive and negative freq
        f = linspace(-0.5, 0.5, len(psd))
        plot(f, 10 * log10(psd/max(psd)))
        xlim(-0.5, 0.5 )

    .. seealso::

        * External functions used are :meth:`~spectrum.burg.arburg`
          and numpy.fft.fft
        * :class:`pminvar`, a Class dedicated to MV method.

    :Reference: [Marple]_
    """
    errors.is_positive_integer(order)
    errors.is_positive_integer(NFFT)

    # AR parameters of order `order` - 1 via the Burg algorithm.
    # NOTE(review): results differ slightly from Marple's Fortran code,
    # which uses single-precision arithmetic and a single-precision FFT;
    # the difference was verified to be negligible for small NFFT.
    A, P, k = arburg(X, order - 1)
    A = np.insert(A, 0, 1.+0j)      # prepend the zeroth-order coefficient

    # Correlate the AR parameters into the psi coefficients, Eq. (12.25),
    # stored with Hermitian symmetry in a length-NFFT buffer.
    psi = np.zeros(NFFT, dtype=complex)
    for lag in range(0, order):
        width = order - lag
        acc = 0.
        for j in range(0, width):
            acc = acc + float(width - 2*j) * A[j].conjugate() * A[j+lag]
        acc = acc / P
        if lag != 0:
            psi[NFFT-lag] = acc.conjugate()
        psi[lag] = acc

    # FFT of the denominator, then invert to obtain the PSD values.
    psi = fft(psi, NFFT)
    PSD = sampling / np.real(psi)
    return PSD, A, k
def pascal(n):
    """Return Pascal matrix

    :param int n: size of the matrix

    .. doctest::

        >>> from spectrum import pascal
        >>> pascal(6)
        array([[   1.,    1.,    1.,    1.,    1.,    1.],
               [   1.,    2.,    3.,    4.,    5.,    6.],
               [   1.,    3.,    6.,   10.,   15.,   21.],
               [   1.,    4.,   10.,   20.,   35.,   56.],
               [   1.,    5.,   15.,   35.,   70.,  126.],
               [   1.,    6.,   21.,   56.,  126.,  252.]])

    .. todo:: use the symmetric property to improve computational time if needed
    """
    errors.is_positive_integer(n)
    P = numpy.zeros((n, n))

    # First row and first column are all ones ...
    P[0, :] = 1
    P[:, 0] = 1
    # ... and every other entry is the sum of its upper and left neighbours
    # (the loops are empty when n == 1, so no special case is needed).
    for row in range(1, n):
        for col in range(1, n):
            P[row, col] = P[row-1, col] + P[row, col-1]
    return P
def corrmtx(x_input, m, method='autocorrelation'):
    r"""Correlation matrix

    This function is used by PSD estimator functions. It generates
    the correlation matrix from a correlation data set and a maximum lag.

    :param array x: autocorrelation samples (1D)
    :param int m: the maximum lag

    Depending on the choice of the method, the correlation matrix has different
    sizes, but the number of rows is always m+1.

    Method can be :

    * 'autocorrelation': (default) X is the (n+m)-by-(m+1) rectangular Toeplitz
      matrix derived using prewindowed and postwindowed data.
    * 'prewindowed': X is the n-by-(m+1) rectangular Toeplitz matrix derived
      using prewindowed data only.
    * 'postwindowed': X is the n-by-(m+1) rectangular Toeplitz matrix that
      derived using postwindowed data only.
    * 'covariance': X is the (n-m)-by-(m+1) rectangular Toeplitz matrix
      derived using nonwindowed data.
    * 'modified': X is the 2(n-m)-by-(m+1) modified rectangular Toeplitz
      matrix that generates an autocorrelation estimate for the length n data
      vector x, derived using forward and backward prediction error estimates.

    :return:
        * the autocorrelation matrix
        * R, the (m+1)-by-(m+1) autocorrelation matrix estimate ``R= X'*X``.

    .. rubric:: Algorithm details:

    The **autocorrelation** matrix is a :math:`(N+p) \times (p+1)` rectangular Toeplilz
    data matrix:

    .. math:: X_p = \begin{pmatrix}L_p\\T_p\\Up\end{pmatrix}

    where the lower triangular :math:`p \times (p+1)` matrix :math:`L_p` is

    .. math:: L_p =
        \begin{pmatrix}
        x[1]   & \cdots & 0      & 0     \\
        \vdots & \ddots & \vdots & \vdots \\
        x[p]   & \cdots & x[1]   & 0
        \end{pmatrix}

    where the rectangular :math:`(N-p) \times (p+1)` matrix :math:`T_p` is

    .. math:: T_p =
        \begin{pmatrix}
        x[p+1]   & \cdots & x[1]    \\
        \vdots   & \ddots & \vdots  \\
        x[N-p]   & \cdots & x[p+1]  \\
        \vdots   & \ddots & \vdots  \\
        x[N]     & \cdots & x[N-p]
        \end{pmatrix}

    and where the upper triangular :math:`p \times (p+1)` matrix :math:`U_p` is

    .. math:: U_p =
        \begin{pmatrix}
        0      & x[N]   & \cdots & x[N-p+1] \\
        \vdots & \vdots & \ddots & \vdots   \\
        0      & 0      & \cdots & x[N]
        \end{pmatrix}

    From this definition, the prewindowed matrix is

    .. math:: X_p = \begin{pmatrix}L_p\\T_p\end{pmatrix}

    the postwindowed matrix is

    .. math:: X_p = \begin{pmatrix}T_p\\U_p\end{pmatrix}

    the covariance matrix is:

    .. math:: X_p = \begin{pmatrix}T_p\end{pmatrix}

    and the modified covariance matrix is:

    .. math:: X_p = \begin{pmatrix}T_p\\T_p^*\end{pmatrix}
    """
    valid_methods = ['autocorrelation', 'prewindowed', 'postwindowed',
                     'covariance', 'modified']
    if method not in valid_methods:
        raise ValueError("Invalid method. Try one of %s" % valid_methods)

    from scipy.linalg import toeplitz

    N = len(x_input)
    # FIXME: do we need a copy ?
    if isinstance(x_input, list):
        x = numpy.array(x_input)
    else:
        x = x_input.copy()
    complex_type = x.dtype == complex

    # Bug fix: Tp is required by *every* method ('postwindowed', 'covariance'
    # and 'modified' previously raised NameError because Tp was only built
    # for 'autocorrelation'/'prewindowed'). Lp and Up are method-specific.
    Tp = toeplitz(x[m:N], x[m::-1])
    if method in ['autocorrelation', 'prewindowed']:
        Lp = toeplitz(x[0:m], [0]*(m+1))
    if method in ['autocorrelation', 'postwindowed']:
        Up = toeplitz([0]*(m+1), numpy.insert(x[N:N-m-1:-1], 0, 0))

    # Assemble the output matrix by stacking the relevant pieces.
    dtype = complex if complex_type else float
    if method == 'autocorrelation':
        C = numpy.zeros((N+m, m+1), dtype=dtype)
        C[0:m] = Lp
        C[m:N] = Tp
        C[N:N+m] = Up[0:m]
    elif method == 'prewindowed':
        C = numpy.zeros((N, m+1), dtype=dtype)
        C[0:m] = Lp
        C[m:N] = Tp
    elif method == 'postwindowed':
        C = numpy.zeros((N, m+1), dtype=dtype)
        C[0:N-m] = Tp
        C[N-m:N] = Up[0:m]
    elif method == 'covariance':
        return Tp
    elif method == 'modified':
        C = numpy.zeros((2*(N-m), m+1), dtype=dtype)
        C[0:N-m] = Tp
        C[N-m:] = numpy.fliplr(Tp.conj())

    return C
def csvd(A):
    """SVD decomposition using numpy.linalg.svd

    :param A: a M by N matrix
    :return:
        * U, a M by M matrix
        * S the N eigen values
        * V a N by N matrix

    See :func:`numpy.linalg.svd` for a detailed  documentation.

    Should return the same as in [Marple]_ , CSVD routine.

    ::

        U, S, V = numpy.linalg.svd(A)
        U, S, V = cvsd(A)
    """
    # Thin wrapper kept for API compatibility with Marple's CSVD routine.
    left, singular_values, right = numpy.linalg.svd(A)
    return left, singular_values, right
def compatible_staticpath(path):
    """
    Try to return a path to the static files compatible all the way back
    to Django 1.2. If anyone has a cleaner or better way to do this let
    me know!
    """
    if VERSION >= (1, 10):
        # Since Django 1.10, forms.Media automatically invoke static
        # lazily on the path if it is relative.
        return path
    try:
        # Django >= 1.4
        from django.templatetags.static import static
        return static(path)
    except ImportError:
        pass
    # Django >= 1.3 exposes STATIC_URL; older installs may define
    # PAGEDOWN_URL; fall back to MEDIA_URL as a last resort.
    for setting_name in ('STATIC_URL', 'PAGEDOWN_URL'):
        try:
            return '%s/%s' % (getattr(settings, setting_name).rstrip('/'), path)
        except AttributeError:
            pass
    return '%s/%s' % (settings.MEDIA_URL.rstrip('/'), path)
def main(argv=None):
    """Main command line interface."""
    # Default to the process arguments (minus the program name).
    args = sys.argv[1:] if argv is None else argv
    return CommandLineTool().run(args)
def pass_from_pipe(cls):
    """Return password from pipe if not on TTY, else False.
    """
    if sys.stdin.isatty():
        # interactive terminal: no piped password available
        return False
    return cls.strip_last_newline(sys.stdin.read())
def _load_plugins():
    """
    Locate all setuptools entry points by the name 'keyring backends'
    and initialize them.

    Any third-party library may register an entry point by adding the
    following to their setup.py::

        entry_points = {
            'keyring.backends': [
                'plugin_name = mylib.mymodule:initialize_func',
            ],
        },

    `plugin_name` can be anything, and is only used to display the name
    of the plugin at initialization time.

    `initialize_func` is optional, but will be invoked if callable.
    """
    group = 'keyring.backends'
    entry_points = entrypoints.get_group_all(group=group)
    for ep in entry_points:
        try:
            log.info('Loading %s', ep.name)
            init_func = ep.load()
            if callable(init_func):
                init_func()
        except Exception:
            # Lazy %-style args (fix: the message was eagerly formatted
            # with `%`, defeating logging's deferred formatting).
            log.exception("Error initializing plugin %s.", ep)
def get_all_keyring():
    """
    Return a list of all implemented keyrings that can be constructed without
    parameters.
    """
    _load_plugins()
    # Instantiate every viable backend, silently skipping those whose
    # constructors require arguments (they raise TypeError).
    candidates = KeyringBackend.get_viable_backends()
    return list(util.suppress_exceptions(candidates, exceptions=TypeError))
def name(cls):
    """
    The keyring name, suitable for display.

    The name is derived from module and class name.
    """
    # Take only the last component of the module path, with underscores
    # rendered as spaces for readability.
    mod_name = cls.__module__.rpartition('.')[-1].replace('_', ' ')
    return '{} {}'.format(mod_name, cls.__name__)
def get_credential(self, service, username):
    """Gets the username and password for the service.
    Returns a Credential instance.

    The *username* argument is optional and may be omitted by
    the caller or ignored by the backend. Callers must use the
    returned username.
    """
    # The default implementation requires a username here.
    if username is None:
        return None
    password = self.get_password(service, username)
    if password is None:
        return None
    return credentials.SimpleCredential(username, password)
def get_password(self, service, username):
    """Get password of the username for the service
    """
    if not self.connected(service):
        # The user pressed "cancel" when prompted to unlock their keyring.
        raise KeyringLocked("Failed to unlock the keyring!")
    has_entry = self.iface.hasEntry(self.handle, service, username, self.appid)
    if not has_entry:
        return None
    return str(self.iface.readPassword(
        self.handle, service, username, self.appid))
def set_password(self, service, username, password):
    """Set password for the username of the service
    """
    if not self.connected(service):
        # The user pressed "cancel" when prompted to unlock their keyring.
        raise PasswordSetError("Cancelled by user")
    self.iface.writePassword(
        self.handle, service, username, password, self.appid)
def delete_password(self, service, username):
    """Delete the password for the username of the service.
    """
    if not self.connected(service):
        # The user pressed "cancel" when prompted to unlock their keyring.
        raise PasswordDeleteError("Cancelled by user")
    has_entry = self.iface.hasEntry(self.handle, service, username, self.appid)
    if not has_entry:
        raise PasswordDeleteError("Password not found")
    self.iface.removeEntry(self.handle, service, username, self.appid)
def _get_env(self, env_var):
"""Helper to read an environment variable
"""
value = os.environ.get(env_var)
if not value:
raise ValueError('Missing environment variable:%s' % env_var)
return value |
def get_preferred_collection(self):
    """If self.preferred_collection contains a D-Bus path,
    the collection at that address is returned. Otherwise,
    the default collection is returned.

    :raises InitError: when the collection cannot be created/opened.
    :raises KeyringLocked: when the user dismisses the unlock prompt.
    """
    bus = secretstorage.dbus_init()
    try:
        if hasattr(self, 'preferred_collection'):
            collection = secretstorage.Collection(
                bus, self.preferred_collection)
        else:
            collection = secretstorage.get_default_collection(bus)
    except exceptions.SecretStorageException as e:
        # Chain the original exception for easier debugging
        # (fix: the cause was previously discarded).
        raise InitError("Failed to create the collection: %s." % e) from e
    if collection.is_locked():
        collection.unlock()
        if collection.is_locked():  # User dismissed the prompt
            raise KeyringLocked("Failed to unlock the collection!")
    return collection
def get_password(self, service, username):
    """Get password of the username for the service
    """
    collection = self.get_preferred_collection()
    matches = collection.search_items(
        {"username": username, "service": service})
    # Only the first matching item is used; implicitly returns None
    # when nothing matches.
    for match in matches:
        if hasattr(match, 'unlock'):
            match.unlock()
        if match.is_locked():  # User dismissed the prompt
            raise KeyringLocked('Failed to unlock the item!')
        return match.get_secret().decode('utf-8')
def set_password(self, service, username, password):
    """Set password for the username of the service
    """
    collection = self.get_preferred_collection()
    attributes = {
        "application": self.appid,
        "service": service,
        "username": username
    }
    # replace=True overwrites any existing item with the same attributes.
    collection.create_item(
        "Password for '{}' on '{}'".format(username, service),
        attributes, password, replace=True)
def delete_password(self, service, username):
    """Delete the stored password (only the first one)
    """
    collection = self.get_preferred_collection()
    matches = collection.search_items(
        {"username": username, "service": service})
    # Delete only the first matching item.
    for match in matches:
        return match.delete()
    raise PasswordDeleteError("No such password!")
def unpack(word):
    r"""
    Decode a 4-character big-endian word to its unsigned integer value;
    integers pass through unchanged.

    >>> PackedAttributes.unpack(0)
    0
    >>> PackedAttributes.unpack('\x00\x00\x00\x01')
    1
    >>> PackedAttributes.unpack('abcd')
    1633837924
    """
    if isinstance(word, str):
        return struct.unpack('!I', word.encode('ascii'))[0]
    return word
def backends(cls):
    """
    Discover all keyrings for chaining.
    """
    # Keep viable, positive-priority keyrings, excluding chainers
    # themselves to avoid recursion.
    candidates = (
        ring
        for ring in filter(backend._limit, backend.get_all_keyring())
        if ring.priority > 0 and not isinstance(ring, ChainerBackend)
    )
    return sorted(candidates, key=backend.by_priority, reverse=True)
def set_keyring(keyring):
    """Set current keyring backend.
    """
    global _keyring_backend
    if isinstance(keyring, backend.KeyringBackend):
        _keyring_backend = keyring
    else:
        raise TypeError("The keyring must be a subclass of KeyringBackend")
def disable():
    """
    Configure the null keyring as the default.

    :raises RuntimeError: if a config file already exists, to avoid
        clobbering a user's configuration.
    """
    root = platform.config_root()
    try:
        os.makedirs(root)
    except OSError:
        pass  # the config root probably already exists
    filename = os.path.join(root, 'keyringrc.cfg')
    if os.path.exists(filename):
        # Bug fix: the message previously read "(unknown)" because the
        # {filename} placeholder had been lost.
        msg = "Refusing to overwrite {filename}".format(**locals())
        raise RuntimeError(msg)
    with open(filename, 'w') as file:
        file.write('[backend]\ndefault-keyring=keyring.backends.null.Keyring')
def init_backend(limit=None):
    """
    Load a keyring specified in the config file or infer the best available.

    Limit, if supplied, should be a callable taking a backend and returning
    True if that backend should be included for consideration.
    """
    # remember the limit so the chainer honors it as well
    backend._limit = limit

    # keyrings that pass the limit filter
    candidates = filter(limit, backend.get_all_keyring())

    # environment overrides config, which overrides auto-detection
    chosen = (
        load_env()
        or load_config()
        or max(candidates, default=fail.Keyring(), key=backend.by_priority)
    )
    set_keyring(chosen)
def _load_keyring_class(keyring_name):
"""
Load the keyring class indicated by name.
These popular names are tested to ensure their presence.
>>> popular_names = [
... 'keyring.backends.Windows.WinVaultKeyring',
... 'keyring.backends.OS_X.Keyring',
... 'keyring.backends.kwallet.DBusKeyring',
... 'keyring.backends.SecretService.Keyring',
... ]
>>> list(map(_load_keyring_class, popular_names))
[...]
These legacy names are retained for compatibility.
>>> legacy_names = [
... ]
>>> list(map(_load_keyring_class, legacy_names))
[...]
"""
module_name, sep, class_name = keyring_name.rpartition('.')
__import__(module_name)
module = sys.modules[module_name]
return getattr(module, class_name) |
def load_config():
    """Load a keyring using the config file in the config root."""
    keyring_cfg = os.path.join(platform.config_root(), 'keyringrc.cfg')
    if not os.path.exists(keyring_cfg):
        return

    config = configparser.RawConfigParser()
    config.read(keyring_cfg)
    _load_keyring_path(config)

    # load the keyring class name, and then load this keyring
    try:
        if not config.has_section("backend"):
            raise configparser.NoOptionError('backend', 'default-keyring')
        keyring_name = config.get("backend", "default-keyring").strip()
    except (configparser.NoOptionError, ImportError):
        logger = logging.getLogger('keyring')
        logger.warning("Keyring config file contains incorrect values.\n"
                       + "Config file: %s" % keyring_cfg)
        return

    return load_keyring(keyring_name)
def _load_keyring_path(config):
"load the keyring-path option (if present)"
try:
path = config.get("backend", "keyring-path").strip()
sys.path.insert(0, path)
except (configparser.NoOptionError, configparser.NoSectionError):
pass |
def _data_root_Linux():
"""
Use freedesktop.org Base Dir Specfication to determine storage
location.
"""
fallback = os.path.expanduser('~/.local/share')
root = os.environ.get('XDG_DATA_HOME', None) or fallback
return os.path.join(root, 'python_keyring') |
def _check_old_config_root():
    """
    Prior versions of keyring would search for the config
    in XDG_DATA_HOME, but should probably have been
    searching for config in XDG_CONFIG_HOME. If the
    config exists in the former but not in the latter,
    raise a RuntimeError to force the change.
    """
    # Replace this function with a no-op so the check runs only once and
    # the call from _config_root_Linux cannot recurse forever.
    globals()['_check_old_config_root'] = lambda: None
    old_location = os.path.join(_data_root_Linux(), 'keyringrc.cfg')
    new_location = os.path.join(_config_root_Linux(), 'keyringrc.cfg')
    if os.path.isfile(old_location) and not os.path.isfile(new_location):
        raise RuntimeError(
            "Keyring config exists only in the old location "
            "{config_file_old} and should be moved to {config_file_new} "
            "to work with this version of keyring.".format(
                config_file_old=old_location,
                config_file_new=new_location))
def _config_root_Linux():
    """
    Use freedesktop.org Base Dir Specification to determine config
    location.
    """
    _check_old_config_root()
    # Bug fix: the fallback was '~/.local/share' (the *data* root); the
    # XDG Base Dir spec defines XDG_CONFIG_HOME's default as ~/.config.
    fallback = os.path.expanduser('~/.config')
    key = 'XDG_CONFIG_HOME'
    root = os.environ.get(key, None) or fallback
    return os.path.join(root, 'python_keyring')
def make_formatter(format_name):
    """Returns a callable that outputs the data. Defaults to print.

    :param str format_name: 'json' or 'prettyjson' for JSON output,
        anything else for plain printing.
    """
    if "json" in format_name:
        from json import dumps
        import datetime

        def jsonhandler(obj):
            # Bug fix: 'return' was missing, so every object handled here
            # serialized as null. Dates/datetimes become ISO-8601 strings.
            return obj.isoformat() if isinstance(
                obj, (datetime.datetime, datetime.date)) else obj

        if format_name == "prettyjson":
            def jsondumps(data):
                return dumps(data, default=jsonhandler, indent=2,
                             separators=(',', ': '))
        else:
            def jsondumps(data):
                return dumps(data, default=jsonhandler)

        def jsonify(data):
            if isinstance(data, dict):
                print(jsondumps(data))
            elif isinstance(data, list):
                # device entries are namedtuples; convert for JSON encoding
                print(jsondumps([device._asdict() for device in data]))
            else:
                print(dumps({'result': data}))
        return jsonify
    else:
        def printer(data):
            if isinstance(data, dict):
                print(data)
            else:
                for row in data:
                    print(row)
        return printer
def argparser():
    """Construct the ArgumentParser for the pynetgear CLI.

    Defines global connection options plus one subparser per router
    action (block_device, allow_device, login, attached_devices,
    traffic_meter).
    """
    parser = ArgumentParser(prog='pynetgear')
    parser.add_argument("--format", choices=['json', 'prettyjson', 'py'],
                        default='prettyjson')

    router_args = parser.add_argument_group("router connection config")
    router_args.add_argument("--host", help="Hostname for the router")
    router_args.add_argument("--user", help="Account for login")
    router_args.add_argument("--port", help="Port exposed on the router")
    router_args.add_argument(
        "--login-v2",
        help="Force the use of the cookie-based authentication",
        dest="force_login_v2", default=False, action="store_true")
    router_args.add_argument(
        "--password",
        help="Not required with a wired connection." +
             "Optionally, set the PYNETGEAR_PASSWORD environment variable")
    router_args.add_argument(
        "--url", help="Overrides host:port and ssl with url to router")
    # Bug fix: the help text said "Connect with https", but the flag
    # stores False into `ssl`, i.e. it disables https.
    router_args.add_argument("--no-ssl",
                             dest="ssl", default=True,
                             action="store_false",
                             help="Connect without https")

    subparsers = parser.add_subparsers(
        description="Runs subcommand against the specified router",
        dest="subcommand")

    block_parser = subparsers.add_parser(
        "block_device",
        help="Blocks a device from connecting by mac address")
    block_parser.add_argument("--mac-addr")

    allow_parser = subparsers.add_parser(
        "allow_device",
        help="Allows a device with the mac address to connect")
    allow_parser.add_argument("--mac-addr")

    subparsers.add_parser("login", help="Attempts to login to router.")

    attached_devices = subparsers.add_parser(
        "attached_devices", help="Outputs all attached devices")
    attached_devices.add_argument(
        "-v", "--verbose",
        action="store_true",
        default=False,
        help="Choose between verbose and slower or terse and fast.")

    subparsers.add_parser("traffic_meter",
                          help="Output router's traffic meter data")

    return parser
def run_subcommand(netgear, args):
    """Dispatch *args.subcommand* to the matching call on *netgear*."""
    cmd = args.subcommand
    if cmd in ("block_device", "allow_device"):
        status = BLOCK if cmd == "block_device" else ALLOW
        return netgear.allow_block_device(args.mac_addr, status)
    if cmd == "attached_devices":
        # Verbose listing is slower but carries more detail.
        getter = (netgear.get_attached_devices_2 if args.verbose
                  else netgear.get_attached_devices)
        return getter()
    if cmd == "traffic_meter":
        return netgear.get_traffic_meter()
    if cmd == "login":
        return netgear.login()
    print("Unknown subcommand")
def main():
    """CLI entry point: run the selected subcommand and print results."""
    args = argparser().parse_args(sys.argv[1:])
    formatter = make_formatter(args.format)
    # The environment variable takes precedence over --password.
    password = os.environ.get('PYNETGEAR_PASSWORD') or args.password
    session = Netgear(password, args.host, args.user, args.port,
                      args.ssl, args.url, args.force_login_v2)
    results = run_subcommand(session, args)
    if results is None:
        print("Error communicating with the Netgear router")
    else:
        formatter(results)
def autodetect_url():
    """
    Probe well-known router URLs for the SOAP service.

    Returns the first base URL answering with HTTP 200, or None when
    none of them respond.
    """
    candidates = (
        "http://routerlogin.net:5000",
        "https://routerlogin.net",
        "http://routerlogin.net",
    )
    for base in candidates:
        try:
            resp = requests.get(base + "/soap/server_sa/",
                                headers=_get_soap_headers("Test:1", "test"),
                                verify=False)
        except requests.exceptions.RequestException:
            continue
        if resp.status_code == 200:
            return base
    return None
def _xml_get(e, name):
"""
Returns the value of the subnode "name" of element e.
Returns None if the subnode doesn't exist
"""
r = e.find(name)
if r is not None:
return r.text
return None |
def _convert(value, to_type, default=None):
"""Convert value to to_type, returns default if fails."""
try:
return default if value is None else to_type(value)
except ValueError:
# If value could not be converted
return default |
def login(self):
    """
    Log in to the router.

    Called automatically by other actions. Prefers the v1 SOAP login
    unless cookie-based (v2) authentication is forced; falls back to
    v2 when v1 fails.
    """
    if self.force_login_v2:
        return self.login_v2()
    return self.login_v1() or self.login_v2()
def get_attached_devices(self):
    """
    Return list of connected devices to the router.

    Parses the "@"-separated GetAttachDevice payload into Device
    tuples. Returns None if error occurred (request/parse failure);
    returns an empty list when the payload is empty or "0".
    """
    _LOGGER.info("Get attached devices")

    success, response = self._make_request(SERVICE_DEVICE_INFO,
                                           "GetAttachDevice")
    if not success:
        _LOGGER.error("Get attached devices failed")
        return None

    success, node = _find_node(
        response.text,
        ".//GetAttachDeviceResponse/NewAttachDevice")
    if not success:
        return None

    devices = []

    # Netgear inserts a double-encoded value for "unknown" devices
    decoded = node.text.strip().replace(UNKNOWN_DEVICE_ENCODED,
                                        UNKNOWN_DEVICE_DECODED)
    if not decoded or decoded == "0":
        _LOGGER.error("Can't parse attached devices string")
        _LOGGER.debug(node.text.strip())
        return devices

    entries = decoded.split("@")

    # First element is the total device count
    entry_count = None
    if len(entries) > 1:
        entry_count = _convert(entries.pop(0), int)

    # Mismatch is logged but not fatal -- keep whatever parsed.
    if entry_count is not None and entry_count != len(entries):
        _LOGGER.info(
            """Number of devices should \
be: %d but is: %d""", entry_count, len(entries))

    for entry in entries:
        # Fields within an entry are ";"-separated; routers report a
        # varying number of fields, so probe lengths defensively.
        info = entry.split(";")

        if len(info) == 0:
            continue

        # Not all routers will report those
        signal = None
        link_type = None
        link_rate = None
        allow_or_block = None

        if len(info) >= 8:
            allow_or_block = info[7]
        if len(info) >= 7:
            link_type = info[4]
            link_rate = _convert(info[5], int)
            signal = _convert(info[6], int)

        # Need at least index/ip/name/mac to build a Device at all.
        if len(info) < 4:
            _LOGGER.warning("Unexpected entry: %s", info)
            continue

        ipv4, name, mac = info[1:4]

        devices.append(Device(name, ipv4, mac,
                              link_type, signal, link_rate,
                              allow_or_block,
                              None, None, None, None))

    return devices
def get_attached_devices_2(self):
    """
    Return list of connected devices to the router with details.

    This call is slower and probably heavier on the router load.
    Returns None if error occurred.
    """
    _LOGGER.info("Get attached devices 2")

    success, response = self._make_request(SERVICE_DEVICE_INFO,
                                           "GetAttachDevice2")
    if not success:
        return None

    success, devices_node = _find_node(
        response.text,
        ".//GetAttachDevice2Response/NewAttachDevice")
    if not success:
        return None

    def to_device(node):
        # Map one <Device> XML node onto a Device tuple; missing
        # subnodes come back as None from _xml_get.
        return Device(
            _xml_get(node, 'Name'),
            _xml_get(node, 'IP'),
            _xml_get(node, 'MAC'),
            _xml_get(node, 'ConnectionType'),
            _convert(_xml_get(node, 'SignalStrength'), int),
            _xml_get(node, 'Linkspeed'),
            _xml_get(node, 'AllowOrBlock'),
            _convert(_xml_get(node, 'DeviceType'), int),
            _xml_get(node, 'DeviceModel'),
            _xml_get(node, 'SSID'),
            _xml_get(node, 'ConnAPMAC'),
        )

    return [to_device(d) for d in devices_node.findall("Device")]
def get_traffic_meter(self):
    """
    Return dict of traffic meter stats, or None on error.
    """
    _LOGGER.info("Get traffic meter")

    def parse_value(text):
        """Parse one stat value.

        "a/b" -> (total, avg) float tuple, "h:m" -> timedelta,
        otherwise a plain float; None when unparseable.
        """
        try:
            if "/" in text:
                return tuple(float(part) for part in text.split('/'))
            if ":" in text:
                hours, minutes = (float(part) for part in text.split(':'))
                return timedelta(hours=hours, minutes=minutes)
            return float(text)
        except ValueError:
            return None

    success, response = self._make_request(SERVICE_DEVICE_CONFIG,
                                           "GetTrafficMeterStatistics")
    if not success:
        return None

    success, node = _find_node(
        response.text,
        ".//GetTrafficMeterStatisticsResponse")
    if not success:
        return None

    return {child.tag: parse_value(child.text) for child in node}
def config_start(self):
    """
    Start a configuration session.

    Required before router admin actions (e.g. allowing/blocking
    devices). Records whether the session opened and returns that
    success flag.
    """
    _LOGGER.info("Config start")
    ok, _ = self._make_request(
        SERVICE_DEVICE_CONFIG, "ConfigurationStarted",
        {"NewSessionID": SESSION_ID})
    self.config_started = ok
    return ok
def config_finish(self):
    """
    End a configuration session, telling the router to apply changes.

    No-op (returns True) when no session is open. On failure the
    session is still considered open.
    """
    _LOGGER.info("Config finish")
    if not self.config_started:
        return True
    ok, _ = self._make_request(
        SERVICE_DEVICE_CONFIG, "ConfigurationFinished",
        {"NewStatus": "ChangesApplied"})
    # The session stays flagged as started when the request failed.
    self.config_started = not ok
    return ok
def allow_block_device(self, mac_addr, device_status=BLOCK):
    """
    Allow or Block a device via its Mac Address.

    Opens a configuration session, issues SetBlockDeviceByMAC, then
    closes the session again.

    :param mac_addr: MAC address of the device to change.
    :param device_status: ALLOW (device may access the network) or
        BLOCK (device is denied access). Defaults to BLOCK.
    :return: True on success, False on any failure.
    """
    _LOGGER.info("Allow block device")
    if self.config_started:
        # Fix: corrected "Inconsistant" typo in the log message.
        _LOGGER.error(
            "Inconsistent configuration state, configuration already started")
        return False

    if not self.config_start():
        _LOGGER.error("Could not start configuration")
        return False

    success, _ = self._make_request(
        SERVICE_DEVICE_CONFIG, "SetBlockDeviceByMAC",
        {"NewAllowOrBlock": device_status, "NewMACAddress": mac_addr})

    if not success:
        _LOGGER.error("Could not successfully call allow/block device")
        return False

    if not self.config_finish():
        # Fix: corrected "Inconsistant" typo in the log message.
        _LOGGER.error(
            "Inconsistent configuration state, configuration already finished")
        return False

    return True
def _make_request(self, service, method, params=None, body="",
                  need_auth=True):
    """Make an API request to the router.

    :param service: SOAP service suffix (appended to SERVICE_PREFIX).
    :param method: SOAP action name.
    :param params: optional dict (serialized to XML tags) or raw
        params string used to build the body.
    :param body: optional pre-built SOAP body; when given, *params*
        is ignored.
    :param need_auth: when True, ensure a login/cookie before the
        call and retry once after re-login on an unauthorized reply.
    :return: (success, response) tuple; response is None on a
        transport-level error.
    """
    # If we have no cookie (v2) or never called login before (v1)
    # and we need auth, the request will fail for sure.
    if need_auth and not self.cookie:
        if not self.login():
            return False, None

    headers = self._get_headers(service, method, need_auth)

    if not body:
        if not params:
            params = ""
        if isinstance(params, dict):
            # Serialize each key/value pair as a simple XML tag.
            _map = params
            params = ""
            for k in _map:
                params += "<" + k + ">" + _map[k] + "</" + k + ">\n"

        body = CALL_BODY.format(service=SERVICE_PREFIX + service,
                                method=method, params=params)

    message = SOAP_REQUEST.format(session_id=SESSION_ID, body=body)

    try:
        response = requests.post(self.soap_url, headers=headers,
                                 data=message, timeout=30, verify=False)

        if need_auth and _is_unauthorized_response(response):
            # let's discard the cookie because it probably expired (v2)
            # or the IP-bound (?) session expired (v1)
            self.cookie = None
            _LOGGER.warning("Unauthorized response, let's login and retry...")

            if self.login():
                # reset headers with new cookie first
                headers = self._get_headers(service, method, need_auth)
                response = requests.post(self.soap_url, headers=headers,
                                         data=message, timeout=30,
                                         verify=False)

        success = _is_valid_response(response)

        if not success:
            _LOGGER.error("Invalid response")
            _LOGGER.debug("%s\n%s\n%s", response.status_code,
                          str(response.headers), response.text)

        return success, response

    except requests.exceptions.RequestException:
        _LOGGER.exception("Error talking to API")

    # Maybe one day we will distinguish between
    # different errors..
    return False, None
def ip2long(ip):
    """
    Convert an IPv4 or IPv6 address string to its integer value.

    :arg ip: IPv4 or IPv6 address
    """
    try:
        packed = socket.inet_aton(ip)
    except socket.error:
        # Not a valid IPv4 dotted quad -- try IPv6.
        packed = socket.inet_pton(socket.AF_INET6, ip)
    return int(binascii.hexlify(packed), 16)
def str2fp(data):
    """
    Wrap string *data* in a file-like object.

    BytesIO (encoded with const.ENCODING) on Python 3, StringIO on
    Python 2.

    :arg data: String data to transform
    """
    if const.PY3:
        return BytesIO(bytearray(data, const.ENCODING))
    return StringIO(data)
def _setup_segments(self):
    """
    Parses the database file to determine what kind of database is
    being used and setup segment sizes and start points that will
    be used by the seek*() methods later.
    """
    # Defaults: plain country-edition layout.
    self._databaseType = const.COUNTRY_EDITION
    self._recordLength = const.STANDARD_RECORD_LENGTH
    self._databaseSegments = const.COUNTRY_BEGIN

    # Scan backwards from EOF looking for the 0xFF 0xFF 0xFF
    # structure-info delimiter; the original position is restored
    # before returning.
    filepos = self._fp.tell()
    self._fp.seek(-3, os.SEEK_END)

    for i in range(const.STRUCTURE_INFO_MAX_SIZE):
        chars = chr(255) * 3
        delim = self._fp.read(3)

        # Normalize read bytes to text so the comparison below works
        # on both Python 2 and 3.
        if PY3 and type(delim) is bytes:
            delim = delim.decode(ENCODING)

        if PY2:
            chars = chars.decode(ENCODING)
            if type(delim) is str:
                delim = delim.decode(ENCODING)

        if delim == chars:
            # Delimiter found: the next byte is the database type id.
            byte = self._fp.read(1)
            self._databaseType = ord(byte)

            # Compatibility with databases from April 2003 and earlier
            if self._databaseType >= 106:
                self._databaseType -= 105

            if self._databaseType == const.REGION_EDITION_REV0:
                self._databaseSegments = const.STATE_BEGIN_REV0
            elif self._databaseType == const.REGION_EDITION_REV1:
                self._databaseSegments = const.STATE_BEGIN_REV1
            elif self._databaseType in (const.CITY_EDITION_REV0,
                                        const.CITY_EDITION_REV1,
                                        const.CITY_EDITION_REV1_V6,
                                        const.ORG_EDITION,
                                        const.ISP_EDITION,
                                        const.NETSPEED_EDITION_REV1,
                                        const.NETSPEED_EDITION_REV1_V6,
                                        const.ASNUM_EDITION,
                                        const.ASNUM_EDITION_V6):
                # These editions store their segment count right after
                # the type byte, little-endian.
                self._databaseSegments = 0
                buf = self._fp.read(const.SEGMENT_RECORD_LENGTH)

                if PY3 and type(buf) is bytes:
                    buf = buf.decode(ENCODING)

                for j in range(const.SEGMENT_RECORD_LENGTH):
                    self._databaseSegments += (ord(buf[j]) << (j * 8))

                # Org/ISP editions use wider per-entry records.
                LONG_RECORDS = (const.ORG_EDITION, const.ISP_EDITION)
                if self._databaseType in LONG_RECORDS:
                    self._recordLength = const.ORG_RECORD_LENGTH
            break
        else:
            # Not the delimiter: step back over the 3 bytes just read
            # plus one more, then retry.
            self._fp.seek(-4, os.SEEK_CUR)

    self._fp.seek(filepos, os.SEEK_SET)
def _seek_country(self, ipnum):
    """
    Using the record length and appropriate start points, seek to the
    country that corresponds to the converted IP address integer.

    Walks the database's binary-tree index one bit of *ipnum* at a
    time, most significant bit first, until a child offset at or past
    the segment table is reached (a leaf).

    Return offset of record.
    :arg ipnum: Result of ip2long conversion
    :raises GeoIPError: when the walk reads past the database
        ('Corrupt database').
    """
    try:
        offset = 0
        # 128-bit walk for IPv6-sized integers, 32-bit otherwise.
        # NOTE(review): relies on IPv4 integers never exceeding 10
        # decimal digits (max 4294967295) -- confirm.
        seek_depth = 127 if len(str(ipnum)) > 10 else 31

        for depth in range(seek_depth, -1, -1):
            # Each node is a pair of recordLength-byte child offsets.
            if self._flags & const.MEMORY_CACHE:
                startIndex = 2 * self._recordLength * offset
                endIndex = startIndex + (2 * self._recordLength)
                buf = self._memory[startIndex:endIndex]
            else:
                startIndex = 2 * self._recordLength * offset
                readLength = 2 * self._recordLength
                try:
                    self._lock.acquire()
                    self._fp.seek(startIndex, os.SEEK_SET)
                    buf = self._fp.read(readLength)
                finally:
                    self._lock.release()

            if PY3 and type(buf) is bytes:
                buf = buf.decode(ENCODING)

            # Decode the two little-endian child offsets.
            x = [0, 0]
            for i in range(2):
                for j in range(self._recordLength):
                    byte = buf[self._recordLength * i + j]
                    x[i] += ord(byte) << (j * 8)

            # Bit set -> right child, else left child. A child offset
            # >= databaseSegments terminates the walk (leaf record).
            if ipnum & (1 << depth):
                if x[1] >= self._databaseSegments:
                    self._netmask = seek_depth - depth + 1
                    return x[1]
                offset = x[1]
            else:
                if x[0] >= self._databaseSegments:
                    self._netmask = seek_depth - depth + 1
                    return x[0]
                offset = x[0]
    except (IndexError, UnicodeDecodeError):
        # Fall through to the corrupt-database error below.
        pass

    raise GeoIPError('Corrupt database')
def _get_org(self, ipnum):
    """
    Seek and return the organization or ISP name for *ipnum*.

    Returns the org/isp string, or None when the lookup lands on the
    "unknown" segment.
    :arg ipnum: Result of ip2long conversion
    """
    seek_org = self._seek_country(ipnum)
    if seek_org == self._databaseSegments:
        return None

    # Records are stored after the 2*recordLength-wide tree nodes.
    record_offset = seek_org + (2 * self._recordLength - 1) * self._databaseSegments

    self._lock.acquire()
    try:
        self._fp.seek(record_offset, os.SEEK_SET)
        buf = self._fp.read(const.MAX_ORG_RECORD_LENGTH)
    finally:
        self._lock.release()

    if PY3 and type(buf) is bytes:
        buf = buf.decode(ENCODING)

    # The record is NUL-terminated.
    return buf[:buf.index(chr(0))]
def _get_region(self, ipnum):
    """
    Seek and return the region information for *ipnum*.

    Returns a dict with 'country_code' and 'region_code' keys; either
    value may be None when unresolvable.
    :arg ipnum: Result of ip2long conversion
    """
    country_code = None
    region_code = None
    seek_country = self._seek_country(ipnum)

    def offset_to_code(offset):
        # Region code is packed base-26 into two letters 'AA'..'ZZ'.
        return chr(offset // 26 + 65) + chr(offset % 26 + 65)

    db_type = self._databaseType
    if db_type == const.REGION_EDITION_REV0:
        seek_region = seek_country - const.STATE_BEGIN_REV0
        if seek_region >= 1000:
            country_code = 'US'
            region_code = offset_to_code(seek_region - 1000)
        else:
            country_code = const.COUNTRY_CODES[seek_region]
    elif db_type == const.REGION_EDITION_REV1:
        seek_region = seek_country - const.STATE_BEGIN_REV1
        if seek_region < const.US_OFFSET:
            # Unresolved: both codes stay None.
            pass
        elif seek_region < const.CANADA_OFFSET:
            country_code = 'US'
            region_code = offset_to_code(seek_region - const.US_OFFSET)
        elif seek_region < const.WORLD_OFFSET:
            country_code = 'CA'
            region_code = offset_to_code(seek_region - const.CANADA_OFFSET)
        else:
            index = (seek_region - const.WORLD_OFFSET) // const.FIPS_RANGE
            if index < len(const.COUNTRY_CODES):
                country_code = const.COUNTRY_CODES[index]
    elif db_type in const.CITY_EDITIONS:
        # City editions carry region data in the full record.
        rec = self._get_record(ipnum)
        region_code = rec.get('region_code')
        country_code = rec.get('country_code')

    return {'country_code': country_code, 'region_code': region_code}
def _get_record(self, ipnum):
    """
    Populate location dict for converted IP.
    Returns dict with numerous location properties; returns {} when
    the IP resolves to the "unknown" segment.
    :arg ipnum: Result of ip2long conversion
    """
    seek_country = self._seek_country(ipnum)
    if seek_country == self._databaseSegments:
        return {}

    # Records are stored after the 2*recordLength-wide tree nodes.
    read_length = (2 * self._recordLength - 1) * self._databaseSegments
    try:
        self._lock.acquire()
        self._fp.seek(seek_country + read_length, os.SEEK_SET)
        buf = self._fp.read(const.FULL_RECORD_LENGTH)
    finally:
        self._lock.release()

    if PY3 and type(buf) is bytes:
        buf = buf.decode(ENCODING)

    record = {
        'dma_code': 0,
        'area_code': 0,
        'metro_code': None,
        'postal_code': None
    }

    latitude = 0
    longitude = 0

    # Byte 0 indexes the country lookup tables.
    char = ord(buf[0])
    record['country_code'] = const.COUNTRY_CODES[char]
    record['country_code3'] = const.COUNTRY_CODES3[char]
    record['country_name'] = const.COUNTRY_NAMES[char]
    record['continent'] = const.CONTINENT_NAMES[char]

    def read_data(buf, pos):
        # Scan forward to the next NUL; an empty field yields None.
        cur = pos
        while buf[cur] != '\0':
            cur += 1
        return cur, buf[pos:cur] if cur > pos else None

    # Sequence of NUL-terminated strings: region, city, postal code.
    offset, record['region_code'] = read_data(buf, 1)
    offset, record['city'] = read_data(buf, offset + 1)
    offset, record['postal_code'] = read_data(buf, offset + 1)
    offset = offset + 1

    # Latitude/longitude: 3-byte little-endian fixed-point values,
    # scaled by 10000 and offset by 180 degrees.
    for j in range(3):
        latitude += (ord(buf[offset + j]) << (j * 8))

    for j in range(3):
        longitude += (ord(buf[offset + j + 3]) << (j * 8))

    record['latitude'] = (latitude / 10000.0) - 180.0
    record['longitude'] = (longitude / 10000.0) - 180.0

    # Rev1 city editions append a combined DMA/area code for US
    # records: dma*1000 + area.
    if self._databaseType in (const.CITY_EDITION_REV1,
                              const.CITY_EDITION_REV1_V6):
        if record['country_code'] == 'US':
            dma_area = 0
            for j in range(3):
                dma_area += ord(buf[offset + j + 6]) << (j * 8)

            record['dma_code'] = int(floor(dma_area / 1000))
            record['area_code'] = dma_area % 1000
            record['metro_code'] = const.DMA_MAP.get(record['dma_code'])

    params = (record['country_code'], record['region_code'])
    record['time_zone'] = time_zone_by_country_and_region(*params)

    return record
def _gethostbyname(self, hostname):
    """
    Resolve *hostname* to an address string.

    Uses an IPv6 lookup for v6 database editions, IPv4 otherwise.
    """
    if self._databaseType not in const.IPV6_EDITIONS:
        return socket.gethostbyname(hostname)
    addrinfo = socket.getaddrinfo(hostname, 0, socket.AF_INET6)
    # sockaddr for AF_INET6 is (address, port, flowinfo, scope_id);
    # only the address is needed.
    sockaddr = addrinfo[0][4]
    return sockaddr[0]
def id_by_name(self, hostname):
    """
    Return the database ID for the specified hostname.

    The id might be useful as an array index; 0 is unknown.
    :arg hostname: Hostname to get ID from.
    """
    return self.id_by_addr(self._gethostbyname(hostname))
def id_by_addr(self, addr):
    """
    Return the database ID for the specified address.

    The ID might be useful as an array index; 0 is unknown.
    :arg addr: IPv4 or IPv6 address (eg. 203.0.113.30)
    """
    unsupported = (const.PROXY_EDITION,
                   const.NETSPEED_EDITION_REV1,
                   const.NETSPEED_EDITION_REV1_V6)
    if self._databaseType in unsupported:
        raise GeoIPError('Invalid database type; this database is not supported')

    is_v6 = ':' in addr
    if not is_v6 and self._databaseType not in (const.COUNTRY_EDITION,
                                                const.NETSPEED_EDITION):
        raise GeoIPError('Invalid database type; this database supports IPv6 addresses, not IPv4')
    if is_v6 and self._databaseType != const.COUNTRY_EDITION_V6:
        raise GeoIPError('Invalid database type; this database supports IPv4 addresses, not IPv6')

    return self._seek_country(util.ip2long(addr)) - const.COUNTRY_BEGIN
def country_code_by_addr(self, addr):
    """
    Return the 2-letter country code (e.g. US) for *addr*.
    :arg addr: IP address (e.g. 203.0.113.30)
    """
    db_type = self._databaseType
    if db_type in (const.COUNTRY_EDITION, const.COUNTRY_EDITION_V6):
        return const.COUNTRY_CODES[self.id_by_addr(addr)]
    if db_type in const.REGION_CITY_EDITIONS:
        # Region and city editions carry the code in the region data.
        return self.region_by_addr(addr).get('country_code')
    raise GeoIPError('Invalid database type, expected Country, City or Region')
def country_code_by_name(self, hostname):
    """
    Return the 2-letter country code (e.g. US) for *hostname*.
    :arg hostname: Hostname (e.g. example.com)
    """
    return self.country_code_by_addr(self._gethostbyname(hostname))
def netspeed_by_addr(self, addr):
    """
    Return the NetSpeed name for *addr*.
    :arg addr: IP address (e.g. 203.0.113.30)
    """
    db_type = self._databaseType
    if db_type == const.NETSPEED_EDITION:
        return const.NETSPEED_NAMES[self.id_by_addr(addr)]
    if db_type in (const.NETSPEED_EDITION_REV1,
                   const.NETSPEED_EDITION_REV1_V6):
        # Rev1 editions store the name in the org-style record.
        return self._get_org(util.ip2long(addr))
    raise GeoIPError(
        'Invalid database type, expected NetSpeed or NetSpeedCell')
def netspeed_by_name(self, hostname):
    """
    Return the NetSpeed name for *hostname*. Can be Unknown, Dial-up,
    Cable, or Corporate.
    :arg hostname: Hostname (e.g. example.com)
    """
    return self.netspeed_by_addr(self._gethostbyname(hostname))
def country_name_by_addr(self, addr):
    """
    Return the full country name for *addr*.
    :arg addr: IP address (e.g. 203.0.113.30)
    """
    db_type = self._databaseType
    if db_type in (const.COUNTRY_EDITION, const.COUNTRY_EDITION_V6):
        return const.COUNTRY_NAMES[self.id_by_addr(addr)]
    if db_type in const.CITY_EDITIONS:
        # City editions resolve the name through the full record.
        return self.record_by_addr(addr).get('country_name')
    raise GeoIPError('Invalid database type, expected Country or City')
def country_name_by_name(self, hostname):
    """
    Return the full country name for *hostname*.
    :arg hostname: Hostname (e.g. example.com)
    """
    return self.country_name_by_addr(self._gethostbyname(hostname))
def org_by_addr(self, addr):
    """
    Return the Organization, ISP, or ASNum name for *addr*.
    :arg addr: IP address (e.g. 203.0.113.30)
    """
    supported = (const.ORG_EDITION, const.ISP_EDITION,
                 const.ASNUM_EDITION, const.ASNUM_EDITION_V6)
    if self._databaseType not in supported:
        raise GeoIPError('Invalid database type, expected Org, ISP or ASNum')
    return self._get_org(util.ip2long(addr))
def org_by_name(self, hostname):
    """
    Return the Organization, ISP, or ASNum name for *hostname*.
    :arg hostname: Hostname (e.g. example.com)
    """
    return self.org_by_addr(self._gethostbyname(hostname))
def record_by_addr(self, addr):
    """
    Return a dict of city data for *addr* containing `country_code`,
    `country_name`, `region`, `city`, `postal_code`, `latitude`,
    `longitude`, `dma_code`, `metro_code`, `area_code`, `region_code`
    and `time_zone`; None when no record exists.
    :arg addr: IP address (e.g. 203.0.113.30)
    """
    if self._databaseType not in const.CITY_EDITIONS:
        raise GeoIPError('Invalid database type, expected City')
    rec = self._get_record(util.ip2long(addr))
    # _get_record returns {} for unknown IPs; callers expect None.
    return rec if rec else None
def record_by_name(self, hostname):
    """
    Return a dict of city data for *hostname* containing
    `country_code`, `country_name`, `region`, `city`, `postal_code`,
    `latitude`, `longitude`, `dma_code`, `metro_code`, `area_code`,
    `region_code` and `time_zone`.
    :arg hostname: Hostname (e.g. example.com)
    """
    return self.record_by_addr(self._gethostbyname(hostname))
def region_by_addr(self, addr):
    """
    Return a dict with `country_code` and `region_code` for *addr*.
    :arg addr: IP address (e.g. 203.0.113.30)
    """
    if self._databaseType not in const.REGION_CITY_EDITIONS:
        raise GeoIPError('Invalid database type, expected Region or City')
    return self._get_region(util.ip2long(addr))
def region_by_name(self, hostname):
    """
    Return a dict with `country_code` and `region_code` for *hostname*.
    :arg hostname: Hostname (e.g. example.com)
    """
    return self.region_by_addr(self._gethostbyname(hostname))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.