Search is not available for this dataset
text stringlengths 75 104k |
|---|
def aic_eigen(s, N):
    r"""AIC order-selection using eigen values

    :param s: a list (or array) of `p` sorted eigen values
    :param N: the size of the input data. To be defined precisely.
    :return:
        * an array containing the AIC values

    Given :math:`n` sorted eigen values :math:`\lambda_i` with
    :math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985)
    is:

    .. math:: AIC(k) = -2(n-k)N \ln \frac{g(k)}{a(k)} + 2k(2n-k)

    where the arithmetic mean :math:`a(k)` is:

    .. math:: a(k) = \frac{1}{n-k} \sum_{i=k+1}^{n}\lambda_i

    and the geometric mean :math:`g(k)` is:

    .. math:: g(k) = \prod_{i=k+1}^{n} \lambda_i^{1/(n-k)}

    The number of relevant sinusoids in the signal subspace is determined by
    selecting the minimum of `AIC`.

    .. seealso:: :func:`~spectrum.eigenfreq.eigen`
    .. todo:: define precisely the input parameter N. Should be the input
        data length but when using correlation matrix (SVD), I suspect it
        should be the length of the correlation matrix rather than the
        original data.

    :References:
        * [Marple]_ Chap 13,
        * [Wax]_
    """
    import numpy as np
    # accept plain Python lists as documented (slicing a list then using **
    # would raise a TypeError otherwise)
    s = np.asarray(s, dtype=float)
    kaic = []
    n = len(s)
    for k in range(0, n-1):
        # arithmetic mean of the remaining eigen values
        ak = 1./(n-k) * np.sum(s[k+1:])
        # geometric mean with the same 1/(n-k) exponent
        gk = np.prod(s[k+1:]**(1./(n-k)))
        kaic.append(-2.*(n-k)*N * np.log(gk/ak) + 2.*k*(2.*n-k))
    return kaic
def mdl_eigen(s, N):
    r"""MDL order-selection using eigen values

    :param s: a list (or array) of `p` sorted eigen values
    :param N: the size of the input data. To be defined precisely.
    :return:
        * an array containing the MDL values

    .. math:: MDL(k) = -(n-k)N \ln \frac{g(k)}{a(k)} + 0.5k(2n-k) \log(N)

    .. seealso:: :func:`aic_eigen` for details

    :References:
        * [Marple]_ Chap 13,
        * [Wax]_
    """
    import numpy as np
    # accept plain Python lists as documented (slicing a list then using **
    # would raise a TypeError otherwise)
    s = np.asarray(s, dtype=float)
    kmdl = []
    n = len(s)
    for k in range(0, n-1):
        # arithmetic and geometric means of the remaining eigen values
        ak = 1./(n-k) * np.sum(s[k+1:])
        gk = np.prod(s[k+1:]**(1./(n-k)))
        kmdl.append(-(n-k)*N * np.log(gk/ak) + 0.5*k*(2.*n-k)*np.log(N))
    return kmdl
def generate_gallery_rst(app):
    """Generate the Main examples gallery reStructuredText

    Start the sphinx-gallery configuration and recursively scan the examples
    directories in order to populate the examples gallery.

    :param app: the Sphinx application object (provides builder/config/paths)
    """
    try:
        # plot_gallery may arrive as the string 'True'/'False' from the
        # sphinx-build command line; eval converts it. A TypeError means it
        # is already a non-string (e.g. a bool).
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    gallery_conf.update(app.config.sphinx_gallery_conf)
    gallery_conf.update(plot_gallery=plot_gallery)
    gallery_conf.update(
        abort_on_example_error=app.builder.config.abort_on_example_error)

    # this assures I can call the config in other places
    app.config.sphinx_gallery_conf = gallery_conf
    app.config.html_static_path.append(glr_path_static())

    clean_gallery_out(app.builder.outdir)

    examples_dirs = gallery_conf['examples_dirs']
    gallery_dirs = gallery_conf['gallery_dirs']
    if not isinstance(examples_dirs, list):
        examples_dirs = [examples_dirs]
    if not isinstance(gallery_dirs, list):
        gallery_dirs = [gallery_dirs]

    mod_examples_dir = os.path.relpath(gallery_conf['mod_example_dir'],
                                       app.builder.srcdir)
    seen_backrefs = set()

    for examples_dir, gallery_dir in zip(examples_dirs, gallery_dirs):
        examples_dir = os.path.relpath(examples_dir, app.builder.srcdir)
        gallery_dir = os.path.relpath(gallery_dir, app.builder.srcdir)

        for workdir in [examples_dir, gallery_dir, mod_examples_dir]:
            if not os.path.exists(workdir):
                os.makedirs(workdir)

        # we create an index.rst with all examples; the context manager
        # guarantees the file is closed (BUGFIX: the handle was previously
        # only flushed, never closed)
        with open(os.path.join(gallery_dir, 'index.rst'), 'w') as fhindex:
            # Here we don't use an os.walk, but we recurse only twice: flat is
            # better than nested.
            fhindex.write(generate_dir_rst(examples_dir, gallery_dir,
                                           gallery_conf, seen_backrefs))
            for directory in sorted(os.listdir(examples_dir)):
                if os.path.isdir(os.path.join(examples_dir, directory)):
                    src_dir = os.path.join(examples_dir, directory)
                    target_dir = os.path.join(gallery_dir, directory)
                    fhindex.write(generate_dir_rst(src_dir, target_dir,
                                                   gallery_conf,
                                                   seen_backrefs))
def setup(app):
    """Setup sphinx-gallery sphinx extension"""
    # register the three configuration values used by the gallery builder
    for conf_name, default in (('plot_gallery', True),
                               ('abort_on_example_error', False),
                               ('sphinx_gallery_conf', gallery_conf)):
        app.add_config_value(conf_name, default, 'html')
    app.add_stylesheet('gallery.css')
    # hook the gallery generation and the post-build code-link embedding
    app.connect('builder-inited', generate_gallery_rst)
    app.connect('build-finished', embed_code_links)
def CORRELATION(x, y=None, maxlags=None, norm='unbiased'):
    r"""Correlation function

    This function should give the same results as :func:`xcorr` but it
    returns the positive lags only. Moreover the algorithm does not use
    FFT as compared to other algorithms.

    :param array x: first data array of length N
    :param array y: second data array of length N. If not specified, computes the
        autocorrelation.
    :param int maxlags: compute cross correlation between [0:maxlags]
        when maxlags is not specified, the range of lags is [0:N-1].
    :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']

        * *biased*   correlation=raw/N,
        * *unbiased* correlation=raw/(N-`|lag|`)
        * *coeff*    correlation=raw/(rms(x).rms(y))/N
        * None       correlation=raw

    :return:
        * a numpy.array correlation sequence, r[1,N]
        * a float for the zero-lag correlation, r[0]

    The *unbiased* correlation has the form:

    .. math::

        \hat{r}_{xx} = \frac{1}{N-m}T \sum_{n=0}^{N-m-1} x[n+m]x^*[n] T

    The *biased* correlation differs by the front factor only:

    .. math::

        \check{r}_{xx} = \frac{1}{N}T \sum_{n=0}^{N-m-1} x[n+m]x^*[n] T

    with :math:`0\leq m\leq N-1`.

    .. doctest::

        >>> from spectrum import CORRELATION
        >>> x = [1,2,3,4,5]
        >>> res = CORRELATION(x,x, maxlags=0, norm='biased')
        >>> res[0]
        11.0

    .. note:: this function should be replaced by :func:`xcorr`.

    .. seealso:: :func:`xcorr`
    """
    assert norm in ['unbiased', 'biased', 'coeff', None]
    x = np.array(x)
    if y is None:
        y = x
    else:
        y = np.array(y)

    # N is the max of x and y; the shorter vector is zero-padded to length N
    N = max(len(x), len(y))
    if len(x) < N:
        x = x.copy()   # BUGFIX: was y.copy(), which silently discarded x
        x.resize(N)
    if len(y) < N:
        y = y.copy()
        y.resize(N)

    # default lag is N-1
    if maxlags is None:
        maxlags = N - 1
    assert maxlags < N, 'lag must be less than len(x)'

    realdata = np.isrealobj(x) and np.isrealobj(y)
    # r stores lags 1..maxlags; the zero-lag value r0 is handled separately
    if realdata:
        r = np.zeros(maxlags, dtype=float)
    else:
        r = np.zeros(maxlags, dtype=complex)

    if norm == 'coeff':
        rmsx = pylab_rms_flat(x)
        rmsy = pylab_rms_flat(y)

    for k in range(0, maxlags + 1):
        nk = N - k - 1
        # raw (un-normalised) correlation for lag k; 'acc' avoids shadowing
        # the builtin sum()
        if realdata:
            acc = 0
            for j in range(0, nk + 1):
                acc = acc + x[j + k] * y[j]
        else:
            acc = 0. + 0j
            for j in range(0, nk + 1):
                acc = acc + x[j + k] * y[j].conjugate()
        if k == 0:
            if norm in ['biased', 'unbiased']:
                r0 = acc / float(N)
            elif norm is None:
                r0 = acc
            else:
                # 'coeff': zero-lag is normalised to 1 by construction
                r0 = 1.
        else:
            if norm == 'unbiased':
                r[k - 1] = acc / float(N - k)
            elif norm == 'biased':
                r[k - 1] = acc / float(N)
            elif norm is None:
                r[k - 1] = acc
            elif norm == 'coeff':
                r[k - 1] = acc / (rmsx * rmsy) / float(N)

    r = np.insert(r, 0, r0)
    return r
def xcorr(x, y=None, maxlags=None, norm='biased'):
    """Cross-correlation using numpy.correlate

    Estimates the cross-correlation (and autocorrelation) sequence of a random
    process of length N. By default, there is no normalisation and the output
    sequence of the cross-correlation has a length 2*N+1.

    :param array x: first data array of length N
    :param array y: second data array of length N. If not specified, computes the
        autocorrelation.
    :param int maxlags: compute cross correlation between [-maxlags:maxlags]
        when maxlags is not specified, the range of lags is [-N+1:N-1].
    :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']

    The true cross-correlation sequence is

    .. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])

    However, in practice, only a finite segment of one realization of the
    infinite-length random process is available.

    The correlation is estimated using numpy.correlate(x,y,'full').
    Normalisation is handled by this function using the following cases:

        * 'biased': Biased estimate of the cross-correlation function
        * 'unbiased': Unbiased estimate of the cross-correlation function
        * 'coeff': Normalizes the sequence so the autocorrelations at zero
          lag is 1.0.

    :return:
        * a numpy.array containing the cross-correlation sequence (length 2*N-1)
        * lags vector

    .. note:: If x and y are not the same length, the shorter vector is
        zero-padded to the length of the longer vector.

    .. rubric:: Examples

    .. doctest::

        >>> from spectrum import xcorr
        >>> x = [1,2,3,4,5]
        >>> c, l = xcorr(x,x, maxlags=0, norm='biased')
        >>> c
        array([ 11.])

    .. seealso:: :func:`CORRELATION`.
    """
    N = len(x)
    if y is None:
        y = x
    assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed'

    if maxlags is None:
        maxlags = N - 1
        lags = np.arange(0, 2 * N - 1)
    else:
        # BUGFIX: maxlags == N previously passed the assert and then crashed
        # (or wrapped around) when indexing res; a lag can be at most N-1
        assert maxlags < N, 'maxlags must be less than data length'
        lags = np.arange(N - maxlags - 1, N + maxlags)

    res = np.correlate(x, y, mode='full')

    if norm == 'biased':
        res = res[lags] / float(N)     # do not use /= !! (fails on int input)
    elif norm == 'unbiased':
        # divide each lag by the number of overlapping samples N-|lag|
        res = res[lags] / (float(N) - abs(np.arange(-N + 1, N)))[lags]
    elif norm == 'coeff':
        rms = pylab_rms_flat(x) * pylab_rms_flat(y)
        res = res[lags] / rms / float(N)
    else:
        res = res[lags]

    lags = np.arange(-maxlags, maxlags + 1)
    return res, lags
def MINEIGVAL(T0, T, TOL):
    """Finds the minimum eigenvalue of a Hermitian Toeplitz matrix

    The classical power method is used together with a fast Toeplitz
    equation solution routine. The eigenvector is normalized to unit length.

    :param T0: Scalar corresponding to real matrix element t(0)
    :param T: Array of M complex matrix elements t(1),...,t(M) C from the left column of the Toeplitz matrix
    :param TOL: Real scalar tolerance; routine exits when [ EVAL(k) - EVAL(k-1) ]/EVAL(k-1) < TOL , where the index k denotes the iteration number.
    :return:
        * EVAL - Real scalar denoting the minimum eigenvalue of matrix
        * EVEC - Array of M complex eigenvector elements associated

    .. note::
        * External array T must be dimensioned >= M
        * array EVEC must be >= M+1
        * Internal array E must be dimensioned >= M+1 .
        * **dependencies**
            * :meth:`spectrum.toeplitz.HERMTOEP`
    """
    M = len(T)
    # seed values chosen so the convergence test below fails on the first
    # pass and the loop is always entered
    eigval = 10
    eigvalold = 1
    # start the iteration from the all-ones vector of length M+1
    eigvec = numpy.zeros(M+1, dtype=complex)
    for k in range(0,M+1):
        eigvec[k] = 1+0j
    it=0
    #print 'initialisation',T0, T, eigval, eigvec
    maxit = 15
    # inverse power iteration: each pass solves a Toeplitz system via
    # HERMTOEP and updates a Rayleigh-quotient-like eigenvalue estimate
    while abs(eigvalold-eigval)>TOL*eigvalold and it<maxit:
        it=it+1
        eigvalold = eigval
        #print 'iteration ',it, 'eigvalold=',eigvalold, 'eigval=', eigval
        eig = toeplitz.HERMTOEP(T0, T, eigvec)
        SUM = 0
        save =0.+0j
        for k in range(0, M+1):
            # SUM accumulates ||eig||^2, save accumulates <eig, eigvec>
            SUM = SUM + eig[k].real**2+eig[k].imag**2
            save = save +eig[k]*eigvec[k].conjugate()
        SUM=1./SUM
        eigval = save.real*SUM
        # rescale the iterate for the next pass
        for k in range(0,M+1):
            eigvec[k] = SUM * eig[k]
    if it==maxit:
        print('warning reached max number of iteration (%s)' % maxit)
    return eigval, eigvec
def morlet(lb, ub, n):
    r"""Generate the Morlet waveform

    The Morlet waveform is defined as follows:

    .. math:: w[x] = \cos{5x} \exp^{-x^2/2}

    :param lb: lower bound
    :param ub: upper bound
    :param int n: waveform data samples
    :raises ValueError: if `n` is not strictly positive

    .. plot::
        :include-source:
        :width: 80%

        from spectrum import morlet
        from pylab import plot
        plot(morlet(0,10,100))
    """
    if n <= 0:
        raise ValueError("n must be strictly positive")
    grid = numpy.linspace(lb, ub, n)
    # modulated Gaussian: cosine carrier under a Gaussian envelope
    return numpy.cos(5 * grid) * numpy.exp(-grid**2 / 2.)
def chirp(t, f0=0., t1=1., f1=100., form='linear', phase=0):
    r"""Evaluate a chirp signal at time t.

    A chirp signal is a frequency swept cosine wave.

    .. math:: a = \pi (f_1 - f_0) / t_1
    .. math:: b = 2 \pi f_0
    .. math:: y = \cos\left( \pi\frac{f_1-f_0}{t_1} t^2 + 2\pi f_0 t + \rm{phase} \right)

    :param array t: times at which to evaluate the chirp signal
    :param float f0: frequency at time t=0 (Hz)
    :param float t1: time t1
    :param float f1: frequency at time t=t1 (Hz)
    :param str form: shape of frequency sweep in ['linear', 'quadratic', 'logarithmic']
    :param float phase: phase shift at t=0, in degrees
    :raises ValueError: if `form` is not one of the supported sweeps

    The parameter **form** can be:

        * 'linear'      :math:`f(t) = (f_1-f_0)(t/t_1) + f_0`
        * 'quadratic'   :math:`f(t) = (f_1-f_0)(t/t_1)^2 + f_0`
        * 'logarithmic' :math:`f(t) = (f_1-f_0)^{(t/t_1)} + f_0`

    Example:

    .. plot::
        :include-source:
        :width: 80%

        from spectrum import chirp
        from pylab import linspace, plot
        t = linspace(0, 1, 1000)
        y = chirp(t, form='linear')
        plot(y)
        y = chirp(t, form='quadratic')
        plot(y, 'r')
    """
    valid_forms = ['linear', 'quadratic', 'logarithmic']
    if form not in valid_forms:
        raise ValueError("Invalid form. Valid form are %s"
            % valid_forms)
    t = numpy.array(t)
    # phase is given in degrees; convert to radians
    phase = 2. * pi * phase / 360.
    if form == "linear":
        sweep = pi * (f1 - f0) / t1
        carrier = 2. * pi * f0
        y = numpy.cos(sweep * t**2 + carrier * t + phase)
    elif form == "quadratic":
        sweep = (2 / 3. * pi * (f1 - f0) / t1 / t1)
        carrier = 2. * pi * f0
        y = numpy.cos(sweep * t**3 + carrier * t + phase)
    elif form == "logarithmic":
        sweep = 2. * pi * t1 / numpy.log(f1 - f0)
        carrier = 2. * pi * f0
        base = (f1 - f0) ** (1. / t1)
        y = numpy.cos(sweep * base**t + carrier * t + phase)
    return y
def mexican(lb, ub, n):
    r"""Generate the mexican hat wavelet

    The Mexican hat wavelet is:

    .. math:: w[x] = \frac{2}{\sqrt{3}\, \pi^{1/4}} (1 - x^2) \exp^{-x^2/2}

    :param lb: lower bound
    :param ub: upper bound
    :param int n: waveform data samples
    :return: the waveform
    :raises ValueError: if `n` is not strictly positive

    .. plot::
        :include-source:
        :width: 80%

        from spectrum import mexican
        from pylab import plot
        plot(mexican(0, 10, 100))
    """
    if n <= 0:
        raise ValueError("n must be strictly positive")
    samples = numpy.linspace(lb, ub, n)
    # normalisation constant 2 / (sqrt(3) * pi^(1/4))
    amplitude = 2. / (numpy.sqrt(3.) * pi**0.25)
    return (1. - samples**2.) * amplitude * numpy.exp(-samples**2 / 2.)
def ac2poly(data):
    """Convert autocorrelation sequence to prediction polynomial

    :param array data: input data (list or numpy.array)
    :return:
        * AR parameters
        * noise variance

    This is an alias to::

        a, e, c = LEVINSON(data)

    :Example:

    .. doctest::

        >>> from spectrum import ac2poly
        >>> from numpy import array
        >>> r = [5, -2, 1.01]
        >>> ar, e = ac2poly(r)
        >>> ar
        array([ 1.  ,  0.38, -0.05])
        >>> e
        4.1895000000000007
    """
    # the reflection coefficients returned by LEVINSON are not needed here
    ar_params, noise_variance, _reflection = LEVINSON(data)
    # prepend the leading 1 so the result is a true prediction polynomial
    ar_params = numpy.insert(ar_params, 0, 1)
    return ar_params, noise_variance
def rc2poly(kr, r0=None):
    """convert reflection coefficients to prediction filter polynomial

    :param kr: reflection coefficients
    :param r0: zero-lag autocorrelation (defaults to 0)
    :return: the prediction polynomial and the final prediction error
    """
    from .levinson import levup

    order = len(kr)                 # order of the prediction polynomial
    a = numpy.array([1, kr[0]])     # a is a true polynomial
    e = numpy.zeros(len(kr))
    e0 = 0 if r0 is None else r0
    # first prediction error from the zero-lag autocorrelation
    e[0] = e0 * (1. - numpy.conj(numpy.conjugate(kr[0]) * kr[0]))

    # continue the recursion for k=2,3,...,p, where p is the order of the
    # prediction polynomial
    for k in range(1, order):
        [a, e[k]] = levup(a, kr[k], e[k - 1])

    return a, e[-1]
def rc2ac(k, R0):
    """Convert reflection coefficients to autocorrelation sequence.

    :param k: reflection coefficients
    :param R0: zero-lag autocorrelation
    :returns: the autocorrelation sequence

    .. seealso:: :func:`ac2rc`, :func:`poly2rc`, :func:`ac2poly`, :func:`poly2rc`, :func:`rc2poly`.
    """
    a, efinal = rc2poly(k, R0)
    # rlevinson returns (R, u, kr, e); only the autocorrelation R is needed
    return rlevinson(a, efinal)[0]
def rc2is(k):
    """Convert reflection coefficients to inverse sine parameters.

    :param k: reflection coefficients
    :return: inverse sine parameters

    .. seealso:: :func:`is2rc`, :func:`rc2poly`, :func:`rc2ac`, :func:`rc2lar`.

    Reference: J.R. Deller, J.G. Proakis, J.H.L. Hansen, "Discrete-Time
    Processing of Speech Signals", Prentice Hall, Section 7.4.5.
    """
    assert numpy.isrealobj(k), 'Inverse sine parameters not defined for complex reflection coefficients.'
    if max(numpy.abs(k)) >= 1:
        raise ValueError('All reflection coefficients should have magnitude less than unity.')
    # map each coefficient through (2/pi) * asin(k)
    return (2 / numpy.pi) * numpy.arcsin(k)
def rc2lar(k):
    """Convert reflection coefficients to log area ratios.

    :param k: reflection coefficients
    :return: log area ratios

    The log area ratio is defined by G = log((1+k)/(1-k)) , where the K
    parameter is the reflection coefficient.

    .. seealso:: :func:`lar2rc`, :func:`rc2poly`, :func:`rc2ac`, :func:`rc2ic`.

    :References:
       [1] J. Makhoul, "Linear Prediction: A Tutorial Review," Proc. IEEE, Vol.63, No.4, pp.561-580, Apr 1975.
    """
    assert numpy.isrealobj(k), 'Log area ratios not defined for complex reflection coefficients.'
    if max(numpy.abs(k)) >= 1:
        raise ValueError('All reflection coefficients should have magnitude less than unity.')
    # atanh(x) = (1/2)*log((1+x)/(1-x)), so G = 2*atanh(k)
    return -2 * numpy.arctanh(-numpy.array(k))
def lar2rc(g):
    """Convert log area ratios to reflection coefficients.

    :param g: log area ratios
    :returns: the reflection coefficients

    .. seealso: :func:`rc2lar`, :func:`poly2rc`, :func:`ac2rc`, :func:`is2rc`.

    :References:
       [1] J. Makhoul, "Linear Prediction: A Tutorial Review," Proc. IEEE, Vol.63, No.4, pp.561-580, Apr 1975.
    """
    assert numpy.isrealobj(g), 'Log area ratios not defined for complex reflection coefficients.'
    # tanh is the inverse mapping of 2*atanh used in rc2lar
    return -numpy.tanh(-numpy.array(g) / 2)
def lsf2poly(lsf):
    """Convert line spectral frequencies to prediction filter coefficients

    returns a vector a containing the prediction filter coefficients from a vector lsf of line spectral frequencies.

    .. doctest::

        >>> from spectrum import lsf2poly
        >>> lsf = [0.7842 ,   1.5605  ,  1.8776 ,   1.8984,    2.3593]
        >>> a = lsf2poly(lsf)

        # array([  1.00000000e+00,   6.14837835e-01,   9.89884967e-01,
        # 9.31594056e-05,   3.13713832e-03,  -8.12002261e-03 ])

    .. seealso:: poly2lsf, rc2poly, ac2poly, rc2is
    """
    # Reference: A.M. Kondoz, "Digital Speech: Coding for Low Bit Rate
    # Communications Systems" John Wiley & Sons 1994, Chapter 4
    lsf = numpy.array(lsf)
    # line spectral frequencies must lie in [0, pi]
    if max(lsf) > numpy.pi or min(lsf) < 0:
        raise ValueError('Line spectral frequencies must be between 0 and pi.')

    order = len(lsf)   # model order

    # place unit-amplitude zeros at the LSF angles
    unit_zeros = numpy.exp(1.j * lsf)

    # even-indexed zeros belong to Q, odd-indexed to P; include the
    # conjugates so the resulting polynomials are real
    rootsQ = numpy.concatenate((unit_zeros[0::2], unit_zeros[0::2].conjugate()))
    rootsP = numpy.concatenate((unit_zeros[1::2], unit_zeros[1::2].conjugate()))
    Q = numpy.poly(rootsQ)
    P = numpy.poly(rootsP)

    # form the sum and difference filters by including the known roots at
    # z = 1 and z = -1
    if order % 2:
        # odd order: z = +1 and z = -1 are roots of the difference filter P1
        P1 = numpy.convolve(P, [1, 0, -1])
        Q1 = Q
    else:
        # even order: z = -1 is a root of the sum filter Q1 and z = +1 a
        # root of the difference filter P1
        P1 = numpy.convolve(P, [1, -1])
        Q1 = numpy.convolve(Q, [1, 1])

    # the prediction polynomial is the average of P1 and Q1 (last
    # coefficient dropped)
    a = .5 * (P1 + Q1)
    return a[0:-1:1]
def poly2lsf(a):
    """Prediction polynomial to line spectral frequencies.

    converts the prediction polynomial specified by A,
    into the corresponding line spectral frequencies, LSF.
    normalizes the prediction polynomial by A(1).

    :param a: prediction polynomial coefficients (real)
    :return: the sorted line spectral frequencies
    :raises ValueError: if any root of the polynomial lies on or outside
        the unit circle

    .. doctest::

        >>> from spectrum import poly2lsf
        >>> a = [1.0000, 0.6149, 0.9899, 0.0000 ,0.0031, -0.0082]
        >>> lsf = poly2lsf(a)
        >>> lsf = array([0.7842, 1.5605, 1.8776, 1.8984, 2.3593])

    .. seealso:: lsf2poly, poly2rc, poly2qc, rc2is
    """
    # Line spectral frequencies are not defined for complex polynomials.
    # Normalize the polynomial; a float copy is required so the in-place
    # division below also works for integer input
    a = numpy.array(a, dtype=float)
    if a[0] != 1:
        a /= a[0]

    if max(numpy.abs(numpy.roots(a))) >= 1.0:
        # BUGFIX: this previously called an undefined name `error(...)`
        raise ValueError('The polynomial must have all roots inside of the unit circle.')

    # Form the sum and difference filters
    p = len(a) - 1                               # leading one is not used
    a1 = numpy.concatenate((a, numpy.array([0])))
    a2 = a1[-1::-1]
    P1 = a1 - a2                                 # Difference filter
    Q1 = a1 + a2                                 # Sum Filter

    # If order is even, remove the known root at z = 1 for P1 and z = -1
    # for Q1; if odd, remove both roots from P1
    if p % 2:                                    # Odd order
        P, r = deconvolve(P1, [1, 0, -1])
        Q = Q1
    else:                                        # Even order
        P, r = deconvolve(P1, [1, -1])
        Q, r = deconvolve(Q1, [1, 1])

    rP = numpy.roots(P)
    rQ = numpy.roots(Q)

    # roots come in conjugate pairs; keep one angle of each pair
    aP = numpy.angle(rP[1::2])
    aQ = numpy.angle(rQ[1::2])

    lsf = sorted(numpy.concatenate((-aP, -aQ)))
    return lsf
def _swapsides(data):
"""todo is it really useful ?
Swap sides
.. doctest::
>>> from spectrum import swapsides
>>> x = [-2, -1, 1, 2]
>>> swapsides(x)
array([ 2, -2, -1])
"""
N = len(data)
return np.concatenate((data[N//2+1:], data[0:N//2])) |
def twosided_2_onesided(data):
    """Convert a two-sided PSD to a one-sided PSD

    In order to keep the power in the one-sided PSD the same
    as in the two-sided version, the one-sided values are twice
    as much as in the input data (except for the zero-lag and
    N-lag values).

    ::

        >>> twosided_2_onesided([10, 2,3,3,2,8])
        array([ 10.,   4.,   6.,   8.])
    """
    assert len(data) % 2 == 0
    half = len(data) // 2
    # double the interior bins to conserve total power
    psd = np.array(data[0:half + 1]) * 2.
    psd[0] = psd[0] / 2.     # zero-lag bin is not doubled
    psd[-1] = data[-1]       # N-lag bin keeps its original value
    return psd
def onesided_2_twosided(data):
    """Convert a one-sided PSD to a two-sided PSD

    In order to keep the power in the two-sided PSD the same
    as in the one-sided version, the two-sided values are 2 times
    lower than the input data (except for the zero-lag and N-lag
    values).

    ::

        >>> onesided_2_twosided([10, 4, 6, 8])
        array([ 10.,   2.,   3.,   3.,   2.,   8.])
    """
    # mirror the spectrum (excluding the first bin) and rotate so the
    # N-lag value ends up last, then halve to conserve total power
    mirrored = cshift(data[-1:0:-1], -1)
    psd = np.concatenate((data[0:-1], mirrored)) / 2.
    psd[0] *= 2.     # zero-lag bin is not halved
    psd[-1] *= 2.    # N-lag bin is not halved
    return psd
def twosided_2_centerdc(data):
    """Convert a two-sided PSD to a center-dc PSD"""
    half = len(data) // 2
    # rotate the upper half right by one and place it before the lower half
    newpsd = np.concatenate((cshift(data[half:], 1), data[0:half]))
    # first bin carries the Nyquist/N-lag value
    newpsd[0] = data[-1]
    return newpsd
def centerdc_2_twosided(data):
    """Convert a center-dc PSD to a twosided PSD"""
    half = len(data) // 2
    # upper half goes first; lower half is rotated left by one
    return np.concatenate((data[half:], cshift(data[0:half], -1)))
def _twosided_zerolag(data, zerolag):
    """Build a symmetric vector out of a strictly positive lag vector and
    its zero-lag value.

    .. doctest::

        >>> data = [3,2,1]
        >>> zerolag = 4
        >>> twosided_zerolag(data, zerolag)
        array([1, 2, 3, 4, 3, 2, 1])

    .. seealso:: Same behaviour as :func:`twosided_zerolag`
    """
    # prepend the zero-lag value, then mirror with the helper
    return twosided(np.insert(data, 0, zerolag))
def cshift(data, offset):
    """Circular shift to the right (within an array) by a given offset

    :param array data: input data (list or numpy.array)
    :param int offset: shift the array with the offset

    .. doctest::

        >>> from spectrum import cshift
        >>> cshift([0, 1, 2, 3, -2, -1], 2)
        array([-2, -1,  0,  1,  2,  3])
    """
    # deque.rotate is an efficient way to perform the circular shift,
    # compared to rebuilding the list by hand
    offset = int(offset) if isinstance(offset, float) else offset
    rotated = deque(data)
    rotated.rotate(offset)
    return np.array(rotated)
def data_cosine(N=1024, A=0.1, sampling=1024., freq=200):
    r"""Return a noisy cosine at a given frequency.

    :param N: the final data size
    :param A: the strength of the noise
    :param float sampling: sampling frequency of the input :attr:`data`.
    :param float freq: the frequency :math:`f_0` of the cosine.

    .. math:: x[t] = cos(2\pi t * f_0) + A w[t]

    where w[t] is a white noise of variance 1.

    .. doctest::

        >>> from spectrum import data_cosine
        >>> a = data_cosine(N=1024, sampling=1024, A=0.5, freq=100)
    """
    # time axis with N samples spaced by the sampling period
    times = arange(0, float(N) / sampling, 1. / sampling)
    noise = A * randn(times.size)
    return cos(2. * pi * times * freq) + noise
def data_two_freqs(N=200):
    """A simple test example with two close frequencies
    """
    samples = arange(N)
    # two deterministic tones plus a small white-noise perturbation
    tones = cos(0.257 * pi * samples) + sin(0.2 * pi * samples)
    return tones + 0.01 * randn(samples.size)
def spectrum_data(filename):
    """Simple utilities to retrieve data sets from

    :param filename: name of a data file shipped with the spectrum package
    :return: the full path of the data file
    :raises Exception: if the file cannot be located
    """
    import os
    import pkg_resources
    distribution = pkg_resources.get_distribution('spectrum')
    # first try develop mode
    share = os.sep.join([distribution.location, "spectrum", 'data'])
    fullpath = os.sep.join([share, filename])
    if os.path.exists(fullpath):
        return fullpath
    raise Exception('unknown file %s' % fullpath)
def plot(self, **kargs):
    """Plot the data set, using the sampling information to set the x-axis
    correctly."""
    from pylab import grid, linspace, plot, xlabel, ylabel
    # time axis built from the sampling interval dt and the sample count N
    time_axis = linspace(1 * self.dt, self.N * self.dt, self.N)
    plot(time_axis, self.data, **kargs)
    xlabel('Time')
    ylabel('Amplitude')
    grid(True)
def readwav(filename):
    """Read a WAV file and returns the data and sample rate

    :param filename: path to the WAV file to read

    ::

        from spectrum.io import readwav
        readwav(filename)
    """
    from scipy.io import wavfile
    # scipy returns (rate, data); this function returns (data, rate)
    samplerate, signal = wavfile.read(filename)
    return signal, samplerate
def pmtm(x, NW=None, k=None, NFFT=None, e=None, v=None, method='adapt', show=False):
    """Multitapering spectral estimation

    :param array x: the data
    :param float NW: The time half bandwidth parameter (typical values are
        2.5,3,3.5,4). Must be provided otherwise the tapering windows and
        eigen values (outputs of dpss) must be provided
    :param int k: uses the first k Slepian sequences. If *k* is not provided,
        *k* is set to *NW*2*.
    :param NW:
    :param e: the window concentrations (eigenvalues)
    :param v: the matrix containing the tapering windows
    :param str method: set how the eigenvalues are used. Must be
        in ['unity', 'adapt', 'eigen']
    :param bool show: plot results
    :return: Sk (complex), weights, eigenvalues

    Usually in spectral estimation the mean to reduce bias is to use tapering
    window. In order to reduce variance we need to average different spectrum.
    The problem is that we have only one set of data. Thus we need to
    decompose a set into several segments. Such method are well-known: simple
    daniell's periodogram, Welch's method and so on. The drawback of such
    methods is a loss of resolution since the segments used to compute the
    spectrum are smaller than the data set.

    The interest of multitapering method is to keep a good resolution while
    reducing bias and variance.

    How does it work? First we compute different simple periodogram with the
    whole data set (to keep good resolution) but each periodogram is computed
    with a different tapering window. Then, we average all these spectra.
    To avoid redundancy and bias due to the tapers mtm use special tapers.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import data_cosine, dpss, pmtm

        data = data_cosine(N=2048, A=0.1, sampling=1024, freq=200)
        # If you already have the DPSS windows
        [tapers, eigen] = dpss(2048, 2.5, 4)
        res = pmtm(data, e=eigen, v=tapers, show=False)
        # You do not need to compute the DPSS before end
        res = pmtm(data, NW=2.5, show=False)
        res = pmtm(data, NW=2.5, k=4, show=True)

    .. versionchanged:: 0.6.2

        APN modified method to return each Sk as complex values, the eigenvalues
        and the weights
    """
    assert method in ['adapt','eigen','unity']

    N = len(x)

    # if dpss not provided, compute them
    if e is None and v is None:
        if NW is not None:
            [tapers, eigenvalues] = dpss(N, NW, k=k)
        else:
            raise ValueError("NW must be provided (e.g. 2.5, 3, 3.5, 4")
    elif e is not None and v is not None:
        # both concentrations and windows supplied by the caller
        eigenvalues = e[:]
        tapers = v[:]
    else:
        raise ValueError("if e provided, v must be provided as well and viceversa.")
    nwin = len(eigenvalues) # length of the eigen values vector to be used later

    # set the NFFT
    if NFFT==None:
        NFFT = max(256, 2**nextpow2(N))

    # one tapered periodogram per Slepian window; each row of
    # tapers.transpose() is one taper applied elementwise to x
    Sk_complex = np.fft.fft(np.multiply(tapers.transpose(), x), NFFT)
    Sk = abs(Sk_complex)**2

    # if NFFT is smaller than N, the data is cut; otherwise zero-padded
    # compute
    if method in ['eigen', 'unity']:
        if method == 'unity':
            # all tapers contribute equally
            weights = np.ones((nwin, 1))
        elif method == 'eigen':
            # The S_k spectrum can be weighted by the eigenvalues, as in Park et al.
            weights = np.array([_x/float(i+1) for i,_x in enumerate(eigenvalues)])
            weights = weights.reshape(nwin,1)
    elif method == 'adapt':
        # This version uses the equations from [2] (P&W pp 368-370).

        # Wrap the data modulo nfft if N > nfft
        sig2 = np.dot(x, x) / float(N)
        Sk = abs(np.fft.fft(np.multiply(tapers.transpose(), x), NFFT))**2
        Sk = Sk.transpose()
        S = (Sk[:,0] + Sk[:,1]) / 2    # Initial spectrum estimate
        S = S.reshape(NFFT, 1)
        Stemp = np.zeros((NFFT,1))
        S1 = np.zeros((NFFT,1))

        # Set tolerance for acceptance of spectral estimate:
        tol = 0.0005 * sig2 / float(NFFT)
        i = 0
        # broadband bias term for each taper: sigma^2 * (1 - lambda_k)
        a = sig2 * (1 - eigenvalues)

        # converges very quickly but for safety; set i<100
        while sum(np.abs(S-S1))/NFFT > tol and i<100:
            i = i + 1
            # calculate weights
            b1 = np.multiply(S, np.ones((1,nwin)))
            b2 = np.multiply(S,eigenvalues.transpose()) + np.ones((NFFT,1))*a.transpose()
            b = b1/b2

            # calculate new spectral estimate
            wk=(b**2)*(np.ones((NFFT,1))*eigenvalues.transpose())
            S1 = sum(wk.transpose()*Sk.transpose())/ sum(wk.transpose())
            S1 = S1.reshape(NFFT, 1)
            # NOTE(review): the three assignments below leave S holding the
            # new estimate and S1 the previous one for the convergence test
            Stemp = S1
            S1 = S
            S = Stemp # swap S and S1
        weights=wk

    if show is True:
        from pylab import semilogy
        # NOTE(review): in 'adapt' mode Sk was transposed above, hence the
        # different averaging axis per method — verify against the plots
        if method == "adapt":
            Sk = np.mean(Sk * weights, axis=1)
        else:
            Sk = np.mean(Sk * weights, axis=0)
        semilogy(Sk)

    return Sk_complex, weights, eigenvalues
def dpss(N, NW=None, k=None):
    r"""Discrete prolate spheroidal (Slepian) sequences

    Calculation of the Discrete Prolate Spheroidal Sequences also known as the
    slepian sequences, and the corresponding eigenvalues.

    :param int N: desired window length
    :param float NW: The time half bandwidth parameter (typical values are
        2.5,3,3.5,4).
    :param int k: returns the first k Slepian sequences. If *k* is not
        provided, *k* is set to *NW*2*.
    :return:
        * tapers, a matrix of tapering windows. Matrix is a N by *k* (k
          is the number of windows)
        * eigen, a vector of eigenvalues of length *k*

    The discrete prolate spheroidal or Slepian sequences derive from the following
    time-frequency concentration problem. For all finite-energy sequences index
    limited to some set , which sequence maximizes the following ratio:

    .. math::

        \lambda = \frac{\int_{-W}^{W}\left| X(f) \right|^2 df}
            {\int_{-F_s/2}^{F_s/2}\left| X(f) \right|^2 df}

    where :math:`F_s` is the sampling frequency and :math:`|W| < F_s/2`.
    This ratio determines which index-limited sequence has the largest proportion of its
    energy in the band :math:`[-W,W]` with :math:`0 < \lambda < 1`.
    The sequence maximizing the ratio is the first
    discrete prolate spheroidal or Slepian sequence. The second Slepian sequence
    maximizes the ratio and is orthogonal to the first Slepian sequence. The third
    Slepian sequence maximizes the ratio of integrals and is orthogonal to both
    the first and second Slepian sequences and so on.

    .. note:: Note about the implementation. Since the slepian generation is
        computationally expensive, we use a C implementation based on the C
        code written by Lees as published in:

        Lees, J. M. and J. Park (1995): Multiple-taper spectral analysis: A stand-alone
        C-subroutine: Computers & Geology: 21, 199-236.

        However, the original C code has been trimmed. Indeed, we only require the
        multitap function (that depends on jtridib, jtinvit functions only).

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import *
        from pylab import *

        N = 512
        [w, eigens] = dpss(N, 2.5, 4)
        plot(w)
        title('Slepian Sequences N=%s, NW=2.5' % N)
        axis([0, N, -0.15, 0.15])
        legend(['1st window','2nd window','3rd window','4th window'])

    Windows are normalised:

    .. math:: \sum_k h_k h_k = 1

    :references: [Percival]_

        Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
        uncertainty V: The discrete case. Bell System Technical Journal,
        Volume 57 (1978), 1371430

    .. note:: the C code to create the slepian windows is extracted from original C code
        from Lees and Park (1995) and uses the conventions of Percival and Walden (1993).
        Functions that are not used here were removed.
    """
    assert NW < N/2 , "NW ({}) must be stricly less than N/2 ({}/2)".format(NW, N)
    if k is None:
        # default number of windows, at least 1
        k = min(round(2*NW),N)
        k = int(max(k,1))

    from numpy import dot, zeros, arange, sqrt
    mtspeclib.multitap.restype = None

    # output buffers filled in-place by the C routine
    lam = zeros(k, dtype=float)
    tapers = zeros(k*N, dtype=float)
    tapsum = zeros(k, dtype=float)

    # call into the compiled Lees/Park multitap routine via ctypes
    res = mtspeclib.multitap(
        c_int(N),
        c_int(k),
        lam.ctypes.data_as(c_void_p),
        c_float(NW),
        tapers.ctypes.data_as(c_void_p),
        tapsum.ctypes.data_as(c_void_p),
        )

    # normalisation by sqtr(N). It is required to have normalised windows
    tapers = tapers.reshape(k,N).transpose() / sqrt(N)

    for i in range(k):
        # By convention (Percival and Walden, 1993 pg 379)
        # * symmetric tapers (k=0,2,4,...) should have a positive average.
        # * antisymmetric tapers should begin with a positive lobe
        if i%2 == 0:
            if tapsum[i]<0:
                tapsum[i] *= -1
                tapers[:,i] *= -1
        else:
            if tapers[0,i] < 0:
                tapsum[i] *= -1
                tapers[:,i] *= -1

    # Now find the eigenvalues of the original
    # Use the autocovariance sequence technique from Percival and Walden, 1993
    # pg 390 to get the eigenvalues more precisely (same as matlab output)

    # The values returned in lam are not exacly the same as in the following methods.
    acvs = _autocov(tapers.transpose(), debias=False) * N
    nidx = arange(N)
    W = float(NW)/N
    r = 4*W*np.sinc(2*W*nidx)
    r[0] = 2*W
    eigvals = dot(acvs, r)

    #return (tapers, lam)
    return [tapers, eigvals]
def _other_dpss_method(N, NW, Kmax):
    """Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
    for a given frequency-spacing multiple NW and sequence length N.

    See dpss function that is the official version. This version is indepedant
    of the C code and relies on Scipy function. However, it is slower by a factor 3

    Tridiagonal form of DPSS calculation from:

    :param N: length of each sequence (number of samples)
    :param NW: time half-bandwidth product
    :param Kmax: number of sequences to return (orders 0 to Kmax-1)
    :return: tuple ``(dpss, eigvals)`` where ``dpss`` is a (Kmax, N) array of
        tapers and ``eigvals`` their spectral concentration values
    """
    # here we want to set up an optimization problem to find a sequence
    # whose energy is maximally concentrated within band [-W,W].
    # Thus, the measure lambda(T,W) is the ratio between the energy within
    # that band, and the total energy. This leads to the eigen-system
    # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
    # eigenvalue is the sequence with maximally concentrated energy. The
    # collection of eigenvectors of this system are called Slepian sequences,
    # or discrete prolate spheroidal sequences (DPSS). Only the first K,
    # K = 2NW/dt orders of DPSS will exhibit good spectral concentration
    # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
    # Here I set up an alternative symmetric tri-diagonal eigenvalue problem
    # such that
    # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
    # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
    # and the first off-diangonal = t(N-t)/2, t=[1,2,...,N-1]
    # [see Percival and Walden, 1993]
    from scipy import linalg as la
    Kmax = int(Kmax)
    W = float(NW)/N
    # banded-matrix storage: row 0 holds the off-diagonal, row 1 the diagonal
    ab = np.zeros((2,N), 'd')
    nidx = np.arange(N)
    ab[0,1:] = nidx[1:]*(N-nidx[1:])/2.
    ab[1] = ((N-1-2*nidx)/2.)**2 * np.cos(2*np.pi*W)
    # only calculate the highest Kmax-1 eigenvectors
    l,v = la.eig_banded(ab, select='i', select_range=(N-Kmax, N-1))
    # eig_banded returns eigenvalues in ascending order; reverse so that
    # order 0 (largest concentration) comes first
    dpss = v.transpose()[::-1]

    # By convention (Percival and Walden, 1993 pg 379)
    # * symmetric tapers (k=0,2,4,...) should have a positive average.
    # * antisymmetric tapers should begin with a positive lobe
    fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
    for i, f in enumerate(fix_symmetric):
        if f:
            dpss[2*i] *= -1
    fix_skew = (dpss[1::2,1] < 0)
    for i, f in enumerate(fix_skew):
        if f:
            dpss[2*i+1] *= -1

    # Now find the eigenvalues of the original
    # Use the autocovariance sequence technique from Percival and Walden, 1993
    # pg 390
    # XXX : why debias false? it's all messed up o.w., even with means
    # on the order of 1e-2
    acvs = _autocov(dpss, debias=False) * N
    r = 4*W*np.sinc(2*W*nidx)
    r[0] = 2*W
    eigvals = np.dot(acvs, r)

    return dpss, eigvals
def _autocov(s, **kwargs):
    """Return the autocovariance of signal *s* at all lags.

    Adheres to the definition::

        sxx[k] = E{S[n]S[n+k]} = cov{S[n], S[n+k]}

    where ``E{}`` is the expectation operator and S is a zero mean process.
    Keyword arguments (``axis``, ``all_lags``, ``debias``) are forwarded
    to :func:`_crosscov`.
    """
    # pull out debias so the mean is removed at most once
    remove_mean = kwargs.pop('debias', True)
    time_axis = kwargs.get('axis', -1)
    if remove_mean:
        s = _remove_bias(s, time_axis)
    # the signal is already demeaned here; tell _crosscov not to redo it
    kwargs['debias'] = False
    return _crosscov(s, s, **kwargs)
def _crosscov(x, y, axis=-1, all_lags=False, debias=True):
    """Returns the crosscovariance sequence between two ndarrays.

    This is performed by calling fftconvolve on x, y[::-1]

    Parameters
    ----------
    x : ndarray
    y : ndarray
    axis : time axis
    all_lags : {True/False}
        whether to return all nonzero lags, or to clip the length of s_xy
        to be the length of x and y. If False, then the zero lag covariance
        is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2
    debias : {True/False}
        Always removes an estimate of the mean along the axis, unless
        told not to.

    Notes
    -----
    cross covariance is defined as
    sxy[k] := E{X[t]*Y[t+k]}, where X,Y are zero mean random processes
    """
    if x.shape[axis] != y.shape[axis]:
        raise ValueError(
            'crosscov() only works on same-length sequences for now'
            )
    if debias:
        x = _remove_bias(x, axis)
        y = _remove_bias(y, axis)

    # build an index that reverses y along the time axis only
    indexer = [slice(dim) for dim in x.shape]
    indexer[axis] = slice(None, None, -1)
    sxy = _fftconvolve(x, y[tuple(indexer)], axis=axis, mode='full')

    npts = x.shape[axis]
    sxy /= npts
    if all_lags:
        return sxy

    # keep only lags 0..N-1 (zero lag at index 0)
    indexer[axis] = slice(npts - 1, 2 * npts - 1)
    return sxy[tuple(indexer)]
def _crosscorr(x, y, **kwargs):
    """
    Returns the crosscorrelation sequence between two ndarrays.

    This is performed by calling fftconvolve on x, y[::-1]

    Parameters
    ----------
    x : ndarray
    y : ndarray
    axis : time axis
    all_lags : {True/False}
        whether to return all nonzero lags, or to clip the length of r_xy
        to be the length of x and y. If False, then the zero lag correlation
        is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2

    Notes
    -----
    cross correlation is defined as
    rxy[k] := E{X[t]*Y[t+k]}/(E{X*X}E{Y*Y})**.5,
    where X,Y are zero mean random processes. It is the noramlized cross
    covariance.
    """
    covariance = _crosscov(x, y, **kwargs)
    # normalize by the product of the (biased) standard deviations
    return covariance / (np.std(x) * np.std(y))
def _remove_bias(x, axis):
"Subtracts an estimate of the mean from signal x at axis"
padded_slice = [slice(d) for d in x.shape]
padded_slice[axis] = np.newaxis
mn = np.mean(x, axis=axis)
return x - mn[tuple(padded_slice)] |
def get_docstring_and_rest(filename):
    """Separate `filename` content between docstring and the rest

    Strongly inspired from ast.get_docstring.

    Returns
    -------
    docstring: str
        docstring of `filename`
    rest: str
        `filename` content without the docstring
    """
    with open(filename) as fid:
        content = fid.read()

    node = ast.parse(content)
    if not isinstance(node, ast.Module):
        raise TypeError("This function only supports modules. "
                        "You provided {0}".format(node.__class__.__name__))

    body = node.body
    has_docstring = (body and isinstance(body[0], ast.Expr) and
                     isinstance(body[0].value, ast.Str))
    if not has_docstring:
        raise ValueError(('Could not find docstring in file "{0}". '
                          'A docstring is required by sphinx-gallery')
                         .format(filename))

    docstring_node = body[0]
    docstring = docstring_node.value.s
    # everything after the line reported for the docstring node
    # ('maxsplit' is passed positionally for python2 compatibility)
    rest = content.split('\n', docstring_node.lineno)[-1]
    return docstring, rest
def split_code_and_text_blocks(source_file):
    """Return list with source file separated into code and text blocks.

    Returns
    -------
    blocks : list of (label, content)
        List where each element is a tuple with the label ('text' or 'code'),
        and content string of block.
    """
    docstring, rest_of_content = get_docstring_and_rest(source_file)
    blocks = [('text', docstring)]

    # a text block is announced by a line of 20+ '#' characters followed
    # by any number of commented lines
    header_pattern = re.compile(
        r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
        flags=re.M)
    strip_hash = re.compile('^#', flags=re.M)

    cursor = 0
    for match in re.finditer(header_pattern, rest_of_content):
        start, end = match.span()
        code_chunk = rest_of_content[cursor:start]
        # drop the leading '#' of each commented line and dedent
        text_chunk = dedent(re.sub(strip_hash, '',
                                   match.group('text_content')))
        if code_chunk.strip():
            blocks.append(('code', code_chunk))
        if text_chunk.strip():
            blocks.append(('text', text_chunk))
        cursor = end

    tail = rest_of_content[cursor:]
    if tail.strip():
        blocks.append(('code', tail))

    return blocks
def codestr2rst(codestr, lang='python'):
    """Return reStructuredText code block from code string"""
    # a code-block directive followed by the 4-space indented source
    header = "\n.. code-block:: {0}\n\n".format(lang)
    return header + indent(codestr, ' ' * 4)
def extract_intro(filename):
    """ Extract the first paragraph of module-level docstring. max:95 char"""
    docstring, _ = get_docstring_and_rest(filename)

    # lstrip is just in case docstring has a '\n\n' at the beginning
    paragraphs = docstring.lstrip().split('\n\n')
    if len(paragraphs) <= 1:
        raise ValueError(
            "Example docstring should have a header for the example title "
            "and at least a paragraph explaining what the example is about. "
            "Please check the example file:\n {}\n".format(filename))

    # paragraphs[0] is the title; collapse the first real paragraph onto
    # a single line and clip it to 95 characters
    first_paragraph = re.sub('\n', ' ', paragraphs[1])
    if len(first_paragraph) > 95:
        first_paragraph = first_paragraph[:95] + '...'
    return first_paragraph
def get_md5sum(src_file):
    """Returns md5sum of file"""
    with open(src_file, 'r') as fid:
        content = fid.read()
    # data needs to be encoded in python3 before hashing
    if sys.version_info[0] == 3:
        content = content.encode('utf-8')
    return hashlib.md5(content).hexdigest()
def check_md5sum_change(src_file):
    """Returns True if src_file has a different md5sum"""
    src_md5 = get_md5sum(src_file)
    src_md5_file = src_file + '.md5'

    changed = True
    if os.path.exists(src_md5_file):
        with open(src_md5_file, 'r') as fid:
            if fid.read() == src_md5:
                changed = False

    if changed:
        # record the new checksum so the next build sees it as unchanged
        with open(src_md5_file, 'w') as fid:
            fid.write(src_md5)

    return changed
def _plots_are_current(src_file, image_file):
    """Test existence of image file and no change in md5sum of
    example"""
    first_image_file = image_file.format(1)
    image_exists = os.path.exists(first_image_file)
    # note: the checksum is always evaluated (not short-circuited) because
    # check_md5sum_change also refreshes the stored .md5 file as a side effect
    checksum_changed = check_md5sum_change(src_file)
    return image_exists and not checksum_changed
def save_figures(image_path, fig_count, gallery_conf):
    """Save all open matplotlib figures of the example code-block

    Parameters
    ----------
    image_path : str
        Path where plots are saved (format string which accepts figure number)
    fig_count : int
        Previous figure number count. Figure number add from this number
    gallery_conf : dict
        Gallery configuration; only 'find_mayavi_figures' is read here.

    Returns
    -------
    list of strings containing the full path to each figure
    """
    figure_list = []

    # iterate over every figure currently registered with matplotlib
    fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
    for fig_mngr in fig_managers:
        # Set the fig_num figure as the current figure as we can't
        # save a figure that's not the current figure.
        fig = plt.figure(fig_mngr.num)
        kwargs = {}
        to_rgba = matplotlib.colors.colorConverter.to_rgba
        # only forward face/edge colors that differ from the rcParams
        # defaults, so default-styled figures are saved without overrides
        for attr in ['facecolor', 'edgecolor']:
            fig_attr = getattr(fig, 'get_' + attr)()
            default_attr = matplotlib.rcParams['figure.' + attr]
            if to_rgba(fig_attr) != to_rgba(default_attr):
                kwargs[attr] = fig_attr

        current_fig = image_path.format(fig_count + fig_mngr.num)
        fig.savefig(current_fig, **kwargs)
        figure_list.append(current_fig)

    if gallery_conf.get('find_mayavi_figures', False):
        from mayavi import mlab
        e = mlab.get_engine()
        # mayavi figures are numbered after the matplotlib ones
        last_matplotlib_fig_num = len(figure_list)
        total_fig_num = last_matplotlib_fig_num + len(e.scenes)
        mayavi_fig_nums = range(last_matplotlib_fig_num, total_fig_num)

        for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):
            current_fig = image_path.format(mayavi_fig_num)
            mlab.savefig(current_fig, figure=scene)
            # make sure the image is not too large
            scale_image(current_fig, current_fig, 850, 999)
            figure_list.append(current_fig)
        mlab.close(all=True)

    return figure_list
def scale_image(in_fname, out_fname, max_width, max_height):
    """Scales an image with the same aspect ratio centered in an
    image with a given max_width and max_height
    if in_fname == out_fname the image can only be scaled down
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = max_width / float(width_in)
    scale_h = max_height / float(height_in)

    # pick the scale that keeps the image inside the bounding box
    if height_in * scale_w <= max_height:
        scale = scale_w
    else:
        scale = scale_h

    # upscaling in place is a no-op: only scale down when overwriting
    if scale >= 1.0 and in_fname == out_fname:
        return

    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))

    # resize the image
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 in favour of
    # Image.Resampling.LANCZOS — confirm the Pillow version in use
    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)

    # insert centered on a white canvas of the requested size
    thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255))
    pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
    thumb.paste(img, pos_insert)

    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the \
generated images')
def save_thumbnail(image_path, base_image_name, gallery_conf):
    """Save the thumbnail image"""
    first_image_file = image_path.format(1)
    thumb_dir = os.path.join(os.path.dirname(first_image_file), 'thumb')
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)

    thumb_file = os.path.join(thumb_dir,
                              'sphx_glr_%s_thumb.png' % base_image_name)

    if os.path.exists(first_image_file):
        # shrink the first figure of the example to thumbnail size
        scale_image(first_image_file, thumb_file, 400, 280)
    elif not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        fallback = os.path.join(glr_path_static(), 'no_image.png')
        fallback = gallery_conf.get("default_thumb_file", fallback)
        scale_image(fallback, thumb_file, 200, 140)
def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs):
    """Generate the gallery reStructuredText for an example directory

    Parameters
    ----------
    src_dir : str
        Directory holding the example scripts and a mandatory README.txt.
    target_dir : str
        Output directory (created if missing).
    gallery_conf : dict
        Gallery configuration, forwarded to the per-file generators.
    seen_backrefs : set
        Backreferences already written, forwarded to write_backreferences.

    Returns
    -------
    str
        The reStructuredText index for this directory ("" when skipped).
    """
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        print(80 * '_')
        print('Example directory %s does not have a README.txt file' %
              src_dir)
        print('Skipping this directory')
        print(80 * '_')
        return ""  # because string is an expected return type

    # fix: use a context manager so the README handle is not leaked
    with open(os.path.join(src_dir, 'README.txt')) as fid:
        fhindex = fid.read()
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    sorted_listdir = [fname for fname in sorted(os.listdir(src_dir))
                      if fname.endswith('.py')]
    entries_text = []
    for fname in sorted_listdir:
        amount_of_code = generate_file_rst(fname, target_dir, src_dir,
                                           gallery_conf)
        new_fname = os.path.join(src_dir, fname)
        intro = extract_intro(new_fname)
        write_backreferences(seen_backrefs, gallery_conf,
                             target_dir, fname, intro)

        this_entry = _thumbnail_div(target_dir, fname, intro) + """

.. toctree::
   :hidden:

   /%s/%s\n""" % (target_dir, fname[:-3])
        entries_text.append((amount_of_code, this_entry))

    # sort to have the smallest entries in the beginning
    entries_text.sort()

    for _, entry_text in entries_text:
        fhindex += entry_text

    # clear at the end of the section
    fhindex += """.. raw:: html\n
    <div style='clear:both'></div>\n\n"""
    return fhindex
def execute_script(code_block, example_globals, image_path, fig_count,
                   src_file, gallery_conf):
    """Executes the code block of the example file

    Runs ``code_block`` with ``exec`` inside the example's directory,
    capturing stdout and any matplotlib figures it creates.

    Returns a tuple ``(code_output, time_elapsed, fig_count)`` where
    ``code_output`` is the rst for images + captured stdout,
    ``time_elapsed`` the execution time in seconds and ``fig_count``
    the updated figure counter.
    """
    time_elapsed = 0
    stdout = ''

    # We need to execute the code
    print('plotting code blocks in %s' % src_file)

    plt.close('all')
    cwd = os.getcwd()
    # Redirect output to stdout and
    orig_stdout = sys.stdout

    try:
        # First cd in the original example dir, so that any file
        # created by the example get created in this directory
        os.chdir(os.path.dirname(src_file))
        my_buffer = StringIO()
        # Tee duplicates writes to the real stdout and to the buffer
        my_stdout = Tee(sys.stdout, my_buffer)
        sys.stdout = my_stdout

        t_start = time()
        exec(code_block, example_globals)
        time_elapsed = time() - t_start

        sys.stdout = orig_stdout

        my_stdout = my_buffer.getvalue().strip().expandtabs()
        if my_stdout:
            stdout = CODE_OUTPUT.format(indent(my_stdout, ' ' * 4))
        os.chdir(cwd)
        figure_list = save_figures(image_path, fig_count, gallery_conf)

        # Depending on whether we have one or more figures, we're using a
        # horizontal list or a single rst call to 'image'.
        image_list = ""
        if len(figure_list) == 1:
            figure_name = figure_list[0]
            image_list = SINGLE_IMAGE % figure_name.lstrip('/')
        elif len(figure_list) > 1:
            image_list = HLIST_HEADER
            for figure_name in figure_list:
                image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    except Exception:
        # the example failed: embed the traceback in the rst output instead
        formatted_exception = traceback.format_exc()

        print(80 * '_')
        print('%s is not compiling:' % src_file)
        print(formatted_exception)
        print(80 * '_')

        figure_list = []
        image_list = codestr2rst(formatted_exception, lang='pytb')

        # Overrides the output thumbnail in the gallery for easy identification
        broken_img = os.path.join(glr_path_static(), 'broken_example.png')
        shutil.copyfile(broken_img, os.path.join(cwd, image_path.format(1)))
        fig_count += 1  # raise count to avoid overwriting image

        # Breaks build on first example error
        if gallery_conf['abort_on_example_error']:
            raise
    finally:
        # always restore the working directory and stdout
        os.chdir(cwd)
        sys.stdout = orig_stdout

    print(" - time elapsed : %.2g sec" % time_elapsed)
    code_output = "\n{0}\n\n{1}\n\n".format(image_list, stdout)

    return code_output, time_elapsed, fig_count + len(figure_list)
def generate_file_rst(fname, target_dir, src_dir, gallery_conf):
    """ Generate the rst file for a given example.

    Copies the example to ``target_dir``, optionally executes it (when it
    matches ``filename_pattern`` and ``plot_gallery`` is enabled), writes the
    corresponding .rst page and notebook, and refreshes the thumbnail.

    Returns the amout of code (in characters) of the corresponding
    files.
    """
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    image_dir = os.path.join(target_dir, 'images')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)

    base_image_name = os.path.splitext(fname)[0]
    image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png'
    image_path = os.path.join(image_dir, image_fname)

    script_blocks = split_code_and_text_blocks(example_file)

    # total size of the code blocks, used by the caller to sort entries
    amount_of_code = sum([len(bcontent)
                          for blabel, bcontent in script_blocks
                          if blabel == 'code'])

    # skip regeneration when the source is unchanged and images exist
    if _plots_are_current(example_file, image_path):
        return amount_of_code

    time_elapsed = 0

    ref_fname = example_file.replace(os.path.sep, '_')
    example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname)
    example_nb = Notebook(fname, target_dir)

    filename_pattern = gallery_conf.get('filename_pattern')
    if re.search(filename_pattern, src_file) and gallery_conf['plot_gallery']:
        # A lot of examples contains 'print(__doc__)' for example in
        # scikit-learn so that running the example prints some useful
        # information. Because the docstring has been separated from
        # the code blocks in sphinx-gallery, __doc__ is actually
        # __builtin__.__doc__ in the execution context and we do not
        # want to print it
        example_globals = {'__doc__': ''}
        fig_count = 0
        # A simple example has two blocks: one for the
        # example introduction/explanation and one for the code
        is_example_notebook_like = len(script_blocks) > 2
        for blabel, bcontent in script_blocks:
            if blabel == 'code':
                code_output, rtime, fig_count = execute_script(bcontent,
                                                               example_globals,
                                                               image_path,
                                                               fig_count,
                                                               src_file,
                                                               gallery_conf)

                time_elapsed += rtime
                example_nb.add_code_cell(bcontent)

                # notebook-like examples show code before its output;
                # simple examples show the output (figures) first
                if is_example_notebook_like:
                    example_rst += codestr2rst(bcontent) + '\n'
                    example_rst += code_output
                else:
                    example_rst += code_output
                    example_rst += codestr2rst(bcontent) + '\n'
            else:
                example_rst += text2string(bcontent) + '\n'
                example_nb.add_markdown_cell(text2string(bcontent))
    else:
        # example is not executed: only render its code and text
        for blabel, bcontent in script_blocks:
            if blabel == 'code':
                example_rst += codestr2rst(bcontent) + '\n'
                example_nb.add_code_cell(bcontent)
            else:
                example_rst += bcontent + '\n'
                example_nb.add_markdown_cell(text2string(bcontent))

    save_thumbnail(image_path, base_image_name, gallery_conf)

    time_m, time_s = divmod(time_elapsed, 60)
    example_nb.save_file()
    with open(os.path.join(target_dir, base_image_name + '.rst'), 'w') as f:
        example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname,
                                            example_nb.file_name)
        f.write(example_rst)

    return amount_of_code
def _arburg2(X, order):
"""This version is 10 times faster than arburg, but the output rho is not correct.
returns [1 a0,a1, an-1]
"""
x = np.array(X)
N = len(x)
if order <= 0.:
raise ValueError("order must be > 0")
# Initialisation
# ------ rho, den
rho = sum(abs(x)**2.) / N # Eq 8.21 [Marple]_
den = rho * 2. * N
# ------ backward and forward errors
ef = np.zeros(N, dtype=complex)
eb = np.zeros(N, dtype=complex)
for j in range(0, N): #eq 8.11
ef[j] = x[j]
eb[j] = x[j]
# AR order to be stored
a = np.zeros(1, dtype=complex)
a[0] = 1
# ---- rflection coeff to be stored
ref = np.zeros(order, dtype=complex)
temp = 1.
E = np.zeros(order+1)
E[0] = rho
for m in range(0, order):
#print m
# Calculate the next order reflection (parcor) coefficient
efp = ef[1:]
ebp = eb[0:-1]
#print efp, ebp
num = -2.* np.dot(ebp.conj().transpose(), efp)
den = np.dot(efp.conj().transpose(), efp)
den += np.dot(ebp, ebp.conj().transpose())
ref[m] = num / den
# Update the forward and backward prediction errors
ef = efp + ref[m] * ebp
eb = ebp + ref[m].conj().transpose() * efp
# Update the AR coeff.
a.resize(len(a)+1)
a = a + ref[m] * np.flipud(a).conjugate()
# Update the prediction error
E[m+1] = (1 - ref[m].conj().transpose()*ref[m]) * E[m]
#print 'REF', ref, num, den
return a, E[-1], ref |
def arburg(X, order, criteria=None):
    r"""Estimate the complex autoregressive parameters by the Burg algorithm.

    .. math:: x(n) = \sqrt{(v}) e(n) + \sum_{k=1}^{P+1} a(k) x(n-k)

    :param x: Array of complex data samples (length N)
    :param order: Order of autoregressive process (0<order<N)
    :param criteria: select a criteria to automatically select the order

    :return:
        * A Array of complex autoregressive parameters A(1) to A(order). First
          value (unity) is not included !!
        * P Real variable representing driving noise variance (mean square
          of residual noise) from the whitening operation of the Burg
          filter.
        * reflection coefficients defining the filter of the model.

    .. plot::
        :width: 80%
        :include-source:

        from pylab import plot, log10, linspace, axis
        from spectrum import *

        AR, P, k = arburg(marple_data, 15)
        PSD = arma2psd(AR, sides='centerdc')
        plot(linspace(-0.5, 0.5, len(PSD)), 10*log10(PSD/max(PSD)))
        axis([-0.5,0.5,-60,0])

    .. note::
        1. no detrend. Should remove the mean trend to get PSD. Be careful if
           presence of large mean.
        2. If you don't know what the order value should be, choose the
           criterion='AKICc', which has the least bias and best
           resolution of model-selection criteria.

    .. note:: real and complex results double-checked versus octave using
        complex 64 samples stored in marple_data. It does not agree with Marple
        fortran routine but this is due to the simplex precision of complex
        data in fortran.

    :reference: [Marple]_ [octave]_
    """
    if order <= 0.:
        raise ValueError("order must be > 0")

    # NOTE(review): the message mentions "length input - 2" but the check is
    # against len(X) — confirm which bound is intended
    if order > len(X):
        raise ValueError("order must be less than length input - 2")

    x = np.array(X)
    N = len(x)

    # Initialisation
    # ------ rho, den
    rho = sum(abs(x)**2.) / float(N)  # Eq 8.21 [Marple]_
    den = rho * 2. * N

    # ---- criteria
    if criteria:
        from spectrum import Criteria
        crit = Criteria(name=criteria, N=N)
        crit.data = rho
        logging.debug('Step {}. old criteria={} new one={}. rho={}'.format(
            0, crit.old_data, crit.data, rho))

    #p =0
    a = np.zeros(0, dtype=complex)
    ref = np.zeros(0, dtype=complex)
    # forward and backward prediction errors start as the data itself
    ef = x.astype(complex)
    eb = x.astype(complex)
    temp = 1.

    # Main recursion
    for k in range(0, order):

        # calculate the next order reflection coefficient Eq 8.14 Marple
        num = sum([ef[j]*eb[j-1].conjugate() for j in range(k+1, N)])
        # den is updated recursively from the previous step (Eq 8.16)
        den = temp * den - abs(ef[k])**2 - abs(eb[N-1])**2
        kp = -2. * num / den  # eq 8.14

        temp = 1. - abs(kp)**2.
        new_rho = temp * rho

        if criteria:
            logging.debug('Step {}. old criteria={} new one={}. rho={}'.format(
                k+1, crit.old_data, crit.data, new_rho))
            # k+1 because order goes from 1 to P whereas k starts at 0.
            status = crit(rho=temp*rho, k=k+1)
            if status is False:
                logging.debug('Stop criteria reached %s %s ' % (crit.data, crit.old_data))
                break

        # this should be after the criteria
        rho = new_rho
        if rho <= 0:
            raise ValueError("Found a negative value (expected positive stricly) %s. Decrease the order" % rho)

        a.resize(a.size+1)
        a[k] = kp

        if k == 0:
            # first order: only the prediction errors need updating
            for j in range(N-1, k, -1):
                save2 = ef[j]
                ef[j] = save2 + kp * eb[j-1]  # Eq. (8.7)
                eb[j] = eb[j-1] + kp.conjugate() * save2
        else:
            # update the AR coeff
            khalf = (k+1)//2  # FIXME here khalf must be an integer
            for j in range(0, khalf):
                ap = a[j]  # previous value
                a[j] = ap + kp * a[k-j-1].conjugate()  # Eq. (8.2)
                if j != k-j-1:
                    a[k-j-1] = a[k-j-1] + kp * ap.conjugate()  # Eq. (8.2)

            # update the prediction error
            for j in range(N-1, k, -1):
                save2 = ef[j]
                ef[j] = save2 + kp * eb[j-1]  # Eq. (8.7)
                eb[j] = eb[j-1] + kp.conjugate() * save2

        # save the reflection coefficient
        ref.resize(ref.size+1)
        ref[k] = kp

    return a, rho, ref
def _numpy_cholesky(A, B):
"""Solve Ax=B using numpy cholesky solver
A = LU
in the case where A is square and Hermitian, A = L.L* where L* is
transpoed and conjugate matrix
Ly = b
where
Ux=y
so x = U^{-1} y
where U = L*
and y = L^{-1} B
"""
L = numpy.linalg.cholesky(A)
# A=L*numpy.transpose(L).conjugate()
# Ly = b
y = numpy.linalg.solve(L,B)
# Ux = y
x = numpy.linalg.solve(L.transpose().conjugate(),y)
return x, L |
def _numpy_solver(A, B):
"""This function solve Ax=B directly without taking care of the input
matrix properties.
"""
x = numpy.linalg.solve(A, B)
return x |
def CHOLESKY(A, B, method='scipy'):
    """Solve linear system `AX=B` using CHOLESKY method.

    :param A: an input Hermitian matrix
    :param B: an array
    :param str method: a choice of method in [numpy, scipy, numpy_solver]

        * `numpy_solver` relies entirely on numpy.solver (no cholesky decomposition)
        * `numpy` relies on the numpy.linalg.cholesky for the decomposition and
          numpy.linalg.solve for the inversion.
        * `scipy` uses scipy.linalg.cholesky for the decomposition and
          scipy.linalg.cho_solve for the inversion.

    .. rubric:: Description

    When a matrix is square and Hermitian (symmetric with lower part being
    the complex conjugate of the upper one), then the usual triangular
    factorization takes on the special form:

    .. math:: A = R R^H

    where :math:`R` is a lower triangular matrix with nonzero real principal
    diagonal element. The input matrix can be made of complex data. Then, the
    inversion to find :math:`x` is made as follows:

    .. math:: Ry = B

    and

    .. math:: Rx = y

    .. doctest::

        >>> import numpy
        >>> from spectrum import CHOLESKY
        >>> A = numpy.array([[ 2.0+0.j ,  0.5-0.5j, -0.2+0.1j],
        ...     [ 0.5+0.5j,  1.0+0.j ,  0.3-0.2j],
        ...     [-0.2-0.1j,  0.3+0.2j,  0.5+0.j ]])
        >>> B = numpy.array([ 1.0+3.j ,  2.0-1.j ,  0.5+0.8j])
        >>> CHOLESKY(A, B)
        array([ 0.95945946+5.25675676j,  4.41891892-7.04054054j,
               -5.13513514+6.35135135j])
    """
    if method == 'numpy_solver':
        X = _numpy_solver(A, B)
        return X
    elif method == 'numpy':
        X, _L = _numpy_cholesky(A, B)
        return X
    elif method == 'scipy':
        import scipy.linalg
        # scipy returns the upper triangular factor by default, hence the
        # `False` (lower=False) flag passed to cho_solve
        L = scipy.linalg.cholesky(A)
        X = scipy.linalg.cho_solve((L, False), B)
    else:
        # fix: the previous message listed method names that do not exist
        # ('numpy_cholesky', 'cholesky_inplace') instead of the valid ones
        raise ValueError('method must be numpy_solver, numpy or scipy')
    return X
def music(X, IP, NSIG=None, NFFT=default_NFFT, threshold=None, criteria='aic',
          verbose=False):
    """Eigen value pseudo spectrum estimate. See :func:`eigenfre`"""
    # thin wrapper: delegate to the generic eigen routine, forcing the
    # 'music' flavour
    return eigen(X, IP, NSIG=NSIG, NFFT=NFFT, threshold=threshold,
                 criteria=criteria, verbose=verbose, method='music')
def eigen(X, P, NSIG=None, method='music', threshold=None, NFFT=default_NFFT,
          criteria='aic', verbose=False):
    r"""Pseudo spectrum using eigenvector method (EV or Music)

    This function computes either the Music or EigenValue (EV) noise
    subspace frequency estimator.

    First, an autocorrelation matrix of order `P` is computed from
    the data. Second, this matrix is separated into vector subspaces,
    one a signal subspace and the other a noise
    subspace using a SVD method to obtain the eigen values and vectors.
    From the eigen values :math:`\lambda_i`, and eigen vectors :math:`v_k`,
    the **pseudo spectrum** (see note below) is computed as follows:

    .. math:: P_{ev}(f) = \frac{1}{e^H(f)\left(\sum\limits_{k=M+1}^{p} \frac{1}{\lambda_k}v_kv_k^H\right)e(f)}

    The separation of the noise and signal subspaces requires expertise
    of the signal. However, AIC and MDL criteria may be used to automatically
    perform this task.

    You still need to provide the parameter `P` to indicate the maximum number
    of eigen values to be computed. The criteria will just select a subset
    to estimate the pseudo spectrum (see :func:`~spectrum.criteria.aic_eigen`
    and :func:`~spectrum.criteria.mdl_eigen` for details.

    .. note:: **pseudo spectrum**. func:`eigen` does not compute a PSD estimate.
        Indeed, the method does not preserve the measured process power.

    :param X: Array data samples
    :param int P: maximum number of eigen values to compute. NSIG (if
        specified) must therefore be less than P.
    :param str method: 'music' or 'ev'.
    :param int NSIG: If specified, the signal sub space uses NSIG eigen values.
    :param float threshold: If specified, the signal sub space is made of the
        eigen values larger than :math:`\rm{threshold} \times \lambda_{min}`,
        where :math:`\lambda_{min}` is the minimum eigen values.
    :param int NFFT: total length of the final data sets (padded with zero
        if needed; default is 4096)

    :return:
        * PSD: Array of real frequency estimator values (two sided for
          complex data and one sided for real data)
        * S, the eigen values

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import eigen, marple_data
        from pylab import plot, log10, linspace, legend, axis

        psd, ev = eigen(marple_data, 15, NSIG=11)
        f = linspace(-0.5, 0.5, len(psd))
        plot(f, 10 * log10(psd/max(psd)), label='User defined')

        psd, ev = eigen(marple_data, 15, threshold=2)
        plot(f, 10 * log10(psd/max(psd)), label='threshold method (100)')

        psd, ev = eigen(marple_data, 15)
        plot(f, 10 * log10(psd/max(psd)), label='AIC method (8)')

        legend()
        axis([-0.5, 0.5, -120, 0])

    .. seealso::
        :func:`pev`,
        :func:`pmusic`,
        :func:`~spectrum.criteria.aic_eigen`

    :References: [Marple]_, Chap 13

    .. todo:: for developers:

        * what should be the second argument of the criteria N, N-P, P...?
        * what should be the max value of NP
    """
    if method not in ['music', 'ev']:
        raise ValueError("method must be 'music' or 'ev'")
    # fix: identity comparison with None instead of '!=' (PEP 8)
    if NSIG is not None and threshold is not None:
        raise ValueError("NSIG and threshold cannot be provided together")
    if NSIG is not None:
        if NSIG < 0:
            raise ValueError('NSIG must be positive')
        # fix: the message referred to a nonexistent parameter 'IP'
        if NSIG >= P:
            raise ValueError("NSIG must be strictly less than P")

    N = len(X)
    NP = N - P
    assert 2 * NP > P-1, 'decrease the second argument'
    if NP > 100:
        NP = 100

    # modified-covariance style data matrix: forward rows on top,
    # conjugated backward rows below
    FB = np.zeros((2*NP, P), dtype=complex)
    Z = np.zeros(NFFT, dtype=complex)
    PSD = np.zeros(NFFT)

    # These loops can surely be replaced by a function that create such matrix
    for I in range(0, NP):
        for K in range(0, P):
            FB[I, K] = X[I-K+P-1]
            FB[I+NP, K] = X[I+K+1].conjugate()
    # This commented line produces the correct FB, as the 2 for loops above
    # It is more elegant but slower...corrmtx needs to be optimised (20/4/11)
    #FB2 = spectrum.linalg.corrmtx(X, P-1, method='modified')

    # Compute the eigen values / vectors
    _U, S, V = svd(FB)

    # U and V are not the same as in Marple. Real or Imaginary absolute values
    # are correct but signs are not. This is wierd because the svd function
    # gives the same result as cvsd in Marple. Is FB correct ? it seems so.
    # The following operation has to be done. Otherwise, the resulting PSD is
    # not corect
    V = -V.transpose()

    NSIG = _get_signal_space(S, 2*NP,
                             verbose=verbose, threshold=threshold,
                             NSIG=NSIG, criteria=criteria)

    # accumulate the contribution of every noise-subspace eigen vector
    for I in range(NSIG, P):
        Z[0:P] = V[0:P, I]
        Z[P:NFFT] = 0
        Z = fft(Z, NFFT)
        if method == 'music':
            PSD = PSD + abs(Z)**2.
        elif method == 'ev':
            # EV weights each vector by the inverse of its eigen value
            PSD = PSD + abs(Z)**2. / S[I]

    PSD = 1./PSD

    # for some reasons, we need to rearrange the output. this is related to
    # the way U and V are order in the routine svd
    nby2 = int(NFFT/2)
    newpsd = np.append(PSD[nby2:0:-1], PSD[nby2*2-1:nby2-1:-1])
    return newpsd, S
def _get_signal_space(S, NP, verbose=False, threshold=None, NSIG=None,
                      criteria='aic'):
    """Determine the number of eigen values spanning the signal subspace.

    :param S: sorted eigen/singular values (largest first)
    :param int NP: scaling for the order-selection criteria (``2*NP`` is used)
    :param bool verbose: kept for backward compatibility (unused here)
    :param float threshold: if given, eigen values larger than
        ``threshold * min(S)`` belong to the signal subspace
    :param int NSIG: if given, returned as-is (user-enforced subspace size)
    :param str criteria: 'aic' or 'mdl'; used when neither NSIG nor
        threshold is provided
    :return: NSIG, the signal subspace dimension (at least 1)
    """
    from .criteria import aic_eigen, mdl_eigen

    # This section selects automatically the noise and signal subspaces.
    # NSIG being the number of eigenvalues corresponding to signals.
    if NSIG is None:
        if threshold is None:
            logging.debug('computing NSIG using AIC method')
            # get the minimum index of the AIC vector
            if criteria == 'aic':
                aic = aic_eigen(S, NP*2)
            elif criteria == 'mdl':
                aic = mdl_eigen(S, NP*2)
            else:
                # fix: an unknown criteria previously fell through and
                # raised a NameError on 'aic' below
                raise ValueError("criteria must be 'aic' or 'mdl'")
            # get the minimum index of the AIC vector, add 1 to get the NSIG
            NSIG = np.argmin(aic) + 1
            # fix: logging.debug takes a %-format string, not print-style
            # positional arguments
            logging.debug('NSIG=%s found as the number of pertinent sinusoids',
                          NSIG)
        else:
            logging.debug('computing NSIG using user threshold ')
            # following an idea from Matlab, pmusic, we look at the minimum
            # eigen value, and split the eigen values above and below
            # K times min eigen value, where K is >1
            m = threshold * min(S)
            new_s = S[np.where(S > m)]
            NSIG = len(new_s)
            # fix: same print-style logging bug as above
            logging.debug('found %s', NSIG)

    # the signal subspace can never be empty
    if NSIG == 0:
        NSIG = 1

    return NSIG
def speriodogram(x, NFFT=None, detrend=True, sampling=1.,
                 scale_by_freq=True, window='hamming', axis=0):
    """Simple periodogram, but matrices accepted.

    :param x: an array or matrix of data samples.
    :param NFFT: length of the data before FFT is computed (zero padding);
        defaults to ``len(x)``.
    :param bool detrend: detrend the data before computing the FFT by
        removing the mean (computed along `axis`).
    :param float sampling: sampling frequency of the input :attr:`data`.
    :param scale_by_freq: if True, scale the result by ``2*pi/df`` where
        ``df = sampling/NFFT``.
    :param str window: name of the tapering window (see :class:`Window`).
    :param int axis: axis along which the mean is computed for detrending;
        forced to 0 for 1-D input.
    :return: 2-sided PSD if complex data, 1-sided if real.

    If a matrix is provided (using numpy.matrix), then a periodogram
    is computed for each row. The returned matrix has the same shape as the
    input matrix.

    The mean of the input data is also removed from the data before computing
    the psd.

    .. plot::
        :width: 80%
        :include-source:

        from pylab import grid, semilogy
        from spectrum import data_cosine, speriodogram
        data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)
        semilogy(speriodogram(data, detrend=False, sampling=1024), marker='o')
        grid(True)

    .. plot::
        :width: 80%
        :include-source:

        import numpy
        from spectrum import speriodogram, data_cosine
        from pylab import figure, semilogy, figure ,imshow
        # create N data sets and make the frequency dependent on the time
        N = 100
        m = numpy.concatenate([data_cosine(N=1024, A=0.1, sampling=1024, freq=x)
            for x in range(1, N)]);
        m.resize(N, 1024)
        res = speriodogram(m)
        figure(1)
        semilogy(res)
        figure(2)
        imshow(res.transpose(), aspect='auto')

    .. todo:: a proper spectrogram class/function that takes care of normalisation

    .. note:: NOTE(review): only 1-D and 2-D inputs are supported; for
       x.ndim > 2 the window `w` is never defined and a NameError follows.
    """
    x = np.array(x)
    # array with 1 dimension case
    if x.ndim == 1:
        axis = 0
        r = x.shape[0]
        w = Window(r, window) #same size as input data
        w = w.data
    # matrix case: one window per column, same taper down each column
    elif x.ndim == 2:
        logging.debug('2D array. each row is a 1D array')
        [r, c] = x.shape
        w = np.array([Window(r, window).data for this in range(c)]).reshape(r,c)
    if NFFT is None:
        NFFT = len(x)
    isreal = np.isrealobj(x)
    if detrend == True:
        m = np.mean(x, axis=axis)
    else:
        m = 0
    # NOTE(review): the mean is subtracted from the *windowed* data (x*w - m),
    # not from x before windowing — confirm this is the intended detrend order.
    # Real input -> one-sided PSD via rfft; complex input -> two-sided via fft.
    if isreal == True:
        if x.ndim == 2:
            res = (abs (rfft (x*w - m, NFFT, axis=0))) ** 2. / r
        else:
            res = (abs (rfft (x*w - m, NFFT, axis=-1))) ** 2. / r
    else:
        if x.ndim == 2:
            res = (abs (fft (x*w - m, NFFT, axis=0))) ** 2. / r
        else:
            res = (abs (fft (x*w - m, NFFT, axis=-1))) ** 2. / r
    # optional scaling so that the integral over frequency matches the power
    if scale_by_freq is True:
        df = sampling / float(NFFT)
        res*= 2 * np.pi / df
    if x.ndim == 1:
        return res.transpose()
    else:
        return res
def WelchPeriodogram(data, NFFT=None, sampling=1., **kargs):
    r"""Simple periodogram wrapper of the matplotlib/pylab ``psd`` function.

    :param data: the input data samples
    :param int NFFT: total length of the final data sets (padded
        with zero if needed; default is 4096)
    :param float sampling: sampling frequency of the input data
    :param kargs: extra keyword arguments forwarded to :func:`pylab.psd`
    :return: the tuple ``(P, spectrum)`` where ``P`` is the output of
        :func:`pylab.psd` and ``spectrum`` is a :class:`Spectrum` object
        whose :attr:`psd` attribute holds the estimate.

    :Technical documentation:

    When we calculate the periodogram of a set of data we get an estimation
    of the spectral density. In fact as we use a Fourier transform and a
    truncated segments the spectrum is the convolution of the data with a
    rectangular window which Fourier transform is

    .. math::

        W(s)= \frac{1}{N^2} \left[ \frac{\sin(\pi s)}{\sin(\pi s/N)} \right]^2

    Thus oscillations and sidelobes appear around the main frequency. One aim
    of the tapering is to reduce these effects. We multiply data by a window
    whose sidelobes are much smaller than the main lobe. A classical window is
    the hanning window, but others are available. However, we must take into
    account this energy and divide the spectrum by the energy of the taper
    used. Thus the periodogram becomes:

    .. math::

        D_k \equiv \sum_{j=0}^{N-1}c_jw_j \; e^{2\pi ijk/N} \qquad k=0,...,N-1

    .. math::

        P(0)=P(f_0)=\frac{1}{2\pi W_{ss}}\arrowvert{D_0}\arrowvert^2

    .. math::

        P(f_k)=\frac{1}{2\pi W_{ss}} \left[\arrowvert{D_k}\arrowvert^2+\arrowvert{D_{N-k}}\arrowvert^2\right] \qquad k=0,1,..., \left( \frac{1}{2}-1 \right)

    .. math::

        P(f_c)=P(f_{N/2})= \frac{1}{2\pi W_{ss}} \arrowvert{D_{N/2}}\arrowvert^2

    with

    .. math::

        {W_{ss}} \equiv N\sum_{j=0}^{N-1}w_j^2

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import WelchPeriodogram, marple_data
        psd = WelchPeriodogram(marple_data, 256)
    """
    from pylab import psd
    # BUGFIX: sampling was previously hard-coded to 1. when building the
    # Spectrum object, silently ignoring the user-provided sampling frequency.
    spectrum = Spectrum(data, sampling=sampling)
    P = psd(data, NFFT, Fs=sampling, **kargs)
    spectrum.psd = P[0]
    #spectrum.__Spectrum_sides = 'twosided'
    return P, spectrum
def DaniellPeriodogram(data, P, NFFT=None, detrend='mean', sampling=1.,
        scale_by_freq=True, window='hamming'):
    r"""Return Daniell's periodogram.

    To reduce fast fluctuations of the spectrum one idea proposed by Daniell
    is to average each value with points in its neighbourhood. It acts like
    a low-pass filter.

    .. math:: \hat{P}_D[f_i]= \frac{1}{2P+1} \sum_{n=i-P}^{i+P} \tilde{P}_{xx}[f_n]

    where P is the number of points to average on each side.

    Daniell's periodogram is the convolution of the spectrum with a low-pass
    filter:

    .. math:: \hat{P}_D(f)= \hat{P}_{xx}(f)*H(f)

    :param data: input data samples (forwarded to :func:`speriodogram`)
    :param int P: half-width of the averaging window (2P+1 bins are averaged)
    :param NFFT: see :func:`speriodogram`
    :param detrend: see :func:`speriodogram`
    :param float sampling: sampling frequency, used to build the output
        frequency vector
    :param scale_by_freq: see :func:`speriodogram`
    :param str window: see :func:`speriodogram`
    :return: the tuple ``(newpsd, freq)``

    Example::

        >>> DaniellPeriodogram(data, 8)

    if N/P is not integer, the final values of the original PSD are not used.

    using DaniellPeriodogram(data, 0) should give the original PSD.
    """
    psd = speriodogram(data, NFFT=NFFT, detrend=detrend, sampling=sampling,
            scale_by_freq=scale_by_freq, window=window)
    # an odd-length PSD comes from rfft on real data (one-sided spectrum)
    if len(psd) % 2 == 1:
        datatype = 'real'
    else:
        datatype = 'complex'
    N = len(psd)
    _slice = 2 * P + 1
    if datatype == 'real': #must get odd value
        newN = np.ceil(psd.size/float(_slice))
        if newN % 2 == 0:
            newN = psd.size/_slice
    else:
        newN = np.ceil(psd.size/float(_slice))
        if newN % 2 == 1:
            newN = psd.size/_slice
    newpsd = np.zeros(int(newN)) # keep integer division
    for i in range(0, newpsd.size):
        count = 0 #needed to know the number of valid averaged values
        for n in range(i*_slice-P, i*_slice+P+1): #+1 to have P values on each sides
            # BUGFIX: the condition used to be `n > 0`, which always excluded
            # the DC bin psd[0]; with P=0 this made count==0 at i=0 (division
            # by zero) and broke the documented identity for P=0.
            if n >= 0 and n < N: #needed to start/stop the average at the edges
                count += 1
                newpsd[i] += psd[n]
        newpsd[i] /= float(count)
    #todo: check this frequency mapping for the complex (two-sided) case
    if datatype == 'complex':
        freq = np.linspace(0, sampling, len(newpsd))
    else:
        freq = np.linspace(0, sampling/2., len(newpsd))
    return newpsd, freq
def centerdc_gen(self):
    """Yield the DC-centered frequency grid one value at a time.

    The grid spans ``[-Fs/2, Fs/2)`` in :attr:`N` steps of :attr:`df`.

    ::

        >>> print(list(Range(8).centerdc_gen()))
        [-0.5, -0.375, -0.25, -0.125, 0.0, 0.125, 0.25, 0.375]
    """
    half = self.N / 2
    step = self.df
    for index in range(self.N):
        yield (index - half) * step
def onesided_gen(self):
    """Yield the one-sided frequency grid one value at a time.

    If :attr:`N` is even, N/2 + 1 values are produced; if :attr:`N` is
    odd, (N+1)/2 values are produced. Each value is a multiple of
    :attr:`df`.

    ::

        >>> print(list(Range(8).onesided()))
        [0.0, 0.125, 0.25, 0.375, 0.5]
        >>> print(list(Range(9).onesided()))
        [0.0, 0.1111, 0.2222, 0.3333, 0.4444]
    """
    count = self.N // 2 + 1 if self.N % 2 == 0 else (self.N + 1) // 2
    for k in range(count):
        yield k * self.df
def frequencies(self, sides=None):
    """Return the frequency vector for the requested *sides* convention.

    :param str sides: one of the values in :attr:`_sides_choices`
        ('onesided', 'twosided', 'centerdc'); defaults to the current
        :attr:`sides` attribute when not provided.
    :raises errors.SpectrumChoiceError: for an invalid *sides* value.
    """
    # fall back on the attribute unless a valid sides argument is given
    chosen = self.sides if sides is None else sides
    if chosen not in self._sides_choices:
        raise errors.SpectrumChoiceError(chosen, self._sides_choices)
    if chosen == 'onesided':
        return self._range.onesided()
    elif chosen == 'twosided':
        return self._range.twosided()
    return self._range.centerdc()
def get_converted_psd(self, sides):
    """This function returns the PSD in the **sides** format

    :param str sides: the PSD format in ['onesided', 'twosided', 'centerdc']
    :return: the expected PSD.

    .. doctest::

        from spectrum import *
        p = pcovar(marple_data, 15)
        centerdc_psd = p.get_converted_psd('centerdc')

    .. note:: this function does not change the object, in particular, it
        does not change the :attr:`psd` attribute. If you want to change
        the psd on the fly, change the attribute :attr:`sides`.

    .. note:: NOTE(review): the onesided<->twosided conversions assume the
       stored PSD corresponds to an even-length original data set (storage
       layout P0 X1 ... P1) — confirm for odd-length inputs.
    """
    if sides == self.sides:
        # nothing to be done if the requested sides equals :attr:`sides`
        return self.__psd
    if self.datatype == 'complex':
        assert sides != 'onesided', \
            "complex datatype so sides cannot be onesided."
    if self.sides == 'onesided':
        logging.debug('Current sides is onesided')
        if sides == 'twosided':
            logging.debug('--->Converting to twosided')
            # here we divide everything by 2 to get the twosided version
            #N = self.NFFT
            newpsd = numpy.concatenate((self.psd[0:-1]/2., list(reversed(self.psd[0:-1]/2.))))
            # so we need to multiply by 2 the 0 and FS/2 frequencies
            newpsd[-1] = self.psd[-1]
            newpsd[0] *= 2.
        elif sides == 'centerdc':
            # FIXME. this assumes data is even so PSD is stored as
            # P0 X1 X2 X3 P1
            logging.debug('--->Converting to centerdc')
            P0 = self.psd[0]
            P1 = self.psd[-1]
            newpsd = numpy.concatenate((self.psd[-1:0:-1]/2., self.psd[0:-1]/2.))
            # so we need to multiply by 2 the 0 and F2/2 frequencies
            #newpsd[-1] = P0 / 2
            newpsd[0] = P1
    elif self.sides == 'twosided':
        logging.debug('Current sides is twosided')
        if sides == 'onesided':
            # we assume that data is stored as X0,X1,X2,X3,XN
            # that is original data is even.
            logging.debug('Converting to onesided assuming ori data is even')
            midN = (len(self.psd)-2) / 2
            newpsd = numpy.array(self.psd[0:int(midN)+2]*2)
            newpsd[0] /= 2
            newpsd[-1] = self.psd[-1]
        elif sides == 'centerdc':
            newpsd = stools.twosided_2_centerdc(self.psd)
    elif self.sides == 'centerdc': # same as twosided to onesided
        logging.debug('Current sides is centerdc')
        if sides == 'onesided':
            logging.debug('--->Converting to onesided')
            midN = int(len(self.psd) / 2)
            P1 = self.psd[0]
            newpsd = numpy.append(self.psd[midN:]*2, P1)
        elif sides == 'twosided':
            newpsd = stools.centerdc_2_twosided(self.psd)
    else:
        raise ValueError("sides must be set to 'onesided', 'twosided' or 'centerdc'")
    return newpsd
def plot(self, filename=None, norm=False, ylim=None,
          sides=None, **kargs):
    """a simple plotting routine to plot the PSD versus frequency.

    :param str filename: save the figure into a file
    :param norm: False by default. If True, the PSD is normalised
        by its maximum before conversion to dB.
    :param ylim: readjust the y range.
    :param sides: if not provided, :attr:`sides` is used. See :attr:`sides`
        for details.
    :param kargs: any optional argument accepted by :func:`pylab.plot`;
        the special key ``ax`` selects the axes to draw on.
    :raises ValueError: if *sides* is 'onesided' with complex data, or
        if the PSD and frequency vectors end up with different lengths.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import *
        p = Periodogram(marple_data)
        p.plot(norm=True, marker='o')
    """
    import pylab
    from pylab import ylim as plt_ylim
    # First, check that psd attribute is up-to-date:
    # just to get the PSD to be recomputed if needed
    _ = self.psd
    # check that the input sides parameter is correct if provided
    if sides is not None:
        if sides not in self._sides_choices:
            raise errors.SpectrumChoiceError(sides, self._sides_choices)
    # if sides is provided but identical to the current psd, nothing to do.
    # if sides not provided, let us use self.sides
    if sides is None or sides == self.sides:
        frequencies = self.frequencies()
        psd = self.psd
        sides = self.sides
    elif sides is not None:
        # if sides argument is different from the attribute, we need to
        # create a new PSD/Freq ; indeed we do not want to change the
        # attribute itself
        # if data is complex, one-sided is wrong in any case.
        if self.datatype == 'complex':
            if sides == 'onesided':
                raise ValueError("sides cannot be one-sided with complex data")
        logging.debug("sides is different from the one provided. Converting PSD")
        frequencies = self.frequencies(sides=sides)
        psd = self.get_converted_psd(sides)
    if len(psd) != len(frequencies):
        raise ValueError("PSD length is %s and freq length is %s" % (len(psd), len(frequencies)))
    # honour a user-provided axes, remembering the current one to restore it
    if 'ax' in list(kargs.keys()):
        save_ax = pylab.gca()
        pylab.sca(kargs['ax'])
        rollback = True
        del kargs['ax']
    else:
        rollback = False
    # plot in dB, optionally normalised by the maximum of the PSD
    if norm:
        pylab.plot(frequencies, 10 * stools.log10(psd/max(psd)), **kargs)
    else:
        pylab.plot(frequencies, 10 * stools.log10(psd),**kargs)
    pylab.xlabel('Frequency')
    pylab.ylabel('Power (dB)')
    pylab.grid(True)
    if ylim:
        plt_ylim(ylim)
    # adjust the x range to the frequency span of the chosen sides
    if sides == 'onesided':
        pylab.xlim(0, self.sampling/2.)
    elif sides == 'twosided':
        pylab.xlim(0, self.sampling)
    elif sides == 'centerdc':
        pylab.xlim(-self.sampling/2., self.sampling/2.)
    if filename:
        pylab.savefig(filename)
    # restore the axes that was current before the 'ax' switch
    if rollback:
        pylab.sca(save_ax)
    del psd, frequencies
def power(self):
    r"""Return the power contained in the PSD

    if scale_by_freq is False, the power is:

    .. math:: P = N \sum_{k=1}^{N} P_{xx}(k)

    else, it is

    .. math:: P = \sum_{k=1}^{N} P_{xx}(k) \frac{df}{2\pi}

    .. todo:: check these equations
    """
    if self.scale_by_freq != False:
        return sum(self.psd) * self.df / (2. * numpy.pi)
    return sum(self.psd) * len(self.psd)
def periodogram(self):
    """Compute the PSD estimate with :func:`~spectrum.periodogram.speriodogram`.

    The parameters are extracted from the attributes. Relevant attributes
    are :attr:`window`, :attr:`sampling`, :attr:`NFFT`,
    :attr:`scale_by_freq` and :attr:`detrend`. The result is stored in
    the :attr:`psd` attribute.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import datasets
        from spectrum import FourierSpectrum
        s = FourierSpectrum(datasets.data_cosine(), sampling=1024, NFFT=512)
        s.periodogram()
        s.plot()
    """
    from .periodogram import speriodogram
    estimate = speriodogram(
        self.data,
        window=self.window,
        sampling=self.sampling,
        NFFT=self.NFFT,
        scale_by_freq=self.scale_by_freq,
        detrend=self.detrend)
    self.psd = estimate
def ipy_notebook_skeleton():
    """Return an empty Jupyter notebook structure (nbformat 4).

    The kernelspec and language_info entries are filled from the running
    interpreter's version (``sys.version_info``).
    """
    major = str(sys.version_info[0])
    kernelspec = {
        "display_name": "Python " + major,
        "language": "python",
        "name": "python" + major,
    }
    language_info = {
        "codemirror_mode": {
            "name": "ipython",
            # keep the integer major version here, as nbformat expects
            "version": sys.version_info[0],
        },
        "file_extension": ".py",
        "mimetype": "text/x-python",
        "name": "python",
        "nbconvert_exporter": "python",
        "pygments_lexer": "ipython" + major,
        "version": '{0}.{1}.{2}'.format(*sys.version_info[:3]),
    }
    return {
        "cells": [],
        "metadata": {
            "kernelspec": kernelspec,
            "language_info": language_info,
        },
        "nbformat": 4,
        "nbformat_minor": 0,
    }
def rst2md(text):
    """Translate a snippet of reStructuredText into Markdown.

    Three constructs are handled: over/underlined top-level headings
    (converted to ``# Title``), ``.. math::`` directives (converted to
    ``$$...$$`` display math) and inline ``:math:`` roles (converted to
    ``$...$``). Used when turning example docstrings and comments into
    IPython notebook cells.
    """
    heading_re = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
    text = heading_re.sub(r'# \1', text)

    def _display_math(match):
        return r'$${0}$$'.format(match.group(1).strip())

    block_math_re = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
    text = block_math_re.sub(_display_math, text)

    inline_math_re = re.compile(r':math:`(.+)`')
    text = inline_math_re.sub(r'$\1$', text)
    return text
def add_markdown_cell(self, text):
    """Append a markdown cell built from RST content to the notebook.

    Parameters
    ----------
    text : str
        Cell content, as reStructuredText; it is converted to markdown
        via :func:`rst2md` before insertion.
    """
    cell = {
        "cell_type": "markdown",
        "metadata": {},
        "source": [rst2md(text)],
    }
    self.work_notebook["cells"].append(cell)
def save_file(self):
    """Serialize the notebook dictionary to :attr:`write_file` as JSON."""
    with open(self.write_file, 'w') as handle:
        json.dump(self.work_notebook, handle, indent=2)
def arma2psd(A=None, B=None, rho=1., T=1., NFFT=4096, sides='default',
        norm=False):
    r"""Computes power spectral density given ARMA values.

    This function computes the power spectral density values
    given the ARMA parameters of an ARMA model. It assumes that
    the driving sequence is a white noise process of zero mean and
    variance :math:`\rho_w`. The sampling frequency and noise variance are
    used to scale the PSD output, which length is set by the user with the
    `NFFT` parameter.

    :param array A: Array of AR parameters (complex or real)
    :param array B: Array of MA parameters (complex or real)
    :param float rho: White noise variance to scale the returned PSD
    :param float T: Sample interval in seconds to scale the returned PSD
    :param int NFFT: Final size of the PSD (defaults to 4096, also when
        ``None`` is given)
    :param str sides: Default PSD is two-sided, but sides can be set to
        centerdc.
    :param bool norm: if True, the PSD is normalised by its maximum

    .. warning:: By convention, the AR or MA arrays does not contain the
        A0=1 value.

    If :attr:`B` is None, the model is a pure AR model. If :attr:`A` is None,
    the model is a pure MA model.

    :return: two-sided PSD
    :raises ValueError: if both `A` and `B` are None

    .. rubric:: Details:

    ARMA case: the power spectral density is:

    .. math:: P_{ARMA}(f) = T \rho_w \left|\frac{B(f)}{A(f)}\right|^2

    where the denominator is built from the AR parameters and the
    numerator from the MA parameters:

    .. math:: A(f) = 1 + \sum_{k=1}^p a(k) e^{-j2\pi fkT}

    .. math:: B(f) = 1 + \sum_{k=1}^q b(k) e^{-j2\pi fkT}

    .. rubric:: **Example:**

    .. plot::
        :width: 80%
        :include-source:

        import spectrum.arma
        from pylab import plot, log10, legend
        plot(10*log10(spectrum.arma.arma2psd([1,0.5],[0.5,0.5])), label='ARMA(2,2)')
        plot(10*log10(spectrum.arma.arma2psd([1,0.5],None)), label='AR(2)')
        plot(10*log10(spectrum.arma.arma2psd(None,[0.5,0.5])), label='MA(2)')
        legend()

    :References: [Marple]_
    """
    if NFFT is None:
        NFFT = 4096
    if A is None and B is None:
        raise ValueError("Either AR or MA model must be provided")
    psd = np.zeros(NFFT, dtype=complex)
    # denominator polynomial A(f): [1, a_1, ..., a_p] zero-padded to NFFT
    if A is not None:
        ip = len(A)
        den = np.zeros(NFFT, dtype=complex)
        den[0] = 1.+0j
        for k in range(0, ip):
            den[k+1] = A[k]
        denf = fft(den, NFFT)
    # numerator polynomial B(f): [1, b_1, ..., b_q] zero-padded to NFFT
    if B is not None:
        iq = len(B)
        num = np.zeros(NFFT, dtype=complex)
        num[0] = 1.+0j
        for k in range(0, iq):
            num[k+1] = B[k]
        numf = fft(num, NFFT)
    # Changed in version 0.6.9 (divided by T instead of multiply)
    if A is not None and B is not None:
        psd = rho / T * abs(numf)**2. / abs(denf)**2.
    elif A is not None:
        psd = rho / T / abs(denf)**2.
    elif B is not None:
        psd = rho / T * abs(numf)**2.
    psd = np.real(psd)
    # The PSD is a twosided PSD.
    # to obtain the centerdc representation, shift with the tools module
    if sides != 'default':
        from . import tools
        assert sides in ['centerdc']
        if sides == 'centerdc':
            psd = tools.twosided_2_centerdc(psd)
    if norm == True:
        psd /= max(psd)
    return psd
def arma_estimate(X, P, Q, lag):
    """Autoregressive and moving average estimators.

    This function provides an estimate of the autoregressive
    parameters, the moving average parameters, and the driving
    white noise variance of an ARMA(P,Q) for a complex or real data sequence.

    The parameters are estimated using three steps:

        * Estimate the AR parameters from the original data based on a least
          squares modified Yule-Walker technique,
        * Produce a residual time sequence by filtering the original data
          with a filter based on the AR parameters,
        * Estimate the MA parameters from the residual time sequence.

    :param array X: Array of data samples (length N)
    :param int P: Desired number of AR parameters
    :param int Q: Desired number of MA parameters
    :param int lag: Maximum lag to use for autocorrelation estimates

    :return:
        * A - Array of complex P AR parameter estimates
        * B - Array of complex Q MA parameter estimates
        * RHO - White noise variance estimate

    .. note::
      * lag must be >= Q (MA order)

    **dependencies**:
        * :meth:`spectrum.correlation.CORRELATION`
        * :meth:`spectrum.covar.arcovar`
        * :meth:`spectrum.arma.ma`

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import arma_estimate, arma2psd, marple_data
        import pylab
        a,b, rho = arma_estimate(marple_data, 15, 15, 30)
        psd = arma2psd(A=a, B=b, rho=rho, sides='centerdc', norm=True)
        pylab.plot(10 * pylab.log10(psd))
        pylab.ylim([-50,0])

    :reference: [Marple]_
    """
    # unbiased autocorrelation estimates up to `lag`
    R = CORRELATION(X, maxlags=lag, norm='unbiased')
    R0 = R[0]
    #C Estimate the AR parameters (no error weighting is used).
    #C Number of equation errors is M-Q .
    MPQ = lag - Q + P
    N = len(X)
    Y = np.zeros(N-P, dtype=complex)
    # build the shifted autocorrelation sequence used by the modified
    # Yule-Walker equations; negative lags use the conjugate symmetry
    # NOTE(review): assumes MPQ <= N-P so Y is large enough — confirm for
    # extreme P/Q/lag combinations.
    for K in range(0, MPQ):
        KPQ = K + Q - P+1
        if KPQ < 0:
            Y[K] = R[-KPQ].conjugate()
        if KPQ == 0:
            Y[K] = R0
        if KPQ > 0:
            Y[K] = R[KPQ]
    # The resize is very important for the normalissation.
    Y.resize(lag)
    # AR fit on the autocorrelation sequence (Marple Eq. (10.12));
    # arcovar_marple is used for small orders only
    if P <= 4:
        res = arcovar_marple(Y.copy(), P)    #! Eq. (10.12)
        ar_params = res[0]
    else:
        res = arcovar(Y.copy(), P)    #! Eq. (10.12)
        ar_params = res[0]
    # the .copy is used to prevent a reference somewhere. this is a bug
    # to be tracked down.
    Y.resize(N-P)
    #C Filter the original time series through the estimated AR filter to
    #C obtain the residual (approximately MA) sequence
    for k in range(P, N):
        SUM = X[k]
        #SUM += sum([ar_params[j]*X[k-j-1] for j in range(0,P)])
        for j in range(0, P):
            SUM = SUM + ar_params[j] * X[k-j-1]     #! Eq. (10.17)
        Y[k-P] = SUM
    # Estimate the MA parameters (a "long" AR of order at least 2*IQ
    #C is suggested)
    #Y.resize(N-P)
    ma_params, rho = ma(Y, Q, 2*Q)     #! Eq. (10.3)
    return ar_params, ma_params, rho
def ma(X, Q, M):
    """Moving average estimator.

    This program provides an estimate of the moving average parameters
    and driving noise variance for a data sequence based on a
    long AR model and a least squares fit (Durbin's method).

    :param array X: The input data array
    :param int Q: Desired MA model order (must be >0 and <M)
    :param int M: Order of "long" AR model (suggest at least 2*Q)

    :return:
        * MA - Array of Q complex MA parameter estimates
        * RHO - Real scalar of white noise variance estimate

    :raises ValueError: if Q is not strictly between 0 and M

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import arma2psd, ma, marple_data
        import pylab

        # Estimate 15 Ma parameters
        b, rho = ma(marple_data, 15, 30)
        # Create the PSD from those MA parameters
        psd = arma2psd(B=b, rho=rho, sides='centerdc')
        # and finally plot the PSD
        pylab.plot(pylab.linspace(-0.5, 0.5, 4096), 10 * pylab.log10(psd/max(psd)))
        pylab.axis([-0.5, 0.5, -30, 0])

    :reference: [Marple]_
    """
    if not (0 < Q < M):
        raise ValueError('Q(MA) must be in ]0,lag[')
    # Step 1: fit a high-order AR model to the data (Marple Eq. (10.5))
    a, rho, _c = yulewalker.aryule(X, M, 'biased')
    # prepend the unity coefficient so the AR polynomial is [1, a1, ..., aM]
    a = np.insert(a, 0, 1)
    # Step 2: fit an AR model of order Q to those coefficients to obtain the
    # MA parameters via the Yule-Walker method (Marple Eq. (10.7))
    ma_params, _p, _c = yulewalker.aryule(a, Q, 'biased')
    return ma_params, rho
def CORRELOGRAMPSD(X, Y=None, lag=-1, window='hamming',
                   norm='unbiased', NFFT=4096, window_params={},
                   correlation_method='xcorr'):
    r"""PSD estimate using correlogram method.

    :param array X: complex or real data samples X(1) to X(N)
    :param array Y: complex data samples Y(1) to Y(N). If provided, computes
        the cross PSD, otherwise the PSD is returned
    :param int lag: highest lag index to compute. Must be less than N
    :param str window: see :mod:`window` for list of valid names
    :param str norm: one of the valid normalisation of :func:`xcorr` (biased,
        unbiased, coeff, None)
    :param int NFFT: total length of the final data sets (padded with zero
        if needed; default is 4096)
    :param dict window_params: extra keyword arguments forwarded to
        :class:`Window`
    :param str correlation_method: either `xcorr` or `CORRELATION`.
        CORRELATION should be removed in the future.

    :return:
        * Array of real (cross) power spectral density estimate values. This is
          a two sided array with negative values following the positive ones
          whatever is the input data (real or complex).

    .. rubric:: Description:

    The exact power spectral density is the Fourier transform of the
    autocorrelation sequence:

    .. math:: P_{xx}(f) = T \sum_{m=-\infty}^{\infty} r_{xx}[m] exp^{-j2\pi fmT}

    The correlogram method of PSD estimation substitutes a finite sequence of
    autocorrelation estimates :math:`\hat{r}_{xx}` in place of :math:`r_{xx}`.
    This estimation can be computed with :func:`xcorr` or :func:`CORRELATION` by
    choosing a proper lag `L`. The estimated PSD is then

    .. math:: \hat{P}_{xx}(f) = T \sum_{m=-L}^{L} \hat{r}_{xx}[m] exp^{-j2\pi fmT}

    The lag index must be less than the number of data samples `N`. Ideally, it
    should be around `L/10` [Marple]_ so as to avoid greater statistical
    variance associated with higher lags.

    To reduce the leakage of the implicit rectangular window and therefore to
    reduce the bias in the estimate, a tapering window is normally used and lead
    to the so-called Blackman and Tukey correlogram:

    .. math:: \hat{P}_{BT}(f) = T \sum_{m=-L}^{L} w[m] \hat{r}_{xx}[m] exp^{-j2\pi fmT}

    The correlogram for the cross power spectral estimate is

    .. math:: \hat{P}_{xx}(f) = T \sum_{m=-L}^{L} \hat{r}_{xx}[m] exp^{-j2\pi fmT}

    which is computed if :attr:`Y` is not provide. In such case,
    :math:`r_{yx} = r_{xy}` so we compute the correlation only once.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import CORRELOGRAMPSD, marple_data
        from spectrum.tools import cshift
        from pylab import log10, axis, grid, plot,linspace

        psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)
        f = linspace(-0.5, 0.5, len(psd))
        psd = cshift(psd, len(psd)/2)
        plot(f, 10*log10(psd/max(psd)))
        axis([-0.5,0.5,-50,0])
        grid(True)

    .. seealso:: :func:`create_window`, :func:`CORRELATION`, :func:`xcorr`,
        :class:`pcorrelogram`.

    .. note:: NOTE(review): the default ``lag=-1`` slices ``rxy[-1:]`` below
       and is unlikely to be meaningful — callers appear expected to always
       provide a positive lag; confirm.
    """
    N = len(X)
    assert lag<N, 'lag must be < size of input data'
    assert correlation_method in ['CORRELATION', 'xcorr']
    if Y is None:
        Y = numpy.array(X)
        crosscorrelation = False
    else:
        crosscorrelation = True
    if NFFT is None:
        NFFT = N
    psd = numpy.zeros(NFFT, dtype=complex)
    # Window should be centered around zero. Moreover, we want only the
    # positive values. So, we need to use 2*lag + 1 window and keep values on
    # the right side.
    w = Window(2.*lag+1, window, **window_params)
    w = w.data[lag+1:]
    # compute the cross correlation
    if correlation_method == 'CORRELATION':
        rxy = CORRELATION (X, Y, maxlags=lag, norm=norm)
    elif correlation_method == 'xcorr':
        rxy, _l = xcorr (X, Y, maxlags=lag, norm=norm)
        # xcorr returns lags from -lag to +lag; keep the non-negative half
        rxy = rxy[lag:]
    # keep track of the first elt.
    psd[0] = rxy[0]
    # create the first part of the PSD (positive lags, tapered)
    psd[1:lag+1] = rxy[1:] * w
    # create the second part.
    # First, we need to compute the auto or cross correlation ryx
    if crosscorrelation is True:
        # compute the cross correlation
        if correlation_method == 'CORRELATION':
            ryx = CORRELATION(Y, X, maxlags=lag, norm=norm)
        elif correlation_method == 'xcorr':
            ryx, _l = xcorr(Y, X, maxlags=lag, norm=norm)
            ryx = ryx[lag:]
        #print len(ryx), len(psd[-1:NPSD-lag-1:-1])
        psd[-1:NFFT-lag-1:-1] = ryx[1:].conjugate() * w
    else: #autocorrelation no additional correlation call required
        psd[-1:NFFT-lag-1:-1] = rxy[1:].conjugate() * w
    # the spectrum of a (conjugate-symmetric) correlation sequence is real
    psd = numpy.real(fft(psd))
    return psd
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
return data |
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel |
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out |
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index

    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)

    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')

    # locate and parse the objects dictionary
    pos = searchindex.find('objects:')
    if pos < 0:
        raise ValueError('"objects:" not found in search index')
    objects = _parse_dict_recursive(
        _select_block(searchindex[pos:], '{', '}'))

    # locate and parse the filenames list
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    tail = searchindex[pos + len(query) + 1:]
    tail = tail[:tail.find(']')]
    filenames = [name.strip('"') for name in tail.split(',')]
    return filenames, objects
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code.

    Runs as a Sphinx ``build-finished`` style hook: it bails out when the
    build failed, when the gallery examples were not executed, or when the
    builder does not produce the searchindex.js the link resolution needs.
    """
    if exception is not None:
        return

    # No need to waste time embedding hyperlinks when not running the examples
    # XXX: also at the time of writing this fixes make html-noplot
    # for some reason I don't fully understand
    if not app.builder.config.plot_gallery:
        return

    # XXX: Whitelist of builders for which it makes sense to embed
    # hyperlinks inside the example html. Note that the link embedding
    # require searchindex.js to exist for the links to the local doc
    # and there does not seem to be a good way of knowing which
    # builders creates a searchindex.js.
    if app.builder.name not in ['html', 'readthedocs']:
        return

    print('Embedding documentation hyperlinks in examples..')

    conf = app.config.sphinx_gallery_conf
    dirs = conf['gallery_dirs']
    dirs = dirs if isinstance(dirs, list) else [dirs]
    for target_dir in dirs:
        _embed_code_links(app, conf, target_dir)
def _get_link(self, cobj):
    """Get a valid link, False if not found

    :param dict cobj: describes the code object to resolve; the keys
        'module_short' and 'name' are read here.
    :return: the documentation URL (with ``#anchor``) for the object, or
        False when it cannot be resolved or does not appear in the page.
    """
    fname_idx = None
    full_name = cobj['module_short'] + '.' + cobj['name']
    # first try the fully qualified name in the search index, then the
    # module entry alone
    if full_name in self._searchindex['objects']:
        value = self._searchindex['objects'][full_name]
        if isinstance(value, dict):
            value = value[next(iter(value.keys()))]
        fname_idx = value[0]
    elif cobj['module_short'] in self._searchindex['objects']:
        value = self._searchindex['objects'][cobj['module_short']]
        if cobj['name'] in value.keys():
            fname_idx = value[cobj['name']][0]

    if fname_idx is not None:
        fname = self._searchindex['filenames'][fname_idx] + '.html'

        # build the page URL/path according to the platform conventions
        if self._is_windows:
            fname = fname.replace('/', '\\')
            link = os.path.join(self.doc_url, fname)
        else:
            link = posixpath.join(self.doc_url, fname)

        if hasattr(link, 'decode'):
            link = link.decode('utf-8', 'replace')

        # fetch the page once and cache it for subsequent lookups
        if link in self._page_cache:
            html = self._page_cache[link]
        else:
            html = get_data(link, self.gallery_dir)
            self._page_cache[link] = html

        # test if cobj appears in page
        comb_names = [cobj['module_short'] + '.' + cobj['name']]
        if self.extra_modules_test is not None:
            for mod in self.extra_modules_test:
                comb_names.append(mod + '.' + cobj['name'])
        url = False
        if hasattr(html, 'decode'):
            # Decode bytes under Python 3
            html = html.decode('utf-8', 'replace')
        # keep the last matching candidate as the anchor
        for comb_name in comb_names:
            if hasattr(comb_name, 'decode'):
                # Decode bytes under Python 3
                comb_name = comb_name.decode('utf-8', 'replace')
            if comb_name in html:
                url = link + u'#' + comb_name
        link = url
    else:
        link = False

    return link
def create_all_psd():
    """Plot a comparison of the PSD estimators available in spectrum.

    NOTE(review): relies on module-level names ``xx`` (the data set),
    ``spectrum`` and ``pylab`` — confirm they are defined at module scope.
    The first legend label says "MA 32" while ``pma`` is called with
    order 64 — confirm which is intended.
    """
    f = pylab.linspace(0, 1, 4096)
    pylab.figure(figsize=(12,8))

    # MA model
    p = spectrum.pma(xx, 64,128); p(); p.plot()
    """
    #ARMA 15 order
    a, b, rho = spectrum.arma_estimate(data, 15,15, 30)
    psd = spectrum.arma2psd(A=a,B=b, rho=rho)
    newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq
    pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='ARMA 15,15')
    """
    # YULE WALKER
    p = spectrum.pyule(xx, 7 , NFFT=4096, scale_by_freq=False); p.plot()
    # equivalent to
    # plot([x for x in p.frequencies()] , 10*log10(p.psd)); grid(True)

    #burg method
    p = spectrum.pburg(xx, 7, scale_by_freq=False); p.plot()

    #pcovar
    p = spectrum.pcovar(xx, 7, scale_by_freq=False); p.plot()

    #pmodcovar
    p = spectrum.pmodcovar(xx, 7, scale_by_freq=False); p.plot()

    # correlogram
    p = spectrum.pcorrelogram(xx, lag=60, NFFT=512, scale_by_freq=False); p.plot()

    # minvar
    p = spectrum.pminvar(xx, 7, NFFT=256, scale_by_freq=False); p.plot()

    # pmusic
    p = spectrum.pmusic(xx, 10,4, scale_by_freq=False); p.plot()

    # pev
    p = spectrum.pev(xx, 10, 4, scale_by_freq=False); p.plot()

    # periodogram
    p = spectrum.Periodogram(xx, scale_by_freq=False); p.plot()

    # BUGFIX: `legend` was called unqualified while every other pylab call in
    # this function is prefixed; use the pylab namespace consistently.
    pylab.legend(["MA 32", "pyule 7", "pburg 7", "pcovar", "pmodcovar",
        "correlogram", "minvar", "pmusic", "pev", "periodgram"])

    pylab.ylim([-80,80])
def tf2zp(b, a):
    """Convert transfer function filter parameters to zero-pole-gain form.

    Converts ``f(x)=sum(b*x^n)/sum(a*x^n)`` to the factored form
    ``f(x)=g*prod(1-z*x)/prod(1-p*x)``.

    :param b: numerator coefficients
    :param a: denominator coefficients
    :return: z (zeros), p (poles), g (gain)
    :raises AssertionError: if *b* and *a* have different lengths

    .. warning:: b and a must have the same length.

    ::

        from spectrum import tf2zp
        b = [2,3,0]
        a = [1, 0.4, 1]
        [z,p,k] = tf2zp(b,a)            % Obtain zero-pole-gain form
        z =
            1.5
            0
        p =
            -0.2000 + 0.9798i
            -0.2000 - 0.9798i
        k =
            2

    .. todo:: See if tf2ss followed by ss2zp gives better results. These
        are available from the control system toolbox. Note that
        the control systems toolbox doesn't bother, but instead uses

    .. seealso:: scipy.signal.tf2zpk, which gives the same results but uses a
        different algorithm (z^-1 instead of z).
    """
    from numpy import roots
    assert len(b) == len(a), "length of the vectors a and b must be identical. fill with zeros if needed."
    # gain is the ratio of the leading coefficients
    gain = b[0] / a[0]
    poles = roots(a)
    zeros = roots(b)
    return zeros, poles, gain
def eqtflength(b, a):
    """Given two lists or arrays, zero-pad the shortest one.

    :param b: list or array (e.g. numerator coefficients)
    :param a: list or array (e.g. denominator coefficients)
    :return: the pair ``(b, a)`` where the shorter input has been padded
        with zeros so both have the same length.  Lists are extended in
        place; inputs without an ``extend`` method (e.g. numpy arrays)
        are padded into a new array via :func:`numpy.append`.

    .. doctest::

        >>> from spectrum.transfer import eqtflength
        >>> a = [1,2]
        >>> b = [1,2,3,4]
        >>> a, b, = eqtflength(a,b)
    """
    d = abs(len(b) - len(a))
    if d != 0:
        if len(a) > len(b):
            try:
                b.extend([0.] * d)
            except AttributeError:
                # numpy arrays have no extend(); pad a copy instead
                b = np.append(b, [0] * d)
        else:
            try:
                a.extend([0.] * d)
            except AttributeError:
                a = np.append(a, [0] * d)
    return b, a
def tf2zpk(b, a):
    """Return zero, pole, gain (z, p, k) representation from a numerator,
    denominator representation of a linear filter.

    :param ndarray b: numerator polynomial.
    :param ndarray a: denominator polynomial.
    :return:
        * z : ndarray Zeros of the transfer function.
        * p : ndarray Poles of the transfer function.
        * k : float System gain.

    If some values of b are too close to 0, they are removed. In that case, a
    BadCoefficients warning is emitted.

    .. doctest::

        >>> import scipy.signal
        >>> from spectrum.transfer import tf2zpk
        >>> [b, a] = scipy.signal.butter(3.,.4)
        >>> z, p ,k = tf2zpk(b,a)

    .. seealso:: :func:`zpk2tf`
    .. note:: wrapper of scipy function tf2zpk
    """
    import scipy.signal
    z, p, k = scipy.signal.tf2zpk(b, a)
    return z, p, k
def ss2zpk(a, b, c, d, input=0):
    """Convert a state-space system to zero-pole-gain representation.

    :param A: ndarray State-space representation of linear system.
    :param B: ndarray State-space representation of linear system.
    :param C: ndarray State-space representation of linear system.
    :param D: ndarray State-space representation of linear system.
    :param int input: optional For multiple-input systems, the input to use.
    :return:
        * z, p : sequence Zeros and poles.
        * k : float System gain.

    .. note:: wrapper of scipy function ss2zpk
    """
    from scipy.signal import ss2zpk as _ss2zpk
    return _ss2zpk(a, b, c, d, input=input)
def zpk2tf(z, p, k):
    r"""Return polynomial transfer function representation from zeros and poles

    :param ndarray z: Zeros of the transfer function.
    :param ndarray p: Poles of the transfer function.
    :param float k: System gain.
    :return:
        b : ndarray Numerator polynomial.
        a : ndarray Denominator polynomial.

    :func:`zpk2tf` forms transfer function polynomials from the zeros, poles,
    and gains of a system in factored form.  It finds a rational transfer
    function

    .. math:: \frac{B(s)}{A(s)} = \frac{b_1 s^{n-1}+\dots b_{n-1}s+b_n}{a_1 s^{m-1}+\dots a_{m-1}s+a_m}

    given a system in factored transfer function form

    .. math:: H(s) = \frac{Z(s)}{P(s)} = k \frac{(s-z_1)(s-z_2)\dots(s-z_m)}{(s-p_1)(s-p_2)\dots(s-p_n)}

    with p the pole locations and z the zero locations.  The gains for each
    numerator transfer function are in vector k.  The zeros and poles must be
    real or come in complex conjugate pairs.  The denominator coefficients
    are returned in row vector a and the numerator coefficients in matrix b,
    which has as many rows as there are columns of z.  Inf values can be used
    as place holders in z if some columns have fewer zeros than others.

    .. note:: wrapper of scipy function zpk2tf
    """
    from scipy.signal import zpk2tf as _zpk2tf
    num, den = _zpk2tf(z, p, k)
    return num, den
def zpk2ss(z, p, k):
    """Zero-pole-gain representation to state-space representation

    :param sequence z,p: Zeros and poles.
    :param float k: System gain.
    :return:
        * A, B, C, D : ndarray State-space matrices.

    .. note:: wrapper of scipy function zpk2ss
    """
    from scipy.signal import zpk2ss as _zpk2ss
    A, B, C, D = _zpk2ss(z, p, k)
    return A, B, C, D
def create_window(N, name=None, **kargs):
    r"""Returns the N-point window given a valid name

    :param int N: window size
    :param str name: window name (default is *rectangular*). Valid names
        are stored in :func:`~spectrum.window.window_names`.
    :param kargs: optional arguments are:

        * *beta*: argument of the :func:`window_kaiser` function (default is 8.6)
        * *attenuation*: argument of the :func:`window_chebwin` function (default is 50dB)
        * *alpha*: argument of the
            1. :func:`window_gaussian` function (default is 2.5)
            2. :func:`window_blackman` function (default is 0.16)
            3. :func:`window_poisson` function (default is 2)
            4. :func:`window_cauchy` function (default is 3)
        * *mode*: argument :func:`window_flattop` function (default is *symmetric*, can be *periodic*)
        * *r*: argument of the :func:`window_tukey` function (default is 0.5).

    The following windows have been simply wrapped from existing librairies like
    NumPy:

        * **Rectangular**: :func:`window_rectangle`,
        * **Bartlett** or Triangular: see :func:`window_bartlett`,
        * **Hanning** or Hann: see :func:`window_hann`,
        * **Hamming**: see :func:`window_hamming`,
        * **Kaiser**: see :func:`window_kaiser`,
        * **chebwin**: see :func:`window_chebwin`.

    The following windows have been implemented from scratch:

        * **Blackman**: See :func:`window_blackman`
        * **Bartlett-Hann** : see :func:`window_bartlett_hann`
        * **cosine or sine**: see :func:`window_cosine`
        * **gaussian**: see :func:`window_gaussian`
        * **Bohman**: see :func:`window_bohman`
        * **Lanczos or sinc**: see :func:`window_lanczos`
        * **Blackman Harris**: see :func:`window_blackman_harris`
        * **Blackman Nuttall**: see :func:`window_blackman_nuttall`
        * **Nuttall**: see :func:`window_nuttall`
        * **Tukey**: see :func:`window_tukey`
        * **Parzen**: see :func:`window_parzen`
        * **Flattop**: see :func:`window_flattop`
        * **Riesz**: see :func:`window_riesz`
        * **Riemann**: see :func:`window_riemann`
        * **Poisson**: see :func:`window_poisson`
        * **Poisson-Hanning**: see :func:`window_poisson_hanning`

    .. todo:: on request taylor, potter, Bessel, expo,
        rife-vincent, Kaiser-Bessel derived (KBD)

    .. plot::
        :width: 80%
        :include-source:

        from pylab import plot, legend
        from spectrum import create_window

        data = create_window(51, 'hamming')
        plot(data, label='hamming')
        data = create_window(51, 'kaiser')
        plot(data, label='kaiser')
        legend()

    .. plot::
        :width: 80%
        :include-source:

        from pylab import plot, log10, linspace, fft, clip
        from spectrum import create_window, fftshift

        A = fft(create_window(51, 'hamming'), 2048) / 25.5
        mag = abs(fftshift(A))
        freq = linspace(-0.5,0.5,len(A))
        response = 20 * log10(mag)
        mindB = -60
        response = clip(response,mindB,100)
        plot(freq, response)

    .. seealso:: :func:`window_visu`, :func:`Window`, :mod:`spectrum.dpss`
    """
    # default to a rectangular window; names are matched case-insensitively
    if name is None:
        name = 'rectangle'
    name = name.lower()
    assert name in list(window_names.keys()), \
        """window name %s not implemented or incorrect. Try to use one of %s"""\
        % (name, window_names)

    # window_names maps each name to the window function's name as a string
    # (presumably a "window_*" callable defined in this module — eval turns
    # it into the callable itself)
    # create the function name
    f = eval(window_names[name])

    # map of the windows that accept an optional keyword argument, with each
    # argument's default value pulled from the target function's signature
    # NOTE(review): 'taylor' maps both 'nbar' and 'sll' to __defaults__[0];
    # 'sll' presumably should read __defaults__[1] — harmless today because
    # the default is only used below as a fallback for keys the user already
    # provided, but worth confirming.
    windows_with_parameters = \
        {'kaiser': {'beta': eval(window_names['kaiser']).__defaults__[0]},
         'blackman': {'alpha': eval(window_names['blackman']).__defaults__[0]},
         'cauchy': {'alpha': eval(window_names['cauchy']).__defaults__[0]},
         'flattop': {'mode': eval(window_names['flattop']).__defaults__[0]},
         'gaussian': {'alpha': eval(window_names['gaussian']).__defaults__[0]},
         'chebwin': {'attenuation':eval(window_names['chebwin']).__defaults__[0]},
         'tukey': {'r':eval(window_names['tukey']).__defaults__[0]},
         'poisson': {'alpha': eval(window_names['poisson']).__defaults__[0]},
         'poisson_hanning': {'alpha':
                             eval(window_names['poisson_hanning']).__defaults__[0]},
         'taylor': {'nbar': eval(window_names['taylor']).__defaults__[0],
                    'sll': eval(window_names['taylor']).__defaults__[0]},
         }

    if name not in list(windows_with_parameters.keys()):
        if len(kargs) == 0:
            # no parameters, so we directly call the function
            w = f(N)
        else:
            # this window takes no keyword arguments, so any provided kargs
            # are necessarily invalid
            raise ValueError("""
            Parameters do not match any of the window. The window provided
            do not expect any parameters. Try to remove the parameters""")
    elif name in list(windows_with_parameters.keys()):
        # user optional parameters are provided, scan them:
        dargs = {}
        for arg in list(kargs.keys()):
            # check that the parameters are valid, and obtain the default value
            try:
                default = windows_with_parameters[name][arg]
            except:
                raise ValueError("""
                    Invalid optional argument (%s) for %s window.
                    Valid optional arguments are (%s)""" % \
                    (arg, name, list(windows_with_parameters[name].keys())))
            # add the user parameter to the list of parameters
            dargs[arg] = kargs.get(arg, default)
        # call the proper function with the optional arguments
        w = f(N, **dargs)

    return w
def enbw(data):
    r"""Computes the equivalent noise bandwidth of a window

    .. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2}

    :param data: the window samples
    :return: the ENBW value (1.0 for a rectangular window)

    .. doctest::

        >>> from spectrum import create_window, enbw
        >>> w = create_window(64, 'rectangular')
        >>> enbw(w)
        1.0

    The following table contains the ENBW values for some of the
    implemented windows in this module (with N=16384). They have been
    double checked against litterature (Source: [Harris]_, [Marple]_).
    If not present, it means that it has not been checked.

    =================== ============ =============
    name                 ENBW        litterature
    =================== ============ =============
    rectangular          1.           1.
    triangle             1.3334       1.33
    Hann                 1.5001       1.5
    Hamming              1.3629       1.36
    blackman             1.7268       1.73
    kaiser               1.7
    blackmanharris,4     2.004        2.
    riesz                1.2000       1.2
    riemann              1.32         1.3
    parzen               1.917        1.92
    tukey 0.25           1.102        1.1
    bohman               1.7858       1.79
    poisson 2            1.3130       1.3
    hanningpoisson 0.5   1.609        1.61
    cauchy               1.489        1.48
    lanczos              1.3
    =================== ============ =============
    """
    npoints = len(data)
    # ratio of the window's energy to the square of its coherent (DC) gain
    energy = np.sum(data ** 2)
    coherent_gain = np.sum(data)
    return npoints * energy / coherent_gain ** 2
def _kaiser(n, beta):
"""Independant Kaiser window
For the definition of the Kaiser window, see A. V. Oppenheim & R. W. Schafer, "Discrete-Time Signal Processing".
The continuous version of width n centered about x=0 is:
.. note:: 2 times slower than scipy.kaiser
"""
from scipy.special import iv as besselI
m = n - 1
k = arange(0, m)
k = 2. * beta / m * sqrt (k * (m - k))
w = besselI (0, k) / besselI (0, beta)
return w |
def window_visu(N=51, name='hamming', **kargs):
    """A Window visualisation tool

    :param N: length of the window
    :param name: name of the window
    :param NFFT: padding used by the FFT
    :param mindB: the minimum frequency power in dB
    :param maxdB: the maximum frequency power in dB
    :param kargs: optional arguments passed to :func:`create_window`

    This function plots the window shape and its equivalent in the Fourier
    domain.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'kaiser', beta=8.)
    """
    # pop the display options; whatever remains is forwarded to the window
    mindB = kargs.pop('mindB', -100)
    maxdB = kargs.pop('maxdB', None)
    norm = kargs.pop('norm', True)

    # build the window object and show its time/frequency representations
    window = Window(N, name, **kargs)
    window.plot_time_freq(mindB=mindB, maxdB=maxdB, norm=norm)
def window_kaiser(N, beta=8.6, method='numpy'):
    r"""Kaiser window

    :param N: window length
    :param beta: kaiser parameter (default is 8.6)

    To obtain a Kaiser window that designs an FIR filter with
    sidelobe attenuation of :math:`\alpha` dB, use the following :math:`\beta` where
    :math:`\beta = \pi \alpha`.

    .. math::

        w_n = \frac{I_0\left(\pi\alpha\sqrt{1-\left(\frac{2n}{M}-1\right)^2}\right)} {I_0(\pi \alpha)}

    where

      * :math:`I_0` is the zeroth order Modified Bessel function of the first kind.
      * :math:`\alpha` is a real number that determines the shape of the
        window. It determines the trade-off between main-lobe width and side
        lobe level.
      * the length of the sequence is N=M+1.

    The Kaiser window can approximate many other windows by varying
    the :math:`\beta` parameter:

    ===== ========================
    beta  Window shape
    ===== ========================
    0     Rectangular
    5     Similar to a Hamming
    6     Similar to a Hanning
    8.6   Similar to a Blackman
    ===== ========================

    .. plot::
        :width: 80%
        :include-source:

        from pylab import plot, legend, xlim
        from spectrum import window_kaiser
        N = 64
        for beta in [1,2,4,8,16]:
            plot(window_kaiser(N, beta), label='beta='+str(beta))
        xlim(0,N)
        legend()

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'kaiser', beta=8.)

    .. seealso:: numpy.kaiser, :func:`spectrum.window.create_window`
    """
    # degenerate single-point window
    if N == 1:
        return ones(1)
    # fall back on the pure-python implementation on request
    if method != 'numpy':
        return _kaiser(N, beta)
    from numpy import kaiser
    return kaiser(N, beta)
def window_blackman(N, alpha=0.16):
    r"""Blackman window

    :param N: window length

    .. math:: a_0 - a_1 \cos(\frac{2\pi n}{N-1}) +a_2 \cos(\frac{4\pi n }{N-1})

    with

    .. math::

        a_0 = (1-\alpha)/2, a_1=0.5, a_2=\alpha/2 \rm{\;and\; \alpha}=0.16

    When :math:`\alpha=0.16`, this is the unqualified Blackman window with
    :math:`a_0=0.48` and :math:`a_2=0.08`.

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'blackman')

    .. note:: Although Numpy implements a blackman window for :math:`\alpha=0.16`,
        this implementation is valid for any :math:`\alpha`.

    .. seealso:: numpy.blackman, :func:`create_window`, :class:`Window`
    """
    # degenerate single-point window
    if N == 1:
        return array([1.])

    coeff0 = (1. - alpha) / 2.
    coeff2 = alpha / 2.
    # theta spans [0, 2*pi] over the N points
    theta = 2. * pi * arange(0, N) / float(N - 1.)
    return coeff0 - 0.5 * cos(theta) + coeff2 * cos(2. * theta)
def window_gaussian(N, alpha=2.5):
    r"""Gaussian window

    :param N: window length

    .. math:: \exp^{-0.5 \left( \sigma\frac{n}{N/2} \right)^2}

    with :math:`\frac{N-1}{2}\leq n \leq \frac{N-1}{2}`.

    .. note:: N-1 is used to be in agreement with octave convention. The ENBW of
         1.4 is also in agreement with [Harris]_

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'gaussian', alpha=2.5)

    .. seealso:: scipy.signal.gaussian, :func:`create_window`
    """
    # symmetric sample positions centered on zero
    half_span = (N - 1) / 2.
    positions = linspace(-half_span, half_span, N)
    return exp(-0.5 * (alpha * positions / (N / 2.)) ** 2.)
def window_chebwin(N, attenuation=50):
    """Dolph-Chebyshev window

    :param N: window length
    :param attenuation: sidelobe attenuation in dB (default is 50)

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'chebwin', attenuation=50)

    .. seealso:: scipy.signal.windows.chebwin, :func:`create_window`, :class:`Window`
    """
    # scipy.signal.chebwin was deprecated in scipy 1.1 and removed in 1.13;
    # the window functions now live in scipy.signal.windows.
    from scipy.signal import windows
    return windows.chebwin(N, attenuation)
def window_cosine(N):
    r"""Cosine tapering window also known as sine window.

    :param N: window length

    .. math:: w(n) = \cos\left(\frac{\pi n}{N-1} - \frac{\pi}{2}\right) = \sin \left(\frac{\pi n}{N-1}\right)

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'cosine')

    .. seealso:: :func:`create_window`, :class:`Window`
    """
    # degenerate single-point window
    if N == 1:
        return ones(1)
    indices = arange(0, N)
    return sin(pi * indices / (N - 1.))
def window_lanczos(N):
    r"""Lanczos window also known as sinc window.

    :param N: window length

    .. math:: w(n) = sinc \left(  \frac{2n}{N-1} - 1 \right)

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'lanczos')

    .. seealso:: :func:`create_window`, :class:`Window`
    """
    # degenerate single-point window
    if N == 1:
        return ones(1)
    samples = linspace(-N / 2., N / 2., N)
    return sinc(2. * samples / (N - 1.))
def window_bartlett_hann(N):
    r"""Bartlett-Hann window

    :param N: window length

    .. math:: w(n) = a_0 - a_1 \left| \frac{n}{N-1} -\frac{1}{2}\right| - a_2 \cos \left( \frac{2\pi n}{N-1} \right)

    with :math:`a_0 = 0.62`, :math:`a_1 = 0.48` and :math:`a_2=0.38`

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'bartlett_hann')

    .. seealso:: :func:`create_window`, :class:`Window`
    """
    # degenerate single-point window
    if N == 1:
        return ones(1)
    n = arange(0, N)

    # standard Bartlett-Hann coefficients; both correction terms are
    # subtracted so the window is 0 at both ends and 1 at the center
    a0 = 0.62
    a1 = 0.48
    a2 = 0.38

    win = a0 - a1 *abs(n/(N-1.)-0.5) -a2 * cos(2*pi*n/(N-1.))
    return win
# NOTE: removed non-source residue ("Subsets and Splits / No community
# queries yet ...") accidentally pasted from a dataset-viewer web page;
# it was not valid Python and was never part of this module.